/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kthread.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/tracepoint.h>
#include <linux/device.h>
#include "internal.h"

/*
 * 4MB minimal write chunk size
 */
#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_CACHE_SHIFT - 10))
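/*
 * The constant above is a size in KB: shifting by (PAGE_CACHE_SHIFT - 10)
 * converts kilobytes to page-cache pages.  With the common 4KB page size
 * (PAGE_CACHE_SHIFT == 12) this is 4096 >> 2 == 1024 pages, i.e. 4MB.
 */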
/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_work {
	long nr_pages;
	struct super_block *sb;
	unsigned long *older_than_this;
	enum writeback_sync_modes sync_mode;
	unsigned int tagged_writepages:1;
	unsigned int for_kupdate:1;
	unsigned int range_cyclic:1;
	unsigned int for_background:1;
	unsigned int for_sync:1;	/* sync(2) WB_SYNC_ALL writeback */
	enum wb_reason reason;		/* why was writeback initiated? */

	struct list_head list;		/* pending work list */
	struct completion *done;	/* set if the caller waits */
};

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(BDI_writeback_running, &bdi->state);
}
EXPORT_SYMBOL(writeback_in_progress);

struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
#ifdef CONFIG_BLOCK
	if (sb_is_blkdev_sb(sb))
		return blk_get_backing_dev_info(I_BDEV(inode));
#endif
	return sb->s_bdi;
}
EXPORT_SYMBOL_GPL(inode_to_bdi);

static inline struct inode *wb_inode(struct list_head *head)
{
	return list_entry(head, struct inode, i_wb_list);
}

/*
 * Include the creation of the trace points after defining the
 * wb_writeback_work structure and inline functions so that the definition
 * remains local to this file.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(wbc_writepage);

static void bdi_wakeup_thread(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi->wb_lock);
	if (test_bit(BDI_registered, &bdi->state))
		mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
	spin_unlock_bh(&bdi->wb_lock);
}

static void bdi_queue_work(struct backing_dev_info *bdi,
			   struct wb_writeback_work *work)
{
	trace_writeback_queue(bdi, work);

	spin_lock_bh(&bdi->wb_lock);
	if (!test_bit(BDI_registered, &bdi->state)) {
		if (work->done)
			complete(work->done);
		goto out_unlock;
	}
	list_add_tail(&work->list, &bdi->work_list);
	mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
out_unlock:
	spin_unlock_bh(&bdi->wb_lock);
}

static void
__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
		      bool range_cyclic, enum wb_reason reason)
{
	struct wb_writeback_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wakeup the thread for old dirty data writeback
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		trace_writeback_nowork(bdi);
		bdi_wakeup_thread(bdi);
		return;
	}

	work->sync_mode	= WB_SYNC_NONE;
	work->nr_pages	= nr_pages;
	work->range_cyclic = range_cyclic;
	work->reason	= reason;

	bdi_queue_work(bdi, work);
}
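/*
 * Life cycle of the work item allocated above, for reference: it is
 * queued on bdi->work_list, dequeued by the flusher thread via
 * get_next_work_item(), executed by wb_writeback(), and finally either
 * completed (if work->done is set) or kfree()d in wb_do_writeback().
 * GFP_ATOMIC is used because callers may not be able to sleep here,
 * e.g. wakeup_flusher_threads() calls this under rcu_read_lock().
 */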
/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback; the IO is merely
 *   started, and we make no guarantees about its completion. Caller
 *   need not hold the sb s_umount semaphore.
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
			enum wb_reason reason)
{
	__bdi_start_writeback(bdi, nr_pages, true, reason);
}

/**
 * bdi_start_background_writeback - start background writeback
 * @bdi: the backing device to write from
 *
 * Description:
 *   This makes sure WB_SYNC_NONE background writeback happens. When
 *   this function returns, it is only guaranteed that for the given BDI
 *   some IO is happening if we are over the background dirty threshold.
 *   Caller need not hold the sb s_umount semaphore.
 */
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
	/*
	 * We just wake up the flusher thread. It will perform background
	 * writeback as soon as there is no other work to do.
	 */
	trace_writeback_wake_background(bdi);
	bdi_wakeup_thread(bdi);
}
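/*
 * Contrast between the two entry points above: bdi_start_writeback()
 * allocates and queues an explicit work item with a page budget, while
 * bdi_start_background_writeback() is just a wakeup - the flusher's
 * wb_check_background_flush() then decides, based on the dirty
 * thresholds, whether any background writeback is actually needed.
 * That keeps frequent callers on the page-dirtying path from having to
 * allocate a work item per call.
 */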
/*
 * Remove the inode from the writeback list it is on.
 */
void inode_wb_list_del(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	spin_lock(&bdi->wb.list_lock);
	list_del_init(&inode->i_wb_list);
	spin_unlock(&bdi->wb.list_lock);
}

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = wb_inode(wb->b_dirty.next);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_wb_list, &wb->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	list_move(&inode->i_wb_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	inode->i_state &= ~I_SYNC;
	/* If inode is clean and unused, put it into LRU now... */
	inode_add_lru(inode);
	/* Waiters must see I_SYNC cleared before being woken up */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in the distant
	 * past.  This test is necessary to prevent such wrapped-around
	 * relative times from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}
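/*
 * Concrete example of the wraparound case above: on 32-bit, jiffies
 * wraps roughly every 49.7 days at HZ=1000.  An inode whose dirtied_when
 * was stamped shortly before a wrap can compare as "after" any recent
 * timestamp, so without the time_before_eq() check it would never look
 * expired and everything queued behind it could be stalled.
 */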
/*
 * Move expired (dirtied before work->older_than_this) dirty inodes from
 * @delaying_queue to @dispatch_queue.
 */
static int move_expired_inodes(struct list_head *delaying_queue,
			       struct list_head *dispatch_queue,
			       struct wb_writeback_work *work)
{
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;
	int moved = 0;

	while (!list_empty(delaying_queue)) {
		inode = wb_inode(delaying_queue->prev);
		if (work->older_than_this &&
		    inode_dirtied_after(inode, *work->older_than_this))
			break;
		list_move(&inode->i_wb_list, &tmp);
		moved++;
		if (sb_is_blkdev_sb(inode->i_sb))
			continue;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		goto out;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		sb = wb_inode(tmp.prev)->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = wb_inode(pos);
			if (inode->i_sb == sb)
				list_move(&inode->i_wb_list, dispatch_queue);
		}
	}
out:
	return moved;
}
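/*
 * Note on the second pass above: when the expired inodes span more than
 * one superblock, tmp is drained so that all inodes of one sb land on
 * @dispatch_queue contiguously.  That lets writeback_sb_inodes() work
 * through a single superblock at a time instead of bouncing between
 * them.  Blockdev inodes are deliberately excluded from triggering the
 * sort.  The return value (number of inodes moved) feeds the
 * writeback_queue_io tracepoint.
 */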
/*
 * Queue all expired dirty inodes for io, eldest first.
 * Before
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    gf         edc     BA
 * After
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    g          fBAedc
 *                                      |
 *                                      +--> dequeue for IO
 */
static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
{
	int moved;
	assert_spin_locked(&wb->list_lock);
	list_splice_init(&wb->b_more_io, &wb->b_io);
	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, work);
	trace_writeback_queue_io(wb, work, moved);
}

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) {
		trace_writeback_write_inode_start(inode, wbc);
		ret = inode->i_sb->s_op->write_inode(inode, wbc);
		trace_writeback_write_inode(inode, wbc);
		return ret;
	}
	return 0;
}

/*
 * Wait for writeback on an inode to complete. Called with i_lock held.
 * Caller must make sure inode cannot go away when we drop i_lock.
 */
static void __inode_wait_for_writeback(struct inode *inode)
	__releases(inode->i_lock)
	__acquires(inode->i_lock)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode->i_lock);
		__wait_on_bit(wqh, &wq, bit_wait,
			      TASK_UNINTERRUPTIBLE);
		spin_lock(&inode->i_lock);
	}
}

/*
 * Wait for writeback on an inode to complete. Caller must have inode pinned.
 */
void inode_wait_for_writeback(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	__inode_wait_for_writeback(inode);
	spin_unlock(&inode->i_lock);
}
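/*
 * Note the contrast with inode_sleep_on_writeback() below: the helpers
 * above loop until I_SYNC is really clear and re-take i_lock, so they
 * need the inode pinned; the sleeping variant drops i_lock for good and
 * returns after a single wakeup, since its caller (the flusher) will
 * re-check the writeback lists anyway and must not assume the inode
 * still exists.
 */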
/*
 * Sleep until I_SYNC is cleared. This function must be called with i_lock
 * held and drops it. It is intended for callers not holding any inode
 * reference, so once i_lock is dropped the inode can go away.
 */
static void inode_sleep_on_writeback(struct inode *inode)
	__releases(inode->i_lock)
{
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	int sleep;

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	sleep = inode->i_state & I_SYNC;
	spin_unlock(&inode->i_lock);
	if (sleep)
		schedule();
	finish_wait(wqh, &wait);
}

/*
 * Find the proper writeback list for the inode depending on its current state
 * and possibly also change of its state while we were doing writeback.  Here
 * we handle things such as livelock prevention or fairness of writeback among
 * inodes. This function can be called only by the flusher thread - no one
 * else processes all inodes in writeback lists and requeueing inodes behind
 * the flusher thread's back can have unexpected consequences.
 */
static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
			  struct writeback_control *wbc)
{
	if (inode->i_state & I_FREEING)
		return;

	/*
	 * Sync livelock prevention. Each inode is tagged and synced in one
	 * shot. If still dirty, it will be redirty_tail()'ed below.  Update
	 * the dirty time to prevent enqueue and sync it again.
	 */
	if ((inode->i_state & I_DIRTY) &&
	    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
		inode->dirtied_when = jiffies;

	if (wbc->pages_skipped) {
		/*
		 * writeback is not making progress due to locked
		 * buffers. Skip this inode for now.
		 */
		redirty_tail(inode, wb);
		return;
	}

	if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		/*
		 * We didn't write back all the pages.  nfs_writepages()
		 * sometimes bales out without doing anything.
		 */
		if (wbc->nr_to_write <= 0) {
			/* Slice used up. Queue for next turn. */
			requeue_io(inode, wb);
		} else {
			/*
			 * Writeback blocked by something other than
			 * congestion. Delay the inode for some time to
			 * avoid spinning on the CPU (100% iowait)
			 * retrying writeback of the dirty page/inode
			 * that cannot be performed immediately.
			 */
			redirty_tail(inode, wb);
		}
	} else if (inode->i_state & I_DIRTY) {
		/*
		 * Filesystems can dirty the inode during writeback operations,
		 * such as delayed allocation during submission or metadata
		 * updates after data IO completion.
		 */
		redirty_tail(inode, wb);
	} else {
		/* The inode is clean. Remove from writeback lists. */
		list_del_init(&inode->i_wb_list);
	}
}
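/*
 * Summary of the dispositions made above:
 *  - I_FREEING                              -> left alone for the freer
 *  - pages skipped (locked buffers)         -> redirty_tail()
 *  - dirty pages left, write slice used up  -> requeue_io() to b_more_io
 *  - dirty pages left, slice not used up    -> redirty_tail()
 *  - only the inode itself still dirty      -> redirty_tail()
 *  - completely clean                       -> dropped from the lists
 */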
/*
 * Write out an inode and its dirty pages. Do not update the writeback list
 * linkage. That is left to the caller. The caller is also responsible for
 * setting the I_SYNC flag and calling inode_sync_complete() to clear it.
 */
static int
__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	long nr_to_write = wbc->nr_to_write;
	unsigned dirty;
	int ret;

	WARN_ON(!(inode->i_state & I_SYNC));

	trace_writeback_single_inode_start(inode, wbc, nr_to_write);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion. We don't do it for sync(2) writeback because it has
	 * a separate, external IO completion path and ->sync_fs for
	 * guaranteeing inode metadata is written back correctly.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/*
	 * Some filesystems may redirty the inode during the writeback
	 * due to delalloc, clear dirty metadata flags right before
	 * write_inode()
	 */
	spin_lock(&inode->i_lock);

	dirty = inode->i_state & I_DIRTY;
	inode->i_state &= ~I_DIRTY;

	/*
	 * Paired with smp_mb() in __mark_inode_dirty().  This allows
	 * __mark_inode_dirty() to test i_state without grabbing i_lock -
	 * either they see the I_DIRTY bits cleared or we see the dirtied
	 * inode.
	 *
	 * I_DIRTY_PAGES is always cleared together above even if @mapping
	 * still has dirty pages.  The flag is reinstated after smp_mb() if
	 * necessary.  This guarantees that either __mark_inode_dirty()
	 * sees clear I_DIRTY_PAGES or we see PAGECACHE_TAG_DIRTY.
	 */
	smp_mb();

	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		inode->i_state |= I_DIRTY_PAGES;

	spin_unlock(&inode->i_lock);

	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}
	trace_writeback_single_inode(inode, wbc, nr_to_write);
	return ret;
}
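/*
 * Order of operations above, for reference:
 *   1. do_writepages()      - push the dirty pages
 *   2. filemap_fdatawait()  - WB_SYNC_ALL only (and not for sync(2)):
 *                             wait on the data before the metadata
 *   3. clear I_DIRTY, then re-set I_DIRTY_PAGES if the mapping is
 *      still tagged dirty
 *   4. write_inode()        - only if I_DIRTY_SYNC/I_DIRTY_DATASYNC
 *                             was set
 */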
/*
 * Write out an inode's dirty pages. Either the caller has an active reference
 * on the inode or the inode has I_WILL_FREE set.
 *
 * This function is designed for writing back one inode at a time, e.g. when
 * called from filesystem code. The flusher thread uses
 * __writeback_single_inode() instead and does more profound writeback list
 * handling in writeback_sb_inodes().
 */
static int
writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
		       struct writeback_control *wbc)
{
	int ret = 0;

	spin_lock(&inode->i_lock);
	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		if (wbc->sync_mode != WB_SYNC_ALL)
			goto out;
		/*
		 * It's a data-integrity sync. We must wait. Since callers hold
		 * an inode reference or the inode has I_WILL_FREE set, it
		 * cannot go away under us.
		 */
		__inode_wait_for_writeback(inode);
	}
	WARN_ON(inode->i_state & I_SYNC);
	/*
	 * Skip the inode if it is clean and we have no outstanding writeback
	 * in WB_SYNC_ALL mode. We don't want to mess with writeback lists in
	 * this function since the flusher thread may be doing, for example,
	 * sync in parallel and if we move the inode, it could get skipped. So
	 * here we make sure the inode is on some writeback list and leave it
	 * there unless we have completely cleaned the inode.
	 */
	if (!(inode->i_state & I_DIRTY) &&
	    (wbc->sync_mode != WB_SYNC_ALL ||
	     !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
		goto out;
	inode->i_state |= I_SYNC;
	spin_unlock(&inode->i_lock);

	ret = __writeback_single_inode(inode, wbc);

	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	/*
	 * If the inode is clean, remove it from writeback lists. Otherwise
	 * don't touch it. See the comment above for explanation.
	 */
	if (!(inode->i_state & I_DIRTY))
		list_del_init(&inode->i_wb_list);
	spin_unlock(&wb->list_lock);
	inode_sync_complete(inode);
out:
	spin_unlock(&inode->i_lock);
	return ret;
}
static long writeback_chunk_size(struct backing_dev_info *bdi,
				 struct wb_writeback_work *work)
{
	long pages;

	/*
	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
	 * here avoids calling into writeback_inodes_wb() more than once.
	 *
	 * The intended call sequence for WB_SYNC_ALL writeback is:
	 *
	 *      wb_writeback()
	 *          writeback_sb_inodes()       <== called only once
	 *              write_cache_pages()     <== called once for each inode
	 *                  (quickly) tag currently dirty pages
	 *                  (maybe slowly) sync all tagged pages
	 */
	if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
		pages = LONG_MAX;
	else {
		pages = min(bdi->avg_write_bandwidth / 2,
			    global_dirty_limit / DIRTY_SCOPE);
		pages = min(pages, work->nr_pages);
		pages = round_down(pages + MIN_WRITEBACK_PAGES,
				   MIN_WRITEBACK_PAGES);
	}

	return pages;
}
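/*
 * Notes on the WB_SYNC_NONE sizing above: the chunk is bounded by half
 * the device's measured write bandwidth and by a fixed fraction of the
 * global dirty limit (global_dirty_limit / DIRTY_SCOPE), capped at the
 * work's remaining budget.  The round_down(pages + MIN_WRITEBACK_PAGES,
 * MIN_WRITEBACK_PAGES) keeps chunks aligned to multiples of
 * MIN_WRITEBACK_PAGES and guarantees a floor of one minimal (4MB)
 * chunk, so slow devices still get reasonably sized IO.
 */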
/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * Return the number of pages and/or inodes written.
 */
static long writeback_sb_inodes(struct super_block *sb,
				struct bdi_writeback *wb,
				struct wb_writeback_work *work)
{
	struct writeback_control wbc = {
		.sync_mode		= work->sync_mode,
		.tagged_writepages	= work->tagged_writepages,
		.for_kupdate		= work->for_kupdate,
		.for_background		= work->for_background,
		.for_sync		= work->for_sync,
		.range_cyclic		= work->range_cyclic,
		.range_start		= 0,
		.range_end		= LLONG_MAX,
	};
	unsigned long start_time = jiffies;
	long write_chunk;
	long wrote = 0;  /* count both pages and inodes */

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);

		if (inode->i_sb != sb) {
			if (work->sb) {
				/*
				 * We only want to write back data for this
				 * superblock, move all inodes not belonging
				 * to it back onto the dirty list.
				 */
				redirty_tail(inode, wb);
				continue;
			}

			/*
			 * The inode belongs to a different superblock.
			 * Bounce back to the caller to unpin this and
			 * pin the next superblock.
			 */
			break;
		}

		/*
		 * Don't bother with new inodes or inodes being freed: the
		 * first kind does not need periodic writeout yet, and for the
		 * latter kind writeout is handled by the freer.
		 */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			redirty_tail(inode, wb);
			continue;
		}
		if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
			/*
			 * If this inode is locked for writeback and we are not
			 * doing writeback-for-data-integrity, move it to
			 * b_more_io so that writeback can proceed with the
			 * other inodes on s_io.
			 *
			 * We'll have another go at writing back this inode
			 * when we have completed a full scan of b_io.
			 */
			spin_unlock(&inode->i_lock);
			requeue_io(inode, wb);
			trace_writeback_sb_inodes_requeue(inode);
			continue;
		}
		spin_unlock(&wb->list_lock);

		/*
		 * We already requeued the inode if it had I_SYNC set and we
		 * are doing WB_SYNC_NONE writeback. So this catches only the
		 * WB_SYNC_ALL case.
		 */
		if (inode->i_state & I_SYNC) {
			/* Wait for I_SYNC. This function drops i_lock... */
			inode_sleep_on_writeback(inode);
			/* Inode may be gone, start again */
			spin_lock(&wb->list_lock);
			continue;
		}
		inode->i_state |= I_SYNC;
		spin_unlock(&inode->i_lock);

		write_chunk = writeback_chunk_size(wb->bdi, work);
		wbc.nr_to_write = write_chunk;
		wbc.pages_skipped = 0;

		/*
		 * We use I_SYNC to pin the inode in memory. While it is set
		 * evict_inode() will wait so the inode cannot be freed.
		 */
		__writeback_single_inode(inode, &wbc);

		work->nr_pages -= write_chunk - wbc.nr_to_write;
		wrote += write_chunk - wbc.nr_to_write;
		spin_lock(&wb->list_lock);
		spin_lock(&inode->i_lock);
		if (!(inode->i_state & I_DIRTY))
			wrote++;
		requeue_inode(inode, wb, &wbc);
		inode_sync_complete(inode);
		spin_unlock(&inode->i_lock);
		cond_resched_lock(&wb->list_lock);
		/*
		 * bail out to wb_writeback() often enough to check
		 * background threshold and other termination conditions.
		 */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	return wrote;
}
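/*
 * Locking in the loop above, for reference: wb->list_lock is held while
 * walking b_io and is always taken before inode->i_lock.  It is dropped
 * around the actual __writeback_single_inode() call, so page IO never
 * happens under the list lock, and re-taken (with cond_resched_lock())
 * before the inode is requeued.
 */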
static long __writeback_inodes_wb(struct bdi_writeback *wb,
				  struct wb_writeback_work *work)
{
	unsigned long start_time = jiffies;
	long wrote = 0;

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);
		struct super_block *sb = inode->i_sb;

		if (!grab_super_passive(sb)) {
			/*
			 * grab_super_passive() may fail consistently due to
			 * s_umount being grabbed by someone else. Don't use
			 * requeue_io() to avoid busy retrying the inode/sb.
			 */
			redirty_tail(inode, wb);
			continue;
		}
		wrote += writeback_sb_inodes(sb, wb, work);
		drop_super(sb);

		/* refer to the same tests at the end of writeback_sb_inodes */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	/* Leave any unwritten inodes on b_io */
	return wrote;
}
static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
				enum wb_reason reason)
{
	struct wb_writeback_work work = {
		.nr_pages	= nr_pages,
		.sync_mode	= WB_SYNC_NONE,
		.range_cyclic	= 1,
		.reason		= reason,
	};

	spin_lock(&wb->list_lock);
	if (list_empty(&wb->b_io))
		queue_io(wb, &work);
	__writeback_inodes_wb(wb, &work);
	spin_unlock(&wb->list_lock);

	return nr_pages - work.nr_pages;
}

static bool over_bground_thresh(struct backing_dev_info *bdi)
{
	unsigned long background_thresh, dirty_thresh;

	global_dirty_limits(&background_thresh, &dirty_thresh);

	if (global_page_state(NR_FILE_DIRTY) +
	    global_page_state(NR_UNSTABLE_NFS) > background_thresh)
		return true;

	if (bdi_stat(bdi, BDI_RECLAIMABLE) >
				bdi_dirty_limit(bdi, background_thresh))
		return true;

	return false;
}
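/*
 * over_bground_thresh() has two triggers: a global one (dirty file
 * pages plus unstable NFS pages against the system-wide background
 * threshold) and a per-device one (this bdi's reclaimable pages against
 * its share of the threshold, as computed by bdi_dirty_limit()).  A
 * single device hoarding dirty pages can therefore start background
 * writeback even while the system as a whole is under its limit.
 */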
/*
 * Called under wb->list_lock. If there are multiple wb per bdi,
 * only the flusher working on the first wb should do it.
 */
static void wb_update_bandwidth(struct bdi_writeback *wb,
				unsigned long start_time)
{
	__bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, 0, start_time);
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space. So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_work *work)
{
	unsigned long wb_start = jiffies;
	long nr_pages = work->nr_pages;
	unsigned long oldest_jif;
	struct inode *inode;
	long progress;

	oldest_jif = jiffies;
	work->older_than_this = &oldest_jif;

	spin_lock(&wb->list_lock);
	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (work->nr_pages <= 0)
			break;

		/*
		 * Background writeout and kupdate-style writeback may
		 * run forever. Stop them if there is other work to do
		 * so that e.g. sync can proceed. They'll be restarted
		 * after the other works are all done.
		 */
		if ((work->for_background || work->for_kupdate) &&
		    !list_empty(&wb->bdi->work_list))
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (work->for_background && !over_bground_thresh(wb->bdi))
			break;

		/*
		 * Kupdate and background works are special and we want to
		 * include all inodes that need writing. Livelock avoidance is
		 * handled by these works yielding to any other work so we are
		 * safe.
		 */
		if (work->for_kupdate) {
			oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
		} else if (work->for_background)
			oldest_jif = jiffies;

		trace_writeback_start(wb->bdi, work);
		if (list_empty(&wb->b_io))
			queue_io(wb, work);
		if (work->sb)
			progress = writeback_sb_inodes(work->sb, wb, work);
		else
			progress = __writeback_inodes_wb(wb, work);
		trace_writeback_written(wb->bdi, work);

		wb_update_bandwidth(wb, wb_start);

		/*
		 * Did we write something? Try for more
		 *
		 * Dirty inodes are moved to b_io for writeback in batches.
		 * The completion of the current batch does not necessarily
		 * mean the overall work is done. So we keep looping as long
		 * as we made some progress on cleaning pages or inodes.
		 */
		if (progress)
			continue;
		/*
		 * No more inodes for IO, bail
		 */
		if (list_empty(&wb->b_more_io))
			break;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		if (!list_empty(&wb->b_more_io)) {
			trace_writeback_wait(wb->bdi, work);
			inode = wb_inode(wb->b_more_io.prev);
			spin_lock(&inode->i_lock);
			spin_unlock(&wb->list_lock);
			/* This function drops i_lock... */
			inode_sleep_on_writeback(inode);
			spin_lock(&wb->list_lock);
		}
	}
	spin_unlock(&wb->list_lock);

	return nr_pages - work->nr_pages;
}
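/*
 * To summarize the loop above, wb_writeback() stops when: the page
 * budget is spent; a background/kupdate pass is preempted by newly
 * queued work; background writeback drops below the dirty threshold;
 * or no progress was made and b_more_io is empty.  If inodes are stuck
 * on b_more_io (e.g. briefly locked for writeback elsewhere), it sleeps
 * on one of them instead of busylooping.
 */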
/*
 * Return the next wb_writeback_work struct that hasn't been processed yet.
 */
static struct wb_writeback_work *
get_next_work_item(struct backing_dev_info *bdi)
{
	struct wb_writeback_work *work = NULL;

	spin_lock_bh(&bdi->wb_lock);
	if (!list_empty(&bdi->work_list)) {
		work = list_entry(bdi->work_list.next,
				  struct wb_writeback_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_bh(&bdi->wb_lock);
	return work;
}
/*
 * Add in the number of potentially dirty inodes, because each inode
 * write can dirty pagecache in the underlying blockdev.
 */
static unsigned long get_nr_dirty_pages(void)
{
	return global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) +
		get_nr_dirty_inodes();
}

static long wb_check_background_flush(struct bdi_writeback *wb)
{
	if (over_bground_thresh(wb->bdi)) {

		struct wb_writeback_work work = {
			.nr_pages	= LONG_MAX,
			.sync_mode	= WB_SYNC_NONE,
			.for_background	= 1,
			.range_cyclic	= 1,
			.reason		= WB_REASON_BACKGROUND,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	/*
	 * When set to zero, disable periodic writeback
	 */
	if (!dirty_writeback_interval)
		return 0;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = get_nr_dirty_pages();

	if (nr_pages) {
		struct wb_writeback_work work = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
			.reason		= WB_REASON_PERIODIC,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}
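/*
 * Interval arithmetic above: dirty_writeback_interval (the
 * vm.dirty_writeback_centisecs sysctl) is in centiseconds, so the
 * "* 10" converts it to milliseconds for msecs_to_jiffies().  With the
 * default of 500 centisecs, old data is flushed roughly every 5
 * seconds; wb_writeback() applies the same conversion to
 * dirty_expire_interval when picking which inodes count as "old".
 */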
/*
 * Retrieve work items and do the writeback they describe
 */
static long wb_do_writeback(struct bdi_writeback *wb)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct wb_writeback_work *work;
	long wrote = 0;

	set_bit(BDI_writeback_running, &wb->bdi->state);
	while ((work = get_next_work_item(bdi)) != NULL) {

		trace_writeback_exec(bdi, work);

		wrote += wb_writeback(wb, work);

		/*
		 * Notify the caller of completion if this is a synchronous
		 * work item, otherwise just free it.
		 */
		if (work->done)
			complete(work->done);
		else
			kfree(work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);
	wrote += wb_check_background_flush(wb);
	clear_bit(BDI_writeback_running, &wb->bdi->state);

	return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * reschedules periodically and does kupdated style flushing.
 */
void bdi_writeback_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(to_delayed_work(work),
						struct bdi_writeback, dwork);
	struct backing_dev_info *bdi = wb->bdi;
	long pages_written;

	set_worker_desc("flush-%s", dev_name(bdi->dev));
	current->flags |= PF_SWAPWRITE;

	if (likely(!current_is_workqueue_rescuer() ||
		   !test_bit(BDI_registered, &bdi->state))) {
		/*
		 * The normal path.  Keep writing back @bdi until its
		 * work_list is empty.  Note that this path is also taken
		 * if @bdi is shutting down even when we're running off the
		 * rescuer as work_list needs to be drained.
		 */
		do {
			pages_written = wb_do_writeback(wb);
			trace_writeback_pages_written(pages_written);
		} while (!list_empty(&bdi->work_list));
	} else {
		/*
		 * bdi_wq can't get enough workers and we're running off
		 * the emergency worker.  Don't hog it.  Hopefully, 1024 is
		 * enough for efficient IO.
		 */
		pages_written = writeback_inodes_wb(&bdi->wb, 1024,
						    WB_REASON_FORKER_THREAD);
		trace_writeback_pages_written(pages_written);
	}

	if (!list_empty(&bdi->work_list))
		mod_delayed_work(bdi_wq, &wb->dwork, 0);
	else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
		bdi_wakeup_thread_delayed(bdi);

	current->flags &= ~PF_SWAPWRITE;
}
/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
{
	struct backing_dev_info *bdi;

	if (!nr_pages)
		nr_pages = get_nr_dirty_pages();

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;
		__bdi_start_writeback(bdi, nr_pages, false, reason);
	}
	rcu_read_unlock();
}

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}
110303ba3782SJens Axboe static noinline void block_dump___mark_inode_dirty(struct inode *inode)
110403ba3782SJens Axboe {
110503ba3782SJens Axboe 	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
110603ba3782SJens Axboe 		struct dentry *dentry;
110703ba3782SJens Axboe 		const char *name = "?";
110803ba3782SJens Axboe 
110903ba3782SJens Axboe 		dentry = d_find_alias(inode);
111003ba3782SJens Axboe 		if (dentry) {
111103ba3782SJens Axboe 			spin_lock(&dentry->d_lock);
111203ba3782SJens Axboe 			name = (const char *) dentry->d_name.name;
111303ba3782SJens Axboe 		}
111403ba3782SJens Axboe 		printk(KERN_DEBUG
111503ba3782SJens Axboe 		       "%s(%d): dirtied inode %lu (%s) on %s\n",
111603ba3782SJens Axboe 		       current->comm, task_pid_nr(current), inode->i_ino,
111703ba3782SJens Axboe 		       name, inode->i_sb->s_id);
111803ba3782SJens Axboe 		if (dentry) {
111903ba3782SJens Axboe 			spin_unlock(&dentry->d_lock);
112003ba3782SJens Axboe 			dput(dentry);
112103ba3782SJens Axboe 		}
112203ba3782SJens Axboe 	}
112303ba3782SJens Axboe }
112403ba3782SJens Axboe 
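/*
 * Usage note: the helper above only fires when the vm.block_dump sysctl
 * is enabled, e.g. "echo 1 > /proc/sys/vm/block_dump"; it then logs lines
 * of the following form (values hypothetical) at KERN_DEBUG level:
 *
 *   bash(1234): dirtied inode 5678 (foo.txt) on sda1
 */
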
112503ba3782SJens Axboe /**
112603ba3782SJens Axboe  * __mark_inode_dirty - internal function
112703ba3782SJens Axboe  * @inode: inode to mark
112803ba3782SJens Axboe  * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
112903ba3782SJens Axboe  * Mark an inode as dirty.  Callers should use mark_inode_dirty() or
113003ba3782SJens Axboe  * mark_inode_dirty_sync().
113103ba3782SJens Axboe  *
113203ba3782SJens Axboe  * Put the inode on the superblock's dirty list.
113303ba3782SJens Axboe  *
113403ba3782SJens Axboe  * CAREFUL! We mark it dirty unconditionally, but move it onto the
113503ba3782SJens Axboe  * dirty list only if it is hashed or if it refers to a blockdev.
113603ba3782SJens Axboe  * If it was not hashed, it will never be added to the dirty list
113703ba3782SJens Axboe  * even if it is later hashed, as it will have been marked dirty already.
113803ba3782SJens Axboe  *
113903ba3782SJens Axboe  * In short, make sure you hash any inodes _before_ you start marking
114003ba3782SJens Axboe  * them dirty.
114103ba3782SJens Axboe  *
114203ba3782SJens Axboe  * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
114303ba3782SJens Axboe  * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
114403ba3782SJens Axboe  * the kernel-internal blockdev inode represents the dirtying time of the
114503ba3782SJens Axboe  * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
114603ba3782SJens Axboe  * page->mapping->host, so the page-dirtying time is recorded in the internal
114703ba3782SJens Axboe  * blockdev inode.
114803ba3782SJens Axboe  */
114903ba3782SJens Axboe void __mark_inode_dirty(struct inode *inode, int flags)
115003ba3782SJens Axboe {
115103ba3782SJens Axboe 	struct super_block *sb = inode->i_sb;
1152253c34e9SArtem Bityutskiy 	struct backing_dev_info *bdi = NULL;
115303ba3782SJens Axboe 
115403ba3782SJens Axboe 	/*
115503ba3782SJens Axboe 	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
115603ba3782SJens Axboe 	 * dirty the inode itself
115703ba3782SJens Axboe 	 */
115803ba3782SJens Axboe 	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
11599fb0a7daSTejun Heo 		trace_writeback_dirty_inode_start(inode, flags);
11609fb0a7daSTejun Heo 
116103ba3782SJens Axboe 		if (sb->s_op->dirty_inode)
1162aa385729SChristoph Hellwig 			sb->s_op->dirty_inode(inode, flags);
11639fb0a7daSTejun Heo 
11649fb0a7daSTejun Heo 		trace_writeback_dirty_inode(inode, flags);
116503ba3782SJens Axboe 	}
116603ba3782SJens Axboe 
116703ba3782SJens Axboe 	/*
11689c6ac78eSTejun Heo 	 * Paired with smp_mb() in __writeback_single_inode() for the
11699c6ac78eSTejun Heo 	 * following lockless i_state test.  See there for details.
117003ba3782SJens Axboe 	 */
117103ba3782SJens Axboe 	smp_mb();
117203ba3782SJens Axboe 
117303ba3782SJens Axboe 	if ((inode->i_state & flags) == flags)
117403ba3782SJens Axboe 		return;
117503ba3782SJens Axboe 
117603ba3782SJens Axboe 	if (unlikely(block_dump))
117703ba3782SJens Axboe 		block_dump___mark_inode_dirty(inode);
117803ba3782SJens Axboe 
1179250df6edSDave Chinner 	spin_lock(&inode->i_lock);
118003ba3782SJens Axboe 	if ((inode->i_state & flags) != flags) {
118103ba3782SJens Axboe 		const int was_dirty = inode->i_state & I_DIRTY;
118203ba3782SJens Axboe 
118303ba3782SJens Axboe 		inode->i_state |= flags;
118403ba3782SJens Axboe 
118503ba3782SJens Axboe 		/*
118603ba3782SJens Axboe 		 * If the inode is being synced, just update its dirty state.
118703ba3782SJens Axboe 		 * The unlocker will place the inode on the appropriate
118803ba3782SJens Axboe 		 * superblock list, based upon its state.
118903ba3782SJens Axboe 		 */
119003ba3782SJens Axboe 		if (inode->i_state & I_SYNC)
1191250df6edSDave Chinner 			goto out_unlock_inode;
119203ba3782SJens Axboe 
119303ba3782SJens Axboe 		/*
119403ba3782SJens Axboe 		 * Only add valid (hashed) inodes to the superblock's
119503ba3782SJens Axboe 		 * dirty list.  Add blockdev inodes as well.
119603ba3782SJens Axboe 		 */
119703ba3782SJens Axboe 		if (!S_ISBLK(inode->i_mode)) {
11981d3382cbSAl Viro 			if (inode_unhashed(inode))
1199250df6edSDave Chinner 				goto out_unlock_inode;
120003ba3782SJens Axboe 		}
1201a4ffdde6SAl Viro 		if (inode->i_state & I_FREEING)
1202250df6edSDave Chinner 			goto out_unlock_inode;
120303ba3782SJens Axboe 
120403ba3782SJens Axboe 		/*
120503ba3782SJens Axboe 		 * If the inode was already on b_dirty/b_io/b_more_io, don't
120603ba3782SJens Axboe 		 * reposition it (that would break b_dirty time-ordering).
120703ba3782SJens Axboe 		 */
120803ba3782SJens Axboe 		if (!was_dirty) {
1209a66979abSDave Chinner 			bool wakeup_bdi = false;
1210253c34e9SArtem Bityutskiy 			bdi = inode_to_bdi(inode);
1211500b067cSJens Axboe 
1212146d7009SJunxiao Bi 			spin_unlock(&inode->i_lock);
1213146d7009SJunxiao Bi 			spin_lock(&bdi->wb.list_lock);
1214253c34e9SArtem Bityutskiy 			if (bdi_cap_writeback_dirty(bdi)) {
1215253c34e9SArtem Bityutskiy 				WARN(!test_bit(BDI_registered, &bdi->state),
1216253c34e9SArtem Bityutskiy 				     "bdi-%s not registered\n", bdi->name);
1217253c34e9SArtem Bityutskiy 
1218253c34e9SArtem Bityutskiy 				/*
1219253c34e9SArtem Bityutskiy 				 * If this is the first dirty inode for this
1220253c34e9SArtem Bityutskiy 				 * bdi, we have to wake-up the corresponding
1221253c34e9SArtem Bityutskiy 				 * bdi thread to make sure background
1222253c34e9SArtem Bityutskiy 				 * write-back happens later.
1223253c34e9SArtem Bityutskiy 				 */
1224253c34e9SArtem Bityutskiy 				if (!wb_has_dirty_io(&bdi->wb))
1225253c34e9SArtem Bityutskiy 					wakeup_bdi = true;
1226500b067cSJens Axboe 			}
122703ba3782SJens Axboe 
122803ba3782SJens Axboe 			inode->dirtied_when = jiffies;
12297ccf19a8SNick Piggin 			list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
1230f758eeabSChristoph Hellwig 			spin_unlock(&bdi->wb.list_lock);
1231253c34e9SArtem Bityutskiy 
1232253c34e9SArtem Bityutskiy 			if (wakeup_bdi)
12336467716aSArtem Bityutskiy 				bdi_wakeup_thread_delayed(bdi);
1234a66979abSDave Chinner 			return;
1235a66979abSDave Chinner 		}
1236a66979abSDave Chinner 	}
1237a66979abSDave Chinner out_unlock_inode:
1238a66979abSDave Chinner 	spin_unlock(&inode->i_lock);
1239a66979abSDave Chinner 
124003ba3782SJens Axboe }
124103ba3782SJens Axboe EXPORT_SYMBOL(__mark_inode_dirty);
124203ba3782SJens Axboe 
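/*
 * Example: the wrappers that callers are expected to use, mirroring the
 * inline helpers declared in include/linux/fs.h of this era.
 */
#if 0
static inline void mark_inode_dirty(struct inode *inode)
{
	__mark_inode_dirty(inode, I_DIRTY);
}

static inline void mark_inode_dirty_sync(struct inode *inode)
{
	__mark_inode_dirty(inode, I_DIRTY_SYNC);
}
#endif
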
1243b6e51316SJens Axboe static void wait_sb_inodes(struct super_block *sb)
124466f3b8e2SJens Axboe {
124538f21977SNick Piggin 	struct inode *inode, *old_inode = NULL;
124638f21977SNick Piggin 
124703ba3782SJens Axboe 	/*
124803ba3782SJens Axboe 	 * We need to be protected against the filesystem going from
124903ba3782SJens Axboe 	 * r/o to r/w or vice versa.
125003ba3782SJens Axboe 	 */
1251b6e51316SJens Axboe 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
125203ba3782SJens Axboe 
125355fa6091SDave Chinner 	spin_lock(&inode_sb_list_lock);
125466f3b8e2SJens Axboe 
125538f21977SNick Piggin 	/*
125638f21977SNick Piggin 	 * Data integrity sync.  Must wait for all pages under writeback,
125738f21977SNick Piggin 	 * because there may have been pages dirtied before our sync
125838f21977SNick Piggin 	 * call whose writeout had already started before we got here.
125938f21977SNick Piggin 	 * In that case the inode may not be on the dirty list, but
126038f21977SNick Piggin 	 * we still have to wait for that writeout.
126138f21977SNick Piggin 	 */
1262b6e51316SJens Axboe 	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
1263250df6edSDave Chinner 		struct address_space *mapping = inode->i_mapping;
126438f21977SNick Piggin 
1265250df6edSDave Chinner 		spin_lock(&inode->i_lock);
1266250df6edSDave Chinner 		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
1267250df6edSDave Chinner 		    (mapping->nrpages == 0)) {
1268250df6edSDave Chinner 			spin_unlock(&inode->i_lock);
126938f21977SNick Piggin 			continue;
1270250df6edSDave Chinner 		}
127138f21977SNick Piggin 		__iget(inode);
1272250df6edSDave Chinner 		spin_unlock(&inode->i_lock);
127355fa6091SDave Chinner 		spin_unlock(&inode_sb_list_lock);
127455fa6091SDave Chinner 
127538f21977SNick Piggin 		/*
127655fa6091SDave Chinner 		 * We hold a reference to 'inode' so it couldn't have been
127755fa6091SDave Chinner 		 * removed from s_inodes list while we dropped the
127855fa6091SDave Chinner 		 * inode_sb_list_lock.  We cannot iput the inode now as we can
127955fa6091SDave Chinner 		 * be holding the last reference and we cannot iput it under
128055fa6091SDave Chinner 		 * inode_sb_list_lock. So we keep the reference and iput it
128155fa6091SDave Chinner 		 * later.
128238f21977SNick Piggin 		 */
128338f21977SNick Piggin 		iput(old_inode);
128438f21977SNick Piggin 		old_inode = inode;
128538f21977SNick Piggin 
128638f21977SNick Piggin 		filemap_fdatawait(mapping);
128738f21977SNick Piggin 
128838f21977SNick Piggin 		cond_resched();
128938f21977SNick Piggin 
129055fa6091SDave Chinner 		spin_lock(&inode_sb_list_lock);
129138f21977SNick Piggin 	}
129255fa6091SDave Chinner 	spin_unlock(&inode_sb_list_lock);
129338f21977SNick Piggin 	iput(old_inode);
129466f3b8e2SJens Axboe }
12951da177e4SLinus Torvalds 
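/*
 * Example: the write-then-wait pairing that the loop above completes.
 * Writeout was already started elsewhere (see sync_inodes_sb() below), so
 * only filemap_fdatawait() is needed per inode here; a self-contained
 * caller would pair the two calls like this (sketch, illustrative name).
 */
#if 0
static int example_write_and_wait(struct address_space *mapping)
{
	int err = filemap_fdatawrite(mapping);	/* start writeout */
	int err2 = filemap_fdatawait(mapping);	/* wait for it to finish */

	return err ? err : err2;
}
#endif
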
1296d8a8559cSJens Axboe /**
12973259f8beSChris Mason  * writeback_inodes_sb_nr - writeback dirty inodes from given super_block
1298d8a8559cSJens Axboe  * @sb: the superblock
12993259f8beSChris Mason  * @nr: the number of pages to write
1300786228abSMarcos Paulo de Souza  * @reason: reason why some writeback work was initiated
13011da177e4SLinus Torvalds  *
1302d8a8559cSJens Axboe  * Start writeback on some inodes on this super_block. No guarantees are made
1303d8a8559cSJens Axboe  * on how many (if any) will be written, and this function does not wait
13043259f8beSChris Mason  * for IO completion of submitted IO.
13051da177e4SLinus Torvalds  */
13060e175a18SCurt Wohlgemuth void writeback_inodes_sb_nr(struct super_block *sb,
13070e175a18SCurt Wohlgemuth 			    unsigned long nr,
13080e175a18SCurt Wohlgemuth 			    enum wb_reason reason)
13091da177e4SLinus Torvalds {
131083ba7b07SChristoph Hellwig 	DECLARE_COMPLETION_ONSTACK(done);
131183ba7b07SChristoph Hellwig 	struct wb_writeback_work work = {
13123c4d7165SChristoph Hellwig 		.sb			= sb,
13133c4d7165SChristoph Hellwig 		.sync_mode		= WB_SYNC_NONE,
13146e6938b6SWu Fengguang 		.tagged_writepages	= 1,
131583ba7b07SChristoph Hellwig 		.done			= &done,
13163259f8beSChris Mason 		.nr_pages		= nr,
13170e175a18SCurt Wohlgemuth 		.reason			= reason,
13183c4d7165SChristoph Hellwig 	};
13190e3c9a22SJens Axboe 
13206eedc701SJan Kara 	if (sb->s_bdi == &noop_backing_dev_info)
13216eedc701SJan Kara 		return;
1322cf37e972SChristoph Hellwig 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
132383ba7b07SChristoph Hellwig 	bdi_queue_work(sb->s_bdi, &work);
132483ba7b07SChristoph Hellwig 	wait_for_completion(&done);
13251da177e4SLinus Torvalds }
13263259f8beSChris Mason EXPORT_SYMBOL(writeback_inodes_sb_nr);
13273259f8beSChris Mason 
13283259f8beSChris Mason /**
13293259f8beSChris Mason  * writeback_inodes_sb - writeback dirty inodes from given super_block
13303259f8beSChris Mason  * @sb: the superblock
1331786228abSMarcos Paulo de Souza  * @reason: reason why some writeback work was initiated
13323259f8beSChris Mason  *
13333259f8beSChris Mason  * Start writeback on some inodes on this super_block. No guarantees are made
13343259f8beSChris Mason  * on how many (if any) will be written, and this function does not wait
13353259f8beSChris Mason  * for IO completion of submitted IO.
13363259f8beSChris Mason  */
13370e175a18SCurt Wohlgemuth void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
13383259f8beSChris Mason {
13390e175a18SCurt Wohlgemuth 	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
13403259f8beSChris Mason }
1341d8a8559cSJens Axboe EXPORT_SYMBOL(writeback_inodes_sb);
1342d8a8559cSJens Axboe 
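/*
 * Example: a sketch of how sync_filesystem() in fs/sync.c of this era
 * pairs these helpers -- flush without waiting, or write and wait.
 * The function name here is illustrative.
 */
#if 0
static int example_sync_filesystem(struct super_block *sb, int wait)
{
	if (wait)
		sync_inodes_sb(sb);			/* WB_SYNC_ALL, waits */
	else
		writeback_inodes_sb(sb, WB_REASON_SYNC); /* no wait */

	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, wait);
	return __sync_blockdev(sb->s_bdev, wait);
}
#endif
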
1343d8a8559cSJens Axboe /**
134410ee27a0SMiao Xie  * try_to_writeback_inodes_sb_nr - try to start writeback if none underway
13453259f8beSChris Mason  * @sb: the superblock
13463259f8beSChris Mason  * @nr: the number of pages to write
134710ee27a0SMiao Xie  * @reason: reason why writeback was initiated
13483259f8beSChris Mason  *
134910ee27a0SMiao Xie  * Invoke writeback_inodes_sb_nr() if no writeback is currently underway.
13503259f8beSChris Mason  * Returns 1 if writeback was started, 0 if not.
13513259f8beSChris Mason  */
135210ee27a0SMiao Xie int try_to_writeback_inodes_sb_nr(struct super_block *sb,
13530e175a18SCurt Wohlgemuth 				  unsigned long nr,
13540e175a18SCurt Wohlgemuth 				  enum wb_reason reason)
13553259f8beSChris Mason {
135610ee27a0SMiao Xie 	if (writeback_in_progress(sb->s_bdi))
135710ee27a0SMiao Xie 		return 1;
135810ee27a0SMiao Xie 
135910ee27a0SMiao Xie 	if (!down_read_trylock(&sb->s_umount))
136010ee27a0SMiao Xie 		return 0;
136110ee27a0SMiao Xie 
13620e175a18SCurt Wohlgemuth 	writeback_inodes_sb_nr(sb, nr, reason);
13633259f8beSChris Mason 	up_read(&sb->s_umount);
13643259f8beSChris Mason 	return 1;
13653259f8beSChris Mason }
136610ee27a0SMiao Xie EXPORT_SYMBOL(try_to_writeback_inodes_sb_nr);
136710ee27a0SMiao Xie 
136810ee27a0SMiao Xie /**
136910ee27a0SMiao Xie  * try_to_writeback_inodes_sb - try to start writeback if none underway
137010ee27a0SMiao Xie  * @sb: the superblock
137110ee27a0SMiao Xie  * @reason: reason why some writeback work was initiated
137210ee27a0SMiao Xie  *
137310ee27a0SMiao Xie  * Implemented via try_to_writeback_inodes_sb_nr().
137410ee27a0SMiao Xie  * Returns 1 if writeback was started, 0 if not.
137510ee27a0SMiao Xie  */
137610ee27a0SMiao Xie int try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
137710ee27a0SMiao Xie {
137810ee27a0SMiao Xie 	return try_to_writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
137910ee27a0SMiao Xie }
138010ee27a0SMiao Xie EXPORT_SYMBOL(try_to_writeback_inodes_sb);
13813259f8beSChris Mason 
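/*
 * Example: a typical opportunistic caller.  ext4 of this era, for
 * instance, kicks writeback like this when delalloc free space runs low,
 * so dirty data gets flushed without blocking if a flush is already in
 * progress.  The helper name below is illustrative.
 */
#if 0
static void example_reclaim_delalloc_space(struct super_block *sb)
{
	/* harmless if a flusher is busy or s_umount cannot be taken */
	try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
}
#endif
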
13823259f8beSChris Mason /**
1383d8a8559cSJens Axboe  * sync_inodes_sb - sync sb inode pages
1384d8a8559cSJens Axboe  * @sb: the superblock
1385d8a8559cSJens Axboe  *
1386d8a8559cSJens Axboe  * This function writes and waits on any dirty inode belonging to this
13870dc83bd3SJan Kara  * super_block.
1388d8a8559cSJens Axboe  */
13890dc83bd3SJan Kara void sync_inodes_sb(struct super_block *sb)
1390d8a8559cSJens Axboe {
139183ba7b07SChristoph Hellwig 	DECLARE_COMPLETION_ONSTACK(done);
139283ba7b07SChristoph Hellwig 	struct wb_writeback_work work = {
13933c4d7165SChristoph Hellwig 		.sb		= sb,
13943c4d7165SChristoph Hellwig 		.sync_mode	= WB_SYNC_ALL,
13953c4d7165SChristoph Hellwig 		.nr_pages	= LONG_MAX,
13963c4d7165SChristoph Hellwig 		.range_cyclic	= 0,
139783ba7b07SChristoph Hellwig 		.done		= &done,
13980e175a18SCurt Wohlgemuth 		.reason		= WB_REASON_SYNC,
13997747bd4bSDave Chinner 		.for_sync	= 1,
14003c4d7165SChristoph Hellwig 	};
14013c4d7165SChristoph Hellwig 
14026eedc701SJan Kara 	/* Nothing to do? */
14036eedc701SJan Kara 	if (sb->s_bdi == &noop_backing_dev_info)
14046eedc701SJan Kara 		return;
1405cf37e972SChristoph Hellwig 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
1406cf37e972SChristoph Hellwig 
140783ba7b07SChristoph Hellwig 	bdi_queue_work(sb->s_bdi, &work);
140883ba7b07SChristoph Hellwig 	wait_for_completion(&done);
140983ba7b07SChristoph Hellwig 
1410b6e51316SJens Axboe 	wait_sb_inodes(sb);
1411d8a8559cSJens Axboe }
1412d8a8559cSJens Axboe EXPORT_SYMBOL(sync_inodes_sb);
14131da177e4SLinus Torvalds 
14141da177e4SLinus Torvalds /**
14151da177e4SLinus Torvalds  * write_inode_now - write an inode to disk
14161da177e4SLinus Torvalds  * @inode: inode to write to disk
14171da177e4SLinus Torvalds  * @sync: whether the write should be synchronous or not
14181da177e4SLinus Torvalds  *
14197f04c26dSAndrea Arcangeli  * This function commits an inode to disk immediately if it is dirty. This is
14207f04c26dSAndrea Arcangeli  * primarily needed by knfsd.
14217f04c26dSAndrea Arcangeli  *
14227f04c26dSAndrea Arcangeli  * The caller must either have a ref on the inode or must have set I_WILL_FREE.
14231da177e4SLinus Torvalds  */
14241da177e4SLinus Torvalds int write_inode_now(struct inode *inode, int sync)
14251da177e4SLinus Torvalds {
1426f758eeabSChristoph Hellwig 	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
14271da177e4SLinus Torvalds 	struct writeback_control wbc = {
14281da177e4SLinus Torvalds 		.nr_to_write = LONG_MAX,
142918914b18SMike Galbraith 		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
1430111ebb6eSOGAWA Hirofumi 		.range_start = 0,
1431111ebb6eSOGAWA Hirofumi 		.range_end = LLONG_MAX,
14321da177e4SLinus Torvalds 	};
14331da177e4SLinus Torvalds 
14341da177e4SLinus Torvalds 	if (!mapping_cap_writeback_dirty(inode->i_mapping))
143549364ce2SAndrew Morton 		wbc.nr_to_write = 0;
14361da177e4SLinus Torvalds 
14371da177e4SLinus Torvalds 	might_sleep();
14384f8ad655SJan Kara 	return writeback_single_inode(inode, wb, &wbc);
14391da177e4SLinus Torvalds }
14401da177e4SLinus Torvalds EXPORT_SYMBOL(write_inode_now);
14411da177e4SLinus Torvalds 
14421da177e4SLinus Torvalds /**
14431da177e4SLinus Torvalds  * sync_inode - write an inode and its pages to disk.
14441da177e4SLinus Torvalds  * @inode: the inode to sync
14451da177e4SLinus Torvalds  * @wbc: controls the writeback mode
14461da177e4SLinus Torvalds  *
14471da177e4SLinus Torvalds  * sync_inode() will write an inode and its pages to disk.  It will also
14481da177e4SLinus Torvalds  * correctly update the inode on its superblock's dirty inode lists and will
14491da177e4SLinus Torvalds  * update inode->i_state.
14501da177e4SLinus Torvalds  *
14511da177e4SLinus Torvalds  * The caller must have a ref on the inode.
14521da177e4SLinus Torvalds  */
14531da177e4SLinus Torvalds int sync_inode(struct inode *inode, struct writeback_control *wbc)
14541da177e4SLinus Torvalds {
14554f8ad655SJan Kara 	return writeback_single_inode(inode, &inode_to_bdi(inode)->wb, wbc);
14561da177e4SLinus Torvalds }
14571da177e4SLinus Torvalds EXPORT_SYMBOL(sync_inode);
1458c3765016SChristoph Hellwig 
1459c3765016SChristoph Hellwig /**
1460c691b9d9SAndrew Morton  * sync_inode_metadata - write an inode to disk
1461c3765016SChristoph Hellwig  * @inode: the inode to sync
1462c3765016SChristoph Hellwig  * @wait: wait for I/O to complete.
1463c3765016SChristoph Hellwig  *
1464c691b9d9SAndrew Morton  * Write an inode to disk and adjust its dirty state after completion.
1465c3765016SChristoph Hellwig  *
1466c3765016SChristoph Hellwig  * Note: only writes the actual inode, no associated data or other metadata.
1467c3765016SChristoph Hellwig  */
1468c3765016SChristoph Hellwig int sync_inode_metadata(struct inode *inode, int wait)
1469c3765016SChristoph Hellwig {
1470c3765016SChristoph Hellwig 	struct writeback_control wbc = {
1471c3765016SChristoph Hellwig 		.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
1472c3765016SChristoph Hellwig 		.nr_to_write = 0, /* metadata-only */
1473c3765016SChristoph Hellwig 	};
1474c3765016SChristoph Hellwig 
1475c3765016SChristoph Hellwig 	return sync_inode(inode, &wbc);
1476c3765016SChristoph Hellwig }
1477c3765016SChristoph Hellwig EXPORT_SYMBOL(sync_inode_metadata);
1478
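/*
 * Example: how a simple ->fsync() implementation might use
 * sync_inode_metadata() after flushing the data pages, modeled loosely on
 * __generic_file_fsync() in fs/libfs.c of this era.  The function name is
 * illustrative and the locking done by the real helper is omitted.
 */
#if 0
static int example_file_fsync(struct file *file, loff_t start, loff_t end,
			      int datasync)
{
	struct inode *inode = file->f_mapping->host;
	int err;

	/* write and wait on the data pages first */
	err = filemap_write_and_wait_range(file->f_mapping, start, end);
	if (err)
		return err;

	/* a pure datasync can skip the inode if only timestamps changed */
	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		return 0;

	/* now write the inode itself and wait for it */
	return sync_inode_metadata(inode, 1);
}
#endif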