/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kthread.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/tracepoint.h>
#include <linux/device.h>
#include "internal.h"

/*
 * 4MB minimal write chunk size (with 4KB pages PAGE_CACHE_SHIFT is 12,
 * so this evaluates to 4096 >> 2 = 1024 pages = 4MB)
 */
#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_CACHE_SHIFT - 10))

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_work {
	long nr_pages;
	struct super_block *sb;
	unsigned long *older_than_this;
	enum writeback_sync_modes sync_mode;
	unsigned int tagged_writepages:1;
	unsigned int for_kupdate:1;
	unsigned int range_cyclic:1;
	unsigned int for_background:1;
	unsigned int for_sync:1;	/* sync(2) WB_SYNC_ALL writeback */
	enum wb_reason reason;		/* why was writeback initiated? */

	struct list_head list;		/* pending work list */
	struct completion *done;	/* set if the caller waits */
};
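/*
 * Note on ->older_than_this (summary added for clarity): it points at a
 * jiffies value owned by the processing context rather than holding one
 * by value; wb_writeback() below aims it at a stack variable and
 * refreshes that variable on each loop iteration for kupdate/background
 * work, so move_expired_inodes() always compares against a current
 * cutoff.
 */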
/*
 * If an inode is constantly having its pages dirtied, but then the
 * updates stop dirtytime_expire_interval seconds in the past, it's
 * possible for the worst case time between when an inode has its
 * timestamps updated and when they finally get written out to be two
 * dirtytime_expire_intervals.  We set the default to 12 hours (in
 * seconds), which means most of the time inodes will have their
 * timestamps written to disk after 12 hours, but in the worst case a
 * few inodes might not have their timestamps updated for 24 hours.
 */
unsigned int dirtytime_expire_interval = 12 * 60 * 60;

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(WB_writeback_running, &bdi->wb.state);
}
EXPORT_SYMBOL(writeback_in_progress);

struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb;

	if (!inode)
		return &noop_backing_dev_info;

	sb = inode->i_sb;
#ifdef CONFIG_BLOCK
	if (sb_is_blkdev_sb(sb))
		return blk_get_backing_dev_info(I_BDEV(inode));
#endif
	return sb->s_bdi;
}
EXPORT_SYMBOL_GPL(inode_to_bdi);

static inline struct inode *wb_inode(struct list_head *head)
{
	return list_entry(head, struct inode, i_wb_list);
}
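/*
 * Note (summary added for clarity): wb_inode() is a thin container_of()
 * wrapper. It maps the list_head embedded at inode->i_wb_list back to
 * its inode, so the b_dirty/b_io/b_more_io lists can be walked without
 * any separate node structure.
 */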
/*
 * Include the creation of the trace points after defining the
 * wb_writeback_work structure and inline functions so that the definition
 * remains local to this file.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(wbc_writepage);

static void bdi_wakeup_thread(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi->wb_lock);
	if (test_bit(WB_registered, &bdi->wb.state))
		mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
	spin_unlock_bh(&bdi->wb_lock);
}

static void bdi_queue_work(struct backing_dev_info *bdi,
			   struct wb_writeback_work *work)
{
	trace_writeback_queue(bdi, work);

	spin_lock_bh(&bdi->wb_lock);
	if (!test_bit(WB_registered, &bdi->wb.state)) {
		if (work->done)
			complete(work->done);
		goto out_unlock;
	}
	list_add_tail(&work->list, &bdi->work_list);
	mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
out_unlock:
	spin_unlock_bh(&bdi->wb_lock);
}

static void
__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
		      bool range_cyclic, enum wb_reason reason)
{
	struct wb_writeback_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wakeup the thread for old dirty data writeback
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		trace_writeback_nowork(bdi);
		bdi_wakeup_thread(bdi);
		return;
	}

	work->sync_mode	= WB_SYNC_NONE;
	work->nr_pages	= nr_pages;
	work->range_cyclic = range_cyclic;
	work->reason	= reason;

	bdi_queue_work(bdi, work);
}
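/*
 * Illustrative sketch (an assumption, not part of this excerpt): a
 * caller that must wait for its work item would pair ->done with a
 * completion, roughly:
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *	struct wb_writeback_work work = {
 *		.sync_mode	= WB_SYNC_ALL,
 *		.nr_pages	= LONG_MAX,
 *		.done		= &done,
 *	};
 *
 *	bdi_queue_work(bdi, &work);
 *	wait_for_completion(&done);
 *
 * wb_do_writeback() below calls complete(work->done) for exactly this
 * case, and kfree()s the item only when ->done is NULL, as in the
 * fire-and-forget allocation above.
 */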
/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started when this function returns, we make no guarantees on
 *   completion. Caller need not hold sb s_umount semaphore.
 *
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
			enum wb_reason reason)
{
	__bdi_start_writeback(bdi, nr_pages, true, reason);
}

/**
 * bdi_start_background_writeback - start background writeback
 * @bdi: the backing device to write from
 *
 * Description:
 *   This makes sure WB_SYNC_NONE background writeback happens. When
 *   this function returns, it is only guaranteed that for given BDI
 *   some IO is happening if we are over background dirty threshold.
 *   Caller need not hold sb s_umount semaphore.
 */
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
	/*
	 * We just wake up the flusher thread. It will perform background
	 * writeback as soon as there is no other work to do.
	 */
	trace_writeback_wake_background(bdi);
	bdi_wakeup_thread(bdi);
}

/*
 * Remove the inode from the writeback list it is on.
 */
void inode_wb_list_del(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	spin_lock(&bdi->wb.list_lock);
	list_del_init(&inode->i_wb_list);
	spin_unlock(&bdi->wb.list_lock);
}

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = wb_inode(wb->b_dirty.next);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_wb_list, &wb->b_dirty);
}
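/*
 * Worked example for the check above (illustrative numbers): b_dirty is
 * kept newest-first, so wb_inode(wb->b_dirty.next) is the most recently
 * dirtied entry.  If that entry was stamped at t=20 and this inode at
 * t=10, moving this inode to the front without restamping would break
 * the ordering, hence dirtied_when = jiffies.  If this inode is already
 * the newest, its timestamp is left alone, matching the comment above.
 */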
/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	list_move(&inode->i_wb_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	inode->i_state &= ~I_SYNC;
	/* If inode is clean and unused, put it into LRU now... */
	inode_add_lru(inode);
	/* Waiters must see I_SYNC cleared before being woken up */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}
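/*
 * Worked example of the 32-bit guard above (illustrative): with HZ=100,
 * a 32-bit jiffies counter wraps roughly every 497 days.  A stale
 * dirtied_when recorded long ago can then compare as "after" a recent
 * cutoff via time_after() alone; the extra time_before_eq() against the
 * current jiffies rejects such future-looking-but-ancient values.
 */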
#define EXPIRE_DIRTY_ATIME	0x0001

/*
 * Move expired (dirtied before work->older_than_this) dirty inodes from
 * @delaying_queue to @dispatch_queue.
 */
static int move_expired_inodes(struct list_head *delaying_queue,
			       struct list_head *dispatch_queue,
			       int flags,
			       struct wb_writeback_work *work)
{
	unsigned long *older_than_this = NULL;
	unsigned long expire_time;
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;
	int moved = 0;

	if ((flags & EXPIRE_DIRTY_ATIME) == 0)
		older_than_this = work->older_than_this;
	else if (!work->for_sync) {
		expire_time = jiffies - (dirtytime_expire_interval * HZ);
		older_than_this = &expire_time;
	}
	while (!list_empty(delaying_queue)) {
		inode = wb_inode(delaying_queue->prev);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		list_move(&inode->i_wb_list, &tmp);
		moved++;
		if (flags & EXPIRE_DIRTY_ATIME)
			set_bit(__I_DIRTY_TIME_EXPIRED, &inode->i_state);
		if (sb_is_blkdev_sb(inode->i_sb))
			continue;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		goto out;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		sb = wb_inode(tmp.prev)->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = wb_inode(pos);
			if (inode->i_sb == sb)
				list_move(&inode->i_wb_list, dispatch_queue);
		}
	}
out:
	return moved;
}
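/*
 * Example of the sb-sort above (illustrative): if tmp ends up holding
 * inodes A(sb1), B(sb2), C(sb1), the second pass dispatches A and C
 * together and then B, so writeback_sb_inodes() sees each superblock's
 * inodes as one contiguous run and the caller pins each s_umount only
 * once per run instead of bouncing between superblocks.
 */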
/*
 * Queue all expired dirty inodes for io, eldest first.
 * Before
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    gf         edc     BA
 * After
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    g          fBAedc
 *                                           |
 *                                           +--> dequeue for IO
 */
static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
{
	int moved;

	assert_spin_locked(&wb->list_lock);
	list_splice_init(&wb->b_more_io, &wb->b_io);
	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work);
	moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
				     EXPIRE_DIRTY_ATIME, work);
	trace_writeback_queue_io(wb, work, moved);
}

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) {
		trace_writeback_write_inode_start(inode, wbc);
		ret = inode->i_sb->s_op->write_inode(inode, wbc);
		trace_writeback_write_inode(inode, wbc);
		return ret;
	}
	return 0;
}

/*
 * Wait for writeback on an inode to complete. Called with i_lock held.
 * Caller must make sure inode cannot go away when we drop i_lock.
 */
static void __inode_wait_for_writeback(struct inode *inode)
	__releases(inode->i_lock)
	__acquires(inode->i_lock)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode->i_lock);
		__wait_on_bit(wqh, &wq, bit_wait,
			      TASK_UNINTERRUPTIBLE);
		spin_lock(&inode->i_lock);
	}
}

/*
 * Wait for writeback on an inode to complete. Caller must have inode pinned.
 */
void inode_wait_for_writeback(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	__inode_wait_for_writeback(inode);
	spin_unlock(&inode->i_lock);
}
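/*
 * Note on the two wait helpers (summary added for clarity):
 * __inode_wait_for_writeback() loops until I_SYNC is actually clear and
 * relies on the caller keeping the inode alive across the lock drops;
 * inode_sleep_on_writeback() below sleeps through at most one wakeup
 * and must not touch the inode afterwards, since its callers hold no
 * reference and the inode may already be gone.
 */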
/*
 * Sleep until I_SYNC is cleared. This function must be called with i_lock
 * held and drops it. It is aimed at callers not holding any inode reference
 * so once i_lock is dropped, inode can go away.
 */
static void inode_sleep_on_writeback(struct inode *inode)
	__releases(inode->i_lock)
{
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	int sleep;

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	sleep = inode->i_state & I_SYNC;
	spin_unlock(&inode->i_lock);
	if (sleep)
		schedule();
	finish_wait(wqh, &wait);
}

/*
 * Find proper writeback list for the inode depending on its current state and
 * possibly also change of its state while we were doing writeback.  Here we
 * handle things such as livelock prevention or fairness of writeback among
 * inodes. This function can be called only by flusher thread - no one else
 * processes all inodes in writeback lists and requeueing inodes behind flusher
 * thread's back can have unexpected consequences.
 */
static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
			  struct writeback_control *wbc)
{
	if (inode->i_state & I_FREEING)
		return;

	/*
	 * Sync livelock prevention. Each inode is tagged and synced in one
	 * shot. If still dirty, it will be redirty_tail()'ed below.  Update
	 * the dirty time to prevent enqueue and sync it again.
	 */
	if ((inode->i_state & I_DIRTY) &&
	    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
		inode->dirtied_when = jiffies;

	if (wbc->pages_skipped) {
		/*
		 * writeback is not making progress due to locked
		 * buffers. Skip this inode for now.
		 */
		redirty_tail(inode, wb);
		return;
	}

	if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		/*
		 * We didn't write back all the pages.  nfs_writepages()
		 * sometimes bales out without doing anything.
		 */
		if (wbc->nr_to_write <= 0) {
			/* Slice used up. Queue for next turn. */
			requeue_io(inode, wb);
		} else {
			/*
			 * Writeback blocked by something other than
			 * congestion. Delay the inode for some time to
			 * avoid spinning on the CPU (100% iowait)
			 * retrying writeback of the dirty page/inode
			 * that cannot be performed immediately.
			 */
			redirty_tail(inode, wb);
		}
	} else if (inode->i_state & I_DIRTY) {
		/*
		 * Filesystems can dirty the inode during writeback operations,
		 * such as delayed allocation during submission or metadata
		 * updates after data IO completion.
		 */
		redirty_tail(inode, wb);
	} else if (inode->i_state & I_DIRTY_TIME) {
		inode->dirtied_when = jiffies;
		list_move(&inode->i_wb_list, &wb->b_dirty_time);
	} else {
		/* The inode is clean. Remove from writeback lists. */
		list_del_init(&inode->i_wb_list);
	}
}
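/*
 * Disposition summary for requeue_inode() above (added for clarity):
 * dirty pages remaining with the write budget exhausted -> b_more_io;
 * dirty pages remaining otherwise, pages skipped, or inode metadata
 * redirtied -> tail of b_dirty with a refreshed timestamp as needed;
 * only timestamps dirty -> b_dirty_time; fully clean -> off all
 * writeback lists.
 */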
/*
 * Write out an inode and its dirty pages. Do not update the writeback list
 * linkage. That is left to the caller. The caller is also responsible for
 * setting I_SYNC flag and calling inode_sync_complete() to clear it.
 */
static int
__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	long nr_to_write = wbc->nr_to_write;
	unsigned dirty;
	int ret;

	WARN_ON(!(inode->i_state & I_SYNC));

	trace_writeback_single_inode_start(inode, wbc, nr_to_write);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion. We don't do it for sync(2) writeback because it has
	 * a separate, external IO completion path and ->sync_fs for
	 * guaranteeing inode metadata is written back correctly.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/*
	 * Some filesystems may redirty the inode during the writeback
	 * due to delalloc, clear dirty metadata flags right before
	 * write_inode()
	 */
	spin_lock(&inode->i_lock);

	dirty = inode->i_state & I_DIRTY;
	if (inode->i_state & I_DIRTY_TIME) {
		if ((dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) ||
		    unlikely(inode->i_state & I_DIRTY_TIME_EXPIRED) ||
		    unlikely(time_after(jiffies,
					(inode->dirtied_time_when +
					 dirtytime_expire_interval * HZ)))) {
			dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED;
			trace_writeback_lazytime(inode);
		}
	} else
		inode->i_state &= ~I_DIRTY_TIME_EXPIRED;
	inode->i_state &= ~dirty;

	/*
	 * Paired with smp_mb() in __mark_inode_dirty().  This allows
	 * __mark_inode_dirty() to test i_state without grabbing i_lock -
	 * either they see the I_DIRTY bits cleared or we see the dirtied
	 * inode.
	 *
	 * I_DIRTY_PAGES is always cleared together above even if @mapping
	 * still has dirty pages.  The flag is reinstated after smp_mb() if
	 * necessary.  This guarantees that either __mark_inode_dirty()
	 * sees clear I_DIRTY_PAGES or we see PAGECACHE_TAG_DIRTY.
	 */
	smp_mb();

	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		inode->i_state |= I_DIRTY_PAGES;

	spin_unlock(&inode->i_lock);

	if (dirty & I_DIRTY_TIME)
		mark_inode_dirty_sync(inode);
	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & ~I_DIRTY_PAGES) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}
	trace_writeback_single_inode(inode, wbc, nr_to_write);
	return ret;
}

/*
 * Write out an inode's dirty pages. Either the caller has an active reference
 * on the inode or the inode has I_WILL_FREE set.
 *
 * This function is designed for writing back one inode at a time, e.g. on
 * behalf of a filesystem. The flusher thread instead uses
 * __writeback_single_inode() and does more profound writeback list handling
 * in writeback_sb_inodes().
 */
static int
writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
		       struct writeback_control *wbc)
{
	int ret = 0;

	spin_lock(&inode->i_lock);
	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		if (wbc->sync_mode != WB_SYNC_ALL)
			goto out;
		/*
		 * It's a data-integrity sync. We must wait. Since callers hold
		 * inode reference or inode has I_WILL_FREE set, it cannot go
		 * away under us.
		 */
		__inode_wait_for_writeback(inode);
	}
	WARN_ON(inode->i_state & I_SYNC);
	/*
	 * Skip inode if it is clean and we have no outstanding writeback in
	 * WB_SYNC_ALL mode. We don't want to mess with writeback lists in this
	 * function since flusher thread may be doing for example sync in
	 * parallel and if we move the inode, it could get skipped. So here we
	 * make sure inode is on some writeback list and leave it there unless
	 * we have completely cleaned the inode.
	 */
	if (!(inode->i_state & I_DIRTY_ALL) &&
	    (wbc->sync_mode != WB_SYNC_ALL ||
	     !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
		goto out;
	inode->i_state |= I_SYNC;
	spin_unlock(&inode->i_lock);

	ret = __writeback_single_inode(inode, wbc);

	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	/*
	 * If inode is clean, remove it from writeback lists. Otherwise don't
	 * touch it. See comment above for explanation.
	 */
	if (!(inode->i_state & I_DIRTY_ALL))
		list_del_init(&inode->i_wb_list);
	spin_unlock(&wb->list_lock);
	inode_sync_complete(inode);
out:
	spin_unlock(&inode->i_lock);
	return ret;
}
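/*
 * Worked example for writeback_chunk_size() below (illustrative values,
 * assuming DIRTY_SCOPE is 8 and 4KB pages): with avg_write_bandwidth at
 * 16384 pages/s and global_dirty_limit at 65536 pages, the WB_SYNC_NONE
 * branch starts from min(8192, 8192) = 8192 pages; a small request of,
 * say, 100 pages is then rounded up to one MIN_WRITEBACK_PAGES chunk
 * (1024 pages = 4MB) by the round_down(pages + MIN_WRITEBACK_PAGES,
 * MIN_WRITEBACK_PAGES) step, keeping per-inode slices worthwhile.
 */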
static long writeback_chunk_size(struct backing_dev_info *bdi,
				 struct wb_writeback_work *work)
{
	long pages;

	/*
	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
	 * here avoids calling into writeback_inodes_wb() more than once.
	 *
	 * The intended call sequence for WB_SYNC_ALL writeback is:
	 *
	 *      wb_writeback()
	 *          writeback_sb_inodes()       <== called only once
	 *              write_cache_pages()     <== called once for each inode
	 *                  (quickly) tag currently dirty pages
	 *                  (maybe slowly) sync all tagged pages
	 */
	if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
		pages = LONG_MAX;
	else {
		pages = min(bdi->avg_write_bandwidth / 2,
			    global_dirty_limit / DIRTY_SCOPE);
		pages = min(pages, work->nr_pages);
		pages = round_down(pages + MIN_WRITEBACK_PAGES,
				   MIN_WRITEBACK_PAGES);
	}

	return pages;
}
/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * Return the number of pages and/or inodes written.
 */
static long writeback_sb_inodes(struct super_block *sb,
				struct bdi_writeback *wb,
				struct wb_writeback_work *work)
{
	struct writeback_control wbc = {
		.sync_mode		= work->sync_mode,
		.tagged_writepages	= work->tagged_writepages,
		.for_kupdate		= work->for_kupdate,
		.for_background		= work->for_background,
		.for_sync		= work->for_sync,
		.range_cyclic		= work->range_cyclic,
		.range_start		= 0,
		.range_end		= LLONG_MAX,
	};
	unsigned long start_time = jiffies;
	long write_chunk;
	long wrote = 0;  /* count both pages and inodes */

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);

		if (inode->i_sb != sb) {
			if (work->sb) {
				/*
				 * We only want to write back data for this
				 * superblock, move all inodes not belonging
				 * to it back onto the dirty list.
				 */
				redirty_tail(inode, wb);
				continue;
			}

			/*
			 * The inode belongs to a different superblock.
			 * Bounce back to the caller to unpin this and
			 * pin the next superblock.
			 */
			break;
		}

		/*
		 * Don't bother with new inodes or inodes being freed: the
		 * first kind does not need periodic writeout yet, and for the
		 * latter kind writeout is handled by the freer.
		 */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			redirty_tail(inode, wb);
			continue;
		}
		if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
			/*
			 * If this inode is locked for writeback and we are not
			 * doing writeback-for-data-integrity, move it to
			 * b_more_io so that writeback can proceed with the
			 * other inodes on s_io.
			 *
			 * We'll have another go at writing back this inode
			 * when we completed a full scan of b_io.
			 */
			spin_unlock(&inode->i_lock);
			requeue_io(inode, wb);
			trace_writeback_sb_inodes_requeue(inode);
			continue;
		}
		spin_unlock(&wb->list_lock);

		/*
		 * We already requeued the inode if it had I_SYNC set and we
		 * are doing WB_SYNC_NONE writeback. So this catches only the
		 * WB_SYNC_ALL case.
		 */
		if (inode->i_state & I_SYNC) {
			/* Wait for I_SYNC. This function drops i_lock... */
			inode_sleep_on_writeback(inode);
			/* Inode may be gone, start again */
			spin_lock(&wb->list_lock);
			continue;
		}
		inode->i_state |= I_SYNC;
		spin_unlock(&inode->i_lock);

		write_chunk = writeback_chunk_size(wb->bdi, work);
		wbc.nr_to_write = write_chunk;
		wbc.pages_skipped = 0;

		/*
		 * We use I_SYNC to pin the inode in memory. While it is set
		 * evict_inode() will wait so the inode cannot be freed.
		 */
		__writeback_single_inode(inode, &wbc);

		work->nr_pages -= write_chunk - wbc.nr_to_write;
		wrote += write_chunk - wbc.nr_to_write;
		spin_lock(&wb->list_lock);
		spin_lock(&inode->i_lock);
		if (!(inode->i_state & I_DIRTY_ALL))
			wrote++;
		requeue_inode(inode, wb, &wbc);
		inode_sync_complete(inode);
		spin_unlock(&inode->i_lock);
		cond_resched_lock(&wb->list_lock);
		/*
		 * bail out to wb_writeback() often enough to check
		 * background threshold and other termination conditions.
		 */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	return wrote;
}
static long __writeback_inodes_wb(struct bdi_writeback *wb,
				  struct wb_writeback_work *work)
{
	unsigned long start_time = jiffies;
	long wrote = 0;

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);
		struct super_block *sb = inode->i_sb;

		if (!trylock_super(sb)) {
			/*
			 * trylock_super() may fail consistently due to
			 * s_umount being grabbed by someone else. Don't use
			 * requeue_io() to avoid busy retrying the inode/sb.
			 */
			redirty_tail(inode, wb);
			continue;
		}
		wrote += writeback_sb_inodes(sb, wb, work);
		up_read(&sb->s_umount);

		/* refer to the same tests at the end of writeback_sb_inodes */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	/* Leave any unwritten inodes on b_io */
	return wrote;
}

static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
				enum wb_reason reason)
{
	struct wb_writeback_work work = {
		.nr_pages	= nr_pages,
		.sync_mode	= WB_SYNC_NONE,
		.range_cyclic	= 1,
		.reason		= reason,
	};

	spin_lock(&wb->list_lock);
	if (list_empty(&wb->b_io))
		queue_io(wb, &work);
	__writeback_inodes_wb(wb, &work);
	spin_unlock(&wb->list_lock);

	return nr_pages - work.nr_pages;
}
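/*
 * Note (summary added for clarity): background writeback is gated on
 * two levels in over_bground_thresh() below - the global count of dirty
 * plus unstable-NFS pages against the global background threshold, and
 * this bdi's reclaimable page count against its proportional share of
 * that threshold via bdi_dirty_limit().
 */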
static bool over_bground_thresh(struct backing_dev_info *bdi)
{
	unsigned long background_thresh, dirty_thresh;

	global_dirty_limits(&background_thresh, &dirty_thresh);

	if (global_page_state(NR_FILE_DIRTY) +
	    global_page_state(NR_UNSTABLE_NFS) > background_thresh)
		return true;

	if (bdi_stat(bdi, BDI_RECLAIMABLE) >
	    bdi_dirty_limit(bdi, background_thresh))
		return true;

	return false;
}

/*
 * Called under wb->list_lock. If there are multiple wb per bdi,
 * only the flusher working on the first wb should do it.
 */
static void wb_update_bandwidth(struct bdi_writeback *wb,
				unsigned long start_time)
{
	__bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, 0, start_time);
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space. So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_work *work)
{
	unsigned long wb_start = jiffies;
	long nr_pages = work->nr_pages;
	unsigned long oldest_jif;
	struct inode *inode;
	long progress;

	oldest_jif = jiffies;
	work->older_than_this = &oldest_jif;

	spin_lock(&wb->list_lock);
	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (work->nr_pages <= 0)
			break;

		/*
		 * Background writeout and kupdate-style writeback may
		 * run forever. Stop them if there is other work to do
		 * so that e.g. sync can proceed. They'll be restarted
		 * after the other works are all done.
		 */
		if ((work->for_background || work->for_kupdate) &&
		    !list_empty(&wb->bdi->work_list))
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (work->for_background && !over_bground_thresh(wb->bdi))
			break;

		/*
		 * Kupdate and background works are special and we want to
		 * include all inodes that need writing. Livelock avoidance is
		 * handled by these works yielding to any other work so we are
		 * safe.
		 */
		if (work->for_kupdate) {
			oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
		} else if (work->for_background)
			oldest_jif = jiffies;

		trace_writeback_start(wb->bdi, work);
		if (list_empty(&wb->b_io))
			queue_io(wb, work);
		if (work->sb)
			progress = writeback_sb_inodes(work->sb, wb, work);
		else
			progress = __writeback_inodes_wb(wb, work);
		trace_writeback_written(wb->bdi, work);

		wb_update_bandwidth(wb, wb_start);

		/*
		 * Did we write something? Try for more
		 *
		 * Dirty inodes are moved to b_io for writeback in batches.
		 * The completion of the current batch does not necessarily
		 * mean the overall work is done. So we keep looping as long
		 * as we made some progress on cleaning pages or inodes.
		 */
		if (progress)
			continue;
		/*
		 * No more inodes for IO, bail
		 */
		if (list_empty(&wb->b_more_io))
			break;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		if (!list_empty(&wb->b_more_io)) {
			trace_writeback_wait(wb->bdi, work);
			inode = wb_inode(wb->b_more_io.prev);
			spin_lock(&inode->i_lock);
			spin_unlock(&wb->list_lock);
			/* This function drops i_lock... */
			inode_sleep_on_writeback(inode);
			spin_lock(&wb->list_lock);
		}
	}
	spin_unlock(&wb->list_lock);

	return nr_pages - work->nr_pages;
}
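/*
 * Loop recap for wb_writeback() above (summary added for clarity): it
 * stops once the page budget is spent, once background/kupdate work
 * yields to newly queued work items, once background writeback drops
 * below threshold, or once both b_io and b_more_io run dry; when a pass
 * makes no progress but b_more_io is non-empty, it blocks on the oldest
 * b_more_io inode rather than busylooping.
 */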
/*
 * Return the next wb_writeback_work struct that hasn't been processed yet.
 */
static struct wb_writeback_work *
get_next_work_item(struct backing_dev_info *bdi)
{
	struct wb_writeback_work *work = NULL;

	spin_lock_bh(&bdi->wb_lock);
	if (!list_empty(&bdi->work_list)) {
		work = list_entry(bdi->work_list.next,
				  struct wb_writeback_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_bh(&bdi->wb_lock);
	return work;
}

/*
 * Add in the number of potentially dirty inodes, because each inode
 * write can dirty pagecache in the underlying blockdev.
 */
static unsigned long get_nr_dirty_pages(void)
{
	return global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) +
		get_nr_dirty_inodes();
}

static long wb_check_background_flush(struct bdi_writeback *wb)
{
	if (over_bground_thresh(wb->bdi)) {

		struct wb_writeback_work work = {
			.nr_pages	= LONG_MAX,
			.sync_mode	= WB_SYNC_NONE,
			.for_background	= 1,
			.range_cyclic	= 1,
			.reason		= WB_REASON_BACKGROUND,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	/*
	 * When set to zero, disable periodic writeback
	 */
	if (!dirty_writeback_interval)
		return 0;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = get_nr_dirty_pages();

	if (nr_pages) {
		struct wb_writeback_work work = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
			.reason		= WB_REASON_PERIODIC,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}
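/*
 * Timing example (illustrative, assuming the usual default of 500):
 * dirty_writeback_interval is kept in centiseconds, so the * 10 above
 * converts it to milliseconds and msecs_to_jiffies(500 * 10) yields a
 * 5 second gap between periodic flushes; writing zero to the knob
 * disables periodic writeback entirely, as checked at the top.
 */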
/*
 * Retrieve work items and do the writeback they describe
 */
static long wb_do_writeback(struct bdi_writeback *wb)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct wb_writeback_work *work;
	long wrote = 0;

	set_bit(WB_writeback_running, &wb->state);
	while ((work = get_next_work_item(bdi)) != NULL) {

		trace_writeback_exec(bdi, work);

		wrote += wb_writeback(wb, work);

		/*
		 * Notify the caller of completion if this is a synchronous
		 * work item, otherwise just free it.
		 */
		if (work->done)
			complete(work->done);
		else
			kfree(work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);
	wrote += wb_check_background_flush(wb);
	clear_bit(WB_writeback_running, &wb->state);

	return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * reschedules periodically and does kupdated style flushing.
 */
void bdi_writeback_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(to_delayed_work(work),
						struct bdi_writeback, dwork);
	struct backing_dev_info *bdi = wb->bdi;
	long pages_written;

	set_worker_desc("flush-%s", dev_name(bdi->dev));
	current->flags |= PF_SWAPWRITE;

	if (likely(!current_is_workqueue_rescuer() ||
		   !test_bit(WB_registered, &wb->state))) {
		/*
		 * The normal path.  Keep writing back @bdi until its
		 * work_list is empty.  Note that this path is also taken
		 * if @bdi is shutting down even when we're running off the
		 * rescuer as work_list needs to be drained.
		 */
		do {
			pages_written = wb_do_writeback(wb);
			trace_writeback_pages_written(pages_written);
		} while (!list_empty(&bdi->work_list));
	} else {
		/*
		 * bdi_wq can't get enough workers and we're running off
		 * the emergency worker.  Don't hog it.  Hopefully, 1024 is
		 * enough for efficient IO.
		 */
		pages_written = writeback_inodes_wb(&bdi->wb, 1024,
						    WB_REASON_FORKER_THREAD);
		trace_writeback_pages_written(pages_written);
	}

	if (!list_empty(&bdi->work_list))
		mod_delayed_work(bdi_wq, &wb->dwork, 0);
	else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
		bdi_wakeup_thread_delayed(bdi);

	current->flags &= ~PF_SWAPWRITE;
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
{
	struct backing_dev_info *bdi;

	if (!nr_pages)
		nr_pages = get_nr_dirty_pages();

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;
		__bdi_start_writeback(bdi, nr_pages, false, reason);
	}
	rcu_read_unlock();
}
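/*
 * Illustrative call pattern (an assumption about callers outside this
 * excerpt): sync(2) and memory reclaim invoke something like
 *
 *	wakeup_flusher_threads(0, WB_REASON_SYNC);
 *
 * where nr_pages == 0 expands to the current dirty-page estimate via
 * get_nr_dirty_pages(), fanning WB_SYNC_NONE work out to every bdi that
 * has dirty IO.
 */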
/*
 * Wake up bdi's periodically to make sure dirtytime inodes get written
 * back.  We deliberately do *not* check the b_dirtytime list in
 * wb_has_dirty_io(), since this would cause the kernel to be constantly
 * waking up once there are any dirtytime inodes on the system.  So
 * instead we define a separate delayed work function which gets called
 * much more rarely.  (By default, only once every 12 hours.)
 *
 * If there is any other write activity going on in the file system,
 * this function won't be necessary.  But if the only thing that has
 * happened on the file system is a dirtytime inode caused by an atime
 * update, we need this infrastructure below to make sure that inode
 * eventually gets pushed out to disk.
 */
static void wakeup_dirtytime_writeback(struct work_struct *w);
static DECLARE_DELAYED_WORK(dirtytime_work, wakeup_dirtytime_writeback);

static void wakeup_dirtytime_writeback(struct work_struct *w)
{
	struct backing_dev_info *bdi;

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (list_empty(&bdi->wb.b_dirty_time))
			continue;
		bdi_wakeup_thread(bdi);
	}
	rcu_read_unlock();
	schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
}

static int __init start_dirtytime_writeback(void)
{
	schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
	return 0;
}
__initcall(start_dirtytime_writeback);

int dirtytime_interval_handler(struct ctl_table *table, int write,
			       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		mod_delayed_work(system_wq, &dirtytime_work, 0);
	return ret;
}
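/*
 * Example (editorial sketch, not part of the original file):
 * dirtytime_interval_handler() is meant to be used as the ->proc_handler
 * of a sysctl, wired up elsewhere (kernel/sysctl.c) so that writing the
 * interval also kicks dirtytime_work immediately.  A sketch of such a
 * table entry, assuming dirtytime_expire_interval is visible via
 * <linux/writeback.h>:
 */
static struct ctl_table dirtytime_table_example[] = {
	{
		.procname	= "dirtytime_expire_seconds",
		.data		= &dirtytime_expire_interval,
		.maxlen		= sizeof(dirtytime_expire_interval),
		.mode		= 0644,
		.proc_handler	= dirtytime_interval_handler,
	},
	{ }	/* sentinel entry terminating the table */
};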
static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}
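/*
 * Example (editorial note, not part of the original file): with the
 * block_dump sysctl enabled (/proc/sys/vm/block_dump), the printk above
 * emits one KERN_DEBUG line per dirtying event.  A hypothetical sample
 * line matching the format string:
 *
 *	bash(1234): dirtied inode 5678 (notes.txt) on sda1
 */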
/**
 * __mark_inode_dirty - internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *
 * Mark an inode as dirty.  Callers should use mark_inode_dirty or
 * mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL!  We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct backing_dev_info *bdi = NULL;
	int dirtytime;

	trace_writeback_mark_inode_dirty(inode, flags);

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_TIME)) {
		trace_writeback_dirty_inode_start(inode, flags);

		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode, flags);

		trace_writeback_dirty_inode(inode, flags);
	}
	if (flags & I_DIRTY_INODE)
		flags &= ~I_DIRTY_TIME;
	dirtytime = flags & I_DIRTY_TIME;

	/*
	 * Paired with smp_mb() in __writeback_single_inode() for the
	 * following lockless i_state test.  See there for details.
	 */
	smp_mb();

	if (((inode->i_state & flags) == flags) ||
	    (dirtytime && (inode->i_state & I_DIRTY_INODE)))
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode->i_lock);
	if (dirtytime && (inode->i_state & I_DIRTY_INODE))
		goto out_unlock_inode;
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		if (flags & I_DIRTY_INODE)
			inode->i_state &= ~I_DIRTY_TIME;
		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out_unlock_inode;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (inode_unhashed(inode))
				goto out_unlock_inode;
		}
		if (inode->i_state & I_FREEING)
			goto out_unlock_inode;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			bool wakeup_bdi = false;
			bdi = inode_to_bdi(inode);

			spin_unlock(&inode->i_lock);
			spin_lock(&bdi->wb.list_lock);
			if (bdi_cap_writeback_dirty(bdi)) {
				WARN(!test_bit(WB_registered, &bdi->wb.state),
				     "bdi-%s not registered\n", bdi->name);

				/*
				 * If this is the first dirty inode for this
				 * bdi, we have to wake-up the corresponding
				 * bdi thread to make sure background
				 * write-back happens later.
				 */
				if (!wb_has_dirty_io(&bdi->wb))
					wakeup_bdi = true;
			}

			inode->dirtied_when = jiffies;
			if (dirtytime)
				inode->dirtied_time_when = jiffies;
			if (inode->i_state & (I_DIRTY_INODE | I_DIRTY_PAGES))
				list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
			else
				list_move(&inode->i_wb_list,
					  &bdi->wb.b_dirty_time);
			spin_unlock(&bdi->wb.list_lock);
			trace_writeback_dirty_inode_enqueue(inode);

			if (wakeup_bdi)
				bdi_wakeup_thread_delayed(bdi);
			return;
		}
	}
out_unlock_inode:
	spin_unlock(&inode->i_lock);

}
EXPORT_SYMBOL(__mark_inode_dirty);
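/*
 * Example (editorial sketch, not part of the original file): the
 * I_DIRTY_TIME handling above is what makes lazytime cheap -- a pure
 * timestamp update parks the inode on b_dirty_time instead of b_dirty.
 * A sketch of how a timestamp updater might pick its flags; the helper
 * name update_time_example is hypothetical:
 */
static int update_time_example(struct inode *inode, struct timespec *now)
{
	inode->i_mtime = *now;
	if (inode->i_sb->s_flags & MS_LAZYTIME)
		/* timestamp-only change: writeback may lag by up to 12h */
		__mark_inode_dirty(inode, I_DIRTY_TIME);
	else
		/* conventional behaviour: the inode is dirty for real */
		__mark_inode_dirty(inode, I_DIRTY_SYNC);
	return 0;
}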
static void wait_sb_inodes(struct super_block *sb)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_sb_list_lock);

	/*
	 * Data integrity sync.  Must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync
	 * call, but which had writeout started before we write it out.
	 * In which case, the inode may not be on the dirty list, but
	 * we still have to wait for that writeout.
	 */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;

		spin_lock(&inode->i_lock);
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    (mapping->nrpages == 0)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode_sb_list_lock);

		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * inode_sb_list_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * inode_sb_list_lock.  So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;

		filemap_fdatawait(mapping);

		cond_resched();

		spin_lock(&inode_sb_list_lock);
	}
	spin_unlock(&inode_sb_list_lock);
	iput(old_inode);
}

/**
 * writeback_inodes_sb_nr - writeback dirty inodes from given super_block
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block.  No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb_nr(struct super_block *sb,
			    unsigned long nr,
			    enum wb_reason reason)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb			= sb,
		.sync_mode		= WB_SYNC_NONE,
		.tagged_writepages	= 1,
		.done			= &done,
		.nr_pages		= nr,
		.reason			= reason,
	};

	if (sb->s_bdi == &noop_backing_dev_info)
		return;
	WARN_ON(!rwsem_is_locked(&sb->s_umount));
	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);
}
EXPORT_SYMBOL(writeback_inodes_sb_nr);
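/*
 * Example (editorial sketch, not part of the original file): callers must
 * already hold sb->s_umount, as the WARN_ON above enforces.  A sketch of
 * a filesystem pushing out roughly 16MB (4096 pages at 4KB) of its own
 * dirty pages when it wants to free space; the surrounding context is
 * hypothetical, the reason value comes from enum wb_reason:
 *
 *	down_read(&sb->s_umount);
 *	writeback_inodes_sb_nr(sb, 4096, WB_REASON_FS_FREE_SPACE);
 *	up_read(&sb->s_umount);
 */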
/**
 * writeback_inodes_sb - writeback dirty inodes from given super_block
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block.  No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(writeback_inodes_sb);

/**
 * try_to_writeback_inodes_sb_nr - try to start writeback if none underway
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: the reason for writeback
 *
 * Invoke writeback_inodes_sb_nr if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int try_to_writeback_inodes_sb_nr(struct super_block *sb,
				  unsigned long nr,
				  enum wb_reason reason)
{
	if (writeback_in_progress(sb->s_bdi))
		return 1;

	if (!down_read_trylock(&sb->s_umount))
		return 0;

	writeback_inodes_sb_nr(sb, nr, reason);
	up_read(&sb->s_umount);
	return 1;
}
EXPORT_SYMBOL(try_to_writeback_inodes_sb_nr);

/**
 * try_to_writeback_inodes_sb - try to start writeback if none underway
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Implemented by try_to_writeback_inodes_sb_nr()
 * Returns 1 if writeback was started, 0 if not.
 */
int try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
	return try_to_writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(try_to_writeback_inodes_sb);
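/*
 * Example (editorial sketch, not part of the original file): unlike the
 * plain variants, the try_to_* helpers only trylock s_umount, so they are
 * safe from contexts that may already race with umount.  A hypothetical
 * ENOSPC-style retry in a filesystem's allocator might look like:
 *
 *	if (err == -ENOSPC) {
 *		try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
 *		goto retry;
 *	}
 */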
/**
 * sync_inodes_sb - sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */
void sync_inodes_sb(struct super_block *sb)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.nr_pages	= LONG_MAX,
		.range_cyclic	= 0,
		.done		= &done,
		.reason		= WB_REASON_SYNC,
		.for_sync	= 1,
	};

	/* Nothing to do? */
	if (sb->s_bdi == &noop_backing_dev_info)
		return;
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);

	wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);

/**
 * write_inode_now - write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty.  This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	return writeback_single_inode(inode, wb, &wbc);
}
EXPORT_SYMBOL(write_inode_now);

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	return writeback_single_inode(inode, &inode_to_bdi(inode)->wb, wbc);
}
EXPORT_SYMBOL(sync_inode);
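/*
 * Example (editorial sketch, not part of the original file): sync_inode()
 * leaves the writeback_control entirely to the caller.  A sketch of a
 * synchronous whole-file flush built on it, equivalent in effect to
 * write_inode_now(inode, 1) above; the helper name is hypothetical:
 */
static int sync_inode_fully_example(struct inode *inode)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_ALL,	/* wait on each page */
		.nr_to_write	= LONG_MAX,	/* no page budget */
		.range_start	= 0,
		.range_end	= LLONG_MAX,	/* whole address space */
	};

	return sync_inode(inode, &wbc);
}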
/**
 * sync_inode_metadata - write an inode to disk
 * @inode: the inode to sync
 * @wait: wait for I/O to complete.
 *
 * Write an inode to disk and adjust its dirty state after completion.
 *
 * Note: only writes the actual inode, no associated data or other metadata.
 */
int sync_inode_metadata(struct inode *inode, int wait)
{
	struct writeback_control wbc = {
		.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
		.nr_to_write = 0, /* metadata-only */
	};

	return sync_inode(inode, &wbc);
}
EXPORT_SYMBOL(sync_inode_metadata);
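/*
 * Example (editorial sketch, not part of the original file): nr_to_write
 * of zero is what makes this metadata-only -- no data pages are written.
 * A hypothetical minimal ->fsync implementation that flushes the data
 * range itself and then uses this helper for the inode might look like:
 */
static int fsync_example(struct file *file, loff_t start, loff_t end,
			 int datasync)
{
	struct inode *inode = file->f_mapping->host;
	int err;

	/* write out and wait on the data pages in [start, end] */
	err = filemap_write_and_wait_range(file->f_mapping, start, end);
	if (err)
		return err;

	/* for fdatasync(2), skip the inode unless it matters for the data */
	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		return 0;

	/* push the inode itself out, synchronously */
	return sync_inode_metadata(inode, 1);
}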