/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kthread.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/tracepoint.h>
#include "internal.h"

/*
 * 4MB minimal write chunk size
 */
#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_CACHE_SHIFT - 10))

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_work {
	long nr_pages;
	struct super_block *sb;
	unsigned long *older_than_this;
	enum writeback_sync_modes sync_mode;
	unsigned int tagged_writepages:1;
	unsigned int for_kupdate:1;
	unsigned int range_cyclic:1;
	unsigned int for_background:1;
	enum wb_reason reason;		/* why was writeback initiated? */

	struct list_head list;		/* pending work list */
	struct completion *done;	/* set if the caller waits */
};
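
/*
 * Illustrative sketch (not used at this point in the file): a synchronous
 * caller fills the work on its stack, points ->done at a completion, queues
 * it, and waits -- the pattern used by writeback_inodes_sb_nr() further down:
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *	struct wb_writeback_work work = {
 *		.sb		= sb,
 *		.sync_mode	= WB_SYNC_NONE,
 *		.nr_pages	= nr,
 *		.done		= &done,
 *	};
 *
 *	bdi_queue_work(sb->s_bdi, &work);
 *	wait_for_completion(&done);
 */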
/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(BDI_writeback_running, &bdi->state);
}
EXPORT_SYMBOL(writeback_in_progress);

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (strcmp(sb->s_type->name, "bdev") == 0)
		return inode->i_mapping->backing_dev_info;

	return sb->s_bdi;
}

static inline struct inode *wb_inode(struct list_head *head)
{
	return list_entry(head, struct inode, i_wb_list);
}

/*
 * Include the creation of the trace points after defining the
 * wb_writeback_work structure and inline functions so that the definition
 * remains local to this file.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>

static void bdi_queue_work(struct backing_dev_info *bdi,
			   struct wb_writeback_work *work)
{
	trace_writeback_queue(bdi, work);

	spin_lock_bh(&bdi->wb_lock);
	list_add_tail(&work->list, &bdi->work_list);
	spin_unlock_bh(&bdi->wb_lock);

	mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
}

static void
__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
		      bool range_cyclic, enum wb_reason reason)
{
	struct wb_writeback_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wakeup the thread for old dirty data writeback
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		trace_writeback_nowork(bdi);
		mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
		return;
	}

	work->sync_mode	= WB_SYNC_NONE;
	work->nr_pages	= nr_pages;
	work->range_cyclic = range_cyclic;
	work->reason	= reason;

	bdi_queue_work(bdi, work);
}
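
/*
 * Example from later in this file: wakeup_flusher_threads() kicks
 * WB_SYNC_NONE writeback on every bdi with dirty IO via
 *
 *	__bdi_start_writeback(bdi, nr_pages, false, reason);
 *
 * The flusher frees the work item after executing it (see wb_do_writeback()),
 * so such callers never wait on it.
 */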
/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started when this function returns, we make no guarantees on
 *   completion. Caller need not hold sb s_umount semaphore.
 *
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
			enum wb_reason reason)
{
	__bdi_start_writeback(bdi, nr_pages, true, reason);
}

/**
 * bdi_start_background_writeback - start background writeback
 * @bdi: the backing device to write from
 *
 * Description:
 *   This makes sure WB_SYNC_NONE background writeback happens. When
 *   this function returns, it is only guaranteed that for given BDI
 *   some IO is happening if we are over background dirty threshold.
 *   Caller need not hold sb s_umount semaphore.
 */
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
	/*
	 * We just wake up the flusher thread. It will perform background
	 * writeback as soon as there is no other work to do.
	 */
	trace_writeback_wake_background(bdi);
	mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
}

/*
 * Remove the inode from the writeback list it is on.
 */
void inode_wb_list_del(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	spin_lock(&bdi->wb.list_lock);
	list_del_init(&inode->i_wb_list);
	spin_unlock(&bdi->wb.list_lock);
}
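
/*
 * Illustrative sketch (hypothetical caller, not part of the original file):
 * ask for up to 1024 pages to be cleaned, plus background writeback if the
 * system is over its dirty threshold:
 *
 *	bdi_start_writeback(bdi, 1024, WB_REASON_TRY_TO_FREE_PAGES);
 *	bdi_start_background_writeback(bdi);
 *
 * Neither call waits for IO; both merely schedule flusher work.
 */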
/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = wb_inode(wb->b_dirty.next);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_wb_list, &wb->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	list_move(&inode->i_wb_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	inode->i_state &= ~I_SYNC;
	/* If inode is clean and unused, put it into LRU now... */
	inode_add_lru(inode);
	/* Waiters must see I_SYNC cleared before being woken up */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}
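
/*
 * Worked example of the wraparound guard above: on 32-bit, jiffies wraps
 * roughly every 49.7 days at HZ=1000.  An inode stuck with a dirtied_when
 * stamped just before a wrap would make time_after(dirtied_when, t) report
 * "dirtied in the future" for weeks; the extra
 * time_before_eq(dirtied_when, jiffies) check rejects such stale stamps so
 * the inode still gets queued for writeback.
 */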
/*
 * Move expired (dirtied before work->older_than_this) dirty inodes from
 * @delaying_queue to @dispatch_queue.
 */
static int move_expired_inodes(struct list_head *delaying_queue,
			       struct list_head *dispatch_queue,
			       struct wb_writeback_work *work)
{
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;
	int moved = 0;

	while (!list_empty(delaying_queue)) {
		inode = wb_inode(delaying_queue->prev);
		if (work->older_than_this &&
		    inode_dirtied_after(inode, *work->older_than_this))
			break;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
		list_move(&inode->i_wb_list, &tmp);
		moved++;
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		goto out;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		sb = wb_inode(tmp.prev)->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = wb_inode(pos);
			if (inode->i_sb == sb)
				list_move(&inode->i_wb_list, dispatch_queue);
		}
	}
out:
	return moved;
}
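
/*
 * Illustration: with expired inodes from two superblocks interleaved on
 * @delaying_queue, do_sb_sort becomes 1 and the second loop regroups them so
 * that all inodes of one sb sit next to each other on @dispatch_queue,
 * letting writeback_sb_inodes() handle each superblock in a single pass.
 */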
/*
 * Queue all expired dirty inodes for io, eldest first.
 * Before
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    gf         edc     BA
 * After
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    g          fBAedc
 *                                           |
 *                                           +--> dequeue for IO
 */
static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
{
	int moved;
	assert_spin_locked(&wb->list_lock);
	list_splice_init(&wb->b_more_io, &wb->b_io);
	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, work);
	trace_writeback_queue_io(wb, work, moved);
}

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) {
		trace_writeback_write_inode_start(inode, wbc);
		ret = inode->i_sb->s_op->write_inode(inode, wbc);
		trace_writeback_write_inode(inode, wbc);
		return ret;
	}
	return 0;
}

/*
 * Wait for writeback on an inode to complete. Called with i_lock held.
 * Caller must make sure inode cannot go away when we drop i_lock.
 */
static void __inode_wait_for_writeback(struct inode *inode)
	__releases(inode->i_lock)
	__acquires(inode->i_lock)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode->i_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&inode->i_lock);
	}
}

/*
 * Wait for writeback on an inode to complete. Caller must have inode pinned.
 */
void inode_wait_for_writeback(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	__inode_wait_for_writeback(inode);
	spin_unlock(&inode->i_lock);
}
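
/*
 * Usage sketch (hypothetical caller, e.g. an eviction-style path): with the
 * inode pinned it cannot go away while we sleep on I_SYNC:
 *
 *	inode_wait_for_writeback(inode);
 *	... no writeback in flight now; safe to tear the inode down ...
 */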
/*
 * Sleep until I_SYNC is cleared. This function must be called with i_lock
 * held and drops it. It is aimed for callers not holding any inode reference
 * so once i_lock is dropped, inode can go away.
 */
static void inode_sleep_on_writeback(struct inode *inode)
	__releases(inode->i_lock)
{
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	int sleep;

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	sleep = inode->i_state & I_SYNC;
	spin_unlock(&inode->i_lock);
	if (sleep)
		schedule();
	finish_wait(wqh, &wait);
}
/*
 * Find proper writeback list for the inode depending on its current state and
 * possibly also change of its state while we were doing writeback.  Here we
 * handle things such as livelock prevention or fairness of writeback among
 * inodes. This function can be called only by flusher thread - no one else
 * processes all inodes in writeback lists and requeueing inodes behind flusher
 * thread's back can have unexpected consequences.
 */
static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
			  struct writeback_control *wbc)
{
	if (inode->i_state & I_FREEING)
		return;

	/*
	 * Sync livelock prevention. Each inode is tagged and synced in one
	 * shot. If still dirty, it will be redirty_tail()'ed below.  Update
	 * the dirty time to prevent enqueue and sync it again.
	 */
	if ((inode->i_state & I_DIRTY) &&
	    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
		inode->dirtied_when = jiffies;

	if (wbc->pages_skipped) {
		/*
		 * writeback is not making progress due to locked
		 * buffers. Skip this inode for now.
		 */
		redirty_tail(inode, wb);
		return;
	}

	if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		/*
		 * We didn't write back all the pages.  nfs_writepages()
		 * sometimes bails out without doing anything.
		 */
		if (wbc->nr_to_write <= 0) {
			/* Slice used up. Queue for next turn. */
			requeue_io(inode, wb);
		} else {
			/*
			 * Writeback blocked by something other than
			 * congestion. Delay the inode for some time to
			 * avoid spinning on the CPU (100% iowait)
			 * retrying writeback of the dirty page/inode
			 * that cannot be performed immediately.
			 */
			redirty_tail(inode, wb);
		}
	} else if (inode->i_state & I_DIRTY) {
		/*
		 * Filesystems can dirty the inode during writeback operations,
		 * such as delayed allocation during submission or metadata
		 * updates after data IO completion.
		 */
		redirty_tail(inode, wb);
	} else {
		/* The inode is clean. Remove from writeback lists. */
		list_del_init(&inode->i_wb_list);
	}
}
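
/*
 * The policy above, in table form:
 *
 *	pages_skipped set		-> redirty_tail()  (locked buffers)
 *	dirty pages, slice used up	-> requeue_io()    (b_more_io, next round)
 *	dirty pages, slice left		-> redirty_tail()  (blocked, back off)
 *	only inode metadata dirty	-> redirty_tail()  (redirtied during IO)
 *	clean				-> dropped from the writeback lists
 */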
/*
 * Write out an inode and its dirty pages. Do not update the writeback list
 * linkage. That is left to the caller. The caller is also responsible for
 * setting I_SYNC flag and calling inode_sync_complete() to clear it.
 */
static int
__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	long nr_to_write = wbc->nr_to_write;
	unsigned dirty;
	int ret;

	WARN_ON(!(inode->i_state & I_SYNC));

	trace_writeback_single_inode_start(inode, wbc, nr_to_write);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/*
	 * Some filesystems may redirty the inode during the writeback
	 * due to delalloc, clear dirty metadata flags right before
	 * write_inode()
	 */
	spin_lock(&inode->i_lock);
	/* Clear I_DIRTY_PAGES if we've written out all dirty pages */
	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		inode->i_state &= ~I_DIRTY_PAGES;
	dirty = inode->i_state & I_DIRTY;
	inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
	spin_unlock(&inode->i_lock);
	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}
	trace_writeback_single_inode(inode, wbc, nr_to_write);
	return ret;
}
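
/*
 * Schematically, the WB_SYNC_ALL path above runs:
 *
 *	do_writepages(mapping, wbc);	(submit the dirty pages)
 *	filemap_fdatawait(mapping);	(wait for the data IO)
 *	write_inode(inode, wbc);	(only then write the metadata)
 *
 * which is the ordering that filesystems updating metadata from data-IO
 * completion depend on.
 */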
/*
 * Write out an inode's dirty pages. Either the caller has an active reference
 * on the inode or the inode has I_WILL_FREE set.
 *
 * This function is designed to be called for writing back one inode which
 * we get e.g. from the filesystem. The flusher thread uses
 * __writeback_single_inode() and does more profound writeback list handling
 * in writeback_sb_inodes().
 */
static int
writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
		       struct writeback_control *wbc)
{
	int ret = 0;

	spin_lock(&inode->i_lock);
	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		if (wbc->sync_mode != WB_SYNC_ALL)
			goto out;
		/*
		 * It's a data-integrity sync. We must wait. Since callers hold
		 * inode reference or inode has I_WILL_FREE set, it cannot go
		 * away under us.
		 */
		__inode_wait_for_writeback(inode);
	}
	WARN_ON(inode->i_state & I_SYNC);
	/*
	 * Skip inode if it is clean. We don't want to mess with writeback
	 * lists in this function since flusher thread may be doing for example
	 * sync in parallel and if we move the inode, it could get skipped. So
	 * here we make sure inode is on some writeback list and leave it there
	 * unless we have completely cleaned the inode.
	 */
	if (!(inode->i_state & I_DIRTY))
		goto out;
	inode->i_state |= I_SYNC;
	spin_unlock(&inode->i_lock);

	ret = __writeback_single_inode(inode, wbc);

	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	/*
	 * If inode is clean, remove it from writeback lists. Otherwise don't
	 * touch it. See comment above for explanation.
	 */
	if (!(inode->i_state & I_DIRTY))
		list_del_init(&inode->i_wb_list);
	spin_unlock(&wb->list_lock);
	inode_sync_complete(inode);
out:
	spin_unlock(&inode->i_lock);
	return ret;
}
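
/*
 * Illustrative sketch (hypothetical caller; the write_inode_now()-style
 * helpers later in the full file follow this pattern):
 *
 *	struct writeback_control wbc = {
 *		.sync_mode   = WB_SYNC_ALL,
 *		.nr_to_write = LONG_MAX,
 *	};
 *	writeback_single_inode(inode, &inode_to_bdi(inode)->wb, &wbc);
 *
 * The caller must hold an inode reference (or have I_WILL_FREE set),
 * matching the WARN_ONs at the top of the function.
 */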
static long writeback_chunk_size(struct backing_dev_info *bdi,
				 struct wb_writeback_work *work)
{
	long pages;

	/*
	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
	 * here avoids calling into writeback_inodes_wb() more than once.
	 *
	 * The intended call sequence for WB_SYNC_ALL writeback is:
	 *
	 *      wb_writeback()
	 *          writeback_sb_inodes()       <== called only once
	 *              write_cache_pages()     <== called once for each inode
	 *                  (quickly) tag currently dirty pages
	 *                  (maybe slowly) sync all tagged pages
	 */
	if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
		pages = LONG_MAX;
	else {
		pages = min(bdi->avg_write_bandwidth / 2,
			    global_dirty_limit / DIRTY_SCOPE);
		pages = min(pages, work->nr_pages);
		pages = round_down(pages + MIN_WRITEBACK_PAGES,
				   MIN_WRITEBACK_PAGES);
	}

	return pages;
}
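
/*
 * Worked example for the non-integrity branch above, assuming 4K pages (so
 * MIN_WRITEBACK_PAGES is 1024 pages = 4MB) and an avg_write_bandwidth of
 * 25600 pages/s (~100MB/s), with a large dirty limit and large
 * work->nr_pages:
 *
 *	pages = min(25600 / 2, global_dirty_limit / DIRTY_SCOPE) = 12800
 *	pages = round_down(12800 + 1024, 1024)                   = 13312
 *
 * i.e. roughly half a second worth of IO, and never less than 4MB.
 */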
/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * Return the number of pages and/or inodes written.
 */
static long writeback_sb_inodes(struct super_block *sb,
				struct bdi_writeback *wb,
				struct wb_writeback_work *work)
{
	struct writeback_control wbc = {
		.sync_mode		= work->sync_mode,
		.tagged_writepages	= work->tagged_writepages,
		.for_kupdate		= work->for_kupdate,
		.for_background		= work->for_background,
		.range_cyclic		= work->range_cyclic,
		.range_start		= 0,
		.range_end		= LLONG_MAX,
	};
	unsigned long start_time = jiffies;
	long write_chunk;
	long wrote = 0;  /* count both pages and inodes */

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);

		if (inode->i_sb != sb) {
			if (work->sb) {
				/*
				 * We only want to write back data for this
				 * superblock, move all inodes not belonging
				 * to it back onto the dirty list.
				 */
				redirty_tail(inode, wb);
				continue;
			}

			/*
			 * The inode belongs to a different superblock.
			 * Bounce back to the caller to unpin this and
			 * pin the next superblock.
			 */
			break;
		}

		/*
		 * Don't bother with new inodes or inodes being freed, first
		 * kind does not need periodic writeout yet, and for the latter
		 * kind writeout is handled by the freer.
		 */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			redirty_tail(inode, wb);
			continue;
		}
		if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
			/*
			 * If this inode is locked for writeback and we are not
			 * doing writeback-for-data-integrity, move it to
			 * b_more_io so that writeback can proceed with the
			 * other inodes on s_io.
			 *
			 * We'll have another go at writing back this inode
			 * when we completed a full scan of b_io.
			 */
			spin_unlock(&inode->i_lock);
			requeue_io(inode, wb);
			trace_writeback_sb_inodes_requeue(inode);
			continue;
		}
		spin_unlock(&wb->list_lock);

		/*
		 * We already requeued the inode if it had I_SYNC set and we
		 * are doing WB_SYNC_NONE writeback. So this catches only the
		 * WB_SYNC_ALL case.
		 */
		if (inode->i_state & I_SYNC) {
			/* Wait for I_SYNC. This function drops i_lock... */
			inode_sleep_on_writeback(inode);
			/* Inode may be gone, start again */
			spin_lock(&wb->list_lock);
			continue;
		}
		inode->i_state |= I_SYNC;
		spin_unlock(&inode->i_lock);

		write_chunk = writeback_chunk_size(wb->bdi, work);
		wbc.nr_to_write = write_chunk;
		wbc.pages_skipped = 0;

		/*
		 * We use I_SYNC to pin the inode in memory. While it is set
		 * evict_inode() will wait so the inode cannot be freed.
		 */
		__writeback_single_inode(inode, &wbc);

		work->nr_pages -= write_chunk - wbc.nr_to_write;
		wrote += write_chunk - wbc.nr_to_write;
		spin_lock(&wb->list_lock);
		spin_lock(&inode->i_lock);
		if (!(inode->i_state & I_DIRTY))
			wrote++;
		requeue_inode(inode, wb, &wbc);
		inode_sync_complete(inode);
		spin_unlock(&inode->i_lock);
		cond_resched_lock(&wb->list_lock);
		/*
		 * bail out to wb_writeback() often enough to check
		 * background threshold and other termination conditions.
		 */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	return wrote;
}
static long __writeback_inodes_wb(struct bdi_writeback *wb,
				  struct wb_writeback_work *work)
{
	unsigned long start_time = jiffies;
	long wrote = 0;

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);
		struct super_block *sb = inode->i_sb;

		if (!grab_super_passive(sb)) {
			/*
			 * grab_super_passive() may fail consistently due to
			 * s_umount being grabbed by someone else. Don't use
			 * requeue_io() to avoid busy retrying the inode/sb.
			 */
			redirty_tail(inode, wb);
			continue;
		}
		wrote += writeback_sb_inodes(sb, wb, work);
		drop_super(sb);

		/* refer to the same tests at the end of writeback_sb_inodes */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	/* Leave any unwritten inodes on b_io */
	return wrote;
}

long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
				enum wb_reason reason)
{
	struct wb_writeback_work work = {
		.nr_pages	= nr_pages,
		.sync_mode	= WB_SYNC_NONE,
		.range_cyclic	= 1,
		.reason		= reason,
	};

	spin_lock(&wb->list_lock);
	if (list_empty(&wb->b_io))
		queue_io(wb, &work);
	__writeback_inodes_wb(wb, &work);
	spin_unlock(&wb->list_lock);

	return nr_pages - work.nr_pages;
}

static bool over_bground_thresh(struct backing_dev_info *bdi)
{
	unsigned long background_thresh, dirty_thresh;

	global_dirty_limits(&background_thresh, &dirty_thresh);

	if (global_page_state(NR_FILE_DIRTY) +
	    global_page_state(NR_UNSTABLE_NFS) > background_thresh)
		return true;

	if (bdi_stat(bdi, BDI_RECLAIMABLE) >
				bdi_dirty_limit(bdi, background_thresh))
		return true;

	return false;
}

/*
 * Called under wb->list_lock. If there are multiple wb per bdi,
 * only the flusher working on the first wb should do it.
 */
static void wb_update_bandwidth(struct bdi_writeback *wb,
				unsigned long start_time)
{
	__bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, 0, start_time);
}
/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space. So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval. But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write. So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_work *work)
{
	unsigned long wb_start = jiffies;
	long nr_pages = work->nr_pages;
	unsigned long oldest_jif;
	struct inode *inode;
	long progress;

	oldest_jif = jiffies;
	work->older_than_this = &oldest_jif;

	spin_lock(&wb->list_lock);
	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (work->nr_pages <= 0)
			break;

		/*
		 * Background writeout and kupdate-style writeback may
		 * run forever. Stop them if there is other work to do
		 * so that e.g. sync can proceed. They'll be restarted
		 * after the other works are all done.
		 */
		if ((work->for_background || work->for_kupdate) &&
		    !list_empty(&wb->bdi->work_list))
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (work->for_background && !over_bground_thresh(wb->bdi))
			break;

		/*
		 * Kupdate and background works are special and we want to
		 * include all inodes that need writing. Livelock avoidance is
		 * handled by these works yielding to any other work so we are
		 * safe.
		 */
		if (work->for_kupdate) {
			oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
		} else if (work->for_background)
			oldest_jif = jiffies;

		trace_writeback_start(wb->bdi, work);
		if (list_empty(&wb->b_io))
			queue_io(wb, work);
		if (work->sb)
			progress = writeback_sb_inodes(work->sb, wb, work);
		else
			progress = __writeback_inodes_wb(wb, work);
		trace_writeback_written(wb->bdi, work);

		wb_update_bandwidth(wb, wb_start);

		/*
		 * Did we write something? Try for more
		 *
		 * Dirty inodes are moved to b_io for writeback in batches.
		 * The completion of the current batch does not necessarily
		 * mean the overall work is done. So we keep looping as long
		 * as made some progress on cleaning pages or inodes.
		 */
		if (progress)
			continue;
		/*
		 * No more inodes for IO, bail
		 */
		if (list_empty(&wb->b_more_io))
			break;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		if (!list_empty(&wb->b_more_io)) {
			trace_writeback_wait(wb->bdi, work);
			inode = wb_inode(wb->b_more_io.prev);
			spin_lock(&inode->i_lock);
			spin_unlock(&wb->list_lock);
			/* This function drops i_lock... */
			inode_sleep_on_writeback(inode);
			spin_lock(&wb->list_lock);
		}
	}
	spin_unlock(&wb->list_lock);

	return nr_pages - work->nr_pages;
}
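
/*
 * Informal sketch of one wb_writeback() pass (pseudocode, for orientation):
 *
 *	for (;;) {
 *		if (quota consumed, other work queued, or below bg thresh)
 *			break;
 *		if (b_io is empty)
 *			queue_io();	refill from expired b_dirty/b_more_io
 *		progress = write inodes from b_io;
 *		if (!progress)
 *			sleep on the head of b_more_io, then retry;
 *	}
 */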
/*
 * Return the next wb_writeback_work struct that hasn't been processed yet.
 */
static struct wb_writeback_work *
get_next_work_item(struct backing_dev_info *bdi)
{
	struct wb_writeback_work *work = NULL;

	spin_lock_bh(&bdi->wb_lock);
	if (!list_empty(&bdi->work_list)) {
		work = list_entry(bdi->work_list.next,
				  struct wb_writeback_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_bh(&bdi->wb_lock);
	return work;
}
/*
 * Add in the number of potentially dirty inodes, because each inode
 * write can dirty pagecache in the underlying blockdev.
 */
static unsigned long get_nr_dirty_pages(void)
{
	return global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) +
		get_nr_dirty_inodes();
}

static long wb_check_background_flush(struct bdi_writeback *wb)
{
	if (over_bground_thresh(wb->bdi)) {

		struct wb_writeback_work work = {
			.nr_pages	= LONG_MAX,
			.sync_mode	= WB_SYNC_NONE,
			.for_background	= 1,
			.range_cyclic	= 1,
			.reason		= WB_REASON_BACKGROUND,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	/*
	 * When set to zero, disable periodic writeback
	 */
	if (!dirty_writeback_interval)
		return 0;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = get_nr_dirty_pages();

	if (nr_pages) {
		struct wb_writeback_work work = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
			.reason		= WB_REASON_PERIODIC,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}
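
/*
 * Note on the "* 10" conversions above and in wb_writeback():
 * dirty_writeback_interval and dirty_expire_interval are sysctls expressed
 * in centiseconds, so msecs_to_jiffies(interval * 10) yields jiffies.  With
 * the default dirty_writeback_centisecs of 500, kupdate-style flushing runs
 * at most once every 5 seconds.
 */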
/*
 * Retrieve work items and do the writeback they describe
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct wb_writeback_work *work;
	long wrote = 0;

	set_bit(BDI_writeback_running, &wb->bdi->state);
	while ((work = get_next_work_item(bdi)) != NULL) {
		/*
		 * Override sync mode, in case we must wait for completion
		 * because this thread is exiting now.
		 */
		if (force_wait)
			work->sync_mode = WB_SYNC_ALL;

		trace_writeback_exec(bdi, work);

		wrote += wb_writeback(wb, work);

		/*
		 * Notify the caller of completion if this is a synchronous
		 * work item, otherwise just free it.
		 */
		if (work->done)
			complete(work->done);
		else
			kfree(work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);
	wrote += wb_check_background_flush(wb);
	clear_bit(BDI_writeback_running, &wb->bdi->state);

	return wrote;
}
/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * reschedules periodically and does kupdated style flushing.
 */
void bdi_writeback_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(to_delayed_work(work),
						struct bdi_writeback, dwork);
	struct backing_dev_info *bdi = wb->bdi;
	long pages_written;

	current->flags |= PF_SWAPWRITE;

	if (likely(!current_is_workqueue_rescuer() ||
		   list_empty(&bdi->bdi_list))) {
		/*
		 * The normal path.  Keep writing back @bdi until its
		 * work_list is empty.  Note that this path is also taken
		 * if @bdi is shutting down even when we're running off the
		 * rescuer as work_list needs to be drained.
		 */
		do {
			pages_written = wb_do_writeback(wb, 0);
			trace_writeback_pages_written(pages_written);
		} while (!list_empty(&bdi->work_list));
	} else {
		/*
		 * bdi_wq can't get enough workers and we're running off
		 * the emergency worker.  Don't hog it.  Hopefully, 1024 is
		 * enough for efficient IO.
		 */
		pages_written = writeback_inodes_wb(&bdi->wb, 1024,
						    WB_REASON_FORKER_THREAD);
		trace_writeback_pages_written(pages_written);
	}

	if (!list_empty(&bdi->work_list) ||
	    (wb_has_dirty_io(wb) && dirty_writeback_interval))
		queue_delayed_work(bdi_wq, &wb->dwork,
			msecs_to_jiffies(dirty_writeback_interval * 10));

	current->flags &= ~PF_SWAPWRITE;
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
{
	struct backing_dev_info *bdi;

	if (!nr_pages) {
		nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;
		__bdi_start_writeback(bdi, nr_pages, false, reason);
	}
	rcu_read_unlock();
}

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}
/**
 * __mark_inode_dirty -	internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 * Mark an inode as dirty. Callers should use mark_inode_dirty or
 * mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct backing_dev_info *bdi = NULL;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		trace_writeback_dirty_inode_start(inode, flags);

		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode, flags);

		trace_writeback_dirty_inode(inode, flags);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode->i_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out_unlock_inode;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (inode_unhashed(inode))
				goto out_unlock_inode;
		}
		if (inode->i_state & I_FREEING)
			goto out_unlock_inode;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			bool wakeup_bdi = false;
			bdi = inode_to_bdi(inode);

			if (bdi_cap_writeback_dirty(bdi)) {
				WARN(!test_bit(BDI_registered, &bdi->state),
				     "bdi-%s not registered\n", bdi->name);

				/*
				 * If this is the first dirty inode for this
				 * bdi, we have to wake-up the corresponding
				 * bdi thread to make sure background
				 * write-back happens later.
				 */
				if (!wb_has_dirty_io(&bdi->wb))
					wakeup_bdi = true;
			}

			spin_unlock(&inode->i_lock);
			spin_lock(&bdi->wb.list_lock);
			inode->dirtied_when = jiffies;
			list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
			spin_unlock(&bdi->wb.list_lock);

			if (wakeup_bdi)
				bdi_wakeup_thread_delayed(bdi);
			return;
		}
	}
out_unlock_inode:
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(__mark_inode_dirty);
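
/*
 * Typical entry points are the inline wrappers in <linux/fs.h>:
 *
 *	mark_inode_dirty(inode);	calls __mark_inode_dirty(inode, I_DIRTY)
 *	mark_inode_dirty_sync(inode);	calls __mark_inode_dirty(inode, I_DIRTY_SYNC)
 *
 * Timestamp-only updates use I_DIRTY_SYNC, while page-dirtying paths pass
 * I_DIRTY_PAGES and leave the inode metadata alone.
 */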
116103ba3782SJens Axboe 		 */
116203ba3782SJens Axboe 		if (!S_ISBLK(inode->i_mode)) {
11631d3382cbSAl Viro 			if (inode_unhashed(inode))
1164250df6edSDave Chinner 				goto out_unlock_inode;
116503ba3782SJens Axboe 		}
1166a4ffdde6SAl Viro 		if (inode->i_state & I_FREEING)
1167250df6edSDave Chinner 			goto out_unlock_inode;
116803ba3782SJens Axboe 
116903ba3782SJens Axboe 		/*
117003ba3782SJens Axboe 		 * If the inode was already on b_dirty/b_io/b_more_io, don't
117103ba3782SJens Axboe 		 * reposition it (that would break b_dirty time-ordering).
117203ba3782SJens Axboe 		 */
117303ba3782SJens Axboe 		if (!was_dirty) {
1174a66979abSDave Chinner 			bool wakeup_bdi = false;
1175253c34e9SArtem Bityutskiy 			bdi = inode_to_bdi(inode);
1176500b067cSJens Axboe 
1177253c34e9SArtem Bityutskiy 			if (bdi_cap_writeback_dirty(bdi)) {
1178253c34e9SArtem Bityutskiy 				WARN(!test_bit(BDI_registered, &bdi->state),
1179253c34e9SArtem Bityutskiy 				     "bdi-%s not registered\n", bdi->name);
1180253c34e9SArtem Bityutskiy 
1181253c34e9SArtem Bityutskiy 				/*
1182253c34e9SArtem Bityutskiy 				 * If this is the first dirty inode for this
1183253c34e9SArtem Bityutskiy 				 * bdi, we have to wake up the corresponding
1184253c34e9SArtem Bityutskiy 				 * bdi thread to make sure background
1185253c34e9SArtem Bityutskiy 				 * writeback happens later.
1186253c34e9SArtem Bityutskiy 				 */
1187253c34e9SArtem Bityutskiy 				if (!wb_has_dirty_io(&bdi->wb))
1188253c34e9SArtem Bityutskiy 					wakeup_bdi = true;
1189500b067cSJens Axboe 			}
119003ba3782SJens Axboe 
1191a66979abSDave Chinner 			spin_unlock(&inode->i_lock);
1192f758eeabSChristoph Hellwig 			spin_lock(&bdi->wb.list_lock);
119303ba3782SJens Axboe 			inode->dirtied_when = jiffies;
11947ccf19a8SNick Piggin 			list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
1195f758eeabSChristoph Hellwig 			spin_unlock(&bdi->wb.list_lock);
1196253c34e9SArtem Bityutskiy 
1197253c34e9SArtem Bityutskiy 			if (wakeup_bdi)
11986467716aSArtem Bityutskiy 				bdi_wakeup_thread_delayed(bdi);
1199a66979abSDave Chinner 			return;
1200a66979abSDave Chinner 		}
1201a66979abSDave Chinner 	}
1202a66979abSDave Chinner out_unlock_inode:
1203a66979abSDave Chinner 	spin_unlock(&inode->i_lock);
1204a66979abSDave Chinner 
120503ba3782SJens Axboe }
120603ba3782SJens Axboe EXPORT_SYMBOL(__mark_inode_dirty);
120703ba3782SJens Axboe 
1208b6e51316SJens Axboe static void wait_sb_inodes(struct super_block *sb)
120966f3b8e2SJens Axboe {
121038f21977SNick Piggin 	struct inode *inode, *old_inode = NULL;
121138f21977SNick Piggin 
121203ba3782SJens Axboe 	/*
121303ba3782SJens Axboe 	 * We need to be protected against the filesystem going from
121403ba3782SJens Axboe 	 * r/o to r/w or vice versa.
121503ba3782SJens Axboe 	 */
1216b6e51316SJens Axboe 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
121703ba3782SJens Axboe 
121855fa6091SDave Chinner 	spin_lock(&inode_sb_list_lock);
121966f3b8e2SJens Axboe 
122038f21977SNick Piggin 	/*
122138f21977SNick Piggin 	 * Data integrity sync. We must wait for all pages under writeback,
122238f21977SNick Piggin 	 * because pages dirtied before our sync call may have had
122338f21977SNick Piggin 	 * writeout started on them before we got here.
122438f21977SNick Piggin 	 * In that case the inode may not be on the dirty list, but
122538f21977SNick Piggin 	 * we still have to wait for that writeout.
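	 *
	 * To make that safe, the loop below pins each inode with __iget()
	 * under i_lock, drops inode_sb_list_lock around filemap_fdatawait(),
	 * and defers the iput() of the previous inode so the final reference
	 * is never dropped while inode_sb_list_lock is held.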
122638f21977SNick Piggin 	 */
1227b6e51316SJens Axboe 	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
1228250df6edSDave Chinner 		struct address_space *mapping = inode->i_mapping;
122938f21977SNick Piggin 
1230250df6edSDave Chinner 		spin_lock(&inode->i_lock);
1231250df6edSDave Chinner 		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
1232250df6edSDave Chinner 		    (mapping->nrpages == 0)) {
1233250df6edSDave Chinner 			spin_unlock(&inode->i_lock);
123438f21977SNick Piggin 			continue;
1235250df6edSDave Chinner 		}
123638f21977SNick Piggin 		__iget(inode);
1237250df6edSDave Chinner 		spin_unlock(&inode->i_lock);
123855fa6091SDave Chinner 		spin_unlock(&inode_sb_list_lock);
123955fa6091SDave Chinner 
124038f21977SNick Piggin 		/*
124155fa6091SDave Chinner 		 * We hold a reference to 'inode' so it couldn't have been
124255fa6091SDave Chinner 		 * removed from the s_inodes list while we dropped the
124355fa6091SDave Chinner 		 * inode_sb_list_lock. We cannot iput the inode now as we can
124455fa6091SDave Chinner 		 * be holding the last reference and we cannot iput it under
124555fa6091SDave Chinner 		 * inode_sb_list_lock. So we keep the reference and iput it
124655fa6091SDave Chinner 		 * later.
124738f21977SNick Piggin 		 */
124838f21977SNick Piggin 		iput(old_inode);
124938f21977SNick Piggin 		old_inode = inode;
125038f21977SNick Piggin 
125138f21977SNick Piggin 		filemap_fdatawait(mapping);
125238f21977SNick Piggin 
125338f21977SNick Piggin 		cond_resched();
125438f21977SNick Piggin 
125555fa6091SDave Chinner 		spin_lock(&inode_sb_list_lock);
125638f21977SNick Piggin 	}
125755fa6091SDave Chinner 	spin_unlock(&inode_sb_list_lock);
125838f21977SNick Piggin 	iput(old_inode);
125966f3b8e2SJens Axboe }
12601da177e4SLinus Torvalds 
1261d8a8559cSJens Axboe /**
12623259f8beSChris Mason  * writeback_inodes_sb_nr - writeback dirty inodes from given super_block
1263d8a8559cSJens Axboe  * @sb: the superblock
12643259f8beSChris Mason  * @nr: the number of pages to write
1265786228abSMarcos Paulo de Souza  * @reason: reason why some writeback work was initiated
12661da177e4SLinus Torvalds  *
1267d8a8559cSJens Axboe  * Start writeback on some inodes on this super_block. No guarantees are made
1268d8a8559cSJens Axboe  * on how many (if any) will be written, and this function does not wait
12693259f8beSChris Mason  * for IO completion of submitted IO.
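 *
 * Example (editor's illustrative sketch, not part of the original
 * source): with s_umount held for reading, a filesystem could push a
 * few thousand dirty pages toward disk without waiting for the IO:
 *
 *	down_read(&sb->s_umount);
 *	writeback_inodes_sb_nr(sb, 8192, WB_REASON_FS_FREE_SPACE);
 *	up_read(&sb->s_umount);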
12701da177e4SLinus Torvalds  */
12710e175a18SCurt Wohlgemuth void writeback_inodes_sb_nr(struct super_block *sb,
12720e175a18SCurt Wohlgemuth 			    unsigned long nr,
12730e175a18SCurt Wohlgemuth 			    enum wb_reason reason)
12741da177e4SLinus Torvalds {
127583ba7b07SChristoph Hellwig 	DECLARE_COMPLETION_ONSTACK(done);
127683ba7b07SChristoph Hellwig 	struct wb_writeback_work work = {
12773c4d7165SChristoph Hellwig 		.sb = sb,
12783c4d7165SChristoph Hellwig 		.sync_mode = WB_SYNC_NONE,
12796e6938b6SWu Fengguang 		.tagged_writepages = 1,
128083ba7b07SChristoph Hellwig 		.done = &done,
12813259f8beSChris Mason 		.nr_pages = nr,
12820e175a18SCurt Wohlgemuth 		.reason = reason,
12833c4d7165SChristoph Hellwig 	};
12840e3c9a22SJens Axboe 
12856eedc701SJan Kara 	if (sb->s_bdi == &noop_backing_dev_info)
12866eedc701SJan Kara 		return;
1287cf37e972SChristoph Hellwig 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
128883ba7b07SChristoph Hellwig 	bdi_queue_work(sb->s_bdi, &work);
128983ba7b07SChristoph Hellwig 	wait_for_completion(&done);
12901da177e4SLinus Torvalds }
12913259f8beSChris Mason EXPORT_SYMBOL(writeback_inodes_sb_nr);
12923259f8beSChris Mason 
12933259f8beSChris Mason /**
12943259f8beSChris Mason  * writeback_inodes_sb - writeback dirty inodes from given super_block
12953259f8beSChris Mason  * @sb: the superblock
1296786228abSMarcos Paulo de Souza  * @reason: reason why some writeback work was initiated
12973259f8beSChris Mason  *
12983259f8beSChris Mason  * Start writeback on some inodes on this super_block. No guarantees are made
12993259f8beSChris Mason  * on how many (if any) will be written, and this function does not wait
13003259f8beSChris Mason  * for IO completion of submitted IO.
13013259f8beSChris Mason  */
13020e175a18SCurt Wohlgemuth void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
13033259f8beSChris Mason {
13040e175a18SCurt Wohlgemuth 	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
13053259f8beSChris Mason }
1306d8a8559cSJens Axboe EXPORT_SYMBOL(writeback_inodes_sb);
1307d8a8559cSJens Axboe 
1308d8a8559cSJens Axboe /**
130910ee27a0SMiao Xie  * try_to_writeback_inodes_sb_nr - try to start writeback if none underway
13103259f8beSChris Mason  * @sb: the superblock
13113259f8beSChris Mason  * @nr: the number of pages to write
131210ee27a0SMiao Xie  * @reason: reason why some writeback work was initiated
13133259f8beSChris Mason  *
131410ee27a0SMiao Xie  * Invoke writeback_inodes_sb_nr() if no writeback is currently underway.
13153259f8beSChris Mason  * Returns 1 if writeback was started, 0 if not.
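 *
 * Unlike writeback_inodes_sb_nr(), which expects the caller to hold
 * sb->s_umount, this helper returns 1 immediately if writeback is
 * already in progress, and otherwise takes s_umount with
 * down_read_trylock() itself, returning 0 without writing anything
 * if the lock cannot be acquired.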
13163259f8beSChris Mason  */
131710ee27a0SMiao Xie int try_to_writeback_inodes_sb_nr(struct super_block *sb,
13180e175a18SCurt Wohlgemuth 				  unsigned long nr,
13190e175a18SCurt Wohlgemuth 				  enum wb_reason reason)
13203259f8beSChris Mason {
132110ee27a0SMiao Xie 	if (writeback_in_progress(sb->s_bdi))
132210ee27a0SMiao Xie 		return 1;
132310ee27a0SMiao Xie 
132410ee27a0SMiao Xie 	if (!down_read_trylock(&sb->s_umount))
132510ee27a0SMiao Xie 		return 0;
132610ee27a0SMiao Xie 
13270e175a18SCurt Wohlgemuth 	writeback_inodes_sb_nr(sb, nr, reason);
13283259f8beSChris Mason 	up_read(&sb->s_umount);
13293259f8beSChris Mason 	return 1;
13303259f8beSChris Mason }
133110ee27a0SMiao Xie EXPORT_SYMBOL(try_to_writeback_inodes_sb_nr);
133210ee27a0SMiao Xie 
133310ee27a0SMiao Xie /**
133410ee27a0SMiao Xie  * try_to_writeback_inodes_sb - try to start writeback if none underway
133510ee27a0SMiao Xie  * @sb: the superblock
133610ee27a0SMiao Xie  * @reason: reason why some writeback work was initiated
133710ee27a0SMiao Xie  *
133810ee27a0SMiao Xie  * Implemented via try_to_writeback_inodes_sb_nr().
133910ee27a0SMiao Xie  * Returns 1 if writeback was started, 0 if not.
134010ee27a0SMiao Xie  */
134110ee27a0SMiao Xie int try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
134210ee27a0SMiao Xie {
134310ee27a0SMiao Xie 	return try_to_writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
134410ee27a0SMiao Xie }
134510ee27a0SMiao Xie EXPORT_SYMBOL(try_to_writeback_inodes_sb);
13463259f8beSChris Mason 
13473259f8beSChris Mason /**
1348d8a8559cSJens Axboe  * sync_inodes_sb - sync sb inode pages
1349d8a8559cSJens Axboe  * @sb: the superblock
1350d8a8559cSJens Axboe  *
1351d8a8559cSJens Axboe  * This function writes and waits on any dirty inode belonging to this
1352cb9ef8d5SStefan Hajnoczi  * super_block.
1353d8a8559cSJens Axboe  */
1354b6e51316SJens Axboe void sync_inodes_sb(struct super_block *sb)
1355d8a8559cSJens Axboe {
135683ba7b07SChristoph Hellwig 	DECLARE_COMPLETION_ONSTACK(done);
135783ba7b07SChristoph Hellwig 	struct wb_writeback_work work = {
13583c4d7165SChristoph Hellwig 		.sb = sb,
13593c4d7165SChristoph Hellwig 		.sync_mode = WB_SYNC_ALL,
13603c4d7165SChristoph Hellwig 		.nr_pages = LONG_MAX,
13613c4d7165SChristoph Hellwig 		.range_cyclic = 0,
136283ba7b07SChristoph Hellwig 		.done = &done,
13630e175a18SCurt Wohlgemuth 		.reason = WB_REASON_SYNC,
13643c4d7165SChristoph Hellwig 	};
13653c4d7165SChristoph Hellwig 
13666eedc701SJan Kara 	/* Nothing to do? */
13676eedc701SJan Kara 	if (sb->s_bdi == &noop_backing_dev_info)
13686eedc701SJan Kara 		return;
1369cf37e972SChristoph Hellwig 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
1370cf37e972SChristoph Hellwig 
137183ba7b07SChristoph Hellwig 	bdi_queue_work(sb->s_bdi, &work);
137283ba7b07SChristoph Hellwig 	wait_for_completion(&done);
137383ba7b07SChristoph Hellwig 
1374b6e51316SJens Axboe 	wait_sb_inodes(sb);
1375d8a8559cSJens Axboe }
1376d8a8559cSJens Axboe EXPORT_SYMBOL(sync_inodes_sb);
13771da177e4SLinus Torvalds 
13781da177e4SLinus Torvalds /**
13791da177e4SLinus Torvalds  * write_inode_now - write an inode to disk
13801da177e4SLinus Torvalds  * @inode: inode to write to disk
13811da177e4SLinus Torvalds  * @sync: whether the write should be synchronous or not
13821da177e4SLinus Torvalds  *
13837f04c26dSAndrea Arcangeli  * This function commits an inode to disk immediately if it is dirty. This is
13847f04c26dSAndrea Arcangeli  * primarily needed by knfsd.
13857f04c26dSAndrea Arcangeli  *
13867f04c26dSAndrea Arcangeli  * The caller must either have a ref on the inode or must have set I_WILL_FREE.
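 *
 * Example (editor's illustrative sketch, not part of the original
 * source): a knfsd-style caller flushing a dirty inode synchronously
 * (sync == 1 selects WB_SYNC_ALL) and propagating any error:
 *
 *	int err = write_inode_now(inode, 1);
 *	if (err)
 *		return err;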
13871da177e4SLinus Torvalds  */
13881da177e4SLinus Torvalds int write_inode_now(struct inode *inode, int sync)
13891da177e4SLinus Torvalds {
1390f758eeabSChristoph Hellwig 	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
13911da177e4SLinus Torvalds 	struct writeback_control wbc = {
13921da177e4SLinus Torvalds 		.nr_to_write = LONG_MAX,
139318914b18SMike Galbraith 		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
1394111ebb6eSOGAWA Hirofumi 		.range_start = 0,
1395111ebb6eSOGAWA Hirofumi 		.range_end = LLONG_MAX,
13961da177e4SLinus Torvalds 	};
13971da177e4SLinus Torvalds 
13981da177e4SLinus Torvalds 	if (!mapping_cap_writeback_dirty(inode->i_mapping))
139949364ce2SAndrew Morton 		wbc.nr_to_write = 0;
14001da177e4SLinus Torvalds 
14011da177e4SLinus Torvalds 	might_sleep();
14024f8ad655SJan Kara 	return writeback_single_inode(inode, wb, &wbc);
14031da177e4SLinus Torvalds }
14041da177e4SLinus Torvalds EXPORT_SYMBOL(write_inode_now);
14051da177e4SLinus Torvalds 
14061da177e4SLinus Torvalds /**
14071da177e4SLinus Torvalds  * sync_inode - write an inode and its pages to disk.
14081da177e4SLinus Torvalds  * @inode: the inode to sync
14091da177e4SLinus Torvalds  * @wbc: controls the writeback mode
14101da177e4SLinus Torvalds  *
14111da177e4SLinus Torvalds  * sync_inode() will write an inode and its pages to disk. It will also
14121da177e4SLinus Torvalds  * correctly update the inode on its superblock's dirty inode lists and will
14131da177e4SLinus Torvalds  * update inode->i_state.
14141da177e4SLinus Torvalds  *
14151da177e4SLinus Torvalds  * The caller must have a ref on the inode.
14161da177e4SLinus Torvalds  */
14171da177e4SLinus Torvalds int sync_inode(struct inode *inode, struct writeback_control *wbc)
14181da177e4SLinus Torvalds {
14194f8ad655SJan Kara 	return writeback_single_inode(inode, &inode_to_bdi(inode)->wb, wbc);
14201da177e4SLinus Torvalds }
14211da177e4SLinus Torvalds EXPORT_SYMBOL(sync_inode);
1422c3765016SChristoph Hellwig 
1423c3765016SChristoph Hellwig /**
1424c691b9d9SAndrew Morton  * sync_inode_metadata - write an inode to disk
1425c3765016SChristoph Hellwig  * @inode: the inode to sync
1426c3765016SChristoph Hellwig  * @wait: wait for I/O to complete.
1427c3765016SChristoph Hellwig  *
1428c691b9d9SAndrew Morton  * Write an inode to disk and adjust its dirty state after completion.
1429c3765016SChristoph Hellwig  *
1430c3765016SChristoph Hellwig  * Note: only writes the actual inode, no associated data or other metadata.
1431c3765016SChristoph Hellwig  */
1432c3765016SChristoph Hellwig int sync_inode_metadata(struct inode *inode, int wait)
1433c3765016SChristoph Hellwig {
1434c3765016SChristoph Hellwig 	struct writeback_control wbc = {
1435c3765016SChristoph Hellwig 		.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
1436c3765016SChristoph Hellwig 		.nr_to_write = 0, /* metadata-only */
1437c3765016SChristoph Hellwig 	};
1438c3765016SChristoph Hellwig 
1439c3765016SChristoph Hellwig 	return sync_inode(inode, &wbc);
1440c3765016SChristoph Hellwig }
1441c3765016SChristoph Hellwig EXPORT_SYMBOL(sync_inode_metadata);
1442
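/*
 * Example (editor's illustrative sketch, not part of the original file):
 * an fsync() implementation that has already written and waited on the
 * data pages could flush just the inode itself with sync_inode_metadata().
 * wait == 1 selects WB_SYNC_ALL and blocks until the inode is written;
 * wait == 0 is a best-effort WB_SYNC_NONE pass.
 *
 *	int err = sync_inode_metadata(inode, 1);
 *	if (err)
 *		return err;
 */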