/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/tracepoint.h>
#include "internal.h"

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_work {
	long nr_pages;
	struct super_block *sb;
	unsigned long *older_than_this;
	enum writeback_sync_modes sync_mode;
	unsigned int tagged_writepages:1;
	unsigned int for_kupdate:1;
	unsigned int range_cyclic:1;
	unsigned int for_background:1;
	enum wb_reason reason;		/* why was writeback initiated? */

	struct list_head list;		/* pending work list */
	struct completion *done;	/* set if the caller waits */
};

const char *wb_reason_name[] = {
	[WB_REASON_BACKGROUND]		= "background",
	[WB_REASON_TRY_TO_FREE_PAGES]	= "try_to_free_pages",
	[WB_REASON_SYNC]		= "sync",
	[WB_REASON_PERIODIC]		= "periodic",
	[WB_REASON_LAPTOP_TIMER]	= "laptop_timer",
	[WB_REASON_FREE_MORE_MEM]	= "free_more_memory",
	[WB_REASON_FS_FREE_SPACE]	= "fs_free_space",
	[WB_REASON_FORKER_THREAD]	= "forker_thread"
};

/*
 * Include the creation of the trace points after defining the
 * wb_writeback_work structure so that the definition remains local to this
 * file.
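 * CREATE_TRACE_POINTS turns the TRACE_EVENT()s in that header into their
 * definitions, so it must be able to see wb_writeback_work.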
 */
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>

/*
 * We don't actually have pdflush, but this one is exported through /proc...
 */
int nr_pdflush_threads;

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(BDI_writeback_running, &bdi->state);
}

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (strcmp(sb->s_type->name, "bdev") == 0)
		return inode->i_mapping->backing_dev_info;

	return sb->s_bdi;
}

static inline struct inode *wb_inode(struct list_head *head)
{
	return list_entry(head, struct inode, i_wb_list);
}

/* Wakeup flusher thread or forker thread to fork it. Requires bdi->wb_lock. */
static void bdi_wakeup_flusher(struct backing_dev_info *bdi)
{
	if (bdi->wb.task) {
		wake_up_process(bdi->wb.task);
	} else {
		/*
		 * The bdi thread isn't there, wake up the forker thread which
		 * will create and run it.
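		 * The forker thread lives on default_backing_dev_info and
		 * creates per-bdi flusher threads on demand.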
		 */
		wake_up_process(default_backing_dev_info.wb.task);
	}
}

static void bdi_queue_work(struct backing_dev_info *bdi,
			   struct wb_writeback_work *work)
{
	trace_writeback_queue(bdi, work);

	spin_lock_bh(&bdi->wb_lock);
	list_add_tail(&work->list, &bdi->work_list);
	if (!bdi->wb.task)
		trace_writeback_nothread(bdi, work);
	bdi_wakeup_flusher(bdi);
	spin_unlock_bh(&bdi->wb_lock);
}

static void
__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
		      bool range_cyclic, enum wb_reason reason)
{
	struct wb_writeback_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wakeup the thread for old dirty data writeback
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		if (bdi->wb.task) {
			trace_writeback_nowork(bdi);
			wake_up_process(bdi->wb.task);
		}
		return;
	}

	work->sync_mode	= WB_SYNC_NONE;
	work->nr_pages	= nr_pages;
	work->range_cyclic = range_cyclic;
	work->reason	= reason;

	bdi_queue_work(bdi, work);
}

/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started, not waited upon; when this function returns we make no
 *   guarantees on completion. Caller need not hold sb s_umount semaphore.
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
			enum wb_reason reason)
{
	__bdi_start_writeback(bdi, nr_pages, true, reason);
}

/**
 * bdi_start_background_writeback - start background writeback
 * @bdi: the backing device to write from
 *
 * Description:
 *   This makes sure WB_SYNC_NONE background writeback happens. When
 *   this function returns, it is only guaranteed that for the given BDI
 *   some IO is happening if we are over background dirty threshold.
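 *   (No work item is queued here; the flusher thread's
 *   wb_check_background_flush() checks the threshold itself.)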
 *   Caller need not hold sb s_umount semaphore.
 */
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
	/*
	 * We just wake up the flusher thread. It will perform background
	 * writeback as soon as there is no other work to do.
	 */
	trace_writeback_wake_background(bdi);
	spin_lock_bh(&bdi->wb_lock);
	bdi_wakeup_flusher(bdi);
	spin_unlock_bh(&bdi->wb_lock);
}

/*
 * Remove the inode from the writeback list it is on.
 */
void inode_wb_list_del(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	spin_lock(&bdi->wb.list_lock);
	list_del_init(&inode->i_wb_list);
	spin_unlock(&bdi->wb.list_lock);
}

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = wb_inode(wb->b_dirty.next);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_wb_list, &wb->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
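 * The inode is parked on b_more_io, which queue_io() splices back into
 * b_io at the start of the next pass.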
 */
static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	list_move(&inode->i_wb_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through
	 * spin_unlock(&wb->list_lock);
	 */

	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}

/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static int move_expired_inodes(struct list_head *delaying_queue,
			       struct list_head *dispatch_queue,
			       struct wb_writeback_work *work)
{
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;
	int moved = 0;

	while (!list_empty(delaying_queue)) {
		inode = wb_inode(delaying_queue->prev);
		if (work->older_than_this &&
		    inode_dirtied_after(inode, *work->older_than_this))
			break;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
		list_move(&inode->i_wb_list, &tmp);
		moved++;
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		goto out;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		sb = wb_inode(tmp.prev)->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = wb_inode(pos);
			if (inode->i_sb == sb)
				list_move(&inode->i_wb_list, dispatch_queue);
		}
	}
out:
	return moved;
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 * Before
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    gf         edc     BA
 * After
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    g          fBAedc
 *                                           |
 *                                           +--> dequeue for IO
 */
static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
{
	int moved;
	assert_spin_locked(&wb->list_lock);
	list_splice_init(&wb->b_more_io, &wb->b_io);
	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, work);
	trace_writeback_queue_io(wb, work, moved);
}

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, wbc);
	return 0;
}

/*
 * Wait for writeback on an inode to complete.
 */
static void inode_wait_for_writeback(struct inode *inode,
				     struct bdi_writeback *wb)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode->i_lock);
		spin_unlock(&wb->list_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&wb->list_lock);
		spin_lock(&inode->i_lock);
	}
}

/*
 * Write out an inode's dirty pages.  Called under wb->list_lock and
 * inode->i_lock.  Either the caller has an active reference on the inode or
 * the inode has I_WILL_FREE set.
 *
 * If wbc->sync_mode is WB_SYNC_ALL, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
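 *
 * For WB_SYNC_NONE, an inode already under writeback is moved to b_more_io
 * instead of being waited on, so the rest of b_io keeps making progress.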
 */
static int
writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
		       struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	long nr_to_write = wbc->nr_to_write;
	unsigned dirty;
	int ret;

	assert_spin_locked(&wb->list_lock);
	assert_spin_locked(&inode->i_lock);

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		/*
		 * If this inode is locked for writeback and we are not doing
		 * writeback-for-data-integrity, move it to b_more_io so that
		 * writeback can proceed with the other inodes on b_io.
		 *
		 * We'll have another go at writing back this inode once we
		 * have completed a full scan of b_io.
		 */
		if (wbc->sync_mode != WB_SYNC_ALL) {
			requeue_io(inode, wb);
			trace_writeback_single_inode_requeue(inode, wbc,
							     nr_to_write);
			return 0;
		}

		/*
		 * It's a data-integrity sync.  We must wait.
		 */
		inode_wait_for_writeback(inode, wb);
	}

	BUG_ON(inode->i_state & I_SYNC);

	/* Set I_SYNC, reset I_DIRTY_PAGES */
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY_PAGES;
	spin_unlock(&inode->i_lock);
	spin_unlock(&wb->list_lock);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion.
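	 * An example is ext4, which converts unwritten extents from the
	 * data I/O completion path.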
	 */
	if (wbc->sync_mode == WB_SYNC_ALL) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/*
	 * Some filesystems may redirty the inode during the writeback
	 * due to delalloc, clear dirty metadata flags right before
	 * write_inode()
	 */
	spin_lock(&inode->i_lock);
	dirty = inode->i_state & I_DIRTY;
	inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
	spin_unlock(&inode->i_lock);
	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & I_FREEING)) {
		/*
		 * Sync livelock prevention. Each inode is tagged and synced in
		 * one shot. If still dirty, it will be redirty_tail()'ed below.
		 * Update the dirty time to prevent enqueue and sync it again.
		 */
		if ((inode->i_state & I_DIRTY) &&
		    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
			inode->dirtied_when = jiffies;

		if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bails out without doing anything.
			 */
			inode->i_state |= I_DIRTY_PAGES;
			if (wbc->nr_to_write <= 0) {
				/*
				 * slice used up: queue for next turn
				 */
				requeue_io(inode, wb);
			} else {
				/*
				 * Writeback blocked by something other than
				 * congestion. Delay the inode for some time to
				 * avoid spinning on the CPU (100% iowait)
				 * retrying writeback of the dirty page/inode
				 * that cannot be performed immediately.
				 */
				redirty_tail(inode, wb);
			}
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * Filesystems can dirty the inode during writeback
			 * operations, such as delayed allocation during
			 * submission or metadata updates after data IO
			 * completion.
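			 * Use redirty_tail() rather than requeue_io() so
			 * this pass does not busy-retry the inode.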
			 */
			redirty_tail(inode, wb);
		} else {
			/*
			 * The inode is clean.  At this point we either have
			 * a reference to the inode or it's on its way out.
			 * No need to add it back to the LRU.
			 */
			list_del_init(&inode->i_wb_list);
		}
	}
	inode_sync_complete(inode);
	trace_writeback_single_inode(inode, wbc, nr_to_write);
	return ret;
}

static long writeback_chunk_size(struct backing_dev_info *bdi,
				 struct wb_writeback_work *work)
{
	long pages;

	/*
	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
	 * here avoids calling into writeback_inodes_wb() more than once.
	 *
	 * The intended call sequence for WB_SYNC_ALL writeback is:
	 *
	 *      wb_writeback()
	 *          writeback_sb_inodes()       <== called only once
	 *              write_cache_pages()     <== called once for each inode
	 *                  (quickly) tag currently dirty pages
	 *                  (maybe slowly) sync all tagged pages
	 */
	if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
		pages = LONG_MAX;
	else {
		pages = min(bdi->avg_write_bandwidth / 2,
			    global_dirty_limit / DIRTY_SCOPE);
		pages = min(pages, work->nr_pages);
		pages = round_down(pages + MIN_WRITEBACK_PAGES,
				   MIN_WRITEBACK_PAGES);
	}

	return pages;
}

/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * If @work->sb is set, inodes from other superblocks are simply moved back
 * onto the dirty list. Otherwise we stop at the first foreign inode so the
 * caller can unpin this superblock and pin the next one.
 *
 * Return the number of pages and/or inodes written.
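 *
 * Bails out once some progress has been made and ~0.1 second has elapsed,
 * so wb_writeback() can recheck its termination conditions.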
 */
static long writeback_sb_inodes(struct super_block *sb,
				struct bdi_writeback *wb,
				struct wb_writeback_work *work)
{
	struct writeback_control wbc = {
		.sync_mode		= work->sync_mode,
		.tagged_writepages	= work->tagged_writepages,
		.for_kupdate		= work->for_kupdate,
		.for_background		= work->for_background,
		.range_cyclic		= work->range_cyclic,
		.range_start		= 0,
		.range_end		= LLONG_MAX,
	};
	unsigned long start_time = jiffies;
	long write_chunk;
	long wrote = 0;  /* count both pages and inodes */

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);

		if (inode->i_sb != sb) {
			if (work->sb) {
				/*
				 * We only want to write back data for this
				 * superblock, move all inodes not belonging
				 * to it back onto the dirty list.
				 */
				redirty_tail(inode, wb);
				continue;
			}

			/*
			 * The inode belongs to a different superblock.
			 * Bounce back to the caller to unpin this and
			 * pin the next superblock.
			 */
			break;
		}

		/*
		 * Don't bother with new inodes or inodes being freed: the
		 * first kind does not need periodic writeout yet, and for the
		 * latter kind writeout is handled by the freer.
		 */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			redirty_tail(inode, wb);
			continue;
		}
		__iget(inode);
		write_chunk = writeback_chunk_size(wb->bdi, work);
		wbc.nr_to_write = write_chunk;
		wbc.pages_skipped = 0;

		writeback_single_inode(inode, wb, &wbc);

		work->nr_pages -= write_chunk - wbc.nr_to_write;
		wrote += write_chunk - wbc.nr_to_write;
		if (!(inode->i_state & I_DIRTY))
			wrote++;
		if (wbc.pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
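			 * redirty_tail() moves it to the tail of b_dirty,
			 * to be retried on a later pass.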
			 */
			redirty_tail(inode, wb);
		}
		spin_unlock(&inode->i_lock);
		spin_unlock(&wb->list_lock);
		iput(inode);
		cond_resched();
		spin_lock(&wb->list_lock);
		/*
		 * bail out to wb_writeback() often enough to check
		 * background threshold and other termination conditions.
		 */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	return wrote;
}

static long __writeback_inodes_wb(struct bdi_writeback *wb,
				  struct wb_writeback_work *work)
{
	unsigned long start_time = jiffies;
	long wrote = 0;

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);
		struct super_block *sb = inode->i_sb;

		if (!grab_super_passive(sb)) {
			/*
			 * grab_super_passive() may fail consistently due to
			 * s_umount being grabbed by someone else. Don't use
			 * requeue_io() to avoid busy retrying the inode/sb.
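			 * redirty_tail() parks it on b_dirty instead, where
			 * it waits for the next queue_io() round.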
			 */
			redirty_tail(inode, wb);
			continue;
		}
		wrote += writeback_sb_inodes(sb, wb, work);
		drop_super(sb);

		/* refer to the same tests at the end of writeback_sb_inodes */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	/* Leave any unwritten inodes on b_io */
	return wrote;
}

long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
				enum wb_reason reason)
{
	struct wb_writeback_work work = {
		.nr_pages	= nr_pages,
		.sync_mode	= WB_SYNC_NONE,
		.range_cyclic	= 1,
		.reason		= reason,
	};

	spin_lock(&wb->list_lock);
	if (list_empty(&wb->b_io))
		queue_io(wb, &work);
	__writeback_inodes_wb(wb, &work);
	spin_unlock(&wb->list_lock);

	return nr_pages - work.nr_pages;
}

static bool over_bground_thresh(struct backing_dev_info *bdi)
{
	unsigned long background_thresh, dirty_thresh;

	global_dirty_limits(&background_thresh, &dirty_thresh);

	if (global_page_state(NR_FILE_DIRTY) +
	    global_page_state(NR_UNSTABLE_NFS) > background_thresh)
		return true;

	if (bdi_stat(bdi, BDI_RECLAIMABLE) >
				bdi_dirty_limit(bdi, background_thresh))
		return true;

	return false;
}

/*
 * Called under wb->list_lock. If there are multiple wb per bdi,
 * only the flusher working on the first wb should do it.
 */
static void wb_update_bandwidth(struct bdi_writeback *wb,
				unsigned long start_time)
{
	__bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, 0, start_time);
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space. So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
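 *
 * For kupdate-style writeback, "old" means dirtied longer ago than
 * dirty_expire_interval; oldest_jif is refreshed on each pass of the
 * loop below.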
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_work *work)
{
	unsigned long wb_start = jiffies;
	long nr_pages = work->nr_pages;
	unsigned long oldest_jif;
	struct inode *inode;
	long progress;

	oldest_jif = jiffies;
	work->older_than_this = &oldest_jif;

	spin_lock(&wb->list_lock);
	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (work->nr_pages <= 0)
			break;

		/*
		 * Background writeout and kupdate-style writeback may
		 * run forever. Stop them if there is other work to do
		 * so that e.g. sync can proceed. They'll be restarted
		 * after the other work is all done.
		 */
		if ((work->for_background || work->for_kupdate) &&
		    !list_empty(&wb->bdi->work_list))
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (work->for_background && !over_bground_thresh(wb->bdi))
			break;

		if (work->for_kupdate) {
			oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
			work->older_than_this = &oldest_jif;
		}

		trace_writeback_start(wb->bdi, work);
		if (list_empty(&wb->b_io))
			queue_io(wb, work);
		if (work->sb)
			progress = writeback_sb_inodes(work->sb, wb, work);
		else
			progress = __writeback_inodes_wb(wb, work);
		trace_writeback_written(wb->bdi, work);

		wb_update_bandwidth(wb, wb_start);

		/*
		 * Did we write something? Try for more
		 *
		 * Dirty inodes are moved to b_io for writeback in batches.
		 * The completion of the current batch does not necessarily
		 * mean the overall work is done. So we keep looping as long
		 * as we made some progress on cleaning pages or inodes.
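		 * Only when a whole pass writes nothing do we wait on an
		 * inode from b_more_io below, instead of busylooping.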
		 */
		if (progress)
			continue;
		/*
		 * No more inodes for IO, bail
		 */
		if (list_empty(&wb->b_more_io))
			break;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		if (!list_empty(&wb->b_more_io)) {
			trace_writeback_wait(wb->bdi, work);
			inode = wb_inode(wb->b_more_io.prev);
			spin_lock(&inode->i_lock);
			inode_wait_for_writeback(inode, wb);
			spin_unlock(&inode->i_lock);
		}
	}
	spin_unlock(&wb->list_lock);

	return nr_pages - work->nr_pages;
}

/*
 * Return the next wb_writeback_work struct that hasn't been processed yet.
 */
static struct wb_writeback_work *
get_next_work_item(struct backing_dev_info *bdi)
{
	struct wb_writeback_work *work = NULL;

	spin_lock_bh(&bdi->wb_lock);
	if (!list_empty(&bdi->work_list)) {
		work = list_entry(bdi->work_list.next,
				  struct wb_writeback_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_bh(&bdi->wb_lock);
	return work;
}

/*
 * Add in the number of potentially dirty inodes, because each inode
 * write can dirty pagecache in the underlying blockdev.
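 * The result is used to size nr_pages for periodic flushes and for
 * writeback_inodes_sb().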
 */
static unsigned long get_nr_dirty_pages(void)
{
	return global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) +
		get_nr_dirty_inodes();
}

static long wb_check_background_flush(struct bdi_writeback *wb)
{
	if (over_bground_thresh(wb->bdi)) {

		struct wb_writeback_work work = {
			.nr_pages	= LONG_MAX,
			.sync_mode	= WB_SYNC_NONE,
			.for_background	= 1,
			.range_cyclic	= 1,
			.reason		= WB_REASON_BACKGROUND,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	/*
	 * When set to zero, disable periodic writeback
	 */
	if (!dirty_writeback_interval)
		return 0;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = get_nr_dirty_pages();

	if (nr_pages) {
		struct wb_writeback_work work = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
			.reason		= WB_REASON_PERIODIC,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

/*
 * Retrieve work items and do the writeback they describe
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct wb_writeback_work *work;
	long wrote = 0;

	set_bit(BDI_writeback_running, &wb->bdi->state);
	while ((work = get_next_work_item(bdi)) != NULL) {
		/*
		 * Override sync mode, in case we must wait for completion
		 * because this thread is exiting now.
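		 * force_wait is set only on the thread-exit path, so any
		 * remaining queued work is flushed synchronously.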
		 */
		if (force_wait)
			work->sync_mode = WB_SYNC_ALL;

		trace_writeback_exec(bdi, work);

		wrote += wb_writeback(wb, work);

		/*
		 * Notify the caller of completion if this is a synchronous
		 * work item, otherwise just free it.
		 */
		if (work->done)
			complete(work->done);
		else
			kfree(work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);
	wrote += wb_check_background_flush(wb);
	clear_bit(BDI_writeback_running, &wb->bdi->state);

	return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * wakes up periodically and does kupdated style flushing.
 */
int bdi_writeback_thread(void *data)
{
	struct bdi_writeback *wb = data;
	struct backing_dev_info *bdi = wb->bdi;
	long pages_written;

	current->flags |= PF_SWAPWRITE;
	set_freezable();
	wb->last_active = jiffies;

	/*
	 * Our parent may run at a different priority, just set us to normal
	 */
	set_user_nice(current, 0);

	trace_writeback_thread_start(bdi);

	while (!kthread_freezable_should_stop(NULL)) {
		/*
		 * Remove own delayed wake-up timer, since we are already awake
		 * and we'll take care of the periodic write-back.
		 */
		del_timer(&wb->wakeup_timer);

		pages_written = wb_do_writeback(wb, 0);

		trace_writeback_pages_written(pages_written);

		if (pages_written)
			wb->last_active = jiffies;

		set_current_state(TASK_INTERRUPTIBLE);
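		/*
		 * Recheck work_list after setting TASK_INTERRUPTIBLE so a
		 * wakeup from bdi_queue_work() cannot be missed.
		 */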
		if (!list_empty(&bdi->work_list) || kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			continue;
		}

		if (wb_has_dirty_io(wb) && dirty_writeback_interval)
			schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
		else {
			/*
			 * We have nothing to do, so can go sleep without any
			 * timeout and save power. When work is queued or
			 * something is made dirty - we will be woken up.
			 */
			schedule();
		}
	}

	/* Flush any work that raced with us exiting */
	if (!list_empty(&bdi->work_list))
		wb_do_writeback(wb, 1);

	trace_writeback_thread_stop(bdi);
	return 0;
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
{
	struct backing_dev_info *bdi;

	if (!nr_pages) {
		nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;
		__bdi_start_writeback(bdi, nr_pages, false, reason);
	}
	rcu_read_unlock();
}

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}

/**
 * __mark_inode_dirty - internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (e.g. I_DIRTY_SYNC)
 *
 * Mark an inode as dirty. Callers should use mark_inode_dirty or
 * mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct backing_dev_info *bdi = NULL;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode, flags);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode->i_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out_unlock_inode;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list. Add blockdev inodes as well.
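		 * (Blockdev inodes are never hashed, hence the S_ISBLK
		 * exception below.)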
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (inode_unhashed(inode))
				goto out_unlock_inode;
		}
		if (inode->i_state & I_FREEING)
			goto out_unlock_inode;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			bool wakeup_bdi = false;
			bdi = inode_to_bdi(inode);

			if (bdi_cap_writeback_dirty(bdi)) {
				WARN(!test_bit(BDI_registered, &bdi->state),
				     "bdi-%s not registered\n", bdi->name);

				/*
				 * If this is the first dirty inode for this
				 * bdi, we have to wake-up the corresponding
				 * bdi thread to make sure background
				 * write-back happens later.
				 */
				if (!wb_has_dirty_io(&bdi->wb))
					wakeup_bdi = true;
			}

			spin_unlock(&inode->i_lock);
			spin_lock(&bdi->wb.list_lock);
			inode->dirtied_when = jiffies;
			list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
			spin_unlock(&bdi->wb.list_lock);

			if (wakeup_bdi)
				bdi_wakeup_thread_delayed(bdi);
			return;
		}
	}
out_unlock_inode:
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(__mark_inode_dirty);

/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * The inodes to be written are parked on bdi->b_io.  They are moved back onto
 * bdi->b_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
115066f3b8e2SJens Axboe /*
115166f3b8e2SJens Axboe  * Write out a superblock's list of dirty inodes. A wait will be performed
115266f3b8e2SJens Axboe  * upon no inodes, all inodes or the final one, depending upon sync_mode.
115366f3b8e2SJens Axboe  *
115466f3b8e2SJens Axboe  * If older_than_this is non-NULL, then only write out inodes which
115566f3b8e2SJens Axboe  * had their first dirtying at a time earlier than *older_than_this.
115666f3b8e2SJens Axboe  *
115766f3b8e2SJens Axboe  * If `bdi' is non-zero then we're being asked to write back a specific queue.
115866f3b8e2SJens Axboe  * This function assumes that the blockdev superblock's inodes are backed by
115966f3b8e2SJens Axboe  * a variety of queues, so all inodes are searched. For other superblocks,
116066f3b8e2SJens Axboe  * assume that all inodes are backed by the same queue.
116166f3b8e2SJens Axboe  *
116266f3b8e2SJens Axboe  * The inodes to be written are parked on bdi->b_io. They are moved back onto
116366f3b8e2SJens Axboe  * bdi->b_dirty as they are selected for writing. This way, none can be missed
116466f3b8e2SJens Axboe  * on the writer throttling path, and we get decent balancing between many
116566f3b8e2SJens Axboe  * throttled threads: we don't want them all piling up on inode_sync_wait.
116666f3b8e2SJens Axboe  */
1167b6e51316SJens Axboe static void wait_sb_inodes(struct super_block *sb)
116866f3b8e2SJens Axboe {
116938f21977SNick Piggin 	struct inode *inode, *old_inode = NULL;
117038f21977SNick Piggin
117103ba3782SJens Axboe 	/*
117203ba3782SJens Axboe 	 * We need to be protected against the filesystem going from
117303ba3782SJens Axboe 	 * r/o to r/w or vice versa.
117403ba3782SJens Axboe 	 */
1175b6e51316SJens Axboe 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
117603ba3782SJens Axboe
117755fa6091SDave Chinner 	spin_lock(&inode_sb_list_lock);
117866f3b8e2SJens Axboe
117938f21977SNick Piggin 	/*
118038f21977SNick Piggin 	 * Data integrity sync. Must wait for all pages under writeback,
118138f21977SNick Piggin 	 * because there may have been pages dirtied before our sync call
118238f21977SNick Piggin 	 * whose writeout was started earlier. In that case the inode may
118338f21977SNick Piggin 	 * not be on the dirty list, but we still have to wait for that
118438f21977SNick Piggin 	 * writeout.
118538f21977SNick Piggin 	 */
1186b6e51316SJens Axboe 	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
1187250df6edSDave Chinner 		struct address_space *mapping = inode->i_mapping;
118838f21977SNick Piggin
1189250df6edSDave Chinner 		spin_lock(&inode->i_lock);
1190250df6edSDave Chinner 		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
1191250df6edSDave Chinner 		    (mapping->nrpages == 0)) {
1192250df6edSDave Chinner 			spin_unlock(&inode->i_lock);
119338f21977SNick Piggin 			continue;
1194250df6edSDave Chinner 		}
119538f21977SNick Piggin 		__iget(inode);
1196250df6edSDave Chinner 		spin_unlock(&inode->i_lock);
119755fa6091SDave Chinner 		spin_unlock(&inode_sb_list_lock);
119855fa6091SDave Chinner
119938f21977SNick Piggin 		/*
120055fa6091SDave Chinner 		 * We hold a reference to 'inode' so it couldn't have been
120155fa6091SDave Chinner 		 * removed from s_inodes list while we dropped the
120255fa6091SDave Chinner 		 * inode_sb_list_lock. We cannot iput the inode now as we can
120355fa6091SDave Chinner 		 * be holding the last reference and we cannot iput it under
120455fa6091SDave Chinner 		 * inode_sb_list_lock. So we keep the reference and iput it
120555fa6091SDave Chinner 		 * later.
120638f21977SNick Piggin 		 */
120738f21977SNick Piggin 		iput(old_inode);
120838f21977SNick Piggin 		old_inode = inode;
120938f21977SNick Piggin
121038f21977SNick Piggin 		filemap_fdatawait(mapping);
121138f21977SNick Piggin
121238f21977SNick Piggin 		cond_resched();
121338f21977SNick Piggin
121455fa6091SDave Chinner 		spin_lock(&inode_sb_list_lock);
121538f21977SNick Piggin 	}
121655fa6091SDave Chinner 	spin_unlock(&inode_sb_list_lock);
121738f21977SNick Piggin 	iput(old_inode);
121866f3b8e2SJens Axboe }
12191da177e4SLinus Torvalds
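/*
 * Illustrative sketch (hypothetical helper): the same data-integrity rule
 * that wait_sb_inodes() applies across a whole superblock, shown for a
 * single mapping - start writeout, then wait on *all* pages under
 * writeback, including those whose writeout began before we got here.
 */
static int __maybe_unused example_sync_mapping(struct address_space *mapping)
{
	int ret = filemap_fdatawrite(mapping);	/* start writeout */
	int ret2 = filemap_fdatawait(mapping);	/* wait on writeback pages */

	return ret ? ret : ret2;
}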
1220d8a8559cSJens Axboe /**
12213259f8beSChris Mason  * writeback_inodes_sb_nr - writeback dirty inodes from given super_block
1222d8a8559cSJens Axboe  * @sb: the superblock
12233259f8beSChris Mason  * @nr: the number of pages to write
12241da177e4SLinus Torvalds  *
1225d8a8559cSJens Axboe  * Start writeback on some inodes on this super_block. No guarantees are made
1226d8a8559cSJens Axboe  * on how many (if any) will be written, and this function does not wait
12273259f8beSChris Mason  * for IO completion of submitted IO.
12281da177e4SLinus Torvalds  */
12290e175a18SCurt Wohlgemuth void writeback_inodes_sb_nr(struct super_block *sb,
12300e175a18SCurt Wohlgemuth 			    unsigned long nr,
12310e175a18SCurt Wohlgemuth 			    enum wb_reason reason)
12321da177e4SLinus Torvalds {
123383ba7b07SChristoph Hellwig 	DECLARE_COMPLETION_ONSTACK(done);
123483ba7b07SChristoph Hellwig 	struct wb_writeback_work work = {
12353c4d7165SChristoph Hellwig 		.sb			= sb,
12363c4d7165SChristoph Hellwig 		.sync_mode		= WB_SYNC_NONE,
12376e6938b6SWu Fengguang 		.tagged_writepages	= 1,
123883ba7b07SChristoph Hellwig 		.done			= &done,
12393259f8beSChris Mason 		.nr_pages		= nr,
12400e175a18SCurt Wohlgemuth 		.reason			= reason,
12413c4d7165SChristoph Hellwig 	};
12420e3c9a22SJens Axboe
1243cf37e972SChristoph Hellwig 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
124483ba7b07SChristoph Hellwig 	bdi_queue_work(sb->s_bdi, &work);
124583ba7b07SChristoph Hellwig 	wait_for_completion(&done);
12461da177e4SLinus Torvalds }
12473259f8beSChris Mason EXPORT_SYMBOL(writeback_inodes_sb_nr);
12483259f8beSChris Mason
12493259f8beSChris Mason /**
12503259f8beSChris Mason  * writeback_inodes_sb - writeback dirty inodes from given super_block
12513259f8beSChris Mason  * @sb: the superblock
12523259f8beSChris Mason  *
12533259f8beSChris Mason  * Start writeback on some inodes on this super_block. No guarantees are made
12543259f8beSChris Mason  * on how many (if any) will be written, and this function does not wait
12553259f8beSChris Mason  * for IO completion of submitted IO.
12563259f8beSChris Mason  */
12570e175a18SCurt Wohlgemuth void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
12583259f8beSChris Mason {
12590e175a18SCurt Wohlgemuth 	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
12603259f8beSChris Mason }
1261d8a8559cSJens Axboe EXPORT_SYMBOL(writeback_inodes_sb);
1262d8a8559cSJens Axboe
1263d8a8559cSJens Axboe /**
126417bd55d0SEric Sandeen  * writeback_inodes_sb_if_idle - start writeback if none underway
126517bd55d0SEric Sandeen  * @sb: the superblock
126617bd55d0SEric Sandeen  *
126717bd55d0SEric Sandeen  * Invoke writeback_inodes_sb if no writeback is currently underway.
126817bd55d0SEric Sandeen  * Returns 1 if writeback was started, 0 if not.
126917bd55d0SEric Sandeen  */
12700e175a18SCurt Wohlgemuth int writeback_inodes_sb_if_idle(struct super_block *sb, enum wb_reason reason)
127117bd55d0SEric Sandeen {
127217bd55d0SEric Sandeen 	if (!writeback_in_progress(sb->s_bdi)) {
1273cf37e972SChristoph Hellwig 		down_read(&sb->s_umount);
12740e175a18SCurt Wohlgemuth 		writeback_inodes_sb(sb, reason);
1275cf37e972SChristoph Hellwig 		up_read(&sb->s_umount);
127617bd55d0SEric Sandeen 		return 1;
127717bd55d0SEric Sandeen 	} else
127817bd55d0SEric Sandeen 		return 0;
127917bd55d0SEric Sandeen }
128017bd55d0SEric Sandeen EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
128117bd55d0SEric Sandeen
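/*
 * Illustrative caller (hypothetical function name): a filesystem running
 * low on free space can nudge the flusher thread without blocking behind
 * writeback that is already in flight, here with the
 * WB_REASON_FS_FREE_SPACE reason from the table above.
 */
static void __maybe_unused example_kick_flusher(struct super_block *sb)
{
	/* Returns 1 if writeback was started, 0 if it was already running. */
	writeback_inodes_sb_if_idle(sb, WB_REASON_FS_FREE_SPACE);
}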
12823259f8beSChris Mason /**
12833259f8beSChris Mason  * writeback_inodes_sb_nr_if_idle - start writeback if none underway
12843259f8beSChris Mason  * @sb: the superblock
12853259f8beSChris Mason  * @nr: the number of pages to write
12863259f8beSChris Mason  *
12873259f8beSChris Mason  * Invoke writeback_inodes_sb_nr if no writeback is currently underway.
12883259f8beSChris Mason  * Returns 1 if writeback was started, 0 if not.
12893259f8beSChris Mason  */
12903259f8beSChris Mason int writeback_inodes_sb_nr_if_idle(struct super_block *sb,
12910e175a18SCurt Wohlgemuth 				   unsigned long nr,
12920e175a18SCurt Wohlgemuth 				   enum wb_reason reason)
12933259f8beSChris Mason {
12943259f8beSChris Mason 	if (!writeback_in_progress(sb->s_bdi)) {
12953259f8beSChris Mason 		down_read(&sb->s_umount);
12960e175a18SCurt Wohlgemuth 		writeback_inodes_sb_nr(sb, nr, reason);
12973259f8beSChris Mason 		up_read(&sb->s_umount);
12983259f8beSChris Mason 		return 1;
12993259f8beSChris Mason 	} else
13003259f8beSChris Mason 		return 0;
13013259f8beSChris Mason }
13023259f8beSChris Mason EXPORT_SYMBOL(writeback_inodes_sb_nr_if_idle);
13033259f8beSChris Mason
13043259f8beSChris Mason /**
1305d8a8559cSJens Axboe  * sync_inodes_sb - sync sb inode pages
1306d8a8559cSJens Axboe  * @sb: the superblock
1307d8a8559cSJens Axboe  *
1308d8a8559cSJens Axboe  * This function writes and waits on any dirty inode belonging to this
1309cb9ef8d5SStefan Hajnoczi  * super_block.
1310d8a8559cSJens Axboe  */
1311b6e51316SJens Axboe void sync_inodes_sb(struct super_block *sb)
1312d8a8559cSJens Axboe {
131383ba7b07SChristoph Hellwig 	DECLARE_COMPLETION_ONSTACK(done);
131483ba7b07SChristoph Hellwig 	struct wb_writeback_work work = {
13153c4d7165SChristoph Hellwig 		.sb		= sb,
13163c4d7165SChristoph Hellwig 		.sync_mode	= WB_SYNC_ALL,
13173c4d7165SChristoph Hellwig 		.nr_pages	= LONG_MAX,
13183c4d7165SChristoph Hellwig 		.range_cyclic	= 0,
131983ba7b07SChristoph Hellwig 		.done		= &done,
13200e175a18SCurt Wohlgemuth 		.reason		= WB_REASON_SYNC,
13213c4d7165SChristoph Hellwig 	};
13223c4d7165SChristoph Hellwig
1323cf37e972SChristoph Hellwig 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
1324cf37e972SChristoph Hellwig
132583ba7b07SChristoph Hellwig 	bdi_queue_work(sb->s_bdi, &work);
132683ba7b07SChristoph Hellwig 	wait_for_completion(&done);
132783ba7b07SChristoph Hellwig
1328b6e51316SJens Axboe 	wait_sb_inodes(sb);
1329d8a8559cSJens Axboe }
1330d8a8559cSJens Axboe EXPORT_SYMBOL(sync_inodes_sb);
13311da177e4SLinus Torvalds
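/*
 * Illustrative sketch (hypothetical wrapper; the real sync(2) iteration
 * lives in fs/sync.c): sync_inodes_sb() must be called with s_umount held,
 * as its WARN_ON enforces, so a caller pins the superblock first.
 */
static void __maybe_unused example_sync_one_sb(struct super_block *sb)
{
	down_read(&sb->s_umount);
	if (sb->s_root)			/* skip half-dead superblocks */
		sync_inodes_sb(sb);	/* write and wait on all dirty inodes */
	up_read(&sb->s_umount);
}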
13321da177e4SLinus Torvalds /**
13331da177e4SLinus Torvalds  * write_inode_now - write an inode to disk
13341da177e4SLinus Torvalds  * @inode: inode to write to disk
13351da177e4SLinus Torvalds  * @sync: whether the write should be synchronous or not
13361da177e4SLinus Torvalds  *
13377f04c26dSAndrea Arcangeli  * This function commits an inode to disk immediately if it is dirty. This is
13387f04c26dSAndrea Arcangeli  * primarily needed by knfsd.
13397f04c26dSAndrea Arcangeli  *
13407f04c26dSAndrea Arcangeli  * The caller must either have a ref on the inode or must have set I_WILL_FREE.
13411da177e4SLinus Torvalds  */
13421da177e4SLinus Torvalds int write_inode_now(struct inode *inode, int sync)
13431da177e4SLinus Torvalds {
1344f758eeabSChristoph Hellwig 	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
13451da177e4SLinus Torvalds 	int ret;
13461da177e4SLinus Torvalds 	struct writeback_control wbc = {
13471da177e4SLinus Torvalds 		.nr_to_write = LONG_MAX,
134818914b18SMike Galbraith 		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
1349111ebb6eSOGAWA Hirofumi 		.range_start = 0,
1350111ebb6eSOGAWA Hirofumi 		.range_end = LLONG_MAX,
13511da177e4SLinus Torvalds 	};
13521da177e4SLinus Torvalds
13531da177e4SLinus Torvalds 	if (!mapping_cap_writeback_dirty(inode->i_mapping))
135449364ce2SAndrew Morton 		wbc.nr_to_write = 0;
13551da177e4SLinus Torvalds
13561da177e4SLinus Torvalds 	might_sleep();
1357f758eeabSChristoph Hellwig 	spin_lock(&wb->list_lock);
13580f1b1fd8SDave Chinner 	spin_lock(&inode->i_lock);
1359f758eeabSChristoph Hellwig 	ret = writeback_single_inode(inode, wb, &wbc);
13600f1b1fd8SDave Chinner 	spin_unlock(&inode->i_lock);
1361f758eeabSChristoph Hellwig 	spin_unlock(&wb->list_lock);
13621da177e4SLinus Torvalds 	if (sync)
13631c0eeaf5SJoern Engel 		inode_sync_wait(inode);
13641da177e4SLinus Torvalds 	return ret;
13651da177e4SLinus Torvalds }
13661da177e4SLinus Torvalds EXPORT_SYMBOL(write_inode_now);
13671da177e4SLinus Torvalds
13681da177e4SLinus Torvalds /**
13691da177e4SLinus Torvalds  * sync_inode - write an inode and its pages to disk.
13701da177e4SLinus Torvalds  * @inode: the inode to sync
13711da177e4SLinus Torvalds  * @wbc: controls the writeback mode
13721da177e4SLinus Torvalds  *
13731da177e4SLinus Torvalds  * sync_inode() will write an inode and its pages to disk. It will also
13741da177e4SLinus Torvalds  * correctly update the inode on its superblock's dirty inode lists and will
13751da177e4SLinus Torvalds  * update inode->i_state.
13761da177e4SLinus Torvalds  *
13771da177e4SLinus Torvalds  * The caller must have a ref on the inode.
13781da177e4SLinus Torvalds  */
13791da177e4SLinus Torvalds int sync_inode(struct inode *inode, struct writeback_control *wbc)
13801da177e4SLinus Torvalds {
1381f758eeabSChristoph Hellwig 	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
13821da177e4SLinus Torvalds 	int ret;
13831da177e4SLinus Torvalds
1384f758eeabSChristoph Hellwig 	spin_lock(&wb->list_lock);
13850f1b1fd8SDave Chinner 	spin_lock(&inode->i_lock);
1386f758eeabSChristoph Hellwig 	ret = writeback_single_inode(inode, wb, wbc);
13870f1b1fd8SDave Chinner 	spin_unlock(&inode->i_lock);
1388f758eeabSChristoph Hellwig 	spin_unlock(&wb->list_lock);
13891da177e4SLinus Torvalds 	return ret;
13901da177e4SLinus Torvalds }
13911da177e4SLinus Torvalds EXPORT_SYMBOL(sync_inode);
1392c3765016SChristoph Hellwig
1393c3765016SChristoph Hellwig /**
1394c691b9d9SAndrew Morton  * sync_inode_metadata - write an inode to disk
1395c3765016SChristoph Hellwig  * @inode: the inode to sync
1396c3765016SChristoph Hellwig  * @wait: wait for I/O to complete.
1397c3765016SChristoph Hellwig  *
1398c691b9d9SAndrew Morton  * Write an inode to disk and adjust its dirty state after completion.
1399c3765016SChristoph Hellwig  *
1400c3765016SChristoph Hellwig  * Note: only writes the actual inode, no associated data or other metadata.
1401c3765016SChristoph Hellwig  */
1402c3765016SChristoph Hellwig int sync_inode_metadata(struct inode *inode, int wait)
1403c3765016SChristoph Hellwig {
1404c3765016SChristoph Hellwig 	struct writeback_control wbc = {
1405c3765016SChristoph Hellwig 		.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
1406c3765016SChristoph Hellwig 		.nr_to_write = 0, /* metadata-only */
1407c3765016SChristoph Hellwig 	};
1408c3765016SChristoph Hellwig
1409c3765016SChristoph Hellwig 	return sync_inode(inode, &wbc);
1410c3765016SChristoph Hellwig }
1411c3765016SChristoph Hellwig EXPORT_SYMBOL(sync_inode_metadata);
1412
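/*
 * Illustrative fsync-style caller (hypothetical helper): flush the data
 * pages through the mapping first, then write just the inode itself with
 * sync_inode_metadata(), roughly what simple ->fsync() implementations do.
 */
static int __maybe_unused example_fsync(struct inode *inode, int datasync)
{
	int err = filemap_write_and_wait(inode->i_mapping);

	if (err)
		return err;
	/* For fdatasync(), skip the inode write unless it matters for data. */
	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		return 0;
	return sync_inode_metadata(inode, 1);
}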