/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/tracepoint.h>
#include "internal.h"

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_work {
	long nr_pages;
	struct super_block *sb;
	enum writeback_sync_modes sync_mode;
	unsigned int tagged_writepages:1;
	unsigned int for_kupdate:1;
	unsigned int range_cyclic:1;
	unsigned int for_background:1;

	struct list_head list;		/* pending work list */
	struct completion *done;	/* set if the caller waits */
};

/*
 * Include the creation of the trace points after defining the
 * wb_writeback_work structure so that the definition remains local to this
 * file.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>

/*
 * We don't actually have pdflush, but this one is exported through /proc...
 */
int nr_pdflush_threads;
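/*
 * Illustrative sketch (not part of the original flow of this file): a caller
 * that wants opportunistic writeback fills in a wb_writeback_work and hands
 * it to the flusher via bdi_queue_work(), defined below.  This mirrors what
 * __bdi_start_writeback() does; the budget value is made up:
 *
 *	struct wb_writeback_work *work = kzalloc(sizeof(*work), GFP_ATOMIC);
 *
 *	if (work) {
 *		work->sync_mode	   = WB_SYNC_NONE;	// don't wait on pages
 *		work->nr_pages	   = 1024;		// illustrative budget
 *		work->range_cyclic = 1;			// resume where we left off
 *		bdi_queue_work(bdi, work);	// flusher kfree()s it when done
 *	}
 */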
/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(BDI_writeback_running, &bdi->state);
}

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (strcmp(sb->s_type->name, "bdev") == 0)
		return inode->i_mapping->backing_dev_info;

	return sb->s_bdi;
}

static inline struct inode *wb_inode(struct list_head *head)
{
	return list_entry(head, struct inode, i_wb_list);
}

/* Wakeup flusher thread or forker thread to fork it. Requires bdi->wb_lock. */
static void bdi_wakeup_flusher(struct backing_dev_info *bdi)
{
	if (bdi->wb.task) {
		wake_up_process(bdi->wb.task);
	} else {
		/*
		 * The bdi thread isn't there, wake up the forker thread which
		 * will create and run it.
		 */
		wake_up_process(default_backing_dev_info.wb.task);
	}
}

static void bdi_queue_work(struct backing_dev_info *bdi,
			   struct wb_writeback_work *work)
{
	trace_writeback_queue(bdi, work);

	spin_lock_bh(&bdi->wb_lock);
	list_add_tail(&work->list, &bdi->work_list);
	if (!bdi->wb.task)
		trace_writeback_nothread(bdi, work);
	bdi_wakeup_flusher(bdi);
	spin_unlock_bh(&bdi->wb_lock);
}

static void
__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
		      bool range_cyclic)
{
	struct wb_writeback_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wakeup the thread for old dirty data writeback
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		if (bdi->wb.task) {
			trace_writeback_nowork(bdi);
			wake_up_process(bdi->wb.task);
		}
		return;
	}

	work->sync_mode	= WB_SYNC_NONE;
	work->nr_pages	= nr_pages;
	work->range_cyclic = range_cyclic;

	bdi_queue_work(bdi, work);
}
/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started when this function returns; we make no guarantees on
 *   completion. The caller need not hold the sb s_umount semaphore.
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
{
	__bdi_start_writeback(bdi, nr_pages, true);
}

/**
 * bdi_start_background_writeback - start background writeback
 * @bdi: the backing device to write from
 *
 * Description:
 *   This makes sure WB_SYNC_NONE background writeback happens. When
 *   this function returns, it is only guaranteed that for the given BDI
 *   some IO is happening if we are over the background dirty threshold.
 *   The caller need not hold the sb s_umount semaphore.
 */
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
	/*
	 * We just wake up the flusher thread. It will perform background
	 * writeback as soon as there is no other work to do.
	 */
	trace_writeback_wake_background(bdi);
	spin_lock_bh(&bdi->wb_lock);
	bdi_wakeup_flusher(bdi);
	spin_unlock_bh(&bdi->wb_lock);
}
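/*
 * Typical use (a sketch, not code from this file): the dirty throttling path
 * in mm/page-writeback.c kicks background writeback when a dirtying task
 * pushes the system over the background threshold, and keeps running without
 * waiting for it.  "over_background_threshold" below is shorthand for the
 * real check, which lives in balance_dirty_pages():
 *
 *	if (over_background_threshold)
 *		bdi_start_background_writeback(bdi);	// fire and forget
 */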
/*
 * Remove the inode from the writeback list it is on.
 */
void inode_wb_list_del(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	spin_lock(&bdi->wb.list_lock);
	list_del_init(&inode->i_wb_list);
	spin_unlock(&bdi->wb.list_lock);
}

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = wb_inode(wb->b_dirty.next);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_wb_list, &wb->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	list_move(&inode->i_wb_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through
	 * spin_unlock(&wb->list_lock);
	 */

	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in the distant
	 * past.  This test is necessary to prevent such wrapped-around
	 * relative times from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}
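/*
 * Worked example of the 32-bit guard above (values are illustrative): with
 * HZ=1000, a 32-bit jiffies counter wraps about every 49.7 days, and signed
 * time comparisons flip meaning at half that distance.  So for an inode
 * whose timestamp has been left behind:
 *
 *	inode->dirtied_when = 0x00000010;	// dirtied ~25 days ago
 *	t = 0x80000020;				// jiffies now
 *	time_after(0x00000010, 0x80000020)	// true: looks "in the future"
 *
 * The extra time_before_eq(dirtied_when, jiffies) check rejects such stuck
 * timestamps, so the inode still gets considered for writeback.
 */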
/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
				struct list_head *dispatch_queue,
				unsigned long *older_than_this)
{
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;

	while (!list_empty(delaying_queue)) {
		inode = wb_inode(delaying_queue->prev);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
		list_move(&inode->i_wb_list, &tmp);
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		return;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		sb = wb_inode(tmp.prev)->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = wb_inode(pos);
			if (inode->i_sb == sb)
				list_move(&inode->i_wb_list, dispatch_queue);
		}
	}
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 * Before
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    gf         edc     BA
 * After
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    g          fBAedc
 *                                      |
 *                                      +--> dequeue for IO
 */
static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
{
	assert_spin_locked(&wb->list_lock);
	list_splice_init(&wb->b_more_io, &wb->b_io);
	move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
}

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, wbc);
	return 0;
}
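/*
 * write_inode() above simply dispatches to the filesystem's ->write_inode
 * method.  A hypothetical minimal implementation (the myfs_* names are made
 * up for illustration, not from any real filesystem):
 *
 *	static int myfs_write_inode(struct inode *inode,
 *				    struct writeback_control *wbc)
 *	{
 *		// push the on-disk inode; block only for data-integrity syncs
 *		return myfs_sync_inode_to_disk(inode,
 *					wbc->sync_mode == WB_SYNC_ALL);
 *	}
 *
 * wired up through super_operations as .write_inode = myfs_write_inode.
 */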
/*
 * Wait for writeback on an inode to complete.
 */
static void inode_wait_for_writeback(struct inode *inode,
				     struct bdi_writeback *wb)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode->i_lock);
		spin_unlock(&wb->list_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&wb->list_lock);
		spin_lock(&inode->i_lock);
	}
}

/*
 * Write out an inode's dirty pages.  Called under wb->list_lock and
 * inode->i_lock.  Either the caller has an active reference on the inode or
 * the inode has I_WILL_FREE set.
 *
 * If the sync_mode is WB_SYNC_ALL, we wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 */
static int
writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
		       struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	unsigned dirty;
	int ret;

	assert_spin_locked(&wb->list_lock);
	assert_spin_locked(&inode->i_lock);

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		/*
		 * If this inode is locked for writeback and we are not doing
		 * writeback-for-data-integrity, move it to b_more_io so that
		 * writeback can proceed with the other inodes on s_io.
		 *
		 * We'll have another go at writing back this inode when we
		 * have completed a full scan of b_io.
		 */
		if (wbc->sync_mode != WB_SYNC_ALL) {
			requeue_io(inode, wb);
			return 0;
		}

		/*
		 * It's a data-integrity sync.  We must wait.
		 */
		inode_wait_for_writeback(inode, wb);
	}

	BUG_ON(inode->i_state & I_SYNC);

	/* Set I_SYNC, reset I_DIRTY_PAGES */
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY_PAGES;
	spin_unlock(&inode->i_lock);
	spin_unlock(&wb->list_lock);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/*
	 * Some filesystems may redirty the inode during the writeback
	 * due to delalloc, clear dirty metadata flags right before
	 * write_inode()
	 */
	spin_lock(&inode->i_lock);
	dirty = inode->i_state & I_DIRTY;
	inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
	spin_unlock(&inode->i_lock);
	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & I_FREEING)) {
		/*
		 * Sync livelock prevention. Each inode is tagged and synced in
		 * one shot. If still dirty, it will be redirty_tail()'ed below.
		 * Update the dirty time to prevent enqueue and sync it again.
		 */
		if ((inode->i_state & I_DIRTY) &&
		    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
			inode->dirtied_when = jiffies;

		if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bails out without doing anything.
			 */
			inode->i_state |= I_DIRTY_PAGES;
			if (wbc->nr_to_write <= 0) {
				/*
				 * slice used up: queue for next turn
				 */
				requeue_io(inode, wb);
			} else {
				/*
				 * Writeback blocked by something other than
				 * congestion. Delay the inode for some time to
				 * avoid spinning on the CPU (100% iowait)
				 * retrying writeback of the dirty page/inode
				 * that cannot be performed immediately.
				 */
				redirty_tail(inode, wb);
			}
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * Filesystems can dirty the inode during writeback
			 * operations, such as delayed allocation during
			 * submission or metadata updates after data IO
			 * completion.
			 */
			redirty_tail(inode, wb);
		} else {
			/*
			 * The inode is clean.  At this point we either have
			 * a reference to the inode or it's on its way out.
			 * No need to add it back to the LRU.
			 */
			list_del_init(&inode->i_wb_list);
			wbc->inodes_written++;
		}
	}
	inode_sync_complete(inode);
	return ret;
}

/*
 * For background writeback the caller does not have the sb pinned
 * before calling writeback. So make sure that we do pin it, so it doesn't
 * go away while we are writing inodes from it.
 */
static bool pin_sb_for_writeback(struct super_block *sb)
{
	spin_lock(&sb_lock);
	if (list_empty(&sb->s_instances)) {
		spin_unlock(&sb_lock);
		return false;
	}

	sb->s_count++;
	spin_unlock(&sb_lock);

	if (down_read_trylock(&sb->s_umount)) {
		if (sb->s_root)
			return true;
		up_read(&sb->s_umount);
	}

	put_super(sb);
	return false;
}
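/*
 * The pin/unpin pairing pin_sb_for_writeback() expects from its callers, as
 * a sketch (this is the pattern __writeback_inodes_wb() below follows):
 *
 *	if (pin_sb_for_writeback(sb)) {
 *		writeback_sb_inodes(sb, wb, wbc, false);
 *		drop_super(sb);		// releases s_umount and the s_count ref
 *	}
 */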
/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * If @only_this_sb is true, then find and write all such
 * inodes. Otherwise write only ones which go sequentially
 * in reverse order.
 *
 * Return 1 if the caller's writeback routine should be
 * interrupted. Otherwise return 0.
 */
static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
			       struct writeback_control *wbc, bool only_this_sb)
{
	while (!list_empty(&wb->b_io)) {
		long pages_skipped;
		struct inode *inode = wb_inode(wb->b_io.prev);

		if (inode->i_sb != sb) {
			if (only_this_sb) {
				/*
				 * We only want to write back data for this
				 * superblock, move all inodes not belonging
				 * to it back onto the dirty list.
				 */
				redirty_tail(inode, wb);
				continue;
			}

			/*
			 * The inode belongs to a different superblock.
			 * Bounce back to the caller to unpin this and
			 * pin the next superblock.
			 */
			return 0;
		}

		/*
		 * Don't bother with new inodes or inodes being freed; the
		 * first kind does not need periodic writeout yet, and for the
		 * latter kind writeout is handled by the freer.
		 */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			requeue_io(inode, wb);
			continue;
		}

		/*
		 * Was this inode dirtied after sync_sb_inodes was called?
		 * This keeps sync from extra jobs and livelock.
		 */
		if (inode_dirtied_after(inode, wbc->wb_start)) {
			spin_unlock(&inode->i_lock);
			return 1;
		}

		__iget(inode);

		pages_skipped = wbc->pages_skipped;
		writeback_single_inode(inode, wb, wbc);
		if (wbc->pages_skipped != pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers. Skip this inode for now.
			 */
			redirty_tail(inode, wb);
		}
		spin_unlock(&inode->i_lock);
		spin_unlock(&wb->list_lock);
		iput(inode);
		cond_resched();
		spin_lock(&wb->list_lock);
		if (wbc->nr_to_write <= 0) {
			wbc->more_io = 1;
			return 1;
		}
		if (!list_empty(&wb->b_more_io))
			wbc->more_io = 1;
	}
	/* b_io is empty */
	return 1;
}

static void __writeback_inodes_wb(struct bdi_writeback *wb,
				  struct writeback_control *wbc)
{
	int ret = 0;

	if (!wbc->wb_start)
		wbc->wb_start = jiffies; /* livelock avoidance */

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);
		struct super_block *sb = inode->i_sb;

		if (!pin_sb_for_writeback(sb)) {
			requeue_io(inode, wb);
			continue;
		}
		ret = writeback_sb_inodes(sb, wb, wbc, false);
		drop_super(sb);

		if (ret)
			break;
	}
	/* Leave any unwritten inodes on b_io */
}

void writeback_inodes_wb(struct bdi_writeback *wb,
			 struct writeback_control *wbc)
{
	spin_lock(&wb->list_lock);
	if (list_empty(&wb->b_io))
		queue_io(wb, wbc->older_than_this);
	__writeback_inodes_wb(wb, wbc);
	spin_unlock(&wb->list_lock);
}
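/*
 * Example caller (a sketch; the real one is the dirty throttling code in
 * mm/page-writeback.c): build a WB_SYNC_NONE writeback_control with a page
 * budget and let writeback_inodes_wb() consume it.  The budget is made up:
 *
 *	struct writeback_control wbc = {
 *		.sync_mode	= WB_SYNC_NONE,
 *		.nr_to_write	= 1024,		// illustrative page budget
 *		.range_cyclic	= 1,
 *	};
 *
 *	writeback_inodes_wb(&bdi->wb, &wbc);
 *	// wbc.nr_to_write now holds whatever is left of the budget
 */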
/*
 * The maximum number of pages to write out in a single bdi flush/kupdate
 * operation.  We do this so we don't hold I_SYNC against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code reevaluates
 * the dirty state each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES	1024

static inline bool over_bground_thresh(void)
{
	unsigned long background_thresh, dirty_thresh;

	global_dirty_limits(&background_thresh, &dirty_thresh);

	return (global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) > background_thresh);
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than one dirty_writeback_interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_work *work)
{
	struct writeback_control wbc = {
		.sync_mode		= work->sync_mode,
		.tagged_writepages	= work->tagged_writepages,
		.older_than_this	= NULL,
		.for_kupdate		= work->for_kupdate,
		.for_background		= work->for_background,
		.range_cyclic		= work->range_cyclic,
	};
	unsigned long oldest_jif;
	long wrote = 0;
	long write_chunk = MAX_WRITEBACK_PAGES;
	struct inode *inode;

	if (!wbc.range_cyclic) {
		wbc.range_start = 0;
		wbc.range_end = LLONG_MAX;
	}

	/*
	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
	 * here avoids calling into writeback_inodes_wb() more than once.
	 *
	 * The intended call sequence for WB_SYNC_ALL writeback is:
	 *
	 * wb_writeback()
	 *     writeback_sb_inodes()	<== called only once
	 *         write_cache_pages()	<== called once for each inode
	 *             (quickly) tag currently dirty pages
	 *             (maybe slowly) sync all tagged pages
	 */
	if (wbc.sync_mode == WB_SYNC_ALL || wbc.tagged_writepages)
		write_chunk = LONG_MAX;

	wbc.wb_start = jiffies; /* livelock avoidance */
	spin_lock(&wb->list_lock);
	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (work->nr_pages <= 0)
			break;

		/*
		 * Background writeout and kupdate-style writeback may
		 * run forever. Stop them if there is other work to do
		 * so that e.g. sync can proceed. They'll be restarted
		 * after the other works are all done.
		 */
		if ((work->for_background || work->for_kupdate) &&
		    !list_empty(&wb->bdi->work_list))
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (work->for_background && !over_bground_thresh())
			break;

		if (work->for_kupdate) {
			oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
			wbc.older_than_this = &oldest_jif;
		}

		wbc.more_io = 0;
		wbc.nr_to_write = write_chunk;
		wbc.pages_skipped = 0;
		wbc.inodes_written = 0;

		trace_wbc_writeback_start(&wbc, wb->bdi);
		if (list_empty(&wb->b_io))
			queue_io(wb, wbc.older_than_this);
		if (work->sb)
			writeback_sb_inodes(work->sb, wb, &wbc, true);
		else
			__writeback_inodes_wb(wb, &wbc);
		trace_wbc_writeback_written(&wbc, wb->bdi);

		work->nr_pages -= write_chunk - wbc.nr_to_write;
		wrote += write_chunk - wbc.nr_to_write;

		/*
		 * Did we write something? Try for more
		 *
		 * Dirty inodes are moved to b_io for writeback in batches.
		 * The completion of the current batch does not necessarily
		 * mean the overall work is done. So we keep looping as long
		 * as we made some progress on cleaning pages or inodes.
		 */
		if (wbc.nr_to_write < write_chunk)
			continue;
		if (wbc.inodes_written)
			continue;
		/*
		 * No more inodes for IO, bail
		 */
		if (!wbc.more_io)
			break;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		if (!list_empty(&wb->b_more_io)) {
			inode = wb_inode(wb->b_more_io.prev);
			trace_wbc_writeback_wait(&wbc, wb->bdi);
			spin_lock(&inode->i_lock);
			inode_wait_for_writeback(inode, wb);
			spin_unlock(&inode->i_lock);
		}
	}
	spin_unlock(&wb->list_lock);

	return wrote;
}

/*
 * Return the next wb_writeback_work struct that hasn't been processed yet.
 */
static struct wb_writeback_work *
get_next_work_item(struct backing_dev_info *bdi)
{
	struct wb_writeback_work *work = NULL;

	spin_lock_bh(&bdi->wb_lock);
	if (!list_empty(&bdi->work_list)) {
		work = list_entry(bdi->work_list.next,
				  struct wb_writeback_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_bh(&bdi->wb_lock);
	return work;
}
/*
 * Add in the number of potentially dirty inodes, because each inode
 * write can dirty pagecache in the underlying blockdev.
 */
static unsigned long get_nr_dirty_pages(void)
{
	return global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) +
		get_nr_dirty_inodes();
}

static long wb_check_background_flush(struct bdi_writeback *wb)
{
	if (over_bground_thresh()) {

		struct wb_writeback_work work = {
			.nr_pages	= LONG_MAX,
			.sync_mode	= WB_SYNC_NONE,
			.for_background	= 1,
			.range_cyclic	= 1,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	/*
	 * When set to zero, disable periodic writeback
	 */
	if (!dirty_writeback_interval)
		return 0;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = get_nr_dirty_pages();

	if (nr_pages) {
		struct wb_writeback_work work = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}
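/*
 * Interval arithmetic used above, as a worked example: the sysctl intervals
 * are in centiseconds, hence the "* 10" to convert to milliseconds.  With
 * the usual defaults (assumed here, not set in this file):
 *
 *	dirty_writeback_interval = 500;		// 500 cs = 5 s between runs
 *	dirty_expire_interval	 = 3000;	// 3000 cs = 30 s until "old"
 *
 *	expired = wb->last_old_flush + msecs_to_jiffies(500 * 10);
 *	// i.e. kupdate-style flushing runs at most once every 5 seconds
 */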
/*
 * Retrieve work items and do the writeback they describe
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct wb_writeback_work *work;
	long wrote = 0;

	set_bit(BDI_writeback_running, &wb->bdi->state);
	while ((work = get_next_work_item(bdi)) != NULL) {
		/*
		 * Override sync mode, in case we must wait for completion
		 * because this thread is exiting now.
		 */
		if (force_wait)
			work->sync_mode = WB_SYNC_ALL;

		trace_writeback_exec(bdi, work);

		wrote += wb_writeback(wb, work);

		/*
		 * Notify the caller of completion if this is a synchronous
		 * work item, otherwise just free it.
		 */
		if (work->done)
			complete(work->done);
		else
			kfree(work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);
	wrote += wb_check_background_flush(wb);
	clear_bit(BDI_writeback_running, &wb->bdi->state);

	return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * wakes up periodically and does kupdated style flushing.
 */
int bdi_writeback_thread(void *data)
{
	struct bdi_writeback *wb = data;
	struct backing_dev_info *bdi = wb->bdi;
	long pages_written;

	current->flags |= PF_SWAPWRITE;
	set_freezable();
	wb->last_active = jiffies;

	/*
	 * Our parent may run at a different priority, just set us to normal
	 */
	set_user_nice(current, 0);

	trace_writeback_thread_start(bdi);

	while (!kthread_should_stop()) {
		/*
		 * Remove own delayed wake-up timer, since we are already awake
		 * and we'll take care of the periodic write-back.
		 */
		del_timer(&wb->wakeup_timer);

		pages_written = wb_do_writeback(wb, 0);

		trace_writeback_pages_written(pages_written);

		if (pages_written)
			wb->last_active = jiffies;

		set_current_state(TASK_INTERRUPTIBLE);
		if (!list_empty(&bdi->work_list) || kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			continue;
		}

		if (wb_has_dirty_io(wb) && dirty_writeback_interval)
			schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
		else {
			/*
			 * We have nothing to do, so we can sleep without any
			 * timeout and save power. When work is queued or
			 * something is made dirty, we will be woken up.
			 */
			schedule();
		}

		try_to_freeze();
	}

	/* Flush any work that raced with us exiting */
	if (!list_empty(&bdi->work_list))
		wb_do_writeback(wb, 1);

	trace_writeback_thread_stop(bdi);
	return 0;
}


/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages)
{
	struct backing_dev_info *bdi;

	if (!nr_pages) {
		nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;
		__bdi_start_writeback(bdi, nr_pages, false);
	}
	rcu_read_unlock();
}

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}
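/*
 * Callers normally reach __mark_inode_dirty() below through the wrappers in
 * include/linux/fs.h.  For example (illustrative), a filesystem growing a
 * file would do:
 *
 *	i_size_write(inode, new_size);
 *	mark_inode_dirty(inode);	// == __mark_inode_dirty(inode, I_DIRTY)
 *
 * while timestamp-only updates use mark_inode_dirty_sync(inode), i.e.
 * __mark_inode_dirty(inode, I_DIRTY_SYNC).
 */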
/**
 * __mark_inode_dirty -	internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *
 * Mark an inode as dirty. Callers should use mark_inode_dirty or
 * mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct backing_dev_info *bdi = NULL;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode, flags);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode->i_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out_unlock_inode;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (inode_unhashed(inode))
				goto out_unlock_inode;
		}
		if (inode->i_state & I_FREEING)
			goto out_unlock_inode;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			bool wakeup_bdi = false;
			bdi = inode_to_bdi(inode);

			if (bdi_cap_writeback_dirty(bdi)) {
				WARN(!test_bit(BDI_registered, &bdi->state),
				     "bdi-%s not registered\n", bdi->name);

				/*
				 * If this is the first dirty inode for this
				 * bdi, we have to wake up the corresponding
				 * bdi thread to make sure background
				 * write-back happens later.
				 */
				if (!wb_has_dirty_io(&bdi->wb))
					wakeup_bdi = true;
			}

			spin_unlock(&inode->i_lock);
			spin_lock(&bdi->wb.list_lock);
			inode->dirtied_when = jiffies;
			list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
			spin_unlock(&bdi->wb.list_lock);

			if (wakeup_bdi)
				bdi_wakeup_thread_delayed(bdi);
			return;
		}
	}
out_unlock_inode:
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(__mark_inode_dirty);

/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * The inodes to be written are parked on bdi->b_io.  They are moved back onto
 * bdi->b_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
/**
 * writeback_inodes_sb_nr - writeback dirty inodes from given super_block
 * @sb: the superblock
 * @nr: the number of pages to write
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb			= sb,
		.sync_mode		= WB_SYNC_NONE,
		.tagged_writepages	= 1,
		.done			= &done,
		.nr_pages		= nr,
	};

	WARN_ON(!rwsem_is_locked(&sb->s_umount));
	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);
}
EXPORT_SYMBOL(writeback_inodes_sb_nr);
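/*
 * Example (editor's sketch, not part of this file): kicking off a bounded
 * flush of one filesystem. The 1024-page figure is an arbitrary
 * illustration (~4MB with 4K pages). Note that the completion above waits
 * only for the flusher thread to process the work item, not for the IO it
 * submits, and the s_umount requirement is the WARN_ON above.
 */
#if 0
static void example_flush_some_pages(struct super_block *sb)
{
	down_read(&sb->s_umount);		/* caller must hold s_umount */
	writeback_inodes_sb_nr(sb, 1024);
	up_read(&sb->s_umount);
}
#endif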
/**
 * writeback_inodes_sb - writeback dirty inodes from given super_block
 * @sb: the superblock
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb)
{
	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages());
}
EXPORT_SYMBOL(writeback_inodes_sb);

/**
 * writeback_inodes_sb_if_idle - start writeback if none underway
 * @sb: the superblock
 *
 * Invoke writeback_inodes_sb if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int writeback_inodes_sb_if_idle(struct super_block *sb)
{
	if (!writeback_in_progress(sb->s_bdi)) {
		down_read(&sb->s_umount);
		writeback_inodes_sb(sb);
		up_read(&sb->s_umount);
		return 1;
	} else
		return 0;
}
EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
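/*
 * Example (editor's sketch, not part of this file): an opportunistic flush
 * of the kind a filesystem might try before resorting to harder measures
 * when space or dirty limits get tight. Unlike writeback_inodes_sb(), the
 * _if_idle variant takes s_umount itself, so the caller must not already
 * hold it.
 */
#if 0
static void example_try_background_flush(struct super_block *sb)
{
	if (writeback_inodes_sb_if_idle(sb))
		return;		/* we started writeback ourselves */

	/* writeback already in progress; throttle or wait instead */
}
#endif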
/**
 * writeback_inodes_sb_nr_if_idle - start writeback if none underway
 * @sb: the superblock
 * @nr: the number of pages to write
 *
 * Invoke writeback_inodes_sb_nr if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int writeback_inodes_sb_nr_if_idle(struct super_block *sb,
				   unsigned long nr)
{
	if (!writeback_in_progress(sb->s_bdi)) {
		down_read(&sb->s_umount);
		writeback_inodes_sb_nr(sb, nr);
		up_read(&sb->s_umount);
		return 1;
	} else
		return 0;
}
EXPORT_SYMBOL(writeback_inodes_sb_nr_if_idle);

/**
 * sync_inodes_sb - sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */
void sync_inodes_sb(struct super_block *sb)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.nr_pages	= LONG_MAX,
		.range_cyclic	= 0,
		.done		= &done,
	};

	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);

	wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);
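/*
 * Example (editor's sketch, not part of this file): the shape of a
 * sync(2)-style caller. sync_inodes_sb() queues a WB_SYNC_ALL pass and
 * then waits on every inode's pages via wait_sb_inodes(), so on return
 * the data dirtied before the call has been written; flushing the
 * filesystem's own metadata is a separate step, e.g. via ->sync_fs.
 */
#if 0
static void example_sync_filesystem(struct super_block *sb)
{
	down_read(&sb->s_umount);	/* sync_inodes_sb() asserts this */
	sync_inodes_sb(sb);
	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, 1);	/* 1 == wait */
	up_read(&sb->s_umount);
}
#endif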
/**
 * write_inode_now - write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
	int ret;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	ret = writeback_single_inode(inode, wb, &wbc);
	spin_unlock(&inode->i_lock);
	spin_unlock(&wb->list_lock);
	if (sync)
		inode_sync_wait(inode);
	return ret;
}
EXPORT_SYMBOL(write_inode_now);

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk. It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
	int ret;

	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	ret = writeback_single_inode(inode, wb, wbc);
	spin_unlock(&inode->i_lock);
	spin_unlock(&wb->list_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);

/**
 * sync_inode_metadata - write an inode to disk
 * @inode: the inode to sync
 * @wait: wait for I/O to complete.
 *
 * Write an inode to disk and adjust its dirty state after completion.
 *
 * Note: only writes the actual inode, no associated data or other metadata.
 */
int sync_inode_metadata(struct inode *inode, int wait)
{
	struct writeback_control wbc = {
		.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
		.nr_to_write = 0, /* metadata-only */
	};

	return sync_inode(inode, &wbc);
}
EXPORT_SYMBOL(sync_inode_metadata);
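/*
 * Example (editor's sketch, not part of this file): how a simple fsync
 * path might combine data and inode writeout. With nr_to_write == 0 only
 * the inode itself is written, so the data pages must be flushed
 * separately first; filemap_write_and_wait_range() is the usual tool for
 * that. example_fsync() is a hypothetical helper, not a real ->fsync
 * method signature.
 */
#if 0
static int example_fsync(struct inode *inode, loff_t start, loff_t end,
			 int datasync)
{
	int err = filemap_write_and_wait_range(inode->i_mapping, start, end);

	if (err)
		return err;

	/* for fdatasync(), skip the inode write if only timestamps changed */
	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		return 0;

	return sync_inode_metadata(inode, 1);	/* wait for the inode write */
}
#endif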