/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kthread.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/tracepoint.h>
#include <linux/device.h>
#include "internal.h"

/*
 * 4MB minimal write chunk size
 */
#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_CACHE_SHIFT - 10))
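/*
 * Worked example (editorial note, not part of the original file): the
 * constant above is "4MB expressed in pages".  4096UL is 4MB in KB;
 * shifting right by (PAGE_CACHE_SHIFT - 10) divides by the page size in
 * KB.  With 4KB pages (PAGE_CACHE_SHIFT == 12) this is 4096 >> 2 == 1024
 * pages; with 64KB pages (PAGE_CACHE_SHIFT == 16) it is 4096 >> 6 == 64
 * pages -- 4MB either way.
 */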
struct wb_completion {
	atomic_t		cnt;
};

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_work {
	long nr_pages;
	struct super_block *sb;
	unsigned long *older_than_this;
	enum writeback_sync_modes sync_mode;
	unsigned int tagged_writepages:1;
	unsigned int for_kupdate:1;
	unsigned int range_cyclic:1;
	unsigned int for_background:1;
	unsigned int for_sync:1;	/* sync(2) WB_SYNC_ALL writeback */
	unsigned int auto_free:1;	/* free on completion */
	unsigned int single_wait:1;
	unsigned int single_done:1;
	enum wb_reason reason;		/* why was writeback initiated? */

	struct list_head list;		/* pending work list */
	struct wb_completion *done;	/* set if the caller waits */
};

/*
 * If one wants to wait for one or more wb_writeback_works, each work's
 * ->done should be set to a wb_completion defined using the following
 * macro.  Once all work items are issued with wb_queue_work(), the caller
 * can wait for the completion of all using wb_wait_for_completion().  Work
 * items which are waited upon aren't freed automatically on completion.
 */
#define DEFINE_WB_COMPLETION_ONSTACK(cmpl)				\
	struct wb_completion cmpl = {					\
		.cnt		= ATOMIC_INIT(1),			\
	}

/*
 * If an inode is constantly having its pages dirtied, but then the
 * updates stop dirtytime_expire_interval seconds in the past, it's
 * possible for the worst case time between when an inode has its
 * timestamps updated and when they finally get written out to be two
 * dirtytime_expire_intervals.  We set the default to 12 hours (in
 * seconds), which means most of the time inodes will have their
 * timestamps written to disk after 12 hours, but in the worst case a
 * few inodes might not have their timestamps updated for 24 hours.
 */
unsigned int dirtytime_expire_interval = 12 * 60 * 60;

static inline struct inode *wb_inode(struct list_head *head)
{
	return list_entry(head, struct inode, i_wb_list);
}

/*
 * Include the creation of the trace points after defining the
 * wb_writeback_work structure and inline functions so that the definition
 * remains local to this file.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(wbc_writepage);

static bool wb_io_lists_populated(struct bdi_writeback *wb)
{
	if (wb_has_dirty_io(wb)) {
		return false;
	} else {
		set_bit(WB_has_dirty_io, &wb->state);
		WARN_ON_ONCE(!wb->avg_write_bandwidth);
		atomic_long_add(wb->avg_write_bandwidth,
				&wb->bdi->tot_write_bandwidth);
		return true;
	}
}

static void wb_io_lists_depopulated(struct bdi_writeback *wb)
{
	if (wb_has_dirty_io(wb) && list_empty(&wb->b_dirty) &&
	    list_empty(&wb->b_io) && list_empty(&wb->b_more_io)) {
		clear_bit(WB_has_dirty_io, &wb->state);
		WARN_ON_ONCE(atomic_long_sub_return(wb->avg_write_bandwidth,
					&wb->bdi->tot_write_bandwidth) < 0);
	}
}
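/*
 * Illustrative invariant check (editorial sketch, not part of the original
 * file): after the two helpers above have run, and as long as wb->list_lock
 * is held, WB_has_dirty_io is set if and only if at least one of b_dirty,
 * b_io and b_more_io is non-empty, and wb->avg_write_bandwidth is counted
 * in bdi->tot_write_bandwidth exactly while the bit is set.
 */
static inline bool wb_dirty_io_invariant_holds(struct bdi_writeback *wb)
{
	bool populated = !list_empty(&wb->b_dirty) ||
			 !list_empty(&wb->b_io) ||
			 !list_empty(&wb->b_more_io);

	return populated == wb_has_dirty_io(wb);
}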
/**
 * inode_wb_list_move_locked - move an inode onto a bdi_writeback IO list
 * @inode: inode to be moved
 * @wb: target bdi_writeback
 * @head: one of @wb->b_{dirty|io|more_io}
 *
 * Move @inode->i_wb_list to @head of @wb and set %WB_has_dirty_io.
 * Returns %true if @inode is the first occupant of the !dirty_time IO
 * lists; otherwise, %false.
 */
static bool inode_wb_list_move_locked(struct inode *inode,
				      struct bdi_writeback *wb,
				      struct list_head *head)
{
	assert_spin_locked(&wb->list_lock);

	list_move(&inode->i_wb_list, head);

	/* dirty_time doesn't count as dirty_io until expiration */
	if (head != &wb->b_dirty_time)
		return wb_io_lists_populated(wb);

	wb_io_lists_depopulated(wb);
	return false;
}

/**
 * inode_wb_list_del_locked - remove an inode from its bdi_writeback IO list
 * @inode: inode to be removed
 * @wb: bdi_writeback @inode is being removed from
 *
 * Remove @inode which may be on one of @wb->b_{dirty|io|more_io} lists and
 * clear %WB_has_dirty_io if all are empty afterwards.
 */
static void inode_wb_list_del_locked(struct inode *inode,
				     struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);

	list_del_init(&inode->i_wb_list);
	wb_io_lists_depopulated(wb);
}

static void wb_wakeup(struct bdi_writeback *wb)
{
	spin_lock_bh(&wb->work_lock);
	if (test_bit(WB_registered, &wb->state))
		mod_delayed_work(bdi_wq, &wb->dwork, 0);
	spin_unlock_bh(&wb->work_lock);
}

static void wb_queue_work(struct bdi_writeback *wb,
			  struct wb_writeback_work *work)
{
	trace_writeback_queue(wb->bdi, work);

	spin_lock_bh(&wb->work_lock);
	if (!test_bit(WB_registered, &wb->state)) {
		if (work->single_wait)
			work->single_done = 1;
		goto out_unlock;
	}
	if (work->done)
		atomic_inc(&work->done->cnt);
	list_add_tail(&work->list, &wb->work_list);
	mod_delayed_work(bdi_wq, &wb->dwork, 0);
out_unlock:
	spin_unlock_bh(&wb->work_lock);
}

/**
 * wb_wait_for_completion - wait for completion of bdi_writeback_works
 * @bdi: bdi work items were issued to
 * @done: target wb_completion
 *
 * Wait for one or more work items issued to @bdi with their ->done field
 * set to @done, which should have been defined with
 * DEFINE_WB_COMPLETION_ONSTACK().  This function returns after all such
 * work items are completed.  Work items which are waited upon aren't freed
 * automatically on completion.
 */
static void wb_wait_for_completion(struct backing_dev_info *bdi,
				   struct wb_completion *done)
{
	atomic_dec(&done->cnt);		/* put down the initial count */
	wait_event(bdi->wb_waitq, !atomic_read(&done->cnt));
}
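/*
 * Illustrative usage sketch (hypothetical caller, editorial addition, not
 * part of the original file): issue one or more work items against a wb
 * and wait for all of them with a single on-stack completion, as the
 * macro's comment above describes.
 */
static void example_flush_and_wait(struct bdi_writeback *wb,
				   struct wb_writeback_work *work)
{
	DEFINE_WB_COMPLETION_ONSTACK(done);

	work->done = &done;	/* wb_queue_work() bumps done.cnt */
	wb_queue_work(wb, work);
	/* further wb_queue_work() calls sharing &done could go here */

	/* sleeps on bdi->wb_waitq until every queued work drops its count */
	wb_wait_for_completion(wb->bdi, &done);

	/* waited-upon works are not auto-freed; the caller still owns @work */
}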
#ifdef CONFIG_CGROUP_WRITEBACK

/**
 * inode_congested - test whether an inode is congested
 * @inode: inode to test for congestion
 * @cong_bits: mask of WB_[a]sync_congested bits to test
 *
 * Tests whether @inode is congested.  @cong_bits is the mask of congestion
 * bits to test and the return value is the mask of set bits.
 *
 * If cgroup writeback is enabled for @inode, the congestion state is
 * determined by whether the cgwb (cgroup bdi_writeback) for the blkcg
 * associated with @inode is congested; otherwise, the root wb's congestion
 * state is used.
 */
int inode_congested(struct inode *inode, int cong_bits)
{
	if (inode) {
		struct bdi_writeback *wb = inode_to_wb(inode);
		if (wb)
			return wb_congested(wb, cong_bits);
	}

	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}
EXPORT_SYMBOL_GPL(inode_congested);
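/*
 * Illustrative sketch (hypothetical caller, editorial addition, not part
 * of the original file): a best-effort writer can poll the async
 * congestion bit and back off instead of piling more IO onto a congested
 * wb.  The bit position follows the WB_[a]sync_congested naming in the
 * kernel-doc above.
 */
static bool example_should_defer_write(struct inode *inode)
{
	/* non-zero means the tested congestion bit is currently set */
	return inode_congested(inode, 1 << WB_async_congested);
}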
/**
 * wb_wait_for_single_work - wait for completion of a single bdi_writeback_work
 * @bdi: bdi the work item was issued to
 * @work: work item to wait for
 *
 * Wait for the completion of @work which was issued to one of @bdi's
 * bdi_writeback's.  The caller must have set @work->single_wait before
 * issuing it.  This wait operates independently of
 * wb_wait_for_completion() and also disables automatic freeing of @work.
 */
static void wb_wait_for_single_work(struct backing_dev_info *bdi,
				    struct wb_writeback_work *work)
{
	if (WARN_ON_ONCE(!work->single_wait))
		return;

	wait_event(bdi->wb_waitq, work->single_done);

	/*
	 * Paired with smp_wmb() in wb_do_writeback() and ensures that all
	 * modifications to @work prior to assertion of ->single_done are
	 * visible to the caller once this function returns.
	 */
	smp_rmb();
}

/**
 * wb_split_bdi_pages - split nr_pages to write according to bandwidth
 * @wb: target bdi_writeback to split @nr_pages to
 * @nr_pages: number of pages to write for the whole bdi
 *
 * Split @wb's portion of @nr_pages according to @wb's write bandwidth in
 * relation to the total write bandwidth of all wb's w/ dirty inodes on
 * @wb->bdi.
 */
static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
{
	unsigned long this_bw = wb->avg_write_bandwidth;
	unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);

	if (nr_pages == LONG_MAX)
		return LONG_MAX;

	/*
	 * This may be called on clean wb's and proportional distribution
	 * may not make sense, just use the original @nr_pages in those
	 * cases.  In general, we wanna err on the side of writing more.
	 */
	if (!tot_bw || this_bw >= tot_bw)
		return nr_pages;
	else
		return DIV_ROUND_UP_ULL((u64)nr_pages * this_bw, tot_bw);
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
{
	return nr_pages;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */
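/*
 * Worked example (editorial note, not part of the original file): with
 * nr_pages == 1200, this wb averaging 30 MB/s and the bdi-wide total at
 * 120 MB/s, wb_split_bdi_pages() returns DIV_ROUND_UP_ULL(1200 * 30, 120)
 * == 300 pages -- a quarter of the bdi-wide goal, matching the wb's
 * quarter share of the measured bandwidth.  A wb holding all of the
 * bandwidth, or any wb on a bdi whose tot_bw is still zero, is simply
 * handed the full 1200.
 */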
void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
			bool range_cyclic, enum wb_reason reason)
{
	struct wb_writeback_work *work;

	if (!wb_has_dirty_io(wb))
		return;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wakeup the thread for old dirty data writeback
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		trace_writeback_nowork(wb->bdi);
		wb_wakeup(wb);
		return;
	}

	work->sync_mode	= WB_SYNC_NONE;
	work->nr_pages	= nr_pages;
	work->range_cyclic = range_cyclic;
	work->reason	= reason;
	work->auto_free	= 1;

	wb_queue_work(wb, work);
}

/**
 * wb_start_background_writeback - start background writeback
 * @wb: bdi_writeback to write from
 *
 * Description:
 *   This makes sure WB_SYNC_NONE background writeback happens.  When
 *   this function returns, it is only guaranteed that for given wb
 *   some IO is happening if we are over background dirty threshold.
 *   Caller need not hold sb s_umount semaphore.
 */
void wb_start_background_writeback(struct bdi_writeback *wb)
{
	/*
	 * We just wake up the flusher thread.  It will perform background
	 * writeback as soon as there is no other work to do.
	 */
	trace_writeback_wake_background(wb->bdi);
	wb_wakeup(wb);
}

/*
 * Remove the inode from the writeback list it is on.
 */
void inode_wb_list_del(struct inode *inode)
{
	struct bdi_writeback *wb = inode_to_wb(inode);

	spin_lock(&wb->list_lock);
	inode_wb_list_del_locked(inode, wb);
	spin_unlock(&wb->list_lock);
}

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
{
	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = wb_inode(wb->b_dirty.next);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	inode_wb_list_move_locked(inode, wb, &wb->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
{
	inode_wb_list_move_locked(inode, wb, &wb->b_more_io);
}
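/*
 * Worked example (editorial note, not part of the original file): b_dirty
 * is kept newest-first, so wb->b_dirty.next is the most recently dirtied
 * inode.  If that head inode carries dirtied_when == 1000 and
 * redirty_tail() is handed an inode stamped 800, the inode is restamped
 * with the current jiffies before being moved to the head, preserving the
 * ordering.  An inode already stamped 1200 (not before 1000) keeps its
 * timestamp: it was redirtied while under writeback and is already the
 * newest.
 */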
static void inode_sync_complete(struct inode *inode)
{
	inode->i_state &= ~I_SYNC;
	/* If inode is clean and unused, put it into LRU now... */
	inode_add_lru(inode);
	/* Waiters must see I_SYNC cleared before being woken up */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}

#define EXPIRE_DIRTY_ATIME 0x0001

/*
 * Move expired (dirtied before work->older_than_this) dirty inodes from
 * @delaying_queue to @dispatch_queue.
 */
static int move_expired_inodes(struct list_head *delaying_queue,
			       struct list_head *dispatch_queue,
			       int flags,
			       struct wb_writeback_work *work)
{
	unsigned long *older_than_this = NULL;
	unsigned long expire_time;
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;
	int moved = 0;

	if ((flags & EXPIRE_DIRTY_ATIME) == 0)
		older_than_this = work->older_than_this;
	else if (!work->for_sync) {
		expire_time = jiffies - (dirtytime_expire_interval * HZ);
		older_than_this = &expire_time;
	}
	while (!list_empty(delaying_queue)) {
		inode = wb_inode(delaying_queue->prev);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		list_move(&inode->i_wb_list, &tmp);
		moved++;
		if (flags & EXPIRE_DIRTY_ATIME)
			set_bit(__I_DIRTY_TIME_EXPIRED, &inode->i_state);
		if (sb_is_blkdev_sb(inode->i_sb))
			continue;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		goto out;
	}
	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		sb = wb_inode(tmp.prev)->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = wb_inode(pos);
			if (inode->i_sb == sb)
				list_move(&inode->i_wb_list, dispatch_queue);
		}
	}
out:
	return moved;
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 * Before
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    gf         edc     BA
 * After
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    g          fBAedc
 *                                           |
 *                                           +--> dequeue for IO
 */
static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
{
	int moved;

	assert_spin_locked(&wb->list_lock);
	list_splice_init(&wb->b_more_io, &wb->b_io);
	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work);
	moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
				     EXPIRE_DIRTY_ATIME, work);
	if (moved)
		wb_io_lists_populated(wb);
	trace_writeback_queue_io(wb, work, moved);
}

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) {
		trace_writeback_write_inode_start(inode, wbc);
		ret = inode->i_sb->s_op->write_inode(inode, wbc);
		trace_writeback_write_inode(inode, wbc);
		return ret;
	}
	return 0;
}
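/*
 * Illustrative sketch (hypothetical filesystem, editorial addition, not
 * part of the original file): the ->write_inode() method invoked above
 * has this shape.  A filesystem copies the in-core inode fields to its
 * on-disk inode here and, for WB_SYNC_ALL, waits for the write to reach
 * stable storage before returning.
 */
static int examplefs_write_inode(struct inode *inode,
				 struct writeback_control *wbc)
{
	/* locate the on-disk inode and copy i_size, timestamps, etc. */
	/* if (wbc->sync_mode == WB_SYNC_ALL), wait on the buffer/bio */
	return 0;
}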
/*
 * Wait for writeback on an inode to complete. Called with i_lock held.
 * Caller must make sure inode cannot go away when we drop i_lock.
 */
static void __inode_wait_for_writeback(struct inode *inode)
	__releases(inode->i_lock)
	__acquires(inode->i_lock)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode->i_lock);
		__wait_on_bit(wqh, &wq, bit_wait,
			      TASK_UNINTERRUPTIBLE);
		spin_lock(&inode->i_lock);
	}
}

/*
 * Wait for writeback on an inode to complete. Caller must have inode pinned.
 */
void inode_wait_for_writeback(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	__inode_wait_for_writeback(inode);
	spin_unlock(&inode->i_lock);
}

/*
 * Sleep until I_SYNC is cleared. This function must be called with i_lock
 * held and drops it. It is aimed for callers not holding any inode reference
 * so once i_lock is dropped, inode can go away.
 */
static void inode_sleep_on_writeback(struct inode *inode)
	__releases(inode->i_lock)
{
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	int sleep;

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	sleep = inode->i_state & I_SYNC;
	spin_unlock(&inode->i_lock);
	if (sleep)
		schedule();
	finish_wait(wqh, &wait);
}
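/*
 * Editorial note with an illustrative sketch (hypothetical caller, not
 * part of the original file): the difference between the two waiters
 * above is lifetime.  A caller holding a reference can loop until I_SYNC
 * is really clear:
 */
static void example_wait_pinned(struct inode *inode)
{
	/* safe: i_count > 0, so the inode cannot be freed while we sleep */
	inode_wait_for_writeback(inode);
}
/*
 * The flusher thread instead uses inode_sleep_on_writeback(), which drops
 * i_lock and sleeps through at most one wakeup without re-checking,
 * because the inode may be freed the moment i_lock is released.
 */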
/*
 * Find proper writeback list for the inode depending on its current state and
 * possibly also change of its state while we were doing writeback.  Here we
 * handle things such as livelock prevention or fairness of writeback among
 * inodes. This function can be called only by flusher thread - no one else
 * processes all inodes in writeback lists and requeueing inodes behind flusher
 * thread's back can have unexpected consequences.
 */
static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
			  struct writeback_control *wbc)
{
	if (inode->i_state & I_FREEING)
		return;

	/*
	 * Sync livelock prevention. Each inode is tagged and synced in one
	 * shot. If still dirty, it will be redirty_tail()'ed below.  Update
	 * the dirty time to prevent enqueue and sync it again.
	 */
	if ((inode->i_state & I_DIRTY) &&
	    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
		inode->dirtied_when = jiffies;

	if (wbc->pages_skipped) {
		/*
		 * writeback is not making progress due to locked
		 * buffers. Skip this inode for now.
		 */
		redirty_tail(inode, wb);
		return;
	}

	if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		/*
		 * We didn't write back all the pages.  nfs_writepages()
		 * sometimes bales out without doing anything.
		 */
		if (wbc->nr_to_write <= 0) {
			/* Slice used up. Queue for next turn. */
			requeue_io(inode, wb);
		} else {
			/*
			 * Writeback blocked by something other than
			 * congestion. Delay the inode for some time to
			 * avoid spinning on the CPU (100% iowait)
			 * retrying writeback of the dirty page/inode
			 * that cannot be performed immediately.
			 */
			redirty_tail(inode, wb);
		}
	} else if (inode->i_state & I_DIRTY) {
		/*
		 * Filesystems can dirty the inode during writeback operations,
		 * such as delayed allocation during submission or metadata
		 * updates after data IO completion.
		 */
		redirty_tail(inode, wb);
	} else if (inode->i_state & I_DIRTY_TIME) {
		inode->dirtied_when = jiffies;
		inode_wb_list_move_locked(inode, wb, &wb->b_dirty_time);
	} else {
		/* The inode is clean. Remove from writeback lists. */
		inode_wb_list_del_locked(inode, wb);
	}
}

/*
 * Write out an inode and its dirty pages. Do not update the writeback list
 * linkage. That is left to the caller. The caller is also responsible for
 * setting I_SYNC flag and calling inode_sync_complete() to clear it.
 */
static int
__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	long nr_to_write = wbc->nr_to_write;
	unsigned dirty;
	int ret;

	WARN_ON(!(inode->i_state & I_SYNC));

	trace_writeback_single_inode_start(inode, wbc, nr_to_write);

	ret = do_writepages(mapping, wbc);
	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion. We don't do it for sync(2) writeback because it has a
	 * separate, external IO completion path and ->sync_fs for guaranteeing
	 * inode metadata is written back correctly.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/*
	 * Some filesystems may redirty the inode during the writeback
	 * due to delalloc, clear dirty metadata flags right before
	 * write_inode()
	 */
	spin_lock(&inode->i_lock);

	dirty = inode->i_state & I_DIRTY;
	if (inode->i_state & I_DIRTY_TIME) {
		if ((dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) ||
		    unlikely(inode->i_state & I_DIRTY_TIME_EXPIRED) ||
		    unlikely(time_after(jiffies,
					(inode->dirtied_time_when +
					 dirtytime_expire_interval * HZ)))) {
			dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED;
			trace_writeback_lazytime(inode);
		}
	} else
		inode->i_state &= ~I_DIRTY_TIME_EXPIRED;
	inode->i_state &= ~dirty;

	/*
	 * Paired with smp_mb() in __mark_inode_dirty().  This allows
	 * __mark_inode_dirty() to test i_state without grabbing i_lock -
	 * either they see the I_DIRTY bits cleared or we see the dirtied
	 * inode.
	 *
	 * I_DIRTY_PAGES is always cleared together above even if @mapping
	 * still has dirty pages.  The flag is reinstated after smp_mb() if
	 * necessary.  This guarantees that either __mark_inode_dirty()
	 * sees clear I_DIRTY_PAGES or we see PAGECACHE_TAG_DIRTY.
	 */
	smp_mb();

	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		inode->i_state |= I_DIRTY_PAGES;

	spin_unlock(&inode->i_lock);

	if (dirty & I_DIRTY_TIME)
		mark_inode_dirty_sync(inode);
	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & ~I_DIRTY_PAGES) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}
	trace_writeback_single_inode(inode, wbc, nr_to_write);
	return ret;
}
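/*
 * Worked example (editorial note, not part of the original file): the
 * lazytime promotion above means that with the default
 * dirtytime_expire_interval of 12 hours, an inode carrying only
 * I_DIRTY_TIME since dirtied_time_when == T starts having its timestamps
 * written once jiffies passes T + 12 * 60 * 60 * HZ -- or immediately, if
 * a real I_DIRTY_SYNC/I_DIRTY_DATASYNC writeback of the same inode
 * happens first and the timestamp update piggybacks on it.
 */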
/*
 * Write out an inode's dirty pages.  Either the caller has an active reference
 * on the inode or the inode has I_WILL_FREE set.
 *
 * This function is meant for writing back one inode at a time, e.g. when
 * called from within a filesystem.  The flusher thread uses
 * __writeback_single_inode() instead and does more profound writeback list
 * handling in writeback_sb_inodes().
 */
static int
writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
		       struct writeback_control *wbc)
{
	int ret = 0;

	spin_lock(&inode->i_lock);
	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		if (wbc->sync_mode != WB_SYNC_ALL)
			goto out;
		/*
		 * It's a data-integrity sync. We must wait. Since callers hold
		 * inode reference or inode has I_WILL_FREE set, it cannot go
		 * away under us.
		 */
		__inode_wait_for_writeback(inode);
	}
	WARN_ON(inode->i_state & I_SYNC);
	/*
	 * Skip inode if it is clean and we have no outstanding writeback in
	 * WB_SYNC_ALL mode. We don't want to mess with writeback lists in this
	 * function since flusher thread may be doing for example sync in
	 * parallel and if we move the inode, it could get skipped. So here we
	 * make sure inode is on some writeback list and leave it there unless
	 * we have completely cleaned the inode.
	 */
	if (!(inode->i_state & I_DIRTY_ALL) &&
	    (wbc->sync_mode != WB_SYNC_ALL ||
	     !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
		goto out;
	inode->i_state |= I_SYNC;
	spin_unlock(&inode->i_lock);

	ret = __writeback_single_inode(inode, wbc);

	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	/*
	 * If inode is clean, remove it from writeback lists. Otherwise don't
	 * touch it. See comment above for explanation.
	 */
	if (!(inode->i_state & I_DIRTY_ALL))
		inode_wb_list_del_locked(inode, wb);
	spin_unlock(&wb->list_lock);
	inode_sync_complete(inode);
out:
	spin_unlock(&inode->i_lock);
	return ret;
}

static long writeback_chunk_size(struct bdi_writeback *wb,
				 struct wb_writeback_work *work)
{
	long pages;

	/*
	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
	 * here avoids calling into writeback_inodes_wb() more than once.
	 *
	 * The intended call sequence for WB_SYNC_ALL writeback is:
	 *
	 *      wb_writeback()
	 *          writeback_sb_inodes()       <== called only once
	 *              write_cache_pages()     <== called once for each inode
	 *                  (quickly) tag currently dirty pages
	 *                  (maybe slowly) sync all tagged pages
	 */
	if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
		pages = LONG_MAX;
	else {
		pages = min(wb->avg_write_bandwidth / 2,
			    global_dirty_limit / DIRTY_SCOPE);
		pages = min(pages, work->nr_pages);
		pages = round_down(pages + MIN_WRITEBACK_PAGES,
				   MIN_WRITEBACK_PAGES);
	}

	return pages;
}
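/*
 * Worked example (editorial note, not part of the original file): with 4KB
 * pages MIN_WRITEBACK_PAGES is 1024.  If the bandwidth/dirty-limit min()
 * chain yields 2500 pages and work->nr_pages is larger, the chunk becomes
 * round_down(2500 + 1024, 1024) == 3072, i.e. the target rounded up to the
 * next multiple of MIN_WRITEBACK_PAGES.  The chunk is therefore always a
 * multiple of MIN_WRITEBACK_PAGES and never zero (a target of 0 still
 * yields 1024), honoring the "4MB minimal write chunk" above.
 */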
/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * Return the number of pages and/or inodes written.
 */
static long writeback_sb_inodes(struct super_block *sb,
				struct bdi_writeback *wb,
				struct wb_writeback_work *work)
{
	struct writeback_control wbc = {
		.sync_mode		= work->sync_mode,
		.tagged_writepages	= work->tagged_writepages,
		.for_kupdate		= work->for_kupdate,
		.for_background		= work->for_background,
		.for_sync		= work->for_sync,
		.range_cyclic		= work->range_cyclic,
		.range_start		= 0,
		.range_end		= LLONG_MAX,
	};
	unsigned long start_time = jiffies;
	long write_chunk;
	long wrote = 0;		/* count both pages and inodes */

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);

		if (inode->i_sb != sb) {
			if (work->sb) {
				/*
				 * We only want to write back data for this
				 * superblock, move all inodes not belonging
				 * to it back onto the dirty list.
				 */
				redirty_tail(inode, wb);
				continue;
			}

			/*
			 * The inode belongs to a different superblock.
			 * Bounce back to the caller to unpin this and
			 * pin the next superblock.
			 */
			break;
		}

		/*
		 * Don't bother with new inodes or inodes being freed, first
		 * kind does not need periodic writeout yet, and for the latter
		 * kind writeout is handled by the freer.
		 */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			redirty_tail(inode, wb);
			continue;
		}
		if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
			/*
			 * If this inode is locked for writeback and we are not
			 * doing writeback-for-data-integrity, move it to
			 * b_more_io so that writeback can proceed with the
			 * other inodes on s_io.
			 *
			 * We'll have another go at writing back this inode
			 * when we have completed a full scan of b_io.
			 */
			spin_unlock(&inode->i_lock);
			requeue_io(inode, wb);
			trace_writeback_sb_inodes_requeue(inode);
			continue;
		}
		spin_unlock(&wb->list_lock);

		/*
		 * We already requeued the inode if it had I_SYNC set and we
		 * are doing WB_SYNC_NONE writeback. So this catches only the
		 * WB_SYNC_ALL case.
		 */
		if (inode->i_state & I_SYNC) {
			/* Wait for I_SYNC. This function drops i_lock... */
			inode_sleep_on_writeback(inode);
			/* Inode may be gone, start again */
			spin_lock(&wb->list_lock);
			continue;
		}
		inode->i_state |= I_SYNC;
		spin_unlock(&inode->i_lock);

		write_chunk = writeback_chunk_size(wb, work);
		wbc.nr_to_write = write_chunk;
		wbc.pages_skipped = 0;

		/*
		 * We use I_SYNC to pin the inode in memory. While it is set
		 * evict_inode() will wait so the inode cannot be freed.
		 */
		__writeback_single_inode(inode, &wbc);

		work->nr_pages -= write_chunk - wbc.nr_to_write;
		wrote += write_chunk - wbc.nr_to_write;
		spin_lock(&wb->list_lock);
		spin_lock(&inode->i_lock);
		if (!(inode->i_state & I_DIRTY_ALL))
			wrote++;
		requeue_inode(inode, wb, &wbc);
		inode_sync_complete(inode);
		spin_unlock(&inode->i_lock);
		cond_resched_lock(&wb->list_lock);
		/*
		 * bail out to wb_writeback() often enough to check
		 * background threshold and other termination conditions.
		 */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	return wrote;
}

static long __writeback_inodes_wb(struct bdi_writeback *wb,
				  struct wb_writeback_work *work)
{
	unsigned long start_time = jiffies;
	long wrote = 0;

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);
		struct super_block *sb = inode->i_sb;

		if (!trylock_super(sb)) {
			/*
			 * trylock_super() may fail consistently due to
			 * s_umount being grabbed by someone else. Don't use
			 * requeue_io() to avoid busy retrying the inode/sb.
			 */
			redirty_tail(inode, wb);
			continue;
		}
		wrote += writeback_sb_inodes(sb, wb, work);
		up_read(&sb->s_umount);

		/* refer to the same tests at the end of writeback_sb_inodes */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	/* Leave any unwritten inodes on b_io */
	return wrote;
}

static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
				enum wb_reason reason)
{
	struct wb_writeback_work work = {
		.nr_pages	= nr_pages,
		.sync_mode	= WB_SYNC_NONE,
		.range_cyclic	= 1,
		.reason		= reason,
	};

	spin_lock(&wb->list_lock);
	if (list_empty(&wb->b_io))
		queue_io(wb, &work);
	__writeback_inodes_wb(wb, &work);
	spin_unlock(&wb->list_lock);

	return nr_pages - work.nr_pages;
}

static bool over_bground_thresh(struct bdi_writeback *wb)
{
	unsigned long background_thresh, dirty_thresh;

	global_dirty_limits(&background_thresh, &dirty_thresh);

	if (global_page_state(NR_FILE_DIRTY) +
	    global_page_state(NR_UNSTABLE_NFS) > background_thresh)
		return true;

	if (wb_stat(wb, WB_RECLAIMABLE) > wb_dirty_limit(wb, background_thresh))
		return true;

	return false;
}

/*
 * Called under wb->list_lock. If there are multiple wb per bdi,
 * only the flusher working on the first wb should do it.
 */
static void wb_update_bandwidth(struct bdi_writeback *wb,
				unsigned long start_time)
{
	__wb_update_bandwidth(wb, 0, 0, 0, 0, 0, start_time);
}
/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space. So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval. But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write. So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_work *work)
{
	unsigned long wb_start = jiffies;
	long nr_pages = work->nr_pages;
	unsigned long oldest_jif;
	struct inode *inode;
	long progress;

	oldest_jif = jiffies;
	work->older_than_this = &oldest_jif;

	spin_lock(&wb->list_lock);
	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (work->nr_pages <= 0)
			break;

		/*
		 * Background writeout and kupdate-style writeback may
		 * run forever. Stop them if there is other work to do
		 * so that e.g. sync can proceed. They'll be restarted
		 * after the other works are all done.
		 */
		if ((work->for_background || work->for_kupdate) &&
		    !list_empty(&wb->work_list))
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (work->for_background && !over_bground_thresh(wb))
			break;

		/*
		 * Kupdate and background works are special and we want to
		 * include all inodes that need writing. Livelock avoidance is
		 * handled by these works yielding to any other work so we are
		 * safe.
		 */
		if (work->for_kupdate) {
			oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
		} else if (work->for_background)
			oldest_jif = jiffies;

		trace_writeback_start(wb->bdi, work);
		if (list_empty(&wb->b_io))
			queue_io(wb, work);
		if (work->sb)
			progress = writeback_sb_inodes(work->sb, wb, work);
		else
			progress = __writeback_inodes_wb(wb, work);
		trace_writeback_written(wb->bdi, work);

		wb_update_bandwidth(wb, wb_start);
		/*
		 * Did we write something? Try for more
		 *
		 * Dirty inodes are moved to b_io for writeback in batches.
		 * The completion of the current batch does not necessarily
		 * mean the overall work is done. So we keep looping as long
		 * as we made some progress on cleaning pages or inodes.
		 */
		if (progress)
			continue;
		/*
		 * No more inodes for IO, bail
		 */
		if (list_empty(&wb->b_more_io))
			break;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		if (!list_empty(&wb->b_more_io)) {
			trace_writeback_wait(wb->bdi, work);
			inode = wb_inode(wb->b_more_io.prev);
			spin_lock(&inode->i_lock);
			spin_unlock(&wb->list_lock);
			/* This function drops i_lock... */
			inode_sleep_on_writeback(inode);
			spin_lock(&wb->list_lock);
		}
	}
	spin_unlock(&wb->list_lock);

	return nr_pages - work->nr_pages;
}

/*
 * Return the next wb_writeback_work struct that hasn't been processed yet.
 */
static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
{
	struct wb_writeback_work *work = NULL;

	spin_lock_bh(&wb->work_lock);
	if (!list_empty(&wb->work_list)) {
		work = list_entry(wb->work_list.next,
				  struct wb_writeback_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_bh(&wb->work_lock);
	return work;
}
116703ba3782SJens Axboe static long wb_check_old_data_flush(struct bdi_writeback *wb)
116803ba3782SJens Axboe {
116903ba3782SJens Axboe 	unsigned long expired;
117003ba3782SJens Axboe 	long nr_pages;
117103ba3782SJens Axboe 
117269b62d01SJens Axboe 	/*
117369b62d01SJens Axboe 	 * When set to zero, disable periodic writeback
117469b62d01SJens Axboe 	 */
117569b62d01SJens Axboe 	if (!dirty_writeback_interval)
117669b62d01SJens Axboe 		return 0;
117769b62d01SJens Axboe 
117803ba3782SJens Axboe 	expired = wb->last_old_flush +
117903ba3782SJens Axboe 			msecs_to_jiffies(dirty_writeback_interval * 10);
118003ba3782SJens Axboe 	if (time_before(jiffies, expired))
118103ba3782SJens Axboe 		return 0;
118203ba3782SJens Axboe 
118303ba3782SJens Axboe 	wb->last_old_flush = jiffies;
1184cdf01dd5SLinus Torvalds 	nr_pages = get_nr_dirty_pages();
118503ba3782SJens Axboe 
1186c4a77a6cSJens Axboe 	if (nr_pages) {
118783ba7b07SChristoph Hellwig 		struct wb_writeback_work work = {
1188c4a77a6cSJens Axboe 			.nr_pages	= nr_pages,
1189c4a77a6cSJens Axboe 			.sync_mode	= WB_SYNC_NONE,
1190c4a77a6cSJens Axboe 			.for_kupdate	= 1,
1191c4a77a6cSJens Axboe 			.range_cyclic	= 1,
11920e175a18SCurt Wohlgemuth 			.reason		= WB_REASON_PERIODIC,
1193c4a77a6cSJens Axboe 		};
1194c4a77a6cSJens Axboe 
119583ba7b07SChristoph Hellwig 		return wb_writeback(wb, &work);
1196c4a77a6cSJens Axboe 	}
119703ba3782SJens Axboe 
119803ba3782SJens Axboe 	return 0;
119903ba3782SJens Axboe }
120003ba3782SJens Axboe 
120103ba3782SJens Axboe /*
120203ba3782SJens Axboe  * Retrieve work items and do the writeback they describe
120303ba3782SJens Axboe  */
120425d130baSWanpeng Li static long wb_do_writeback(struct bdi_writeback *wb)
120503ba3782SJens Axboe {
120683ba7b07SChristoph Hellwig 	struct wb_writeback_work *work;
1207c4a77a6cSJens Axboe 	long wrote = 0;
120803ba3782SJens Axboe 
12094452226eSTejun Heo 	set_bit(WB_writeback_running, &wb->state);
1210f0054bb1STejun Heo 	while ((work = get_next_work_item(wb)) != NULL) {
1211cc395d7fSTejun Heo 		struct wb_completion *done = work->done;
121298754bf7STejun Heo 		bool need_wake_up = false;
121383ba7b07SChristoph Hellwig 
1214f0054bb1STejun Heo 		trace_writeback_exec(wb->bdi, work);
1215455b2864SDave Chinner 
121683ba7b07SChristoph Hellwig 		wrote += wb_writeback(wb, work);
121703ba3782SJens Axboe 
121898754bf7STejun Heo 		if (work->single_wait) {
121998754bf7STejun Heo 			WARN_ON_ONCE(work->auto_free);
122098754bf7STejun Heo 			/* paired w/ rmb in wb_wait_for_single_work() */
122198754bf7STejun Heo 			smp_wmb();
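			/*
			 * Ordering note: the smp_wmb() above guarantees that
			 * the results of this writeback pass are visible to
			 * the waiter before it can observe ->single_done set
			 * below.
			 */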
122298754bf7STejun Heo 			work->single_done = 1;
122398754bf7STejun Heo 			need_wake_up = true;
122498754bf7STejun Heo 		} else if (work->auto_free) {
122583ba7b07SChristoph Hellwig 			kfree(work);
122698754bf7STejun Heo 		}
122798754bf7STejun Heo 
1228cc395d7fSTejun Heo 		if (done && atomic_dec_and_test(&done->cnt))
122998754bf7STejun Heo 			need_wake_up = true;
123098754bf7STejun Heo 
123198754bf7STejun Heo 		if (need_wake_up)
1232cc395d7fSTejun Heo 			wake_up_all(&wb->bdi->wb_waitq);
123303ba3782SJens Axboe 	}
123403ba3782SJens Axboe 
123503ba3782SJens Axboe 	/*
123603ba3782SJens Axboe 	 * Check for periodic writeback, kupdated() style
123703ba3782SJens Axboe 	 */
123803ba3782SJens Axboe 	wrote += wb_check_old_data_flush(wb);
12396585027aSJan Kara 	wrote += wb_check_background_flush(wb);
12404452226eSTejun Heo 	clear_bit(WB_writeback_running, &wb->state);
124103ba3782SJens Axboe 
124203ba3782SJens Axboe 	return wrote;
124303ba3782SJens Axboe }
124403ba3782SJens Axboe 
124503ba3782SJens Axboe /*
124603ba3782SJens Axboe  * Handle writeback of dirty data for the device backed by this bdi. Also
1247839a8e86STejun Heo  * reschedules periodically and does kupdated style flushing.
124803ba3782SJens Axboe  */
1249f0054bb1STejun Heo void wb_workfn(struct work_struct *work)
125003ba3782SJens Axboe {
1251839a8e86STejun Heo 	struct bdi_writeback *wb = container_of(to_delayed_work(work),
1252839a8e86STejun Heo 						struct bdi_writeback, dwork);
125303ba3782SJens Axboe 	long pages_written;
125403ba3782SJens Axboe 
1255f0054bb1STejun Heo 	set_worker_desc("flush-%s", dev_name(wb->bdi->dev));
1256766f9164SPeter Zijlstra 	current->flags |= PF_SWAPWRITE;
125703ba3782SJens Axboe 
1258839a8e86STejun Heo 	if (likely(!current_is_workqueue_rescuer() ||
12594452226eSTejun Heo 		   !test_bit(WB_registered, &wb->state))) {
126003ba3782SJens Axboe 		/*
1261f0054bb1STejun Heo 		 * The normal path. Keep writing back @wb until its
1262839a8e86STejun Heo 		 * work_list is empty. Note that this path is also taken
1263f0054bb1STejun Heo 		 * if @wb is shutting down even when we're running off the
1264839a8e86STejun Heo 		 * rescuer as work_list needs to be drained.
126503ba3782SJens Axboe 		 */
1266839a8e86STejun Heo 		do {
126725d130baSWanpeng Li 			pages_written = wb_do_writeback(wb);
1268455b2864SDave Chinner 			trace_writeback_pages_written(pages_written);
1269f0054bb1STejun Heo 		} while (!list_empty(&wb->work_list));
1270839a8e86STejun Heo 	} else {
1271253c34e9SArtem Bityutskiy 		/*
1272839a8e86STejun Heo 		 * bdi_wq can't get enough workers and we're running off
1273839a8e86STejun Heo 		 * the emergency worker. Don't hog it. Hopefully, 1024 is
1274839a8e86STejun Heo 		 * enough for efficient IO.
1275253c34e9SArtem Bityutskiy 		 */
1276f0054bb1STejun Heo 		pages_written = writeback_inodes_wb(wb, 1024,
1277839a8e86STejun Heo 						    WB_REASON_FORKER_THREAD);
1278839a8e86STejun Heo 		trace_writeback_pages_written(pages_written);
127903ba3782SJens Axboe 	}
128003ba3782SJens Axboe 
1281f0054bb1STejun Heo 	if (!list_empty(&wb->work_list))
12826ca738d6SDerek Basehore 		mod_delayed_work(bdi_wq, &wb->dwork, 0);
12836ca738d6SDerek Basehore 	else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
1284f0054bb1STejun Heo 		wb_wakeup_delayed(wb);
1285455b2864SDave Chinner 
1286839a8e86STejun Heo 	current->flags &= ~PF_SWAPWRITE;
128703ba3782SJens Axboe }
128803ba3782SJens Axboe 
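/*
 * Illustrative sketch (hypothetical helper, not in the original file):
 * wb_workfn() runs off wb->dwork on the bdi_wq workqueue, so kicking an
 * immediate writeback pass amounts to (re)arming that delayed work with
 * a zero delay, exactly as wb_workfn() itself does above when work is
 * still pending.
 */
static void example_kick_flusher(struct bdi_writeback *wb)
{
	/* run the flusher work item as soon as a worker is available */
	mod_delayed_work(bdi_wq, &wb->dwork, 0);
}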
128903ba3782SJens Axboe /*
129003ba3782SJens Axboe  * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back
129103ba3782SJens Axboe  * the whole world.
129203ba3782SJens Axboe  */
12930e175a18SCurt Wohlgemuth void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
129403ba3782SJens Axboe {
1295b8c2f347SChristoph Hellwig 	struct backing_dev_info *bdi;
1296b8c2f347SChristoph Hellwig 
129747df3ddeSJan Kara 	if (!nr_pages)
129847df3ddeSJan Kara 		nr_pages = get_nr_dirty_pages();
1299b8c2f347SChristoph Hellwig 
1300b8c2f347SChristoph Hellwig 	rcu_read_lock();
1301f2b65121STejun Heo 	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
1302f2b65121STejun Heo 		struct bdi_writeback *wb;
1303f2b65121STejun Heo 		struct wb_iter iter;
1304f2b65121STejun Heo 
1305f2b65121STejun Heo 		if (!bdi_has_dirty_io(bdi))
1306f2b65121STejun Heo 			continue;
1307f2b65121STejun Heo 
1308f2b65121STejun Heo 		bdi_for_each_wb(wb, bdi, &iter, 0)
1309f2b65121STejun Heo 			wb_start_writeback(wb, wb_split_bdi_pages(wb, nr_pages),
1310f2b65121STejun Heo 					   false, reason);
1311f2b65121STejun Heo 	}
1312b8c2f347SChristoph Hellwig 	rcu_read_unlock();
131303ba3782SJens Axboe }
131403ba3782SJens Axboe 
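/*
 * Example call sites (illustrative): sys_sync() kicks every flusher
 * with nr_pages == 0 before doing the blocking WB_SYNC_ALL pass, and a
 * reclaim path can ask for a specific number of pages:
 *
 *	wakeup_flusher_threads(0, WB_REASON_SYNC);
 *	wakeup_flusher_threads(nr_pages, WB_REASON_TRY_TO_FREE_PAGES);
 */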
1315a2f48706STheodore Ts'o /*
1316a2f48706STheodore Ts'o  * Wake up bdis periodically to make sure dirtytime inodes get
1317a2f48706STheodore Ts'o  * written back periodically. We deliberately do *not* check the
1318a2f48706STheodore Ts'o  * b_dirtytime list in wb_has_dirty_io(), since this would cause the
1319a2f48706STheodore Ts'o  * kernel to be constantly waking up once there are any dirtytime
1320a2f48706STheodore Ts'o  * inodes on the system. So instead we define a separate delayed work
1321a2f48706STheodore Ts'o  * function which gets called much more rarely. (By default, only
1322a2f48706STheodore Ts'o  * once every 12 hours.)
1323a2f48706STheodore Ts'o  *
1324a2f48706STheodore Ts'o  * If there is any other write activity going on in the file system,
1325a2f48706STheodore Ts'o  * this function won't be necessary. But if the only thing that has
1326a2f48706STheodore Ts'o  * happened on the file system is a dirtytime inode caused by an atime
1327a2f48706STheodore Ts'o  * update, we need this infrastructure below to make sure that inode
1328a2f48706STheodore Ts'o  * eventually gets pushed out to disk.
1329a2f48706STheodore Ts'o  */
1330a2f48706STheodore Ts'o static void wakeup_dirtytime_writeback(struct work_struct *w);
1331a2f48706STheodore Ts'o static DECLARE_DELAYED_WORK(dirtytime_work, wakeup_dirtytime_writeback);
1332a2f48706STheodore Ts'o 
1333a2f48706STheodore Ts'o static void wakeup_dirtytime_writeback(struct work_struct *w)
1334a2f48706STheodore Ts'o {
1335a2f48706STheodore Ts'o 	struct backing_dev_info *bdi;
1336a2f48706STheodore Ts'o 
1337a2f48706STheodore Ts'o 	rcu_read_lock();
1338a2f48706STheodore Ts'o 	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
1339001fe6f6STejun Heo 		struct bdi_writeback *wb;
1340001fe6f6STejun Heo 		struct wb_iter iter;
1341001fe6f6STejun Heo 
1342001fe6f6STejun Heo 		bdi_for_each_wb(wb, bdi, &iter, 0)
1343001fe6f6STejun Heo 			if (!list_empty(&bdi->wb.b_dirty_time))
1344f0054bb1STejun Heo 				wb_wakeup(&bdi->wb);
1345a2f48706STheodore Ts'o 	}
1346a2f48706STheodore Ts'o 	rcu_read_unlock();
1347a2f48706STheodore Ts'o 	schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
1348a2f48706STheodore Ts'o }
1349a2f48706STheodore Ts'o 
1350a2f48706STheodore Ts'o static int __init start_dirtytime_writeback(void)
1351a2f48706STheodore Ts'o {
1352a2f48706STheodore Ts'o 	schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
1353a2f48706STheodore Ts'o 	return 0;
1354a2f48706STheodore Ts'o }
1355a2f48706STheodore Ts'o __initcall(start_dirtytime_writeback);
1356a2f48706STheodore Ts'o 
13571efff914STheodore Ts'o int dirtytime_interval_handler(struct ctl_table *table, int write,
13581efff914STheodore Ts'o 			       void __user *buffer, size_t *lenp, loff_t *ppos)
13591efff914STheodore Ts'o {
13601efff914STheodore Ts'o 	int ret;
13611efff914STheodore Ts'o 
13621efff914STheodore Ts'o 	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
13631efff914STheodore Ts'o 	if (ret == 0 && write)
13641efff914STheodore Ts'o 		mod_delayed_work(system_wq, &dirtytime_work, 0);
13651efff914STheodore Ts'o 	return ret;
13661efff914STheodore Ts'o }
13671efff914STheodore Ts'o 
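/*
 * Rough sketch of the sysctl table entry that points at the handler
 * above; the authoritative entry lives in kernel/sysctl.c and may
 * differ in detail:
 *
 *	{
 *		.procname	= "dirtytime_expire_seconds",
 *		.data		= &dirtytime_expire_interval,
 *		.maxlen		= sizeof(dirtytime_expire_interval),
 *		.mode		= 0644,
 *		.proc_handler	= dirtytime_interval_handler,
 *	},
 */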
136803ba3782SJens Axboe static noinline void block_dump___mark_inode_dirty(struct inode *inode)
136903ba3782SJens Axboe {
137003ba3782SJens Axboe 	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
137103ba3782SJens Axboe 		struct dentry *dentry;
137203ba3782SJens Axboe 		const char *name = "?";
137303ba3782SJens Axboe 
137403ba3782SJens Axboe 		dentry = d_find_alias(inode);
137503ba3782SJens Axboe 		if (dentry) {
137603ba3782SJens Axboe 			spin_lock(&dentry->d_lock);
137703ba3782SJens Axboe 			name = (const char *) dentry->d_name.name;
137803ba3782SJens Axboe 		}
137903ba3782SJens Axboe 		printk(KERN_DEBUG
138003ba3782SJens Axboe 		       "%s(%d): dirtied inode %lu (%s) on %s\n",
138103ba3782SJens Axboe 		       current->comm, task_pid_nr(current), inode->i_ino,
138203ba3782SJens Axboe 		       name, inode->i_sb->s_id);
138303ba3782SJens Axboe 		if (dentry) {
138403ba3782SJens Axboe 			spin_unlock(&dentry->d_lock);
138503ba3782SJens Axboe 			dput(dentry);
138603ba3782SJens Axboe 		}
138703ba3782SJens Axboe 	}
138803ba3782SJens Axboe }
138903ba3782SJens Axboe 
139003ba3782SJens Axboe /**
139103ba3782SJens Axboe  * __mark_inode_dirty - internal function
139203ba3782SJens Axboe  * @inode: inode to mark
139303ba3782SJens Axboe  * @flags: what kind of dirty (e.g. I_DIRTY_SYNC)
139403ba3782SJens Axboe  * Mark an inode as dirty. Callers should use mark_inode_dirty or
139503ba3782SJens Axboe  * mark_inode_dirty_sync.
139603ba3782SJens Axboe  *
139703ba3782SJens Axboe  * Put the inode on the super block's dirty list.
139803ba3782SJens Axboe  *
139903ba3782SJens Axboe  * CAREFUL! We mark it dirty unconditionally, but move it onto the
140003ba3782SJens Axboe  * dirty list only if it is hashed or if it refers to a blockdev.
140103ba3782SJens Axboe  * If it was not hashed, it will never be added to the dirty list
140203ba3782SJens Axboe  * even if it is later hashed, as it will have been marked dirty already.
140303ba3782SJens Axboe  *
140403ba3782SJens Axboe  * In short, make sure you hash any inodes _before_ you start marking
140503ba3782SJens Axboe  * them dirty.
140603ba3782SJens Axboe  *
140703ba3782SJens Axboe  * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
140803ba3782SJens Axboe  * the block-special inode (/dev/hda1) itself. And the ->dirtied_when field of
140903ba3782SJens Axboe  * the kernel-internal blockdev inode represents the dirtying time of the
141003ba3782SJens Axboe  * blockdev's pages. This is why for I_DIRTY_PAGES we always use
141103ba3782SJens Axboe  * page->mapping->host, so the page-dirtying time is recorded in the internal
141203ba3782SJens Axboe  * blockdev inode.
141303ba3782SJens Axboe  */
14140ae45f63STheodore Ts'o #define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
141503ba3782SJens Axboe void __mark_inode_dirty(struct inode *inode, int flags)
141603ba3782SJens Axboe {
141703ba3782SJens Axboe 	struct super_block *sb = inode->i_sb;
1418253c34e9SArtem Bityutskiy 	struct backing_dev_info *bdi = NULL;
14190ae45f63STheodore Ts'o 	int dirtytime;
14200ae45f63STheodore Ts'o 
14210ae45f63STheodore Ts'o 	trace_writeback_mark_inode_dirty(inode, flags);
142203ba3782SJens Axboe 
142303ba3782SJens Axboe 	/*
142403ba3782SJens Axboe 	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
142503ba3782SJens Axboe 	 * dirty the inode itself
142603ba3782SJens Axboe 	 */
14270ae45f63STheodore Ts'o 	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_TIME)) {
14289fb0a7daSTejun Heo 		trace_writeback_dirty_inode_start(inode, flags);
14299fb0a7daSTejun Heo 
143003ba3782SJens Axboe 		if (sb->s_op->dirty_inode)
1431aa385729SChristoph Hellwig 			sb->s_op->dirty_inode(inode, flags);
14329fb0a7daSTejun Heo 
14339fb0a7daSTejun Heo 		trace_writeback_dirty_inode(inode, flags);
143403ba3782SJens Axboe 	}
14350ae45f63STheodore Ts'o 	if (flags & I_DIRTY_INODE)
14360ae45f63STheodore Ts'o 		flags &= ~I_DIRTY_TIME;
14370ae45f63STheodore Ts'o 	dirtytime = flags & I_DIRTY_TIME;
143803ba3782SJens Axboe 
143903ba3782SJens Axboe 	/*
14409c6ac78eSTejun Heo 	 * Paired with smp_mb() in __writeback_single_inode() for the
14419c6ac78eSTejun Heo 	 * following lockless i_state test. See there for details.
144203ba3782SJens Axboe 	 */
144303ba3782SJens Axboe 	smp_mb();
144403ba3782SJens Axboe 
14450ae45f63STheodore Ts'o 	if (((inode->i_state & flags) == flags) ||
14460ae45f63STheodore Ts'o 	    (dirtytime && (inode->i_state & I_DIRTY_INODE)))
144703ba3782SJens Axboe 		return;
144803ba3782SJens Axboe 
144903ba3782SJens Axboe 	if (unlikely(block_dump))
145003ba3782SJens Axboe 		block_dump___mark_inode_dirty(inode);
145103ba3782SJens Axboe 
1452250df6edSDave Chinner 	spin_lock(&inode->i_lock);
14530ae45f63STheodore Ts'o 	if (dirtytime && (inode->i_state & I_DIRTY_INODE))
14540ae45f63STheodore Ts'o 		goto out_unlock_inode;
145503ba3782SJens Axboe 	if ((inode->i_state & flags) != flags) {
145603ba3782SJens Axboe 		const int was_dirty = inode->i_state & I_DIRTY;
145703ba3782SJens Axboe 
145852ebea74STejun Heo 		inode_attach_wb(inode, NULL);
145952ebea74STejun Heo 
14600ae45f63STheodore Ts'o 		if (flags & I_DIRTY_INODE)
14610ae45f63STheodore Ts'o 			inode->i_state &= ~I_DIRTY_TIME;
146203ba3782SJens Axboe 		inode->i_state |= flags;
146303ba3782SJens Axboe 
146403ba3782SJens Axboe 		/*
146503ba3782SJens Axboe 		 * If the inode is being synced, just update its dirty state.
146603ba3782SJens Axboe 		 * The unlocker will place the inode on the appropriate
146703ba3782SJens Axboe 		 * superblock list, based upon its state.
146803ba3782SJens Axboe 		 */
146903ba3782SJens Axboe 		if (inode->i_state & I_SYNC)
1470250df6edSDave Chinner 			goto out_unlock_inode;
147103ba3782SJens Axboe 
147203ba3782SJens Axboe 		/*
147303ba3782SJens Axboe 		 * Only add valid (hashed) inodes to the superblock's
147403ba3782SJens Axboe 		 * dirty list. Add blockdev inodes as well.
147503ba3782SJens Axboe 		 */
147603ba3782SJens Axboe 		if (!S_ISBLK(inode->i_mode)) {
14771d3382cbSAl Viro 			if (inode_unhashed(inode))
1478250df6edSDave Chinner 				goto out_unlock_inode;
147903ba3782SJens Axboe 		}
1480a4ffdde6SAl Viro 		if (inode->i_state & I_FREEING)
1481250df6edSDave Chinner 			goto out_unlock_inode;
148203ba3782SJens Axboe 
148303ba3782SJens Axboe 		/*
148403ba3782SJens Axboe 		 * If the inode was already on b_dirty/b_io/b_more_io, don't
148503ba3782SJens Axboe 		 * reposition it (that would break b_dirty time-ordering).
148603ba3782SJens Axboe 		 */
148703ba3782SJens Axboe 		if (!was_dirty) {
1488d6c10f1fSTejun Heo 			struct list_head *dirty_list;
1489a66979abSDave Chinner 			bool wakeup_bdi = false;
1490253c34e9SArtem Bityutskiy 			bdi = inode_to_bdi(inode);
1491500b067cSJens Axboe 
1492146d7009SJunxiao Bi 			spin_unlock(&inode->i_lock);
1493146d7009SJunxiao Bi 			spin_lock(&bdi->wb.list_lock);
1494253c34e9SArtem Bityutskiy 
1495d6c10f1fSTejun Heo 			WARN(bdi_cap_writeback_dirty(bdi) &&
1496d6c10f1fSTejun Heo 			     !test_bit(WB_registered, &bdi->wb.state),
1497d6c10f1fSTejun Heo 			     "bdi-%s not registered\n", bdi->name);
149803ba3782SJens Axboe 
149903ba3782SJens Axboe 			inode->dirtied_when = jiffies;
1500a2f48706STheodore Ts'o 			if (dirtytime)
1501a2f48706STheodore Ts'o 				inode->dirtied_time_when = jiffies;
1502d6c10f1fSTejun Heo 
1503a2f48706STheodore Ts'o 			if (inode->i_state & (I_DIRTY_INODE | I_DIRTY_PAGES))
1504d6c10f1fSTejun Heo 				dirty_list = &bdi->wb.b_dirty;
1505a2f48706STheodore Ts'o 			else
1506d6c10f1fSTejun Heo 				dirty_list = &bdi->wb.b_dirty_time;
1507d6c10f1fSTejun Heo 
1508d6c10f1fSTejun Heo 			wakeup_bdi = inode_wb_list_move_locked(inode, &bdi->wb,
1509d6c10f1fSTejun Heo 							       dirty_list);
1510d6c10f1fSTejun Heo 
1511f758eeabSChristoph Hellwig 			spin_unlock(&bdi->wb.list_lock);
15120ae45f63STheodore Ts'o 			trace_writeback_dirty_inode_enqueue(inode);
1513253c34e9SArtem Bityutskiy 
1514d6c10f1fSTejun Heo 			/*
1515d6c10f1fSTejun Heo 			 * If this is the first dirty inode for this bdi,
1516d6c10f1fSTejun Heo 			 * we have to wake up the corresponding bdi thread
1517d6c10f1fSTejun Heo 			 * to make sure background write-back happens
1518d6c10f1fSTejun Heo 			 * later.
1519d6c10f1fSTejun Heo 			 */
1520d6c10f1fSTejun Heo 			if (bdi_cap_writeback_dirty(bdi) && wakeup_bdi)
1521f0054bb1STejun Heo 				wb_wakeup_delayed(&bdi->wb);
1522a66979abSDave Chinner 			return;
1523a66979abSDave Chinner 		}
1524a66979abSDave Chinner 	}
1525a66979abSDave Chinner out_unlock_inode:
1526a66979abSDave Chinner 	spin_unlock(&inode->i_lock);
1527a66979abSDave Chinner 
152803ba3782SJens Axboe }
152903ba3782SJens Axboe EXPORT_SYMBOL(__mark_inode_dirty);
153003ba3782SJens Axboe 
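/*
 * For reference, the mark_inode_dirty()/mark_inode_dirty_sync() wrappers
 * mentioned in the comment above are thin inlines in include/linux/fs.h:
 *
 *	static inline void mark_inode_dirty(struct inode *inode)
 *	{
 *		__mark_inode_dirty(inode, I_DIRTY);
 *	}
 *
 *	static inline void mark_inode_dirty_sync(struct inode *inode)
 *	{
 *		__mark_inode_dirty(inode, I_DIRTY_SYNC);
 *	}
 */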
1531b6e51316SJens Axboe static void wait_sb_inodes(struct super_block *sb)
153266f3b8e2SJens Axboe {
153338f21977SNick Piggin 	struct inode *inode, *old_inode = NULL;
153438f21977SNick Piggin 
153503ba3782SJens Axboe 	/*
153603ba3782SJens Axboe 	 * We need to be protected against the filesystem going from
153703ba3782SJens Axboe 	 * r/o to r/w or vice versa.
153803ba3782SJens Axboe 	 */
1539b6e51316SJens Axboe 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
154003ba3782SJens Axboe 
154155fa6091SDave Chinner 	spin_lock(&inode_sb_list_lock);
154266f3b8e2SJens Axboe 
154338f21977SNick Piggin 	/*
154438f21977SNick Piggin 	 * Data integrity sync. Must wait for all pages under writeback,
154538f21977SNick Piggin 	 * because there may have been pages dirtied before our sync
154638f21977SNick Piggin 	 * call, but which had writeout started before we write them out.
154738f21977SNick Piggin 	 * In which case, the inode may not be on the dirty list, but
154838f21977SNick Piggin 	 * we still have to wait for that writeout.
154938f21977SNick Piggin 	 */
1550b6e51316SJens Axboe 	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
1551250df6edSDave Chinner 		struct address_space *mapping = inode->i_mapping;
155238f21977SNick Piggin 
1553250df6edSDave Chinner 		spin_lock(&inode->i_lock);
1554250df6edSDave Chinner 		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
1555250df6edSDave Chinner 		    (mapping->nrpages == 0)) {
1556250df6edSDave Chinner 			spin_unlock(&inode->i_lock);
155738f21977SNick Piggin 			continue;
1558250df6edSDave Chinner 		}
155938f21977SNick Piggin 		__iget(inode);
1560250df6edSDave Chinner 		spin_unlock(&inode->i_lock);
156155fa6091SDave Chinner 		spin_unlock(&inode_sb_list_lock);
156255fa6091SDave Chinner 
156338f21977SNick Piggin 		/*
156455fa6091SDave Chinner 		 * We hold a reference to 'inode' so it couldn't have been
156555fa6091SDave Chinner 		 * removed from s_inodes list while we dropped the
156655fa6091SDave Chinner 		 * inode_sb_list_lock. We cannot iput the inode now as we can
156755fa6091SDave Chinner 		 * be holding the last reference and we cannot iput it under
156855fa6091SDave Chinner 		 * inode_sb_list_lock. So we keep the reference and iput it
156955fa6091SDave Chinner 		 * later.
157038f21977SNick Piggin 		 */
157138f21977SNick Piggin 		iput(old_inode);
157238f21977SNick Piggin 		old_inode = inode;
157338f21977SNick Piggin 
157438f21977SNick Piggin 		filemap_fdatawait(mapping);
157538f21977SNick Piggin 
157638f21977SNick Piggin 		cond_resched();
157738f21977SNick Piggin 
157855fa6091SDave Chinner 		spin_lock(&inode_sb_list_lock);
157938f21977SNick Piggin 	}
158055fa6091SDave Chinner 	spin_unlock(&inode_sb_list_lock);
158138f21977SNick Piggin 	iput(old_inode);
158266f3b8e2SJens Axboe }
15831da177e4SLinus Torvalds 
1584f30a7d0cSTejun Heo static void __writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr,
1585f30a7d0cSTejun Heo 				     enum wb_reason reason, bool skip_if_busy)
15861da177e4SLinus Torvalds {
1587cc395d7fSTejun Heo 	DEFINE_WB_COMPLETION_ONSTACK(done);
158883ba7b07SChristoph Hellwig 	struct wb_writeback_work work = {
15893c4d7165SChristoph Hellwig 		.sb			= sb,
15903c4d7165SChristoph Hellwig 		.sync_mode		= WB_SYNC_NONE,
15916e6938b6SWu Fengguang 		.tagged_writepages	= 1,
159283ba7b07SChristoph Hellwig 		.done			= &done,
15933259f8beSChris Mason 		.nr_pages		= nr,
15940e175a18SCurt Wohlgemuth 		.reason			= reason,
15953c4d7165SChristoph Hellwig 	};
1596e7972912STejun Heo 	struct backing_dev_info *bdi = sb->s_bdi;
15970e3c9a22SJens Axboe 
1598e7972912STejun Heo 	if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info)
15996eedc701SJan Kara 		return;
1600cf37e972SChristoph Hellwig 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
1601f30a7d0cSTejun Heo 
1602f30a7d0cSTejun Heo 	if (skip_if_busy && writeback_in_progress(&bdi->wb))
1603f30a7d0cSTejun Heo 		return;
1604f30a7d0cSTejun Heo 
1605e7972912STejun Heo 	wb_queue_work(&bdi->wb, &work);
1606cc395d7fSTejun Heo 	wb_wait_for_completion(bdi, &done);
16071da177e4SLinus Torvalds }
1608f30a7d0cSTejun Heo 
1609f30a7d0cSTejun Heo /**
1610f30a7d0cSTejun Heo  * writeback_inodes_sb_nr - writeback dirty inodes from given super_block
1611f30a7d0cSTejun Heo  * @sb: the superblock
1612f30a7d0cSTejun Heo  * @nr: the number of pages to write
1613f30a7d0cSTejun Heo  * @reason: reason why some writeback work was initiated
1614f30a7d0cSTejun Heo  *
1615f30a7d0cSTejun Heo  * Start writeback on some inodes on this super_block. No guarantees are made
1616f30a7d0cSTejun Heo  * on how many (if any) will be written, and this function does not wait
1617f30a7d0cSTejun Heo  * for IO completion of submitted IO.
1618f30a7d0cSTejun Heo  */
1619f30a7d0cSTejun Heo void writeback_inodes_sb_nr(struct super_block *sb,
1620f30a7d0cSTejun Heo 			    unsigned long nr,
1621f30a7d0cSTejun Heo 			    enum wb_reason reason)
1622f30a7d0cSTejun Heo {
1623f30a7d0cSTejun Heo 	__writeback_inodes_sb_nr(sb, nr, reason, false);
1624f30a7d0cSTejun Heo }
16253259f8beSChris Mason EXPORT_SYMBOL(writeback_inodes_sb_nr);
16263259f8beSChris Mason 
16273259f8beSChris Mason /**
16283259f8beSChris Mason  * writeback_inodes_sb - writeback dirty inodes from given super_block
16293259f8beSChris Mason  * @sb: the superblock
1630786228abSMarcos Paulo de Souza  * @reason: reason why some writeback work was initiated
16313259f8beSChris Mason  *
16323259f8beSChris Mason  * Start writeback on some inodes on this super_block. No guarantees are made
16333259f8beSChris Mason  * on how many (if any) will be written, and this function does not wait
16343259f8beSChris Mason  * for IO completion of submitted IO.
16353259f8beSChris Mason  */
16360e175a18SCurt Wohlgemuth void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
16373259f8beSChris Mason {
16380e175a18SCurt Wohlgemuth 	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
16393259f8beSChris Mason }
1640d8a8559cSJens Axboe EXPORT_SYMBOL(writeback_inodes_sb);
1641d8a8559cSJens Axboe 
1642d8a8559cSJens Axboe /**
164310ee27a0SMiao Xie  * try_to_writeback_inodes_sb_nr - try to start writeback if none underway
16443259f8beSChris Mason  * @sb: the superblock
16453259f8beSChris Mason  * @nr: the number of pages to write
164610ee27a0SMiao Xie  * @reason: the reason for writeback
16473259f8beSChris Mason  *
164810ee27a0SMiao Xie  * Invoke writeback_inodes_sb_nr if no writeback is currently underway.
16493259f8beSChris Mason  * Returns true if writeback was started, false if not.
16503259f8beSChris Mason  */
1651f30a7d0cSTejun Heo bool try_to_writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr,
16520e175a18SCurt Wohlgemuth 				   enum wb_reason reason)
16533259f8beSChris Mason {
165410ee27a0SMiao Xie 	if (!down_read_trylock(&sb->s_umount))
1655f30a7d0cSTejun Heo 		return false;
165610ee27a0SMiao Xie 
1657f30a7d0cSTejun Heo 	__writeback_inodes_sb_nr(sb, nr, reason, true);
16583259f8beSChris Mason 	up_read(&sb->s_umount);
1659f30a7d0cSTejun Heo 	return true;
16603259f8beSChris Mason }
166110ee27a0SMiao Xie EXPORT_SYMBOL(try_to_writeback_inodes_sb_nr);
166210ee27a0SMiao Xie 
166310ee27a0SMiao Xie /**
166410ee27a0SMiao Xie  * try_to_writeback_inodes_sb - try to start writeback if none underway
166510ee27a0SMiao Xie  * @sb: the superblock
166610ee27a0SMiao Xie  * @reason: reason why some writeback work was initiated
166710ee27a0SMiao Xie  *
166810ee27a0SMiao Xie  * Implemented via try_to_writeback_inodes_sb_nr().
166910ee27a0SMiao Xie  * Returns true if writeback was started, false if not.
167010ee27a0SMiao Xie  */
1671f30a7d0cSTejun Heo bool try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
167210ee27a0SMiao Xie {
167310ee27a0SMiao Xie 	return try_to_writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
167410ee27a0SMiao Xie }
167510ee27a0SMiao Xie EXPORT_SYMBOL(try_to_writeback_inodes_sb);
16763259f8beSChris Mason 
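/*
 * Typical callers (illustrative): sync_filesystem() uses the
 * non-waiting writeback_inodes_sb(sb, WB_REASON_SYNC) pass before its
 * blocking one, while a filesystem running low on space can nudge
 * writeback without risking a deadlock on s_umount:
 *
 *	try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
 */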
16773259f8beSChris Mason /**
1678d8a8559cSJens Axboe  * sync_inodes_sb - sync sb inode pages
1679d8a8559cSJens Axboe  * @sb: the superblock
1680d8a8559cSJens Axboe  *
1681d8a8559cSJens Axboe  * This function writes and waits on any dirty inode belonging to this
16820dc83bd3SJan Kara  * super_block.
1683d8a8559cSJens Axboe  */
16840dc83bd3SJan Kara void sync_inodes_sb(struct super_block *sb)
1685d8a8559cSJens Axboe {
1686cc395d7fSTejun Heo 	DEFINE_WB_COMPLETION_ONSTACK(done);
168783ba7b07SChristoph Hellwig 	struct wb_writeback_work work = {
16883c4d7165SChristoph Hellwig 		.sb		= sb,
16893c4d7165SChristoph Hellwig 		.sync_mode	= WB_SYNC_ALL,
16903c4d7165SChristoph Hellwig 		.nr_pages	= LONG_MAX,
16913c4d7165SChristoph Hellwig 		.range_cyclic	= 0,
169283ba7b07SChristoph Hellwig 		.done		= &done,
16930e175a18SCurt Wohlgemuth 		.reason		= WB_REASON_SYNC,
16947747bd4bSDave Chinner 		.for_sync	= 1,
16953c4d7165SChristoph Hellwig 	};
1696e7972912STejun Heo 	struct backing_dev_info *bdi = sb->s_bdi;
16973c4d7165SChristoph Hellwig 
16986eedc701SJan Kara 	/* Nothing to do? */
1699e7972912STejun Heo 	if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info)
17006eedc701SJan Kara 		return;
1701cf37e972SChristoph Hellwig 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
1702cf37e972SChristoph Hellwig 
1703e7972912STejun Heo 	wb_queue_work(&bdi->wb, &work);
1704cc395d7fSTejun Heo 	wb_wait_for_completion(bdi, &done);
170583ba7b07SChristoph Hellwig 
1706b6e51316SJens Axboe 	wait_sb_inodes(sb);
1707d8a8559cSJens Axboe }
1708d8a8559cSJens Axboe EXPORT_SYMBOL(sync_inodes_sb);
17091da177e4SLinus Torvalds 
17101da177e4SLinus Torvalds /**
17111da177e4SLinus Torvalds  * write_inode_now - write an inode to disk
17121da177e4SLinus Torvalds  * @inode: inode to write to disk
17131da177e4SLinus Torvalds  * @sync: whether the write should be synchronous or not
17141da177e4SLinus Torvalds  *
17157f04c26dSAndrea Arcangeli  * This function commits an inode to disk immediately if it is dirty. This is
17167f04c26dSAndrea Arcangeli  * primarily needed by knfsd.
17177f04c26dSAndrea Arcangeli  *
17187f04c26dSAndrea Arcangeli  * The caller must either have a ref on the inode or must have set I_WILL_FREE.
17191da177e4SLinus Torvalds  */
17201da177e4SLinus Torvalds int write_inode_now(struct inode *inode, int sync)
17211da177e4SLinus Torvalds {
1722f758eeabSChristoph Hellwig 	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
17231da177e4SLinus Torvalds 	struct writeback_control wbc = {
17241da177e4SLinus Torvalds 		.nr_to_write = LONG_MAX,
172518914b18SMike Galbraith 		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
1726111ebb6eSOGAWA Hirofumi 		.range_start = 0,
1727111ebb6eSOGAWA Hirofumi 		.range_end = LLONG_MAX,
17281da177e4SLinus Torvalds 	};
17291da177e4SLinus Torvalds 
17301da177e4SLinus Torvalds 	if (!mapping_cap_writeback_dirty(inode->i_mapping))
173149364ce2SAndrew Morton 		wbc.nr_to_write = 0;
17321da177e4SLinus Torvalds 
17331da177e4SLinus Torvalds 	might_sleep();
17344f8ad655SJan Kara 	return writeback_single_inode(inode, wb, &wbc);
17351da177e4SLinus Torvalds }
17361da177e4SLinus Torvalds EXPORT_SYMBOL(write_inode_now);
17371da177e4SLinus Torvalds 
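/*
 * Example (illustrative): a caller that must have the inode on disk
 * before proceeding, such as the knfsd case mentioned above, requests a
 * synchronous write; passing sync == 1 selects WB_SYNC_ALL:
 *
 *	err = write_inode_now(inode, 1);
 */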
17481da177e4SLinus Torvalds  */
17491da177e4SLinus Torvalds int sync_inode(struct inode *inode, struct writeback_control *wbc)
17501da177e4SLinus Torvalds {
17514f8ad655SJan Kara 	return writeback_single_inode(inode, &inode_to_bdi(inode)->wb, wbc);
17521da177e4SLinus Torvalds }
17531da177e4SLinus Torvalds EXPORT_SYMBOL(sync_inode);
1754c3765016SChristoph Hellwig 
1755c3765016SChristoph Hellwig /**
1756c691b9d9SAndrew Morton  * sync_inode_metadata - write an inode to disk
1757c3765016SChristoph Hellwig  * @inode: the inode to sync
1758c3765016SChristoph Hellwig  * @wait: wait for I/O to complete.
1759c3765016SChristoph Hellwig  *
1760c691b9d9SAndrew Morton  * Write an inode to disk and adjust its dirty state after completion.
1761c3765016SChristoph Hellwig  *
1762c3765016SChristoph Hellwig  * Note: only writes the actual inode, no associated data or other metadata.
1763c3765016SChristoph Hellwig  */
1764c3765016SChristoph Hellwig int sync_inode_metadata(struct inode *inode, int wait)
1765c3765016SChristoph Hellwig {
1766c3765016SChristoph Hellwig 	struct writeback_control wbc = {
1767c3765016SChristoph Hellwig 		.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
1768c3765016SChristoph Hellwig 		.nr_to_write = 0, /* metadata-only */
1769c3765016SChristoph Hellwig 	};
1770c3765016SChristoph Hellwig 
1771c3765016SChristoph Hellwig 	return sync_inode(inode, &wbc);
1772c3765016SChristoph Hellwig }
1773c3765016SChristoph Hellwig EXPORT_SYMBOL(sync_inode_metadata);
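/*
 * Example (illustrative): fsync implementations that only need the
 * inode itself on disk use this helper; __generic_file_fsync() in
 * fs/libfs.c ends up doing essentially:
 *
 *	err = sync_inode_metadata(inode, 1);
 */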