/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include "internal.h"

#define inode_to_bdi(inode)	((inode)->i_mapping->backing_dev_info)

/*
 * We don't actually have pdflush, but this one is exported through /proc...
 */
int nr_pdflush_threads;

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_args {
	long nr_pages;
	struct super_block *sb;
	enum writeback_sync_modes sync_mode;
	int for_kupdate:1;
	int range_cyclic:1;
	int for_background:1;
};

/*
 * Work items for the bdi_writeback threads
 */
struct bdi_work {
	struct list_head list;		/* pending work list */
	struct rcu_head rcu_head;	/* for RCU free/clear of work */

	unsigned long seen;		/* threads that have seen this work */
	atomic_t pending;		/* number of threads still to do work */

	struct wb_writeback_args args;	/* writeback arguments */

	unsigned long state;		/* flag bits, see WS_* */
};

enum {
	WS_USED_B = 0,
	WS_ONSTACK_B,
};

#define WS_USED (1 << WS_USED_B)
#define WS_ONSTACK (1 << WS_ONSTACK_B)

static inline bool bdi_work_on_stack(struct bdi_work *work)
{
	return test_bit(WS_ONSTACK_B, &work->state);
}

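/*
 * Rough lifecycle of a work item (see the helpers below): a bdi_work is
 * either kmalloc'ed (WB_SYNC_NONE, bdi_alloc_queue_work()) or lives on
 * the caller's stack (WB_SYNC_ALL, flagged WS_ONSTACK).  It is queued
 * under bdi->wb_lock; each flusher thread clears its bit in ->seen and
 * drops ->pending, and the last one frees a kmalloc'ed item via RCU or
 * clears WS_USED_B so a stack-based waiter in bdi_wait_on_work_clear()
 * can return.
 */
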
static inline void bdi_work_init(struct bdi_work *work,
				 struct wb_writeback_args *args)
{
	INIT_RCU_HEAD(&work->rcu_head);
	work->args = *args;
	work->state = WS_USED;
}

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return !list_empty(&bdi->work_list);
}

static void bdi_work_clear(struct bdi_work *work)
{
	clear_bit(WS_USED_B, &work->state);
	smp_mb__after_clear_bit();
	/*
	 * work can have disappeared at this point. bit waitq functions
	 * should be able to tolerate this, provided bdi_sched_wait does
	 * not dereference its pointer argument.
	 */
	wake_up_bit(&work->state, WS_USED_B);
}

static void bdi_work_free(struct rcu_head *head)
{
	struct bdi_work *work = container_of(head, struct bdi_work, rcu_head);

	if (!bdi_work_on_stack(work))
		kfree(work);
	else
		bdi_work_clear(work);
}

static void wb_work_complete(struct bdi_work *work)
{
	const enum writeback_sync_modes sync_mode = work->args.sync_mode;
	int onstack = bdi_work_on_stack(work);

	/*
	 * For allocated work, we can clear the done/seen bit right here.
	 * For on-stack work, we need to postpone both the clear and free
	 * to after the RCU grace period, since the stack could be invalidated
	 * as soon as bdi_work_clear() has done the wakeup.
	 */
	if (!onstack)
		bdi_work_clear(work);
	if (sync_mode == WB_SYNC_NONE || onstack)
		call_rcu(&work->rcu_head, bdi_work_free);
}

static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
{
	/*
	 * The caller has retrieved the work arguments from this work,
	 * drop our reference. If this is the last ref, delete and free it
	 */
	if (atomic_dec_and_test(&work->pending)) {
		struct backing_dev_info *bdi = wb->bdi;

		spin_lock(&bdi->wb_lock);
		list_del_rcu(&work->list);
		spin_unlock(&bdi->wb_lock);

		wb_work_complete(work);
	}
}

static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
{
	work->seen = bdi->wb_mask;
	BUG_ON(!work->seen);
	atomic_set(&work->pending, bdi->wb_cnt);
	BUG_ON(!bdi->wb_cnt);

	/*
	 * list_add_tail_rcu() contains the necessary barriers to
	 * make sure the above stores are seen before the item is
	 * noticed on the list
	 */
	spin_lock(&bdi->wb_lock);
	list_add_tail_rcu(&work->list, &bdi->work_list);
	spin_unlock(&bdi->wb_lock);

	/*
	 * If the default thread isn't there, make sure we add it. When
	 * it gets created and wakes up, we'll run this work.
	 */
	if (unlikely(list_empty_careful(&bdi->wb_list)))
		wake_up_process(default_backing_dev_info.wb.task);
	else {
		struct bdi_writeback *wb = &bdi->wb;

		if (wb->task)
			wake_up_process(wb->task);
	}
}

/*
 * Used for on-stack allocated work items. The caller needs to wait until
 * the wb threads have acked the work before it's safe to continue.
 */
static void bdi_wait_on_work_clear(struct bdi_work *work)
{
	wait_on_bit(&work->state, WS_USED_B, bdi_sched_wait,
		    TASK_UNINTERRUPTIBLE);
}

static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
				 struct wb_writeback_args *args)
{
	struct bdi_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wakeup the thread for old dirty data writeback
	 */
	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		bdi_work_init(work, args);
		bdi_queue_work(bdi, work);
	} else {
		struct bdi_writeback *wb = &bdi->wb;

		if (wb->task)
			wake_up_process(wb->task);
	}
}

/**
 * bdi_sync_writeback - start and wait for writeback
 * @bdi: the backing device to write from
 * @sb: write inodes from this super_block
 *
 * Description:
 *   This does WB_SYNC_ALL data integrity writeback and waits for the
 *   IO to complete. Callers must hold the sb s_umount semaphore for
 *   reading, to avoid having the super disappear before we are done.
 */
static void bdi_sync_writeback(struct backing_dev_info *bdi,
			       struct super_block *sb)
{
	struct wb_writeback_args args = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.nr_pages	= LONG_MAX,
		.range_cyclic	= 0,
	};
	struct bdi_work work;

	bdi_work_init(&work, &args);
	work.state |= WS_ONSTACK;

	bdi_queue_work(bdi, &work);
	bdi_wait_on_work_clear(&work);
}

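/*
 * Illustrative call sequence (this is what sync_inodes_sb() below does):
 *
 *	bdi_sync_writeback(sb->s_bdi, sb);
 *	wait_sb_inodes(sb);
 *
 * The on-stack WB_SYNC_ALL work is queued and the caller blocks in
 * bdi_wait_on_work_clear() until a flusher thread has processed it.
 */
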
/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @sb: write inodes from this super_block
 * @nr_pages: the number of pages to write
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started when this function returns, we make no guarantees on
 *   completion. Caller need not hold sb s_umount semaphore.
 *
 */
void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb,
			 long nr_pages)
{
	struct wb_writeback_args args = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_NONE,
		.nr_pages	= nr_pages,
		.range_cyclic	= 1,
	};

	/*
	 * We treat @nr_pages=0 as the special case to do background writeback,
	 * ie. to sync pages until the background dirty threshold is reached.
	 */
	if (!nr_pages) {
		args.nr_pages = LONG_MAX;
		args.for_background = 1;
	}

	bdi_alloc_queue_work(bdi, &args);
}

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = list_entry(wb->b_dirty.next, struct inode, i_list);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_list, &wb->b_dirty);
}

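/*
 * Worked example for the dirtied_when check above (illustrative): if the
 * most-recently-dirtied inode on b_dirty was stamped at t=2000 and this
 * inode at t=1000, the inode is re-stamped to the current jiffies so it
 * does not cut ahead of younger inodes; if it was stamped at t=2500 it
 * is already the newest and keeps its stamp.
 */
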
/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	list_move(&inode->i_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}

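/*
 * Illustrative numbers for the !CONFIG_64BIT case above: with 32-bit
 * jiffies and HZ=1000, a dirtied_when stamp more than ~24.8 days old
 * wraps so that time_after() sees it as lying in the future; the extra
 * time_before_eq(dirtied_when, jiffies) check rejects such stamps.
 */
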
/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
				struct list_head *dispatch_queue,
				unsigned long *older_than_this)
{
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;

	while (!list_empty(delaying_queue)) {
		inode = list_entry(delaying_queue->prev, struct inode, i_list);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
		list_move(&inode->i_list, &tmp);
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		return;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		inode = list_entry(tmp.prev, struct inode, i_list);
		sb = inode->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = list_entry(pos, struct inode, i_list);
			if (inode->i_sb == sb)
				list_move(&inode->i_list, dispatch_queue);
		}
	}
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 */
static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
{
	list_splice_init(&wb->b_more_io, wb->b_io.prev);
	move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
}

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, wbc);
	return 0;
}

/*
 * Wait for writeback on an inode to complete.
 */
static void inode_wait_for_writeback(struct inode *inode)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	do {
		spin_unlock(&inode_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&inode_lock);
	} while (inode->i_state & I_SYNC);
}

/*
 * Write out an inode's dirty pages.  Called under inode_lock.  Either the
 * caller has ref on the inode (either via __iget or via syscall against an fd)
 * or the inode has I_WILL_FREE set (via generic_forget_inode)
 *
 * If @wbc->sync_mode is WB_SYNC_ALL, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 *
 * Called under inode_lock.
 */
static int
writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	unsigned dirty;
	int ret;

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		/*
		 * If this inode is locked for writeback and we are not doing
		 * writeback-for-data-integrity, move it to b_more_io so that
		 * writeback can proceed with the other inodes on s_io.
		 *
		 * We'll have another go at writing back this inode when we
		 * completed a full scan of b_io.
		 */
		if (wbc->sync_mode != WB_SYNC_ALL) {
			requeue_io(inode);
			return 0;
		}

		/*
		 * It's a data-integrity sync.  We must wait.
		 */
		inode_wait_for_writeback(inode);
	}

	BUG_ON(inode->i_state & I_SYNC);

	/* Set I_SYNC, reset I_DIRTY */
	dirty = inode->i_state & I_DIRTY;
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY;

	spin_unlock(&inode_lock);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&inode_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
		if ((inode->i_state & I_DIRTY_PAGES) && wbc->for_kupdate) {
			/*
			 * More pages get dirtied by a fast dirtier.
			 */
			goto select_queue;
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * At least XFS will redirty the inode during the
			 * writeback (delalloc) and on io completion (isize).
			 */
			redirty_tail(inode);
		} else if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bails out without doing anything.  Redirty
			 * the inode; Move it from b_io onto b_more_io/b_dirty.
			 */
			/*
			 * akpm: if the caller was the kupdate function we put
			 * this inode at the head of b_dirty so it gets first
			 * consideration.  Otherwise, move it to the tail, for
			 * the reasons described there.  I'm not really sure
			 * how much sense this makes.  Presumably I had good
			 * reasons for doing it this way, and I'd rather not
			 * muck with it at present.
			 */
			if (wbc->for_kupdate) {
				/*
				 * For the kupdate function we move the inode
				 * to b_more_io so it will get more writeout as
				 * soon as the queue becomes uncongested.
				 */
				inode->i_state |= I_DIRTY_PAGES;
select_queue:
				if (wbc->nr_to_write <= 0) {
					/*
					 * slice used up: queue for next turn
					 */
					requeue_io(inode);
				} else {
					/*
					 * somehow blocked: retry later
					 */
					redirty_tail(inode);
				}
			} else {
				/*
				 * Otherwise fully redirty the inode so that
				 * other inodes on this superblock will get some
				 * writeout.  Otherwise heavy writing to one
				 * file would indefinitely suspend writeout of
				 * all the other files.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				redirty_tail(inode);
			}
		} else if (atomic_read(&inode->i_count)) {
			/*
			 * The inode is clean, inuse
			 */
			list_move(&inode->i_list, &inode_in_use);
		} else {
			/*
			 * The inode is clean, unused
			 */
			list_move(&inode->i_list, &inode_unused);
		}
	}
	inode_sync_complete(inode);
	return ret;
}

static void unpin_sb_for_writeback(struct super_block *sb)
{
	up_read(&sb->s_umount);
	put_super(sb);
}

enum sb_pin_state {
	SB_PINNED,
	SB_NOT_PINNED,
	SB_PIN_FAILED
};

/*
 * For WB_SYNC_NONE writeback, the caller does not have the sb pinned
 * before calling writeback. So make sure that we do pin it, so it doesn't
 * go away while we are writing inodes from it.
 */
static enum sb_pin_state pin_sb_for_writeback(struct writeback_control *wbc,
					      struct super_block *sb)
{
	/*
	 * Caller must already hold the ref for this
	 */
	if (wbc->sync_mode == WB_SYNC_ALL) {
		WARN_ON(!rwsem_is_locked(&sb->s_umount));
		return SB_NOT_PINNED;
	}
	spin_lock(&sb_lock);
	sb->s_count++;
	if (down_read_trylock(&sb->s_umount)) {
		if (sb->s_root) {
			spin_unlock(&sb_lock);
			return SB_PINNED;
		}
		/*
		 * umounted, drop rwsem again and fall through to failure
		 */
		up_read(&sb->s_umount);
	}
	sb->s_count--;
	spin_unlock(&sb_lock);
	return SB_PIN_FAILED;
}

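/*
 * Typical pairing (see writeback_inodes_wb() below): the caller pins the
 * superblock before writing its inodes and calls unpin_sb_for_writeback()
 * only when pin_sb_for_writeback() returned SB_PINNED; SB_NOT_PINNED
 * means the WB_SYNC_ALL caller already held s_umount.
 */
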
/*
 * Write a portion of b_io inodes which belong to @sb.
 * If @wbc->sb != NULL, then find and write all such
 * inodes. Otherwise write only ones which go sequentially
 * in reverse order.
 * Return 1 if the caller writeback routine should be
 * interrupted. Otherwise return 0.
 */
static int writeback_sb_inodes(struct super_block *sb,
			       struct bdi_writeback *wb,
			       struct writeback_control *wbc)
{
	while (!list_empty(&wb->b_io)) {
		long pages_skipped;
		struct inode *inode = list_entry(wb->b_io.prev,
						 struct inode, i_list);
		if (wbc->sb && sb != inode->i_sb) {
			/* super block given and doesn't
			   match, skip this inode */
			redirty_tail(inode);
			continue;
		}
		if (sb != inode->i_sb)
			/* finish with this superblock */
			return 0;
		if (inode->i_state & (I_NEW | I_WILL_FREE)) {
			requeue_io(inode);
			continue;
		}
		/*
		 * Was this inode dirtied after sync_sb_inodes was called?
		 * This keeps sync from extra jobs and livelock.
		 */
		if (inode_dirtied_after(inode, wbc->wb_start))
			return 1;

		BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
		__iget(inode);
		pages_skipped = wbc->pages_skipped;
		writeback_single_inode(inode, wbc);
		if (wbc->pages_skipped != pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers. Skip this inode for now.
			 */
			redirty_tail(inode);
		}
		spin_unlock(&inode_lock);
		iput(inode);
		cond_resched();
		spin_lock(&inode_lock);
		if (wbc->nr_to_write <= 0) {
			wbc->more_io = 1;
			return 1;
		}
		if (!list_empty(&wb->b_more_io))
			wbc->more_io = 1;
	}
	/* b_io is empty */
	return 1;
}

static void writeback_inodes_wb(struct bdi_writeback *wb,
				struct writeback_control *wbc)
{
	int ret = 0;

	wbc->wb_start = jiffies; /* livelock avoidance */
	spin_lock(&inode_lock);
	if (!wbc->for_kupdate || list_empty(&wb->b_io))
		queue_io(wb, wbc->older_than_this);

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = list_entry(wb->b_io.prev,
						 struct inode, i_list);
		struct super_block *sb = inode->i_sb;
		enum sb_pin_state state;

		if (wbc->sb && sb != wbc->sb) {
			/* super block given and doesn't
			   match, skip this inode */
			redirty_tail(inode);
			continue;
		}
		state = pin_sb_for_writeback(wbc, sb);

		if (state == SB_PIN_FAILED) {
			requeue_io(inode);
			continue;
		}
		ret = writeback_sb_inodes(sb, wb, wbc);

		if (state == SB_PINNED)
			unpin_sb_for_writeback(sb);
		if (ret)
			break;
	}
	spin_unlock(&inode_lock);
	/* Leave any unwritten inodes on b_io */
}

void writeback_inodes_wbc(struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = wbc->bdi;

	writeback_inodes_wb(&bdi->wb, wbc);
}

/*
 * The maximum number of pages to writeout in a single bdi flush/kupdate
 * operation.  We do this so we don't hold I_SYNC against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code reevaluates
 * the dirty state each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES     1024

static inline bool over_bground_thresh(void)
{
	unsigned long background_thresh, dirty_thresh;

	get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);

	return (global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
}

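/*
 * Back-of-the-envelope (assuming 4KB pages): MAX_WRITEBACK_PAGES == 1024
 * caps each chunk at ~4MB, after which wb_writeback() below replenishes
 * nr_to_write and re-checks the nr_pages budget and background threshold.
 */
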
/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_args *args)
{
	struct writeback_control wbc = {
		.bdi			= wb->bdi,
		.sb			= args->sb,
		.sync_mode		= args->sync_mode,
		.older_than_this	= NULL,
		.for_kupdate		= args->for_kupdate,
		.for_background		= args->for_background,
		.range_cyclic		= args->range_cyclic,
	};
	unsigned long oldest_jif;
	long wrote = 0;
	struct inode *inode;

	if (wbc.for_kupdate) {
		wbc.older_than_this = &oldest_jif;
		oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
	}
	if (!wbc.range_cyclic) {
		wbc.range_start = 0;
		wbc.range_end = LLONG_MAX;
	}

	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (args->nr_pages <= 0)
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (args->for_background && !over_bground_thresh())
			break;

		wbc.more_io = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		wbc.pages_skipped = 0;
		writeback_inodes_wb(wb, &wbc);
		args->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
		wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;

		/*
		 * If we consumed everything, see if we have more
		 */
		if (wbc.nr_to_write <= 0)
			continue;
		/*
		 * Didn't write everything and we don't have more IO, bail
		 */
		if (!wbc.more_io)
			break;
		/*
		 * Did we write something? Try for more
		 */
		if (wbc.nr_to_write < MAX_WRITEBACK_PAGES)
			continue;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		spin_lock(&inode_lock);
		if (!list_empty(&wb->b_more_io)) {
			inode = list_entry(wb->b_more_io.prev,
					   struct inode, i_list);
			inode_wait_for_writeback(inode);
		}
		spin_unlock(&inode_lock);
	}

	return wrote;
}

/*
 * Return the next bdi_work struct that hasn't been processed by this
 * wb thread yet. ->seen is initially set for each thread that exists
 * for this device, when a thread first notices a piece of work it
 * clears its bit. Depending on writeback type, the thread will notify
 * completion on either receiving the work (WB_SYNC_NONE) or after
 * it is done (WB_SYNC_ALL).
 */
static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi,
					   struct bdi_writeback *wb)
{
	struct bdi_work *work, *ret = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(work, &bdi->work_list, list) {
		if (!test_bit(wb->nr, &work->seen))
			continue;
		clear_bit(wb->nr, &work->seen);

		ret = work;
		break;
	}

	rcu_read_unlock();
	return ret;
}

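/*
 * Example for get_next_work_item() above (illustrative): with two
 * flusher threads nr=0 and nr=1, work->seen starts out as bdi->wb_mask
 * (0x3); each thread clears its own bit the first time it encounters
 * the item, so every registered thread picks the work up exactly once.
 */
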
static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = global_page_state(NR_FILE_DIRTY) +
			global_page_state(NR_UNSTABLE_NFS) +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);

	if (nr_pages) {
		struct wb_writeback_args args = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
		};

		return wb_writeback(wb, &args);
	}

	return 0;
}

/*
 * Retrieve work items and do the writeback they describe
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct bdi_work *work;
	long wrote = 0;

	while ((work = get_next_work_item(bdi, wb)) != NULL) {
		struct wb_writeback_args args = work->args;

		/*
		 * Override sync mode, in case we must wait for completion
		 */
		if (force_wait)
			work->args.sync_mode = args.sync_mode = WB_SYNC_ALL;

		/*
		 * If this isn't a data integrity operation, just notify
		 * that we have seen this work and we are now starting it.
		 */
		if (args.sync_mode == WB_SYNC_NONE)
			wb_clear_pending(wb, work);

		wrote += wb_writeback(wb, &args);

		/*
		 * This is a data integrity writeback, so only do the
		 * notification when we have completed the work.
		 */
		if (args.sync_mode == WB_SYNC_ALL)
			wb_clear_pending(wb, work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);

	return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * wakes up periodically and does kupdated style flushing.
 */
int bdi_writeback_task(struct bdi_writeback *wb)
{
	unsigned long last_active = jiffies;
	unsigned long wait_jiffies = -1UL;
	long pages_written;

	while (!kthread_should_stop()) {
		pages_written = wb_do_writeback(wb, 0);

		if (pages_written)
			last_active = jiffies;
		else if (wait_jiffies != -1UL) {
			unsigned long max_idle;

			/*
			 * Longest period of inactivity that we tolerate. If we
			 * see dirty data again later, the task will get
			 * recreated automatically.
			 */
			max_idle = max(5UL * 60 * HZ, wait_jiffies);
			if (time_after(jiffies, max_idle + last_active))
				break;
		}

		wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
		schedule_timeout_interruptible(wait_jiffies);
		try_to_freeze();
	}

	return 0;
}

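/*
 * Worked example for the idle logic above (assuming the default
 * dirty_writeback_interval of 500 centisecs): wait_jiffies is ~5
 * seconds per loop, so max_idle = max(5 minutes, 5s) and a flusher
 * thread with no dirty data exits after roughly five minutes, to be
 * recreated on demand later.
 */
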
/*
 * Schedule writeback for all backing devices. This does WB_SYNC_NONE
 * writeback, for integrity writeback see bdi_sync_writeback().
 */
static void bdi_writeback_all(struct super_block *sb, long nr_pages)
{
	struct wb_writeback_args args = {
		.sb		= sb,
		.nr_pages	= nr_pages,
		.sync_mode	= WB_SYNC_NONE,
	};
	struct backing_dev_info *bdi;

	rcu_read_lock();

	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;

		bdi_alloc_queue_work(bdi, &args);
	}

	rcu_read_unlock();
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages)
{
	if (nr_pages == 0)
		nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	bdi_writeback_all(NULL, nr_pages);
}

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}

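/*
 * Note on the usual entry points (see include/linux/fs.h):
 * mark_inode_dirty(inode) is __mark_inode_dirty(inode, I_DIRTY) and
 * mark_inode_dirty_sync(inode) is __mark_inode_dirty(inode, I_DIRTY_SYNC),
 * so filesystems rarely call the function below directly.
 */
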
/**
 * __mark_inode_dirty - internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (e.g. I_DIRTY_SYNC)
 * Mark an inode as dirty. Callers should use mark_inode_dirty or
 * mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list. Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (hlist_unhashed(&inode->i_hash))
				goto out;
		}
		if (inode->i_state & (I_FREEING|I_CLEAR))
			goto out;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
			struct backing_dev_info *bdi = wb->bdi;

			if (bdi_cap_writeback_dirty(bdi) &&
			    !test_bit(BDI_registered, &bdi->state)) {
				WARN_ON(1);
				printk(KERN_ERR "bdi-%s not registered\n",
				       bdi->name);
			}

			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &wb->b_dirty);
		}
	}
out:
	spin_unlock(&inode_lock);
}
EXPORT_SYMBOL(__mark_inode_dirty);

/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * The inodes to be written are parked on bdi->b_io.  They are moved back onto
 * bdi->b_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
 */
static void wait_sb_inodes(struct super_block *sb)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_lock);

	/*
	 * Data integrity sync. Must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync
	 * call, but which had writeout started before we write it out.
	 * In which case, the inode may not be on the dirty list, but
	 * we still have to wait for that writeout.
	 */
111866f3b8e2SJens Axboe /*
111966f3b8e2SJens Axboe  * Write out a superblock's list of dirty inodes. A wait will be performed
112066f3b8e2SJens Axboe  * upon no inodes, all inodes or the final one, depending upon sync_mode.
112166f3b8e2SJens Axboe  *
112266f3b8e2SJens Axboe  * If older_than_this is non-NULL, then only write out inodes which
112366f3b8e2SJens Axboe  * had their first dirtying at a time earlier than *older_than_this.
112466f3b8e2SJens Axboe  *
112566f3b8e2SJens Axboe  * If `bdi' is non-zero then we're being asked to writeback a specific queue.
112666f3b8e2SJens Axboe  * This function assumes that the blockdev superblock's inodes are backed by
112766f3b8e2SJens Axboe  * a variety of queues, so all inodes are searched. For other superblocks,
112866f3b8e2SJens Axboe  * assume that all inodes are backed by the same queue.
112966f3b8e2SJens Axboe  *
113066f3b8e2SJens Axboe  * The inodes to be written are parked on bdi->b_io. They are moved back onto
113166f3b8e2SJens Axboe  * bdi->b_dirty as they are selected for writing. This way, none can be missed
113266f3b8e2SJens Axboe  * on the writer throttling path, and we get decent balancing between many
113366f3b8e2SJens Axboe  * throttled threads: we don't want them all piling up on inode_sync_wait.
113466f3b8e2SJens Axboe  */
1135b6e51316SJens Axboe static void wait_sb_inodes(struct super_block *sb)
113666f3b8e2SJens Axboe {
113738f21977SNick Piggin 	struct inode *inode, *old_inode = NULL;
113838f21977SNick Piggin 
113903ba3782SJens Axboe 	/*
114003ba3782SJens Axboe 	 * We need to be protected against the filesystem going from
114103ba3782SJens Axboe 	 * r/o to r/w or vice versa.
114203ba3782SJens Axboe 	 */
1143b6e51316SJens Axboe 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
114403ba3782SJens Axboe 
114566f3b8e2SJens Axboe 	spin_lock(&inode_lock);
114666f3b8e2SJens Axboe 
114738f21977SNick Piggin 	/*
114838f21977SNick Piggin 	 * Data integrity sync. Must wait for all pages under writeback,
114938f21977SNick Piggin 	 * because there may have been pages dirtied before our sync
115038f21977SNick Piggin 	 * call, but which had writeout started before we write it out.
115138f21977SNick Piggin 	 * In which case, the inode may not be on the dirty list, but
115238f21977SNick Piggin 	 * we still have to wait for that writeout.
115338f21977SNick Piggin 	 */
1154b6e51316SJens Axboe 	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
115538f21977SNick Piggin 		struct address_space *mapping;
115638f21977SNick Piggin 
115703ba3782SJens Axboe 		if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
115838f21977SNick Piggin 			continue;
115938f21977SNick Piggin 		mapping = inode->i_mapping;
116038f21977SNick Piggin 		if (mapping->nrpages == 0)
116138f21977SNick Piggin 			continue;
116238f21977SNick Piggin 		__iget(inode);
1163ae8547b0SHans Reiser 		spin_unlock(&inode_lock);
116438f21977SNick Piggin 		/*
116538f21977SNick Piggin 		 * We hold a reference to 'inode' so it couldn't have
116638f21977SNick Piggin 		 * been removed from s_inodes list while we dropped the
116738f21977SNick Piggin 		 * inode_lock. We cannot iput the inode now as we can
116838f21977SNick Piggin 		 * be holding the last reference and we cannot iput it
116938f21977SNick Piggin 		 * under inode_lock. So we keep the reference and iput
117038f21977SNick Piggin 		 * it later.
117138f21977SNick Piggin 		 */
117238f21977SNick Piggin 		iput(old_inode);
117338f21977SNick Piggin 		old_inode = inode;
117438f21977SNick Piggin 
117538f21977SNick Piggin 		filemap_fdatawait(mapping);
117638f21977SNick Piggin 
117738f21977SNick Piggin 		cond_resched();
117838f21977SNick Piggin 
117938f21977SNick Piggin 		spin_lock(&inode_lock);
118038f21977SNick Piggin 	}
118138f21977SNick Piggin 	spin_unlock(&inode_lock);
118238f21977SNick Piggin 	iput(old_inode);
118366f3b8e2SJens Axboe }
11841da177e4SLinus Torvalds 
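/*
 * For context: the superblock-level entry points below are driven from
 * fs/sync.c in two passes. A simplified sketch (illustrative only, not
 * the exact __sync_filesystem() source; quota and blockdev syncing are
 * omitted):
 */
static void example_sync_filesystem(struct super_block *sb, int wait)
{
	if (wait)
		sync_inodes_sb(sb);		/* write everything out and wait */
	else
		writeback_inodes_sb(sb);	/* just kick off async writeback */

	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, wait);
}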
1185d8a8559cSJens Axboe /**
1186d8a8559cSJens Axboe  * writeback_inodes_sb - writeback dirty inodes from given super_block
1187d8a8559cSJens Axboe  * @sb: the superblock
11881da177e4SLinus Torvalds  *
1189d8a8559cSJens Axboe  * Start writeback on some inodes on this super_block. No guarantees are
1190d8a8559cSJens Axboe  * made on how many (if any) will be written, and this function does not
1191d8a8559cSJens Axboe  * wait for IO completion of submitted IO; the work is merely queued to
1192d8a8559cSJens Axboe  * the backing device's writeback thread.
11931da177e4SLinus Torvalds  */
1194b6e51316SJens Axboe void writeback_inodes_sb(struct super_block *sb)
11951da177e4SLinus Torvalds {
1196b1e7a8fdSChristoph Lameter 	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
1197fd39fc85SChristoph Lameter 	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
1198d8a8559cSJens Axboe 	long nr_to_write;
11991da177e4SLinus Torvalds 
1200d8a8559cSJens Axboe 	nr_to_write = nr_dirty + nr_unstable +
120138f21977SNick Piggin 			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
120238f21977SNick Piggin 
1203a72bfd4dSJens Axboe 	bdi_start_writeback(sb->s_bdi, sb, nr_to_write);
12041da177e4SLinus Torvalds }
1205d8a8559cSJens Axboe EXPORT_SYMBOL(writeback_inodes_sb);
1206d8a8559cSJens Axboe 
1207d8a8559cSJens Axboe /**
120817bd55d0SEric Sandeen  * writeback_inodes_sb_if_idle - start writeback if none underway
120917bd55d0SEric Sandeen  * @sb: the superblock
121017bd55d0SEric Sandeen  *
121117bd55d0SEric Sandeen  * Invoke writeback_inodes_sb if no writeback is currently underway.
121217bd55d0SEric Sandeen  * Returns 1 if writeback was started, 0 if not.
121317bd55d0SEric Sandeen  */
121417bd55d0SEric Sandeen int writeback_inodes_sb_if_idle(struct super_block *sb)
121517bd55d0SEric Sandeen {
121617bd55d0SEric Sandeen 	if (!writeback_in_progress(sb->s_bdi)) {
121717bd55d0SEric Sandeen 		writeback_inodes_sb(sb);
121817bd55d0SEric Sandeen 		return 1;
121917bd55d0SEric Sandeen 	} else
122017bd55d0SEric Sandeen 		return 0;
122117bd55d0SEric Sandeen }
122217bd55d0SEric Sandeen EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
122317bd55d0SEric Sandeen 
122417bd55d0SEric Sandeen /**
1225d8a8559cSJens Axboe  * sync_inodes_sb - sync sb inode pages
1226d8a8559cSJens Axboe  * @sb: the superblock
1227d8a8559cSJens Axboe  *
1228d8a8559cSJens Axboe  * This function writes and waits on any dirty inode belonging to this
1229d8a8559cSJens Axboe  * super_block, giving the caller a data integrity guarantee on return.
1230d8a8559cSJens Axboe  */
1231b6e51316SJens Axboe void sync_inodes_sb(struct super_block *sb)
1232d8a8559cSJens Axboe {
1233b6e51316SJens Axboe 	bdi_sync_writeback(sb->s_bdi, sb);
1234b6e51316SJens Axboe 	wait_sb_inodes(sb);
1235d8a8559cSJens Axboe }
1236d8a8559cSJens Axboe EXPORT_SYMBOL(sync_inodes_sb);
12371da177e4SLinus Torvalds 
12381da177e4SLinus Torvalds /**
12391da177e4SLinus Torvalds  * write_inode_now - write an inode to disk
12401da177e4SLinus Torvalds  * @inode: inode to write to disk
12411da177e4SLinus Torvalds  * @sync: whether the write should be synchronous or not
12421da177e4SLinus Torvalds  *
12437f04c26dSAndrea Arcangeli  * This function commits an inode to disk immediately if it is dirty. This is
12447f04c26dSAndrea Arcangeli  * primarily needed by knfsd.
12457f04c26dSAndrea Arcangeli  *
12467f04c26dSAndrea Arcangeli  * The caller must either have a ref on the inode or must have set I_WILL_FREE.
12471da177e4SLinus Torvalds  */
12481da177e4SLinus Torvalds int write_inode_now(struct inode *inode, int sync)
12491da177e4SLinus Torvalds {
12501da177e4SLinus Torvalds 	int ret;
12511da177e4SLinus Torvalds 	struct writeback_control wbc = {
12521da177e4SLinus Torvalds 		.nr_to_write = LONG_MAX,
125318914b18SMike Galbraith 		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
1254111ebb6eSOGAWA Hirofumi 		.range_start = 0,
1255111ebb6eSOGAWA Hirofumi 		.range_end = LLONG_MAX,
12561da177e4SLinus Torvalds 	};
12571da177e4SLinus Torvalds 
12581da177e4SLinus Torvalds 	if (!mapping_cap_writeback_dirty(inode->i_mapping))
125949364ce2SAndrew Morton 		wbc.nr_to_write = 0;
12601da177e4SLinus Torvalds 
12611da177e4SLinus Torvalds 	might_sleep();
12621da177e4SLinus Torvalds 	spin_lock(&inode_lock);
126301c03194SChristoph Hellwig 	ret = writeback_single_inode(inode, &wbc);
12641da177e4SLinus Torvalds 	spin_unlock(&inode_lock);
12651da177e4SLinus Torvalds 	if (sync)
12661c0eeaf5SJoern Engel 		inode_sync_wait(inode);
12671da177e4SLinus Torvalds 	return ret;
12681da177e4SLinus Torvalds }
12691da177e4SLinus Torvalds EXPORT_SYMBOL(write_inode_now);
12701da177e4SLinus Torvalds 
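/*
 * Illustrative caller (hypothetical helper, not part of this file):
 * push one dirty inode to disk and wait for it, as knfsd-style code
 * might before replying to a client.
 */
static int example_commit_inode(struct inode *inode)
{
	/* sync != 0 selects WB_SYNC_ALL and waits via inode_sync_wait() */
	return write_inode_now(inode, 1);
}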
12711da177e4SLinus Torvalds /**
12721da177e4SLinus Torvalds  * sync_inode - write an inode and its pages to disk.
12731da177e4SLinus Torvalds  * @inode: the inode to sync
12741da177e4SLinus Torvalds  * @wbc: controls the writeback mode
12751da177e4SLinus Torvalds  *
12761da177e4SLinus Torvalds  * sync_inode() will write an inode and its pages to disk. It will also
12771da177e4SLinus Torvalds  * correctly update the inode on its superblock's dirty inode lists and will
12781da177e4SLinus Torvalds  * update inode->i_state.
12791da177e4SLinus Torvalds  *
12801da177e4SLinus Torvalds  * The caller must have a ref on the inode.
12811da177e4SLinus Torvalds  */
12821da177e4SLinus Torvalds int sync_inode(struct inode *inode, struct writeback_control *wbc)
12831da177e4SLinus Torvalds {
12841da177e4SLinus Torvalds 	int ret;
12851da177e4SLinus Torvalds 
12861da177e4SLinus Torvalds 	spin_lock(&inode_lock);
128701c03194SChristoph Hellwig 	ret = writeback_single_inode(inode, wbc);
12881da177e4SLinus Torvalds 	spin_unlock(&inode_lock);
12891da177e4SLinus Torvalds 	return ret;
12901da177e4SLinus Torvalds }
12911da177e4SLinus Torvalds EXPORT_SYMBOL(sync_inode);
1292
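/*
 * Illustrative caller (hypothetical helper, not part of this file):
 * write an inode synchronously without writing its data pages, the
 * kind of metadata-only flush an fsync path might want. An nr_to_write
 * of zero makes do_writepages() a no-op, so only the inode itself is
 * written (and waited upon, because of WB_SYNC_ALL).
 */
static int example_sync_inode_metadata(struct inode *inode)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,	/* don't write data pages */
	};

	return sync_inode(inode, &wbc);
}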