// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kthread.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/tracepoint.h>
#include <linux/device.h>
#include <linux/memcontrol.h>
#include "internal.h"

/*
 * 4MB minimal write chunk size
 */
#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_SHIFT - 10))

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_work {
	long nr_pages;
	struct super_block *sb;
	enum writeback_sync_modes sync_mode;
	unsigned int tagged_writepages:1;
	unsigned int for_kupdate:1;
	unsigned int range_cyclic:1;
	unsigned int for_background:1;
	unsigned int for_sync:1;	/* sync(2) WB_SYNC_ALL writeback */
	unsigned int auto_free:1;	/* free on completion */
	enum wb_reason reason;		/* why was writeback initiated? */

	struct list_head list;		/* pending work list */
	struct wb_completion *done;	/* set if the caller waits */
};

/*
 * If an inode is constantly having its pages dirtied, but then the
 * updates stop dirtytime_expire_interval seconds in the past, it's
 * possible for the worst case time between when an inode has its
 * timestamps updated and when they finally get written out to be two
 * dirtytime_expire_intervals.  We set the default to 12 hours (in
 * seconds), which means most of the time inodes will have their
 * timestamps written to disk after 12 hours, but in the worst case a
 * few inodes might not have their timestamps updated for 24 hours.
 */
unsigned int dirtytime_expire_interval = 12 * 60 * 60;

static inline struct inode *wb_inode(struct list_head *head)
{
	return list_entry(head, struct inode, i_io_list);
}

/*
 * Include the creation of the trace points after defining the
 * wb_writeback_work structure and inline functions so that the definition
 * remains local to this file.
79 */ 80 #define CREATE_TRACE_POINTS 81 #include <trace/events/writeback.h> 82 83 EXPORT_TRACEPOINT_SYMBOL_GPL(wbc_writepage); 84 85 static bool wb_io_lists_populated(struct bdi_writeback *wb) 86 { 87 if (wb_has_dirty_io(wb)) { 88 return false; 89 } else { 90 set_bit(WB_has_dirty_io, &wb->state); 91 WARN_ON_ONCE(!wb->avg_write_bandwidth); 92 atomic_long_add(wb->avg_write_bandwidth, 93 &wb->bdi->tot_write_bandwidth); 94 return true; 95 } 96 } 97 98 static void wb_io_lists_depopulated(struct bdi_writeback *wb) 99 { 100 if (wb_has_dirty_io(wb) && list_empty(&wb->b_dirty) && 101 list_empty(&wb->b_io) && list_empty(&wb->b_more_io)) { 102 clear_bit(WB_has_dirty_io, &wb->state); 103 WARN_ON_ONCE(atomic_long_sub_return(wb->avg_write_bandwidth, 104 &wb->bdi->tot_write_bandwidth) < 0); 105 } 106 } 107 108 /** 109 * inode_io_list_move_locked - move an inode onto a bdi_writeback IO list 110 * @inode: inode to be moved 111 * @wb: target bdi_writeback 112 * @head: one of @wb->b_{dirty|io|more_io|dirty_time} 113 * 114 * Move @inode->i_io_list to @list of @wb and set %WB_has_dirty_io. 115 * Returns %true if @inode is the first occupant of the !dirty_time IO 116 * lists; otherwise, %false. 117 */ 118 static bool inode_io_list_move_locked(struct inode *inode, 119 struct bdi_writeback *wb, 120 struct list_head *head) 121 { 122 assert_spin_locked(&wb->list_lock); 123 124 list_move(&inode->i_io_list, head); 125 126 /* dirty_time doesn't count as dirty_io until expiration */ 127 if (head != &wb->b_dirty_time) 128 return wb_io_lists_populated(wb); 129 130 wb_io_lists_depopulated(wb); 131 return false; 132 } 133 134 static void wb_wakeup(struct bdi_writeback *wb) 135 { 136 spin_lock_bh(&wb->work_lock); 137 if (test_bit(WB_registered, &wb->state)) 138 mod_delayed_work(bdi_wq, &wb->dwork, 0); 139 spin_unlock_bh(&wb->work_lock); 140 } 141 142 static void finish_writeback_work(struct bdi_writeback *wb, 143 struct wb_writeback_work *work) 144 { 145 struct wb_completion *done = work->done; 146 147 if (work->auto_free) 148 kfree(work); 149 if (done) { 150 wait_queue_head_t *waitq = done->waitq; 151 152 /* @done can't be accessed after the following dec */ 153 if (atomic_dec_and_test(&done->cnt)) 154 wake_up_all(waitq); 155 } 156 } 157 158 static void wb_queue_work(struct bdi_writeback *wb, 159 struct wb_writeback_work *work) 160 { 161 trace_writeback_queue(wb, work); 162 163 if (work->done) 164 atomic_inc(&work->done->cnt); 165 166 spin_lock_bh(&wb->work_lock); 167 168 if (test_bit(WB_registered, &wb->state)) { 169 list_add_tail(&work->list, &wb->work_list); 170 mod_delayed_work(bdi_wq, &wb->dwork, 0); 171 } else 172 finish_writeback_work(wb, work); 173 174 spin_unlock_bh(&wb->work_lock); 175 } 176 177 /** 178 * wb_wait_for_completion - wait for completion of bdi_writeback_works 179 * @done: target wb_completion 180 * 181 * Wait for one or more work items issued to @bdi with their ->done field 182 * set to @done, which should have been initialized with 183 * DEFINE_WB_COMPLETION(). This function returns after all such work items 184 * are completed. Work items which are waited upon aren't freed 185 * automatically on completion. 186 */ 187 void wb_wait_for_completion(struct wb_completion *done) 188 { 189 atomic_dec(&done->cnt); /* put down the initial count */ 190 wait_event(*done->waitq, !atomic_read(&done->cnt)); 191 } 192 193 #ifdef CONFIG_CGROUP_WRITEBACK 194 195 /* 196 * Parameters for foreign inode detection, see wbc_detach_inode() to see 197 * how they're used. 
 *
 * These parameters are inherently heuristic as the detection target
 * itself is fuzzy.  All we want to do is detach an inode from the
 * current owner if it's being written to by some other cgroups too much.
 *
 * The current cgroup writeback is built on the assumption that multiple
 * cgroups writing to the same inode concurrently is very rare and a mode
 * of operation which isn't well supported.  As such, the goal is to not
 * take too long when a different cgroup takes over an inode while
 * avoiding too aggressive flip-flops from occasional foreign writes.
 *
 * We record, very roughly, 2s worth of IO time history and if more than
 * half of that is foreign, trigger the switch.  The recording is quantized
 * to 16 slots.  To avoid tiny writes from swinging the decision too much,
 * writes smaller than 1/8 of avg size are ignored.
 */
#define WB_FRN_TIME_SHIFT	13	/* 1s = 2^13, up to 8 secs w/ 16bit */
#define WB_FRN_TIME_AVG_SHIFT	3	/* avg = avg * 7/8 + new * 1/8 */
#define WB_FRN_TIME_CUT_DIV	8	/* ignore rounds < avg / 8 */
#define WB_FRN_TIME_PERIOD	(2 * (1 << WB_FRN_TIME_SHIFT))	/* 2s */

#define WB_FRN_HIST_SLOTS	16	/* inode->i_wb_frn_history is 16bit */
#define WB_FRN_HIST_UNIT	(WB_FRN_TIME_PERIOD / WB_FRN_HIST_SLOTS)
					/* each slot's duration is 2s / 16 */
#define WB_FRN_HIST_THR_SLOTS	(WB_FRN_HIST_SLOTS / 2)
					/* if foreign slots >= 8, switch */
#define WB_FRN_HIST_MAX_SLOTS	(WB_FRN_HIST_THR_SLOTS / 2 + 1)
					/* one round can affect up to 5 slots */
#define WB_FRN_MAX_IN_FLIGHT	1024	/* don't queue too many concurrently */

/*
 * Maximum inodes per isw.  A specific value has been chosen to make
 * struct inode_switch_wbs_context fit into 1024 bytes kmalloc.
 */
#define WB_MAX_INODES_PER_ISW	((1024UL - sizeof(struct inode_switch_wbs_context)) \
				 / sizeof(struct inode *))

static atomic_t isw_nr_in_flight = ATOMIC_INIT(0);
static struct workqueue_struct *isw_wq;

void __inode_attach_wb(struct inode *inode, struct page *page)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);
	struct bdi_writeback *wb = NULL;

	if (inode_cgwb_enabled(inode)) {
		struct cgroup_subsys_state *memcg_css;

		if (page) {
			memcg_css = mem_cgroup_css_from_page(page);
			wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
		} else {
			/* must pin memcg_css, see wb_get_create() */
			memcg_css = task_get_css(current, memory_cgrp_id);
			wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
			css_put(memcg_css);
		}
	}

	if (!wb)
		wb = &bdi->wb;

	/*
	 * There may be multiple instances of this function racing to
	 * update the same inode.  Use cmpxchg() to tell the winner.
	 */
	if (unlikely(cmpxchg(&inode->i_wb, NULL, wb)))
		wb_put(wb);
}
EXPORT_SYMBOL_GPL(__inode_attach_wb);

/**
 * inode_cgwb_move_to_attached - put the inode onto wb->b_attached list
 * @inode: inode of interest with i_lock held
 * @wb: target bdi_writeback
 *
 * Remove the inode from wb's io lists and if necessary put it onto the
 * b_attached list.  Only inodes attached to cgwb's are kept on this list.
276 */ 277 static void inode_cgwb_move_to_attached(struct inode *inode, 278 struct bdi_writeback *wb) 279 { 280 assert_spin_locked(&wb->list_lock); 281 assert_spin_locked(&inode->i_lock); 282 283 inode->i_state &= ~I_SYNC_QUEUED; 284 if (wb != &wb->bdi->wb) 285 list_move(&inode->i_io_list, &wb->b_attached); 286 else 287 list_del_init(&inode->i_io_list); 288 wb_io_lists_depopulated(wb); 289 } 290 291 /** 292 * locked_inode_to_wb_and_lock_list - determine a locked inode's wb and lock it 293 * @inode: inode of interest with i_lock held 294 * 295 * Returns @inode's wb with its list_lock held. @inode->i_lock must be 296 * held on entry and is released on return. The returned wb is guaranteed 297 * to stay @inode's associated wb until its list_lock is released. 298 */ 299 static struct bdi_writeback * 300 locked_inode_to_wb_and_lock_list(struct inode *inode) 301 __releases(&inode->i_lock) 302 __acquires(&wb->list_lock) 303 { 304 while (true) { 305 struct bdi_writeback *wb = inode_to_wb(inode); 306 307 /* 308 * inode_to_wb() association is protected by both 309 * @inode->i_lock and @wb->list_lock but list_lock nests 310 * outside i_lock. Drop i_lock and verify that the 311 * association hasn't changed after acquiring list_lock. 312 */ 313 wb_get(wb); 314 spin_unlock(&inode->i_lock); 315 spin_lock(&wb->list_lock); 316 317 /* i_wb may have changed inbetween, can't use inode_to_wb() */ 318 if (likely(wb == inode->i_wb)) { 319 wb_put(wb); /* @inode already has ref */ 320 return wb; 321 } 322 323 spin_unlock(&wb->list_lock); 324 wb_put(wb); 325 cpu_relax(); 326 spin_lock(&inode->i_lock); 327 } 328 } 329 330 /** 331 * inode_to_wb_and_lock_list - determine an inode's wb and lock it 332 * @inode: inode of interest 333 * 334 * Same as locked_inode_to_wb_and_lock_list() but @inode->i_lock isn't held 335 * on entry. 336 */ 337 static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode) 338 __acquires(&wb->list_lock) 339 { 340 spin_lock(&inode->i_lock); 341 return locked_inode_to_wb_and_lock_list(inode); 342 } 343 344 struct inode_switch_wbs_context { 345 struct rcu_work work; 346 347 /* 348 * Multiple inodes can be switched at once. The switching procedure 349 * consists of two parts, separated by a RCU grace period. To make 350 * sure that the second part is executed for each inode gone through 351 * the first part, all inode pointers are placed into a NULL-terminated 352 * array embedded into struct inode_switch_wbs_context. Otherwise 353 * an inode could be left in a non-consistent state. 354 */ 355 struct bdi_writeback *new_wb; 356 struct inode *inodes[]; 357 }; 358 359 static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi) 360 { 361 down_write(&bdi->wb_switch_rwsem); 362 } 363 364 static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi) 365 { 366 up_write(&bdi->wb_switch_rwsem); 367 } 368 369 static bool inode_do_switch_wbs(struct inode *inode, 370 struct bdi_writeback *old_wb, 371 struct bdi_writeback *new_wb) 372 { 373 struct address_space *mapping = inode->i_mapping; 374 XA_STATE(xas, &mapping->i_pages, 0); 375 struct folio *folio; 376 bool switched = false; 377 378 spin_lock(&inode->i_lock); 379 xa_lock_irq(&mapping->i_pages); 380 381 /* 382 * Once I_FREEING or I_WILL_FREE are visible under i_lock, the eviction 383 * path owns the inode and we shouldn't modify ->i_io_list. 
384 */ 385 if (unlikely(inode->i_state & (I_FREEING | I_WILL_FREE))) 386 goto skip_switch; 387 388 trace_inode_switch_wbs(inode, old_wb, new_wb); 389 390 /* 391 * Count and transfer stats. Note that PAGECACHE_TAG_DIRTY points 392 * to possibly dirty folios while PAGECACHE_TAG_WRITEBACK points to 393 * folios actually under writeback. 394 */ 395 xas_for_each_marked(&xas, folio, ULONG_MAX, PAGECACHE_TAG_DIRTY) { 396 if (folio_test_dirty(folio)) { 397 long nr = folio_nr_pages(folio); 398 wb_stat_mod(old_wb, WB_RECLAIMABLE, -nr); 399 wb_stat_mod(new_wb, WB_RECLAIMABLE, nr); 400 } 401 } 402 403 xas_set(&xas, 0); 404 xas_for_each_marked(&xas, folio, ULONG_MAX, PAGECACHE_TAG_WRITEBACK) { 405 long nr = folio_nr_pages(folio); 406 WARN_ON_ONCE(!folio_test_writeback(folio)); 407 wb_stat_mod(old_wb, WB_WRITEBACK, -nr); 408 wb_stat_mod(new_wb, WB_WRITEBACK, nr); 409 } 410 411 if (mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) { 412 atomic_dec(&old_wb->writeback_inodes); 413 atomic_inc(&new_wb->writeback_inodes); 414 } 415 416 wb_get(new_wb); 417 418 /* 419 * Transfer to @new_wb's IO list if necessary. If the @inode is dirty, 420 * the specific list @inode was on is ignored and the @inode is put on 421 * ->b_dirty which is always correct including from ->b_dirty_time. 422 * The transfer preserves @inode->dirtied_when ordering. If the @inode 423 * was clean, it means it was on the b_attached list, so move it onto 424 * the b_attached list of @new_wb. 425 */ 426 if (!list_empty(&inode->i_io_list)) { 427 inode->i_wb = new_wb; 428 429 if (inode->i_state & I_DIRTY_ALL) { 430 struct inode *pos; 431 432 list_for_each_entry(pos, &new_wb->b_dirty, i_io_list) 433 if (time_after_eq(inode->dirtied_when, 434 pos->dirtied_when)) 435 break; 436 inode_io_list_move_locked(inode, new_wb, 437 pos->i_io_list.prev); 438 } else { 439 inode_cgwb_move_to_attached(inode, new_wb); 440 } 441 } else { 442 inode->i_wb = new_wb; 443 } 444 445 /* ->i_wb_frn updates may race wbc_detach_inode() but doesn't matter */ 446 inode->i_wb_frn_winner = 0; 447 inode->i_wb_frn_avg_time = 0; 448 inode->i_wb_frn_history = 0; 449 switched = true; 450 skip_switch: 451 /* 452 * Paired with load_acquire in unlocked_inode_to_wb_begin() and 453 * ensures that the new wb is visible if they see !I_WB_SWITCH. 454 */ 455 smp_store_release(&inode->i_state, inode->i_state & ~I_WB_SWITCH); 456 457 xa_unlock_irq(&mapping->i_pages); 458 spin_unlock(&inode->i_lock); 459 460 return switched; 461 } 462 463 static void inode_switch_wbs_work_fn(struct work_struct *work) 464 { 465 struct inode_switch_wbs_context *isw = 466 container_of(to_rcu_work(work), struct inode_switch_wbs_context, work); 467 struct backing_dev_info *bdi = inode_to_bdi(isw->inodes[0]); 468 struct bdi_writeback *old_wb = isw->inodes[0]->i_wb; 469 struct bdi_writeback *new_wb = isw->new_wb; 470 unsigned long nr_switched = 0; 471 struct inode **inodep; 472 473 /* 474 * If @inode switches cgwb membership while sync_inodes_sb() is 475 * being issued, sync_inodes_sb() might miss it. Synchronize. 476 */ 477 down_read(&bdi->wb_switch_rwsem); 478 479 /* 480 * By the time control reaches here, RCU grace period has passed 481 * since I_WB_SWITCH assertion and all wb stat update transactions 482 * between unlocked_inode_to_wb_begin/end() are guaranteed to be 483 * synchronizing against the i_pages lock. 484 * 485 * Grabbing old_wb->list_lock, inode->i_lock and the i_pages lock 486 * gives us exclusion against all wb related operations on @inode 487 * including IO list manipulations and stat updates. 
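	 *
	 * The two list_locks below are always taken in pointer order so
	 * that concurrent switches between the same pair of wb's cannot
	 * deadlock on each other (simple ABBA avoidance).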
488 */ 489 if (old_wb < new_wb) { 490 spin_lock(&old_wb->list_lock); 491 spin_lock_nested(&new_wb->list_lock, SINGLE_DEPTH_NESTING); 492 } else { 493 spin_lock(&new_wb->list_lock); 494 spin_lock_nested(&old_wb->list_lock, SINGLE_DEPTH_NESTING); 495 } 496 497 for (inodep = isw->inodes; *inodep; inodep++) { 498 WARN_ON_ONCE((*inodep)->i_wb != old_wb); 499 if (inode_do_switch_wbs(*inodep, old_wb, new_wb)) 500 nr_switched++; 501 } 502 503 spin_unlock(&new_wb->list_lock); 504 spin_unlock(&old_wb->list_lock); 505 506 up_read(&bdi->wb_switch_rwsem); 507 508 if (nr_switched) { 509 wb_wakeup(new_wb); 510 wb_put_many(old_wb, nr_switched); 511 } 512 513 for (inodep = isw->inodes; *inodep; inodep++) 514 iput(*inodep); 515 wb_put(new_wb); 516 kfree(isw); 517 atomic_dec(&isw_nr_in_flight); 518 } 519 520 static bool inode_prepare_wbs_switch(struct inode *inode, 521 struct bdi_writeback *new_wb) 522 { 523 /* 524 * Paired with smp_mb() in cgroup_writeback_umount(). 525 * isw_nr_in_flight must be increased before checking SB_ACTIVE and 526 * grabbing an inode, otherwise isw_nr_in_flight can be observed as 0 527 * in cgroup_writeback_umount() and the isw_wq will be not flushed. 528 */ 529 smp_mb(); 530 531 if (IS_DAX(inode)) 532 return false; 533 534 /* while holding I_WB_SWITCH, no one else can update the association */ 535 spin_lock(&inode->i_lock); 536 if (!(inode->i_sb->s_flags & SB_ACTIVE) || 537 inode->i_state & (I_WB_SWITCH | I_FREEING | I_WILL_FREE) || 538 inode_to_wb(inode) == new_wb) { 539 spin_unlock(&inode->i_lock); 540 return false; 541 } 542 inode->i_state |= I_WB_SWITCH; 543 __iget(inode); 544 spin_unlock(&inode->i_lock); 545 546 return true; 547 } 548 549 /** 550 * inode_switch_wbs - change the wb association of an inode 551 * @inode: target inode 552 * @new_wb_id: ID of the new wb 553 * 554 * Switch @inode's wb association to the wb identified by @new_wb_id. The 555 * switching is performed asynchronously and may fail silently. 556 */ 557 static void inode_switch_wbs(struct inode *inode, int new_wb_id) 558 { 559 struct backing_dev_info *bdi = inode_to_bdi(inode); 560 struct cgroup_subsys_state *memcg_css; 561 struct inode_switch_wbs_context *isw; 562 563 /* noop if seems to be already in progress */ 564 if (inode->i_state & I_WB_SWITCH) 565 return; 566 567 /* avoid queueing a new switch if too many are already in flight */ 568 if (atomic_read(&isw_nr_in_flight) > WB_FRN_MAX_IN_FLIGHT) 569 return; 570 571 isw = kzalloc(struct_size(isw, inodes, 2), GFP_ATOMIC); 572 if (!isw) 573 return; 574 575 atomic_inc(&isw_nr_in_flight); 576 577 /* find and pin the new wb */ 578 rcu_read_lock(); 579 memcg_css = css_from_id(new_wb_id, &memory_cgrp_subsys); 580 if (memcg_css && !css_tryget(memcg_css)) 581 memcg_css = NULL; 582 rcu_read_unlock(); 583 if (!memcg_css) 584 goto out_free; 585 586 isw->new_wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC); 587 css_put(memcg_css); 588 if (!isw->new_wb) 589 goto out_free; 590 591 if (!inode_prepare_wbs_switch(inode, isw->new_wb)) 592 goto out_free; 593 594 isw->inodes[0] = inode; 595 596 /* 597 * In addition to synchronizing among switchers, I_WB_SWITCH tells 598 * the RCU protected stat update paths to grab the i_page 599 * lock so that stat transfer can synchronize against them. 600 * Let's continue after I_WB_SWITCH is guaranteed to be visible. 
601 */ 602 INIT_RCU_WORK(&isw->work, inode_switch_wbs_work_fn); 603 queue_rcu_work(isw_wq, &isw->work); 604 return; 605 606 out_free: 607 atomic_dec(&isw_nr_in_flight); 608 if (isw->new_wb) 609 wb_put(isw->new_wb); 610 kfree(isw); 611 } 612 613 /** 614 * cleanup_offline_cgwb - detach associated inodes 615 * @wb: target wb 616 * 617 * Switch all inodes attached to @wb to a nearest living ancestor's wb in order 618 * to eventually release the dying @wb. Returns %true if not all inodes were 619 * switched and the function has to be restarted. 620 */ 621 bool cleanup_offline_cgwb(struct bdi_writeback *wb) 622 { 623 struct cgroup_subsys_state *memcg_css; 624 struct inode_switch_wbs_context *isw; 625 struct inode *inode; 626 int nr; 627 bool restart = false; 628 629 isw = kzalloc(struct_size(isw, inodes, WB_MAX_INODES_PER_ISW), 630 GFP_KERNEL); 631 if (!isw) 632 return restart; 633 634 atomic_inc(&isw_nr_in_flight); 635 636 for (memcg_css = wb->memcg_css->parent; memcg_css; 637 memcg_css = memcg_css->parent) { 638 isw->new_wb = wb_get_create(wb->bdi, memcg_css, GFP_KERNEL); 639 if (isw->new_wb) 640 break; 641 } 642 if (unlikely(!isw->new_wb)) 643 isw->new_wb = &wb->bdi->wb; /* wb_get() is noop for bdi's wb */ 644 645 nr = 0; 646 spin_lock(&wb->list_lock); 647 list_for_each_entry(inode, &wb->b_attached, i_io_list) { 648 if (!inode_prepare_wbs_switch(inode, isw->new_wb)) 649 continue; 650 651 isw->inodes[nr++] = inode; 652 653 if (nr >= WB_MAX_INODES_PER_ISW - 1) { 654 restart = true; 655 break; 656 } 657 } 658 spin_unlock(&wb->list_lock); 659 660 /* no attached inodes? bail out */ 661 if (nr == 0) { 662 atomic_dec(&isw_nr_in_flight); 663 wb_put(isw->new_wb); 664 kfree(isw); 665 return restart; 666 } 667 668 /* 669 * In addition to synchronizing among switchers, I_WB_SWITCH tells 670 * the RCU protected stat update paths to grab the i_page 671 * lock so that stat transfer can synchronize against them. 672 * Let's continue after I_WB_SWITCH is guaranteed to be visible. 673 */ 674 INIT_RCU_WORK(&isw->work, inode_switch_wbs_work_fn); 675 queue_rcu_work(isw_wq, &isw->work); 676 677 return restart; 678 } 679 680 /** 681 * wbc_attach_and_unlock_inode - associate wbc with target inode and unlock it 682 * @wbc: writeback_control of interest 683 * @inode: target inode 684 * 685 * @inode is locked and about to be written back under the control of @wbc. 686 * Record @inode's writeback context into @wbc and unlock the i_lock. On 687 * writeback completion, wbc_detach_inode() should be called. This is used 688 * to track the cgroup writeback context. 689 */ 690 void wbc_attach_and_unlock_inode(struct writeback_control *wbc, 691 struct inode *inode) 692 { 693 if (!inode_cgwb_enabled(inode)) { 694 spin_unlock(&inode->i_lock); 695 return; 696 } 697 698 wbc->wb = inode_to_wb(inode); 699 wbc->inode = inode; 700 701 wbc->wb_id = wbc->wb->memcg_css->id; 702 wbc->wb_lcand_id = inode->i_wb_frn_winner; 703 wbc->wb_tcand_id = 0; 704 wbc->wb_bytes = 0; 705 wbc->wb_lcand_bytes = 0; 706 wbc->wb_tcand_bytes = 0; 707 708 wb_get(wbc->wb); 709 spin_unlock(&inode->i_lock); 710 711 /* 712 * A dying wb indicates that either the blkcg associated with the 713 * memcg changed or the associated memcg is dying. In the first 714 * case, a replacement wb should already be available and we should 715 * refresh the wb immediately. In the second case, trying to 716 * refresh will keep failing. 
	 */
	if (unlikely(wb_dying(wbc->wb) && !css_is_dying(wbc->wb->memcg_css)))
		inode_switch_wbs(inode, wbc->wb_id);
}
EXPORT_SYMBOL_GPL(wbc_attach_and_unlock_inode);

/**
 * wbc_detach_inode - disassociate wbc from inode and perform foreign detection
 * @wbc: writeback_control of the just finished writeback
 *
 * To be called after a writeback attempt of an inode finishes and undoes
 * wbc_attach_and_unlock_inode().  Can be called under any context.
 *
 * As concurrent write sharing of an inode is expected to be very rare and
 * memcg only tracks page ownership on a first-use basis severely confining
 * the usefulness of such sharing, cgroup writeback tracks ownership
 * per-inode.  While the support for concurrent write sharing of an inode
 * is deemed unnecessary, an inode being written to by different cgroups at
 * different points in time is a lot more common, and, more importantly,
 * charging only by first-use can too readily lead to grossly incorrect
 * behaviors (single foreign page can lead to gigabytes of writeback to be
 * incorrectly attributed).
 *
 * To resolve this issue, cgroup writeback detects the majority dirtier of
 * an inode and transfers the ownership to it.  To avoid unnecessary
 * oscillation, the detection mechanism keeps track of history and gives
 * out the switch verdict only if the foreign usage pattern is stable over
 * a certain amount of time and/or writeback attempts.
 *
 * On each writeback attempt, @wbc tries to detect the majority writer
 * using the Boyer-Moore majority vote algorithm.  In addition to the byte
 * count from the majority voting, it also counts the bytes written for the
 * current wb and the last round's winner wb (max of last round's current
 * wb, the winner from two rounds ago, and the last round's majority
 * candidate).  Keeping track of the historical winner helps the algorithm
 * to semi-reliably detect the most active writer even when it's not the
 * absolute majority.
 *
 * Once the winner of the round is determined, whether the winner is
 * foreign or not and how much IO time the round consumed is recorded in
 * inode->i_wb_frn_history.  If the amount of recorded foreign IO time is
 * over a certain threshold, the switch verdict is given.
 */
void wbc_detach_inode(struct writeback_control *wbc)
{
	struct bdi_writeback *wb = wbc->wb;
	struct inode *inode = wbc->inode;
	unsigned long avg_time, max_bytes, max_time;
	u16 history;
	int max_id;

	if (!wb)
		return;

	history = inode->i_wb_frn_history;
	avg_time = inode->i_wb_frn_avg_time;

	/* pick the winner of this round */
	if (wbc->wb_bytes >= wbc->wb_lcand_bytes &&
	    wbc->wb_bytes >= wbc->wb_tcand_bytes) {
		max_id = wbc->wb_id;
		max_bytes = wbc->wb_bytes;
	} else if (wbc->wb_lcand_bytes >= wbc->wb_tcand_bytes) {
		max_id = wbc->wb_lcand_id;
		max_bytes = wbc->wb_lcand_bytes;
	} else {
		max_id = wbc->wb_tcand_id;
		max_bytes = wbc->wb_tcand_bytes;
	}

	/*
	 * Calculate the amount of IO time the winner consumed and fold it
	 * into the running average kept per inode.  If the consumed IO
	 * time is lower than avg / WB_FRN_TIME_CUT_DIV, ignore it for
	 * deciding whether to switch or not.  This is to prevent one-off
	 * small dirtiers from skewing the verdict.
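	 *
	 * For a rough illustration (assuming ->avg_write_bandwidth is kept
	 * in pages per second): a winner that wrote about half a second's
	 * worth of IO at the device's average bandwidth gets a max_time of
	 * roughly 2^WB_FRN_TIME_SHIFT / 2 = 4096, which later shifts
	 * DIV_ROUND_UP(4096, WB_FRN_HIST_UNIT) = 4 slots into the history.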
	 */
	max_time = DIV_ROUND_UP((max_bytes >> PAGE_SHIFT) << WB_FRN_TIME_SHIFT,
				wb->avg_write_bandwidth);
	if (avg_time)
		avg_time += (max_time >> WB_FRN_TIME_AVG_SHIFT) -
			    (avg_time >> WB_FRN_TIME_AVG_SHIFT);
	else
		avg_time = max_time;	/* immediate catch up on first run */

	if (max_time >= avg_time / WB_FRN_TIME_CUT_DIV) {
		int slots;

		/*
		 * The switch verdict is reached if foreign wb's consume
		 * more than a certain proportion of IO time in a
		 * WB_FRN_TIME_PERIOD.  This is loosely tracked by a 16 slot
		 * history mask where each bit represents one sixteenth of
		 * the period.  Determine the number of slots to shift into
		 * history from @max_time.
		 */
		slots = min(DIV_ROUND_UP(max_time, WB_FRN_HIST_UNIT),
			    (unsigned long)WB_FRN_HIST_MAX_SLOTS);
		history <<= slots;
		if (wbc->wb_id != max_id)
			history |= (1U << slots) - 1;

		if (history)
			trace_inode_foreign_history(inode, wbc, history);

		/*
		 * Switch if the current wb isn't the consistent winner.
		 * If there are multiple closely competing dirtiers, the
		 * inode may switch across them repeatedly over time, which
		 * is okay.  The main goal is avoiding keeping an inode on
		 * the wrong wb for an extended period of time.
		 */
		if (hweight32(history) > WB_FRN_HIST_THR_SLOTS)
			inode_switch_wbs(inode, max_id);
	}

	/*
	 * Multiple instances of this function may race to update the
	 * following fields but we don't mind occasional inaccuracies.
	 */
	inode->i_wb_frn_winner = max_id;
	inode->i_wb_frn_avg_time = min(avg_time, (unsigned long)U16_MAX);
	inode->i_wb_frn_history = history;

	wb_put(wbc->wb);
	wbc->wb = NULL;
}
EXPORT_SYMBOL_GPL(wbc_detach_inode);

/**
 * wbc_account_cgroup_owner - account writeback to update inode cgroup ownership
 * @wbc: writeback_control of the writeback in progress
 * @page: page being written out
 * @bytes: number of bytes being written out
 *
 * @bytes from @page are about to be written out during the writeback
 * controlled by @wbc.  Keep the book for foreign inode detection.  See
 * wbc_detach_inode().
 */
void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page,
			      size_t bytes)
{
	struct cgroup_subsys_state *css;
	int id;

	/*
	 * pageout() path doesn't attach @wbc to the inode being written
	 * out.  This is intentional as we don't want the function to block
	 * behind a slow cgroup.  Ultimately, we want pageout() to kick off
	 * regular writeback instead of writing things out itself.
867 */ 868 if (!wbc->wb || wbc->no_cgroup_owner) 869 return; 870 871 css = mem_cgroup_css_from_page(page); 872 /* dead cgroups shouldn't contribute to inode ownership arbitration */ 873 if (!(css->flags & CSS_ONLINE)) 874 return; 875 876 id = css->id; 877 878 if (id == wbc->wb_id) { 879 wbc->wb_bytes += bytes; 880 return; 881 } 882 883 if (id == wbc->wb_lcand_id) 884 wbc->wb_lcand_bytes += bytes; 885 886 /* Boyer-Moore majority vote algorithm */ 887 if (!wbc->wb_tcand_bytes) 888 wbc->wb_tcand_id = id; 889 if (id == wbc->wb_tcand_id) 890 wbc->wb_tcand_bytes += bytes; 891 else 892 wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes); 893 } 894 EXPORT_SYMBOL_GPL(wbc_account_cgroup_owner); 895 896 /** 897 * inode_congested - test whether an inode is congested 898 * @inode: inode to test for congestion (may be NULL) 899 * @cong_bits: mask of WB_[a]sync_congested bits to test 900 * 901 * Tests whether @inode is congested. @cong_bits is the mask of congestion 902 * bits to test and the return value is the mask of set bits. 903 * 904 * If cgroup writeback is enabled for @inode, the congestion state is 905 * determined by whether the cgwb (cgroup bdi_writeback) for the blkcg 906 * associated with @inode is congested; otherwise, the root wb's congestion 907 * state is used. 908 * 909 * @inode is allowed to be NULL as this function is often called on 910 * mapping->host which is NULL for the swapper space. 911 */ 912 int inode_congested(struct inode *inode, int cong_bits) 913 { 914 /* 915 * Once set, ->i_wb never becomes NULL while the inode is alive. 916 * Start transaction iff ->i_wb is visible. 917 */ 918 if (inode && inode_to_wb_is_valid(inode)) { 919 struct bdi_writeback *wb; 920 struct wb_lock_cookie lock_cookie = {}; 921 bool congested; 922 923 wb = unlocked_inode_to_wb_begin(inode, &lock_cookie); 924 congested = wb_congested(wb, cong_bits); 925 unlocked_inode_to_wb_end(inode, &lock_cookie); 926 return congested; 927 } 928 929 return wb_congested(&inode_to_bdi(inode)->wb, cong_bits); 930 } 931 EXPORT_SYMBOL_GPL(inode_congested); 932 933 /** 934 * wb_split_bdi_pages - split nr_pages to write according to bandwidth 935 * @wb: target bdi_writeback to split @nr_pages to 936 * @nr_pages: number of pages to write for the whole bdi 937 * 938 * Split @wb's portion of @nr_pages according to @wb's write bandwidth in 939 * relation to the total write bandwidth of all wb's w/ dirty inodes on 940 * @wb->bdi. 941 */ 942 static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages) 943 { 944 unsigned long this_bw = wb->avg_write_bandwidth; 945 unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth); 946 947 if (nr_pages == LONG_MAX) 948 return LONG_MAX; 949 950 /* 951 * This may be called on clean wb's and proportional distribution 952 * may not make sense, just use the original @nr_pages in those 953 * cases. In general, we wanna err on the side of writing more. 954 */ 955 if (!tot_bw || this_bw >= tot_bw) 956 return nr_pages; 957 else 958 return DIV_ROUND_UP_ULL((u64)nr_pages * this_bw, tot_bw); 959 } 960 961 /** 962 * bdi_split_work_to_wbs - split a wb_writeback_work to all wb's of a bdi 963 * @bdi: target backing_dev_info 964 * @base_work: wb_writeback_work to issue 965 * @skip_if_busy: skip wb's which already have writeback in progress 966 * 967 * Split and issue @base_work to all wb's (bdi_writeback's) of @bdi which 968 * have dirty inodes. 
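 * If allocating a work item for a wb fails, an on-stack fallback work is
 * used and issued synchronously, one wb at a time.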
If @base_work->nr_page isn't %LONG_MAX, it's 969 * distributed to the busy wbs according to each wb's proportion in the 970 * total active write bandwidth of @bdi. 971 */ 972 static void bdi_split_work_to_wbs(struct backing_dev_info *bdi, 973 struct wb_writeback_work *base_work, 974 bool skip_if_busy) 975 { 976 struct bdi_writeback *last_wb = NULL; 977 struct bdi_writeback *wb = list_entry(&bdi->wb_list, 978 struct bdi_writeback, bdi_node); 979 980 might_sleep(); 981 restart: 982 rcu_read_lock(); 983 list_for_each_entry_continue_rcu(wb, &bdi->wb_list, bdi_node) { 984 DEFINE_WB_COMPLETION(fallback_work_done, bdi); 985 struct wb_writeback_work fallback_work; 986 struct wb_writeback_work *work; 987 long nr_pages; 988 989 if (last_wb) { 990 wb_put(last_wb); 991 last_wb = NULL; 992 } 993 994 /* SYNC_ALL writes out I_DIRTY_TIME too */ 995 if (!wb_has_dirty_io(wb) && 996 (base_work->sync_mode == WB_SYNC_NONE || 997 list_empty(&wb->b_dirty_time))) 998 continue; 999 if (skip_if_busy && writeback_in_progress(wb)) 1000 continue; 1001 1002 nr_pages = wb_split_bdi_pages(wb, base_work->nr_pages); 1003 1004 work = kmalloc(sizeof(*work), GFP_ATOMIC); 1005 if (work) { 1006 *work = *base_work; 1007 work->nr_pages = nr_pages; 1008 work->auto_free = 1; 1009 wb_queue_work(wb, work); 1010 continue; 1011 } 1012 1013 /* alloc failed, execute synchronously using on-stack fallback */ 1014 work = &fallback_work; 1015 *work = *base_work; 1016 work->nr_pages = nr_pages; 1017 work->auto_free = 0; 1018 work->done = &fallback_work_done; 1019 1020 wb_queue_work(wb, work); 1021 1022 /* 1023 * Pin @wb so that it stays on @bdi->wb_list. This allows 1024 * continuing iteration from @wb after dropping and 1025 * regrabbing rcu read lock. 1026 */ 1027 wb_get(wb); 1028 last_wb = wb; 1029 1030 rcu_read_unlock(); 1031 wb_wait_for_completion(&fallback_work_done); 1032 goto restart; 1033 } 1034 rcu_read_unlock(); 1035 1036 if (last_wb) 1037 wb_put(last_wb); 1038 } 1039 1040 /** 1041 * cgroup_writeback_by_id - initiate cgroup writeback from bdi and memcg IDs 1042 * @bdi_id: target bdi id 1043 * @memcg_id: target memcg css id 1044 * @reason: reason why some writeback work initiated 1045 * @done: target wb_completion 1046 * 1047 * Initiate flush of the bdi_writeback identified by @bdi_id and @memcg_id 1048 * with the specified parameters. 1049 */ 1050 int cgroup_writeback_by_id(u64 bdi_id, int memcg_id, 1051 enum wb_reason reason, struct wb_completion *done) 1052 { 1053 struct backing_dev_info *bdi; 1054 struct cgroup_subsys_state *memcg_css; 1055 struct bdi_writeback *wb; 1056 struct wb_writeback_work *work; 1057 unsigned long dirty; 1058 int ret; 1059 1060 /* lookup bdi and memcg */ 1061 bdi = bdi_get_by_id(bdi_id); 1062 if (!bdi) 1063 return -ENOENT; 1064 1065 rcu_read_lock(); 1066 memcg_css = css_from_id(memcg_id, &memory_cgrp_subsys); 1067 if (memcg_css && !css_tryget(memcg_css)) 1068 memcg_css = NULL; 1069 rcu_read_unlock(); 1070 if (!memcg_css) { 1071 ret = -ENOENT; 1072 goto out_bdi_put; 1073 } 1074 1075 /* 1076 * And find the associated wb. If the wb isn't there already 1077 * there's nothing to flush, don't create one. 1078 */ 1079 wb = wb_get_lookup(bdi, memcg_css); 1080 if (!wb) { 1081 ret = -ENOENT; 1082 goto out_css_put; 1083 } 1084 1085 /* 1086 * The caller is attempting to write out most of 1087 * the currently dirty pages. Let's take the current dirty page 1088 * count and inflate it by 25% which should be large enough to 1089 * flush out most dirty pages while avoiding getting livelocked by 1090 * concurrent dirtiers. 
1091 * 1092 * BTW the memcg stats are flushed periodically and this is best-effort 1093 * estimation, so some potential error is ok. 1094 */ 1095 dirty = memcg_page_state(mem_cgroup_from_css(memcg_css), NR_FILE_DIRTY); 1096 dirty = dirty * 10 / 8; 1097 1098 /* issue the writeback work */ 1099 work = kzalloc(sizeof(*work), GFP_NOWAIT | __GFP_NOWARN); 1100 if (work) { 1101 work->nr_pages = dirty; 1102 work->sync_mode = WB_SYNC_NONE; 1103 work->range_cyclic = 1; 1104 work->reason = reason; 1105 work->done = done; 1106 work->auto_free = 1; 1107 wb_queue_work(wb, work); 1108 ret = 0; 1109 } else { 1110 ret = -ENOMEM; 1111 } 1112 1113 wb_put(wb); 1114 out_css_put: 1115 css_put(memcg_css); 1116 out_bdi_put: 1117 bdi_put(bdi); 1118 return ret; 1119 } 1120 1121 /** 1122 * cgroup_writeback_umount - flush inode wb switches for umount 1123 * 1124 * This function is called when a super_block is about to be destroyed and 1125 * flushes in-flight inode wb switches. An inode wb switch goes through 1126 * RCU and then workqueue, so the two need to be flushed in order to ensure 1127 * that all previously scheduled switches are finished. As wb switches are 1128 * rare occurrences and synchronize_rcu() can take a while, perform 1129 * flushing iff wb switches are in flight. 1130 */ 1131 void cgroup_writeback_umount(void) 1132 { 1133 /* 1134 * SB_ACTIVE should be reliably cleared before checking 1135 * isw_nr_in_flight, see generic_shutdown_super(). 1136 */ 1137 smp_mb(); 1138 1139 if (atomic_read(&isw_nr_in_flight)) { 1140 /* 1141 * Use rcu_barrier() to wait for all pending callbacks to 1142 * ensure that all in-flight wb switches are in the workqueue. 1143 */ 1144 rcu_barrier(); 1145 flush_workqueue(isw_wq); 1146 } 1147 } 1148 1149 static int __init cgroup_writeback_init(void) 1150 { 1151 isw_wq = alloc_workqueue("inode_switch_wbs", 0, 0); 1152 if (!isw_wq) 1153 return -ENOMEM; 1154 return 0; 1155 } 1156 fs_initcall(cgroup_writeback_init); 1157 1158 #else /* CONFIG_CGROUP_WRITEBACK */ 1159 1160 static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi) { } 1161 static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi) { } 1162 1163 static void inode_cgwb_move_to_attached(struct inode *inode, 1164 struct bdi_writeback *wb) 1165 { 1166 assert_spin_locked(&wb->list_lock); 1167 assert_spin_locked(&inode->i_lock); 1168 1169 inode->i_state &= ~I_SYNC_QUEUED; 1170 list_del_init(&inode->i_io_list); 1171 wb_io_lists_depopulated(wb); 1172 } 1173 1174 static struct bdi_writeback * 1175 locked_inode_to_wb_and_lock_list(struct inode *inode) 1176 __releases(&inode->i_lock) 1177 __acquires(&wb->list_lock) 1178 { 1179 struct bdi_writeback *wb = inode_to_wb(inode); 1180 1181 spin_unlock(&inode->i_lock); 1182 spin_lock(&wb->list_lock); 1183 return wb; 1184 } 1185 1186 static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode) 1187 __acquires(&wb->list_lock) 1188 { 1189 struct bdi_writeback *wb = inode_to_wb(inode); 1190 1191 spin_lock(&wb->list_lock); 1192 return wb; 1193 } 1194 1195 static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages) 1196 { 1197 return nr_pages; 1198 } 1199 1200 static void bdi_split_work_to_wbs(struct backing_dev_info *bdi, 1201 struct wb_writeback_work *base_work, 1202 bool skip_if_busy) 1203 { 1204 might_sleep(); 1205 1206 if (!skip_if_busy || !writeback_in_progress(&bdi->wb)) { 1207 base_work->auto_free = 0; 1208 wb_queue_work(&bdi->wb, base_work); 1209 } 1210 } 1211 1212 #endif /* CONFIG_CGROUP_WRITEBACK */ 1213 1214 /* 1215 * Add in the 
number of potentially dirty inodes, because each inode 1216 * write can dirty pagecache in the underlying blockdev. 1217 */ 1218 static unsigned long get_nr_dirty_pages(void) 1219 { 1220 return global_node_page_state(NR_FILE_DIRTY) + 1221 get_nr_dirty_inodes(); 1222 } 1223 1224 static void wb_start_writeback(struct bdi_writeback *wb, enum wb_reason reason) 1225 { 1226 if (!wb_has_dirty_io(wb)) 1227 return; 1228 1229 /* 1230 * All callers of this function want to start writeback of all 1231 * dirty pages. Places like vmscan can call this at a very 1232 * high frequency, causing pointless allocations of tons of 1233 * work items and keeping the flusher threads busy retrieving 1234 * that work. Ensure that we only allow one of them pending and 1235 * inflight at the time. 1236 */ 1237 if (test_bit(WB_start_all, &wb->state) || 1238 test_and_set_bit(WB_start_all, &wb->state)) 1239 return; 1240 1241 wb->start_all_reason = reason; 1242 wb_wakeup(wb); 1243 } 1244 1245 /** 1246 * wb_start_background_writeback - start background writeback 1247 * @wb: bdi_writback to write from 1248 * 1249 * Description: 1250 * This makes sure WB_SYNC_NONE background writeback happens. When 1251 * this function returns, it is only guaranteed that for given wb 1252 * some IO is happening if we are over background dirty threshold. 1253 * Caller need not hold sb s_umount semaphore. 1254 */ 1255 void wb_start_background_writeback(struct bdi_writeback *wb) 1256 { 1257 /* 1258 * We just wake up the flusher thread. It will perform background 1259 * writeback as soon as there is no other work to do. 1260 */ 1261 trace_writeback_wake_background(wb); 1262 wb_wakeup(wb); 1263 } 1264 1265 /* 1266 * Remove the inode from the writeback list it is on. 1267 */ 1268 void inode_io_list_del(struct inode *inode) 1269 { 1270 struct bdi_writeback *wb; 1271 1272 wb = inode_to_wb_and_lock_list(inode); 1273 spin_lock(&inode->i_lock); 1274 1275 inode->i_state &= ~I_SYNC_QUEUED; 1276 list_del_init(&inode->i_io_list); 1277 wb_io_lists_depopulated(wb); 1278 1279 spin_unlock(&inode->i_lock); 1280 spin_unlock(&wb->list_lock); 1281 } 1282 EXPORT_SYMBOL(inode_io_list_del); 1283 1284 /* 1285 * mark an inode as under writeback on the sb 1286 */ 1287 void sb_mark_inode_writeback(struct inode *inode) 1288 { 1289 struct super_block *sb = inode->i_sb; 1290 unsigned long flags; 1291 1292 if (list_empty(&inode->i_wb_list)) { 1293 spin_lock_irqsave(&sb->s_inode_wblist_lock, flags); 1294 if (list_empty(&inode->i_wb_list)) { 1295 list_add_tail(&inode->i_wb_list, &sb->s_inodes_wb); 1296 trace_sb_mark_inode_writeback(inode); 1297 } 1298 spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags); 1299 } 1300 } 1301 1302 /* 1303 * clear an inode as under writeback on the sb 1304 */ 1305 void sb_clear_inode_writeback(struct inode *inode) 1306 { 1307 struct super_block *sb = inode->i_sb; 1308 unsigned long flags; 1309 1310 if (!list_empty(&inode->i_wb_list)) { 1311 spin_lock_irqsave(&sb->s_inode_wblist_lock, flags); 1312 if (!list_empty(&inode->i_wb_list)) { 1313 list_del_init(&inode->i_wb_list); 1314 trace_sb_clear_inode_writeback(inode); 1315 } 1316 spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags); 1317 } 1318 } 1319 1320 /* 1321 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the 1322 * furthest end of its superblock's dirty-inode list. 1323 * 1324 * Before stamping the inode's ->dirtied_when, we check to see whether it is 1325 * already the most-recently-dirtied inode on the b_dirty list. 
 * If that is the case then the inode must have been redirtied while it
 * was being written out and we don't reset its dirtied_when.
 */
static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&inode->i_lock);

	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = wb_inode(wb->b_dirty.next);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	inode_io_list_move_locked(inode, wb, &wb->b_dirty);
	inode->i_state &= ~I_SYNC_QUEUED;
}

static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
{
	spin_lock(&inode->i_lock);
	redirty_tail_locked(inode, wb);
	spin_unlock(&inode->i_lock);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
{
	inode_io_list_move_locked(inode, wb, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	inode->i_state &= ~I_SYNC;
	/* If inode is clean and unused, put it into LRU now... */
	inode_add_lru(inode);
	/* Waiters must see I_SYNC cleared before being woken up */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}

#define EXPIRE_DIRTY_ATIME 0x0001

/*
 * Move expired (dirtied before dirtied_before) dirty inodes from
 * @delaying_queue to @dispatch_queue.
 */
static int move_expired_inodes(struct list_head *delaying_queue,
			       struct list_head *dispatch_queue,
			       unsigned long dirtied_before)
{
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;
	int moved = 0;

	while (!list_empty(delaying_queue)) {
		inode = wb_inode(delaying_queue->prev);
		if (inode_dirtied_after(inode, dirtied_before))
			break;
		list_move(&inode->i_io_list, &tmp);
		moved++;
		spin_lock(&inode->i_lock);
		inode->i_state |= I_SYNC_QUEUED;
		spin_unlock(&inode->i_lock);
		if (sb_is_blkdev_sb(inode->i_sb))
			continue;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		goto out;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		sb = wb_inode(tmp.prev)->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = wb_inode(pos);
			if (inode->i_sb == sb)
				list_move(&inode->i_io_list, dispatch_queue);
		}
	}
out:
	return moved;
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 * Before
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    gf         edc     BA
 * After
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    g          fBAedc
 *                                           |
 *                                           +--> dequeue for IO
 */
static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work,
		     unsigned long dirtied_before)
{
	int moved;
	unsigned long time_expire_jif = dirtied_before;

	assert_spin_locked(&wb->list_lock);
	list_splice_init(&wb->b_more_io, &wb->b_io);
	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, dirtied_before);
	if (!work->for_sync)
		time_expire_jif = jiffies - dirtytime_expire_interval * HZ;
	moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
				     time_expire_jif);
	if (moved)
		wb_io_lists_populated(wb);
	trace_writeback_queue_io(wb, work, dirtied_before, moved);
}

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) {
		trace_writeback_write_inode_start(inode, wbc);
		ret = inode->i_sb->s_op->write_inode(inode, wbc);
		trace_writeback_write_inode(inode, wbc);
		return ret;
	}
	return 0;
}

/*
 * Wait for writeback on an inode to complete.  Called with i_lock held.
 * Caller must make sure inode cannot go away when we drop i_lock.
 */
static void __inode_wait_for_writeback(struct inode *inode)
	__releases(inode->i_lock)
	__acquires(inode->i_lock)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode->i_lock);
		__wait_on_bit(wqh, &wq, bit_wait,
			      TASK_UNINTERRUPTIBLE);
		spin_lock(&inode->i_lock);
	}
}

/*
 * Wait for writeback on an inode to complete.  Caller must have inode pinned.
 */
void inode_wait_for_writeback(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	__inode_wait_for_writeback(inode);
	spin_unlock(&inode->i_lock);
}

/*
 * Sleep until I_SYNC is cleared.  This function must be called with i_lock
 * held and drops it.  It is intended for callers not holding any inode
 * reference so once i_lock is dropped, inode can go away.
 */
static void inode_sleep_on_writeback(struct inode *inode)
	__releases(inode->i_lock)
{
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	int sleep;

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	sleep = inode->i_state & I_SYNC;
	spin_unlock(&inode->i_lock);
	if (sleep)
		schedule();
	finish_wait(wqh, &wait);
}

/*
 * Find proper writeback list for the inode depending on its current state and
 * possibly also change of its state while we were doing writeback.  Here we
 * handle things such as livelock prevention or fairness of writeback among
 * inodes.  This function can be called only by the flusher thread - no one
 * else processes all inodes in writeback lists and requeueing inodes behind
 * the flusher thread's back can have unexpected consequences.
 */
static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
			  struct writeback_control *wbc)
{
	if (inode->i_state & I_FREEING)
		return;

	/*
	 * Sync livelock prevention.
	 * Each inode is tagged and synced in one shot.  If still dirty, it
	 * will be redirty_tail()'ed below.  Update the dirty time to prevent
	 * enqueue and sync it again.
	 */
	if ((inode->i_state & I_DIRTY) &&
	    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
		inode->dirtied_when = jiffies;

	if (wbc->pages_skipped) {
		/*
		 * writeback is not making progress due to locked
		 * buffers.  Skip this inode for now.
		 */
		redirty_tail_locked(inode, wb);
		return;
	}

	if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		/*
		 * We didn't write back all the pages.  nfs_writepages()
		 * sometimes bails out without doing anything.
		 */
		if (wbc->nr_to_write <= 0) {
			/* Slice used up. Queue for next turn. */
			requeue_io(inode, wb);
		} else {
			/*
			 * Writeback blocked by something other than
			 * congestion. Delay the inode for some time to
			 * avoid spinning on the CPU (100% iowait)
			 * retrying writeback of the dirty page/inode
			 * that cannot be performed immediately.
			 */
			redirty_tail_locked(inode, wb);
		}
	} else if (inode->i_state & I_DIRTY) {
		/*
		 * Filesystems can dirty the inode during writeback operations,
		 * such as delayed allocation during submission or metadata
		 * updates after data IO completion.
		 */
		redirty_tail_locked(inode, wb);
	} else if (inode->i_state & I_DIRTY_TIME) {
		inode->dirtied_when = jiffies;
		inode_io_list_move_locked(inode, wb, &wb->b_dirty_time);
		inode->i_state &= ~I_SYNC_QUEUED;
	} else {
		/* The inode is clean. Remove from writeback lists. */
		inode_cgwb_move_to_attached(inode, wb);
	}
}

/*
 * Write out an inode and its dirty pages (or some of its dirty pages, depending
 * on @wbc->nr_to_write), and clear the relevant dirty flags from i_state.
 *
 * This doesn't remove the inode from the writeback list it is on, except
 * potentially to move it from b_dirty_time to b_dirty due to timestamp
 * expiration.  The caller is otherwise responsible for writeback list handling.
 *
 * The caller is also responsible for setting the I_SYNC flag beforehand and
 * calling inode_sync_complete() to clear it afterwards.
 */
static int
__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	long nr_to_write = wbc->nr_to_write;
	unsigned dirty;
	int ret;

	WARN_ON(!(inode->i_state & I_SYNC));

	trace_writeback_single_inode_start(inode, wbc, nr_to_write);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion. We don't do it for sync(2) writeback because it has a
	 * separate, external IO completion path and ->sync_fs for guaranteeing
	 * inode metadata is written back correctly.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/*
	 * If the inode has dirty timestamps and we need to write them, call
	 * mark_inode_dirty_sync() to notify the filesystem about it and to
	 * change I_DIRTY_TIME into I_DIRTY_SYNC.
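	 * (For WB_SYNC_NONE writeback this only happens once the timestamps
	 * have been dirty for dirtytime_expire_interval, 12 hours by default.)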
1637 */ 1638 if ((inode->i_state & I_DIRTY_TIME) && 1639 (wbc->sync_mode == WB_SYNC_ALL || 1640 time_after(jiffies, inode->dirtied_time_when + 1641 dirtytime_expire_interval * HZ))) { 1642 trace_writeback_lazytime(inode); 1643 mark_inode_dirty_sync(inode); 1644 } 1645 1646 /* 1647 * Get and clear the dirty flags from i_state. This needs to be done 1648 * after calling writepages because some filesystems may redirty the 1649 * inode during writepages due to delalloc. It also needs to be done 1650 * after handling timestamp expiration, as that may dirty the inode too. 1651 */ 1652 spin_lock(&inode->i_lock); 1653 dirty = inode->i_state & I_DIRTY; 1654 inode->i_state &= ~dirty; 1655 1656 /* 1657 * Paired with smp_mb() in __mark_inode_dirty(). This allows 1658 * __mark_inode_dirty() to test i_state without grabbing i_lock - 1659 * either they see the I_DIRTY bits cleared or we see the dirtied 1660 * inode. 1661 * 1662 * I_DIRTY_PAGES is always cleared together above even if @mapping 1663 * still has dirty pages. The flag is reinstated after smp_mb() if 1664 * necessary. This guarantees that either __mark_inode_dirty() 1665 * sees clear I_DIRTY_PAGES or we see PAGECACHE_TAG_DIRTY. 1666 */ 1667 smp_mb(); 1668 1669 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) 1670 inode->i_state |= I_DIRTY_PAGES; 1671 else if (unlikely(inode->i_state & I_PINNING_FSCACHE_WB)) { 1672 if (!(inode->i_state & I_DIRTY_PAGES)) { 1673 inode->i_state &= ~I_PINNING_FSCACHE_WB; 1674 wbc->unpinned_fscache_wb = true; 1675 dirty |= I_PINNING_FSCACHE_WB; /* Cause write_inode */ 1676 } 1677 } 1678 1679 spin_unlock(&inode->i_lock); 1680 1681 /* Don't write the inode if only I_DIRTY_PAGES was set */ 1682 if (dirty & ~I_DIRTY_PAGES) { 1683 int err = write_inode(inode, wbc); 1684 if (ret == 0) 1685 ret = err; 1686 } 1687 wbc->unpinned_fscache_wb = false; 1688 trace_writeback_single_inode(inode, wbc, nr_to_write); 1689 return ret; 1690 } 1691 1692 /* 1693 * Write out an inode's dirty data and metadata on-demand, i.e. separately from 1694 * the regular batched writeback done by the flusher threads in 1695 * writeback_sb_inodes(). @wbc controls various aspects of the write, such as 1696 * whether it is a data-integrity sync (%WB_SYNC_ALL) or not (%WB_SYNC_NONE). 1697 * 1698 * To prevent the inode from going away, either the caller must have a reference 1699 * to the inode, or the inode must have I_WILL_FREE or I_FREEING set. 1700 */ 1701 static int writeback_single_inode(struct inode *inode, 1702 struct writeback_control *wbc) 1703 { 1704 struct bdi_writeback *wb; 1705 int ret = 0; 1706 1707 spin_lock(&inode->i_lock); 1708 if (!atomic_read(&inode->i_count)) 1709 WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING))); 1710 else 1711 WARN_ON(inode->i_state & I_WILL_FREE); 1712 1713 if (inode->i_state & I_SYNC) { 1714 /* 1715 * Writeback is already running on the inode. For WB_SYNC_NONE, 1716 * that's enough and we can just return. For WB_SYNC_ALL, we 1717 * must wait for the existing writeback to complete, then do 1718 * writeback again if there's anything left. 1719 */ 1720 if (wbc->sync_mode != WB_SYNC_ALL) 1721 goto out; 1722 __inode_wait_for_writeback(inode); 1723 } 1724 WARN_ON(inode->i_state & I_SYNC); 1725 /* 1726 * If the inode is already fully clean, then there's nothing to do. 1727 * 1728 * For data-integrity syncs we also need to check whether any pages are 1729 * still under writeback, e.g. due to prior WB_SYNC_NONE writeback. If 1730 * there are any such pages, we'll need to wait for them. 
1731 */ 1732 if (!(inode->i_state & I_DIRTY_ALL) && 1733 (wbc->sync_mode != WB_SYNC_ALL || 1734 !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK))) 1735 goto out; 1736 inode->i_state |= I_SYNC; 1737 wbc_attach_and_unlock_inode(wbc, inode); 1738 1739 ret = __writeback_single_inode(inode, wbc); 1740 1741 wbc_detach_inode(wbc); 1742 1743 wb = inode_to_wb_and_lock_list(inode); 1744 spin_lock(&inode->i_lock); 1745 /* 1746 * If the inode is now fully clean, then it can be safely removed from 1747 * its writeback list (if any). Otherwise the flusher threads are 1748 * responsible for the writeback lists. 1749 */ 1750 if (!(inode->i_state & I_DIRTY_ALL)) 1751 inode_cgwb_move_to_attached(inode, wb); 1752 spin_unlock(&wb->list_lock); 1753 inode_sync_complete(inode); 1754 out: 1755 spin_unlock(&inode->i_lock); 1756 return ret; 1757 } 1758 1759 static long writeback_chunk_size(struct bdi_writeback *wb, 1760 struct wb_writeback_work *work) 1761 { 1762 long pages; 1763 1764 /* 1765 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty 1766 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX 1767 * here avoids calling into writeback_inodes_wb() more than once. 1768 * 1769 * The intended call sequence for WB_SYNC_ALL writeback is: 1770 * 1771 * wb_writeback() 1772 * writeback_sb_inodes() <== called only once 1773 * write_cache_pages() <== called once for each inode 1774 * (quickly) tag currently dirty pages 1775 * (maybe slowly) sync all tagged pages 1776 */ 1777 if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages) 1778 pages = LONG_MAX; 1779 else { 1780 pages = min(wb->avg_write_bandwidth / 2, 1781 global_wb_domain.dirty_limit / DIRTY_SCOPE); 1782 pages = min(pages, work->nr_pages); 1783 pages = round_down(pages + MIN_WRITEBACK_PAGES, 1784 MIN_WRITEBACK_PAGES); 1785 } 1786 1787 return pages; 1788 } 1789 1790 /* 1791 * Write a portion of b_io inodes which belong to @sb. 1792 * 1793 * Return the number of pages and/or inodes written. 1794 * 1795 * NOTE! This is called with wb->list_lock held, and will 1796 * unlock and relock that for each inode it ends up doing 1797 * IO for. 1798 */ 1799 static long writeback_sb_inodes(struct super_block *sb, 1800 struct bdi_writeback *wb, 1801 struct wb_writeback_work *work) 1802 { 1803 struct writeback_control wbc = { 1804 .sync_mode = work->sync_mode, 1805 .tagged_writepages = work->tagged_writepages, 1806 .for_kupdate = work->for_kupdate, 1807 .for_background = work->for_background, 1808 .for_sync = work->for_sync, 1809 .range_cyclic = work->range_cyclic, 1810 .range_start = 0, 1811 .range_end = LLONG_MAX, 1812 }; 1813 unsigned long start_time = jiffies; 1814 long write_chunk; 1815 long wrote = 0; /* count both pages and inodes */ 1816 1817 while (!list_empty(&wb->b_io)) { 1818 struct inode *inode = wb_inode(wb->b_io.prev); 1819 struct bdi_writeback *tmp_wb; 1820 1821 if (inode->i_sb != sb) { 1822 if (work->sb) { 1823 /* 1824 * We only want to write back data for this 1825 * superblock, move all inodes not belonging 1826 * to it back onto the dirty list. 1827 */ 1828 redirty_tail(inode, wb); 1829 continue; 1830 } 1831 1832 /* 1833 * The inode belongs to a different superblock. 1834 * Bounce back to the caller to unpin this and 1835 * pin the next superblock. 1836 */ 1837 break; 1838 } 1839 1840 /* 1841 * Don't bother with new inodes or inodes being freed, first 1842 * kind does not need periodic writeout yet, and for the latter 1843 * kind writeout is handled by the freer. 
1844 */ 1845 spin_lock(&inode->i_lock); 1846 if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) { 1847 redirty_tail_locked(inode, wb); 1848 spin_unlock(&inode->i_lock); 1849 continue; 1850 } 1851 if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) { 1852 /* 1853 * If this inode is locked for writeback and we are not 1854 * doing writeback-for-data-integrity, move it to 1855 * b_more_io so that writeback can proceed with the 1856 * other inodes on s_io. 1857 * 1858 * We'll have another go at writing back this inode 1859 * when we completed a full scan of b_io. 1860 */ 1861 spin_unlock(&inode->i_lock); 1862 requeue_io(inode, wb); 1863 trace_writeback_sb_inodes_requeue(inode); 1864 continue; 1865 } 1866 spin_unlock(&wb->list_lock); 1867 1868 /* 1869 * We already requeued the inode if it had I_SYNC set and we 1870 * are doing WB_SYNC_NONE writeback. So this catches only the 1871 * WB_SYNC_ALL case. 1872 */ 1873 if (inode->i_state & I_SYNC) { 1874 /* Wait for I_SYNC. This function drops i_lock... */ 1875 inode_sleep_on_writeback(inode); 1876 /* Inode may be gone, start again */ 1877 spin_lock(&wb->list_lock); 1878 continue; 1879 } 1880 inode->i_state |= I_SYNC; 1881 wbc_attach_and_unlock_inode(&wbc, inode); 1882 1883 write_chunk = writeback_chunk_size(wb, work); 1884 wbc.nr_to_write = write_chunk; 1885 wbc.pages_skipped = 0; 1886 1887 /* 1888 * We use I_SYNC to pin the inode in memory. While it is set 1889 * evict_inode() will wait so the inode cannot be freed. 1890 */ 1891 __writeback_single_inode(inode, &wbc); 1892 1893 wbc_detach_inode(&wbc); 1894 work->nr_pages -= write_chunk - wbc.nr_to_write; 1895 wrote += write_chunk - wbc.nr_to_write; 1896 1897 if (need_resched()) { 1898 /* 1899 * We're trying to balance between building up a nice 1900 * long list of IOs to improve our merge rate, and 1901 * getting those IOs out quickly for anyone throttling 1902 * in balance_dirty_pages(). cond_resched() doesn't 1903 * unplug, so get our IOs out the door before we 1904 * give up the CPU. 1905 */ 1906 if (current->plug) 1907 blk_flush_plug(current->plug, false); 1908 cond_resched(); 1909 } 1910 1911 /* 1912 * Requeue @inode if still dirty. Be careful as @inode may 1913 * have been switched to another wb in the meantime. 1914 */ 1915 tmp_wb = inode_to_wb_and_lock_list(inode); 1916 spin_lock(&inode->i_lock); 1917 if (!(inode->i_state & I_DIRTY_ALL)) 1918 wrote++; 1919 requeue_inode(inode, tmp_wb, &wbc); 1920 inode_sync_complete(inode); 1921 spin_unlock(&inode->i_lock); 1922 1923 if (unlikely(tmp_wb != wb)) { 1924 spin_unlock(&tmp_wb->list_lock); 1925 spin_lock(&wb->list_lock); 1926 } 1927 1928 /* 1929 * bail out to wb_writeback() often enough to check 1930 * background threshold and other termination conditions. 1931 */ 1932 if (wrote) { 1933 if (time_is_before_jiffies(start_time + HZ / 10UL)) 1934 break; 1935 if (work->nr_pages <= 0) 1936 break; 1937 } 1938 } 1939 return wrote; 1940 } 1941 1942 static long __writeback_inodes_wb(struct bdi_writeback *wb, 1943 struct wb_writeback_work *work) 1944 { 1945 unsigned long start_time = jiffies; 1946 long wrote = 0; 1947 1948 while (!list_empty(&wb->b_io)) { 1949 struct inode *inode = wb_inode(wb->b_io.prev); 1950 struct super_block *sb = inode->i_sb; 1951 1952 if (!trylock_super(sb)) { 1953 /* 1954 * trylock_super() may fail consistently due to 1955 * s_umount being grabbed by someone else. Don't use 1956 * requeue_io() to avoid busy retrying the inode/sb. 
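 *
 * (redirty_tail() below moves the inode to the tail of b_dirty,
 * refreshing dirtied_when if needed, so it is picked up again by a
 * later queue_io() pass instead of being retried straight away from
 * b_more_io.)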
1957 */ 1958 redirty_tail(inode, wb); 1959 continue; 1960 } 1961 wrote += writeback_sb_inodes(sb, wb, work); 1962 up_read(&sb->s_umount); 1963 1964 /* refer to the same tests at the end of writeback_sb_inodes */ 1965 if (wrote) { 1966 if (time_is_before_jiffies(start_time + HZ / 10UL)) 1967 break; 1968 if (work->nr_pages <= 0) 1969 break; 1970 } 1971 } 1972 /* Leave any unwritten inodes on b_io */ 1973 return wrote; 1974 } 1975 1976 static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages, 1977 enum wb_reason reason) 1978 { 1979 struct wb_writeback_work work = { 1980 .nr_pages = nr_pages, 1981 .sync_mode = WB_SYNC_NONE, 1982 .range_cyclic = 1, 1983 .reason = reason, 1984 }; 1985 struct blk_plug plug; 1986 1987 blk_start_plug(&plug); 1988 spin_lock(&wb->list_lock); 1989 if (list_empty(&wb->b_io)) 1990 queue_io(wb, &work, jiffies); 1991 __writeback_inodes_wb(wb, &work); 1992 spin_unlock(&wb->list_lock); 1993 blk_finish_plug(&plug); 1994 1995 return nr_pages - work.nr_pages; 1996 } 1997 1998 /* 1999 * Explicit flushing or periodic writeback of "old" data. 2000 * 2001 * Define "old": the first time one of an inode's pages is dirtied, we mark the 2002 * dirtying-time in the inode's address_space. So this periodic writeback code 2003 * just walks the superblock inode list, writing back any inodes which are 2004 * older than a specific point in time. 2005 * 2006 * Try to run once per dirty_writeback_interval. But if a writeback event 2007 * takes longer than a dirty_writeback_interval interval, then leave a 2008 * one-second gap. 2009 * 2010 * dirtied_before takes precedence over nr_to_write. So we'll only write back 2011 * all dirty pages if they are all attached to "old" mappings. 2012 */ 2013 static long wb_writeback(struct bdi_writeback *wb, 2014 struct wb_writeback_work *work) 2015 { 2016 long nr_pages = work->nr_pages; 2017 unsigned long dirtied_before = jiffies; 2018 struct inode *inode; 2019 long progress; 2020 struct blk_plug plug; 2021 2022 blk_start_plug(&plug); 2023 spin_lock(&wb->list_lock); 2024 for (;;) { 2025 /* 2026 * Stop writeback when nr_pages has been consumed 2027 */ 2028 if (work->nr_pages <= 0) 2029 break; 2030 2031 /* 2032 * Background writeout and kupdate-style writeback may 2033 * run forever. Stop them if there is other work to do 2034 * so that e.g. sync can proceed. They'll be restarted 2035 * after the other works are all done. 2036 */ 2037 if ((work->for_background || work->for_kupdate) && 2038 !list_empty(&wb->work_list)) 2039 break; 2040 2041 /* 2042 * For background writeout, stop when we are below the 2043 * background dirty threshold 2044 */ 2045 if (work->for_background && !wb_over_bg_thresh(wb)) 2046 break; 2047 2048 /* 2049 * Kupdate and background works are special and we want to 2050 * include all inodes that need writing. Livelock avoidance is 2051 * handled by these works yielding to any other work so we are 2052 * safe. 2053 */ 2054 if (work->for_kupdate) { 2055 dirtied_before = jiffies - 2056 msecs_to_jiffies(dirty_expire_interval * 10); 2057 } else if (work->for_background) 2058 dirtied_before = jiffies; 2059 2060 trace_writeback_start(wb, work); 2061 if (list_empty(&wb->b_io)) 2062 queue_io(wb, work, dirtied_before); 2063 if (work->sb) 2064 progress = writeback_sb_inodes(work->sb, wb, work); 2065 else 2066 progress = __writeback_inodes_wb(wb, work); 2067 trace_writeback_written(wb, work); 2068 2069 /* 2070 * Did we write something? Try for more 2071 * 2072 * Dirty inodes are moved to b_io for writeback in batches. 
2073 * The completion of the current batch does not necessarily 2074 * mean the overall work is done. So we keep looping as long 2075 * as made some progress on cleaning pages or inodes. 2076 */ 2077 if (progress) 2078 continue; 2079 /* 2080 * No more inodes for IO, bail 2081 */ 2082 if (list_empty(&wb->b_more_io)) 2083 break; 2084 /* 2085 * Nothing written. Wait for some inode to 2086 * become available for writeback. Otherwise 2087 * we'll just busyloop. 2088 */ 2089 trace_writeback_wait(wb, work); 2090 inode = wb_inode(wb->b_more_io.prev); 2091 spin_lock(&inode->i_lock); 2092 spin_unlock(&wb->list_lock); 2093 /* This function drops i_lock... */ 2094 inode_sleep_on_writeback(inode); 2095 spin_lock(&wb->list_lock); 2096 } 2097 spin_unlock(&wb->list_lock); 2098 blk_finish_plug(&plug); 2099 2100 return nr_pages - work->nr_pages; 2101 } 2102 2103 /* 2104 * Return the next wb_writeback_work struct that hasn't been processed yet. 2105 */ 2106 static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb) 2107 { 2108 struct wb_writeback_work *work = NULL; 2109 2110 spin_lock_bh(&wb->work_lock); 2111 if (!list_empty(&wb->work_list)) { 2112 work = list_entry(wb->work_list.next, 2113 struct wb_writeback_work, list); 2114 list_del_init(&work->list); 2115 } 2116 spin_unlock_bh(&wb->work_lock); 2117 return work; 2118 } 2119 2120 static long wb_check_background_flush(struct bdi_writeback *wb) 2121 { 2122 if (wb_over_bg_thresh(wb)) { 2123 2124 struct wb_writeback_work work = { 2125 .nr_pages = LONG_MAX, 2126 .sync_mode = WB_SYNC_NONE, 2127 .for_background = 1, 2128 .range_cyclic = 1, 2129 .reason = WB_REASON_BACKGROUND, 2130 }; 2131 2132 return wb_writeback(wb, &work); 2133 } 2134 2135 return 0; 2136 } 2137 2138 static long wb_check_old_data_flush(struct bdi_writeback *wb) 2139 { 2140 unsigned long expired; 2141 long nr_pages; 2142 2143 /* 2144 * When set to zero, disable periodic writeback 2145 */ 2146 if (!dirty_writeback_interval) 2147 return 0; 2148 2149 expired = wb->last_old_flush + 2150 msecs_to_jiffies(dirty_writeback_interval * 10); 2151 if (time_before(jiffies, expired)) 2152 return 0; 2153 2154 wb->last_old_flush = jiffies; 2155 nr_pages = get_nr_dirty_pages(); 2156 2157 if (nr_pages) { 2158 struct wb_writeback_work work = { 2159 .nr_pages = nr_pages, 2160 .sync_mode = WB_SYNC_NONE, 2161 .for_kupdate = 1, 2162 .range_cyclic = 1, 2163 .reason = WB_REASON_PERIODIC, 2164 }; 2165 2166 return wb_writeback(wb, &work); 2167 } 2168 2169 return 0; 2170 } 2171 2172 static long wb_check_start_all(struct bdi_writeback *wb) 2173 { 2174 long nr_pages; 2175 2176 if (!test_bit(WB_start_all, &wb->state)) 2177 return 0; 2178 2179 nr_pages = get_nr_dirty_pages(); 2180 if (nr_pages) { 2181 struct wb_writeback_work work = { 2182 .nr_pages = wb_split_bdi_pages(wb, nr_pages), 2183 .sync_mode = WB_SYNC_NONE, 2184 .range_cyclic = 1, 2185 .reason = wb->start_all_reason, 2186 }; 2187 2188 nr_pages = wb_writeback(wb, &work); 2189 } 2190 2191 clear_bit(WB_start_all, &wb->state); 2192 return nr_pages; 2193 } 2194 2195 2196 /* 2197 * Retrieve work items and do the writeback they describe 2198 */ 2199 static long wb_do_writeback(struct bdi_writeback *wb) 2200 { 2201 struct wb_writeback_work *work; 2202 long wrote = 0; 2203 2204 set_bit(WB_writeback_running, &wb->state); 2205 while ((work = get_next_work_item(wb)) != NULL) { 2206 trace_writeback_exec(wb, work); 2207 wrote += wb_writeback(wb, work); 2208 finish_writeback_work(wb, work); 2209 } 2210 2211 /* 2212 * Check for a flush-everything request 2213 */ 
2214 wrote += wb_check_start_all(wb);
2215
2216 /*
2217 * Check for periodic writeback, kupdated() style
2218 */
2219 wrote += wb_check_old_data_flush(wb);
2220 wrote += wb_check_background_flush(wb);
2221 clear_bit(WB_writeback_running, &wb->state);
2222
2223 return wrote;
2224 }
2225
2226 /*
2227 * Handle writeback of dirty data for the device backed by this bdi. Also
2228 * reschedules periodically and does kupdated style flushing.
2229 */
2230 void wb_workfn(struct work_struct *work)
2231 {
2232 struct bdi_writeback *wb = container_of(to_delayed_work(work),
2233 struct bdi_writeback, dwork);
2234 long pages_written;
2235
2236 set_worker_desc("flush-%s", bdi_dev_name(wb->bdi));
2237 current->flags |= PF_SWAPWRITE;
2238
2239 if (likely(!current_is_workqueue_rescuer() ||
2240 !test_bit(WB_registered, &wb->state))) {
2241 /*
2242 * The normal path. Keep writing back @wb until its
2243 * work_list is empty. Note that this path is also taken
2244 * if @wb is shutting down even when we're running off the
2245 * rescuer as work_list needs to be drained.
2246 */
2247 do {
2248 pages_written = wb_do_writeback(wb);
2249 trace_writeback_pages_written(pages_written);
2250 } while (!list_empty(&wb->work_list));
2251 } else {
2252 /*
2253 * bdi_wq can't get enough workers and we're running off
2254 * the emergency worker. Don't hog it. Hopefully, 1024 is
2255 * enough for efficient IO.
2256 */
2257 pages_written = writeback_inodes_wb(wb, 1024,
2258 WB_REASON_FORKER_THREAD);
2259 trace_writeback_pages_written(pages_written);
2260 }
2261
2262 if (!list_empty(&wb->work_list))
2263 wb_wakeup(wb);
2264 else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
2265 wb_wakeup_delayed(wb);
2266
2267 current->flags &= ~PF_SWAPWRITE;
2268 }
2269
2270 /*
2271 * Start writeback of `nr_pages' pages on this bdi. If `nr_pages' is zero,
2272 * write back the whole world.
2273 */
2274 static void __wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
2275 enum wb_reason reason)
2276 {
2277 struct bdi_writeback *wb;
2278
2279 if (!bdi_has_dirty_io(bdi))
2280 return;
2281
2282 list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
2283 wb_start_writeback(wb, reason);
2284 }
2285
2286 void wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
2287 enum wb_reason reason)
2288 {
2289 rcu_read_lock();
2290 __wakeup_flusher_threads_bdi(bdi, reason);
2291 rcu_read_unlock();
2292 }
2293
2294 /*
2295 * Wake up the flusher threads to start writeback of all currently dirty pages
2296 */
2297 void wakeup_flusher_threads(enum wb_reason reason)
2298 {
2299 struct backing_dev_info *bdi;
2300
2301 /*
2302 * If we are expecting writeback progress we must submit plugged IO.
2303 */
2304 if (blk_needs_flush_plug(current))
2305 blk_flush_plug(current->plug, true);
2306
2307 rcu_read_lock();
2308 list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
2309 __wakeup_flusher_threads_bdi(bdi, reason);
2310 rcu_read_unlock();
2311 }
2312
2313 /*
2314 * Wake up the bdis periodically to make sure dirtytime inodes eventually
2315 * get written back. We deliberately do *not* check the
2316 * b_dirty_time list in wb_has_dirty_io(), since this would cause the
2317 * kernel to be constantly waking up once there are any dirtytime
2318 * inodes on the system. So instead we define a separate delayed work
2319 * function which gets called much more rarely. (By default, only
2320 * once every 12 hours.)
2321 *
2322 * If there is any other write activity going on in the file system,
2323 * this function won't be necessary.
But if the only thing that has 2324 * happened on the file system is a dirtytime inode caused by an atime 2325 * update, we need this infrastructure below to make sure that inode 2326 * eventually gets pushed out to disk. 2327 */ 2328 static void wakeup_dirtytime_writeback(struct work_struct *w); 2329 static DECLARE_DELAYED_WORK(dirtytime_work, wakeup_dirtytime_writeback); 2330 2331 static void wakeup_dirtytime_writeback(struct work_struct *w) 2332 { 2333 struct backing_dev_info *bdi; 2334 2335 rcu_read_lock(); 2336 list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) { 2337 struct bdi_writeback *wb; 2338 2339 list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node) 2340 if (!list_empty(&wb->b_dirty_time)) 2341 wb_wakeup(wb); 2342 } 2343 rcu_read_unlock(); 2344 schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ); 2345 } 2346 2347 static int __init start_dirtytime_writeback(void) 2348 { 2349 schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ); 2350 return 0; 2351 } 2352 __initcall(start_dirtytime_writeback); 2353 2354 int dirtytime_interval_handler(struct ctl_table *table, int write, 2355 void *buffer, size_t *lenp, loff_t *ppos) 2356 { 2357 int ret; 2358 2359 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 2360 if (ret == 0 && write) 2361 mod_delayed_work(system_wq, &dirtytime_work, 0); 2362 return ret; 2363 } 2364 2365 /** 2366 * __mark_inode_dirty - internal function to mark an inode dirty 2367 * 2368 * @inode: inode to mark 2369 * @flags: what kind of dirty, e.g. I_DIRTY_SYNC. This can be a combination of 2370 * multiple I_DIRTY_* flags, except that I_DIRTY_TIME can't be combined 2371 * with I_DIRTY_PAGES. 2372 * 2373 * Mark an inode as dirty. We notify the filesystem, then update the inode's 2374 * dirty flags. Then, if needed we add the inode to the appropriate dirty list. 2375 * 2376 * Most callers should use mark_inode_dirty() or mark_inode_dirty_sync() 2377 * instead of calling this directly. 2378 * 2379 * CAREFUL! We only add the inode to the dirty list if it is hashed or if it 2380 * refers to a blockdev. Unhashed inodes will never be added to the dirty list 2381 * even if they are later hashed, as they will have been marked dirty already. 2382 * 2383 * In short, ensure you hash any inodes _before_ you start marking them dirty. 2384 * 2385 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of 2386 * the block-special inode (/dev/hda1) itself. And the ->dirtied_when field of 2387 * the kernel-internal blockdev inode represents the dirtying time of the 2388 * blockdev's pages. This is why for I_DIRTY_PAGES we always use 2389 * page->mapping->host, so the page-dirtying time is recorded in the internal 2390 * blockdev inode. 2391 */ 2392 void __mark_inode_dirty(struct inode *inode, int flags) 2393 { 2394 struct super_block *sb = inode->i_sb; 2395 int dirtytime = 0; 2396 2397 trace_writeback_mark_inode_dirty(inode, flags); 2398 2399 if (flags & I_DIRTY_INODE) { 2400 /* 2401 * Notify the filesystem about the inode being dirtied, so that 2402 * (if needed) it can update on-disk fields and journal the 2403 * inode. This is only needed when the inode itself is being 2404 * dirtied now. I.e. it's only needed for I_DIRTY_INODE, not 2405 * for just I_DIRTY_PAGES or I_DIRTY_TIME. 
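 *
 * A journalling filesystem typically uses this hook to log the inode
 * update; ext4_dirty_inode(), for instance, starts a transaction and
 * marks the inode dirty in the journal, though the exact behaviour is
 * filesystem specific.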
2406 */ 2407 trace_writeback_dirty_inode_start(inode, flags); 2408 if (sb->s_op->dirty_inode) 2409 sb->s_op->dirty_inode(inode, flags & I_DIRTY_INODE); 2410 trace_writeback_dirty_inode(inode, flags); 2411 2412 /* I_DIRTY_INODE supersedes I_DIRTY_TIME. */ 2413 flags &= ~I_DIRTY_TIME; 2414 } else { 2415 /* 2416 * Else it's either I_DIRTY_PAGES, I_DIRTY_TIME, or nothing. 2417 * (We don't support setting both I_DIRTY_PAGES and I_DIRTY_TIME 2418 * in one call to __mark_inode_dirty().) 2419 */ 2420 dirtytime = flags & I_DIRTY_TIME; 2421 WARN_ON_ONCE(dirtytime && flags != I_DIRTY_TIME); 2422 } 2423 2424 /* 2425 * Paired with smp_mb() in __writeback_single_inode() for the 2426 * following lockless i_state test. See there for details. 2427 */ 2428 smp_mb(); 2429 2430 if (((inode->i_state & flags) == flags) || 2431 (dirtytime && (inode->i_state & I_DIRTY_INODE))) 2432 return; 2433 2434 spin_lock(&inode->i_lock); 2435 if (dirtytime && (inode->i_state & I_DIRTY_INODE)) 2436 goto out_unlock_inode; 2437 if ((inode->i_state & flags) != flags) { 2438 const int was_dirty = inode->i_state & I_DIRTY; 2439 2440 inode_attach_wb(inode, NULL); 2441 2442 /* I_DIRTY_INODE supersedes I_DIRTY_TIME. */ 2443 if (flags & I_DIRTY_INODE) 2444 inode->i_state &= ~I_DIRTY_TIME; 2445 inode->i_state |= flags; 2446 2447 /* 2448 * If the inode is queued for writeback by flush worker, just 2449 * update its dirty state. Once the flush worker is done with 2450 * the inode it will place it on the appropriate superblock 2451 * list, based upon its state. 2452 */ 2453 if (inode->i_state & I_SYNC_QUEUED) 2454 goto out_unlock_inode; 2455 2456 /* 2457 * Only add valid (hashed) inodes to the superblock's 2458 * dirty list. Add blockdev inodes as well. 2459 */ 2460 if (!S_ISBLK(inode->i_mode)) { 2461 if (inode_unhashed(inode)) 2462 goto out_unlock_inode; 2463 } 2464 if (inode->i_state & I_FREEING) 2465 goto out_unlock_inode; 2466 2467 /* 2468 * If the inode was already on b_dirty/b_io/b_more_io, don't 2469 * reposition it (that would break b_dirty time-ordering). 2470 */ 2471 if (!was_dirty) { 2472 struct bdi_writeback *wb; 2473 struct list_head *dirty_list; 2474 bool wakeup_bdi = false; 2475 2476 wb = locked_inode_to_wb_and_lock_list(inode); 2477 2478 inode->dirtied_when = jiffies; 2479 if (dirtytime) 2480 inode->dirtied_time_when = jiffies; 2481 2482 if (inode->i_state & I_DIRTY) 2483 dirty_list = &wb->b_dirty; 2484 else 2485 dirty_list = &wb->b_dirty_time; 2486 2487 wakeup_bdi = inode_io_list_move_locked(inode, wb, 2488 dirty_list); 2489 2490 spin_unlock(&wb->list_lock); 2491 trace_writeback_dirty_inode_enqueue(inode); 2492 2493 /* 2494 * If this is the first dirty inode for this bdi, 2495 * we have to wake-up the corresponding bdi thread 2496 * to make sure background write-back happens 2497 * later. 2498 */ 2499 if (wakeup_bdi && 2500 (wb->bdi->capabilities & BDI_CAP_WRITEBACK)) 2501 wb_wakeup_delayed(wb); 2502 return; 2503 } 2504 } 2505 out_unlock_inode: 2506 spin_unlock(&inode->i_lock); 2507 } 2508 EXPORT_SYMBOL(__mark_inode_dirty); 2509 2510 /* 2511 * The @s_sync_lock is used to serialise concurrent sync operations 2512 * to avoid lock contention problems with concurrent wait_sb_inodes() calls. 2513 * Concurrent callers will block on the s_sync_lock rather than doing contending 2514 * walks. 
The queueing maintains the behaviour required by sync(2): all the IO that
2515 * has been issued up to the time this function is entered is guaranteed to be
2516 * completed by the time we have gained the lock and waited for all IO that is
2517 * in progress, regardless of the order in which callers are granted the lock.
2518 */
2519 static void wait_sb_inodes(struct super_block *sb)
2520 {
2521 LIST_HEAD(sync_list);
2522
2523 /*
2524 * We need to be protected against the filesystem going from
2525 * r/o to r/w or vice versa.
2526 */
2527 WARN_ON(!rwsem_is_locked(&sb->s_umount));
2528
2529 mutex_lock(&sb->s_sync_lock);
2530
2531 /*
2532 * Splice the writeback list onto a temporary list to avoid waiting on
2533 * inodes that have started writeback after this point.
2534 *
2535 * Use rcu_read_lock() to keep the inodes around until we have a
2536 * reference. s_inode_wblist_lock protects sb->s_inodes_wb as well as
2537 * the local list because inodes can be dropped from either by writeback
2538 * completion.
2539 */
2540 rcu_read_lock();
2541 spin_lock_irq(&sb->s_inode_wblist_lock);
2542 list_splice_init(&sb->s_inodes_wb, &sync_list);
2543
2544 /*
2545 * Data integrity sync. We must wait for all pages under writeback,
2546 * because there may have been pages dirtied before our sync call whose
2547 * writeout was started before we could write them out ourselves. In
2548 * that case the inode may not be on the dirty list, but we still have
2549 * to wait for that writeout.
2550 */
2551 while (!list_empty(&sync_list)) {
2552 struct inode *inode = list_first_entry(&sync_list, struct inode,
2553 i_wb_list);
2554 struct address_space *mapping = inode->i_mapping;
2555
2556 /*
2557 * Move each inode back to the wb list before we drop the lock
2558 * to preserve consistency between i_wb_list and the mapping
2559 * writeback tag. Writeback completion is responsible for removing
2560 * the inode from either list once the writeback tag is cleared.
2561 */
2562 list_move_tail(&inode->i_wb_list, &sb->s_inodes_wb);
2563
2564 /*
2565 * The mapping can appear untagged while still on-list since we
2566 * do not have the mapping lock. Skip it here; wb completion
2567 * will remove it.
2568 */
2569 if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
2570 continue;
2571
2572 spin_unlock_irq(&sb->s_inode_wblist_lock);
2573
2574 spin_lock(&inode->i_lock);
2575 if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) {
2576 spin_unlock(&inode->i_lock);
2577
2578 spin_lock_irq(&sb->s_inode_wblist_lock);
2579 continue;
2580 }
2581 __iget(inode);
2582 spin_unlock(&inode->i_lock);
2583 rcu_read_unlock();
2584
2585 /*
2586 * We keep the error status of each individual mapping so that
2587 * applications can catch the writeback error using fsync(2).
2588 * See filemap_fdatawait_keep_errors() for details.
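 *
 * Unlike plain filemap_fdatawait(), that helper leaves the mapping's
 * error state in place, so a later fsync() or syncfs() against the
 * mapping can still observe and report it.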
2589 */ 2590 filemap_fdatawait_keep_errors(mapping); 2591 2592 cond_resched(); 2593 2594 iput(inode); 2595 2596 rcu_read_lock(); 2597 spin_lock_irq(&sb->s_inode_wblist_lock); 2598 } 2599 spin_unlock_irq(&sb->s_inode_wblist_lock); 2600 rcu_read_unlock(); 2601 mutex_unlock(&sb->s_sync_lock); 2602 } 2603 2604 static void __writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr, 2605 enum wb_reason reason, bool skip_if_busy) 2606 { 2607 struct backing_dev_info *bdi = sb->s_bdi; 2608 DEFINE_WB_COMPLETION(done, bdi); 2609 struct wb_writeback_work work = { 2610 .sb = sb, 2611 .sync_mode = WB_SYNC_NONE, 2612 .tagged_writepages = 1, 2613 .done = &done, 2614 .nr_pages = nr, 2615 .reason = reason, 2616 }; 2617 2618 if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info) 2619 return; 2620 WARN_ON(!rwsem_is_locked(&sb->s_umount)); 2621 2622 bdi_split_work_to_wbs(sb->s_bdi, &work, skip_if_busy); 2623 wb_wait_for_completion(&done); 2624 } 2625 2626 /** 2627 * writeback_inodes_sb_nr - writeback dirty inodes from given super_block 2628 * @sb: the superblock 2629 * @nr: the number of pages to write 2630 * @reason: reason why some writeback work initiated 2631 * 2632 * Start writeback on some inodes on this super_block. No guarantees are made 2633 * on how many (if any) will be written, and this function does not wait 2634 * for IO completion of submitted IO. 2635 */ 2636 void writeback_inodes_sb_nr(struct super_block *sb, 2637 unsigned long nr, 2638 enum wb_reason reason) 2639 { 2640 __writeback_inodes_sb_nr(sb, nr, reason, false); 2641 } 2642 EXPORT_SYMBOL(writeback_inodes_sb_nr); 2643 2644 /** 2645 * writeback_inodes_sb - writeback dirty inodes from given super_block 2646 * @sb: the superblock 2647 * @reason: reason why some writeback work was initiated 2648 * 2649 * Start writeback on some inodes on this super_block. No guarantees are made 2650 * on how many (if any) will be written, and this function does not wait 2651 * for IO completion of submitted IO. 2652 */ 2653 void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason) 2654 { 2655 return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason); 2656 } 2657 EXPORT_SYMBOL(writeback_inodes_sb); 2658 2659 /** 2660 * try_to_writeback_inodes_sb - try to start writeback if none underway 2661 * @sb: the superblock 2662 * @reason: reason why some writeback work was initiated 2663 * 2664 * Invoke __writeback_inodes_sb_nr if no writeback is currently underway. 2665 */ 2666 void try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason) 2667 { 2668 if (!down_read_trylock(&sb->s_umount)) 2669 return; 2670 2671 __writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason, true); 2672 up_read(&sb->s_umount); 2673 } 2674 EXPORT_SYMBOL(try_to_writeback_inodes_sb); 2675 2676 /** 2677 * sync_inodes_sb - sync sb inode pages 2678 * @sb: the superblock 2679 * 2680 * This function writes and waits on any dirty inode belonging to this 2681 * super_block. 2682 */ 2683 void sync_inodes_sb(struct super_block *sb) 2684 { 2685 struct backing_dev_info *bdi = sb->s_bdi; 2686 DEFINE_WB_COMPLETION(done, bdi); 2687 struct wb_writeback_work work = { 2688 .sb = sb, 2689 .sync_mode = WB_SYNC_ALL, 2690 .nr_pages = LONG_MAX, 2691 .range_cyclic = 0, 2692 .done = &done, 2693 .reason = WB_REASON_SYNC, 2694 .for_sync = 1, 2695 }; 2696 2697 /* 2698 * Can't skip on !bdi_has_dirty() because we should wait for !dirty 2699 * inodes under writeback and I_DIRTY_TIME inodes ignored by 2700 * bdi_has_dirty() need to be written out too. 
2701 */ 2702 if (bdi == &noop_backing_dev_info) 2703 return; 2704 WARN_ON(!rwsem_is_locked(&sb->s_umount)); 2705 2706 /* protect against inode wb switch, see inode_switch_wbs_work_fn() */ 2707 bdi_down_write_wb_switch_rwsem(bdi); 2708 bdi_split_work_to_wbs(bdi, &work, false); 2709 wb_wait_for_completion(&done); 2710 bdi_up_write_wb_switch_rwsem(bdi); 2711 2712 wait_sb_inodes(sb); 2713 } 2714 EXPORT_SYMBOL(sync_inodes_sb); 2715 2716 /** 2717 * write_inode_now - write an inode to disk 2718 * @inode: inode to write to disk 2719 * @sync: whether the write should be synchronous or not 2720 * 2721 * This function commits an inode to disk immediately if it is dirty. This is 2722 * primarily needed by knfsd. 2723 * 2724 * The caller must either have a ref on the inode or must have set I_WILL_FREE. 2725 */ 2726 int write_inode_now(struct inode *inode, int sync) 2727 { 2728 struct writeback_control wbc = { 2729 .nr_to_write = LONG_MAX, 2730 .sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE, 2731 .range_start = 0, 2732 .range_end = LLONG_MAX, 2733 }; 2734 2735 if (!mapping_can_writeback(inode->i_mapping)) 2736 wbc.nr_to_write = 0; 2737 2738 might_sleep(); 2739 return writeback_single_inode(inode, &wbc); 2740 } 2741 EXPORT_SYMBOL(write_inode_now); 2742 2743 /** 2744 * sync_inode_metadata - write an inode to disk 2745 * @inode: the inode to sync 2746 * @wait: wait for I/O to complete. 2747 * 2748 * Write an inode to disk and adjust its dirty state after completion. 2749 * 2750 * Note: only writes the actual inode, no associated data or other metadata. 2751 */ 2752 int sync_inode_metadata(struct inode *inode, int wait) 2753 { 2754 struct writeback_control wbc = { 2755 .sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE, 2756 .nr_to_write = 0, /* metadata-only */ 2757 }; 2758 2759 return writeback_single_inode(inode, &wbc); 2760 } 2761 EXPORT_SYMBOL(sync_inode_metadata); 2762
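/*
 * Usage sketch (illustrative only; example_fsync() is a hypothetical
 * helper and not taken from any in-tree filesystem): a minimal
 * fsync-style path would typically flush and wait on the data pages
 * first, then push out the inode itself via sync_inode_metadata():
 *
 *	static int example_fsync(struct file *file, loff_t start,
 *				 loff_t end, int datasync)
 *	{
 *		struct inode *inode = file_inode(file);
 *		int ret;
 *
 *		ret = file_write_and_wait_range(file, start, end);
 *		if (ret)
 *			return ret;
 *
 *		if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
 *			return 0;
 *
 *		return sync_inode_metadata(inode, 1);
 *	}
 *
 * Filesystems with their own transaction machinery will usually do
 * something more involved; see __generic_file_fsync() for the generic
 * implementation this sketch loosely follows.
 */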