// SPDX-License-Identifier: GPL-2.0-only

#include <linux/blkdev.h>
#include <linux/wait.h>
#include <linux/rbtree.h>
#include <linux/kthread.h>
#include <linux/backing-dev.h>
#include <linux/blk-cgroup.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

struct backing_dev_info noop_backing_dev_info;
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;
static const char *bdi_unknown_name = "(unknown)";

/*
 * bdi_lock protects bdi_tree and updates to bdi_list. bdi_list has RCU
 * reader side locking.
 */
DEFINE_SPINLOCK(bdi_lock);
static u64 bdi_id_cursor;
static struct rb_root bdi_tree = RB_ROOT;
LIST_HEAD(bdi_list);

/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;

#define K(x) ((x) << (PAGE_SHIFT - 10))
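/*
 * K() converts a page count into kilobytes: a page is 2^PAGE_SHIFT bytes and
 * a kilobyte is 2^10 bytes, so shifting left by (PAGE_SHIFT - 10) multiplies
 * by pages-per-kilobyte. With the common 4 KiB pages (PAGE_SHIFT == 12), for
 * example, K(x) == x << 2, so K(25) == 100 kB.
 */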

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
	bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	struct bdi_writeback *wb = &bdi->wb;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long wb_thresh;
	unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
	struct inode *inode;

	nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
	spin_lock(&wb->list_lock);
	list_for_each_entry(inode, &wb->b_dirty, i_io_list)
		nr_dirty++;
	list_for_each_entry(inode, &wb->b_io, i_io_list)
		nr_io++;
	list_for_each_entry(inode, &wb->b_more_io, i_io_list)
		nr_more_io++;
	list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
		if (inode->i_state & I_DIRTY_TIME)
			nr_dirty_time++;
	spin_unlock(&wb->list_lock);

	global_dirty_limits(&background_thresh, &dirty_thresh);
	wb_thresh = wb_calc_thresh(wb, dirty_thresh);

	seq_printf(m,
		   "BdiWriteback:       %10lu kB\n"
		   "BdiReclaimable:     %10lu kB\n"
		   "BdiDirtyThresh:     %10lu kB\n"
		   "DirtyThresh:        %10lu kB\n"
		   "BackgroundThresh:   %10lu kB\n"
		   "BdiDirtied:         %10lu kB\n"
		   "BdiWritten:         %10lu kB\n"
		   "BdiWriteBandwidth:  %10lu kBps\n"
		   "b_dirty:            %10lu\n"
		   "b_io:               %10lu\n"
		   "b_more_io:          %10lu\n"
		   "b_dirty_time:       %10lu\n"
		   "bdi_list:           %10u\n"
		   "state:              %10lx\n",
		   (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
		   (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
		   K(wb_thresh),
		   K(dirty_thresh),
		   K(background_thresh),
		   (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
		   (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
		   (unsigned long) K(wb->write_bandwidth),
		   nr_dirty,
		   nr_io,
		   nr_more_io,
		   nr_dirty_time,
		   !list_empty(&bdi->bdi_list), bdi->wb.state);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(bdi_debug_stats);

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);

	debugfs_create_file("stats", 0444, bdi->debug_dir, bdi,
			    &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
	debugfs_remove_recursive(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
				      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

static ssize_t read_ahead_kb_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned long read_ahead_kb;
	ssize_t ret;

	ret = kstrtoul(buf, 10, &read_ahead_kb);
	if (ret < 0)
		return ret;

	bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);

	return count;
}

#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return sysfs_emit(buf, "%lld\n", (long long)expr);		\
}									\
static DEVICE_ATTR_RW(name);

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))

static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_min_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio / BDI_RATIO_SCALE)

static ssize_t min_ratio_fine_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_min_ratio_no_scale(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(min_ratio_fine, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_max_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio / BDI_RATIO_SCALE)

static ssize_t max_ratio_fine_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_max_ratio_no_scale(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(max_ratio_fine, bdi->max_ratio)
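
/*
 * All of the ratios above are stored internally multiplied by
 * BDI_RATIO_SCALE: the plain min_ratio/max_ratio attributes divide by it
 * and therefore operate in whole percent, while the *_fine variants read
 * and write the scaled value directly for finer-grained control.
 */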

static ssize_t min_bytes_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n", bdi_get_min_bytes(bdi));
}

static ssize_t min_bytes_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	u64 bytes;
	ssize_t ret;

	ret = kstrtoull(buf, 10, &bytes);
	if (ret < 0)
		return ret;

	ret = bdi_set_min_bytes(bdi, bytes);
	if (!ret)
		ret = count;

	return ret;
}
static DEVICE_ATTR_RW(min_bytes);

static ssize_t max_bytes_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n", bdi_get_max_bytes(bdi));
}

static ssize_t max_bytes_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	u64 bytes;
	ssize_t ret;

	ret = kstrtoull(buf, 10, &bytes);
	if (ret < 0)
		return ret;

	ret = bdi_set_max_bytes(bdi, bytes);
	if (!ret)
		ret = count;

	return ret;
}
static DEVICE_ATTR_RW(max_bytes);

static ssize_t stable_pages_required_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	dev_warn_once(dev,
		"the stable_pages_required attribute has been removed. Use the stable_writes queue attribute instead.\n");
	return sysfs_emit(buf, "%d\n", 0);
}
static DEVICE_ATTR_RO(stable_pages_required);

static ssize_t strict_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int strict_limit;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &strict_limit);
	if (ret < 0)
		return ret;

	ret = bdi_set_strict_limit(bdi, strict_limit);
	if (!ret)
		ret = count;

	return ret;
}

static ssize_t strict_limit_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n",
			!!(bdi->capabilities & BDI_CAP_STRICTLIMIT));
}
static DEVICE_ATTR_RW(strict_limit);

static struct attribute *bdi_dev_attrs[] = {
	&dev_attr_read_ahead_kb.attr,
	&dev_attr_min_ratio.attr,
	&dev_attr_min_ratio_fine.attr,
	&dev_attr_max_ratio.attr,
	&dev_attr_max_ratio_fine.attr,
	&dev_attr_min_bytes.attr,
	&dev_attr_max_bytes.attr,
	&dev_attr_stable_pages_required.attr,
	&dev_attr_strict_limit.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bdi_dev);

static __init int bdi_class_init(void)
{
	bdi_class = class_create("bdi");
	if (IS_ERR(bdi_class))
		return PTR_ERR(bdi_class);

	bdi_class->dev_groups = bdi_dev_groups;
	bdi_debug_init();

	return 0;
}
postcore_initcall(bdi_class_init);

static int __init default_bdi_init(void)
{
	bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_UNBOUND |
				 WQ_SYSFS, 0);
	if (!bdi_wq)
		return -ENOMEM;
	return 0;
}
subsys_initcall(default_bdi_init);

/*
 * This function is used when the first inode for this wb is marked dirty. It
 * wakes up the corresponding bdi thread which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast-path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 *
 * We have to be careful not to postpone flush work if it is scheduled for
 * earlier. Thus we use queue_delayed_work().
 */
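/*
 * dirty_writeback_interval is expressed in centiseconds, so multiplying by
 * 10 below yields milliseconds: with the usual default interval of 500
 * (i.e. 5 seconds), the flusher is woken roughly msecs_to_jiffies(5000)
 * jiffies from now.
 */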
void wb_wakeup_delayed(struct bdi_writeback *wb)
{
	unsigned long timeout;

	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
	spin_lock_irq(&wb->work_lock);
	if (test_bit(WB_registered, &wb->state))
		queue_delayed_work(bdi_wq, &wb->dwork, timeout);
	spin_unlock_irq(&wb->work_lock);
}

static void wb_update_bandwidth_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(to_delayed_work(work),
						struct bdi_writeback, bw_dwork);

	wb_update_bandwidth(wb);
}

/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW		(100 << (20 - PAGE_SHIFT))
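/*
 * INIT_BW expresses 100 MB/s as pages per second: 100 MB is 100 << 20 bytes,
 * and dividing by the 2^PAGE_SHIFT page size leaves 100 << (20 - PAGE_SHIFT)
 * pages. With 4 KiB pages that works out to 100 << 8 == 25600 pages/s.
 */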

static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
		   gfp_t gfp)
{
	int i, err;

	memset(wb, 0, sizeof(*wb));

	wb->bdi = bdi;
	wb->last_old_flush = jiffies;
	INIT_LIST_HEAD(&wb->b_dirty);
	INIT_LIST_HEAD(&wb->b_io);
	INIT_LIST_HEAD(&wb->b_more_io);
	INIT_LIST_HEAD(&wb->b_dirty_time);
	spin_lock_init(&wb->list_lock);

	atomic_set(&wb->writeback_inodes, 0);
	wb->bw_time_stamp = jiffies;
	wb->balanced_dirty_ratelimit = INIT_BW;
	wb->dirty_ratelimit = INIT_BW;
	wb->write_bandwidth = INIT_BW;
	wb->avg_write_bandwidth = INIT_BW;

	spin_lock_init(&wb->work_lock);
	INIT_LIST_HEAD(&wb->work_list);
	INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
	INIT_DELAYED_WORK(&wb->bw_dwork, wb_update_bandwidth_workfn);
	wb->dirty_sleep = jiffies;

	err = fprop_local_init_percpu(&wb->completions, gfp);
	if (err)
		return err;

	for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
		err = percpu_counter_init(&wb->stat[i], 0, gfp);
		if (err)
			goto out_destroy_stat;
	}

	return 0;

out_destroy_stat:
	while (i--)
		percpu_counter_destroy(&wb->stat[i]);
	fprop_local_destroy_percpu(&wb->completions);
	return err;
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);

/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void wb_shutdown(struct bdi_writeback *wb)
{
	/* Make sure nobody queues further work */
	spin_lock_irq(&wb->work_lock);
	if (!test_and_clear_bit(WB_registered, &wb->state)) {
		spin_unlock_irq(&wb->work_lock);
		return;
	}
	spin_unlock_irq(&wb->work_lock);

	cgwb_remove_from_bdi_list(wb);
	/*
	 * Drain work list and shutdown the delayed_work. !WB_registered
	 * tells wb_workfn() that @wb is dying and its work_list needs to
	 * be drained no matter what.
	 */
	mod_delayed_work(bdi_wq, &wb->dwork, 0);
	flush_delayed_work(&wb->dwork);
	WARN_ON(!list_empty(&wb->work_list));
	flush_delayed_work(&wb->bw_dwork);
}

static void wb_exit(struct bdi_writeback *wb)
{
	int i;

	WARN_ON(delayed_work_pending(&wb->dwork));

	for (i = 0; i < NR_WB_STAT_ITEMS; i++)
		percpu_counter_destroy(&wb->stat[i]);

	fprop_local_destroy_percpu(&wb->completions);
}

#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/memcontrol.h>

/*
 * cgwb_lock protects bdi->cgwb_tree, blkcg->cgwb_list, offline_cgwbs and
 * memcg->cgwb_list. bdi->cgwb_tree is also RCU protected.
 */
static DEFINE_SPINLOCK(cgwb_lock);
static struct workqueue_struct *cgwb_release_wq;

static LIST_HEAD(offline_cgwbs);
static void cleanup_offline_cgwbs_workfn(struct work_struct *work);
static DECLARE_WORK(cleanup_offline_cgwbs_work, cleanup_offline_cgwbs_workfn);

static void cgwb_free_rcu(struct rcu_head *rcu_head)
{
	struct bdi_writeback *wb = container_of(rcu_head,
			struct bdi_writeback, rcu);

	percpu_ref_exit(&wb->refcnt);
	kfree(wb);
}

static void cgwb_release_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
						release_work);
	struct backing_dev_info *bdi = wb->bdi;

	mutex_lock(&wb->bdi->cgwb_release_mutex);
	wb_shutdown(wb);

	css_put(wb->memcg_css);
	css_put(wb->blkcg_css);
	mutex_unlock(&wb->bdi->cgwb_release_mutex);

	/* triggers blkg destruction if no online users left */
	blkcg_unpin_online(wb->blkcg_css);

	fprop_local_destroy_percpu(&wb->memcg_completions);

	spin_lock_irq(&cgwb_lock);
	list_del(&wb->offline_node);
	spin_unlock_irq(&cgwb_lock);

	wb_exit(wb);
	bdi_put(bdi);
	WARN_ON_ONCE(!list_empty(&wb->b_attached));
	call_rcu(&wb->rcu, cgwb_free_rcu);
}

static void cgwb_release(struct percpu_ref *refcnt)
{
	struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
						refcnt);
	queue_work(cgwb_release_wq, &wb->release_work);
}

static void cgwb_kill(struct bdi_writeback *wb)
{
	lockdep_assert_held(&cgwb_lock);

	WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
	list_del(&wb->memcg_node);
	list_del(&wb->blkcg_node);
	list_add(&wb->offline_node, &offline_cgwbs);
	percpu_ref_kill(&wb->refcnt);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
	spin_lock_irq(&cgwb_lock);
	list_del_rcu(&wb->bdi_node);
	spin_unlock_irq(&cgwb_lock);
}

static int cgwb_create(struct backing_dev_info *bdi,
		       struct cgroup_subsys_state *memcg_css, gfp_t gfp)
{
	struct mem_cgroup *memcg;
	struct cgroup_subsys_state *blkcg_css;
	struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
	struct bdi_writeback *wb;
	unsigned long flags;
	int ret = 0;

	memcg = mem_cgroup_from_css(memcg_css);
	blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
	memcg_cgwb_list = &memcg->cgwb_list;
	blkcg_cgwb_list = blkcg_get_cgwb_list(blkcg_css);

	/* look up again under lock and discard on blkcg mismatch */
	spin_lock_irqsave(&cgwb_lock, flags);
	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
	if (wb && wb->blkcg_css != blkcg_css) {
		cgwb_kill(wb);
		wb = NULL;
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (wb)
		goto out_put;

	/* need to create a new one */
	wb = kmalloc(sizeof(*wb), gfp);
	if (!wb) {
		ret = -ENOMEM;
		goto out_put;
	}

	ret = wb_init(wb, bdi, gfp);
	if (ret)
		goto err_free;

	ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
	if (ret)
		goto err_wb_exit;

	ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
	if (ret)
		goto err_ref_exit;

	wb->memcg_css = memcg_css;
	wb->blkcg_css = blkcg_css;
	INIT_LIST_HEAD(&wb->b_attached);
	INIT_WORK(&wb->release_work, cgwb_release_workfn);
	set_bit(WB_registered, &wb->state);
	bdi_get(bdi);

	/*
	 * The root wb determines the registered state of the whole bdi and
	 * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate
	 * whether they're still online. Don't link @wb if any is dead.
	 * See wb_memcg_offline() and wb_blkcg_offline().
	 */
	ret = -ENODEV;
	spin_lock_irqsave(&cgwb_lock, flags);
	if (test_bit(WB_registered, &bdi->wb.state) &&
	    blkcg_cgwb_list->next && memcg_cgwb_list->next) {
		/* we might have raced another instance of this function */
		ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
		if (!ret) {
			list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
			list_add(&wb->memcg_node, memcg_cgwb_list);
			list_add(&wb->blkcg_node, blkcg_cgwb_list);
			blkcg_pin_online(blkcg_css);
			css_get(memcg_css);
			css_get(blkcg_css);
		}
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (ret) {
		if (ret == -EEXIST)
			ret = 0;
		goto err_fprop_exit;
	}
	goto out_put;

err_fprop_exit:
	bdi_put(bdi);
	fprop_local_destroy_percpu(&wb->memcg_completions);
err_ref_exit:
	percpu_ref_exit(&wb->refcnt);
err_wb_exit:
	wb_exit(wb);
err_free:
	kfree(wb);
out_put:
	css_put(blkcg_css);
	return ret;
}

/**
 * wb_get_lookup - get wb for a given memcg
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 *
 * Try to get the wb for @memcg_css on @bdi. The returned wb has its
 * refcount incremented.
 *
 * This function uses css_get() on @memcg_css and thus expects its refcnt
 * to be positive on invocation. IOW, rcu_read_lock() protection on
 * @memcg_css isn't enough. try_get it before calling this function.
 *
 * A wb is keyed by its associated memcg. As blkcg implicitly enables
 * memcg on the default hierarchy, memcg association is guaranteed to be
 * more specific (equal or descendant to the associated blkcg) and thus can
 * identify both the memcg and blkcg associations.
 *
 * Because the blkcg associated with a memcg may change as blkcg is enabled
 * and disabled closer to root in the hierarchy, each wb keeps track of
 * both the memcg and blkcg associated with it and verifies the blkcg on
 * each lookup. On mismatch, the existing wb is discarded and a new one is
 * created.
 */
struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css)
{
	struct bdi_writeback *wb;

	if (!memcg_css->parent)
		return &bdi->wb;

	rcu_read_lock();
	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
	if (wb) {
		struct cgroup_subsys_state *blkcg_css;

		/* see whether the blkcg association has changed */
		blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
		if (unlikely(wb->blkcg_css != blkcg_css || !wb_tryget(wb)))
			wb = NULL;
		css_put(blkcg_css);
	}
	rcu_read_unlock();

	return wb;
}

/**
 * wb_get_create - get wb for a given memcg, create if necessary
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 * @gfp: allocation mask to use
 *
 * Try to get the wb for @memcg_css on @bdi. If it doesn't exist, try to
 * create one. See wb_get_lookup() for more details.
 */
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp)
{
	struct bdi_writeback *wb;

	might_alloc(gfp);

	if (!memcg_css->parent)
		return &bdi->wb;

	do {
		wb = wb_get_lookup(bdi, memcg_css);
	} while (!wb && !cgwb_create(bdi, memcg_css, gfp));

	return wb;
}
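
/*
 * The lookup/create loop above relies on cgwb_create() treating a racing
 * -EEXIST insertion as success: the loop then simply retries the lookup and
 * picks up the wb created by the winner of the race. Only a real failure
 * (e.g. -ENOMEM, or the bdi/cgroups going offline) makes cgwb_create()
 * return an error, ending the loop with a NULL result.
 */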

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	int ret;

	INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
	mutex_init(&bdi->cgwb_release_mutex);
	init_rwsem(&bdi->wb_switch_rwsem);

	ret = wb_init(&bdi->wb, bdi, GFP_KERNEL);
	if (!ret) {
		bdi->wb.memcg_css = &root_mem_cgroup->css;
		bdi->wb.blkcg_css = blkcg_root_css;
	}
	return ret;
}

static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
{
	struct radix_tree_iter iter;
	void **slot;
	struct bdi_writeback *wb;

	WARN_ON(test_bit(WB_registered, &bdi->wb.state));

	spin_lock_irq(&cgwb_lock);
	radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
		cgwb_kill(*slot);
	spin_unlock_irq(&cgwb_lock);

	mutex_lock(&bdi->cgwb_release_mutex);
	spin_lock_irq(&cgwb_lock);
	while (!list_empty(&bdi->wb_list)) {
		wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
				      bdi_node);
		spin_unlock_irq(&cgwb_lock);
		wb_shutdown(wb);
		spin_lock_irq(&cgwb_lock);
	}
	spin_unlock_irq(&cgwb_lock);
	mutex_unlock(&bdi->cgwb_release_mutex);
}

/*
 * cleanup_offline_cgwbs_workfn - try to release dying cgwbs
 *
 * Try to release dying cgwbs by switching attached inodes to the nearest
 * living ancestor's writeback. Processed wbs are placed at the end
 * of the list to guarantee forward progress.
 */
static void cleanup_offline_cgwbs_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb;
	LIST_HEAD(processed);

	spin_lock_irq(&cgwb_lock);

	while (!list_empty(&offline_cgwbs)) {
		wb = list_first_entry(&offline_cgwbs, struct bdi_writeback,
				      offline_node);
		list_move(&wb->offline_node, &processed);

		/*
		 * If wb is dirty, cleaning up the writeback by switching
		 * attached inodes will result in an effective removal of any
		 * bandwidth restrictions, which isn't the goal. Instead,
		 * postpone cleanup until the next run, by which time the IO
		 * will likely have completed. If some inodes get re-dirtied
		 * in the meantime, they will eventually be switched to a
		 * new cgwb.
		 */
		if (wb_has_dirty_io(wb))
			continue;

		if (!wb_tryget(wb))
			continue;

		spin_unlock_irq(&cgwb_lock);
		while (cleanup_offline_cgwb(wb))
			cond_resched();
		spin_lock_irq(&cgwb_lock);

		wb_put(wb);
	}

	if (!list_empty(&processed))
		list_splice_tail(&processed, &offline_cgwbs);

	spin_unlock_irq(&cgwb_lock);
}

/**
 * wb_memcg_offline - kill all wb's associated with a memcg being offlined
 * @memcg: memcg being offlined
 *
 * Also prevents creation of any new wb's associated with @memcg.
 */
void wb_memcg_offline(struct mem_cgroup *memcg)
{
	struct list_head *memcg_cgwb_list = &memcg->cgwb_list;
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
		cgwb_kill(wb);
	memcg_cgwb_list->next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);

	queue_work(system_unbound_wq, &cleanup_offline_cgwbs_work);
}

/**
 * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
 * @css: blkcg being offlined
 *
 * Also prevents creation of any new wb's associated with @css.
 */
void wb_blkcg_offline(struct cgroup_subsys_state *css)
{
	struct bdi_writeback *wb, *next;
	struct list_head *list = blkcg_get_cgwb_list(css);

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, list, blkcg_node)
		cgwb_kill(wb);
	list->next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);
}

static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
	spin_lock_irq(&cgwb_lock);
	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
	spin_unlock_irq(&cgwb_lock);
}

static int __init cgwb_init(void)
{
	/*
	 * There can be many concurrent release work items overwhelming
	 * system_wq. Put them in a separate wq and limit concurrency.
	 * There's no point in executing many of these in parallel.
	 */
	cgwb_release_wq = alloc_workqueue("cgwb_release", 0, 1);
	if (!cgwb_release_wq)
		return -ENOMEM;

	return 0;
}
subsys_initcall(cgwb_init);

#else	/* CONFIG_CGROUP_WRITEBACK */

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	return wb_init(&bdi->wb, bdi, GFP_KERNEL);
}

static void cgwb_bdi_unregister(struct backing_dev_info *bdi) { }

static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
	list_del_rcu(&wb->bdi_node);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

int bdi_init(struct backing_dev_info *bdi)
{
	bdi->dev = NULL;

	kref_init(&bdi->refcnt);
	bdi->min_ratio = 0;
	bdi->max_ratio = 100 * BDI_RATIO_SCALE;
	bdi->max_prop_frac = FPROP_FRAC_BASE;
	INIT_LIST_HEAD(&bdi->bdi_list);
	INIT_LIST_HEAD(&bdi->wb_list);
	init_waitqueue_head(&bdi->wb_waitq);

	return cgwb_bdi_init(bdi);
}

struct backing_dev_info *bdi_alloc(int node_id)
{
	struct backing_dev_info *bdi;

	bdi = kzalloc_node(sizeof(*bdi), GFP_KERNEL, node_id);
	if (!bdi)
		return NULL;

	if (bdi_init(bdi)) {
		kfree(bdi);
		return NULL;
	}
	bdi->capabilities = BDI_CAP_WRITEBACK | BDI_CAP_WRITEBACK_ACCT;
	bdi->ra_pages = VM_READAHEAD_PAGES;
	bdi->io_pages = VM_READAHEAD_PAGES;
	timer_setup(&bdi->laptop_mode_wb_timer, laptop_mode_timer_fn, 0);
	return bdi;
}
EXPORT_SYMBOL(bdi_alloc);
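
/*
 * A rough sketch of the expected lifecycle from a driver's point of view
 * ("my-device" is a hypothetical name, error handling omitted):
 *
 *	bdi = bdi_alloc(NUMA_NO_NODE);
 *	bdi_register(bdi, "my-device");
 *	...
 *	bdi_unregister(bdi);
 *	bdi_put(bdi);
 */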

static struct rb_node **bdi_lookup_rb_node(u64 id, struct rb_node **parentp)
{
	struct rb_node **p = &bdi_tree.rb_node;
	struct rb_node *parent = NULL;
	struct backing_dev_info *bdi;

	lockdep_assert_held(&bdi_lock);

	while (*p) {
		parent = *p;
		bdi = rb_entry(parent, struct backing_dev_info, rb_node);

		if (bdi->id > id)
			p = &(*p)->rb_left;
		else if (bdi->id < id)
			p = &(*p)->rb_right;
		else
			break;
	}

	if (parentp)
		*parentp = parent;
	return p;
}

/**
 * bdi_get_by_id - lookup and get bdi from its id
 * @id: bdi id to lookup
 *
 * Find bdi matching @id and get it. Returns NULL if the matching bdi
 * doesn't exist or is already unregistered.
 */
struct backing_dev_info *bdi_get_by_id(u64 id)
{
	struct backing_dev_info *bdi = NULL;
	struct rb_node **p;

	spin_lock_bh(&bdi_lock);
	p = bdi_lookup_rb_node(id, NULL);
	if (*p) {
		bdi = rb_entry(*p, struct backing_dev_info, rb_node);
		bdi_get(bdi);
	}
	spin_unlock_bh(&bdi_lock);

	return bdi;
}

int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
{
	struct device *dev;
	struct rb_node *parent, **p;

	if (bdi->dev)	/* The driver needs to use separate queues per device */
		return 0;

	vsnprintf(bdi->dev_name, sizeof(bdi->dev_name), fmt, args);
	dev = device_create(bdi_class, NULL, MKDEV(0, 0), bdi, bdi->dev_name);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	cgwb_bdi_register(bdi);
	bdi->dev = dev;

	bdi_debug_register(bdi, dev_name(dev));
	set_bit(WB_registered, &bdi->wb.state);

	spin_lock_bh(&bdi_lock);

	bdi->id = ++bdi_id_cursor;

	p = bdi_lookup_rb_node(bdi->id, &parent);
	rb_link_node(&bdi->rb_node, parent, p);
	rb_insert_color(&bdi->rb_node, &bdi_tree);

	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);

	spin_unlock_bh(&bdi_lock);

	trace_writeback_bdi_register(bdi);
	return 0;
}

int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = bdi_register_va(bdi, fmt, args);
	va_end(args);
	return ret;
}
EXPORT_SYMBOL(bdi_register);
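
/*
 * The block layer, for instance, registers its bdis by device number,
 * roughly bdi_register(bdi, "%u:%u", MAJOR(devt), MINOR(devt)), which is
 * why names such as "8:0" appear under /sys/class/bdi and in the debugfs
 * "bdi" directory.
 */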

void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner)
{
	WARN_ON_ONCE(bdi->owner);
	bdi->owner = owner;
	get_device(owner);
}

/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi_lock);
	rb_erase(&bdi->rb_node, &bdi_tree);
	list_del_rcu(&bdi->bdi_list);
	spin_unlock_bh(&bdi_lock);

	synchronize_rcu_expedited();
}

void bdi_unregister(struct backing_dev_info *bdi)
{
	del_timer_sync(&bdi->laptop_mode_wb_timer);

	/* make sure nobody finds us on the bdi_list anymore */
	bdi_remove_from_list(bdi);
	wb_shutdown(&bdi->wb);
	cgwb_bdi_unregister(bdi);

	/*
	 * If this BDI's min ratio has been set, use bdi_set_min_ratio() to
	 * update the global bdi_min_ratio.
	 */
	if (bdi->min_ratio)
		bdi_set_min_ratio(bdi, 0);

	if (bdi->dev) {
		bdi_debug_unregister(bdi);
		device_unregister(bdi->dev);
		bdi->dev = NULL;
	}

	if (bdi->owner) {
		put_device(bdi->owner);
		bdi->owner = NULL;
	}
}
EXPORT_SYMBOL(bdi_unregister);

static void release_bdi(struct kref *ref)
{
	struct backing_dev_info *bdi =
		container_of(ref, struct backing_dev_info, refcnt);

	WARN_ON_ONCE(test_bit(WB_registered, &bdi->wb.state));
	WARN_ON_ONCE(bdi->dev);
	wb_exit(&bdi->wb);
	kfree(bdi);
}

void bdi_put(struct backing_dev_info *bdi)
{
	kref_put(&bdi->refcnt, release_bdi);
}
EXPORT_SYMBOL(bdi_put);

struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb;

	if (!inode)
		return &noop_backing_dev_info;

	sb = inode->i_sb;
#ifdef CONFIG_BLOCK
	if (sb_is_blkdev_sb(sb))
		return I_BDEV(inode)->bd_disk->bdi;
#endif
	return sb->s_bdi;
}
EXPORT_SYMBOL(inode_to_bdi);

const char *bdi_dev_name(struct backing_dev_info *bdi)
{
	if (!bdi || !bdi->dev)
		return bdi_unknown_name;
	return bdi->dev_name;
}
EXPORT_SYMBOL_GPL(bdi_dev_name);