// SPDX-License-Identifier: GPL-2.0
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 *
 * For policy-specific per-blkcg data:
 * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
 *                    Arianna Avanzini <avanzini.arianna@gmail.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/resume_user_mode.h>
#include <linux/psi.h>
#include <linux/part_stat.h>
#include "blk.h"
#include "blk-cgroup.h"
#include "blk-ioprio.h"
#include "blk-throttle.h"

/*
 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
 * policy [un]register operations including cgroup file additions /
 * removals.  Putting cgroup file registration outside blkcg_pol_mutex
 * allows grabbing it from cgroup callbacks.
 */
static DEFINE_MUTEX(blkcg_pol_register_mutex);
static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root;
EXPORT_SYMBOL_GPL(blkcg_root);

struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
EXPORT_SYMBOL_GPL(blkcg_root_css);

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static LIST_HEAD(all_blkcgs);		/* protected by blkcg_pol_mutex */

bool blkcg_debug_stats = false;

#define BLKG_DESTROY_BATCH_SIZE  64

/*
 * Lockless lists for tracking IO stats update
 *
 * New IO stats are stored in the percpu iostat_cpu within blkcg_gq (blkg).
 * There are multiple blkg's (one for each block device) attached to each
 * blkcg. The rstat code keeps track of which cpu has IO stats updated,
 * but it doesn't know which blkg has the updated stats. If there are many
 * block devices in a system, the cost of iterating all the blkg's to flush
 * out the IO stats can be high. To reduce such overhead, a set of percpu
 * lockless lists (lhead) per blkcg are used to track the set of recently
 * updated iostat_cpu's since the last flush. An iostat_cpu will be put
 * onto the lockless list on the update side [blk_cgroup_bio_start()] if
 * not there yet and then removed when being flushed [blkcg_rstat_flush()].
 * References to blkg are gotten and then put back in the process to
 * protect against blkg removal.
 *
 * Return: 0 if successful or -ENOMEM if allocation fails.
 */
static int init_blkcg_llists(struct blkcg *blkcg)
{
	int cpu;

	blkcg->lhead = alloc_percpu_gfp(struct llist_head, GFP_KERNEL);
	if (!blkcg->lhead)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		init_llist_head(per_cpu_ptr(blkcg->lhead, cpu));
	return 0;
}

/**
 * blkcg_css - find the current css
 *
 * Find the css associated with either the kthread or the current task.
 * This may return a dying css, so it is up to the caller to use tryget logic
 * to confirm it is alive and well.
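 *
 * A minimal sketch of the tryget pattern expected of callers (illustrative
 * only, not lifted from a specific call site):
 *
 *	rcu_read_lock();
 *	css = blkcg_css();
 *	if (css_tryget_online(css)) {
 *		... use the css ...
 *		css_put(css);
 *	}
 *	rcu_read_unlock();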
 */
static struct cgroup_subsys_state *blkcg_css(void)
{
	struct cgroup_subsys_state *css;

	css = kthread_blkcg();
	if (css)
		return css;
	return task_css(current, io_cgrp_id);
}

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

static void blkg_free_workfn(struct work_struct *work)
{
	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
					     free_work);
	struct request_queue *q = blkg->q;
	int i;

	/*
	 * pd_free_fn() can also be called from blkcg_deactivate_policy().
	 * To make sure pd_free_fn() is called in order, the deletion of the
	 * list blkg->q_node is delayed to here from blkg_destroy(), and
	 * blkcg_mutex is used to synchronize blkg_free_workfn() and
	 * blkcg_deactivate_policy().
	 */
	mutex_lock(&q->blkcg_mutex);
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkg->pd[i])
			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
	if (blkg->parent)
		blkg_put(blkg->parent);
	list_del_init(&blkg->q_node);
	mutex_unlock(&q->blkcg_mutex);

	blk_put_queue(q);
	free_percpu(blkg->iostat_cpu);
	percpu_ref_exit(&blkg->refcnt);
	kfree(blkg);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	if (!blkg)
		return;

	/*
	 * Both ->pd_free_fn() and request queue's release handler may
	 * sleep, so free us by scheduling one work func
	 */
	INIT_WORK(&blkg->free_work, blkg_free_workfn);
	schedule_work(&blkg->free_work);
}

static void __blkg_release(struct rcu_head *rcu)
{
	struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);

#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
	WARN_ON(!bio_list_empty(&blkg->async_bios));
#endif

	/* release the blkcg and parent blkg refs this blkg has been holding */
	css_put(&blkg->blkcg->css);
	blkg_free(blkg);
}

/*
 * A group is RCU protected, but having an rcu lock does not mean that one
 * can access all the fields of blkg and assume these are valid.  For
 * example, don't try to follow throtl_data and request queue links.
 *
 * Having a reference to blkg under an rcu allows accesses to only values
 * local to groups like group stats and group rate limits.
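 *
 * A minimal sketch of that access pattern (illustrative only, not taken from
 * a specific caller):
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg && blkg_tryget(blkg)) {
 *		... read blkg-local data such as blkg->iostat ...
 *		blkg_put(blkg);
 *	}
 *	rcu_read_unlock();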
 */
static void blkg_release(struct percpu_ref *ref)
{
	struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);

	call_rcu(&blkg->rcu_head, __blkg_release);
}

#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
static struct workqueue_struct *blkcg_punt_bio_wq;

static void blkg_async_bio_workfn(struct work_struct *work)
{
	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
					     async_bio_work);
	struct bio_list bios = BIO_EMPTY_LIST;
	struct bio *bio;
	struct blk_plug plug;
	bool need_plug = false;

	/* as long as there are pending bios, @blkg can't go away */
	spin_lock(&blkg->async_bio_lock);
	bio_list_merge(&bios, &blkg->async_bios);
	bio_list_init(&blkg->async_bios);
	spin_unlock(&blkg->async_bio_lock);

	/* start plug only when bio_list contains at least 2 bios */
	if (bios.head && bios.head->bi_next) {
		need_plug = true;
		blk_start_plug(&plug);
	}
	while ((bio = bio_list_pop(&bios)))
		submit_bio(bio);
	if (need_plug)
		blk_finish_plug(&plug);
}

/*
 * When a shared kthread issues a bio for a cgroup, doing so synchronously can
 * lead to priority inversions as the kthread can be trapped waiting for that
 * cgroup.  Use this helper instead of submit_bio to punt the actual issuing to
 * a dedicated per-blkcg work item to avoid such priority inversions.
 */
void blkcg_punt_bio_submit(struct bio *bio)
{
	struct blkcg_gq *blkg = bio->bi_blkg;

	if (blkg->parent) {
		spin_lock(&blkg->async_bio_lock);
		bio_list_add(&blkg->async_bios, bio);
		spin_unlock(&blkg->async_bio_lock);
		queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work);
	} else {
		/* never bounce for the root cgroup */
		submit_bio(bio);
	}
}
EXPORT_SYMBOL_GPL(blkcg_punt_bio_submit);

static int __init blkcg_punt_bio_init(void)
{
	blkcg_punt_bio_wq = alloc_workqueue("blkcg_punt_bio",
					    WQ_MEM_RECLAIM | WQ_FREEZABLE |
					    WQ_UNBOUND | WQ_SYSFS, 0);
	if (!blkcg_punt_bio_wq)
		return -ENOMEM;
	return 0;
}
subsys_initcall(blkcg_punt_bio_init);
#endif /* CONFIG_BLK_CGROUP_PUNT_BIO */

/**
 * bio_blkcg_css - return the blkcg CSS associated with a bio
 * @bio: target bio
 *
 * This returns the CSS for the blkcg associated with a bio, or %NULL if not
 * associated. Callers are expected to either handle %NULL or know association
 * has been done prior to calling this.
 */
struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio)
{
	if (!bio || !bio->bi_blkg)
		return NULL;
	return &bio->bi_blkg->blkcg->css;
}
EXPORT_SYMBOL_GPL(bio_blkcg_css);

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @disk: gendisk the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @disk.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i, cpu;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, disk->queue->node);
	if (!blkg)
		return NULL;
	if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask))
		goto out_free_blkg;
	blkg->iostat_cpu = alloc_percpu_gfp(struct blkg_iostat_set, gfp_mask);
	if (!blkg->iostat_cpu)
		goto out_exit_refcnt;
	if (!blk_get_queue(disk->queue))
		goto out_free_iostat;

	blkg->q = disk->queue;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
	spin_lock_init(&blkg->async_bio_lock);
	bio_list_init(&blkg->async_bios);
	INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn);
#endif

	u64_stats_init(&blkg->iostat.sync);
	for_each_possible_cpu(cpu) {
		u64_stats_init(&per_cpu_ptr(blkg->iostat_cpu, cpu)->sync);
		per_cpu_ptr(blkg->iostat_cpu, cpu)->blkg = blkg;
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(disk->queue, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = pol->pd_alloc_fn(disk, blkcg, gfp_mask);
		if (!pd)
			goto out_free_pds;
		blkg->pd[i] = pd;
		pd->blkg = blkg;
		pd->plid = i;
		pd->online = false;
	}

	return blkg;

out_free_pds:
	while (--i >= 0)
		if (blkg->pd[i])
			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
	blk_put_queue(disk->queue);
out_free_iostat:
	free_percpu(blkg->iostat_cpu);
out_exit_refcnt:
	percpu_ref_exit(&blkg->refcnt);
out_free_blkg:
	kfree(blkg);
	return NULL;
}

/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg, struct gendisk *disk,
				    struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	int i, ret;

	lockdep_assert_held(&disk->queue->queue_lock);

	/* request_queue is dying, do not create/recreate a blkg */
	if (blk_queue_dying(disk->queue)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	/* blkg holds a reference to blkcg */
	if (!css_tryget_online(&blkcg->css)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, disk, GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto err_put_css;
		}
	}
	blkg = new_blkg;

	/* link parent */
	if (blkcg_parent(blkcg)) {
		blkg->parent = blkg_lookup(blkcg_parent(blkcg), disk->queue);
		if (WARN_ON_ONCE(!blkg->parent)) {
			ret = -ENODEV;
			goto err_put_css;
		}
		blkg_get(blkg->parent);
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_init_fn)
			pol->pd_init_fn(blkg->pd[i]);
	}

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, disk->queue->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &disk->queue->blkg_list);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i]) {
				if (pol->pd_online_fn)
					pol->pd_online_fn(blkg->pd[i]);
				blkg->pd[i]->online = true;
			}
		}
	}
	blkg->online = true;
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	/* @blkg failed to be fully initialized, use the usual release path */
	blkg_put(blkg);
	return ERR_PTR(ret);

err_put_css:
	css_put(&blkcg->css);
err_free_blkg:
	if (new_blkg)
		blkg_free(new_blkg);
	return ERR_PTR(ret);
}

/**
 * blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @disk: gendisk of interest
 *
 * Lookup blkg for the @blkcg - @disk pair.  If it doesn't exist, try to
 * create one.  blkg creation is performed recursively from blkcg_root such
 * that all non-root blkg's have access to the parent blkg.  This function
 * should be called under RCU read lock and takes @disk->queue->queue_lock.
 *
 * Returns the blkg or the closest blkg if blkg_create() fails as it walks
 * down from root.
 */
static struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
		struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blkcg_gq *blkg;
	unsigned long flags;

	WARN_ON_ONCE(!rcu_read_lock_held());

	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		return blkg;

	spin_lock_irqsave(&q->queue_lock, flags);
	blkg = blkg_lookup(blkcg, q);
	if (blkg) {
		if (blkcg != &blkcg_root &&
		    blkg != rcu_dereference(blkcg->blkg_hint))
			rcu_assign_pointer(blkcg->blkg_hint, blkg);
		goto found;
	}

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.  Returns the closest
	 * blkg to the intended blkg should blkg_create() fail.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent = blkcg_parent(blkcg);
		struct blkcg_gq *ret_blkg = q->root_blkg;

		while (parent) {
			blkg = blkg_lookup(parent, q);
			if (blkg) {
				/* remember closest blkg */
				ret_blkg = blkg;
				break;
			}
			pos = parent;
			parent = blkcg_parent(parent);
		}

		blkg = blkg_create(pos, disk, NULL);
		if (IS_ERR(blkg)) {
			blkg = ret_blkg;
			break;
		}
		if (pos == blkcg)
			break;
	}

found:
	spin_unlock_irqrestore(&q->queue_lock, flags);
	return blkg;
}

static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;
	int i;

	lockdep_assert_held(&blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/*
	 * blkg stays on the queue list until blkg_free_workfn(), see details in
	 * blkg_free_workfn(), hence this function can be called from
	 * blkcg_destroy_blkgs() first and again from blkg_destroy_all() before
	 * blkg_free_workfn().
	 */
	if (hlist_unhashed(&blkg->blkcg_node))
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && blkg->pd[i]->online) {
			blkg->pd[i]->online = false;
			if (pol->pd_offline_fn)
				pol->pd_offline_fn(blkg->pd[i]);
		}
	}

	blkg->online = false;

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock.  If it's not pointing to @blkg now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	percpu_ref_kill(&blkg->refcnt);
}

static void blkg_destroy_all(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blkcg_gq *blkg, *n;
	int count = BLKG_DESTROY_BATCH_SIZE;

restart:
	spin_lock_irq(&q->queue_lock);
	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		if (hlist_unhashed(&blkg->blkcg_node))
			continue;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);

		/*
		 * in order to avoid holding the spin lock for too long, release
		 * it when a batch of blkgs are destroyed.
		 */
		if (!(--count)) {
			count = BLKG_DESTROY_BATCH_SIZE;
			spin_unlock_irq(&q->queue_lock);
			cond_resched();
			goto restart;
		}
	}

	q->root_blkg = NULL;
	spin_unlock_irq(&q->queue_lock);
}

static int blkcg_reset_stats(struct cgroup_subsys_state *css,
			     struct cftype *cftype, u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;
	int i, cpu;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		for_each_possible_cpu(cpu) {
			struct blkg_iostat_set *bis =
				per_cpu_ptr(blkg->iostat_cpu, cpu);
			memset(bis, 0, sizeof(*bis));
		}
		memset(&blkg->iostat, 0, sizeof(blkg->iostat));

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg->pd[i]);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	if (!blkg->q->disk)
		return NULL;
	return bdi_dev_name(blkg->q->disk->bdi);
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data and the matching queue lock held.  If @show_total
 * is %true, the sum of the return values from @prfill is printed with
 * "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	u64 total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		spin_lock_irq(&blkg->q->queue_lock);
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
		spin_unlock_irq(&blkg->q->queue_lock);
	}
	rcu_read_unlock();

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * blkg_conf_init - initialize a blkg_conf_ctx
 * @ctx: blkg_conf_ctx to initialize
 * @input: input string
 *
 * Initialize @ctx which can be used to parse blkg config input string @input.
 * Once initialized, @ctx can be used with blkg_conf_open_bdev() and
 * blkg_conf_prep(), and must be cleaned up with blkg_conf_exit().
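 *
 * A minimal usage sketch for a policy's config write handler (illustrative
 * only; error handling trimmed and the parsing of ctx.body left out):
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	blkg_conf_init(&ctx, input);
 *	ret = blkg_conf_prep(blkcg, pol, &ctx);
 *	if (!ret) {
 *		... parse ctx.body and apply the config to ctx.blkg ...
 *	}
 *	blkg_conf_exit(&ctx);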
 */
void blkg_conf_init(struct blkg_conf_ctx *ctx, char *input)
{
	*ctx = (struct blkg_conf_ctx){ .input = input };
}
EXPORT_SYMBOL_GPL(blkg_conf_init);

/**
 * blkg_conf_open_bdev - parse and open bdev for per-blkg config update
 * @ctx: blkg_conf_ctx initialized with blkg_conf_init()
 *
 * Parse the device node prefix part, MAJ:MIN, of per-blkg config update from
 * @ctx->input and get and store the matching bdev in @ctx->bdev. @ctx->body is
 * set to point past the device node prefix.
 *
 * This function may be called multiple times on @ctx and the extra calls become
 * NOOPs. blkg_conf_prep() implicitly calls this function. Use this function
 * explicitly if bdev access is needed without resolving the blkcg / policy part
 * of @ctx->input. Returns -errno on error.
 */
int blkg_conf_open_bdev(struct blkg_conf_ctx *ctx)
{
	char *input = ctx->input;
	unsigned int major, minor;
	struct block_device *bdev;
	int key_len;

	if (ctx->bdev)
		return 0;

	if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
		return -EINVAL;

	input += key_len;
	if (!isspace(*input))
		return -EINVAL;
	input = skip_spaces(input);

	bdev = blkdev_get_no_open(MKDEV(major, minor));
	if (!bdev)
		return -ENODEV;
	if (bdev_is_partition(bdev)) {
		blkdev_put_no_open(bdev);
		return -ENODEV;
	}

	ctx->body = input;
	ctx->bdev = bdev;
	return 0;
}

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @ctx: blkg_conf_ctx initialized with blkg_conf_init()
 *
 * Parse per-blkg config update from @ctx->input and initialize @ctx
 * accordingly. On success, @ctx->body points to the part of @ctx->input
 * following MAJ:MIN, @ctx->bdev points to the target block device and
 * @ctx->blkg to the blkg being configured.
 *
 * blkg_conf_open_bdev() may be called on @ctx beforehand. On success, this
 * function returns with queue lock held and must be followed by
 * blkg_conf_exit().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   struct blkg_conf_ctx *ctx)
	__acquires(&bdev->bd_queue->queue_lock)
{
	struct gendisk *disk;
	struct request_queue *q;
	struct blkcg_gq *blkg;
	int ret;

	ret = blkg_conf_open_bdev(ctx);
	if (ret)
		return ret;

	disk = ctx->bdev->bd_disk;
	q = disk->queue;

	/*
	 * blkcg_deactivate_policy() requires the queue to be frozen.  Grab
	 * q_usage_counter to prevent this path from running concurrently
	 * with blkcg_deactivate_policy().
	 */
	ret = blk_queue_enter(q, 0);
	if (ret)
		goto fail;

	spin_lock_irq(&q->queue_lock);

	if (!blkcg_policy_enabled(q, pol)) {
		ret = -EOPNOTSUPP;
		goto fail_unlock;
	}

	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		goto success;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent;
		struct blkcg_gq *new_blkg;

		parent = blkcg_parent(blkcg);
		while (parent && !blkg_lookup(parent, q)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		/* Drop locks to do new blkg allocation with GFP_KERNEL. */
		spin_unlock_irq(&q->queue_lock);

		new_blkg = blkg_alloc(pos, disk, GFP_KERNEL);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto fail_exit_queue;
		}

		if (radix_tree_preload(GFP_KERNEL)) {
			blkg_free(new_blkg);
			ret = -ENOMEM;
			goto fail_exit_queue;
		}

		spin_lock_irq(&q->queue_lock);

		if (!blkcg_policy_enabled(q, pol)) {
			blkg_free(new_blkg);
			ret = -EOPNOTSUPP;
			goto fail_preloaded;
		}

		blkg = blkg_lookup(pos, q);
		if (blkg) {
			blkg_free(new_blkg);
		} else {
			blkg = blkg_create(pos, disk, new_blkg);
			if (IS_ERR(blkg)) {
				ret = PTR_ERR(blkg);
				goto fail_preloaded;
			}
		}

		radix_tree_preload_end();

		if (pos == blkcg)
			goto success;
	}
success:
	blk_queue_exit(q);
	ctx->blkg = blkg;
	return 0;

fail_preloaded:
	radix_tree_preload_end();
fail_unlock:
	spin_unlock_irq(&q->queue_lock);
fail_exit_queue:
	blk_queue_exit(q);
fail:
	/*
	 * If queue was bypassing, we should retry.  Do so after a
	 * short msleep().  It isn't strictly necessary but queue
	 * can be bypassing for some time and it's always nice to
	 * avoid busy looping.
	 */
	if (ret == -EBUSY) {
		msleep(10);
		ret = restart_syscall();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_exit - clean up per-blkg config update
 * @ctx: blkg_conf_ctx initialized with blkg_conf_init()
 *
 * Clean up after per-blkg config update.  This function must be called on all
 * blkg_conf_ctx's initialized with blkg_conf_init().
 */
void blkg_conf_exit(struct blkg_conf_ctx *ctx)
	__releases(&ctx->bdev->bd_queue->queue_lock)
{
	if (ctx->blkg) {
		spin_unlock_irq(&bdev_get_queue(ctx->bdev)->queue_lock);
		ctx->blkg = NULL;
	}

	if (ctx->bdev) {
		blkdev_put_no_open(ctx->bdev);
		ctx->body = NULL;
		ctx->bdev = NULL;
	}
}
EXPORT_SYMBOL_GPL(blkg_conf_exit);

static void blkg_iostat_set(struct blkg_iostat *dst, struct blkg_iostat *src)
{
	int i;

	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
		dst->bytes[i] = src->bytes[i];
		dst->ios[i] = src->ios[i];
	}
}

static void blkg_iostat_add(struct blkg_iostat *dst, struct blkg_iostat *src)
{
	int i;

	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
		dst->bytes[i] += src->bytes[i];
		dst->ios[i] += src->ios[i];
	}
}

static void blkg_iostat_sub(struct blkg_iostat *dst, struct blkg_iostat *src)
{
	int i;

	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
		dst->bytes[i] -= src->bytes[i];
		dst->ios[i] -= src->ios[i];
	}
}

static void blkcg_iostat_update(struct blkcg_gq *blkg, struct blkg_iostat *cur,
				struct blkg_iostat *last)
{
	struct blkg_iostat delta;
	unsigned long flags;

	/* propagate percpu delta to global */
	flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
	blkg_iostat_set(&delta, cur);
	blkg_iostat_sub(&delta, last);
	blkg_iostat_add(&blkg->iostat.cur, &delta);
	blkg_iostat_add(last, &delta);
	u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
}

static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct llist_head *lhead = per_cpu_ptr(blkcg->lhead, cpu);
	struct llist_node *lnode;
	struct blkg_iostat_set *bisc, *next_bisc;

	/* Root-level stats are sourced from system-wide IO stats */
	if (!cgroup_parent(css->cgroup))
		return;

	rcu_read_lock();

	lnode = llist_del_all(lhead);
	if (!lnode)
		goto out;

	/*
	 * Iterate only the iostat_cpu's queued in the lockless list.
	 */
	llist_for_each_entry_safe(bisc, next_bisc, lnode, lnode) {
		struct blkcg_gq *blkg = bisc->blkg;
		struct blkcg_gq *parent = blkg->parent;
		struct blkg_iostat cur;
		unsigned int seq;

		WRITE_ONCE(bisc->lqueued, false);

		/* fetch the current per-cpu values */
		do {
			seq = u64_stats_fetch_begin(&bisc->sync);
			blkg_iostat_set(&cur, &bisc->cur);
		} while (u64_stats_fetch_retry(&bisc->sync, seq));

		blkcg_iostat_update(blkg, &cur, &bisc->last);

		/* propagate global delta to parent (unless that's root) */
		if (parent && parent->parent)
			blkcg_iostat_update(parent, &blkg->iostat.cur,
					    &blkg->iostat.last);
		percpu_ref_put(&blkg->refcnt);
	}

out:
	rcu_read_unlock();
}

/*
 * We source root cgroup stats from the system-wide stats to avoid
 * tracking the same information twice and incurring overhead when no
 * cgroups are defined. For that reason, cgroup_rstat_flush in
 * blkcg_print_stat does not actually fill out the iostat in the root
 * cgroup's blkcg_gq.
 *
 * However, we would like to re-use the printing code between the root and
 * non-root cgroups to the extent possible. For that reason, we simulate
 * flushing the root cgroup's stats by explicitly filling in the iostat
 * with disk level statistics.
 */
static void blkcg_fill_root_iostats(void)
{
	struct class_dev_iter iter;
	struct device *dev;

	class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
	while ((dev = class_dev_iter_next(&iter))) {
		struct block_device *bdev = dev_to_bdev(dev);
		struct blkcg_gq *blkg = bdev->bd_disk->queue->root_blkg;
		struct blkg_iostat tmp;
		int cpu;
		unsigned long flags;

		memset(&tmp, 0, sizeof(tmp));
		for_each_possible_cpu(cpu) {
			struct disk_stats *cpu_dkstats;

			cpu_dkstats = per_cpu_ptr(bdev->bd_stats, cpu);
			tmp.ios[BLKG_IOSTAT_READ] +=
				cpu_dkstats->ios[STAT_READ];
			tmp.ios[BLKG_IOSTAT_WRITE] +=
				cpu_dkstats->ios[STAT_WRITE];
			tmp.ios[BLKG_IOSTAT_DISCARD] +=
				cpu_dkstats->ios[STAT_DISCARD];
			/* convert sectors to bytes */
			tmp.bytes[BLKG_IOSTAT_READ] +=
				cpu_dkstats->sectors[STAT_READ] << 9;
			tmp.bytes[BLKG_IOSTAT_WRITE] +=
				cpu_dkstats->sectors[STAT_WRITE] << 9;
			tmp.bytes[BLKG_IOSTAT_DISCARD] +=
				cpu_dkstats->sectors[STAT_DISCARD] << 9;
		}

		flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
		blkg_iostat_set(&blkg->iostat.cur, &tmp);
		u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
	}
}

static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s)
{
	struct blkg_iostat_set *bis = &blkg->iostat;
	u64 rbytes, wbytes, rios, wios, dbytes, dios;
	const char *dname;
	unsigned seq;
	int i;

	if (!blkg->online)
		return;

	dname = blkg_dev_name(blkg);
	if (!dname)
		return;

	seq_printf(s, "%s ", dname);

	do {
		seq = u64_stats_fetch_begin(&bis->sync);

		rbytes = bis->cur.bytes[BLKG_IOSTAT_READ];
		wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE];
		dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD];
		rios = bis->cur.ios[BLKG_IOSTAT_READ];
		wios = bis->cur.ios[BLKG_IOSTAT_WRITE];
		dios = bis->cur.ios[BLKG_IOSTAT_DISCARD];
	} while (u64_stats_fetch_retry(&bis->sync, seq));

	if (rbytes || wbytes || rios || wios) {
		seq_printf(s, "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
			rbytes, wbytes, rios, wios,
			dbytes, dios);
	}

	if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
		seq_printf(s, " use_delay=%d delay_nsec=%llu",
			atomic_read(&blkg->use_delay),
			atomic64_read(&blkg->delay_nsec));
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (!blkg->pd[i] || !pol->pd_stat_fn)
			continue;

		pol->pd_stat_fn(blkg->pd[i], s);
	}

	seq_puts(s, "\n");
}

static int blkcg_print_stat(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct blkcg_gq *blkg;

	if (!seq_css(sf)->parent)
		blkcg_fill_root_iostats();
	else
		cgroup_rstat_flush(blkcg->css.cgroup);

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		spin_lock_irq(&blkg->q->queue_lock);
		blkcg_print_one_stat(blkg, sf);
		spin_unlock_irq(&blkg->q->queue_lock);
	}
	rcu_read_unlock();
	return 0;
}

static struct cftype blkcg_files[] = {
	{
		.name = "stat",
		.seq_show = blkcg_print_stat,
	},
	{ }	/* terminate */
};

static struct cftype blkcg_legacy_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};

#ifdef CONFIG_CGROUP_WRITEBACK
struct list_head *blkcg_get_cgwb_list(struct cgroup_subsys_state *css)
{
	return &css_to_blkcg(css)->cgwb_list;
}
#endif

/*
 * blkcg destruction is a three-stage process.
 *
 * 1. Destruction starts.  The blkcg_css_offline() callback is invoked
 *    which offlines writeback.  Here we tie the next stage of blkg destruction
 *    to the completion of writeback associated with the blkcg.  This lets us
 *    avoid punting potentially large amounts of outstanding writeback to root
 *    while maintaining any ongoing policies.  The next stage is triggered when
 *    the nr_cgwbs count goes to zero.
 *
 * 2. When the nr_cgwbs count goes to zero, blkcg_destroy_blkgs() is called
 *    and handles the destruction of blkgs.  Here the css reference held by
 *    the blkg is put back eventually allowing blkcg_css_free() to be called.
 *    This work may occur in cgwb_release_workfn() on the cgwb_release
 *    workqueue.  Any submitted ios that fail to get the blkg ref will be
 *    punted to the root_blkg.
 *
 * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called.
 *    This finally frees the blkcg.
 */

/**
 * blkcg_destroy_blkgs - responsible for shooting down blkgs
 * @blkcg: blkcg of interest
 *
 * blkgs should be removed while holding both q and blkcg locks.  As blkcg lock
 * is nested inside q lock, this function performs reverse double lock dancing.
 * Destroying the blkgs releases the reference held on the blkcg's css allowing
 * blkcg_css_free to eventually be called.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static void blkcg_destroy_blkgs(struct blkcg *blkcg)
{
	might_sleep();

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						    struct blkcg_gq,
						    blkcg_node);
		struct request_queue *q = blkg->q;

		if (need_resched() || !spin_trylock(&q->queue_lock)) {
			/*
			 * Given that the system can accumulate a huge number
			 * of blkgs in pathological cases, check to see if we
			 * need to reschedule to avoid softlockup.
			 */
			spin_unlock_irq(&blkcg->lock);
			cond_resched();
			spin_lock_irq(&blkcg->lock);
			continue;
		}

		blkg_destroy(blkg);
		spin_unlock(&q->queue_lock);
	}

	spin_unlock_irq(&blkcg->lock);
}

/**
 * blkcg_pin_online - pin online state
 * @blkcg_css: blkcg of interest
 *
 * While pinned, a blkcg is kept online.  This is primarily used to
 * impedance-match blkg and cgwb lifetimes so that blkg doesn't go offline
 * while an associated cgwb is still active.
 */
void blkcg_pin_online(struct cgroup_subsys_state *blkcg_css)
{
	refcount_inc(&css_to_blkcg(blkcg_css)->online_pin);
}

/**
 * blkcg_unpin_online - unpin online state
 * @blkcg_css: blkcg of interest
 *
 * This is primarily used to impedance-match blkg and cgwb lifetimes so
 * that blkg doesn't go offline while an associated cgwb is still active.
 * When this count goes to zero, all active cgwbs have finished so the
 * blkcg can continue destruction by calling blkcg_destroy_blkgs().
 */
void blkcg_unpin_online(struct cgroup_subsys_state *blkcg_css)
{
	struct blkcg *blkcg = css_to_blkcg(blkcg_css);

	do {
		if (!refcount_dec_and_test(&blkcg->online_pin))
			break;
		blkcg_destroy_blkgs(blkcg);
		blkcg = blkcg_parent(blkcg);
	} while (blkcg);
}

/**
 * blkcg_css_offline - cgroup css_offline callback
 * @css: css of interest
 *
 * This function is called when @css is about to go away.  Here the cgwbs are
 * offlined first and only once writeback associated with the blkcg has
 * finished do we start step 2 (see above).
 */
static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
	/* this prevents anyone from attaching or migrating to this blkcg */
	wb_blkcg_offline(css);

	/* put the base online pin allowing step 2 to be triggered */
	blkcg_unpin_online(css);
}

static void blkcg_css_free(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	int i;

	mutex_lock(&blkcg_pol_mutex);

	list_del(&blkcg->all_blkcgs_node);

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

	mutex_unlock(&blkcg_pol_mutex);

	free_percpu(blkcg->lhead);
	kfree(blkcg);
}

static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct blkcg *blkcg;
	int i;

	mutex_lock(&blkcg_pol_mutex);

	if (!parent_css) {
		blkcg = &blkcg_root;
	} else {
		blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
		if (!blkcg)
			goto unlock;
	}

	if (init_blkcg_llists(blkcg))
		goto free_blkcg;

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg_policy_data *cpd;

		/*
		 * If the policy hasn't been attached yet, wait for it
		 * to be attached before doing anything else. Otherwise,
		 * check if the policy requires any specific per-cgroup
		 * data: if it does, allocate and initialize it.
		 */
		if (!pol || !pol->cpd_alloc_fn)
			continue;

		cpd = pol->cpd_alloc_fn(GFP_KERNEL);
		if (!cpd)
			goto free_pd_blkcg;

		blkcg->cpd[i] = cpd;
		cpd->blkcg = blkcg;
		cpd->plid = i;
	}

	spin_lock_init(&blkcg->lock);
	refcount_set(&blkcg->online_pin, 1);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
	INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
	INIT_LIST_HEAD(&blkcg->cgwb_list);
#endif
	list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);

	mutex_unlock(&blkcg_pol_mutex);
	return &blkcg->css;

free_pd_blkcg:
	for (i--; i >= 0; i--)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
	free_percpu(blkcg->lhead);
free_blkcg:
	if (blkcg != &blkcg_root)
		kfree(blkcg);
unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ERR_PTR(-ENOMEM);
}

static int blkcg_css_online(struct cgroup_subsys_state *css)
{
	struct blkcg *parent = blkcg_parent(css_to_blkcg(css));

	/*
	 * blkcg_pin_online() is used to delay blkcg offline so that blkgs
	 * don't go offline while cgwbs are still active on them.  Pin the
	 * parent so that offline always happens towards the root.
	 */
	if (parent)
		blkcg_pin_online(&parent->css);
	return 0;
}

int blkcg_init_disk(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blkcg_gq *new_blkg, *blkg;
	bool preloaded;
	int ret;

	INIT_LIST_HEAD(&q->blkg_list);
	mutex_init(&q->blkcg_mutex);

	new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
	if (!new_blkg)
		return -ENOMEM;

	preloaded = !radix_tree_preload(GFP_KERNEL);

	/* Make sure the root blkg exists. */
	/* spin_lock_irq can serve as RCU read-side critical section. */
	spin_lock_irq(&q->queue_lock);
	blkg = blkg_create(&blkcg_root, disk, new_blkg);
	if (IS_ERR(blkg))
		goto err_unlock;
	q->root_blkg = blkg;
	spin_unlock_irq(&q->queue_lock);

	if (preloaded)
		radix_tree_preload_end();

	ret = blk_ioprio_init(disk);
	if (ret)
		goto err_destroy_all;

	ret = blk_throtl_init(disk);
	if (ret)
		goto err_ioprio_exit;

	return 0;

err_ioprio_exit:
	blk_ioprio_exit(disk);
err_destroy_all:
	blkg_destroy_all(disk);
	return ret;
err_unlock:
	spin_unlock_irq(&q->queue_lock);
	if (preloaded)
		radix_tree_preload_end();
	return PTR_ERR(blkg);
}

void blkcg_exit_disk(struct gendisk *disk)
{
	blkg_destroy_all(disk);
	blk_throtl_exit(disk);
}

static void blkcg_exit(struct task_struct *tsk)
{
	if (tsk->throttle_disk)
		put_disk(tsk->throttle_disk);
	tsk->throttle_disk = NULL;
}

struct cgroup_subsys io_cgrp_subsys = {
	.css_alloc = blkcg_css_alloc,
	.css_online = blkcg_css_online,
	.css_offline = blkcg_css_offline,
	.css_free = blkcg_css_free,
	.css_rstat_flush = blkcg_rstat_flush,
	.dfl_cftypes = blkcg_files,
	.legacy_cftypes = blkcg_legacy_files,
	.legacy_name = "blkio",
	.exit = blkcg_exit,
#ifdef CONFIG_MEMCG
	/*
	 * This ensures that, if available, memcg is automatically enabled
	 * together on the default hierarchy so that the owner cgroup can
	 * be retrieved from writeback pages.
	 */
	.depends_on = 1 << memory_cgrp_id,
#endif
};
EXPORT_SYMBOL_GPL(io_cgrp_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a gendisk
 * @disk: gendisk of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @disk.  Requires %GFP_KERNEL context.  @disk goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @disk bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
{
	struct request_queue *q = disk->queue;
	struct blkg_policy_data *pd_prealloc = NULL;
	struct blkcg_gq *blkg, *pinned_blkg = NULL;
	int ret;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	if (queue_is_mq(q))
		blk_mq_freeze_queue(q);
retry:
	spin_lock_irq(&q->queue_lock);

	/* blkg_list is pushed at the head, reverse walk to allocate parents first */
	list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
		struct blkg_policy_data *pd;

		if (blkg->pd[pol->plid])
			continue;

		/* If prealloc matches, use it; otherwise try GFP_NOWAIT */
		if (blkg == pinned_blkg) {
			pd = pd_prealloc;
			pd_prealloc = NULL;
		} else {
			pd = pol->pd_alloc_fn(disk, blkg->blkcg,
					      GFP_NOWAIT | __GFP_NOWARN);
		}

		if (!pd) {
			/*
			 * GFP_NOWAIT failed.  Free the existing one and
			 * prealloc for @blkg w/ GFP_KERNEL.
			 */
			if (pinned_blkg)
				blkg_put(pinned_blkg);
			blkg_get(blkg);
			pinned_blkg = blkg;

			spin_unlock_irq(&q->queue_lock);

			if (pd_prealloc)
				pol->pd_free_fn(pd_prealloc);
			pd_prealloc = pol->pd_alloc_fn(disk, blkg->blkcg,
						       GFP_KERNEL);
			if (pd_prealloc)
				goto retry;
			else
				goto enomem;
		}

		blkg->pd[pol->plid] = pd;
		pd->blkg = blkg;
		pd->plid = pol->plid;
		pd->online = false;
	}

	/* all allocated, init in the same order */
	if (pol->pd_init_fn)
		list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
			pol->pd_init_fn(blkg->pd[pol->plid]);

	list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
		if (pol->pd_online_fn)
			pol->pd_online_fn(blkg->pd[pol->plid]);
		blkg->pd[pol->plid]->online = true;
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;

	spin_unlock_irq(&q->queue_lock);
out:
	if (queue_is_mq(q))
		blk_mq_unfreeze_queue(q);
	if (pinned_blkg)
		blkg_put(pinned_blkg);
	if (pd_prealloc)
		pol->pd_free_fn(pd_prealloc);
	return ret;

enomem:
	/* alloc failed, nothing's initialized yet, free everything */
	spin_lock_irq(&q->queue_lock);
	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		if (blkg->pd[pol->plid]) {
			pol->pd_free_fn(blkg->pd[pol->plid]);
			blkg->pd[pol->plid] = NULL;
		}
		spin_unlock(&blkcg->lock);
	}
	spin_unlock_irq(&q->queue_lock);
	ret = -ENOMEM;
	goto out;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);

/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a gendisk
 * @disk: gendisk of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @disk.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct gendisk *disk,
			     const struct blkcg_policy *pol)
{
	struct request_queue *q = disk->queue;
	struct blkcg_gq *blkg;

	if (!blkcg_policy_enabled(q, pol))
		return;

	if (queue_is_mq(q))
		blk_mq_freeze_queue(q);

	mutex_lock(&q->blkcg_mutex);
	spin_lock_irq(&q->queue_lock);

	__clear_bit(pol->plid, q->blkcg_pols);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		if (blkg->pd[pol->plid]) {
			if (blkg->pd[pol->plid]->online && pol->pd_offline_fn)
				pol->pd_offline_fn(blkg->pd[pol->plid]);
			pol->pd_free_fn(blkg->pd[pol->plid]);
			blkg->pd[pol->plid] = NULL;
		}
		spin_unlock(&blkcg->lock);
	}

	spin_unlock_irq(&q->queue_lock);
	mutex_unlock(&q->blkcg_mutex);

	if (queue_is_mq(q))
		blk_mq_unfreeze_queue(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

static void blkcg_free_all_cpd(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;

	list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
		if (blkcg->cpd[pol->plid]) {
			pol->cpd_free_fn(blkcg->cpd[pol->plid]);
			blkcg->cpd[pol->plid] = NULL;
		}
	}
}

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
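 *
 * A minimal registration sketch for a hypothetical policy module (the my_*
 * names are illustrative, not an existing policy):
 *
 *	static struct blkcg_policy my_blkcg_policy = {
 *		.pd_alloc_fn	= my_pd_alloc,
 *		.pd_free_fn	= my_pd_free,
 *	};
 *
 *	static int __init my_policy_init(void)
 *	{
 *		return blkcg_policy_register(&my_blkcg_policy);
 *	}
 *
 *	static void __exit my_policy_exit(void)
 *	{
 *		blkcg_policy_unregister(&my_blkcg_policy);
 *	}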
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;
	int i, ret;

	mutex_lock(&blkcg_pol_register_mutex);
	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	ret = -ENOSPC;
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS) {
		pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
		goto err_unlock;
	}

	/* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn come in pairs */
	if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
	    (!pol->pd_alloc_fn ^ !pol->pd_free_fn))
		goto err_unlock;

	/* register @pol */
	pol->plid = i;
	blkcg_policy[pol->plid] = pol;

	/* allocate and install cpd's */
	if (pol->cpd_alloc_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			struct blkcg_policy_data *cpd;

			cpd = pol->cpd_alloc_fn(GFP_KERNEL);
			if (!cpd)
				goto err_free_cpds;

			blkcg->cpd[pol->plid] = cpd;
			cpd->blkcg = blkcg;
			cpd->plid = pol->plid;
		}
	}

	mutex_unlock(&blkcg_pol_mutex);

	/* everything is in place, add intf files for the new policy */
	if (pol->dfl_cftypes)
		WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
					       pol->dfl_cftypes));
	if (pol->legacy_cftypes)
		WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
						  pol->legacy_cftypes));
	mutex_unlock(&blkcg_pol_register_mutex);
	return 0;

err_free_cpds:
	if (pol->cpd_free_fn)
		blkcg_free_all_cpd(pol);

	blkcg_policy[pol->plid] = NULL;
err_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	mutex_unlock(&blkcg_pol_register_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);

/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	mutex_lock(&blkcg_pol_register_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->dfl_cftypes)
		cgroup_rm_cftypes(pol->dfl_cftypes);
	if (pol->legacy_cftypes)
		cgroup_rm_cftypes(pol->legacy_cftypes);

	/* remove cpds and unregister */
	mutex_lock(&blkcg_pol_mutex);

	if (pol->cpd_free_fn)
		blkcg_free_all_cpd(pol);

	blkcg_policy[pol->plid] = NULL;

	mutex_unlock(&blkcg_pol_mutex);
out_unlock:
	mutex_unlock(&blkcg_pol_register_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);

/*
 * Scale the accumulated delay based on how long it has been since we updated
 * the delay.  We only call this when we are adding delay, in case it's been a
 * while since we added delay, and when we are checking to see if we need to
 * delay a task, to account for any delays that may have occurred.
 */
static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
{
	u64 old = atomic64_read(&blkg->delay_start);

	/* negative use_delay means no scaling, see blkcg_set_delay() */
	if (atomic_read(&blkg->use_delay) < 0)
		return;

	/*
	 * We only want to scale down every second.  The idea here is that we
	 * want to delay people for min(delay_nsec, NSEC_PER_SEC) in a certain
	 * time window.
	 * We only want to throttle tasks for recent delay that has occurred,
	 * in 1 second time windows since that's the maximum things can be
	 * throttled.  We save the current delay window in blkg->last_delay so
	 * we know what amount is still left to be charged to the blkg from
	 * this point onward.  blkg->last_use keeps track of the use_delay
	 * counter.  The idea is if we're unthrottling the blkg we are ok with
	 * whatever is happening now, and we can take away more of the
	 * accumulated delay as we've already throttled enough that everybody
	 * is happy with their IO latencies.
	 */
	if (time_before64(old + NSEC_PER_SEC, now) &&
	    atomic64_try_cmpxchg(&blkg->delay_start, &old, now)) {
		u64 cur = atomic64_read(&blkg->delay_nsec);
		u64 sub = min_t(u64, blkg->last_delay, now - old);
		int cur_use = atomic_read(&blkg->use_delay);

		/*
		 * We've been unthrottled, subtract a larger chunk of our
		 * accumulated delay.
		 */
		if (cur_use < blkg->last_use)
			sub = max_t(u64, sub, blkg->last_delay >> 1);

		/*
		 * This shouldn't happen, but handle it anyway.  Our delay_nsec
		 * should only ever be growing except here where we subtract out
		 * min(last_delay, 1 second), but lord knows bugs happen and I'd
		 * rather not end up with negative numbers.
		 */
		if (unlikely(cur < sub)) {
			atomic64_set(&blkg->delay_nsec, 0);
			blkg->last_delay = 0;
		} else {
			atomic64_sub(sub, &blkg->delay_nsec);
			blkg->last_delay = cur - sub;
		}
		blkg->last_use = cur_use;
	}
}

/*
 * This is called when we want to actually walk up the hierarchy and check to
 * see if we need to throttle, and then actually throttle if there is some
 * accumulated delay.  This should only be called upon return to user space so
 * we're not holding some lock that would induce a priority inversion.
 */
static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
{
	unsigned long pflags;
	bool clamp;
	u64 now = ktime_to_ns(ktime_get());
	u64 exp;
	u64 delay_nsec = 0;
	int tok;

	while (blkg->parent) {
		int use_delay = atomic_read(&blkg->use_delay);

		if (use_delay) {
			u64 this_delay;

			blkcg_scale_delay(blkg, now);
			this_delay = atomic64_read(&blkg->delay_nsec);
			if (this_delay > delay_nsec) {
				delay_nsec = this_delay;
				clamp = use_delay > 0;
			}
		}
		blkg = blkg->parent;
	}

	if (!delay_nsec)
		return;

	/*
	 * Let's not sleep for all eternity if we've amassed a huge delay.
	 * Swapping or metadata IO can accumulate 10's of seconds worth of
	 * delay, and we want userspace to be able to do _something_ so cap the
	 * delays at 0.25s.  If there's 10's of seconds worth of delay then the
	 * tasks will be delayed for 0.25 second for every syscall.  If
	 * blkcg_set_delay() was used as indicated by negative use_delay, the
	 * caller is responsible for regulating the range.
	 */
	if (clamp)
		delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC);

	if (use_memdelay)
		psi_memstall_enter(&pflags);

	exp = ktime_add_ns(now, delay_nsec);
	tok = io_schedule_prepare();
	do {
		__set_current_state(TASK_KILLABLE);
		if (!schedule_hrtimeout(&exp, HRTIMER_MODE_ABS))
			break;
	} while (!fatal_signal_pending(current));
	io_schedule_finish(tok);

	if (use_memdelay)
		psi_memstall_leave(&pflags);
}

/**
 * blkcg_maybe_throttle_current - throttle the current task if it has been marked
 *
 * This is only called if we've been marked with set_notify_resume().  Obviously
 * we can be set_notify_resume() for reasons other than blkcg throttling, so we
 * check to see if current->throttle_disk is set and if not this doesn't do
 * anything.  This should only ever be called by the resume code, it's not meant
 * to be called by people willy-nilly as it will actually do the work to
 * throttle the task if it is setup for throttling.
 */
void blkcg_maybe_throttle_current(void)
{
	struct gendisk *disk = current->throttle_disk;
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;
	bool use_memdelay = current->use_memdelay;

	if (!disk)
		return;

	current->throttle_disk = NULL;
	current->use_memdelay = false;

	rcu_read_lock();
	blkcg = css_to_blkcg(blkcg_css());
	if (!blkcg)
		goto out;
	blkg = blkg_lookup(blkcg, disk->queue);
	if (!blkg)
		goto out;
	if (!blkg_tryget(blkg))
		goto out;
	rcu_read_unlock();

	blkcg_maybe_throttle_blkg(blkg, use_memdelay);
	blkg_put(blkg);
	put_disk(disk);
	return;
out:
	rcu_read_unlock();
}

/**
 * blkcg_schedule_throttle - this task needs to check for throttling
 * @disk: disk to throttle
 * @use_memdelay: do we charge this to memory delay for PSI
 *
 * This is called by the IO controller when we know there's delay accumulated
 * for the blkg for this task.  We do not pass the blkg because there are places
 * we call this that may not have that information, the swapping code for
 * instance will only have a block_device at that point.  This sets the
 * notify_resume for the task to check and see if it requires throttling before
 * returning to user space.
 *
 * We will only schedule once per syscall.  You can call this over and over
 * again and it will only do the check once upon return to user space, and only
 * throttle once.  If the task needs to be throttled again it'll need to be
 * re-set at the next time we see the task.
 */
void blkcg_schedule_throttle(struct gendisk *disk, bool use_memdelay)
{
	if (unlikely(current->flags & PF_KTHREAD))
		return;

	if (current->throttle_disk != disk) {
		if (test_bit(GD_DEAD, &disk->state))
			return;
		get_device(disk_to_dev(disk));

		if (current->throttle_disk)
			put_disk(current->throttle_disk);
		current->throttle_disk = disk;
	}

	if (use_memdelay)
		current->use_memdelay = use_memdelay;
	set_notify_resume(current);
}

/**
 * blkcg_add_delay - add delay to this blkg
 * @blkg: blkg of interest
 * @now: the current time in nanoseconds
 * @delta: how many nanoseconds of delay to add
 *
 * Charge @delta to the blkg's current delay accumulation.
 * This is used to throttle tasks if an IO controller thinks we need more
 * throttling.
 */
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
{
	if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
		return;
	blkcg_scale_delay(blkg, now);
	atomic64_add(delta, &blkg->delay_nsec);
}

/**
 * blkg_tryget_closest - try and get a blkg ref on the closest blkg
 * @bio: target bio
 * @css: target css
 *
 * As the failure mode here is to walk up the blkg tree, this ensures that the
 * blkg->parent pointers are always valid.  This returns the blkg that it ended
 * up taking a reference on or %NULL if no reference was taken.
 */
static inline struct blkcg_gq *blkg_tryget_closest(struct bio *bio,
		struct cgroup_subsys_state *css)
{
	struct blkcg_gq *blkg, *ret_blkg = NULL;

	rcu_read_lock();
	blkg = blkg_lookup_create(css_to_blkcg(css), bio->bi_bdev->bd_disk);
	while (blkg) {
		if (blkg_tryget(blkg)) {
			ret_blkg = blkg;
			break;
		}
		blkg = blkg->parent;
	}
	rcu_read_unlock();

	return ret_blkg;
}

/**
 * bio_associate_blkg_from_css - associate a bio with a specified css
 * @bio: target bio
 * @css: target css
 *
 * Associate @bio with the blkg found by combining the css's blkg and the
 * request_queue of the @bio.  An association failure is handled by walking up
 * the blkg tree.  Therefore, the blkg associated can be anything between @blkg
 * and q->root_blkg.  This situation only happens when a cgroup is dying and
 * then the remaining bios will spill to the closest alive blkg.
 *
 * A reference will be taken on the blkg and will be released when @bio is
 * freed.
 */
void bio_associate_blkg_from_css(struct bio *bio,
				 struct cgroup_subsys_state *css)
{
	if (bio->bi_blkg)
		blkg_put(bio->bi_blkg);

	if (css && css->parent) {
		bio->bi_blkg = blkg_tryget_closest(bio, css);
	} else {
		blkg_get(bdev_get_queue(bio->bi_bdev)->root_blkg);
		bio->bi_blkg = bdev_get_queue(bio->bi_bdev)->root_blkg;
	}
}
EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);

/**
 * bio_associate_blkg - associate a bio with a blkg
 * @bio: target bio
 *
 * Associate @bio with the blkg found from the bio's css and request_queue.
 * If one is not found, bio_lookup_blkg() creates the blkg.  If a blkg is
 * already associated, the css is reused and association redone as the
 * request_queue may have changed.
 */
void bio_associate_blkg(struct bio *bio)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();

	if (bio->bi_blkg)
		css = bio_blkcg_css(bio);
	else
		css = blkcg_css();

	bio_associate_blkg_from_css(bio, css);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(bio_associate_blkg);

/**
 * bio_clone_blkg_association - clone blkg association from src to dst bio
 * @dst: destination bio
 * @src: source bio
 */
void bio_clone_blkg_association(struct bio *dst, struct bio *src)
{
	if (src->bi_blkg)
		bio_associate_blkg_from_css(dst, bio_blkcg_css(src));
}
EXPORT_SYMBOL_GPL(bio_clone_blkg_association);

static int blk_cgroup_io_type(struct bio *bio)
{
	if (op_is_discard(bio->bi_opf))
		return BLKG_IOSTAT_DISCARD;
	if (op_is_write(bio->bi_opf))
		return BLKG_IOSTAT_WRITE;
	return BLKG_IOSTAT_READ;
}

void blk_cgroup_bio_start(struct bio *bio)
{
	struct blkcg *blkcg = bio->bi_blkg->blkcg;
	int rwd = blk_cgroup_io_type(bio), cpu;
	struct blkg_iostat_set *bis;
	unsigned long flags;

	/* Root-level stats are sourced from system-wide IO stats */
	if (!cgroup_parent(blkcg->css.cgroup))
		return;

	cpu = get_cpu();
	bis = per_cpu_ptr(bio->bi_blkg->iostat_cpu, cpu);
	flags = u64_stats_update_begin_irqsave(&bis->sync);

	/*
	 * If the bio is flagged with BIO_CGROUP_ACCT it means this is a split
	 * bio and we would have already accounted for the size of the bio.
	 */
	if (!bio_flagged(bio, BIO_CGROUP_ACCT)) {
		bio_set_flag(bio, BIO_CGROUP_ACCT);
		bis->cur.bytes[rwd] += bio->bi_iter.bi_size;
	}
	bis->cur.ios[rwd]++;

	/*
	 * If the iostat_cpu isn't in a lockless list, put it into the
	 * list to indicate that a stat update is pending.
	 */
	if (!READ_ONCE(bis->lqueued)) {
		struct llist_head *lhead = this_cpu_ptr(blkcg->lhead);

		llist_add(&bis->lnode, lhead);
		WRITE_ONCE(bis->lqueued, true);
		percpu_ref_get(&bis->blkg->refcnt);
	}

	u64_stats_update_end_irqrestore(&bis->sync, flags);
	if (cgroup_subsys_on_dfl(io_cgrp_subsys))
		cgroup_rstat_updated(blkcg->css.cgroup, cpu);
	put_cpu();
}

bool blk_cgroup_congested(void)
{
	struct cgroup_subsys_state *css;
	bool ret = false;

	rcu_read_lock();
	for (css = blkcg_css(); css; css = css->parent) {
		if (atomic_read(&css->cgroup->congestion_count)) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

module_param(blkcg_debug_stats, bool, 0644);
MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");