// SPDX-License-Identifier: GPL-2.0
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 *
 * For policy-specific per-blkcg data:
 * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
 *		      Arianna Avanzini <avanzini.arianna@gmail.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/blk-cgroup.h>
#include <linux/tracehook.h>
#include <linux/psi.h>
#include "blk.h"
#include "blk-ioprio.h"
#include "blk-throttle.h"

/*
 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
 * policy [un]register operations including cgroup file additions /
 * removals.  Putting cgroup file registration outside blkcg_pol_mutex
 * allows grabbing it from cgroup callbacks.
 */
static DEFINE_MUTEX(blkcg_pol_register_mutex);
static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root;
EXPORT_SYMBOL_GPL(blkcg_root);

struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
EXPORT_SYMBOL_GPL(blkcg_root_css);

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static LIST_HEAD(all_blkcgs);		/* protected by blkcg_pol_mutex */

bool blkcg_debug_stats = false;
static struct workqueue_struct *blkcg_punt_bio_wq;

#define BLKG_DESTROY_BATCH_SIZE	64

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkg->pd[i])
			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);

	free_percpu(blkg->iostat_cpu);
	percpu_ref_exit(&blkg->refcnt);
	kfree(blkg);
}

static void __blkg_release(struct rcu_head *rcu)
{
	struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);

	WARN_ON(!bio_list_empty(&blkg->async_bios));

	/* release the blkcg and parent blkg refs this blkg has been holding */
	css_put(&blkg->blkcg->css);
	if (blkg->parent)
		blkg_put(blkg->parent);
	blkg_free(blkg);
}

/*
 * A group is RCU protected, but having an rcu lock does not mean that one
 * can access all the fields of blkg and assume these are valid.  For
 * example, don't try to follow throtl_data and request queue links.
 *
 * Having a reference to blkg under an rcu allows accesses to only values
 * local to groups like group stats and group rate limits.
 */
static void blkg_release(struct percpu_ref *ref)
{
	struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);

	call_rcu(&blkg->rcu_head, __blkg_release);
}

static void blkg_async_bio_workfn(struct work_struct *work)
{
	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
					     async_bio_work);
	struct bio_list bios = BIO_EMPTY_LIST;
	struct bio *bio;
	struct blk_plug plug;
	bool need_plug = false;

	/* as long as there are pending bios, @blkg can't go away */
	spin_lock_bh(&blkg->async_bio_lock);
	bio_list_merge(&bios, &blkg->async_bios);
	bio_list_init(&blkg->async_bios);
	spin_unlock_bh(&blkg->async_bio_lock);

	/* start plug only when bio_list contains at least 2 bios */
	if (bios.head && bios.head->bi_next) {
		need_plug = true;
		blk_start_plug(&plug);
	}
	while ((bio = bio_list_pop(&bios)))
		submit_bio(bio);
	if (need_plug)
		blk_finish_plug(&plug);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i, cpu;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
	if (!blkg)
		return NULL;

	if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask))
		goto err_free;

	blkg->iostat_cpu = alloc_percpu_gfp(struct blkg_iostat_set, gfp_mask);
	if (!blkg->iostat_cpu)
		goto err_free;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	spin_lock_init(&blkg->async_bio_lock);
	bio_list_init(&blkg->async_bios);
	INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn);
	blkg->blkcg = blkcg;

	u64_stats_init(&blkg->iostat.sync);
	for_each_possible_cpu(cpu)
		u64_stats_init(&per_cpu_ptr(blkg->iostat_cpu, cpu)->sync);

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = pol->pd_alloc_fn(gfp_mask, q, blkcg);
		if (!pd)
			goto err_free;

		blkg->pd[i] = pd;
		pd->blkg = blkg;
		pd->plid = i;
	}

	return blkg;

err_free:
	blkg_free(blkg);
	return NULL;
}

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint)
{
	struct blkcg_gq *blkg;

	/*
	 * Hint didn't match.  Look up from the radix tree.  Note that the
	 * hint can only be updated under queue_lock as otherwise @blkg
	 * could have already been removed from blkg_tree.  The caller is
	 * responsible for grabbing queue_lock if @update_hint.
	 */
	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q == q) {
		if (update_hint) {
			lockdep_assert_held(&q->queue_lock);
			rcu_assign_pointer(blkcg->blkg_hint, blkg);
		}
		return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);

/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
				    struct request_queue *q,
				    struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	int i, ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(&q->queue_lock);

	/* request_queue is dying, do not create/recreate a blkg */
	if (blk_queue_dying(q)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	/* blkg holds a reference to blkcg */
	if (!css_tryget_online(&blkcg->css)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto err_put_css;
		}
	}
	blkg = new_blkg;

	/* link parent */
	if (blkcg_parent(blkcg)) {
		blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
		if (WARN_ON_ONCE(!blkg->parent)) {
			ret = -ENODEV;
			goto err_put_css;
		}
		blkg_get(blkg->parent);
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_init_fn)
			pol->pd_init_fn(blkg->pd[i]);
	}

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &q->blkg_list);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_online_fn)
				pol->pd_online_fn(blkg->pd[i]);
		}
	}
	blkg->online = true;
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	/* @blkg failed to be fully initialized, use the usual release path */
	blkg_put(blkg);
	return ERR_PTR(ret);

err_put_css:
	css_put(&blkcg->css);
err_free_blkg:
	blkg_free(new_blkg);
	return ERR_PTR(ret);
}

/**
 * blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
 * create one.  blkg creation is performed recursively from blkcg_root such
 * that all non-root blkg's have access to the parent blkg.  This function
 * should be called under RCU read lock and takes @q->queue_lock.
 *
 * Returns the blkg or the closest blkg if blkg_create() fails as it walks
 * down from root.
 */
static struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
					   struct request_queue *q)
{
	struct blkcg_gq *blkg;
	unsigned long flags;

	WARN_ON_ONCE(!rcu_read_lock_held());

	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		return blkg;

	spin_lock_irqsave(&q->queue_lock, flags);
	blkg = __blkg_lookup(blkcg, q, true);
	if (blkg)
		goto found;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.  Returns the closest
	 * blkg to the intended blkg should blkg_create() fail.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent = blkcg_parent(blkcg);
		struct blkcg_gq *ret_blkg = q->root_blkg;

		while (parent) {
			blkg = __blkg_lookup(parent, q, false);
			if (blkg) {
				/* remember closest blkg */
				ret_blkg = blkg;
				break;
			}
			pos = parent;
			parent = blkcg_parent(parent);
		}

		blkg = blkg_create(pos, q, NULL);
		if (IS_ERR(blkg)) {
			blkg = ret_blkg;
			break;
		}
		if (pos == blkcg)
			break;
	}

found:
	spin_unlock_irqrestore(&q->queue_lock, flags);
	return blkg;
}

static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;
	int i;

	lockdep_assert_held(&blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_offline_fn)
			pol->pd_offline_fn(blkg->pd[i]);
	}

	blkg->online = false;

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock.  If it's not pointing to @blkg now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	percpu_ref_kill(&blkg->refcnt);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkcg_gq *blkg, *n;
	int count = BLKG_DESTROY_BATCH_SIZE;

restart:
	spin_lock_irq(&q->queue_lock);
	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);

		/*
		 * in order to avoid holding the spin lock for too long, release
		 * it when a batch of blkgs are destroyed.
		 */
		if (!(--count)) {
			count = BLKG_DESTROY_BATCH_SIZE;
			spin_unlock_irq(&q->queue_lock);
			cond_resched();
			goto restart;
		}
	}

	q->root_blkg = NULL;
	spin_unlock_irq(&q->queue_lock);
}

static int blkcg_reset_stats(struct cgroup_subsys_state *css,
			     struct cftype *cftype, u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;
	int i, cpu;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		for_each_possible_cpu(cpu) {
			struct blkg_iostat_set *bis =
				per_cpu_ptr(blkg->iostat_cpu, cpu);
			memset(bis, 0, sizeof(*bis));
		}
		memset(&blkg->iostat, 0, sizeof(blkg->iostat));

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg->pd[i]);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	if (!blkg->q->disk || !blkg->q->disk->bdi->dev)
		return NULL;
	return bdi_dev_name(blkg->q->disk->bdi);
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data and the matching queue lock held.  If @show_total
 * is %true, the sum of the return values from @prfill is printed with
 * "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	u64 total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		spin_lock_irq(&blkg->q->queue_lock);
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
		spin_unlock_irq(&blkg->q->queue_lock);
	}
	rcu_read_unlock();

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/* Performs queue bypass and policy enabled checks then looks up blkg. */
static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
					  const struct blkcg_policy *pol,
					  struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(&q->queue_lock);

	if (!blkcg_policy_enabled(q, pol))
		return ERR_PTR(-EOPNOTSUPP);
	return __blkg_lookup(blkcg, q, true /* update_hint */);
}

/**
 * blkcg_conf_open_bdev - parse and open bdev for per-blkg config update
 * @inputp: input string pointer
 *
 * Parse the device node prefix part, MAJ:MIN, of per-blkg config update
 * from @input and get and return the matching bdev.  *@inputp is
 * updated to point past the device node prefix.
 * Returns an ERR_PTR() value on error.
 *
 * Use this function iff blkg_conf_prep() can't be used for some reason.
 */
struct block_device *blkcg_conf_open_bdev(char **inputp)
{
	char *input = *inputp;
	unsigned int major, minor;
	struct block_device *bdev;
	int key_len;

	if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
		return ERR_PTR(-EINVAL);

	input += key_len;
	if (!isspace(*input))
		return ERR_PTR(-EINVAL);
	input = skip_spaces(input);

	bdev = blkdev_get_no_open(MKDEV(major, minor));
	if (!bdev)
		return ERR_PTR(-ENODEV);
	if (bdev_is_partition(bdev)) {
		blkdev_put_no_open(bdev);
		return ERR_PTR(-ENODEV);
	}

	*inputp = input;
	return bdev;
}

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->body the
 * part of @input following MAJ:MIN.  This function returns with RCU read
 * lock and queue lock held and must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(&bdev->bd_queue->queue_lock)
{
	struct block_device *bdev;
	struct request_queue *q;
	struct blkcg_gq *blkg;
	int ret;

	bdev = blkcg_conf_open_bdev(&input);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	q = bdev_get_queue(bdev);

	/*
	 * blkcg_deactivate_policy() requires the queue to be frozen, so
	 * grabbing q_usage_counter here prevents racing with a concurrent
	 * blkcg_deactivate_policy().
	 */
	ret = blk_queue_enter(q, 0);
	if (ret)
		goto fail;

	rcu_read_lock();
	spin_lock_irq(&q->queue_lock);

	blkg = blkg_lookup_check(blkcg, pol, q);
	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto fail_unlock;
	}

	if (blkg)
		goto success;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent;
		struct blkcg_gq *new_blkg;

		parent = blkcg_parent(blkcg);
		while (parent && !__blkg_lookup(parent, q, false)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		/* Drop locks to do new blkg allocation with GFP_KERNEL. */
		spin_unlock_irq(&q->queue_lock);
		rcu_read_unlock();

		new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto fail_exit_queue;
		}

		if (radix_tree_preload(GFP_KERNEL)) {
			blkg_free(new_blkg);
			ret = -ENOMEM;
			goto fail_exit_queue;
		}

		rcu_read_lock();
		spin_lock_irq(&q->queue_lock);

		blkg = blkg_lookup_check(pos, pol, q);
		if (IS_ERR(blkg)) {
			ret = PTR_ERR(blkg);
			blkg_free(new_blkg);
			goto fail_preloaded;
		}

		if (blkg) {
			blkg_free(new_blkg);
		} else {
			blkg = blkg_create(pos, q, new_blkg);
			if (IS_ERR(blkg)) {
				ret = PTR_ERR(blkg);
				goto fail_preloaded;
			}
		}

		radix_tree_preload_end();

		if (pos == blkcg)
			goto success;
	}
success:
	blk_queue_exit(q);
	ctx->bdev = bdev;
	ctx->blkg = blkg;
	ctx->body = input;
	return 0;

fail_preloaded:
	radix_tree_preload_end();
fail_unlock:
	spin_unlock_irq(&q->queue_lock);
	rcu_read_unlock();
fail_exit_queue:
	blk_queue_exit(q);
fail:
	blkdev_put_no_open(bdev);
	/*
	 * If queue was bypassing, we should retry.  Do so after a
	 * short msleep().  It isn't strictly necessary but queue
	 * can be bypassing for some time and it's always nice to
	 * avoid busy looping.
	 */
	if (ret == -EBUSY) {
		msleep(10);
		ret = restart_syscall();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(&ctx->bdev->bd_queue->queue_lock) __releases(rcu)
{
	spin_unlock_irq(&bdev_get_queue(ctx->bdev)->queue_lock);
	rcu_read_unlock();
	blkdev_put_no_open(ctx->bdev);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);

static void blkg_iostat_set(struct blkg_iostat *dst, struct blkg_iostat *src)
{
	int i;

	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
		dst->bytes[i] = src->bytes[i];
		dst->ios[i] = src->ios[i];
	}
}

static void blkg_iostat_add(struct blkg_iostat *dst, struct blkg_iostat *src)
{
	int i;

	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
		dst->bytes[i] += src->bytes[i];
		dst->ios[i] += src->ios[i];
	}
}

static void blkg_iostat_sub(struct blkg_iostat *dst, struct blkg_iostat *src)
{
	int i;

	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
		dst->bytes[i] -= src->bytes[i];
		dst->ios[i] -= src->ios[i];
	}
}

static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;

	/* Root-level stats are sourced from system-wide IO stats */
	if (!cgroup_parent(css->cgroup))
		return;

	rcu_read_lock();

	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		struct blkcg_gq *parent = blkg->parent;
		struct blkg_iostat_set *bisc = per_cpu_ptr(blkg->iostat_cpu, cpu);
		struct blkg_iostat cur, delta;
		unsigned long flags;
		unsigned int seq;

		/* fetch the current per-cpu values */
		do {
			seq = u64_stats_fetch_begin(&bisc->sync);
			blkg_iostat_set(&cur, &bisc->cur);
		} while (u64_stats_fetch_retry(&bisc->sync, seq));

		/* propagate percpu delta to global */
		flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
		blkg_iostat_set(&delta, &cur);
		blkg_iostat_sub(&delta, &bisc->last);
		blkg_iostat_add(&blkg->iostat.cur, &delta);
		blkg_iostat_add(&bisc->last, &delta);
		u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);

		/* propagate global delta to parent (unless that's root) */
		if (parent && parent->parent) {
			flags = u64_stats_update_begin_irqsave(&parent->iostat.sync);
			blkg_iostat_set(&delta, &blkg->iostat.cur);
			blkg_iostat_sub(&delta, &blkg->iostat.last);
			blkg_iostat_add(&parent->iostat.cur, &delta);
			blkg_iostat_add(&blkg->iostat.last, &delta);
			u64_stats_update_end_irqrestore(&parent->iostat.sync, flags);
		}
	}

	rcu_read_unlock();
}

/*
 * We source root cgroup stats from the system-wide stats to avoid
 * tracking the same information twice and incurring overhead when no
 * cgroups are defined. For that reason, cgroup_rstat_flush in
 * blkcg_print_stat does not actually fill out the iostat in the root
 * cgroup's blkcg_gq.
 *
 * However, we would like to re-use the printing code between the root and
 * non-root cgroups to the extent possible. For that reason, we simulate
 * flushing the root cgroup's stats by explicitly filling in the iostat
 * with disk level statistics.
 */
static void blkcg_fill_root_iostats(void)
{
	struct class_dev_iter iter;
	struct device *dev;

	class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
	while ((dev = class_dev_iter_next(&iter))) {
		struct block_device *bdev = dev_to_bdev(dev);
		struct blkcg_gq *blkg =
			blk_queue_root_blkg(bdev_get_queue(bdev));
		struct blkg_iostat tmp;
		int cpu;

		memset(&tmp, 0, sizeof(tmp));
		for_each_possible_cpu(cpu) {
			struct disk_stats *cpu_dkstats;
			unsigned long flags;

			cpu_dkstats = per_cpu_ptr(bdev->bd_stats, cpu);
			tmp.ios[BLKG_IOSTAT_READ] +=
				cpu_dkstats->ios[STAT_READ];
			tmp.ios[BLKG_IOSTAT_WRITE] +=
				cpu_dkstats->ios[STAT_WRITE];
			tmp.ios[BLKG_IOSTAT_DISCARD] +=
				cpu_dkstats->ios[STAT_DISCARD];
			// convert sectors to bytes
			tmp.bytes[BLKG_IOSTAT_READ] +=
				cpu_dkstats->sectors[STAT_READ] << 9;
			tmp.bytes[BLKG_IOSTAT_WRITE] +=
				cpu_dkstats->sectors[STAT_WRITE] << 9;
			tmp.bytes[BLKG_IOSTAT_DISCARD] +=
				cpu_dkstats->sectors[STAT_DISCARD] << 9;

			flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
			blkg_iostat_set(&blkg->iostat.cur, &tmp);
			u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
		}
	}
}

static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s)
{
	struct blkg_iostat_set *bis = &blkg->iostat;
	u64 rbytes, wbytes, rios, wios, dbytes, dios;
	bool has_stats = false;
	const char *dname;
	unsigned seq;
	int i;

	if (!blkg->online)
		return;

	dname = blkg_dev_name(blkg);
	if (!dname)
		return;

	seq_printf(s, "%s ", dname);

	do {
		seq = u64_stats_fetch_begin(&bis->sync);

		rbytes = bis->cur.bytes[BLKG_IOSTAT_READ];
		wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE];
		dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD];
		rios = bis->cur.ios[BLKG_IOSTAT_READ];
		wios = bis->cur.ios[BLKG_IOSTAT_WRITE];
		dios = bis->cur.ios[BLKG_IOSTAT_DISCARD];
	} while (u64_stats_fetch_retry(&bis->sync, seq));

	if (rbytes || wbytes || rios || wios) {
		has_stats = true;
		seq_printf(s, "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
			   rbytes, wbytes, rios, wios,
			   dbytes, dios);
	}

	if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
		has_stats = true;
		seq_printf(s, " use_delay=%d delay_nsec=%llu",
			   atomic_read(&blkg->use_delay),
			   atomic64_read(&blkg->delay_nsec));
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (!blkg->pd[i] || !pol->pd_stat_fn)
			continue;

		if (pol->pd_stat_fn(blkg->pd[i], s))
			has_stats = true;
	}

	if (has_stats)
		seq_printf(s, "\n");
}

static int blkcg_print_stat(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct blkcg_gq *blkg;

	if (!seq_css(sf)->parent)
		blkcg_fill_root_iostats();
	else
		cgroup_rstat_flush(blkcg->css.cgroup);

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		spin_lock_irq(&blkg->q->queue_lock);
		blkcg_print_one_stat(blkg, sf);
		spin_unlock_irq(&blkg->q->queue_lock);
	}
	rcu_read_unlock();
	return 0;
}

static struct cftype blkcg_files[] = {
	{
		.name = "stat",
		.seq_show = blkcg_print_stat,
	},
	{ }	/* terminate */
};

static struct cftype blkcg_legacy_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};

/*
 * blkcg destruction is a three-stage process.
 *
 * 1. Destruction starts.  The blkcg_css_offline() callback is invoked
 *    which offlines writeback.  Here we tie the next stage of blkg destruction
 *    to the completion of writeback associated with the blkcg.  This lets us
 *    avoid punting potentially large amounts of outstanding writeback to root
 *    while maintaining any ongoing policies.  The next stage is triggered when
 *    the nr_cgwbs count goes to zero.
 *
 * 2. When the nr_cgwbs count goes to zero, blkcg_destroy_blkgs() is called
 *    and handles the destruction of blkgs.  Here the css reference held by
 *    the blkg is put back eventually allowing blkcg_css_free() to be called.
 *    This work may occur in cgwb_release_workfn() on the cgwb_release
 *    workqueue.  Any submitted ios that fail to get the blkg ref will be
 *    punted to the root_blkg.
 *
 * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called.
 *    This finally frees the blkcg.
 */

/**
 * blkcg_css_offline - cgroup css_offline callback
 * @css: css of interest
 *
 * This function is called when @css is about to go away.  Here the cgwbs are
 * offlined first and only once writeback associated with the blkcg has
 * finished do we start step 2 (see above).
 */
static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);

	/* this prevents anyone from attaching or migrating to this blkcg */
	wb_blkcg_offline(blkcg);

	/* put the base online pin allowing step 2 to be triggered */
	blkcg_unpin_online(blkcg);
}

/**
 * blkcg_destroy_blkgs - responsible for shooting down blkgs
 * @blkcg: blkcg of interest
 *
 * blkgs should be removed while holding both q and blkcg locks.  As blkcg lock
 * is nested inside q lock, this function performs reverse double lock dancing.
 * Destroying the blkgs releases the reference held on the blkcg's css allowing
 * blkcg_css_free to eventually be called.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
void blkcg_destroy_blkgs(struct blkcg *blkcg)
{
	might_sleep();

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						    struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		if (need_resched() || !spin_trylock(&q->queue_lock)) {
			/*
			 * Given that the system can accumulate a huge number
			 * of blkgs in pathological cases, check to see if we
			 * need to reschedule to avoid a softlockup.
			 */
			spin_unlock_irq(&blkcg->lock);
			cond_resched();
			spin_lock_irq(&blkcg->lock);
			continue;
		}

		blkg_destroy(blkg);
		spin_unlock(&q->queue_lock);
	}

	spin_unlock_irq(&blkcg->lock);
}

static void blkcg_css_free(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	int i;

	mutex_lock(&blkcg_pol_mutex);

	list_del(&blkcg->all_blkcgs_node);

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

	mutex_unlock(&blkcg_pol_mutex);

	kfree(blkcg);
}

static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct blkcg *blkcg;
	struct cgroup_subsys_state *ret;
	int i;

	mutex_lock(&blkcg_pol_mutex);

	if (!parent_css) {
		blkcg = &blkcg_root;
	} else {
		blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
		if (!blkcg) {
			ret = ERR_PTR(-ENOMEM);
			goto unlock;
		}
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg_policy_data *cpd;

		/*
		 * If the policy hasn't been attached yet, wait for it
		 * to be attached before doing anything else. Otherwise,
		 * check if the policy requires any specific per-cgroup
		 * data: if it does, allocate and initialize it.
		 */
		if (!pol || !pol->cpd_alloc_fn)
			continue;

		cpd = pol->cpd_alloc_fn(GFP_KERNEL);
		if (!cpd) {
			ret = ERR_PTR(-ENOMEM);
			goto free_pd_blkcg;
		}
		blkcg->cpd[i] = cpd;
		cpd->blkcg = blkcg;
		cpd->plid = i;
		if (pol->cpd_init_fn)
			pol->cpd_init_fn(cpd);
	}

	spin_lock_init(&blkcg->lock);
	refcount_set(&blkcg->online_pin, 1);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
	INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
	INIT_LIST_HEAD(&blkcg->cgwb_list);
#endif
	list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);

	mutex_unlock(&blkcg_pol_mutex);
	return &blkcg->css;

free_pd_blkcg:
	for (i--; i >= 0; i--)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

	if (blkcg != &blkcg_root)
		kfree(blkcg);
unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}

static int blkcg_css_online(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg *parent = blkcg_parent(blkcg);

	/*
	 * blkcg_pin_online() is used to delay blkcg offline so that blkgs
	 * don't go offline while cgwbs are still active on them.  Pin the
	 * parent so that offline always happens towards the root.
	 */
	if (parent)
		blkcg_pin_online(parent);
	return 0;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	struct blkcg_gq *new_blkg, *blkg;
	bool preloaded;
	int ret;

	new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
	if (!new_blkg)
		return -ENOMEM;

	preloaded = !radix_tree_preload(GFP_KERNEL);

	/* Make sure the root blkg exists. */
	rcu_read_lock();
	spin_lock_irq(&q->queue_lock);
	blkg = blkg_create(&blkcg_root, q, new_blkg);
	if (IS_ERR(blkg))
		goto err_unlock;
	q->root_blkg = blkg;
	spin_unlock_irq(&q->queue_lock);
	rcu_read_unlock();

	if (preloaded)
		radix_tree_preload_end();

	ret = blk_ioprio_init(q);
	if (ret)
		goto err_destroy_all;

	ret = blk_throtl_init(q);
	if (ret)
		goto err_destroy_all;

	ret = blk_iolatency_init(q);
	if (ret) {
		blk_throtl_exit(q);
		goto err_destroy_all;
	}

	return 0;

err_destroy_all:
	blkg_destroy_all(q);
	return ret;
err_unlock:
	spin_unlock_irq(&q->queue_lock);
	rcu_read_unlock();
	if (preloaded)
		radix_tree_preload_end();
	return PTR_ERR(blkg);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_exit_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	blkg_destroy_all(q);
	blk_throtl_exit(q);
}

static void blkcg_bind(struct cgroup_subsys_state *root_css)
{
	int i;

	mutex_lock(&blkcg_pol_mutex);

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg *blkcg;

		if (!pol || !pol->cpd_bind_fn)
			continue;

		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node)
			if (blkcg->cpd[pol->plid])
				pol->cpd_bind_fn(blkcg->cpd[pol->plid]);
	}
	mutex_unlock(&blkcg_pol_mutex);
}

static void blkcg_exit(struct task_struct *tsk)
{
	if (tsk->throttle_queue)
		blk_put_queue(tsk->throttle_queue);
	tsk->throttle_queue = NULL;
}

struct cgroup_subsys io_cgrp_subsys = {
	.css_alloc = blkcg_css_alloc,
	.css_online = blkcg_css_online,
	.css_offline = blkcg_css_offline,
	.css_free = blkcg_css_free,
	.css_rstat_flush = blkcg_rstat_flush,
	.bind = blkcg_bind,
	.dfl_cftypes = blkcg_files,
	.legacy_cftypes = blkcg_legacy_files,
	.legacy_name = "blkio",
	.exit = blkcg_exit,
#ifdef CONFIG_MEMCG
	/*
	 * This ensures that, if available, memcg is automatically enabled
	 * together on the default hierarchy so that the owner cgroup can
	 * be retrieved from writeback pages.
	 */
	.depends_on = 1 << memory_cgrp_id,
#endif
};
EXPORT_SYMBOL_GPL(io_cgrp_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.
 * @q goes through bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol)
{
	struct blkg_policy_data *pd_prealloc = NULL;
	struct blkcg_gq *blkg, *pinned_blkg = NULL;
	int ret;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	if (queue_is_mq(q))
		blk_mq_freeze_queue(q);
retry:
	spin_lock_irq(&q->queue_lock);

	/* blkg_list is pushed at the head, reverse walk to allocate parents first */
	list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
		struct blkg_policy_data *pd;

		if (blkg->pd[pol->plid])
			continue;

		/* If prealloc matches, use it; otherwise try GFP_NOWAIT */
		if (blkg == pinned_blkg) {
			pd = pd_prealloc;
			pd_prealloc = NULL;
		} else {
			pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q,
					      blkg->blkcg);
		}

		if (!pd) {
			/*
			 * GFP_NOWAIT failed.  Free the existing one and
			 * prealloc for @blkg w/ GFP_KERNEL.
			 */
			if (pinned_blkg)
				blkg_put(pinned_blkg);
			blkg_get(blkg);
			pinned_blkg = blkg;

			spin_unlock_irq(&q->queue_lock);

			if (pd_prealloc)
				pol->pd_free_fn(pd_prealloc);
			pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q,
						       blkg->blkcg);
			if (pd_prealloc)
				goto retry;
			else
				goto enomem;
		}

		blkg->pd[pol->plid] = pd;
		pd->blkg = blkg;
		pd->plid = pol->plid;
	}

	/* all allocated, init in the same order */
	if (pol->pd_init_fn)
		list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
			pol->pd_init_fn(blkg->pd[pol->plid]);

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;

	spin_unlock_irq(&q->queue_lock);
out:
	if (queue_is_mq(q))
		blk_mq_unfreeze_queue(q);
	if (pinned_blkg)
		blkg_put(pinned_blkg);
	if (pd_prealloc)
		pol->pd_free_fn(pd_prealloc);
	return ret;

enomem:
	/* alloc failed, nothing's initialized yet, free everything */
	spin_lock_irq(&q->queue_lock);
	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		if (blkg->pd[pol->plid]) {
			pol->pd_free_fn(blkg->pd[pol->plid]);
			blkg->pd[pol->plid] = NULL;
		}
		spin_unlock(&blkcg->lock);
	}
	spin_unlock_irq(&q->queue_lock);
	ret = -ENOMEM;
	goto out;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);

/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol)
{
	struct blkcg_gq *blkg;

	if (!blkcg_policy_enabled(q, pol))
		return;

	if (queue_is_mq(q))
		blk_mq_freeze_queue(q);

	spin_lock_irq(&q->queue_lock);

	__clear_bit(pol->plid, q->blkcg_pols);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		if (blkg->pd[pol->plid]) {
			if (pol->pd_offline_fn)
				pol->pd_offline_fn(blkg->pd[pol->plid]);
			pol->pd_free_fn(blkg->pd[pol->plid]);
			blkg->pd[pol->plid] = NULL;
		}
		spin_unlock(&blkcg->lock);
	}

	spin_unlock_irq(&q->queue_lock);

	if (queue_is_mq(q))
		blk_mq_unfreeze_queue(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;
	int i, ret;

	mutex_lock(&blkcg_pol_register_mutex);
	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	ret = -ENOSPC;
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS) {
		pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
		goto err_unlock;
	}

	/* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn come in pairs */
	if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
	    (!pol->pd_alloc_fn ^ !pol->pd_free_fn))
		goto err_unlock;

	/* register @pol */
	pol->plid = i;
	blkcg_policy[pol->plid] = pol;

	/* allocate and install cpd's */
	if (pol->cpd_alloc_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			struct blkcg_policy_data *cpd;

			cpd = pol->cpd_alloc_fn(GFP_KERNEL);
			if (!cpd)
				goto err_free_cpds;

			blkcg->cpd[pol->plid] = cpd;
			cpd->blkcg = blkcg;
			cpd->plid = pol->plid;
			if (pol->cpd_init_fn)
				pol->cpd_init_fn(cpd);
		}
	}

	mutex_unlock(&blkcg_pol_mutex);

	/* everything is in place, add intf files for the new policy */
	if (pol->dfl_cftypes)
		WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
					       pol->dfl_cftypes));
	if (pol->legacy_cftypes)
		WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
						  pol->legacy_cftypes));
	mutex_unlock(&blkcg_pol_register_mutex);
	return 0;

err_free_cpds:
	if (pol->cpd_free_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			if (blkcg->cpd[pol->plid]) {
				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
				blkcg->cpd[pol->plid] = NULL;
			}
		}
	}
	blkcg_policy[pol->plid] = NULL;
err_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	mutex_unlock(&blkcg_pol_register_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);

/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;

	mutex_lock(&blkcg_pol_register_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->dfl_cftypes)
		cgroup_rm_cftypes(pol->dfl_cftypes);
	if (pol->legacy_cftypes)
		cgroup_rm_cftypes(pol->legacy_cftypes);

	/* remove cpds and unregister */
	mutex_lock(&blkcg_pol_mutex);

	if (pol->cpd_free_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			if (blkcg->cpd[pol->plid]) {
				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
				blkcg->cpd[pol->plid] = NULL;
			}
		}
	}
	blkcg_policy[pol->plid] = NULL;

	mutex_unlock(&blkcg_pol_mutex);
out_unlock:
	mutex_unlock(&blkcg_pol_register_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);

bool __blkcg_punt_bio_submit(struct bio *bio)
{
	struct blkcg_gq *blkg = bio->bi_blkg;

	/* consume the flag first */
	bio->bi_opf &= ~REQ_CGROUP_PUNT;

	/* never bounce for the root cgroup */
	if (!blkg->parent)
		return false;

	spin_lock_bh(&blkg->async_bio_lock);
	bio_list_add(&blkg->async_bios, bio);
	spin_unlock_bh(&blkg->async_bio_lock);

	queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work);
	return true;
}

/*
 * Scale the accumulated delay based on how long it has been since we updated
 * the delay.  We only call this when we are adding delay, in case it's been a
 * while since we added delay, and when we are checking to see if we need to
 * delay a task, to account for any delays that may have occurred.
 */
static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
{
	u64 old = atomic64_read(&blkg->delay_start);

	/* negative use_delay means no scaling, see blkcg_set_delay() */
	if (atomic_read(&blkg->use_delay) < 0)
		return;

	/*
	 * We only want to scale down every second.  The idea here is that we
	 * want to delay people for min(delay_nsec, NSEC_PER_SEC) in a certain
	 * time window.  We only want to throttle tasks for recent delay that
	 * has occurred, in 1 second time windows since that's the maximum
	 * things can be throttled.  We save the current delay window in
	 * blkg->last_delay so we know what amount is still left to be charged
	 * to the blkg from this point onward.  blkg->last_use keeps track of
	 * the use_delay counter.  The idea is if we're unthrottling the blkg we
	 * are ok with whatever is happening now, and we can take away more of
	 * the accumulated delay as we've already throttled enough that
	 * everybody is happy with their IO latencies.
	 */
	if (time_before64(old + NSEC_PER_SEC, now) &&
	    atomic64_cmpxchg(&blkg->delay_start, old, now) == old) {
		u64 cur = atomic64_read(&blkg->delay_nsec);
		u64 sub = min_t(u64, blkg->last_delay, now - old);
		int cur_use = atomic_read(&blkg->use_delay);

		/*
		 * We've been unthrottled, subtract a larger chunk of our
		 * accumulated delay.
		 */
		if (cur_use < blkg->last_use)
			sub = max_t(u64, sub, blkg->last_delay >> 1);

		/*
		 * This shouldn't happen, but handle it anyway.  Our delay_nsec
		 * should only ever be growing except here where we subtract out
		 * min(last_delay, 1 second), but lord knows bugs happen and I'd
		 * rather not end up with negative numbers.
		 */
		if (unlikely(cur < sub)) {
			atomic64_set(&blkg->delay_nsec, 0);
			blkg->last_delay = 0;
		} else {
			atomic64_sub(sub, &blkg->delay_nsec);
			blkg->last_delay = cur - sub;
		}
		blkg->last_use = cur_use;
	}
}

/*
 * This is called when we want to actually walk up the hierarchy and check to
 * see if we need to throttle, and then actually throttle if there is some
 * accumulated delay.  This should only be called upon return to user space so
 * we're not holding some lock that would induce a priority inversion.
 */
static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
{
	unsigned long pflags;
	bool clamp;
	u64 now = ktime_to_ns(ktime_get());
	u64 exp;
	u64 delay_nsec = 0;
	int tok;

	while (blkg->parent) {
		int use_delay = atomic_read(&blkg->use_delay);

		if (use_delay) {
			u64 this_delay;

			blkcg_scale_delay(blkg, now);
			this_delay = atomic64_read(&blkg->delay_nsec);
			if (this_delay > delay_nsec) {
				delay_nsec = this_delay;
				clamp = use_delay > 0;
			}
		}
		blkg = blkg->parent;
	}

	if (!delay_nsec)
		return;

	/*
	 * Let's not sleep for all eternity if we've amassed a huge delay.
	 * Swapping or metadata IO can accumulate 10's of seconds worth of
	 * delay, and we want userspace to be able to do _something_ so cap the
	 * delays at 0.25s.  If there's 10's of seconds worth of delay then the
	 * tasks will be delayed for 0.25 second for every syscall.  If
	 * blkcg_set_delay() was used as indicated by negative use_delay, the
	 * caller is responsible for regulating the range.
	 */
	if (clamp)
		delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC);

	if (use_memdelay)
		psi_memstall_enter(&pflags);

	exp = ktime_add_ns(now, delay_nsec);
	tok = io_schedule_prepare();
	do {
		__set_current_state(TASK_KILLABLE);
		if (!schedule_hrtimeout(&exp, HRTIMER_MODE_ABS))
			break;
	} while (!fatal_signal_pending(current));
	io_schedule_finish(tok);

	if (use_memdelay)
		psi_memstall_leave(&pflags);
}

/**
 * blkcg_maybe_throttle_current - throttle the current task if it has been marked
 *
 * This is only called if we've been marked with set_notify_resume().  Obviously
 * we can be set_notify_resume() for reasons other than blkcg throttling, so we
 * check to see if current->throttle_queue is set and if not this doesn't do
 * anything.  This should only ever be called by the resume code, it's not meant
 * to be called by people willy-nilly as it will actually do the work to
 * throttle the task if it is setup for throttling.
 */
void blkcg_maybe_throttle_current(void)
{
	struct request_queue *q = current->throttle_queue;
	struct cgroup_subsys_state *css;
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;
	bool use_memdelay = current->use_memdelay;

	if (!q)
		return;

	current->throttle_queue = NULL;
	current->use_memdelay = false;

	rcu_read_lock();
	css = kthread_blkcg();
	if (css)
		blkcg = css_to_blkcg(css);
	else
		blkcg = css_to_blkcg(task_css(current, io_cgrp_id));

	if (!blkcg)
		goto out;
	blkg = blkg_lookup(blkcg, q);
	if (!blkg)
		goto out;
	if (!blkg_tryget(blkg))
		goto out;
	rcu_read_unlock();

	blkcg_maybe_throttle_blkg(blkg, use_memdelay);
	blkg_put(blkg);
	blk_put_queue(q);
	return;
out:
	rcu_read_unlock();
	blk_put_queue(q);
}

/**
 * blkcg_schedule_throttle - this task needs to check for throttling
 * @q: the request queue IO was submitted on
 * @use_memdelay: do we charge this to memory delay for PSI
 *
 * This is called by the IO controller when we know there's delay accumulated
 * for the blkg for this task.  We do not pass the blkg because there are places
 * we call this that may not have that information, the swapping code for
 * instance will only have a request_queue at that point.  This sets the
 * notify_resume for the task to check and see if it requires throttling before
 * returning to user space.
 *
 * We will only schedule once per syscall.  You can call this over and over
 * again and it will only do the check once upon return to user space, and only
 * throttle once.  If the task needs to be throttled again it'll need to be
 * re-set at the next time we see the task.
 */
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
{
	if (unlikely(current->flags & PF_KTHREAD))
		return;

	if (current->throttle_queue != q) {
		if (!blk_get_queue(q))
			return;

		if (current->throttle_queue)
			blk_put_queue(current->throttle_queue);
		current->throttle_queue = q;
	}

	if (use_memdelay)
		current->use_memdelay = use_memdelay;
	set_notify_resume(current);
}

/**
 * blkcg_add_delay - add delay to this blkg
 * @blkg: blkg of interest
 * @now: the current time in nanoseconds
 * @delta: how many nanoseconds of delay to add
 *
 * Charge @delta to the blkg's current delay accumulation.  This is used to
 * throttle tasks if an IO controller thinks we need more throttling.
 */
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
{
	if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
		return;
	blkcg_scale_delay(blkg, now);
	atomic64_add(delta, &blkg->delay_nsec);
}

/**
 * blkg_tryget_closest - try and get a blkg ref on the closest blkg
 * @bio: target bio
 * @css: target css
 *
 * As the failure mode here is to walk up the blkg tree, this ensures that the
 * blkg->parent pointers are always valid.  This returns the blkg that it ended
 * up taking a reference on or %NULL if no reference was taken.
 */
static inline struct blkcg_gq *blkg_tryget_closest(struct bio *bio,
						   struct cgroup_subsys_state *css)
{
	struct blkcg_gq *blkg, *ret_blkg = NULL;

	rcu_read_lock();
	blkg = blkg_lookup_create(css_to_blkcg(css),
				  bdev_get_queue(bio->bi_bdev));
	while (blkg) {
		if (blkg_tryget(blkg)) {
			ret_blkg = blkg;
			break;
		}
		blkg = blkg->parent;
	}
	rcu_read_unlock();

	return ret_blkg;
}

/**
 * bio_associate_blkg_from_css - associate a bio with a specified css
 * @bio: target bio
 * @css: target css
 *
 * Associate @bio with the blkg found by combining the css's blkg and the
 * request_queue of the @bio.  An association failure is handled by walking up
 * the blkg tree.  Therefore, the blkg associated can be anything between @blkg
 * and q->root_blkg.  This situation only happens when a cgroup is dying and
 * then the remaining bios will spill to the closest alive blkg.
 *
 * A reference will be taken on the blkg and will be released when @bio is
 * freed.
 */
void bio_associate_blkg_from_css(struct bio *bio,
				 struct cgroup_subsys_state *css)
{
	if (bio->bi_blkg)
		blkg_put(bio->bi_blkg);

	if (css && css->parent) {
		bio->bi_blkg = blkg_tryget_closest(bio, css);
	} else {
		blkg_get(bdev_get_queue(bio->bi_bdev)->root_blkg);
		bio->bi_blkg = bdev_get_queue(bio->bi_bdev)->root_blkg;
	}
}
EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);

/**
 * bio_associate_blkg - associate a bio with a blkg
 * @bio: target bio
 *
 * Associate @bio with the blkg found from the bio's css and request_queue.
 * If one is not found, bio_lookup_blkg() creates the blkg.  If a blkg is
 * already associated, the css is reused and association redone as the
 * request_queue may have changed.
 */
void bio_associate_blkg(struct bio *bio)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();

	if (bio->bi_blkg)
		css = &bio_blkcg(bio)->css;
	else
		css = blkcg_css();

	bio_associate_blkg_from_css(bio, css);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(bio_associate_blkg);

/**
 * bio_clone_blkg_association - clone blkg association from src to dst bio
 * @dst: destination bio
 * @src: source bio
 */
void bio_clone_blkg_association(struct bio *dst, struct bio *src)
{
	if (src->bi_blkg) {
		if (dst->bi_blkg)
			blkg_put(dst->bi_blkg);
		blkg_get(src->bi_blkg);
		dst->bi_blkg = src->bi_blkg;
	}
}
EXPORT_SYMBOL_GPL(bio_clone_blkg_association);

static int blk_cgroup_io_type(struct bio *bio)
{
	if (op_is_discard(bio->bi_opf))
		return BLKG_IOSTAT_DISCARD;
	if (op_is_write(bio->bi_opf))
		return BLKG_IOSTAT_WRITE;
	return BLKG_IOSTAT_READ;
}

void blk_cgroup_bio_start(struct bio *bio)
{
	int rwd = blk_cgroup_io_type(bio), cpu;
	struct blkg_iostat_set *bis;
	unsigned long flags;

	cpu = get_cpu();
	bis = per_cpu_ptr(bio->bi_blkg->iostat_cpu, cpu);
	flags = u64_stats_update_begin_irqsave(&bis->sync);

	/*
	 * If the bio is flagged with BIO_CGROUP_ACCT it means this is a split
	 * bio and we would have already accounted for the size of the bio.
	 */
	if (!bio_flagged(bio, BIO_CGROUP_ACCT)) {
		bio_set_flag(bio, BIO_CGROUP_ACCT);
		bis->cur.bytes[rwd] += bio->bi_iter.bi_size;
	}
	bis->cur.ios[rwd]++;

	u64_stats_update_end_irqrestore(&bis->sync, flags);
	if (cgroup_subsys_on_dfl(io_cgrp_subsys))
		cgroup_rstat_updated(bio->bi_blkg->blkcg->css.cgroup, cpu);
	put_cpu();
}

static int __init blkcg_init(void)
{
	blkcg_punt_bio_wq = alloc_workqueue("blkcg_punt_bio",
					    WQ_MEM_RECLAIM | WQ_FREEZABLE |
					    WQ_UNBOUND | WQ_SYSFS, 0);
	if (!blkcg_punt_bio_wq)
		return -ENOMEM;
	return 0;
}
subsys_initcall(blkcg_init);

module_param(blkcg_debug_stats, bool, 0644);
MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");