// SPDX-License-Identifier: GPL-2.0
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 *
 * For policy-specific per-blkcg data:
 * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
 *		      Arianna Avanzini <avanzini.arianna@gmail.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/blk-cgroup.h>
#include <linux/tracehook.h>
#include <linux/psi.h>
#include "blk.h"
#include "blk-ioprio.h"
#include "blk-throttle.h"

/*
 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
 * policy [un]register operations including cgroup file additions /
 * removals.  Putting cgroup file registration outside blkcg_pol_mutex
 * allows grabbing it from cgroup callbacks.
 */
static DEFINE_MUTEX(blkcg_pol_register_mutex);
static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root;
EXPORT_SYMBOL_GPL(blkcg_root);

struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
EXPORT_SYMBOL_GPL(blkcg_root_css);

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static LIST_HEAD(all_blkcgs);		/* protected by blkcg_pol_mutex */

bool blkcg_debug_stats = false;
static struct workqueue_struct *blkcg_punt_bio_wq;

#define BLKG_DESTROY_BATCH_SIZE  64

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkg->pd[i])
			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);

	free_percpu(blkg->iostat_cpu);
	percpu_ref_exit(&blkg->refcnt);
	kfree(blkg);
}

static void __blkg_release(struct rcu_head *rcu)
{
	struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);

	WARN_ON(!bio_list_empty(&blkg->async_bios));

	/* release the blkcg and parent blkg refs this blkg has been holding */
	css_put(&blkg->blkcg->css);
	if (blkg->parent)
		blkg_put(blkg->parent);
	blkg_free(blkg);
}
/*
 * A group is RCU protected, but having an rcu lock does not mean that one
 * can access all the fields of blkg and assume these are valid.  For
 * example, don't try to follow throtl_data and request queue links.
 *
 * Having a reference to blkg under an rcu allows accesses to only values
 * local to groups like group stats and group rate limits.
 */
static void blkg_release(struct percpu_ref *ref)
{
	struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);

	call_rcu(&blkg->rcu_head, __blkg_release);
}

static void blkg_async_bio_workfn(struct work_struct *work)
{
	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
					     async_bio_work);
	struct bio_list bios = BIO_EMPTY_LIST;
	struct bio *bio;
	struct blk_plug plug;
	bool need_plug = false;

	/* as long as there are pending bios, @blkg can't go away */
	spin_lock_bh(&blkg->async_bio_lock);
	bio_list_merge(&bios, &blkg->async_bios);
	bio_list_init(&blkg->async_bios);
	spin_unlock_bh(&blkg->async_bio_lock);

	/* start plug only when bio_list contains at least 2 bios */
	if (bios.head && bios.head->bi_next) {
		need_plug = true;
		blk_start_plug(&plug);
	}
	while ((bio = bio_list_pop(&bios)))
		submit_bio(bio);
	if (need_plug)
		blk_finish_plug(&plug);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i, cpu;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
	if (!blkg)
		return NULL;

	if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask))
		goto err_free;

	blkg->iostat_cpu = alloc_percpu_gfp(struct blkg_iostat_set, gfp_mask);
	if (!blkg->iostat_cpu)
		goto err_free;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	spin_lock_init(&blkg->async_bio_lock);
	bio_list_init(&blkg->async_bios);
	INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn);
	blkg->blkcg = blkcg;

	u64_stats_init(&blkg->iostat.sync);
	for_each_possible_cpu(cpu)
		u64_stats_init(&per_cpu_ptr(blkg->iostat_cpu, cpu)->sync);

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = pol->pd_alloc_fn(gfp_mask, q, blkcg);
		if (!pd)
			goto err_free;

		blkg->pd[i] = pd;
		pd->blkg = blkg;
		pd->plid = i;
	}

	return blkg;

err_free:
	blkg_free(blkg);
	return NULL;
}

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint)
{
	struct blkcg_gq *blkg;

	/*
	 * Hint didn't match.  Look up from the radix tree.  Note that the
	 * hint can only be updated under queue_lock as otherwise @blkg
	 * could have already been removed from blkg_tree.  The caller is
	 * responsible for grabbing queue_lock if @update_hint.
	 */
	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q == q) {
		if (update_hint) {
			lockdep_assert_held(&q->queue_lock);
			rcu_assign_pointer(blkcg->blkg_hint, blkg);
		}
		return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);
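
/*
 * Example: how a policy supplies the per-blkg data that blkg_alloc() above
 * attaches through pd_alloc_fn()/pd_free_fn().  This is an illustrative
 * sketch only; the "example" struct and helpers are hypothetical and not
 * part of this file.  Embedding the blkg_policy_data and recovering the
 * outer struct with container_of() is the pattern the real policies use.
 *
 *	struct example_pd {
 *		struct blkg_policy_data pd;
 *		u64 ios_seen;
 *	};
 *
 *	static struct blkg_policy_data *
 *	example_pd_alloc(gfp_t gfp, struct request_queue *q, struct blkcg *blkcg)
 *	{
 *		struct example_pd *epd = kzalloc_node(sizeof(*epd), gfp, q->node);
 *
 *		return epd ? &epd->pd : NULL;
 *	}
 *
 *	static void example_pd_free(struct blkg_policy_data *pd)
 *	{
 *		kfree(container_of(pd, struct example_pd, pd));
 *	}
 */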
/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
				    struct request_queue *q,
				    struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	int i, ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(&q->queue_lock);

	/* request_queue is dying, do not create/recreate a blkg */
	if (blk_queue_dying(q)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	/* blkg holds a reference to blkcg */
	if (!css_tryget_online(&blkcg->css)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto err_put_css;
		}
	}
	blkg = new_blkg;

	/* link parent */
	if (blkcg_parent(blkcg)) {
		blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
		if (WARN_ON_ONCE(!blkg->parent)) {
			ret = -ENODEV;
			goto err_put_css;
		}
		blkg_get(blkg->parent);
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_init_fn)
			pol->pd_init_fn(blkg->pd[i]);
	}

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &q->blkg_list);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_online_fn)
				pol->pd_online_fn(blkg->pd[i]);
		}
	}
	blkg->online = true;
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	/* @blkg failed to be fully initialized, use the usual release path */
	blkg_put(blkg);
	return ERR_PTR(ret);

err_put_css:
	css_put(&blkcg->css);
err_free_blkg:
	blkg_free(new_blkg);
	return ERR_PTR(ret);
}
/**
 * blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
 * create one.  blkg creation is performed recursively from blkcg_root such
 * that all non-root blkg's have access to the parent blkg.  This function
 * should be called under RCU read lock and takes @q->queue_lock.
 *
 * Returns the blkg or the closest blkg if blkg_create() fails as it walks
 * down from root.
 */
static struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
		struct request_queue *q)
{
	struct blkcg_gq *blkg;
	unsigned long flags;

	WARN_ON_ONCE(!rcu_read_lock_held());

	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		return blkg;

	spin_lock_irqsave(&q->queue_lock, flags);
	blkg = __blkg_lookup(blkcg, q, true);
	if (blkg)
		goto found;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.  Returns the closest
	 * blkg to the intended blkg should blkg_create() fail.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent = blkcg_parent(blkcg);
		struct blkcg_gq *ret_blkg = q->root_blkg;

		while (parent) {
			blkg = __blkg_lookup(parent, q, false);
			if (blkg) {
				/* remember closest blkg */
				ret_blkg = blkg;
				break;
			}
			pos = parent;
			parent = blkcg_parent(parent);
		}

		blkg = blkg_create(pos, q, NULL);
		if (IS_ERR(blkg)) {
			blkg = ret_blkg;
			break;
		}
		if (pos == blkcg)
			break;
	}

found:
	spin_unlock_irqrestore(&q->queue_lock, flags);
	return blkg;
}

static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;
	int i;

	lockdep_assert_held(&blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove the same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_offline_fn)
			pol->pd_offline_fn(blkg->pd[i]);
	}

	blkg->online = false;

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock.  If it's not pointing to @blkg now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	percpu_ref_kill(&blkg->refcnt);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkcg_gq *blkg, *n;
	int count = BLKG_DESTROY_BATCH_SIZE;

restart:
	spin_lock_irq(&q->queue_lock);
	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);

		/*
		 * In order to avoid holding the spin lock for too long,
		 * release it when a batch of blkgs has been destroyed.
		 */
		if (!(--count)) {
			count = BLKG_DESTROY_BATCH_SIZE;
			spin_unlock_irq(&q->queue_lock);
			cond_resched();
			goto restart;
		}
	}

	q->root_blkg = NULL;
	spin_unlock_irq(&q->queue_lock);
}
static int blkcg_reset_stats(struct cgroup_subsys_state *css,
			     struct cftype *cftype, u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;
	int i, cpu;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		for_each_possible_cpu(cpu) {
			struct blkg_iostat_set *bis =
				per_cpu_ptr(blkg->iostat_cpu, cpu);
			memset(bis, 0, sizeof(*bis));
		}
		memset(&blkg->iostat, 0, sizeof(blkg->iostat));

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg->pd[i]);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	if (!blkg->q->disk || !blkg->q->disk->bdi->dev)
		return NULL;
	return bdi_dev_name(blkg->q->disk->bdi);
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data and the matching queue lock held.  If @show_total
 * is %true, the sum of the return values from @prfill is printed with
 * "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	u64 total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		spin_lock_irq(&blkg->q->queue_lock);
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
		spin_unlock_irq(&blkg->q->queue_lock);
	}
	rcu_read_unlock();

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
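
/*
 * Example: a cftype seq_show handler built from the two helpers above.
 * Illustrative sketch only; "example_policy", "struct example_pd" and the
 * offset-based prfill are hypothetical.  @data carries the field offset so
 * one prfill can print any u64 member of the policy data.
 *
 *	static u64 example_prfill(struct seq_file *sf,
 *				  struct blkg_policy_data *pd, int off)
 *	{
 *		u64 v = *(u64 *)((void *)pd + off);
 *
 *		return __blkg_prfill_u64(sf, pd, v);
 *	}
 *
 *	static int example_show(struct seq_file *sf, void *v)
 *	{
 *		blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 *				  example_prfill, &example_policy,
 *				  offsetof(struct example_pd, ios_seen), true);
 *		return 0;
 *	}
 */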
/* Performs queue bypass and policy enabled checks then looks up blkg. */
static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
					  const struct blkcg_policy *pol,
					  struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(&q->queue_lock);

	if (!blkcg_policy_enabled(q, pol))
		return ERR_PTR(-EOPNOTSUPP);
	return __blkg_lookup(blkcg, q, true /* update_hint */);
}

/**
 * blkcg_conf_open_bdev - parse and open bdev for per-blkg config update
 * @inputp: input string pointer
 *
 * Parse the device node prefix part, MAJ:MIN, of per-blkg config update
 * from @input and get and return the matching bdev.  *@inputp is
 * updated to point past the device node prefix.  Returns an ERR_PTR()
 * value on error.
 *
 * Use this function iff blkg_conf_prep() can't be used for some reason.
 */
struct block_device *blkcg_conf_open_bdev(char **inputp)
{
	char *input = *inputp;
	unsigned int major, minor;
	struct block_device *bdev;
	int key_len;

	if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
		return ERR_PTR(-EINVAL);

	input += key_len;
	if (!isspace(*input))
		return ERR_PTR(-EINVAL);
	input = skip_spaces(input);

	bdev = blkdev_get_no_open(MKDEV(major, minor));
	if (!bdev)
		return ERR_PTR(-ENODEV);
	if (bdev_is_partition(bdev)) {
		blkdev_put_no_open(bdev);
		return ERR_PTR(-ENODEV);
	}

	*inputp = input;
	return bdev;
}
/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->body the
 * part of @input following MAJ:MIN.  This function returns with RCU read
 * lock and queue lock held and must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(&bdev->bd_queue->queue_lock)
{
	struct block_device *bdev;
	struct request_queue *q;
	struct blkcg_gq *blkg;
	int ret;

	bdev = blkcg_conf_open_bdev(&input);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	q = bdev_get_queue(bdev);

	/*
	 * blkcg_deactivate_policy() requires queue to be frozen, we can grab
	 * q_usage_counter to prevent concurrency with
	 * blkcg_deactivate_policy().
	 */
	ret = blk_queue_enter(q, 0);
	if (ret)
		goto fail;

	rcu_read_lock();
	spin_lock_irq(&q->queue_lock);

	blkg = blkg_lookup_check(blkcg, pol, q);
	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto fail_unlock;
	}

	if (blkg)
		goto success;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent;
		struct blkcg_gq *new_blkg;

		parent = blkcg_parent(blkcg);
		while (parent && !__blkg_lookup(parent, q, false)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		/* Drop locks to do new blkg allocation with GFP_KERNEL. */
		spin_unlock_irq(&q->queue_lock);
		rcu_read_unlock();

		new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto fail_exit_queue;
		}

		if (radix_tree_preload(GFP_KERNEL)) {
			blkg_free(new_blkg);
			ret = -ENOMEM;
			goto fail_exit_queue;
		}

		rcu_read_lock();
		spin_lock_irq(&q->queue_lock);

		blkg = blkg_lookup_check(pos, pol, q);
		if (IS_ERR(blkg)) {
			ret = PTR_ERR(blkg);
			blkg_free(new_blkg);
			goto fail_preloaded;
		}

		if (blkg) {
			blkg_free(new_blkg);
		} else {
			blkg = blkg_create(pos, q, new_blkg);
			if (IS_ERR(blkg)) {
				ret = PTR_ERR(blkg);
				goto fail_preloaded;
			}
		}

		radix_tree_preload_end();

		if (pos == blkcg)
			goto success;
	}
success:
	blk_queue_exit(q);
	ctx->bdev = bdev;
	ctx->blkg = blkg;
	ctx->body = input;
	return 0;

fail_preloaded:
	radix_tree_preload_end();
fail_unlock:
	spin_unlock_irq(&q->queue_lock);
	rcu_read_unlock();
fail_exit_queue:
	blk_queue_exit(q);
fail:
	blkdev_put_no_open(bdev);
	/*
	 * If queue was bypassing, we should retry.  Do so after a
	 * short msleep().  It isn't strictly necessary but queue
	 * can be bypassing for some time and it's always nice to
	 * avoid busy looping.
	 */
	if (ret == -EBUSY) {
		msleep(10);
		ret = restart_syscall();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(&ctx->bdev->bd_queue->queue_lock) __releases(rcu)
{
	spin_unlock_irq(&bdev_get_queue(ctx->bdev)->queue_lock);
	rcu_read_unlock();
	blkdev_put_no_open(ctx->bdev);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
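
/*
 * Example: the usual shape of a policy's cftype write handler built on
 * blkg_conf_prep()/blkg_conf_finish().  Illustrative sketch only; the
 * "example" names are hypothetical.  The input is "MAJ:MIN <value>" and
 * ctx.body points at <value> on successful prep.
 *
 *	static ssize_t example_set_limit(struct kernfs_open_file *of,
 *					 char *buf, size_t nbytes, loff_t off)
 *	{
 *		struct blkcg *blkcg = css_to_blkcg(of_css(of));
 *		struct blkg_conf_ctx ctx;
 *		u64 limit;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &example_policy, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		ret = sscanf(ctx.body, "%llu", &limit) == 1 ? 0 : -EINVAL;
 *		if (!ret)
 *			example_pd_from_blkg(ctx.blkg)->limit = limit;
 *
 *		blkg_conf_finish(&ctx);
 *		return ret ?: nbytes;
 *	}
 */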
static void blkg_iostat_set(struct blkg_iostat *dst, struct blkg_iostat *src)
{
	int i;

	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
		dst->bytes[i] = src->bytes[i];
		dst->ios[i] = src->ios[i];
	}
}

static void blkg_iostat_add(struct blkg_iostat *dst, struct blkg_iostat *src)
{
	int i;

	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
		dst->bytes[i] += src->bytes[i];
		dst->ios[i] += src->ios[i];
	}
}

static void blkg_iostat_sub(struct blkg_iostat *dst, struct blkg_iostat *src)
{
	int i;

	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
		dst->bytes[i] -= src->bytes[i];
		dst->ios[i] -= src->ios[i];
	}
}

static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;

	/* Root-level stats are sourced from system-wide IO stats */
	if (!cgroup_parent(css->cgroup))
		return;

	rcu_read_lock();

	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		struct blkcg_gq *parent = blkg->parent;
		struct blkg_iostat_set *bisc = per_cpu_ptr(blkg->iostat_cpu, cpu);
		struct blkg_iostat cur, delta;
		unsigned long flags;
		unsigned int seq;

		/* fetch the current per-cpu values */
		do {
			seq = u64_stats_fetch_begin(&bisc->sync);
			blkg_iostat_set(&cur, &bisc->cur);
		} while (u64_stats_fetch_retry(&bisc->sync, seq));

		/* propagate percpu delta to global */
		flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
		blkg_iostat_set(&delta, &cur);
		blkg_iostat_sub(&delta, &bisc->last);
		blkg_iostat_add(&blkg->iostat.cur, &delta);
		blkg_iostat_add(&bisc->last, &delta);
		u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);

		/* propagate global delta to parent (unless that's root) */
		if (parent && parent->parent) {
			flags = u64_stats_update_begin_irqsave(&parent->iostat.sync);
			blkg_iostat_set(&delta, &blkg->iostat.cur);
			blkg_iostat_sub(&delta, &blkg->iostat.last);
			blkg_iostat_add(&parent->iostat.cur, &delta);
			blkg_iostat_add(&blkg->iostat.last, &delta);
			u64_stats_update_end_irqrestore(&parent->iostat.sync, flags);
		}
	}

	rcu_read_unlock();
}
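
/*
 * A concrete walk-through of the flush arithmetic above, with made-up
 * numbers: if a CPU's percpu counter reads cur=100 while bisc->last=60,
 * the percpu delta is 40, so blkg->iostat.cur grows by 40 and bisc->last
 * advances to 100.  On the parent pass, blkg->iostat.cur minus
 * blkg->iostat.last is the amount not yet pushed upward; it is added to
 * the parent's cur and blkg->iostat.last is advanced by the same amount.
 * Each unit of IO is therefore propagated exactly once per level.
 */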
/*
 * We source root cgroup stats from the system-wide stats to avoid
 * tracking the same information twice and incurring overhead when no
 * cgroups are defined.  For that reason, cgroup_rstat_flush in
 * blkcg_print_stat does not actually fill out the iostat in the root
 * cgroup's blkcg_gq.
 *
 * However, we would like to re-use the printing code between the root and
 * non-root cgroups to the extent possible.  For that reason, we simulate
 * flushing the root cgroup's stats by explicitly filling in the iostat
 * with disk level statistics.
 */
static void blkcg_fill_root_iostats(void)
{
	struct class_dev_iter iter;
	struct device *dev;

	class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
	while ((dev = class_dev_iter_next(&iter))) {
		struct block_device *bdev = dev_to_bdev(dev);
		struct blkcg_gq *blkg =
			blk_queue_root_blkg(bdev_get_queue(bdev));
		struct blkg_iostat tmp;
		int cpu;

		memset(&tmp, 0, sizeof(tmp));
		for_each_possible_cpu(cpu) {
			struct disk_stats *cpu_dkstats;
			unsigned long flags;

			cpu_dkstats = per_cpu_ptr(bdev->bd_stats, cpu);
			tmp.ios[BLKG_IOSTAT_READ] +=
				cpu_dkstats->ios[STAT_READ];
			tmp.ios[BLKG_IOSTAT_WRITE] +=
				cpu_dkstats->ios[STAT_WRITE];
			tmp.ios[BLKG_IOSTAT_DISCARD] +=
				cpu_dkstats->ios[STAT_DISCARD];
			/* convert sectors to bytes */
			tmp.bytes[BLKG_IOSTAT_READ] +=
				cpu_dkstats->sectors[STAT_READ] << 9;
			tmp.bytes[BLKG_IOSTAT_WRITE] +=
				cpu_dkstats->sectors[STAT_WRITE] << 9;
			tmp.bytes[BLKG_IOSTAT_DISCARD] +=
				cpu_dkstats->sectors[STAT_DISCARD] << 9;

			flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
			blkg_iostat_set(&blkg->iostat.cur, &tmp);
			u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
		}
	}
}

static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s)
{
	struct blkg_iostat_set *bis = &blkg->iostat;
	u64 rbytes, wbytes, rios, wios, dbytes, dios;
	bool has_stats = false;
	const char *dname;
	unsigned seq;
	int i;

	if (!blkg->online)
		return;

	dname = blkg_dev_name(blkg);
	if (!dname)
		return;

	seq_printf(s, "%s ", dname);

	do {
		seq = u64_stats_fetch_begin(&bis->sync);

		rbytes = bis->cur.bytes[BLKG_IOSTAT_READ];
		wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE];
		dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD];
		rios = bis->cur.ios[BLKG_IOSTAT_READ];
		wios = bis->cur.ios[BLKG_IOSTAT_WRITE];
		dios = bis->cur.ios[BLKG_IOSTAT_DISCARD];
	} while (u64_stats_fetch_retry(&bis->sync, seq));

	if (rbytes || wbytes || rios || wios) {
		has_stats = true;
		seq_printf(s, "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
			rbytes, wbytes, rios, wios, dbytes, dios);
	}

	if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
		has_stats = true;
		seq_printf(s, " use_delay=%d delay_nsec=%llu",
			atomic_read(&blkg->use_delay),
			atomic64_read(&blkg->delay_nsec));
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (!blkg->pd[i] || !pol->pd_stat_fn)
			continue;

		if (pol->pd_stat_fn(blkg->pd[i], s))
			has_stats = true;
	}

	if (has_stats)
		seq_printf(s, "\n");
}

static int blkcg_print_stat(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct blkcg_gq *blkg;

	if (!seq_css(sf)->parent)
		blkcg_fill_root_iostats();
	else
		cgroup_rstat_flush(blkcg->css.cgroup);

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		spin_lock_irq(&blkg->q->queue_lock);
		blkcg_print_one_stat(blkg, sf);
		spin_unlock_irq(&blkg->q->queue_lock);
	}
	rcu_read_unlock();
	return 0;
}

static struct cftype blkcg_files[] = {
	{
		.name = "stat",
		.seq_show = blkcg_print_stat,
	},
	{ }	/* terminate */
};

static struct cftype blkcg_legacy_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};
/*
 * blkcg destruction is a three-stage process.
 *
 * 1. Destruction starts.  The blkcg_css_offline() callback is invoked
 *    which offlines writeback.  Here we tie the next stage of blkg destruction
 *    to the completion of writeback associated with the blkcg.  This lets us
 *    avoid punting potentially large amounts of outstanding writeback to root
 *    while maintaining any ongoing policies.  The next stage is triggered when
 *    the nr_cgwbs count goes to zero.
 *
 * 2. When the nr_cgwbs count goes to zero, blkcg_destroy_blkgs() is called
 *    and handles the destruction of blkgs.  Here the css reference held by
 *    the blkg is put back eventually allowing blkcg_css_free() to be called.
 *    This work may occur in cgwb_release_workfn() on the cgwb_release
 *    workqueue.  Any submitted ios that fail to get the blkg ref will be
 *    punted to the root_blkg.
 *
 * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called.
 *    This finally frees the blkcg.
 */

/**
 * blkcg_css_offline - cgroup css_offline callback
 * @css: css of interest
 *
 * This function is called when @css is about to go away.  Here the cgwbs are
 * offlined first and only once writeback associated with the blkcg has
 * finished do we start step 2 (see above).
 */
static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);

	/* this prevents anyone from attaching or migrating to this blkcg */
	wb_blkcg_offline(blkcg);

	/* put the base online pin allowing step 2 to be triggered */
	blkcg_unpin_online(blkcg);
}

/**
 * blkcg_destroy_blkgs - responsible for shooting down blkgs
 * @blkcg: blkcg of interest
 *
 * blkgs should be removed while holding both q and blkcg locks.  As blkcg lock
 * is nested inside q lock, this function performs reverse double lock dancing.
 * Destroying the blkgs releases the reference held on the blkcg's css allowing
 * blkcg_css_free to eventually be called.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
void blkcg_destroy_blkgs(struct blkcg *blkcg)
{
	might_sleep();

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		if (need_resched() || !spin_trylock(&q->queue_lock)) {
			/*
			 * Given that the system can accumulate a huge number
			 * of blkgs in pathological cases, check to see if we
			 * need to reschedule to avoid softlockup.
			 */
			spin_unlock_irq(&blkcg->lock);
			cond_resched();
			spin_lock_irq(&blkcg->lock);
			continue;
		}

		blkg_destroy(blkg);
		spin_unlock(&q->queue_lock);
	}

	spin_unlock_irq(&blkcg->lock);
}
static void blkcg_css_free(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	int i;

	mutex_lock(&blkcg_pol_mutex);

	list_del(&blkcg->all_blkcgs_node);

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

	mutex_unlock(&blkcg_pol_mutex);

	kfree(blkcg);
}

static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct blkcg *blkcg;
	struct cgroup_subsys_state *ret;
	int i;

	mutex_lock(&blkcg_pol_mutex);

	if (!parent_css) {
		blkcg = &blkcg_root;
	} else {
		blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
		if (!blkcg) {
			ret = ERR_PTR(-ENOMEM);
			goto unlock;
		}
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg_policy_data *cpd;

		/*
		 * If the policy hasn't been attached yet, wait for it
		 * to be attached before doing anything else.  Otherwise,
		 * check if the policy requires any specific per-cgroup
		 * data: if it does, allocate and initialize it.
		 */
		if (!pol || !pol->cpd_alloc_fn)
			continue;

		cpd = pol->cpd_alloc_fn(GFP_KERNEL);
		if (!cpd) {
			ret = ERR_PTR(-ENOMEM);
			goto free_pd_blkcg;
		}
		blkcg->cpd[i] = cpd;
		cpd->blkcg = blkcg;
		cpd->plid = i;
		if (pol->cpd_init_fn)
			pol->cpd_init_fn(cpd);
	}

	spin_lock_init(&blkcg->lock);
	refcount_set(&blkcg->online_pin, 1);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
	INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
	INIT_LIST_HEAD(&blkcg->cgwb_list);
#endif
	list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);

	mutex_unlock(&blkcg_pol_mutex);
	return &blkcg->css;

free_pd_blkcg:
	for (i--; i >= 0; i--)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

	if (blkcg != &blkcg_root)
		kfree(blkcg);
unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}

static int blkcg_css_online(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg *parent = blkcg_parent(blkcg);

	/*
	 * blkcg_pin_online() is used to delay blkcg offline so that blkgs
	 * don't go offline while cgwbs are still active on them.  Pin the
	 * parent so that offline always happens towards the root.
	 */
	if (parent)
		blkcg_pin_online(parent);
	return 0;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue().  Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	struct blkcg_gq *new_blkg, *blkg;
	bool preloaded;
	int ret;

	new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
	if (!new_blkg)
		return -ENOMEM;

	preloaded = !radix_tree_preload(GFP_KERNEL);

	/* Make sure the root blkg exists. */
	rcu_read_lock();
	spin_lock_irq(&q->queue_lock);
	blkg = blkg_create(&blkcg_root, q, new_blkg);
	if (IS_ERR(blkg))
		goto err_unlock;
	q->root_blkg = blkg;
	spin_unlock_irq(&q->queue_lock);
	rcu_read_unlock();

	if (preloaded)
		radix_tree_preload_end();

	ret = blk_ioprio_init(q);
	if (ret)
		goto err_destroy_all;

	ret = blk_throtl_init(q);
	if (ret)
		goto err_destroy_all;

	ret = blk_iolatency_init(q);
	if (ret) {
		blk_throtl_exit(q);
		goto err_destroy_all;
	}

	return 0;

err_destroy_all:
	blkg_destroy_all(q);
	return ret;
err_unlock:
	spin_unlock_irq(&q->queue_lock);
	rcu_read_unlock();
	if (preloaded)
		radix_tree_preload_end();
	return PTR_ERR(blkg);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_exit_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	blkg_destroy_all(q);
	blk_throtl_exit(q);
}
static void blkcg_bind(struct cgroup_subsys_state *root_css)
{
	int i;

	mutex_lock(&blkcg_pol_mutex);

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg *blkcg;

		if (!pol || !pol->cpd_bind_fn)
			continue;

		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node)
			if (blkcg->cpd[pol->plid])
				pol->cpd_bind_fn(blkcg->cpd[pol->plid]);
	}
	mutex_unlock(&blkcg_pol_mutex);
}

static void blkcg_exit(struct task_struct *tsk)
{
	if (tsk->throttle_queue)
		blk_put_queue(tsk->throttle_queue);
	tsk->throttle_queue = NULL;
}

struct cgroup_subsys io_cgrp_subsys = {
	.css_alloc = blkcg_css_alloc,
	.css_online = blkcg_css_online,
	.css_offline = blkcg_css_offline,
	.css_free = blkcg_css_free,
	.css_rstat_flush = blkcg_rstat_flush,
	.bind = blkcg_bind,
	.dfl_cftypes = blkcg_files,
	.legacy_cftypes = blkcg_legacy_files,
	.legacy_name = "blkio",
	.exit = blkcg_exit,
#ifdef CONFIG_MEMCG
	/*
	 * This ensures that, if available, memcg is automatically enabled
	 * together on the default hierarchy so that the owner cgroup can
	 * be retrieved from writeback pages.
	 */
	.depends_on = 1 << memory_cgrp_id,
#endif
};
EXPORT_SYMBOL_GPL(io_cgrp_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol)
{
	struct blkg_policy_data *pd_prealloc = NULL;
	struct blkcg_gq *blkg, *pinned_blkg = NULL;
	int ret;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	if (queue_is_mq(q))
		blk_mq_freeze_queue(q);
retry:
	spin_lock_irq(&q->queue_lock);

	/* blkg_list is pushed at the head, reverse walk to allocate parents first */
	list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
		struct blkg_policy_data *pd;

		if (blkg->pd[pol->plid])
			continue;

		/* If prealloc matches, use it; otherwise try GFP_NOWAIT */
		if (blkg == pinned_blkg) {
			pd = pd_prealloc;
			pd_prealloc = NULL;
		} else {
			pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q,
					      blkg->blkcg);
		}

		if (!pd) {
			/*
			 * GFP_NOWAIT failed.  Free the existing one and
			 * prealloc for @blkg w/ GFP_KERNEL.
			 */
			if (pinned_blkg)
				blkg_put(pinned_blkg);
			blkg_get(blkg);
			pinned_blkg = blkg;

			spin_unlock_irq(&q->queue_lock);

			if (pd_prealloc)
				pol->pd_free_fn(pd_prealloc);
			pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q,
						       blkg->blkcg);
			if (pd_prealloc)
				goto retry;
			else
				goto enomem;
		}

		blkg->pd[pol->plid] = pd;
		pd->blkg = blkg;
		pd->plid = pol->plid;
	}

	/* all allocated, init in the same order */
	if (pol->pd_init_fn)
		list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
			pol->pd_init_fn(blkg->pd[pol->plid]);

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;

	spin_unlock_irq(&q->queue_lock);
out:
	if (queue_is_mq(q))
		blk_mq_unfreeze_queue(q);
	if (pinned_blkg)
		blkg_put(pinned_blkg);
	if (pd_prealloc)
		pol->pd_free_fn(pd_prealloc);
	return ret;

enomem:
	/* alloc failed, nothing's initialized yet, free everything */
	spin_lock_irq(&q->queue_lock);
	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		if (blkg->pd[pol->plid]) {
			pol->pd_free_fn(blkg->pd[pol->plid]);
			blkg->pd[pol->plid] = NULL;
		}
		spin_unlock(&blkcg->lock);
	}
	spin_unlock_irq(&q->queue_lock);
	ret = -ENOMEM;
	goto out;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);

/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol)
{
	struct blkcg_gq *blkg;

	if (!blkcg_policy_enabled(q, pol))
		return;

	if (queue_is_mq(q))
		blk_mq_freeze_queue(q);

	spin_lock_irq(&q->queue_lock);

	__clear_bit(pol->plid, q->blkcg_pols);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		if (blkg->pd[pol->plid]) {
			if (pol->pd_offline_fn)
				pol->pd_offline_fn(blkg->pd[pol->plid]);
			pol->pd_free_fn(blkg->pd[pol->plid]);
			blkg->pd[pol->plid] = NULL;
		}
		spin_unlock(&blkcg->lock);
	}

	spin_unlock_irq(&q->queue_lock);

	if (queue_is_mq(q))
		blk_mq_unfreeze_queue(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
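
/*
 * Example: activation and deactivation are typically paired with a
 * controller's per-queue init/exit path.  Illustrative sketch only;
 * "example_policy" and the hooks are hypothetical.
 *
 *	static int example_init_queue(struct request_queue *q)
 *	{
 *		return blkcg_activate_policy(q, &example_policy);
 *	}
 *
 *	static void example_exit_queue(struct request_queue *q)
 *	{
 *		blkcg_deactivate_policy(q, &example_policy);
 *	}
 */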
/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;
	int i, ret;

	mutex_lock(&blkcg_pol_register_mutex);
	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	ret = -ENOSPC;
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS) {
		pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
		goto err_unlock;
	}

	/* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn come in pairs */
	if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
	    (!pol->pd_alloc_fn ^ !pol->pd_free_fn))
		goto err_unlock;

	/* register @pol */
	pol->plid = i;
	blkcg_policy[pol->plid] = pol;

	/* allocate and install cpd's */
	if (pol->cpd_alloc_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			struct blkcg_policy_data *cpd;

			cpd = pol->cpd_alloc_fn(GFP_KERNEL);
			if (!cpd)
				goto err_free_cpds;

			blkcg->cpd[pol->plid] = cpd;
			cpd->blkcg = blkcg;
			cpd->plid = pol->plid;
			if (pol->cpd_init_fn)
				pol->cpd_init_fn(cpd);
		}
	}

	mutex_unlock(&blkcg_pol_mutex);

	/* everything is in place, add intf files for the new policy */
	if (pol->dfl_cftypes)
		WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
					       pol->dfl_cftypes));
	if (pol->legacy_cftypes)
		WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
						  pol->legacy_cftypes));
	mutex_unlock(&blkcg_pol_register_mutex);
	return 0;

err_free_cpds:
	if (pol->cpd_free_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			if (blkcg->cpd[pol->plid]) {
				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
				blkcg->cpd[pol->plid] = NULL;
			}
		}
	}
	blkcg_policy[pol->plid] = NULL;
err_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	mutex_unlock(&blkcg_pol_register_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);
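
/*
 * Example: registering a policy at init time, in the same spirit as the
 * in-tree controllers do from their initcalls.  Illustrative sketch only;
 * "example_policy" and its hooks are hypothetical.
 *
 *	static struct blkcg_policy example_policy = {
 *		.pd_alloc_fn	= example_pd_alloc,
 *		.pd_free_fn	= example_pd_free,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return blkcg_policy_register(&example_policy);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		blkcg_policy_unregister(&example_policy);
 *	}
 */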
/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;

	mutex_lock(&blkcg_pol_register_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->dfl_cftypes)
		cgroup_rm_cftypes(pol->dfl_cftypes);
	if (pol->legacy_cftypes)
		cgroup_rm_cftypes(pol->legacy_cftypes);

	/* remove cpds and unregister */
	mutex_lock(&blkcg_pol_mutex);

	if (pol->cpd_free_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			if (blkcg->cpd[pol->plid]) {
				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
				blkcg->cpd[pol->plid] = NULL;
			}
		}
	}
	blkcg_policy[pol->plid] = NULL;

	mutex_unlock(&blkcg_pol_mutex);
out_unlock:
	mutex_unlock(&blkcg_pol_register_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);

bool __blkcg_punt_bio_submit(struct bio *bio)
{
	struct blkcg_gq *blkg = bio->bi_blkg;

	/* consume the flag first */
	bio->bi_opf &= ~REQ_CGROUP_PUNT;

	/* never bounce for the root cgroup */
	if (!blkg->parent)
		return false;

	spin_lock_bh(&blkg->async_bio_lock);
	bio_list_add(&blkg->async_bios, bio);
	spin_unlock_bh(&blkg->async_bio_lock);

	queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work);
	return true;
}

/*
 * Scale the accumulated delay based on how long it has been since we updated
 * the delay.  We only call this when we are adding delay, in case it's been a
 * while since we added delay, and when we are checking to see if we need to
 * delay a task, to account for any delays that may have occurred.
 */
static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
{
	u64 old = atomic64_read(&blkg->delay_start);

	/* negative use_delay means no scaling, see blkcg_set_delay() */
	if (atomic_read(&blkg->use_delay) < 0)
		return;

	/*
	 * We only want to scale down every second.  The idea here is that we
	 * want to delay people for min(delay_nsec, NSEC_PER_SEC) in a certain
	 * time window.  We only want to throttle tasks for recent delay that
	 * has occurred, in 1 second time windows since that's the maximum
	 * duration things can be throttled for.  We save the current delay
	 * window in blkg->last_delay so we know what amount is still left to
	 * be charged to the blkg from this point onward.  blkg->last_use keeps
	 * track of the use_delay counter.  The idea is if we're unthrottling
	 * the blkg we are ok with whatever is happening now, and we can take
	 * away more of the accumulated delay as we've already throttled enough
	 * that everybody is happy with their IO latencies.
	 */
	if (time_before64(old + NSEC_PER_SEC, now) &&
	    atomic64_cmpxchg(&blkg->delay_start, old, now) == old) {
		u64 cur = atomic64_read(&blkg->delay_nsec);
		u64 sub = min_t(u64, blkg->last_delay, now - old);
		int cur_use = atomic_read(&blkg->use_delay);

		/*
		 * We've been unthrottled, subtract a larger chunk of our
		 * accumulated delay.
		 */
		if (cur_use < blkg->last_use)
			sub = max_t(u64, sub, blkg->last_delay >> 1);

		/*
		 * This shouldn't happen, but handle it anyway.  Our delay_nsec
		 * should only ever be growing except here where we subtract
		 * out min(last_delay, 1 second), but lord knows bugs happen
		 * and I'd rather not end up with negative numbers.
		 */
		if (unlikely(cur < sub)) {
			atomic64_set(&blkg->delay_nsec, 0);
			blkg->last_delay = 0;
		} else {
			atomic64_sub(sub, &blkg->delay_nsec);
			blkg->last_delay = cur - sub;
		}
		blkg->last_use = cur_use;
	}
}
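
/*
 * Worked example for the scaling above, with made-up numbers: with
 * delay_nsec=3s accumulated, last_delay=1s and a window that expired 1s
 * ago, sub starts as min(1s, 1s) = 1s.  If use_delay also dropped since
 * last_use, sub is raised to max(1s, last_delay/2 = 0.5s) = 1s.  Since
 * cur >= sub, we end with delay_nsec=2s and last_delay=2s available to
 * charge in the next window.
 */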
/*
 * This is called when we want to actually walk up the hierarchy and check to
 * see if we need to throttle, and then actually throttle if there is some
 * accumulated delay.  This should only be called upon return to user space so
 * we're not holding some lock that would induce a priority inversion.
 */
static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
{
	unsigned long pflags;
	bool clamp;
	u64 now = ktime_to_ns(ktime_get());
	u64 exp;
	u64 delay_nsec = 0;
	int tok;

	while (blkg->parent) {
		int use_delay = atomic_read(&blkg->use_delay);

		if (use_delay) {
			u64 this_delay;

			blkcg_scale_delay(blkg, now);
			this_delay = atomic64_read(&blkg->delay_nsec);
			if (this_delay > delay_nsec) {
				delay_nsec = this_delay;
				clamp = use_delay > 0;
			}
		}
		blkg = blkg->parent;
	}

	if (!delay_nsec)
		return;

	/*
	 * Let's not sleep for all eternity if we've amassed a huge delay.
	 * Swapping or metadata IO can accumulate 10's of seconds worth of
	 * delay, and we want userspace to be able to do _something_ so cap the
	 * delays at 0.25s.  If there's 10's of seconds worth of delay then the
	 * tasks will be delayed for 0.25 second for every syscall.  If
	 * blkcg_set_delay() was used as indicated by negative use_delay, the
	 * caller is responsible for regulating the range.
	 */
	if (clamp)
		delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC);

	if (use_memdelay)
		psi_memstall_enter(&pflags);

	exp = ktime_add_ns(now, delay_nsec);
	tok = io_schedule_prepare();
	do {
		__set_current_state(TASK_KILLABLE);
		if (!schedule_hrtimeout(&exp, HRTIMER_MODE_ABS))
			break;
	} while (!fatal_signal_pending(current));
	io_schedule_finish(tok);

	if (use_memdelay)
		psi_memstall_leave(&pflags);
}
/**
 * blkcg_maybe_throttle_current - throttle the current task if it has been marked
 *
 * This is only called if we've been marked with set_notify_resume().  Obviously
 * we can be set_notify_resume() for reasons other than blkcg throttling, so we
 * check to see if current->throttle_queue is set and if not this doesn't do
 * anything.  This should only ever be called by the resume code, it's not meant
 * to be called by people willy-nilly as it will actually do the work to
 * throttle the task if it is setup for throttling.
 */
void blkcg_maybe_throttle_current(void)
{
	struct request_queue *q = current->throttle_queue;
	struct cgroup_subsys_state *css;
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;
	bool use_memdelay = current->use_memdelay;

	if (!q)
		return;

	current->throttle_queue = NULL;
	current->use_memdelay = false;

	rcu_read_lock();
	css = kthread_blkcg();
	if (css)
		blkcg = css_to_blkcg(css);
	else
		blkcg = css_to_blkcg(task_css(current, io_cgrp_id));

	if (!blkcg)
		goto out;
	blkg = blkg_lookup(blkcg, q);
	if (!blkg)
		goto out;
	if (!blkg_tryget(blkg))
		goto out;
	rcu_read_unlock();

	blkcg_maybe_throttle_blkg(blkg, use_memdelay);
	blkg_put(blkg);
	blk_put_queue(q);
	return;
out:
	rcu_read_unlock();
	blk_put_queue(q);
}

/**
 * blkcg_schedule_throttle - this task needs to check for throttling
 * @q: the request queue IO was submitted on
 * @use_memdelay: do we charge this to memory delay for PSI
 *
 * This is called by the IO controller when we know there's delay accumulated
 * for the blkg for this task.  We do not pass the blkg because there are places
 * we call this that may not have that information, the swapping code for
 * instance will only have a request_queue at that point.  This sets
 * notify_resume for the task to check and see if it requires throttling before
 * returning to user space.
 *
 * We will only schedule once per syscall.  You can call this over and over
 * again and it will only do the check once upon return to user space, and only
 * throttle once.  If the task needs to be throttled again it'll need to be
 * re-set the next time we see the task.
 */
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
{
	if (unlikely(current->flags & PF_KTHREAD))
		return;

	if (current->throttle_queue != q) {
		if (!blk_get_queue(q))
			return;

		if (current->throttle_queue)
			blk_put_queue(current->throttle_queue);
		current->throttle_queue = q;
	}

	if (use_memdelay)
		current->use_memdelay = use_memdelay;
	set_notify_resume(current);
}

/**
 * blkcg_add_delay - add delay to this blkg
 * @blkg: blkg of interest
 * @now: the current time in nanoseconds
 * @delta: how many nanoseconds of delay to add
 *
 * Charge @delta to the blkg's current delay accumulation.  This is used to
 * throttle tasks if an IO controller thinks we need more throttling.
 */
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
{
	if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
		return;
	blkcg_scale_delay(blkg, now);
	atomic64_add(delta, &blkg->delay_nsec);
}
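
/*
 * Example: how an IO controller typically uses the two hooks above when it
 * detects congestion attributable to the current task's blkg.  Illustrative
 * sketch only; the surrounding controller logic and "penalty_nsec" are
 * hypothetical.
 *
 *	u64 now = ktime_to_ns(ktime_get());
 *
 *	blkcg_add_delay(blkg, now, penalty_nsec);
 *	blkcg_schedule_throttle(blkg->q, true);
 *
 * The task then sleeps off the accumulated delay in
 * blkcg_maybe_throttle_current() on its way back to user space.
 */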
/**
 * blkg_tryget_closest - try and get a blkg ref on the closest blkg
 * @bio: target bio
 * @css: target css
 *
 * As the failure mode here is to walk up the blkg tree, this ensures that the
 * blkg->parent pointers are always valid.  This returns the blkg that it ended
 * up taking a reference on or %NULL if no reference was taken.
 */
static inline struct blkcg_gq *blkg_tryget_closest(struct bio *bio,
		struct cgroup_subsys_state *css)
{
	struct blkcg_gq *blkg, *ret_blkg = NULL;

	rcu_read_lock();
	blkg = blkg_lookup_create(css_to_blkcg(css),
				  bdev_get_queue(bio->bi_bdev));
	while (blkg) {
		if (blkg_tryget(blkg)) {
			ret_blkg = blkg;
			break;
		}
		blkg = blkg->parent;
	}
	rcu_read_unlock();

	return ret_blkg;
}

/**
 * bio_associate_blkg_from_css - associate a bio with a specified css
 * @bio: target bio
 * @css: target css
 *
 * Associate @bio with the blkg found by combining the css's blkg and the
 * request_queue of the @bio.  An association failure is handled by walking up
 * the blkg tree.  Therefore, the blkg associated can be anything between @blkg
 * and q->root_blkg.  This situation only happens when a cgroup is dying and
 * then the remaining bios will spill to the closest alive blkg.
 *
 * A reference will be taken on the blkg and will be released when @bio is
 * freed.
 */
void bio_associate_blkg_from_css(struct bio *bio,
				 struct cgroup_subsys_state *css)
{
	if (bio->bi_blkg)
		blkg_put(bio->bi_blkg);

	if (css && css->parent) {
		bio->bi_blkg = blkg_tryget_closest(bio, css);
	} else {
		blkg_get(bdev_get_queue(bio->bi_bdev)->root_blkg);
		bio->bi_blkg = bdev_get_queue(bio->bi_bdev)->root_blkg;
	}
}
EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
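
/*
 * Example: the cgroup writeback path is one caller of the export above,
 * associating each writeback bio with the css that owns the dirtied pages
 * (cf. wbc_init_bio()).  Sketch of that usage:
 *
 *	if (wbc->wb)
 *		bio_associate_blkg_from_css(bio, wbc->wb->blkcg_css);
 */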
/**
 * bio_associate_blkg - associate a bio with a blkg
 * @bio: target bio
 *
 * Associate @bio with the blkg found from the bio's css and request_queue.
 * If one is not found, a new blkg is created.  If a blkg is already
 * associated, the css is reused and association redone as the
 * request_queue may have changed.
 */
void bio_associate_blkg(struct bio *bio)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();

	if (bio->bi_blkg)
		css = &bio_blkcg(bio)->css;
	else
		css = blkcg_css();

	bio_associate_blkg_from_css(bio, css);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(bio_associate_blkg);

/**
 * bio_clone_blkg_association - clone blkg association from src to dst bio
 * @dst: destination bio
 * @src: source bio
 */
void bio_clone_blkg_association(struct bio *dst, struct bio *src)
{
	if (src->bi_blkg) {
		if (dst->bi_blkg)
			blkg_put(dst->bi_blkg);
		blkg_get(src->bi_blkg);
		dst->bi_blkg = src->bi_blkg;
	}
}
EXPORT_SYMBOL_GPL(bio_clone_blkg_association);

static int blk_cgroup_io_type(struct bio *bio)
{
	if (op_is_discard(bio->bi_opf))
		return BLKG_IOSTAT_DISCARD;
	if (op_is_write(bio->bi_opf))
		return BLKG_IOSTAT_WRITE;
	return BLKG_IOSTAT_READ;
}

void blk_cgroup_bio_start(struct bio *bio)
{
	int rwd = blk_cgroup_io_type(bio), cpu;
	struct blkg_iostat_set *bis;
	unsigned long flags;

	cpu = get_cpu();
	bis = per_cpu_ptr(bio->bi_blkg->iostat_cpu, cpu);
	flags = u64_stats_update_begin_irqsave(&bis->sync);

	/*
	 * If the bio is flagged with BIO_CGROUP_ACCT it means this is a split
	 * bio and we would have already accounted for the size of the bio.
	 */
	if (!bio_flagged(bio, BIO_CGROUP_ACCT)) {
		bio_set_flag(bio, BIO_CGROUP_ACCT);
		bis->cur.bytes[rwd] += bio->bi_iter.bi_size;
	}
	bis->cur.ios[rwd]++;

	u64_stats_update_end_irqrestore(&bis->sync, flags);
	if (cgroup_subsys_on_dfl(io_cgrp_subsys))
		cgroup_rstat_updated(bio->bi_blkg->blkcg->css.cgroup, cpu);
	put_cpu();
}

static int __init blkcg_init(void)
{
	blkcg_punt_bio_wq = alloc_workqueue("blkcg_punt_bio",
					    WQ_MEM_RECLAIM | WQ_FREEZABLE |
					    WQ_UNBOUND | WQ_SYSFS, 0);
	if (!blkcg_punt_bio_wq)
		return -ENOMEM;
	return 0;
}
subsys_initcall(blkcg_init);

module_param(blkcg_debug_stats, bool, 0644);
MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");