// SPDX-License-Identifier: GPL-2.0
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 *
 * For policy-specific per-blkcg data:
 * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
 *		      Arianna Avanzini <avanzini.arianna@gmail.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/blk-cgroup.h>
#include <linux/tracehook.h>
#include <linux/psi.h>
#include "blk.h"

#define MAX_KEY_LEN 100

/*
 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
 * policy [un]register operations including cgroup file additions /
 * removals.  Putting cgroup file registration outside blkcg_pol_mutex
 * allows grabbing it from cgroup callbacks.
 */
static DEFINE_MUTEX(blkcg_pol_register_mutex);
static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root;
EXPORT_SYMBOL_GPL(blkcg_root);

struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
EXPORT_SYMBOL_GPL(blkcg_root_css);

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static LIST_HEAD(all_blkcgs);		/* protected by blkcg_pol_mutex */

static bool blkcg_debug_stats = false;
static struct workqueue_struct *blkcg_punt_bio_wq;

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkg->pd[i])
			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);

	blkg_rwstat_exit(&blkg->stat_ios);
	blkg_rwstat_exit(&blkg->stat_bytes);
	percpu_ref_exit(&blkg->refcnt);
	kfree(blkg);
}

static void __blkg_release(struct rcu_head *rcu)
{
	struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);

	WARN_ON(!bio_list_empty(&blkg->async_bios));

	/* release the blkcg and parent blkg refs this blkg has been holding */
	css_put(&blkg->blkcg->css);
	if (blkg->parent)
		blkg_put(blkg->parent);

	wb_congested_put(blkg->wb_congested);

	blkg_free(blkg);
}
/*
 * A group is RCU protected, but having an rcu lock does not mean that one
 * can access all the fields of blkg and assume these are valid.  For
 * example, don't try to follow throtl_data and request queue links.
 *
 * Having a reference to blkg under an rcu allows accesses to only values
 * local to groups like group stats and group rate limits.
 */
static void blkg_release(struct percpu_ref *ref)
{
	struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);

	call_rcu(&blkg->rcu_head, __blkg_release);
}

static void blkg_async_bio_workfn(struct work_struct *work)
{
	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
					     async_bio_work);
	struct bio_list bios = BIO_EMPTY_LIST;
	struct bio *bio;

	/* as long as there are pending bios, @blkg can't go away */
	spin_lock_bh(&blkg->async_bio_lock);
	bio_list_merge(&bios, &blkg->async_bios);
	bio_list_init(&blkg->async_bios);
	spin_unlock_bh(&blkg->async_bio_lock);

	while ((bio = bio_list_pop(&bios)))
		submit_bio(bio);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
	if (!blkg)
		return NULL;

	if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask))
		goto err_free;

	if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
	    blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
		goto err_free;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	spin_lock_init(&blkg->async_bio_lock);
	bio_list_init(&blkg->async_bios);
	INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn);
	blkg->blkcg = blkcg;

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = pol->pd_alloc_fn(gfp_mask, q->node);
		if (!pd)
			goto err_free;

		blkg->pd[i] = pd;
		pd->blkg = blkg;
		pd->plid = i;
	}

	return blkg;

err_free:
	blkg_free(blkg);
	return NULL;
}

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint)
{
	struct blkcg_gq *blkg;

	/*
	 * Hint didn't match.  Look up from the radix tree.  Note that the
	 * hint can only be updated under queue_lock as otherwise @blkg
	 * could have already been removed from blkg_tree.  The caller is
	 * responsible for grabbing queue_lock if @update_hint.
	 */
	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q == q) {
		if (update_hint) {
			lockdep_assert_held(&q->queue_lock);
			rcu_assign_pointer(blkcg->blkg_hint, blkg);
		}
		return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);
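
/*
 * Example (sketch, not part of this file): blkg_lookup(), defined in
 * linux/blk-cgroup.h, tries blkcg->blkg_hint first and falls back to the
 * slowpath above.  A typical hot-path caller holds only the RCU read lock
 * and tolerates a NULL result:
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg)
 *		... read group-local stats / limits ...
 *	rcu_read_unlock();
 */
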
/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
				    struct request_queue *q,
				    struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	struct bdi_writeback_congested *wb_congested;
	int i, ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(&q->queue_lock);

	/* request_queue is dying, do not create/recreate a blkg */
	if (blk_queue_dying(q)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	/* blkg holds a reference to blkcg */
	if (!css_tryget_online(&blkcg->css)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	wb_congested = wb_congested_get_create(q->backing_dev_info,
					       blkcg->css.id,
					       GFP_NOWAIT | __GFP_NOWARN);
	if (!wb_congested) {
		ret = -ENOMEM;
		goto err_put_css;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto err_put_congested;
		}
	}
	blkg = new_blkg;
	blkg->wb_congested = wb_congested;

	/* link parent */
	if (blkcg_parent(blkcg)) {
		blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
		if (WARN_ON_ONCE(!blkg->parent)) {
			ret = -ENODEV;
			goto err_put_congested;
		}
		blkg_get(blkg->parent);
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_init_fn)
			pol->pd_init_fn(blkg->pd[i]);
	}

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &q->blkg_list);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_online_fn)
				pol->pd_online_fn(blkg->pd[i]);
		}
	}
	blkg->online = true;
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	/* @blkg failed to be fully initialized, use the usual release path */
	blkg_put(blkg);
	return ERR_PTR(ret);

err_put_congested:
	wb_congested_put(wb_congested);
err_put_css:
	css_put(&blkcg->css);
err_free_blkg:
	blkg_free(new_blkg);
	return ERR_PTR(ret);
}

/**
 * __blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
 * create one.  blkg creation is performed recursively from blkcg_root such
 * that all non-root blkg's have access to the parent blkg.  This function
 * should be called under RCU read lock and @q->queue_lock.
 *
 * Returns the blkg or the closest blkg if blkg_create() fails as it walks
 * down from root.
 */
struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
				      struct request_queue *q)
{
	struct blkcg_gq *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(&q->queue_lock);

	blkg = __blkg_lookup(blkcg, q, true);
	if (blkg)
		return blkg;
	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.  Returns the closest
	 * blkg to the intended blkg should blkg_create() fail.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent = blkcg_parent(blkcg);
		struct blkcg_gq *ret_blkg = q->root_blkg;

		while (parent) {
			blkg = __blkg_lookup(parent, q, false);
			if (blkg) {
				/* remember closest blkg */
				ret_blkg = blkg;
				break;
			}
			pos = parent;
			parent = blkcg_parent(parent);
		}

		blkg = blkg_create(pos, q, NULL);
		if (IS_ERR(blkg))
			return ret_blkg;
		if (pos == blkcg)
			return blkg;
	}
}
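
/*
 * Worked example (hypothetical hierarchy, for illustration only): with
 * cgroups root -> A -> B where only root currently has a blkg on @q,
 * calling __blkg_lookup_create() for B walks up, finds root as the nearest
 * ancestor with a blkg, and so the first outer-loop iteration creates A's
 * blkg; the second iteration then creates and returns B's.  Should the
 * creation of A's blkg fail, the closest existing blkg (root's) is
 * returned instead.
 */
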
/**
 * blkg_lookup_create - find or create a blkg
 * @blkcg: target block cgroup
 * @q: target request_queue
 *
 * This looks up or creates the blkg representing the unique pair
 * of the blkcg and the request_queue.
 */
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q)
{
	struct blkcg_gq *blkg = blkg_lookup(blkcg, q);

	if (unlikely(!blkg)) {
		unsigned long flags;

		spin_lock_irqsave(&q->queue_lock, flags);
		blkg = __blkg_lookup_create(blkcg, q);
		spin_unlock_irqrestore(&q->queue_lock, flags);
	}

	return blkg;
}

static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;
	struct blkcg_gq *parent = blkg->parent;
	int i;

	lockdep_assert_held(&blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_offline_fn)
			pol->pd_offline_fn(blkg->pd[i]);
	}

	if (parent) {
		blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
		blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
	}

	blkg->online = false;

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock.  If it's not pointing to @blkg now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	percpu_ref_kill(&blkg->refcnt);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkcg_gq *blkg, *n;

	spin_lock_irq(&q->queue_lock);
	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	q->root_blkg = NULL;
	spin_unlock_irq(&q->queue_lock);
}

static int blkcg_reset_stats(struct cgroup_subsys_state *css,
			     struct cftype *cftype, u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;
	int i;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		blkg_rwstat_reset(&blkg->stat_bytes);
		blkg_rwstat_reset(&blkg->stat_ios);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg->pd[i]);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info->dev)
		return dev_name(blkg->q->backing_dev_info->dev);
	return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data and the matching queue lock held.  If @show_total
 * is %true, the sum of the return values from @prfill is printed with
 * "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	u64 total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		spin_lock_irq(&blkg->q->queue_lock);
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
		spin_unlock_irq(&blkg->q->queue_lock);
	}
	rcu_read_unlock();

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
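
/*
 * Example (sketch, names hypothetical): a policy typically pairs
 * blkcg_print_blkgs() with a prfill callback inside a cftype seq_show
 * method:
 *
 *	static u64 my_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
 *			     int off)
 *	{
 *		struct my_pd *mpd = container_of(pd, struct my_pd, pd);
 *
 *		return __blkg_prfill_u64(sf, pd, mpd->some_value);
 *	}
 *
 *	static int my_seq_show(struct seq_file *sf, void *v)
 *	{
 *		blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), my_prfill,
 *				  &my_policy, 0, false);
 *		return 0;
 *	}
 */
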
/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat_sample *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
		[BLKG_RWSTAT_DISCARD]	= "Discard",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   rwstat->cnt[i]);

	v = rwstat->cnt[BLKG_RWSTAT_READ] +
		rwstat->cnt[BLKG_RWSTAT_WRITE] +
		rwstat->cnt[BLKG_RWSTAT_DISCARD];
	seq_printf(sf, "%s Total %llu\n", dname, v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off)
{
	struct blkg_rwstat_sample rwstat = { };

	blkg_rwstat_read((void *)pd + off, &rwstat);
	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);

static u64 blkg_prfill_rwstat_field(struct seq_file *sf,
				    struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat_sample rwstat = { };

	blkg_rwstat_read((void *)pd->blkg + off, &rwstat);
	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/**
 * blkg_print_stat_bytes - seq_show callback for blkg->stat_bytes
 * @sf: seq_file to print to
 * @v: unused
 *
 * To be used as cftype->seq_show to print blkg->stat_bytes.
 * cftype->private must be set to the blkcg_policy.
 */
int blkg_print_stat_bytes(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_bytes), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_bytes);
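
/*
 * Example (sketch, names hypothetical): a policy exposes the byte and io
 * counters by pointing cftype->private at its own blkcg_policy:
 *
 *	static struct cftype my_policy_files[] = {
 *		{
 *			.name		= "my_policy.io_service_bytes",
 *			.private	= (unsigned long)&my_policy,
 *			.seq_show	= blkg_print_stat_bytes,
 *		},
 *		{
 *			.name		= "my_policy.io_serviced",
 *			.private	= (unsigned long)&my_policy,
 *			.seq_show	= blkg_print_stat_ios,
 *		},
 *		{ }	// terminate
 *	};
 */
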
/**
 * blkg_print_stat_ios - seq_show callback for blkg->stat_ios
 * @sf: seq_file to print to
 * @v: unused
 *
 * To be used as cftype->seq_show to print blkg->stat_ios.  cftype->private
 * must be set to the blkcg_policy.
 */
int blkg_print_stat_ios(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_ios), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_ios);

static u64 blkg_prfill_rwstat_field_recursive(struct seq_file *sf,
					      struct blkg_policy_data *pd,
					      int off)
{
	struct blkg_rwstat_sample rwstat;

	blkg_rwstat_recursive_sum(pd->blkg, NULL, off, &rwstat);
	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/**
 * blkg_print_stat_bytes_recursive - recursive version of blkg_print_stat_bytes
 * @sf: seq_file to print to
 * @v: unused
 */
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field_recursive,
			  (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_bytes), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_bytes_recursive);

/**
 * blkg_print_stat_ios_recursive - recursive version of blkg_print_stat_ios
 * @sf: seq_file to print to
 * @v: unused
 */
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field_recursive,
			  (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_ios), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_ios_recursive);

/**
 * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
 * @blkg: blkg of interest
 * @pol: blkcg_policy which contains the blkg_rwstat
 * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
 * @sum: blkg_rwstat_sample structure containing the results
 *
 * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
 * online descendants and their aux counts.  The caller must be holding the
 * queue lock for online tests.
 *
 * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
 * is at @off bytes into @blkg's blkg_policy_data of the policy.
 */
void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
			       int off, struct blkg_rwstat_sample *sum)
{
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	unsigned int i;

	lockdep_assert_held(&blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
		struct blkg_rwstat *rwstat;

		if (!pos_blkg->online)
			continue;

		if (pol)
			rwstat = (void *)blkg_to_pd(pos_blkg, pol) + off;
		else
			rwstat = (void *)pos_blkg + off;

		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			sum->cnt[i] = blkg_rwstat_read_counter(rwstat, i);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);

/* Performs queue bypass and policy enabled checks then looks up blkg. */
static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
					  const struct blkcg_policy *pol,
					  struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(&q->queue_lock);

	if (!blkcg_policy_enabled(q, pol))
		return ERR_PTR(-EOPNOTSUPP);
	return __blkg_lookup(blkcg, q, true /* update_hint */);
}

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->body the
 * part of @input following MAJ:MIN.  This function returns with RCU read
 * lock and queue lock held and must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(&disk->queue->queue_lock)
{
	struct gendisk *disk;
	struct request_queue *q;
	struct blkcg_gq *blkg;
	unsigned int major, minor;
	int key_len, part, ret;
	char *body;

	if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
		return -EINVAL;

	body = input + key_len;
	if (!isspace(*body))
		return -EINVAL;
	body = skip_spaces(body);

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk)
		return -ENODEV;
	if (part) {
		ret = -ENODEV;
		goto fail;
	}

	q = disk->queue;

	rcu_read_lock();
	spin_lock_irq(&q->queue_lock);

	blkg = blkg_lookup_check(blkcg, pol, q);
	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto fail_unlock;
	}

	if (blkg)
		goto success;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent;
		struct blkcg_gq *new_blkg;

		parent = blkcg_parent(blkcg);
		while (parent && !__blkg_lookup(parent, q, false)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		/* Drop locks to do new blkg allocation with GFP_KERNEL. */
		spin_unlock_irq(&q->queue_lock);
		rcu_read_unlock();

		new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto fail;
		}

		rcu_read_lock();
		spin_lock_irq(&q->queue_lock);

		blkg = blkg_lookup_check(pos, pol, q);
		if (IS_ERR(blkg)) {
			ret = PTR_ERR(blkg);
			goto fail_unlock;
		}

		if (blkg) {
			blkg_free(new_blkg);
		} else {
			blkg = blkg_create(pos, q, new_blkg);
			if (IS_ERR(blkg)) {
				ret = PTR_ERR(blkg);
				goto fail_unlock;
			}
		}

		if (pos == blkcg)
			goto success;
	}
success:
	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->body = body;
	return 0;

fail_unlock:
	spin_unlock_irq(&q->queue_lock);
	rcu_read_unlock();
fail:
	put_disk_and_module(disk);
	/*
	 * If queue was bypassing, we should retry.  Do so after a
	 * short msleep().  It isn't strictly necessary but queue
	 * can be bypassing for some time and it's always nice to
	 * avoid busy looping.
	 */
	if (ret == -EBUSY) {
		msleep(10);
		ret = restart_syscall();
	}
	return ret;
}

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(&ctx->disk->queue->queue_lock) __releases(rcu)
{
	spin_unlock_irq(&ctx->disk->queue->queue_lock);
	rcu_read_unlock();
	put_disk_and_module(ctx->disk);
}
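
/*
 * Example (sketch, names hypothetical): a cftype ->write handler for a
 * "MAJ:MIN value" style knob brackets its update with the pair above:
 *
 *	static ssize_t my_policy_write(struct kernfs_open_file *of,
 *				       char *buf, size_t nbytes, loff_t off)
 *	{
 *		struct blkcg *blkcg = css_to_blkcg(of_css(of));
 *		struct blkg_conf_ctx ctx;
 *		u64 val;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &my_policy, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		// RCU read lock and queue_lock are held here
 *		ret = kstrtou64(ctx.body, 10, &val);
 *		if (!ret)
 *			my_pd_from_blkg(ctx.blkg)->limit = val;
 *
 *		blkg_conf_finish(&ctx);
 *		return ret ?: nbytes;
 *	}
 */
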
static int blkcg_print_stat(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct blkcg_gq *blkg;

	rcu_read_lock();

	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		const char *dname;
		char *buf;
		struct blkg_rwstat_sample rwstat;
		u64 rbytes, wbytes, rios, wios, dbytes, dios;
		size_t size = seq_get_buf(sf, &buf), off = 0;
		int i;
		bool has_stats = false;

		dname = blkg_dev_name(blkg);
		if (!dname)
			continue;

		/*
		 * Hooray string manipulation, count is the size written NOT
		 * INCLUDING THE \0, so size is now count+1 less than what we
		 * had before, but we want to start writing the next bit from
		 * the \0 so we only add count to buf.
		 */
		off += scnprintf(buf+off, size-off, "%s ", dname);

		spin_lock_irq(&blkg->q->queue_lock);

		blkg_rwstat_recursive_sum(blkg, NULL,
				offsetof(struct blkcg_gq, stat_bytes), &rwstat);
		rbytes = rwstat.cnt[BLKG_RWSTAT_READ];
		wbytes = rwstat.cnt[BLKG_RWSTAT_WRITE];
		dbytes = rwstat.cnt[BLKG_RWSTAT_DISCARD];

		blkg_rwstat_recursive_sum(blkg, NULL,
				offsetof(struct blkcg_gq, stat_ios), &rwstat);
		rios = rwstat.cnt[BLKG_RWSTAT_READ];
		wios = rwstat.cnt[BLKG_RWSTAT_WRITE];
		dios = rwstat.cnt[BLKG_RWSTAT_DISCARD];

		spin_unlock_irq(&blkg->q->queue_lock);

		if (rbytes || wbytes || rios || wios) {
			has_stats = true;
			off += scnprintf(buf+off, size-off,
					 "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
					 rbytes, wbytes, rios, wios,
					 dbytes, dios);
		}

		if (!blkcg_debug_stats)
			goto next;

		if (atomic_read(&blkg->use_delay)) {
			has_stats = true;
			off += scnprintf(buf+off, size-off,
					 " use_delay=%d delay_nsec=%llu",
					 atomic_read(&blkg->use_delay),
					 (unsigned long long)atomic64_read(&blkg->delay_nsec));
		}

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];
			size_t written;

			if (!blkg->pd[i] || !pol->pd_stat_fn)
				continue;

			written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
			if (written)
				has_stats = true;
			off += written;
		}
next:
		if (has_stats) {
			if (off < size - 1) {
				off += scnprintf(buf+off, size-off, "\n");
				seq_commit(sf, off);
			} else {
				seq_commit(sf, -1);
			}
		}
	}

	rcu_read_unlock();
	return 0;
}

static struct cftype blkcg_files[] = {
	{
		.name = "stat",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = blkcg_print_stat,
	},
	{ }	/* terminate */
};

static struct cftype blkcg_legacy_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};

/*
 * blkcg destruction is a three-stage process.
 *
 * 1. Destruction starts.  The blkcg_css_offline() callback is invoked
 *    which offlines writeback.  Here we tie the next stage of blkg destruction
 *    to the completion of writeback associated with the blkcg.  This lets us
 *    avoid punting potentially large amounts of outstanding writeback to root
 *    while maintaining any ongoing policies.  The next stage is triggered when
 *    the nr_cgwbs count goes to zero.
 *
 * 2. When the nr_cgwbs count goes to zero, blkcg_destroy_blkgs() is called
 *    and handles the destruction of blkgs.  Here the css reference held by
 *    the blkg is put back eventually allowing blkcg_css_free() to be called.
 *    This work may occur in cgwb_release_workfn() on the cgwb_release
 *    workqueue.  Any submitted ios that fail to get the blkg ref will be
 *    punted to the root_blkg.
 *
 * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called.
 *    This finally frees the blkcg.
 */

/**
 * blkcg_css_offline - cgroup css_offline callback
 * @css: css of interest
 *
 * This function is called when @css is about to go away.  Here the cgwbs are
 * offlined first and only once writeback associated with the blkcg has
 * finished do we start step 2 (see above).
 */
static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);

	/* this prevents anyone from attaching or migrating to this blkcg */
	wb_blkcg_offline(blkcg);

	/* put the base cgwb reference allowing step 2 to be triggered */
	blkcg_cgwb_put(blkcg);
}

/**
 * blkcg_destroy_blkgs - responsible for shooting down blkgs
 * @blkcg: blkcg of interest
 *
 * blkgs should be removed while holding both q and blkcg locks.  As blkcg lock
 * is nested inside q lock, this function performs reverse double lock dancing.
 * Destroying the blkgs releases the reference held on the blkcg's css allowing
 * blkcg_css_free to eventually be called.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
void blkcg_destroy_blkgs(struct blkcg *blkcg)
{
	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						    struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(&q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(&q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
}

static void blkcg_css_free(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	int i;

	mutex_lock(&blkcg_pol_mutex);

	list_del(&blkcg->all_blkcgs_node);

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

	mutex_unlock(&blkcg_pol_mutex);

	kfree(blkcg);
}

static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct blkcg *blkcg;
	struct cgroup_subsys_state *ret;
	int i;

	mutex_lock(&blkcg_pol_mutex);

	if (!parent_css) {
		blkcg = &blkcg_root;
	} else {
		blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
		if (!blkcg) {
			ret = ERR_PTR(-ENOMEM);
			goto unlock;
		}
	}

	for (i = 0; i < BLKCG_MAX_POLS ; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg_policy_data *cpd;

		/*
		 * If the policy hasn't been attached yet, wait for it
		 * to be attached before doing anything else. Otherwise,
		 * check if the policy requires any specific per-cgroup
		 * data: if it does, allocate and initialize it.
		 */
		if (!pol || !pol->cpd_alloc_fn)
			continue;

		cpd = pol->cpd_alloc_fn(GFP_KERNEL);
		if (!cpd) {
			ret = ERR_PTR(-ENOMEM);
			goto free_pd_blkcg;
		}
		blkcg->cpd[i] = cpd;
		cpd->blkcg = blkcg;
		cpd->plid = i;
		if (pol->cpd_init_fn)
			pol->cpd_init_fn(cpd);
	}

	spin_lock_init(&blkcg->lock);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
	INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
	INIT_LIST_HEAD(&blkcg->cgwb_list);
	refcount_set(&blkcg->cgwb_refcnt, 1);
#endif
	list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);

	mutex_unlock(&blkcg_pol_mutex);
	return &blkcg->css;

free_pd_blkcg:
	for (i--; i >= 0; i--)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

	if (blkcg != &blkcg_root)
		kfree(blkcg);
unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node().  Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	struct blkcg_gq *new_blkg, *blkg;
	bool preloaded;
	int ret;

	new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
	if (!new_blkg)
		return -ENOMEM;

	preloaded = !radix_tree_preload(GFP_KERNEL);

	/* Make sure the root blkg exists. */
	rcu_read_lock();
	spin_lock_irq(&q->queue_lock);
	blkg = blkg_create(&blkcg_root, q, new_blkg);
	if (IS_ERR(blkg))
		goto err_unlock;
	q->root_blkg = blkg;
	spin_unlock_irq(&q->queue_lock);
	rcu_read_unlock();

	if (preloaded)
		radix_tree_preload_end();

	ret = blk_iolatency_init(q);
	if (ret)
		goto err_destroy_all;

	ret = blk_throtl_init(q);
	if (ret)
		goto err_destroy_all;
	return 0;

err_destroy_all:
	blkg_destroy_all(q);
	return ret;
err_unlock:
	spin_unlock_irq(&q->queue_lock);
	rcu_read_unlock();
	if (preloaded)
		radix_tree_preload_end();
	return PTR_ERR(blkg);
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(&q->queue_lock);

	/*
	 * @q could be exiting and already have destroyed all blkgs as
	 * indicated by NULL root_blkg.  If so, don't confuse policies.
	 */
	if (!q->root_blkg)
		return;

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_exit_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	blkg_destroy_all(q);
	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *dst_css;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, dst_css, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

static void blkcg_bind(struct cgroup_subsys_state *root_css)
{
	int i;

	mutex_lock(&blkcg_pol_mutex);

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg *blkcg;

		if (!pol || !pol->cpd_bind_fn)
			continue;

		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node)
			if (blkcg->cpd[pol->plid])
				pol->cpd_bind_fn(blkcg->cpd[pol->plid]);
	}
	mutex_unlock(&blkcg_pol_mutex);
}

static void blkcg_exit(struct task_struct *tsk)
{
	if (tsk->throttle_queue)
		blk_put_queue(tsk->throttle_queue);
	tsk->throttle_queue = NULL;
}

struct cgroup_subsys io_cgrp_subsys = {
	.css_alloc = blkcg_css_alloc,
	.css_offline = blkcg_css_offline,
	.css_free = blkcg_css_free,
	.can_attach = blkcg_can_attach,
	.bind = blkcg_bind,
	.dfl_cftypes = blkcg_files,
	.legacy_cftypes = blkcg_legacy_files,
	.legacy_name = "blkio",
	.exit = blkcg_exit,
#ifdef CONFIG_MEMCG
	/*
	 * This ensures that, if available, memcg is automatically enabled
	 * together on the default hierarchy so that the owner cgroup can
	 * be retrieved from writeback pages.
	 */
	.depends_on = 1 << memory_cgrp_id,
#endif
};
EXPORT_SYMBOL_GPL(io_cgrp_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol)
{
	struct blkg_policy_data *pd_prealloc = NULL;
	struct blkcg_gq *blkg;
	int ret;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	if (queue_is_mq(q))
		blk_mq_freeze_queue(q);
pd_prealloc:
	if (!pd_prealloc) {
		pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
		if (!pd_prealloc) {
			ret = -ENOMEM;
			goto out_bypass_end;
		}
	}

	spin_lock_irq(&q->queue_lock);

	/* blkg_list is pushed at the head, reverse walk to init parents first */
	list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
		struct blkg_policy_data *pd;

		if (blkg->pd[pol->plid])
			continue;

		pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q->node);
		if (!pd)
			swap(pd, pd_prealloc);
		if (!pd) {
			spin_unlock_irq(&q->queue_lock);
			goto pd_prealloc;
		}

		blkg->pd[pol->plid] = pd;
		pd->blkg = blkg;
		pd->plid = pol->plid;
		if (pol->pd_init_fn)
			pol->pd_init_fn(pd);
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;

	spin_unlock_irq(&q->queue_lock);
out_bypass_end:
	if (queue_is_mq(q))
		blk_mq_unfreeze_queue(q);
	if (pd_prealloc)
		pol->pd_free_fn(pd_prealloc);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);
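
/*
 * Example (sketch, names hypothetical): a QoS controller or elevator
 * typically activates its policy while setting up a queue and deactivates
 * it on teardown:
 *
 *	static int my_policy_init_queue(struct request_queue *q)
 *	{
 *		return blkcg_activate_policy(q, &my_policy);
 *	}
 *
 *	static void my_policy_exit_queue(struct request_queue *q)
 *	{
 *		blkcg_deactivate_policy(q, &my_policy);
 *	}
 *
 * After activation, every existing and future blkg on @q carries a
 * blkg_policy_data allocated through my_policy.pd_alloc_fn().
 */
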
/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol)
{
	struct blkcg_gq *blkg;

	if (!blkcg_policy_enabled(q, pol))
		return;

	if (queue_is_mq(q))
		blk_mq_freeze_queue(q);

	spin_lock_irq(&q->queue_lock);

	__clear_bit(pol->plid, q->blkcg_pols);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		if (blkg->pd[pol->plid]) {
			if (pol->pd_offline_fn)
				pol->pd_offline_fn(blkg->pd[pol->plid]);
			pol->pd_free_fn(blkg->pd[pol->plid]);
			blkg->pd[pol->plid] = NULL;
		}
	}

	spin_unlock_irq(&q->queue_lock);

	if (queue_is_mq(q))
		blk_mq_unfreeze_queue(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;
	int i, ret;

	mutex_lock(&blkcg_pol_register_mutex);
	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	ret = -ENOSPC;
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS) {
		pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
		goto err_unlock;
	}

	/* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn come in pairs */
	if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
	    (!pol->pd_alloc_fn ^ !pol->pd_free_fn))
		goto err_unlock;

	/* register @pol */
	pol->plid = i;
	blkcg_policy[pol->plid] = pol;

	/* allocate and install cpd's */
	if (pol->cpd_alloc_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			struct blkcg_policy_data *cpd;

			cpd = pol->cpd_alloc_fn(GFP_KERNEL);
			if (!cpd)
				goto err_free_cpds;

			blkcg->cpd[pol->plid] = cpd;
			cpd->blkcg = blkcg;
			cpd->plid = pol->plid;
			pol->cpd_init_fn(cpd);
		}
	}

	mutex_unlock(&blkcg_pol_mutex);

	/* everything is in place, add intf files for the new policy */
	if (pol->dfl_cftypes)
		WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
					       pol->dfl_cftypes));
	if (pol->legacy_cftypes)
		WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
						  pol->legacy_cftypes));
	mutex_unlock(&blkcg_pol_register_mutex);
	return 0;

err_free_cpds:
	if (pol->cpd_free_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			if (blkcg->cpd[pol->plid]) {
				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
				blkcg->cpd[pol->plid] = NULL;
			}
		}
	}
	blkcg_policy[pol->plid] = NULL;
err_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	mutex_unlock(&blkcg_pol_register_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);
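
/*
 * Example (sketch, names hypothetical): a policy module defines its
 * blkcg_policy descriptor and registers it from module init:
 *
 *	static struct blkcg_policy my_policy = {
 *		.dfl_cftypes	= my_policy_files,
 *		.legacy_cftypes	= my_policy_legacy_files,
 *		.pd_alloc_fn	= my_pd_alloc,
 *		.pd_init_fn	= my_pd_init,
 *		.pd_offline_fn	= my_pd_offline,
 *		.pd_free_fn	= my_pd_free,
 *	};
 *
 *	static int __init my_policy_init(void)
 *	{
 *		return blkcg_policy_register(&my_policy);
 *	}
 *
 *	static void __exit my_policy_exit(void)
 *	{
 *		blkcg_policy_unregister(&my_policy);
 *	}
 *
 * pd_alloc_fn/pd_free_fn must come as a pair (see the check above);
 * ->plid is filled in by blkcg_policy_register().
 */
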
/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;

	mutex_lock(&blkcg_pol_register_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->dfl_cftypes)
		cgroup_rm_cftypes(pol->dfl_cftypes);
	if (pol->legacy_cftypes)
		cgroup_rm_cftypes(pol->legacy_cftypes);

	/* remove cpds and unregister */
	mutex_lock(&blkcg_pol_mutex);

	if (pol->cpd_free_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			if (blkcg->cpd[pol->plid]) {
				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
				blkcg->cpd[pol->plid] = NULL;
			}
		}
	}
	blkcg_policy[pol->plid] = NULL;

	mutex_unlock(&blkcg_pol_mutex);
out_unlock:
	mutex_unlock(&blkcg_pol_register_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);

bool __blkcg_punt_bio_submit(struct bio *bio)
{
	struct blkcg_gq *blkg = bio->bi_blkg;

	/* consume the flag first */
	bio->bi_opf &= ~REQ_CGROUP_PUNT;

	/* never bounce for the root cgroup */
	if (!blkg->parent)
		return false;

	spin_lock_bh(&blkg->async_bio_lock);
	bio_list_add(&blkg->async_bios, bio);
	spin_unlock_bh(&blkg->async_bio_lock);

	queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work);
	return true;
}

/*
 * Scale the accumulated delay based on how long it has been since we updated
 * the delay.  We only call this when we are adding delay, in case it's been a
 * while since we added delay, and when we are checking to see if we need to
 * delay a task, to account for any delays that may have occurred.
 */
static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
{
	u64 old = atomic64_read(&blkg->delay_start);

	/*
	 * We only want to scale down every second.  The idea here is that we
	 * want to delay people for min(delay_nsec, NSEC_PER_SEC) in a certain
	 * time window.  We only want to throttle tasks for recent delay that
	 * has occurred, in 1 second time windows since that's the maximum
	 * things can be throttled.  We save the current delay window in
	 * blkg->last_delay so we know what amount is still left to be charged
	 * to the blkg from this point onward.  blkg->last_use keeps track of
	 * the use_delay counter.  The idea is if we're unthrottling the blkg we
	 * are ok with whatever is happening now, and we can take away more of
	 * the accumulated delay as we've already throttled enough that
	 * everybody is happy with their IO latencies.
	 */
	if (time_before64(old + NSEC_PER_SEC, now) &&
	    atomic64_cmpxchg(&blkg->delay_start, old, now) == old) {
		u64 cur = atomic64_read(&blkg->delay_nsec);
		u64 sub = min_t(u64, blkg->last_delay, now - old);
		int cur_use = atomic_read(&blkg->use_delay);

		/*
		 * We've been unthrottled, subtract a larger chunk of our
		 * accumulated delay.
		 */
		if (cur_use < blkg->last_use)
			sub = max_t(u64, sub, blkg->last_delay >> 1);

		/*
		 * This shouldn't happen, but handle it anyway.  Our delay_nsec
		 * should only ever be growing except here where we subtract out
		 * min(last_delay, 1 second), but lord knows bugs happen and I'd
		 * rather not end up with negative numbers.
		 */
		if (unlikely(cur < sub)) {
			atomic64_set(&blkg->delay_nsec, 0);
			blkg->last_delay = 0;
		} else {
			atomic64_sub(sub, &blkg->delay_nsec);
			blkg->last_delay = cur - sub;
		}
		blkg->last_use = cur_use;
	}
}
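
/*
 * Worked example (illustrative numbers only): suppose delay_nsec has grown
 * to 3 * NSEC_PER_SEC, last_delay is NSEC_PER_SEC and the last scaling
 * happened 2 seconds ago.  Then sub = min(last_delay, now - old) = 1s, so
 * delay_nsec drops to 2s and last_delay becomes 2s.  If use_delay also
 * dropped since the last scaling (we were unthrottled), sub is raised to
 * at least last_delay / 2, shrinking the backlog faster.
 */
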
/*
 * This is called when we want to actually walk up the hierarchy and check to
 * see if we need to throttle, and then actually throttle if there is some
 * accumulated delay.  This should only be called upon return to user space so
 * we're not holding some lock that would induce a priority inversion.
 */
static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
{
	unsigned long pflags;
	u64 now = ktime_to_ns(ktime_get());
	u64 exp;
	u64 delay_nsec = 0;
	int tok;

	while (blkg->parent) {
		if (atomic_read(&blkg->use_delay)) {
			blkcg_scale_delay(blkg, now);
			delay_nsec = max_t(u64, delay_nsec,
					   atomic64_read(&blkg->delay_nsec));
		}
		blkg = blkg->parent;
	}

	if (!delay_nsec)
		return;

	/*
	 * Let's not sleep for all eternity if we've amassed a huge delay.
	 * Swapping or metadata IO can accumulate 10's of seconds worth of
	 * delay, and we want userspace to be able to do _something_ so cap
	 * the delays at 0.25s.  If there's 10's of seconds worth of delay
	 * then the tasks will be delayed for 0.25 seconds for every syscall.
	 */
	delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC);

	if (use_memdelay)
		psi_memstall_enter(&pflags);

	exp = ktime_add_ns(now, delay_nsec);
	tok = io_schedule_prepare();
	do {
		__set_current_state(TASK_KILLABLE);
		if (!schedule_hrtimeout(&exp, HRTIMER_MODE_ABS))
			break;
	} while (!fatal_signal_pending(current));
	io_schedule_finish(tok);

	if (use_memdelay)
		psi_memstall_leave(&pflags);
}

/**
 * blkcg_maybe_throttle_current - throttle the current task if it has been marked
 *
 * This is only called if we've been marked with set_notify_resume().  Obviously
 * we can be set_notify_resume() for reasons other than blkcg throttling, so we
 * check to see if current->throttle_queue is set and if not this doesn't do
 * anything.  This should only ever be called by the resume code, it's not meant
 * to be called by people willy-nilly as it will actually do the work to
 * throttle the task if it is setup for throttling.
 */
void blkcg_maybe_throttle_current(void)
{
	struct request_queue *q = current->throttle_queue;
	struct cgroup_subsys_state *css;
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;
	bool use_memdelay = current->use_memdelay;

	if (!q)
		return;

	current->throttle_queue = NULL;
	current->use_memdelay = false;

	rcu_read_lock();
	css = kthread_blkcg();
	if (css)
		blkcg = css_to_blkcg(css);
	else
		blkcg = css_to_blkcg(task_css(current, io_cgrp_id));

	if (!blkcg)
		goto out;
	blkg = blkg_lookup(blkcg, q);
	if (!blkg)
		goto out;
	if (!blkg_tryget(blkg))
		goto out;
	rcu_read_unlock();

	blkcg_maybe_throttle_blkg(blkg, use_memdelay);
	blkg_put(blkg);
	blk_put_queue(q);
	return;
out:
	rcu_read_unlock();
	blk_put_queue(q);
}

/**
 * blkcg_schedule_throttle - this task needs to check for throttling
 * @q: the request queue IO was submitted on
 * @use_memdelay: do we charge this to memory delay for PSI
 *
 * This is called by the IO controller when we know there's delay accumulated
 * for the blkg for this task.  We do not pass the blkg because there are places
 * we call this that may not have that information, the swapping code for
 * instance will only have a request_queue at that point.  This sets the
 * notify_resume for the task to check and see if it requires throttling before
 * returning to user space.
 *
 * We will only schedule once per syscall.  You can call this over and over
 * again and it will only do the check once upon return to user space, and only
 * throttle once.  If the task needs to be throttled again it'll need to be
 * re-set at the next time we see the task.
 */
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
{
	if (unlikely(current->flags & PF_KTHREAD))
		return;

	if (!blk_get_queue(q))
		return;

	if (current->throttle_queue)
		blk_put_queue(current->throttle_queue);
	current->throttle_queue = q;
	if (use_memdelay)
		current->use_memdelay = use_memdelay;
	set_notify_resume(current);
}
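
/*
 * Example (sketch): an IO controller that detects a congested group first
 * charges the observed overage and then asks for the task to be throttled
 * on its way back to user space:
 *
 *	blkcg_add_delay(blkg, now, lat - budget);	// charge the overage
 *	blkcg_schedule_throttle(blkg->q, false);	// throttle on resume
 *
 * blk-iolatency follows roughly this pattern; the variable names above are
 * illustrative only.
 */
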
/**
 * blkcg_add_delay - add delay to this blkg
 * @blkg: blkg of interest
 * @now: the current time in nanoseconds
 * @delta: how many nanoseconds of delay to add
 *
 * Charge @delta to the blkg's current delay accumulation.  This is used to
 * throttle tasks if an IO controller thinks we need more throttling.
 */
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
{
	blkcg_scale_delay(blkg, now);
	atomic64_add(delta, &blkg->delay_nsec);
}

static int __init blkcg_init(void)
{
	blkcg_punt_bio_wq = alloc_workqueue("blkcg_punt_bio",
					    WQ_MEM_RECLAIM | WQ_FREEZABLE |
					    WQ_UNBOUND | WQ_SYSFS, 0);
	if (!blkcg_punt_bio_wq)
		return -ENOMEM;
	return 0;
}
subsys_initcall(blkcg_init);

module_param(blkcg_debug_stats, bool, 0644);
MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");