/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 *
 * For policy-specific per-blkcg data:
 * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
 *                    Arianna Avanzini <avanzini.arianna@gmail.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/blk-cgroup.h>
#include <linux/tracehook.h>
#include "blk.h"

#define MAX_KEY_LEN 100

/*
 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
 * policy [un]register operations including cgroup file additions /
 * removals.  Putting cgroup file registration outside blkcg_pol_mutex
 * allows grabbing it from cgroup callbacks.
 */
static DEFINE_MUTEX(blkcg_pol_register_mutex);
static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root;
EXPORT_SYMBOL_GPL(blkcg_root);

struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static LIST_HEAD(all_blkcgs);		/* protected by blkcg_pol_mutex */

static bool blkcg_debug_stats = false;

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkg->pd[i])
			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);

	if (blkg->blkcg != &blkcg_root)
		blk_exit_rl(blkg->q, &blkg->rl);

	blkg_rwstat_exit(&blkg->stat_ios);
	blkg_rwstat_exit(&blkg->stat_bytes);
	kfree(blkg);
}
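
/*
 * Informal lifecycle overview (a summary of the functions in this file):
 * a blkg is allocated (blkg_alloc), created and linked (blkg_create,
 * which calls pd_init_fn and then pd_online_fn), later offlined
 * (blkg_pd_offline -> pd_offline_fn), unlinked (blkg_destroy) and
 * finally released through __blkg_release_rcu() -> blkg_free() once the
 * last reference is dropped.
 */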

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
	if (!blkg)
		return NULL;

	if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
	    blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
		goto err_free;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	atomic_set(&blkg->refcnt, 1);

	/* root blkg uses @q->root_rl, init rl only for !root blkgs */
	if (blkcg != &blkcg_root) {
		if (blk_init_rl(&blkg->rl, q, gfp_mask))
			goto err_free;
		blkg->rl.blkg = blkg;
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = pol->pd_alloc_fn(gfp_mask, q->node);
		if (!pd)
			goto err_free;

		blkg->pd[i] = pd;
		pd->blkg = blkg;
		pd->plid = i;
	}

	return blkg;

err_free:
	blkg_free(blkg);
	return NULL;
}

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint)
{
	struct blkcg_gq *blkg;

	/*
	 * Hint didn't match.  Look up from the radix tree.  Note that the
	 * hint can only be updated under queue_lock as otherwise @blkg
	 * could have already been removed from blkg_tree.  The caller is
	 * responsible for grabbing queue_lock if @update_hint.
	 */
	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q == q) {
		if (update_hint) {
			lockdep_assert_held(q->queue_lock);
			rcu_assign_pointer(blkcg->blkg_hint, blkg);
		}
		return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);
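
/*
 * Note: the fast path is the inline blkg_lookup() in blk-cgroup.h, which
 * consults blkcg->blkg_hint before falling back to the radix tree walk
 * above.  A sketch of the common caller pattern (RCU read lock held,
 * queue_lock additionally needed for the create step):
 *
 *	blkg = blkg_lookup(blkcg, q);
 *	if (!blkg)
 *		blkg = blkg_lookup_create(blkcg, q);
 */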

/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
				    struct request_queue *q,
				    struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	struct bdi_writeback_congested *wb_congested;
	int i, ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/* blkg holds a reference to blkcg */
	if (!css_tryget_online(&blkcg->css)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	wb_congested = wb_congested_get_create(q->backing_dev_info,
					       blkcg->css.id,
					       GFP_NOWAIT | __GFP_NOWARN);
	if (!wb_congested) {
		ret = -ENOMEM;
		goto err_put_css;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto err_put_congested;
		}
	}
	blkg = new_blkg;
	blkg->wb_congested = wb_congested;

	/* link parent */
	if (blkcg_parent(blkcg)) {
		blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
		if (WARN_ON_ONCE(!blkg->parent)) {
			ret = -ENODEV;
			goto err_put_congested;
		}
		blkg_get(blkg->parent);
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_init_fn)
			pol->pd_init_fn(blkg->pd[i]);
	}

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &q->blkg_list);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_online_fn)
				pol->pd_online_fn(blkg->pd[i]);
		}
	}
	blkg->online = true;
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	/* @blkg failed to be fully initialized, use the usual release path */
	blkg_put(blkg);
	return ERR_PTR(ret);

err_put_congested:
	wb_congested_put(wb_congested);
err_put_css:
	css_put(&blkcg->css);
err_free_blkg:
	blkg_free(new_blkg);
	return ERR_PTR(ret);
}
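
/*
 * Note on the ordering above (informal): pd_init_fn runs before the blkg
 * is inserted anywhere, so it can initialize without locking concerns;
 * pd_online_fn runs only once the blkg is reachable through the radix
 * tree and the lists, under blkcg->lock.
 */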

/**
 * blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
 * create one.  blkg creation is performed recursively from blkcg_root such
 * that all non-root blkg's have access to the parent blkg.  This function
 * should be called under RCU read lock and @q->queue_lock.
 *
 * Returns pointer to the looked up or created blkg on success, ERR_PTR()
 * value on error.  If @q is dead, returns ERR_PTR(-ENODEV).  If @q is not
 * dead and bypassing, returns ERR_PTR(-EBUSY).
 */
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q)
{
	struct blkcg_gq *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);

	blkg = __blkg_lookup(blkcg, q, true);
	if (blkg)
		return blkg;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent = blkcg_parent(blkcg);

		while (parent && !__blkg_lookup(parent, q, false)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		blkg = blkg_create(pos, q, NULL);
		if (pos == blkcg || IS_ERR(blkg))
			return blkg;
	}
}

static void blkg_pd_offline(struct blkcg_gq *blkg)
{
	int i;

	lockdep_assert_held(blkg->q->queue_lock);
	lockdep_assert_held(&blkg->blkcg->lock);

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && !blkg->pd[i]->offline &&
		    pol->pd_offline_fn) {
			pol->pd_offline_fn(blkg->pd[i]);
			blkg->pd[i]->offline = true;
		}
	}
}

static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;
	struct blkcg_gq *parent = blkg->parent;

	lockdep_assert_held(blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

	if (parent) {
		blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
		blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
	}

	blkg->online = false;

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock.  If it's not pointing to @blkg now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkcg_gq *blkg, *n;

	lockdep_assert_held(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_pd_offline(blkg);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	q->root_blkg = NULL;
	q->root_rl.blkg = NULL;
}

/*
 * A group is RCU protected, but having an rcu lock does not mean that one
 * can access all the fields of blkg and assume these are valid.  For
 * example, don't try to follow throtl_data and request queue links.
 *
 * Having a reference to blkg under an rcu allows accesses to only values
 * local to groups like group stats and group rate limits.
 */
void __blkg_release_rcu(struct rcu_head *rcu_head)
{
	struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);

	/* release the blkcg and parent blkg refs this blkg has been holding */
	css_put(&blkg->blkcg->css);
	if (blkg->parent)
		blkg_put(blkg->parent);

	wb_congested_put(blkg->wb_congested);

	blkg_free(blkg);
}
EXPORT_SYMBOL_GPL(__blkg_release_rcu);
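
/*
 * Sketch of safe temporary access (assuming blkg_try_get() from
 * blk-cgroup.h): to use a blkg beyond the RCU section it was found in,
 * take a real reference first, mirroring blkcg_maybe_throttle_current()
 * below:
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg)
 *		blkg = blkg_try_get(blkg);
 *	rcu_read_unlock();
 *	if (blkg) {
 *		... use blkg ...
 *		blkg_put(blkg);
 *	}
 */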

/*
 * Helper used by blk_queue_for_each_rl() to walk to the next request_list.
 * It's a bit tricky because the root blkg uses @q->root_rl instead of its
 * own rl.
 */
struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q)
{
	struct list_head *ent;
	struct blkcg_gq *blkg;

	/*
	 * Determine the current blkg list_head.  The first entry is
	 * root_rl which is off @q->blkg_list and mapped to the head.
	 */
	if (rl == &q->root_rl) {
		ent = &q->blkg_list;
		/* There are no more block groups, hence no request lists */
		if (list_empty(ent))
			return NULL;
	} else {
		blkg = container_of(rl, struct blkcg_gq, rl);
		ent = &blkg->q_node;
	}

	/* walk to the next list_head, skip root blkcg */
	ent = ent->next;
	if (ent == &q->root_blkg->q_node)
		ent = ent->next;
	if (ent == &q->blkg_list)
		return NULL;

	blkg = container_of(ent, struct blkcg_gq, q_node);
	return &blkg->rl;
}
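
/*
 * Usage sketch: callers normally iterate request_lists through the
 * blk_queue_for_each_rl() macro in blkdev.h rather than calling
 * __blk_queue_next_rl() directly, roughly:
 *
 *	struct request_list *rl;
 *
 *	blk_queue_for_each_rl(rl, q) {
 *		... operate on rl ...
 *	}
 */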

static int blkcg_reset_stats(struct cgroup_subsys_state *css,
			     struct cftype *cftype, u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;
	int i;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		blkg_rwstat_reset(&blkg->stat_bytes);
		blkg_rwstat_reset(&blkg->stat_ios);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg->pd[i]);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info->dev)
		return dev_name(blkg->q->backing_dev_info->dev);
	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_dev_name);

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data and the matching queue lock held.  If @show_total
 * is %true, the sum of the return values from @prfill is printed with
 * "Total" label at the end.
 *
 * This is to be used to construct print functions for the
 * cftype->seq_show method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	u64 total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		spin_lock_irq(blkg->q->queue_lock);
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
		spin_unlock_irq(blkg->q->queue_lock);
	}
	rcu_read_unlock();

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
		[BLKG_RWSTAT_DISCARD]	= "Discard",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)atomic64_read(&rwstat->aux_cnt[i]));

	v = atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_READ]) +
		atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_WRITE]) +
		atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_DISCARD]);
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
	return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);
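
/*
 * Usage sketch (hypothetical policy "foo"): a policy typically combines
 * blkcg_print_blkgs() with one of the prfill helpers above in its
 * cftype->seq_show callback, e.g.:
 *
 *	static int foo_print_stat(struct seq_file *sf, void *v)
 *	{
 *		blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 *				  blkg_prfill_stat, &blkcg_policy_foo,
 *				  offsetof(struct foo_pd, stat), true);
 *		return 0;
 *	}
 */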

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);

static u64 blkg_prfill_rwstat_field(struct seq_file *sf,
				    struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd->blkg + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/**
 * blkg_print_stat_bytes - seq_show callback for blkg->stat_bytes
 * @sf: seq_file to print to
 * @v: unused
 *
 * To be used as cftype->seq_show to print blkg->stat_bytes.
 * cftype->private must be set to the blkcg_policy.
 */
int blkg_print_stat_bytes(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_bytes), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_bytes);

/**
 * blkg_print_stat_ios - seq_show callback for blkg->stat_ios
 * @sf: seq_file to print to
 * @v: unused
 *
 * To be used as cftype->seq_show to print blkg->stat_ios.  cftype->private
 * must be set to the blkcg_policy.
 */
int blkg_print_stat_ios(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_ios), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_ios);

static u64 blkg_prfill_rwstat_field_recursive(struct seq_file *sf,
					      struct blkg_policy_data *pd,
					      int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_recursive_sum(pd->blkg,
							      NULL, off);
	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/**
 * blkg_print_stat_bytes_recursive - recursive version of blkg_print_stat_bytes
 * @sf: seq_file to print to
 * @v: unused
 */
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field_recursive,
			  (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_bytes), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_bytes_recursive);

/**
 * blkg_print_stat_ios_recursive - recursive version of blkg_print_stat_ios
 * @sf: seq_file to print to
 * @v: unused
 */
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field_recursive,
			  (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_ios), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_ios_recursive);
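
/*
 * Wiring sketch (hypothetical policy "foo"): the seq_show helpers above
 * expect cftype->private to carry the blkcg_policy pointer, e.g.:
 *
 *	static struct cftype foo_files[] = {
 *		{
 *			.name = "foo.io_service_bytes",
 *			.private = (unsigned long)&blkcg_policy_foo,
 *			.seq_show = blkg_print_stat_bytes,
 *		},
 *		{ }	<- terminator
 *	};
 */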

/**
 * blkg_stat_recursive_sum - collect hierarchical blkg_stat
 * @blkg: blkg of interest
 * @pol: blkcg_policy which contains the blkg_stat
 * @off: offset to the blkg_stat in blkg_policy_data or @blkg
 *
 * Collect the blkg_stat specified by @blkg, @pol and @off and all its
 * online descendants and their aux counts.  The caller must be holding the
 * queue lock for online tests.
 *
 * If @pol is NULL, blkg_stat is at @off bytes into @blkg; otherwise, it is
 * at @off bytes into @blkg's blkg_policy_data of the policy.
 */
u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
			    struct blkcg_policy *pol, int off)
{
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	u64 sum = 0;

	lockdep_assert_held(blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
		struct blkg_stat *stat;

		if (!pos_blkg->online)
			continue;

		if (pol)
			stat = (void *)blkg_to_pd(pos_blkg, pol) + off;
		else
			stat = (void *)blkg + off;

		sum += blkg_stat_read(stat) + atomic64_read(&stat->aux_cnt);
	}
	rcu_read_unlock();

	return sum;
}
EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);

/**
 * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
 * @blkg: blkg of interest
 * @pol: blkcg_policy which contains the blkg_rwstat
 * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
 *
 * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
 * online descendants and their aux counts.  The caller must be holding the
 * queue lock for online tests.
 *
 * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
 * is at @off bytes into @blkg's blkg_policy_data of the policy.
 */
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
					     struct blkcg_policy *pol, int off)
{
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	struct blkg_rwstat sum = { };
	int i;

	lockdep_assert_held(blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
		struct blkg_rwstat *rwstat;

		if (!pos_blkg->online)
			continue;

		if (pol)
			rwstat = (void *)blkg_to_pd(pos_blkg, pol) + off;
		else
			rwstat = (void *)pos_blkg + off;

		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			atomic64_add(atomic64_read(&rwstat->aux_cnt[i]) +
				percpu_counter_sum_positive(&rwstat->cpu_cnt[i]),
				&sum.aux_cnt[i]);
	}
	rcu_read_unlock();

	return sum;
}
EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);
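
/*
 * Usage note: blkcg_print_stat() below calls blkg_rwstat_recursive_sum()
 * with @pol == NULL to total blkg->stat_bytes / stat_ios over a subtree,
 * e.g. (queue_lock held):
 *
 *	rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
 *				offsetof(struct blkcg_gq, stat_bytes));
 */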

/* Performs queue bypass and policy enabled checks then looks up blkg. */
static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
					  const struct blkcg_policy *pol,
					  struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	if (!blkcg_policy_enabled(q, pol))
		return ERR_PTR(-EOPNOTSUPP);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);

	return __blkg_lookup(blkcg, q, true /* update_hint */);
}

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->body the
 * part of @input following MAJ:MIN.  This function returns with RCU read
 * lock and queue lock held and must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(disk->queue->queue_lock)
{
	struct gendisk *disk;
	struct request_queue *q;
	struct blkcg_gq *blkg;
	unsigned int major, minor;
	int key_len, part, ret;
	char *body;

	if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
		return -EINVAL;

	body = input + key_len;
	if (!isspace(*body))
		return -EINVAL;
	body = skip_spaces(body);

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk)
		return -ENODEV;
	if (part) {
		ret = -ENODEV;
		goto fail;
	}

	q = disk->queue;

	rcu_read_lock();
	spin_lock_irq(q->queue_lock);

	blkg = blkg_lookup_check(blkcg, pol, q);
	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto fail_unlock;
	}

	if (blkg)
		goto success;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent;
		struct blkcg_gq *new_blkg;

		parent = blkcg_parent(blkcg);
		while (parent && !__blkg_lookup(parent, q, false)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		/* Drop locks to do new blkg allocation with GFP_KERNEL. */
		spin_unlock_irq(q->queue_lock);
		rcu_read_unlock();

		new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto fail;
		}

		rcu_read_lock();
		spin_lock_irq(q->queue_lock);

		blkg = blkg_lookup_check(pos, pol, q);
		if (IS_ERR(blkg)) {
			ret = PTR_ERR(blkg);
			goto fail_unlock;
		}

		if (blkg) {
			blkg_free(new_blkg);
		} else {
			blkg = blkg_create(pos, q, new_blkg);
			if (unlikely(IS_ERR(blkg))) {
				ret = PTR_ERR(blkg);
				goto fail_unlock;
			}
		}

		if (pos == blkcg)
			goto success;
	}
success:
	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->body = body;
	return 0;

fail_unlock:
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();
fail:
	put_disk_and_module(disk);
	/*
	 * If queue was bypassing, we should retry.  Do so after a
	 * short msleep().  It isn't strictly necessary but queue
	 * can be bypassing for some time and it's always nice to
	 * avoid busy looping.
	 */
	if (ret == -EBUSY) {
		msleep(10);
		ret = restart_syscall();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);
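
/*
 * Usage sketch (hypothetical policy "foo", modeled on the pattern used by
 * blk-throttle): a per-blkg config write handler brackets its update with
 * blkg_conf_prep()/blkg_conf_finish():
 *
 *	ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *	if (ret)
 *		return ret;
 *	... parse ctx.body, update blkg_to_pd(ctx.blkg, &blkcg_policy_foo) ...
 *	blkg_conf_finish(&ctx);
 */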

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
	spin_unlock_irq(ctx->disk->queue->queue_lock);
	rcu_read_unlock();
	put_disk_and_module(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);

static int blkcg_print_stat(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct blkcg_gq *blkg;

	rcu_read_lock();

	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		const char *dname;
		char *buf;
		struct blkg_rwstat rwstat;
		u64 rbytes, wbytes, rios, wios, dbytes, dios;
		size_t size = seq_get_buf(sf, &buf), off = 0;
		int i;
		bool has_stats = false;

		dname = blkg_dev_name(blkg);
		if (!dname)
			continue;

		/*
		 * Hooray string manipulation, count is the size written NOT
		 * INCLUDING THE \0, so size is now count+1 less than what we
		 * had before, but we want to start writing the next bit from
		 * the \0 so we only add count to buf.
		 */
		off += scnprintf(buf+off, size-off, "%s ", dname);

		spin_lock_irq(blkg->q->queue_lock);

		rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
					offsetof(struct blkcg_gq, stat_bytes));
		rbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
		wbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
		dbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_DISCARD]);

		rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
					offsetof(struct blkcg_gq, stat_ios));
		rios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
		wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
		dios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_DISCARD]);

		spin_unlock_irq(blkg->q->queue_lock);

		if (rbytes || wbytes || rios || wios) {
			has_stats = true;
			off += scnprintf(buf+off, size-off,
					 "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
					 rbytes, wbytes, rios, wios,
					 dbytes, dios);
		}

		if (!blkcg_debug_stats)
			goto next;

		if (atomic_read(&blkg->use_delay)) {
			has_stats = true;
			off += scnprintf(buf+off, size-off,
					 " use_delay=%d delay_nsec=%llu",
					 atomic_read(&blkg->use_delay),
					 (unsigned long long)atomic64_read(&blkg->delay_nsec));
		}

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];
			size_t written;

			if (!blkg->pd[i] || !pol->pd_stat_fn)
				continue;

			written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
			if (written)
				has_stats = true;
			off += written;
		}
next:
		if (has_stats) {
			off += scnprintf(buf+off, size-off, "\n");
			seq_commit(sf, off);
		}
	}

	rcu_read_unlock();
	return 0;
}
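
/*
 * Example of a line emitted above for one device (informal; "8:16" is the
 * bdi device name):
 *
 *	8:16 rbytes=1459200 wbytes=314773504 rios=192 wios=353 dbytes=0 dios=0
 *
 * The use_delay/delay_nsec and per-policy fields are appended only when
 * blkcg_debug_stats is set.
 */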

static struct cftype blkcg_files[] = {
	{
		.name = "stat",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = blkcg_print_stat,
	},
	{ }	/* terminate */
};

static struct cftype blkcg_legacy_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};

/**
 * blkcg_css_offline - cgroup css_offline callback
 * @css: css of interest
 *
 * This function is called when @css is about to go away and is responsible
 * for offlining the policy data (pd) of all blkgs and killing all wbs
 * associated with @css.  Offlining blkg pds should be done while holding
 * both q and blkcg locks.  As the blkcg lock is nested inside the q lock,
 * this function performs reverse double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;

	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_pd_offline(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);

	wb_blkcg_offline(blkcg);
}

/**
 * blkcg_destroy_all_blkgs - destroy all blkgs associated with a blkcg
 * @blkcg: blkcg of interest
 *
 * This function is called when the blkcg css is about to be freed and is
 * responsible for destroying all blkgs associated with @blkcg.
 * blkgs should be removed while holding both q and blkcg locks.  As the
 * blkcg lock is nested inside the q lock, this function performs reverse
 * double lock dancing.
 */
static void blkcg_destroy_all_blkgs(struct blkcg *blkcg)
{
	spin_lock_irq(&blkcg->lock);
	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						    struct blkcg_gq,
						    blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}
	spin_unlock_irq(&blkcg->lock);
}

static void blkcg_css_free(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	int i;

	blkcg_destroy_all_blkgs(blkcg);

	mutex_lock(&blkcg_pol_mutex);

	list_del(&blkcg->all_blkcgs_node);

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

	mutex_unlock(&blkcg_pol_mutex);

	kfree(blkcg);
}
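
/*
 * Note (informal): blkcg_css_offline() above only offlines policy data so
 * in-flight IO can keep using the blkgs; the blkgs themselves are torn
 * down later, when the css is freed, by blkcg_destroy_all_blkgs() called
 * from blkcg_css_free().
 */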

static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct blkcg *blkcg;
	struct cgroup_subsys_state *ret;
	int i;

	mutex_lock(&blkcg_pol_mutex);

	if (!parent_css) {
		blkcg = &blkcg_root;
	} else {
		blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
		if (!blkcg) {
			ret = ERR_PTR(-ENOMEM);
			goto unlock;
		}
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg_policy_data *cpd;

		/*
		 * If the policy hasn't been attached yet, wait for it
		 * to be attached before doing anything else.  Otherwise,
		 * check if the policy requires any specific per-cgroup
		 * data: if it does, allocate and initialize it.
		 */
		if (!pol || !pol->cpd_alloc_fn)
			continue;

		cpd = pol->cpd_alloc_fn(GFP_KERNEL);
		if (!cpd) {
			ret = ERR_PTR(-ENOMEM);
			goto free_pd_blkcg;
		}
		blkcg->cpd[i] = cpd;
		cpd->blkcg = blkcg;
		cpd->plid = i;
		if (pol->cpd_init_fn)
			pol->cpd_init_fn(cpd);
	}

	spin_lock_init(&blkcg->lock);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
	INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
	INIT_LIST_HEAD(&blkcg->cgwb_list);
#endif
	list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);

	mutex_unlock(&blkcg_pol_mutex);
	return &blkcg->css;

free_pd_blkcg:
	for (i--; i >= 0; i--)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

	if (blkcg != &blkcg_root)
		kfree(blkcg);
unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node().  Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	struct blkcg_gq *new_blkg, *blkg;
	bool preloaded;
	int ret;

	new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
	if (!new_blkg)
		return -ENOMEM;

	preloaded = !radix_tree_preload(GFP_KERNEL);

	/* Make sure the root blkg exists. */
	rcu_read_lock();
	spin_lock_irq(q->queue_lock);
	blkg = blkg_create(&blkcg_root, q, new_blkg);
	if (IS_ERR(blkg))
		goto err_unlock;
	q->root_blkg = blkg;
	q->root_rl.blkg = blkg;
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	if (preloaded)
		radix_tree_preload_end();

	ret = blk_iolatency_init(q);
	if (ret) {
		spin_lock_irq(q->queue_lock);
		blkg_destroy_all(q);
		spin_unlock_irq(q->queue_lock);
		return ret;
	}

	ret = blk_throtl_init(q);
	if (ret) {
		spin_lock_irq(q->queue_lock);
		blkg_destroy_all(q);
		spin_unlock_irq(q->queue_lock);
	}
	return ret;

err_unlock:
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();
	if (preloaded)
		radix_tree_preload_end();
	return PTR_ERR(blkg);
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	/*
	 * @q could be exiting and already have destroyed all blkgs as
	 * indicated by NULL root_blkg.  If so, don't confuse policies.
	 */
	if (!q->root_blkg)
		return;

	blk_throtl_drain(q);
}
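
/*
 * Note (informal): blkcg_init_queue(), blkcg_drain_queue() and
 * blkcg_exit_queue() (below) mirror the request_queue lifecycle hooks
 * they are called from: blk_alloc_queue_node(), blk_drain_queue() and
 * blk_release_queue() respectively.
 */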

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	blkg_destroy_all(q);
	spin_unlock_irq(q->queue_lock);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *dst_css;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, dst_css, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

static void blkcg_bind(struct cgroup_subsys_state *root_css)
{
	int i;

	mutex_lock(&blkcg_pol_mutex);

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg *blkcg;

		if (!pol || !pol->cpd_bind_fn)
			continue;

		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node)
			if (blkcg->cpd[pol->plid])
				pol->cpd_bind_fn(blkcg->cpd[pol->plid]);
	}
	mutex_unlock(&blkcg_pol_mutex);
}

static void blkcg_exit(struct task_struct *tsk)
{
	if (tsk->throttle_queue)
		blk_put_queue(tsk->throttle_queue);
	tsk->throttle_queue = NULL;
}

struct cgroup_subsys io_cgrp_subsys = {
	.css_alloc = blkcg_css_alloc,
	.css_offline = blkcg_css_offline,
	.css_free = blkcg_css_free,
	.can_attach = blkcg_can_attach,
	.bind = blkcg_bind,
	.dfl_cftypes = blkcg_files,
	.legacy_cftypes = blkcg_legacy_files,
	.legacy_name = "blkio",
	.exit = blkcg_exit,
#ifdef CONFIG_MEMCG
	/*
	 * This ensures that, if available, memcg is automatically enabled
	 * together on the default hierarchy so that the owner cgroup can
	 * be retrieved from writeback pages.
	 */
	.depends_on = 1 << memory_cgrp_id,
#endif
};
EXPORT_SYMBOL_GPL(io_cgrp_subsys);
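
/*
 * Note: on the default (v2) hierarchy this subsystem shows up as "io"
 * and exposes the dfl_cftypes files (e.g. io.stat); legacy_name keeps
 * the "blkio" name and the legacy files on v1 hierarchies.
 */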

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol)
{
	struct blkg_policy_data *pd_prealloc = NULL;
	struct blkcg_gq *blkg;
	int ret;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	if (q->mq_ops)
		blk_mq_freeze_queue(q);
	else
		blk_queue_bypass_start(q);
pd_prealloc:
	if (!pd_prealloc) {
		pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
		if (!pd_prealloc) {
			ret = -ENOMEM;
			goto out_bypass_end;
		}
	}

	spin_lock_irq(q->queue_lock);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		struct blkg_policy_data *pd;

		if (blkg->pd[pol->plid])
			continue;

		pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q->node);
		if (!pd)
			swap(pd, pd_prealloc);
		if (!pd) {
			spin_unlock_irq(q->queue_lock);
			goto pd_prealloc;
		}

		blkg->pd[pol->plid] = pd;
		pd->blkg = blkg;
		pd->plid = pol->plid;
		if (pol->pd_init_fn)
			pol->pd_init_fn(pd);
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;

	spin_unlock_irq(q->queue_lock);
out_bypass_end:
	if (q->mq_ops)
		blk_mq_unfreeze_queue(q);
	else
		blk_queue_bypass_end(q);
	if (pd_prealloc)
		pol->pd_free_fn(pd_prealloc);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);

/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol)
{
	struct blkcg_gq *blkg;

	if (!blkcg_policy_enabled(q, pol))
		return;

	if (q->mq_ops)
		blk_mq_freeze_queue(q);
	else
		blk_queue_bypass_start(q);

	spin_lock_irq(q->queue_lock);

	__clear_bit(pol->plid, q->blkcg_pols);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		if (blkg->pd[pol->plid]) {
			if (!blkg->pd[pol->plid]->offline &&
			    pol->pd_offline_fn) {
				pol->pd_offline_fn(blkg->pd[pol->plid]);
				blkg->pd[pol->plid]->offline = true;
			}
			pol->pd_free_fn(blkg->pd[pol->plid]);
			blkg->pd[pol->plid] = NULL;
		}
	}

	spin_unlock_irq(q->queue_lock);

	if (q->mq_ops)
		blk_mq_unfreeze_queue(q);
	else
		blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
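
/*
 * Usage sketch (hypothetical policy "foo"): a policy enables itself on a
 * queue with this pair, typically from its per-queue init/exit paths:
 *
 *	ret = blkcg_activate_policy(q, &blkcg_policy_foo);
 *	...
 *	blkcg_deactivate_policy(q, &blkcg_policy_foo);
 */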

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;
	int i, ret;

	mutex_lock(&blkcg_pol_register_mutex);
	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	ret = -ENOSPC;
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS)
		goto err_unlock;

	/*
	 * Make sure cpd_alloc_fn/cpd_free_fn and pd_alloc_fn/pd_free_fn
	 * come in pairs.
	 */
	ret = -EINVAL;
	if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
	    (!pol->pd_alloc_fn ^ !pol->pd_free_fn))
		goto err_unlock;

	/* register @pol */
	pol->plid = i;
	blkcg_policy[pol->plid] = pol;

	/* allocate and install cpd's */
	if (pol->cpd_alloc_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			struct blkcg_policy_data *cpd;

			cpd = pol->cpd_alloc_fn(GFP_KERNEL);
			if (!cpd) {
				ret = -ENOMEM;
				goto err_free_cpds;
			}

			blkcg->cpd[pol->plid] = cpd;
			cpd->blkcg = blkcg;
			cpd->plid = pol->plid;
			pol->cpd_init_fn(cpd);
		}
	}

	mutex_unlock(&blkcg_pol_mutex);

	/* everything is in place, add intf files for the new policy */
	if (pol->dfl_cftypes)
		WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
					       pol->dfl_cftypes));
	if (pol->legacy_cftypes)
		WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
						  pol->legacy_cftypes));
	mutex_unlock(&blkcg_pol_register_mutex);
	return 0;

err_free_cpds:
	if (pol->cpd_free_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			if (blkcg->cpd[pol->plid]) {
				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
				blkcg->cpd[pol->plid] = NULL;
			}
		}
	}
	blkcg_policy[pol->plid] = NULL;
err_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	mutex_unlock(&blkcg_pol_register_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);

/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;

	mutex_lock(&blkcg_pol_register_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->dfl_cftypes)
		cgroup_rm_cftypes(pol->dfl_cftypes);
	if (pol->legacy_cftypes)
		cgroup_rm_cftypes(pol->legacy_cftypes);

	/* remove cpds and unregister */
	mutex_lock(&blkcg_pol_mutex);

	if (pol->cpd_free_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			if (blkcg->cpd[pol->plid]) {
				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
				blkcg->cpd[pol->plid] = NULL;
			}
		}
	}
	blkcg_policy[pol->plid] = NULL;

	mutex_unlock(&blkcg_pol_mutex);
out_unlock:
	mutex_unlock(&blkcg_pol_register_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
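
/*
 * Usage sketch (hypothetical policy "foo"): policies register at module
 * init and unregister at module exit:
 *
 *	static int __init foo_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_foo);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		blkcg_policy_unregister(&blkcg_policy_foo);
 *	}
 */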

/*
 * Scale the accumulated delay based on how long it has been since we updated
 * the delay.  We only call this when we are adding delay, in case it's been a
 * while since we added delay, and when we are checking to see if we need to
 * delay a task, to account for any delays that may have occurred.
 */
static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
{
	u64 old = atomic64_read(&blkg->delay_start);

	/*
	 * We only want to scale down every second.  The idea here is that we
	 * want to delay people for min(delay_nsec, NSEC_PER_SEC) in a certain
	 * time window.  We only want to throttle tasks for recent delay that
	 * has occurred, in 1 second time windows since that's the maximum
	 * things can be throttled.  We save the current delay window in
	 * blkg->last_delay so we know what amount is still left to be charged
	 * to the blkg from this point onward.  blkg->last_use keeps track of
	 * the use_delay counter.  The idea is if we're unthrottling the blkg we
	 * are ok with whatever is happening now, and we can take away more of
	 * the accumulated delay as we've already throttled enough that
	 * everybody is happy with their IO latencies.
	 */
	if (time_before64(old + NSEC_PER_SEC, now) &&
	    atomic64_cmpxchg(&blkg->delay_start, old, now) == old) {
		u64 cur = atomic64_read(&blkg->delay_nsec);
		u64 sub = min_t(u64, blkg->last_delay, now - old);
		int cur_use = atomic_read(&blkg->use_delay);

		/*
		 * We've been unthrottled, subtract a larger chunk of our
		 * accumulated delay.
		 */
		if (cur_use < blkg->last_use)
			sub = max_t(u64, sub, blkg->last_delay >> 1);

		/*
		 * This shouldn't happen, but handle it anyway.  Our delay_nsec
		 * should only ever be growing except here where we subtract out
		 * min(last_delay, 1 second), but lord knows bugs happen and I'd
		 * rather not end up with negative numbers.
		 */
		if (unlikely(cur < sub)) {
			atomic64_set(&blkg->delay_nsec, 0);
			blkg->last_delay = 0;
		} else {
			atomic64_sub(sub, &blkg->delay_nsec);
			blkg->last_delay = cur - sub;
		}
		blkg->last_use = cur_use;
	}
}
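
/*
 * Worked example (informal): say 1.5s have elapsed since delay_start and
 * last_delay is 0.8s.  The window has expired, so sub = min(0.8s, 1.5s) =
 * 0.8s is taken off delay_nsec; if use_delay dropped since the last
 * update, sub is additionally raised to at least last_delay / 2.  The 1s
 * window then restarts from @now.
 */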

/*
 * This is called when we want to actually walk up the hierarchy and check to
 * see if we need to throttle, and then actually throttle if there is some
 * accumulated delay.  This should only be called upon return to user space so
 * we're not holding some lock that would induce a priority inversion.
 */
static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
{
	u64 now = ktime_to_ns(ktime_get());
	u64 exp;
	u64 delay_nsec = 0;
	int tok;

	while (blkg->parent) {
		if (atomic_read(&blkg->use_delay)) {
			blkcg_scale_delay(blkg, now);
			delay_nsec = max_t(u64, delay_nsec,
					   atomic64_read(&blkg->delay_nsec));
		}
		blkg = blkg->parent;
	}

	if (!delay_nsec)
		return;

	/*
	 * Let's not sleep for all eternity if we've amassed a huge delay.
	 * Swapping or metadata IO can accumulate 10's of seconds worth of
	 * delay, and we want userspace to be able to do _something_ so cap
	 * the delays at 0.25s.  If there's 10's of seconds worth of delay
	 * then the tasks will be delayed for 0.25s for every syscall.
	 */
	delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC);

	/*
	 * TODO: the use_memdelay flag is going to be for the upcoming psi stuff
	 * that hasn't landed upstream yet.  Once that stuff is in place we need
	 * to do a psi_memstall_enter/leave if memdelay is set.
	 */

	exp = ktime_add_ns(now, delay_nsec);
	tok = io_schedule_prepare();
	do {
		__set_current_state(TASK_KILLABLE);
		if (!schedule_hrtimeout(&exp, HRTIMER_MODE_ABS))
			break;
	} while (!fatal_signal_pending(current));
	io_schedule_finish(tok);
}

/**
 * blkcg_maybe_throttle_current - throttle the current task if it has been marked
 *
 * This is only called if we've been marked with set_notify_resume().  Obviously
 * we can be set_notify_resume() for reasons other than blkcg throttling, so we
 * check to see if current->throttle_queue is set and if not this doesn't do
 * anything.  This should only ever be called by the resume code, it's not meant
 * to be called by people willy-nilly as it will actually do the work to
 * throttle the task if it is setup for throttling.
 */
void blkcg_maybe_throttle_current(void)
{
	struct request_queue *q = current->throttle_queue;
	struct cgroup_subsys_state *css;
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;
	bool use_memdelay = current->use_memdelay;

	if (!q)
		return;

	current->throttle_queue = NULL;
	current->use_memdelay = false;

	rcu_read_lock();
	css = kthread_blkcg();
	if (css)
		blkcg = css_to_blkcg(css);
	else
		blkcg = css_to_blkcg(task_css(current, io_cgrp_id));

	if (!blkcg)
		goto out;
	blkg = blkg_lookup(blkcg, q);
	if (!blkg)
		goto out;
	blkg = blkg_try_get(blkg);
	if (!blkg)
		goto out;
	rcu_read_unlock();

	blkcg_maybe_throttle_blkg(blkg, use_memdelay);
	blkg_put(blkg);
	blk_put_queue(q);
	return;
out:
	rcu_read_unlock();
	blk_put_queue(q);
}
EXPORT_SYMBOL_GPL(blkcg_maybe_throttle_current);

/**
 * blkcg_schedule_throttle - this task needs to check for throttling
 * @q: the request queue IO was submitted on
 * @use_memdelay: do we charge this to memory delay for PSI
 *
 * This is called by the IO controller when we know there's delay accumulated
 * for the blkg for this task.  We do not pass the blkg because there are places
 * we call this that may not have that information, the swapping code for
 * instance will only have a request_queue at that point.  This sets the
 * notify_resume for the task to check and see if it requires throttling before
 * returning to user space.
 *
 * We will only schedule once per syscall.  You can call this over and over
 * again and it will only do the check once upon return to user space, and only
 * throttle once.  If the task needs to be throttled again it'll need to be
 * re-set at the next time we see the task.
 */
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
{
	if (unlikely(current->flags & PF_KTHREAD))
		return;

	if (!blk_get_queue(q))
		return;

	if (current->throttle_queue)
		blk_put_queue(current->throttle_queue);
	current->throttle_queue = q;
	if (use_memdelay)
		current->use_memdelay = use_memdelay;
	set_notify_resume(current);
}
EXPORT_SYMBOL_GPL(blkcg_schedule_throttle);
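
/*
 * Usage sketch (informal, modeled on blk-iolatency): an IO controller
 * that decides a group must be slowed down charges delay and arms the
 * resume-time check:
 *
 *	blkcg_add_delay(blkg, now, delta_nsec);
 *	blkcg_schedule_throttle(q, use_memdelay);
 */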

/**
 * blkcg_add_delay - add delay to this blkg
 * @blkg: blkg of interest
 * @now: the current time in nanoseconds
 * @delta: how many nanoseconds of delay to add
 *
 * Charge @delta to the blkg's current delay accumulation.  This is used to
 * throttle tasks if an IO controller thinks we need more throttling.
 */
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
{
	blkcg_scale_delay(blkg, now);
	atomic64_add(delta, &blkg->delay_nsec);
}
EXPORT_SYMBOL_GPL(blkcg_add_delay);

module_param(blkcg_debug_stats, bool, 0644);
MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");