1 /* 2 * Common Block IO controller cgroup interface 3 * 4 * Based on ideas and code from CFQ, CFS and BFQ: 5 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk> 6 * 7 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it> 8 * Paolo Valente <paolo.valente@unimore.it> 9 * 10 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com> 11 * Nauman Rafique <nauman@google.com> 12 */ 13 #include <linux/ioprio.h> 14 #include <linux/kdev_t.h> 15 #include <linux/module.h> 16 #include <linux/err.h> 17 #include <linux/blkdev.h> 18 #include <linux/slab.h> 19 #include <linux/genhd.h> 20 #include <linux/delay.h> 21 #include <linux/atomic.h> 22 #include "blk-cgroup.h" 23 #include "blk.h" 24 25 #define MAX_KEY_LEN 100 26 27 static DEFINE_MUTEX(blkcg_pol_mutex); 28 29 struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT, 30 .cfq_leaf_weight = 2 * CFQ_WEIGHT_DEFAULT, }; 31 EXPORT_SYMBOL_GPL(blkcg_root); 32 33 static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS]; 34 35 static bool blkcg_policy_enabled(struct request_queue *q, 36 const struct blkcg_policy *pol) 37 { 38 return pol && test_bit(pol->plid, q->blkcg_pols); 39 } 40 41 /** 42 * blkg_free - free a blkg 43 * @blkg: blkg to free 44 * 45 * Free @blkg which may be partially allocated. 46 */ 47 static void blkg_free(struct blkcg_gq *blkg) 48 { 49 int i; 50 51 if (!blkg) 52 return; 53 54 for (i = 0; i < BLKCG_MAX_POLS; i++) 55 kfree(blkg->pd[i]); 56 57 blk_exit_rl(&blkg->rl); 58 kfree(blkg); 59 } 60 61 /** 62 * blkg_alloc - allocate a blkg 63 * @blkcg: block cgroup the new blkg is associated with 64 * @q: request_queue the new blkg is associated with 65 * @gfp_mask: allocation mask to use 66 * 67 * Allocate a new blkg assocating @blkcg and @q. 68 */ 69 static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q, 70 gfp_t gfp_mask) 71 { 72 struct blkcg_gq *blkg; 73 int i; 74 75 /* alloc and init base part */ 76 blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node); 77 if (!blkg) 78 return NULL; 79 80 blkg->q = q; 81 INIT_LIST_HEAD(&blkg->q_node); 82 blkg->blkcg = blkcg; 83 atomic_set(&blkg->refcnt, 1); 84 85 /* root blkg uses @q->root_rl, init rl only for !root blkgs */ 86 if (blkcg != &blkcg_root) { 87 if (blk_init_rl(&blkg->rl, q, gfp_mask)) 88 goto err_free; 89 blkg->rl.blkg = blkg; 90 } 91 92 for (i = 0; i < BLKCG_MAX_POLS; i++) { 93 struct blkcg_policy *pol = blkcg_policy[i]; 94 struct blkg_policy_data *pd; 95 96 if (!blkcg_policy_enabled(q, pol)) 97 continue; 98 99 /* alloc per-policy data and attach it to blkg */ 100 pd = kzalloc_node(pol->pd_size, gfp_mask, q->node); 101 if (!pd) 102 goto err_free; 103 104 blkg->pd[i] = pd; 105 pd->blkg = blkg; 106 pd->plid = i; 107 } 108 109 return blkg; 110 111 err_free: 112 blkg_free(blkg); 113 return NULL; 114 } 115 116 /** 117 * __blkg_lookup - internal version of blkg_lookup() 118 * @blkcg: blkcg of interest 119 * @q: request_queue of interest 120 * @update_hint: whether to update lookup hint with the result or not 121 * 122 * This is internal version and shouldn't be used by policy 123 * implementations. Looks up blkgs for the @blkcg - @q pair regardless of 124 * @q's bypass state. If @update_hint is %true, the caller should be 125 * holding @q->queue_lock and lookup hint is updated on success. 126 */ 127 struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q, 128 bool update_hint) 129 { 130 struct blkcg_gq *blkg; 131 132 blkg = rcu_dereference(blkcg->blkg_hint); 133 if (blkg && blkg->q == q) 134 return blkg; 135 136 /* 137 * Hint didn't match. Look up from the radix tree. Note that the 138 * hint can only be updated under queue_lock as otherwise @blkg 139 * could have already been removed from blkg_tree. The caller is 140 * responsible for grabbing queue_lock if @update_hint. 141 */ 142 blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id); 143 if (blkg && blkg->q == q) { 144 if (update_hint) { 145 lockdep_assert_held(q->queue_lock); 146 rcu_assign_pointer(blkcg->blkg_hint, blkg); 147 } 148 return blkg; 149 } 150 151 return NULL; 152 } 153 154 /** 155 * blkg_lookup - lookup blkg for the specified blkcg - q pair 156 * @blkcg: blkcg of interest 157 * @q: request_queue of interest 158 * 159 * Lookup blkg for the @blkcg - @q pair. This function should be called 160 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing 161 * - see blk_queue_bypass_start() for details. 162 */ 163 struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q) 164 { 165 WARN_ON_ONCE(!rcu_read_lock_held()); 166 167 if (unlikely(blk_queue_bypass(q))) 168 return NULL; 169 return __blkg_lookup(blkcg, q, false); 170 } 171 EXPORT_SYMBOL_GPL(blkg_lookup); 172 173 /* 174 * If @new_blkg is %NULL, this function tries to allocate a new one as 175 * necessary using %GFP_ATOMIC. @new_blkg is always consumed on return. 176 */ 177 static struct blkcg_gq *blkg_create(struct blkcg *blkcg, 178 struct request_queue *q, 179 struct blkcg_gq *new_blkg) 180 { 181 struct blkcg_gq *blkg; 182 int i, ret; 183 184 WARN_ON_ONCE(!rcu_read_lock_held()); 185 lockdep_assert_held(q->queue_lock); 186 187 /* blkg holds a reference to blkcg */ 188 if (!css_tryget_online(&blkcg->css)) { 189 ret = -EINVAL; 190 goto err_free_blkg; 191 } 192 193 /* allocate */ 194 if (!new_blkg) { 195 new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC); 196 if (unlikely(!new_blkg)) { 197 ret = -ENOMEM; 198 goto err_put_css; 199 } 200 } 201 blkg = new_blkg; 202 203 /* link parent */ 204 if (blkcg_parent(blkcg)) { 205 blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false); 206 if (WARN_ON_ONCE(!blkg->parent)) { 207 ret = -EINVAL; 208 goto err_put_css; 209 } 210 blkg_get(blkg->parent); 211 } 212 213 /* invoke per-policy init */ 214 for (i = 0; i < BLKCG_MAX_POLS; i++) { 215 struct blkcg_policy *pol = blkcg_policy[i]; 216 217 if (blkg->pd[i] && pol->pd_init_fn) 218 pol->pd_init_fn(blkg); 219 } 220 221 /* insert */ 222 spin_lock(&blkcg->lock); 223 ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg); 224 if (likely(!ret)) { 225 hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list); 226 list_add(&blkg->q_node, &q->blkg_list); 227 228 for (i = 0; i < BLKCG_MAX_POLS; i++) { 229 struct blkcg_policy *pol = blkcg_policy[i]; 230 231 if (blkg->pd[i] && pol->pd_online_fn) 232 pol->pd_online_fn(blkg); 233 } 234 } 235 blkg->online = true; 236 spin_unlock(&blkcg->lock); 237 238 if (!ret) { 239 if (blkcg == &blkcg_root) { 240 q->root_blkg = blkg; 241 q->root_rl.blkg = blkg; 242 } 243 return blkg; 244 } 245 246 /* @blkg failed fully initialized, use the usual release path */ 247 blkg_put(blkg); 248 return ERR_PTR(ret); 249 250 err_put_css: 251 css_put(&blkcg->css); 252 err_free_blkg: 253 blkg_free(new_blkg); 254 return ERR_PTR(ret); 255 } 256 257 /** 258 * blkg_lookup_create - lookup blkg, try to create one if not there 259 * @blkcg: blkcg of interest 260 * @q: request_queue of interest 261 * 262 * Lookup blkg for the @blkcg - @q pair. If it doesn't exist, try to 263 * create one. blkg creation is performed recursively from blkcg_root such 264 * that all non-root blkg's have access to the parent blkg. This function 265 * should be called under RCU read lock and @q->queue_lock. 266 * 267 * Returns pointer to the looked up or created blkg on success, ERR_PTR() 268 * value on error. If @q is dead, returns ERR_PTR(-EINVAL). If @q is not 269 * dead and bypassing, returns ERR_PTR(-EBUSY). 270 */ 271 struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, 272 struct request_queue *q) 273 { 274 struct blkcg_gq *blkg; 275 276 WARN_ON_ONCE(!rcu_read_lock_held()); 277 lockdep_assert_held(q->queue_lock); 278 279 /* 280 * This could be the first entry point of blkcg implementation and 281 * we shouldn't allow anything to go through for a bypassing queue. 282 */ 283 if (unlikely(blk_queue_bypass(q))) 284 return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY); 285 286 blkg = __blkg_lookup(blkcg, q, true); 287 if (blkg) 288 return blkg; 289 290 /* 291 * Create blkgs walking down from blkcg_root to @blkcg, so that all 292 * non-root blkgs have access to their parents. 293 */ 294 while (true) { 295 struct blkcg *pos = blkcg; 296 struct blkcg *parent = blkcg_parent(blkcg); 297 298 while (parent && !__blkg_lookup(parent, q, false)) { 299 pos = parent; 300 parent = blkcg_parent(parent); 301 } 302 303 blkg = blkg_create(pos, q, NULL); 304 if (pos == blkcg || IS_ERR(blkg)) 305 return blkg; 306 } 307 } 308 EXPORT_SYMBOL_GPL(blkg_lookup_create); 309 310 static void blkg_destroy(struct blkcg_gq *blkg) 311 { 312 struct blkcg *blkcg = blkg->blkcg; 313 int i; 314 315 lockdep_assert_held(blkg->q->queue_lock); 316 lockdep_assert_held(&blkcg->lock); 317 318 /* Something wrong if we are trying to remove same group twice */ 319 WARN_ON_ONCE(list_empty(&blkg->q_node)); 320 WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node)); 321 322 for (i = 0; i < BLKCG_MAX_POLS; i++) { 323 struct blkcg_policy *pol = blkcg_policy[i]; 324 325 if (blkg->pd[i] && pol->pd_offline_fn) 326 pol->pd_offline_fn(blkg); 327 } 328 blkg->online = false; 329 330 radix_tree_delete(&blkcg->blkg_tree, blkg->q->id); 331 list_del_init(&blkg->q_node); 332 hlist_del_init_rcu(&blkg->blkcg_node); 333 334 /* 335 * Both setting lookup hint to and clearing it from @blkg are done 336 * under queue_lock. If it's not pointing to @blkg now, it never 337 * will. Hint assignment itself can race safely. 338 */ 339 if (rcu_access_pointer(blkcg->blkg_hint) == blkg) 340 rcu_assign_pointer(blkcg->blkg_hint, NULL); 341 342 /* 343 * If root blkg is destroyed. Just clear the pointer since root_rl 344 * does not take reference on root blkg. 345 */ 346 if (blkcg == &blkcg_root) { 347 blkg->q->root_blkg = NULL; 348 blkg->q->root_rl.blkg = NULL; 349 } 350 351 /* 352 * Put the reference taken at the time of creation so that when all 353 * queues are gone, group can be destroyed. 354 */ 355 blkg_put(blkg); 356 } 357 358 /** 359 * blkg_destroy_all - destroy all blkgs associated with a request_queue 360 * @q: request_queue of interest 361 * 362 * Destroy all blkgs associated with @q. 363 */ 364 static void blkg_destroy_all(struct request_queue *q) 365 { 366 struct blkcg_gq *blkg, *n; 367 368 lockdep_assert_held(q->queue_lock); 369 370 list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) { 371 struct blkcg *blkcg = blkg->blkcg; 372 373 spin_lock(&blkcg->lock); 374 blkg_destroy(blkg); 375 spin_unlock(&blkcg->lock); 376 } 377 } 378 379 /* 380 * A group is RCU protected, but having an rcu lock does not mean that one 381 * can access all the fields of blkg and assume these are valid. For 382 * example, don't try to follow throtl_data and request queue links. 383 * 384 * Having a reference to blkg under an rcu allows accesses to only values 385 * local to groups like group stats and group rate limits. 386 */ 387 void __blkg_release_rcu(struct rcu_head *rcu_head) 388 { 389 struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head); 390 int i; 391 392 /* tell policies that this one is being freed */ 393 for (i = 0; i < BLKCG_MAX_POLS; i++) { 394 struct blkcg_policy *pol = blkcg_policy[i]; 395 396 if (blkg->pd[i] && pol->pd_exit_fn) 397 pol->pd_exit_fn(blkg); 398 } 399 400 /* release the blkcg and parent blkg refs this blkg has been holding */ 401 css_put(&blkg->blkcg->css); 402 if (blkg->parent) 403 blkg_put(blkg->parent); 404 405 blkg_free(blkg); 406 } 407 EXPORT_SYMBOL_GPL(__blkg_release_rcu); 408 409 /* 410 * The next function used by blk_queue_for_each_rl(). It's a bit tricky 411 * because the root blkg uses @q->root_rl instead of its own rl. 412 */ 413 struct request_list *__blk_queue_next_rl(struct request_list *rl, 414 struct request_queue *q) 415 { 416 struct list_head *ent; 417 struct blkcg_gq *blkg; 418 419 /* 420 * Determine the current blkg list_head. The first entry is 421 * root_rl which is off @q->blkg_list and mapped to the head. 422 */ 423 if (rl == &q->root_rl) { 424 ent = &q->blkg_list; 425 /* There are no more block groups, hence no request lists */ 426 if (list_empty(ent)) 427 return NULL; 428 } else { 429 blkg = container_of(rl, struct blkcg_gq, rl); 430 ent = &blkg->q_node; 431 } 432 433 /* walk to the next list_head, skip root blkcg */ 434 ent = ent->next; 435 if (ent == &q->root_blkg->q_node) 436 ent = ent->next; 437 if (ent == &q->blkg_list) 438 return NULL; 439 440 blkg = container_of(ent, struct blkcg_gq, q_node); 441 return &blkg->rl; 442 } 443 444 static int blkcg_reset_stats(struct cgroup_subsys_state *css, 445 struct cftype *cftype, u64 val) 446 { 447 struct blkcg *blkcg = css_to_blkcg(css); 448 struct blkcg_gq *blkg; 449 int i; 450 451 /* 452 * XXX: We invoke cgroup_add/rm_cftypes() under blkcg_pol_mutex 453 * which ends up putting cgroup's internal cgroup_tree_mutex under 454 * it; however, cgroup_tree_mutex is nested above cgroup file 455 * active protection and grabbing blkcg_pol_mutex from a cgroup 456 * file operation creates a possible circular dependency. cgroup 457 * internal locking is planned to go through further simplification 458 * and this issue should go away soon. For now, let's trylock 459 * blkcg_pol_mutex and restart the write on failure. 460 * 461 * http://lkml.kernel.org/g/5363C04B.4010400@oracle.com 462 */ 463 if (!mutex_trylock(&blkcg_pol_mutex)) 464 return restart_syscall(); 465 spin_lock_irq(&blkcg->lock); 466 467 /* 468 * Note that stat reset is racy - it doesn't synchronize against 469 * stat updates. This is a debug feature which shouldn't exist 470 * anyway. If you get hit by a race, retry. 471 */ 472 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) { 473 for (i = 0; i < BLKCG_MAX_POLS; i++) { 474 struct blkcg_policy *pol = blkcg_policy[i]; 475 476 if (blkcg_policy_enabled(blkg->q, pol) && 477 pol->pd_reset_stats_fn) 478 pol->pd_reset_stats_fn(blkg); 479 } 480 } 481 482 spin_unlock_irq(&blkcg->lock); 483 mutex_unlock(&blkcg_pol_mutex); 484 return 0; 485 } 486 487 static const char *blkg_dev_name(struct blkcg_gq *blkg) 488 { 489 /* some drivers (floppy) instantiate a queue w/o disk registered */ 490 if (blkg->q->backing_dev_info.dev) 491 return dev_name(blkg->q->backing_dev_info.dev); 492 return NULL; 493 } 494 495 /** 496 * blkcg_print_blkgs - helper for printing per-blkg data 497 * @sf: seq_file to print to 498 * @blkcg: blkcg of interest 499 * @prfill: fill function to print out a blkg 500 * @pol: policy in question 501 * @data: data to be passed to @prfill 502 * @show_total: to print out sum of prfill return values or not 503 * 504 * This function invokes @prfill on each blkg of @blkcg if pd for the 505 * policy specified by @pol exists. @prfill is invoked with @sf, the 506 * policy data and @data and the matching queue lock held. If @show_total 507 * is %true, the sum of the return values from @prfill is printed with 508 * "Total" label at the end. 509 * 510 * This is to be used to construct print functions for 511 * cftype->read_seq_string method. 512 */ 513 void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg, 514 u64 (*prfill)(struct seq_file *, 515 struct blkg_policy_data *, int), 516 const struct blkcg_policy *pol, int data, 517 bool show_total) 518 { 519 struct blkcg_gq *blkg; 520 u64 total = 0; 521 522 rcu_read_lock(); 523 hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) { 524 spin_lock_irq(blkg->q->queue_lock); 525 if (blkcg_policy_enabled(blkg->q, pol)) 526 total += prfill(sf, blkg->pd[pol->plid], data); 527 spin_unlock_irq(blkg->q->queue_lock); 528 } 529 rcu_read_unlock(); 530 531 if (show_total) 532 seq_printf(sf, "Total %llu\n", (unsigned long long)total); 533 } 534 EXPORT_SYMBOL_GPL(blkcg_print_blkgs); 535 536 /** 537 * __blkg_prfill_u64 - prfill helper for a single u64 value 538 * @sf: seq_file to print to 539 * @pd: policy private data of interest 540 * @v: value to print 541 * 542 * Print @v to @sf for the device assocaited with @pd. 543 */ 544 u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v) 545 { 546 const char *dname = blkg_dev_name(pd->blkg); 547 548 if (!dname) 549 return 0; 550 551 seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v); 552 return v; 553 } 554 EXPORT_SYMBOL_GPL(__blkg_prfill_u64); 555 556 /** 557 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat 558 * @sf: seq_file to print to 559 * @pd: policy private data of interest 560 * @rwstat: rwstat to print 561 * 562 * Print @rwstat to @sf for the device assocaited with @pd. 563 */ 564 u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd, 565 const struct blkg_rwstat *rwstat) 566 { 567 static const char *rwstr[] = { 568 [BLKG_RWSTAT_READ] = "Read", 569 [BLKG_RWSTAT_WRITE] = "Write", 570 [BLKG_RWSTAT_SYNC] = "Sync", 571 [BLKG_RWSTAT_ASYNC] = "Async", 572 }; 573 const char *dname = blkg_dev_name(pd->blkg); 574 u64 v; 575 int i; 576 577 if (!dname) 578 return 0; 579 580 for (i = 0; i < BLKG_RWSTAT_NR; i++) 581 seq_printf(sf, "%s %s %llu\n", dname, rwstr[i], 582 (unsigned long long)rwstat->cnt[i]); 583 584 v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE]; 585 seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v); 586 return v; 587 } 588 EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat); 589 590 /** 591 * blkg_prfill_stat - prfill callback for blkg_stat 592 * @sf: seq_file to print to 593 * @pd: policy private data of interest 594 * @off: offset to the blkg_stat in @pd 595 * 596 * prfill callback for printing a blkg_stat. 597 */ 598 u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off) 599 { 600 return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off)); 601 } 602 EXPORT_SYMBOL_GPL(blkg_prfill_stat); 603 604 /** 605 * blkg_prfill_rwstat - prfill callback for blkg_rwstat 606 * @sf: seq_file to print to 607 * @pd: policy private data of interest 608 * @off: offset to the blkg_rwstat in @pd 609 * 610 * prfill callback for printing a blkg_rwstat. 611 */ 612 u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd, 613 int off) 614 { 615 struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off); 616 617 return __blkg_prfill_rwstat(sf, pd, &rwstat); 618 } 619 EXPORT_SYMBOL_GPL(blkg_prfill_rwstat); 620 621 /** 622 * blkg_stat_recursive_sum - collect hierarchical blkg_stat 623 * @pd: policy private data of interest 624 * @off: offset to the blkg_stat in @pd 625 * 626 * Collect the blkg_stat specified by @off from @pd and all its online 627 * descendants and return the sum. The caller must be holding the queue 628 * lock for online tests. 629 */ 630 u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off) 631 { 632 struct blkcg_policy *pol = blkcg_policy[pd->plid]; 633 struct blkcg_gq *pos_blkg; 634 struct cgroup_subsys_state *pos_css; 635 u64 sum = 0; 636 637 lockdep_assert_held(pd->blkg->q->queue_lock); 638 639 rcu_read_lock(); 640 blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) { 641 struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol); 642 struct blkg_stat *stat = (void *)pos_pd + off; 643 644 if (pos_blkg->online) 645 sum += blkg_stat_read(stat); 646 } 647 rcu_read_unlock(); 648 649 return sum; 650 } 651 EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum); 652 653 /** 654 * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat 655 * @pd: policy private data of interest 656 * @off: offset to the blkg_stat in @pd 657 * 658 * Collect the blkg_rwstat specified by @off from @pd and all its online 659 * descendants and return the sum. The caller must be holding the queue 660 * lock for online tests. 661 */ 662 struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd, 663 int off) 664 { 665 struct blkcg_policy *pol = blkcg_policy[pd->plid]; 666 struct blkcg_gq *pos_blkg; 667 struct cgroup_subsys_state *pos_css; 668 struct blkg_rwstat sum = { }; 669 int i; 670 671 lockdep_assert_held(pd->blkg->q->queue_lock); 672 673 rcu_read_lock(); 674 blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) { 675 struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol); 676 struct blkg_rwstat *rwstat = (void *)pos_pd + off; 677 struct blkg_rwstat tmp; 678 679 if (!pos_blkg->online) 680 continue; 681 682 tmp = blkg_rwstat_read(rwstat); 683 684 for (i = 0; i < BLKG_RWSTAT_NR; i++) 685 sum.cnt[i] += tmp.cnt[i]; 686 } 687 rcu_read_unlock(); 688 689 return sum; 690 } 691 EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum); 692 693 /** 694 * blkg_conf_prep - parse and prepare for per-blkg config update 695 * @blkcg: target block cgroup 696 * @pol: target policy 697 * @input: input string 698 * @ctx: blkg_conf_ctx to be filled 699 * 700 * Parse per-blkg config update from @input and initialize @ctx with the 701 * result. @ctx->blkg points to the blkg to be updated and @ctx->v the new 702 * value. This function returns with RCU read lock and queue lock held and 703 * must be paired with blkg_conf_finish(). 704 */ 705 int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, 706 const char *input, struct blkg_conf_ctx *ctx) 707 __acquires(rcu) __acquires(disk->queue->queue_lock) 708 { 709 struct gendisk *disk; 710 struct blkcg_gq *blkg; 711 unsigned int major, minor; 712 unsigned long long v; 713 int part, ret; 714 715 if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3) 716 return -EINVAL; 717 718 disk = get_gendisk(MKDEV(major, minor), &part); 719 if (!disk || part) 720 return -EINVAL; 721 722 rcu_read_lock(); 723 spin_lock_irq(disk->queue->queue_lock); 724 725 if (blkcg_policy_enabled(disk->queue, pol)) 726 blkg = blkg_lookup_create(blkcg, disk->queue); 727 else 728 blkg = ERR_PTR(-EINVAL); 729 730 if (IS_ERR(blkg)) { 731 ret = PTR_ERR(blkg); 732 rcu_read_unlock(); 733 spin_unlock_irq(disk->queue->queue_lock); 734 put_disk(disk); 735 /* 736 * If queue was bypassing, we should retry. Do so after a 737 * short msleep(). It isn't strictly necessary but queue 738 * can be bypassing for some time and it's always nice to 739 * avoid busy looping. 740 */ 741 if (ret == -EBUSY) { 742 msleep(10); 743 ret = restart_syscall(); 744 } 745 return ret; 746 } 747 748 ctx->disk = disk; 749 ctx->blkg = blkg; 750 ctx->v = v; 751 return 0; 752 } 753 EXPORT_SYMBOL_GPL(blkg_conf_prep); 754 755 /** 756 * blkg_conf_finish - finish up per-blkg config update 757 * @ctx: blkg_conf_ctx intiailized by blkg_conf_prep() 758 * 759 * Finish up after per-blkg config update. This function must be paired 760 * with blkg_conf_prep(). 761 */ 762 void blkg_conf_finish(struct blkg_conf_ctx *ctx) 763 __releases(ctx->disk->queue->queue_lock) __releases(rcu) 764 { 765 spin_unlock_irq(ctx->disk->queue->queue_lock); 766 rcu_read_unlock(); 767 put_disk(ctx->disk); 768 } 769 EXPORT_SYMBOL_GPL(blkg_conf_finish); 770 771 struct cftype blkcg_files[] = { 772 { 773 .name = "reset_stats", 774 .write_u64 = blkcg_reset_stats, 775 }, 776 { } /* terminate */ 777 }; 778 779 /** 780 * blkcg_css_offline - cgroup css_offline callback 781 * @css: css of interest 782 * 783 * This function is called when @css is about to go away and responsible 784 * for shooting down all blkgs associated with @css. blkgs should be 785 * removed while holding both q and blkcg locks. As blkcg lock is nested 786 * inside q lock, this function performs reverse double lock dancing. 787 * 788 * This is the blkcg counterpart of ioc_release_fn(). 789 */ 790 static void blkcg_css_offline(struct cgroup_subsys_state *css) 791 { 792 struct blkcg *blkcg = css_to_blkcg(css); 793 794 spin_lock_irq(&blkcg->lock); 795 796 while (!hlist_empty(&blkcg->blkg_list)) { 797 struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first, 798 struct blkcg_gq, blkcg_node); 799 struct request_queue *q = blkg->q; 800 801 if (spin_trylock(q->queue_lock)) { 802 blkg_destroy(blkg); 803 spin_unlock(q->queue_lock); 804 } else { 805 spin_unlock_irq(&blkcg->lock); 806 cpu_relax(); 807 spin_lock_irq(&blkcg->lock); 808 } 809 } 810 811 spin_unlock_irq(&blkcg->lock); 812 } 813 814 static void blkcg_css_free(struct cgroup_subsys_state *css) 815 { 816 struct blkcg *blkcg = css_to_blkcg(css); 817 818 if (blkcg != &blkcg_root) 819 kfree(blkcg); 820 } 821 822 static struct cgroup_subsys_state * 823 blkcg_css_alloc(struct cgroup_subsys_state *parent_css) 824 { 825 struct blkcg *blkcg; 826 827 if (!parent_css) { 828 blkcg = &blkcg_root; 829 goto done; 830 } 831 832 blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL); 833 if (!blkcg) 834 return ERR_PTR(-ENOMEM); 835 836 blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT; 837 blkcg->cfq_leaf_weight = CFQ_WEIGHT_DEFAULT; 838 done: 839 spin_lock_init(&blkcg->lock); 840 INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC); 841 INIT_HLIST_HEAD(&blkcg->blkg_list); 842 843 return &blkcg->css; 844 } 845 846 /** 847 * blkcg_init_queue - initialize blkcg part of request queue 848 * @q: request_queue to initialize 849 * 850 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg 851 * part of new request_queue @q. 852 * 853 * RETURNS: 854 * 0 on success, -errno on failure. 855 */ 856 int blkcg_init_queue(struct request_queue *q) 857 { 858 might_sleep(); 859 860 return blk_throtl_init(q); 861 } 862 863 /** 864 * blkcg_drain_queue - drain blkcg part of request_queue 865 * @q: request_queue to drain 866 * 867 * Called from blk_drain_queue(). Responsible for draining blkcg part. 868 */ 869 void blkcg_drain_queue(struct request_queue *q) 870 { 871 lockdep_assert_held(q->queue_lock); 872 873 /* 874 * @q could be exiting and already have destroyed all blkgs as 875 * indicated by NULL root_blkg. If so, don't confuse policies. 876 */ 877 if (!q->root_blkg) 878 return; 879 880 blk_throtl_drain(q); 881 } 882 883 /** 884 * blkcg_exit_queue - exit and release blkcg part of request_queue 885 * @q: request_queue being released 886 * 887 * Called from blk_release_queue(). Responsible for exiting blkcg part. 888 */ 889 void blkcg_exit_queue(struct request_queue *q) 890 { 891 spin_lock_irq(q->queue_lock); 892 blkg_destroy_all(q); 893 spin_unlock_irq(q->queue_lock); 894 895 blk_throtl_exit(q); 896 } 897 898 /* 899 * We cannot support shared io contexts, as we have no mean to support 900 * two tasks with the same ioc in two different groups without major rework 901 * of the main cic data structures. For now we allow a task to change 902 * its cgroup only if it's the only owner of its ioc. 903 */ 904 static int blkcg_can_attach(struct cgroup_subsys_state *css, 905 struct cgroup_taskset *tset) 906 { 907 struct task_struct *task; 908 struct io_context *ioc; 909 int ret = 0; 910 911 /* task_lock() is needed to avoid races with exit_io_context() */ 912 cgroup_taskset_for_each(task, tset) { 913 task_lock(task); 914 ioc = task->io_context; 915 if (ioc && atomic_read(&ioc->nr_tasks) > 1) 916 ret = -EINVAL; 917 task_unlock(task); 918 if (ret) 919 break; 920 } 921 return ret; 922 } 923 924 struct cgroup_subsys blkio_cgrp_subsys = { 925 .css_alloc = blkcg_css_alloc, 926 .css_offline = blkcg_css_offline, 927 .css_free = blkcg_css_free, 928 .can_attach = blkcg_can_attach, 929 .legacy_cftypes = blkcg_files, 930 #ifdef CONFIG_MEMCG 931 /* 932 * This ensures that, if available, memcg is automatically enabled 933 * together on the default hierarchy so that the owner cgroup can 934 * be retrieved from writeback pages. 935 */ 936 .depends_on = 1 << memory_cgrp_id, 937 #endif 938 }; 939 EXPORT_SYMBOL_GPL(blkio_cgrp_subsys); 940 941 /** 942 * blkcg_activate_policy - activate a blkcg policy on a request_queue 943 * @q: request_queue of interest 944 * @pol: blkcg policy to activate 945 * 946 * Activate @pol on @q. Requires %GFP_KERNEL context. @q goes through 947 * bypass mode to populate its blkgs with policy_data for @pol. 948 * 949 * Activation happens with @q bypassed, so nobody would be accessing blkgs 950 * from IO path. Update of each blkg is protected by both queue and blkcg 951 * locks so that holding either lock and testing blkcg_policy_enabled() is 952 * always enough for dereferencing policy data. 953 * 954 * The caller is responsible for synchronizing [de]activations and policy 955 * [un]registerations. Returns 0 on success, -errno on failure. 956 */ 957 int blkcg_activate_policy(struct request_queue *q, 958 const struct blkcg_policy *pol) 959 { 960 LIST_HEAD(pds); 961 struct blkcg_gq *blkg, *new_blkg; 962 struct blkg_policy_data *pd, *n; 963 int cnt = 0, ret; 964 bool preloaded; 965 966 if (blkcg_policy_enabled(q, pol)) 967 return 0; 968 969 /* preallocations for root blkg */ 970 new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL); 971 if (!new_blkg) 972 return -ENOMEM; 973 974 blk_queue_bypass_start(q); 975 976 preloaded = !radix_tree_preload(GFP_KERNEL); 977 978 /* 979 * Make sure the root blkg exists and count the existing blkgs. As 980 * @q is bypassing at this point, blkg_lookup_create() can't be 981 * used. Open code it. 982 */ 983 spin_lock_irq(q->queue_lock); 984 985 rcu_read_lock(); 986 blkg = __blkg_lookup(&blkcg_root, q, false); 987 if (blkg) 988 blkg_free(new_blkg); 989 else 990 blkg = blkg_create(&blkcg_root, q, new_blkg); 991 rcu_read_unlock(); 992 993 if (preloaded) 994 radix_tree_preload_end(); 995 996 if (IS_ERR(blkg)) { 997 ret = PTR_ERR(blkg); 998 goto out_unlock; 999 } 1000 1001 list_for_each_entry(blkg, &q->blkg_list, q_node) 1002 cnt++; 1003 1004 spin_unlock_irq(q->queue_lock); 1005 1006 /* allocate policy_data for all existing blkgs */ 1007 while (cnt--) { 1008 pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node); 1009 if (!pd) { 1010 ret = -ENOMEM; 1011 goto out_free; 1012 } 1013 list_add_tail(&pd->alloc_node, &pds); 1014 } 1015 1016 /* 1017 * Install the allocated pds. With @q bypassing, no new blkg 1018 * should have been created while the queue lock was dropped. 1019 */ 1020 spin_lock_irq(q->queue_lock); 1021 1022 list_for_each_entry(blkg, &q->blkg_list, q_node) { 1023 if (WARN_ON(list_empty(&pds))) { 1024 /* umm... this shouldn't happen, just abort */ 1025 ret = -ENOMEM; 1026 goto out_unlock; 1027 } 1028 pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node); 1029 list_del_init(&pd->alloc_node); 1030 1031 /* grab blkcg lock too while installing @pd on @blkg */ 1032 spin_lock(&blkg->blkcg->lock); 1033 1034 blkg->pd[pol->plid] = pd; 1035 pd->blkg = blkg; 1036 pd->plid = pol->plid; 1037 pol->pd_init_fn(blkg); 1038 1039 spin_unlock(&blkg->blkcg->lock); 1040 } 1041 1042 __set_bit(pol->plid, q->blkcg_pols); 1043 ret = 0; 1044 out_unlock: 1045 spin_unlock_irq(q->queue_lock); 1046 out_free: 1047 blk_queue_bypass_end(q); 1048 list_for_each_entry_safe(pd, n, &pds, alloc_node) 1049 kfree(pd); 1050 return ret; 1051 } 1052 EXPORT_SYMBOL_GPL(blkcg_activate_policy); 1053 1054 /** 1055 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue 1056 * @q: request_queue of interest 1057 * @pol: blkcg policy to deactivate 1058 * 1059 * Deactivate @pol on @q. Follows the same synchronization rules as 1060 * blkcg_activate_policy(). 1061 */ 1062 void blkcg_deactivate_policy(struct request_queue *q, 1063 const struct blkcg_policy *pol) 1064 { 1065 struct blkcg_gq *blkg; 1066 1067 if (!blkcg_policy_enabled(q, pol)) 1068 return; 1069 1070 blk_queue_bypass_start(q); 1071 spin_lock_irq(q->queue_lock); 1072 1073 __clear_bit(pol->plid, q->blkcg_pols); 1074 1075 /* if no policy is left, no need for blkgs - shoot them down */ 1076 if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS)) 1077 blkg_destroy_all(q); 1078 1079 list_for_each_entry(blkg, &q->blkg_list, q_node) { 1080 /* grab blkcg lock too while removing @pd from @blkg */ 1081 spin_lock(&blkg->blkcg->lock); 1082 1083 if (pol->pd_offline_fn) 1084 pol->pd_offline_fn(blkg); 1085 if (pol->pd_exit_fn) 1086 pol->pd_exit_fn(blkg); 1087 1088 kfree(blkg->pd[pol->plid]); 1089 blkg->pd[pol->plid] = NULL; 1090 1091 spin_unlock(&blkg->blkcg->lock); 1092 } 1093 1094 spin_unlock_irq(q->queue_lock); 1095 blk_queue_bypass_end(q); 1096 } 1097 EXPORT_SYMBOL_GPL(blkcg_deactivate_policy); 1098 1099 /** 1100 * blkcg_policy_register - register a blkcg policy 1101 * @pol: blkcg policy to register 1102 * 1103 * Register @pol with blkcg core. Might sleep and @pol may be modified on 1104 * successful registration. Returns 0 on success and -errno on failure. 1105 */ 1106 int blkcg_policy_register(struct blkcg_policy *pol) 1107 { 1108 int i, ret; 1109 1110 if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data))) 1111 return -EINVAL; 1112 1113 mutex_lock(&blkcg_pol_mutex); 1114 1115 /* find an empty slot */ 1116 ret = -ENOSPC; 1117 for (i = 0; i < BLKCG_MAX_POLS; i++) 1118 if (!blkcg_policy[i]) 1119 break; 1120 if (i >= BLKCG_MAX_POLS) 1121 goto out_unlock; 1122 1123 /* register and update blkgs */ 1124 pol->plid = i; 1125 blkcg_policy[i] = pol; 1126 1127 /* everything is in place, add intf files for the new policy */ 1128 if (pol->cftypes) 1129 WARN_ON(cgroup_add_legacy_cftypes(&blkio_cgrp_subsys, 1130 pol->cftypes)); 1131 ret = 0; 1132 out_unlock: 1133 mutex_unlock(&blkcg_pol_mutex); 1134 return ret; 1135 } 1136 EXPORT_SYMBOL_GPL(blkcg_policy_register); 1137 1138 /** 1139 * blkcg_policy_unregister - unregister a blkcg policy 1140 * @pol: blkcg policy to unregister 1141 * 1142 * Undo blkcg_policy_register(@pol). Might sleep. 1143 */ 1144 void blkcg_policy_unregister(struct blkcg_policy *pol) 1145 { 1146 mutex_lock(&blkcg_pol_mutex); 1147 1148 if (WARN_ON(blkcg_policy[pol->plid] != pol)) 1149 goto out_unlock; 1150 1151 /* kill the intf files first */ 1152 if (pol->cftypes) 1153 cgroup_rm_cftypes(pol->cftypes); 1154 1155 /* unregister and update blkgs */ 1156 blkcg_policy[pol->plid] = NULL; 1157 out_unlock: 1158 mutex_unlock(&blkcg_pol_mutex); 1159 } 1160 EXPORT_SYMBOL_GPL(blkcg_policy_unregister); 1161