/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT,
			    .cfq_leaf_weight = 2 * CFQ_WEIGHT_DEFAULT, };
EXPORT_SYMBOL_GPL(blkcg_root);

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		kfree(blkg->pd[i]);

	blk_exit_rl(&blkg->rl);
	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;

	/* root blkg uses @q->root_rl, init rl only for !root blkgs */
	if (blkcg != &blkcg_root) {
		if (blk_init_rl(&blkg->rl, q, gfp_mask))
			goto err_free;
		blkg->rl.blkg = blkg;
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
		if (!pd)
			goto err_free;

		blkg->pd[i] = pd;
		pd->blkg = blkg;
		pd->plid = i;
	}

	return blkg;

err_free:
	blkg_free(blkg);
	return NULL;
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and the lookup hint is updated on success.
 */
struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
			       bool update_hint)
{
	struct blkcg_gq *blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	/*
	 * Hint didn't match.  Look up from the radix tree.  Note that the
	 * hint can only be updated under queue_lock as otherwise @blkg
	 * could have already been removed from blkg_tree.  The caller is
	 * responsible for grabbing queue_lock if @update_hint.
	 */
	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q == q) {
		if (update_hint) {
			lockdep_assert_held(q->queue_lock);
			rcu_assign_pointer(blkcg->blkg_hint, blkg);
		}
		return blkg;
	}

	return NULL;
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(blk_queue_bypass(q)))
		return NULL;
	return __blkg_lookup(blkcg, q, false);
}
EXPORT_SYMBOL_GPL(blkg_lookup);
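
/*
 * Illustrative sketch, not part of the original file: a typical hot-path
 * caller does the lookup under the RCU read lock and only dereferences
 * per-policy data hanging off the returned blkg.  @example_pol stands in
 * for a real policy's struct blkcg_policy and is hypothetical.
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(bio_blkcg(bio), q);
 *	if (blkg)
 *		pd = blkg->pd[example_pol.plid];	// policy-private data
 *	rcu_read_unlock();
 *
 * A %NULL return simply means no blkg exists yet (or @q is bypassing);
 * callers that must have one use blkg_lookup_create() below instead.
 */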

/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_ATOMIC.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
				    struct request_queue *q,
				    struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	int i, ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/* blkg holds a reference to blkcg */
	if (!css_tryget_online(&blkcg->css)) {
		ret = -EINVAL;
		goto err_free_blkg;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto err_put_css;
		}
	}
	blkg = new_blkg;

	/* link parent */
	if (blkcg_parent(blkcg)) {
		blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
		if (WARN_ON_ONCE(!blkg->parent)) {
			ret = -EINVAL;
			goto err_put_css;
		}
		blkg_get(blkg->parent);
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_init_fn)
			pol->pd_init_fn(blkg);
	}

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &q->blkg_list);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_online_fn)
				pol->pd_online_fn(blkg);
		}
	}
	blkg->online = true;
	spin_unlock(&blkcg->lock);

	if (!ret) {
		if (blkcg == &blkcg_root) {
			q->root_blkg = blkg;
			q->root_rl.blkg = blkg;
		}
		return blkg;
	}

	/* @blkg failed to be fully initialized, use the usual release path */
	blkg_put(blkg);
	return ERR_PTR(ret);

err_put_css:
	css_put(&blkcg->css);
err_free_blkg:
	blkg_free(new_blkg);
	return ERR_PTR(ret);
}

/**
 * blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
 * create one.  blkg creation is performed recursively from blkcg_root such
 * that all non-root blkgs have access to the parent blkg.  This function
 * should be called under RCU read lock and @q->queue_lock.
 *
 * Returns a pointer to the looked up or created blkg on success, an
 * ERR_PTR() value on error.  If @q is dead, returns ERR_PTR(-EINVAL).  If
 * @q is not dead and bypassing, returns ERR_PTR(-EBUSY).
 */
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q)
{
	struct blkcg_gq *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY);

	blkg = __blkg_lookup(blkcg, q, true);
	if (blkg)
		return blkg;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent = blkcg_parent(blkcg);

		while (parent && !__blkg_lookup(parent, q, false)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		blkg = blkg_create(pos, q, NULL);
		if (pos == blkcg || IS_ERR(blkg))
			return blkg;
	}
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);

static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;
	int i;

	lockdep_assert_held(blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something is wrong if we are trying to remove the same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_offline_fn)
			pol->pd_offline_fn(blkg);
	}
	blkg->online = false;

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting the lookup hint to and clearing it from @blkg are
	 * done under queue_lock.  If it's not pointing to @blkg now, it
	 * never will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * If the root blkg is being destroyed, just clear the pointers
	 * since root_rl does not take a reference on the root blkg.
	 */
	if (blkcg == &blkcg_root) {
		blkg->q->root_blkg = NULL;
		blkg->q->root_rl.blkg = NULL;
	}

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, the group can be destroyed.
	 */
	blkg_put(blkg);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkcg_gq *blkg, *n;

	lockdep_assert_held(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}
}

/*
 * A group is RCU protected, but having an rcu lock does not mean that one
 * can access all the fields of blkg and assume these are valid.  For
 * example, don't try to follow throtl_data and request queue links.
 *
 * Having a reference to blkg under an rcu allows accesses to only values
 * local to groups like group stats and group rate limits.
 */
void __blkg_release_rcu(struct rcu_head *rcu_head)
{
	struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);
	int i;

	/* tell policies that this one is being freed */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_exit_fn)
			pol->pd_exit_fn(blkg);
	}

	/* release the blkcg and parent blkg refs this blkg has been holding */
	css_put(&blkg->blkcg->css);
	if (blkg->parent) {
		spin_lock_irq(blkg->q->queue_lock);
		blkg_put(blkg->parent);
		spin_unlock_irq(blkg->q->queue_lock);
	}

	blkg_free(blkg);
}
EXPORT_SYMBOL_GPL(__blkg_release_rcu);

/*
 * The next-entry helper used by blk_queue_for_each_rl().  It's a bit
 * tricky because the root blkg uses @q->root_rl instead of its own rl.
 */
struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q)
{
	struct list_head *ent;
	struct blkcg_gq *blkg;

	/*
	 * Determine the current blkg list_head.  The first entry is
	 * root_rl which is off @q->blkg_list and mapped to the head.
	 */
	if (rl == &q->root_rl) {
		ent = &q->blkg_list;
		/* There are no more block groups, hence no request lists */
		if (list_empty(ent))
			return NULL;
	} else {
		blkg = container_of(rl, struct blkcg_gq, rl);
		ent = &blkg->q_node;
	}

	/* walk to the next list_head, skip root blkcg */
	ent = ent->next;
	if (ent == &q->root_blkg->q_node)
		ent = ent->next;
	if (ent == &q->blkg_list)
		return NULL;

	blkg = container_of(ent, struct blkcg_gq, q_node);
	return &blkg->rl;
}
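
/*
 * Illustrative sketch, not part of the original file: blk_queue_for_each_rl()
 * (defined in blkdev.h) drives the helper above, starting from @q->root_rl
 * and then walking every blkg's embedded request_list.  A caller holding
 * @q->queue_lock would typically iterate like this:
 *
 *	struct request_list *rl;
 *
 *	blk_queue_for_each_rl(rl, q) {
 *		// operate on each request_list, e.g. wake up waiters
 *	}
 */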

static int blkcg_reset_stats(struct cgroup_subsys_state *css,
			     struct cftype *cftype, u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;
	int i;

	/*
	 * XXX: We invoke cgroup_add/rm_cftypes() under blkcg_pol_mutex
	 * which ends up putting cgroup's internal cgroup_tree_mutex under
	 * it; however, cgroup_tree_mutex is nested above cgroup file
	 * active protection and grabbing blkcg_pol_mutex from a cgroup
	 * file operation creates a possible circular dependency.  cgroup
	 * internal locking is planned to go through further simplification
	 * and this issue should go away soon.  For now, let's trylock
	 * blkcg_pol_mutex and restart the write on failure.
	 *
	 * http://lkml.kernel.org/g/5363C04B.4010400@oracle.com
	 */
	if (!mutex_trylock(&blkcg_pol_mutex))
		return restart_syscall();
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkcg_policy_enabled(blkg->q, pol) &&
			    pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

static const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data and the matching queue lock held.  If @show_total
 * is %true, the sum of the return values from @prfill is printed with
 * "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	u64 total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		spin_lock_irq(blkg->q->queue_lock);
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
		spin_unlock_irq(blkg->q->queue_lock);
	}
	rcu_read_unlock();

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
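
/*
 * Illustrative sketch, not part of the original file: a policy would
 * normally wire blkcg_print_blkgs() and __blkg_prfill_u64() together to
 * implement a per-device read-only cgroup file.  example_policy,
 * example_prfill_limit, example_print_limit and struct example_pd are
 * hypothetical names.
 *
 *	static u64 example_prfill_limit(struct seq_file *sf,
 *					struct blkg_policy_data *pd, int off)
 *	{
 *		struct example_pd *epd = container_of(pd, struct example_pd, pd);
 *
 *		return __blkg_prfill_u64(sf, pd, epd->limit);
 *	}
 *
 *	static int example_print_limit(struct seq_file *sf, void *v)
 *	{
 *		blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 *				  example_prfill_limit, &example_policy,
 *				  0, false);
 *		return 0;
 *	}
 */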

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)rwstat->cnt[i]);

	v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
	return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);

/**
 * blkg_stat_recursive_sum - collect hierarchical blkg_stat
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * Collect the blkg_stat specified by @off from @pd and all its online
 * descendants and return the sum.  The caller must be holding the queue
 * lock for online tests.
 */
u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off)
{
	struct blkcg_policy *pol = blkcg_policy[pd->plid];
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	u64 sum = 0;

	lockdep_assert_held(pd->blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
		struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
		struct blkg_stat *stat = (void *)pos_pd + off;

		if (pos_blkg->online)
			sum += blkg_stat_read(stat);
	}
	rcu_read_unlock();

	return sum;
}
EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);
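
/*
 * Illustrative sketch, not part of the original file: a policy that wants
 * hierarchical statistics in a *_recursive file typically composes the
 * helper above with __blkg_prfill_u64(); the prfill then receives @off as
 * the offset of the blkg_stat inside its policy data.  The function name
 * is hypothetical.
 *
 *	static u64 example_prfill_stat_recursive(struct seq_file *sf,
 *						 struct blkg_policy_data *pd,
 *						 int off)
 *	{
 *		u64 sum = blkg_stat_recursive_sum(pd, off);
 *
 *		return __blkg_prfill_u64(sf, pd, sum);
 *	}
 */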

/**
 * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * Collect the blkg_rwstat specified by @off from @pd and all its online
 * descendants and return the sum.  The caller must be holding the queue
 * lock for online tests.
 */
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
					     int off)
{
	struct blkcg_policy *pol = blkcg_policy[pd->plid];
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	struct blkg_rwstat sum = { };
	int i;

	lockdep_assert_held(pd->blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
		struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
		struct blkg_rwstat *rwstat = (void *)pos_pd + off;
		struct blkg_rwstat tmp;

		if (!pos_blkg->online)
			continue;

		tmp = blkg_rwstat_read(rwstat);

		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			sum.cnt[i] += tmp.cnt[i];
	}
	rcu_read_unlock();

	return sum;
}
EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse the per-blkg config update from @input and initialize @ctx with
 * the result.  @ctx->blkg points to the blkg to be updated and @ctx->v the
 * new value.  This function returns with the RCU read lock and queue lock
 * held and must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(disk->queue->queue_lock)
{
	struct gendisk *disk;
	struct blkcg_gq *blkg;
	unsigned int major, minor;
	unsigned long long v;
	int part, ret;

	if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
		return -EINVAL;

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk || part)
		return -EINVAL;

	rcu_read_lock();
	spin_lock_irq(disk->queue->queue_lock);

	if (blkcg_policy_enabled(disk->queue, pol))
		blkg = blkg_lookup_create(blkcg, disk->queue);
	else
		blkg = ERR_PTR(-EINVAL);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		rcu_read_unlock();
		spin_unlock_irq(disk->queue->queue_lock);
		put_disk(disk);
		/*
		 * If queue was bypassing, we should retry.  Do so after a
		 * short msleep().  It isn't strictly necessary but queue
		 * can be bypassing for some time and it's always nice to
		 * avoid busy looping.
		 */
		if (ret == -EBUSY) {
			msleep(10);
			ret = restart_syscall();
		}
		return ret;
	}

	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->v = v;
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after a per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
	spin_unlock_irq(ctx->disk->queue->queue_lock);
	rcu_read_unlock();
	put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
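
/*
 * Illustrative sketch, not part of the original file: a policy's
 * "MAJ:MIN <value>" config writer pairs the two helpers above.  The names
 * example_policy and example_apply_limit() are hypothetical; @blkcg and
 * @buf come from the policy's own cgroup write handler.
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &example_policy, buf, &ctx);
 *	if (ret)
 *		return ret;
 *
 *	// ctx.blkg and ctx.v are valid here, with RCU and queue_lock held
 *	example_apply_limit(ctx.blkg, ctx.v);
 *
 *	blkg_conf_finish(&ctx);
 *	return 0;
 */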

struct cftype blkcg_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};

/**
 * blkcg_css_offline - cgroup css_offline callback
 * @css: css of interest
 *
 * This function is called when @css is about to go away and is responsible
 * for shooting down all blkgs associated with @css.  blkgs should be
 * removed while holding both q and blkcg locks.  As blkcg lock is nested
 * inside q lock, this function performs reverse double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						    struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
}

static void blkcg_css_free(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);

	if (blkcg != &blkcg_root)
		kfree(blkcg);
}

static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
	static atomic64_t id_seq = ATOMIC64_INIT(0);
	struct blkcg *blkcg;

	if (!parent_css) {
		blkcg = &blkcg_root;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
	blkcg->cfq_leaf_weight = CFQ_WEIGHT_DEFAULT;
	blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
	spin_lock_init(&blkcg->lock);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node().  Responsible for initializing the
 * blkcg part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	might_sleep();

	return blk_throtl_init(q);
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining the blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting the blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	blkg_destroy_all(q);
	spin_unlock_irq(q->queue_lock);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup_subsys_state *css,
			    struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

struct cgroup_subsys blkio_cgrp_subsys = {
	.css_alloc = blkcg_css_alloc,
	.css_offline = blkcg_css_offline,
	.css_free = blkcg_css_free,
	.can_attach = blkcg_can_attach,
	.base_cftypes = blkcg_files,
};
EXPORT_SYMBOL_GPL(blkio_cgrp_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from the IO path.  Update of each blkg is protected by both queue and
 * blkcg locks so that holding either lock and testing
 * blkcg_policy_enabled() is always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol)
{
	LIST_HEAD(pds);
	struct blkcg_gq *blkg, *new_blkg;
	struct blkg_policy_data *pd, *n;
	int cnt = 0, ret;
	bool preloaded;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	/* preallocations for root blkg */
	new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
	if (!new_blkg)
		return -ENOMEM;

	blk_queue_bypass_start(q);

	preloaded = !radix_tree_preload(GFP_KERNEL);

	/*
	 * Make sure the root blkg exists and count the existing blkgs.  As
	 * @q is bypassing at this point, blkg_lookup_create() can't be
	 * used.  Open code it.
	 */
	spin_lock_irq(q->queue_lock);

	rcu_read_lock();
	blkg = __blkg_lookup(&blkcg_root, q, false);
	if (blkg)
		blkg_free(new_blkg);
	else
		blkg = blkg_create(&blkcg_root, q, new_blkg);
	rcu_read_unlock();

	if (preloaded)
		radix_tree_preload_end();

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto out_unlock;
	}

	list_for_each_entry(blkg, &q->blkg_list, q_node)
		cnt++;

	spin_unlock_irq(q->queue_lock);

	/* allocate policy_data for all existing blkgs */
	while (cnt--) {
		pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
		if (!pd) {
			ret = -ENOMEM;
			goto out_free;
		}
		list_add_tail(&pd->alloc_node, &pds);
	}

	/*
	 * Install the allocated pds.  With @q bypassing, no new blkg
	 * should have been created while the queue lock was dropped.
	 */
	spin_lock_irq(q->queue_lock);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		if (WARN_ON(list_empty(&pds))) {
			/* umm... this shouldn't happen, just abort */
			ret = -ENOMEM;
			goto out_unlock;
		}
		pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
		list_del_init(&pd->alloc_node);

		/* grab blkcg lock too while installing @pd on @blkg */
		spin_lock(&blkg->blkcg->lock);

		blkg->pd[pol->plid] = pd;
		pd->blkg = blkg;
		pd->plid = pol->plid;
		pol->pd_init_fn(blkg);

		spin_unlock(&blkg->blkcg->lock);
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;
out_unlock:
	spin_unlock_irq(q->queue_lock);
out_free:
	blk_queue_bypass_end(q);
	list_for_each_entry_safe(pd, n, &pds, alloc_node)
		kfree(pd);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);

/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol)
{
	struct blkcg_gq *blkg;

	if (!blkcg_policy_enabled(q, pol))
		return;

	blk_queue_bypass_start(q);
	spin_lock_irq(q->queue_lock);

	__clear_bit(pol->plid, q->blkcg_pols);

	/* if no policy is left, no need for blkgs - shoot them down */
	if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))
		blkg_destroy_all(q);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		/* grab blkcg lock too while removing @pd from @blkg */
		spin_lock(&blkg->blkcg->lock);

		if (pol->pd_offline_fn)
			pol->pd_offline_fn(blkg);
		if (pol->pd_exit_fn)
			pol->pd_exit_fn(blkg);

		kfree(blkg->pd[pol->plid]);
		blkg->pd[pol->plid] = NULL;

		spin_unlock(&blkg->blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
	blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int __init blkcg_policy_register(struct blkcg_policy *pol)
{
	int i, ret;

	if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
		return -EINVAL;

	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	ret = -ENOSPC;
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS)
		goto out_unlock;

	/* register and update blkgs */
	pol->plid = i;
	blkcg_policy[i] = pol;

	/* everything is in place, add intf files for the new policy */
	if (pol->cftypes)
		WARN_ON(cgroup_add_cftypes(&blkio_cgrp_subsys, pol->cftypes));
	ret = 0;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);
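
/*
 * Illustrative sketch, not part of the original file: a policy typically
 * registers itself once at boot and then activates on each queue it is
 * attached to.  example_policy, struct example_pd and example_init_queue()
 * are hypothetical; blk-throttle and cfq-iosched follow this general shape.
 *
 *	static struct blkcg_policy example_policy = {
 *		.pd_size	= sizeof(struct example_pd),
 *		.cftypes	= example_files,
 *		.pd_init_fn	= example_pd_init,
 *		.pd_exit_fn	= example_pd_exit,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return blkcg_policy_register(&example_policy);
 *	}
 *
 *	static int example_init_queue(struct request_queue *q)
 *	{
 *		return blkcg_activate_policy(q, &example_policy);
 *	}
 */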

/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	mutex_lock(&blkcg_pol_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->cftypes)
		cgroup_rm_cftypes(pol->cftypes);

	/* unregister and update blkgs */
	blkcg_policy[pol->plid] = NULL;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);