/*
 * cgroups support for the BFQ I/O scheduler.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/elevator.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>

#include "bfq-iosched.h"

#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)

/* bfqg stats flags */
enum bfqg_stats_flags {
        BFQG_stats_waiting = 0,
        BFQG_stats_idling,
        BFQG_stats_empty,
};

#define BFQG_FLAG_FNS(name)                                             \
static void bfqg_stats_mark_##name(struct bfqg_stats *stats)            \
{                                                                       \
        stats->flags |= (1 << BFQG_stats_##name);                       \
}                                                                       \
static void bfqg_stats_clear_##name(struct bfqg_stats *stats)           \
{                                                                       \
        stats->flags &= ~(1 << BFQG_stats_##name);                      \
}                                                                       \
static int bfqg_stats_##name(struct bfqg_stats *stats)                  \
{                                                                       \
        return (stats->flags & (1 << BFQG_stats_##name)) != 0;          \
}                                                                       \

BFQG_FLAG_FNS(waiting)
BFQG_FLAG_FNS(idling)
BFQG_FLAG_FNS(empty)
#undef BFQG_FLAG_FNS

/* This should be called with the scheduler lock held. */
static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
{
        unsigned long long now;

        if (!bfqg_stats_waiting(stats))
                return;

        now = sched_clock();
        if (time_after64(now, stats->start_group_wait_time))
                blkg_stat_add(&stats->group_wait_time,
                              now - stats->start_group_wait_time);
        bfqg_stats_clear_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
                                                 struct bfq_group *curr_bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        if (bfqg_stats_waiting(stats))
                return;
        if (bfqg == curr_bfqg)
                return;
        stats->start_group_wait_time = sched_clock();
        bfqg_stats_mark_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
{
        unsigned long long now;

        if (!bfqg_stats_empty(stats))
                return;

        now = sched_clock();
        if (time_after64(now, stats->start_empty_time))
                blkg_stat_add(&stats->empty_time,
                              now - stats->start_empty_time);
        bfqg_stats_clear_empty(stats);
}

void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
{
        blkg_stat_add(&bfqg->stats.dequeue, 1);
}
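/*
 * Empty-time accounting: a group counts as empty while it has no
 * queued requests (blkg_rwstat_total(&stats->queued) == 0). The helper
 * below records the timestamp at which the group became empty; the
 * accumulated interval is added to stats->empty_time by
 * bfqg_stats_end_empty_time() when the next request is queued.
 */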
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        if (blkg_rwstat_total(&stats->queued))
                return;

        /*
         * The group is already marked empty. This can happen if bfqq
         * got a new request in the parent group and moved to this
         * group while being added to the service tree. Just ignore
         * the event and move on.
         */
        if (bfqg_stats_empty(stats))
                return;

        stats->start_empty_time = sched_clock();
        bfqg_stats_mark_empty(stats);
}

void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        if (bfqg_stats_idling(stats)) {
                unsigned long long now = sched_clock();

                if (time_after64(now, stats->start_idle_time))
                        blkg_stat_add(&stats->idle_time,
                                      now - stats->start_idle_time);
                bfqg_stats_clear_idling(stats);
        }
}

void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        stats->start_idle_time = sched_clock();
        bfqg_stats_mark_idling(stats);
}

void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        blkg_stat_add(&stats->avg_queue_size_sum,
                      blkg_rwstat_total(&stats->queued));
        blkg_stat_add(&stats->avg_queue_size_samples, 1);
        bfqg_stats_update_group_wait_time(stats);
}

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
                              unsigned int op)
{
        blkg_rwstat_add(&bfqg->stats.queued, op, 1);
        bfqg_stats_end_empty_time(&bfqg->stats);
        if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue))
                bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
}

void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op)
{
        blkg_rwstat_add(&bfqg->stats.queued, op, -1);
}

void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
{
        blkg_rwstat_add(&bfqg->stats.merged, op, 1);
}

void bfqg_stats_update_completion(struct bfq_group *bfqg, uint64_t start_time,
                                  uint64_t io_start_time, unsigned int op)
{
        struct bfqg_stats *stats = &bfqg->stats;
        unsigned long long now = sched_clock();

        if (time_after64(now, io_start_time))
                blkg_rwstat_add(&stats->service_time, op,
                                now - io_start_time);
        if (time_after64(io_start_time, start_time))
                blkg_rwstat_add(&stats->wait_time, op,
                                io_start_time - start_time);
}

#else /* CONFIG_BFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
                              unsigned int op) { }
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_completion(struct bfq_group *bfqg, uint64_t start_time,
                                  uint64_t io_start_time, unsigned int op) { }
void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { }

#endif /* CONFIG_BFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

#ifdef CONFIG_BFQ_GROUP_IOSCHED

/*
 * blk-cgroup policy-related handlers
 * The following functions help in converting between blk-cgroup
 * internal structures and BFQ-specific structures.
 */

static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
{
        return pd ? container_of(pd, struct bfq_group, pd) : NULL;
}
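/*
 * struct bfq_group embeds its blkg_policy_data (field pd), so the
 * conversions below are plain container_of()/pointer arithmetic: no
 * lookup or locking is involved, and pd_to_bfqg() simply propagates a
 * NULL pd (e.g. from a failed blkg_to_pd() lookup).
 */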
struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
{
        return pd_to_blkg(&bfqg->pd);
}

static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
{
        return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
}

/*
 * bfq_group handlers
 * The following functions help in navigating the bfq_group hierarchy
 * by allowing to find the parent of a bfq_group or the bfq_group
 * associated to a bfq_queue.
 */

static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
{
        struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;

        return pblkg ? blkg_to_bfqg(pblkg) : NULL;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
        struct bfq_entity *group_entity = bfqq->entity.parent;

        return group_entity ? container_of(group_entity, struct bfq_group,
                                           entity) :
                              bfqq->bfqd->root_group;
}

/*
 * The following two functions handle get and put of a bfq_group by
 * wrapping the related blk-cgroup hooks.
 */

static void bfqg_get(struct bfq_group *bfqg)
{
        bfqg->ref++;
}

static void bfqg_put(struct bfq_group *bfqg)
{
        bfqg->ref--;

        if (bfqg->ref == 0)
                kfree(bfqg);
}

static void bfqg_and_blkg_get(struct bfq_group *bfqg)
{
        /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
        bfqg_get(bfqg);

        blkg_get(bfqg_to_blkg(bfqg));
}

void bfqg_and_blkg_put(struct bfq_group *bfqg)
{
        bfqg_put(bfqg);

        blkg_put(bfqg_to_blkg(bfqg));
}

/* @stats = 0 */
static void bfqg_stats_reset(struct bfqg_stats *stats)
{
#ifdef CONFIG_DEBUG_BLK_CGROUP
        /* queued stats shouldn't be cleared */
        blkg_rwstat_reset(&stats->merged);
        blkg_rwstat_reset(&stats->service_time);
        blkg_rwstat_reset(&stats->wait_time);
        blkg_stat_reset(&stats->time);
        blkg_stat_reset(&stats->avg_queue_size_sum);
        blkg_stat_reset(&stats->avg_queue_size_samples);
        blkg_stat_reset(&stats->dequeue);
        blkg_stat_reset(&stats->group_wait_time);
        blkg_stat_reset(&stats->idle_time);
        blkg_stat_reset(&stats->empty_time);
#endif
}

/* @to += @from */
static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
{
        if (!to || !from)
                return;

#ifdef CONFIG_DEBUG_BLK_CGROUP
        /* queued stats shouldn't be cleared */
        blkg_rwstat_add_aux(&to->merged, &from->merged);
        blkg_rwstat_add_aux(&to->service_time, &from->service_time);
        blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
        blkg_stat_add_aux(&to->time, &from->time);
        blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
        blkg_stat_add_aux(&to->avg_queue_size_samples,
                          &from->avg_queue_size_samples);
        blkg_stat_add_aux(&to->dequeue, &from->dequeue);
        blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
        blkg_stat_add_aux(&to->idle_time, &from->idle_time);
        blkg_stat_add_aux(&to->empty_time, &from->empty_time);
#endif
}
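/*
 * Note that blkg_[rw]stat_add_aux() folds @from's counters (including
 * its own aux counts) into @to's aux counters only. The aux counters
 * are what blkg_[rw]stat_recursive_sum() reports, so merging into them
 * preserves the ancestors' recursive totals without touching their
 * local counters.
 */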
/*
 * Transfer @bfqg's stats to its parent's aux counts so that the ancestors'
 * recursive stats can still account for the amount used by this bfqg after
 * it's gone.
 */
static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
{
        struct bfq_group *parent;

        if (!bfqg) /* root_group */
                return;

        parent = bfqg_parent(bfqg);

        lockdep_assert_held(bfqg_to_blkg(bfqg)->q->queue_lock);

        if (unlikely(!parent))
                return;

        bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
        bfqg_stats_reset(&bfqg->stats);
}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
        struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

        entity->weight = entity->new_weight;
        entity->orig_weight = entity->new_weight;
        if (bfqq) {
                bfqq->ioprio = bfqq->new_ioprio;
                bfqq->ioprio_class = bfqq->new_ioprio_class;
                /*
                 * Make sure that bfqg and its associated blkg do not
                 * disappear before entity.
                 */
                bfqg_and_blkg_get(bfqg);
        }
        entity->parent = bfqg->my_entity; /* NULL for root group */
        entity->sched_data = &bfqg->sched_data;
}

static void bfqg_stats_exit(struct bfqg_stats *stats)
{
#ifdef CONFIG_DEBUG_BLK_CGROUP
        blkg_rwstat_exit(&stats->merged);
        blkg_rwstat_exit(&stats->service_time);
        blkg_rwstat_exit(&stats->wait_time);
        blkg_rwstat_exit(&stats->queued);
        blkg_stat_exit(&stats->time);
        blkg_stat_exit(&stats->avg_queue_size_sum);
        blkg_stat_exit(&stats->avg_queue_size_samples);
        blkg_stat_exit(&stats->dequeue);
        blkg_stat_exit(&stats->group_wait_time);
        blkg_stat_exit(&stats->idle_time);
        blkg_stat_exit(&stats->empty_time);
#endif
}

static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
{
#ifdef CONFIG_DEBUG_BLK_CGROUP
        if (blkg_rwstat_init(&stats->merged, gfp) ||
            blkg_rwstat_init(&stats->service_time, gfp) ||
            blkg_rwstat_init(&stats->wait_time, gfp) ||
            blkg_rwstat_init(&stats->queued, gfp) ||
            blkg_stat_init(&stats->time, gfp) ||
            blkg_stat_init(&stats->avg_queue_size_sum, gfp) ||
            blkg_stat_init(&stats->avg_queue_size_samples, gfp) ||
            blkg_stat_init(&stats->dequeue, gfp) ||
            blkg_stat_init(&stats->group_wait_time, gfp) ||
            blkg_stat_init(&stats->idle_time, gfp) ||
            blkg_stat_init(&stats->empty_time, gfp)) {
                bfqg_stats_exit(stats);
                return -ENOMEM;
        }
#endif

        return 0;
}

static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
{
        return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL;
}

static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
{
        return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));
}

static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
{
        struct bfq_group_data *bgd;

        bgd = kzalloc(sizeof(*bgd), gfp);
        if (!bgd)
                return NULL;
        return &bgd->pd;
}
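/*
 * The per-blkcg policy data holds the weight assigned to the cgroup as
 * a whole. The default differs between hierarchies: on the unified
 * (v2) hierarchy the generic cgroup default weight is used, while on
 * the legacy (v1) hierarchy BFQ's own legacy default applies; see the
 * cgroup_subsys_on_dfl() test below.
 */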
static void bfq_cpd_init(struct blkcg_policy_data *cpd)
{
        struct bfq_group_data *d = cpd_to_bfqgd(cpd);

        d->weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
                CGROUP_WEIGHT_DFL : BFQ_WEIGHT_LEGACY_DFL;
}

static void bfq_cpd_free(struct blkcg_policy_data *cpd)
{
        kfree(cpd_to_bfqgd(cpd));
}

static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node)
{
        struct bfq_group *bfqg;

        bfqg = kzalloc_node(sizeof(*bfqg), gfp, node);
        if (!bfqg)
                return NULL;

        if (bfqg_stats_init(&bfqg->stats, gfp)) {
                kfree(bfqg);
                return NULL;
        }

        /* see comments in bfq_bic_update_cgroup for why refcounting */
        bfqg_get(bfqg);
        return &bfqg->pd;
}

static void bfq_pd_init(struct blkg_policy_data *pd)
{
        struct blkcg_gq *blkg = pd_to_blkg(pd);
        struct bfq_group *bfqg = blkg_to_bfqg(blkg);
        struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
        struct bfq_entity *entity = &bfqg->entity;
        struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);

        entity->orig_weight = entity->weight = entity->new_weight = d->weight;
        entity->my_sched_data = &bfqg->sched_data;
        /* the root group's my_entity will be set to NULL in bfq_init_queue() */
        bfqg->my_entity = entity;
        bfqg->bfqd = bfqd;
        bfqg->active_entities = 0;
        bfqg->rq_pos_tree = RB_ROOT;
}

static void bfq_pd_free(struct blkg_policy_data *pd)
{
        struct bfq_group *bfqg = pd_to_bfqg(pd);

        bfqg_stats_exit(&bfqg->stats);
        bfqg_put(bfqg);
}

static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
{
        struct bfq_group *bfqg = pd_to_bfqg(pd);

        bfqg_stats_reset(&bfqg->stats);
}

static void bfq_group_set_parent(struct bfq_group *bfqg,
                                 struct bfq_group *parent)
{
        struct bfq_entity *entity;

        entity = &bfqg->entity;
        entity->parent = parent->my_entity;
        entity->sched_data = &parent->sched_data;
}

static struct bfq_group *bfq_lookup_bfqg(struct bfq_data *bfqd,
                                         struct blkcg *blkcg)
{
        struct blkcg_gq *blkg;

        blkg = blkg_lookup(blkcg, bfqd->queue);
        if (likely(blkg))
                return blkg_to_bfqg(blkg);
        return NULL;
}

struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
                                     struct blkcg *blkcg)
{
        struct bfq_group *bfqg, *parent;
        struct bfq_entity *entity;

        bfqg = bfq_lookup_bfqg(bfqd, blkcg);
        if (unlikely(!bfqg))
                return NULL;

        /*
         * Update chain of bfq_groups as we might be handling a leaf group
         * which, along with some of its relatives, has not been hooked yet
         * to the private hierarchy of BFQ.
         */
        entity = &bfqg->entity;
        for_each_entity(entity) {
                bfqg = container_of(entity, struct bfq_group, entity);
                if (bfqg != bfqd->root_group) {
                        parent = bfqg_parent(bfqg);
                        if (!parent)
                                parent = bfqd->root_group;
                        bfq_group_set_parent(bfqg, parent);
                }
        }

        return bfqg;
}
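/*
 * bfq_find_set_group() hooks groups lazily: e.g. if cgroups a/b/c are
 * created while BFQ is already active, the first lookup for c walks up
 * the chain and connects c, b and a to BFQ's private hierarchy, using
 * the root group as a fallback parent whenever the blk-cgroup parent
 * has no bfq_group yet.
 */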
/**
 * bfq_bfqq_move - migrate @bfqq to @bfqg.
 * @bfqd: queue descriptor.
 * @bfqq: the queue to move.
 * @bfqg: the group to move to.
 *
 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
 * it on the new one. Avoid putting the entity on the old group idle tree.
 *
 * Must be called under the scheduler lock, to make sure that the blkg
 * owning @bfqg does not disappear (see comments in
 * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
 * objects).
 */
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
                   struct bfq_group *bfqg)
{
        struct bfq_entity *entity = &bfqq->entity;

        /*
         * If bfqq is empty, then bfq_bfqq_expire also invokes
         * bfq_del_bfqq_busy, thereby removing bfqq and its entity
         * from data structures related to the current group. Otherwise
         * we need to remove bfqq explicitly with bfq_deactivate_bfqq,
         * as we do below.
         */
        if (bfqq == bfqd->in_service_queue)
                bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
                                false, BFQQE_PREEMPTED);

        if (bfq_bfqq_busy(bfqq))
                bfq_deactivate_bfqq(bfqd, bfqq, false, false);
        else if (entity->on_st)
                bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
        bfqg_and_blkg_put(bfqq_group(bfqq));

        entity->parent = bfqg->my_entity;
        entity->sched_data = &bfqg->sched_data;
        /* pin down bfqg and its associated blkg */
        bfqg_and_blkg_get(bfqg);

        if (bfq_bfqq_busy(bfqq)) {
                bfq_pos_tree_add_move(bfqd, bfqq);
                bfq_activate_bfqq(bfqd, bfqq);
        }

        if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
                bfq_schedule_dispatch(bfqd);
}

/**
 * __bfq_bic_change_cgroup - move @bic to @blkcg.
 * @bfqd: the queue descriptor.
 * @bic: the bic to move.
 * @blkcg: the blk-cgroup to move to.
 *
 * Move bic to blkcg, assuming that bfqd->lock is held, which makes
 * sure that the reference to the cgroup is valid across the call (see
 * comments in bfq_bic_update_cgroup on this issue).
 *
 * NOTE: an alternative approach might have been to store the current
 * cgroup in bfqq and get a reference to it, reducing the lookup
 * time here, at the price of slightly more complex code.
 */
static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
                                                 struct bfq_io_cq *bic,
                                                 struct blkcg *blkcg)
{
        struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
        struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
        struct bfq_group *bfqg;
        struct bfq_entity *entity;

        bfqg = bfq_find_set_group(bfqd, blkcg);
        if (unlikely(!bfqg))
                bfqg = bfqd->root_group;

        if (async_bfqq) {
                entity = &async_bfqq->entity;

                if (entity->sched_data != &bfqg->sched_data) {
                        bic_set_bfqq(bic, NULL, 0);
                        bfq_log_bfqq(bfqd, async_bfqq,
                                     "bic_change_group: %p %d",
                                     async_bfqq, async_bfqq->ref);
                        bfq_put_queue(async_bfqq);
                }
        }

        if (sync_bfqq) {
                entity = &sync_bfqq->entity;
                if (entity->sched_data != &bfqg->sched_data)
                        bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
        }

        return bfqg;
}
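/**
 * bfq_bic_update_cgroup - check whether @bic's blkcg changed, and react.
 * @bic: the per-(task, device) descriptor to check.
 * @bio: the bio being submitted.
 *
 * Invoked in the I/O submission path: compares the serial number of
 * the bio's blkcg with the one cached in @bic and, on a mismatch,
 * moves the bic's queues to the new group via __bfq_bic_change_cgroup().
 */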
void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
{
        struct bfq_data *bfqd = bic_to_bfqd(bic);
        struct bfq_group *bfqg = NULL;
        uint64_t serial_nr;

        rcu_read_lock();
        serial_nr = bio_blkcg(bio)->css.serial_nr;

        /*
         * Check whether blkcg has changed. The condition may trigger
         * spuriously on a newly created cic but there's no harm.
         */
        if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
                goto out;

        bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
        /*
         * Update blkg_path for bfq_log_* functions. We cache this
         * path, and update it here, for the following
         * reasons. Operations on blkg objects in blk-cgroup are
         * protected with the request_queue lock, and not with the
         * lock that protects the instances of this scheduler
         * (bfqd->lock). This exposes BFQ to the following sort of
         * race.
         *
         * The blkg_lookup performed in bfq_get_queue, protected
         * through rcu, may happen to return the address of a copy of
         * the original blkg. If this is the case, then the
         * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
         * the blkg, is useless: it does not prevent blk-cgroup code
         * from destroying both the original blkg and all objects
         * directly or indirectly referred by the copy of the
         * blkg.
         *
         * On the bright side, destroy operations on a blkg invoke, as
         * a first step, hooks of the scheduler associated with the
         * blkg. And these hooks are executed with bfqd->lock held for
         * BFQ. As a consequence, for any blkg associated with the
         * request queue this instance of the scheduler is attached
         * to, we are guaranteed that such a blkg is not destroyed, and
         * that all the pointers it contains are consistent, while we
         * are holding bfqd->lock. A blkg_lookup performed with
         * bfqd->lock held then returns a fully consistent blkg, which
         * remains consistent as long as this lock is held.
         *
         * Thanks to the last fact, and to the fact that: (1) bfqg has
         * been obtained through a blkg_lookup in the above
         * assignment, and (2) bfqd->lock is being held, here we can
         * safely use the policy data for the involved blkg (i.e., the
         * field bfqg->pd) to get to the blkg associated with bfqg,
         * and then we can safely use any field of blkg. After we
         * release bfqd->lock, even just getting blkg through this
         * bfqg may cause dangling references to be traversed, as
         * bfqg->pd may not exist any more.
         *
         * In view of the above facts, here we cache, in the bfqg, any
         * blkg data we may need for this bic, and for its associated
         * bfq_queue. As of now, we need to cache only the path of the
         * blkg, which is used in the bfq_log_* functions.
         *
         * Finally, note that bfqg itself needs to be protected from
         * destruction on the blkg_free of the original blkg (which
         * invokes bfq_pd_free). We use an additional private
         * refcounter for bfqg, to let it disappear only after no
         * bfq_queue refers to it any longer.
         */
        blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path,
                  sizeof(bfqg->blkg_path));
        bic->blkcg_serial_nr = serial_nr;
out:
        rcu_read_unlock();
}

/**
 * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
 * @st: the service tree being flushed.
 */
static void bfq_flush_idle_tree(struct bfq_service_tree *st)
{
        struct bfq_entity *entity = st->first_idle;

        for (; entity ; entity = st->first_idle)
                __bfq_deactivate_entity(entity, false);
}

/**
 * bfq_reparent_leaf_entity - move leaf entity to the root_group.
 * @bfqd: the device data structure with the root group.
 * @entity: the entity to move.
 */
static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
                                     struct bfq_entity *entity)
{
        struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

        bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
}
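/*
 * Note that bfq_reparent_leaf_entity() above handles only queue (leaf)
 * entities: group entities are not reparented, they are deactivated
 * when their group goes offline. In this file, the reparenting helpers
 * are used only at group-destruction time, by bfq_pd_offline() below,
 * with the scheduler lock held.
 */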
/**
 * bfq_reparent_active_entities - move to the root group all active
 *                                entities.
 * @bfqd: the device data structure with the root group.
 * @bfqg: the group to move from.
 * @st: the service tree with the entities.
 */
static void bfq_reparent_active_entities(struct bfq_data *bfqd,
                                         struct bfq_group *bfqg,
                                         struct bfq_service_tree *st)
{
        struct rb_root *active = &st->active;
        struct bfq_entity *entity = NULL;

        if (!RB_EMPTY_ROOT(&st->active))
                entity = bfq_entity_of(rb_first(active));

        for (; entity ; entity = bfq_entity_of(rb_first(active)))
                bfq_reparent_leaf_entity(bfqd, entity);

        if (bfqg->sched_data.in_service_entity)
                bfq_reparent_leaf_entity(bfqd,
                                         bfqg->sched_data.in_service_entity);
}

/**
 * bfq_pd_offline - deactivate the entity associated with @pd,
 *                  and reparent its children entities.
 * @pd: descriptor of the policy going offline.
 *
 * blkio already grabs the queue_lock for us, so no need to use
 * RCU-based magic.
 */
static void bfq_pd_offline(struct blkg_policy_data *pd)
{
        struct bfq_service_tree *st;
        struct bfq_group *bfqg = pd_to_bfqg(pd);
        struct bfq_data *bfqd = bfqg->bfqd;
        struct bfq_entity *entity = bfqg->my_entity;
        unsigned long flags;
        int i;

        if (!entity) /* root group */
                return;

        spin_lock_irqsave(&bfqd->lock, flags);
        /*
         * Empty all service_trees belonging to this group before
         * deactivating the group itself.
         */
        for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
                st = bfqg->sched_data.service_tree + i;

                /*
                 * The idle tree may still contain bfq_queues
                 * belonging to exited tasks that never migrated to a
                 * different cgroup from the one being destroyed now.
                 */
                bfq_flush_idle_tree(st);

                /*
                 * It may happen that some queues are still active
                 * (busy) upon group destruction (if the corresponding
                 * processes have been forced to terminate). We move
                 * all the leaf entities corresponding to these queues
                 * to the root_group.
                 * Also, it may happen that the group has an entity
                 * in service, which is disconnected from the active
                 * tree: it must be moved, too.
                 * There is no need to put the sync queues, as the
                 * scheduler has taken no reference.
                 */
                bfq_reparent_active_entities(bfqd, bfqg, st);
        }

        __bfq_deactivate_entity(entity, false);
        bfq_put_async_queues(bfqd, bfqg);

        spin_unlock_irqrestore(&bfqd->lock, flags);
        /*
         * @blkg is going offline and will be ignored by
         * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
         * that they don't get lost. If IOs complete after this point, the
         * stats for them will be lost. Oh well...
         */
        bfqg_stats_xfer_dead(bfqg);
}
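/**
 * bfq_end_wr_async - end weight-raising for all async queues.
 * @bfqd: the device data structure.
 *
 * Walks every blkg attached to the device's request queue and ends
 * weight-raising ("wr") for the async queues of the corresponding
 * bfq_group, then does the same for the root group.
 */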
void bfq_end_wr_async(struct bfq_data *bfqd)
{
        struct blkcg_gq *blkg;

        list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
                struct bfq_group *bfqg = blkg_to_bfqg(blkg);

                bfq_end_wr_async_queues(bfqd, bfqg);
        }
        bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

static int bfq_io_show_weight(struct seq_file *sf, void *v)
{
        struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
        struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
        unsigned int val = 0;

        if (bfqgd)
                val = bfqgd->weight;

        seq_printf(sf, "%u\n", val);

        return 0;
}

static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
                                    struct cftype *cftype,
                                    u64 val)
{
        struct blkcg *blkcg = css_to_blkcg(css);
        struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
        struct blkcg_gq *blkg;
        int ret = -ERANGE;

        if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
                return ret;

        ret = 0;
        spin_lock_irq(&blkcg->lock);
        bfqgd->weight = (unsigned short)val;
        hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
                struct bfq_group *bfqg = blkg_to_bfqg(blkg);

                if (!bfqg)
                        continue;
                /*
                 * Setting the prio_changed flag of the entity
                 * to 1 with new_weight == weight would re-set
                 * the value of the weight to its ioprio mapping.
                 * Set the flag only if necessary.
                 */
                if ((unsigned short)val != bfqg->entity.new_weight) {
                        bfqg->entity.new_weight = (unsigned short)val;
                        /*
                         * Make sure that the above new value has been
                         * stored in bfqg->entity.new_weight before
                         * setting the prio_changed flag. In fact,
                         * this flag may be read asynchronously (in
                         * critical sections protected by a different
                         * lock than that held here), and finding this
                         * flag set may cause the execution of the code
                         * for updating parameters whose value may
                         * depend also on bfqg->entity.new_weight (in
                         * __bfq_entity_update_weight_prio).
                         * This barrier makes sure that the new value
                         * of bfqg->entity.new_weight is correctly
                         * seen in that code.
                         */
                        smp_wmb();
                        bfqg->entity.prio_changed = 1;
                }
        }
        spin_unlock_irq(&blkcg->lock);

        return ret;
}
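/*
 * The weight is set from user space through the cgroup interface,
 * e.g. (paths are illustrative, they depend on where cgroupfs is
 * mounted):
 *
 *   legacy (v1):  echo 200 > /sys/fs/cgroup/blkio/grp/blkio.bfq.weight
 *   unified (v2): echo 200 > /sys/fs/cgroup/grp/io.bfq.weight
 *
 * Values outside [BFQ_MIN_WEIGHT, BFQ_MAX_WEIGHT] are rejected with
 * -ERANGE by bfq_io_set_weight_legacy() above; the v2 write handler
 * below just parses the buffer and reuses the same helper.
 */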
static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
                                 char *buf, size_t nbytes,
                                 loff_t off)
{
        u64 weight;
        /* First unsigned long found in the file is used */
        int ret = kstrtoull(strim(buf), 0, &weight);

        if (ret)
                return ret;

        return bfq_io_set_weight_legacy(of_css(of), NULL, weight);
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
static int bfqg_print_stat(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
                          &blkcg_policy_bfq, seq_cft(sf)->private, false);
        return 0;
}

static int bfqg_print_rwstat(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
                          &blkcg_policy_bfq, seq_cft(sf)->private, true);
        return 0;
}

static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
                                      struct blkg_policy_data *pd, int off)
{
        u64 sum = blkg_stat_recursive_sum(pd_to_blkg(pd),
                                          &blkcg_policy_bfq, off);
        return __blkg_prfill_u64(sf, pd, sum);
}

static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
                                        struct blkg_policy_data *pd, int off)
{
        struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd_to_blkg(pd),
                                                           &blkcg_policy_bfq,
                                                           off);
        return __blkg_prfill_rwstat(sf, pd, &sum);
}

static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          bfqg_prfill_stat_recursive, &blkcg_policy_bfq,
                          seq_cft(sf)->private, false);
        return 0;
}

static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
                          seq_cft(sf)->private, true);
        return 0;
}

static u64 bfqg_prfill_sectors(struct seq_file *sf,
                               struct blkg_policy_data *pd, int off)
{
        u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes);

        return __blkg_prfill_u64(sf, pd, sum >> 9);
}

static int bfqg_print_stat_sectors(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false);
        return 0;
}

static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
                                         struct blkg_policy_data *pd, int off)
{
        struct blkg_rwstat tmp = blkg_rwstat_recursive_sum(pd->blkg, NULL,
                                        offsetof(struct blkcg_gq, stat_bytes));
        u64 sum = atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
                  atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);

        return __blkg_prfill_u64(sf, pd, sum >> 9);
}

static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0,
                          false);
        return 0;
}

static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf,
                                      struct blkg_policy_data *pd, int off)
{
        struct bfq_group *bfqg = pd_to_bfqg(pd);
        u64 samples = blkg_stat_read(&bfqg->stats.avg_queue_size_samples);
        u64 v = 0;

        if (samples) {
                v = blkg_stat_read(&bfqg->stats.avg_queue_size_sum);
                v = div64_u64(v, samples);
        }
        __blkg_prfill_u64(sf, pd, v);
        return 0;
}
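/*
 * The value shown in bfq.avg_queue_size is the mean number of queued
 * requests observed in the group, i.e.:
 *
 *      avg_queue_size = avg_queue_size_sum / avg_queue_size_samples
 *
 * with both counters updated by bfqg_stats_update_avg_queue_size();
 * a group with no samples yet reports 0.
 */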
/* print avg_queue_size */
static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          bfqg_prfill_avg_queue_size, &blkcg_policy_bfq,
                          0, false);
        return 0;
}
#endif /* CONFIG_DEBUG_BLK_CGROUP */

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
        int ret;

        ret = blkcg_activate_policy(bfqd->queue, &blkcg_policy_bfq);
        if (ret)
                return NULL;

        return blkg_to_bfqg(bfqd->queue->root_blkg);
}

struct blkcg_policy blkcg_policy_bfq = {
        .dfl_cftypes            = bfq_blkg_files,
        .legacy_cftypes         = bfq_blkcg_legacy_files,

        .cpd_alloc_fn           = bfq_cpd_alloc,
        .cpd_init_fn            = bfq_cpd_init,
        .cpd_bind_fn            = bfq_cpd_init,
        .cpd_free_fn            = bfq_cpd_free,

        .pd_alloc_fn            = bfq_pd_alloc,
        .pd_init_fn             = bfq_pd_init,
        .pd_offline_fn          = bfq_pd_offline,
        .pd_free_fn             = bfq_pd_free,
        .pd_reset_stats_fn      = bfq_pd_reset_stats,
};
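/*
 * Interface files. bfq_blkcg_legacy_files is used on the legacy (v1)
 * hierarchy and bfq_blkg_files on the unified (v2) one; both expose
 * the weight knob, while the statistics files (and their _recursive
 * variants, which also include descendant groups) are legacy-only,
 * with the more detailed ones compiled in only under
 * CONFIG_DEBUG_BLK_CGROUP.
 */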
"bfq.io_queued_recursive", 1139 .private = offsetof(struct bfq_group, stats.queued), 1140 .seq_show = bfqg_print_rwstat_recursive, 1141 }, 1142 { 1143 .name = "bfq.avg_queue_size", 1144 .seq_show = bfqg_print_avg_queue_size, 1145 }, 1146 { 1147 .name = "bfq.group_wait_time", 1148 .private = offsetof(struct bfq_group, stats.group_wait_time), 1149 .seq_show = bfqg_print_stat, 1150 }, 1151 { 1152 .name = "bfq.idle_time", 1153 .private = offsetof(struct bfq_group, stats.idle_time), 1154 .seq_show = bfqg_print_stat, 1155 }, 1156 { 1157 .name = "bfq.empty_time", 1158 .private = offsetof(struct bfq_group, stats.empty_time), 1159 .seq_show = bfqg_print_stat, 1160 }, 1161 { 1162 .name = "bfq.dequeue", 1163 .private = offsetof(struct bfq_group, stats.dequeue), 1164 .seq_show = bfqg_print_stat, 1165 }, 1166 #endif /* CONFIG_DEBUG_BLK_CGROUP */ 1167 { } /* terminate */ 1168 }; 1169 1170 struct cftype bfq_blkg_files[] = { 1171 { 1172 .name = "bfq.weight", 1173 .flags = CFTYPE_NOT_ON_ROOT, 1174 .seq_show = bfq_io_show_weight, 1175 .write = bfq_io_set_weight, 1176 }, 1177 {} /* terminate */ 1178 }; 1179 1180 #else /* CONFIG_BFQ_GROUP_IOSCHED */ 1181 1182 void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, 1183 struct bfq_group *bfqg) {} 1184 1185 void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg) 1186 { 1187 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); 1188 1189 entity->weight = entity->new_weight; 1190 entity->orig_weight = entity->new_weight; 1191 if (bfqq) { 1192 bfqq->ioprio = bfqq->new_ioprio; 1193 bfqq->ioprio_class = bfqq->new_ioprio_class; 1194 } 1195 entity->sched_data = &bfqg->sched_data; 1196 } 1197 1198 void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {} 1199 1200 void bfq_end_wr_async(struct bfq_data *bfqd) 1201 { 1202 bfq_end_wr_async_queues(bfqd, bfqd->root_group); 1203 } 1204 1205 struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, struct blkcg *blkcg) 1206 { 1207 return bfqd->root_group; 1208 } 1209 1210 struct bfq_group *bfqq_group(struct bfq_queue *bfqq) 1211 { 1212 return bfqq->bfqd->root_group; 1213 } 1214 1215 struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node) 1216 { 1217 struct bfq_group *bfqg; 1218 int i; 1219 1220 bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node); 1221 if (!bfqg) 1222 return NULL; 1223 1224 for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) 1225 bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT; 1226 1227 return bfqg; 1228 } 1229 #endif /* CONFIG_BFQ_GROUP_IOSCHED */ 1230