// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * cgroups support for the BFQ I/O scheduler.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/elevator.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>

#include "bfq-iosched.h"

#ifdef CONFIG_BFQ_CGROUP_DEBUG
static int bfq_stat_init(struct bfq_stat *stat, gfp_t gfp)
{
	int ret;

	ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
	if (ret)
		return ret;

	atomic64_set(&stat->aux_cnt, 0);
	return 0;
}

static void bfq_stat_exit(struct bfq_stat *stat)
{
	percpu_counter_destroy(&stat->cpu_cnt);
}

/**
 * bfq_stat_add - add a value to a bfq_stat
 * @stat: target bfq_stat
 * @val: value to add
 *
 * Add @val to @stat. The caller must ensure that IRQs on the same CPU
 * don't re-enter this function for the same counter.
 */
static inline void bfq_stat_add(struct bfq_stat *stat, uint64_t val)
{
	percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * bfq_stat_read - read the current value of a bfq_stat
 * @stat: bfq_stat to read
 */
static inline uint64_t bfq_stat_read(struct bfq_stat *stat)
{
	return percpu_counter_sum_positive(&stat->cpu_cnt);
}

/**
 * bfq_stat_reset - reset a bfq_stat
 * @stat: bfq_stat to reset
 */
static inline void bfq_stat_reset(struct bfq_stat *stat)
{
	percpu_counter_set(&stat->cpu_cnt, 0);
	atomic64_set(&stat->aux_cnt, 0);
}

/**
 * bfq_stat_add_aux - add a bfq_stat into another's aux count
 * @to: the destination bfq_stat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void bfq_stat_add_aux(struct bfq_stat *to,
				    struct bfq_stat *from)
{
	atomic64_add(bfq_stat_read(from) + atomic64_read(&from->aux_cnt),
		     &to->aux_cnt);
}

/**
 * blkg_prfill_stat - prfill callback for bfq_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the bfq_stat in @pd
 *
 * prfill callback for printing a bfq_stat.
 */
static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd,
			    int off)
{
	return __blkg_prfill_u64(sf, pd, bfq_stat_read((void *)pd + off));
}

/* bfqg stats flags */
enum bfqg_stats_flags {
	BFQG_stats_waiting = 0,
	BFQG_stats_idling,
	BFQG_stats_empty,
};

#define BFQG_FLAG_FNS(name)						\
static void bfqg_stats_mark_##name(struct bfqg_stats *stats)		\
{									\
	stats->flags |= (1 << BFQG_stats_##name);			\
}									\
static void bfqg_stats_clear_##name(struct bfqg_stats *stats)		\
{									\
	stats->flags &= ~(1 << BFQG_stats_##name);			\
}									\
static int bfqg_stats_##name(struct bfqg_stats *stats)			\
{									\
	return (stats->flags & (1 << BFQG_stats_##name)) != 0;		\
}									\

BFQG_FLAG_FNS(waiting)
BFQG_FLAG_FNS(idling)
BFQG_FLAG_FNS(empty)
#undef BFQG_FLAG_FNS
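
/*
 * For reference, BFQG_FLAG_FNS(waiting) above expands to
 * bfqg_stats_mark_waiting(), bfqg_stats_clear_waiting() and
 * bfqg_stats_waiting(), which respectively set, clear and test the
 * BFQG_stats_waiting bit in stats->flags; the idling and empty
 * variants are generated the same way.
 */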

/* This should be called with the scheduler lock held. */
static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
{
	u64 now;

	if (!bfqg_stats_waiting(stats))
		return;

	now = ktime_get_ns();
	if (now > stats->start_group_wait_time)
		bfq_stat_add(&stats->group_wait_time,
			     now - stats->start_group_wait_time);
	bfqg_stats_clear_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
						 struct bfq_group *curr_bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_waiting(stats))
		return;
	if (bfqg == curr_bfqg)
		return;
	stats->start_group_wait_time = ktime_get_ns();
	bfqg_stats_mark_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
{
	u64 now;

	if (!bfqg_stats_empty(stats))
		return;

	now = ktime_get_ns();
	if (now > stats->start_empty_time)
		bfq_stat_add(&stats->empty_time,
			     now - stats->start_empty_time);
	bfqg_stats_clear_empty(stats);
}

void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
{
	bfq_stat_add(&bfqg->stats.dequeue, 1);
}

void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (blkg_rwstat_total(&stats->queued))
		return;

	/*
	 * group is already marked empty. This can happen if bfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (bfqg_stats_empty(stats))
		return;

	stats->start_empty_time = ktime_get_ns();
	bfqg_stats_mark_empty(stats);
}

void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_idling(stats)) {
		u64 now = ktime_get_ns();

		if (now > stats->start_idle_time)
			bfq_stat_add(&stats->idle_time,
				     now - stats->start_idle_time);
		bfqg_stats_clear_idling(stats);
	}
}

void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	stats->start_idle_time = ktime_get_ns();
	bfqg_stats_mark_idling(stats);
}

void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	bfq_stat_add(&stats->avg_queue_size_sum,
		     blkg_rwstat_total(&stats->queued));
	bfq_stat_add(&stats->avg_queue_size_samples, 1);
	bfqg_stats_update_group_wait_time(stats);
}

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.queued, op, 1);
	bfqg_stats_end_empty_time(&bfqg->stats);
	if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue))
		bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
}

void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.queued, op, -1);
}

void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.merged, op, 1);
}

void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, unsigned int op)
{
	struct bfqg_stats *stats = &bfqg->stats;
	u64 now = ktime_get_ns();

	if (now > io_start_time_ns)
		blkg_rwstat_add(&stats->service_time, op,
				now - io_start_time_ns);
	if (io_start_time_ns > start_time_ns)
		blkg_rwstat_add(&stats->wait_time, op,
				io_start_time_ns - start_time_ns);
}

#else /* CONFIG_BFQ_CGROUP_DEBUG */

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      unsigned int op) { }
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, unsigned int op) { }
void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { }

#endif /* CONFIG_BFQ_CGROUP_DEBUG */

#ifdef CONFIG_BFQ_GROUP_IOSCHED

/*
 * blk-cgroup policy-related handlers
 * The following functions help in converting between blk-cgroup
 * internal structures and BFQ-specific structures.
 */

static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct bfq_group, pd) : NULL;
}

struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
{
	return pd_to_blkg(&bfqg->pd);
}

static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
{
	return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
}

/*
 * bfq_group handlers
 * The following functions help in navigating the bfq_group hierarchy
 * by allowing one to find the parent of a bfq_group or the bfq_group
 * associated with a bfq_queue.
 */

static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
{
	struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;

	return pblkg ? blkg_to_bfqg(pblkg) : NULL;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	struct bfq_entity *group_entity = bfqq->entity.parent;

	return group_entity ? container_of(group_entity, struct bfq_group,
					   entity) :
			      bfqq->bfqd->root_group;
}

/*
 * The following two functions handle get and put of a bfq_group by
 * wrapping the related blk-cgroup hooks.
 */

static void bfqg_get(struct bfq_group *bfqg)
{
	bfqg->ref++;
}

static void bfqg_put(struct bfq_group *bfqg)
{
	bfqg->ref--;

	if (bfqg->ref == 0)
		kfree(bfqg);
}

static void bfqg_and_blkg_get(struct bfq_group *bfqg)
{
	/* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
	bfqg_get(bfqg);

	blkg_get(bfqg_to_blkg(bfqg));
}

void bfqg_and_blkg_put(struct bfq_group *bfqg)
{
	blkg_put(bfqg_to_blkg(bfqg));

	bfqg_put(bfqg);
}

void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq)
{
	struct bfq_group *bfqg = blkg_to_bfqg(rq->bio->bi_blkg);

	if (!bfqg)
		return;

	blkg_rwstat_add(&bfqg->stats.bytes, rq->cmd_flags, blk_rq_bytes(rq));
	blkg_rwstat_add(&bfqg->stats.ios, rq->cmd_flags, 1);
}

/* @stats = 0 */
static void bfqg_stats_reset(struct bfqg_stats *stats)
{
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	/* queued stats shouldn't be cleared */
	blkg_rwstat_reset(&stats->merged);
	blkg_rwstat_reset(&stats->service_time);
	blkg_rwstat_reset(&stats->wait_time);
	bfq_stat_reset(&stats->time);
	bfq_stat_reset(&stats->avg_queue_size_sum);
	bfq_stat_reset(&stats->avg_queue_size_samples);
	bfq_stat_reset(&stats->dequeue);
	bfq_stat_reset(&stats->group_wait_time);
	bfq_stat_reset(&stats->idle_time);
	bfq_stat_reset(&stats->empty_time);
#endif
}

/* @to += @from */
static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
{
	if (!to || !from)
		return;

#ifdef CONFIG_BFQ_CGROUP_DEBUG
	/* queued stats shouldn't be cleared */
	blkg_rwstat_add_aux(&to->merged, &from->merged);
	blkg_rwstat_add_aux(&to->service_time, &from->service_time);
	blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
	bfq_stat_add_aux(&to->time, &from->time);
	bfq_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
	bfq_stat_add_aux(&to->avg_queue_size_samples,
			 &from->avg_queue_size_samples);
	bfq_stat_add_aux(&to->dequeue, &from->dequeue);
	bfq_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
	bfq_stat_add_aux(&to->idle_time, &from->idle_time);
	bfq_stat_add_aux(&to->empty_time, &from->empty_time);
#endif
}

/*
 * Transfer @bfqg's stats to its parent's aux counts so that the ancestors'
 * recursive stats can still account for the amount used by this bfqg after
 * it's gone.
 */
static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
{
	struct bfq_group *parent;

	if (!bfqg) /* root_group */
		return;

	parent = bfqg_parent(bfqg);

	lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);

	if (unlikely(!parent))
		return;

	bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
	bfqg_stats_reset(&bfqg->stats);
}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;
		/*
		 * Make sure that bfqg and its associated blkg do not
		 * disappear before entity.
		 */
		bfqg_and_blkg_get(bfqg);
	}
	entity->parent = bfqg->my_entity; /* NULL for root group */
	entity->sched_data = &bfqg->sched_data;
}

static void bfqg_stats_exit(struct bfqg_stats *stats)
{
	blkg_rwstat_exit(&stats->bytes);
	blkg_rwstat_exit(&stats->ios);
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	blkg_rwstat_exit(&stats->merged);
	blkg_rwstat_exit(&stats->service_time);
	blkg_rwstat_exit(&stats->wait_time);
	blkg_rwstat_exit(&stats->queued);
	bfq_stat_exit(&stats->time);
	bfq_stat_exit(&stats->avg_queue_size_sum);
	bfq_stat_exit(&stats->avg_queue_size_samples);
	bfq_stat_exit(&stats->dequeue);
	bfq_stat_exit(&stats->group_wait_time);
	bfq_stat_exit(&stats->idle_time);
	bfq_stat_exit(&stats->empty_time);
#endif
}

static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
{
	if (blkg_rwstat_init(&stats->bytes, gfp) ||
	    blkg_rwstat_init(&stats->ios, gfp))
		return -ENOMEM;

#ifdef CONFIG_BFQ_CGROUP_DEBUG
	if (blkg_rwstat_init(&stats->merged, gfp) ||
	    blkg_rwstat_init(&stats->service_time, gfp) ||
	    blkg_rwstat_init(&stats->wait_time, gfp) ||
	    blkg_rwstat_init(&stats->queued, gfp) ||
	    bfq_stat_init(&stats->time, gfp) ||
	    bfq_stat_init(&stats->avg_queue_size_sum, gfp) ||
	    bfq_stat_init(&stats->avg_queue_size_samples, gfp) ||
	    bfq_stat_init(&stats->dequeue, gfp) ||
	    bfq_stat_init(&stats->group_wait_time, gfp) ||
	    bfq_stat_init(&stats->idle_time, gfp) ||
	    bfq_stat_init(&stats->empty_time, gfp)) {
		bfqg_stats_exit(stats);
		return -ENOMEM;
	}
#endif

	return 0;
}

static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
{
	return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL;
}

static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
{
	return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));
}

static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
{
	struct bfq_group_data *bgd;

	bgd = kzalloc(sizeof(*bgd), gfp);
	if (!bgd)
		return NULL;
	return &bgd->pd;
}

static void bfq_cpd_init(struct blkcg_policy_data *cpd)
{
	struct bfq_group_data *d = cpd_to_bfqgd(cpd);

	d->weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
		CGROUP_WEIGHT_DFL : BFQ_WEIGHT_LEGACY_DFL;
}

static void bfq_cpd_free(struct blkcg_policy_data *cpd)
{
	kfree(cpd_to_bfqgd(cpd));
}

static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, struct request_queue *q,
					     struct blkcg *blkcg)
{
	struct bfq_group *bfqg;

	bfqg = kzalloc_node(sizeof(*bfqg), gfp, q->node);
	if (!bfqg)
		return NULL;

	if (bfqg_stats_init(&bfqg->stats, gfp)) {
		kfree(bfqg);
		return NULL;
	}

	/* see comments in bfq_bic_update_cgroup for why refcounting */
	bfqg_get(bfqg);
	return &bfqg->pd;
}

static void bfq_pd_init(struct blkg_policy_data *pd)
{
	struct blkcg_gq *blkg = pd_to_blkg(pd);
	struct bfq_group *bfqg = blkg_to_bfqg(blkg);
	struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
	struct bfq_entity *entity = &bfqg->entity;
	struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);

	entity->orig_weight = entity->weight = entity->new_weight = d->weight;
	entity->my_sched_data = &bfqg->sched_data;
	bfqg->my_entity = entity; /*
				   * the root_group's will be set to NULL
				   * in bfq_init_queue()
				   */
	bfqg->bfqd = bfqd;
	bfqg->active_entities = 0;
	bfqg->rq_pos_tree = RB_ROOT;
}

static void bfq_pd_free(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_exit(&bfqg->stats);
	bfqg_put(bfqg);
}

static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_reset(&bfqg->stats);
}

static void bfq_group_set_parent(struct bfq_group *bfqg,
				 struct bfq_group *parent)
{
	struct bfq_entity *entity;

	entity = &bfqg->entity;
	entity->parent = parent->my_entity;
	entity->sched_data = &parent->sched_data;
}

static struct bfq_group *bfq_lookup_bfqg(struct bfq_data *bfqd,
					 struct blkcg *blkcg)
{
	struct blkcg_gq *blkg;

	blkg = blkg_lookup(blkcg, bfqd->queue);
	if (likely(blkg))
		return blkg_to_bfqg(blkg);
	return NULL;
}

struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
				     struct blkcg *blkcg)
{
	struct bfq_group *bfqg, *parent;
	struct bfq_entity *entity;

	bfqg = bfq_lookup_bfqg(bfqd, blkcg);

	if (unlikely(!bfqg))
		return NULL;

	/*
	 * Update chain of bfq_groups as we might be handling a leaf group
	 * which, along with some of its relatives, has not been hooked yet
	 * to the private hierarchy of BFQ.
	 */
	entity = &bfqg->entity;
	for_each_entity(entity) {
		bfqg = container_of(entity, struct bfq_group, entity);
		if (bfqg != bfqd->root_group) {
			parent = bfqg_parent(bfqg);
			if (!parent)
				parent = bfqd->root_group;
			bfq_group_set_parent(bfqg, parent);
		}
	}

	return bfqg;
}

/**
 * bfq_bfqq_move - migrate @bfqq to @bfqg.
 * @bfqd: queue descriptor.
 * @bfqq: the queue to move.
 * @bfqg: the group to move to.
 *
 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
 * it on the new one. Avoid putting the entity on the old group idle tree.
 *
 * Must be called under the scheduler lock, to make sure that the blkg
 * owning @bfqg does not disappear (see comments in
 * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
 * objects).
 */
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg)
{
	struct bfq_entity *entity = &bfqq->entity;

	/* If bfqq is empty, then bfq_bfqq_expire also invokes
	 * bfq_del_bfqq_busy, thereby removing bfqq and its entity
	 * from data structures related to the current group. Otherwise we
	 * need to remove bfqq explicitly with bfq_deactivate_bfqq, as
	 * we do below.
	 */
	if (bfqq == bfqd->in_service_queue)
		bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
				false, BFQQE_PREEMPTED);

	if (bfq_bfqq_busy(bfqq))
		bfq_deactivate_bfqq(bfqd, bfqq, false, false);
	else if (entity->on_st)
		bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
	bfqg_and_blkg_put(bfqq_group(bfqq));

	entity->parent = bfqg->my_entity;
	entity->sched_data = &bfqg->sched_data;
	/* pin down bfqg and its associated blkg */
	bfqg_and_blkg_get(bfqg);

	if (bfq_bfqq_busy(bfqq)) {
		if (unlikely(!bfqd->nonrot_with_queueing))
			bfq_pos_tree_add_move(bfqd, bfqq);
		bfq_activate_bfqq(bfqd, bfqq);
	}

	if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
		bfq_schedule_dispatch(bfqd);
}

/**
 * __bfq_bic_change_cgroup - move @bic to @cgroup.
 * @bfqd: the queue descriptor.
 * @bic: the bic to move.
 * @blkcg: the blk-cgroup to move to.
 *
 * Move bic to blkcg, assuming that bfqd->lock is held, which makes
 * sure that the reference to the cgroup is valid across the call (see
 * comments in bfq_bic_update_cgroup on this issue).
 *
 * NOTE: an alternative approach might have been to store the current
 * cgroup in bfqq and getting a reference to it, reducing the lookup
 * time here, at the price of slightly more complex code.
 */
static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
						 struct bfq_io_cq *bic,
						 struct blkcg *blkcg)
{
	struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
	struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
	struct bfq_group *bfqg;
	struct bfq_entity *entity;

	bfqg = bfq_find_set_group(bfqd, blkcg);

	if (unlikely(!bfqg))
		bfqg = bfqd->root_group;

	if (async_bfqq) {
		entity = &async_bfqq->entity;

		if (entity->sched_data != &bfqg->sched_data) {
			bic_set_bfqq(bic, NULL, 0);
			bfq_log_bfqq(bfqd, async_bfqq,
				     "bic_change_group: %p %d",
				     async_bfqq, async_bfqq->ref);
			bfq_put_queue(async_bfqq);
		}
	}

	if (sync_bfqq) {
		entity = &sync_bfqq->entity;
		if (entity->sched_data != &bfqg->sched_data)
			bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
	}

	return bfqg;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
{
	struct bfq_data *bfqd = bic_to_bfqd(bic);
	struct bfq_group *bfqg = NULL;
	uint64_t serial_nr;

	rcu_read_lock();
	serial_nr = __bio_blkcg(bio)->css.serial_nr;

	/*
	 * Check whether blkcg has changed. The condition may trigger
	 * spuriously on a newly created cic but there's no harm.
	 */
	if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
		goto out;

	bfqg = __bfq_bic_change_cgroup(bfqd, bic, __bio_blkcg(bio));
	/*
	 * Update blkg_path for bfq_log_* functions. We cache this
	 * path, and update it here, for the following
	 * reasons. Operations on blkg objects in blk-cgroup are
	 * protected with the request_queue lock, and not with the
	 * lock that protects the instances of this scheduler
	 * (bfqd->lock).
	 * This exposes BFQ to the following sort of race.
	 *
	 * The blkg_lookup performed in bfq_get_queue, protected
	 * through rcu, may happen to return the address of a copy of
	 * the original blkg. If this is the case, then the
	 * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
	 * the blkg, is useless: it does not prevent blk-cgroup code
	 * from destroying both the original blkg and all objects
	 * directly or indirectly referred by the copy of the
	 * blkg.
	 *
	 * On the bright side, destroy operations on a blkg invoke, as
	 * a first step, hooks of the scheduler associated with the
	 * blkg. And these hooks are executed with bfqd->lock held for
	 * BFQ. As a consequence, for any blkg associated with the
	 * request queue this instance of the scheduler is attached
	 * to, we are guaranteed that such a blkg is not destroyed, and
	 * that all the pointers it contains are consistent, while we
	 * are holding bfqd->lock. A blkg_lookup performed with
	 * bfqd->lock held then returns a fully consistent blkg, which
	 * remains consistent as long as this lock is held.
	 *
	 * Thanks to the last fact, and to the fact that: (1) bfqg has
	 * been obtained through a blkg_lookup in the above
	 * assignment, and (2) bfqd->lock is being held, here we can
	 * safely use the policy data for the involved blkg (i.e., the
	 * field bfqg->pd) to get to the blkg associated with bfqg,
	 * and then we can safely use any field of blkg. After we
	 * release bfqd->lock, even just getting blkg through this
	 * bfqg may cause dangling references to be traversed, as
	 * bfqg->pd may not exist any more.
	 *
	 * In view of the above facts, here we cache, in the bfqg, any
	 * blkg data we may need for this bic, and for its associated
	 * bfq_queue. As of now, we need to cache only the path of the
	 * blkg, which is used in the bfq_log_* functions.
	 *
	 * Finally, note that bfqg itself needs to be protected from
	 * destruction on the blkg_free of the original blkg (which
	 * invokes bfq_pd_free). We use an additional private
	 * refcounter for bfqg, to let it disappear only after no
	 * bfq_queue refers to it any longer.
	 */
	blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
	bic->blkcg_serial_nr = serial_nr;
out:
	rcu_read_unlock();
}

/**
 * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
 * @st: the service tree being flushed.
 */
static void bfq_flush_idle_tree(struct bfq_service_tree *st)
{
	struct bfq_entity *entity = st->first_idle;

	for (; entity ; entity = st->first_idle)
		__bfq_deactivate_entity(entity, false);
}

/**
 * bfq_reparent_leaf_entity - move leaf entity to the root_group.
 * @bfqd: the device data structure with the root group.
 * @entity: the entity to move.
 */
static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
				     struct bfq_entity *entity)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
}

/**
 * bfq_reparent_active_entities - move to the root group all active
 *				  entities.
 * @bfqd: the device data structure with the root group.
 * @bfqg: the group to move from.
 * @st: the service tree with the entities.
 */
static void bfq_reparent_active_entities(struct bfq_data *bfqd,
					 struct bfq_group *bfqg,
					 struct bfq_service_tree *st)
{
	struct rb_root *active = &st->active;
	struct bfq_entity *entity = NULL;

	if (!RB_EMPTY_ROOT(&st->active))
		entity = bfq_entity_of(rb_first(active));

	for (; entity ; entity = bfq_entity_of(rb_first(active)))
		bfq_reparent_leaf_entity(bfqd, entity);

	if (bfqg->sched_data.in_service_entity)
		bfq_reparent_leaf_entity(bfqd,
					 bfqg->sched_data.in_service_entity);
}

/**
 * bfq_pd_offline - deactivate the entity associated with @pd,
 *		    and reparent its children entities.
 * @pd: descriptor of the policy going offline.
 *
 * blkio already grabs the queue_lock for us, so no need to use
 * RCU-based magic
 */
static void bfq_pd_offline(struct blkg_policy_data *pd)
{
	struct bfq_service_tree *st;
	struct bfq_group *bfqg = pd_to_bfqg(pd);
	struct bfq_data *bfqd = bfqg->bfqd;
	struct bfq_entity *entity = bfqg->my_entity;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&bfqd->lock, flags);

	if (!entity) /* root group */
		goto put_async_queues;

	/*
	 * Empty all service_trees belonging to this group before
	 * deactivating the group itself.
	 */
	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
		st = bfqg->sched_data.service_tree + i;

		/*
		 * The idle tree may still contain bfq_queues belonging
		 * to exited tasks because they never migrated to a different
		 * cgroup from the one being destroyed now.
		 */
		bfq_flush_idle_tree(st);

		/*
		 * It may happen that some queues are still active
		 * (busy) upon group destruction (if the corresponding
		 * processes have been forced to terminate). We move
		 * all the leaf entities corresponding to these queues
		 * to the root_group.
		 * Also, it may happen that the group has an entity
		 * in service, which is disconnected from the active
		 * tree: it must be moved, too.
		 * There is no need to put the sync queues, as the
		 * scheduler has taken no reference.
		 */
		bfq_reparent_active_entities(bfqd, bfqg, st);
	}

	__bfq_deactivate_entity(entity, false);

put_async_queues:
	bfq_put_async_queues(bfqd, bfqg);

	spin_unlock_irqrestore(&bfqd->lock, flags);
	/*
	 * @blkg is going offline and will be ignored by
	 * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
	 * that they don't get lost. If IOs complete after this point, the
	 * stats for them will be lost. Oh well...
	 */
	bfqg_stats_xfer_dead(bfqg);
}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
	struct blkcg_gq *blkg;

	list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		bfq_end_wr_async_queues(bfqd, bfqg);
	}
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

static int bfq_io_show_weight_legacy(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
	unsigned int val = 0;

	if (bfqgd)
		val = bfqgd->weight;

	seq_printf(sf, "%u\n", val);

	return 0;
}

static u64 bfqg_prfill_weight_device(struct seq_file *sf,
				     struct blkg_policy_data *pd, int off)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	if (!bfqg->entity.dev_weight)
		return 0;
	return __blkg_prfill_u64(sf, pd, bfqg->entity.dev_weight);
}

static int bfq_io_show_weight(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);

	seq_printf(sf, "default %u\n", bfqgd->weight);
	blkcg_print_blkgs(sf, blkcg, bfqg_prfill_weight_device,
			  &blkcg_policy_bfq, 0, false);
	return 0;
}

static void bfq_group_set_weight(struct bfq_group *bfqg, u64 weight, u64 dev_weight)
{
	weight = dev_weight ?: weight;

	bfqg->entity.dev_weight = dev_weight;
	/*
	 * Setting the prio_changed flag of the entity
	 * to 1 with new_weight == weight would re-set
	 * the value of the weight to its ioprio mapping.
	 * Set the flag only if necessary.
	 */
	if ((unsigned short)weight != bfqg->entity.new_weight) {
		bfqg->entity.new_weight = (unsigned short)weight;
		/*
		 * Make sure that the above new value has been
		 * stored in bfqg->entity.new_weight before
		 * setting the prio_changed flag. In fact,
		 * this flag may be read asynchronously (in
		 * critical sections protected by a different
		 * lock than that held here), and finding this
		 * flag set may cause the execution of the code
		 * for updating parameters whose value may
		 * depend also on bfqg->entity.new_weight (in
		 * __bfq_entity_update_weight_prio).
		 * This barrier makes sure that the new value
		 * of bfqg->entity.new_weight is correctly
		 * seen in that code.
		 */
		smp_wmb();
		bfqg->entity.prio_changed = 1;
	}
}
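
/*
 * Note: bfq_group_set_weight() above is the common path for both the
 * default weight and the per-device weight: a non-zero dev_weight takes
 * precedence over weight, while dev_weight == 0 drops any per-device
 * override and falls back to the default weight.
 */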

static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
				    struct cftype *cftype,
				    u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
	struct blkcg_gq *blkg;
	int ret = -ERANGE;

	if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
		return ret;

	ret = 0;
	spin_lock_irq(&blkcg->lock);
	bfqgd->weight = (unsigned short)val;
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		if (bfqg)
			bfq_group_set_weight(bfqg, val, 0);
	}
	spin_unlock_irq(&blkcg->lock);

	return ret;
}

static ssize_t bfq_io_set_device_weight(struct kernfs_open_file *of,
					char *buf, size_t nbytes,
					loff_t off)
{
	int ret;
	struct blkg_conf_ctx ctx;
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct bfq_group *bfqg;
	u64 v;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_bfq, buf, &ctx);
	if (ret)
		return ret;

	if (sscanf(ctx.body, "%llu", &v) == 1) {
		/* require "default" on dfl */
		ret = -ERANGE;
		if (!v)
			goto out;
	} else if (!strcmp(strim(ctx.body), "default")) {
		v = 0;
	} else {
		ret = -EINVAL;
		goto out;
	}

	bfqg = blkg_to_bfqg(ctx.blkg);

	ret = -ERANGE;
	if (!v || (v >= BFQ_MIN_WEIGHT && v <= BFQ_MAX_WEIGHT)) {
		bfq_group_set_weight(bfqg, bfqg->entity.weight, v);
		ret = 0;
	}
out:
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}

static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
				 char *buf, size_t nbytes,
				 loff_t off)
{
	char *endp;
	int ret;
	u64 v;

	buf = strim(buf);

	/* "WEIGHT" or "default WEIGHT" sets the default weight */
	v = simple_strtoull(buf, &endp, 0);
	if (*endp == '\0' || sscanf(buf, "default %llu", &v) == 1) {
		ret = bfq_io_set_weight_legacy(of_css(of), NULL, v);
		return ret ?: nbytes;
	}

	return bfq_io_set_device_weight(of, buf, nbytes, off);
}

static int bfqg_print_rwstat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
			  &blkcg_policy_bfq, seq_cft(sf)->private, true);
	return 0;
}

static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
					struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat_sample sum;

	blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_bfq, off, &sum);
	return __blkg_prfill_rwstat(sf, pd, &sum);
}

static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
			  seq_cft(sf)->private, true);
	return 0;
}

#ifdef CONFIG_BFQ_CGROUP_DEBUG
static int bfqg_print_stat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
			  &blkcg_policy_bfq, seq_cft(sf)->private, false);
	return 0;
}

static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	struct blkcg_gq *blkg = pd_to_blkg(pd);
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	u64 sum = 0;

	lockdep_assert_held(&blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
		struct bfq_stat *stat;

		if (!pos_blkg->online)
			continue;

		stat = (void *)blkg_to_pd(pos_blkg, &blkcg_policy_bfq) + off;
		sum += bfq_stat_read(stat) + atomic64_read(&stat->aux_cnt);
	}
	rcu_read_unlock();

	return __blkg_prfill_u64(sf, pd, sum);
}
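
/*
 * bfqg_prfill_stat_recursive() above sums a bfq_stat over a blkg and all
 * of its online descendants, including each aux count, so contributions
 * transferred by bfqg_stats_xfer_dead() when a child goes offline are
 * not lost.
 */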

static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_stat_recursive, &blkcg_policy_bfq,
			  seq_cft(sf)->private, false);
	return 0;
}

static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	struct bfq_group *bfqg = blkg_to_bfqg(pd->blkg);
	u64 sum = blkg_rwstat_total(&bfqg->stats.bytes);

	return __blkg_prfill_u64(sf, pd, sum >> 9);
}

static int bfqg_print_stat_sectors(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false);
	return 0;
}

static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
					 struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat_sample tmp;

	blkg_rwstat_recursive_sum(pd->blkg, &blkcg_policy_bfq,
			offsetof(struct bfq_group, stats.bytes), &tmp);

	return __blkg_prfill_u64(sf, pd,
		(tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE]) >> 9);
}

static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0,
			  false);
	return 0;
}

static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);
	u64 samples = bfq_stat_read(&bfqg->stats.avg_queue_size_samples);
	u64 v = 0;

	if (samples) {
		v = bfq_stat_read(&bfqg->stats.avg_queue_size_sum);
		v = div64_u64(v, samples);
	}
	__blkg_prfill_u64(sf, pd, v);
	return 0;
}

/* print avg_queue_size */
static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_avg_queue_size, &blkcg_policy_bfq,
			  0, false);
	return 0;
}
#endif /* CONFIG_BFQ_CGROUP_DEBUG */

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
	int ret;

	ret = blkcg_activate_policy(bfqd->queue, &blkcg_policy_bfq);
	if (ret)
		return NULL;

	return blkg_to_bfqg(bfqd->queue->root_blkg);
}

struct blkcg_policy blkcg_policy_bfq = {
	.dfl_cftypes		= bfq_blkg_files,
	.legacy_cftypes		= bfq_blkcg_legacy_files,

	.cpd_alloc_fn		= bfq_cpd_alloc,
	.cpd_init_fn		= bfq_cpd_init,
	.cpd_bind_fn		= bfq_cpd_init,
	.cpd_free_fn		= bfq_cpd_free,

	.pd_alloc_fn		= bfq_pd_alloc,
	.pd_init_fn		= bfq_pd_init,
	.pd_offline_fn		= bfq_pd_offline,
	.pd_free_fn		= bfq_pd_free,
	.pd_reset_stats_fn	= bfq_pd_reset_stats,
};

struct cftype bfq_blkcg_legacy_files[] = {
	{
		.name = "bfq.weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight_legacy,
		.write_u64 = bfq_io_set_weight_legacy,
	},
	{
		.name = "bfq.weight_device",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight,
		.write = bfq_io_set_weight,
	},

	/* statistics, covers only the tasks in the bfqg */
	{
		.name = "bfq.io_service_bytes",
		.private = offsetof(struct bfq_group, stats.bytes),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_serviced",
		.private = offsetof(struct bfq_group, stats.ios),
		.seq_show = bfqg_print_rwstat,
	},
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	{
		.name = "bfq.time",
		.private = offsetof(struct bfq_group, stats.time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.sectors",
		.seq_show = bfqg_print_stat_sectors,
	},
	{
		.name = "bfq.io_service_time",
		.private = offsetof(struct bfq_group, stats.service_time),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_wait_time",
		.private = offsetof(struct bfq_group, stats.wait_time),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_merged",
		.private = offsetof(struct bfq_group, stats.merged),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_queued",
		.private = offsetof(struct bfq_group, stats.queued),
		.seq_show = bfqg_print_rwstat,
	},
#endif /* CONFIG_BFQ_CGROUP_DEBUG */

	/* the same statistics which cover the bfqg and its descendants */
	{
		.name = "bfq.io_service_bytes_recursive",
		.private = offsetof(struct bfq_group, stats.bytes),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_serviced_recursive",
		.private = offsetof(struct bfq_group, stats.ios),
		.seq_show = bfqg_print_rwstat_recursive,
	},
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	{
		.name = "bfq.time_recursive",
		.private = offsetof(struct bfq_group, stats.time),
		.seq_show = bfqg_print_stat_recursive,
	},
	{
		.name = "bfq.sectors_recursive",
		.seq_show = bfqg_print_stat_sectors_recursive,
	},
	{
		.name = "bfq.io_service_time_recursive",
		.private = offsetof(struct bfq_group, stats.service_time),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_wait_time_recursive",
		.private = offsetof(struct bfq_group, stats.wait_time),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_merged_recursive",
		.private = offsetof(struct bfq_group, stats.merged),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_queued_recursive",
		.private = offsetof(struct bfq_group, stats.queued),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.avg_queue_size",
		.seq_show = bfqg_print_avg_queue_size,
	},
	{
		.name = "bfq.group_wait_time",
		.private = offsetof(struct bfq_group, stats.group_wait_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.idle_time",
		.private = offsetof(struct bfq_group, stats.idle_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.empty_time",
		.private = offsetof(struct bfq_group, stats.empty_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.dequeue",
		.private = offsetof(struct bfq_group, stats.dequeue),
		.seq_show = bfqg_print_stat,
	},
#endif /* CONFIG_BFQ_CGROUP_DEBUG */
	{ } /* terminate */
};

struct cftype bfq_blkg_files[] = {
	{
		.name = "bfq.weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight,
		.write = bfq_io_set_weight,
	},
	{ } /* terminate */
};

#else /* CONFIG_BFQ_GROUP_IOSCHED */

void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg) {}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;
	}
	entity->sched_data = &bfqg->sched_data;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, struct blkcg *blkcg)
{
	return bfqd->root_group;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	return bfqq->bfqd->root_group;
}

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
	struct bfq_group *bfqg;
	int i;

	bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
	if (!bfqg)
		return NULL;

	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
		bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;

	return bfqg;
}
#endif /* CONFIG_BFQ_GROUP_IOSCHED */