/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk-cgroup.h"

/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over a 100ms slice, after which the slice is renewed */
static unsigned long throtl_slice = HZ/10;	/* 100 ms */

struct throtl_rb_root {
	struct rb_root rb;
	struct rb_node *left;
	unsigned int count;
	unsigned long min_disptime;
};

#define THROTL_RB_ROOT	(struct throtl_rb_root) { .rb = RB_ROOT, .left = NULL, \
			.count = 0, .min_disptime = 0}

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

struct throtl_grp {
	/* List of throtl groups on the request queue */
	struct hlist_node tg_node;

	/* active throtl group service_tree member */
	struct rb_node rb_node;

	/*
	 * Dispatch time in jiffies. This is the estimated time when the group
	 * will unthrottle and is ready to dispatch more bios. It is used as a
	 * key to sort active groups in the service tree.
	 */
	unsigned long disptime;

	struct blkio_group blkg;
	atomic_t ref;
	unsigned int flags;

	/* Two lists for READ and WRITE */
	struct bio_list bio_lists[2];

	/* Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/* bytes per second rate limits */
	uint64_t bps[2];

	/* IOPS limits */
	unsigned int iops[2];

	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bios dispatched in current slice */
	unsigned int io_disp[2];

	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	/* Some throttle limits got updated for the group */
	bool limits_changed;
};

struct throtl_data
{
	/* List of throtl groups */
	struct hlist_head tg_list;

	/* service tree for active throtl groups */
	struct throtl_rb_root tg_service_tree;

	struct throtl_grp root_tg;
	struct request_queue *queue;

	/* Total number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/*
	 * number of total undestroyed groups
	 */
	unsigned int nr_undestroyed_grps;

	/* Work for dispatching throttled bios */
	struct delayed_work throtl_work;

	atomic_t limits_changed;
};

enum tg_state_flags {
	THROTL_TG_FLAG_on_rr = 0,	/* on round-robin busy list */
};

#define THROTL_TG_FNS(name)						\
static inline void throtl_mark_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags |= (1 << THROTL_TG_FLAG_##name);			\
}									\
static inline void throtl_clear_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags &= ~(1 << THROTL_TG_FLAG_##name);			\
}									\
static inline int throtl_tg_##name(const struct throtl_grp *tg)	\
{									\
	return ((tg)->flags & (1 << THROTL_TG_FLAG_##name)) != 0;	\
}

THROTL_TG_FNS(on_rr);

#define throtl_log_tg(td, tg, fmt, args...)				\
	blk_add_trace_msg((td)->queue, "throtl %s " fmt,		\
				blkg_path(&(tg)->blkg), ##args);	\

#define throtl_log(td, fmt, args...)	\
	blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)
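
/*
 * For reference, THROTL_TG_FNS(on_rr) above expands to three trivial helpers
 * that set, clear and test THROTL_TG_FLAG_on_rr in tg->flags:
 *
 *	throtl_mark_tg_on_rr(tg);	// tg->flags |= (1 << THROTL_TG_FLAG_on_rr)
 *	throtl_clear_tg_on_rr(tg);	// tg->flags &= ~(1 << THROTL_TG_FLAG_on_rr)
 *	throtl_tg_on_rr(tg);		// non-zero if the flag is set
 *
 * They are used below to track whether a group is currently on the service
 * tree.
 */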

static inline struct throtl_grp *tg_of_blkg(struct blkio_group *blkg)
{
	if (blkg)
		return container_of(blkg, struct throtl_grp, blkg);

	return NULL;
}

static inline int total_nr_queued(struct throtl_data *td)
{
	return (td->nr_queued[0] + td->nr_queued[1]);
}

static inline struct throtl_grp *throtl_ref_get_tg(struct throtl_grp *tg)
{
	atomic_inc(&tg->ref);
	return tg;
}

static void throtl_put_tg(struct throtl_grp *tg)
{
	BUG_ON(atomic_read(&tg->ref) <= 0);
	if (!atomic_dec_and_test(&tg->ref))
		return;
	kfree(tg);
}

static struct throtl_grp *throtl_find_alloc_tg(struct throtl_data *td,
			struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	struct throtl_grp *tg = NULL;
	void *key = td;
	struct backing_dev_info *bdi = &td->queue->backing_dev_info;
	unsigned int major, minor;

	/*
	 * TODO: Speed up blkiocg_lookup_group() by maintaining a radix
	 * tree of blkg (instead of traversing through the hash list all
	 * the time).
	 */

	/*
	 * This is the common case when there are no blkio cgroups.
	 * Avoid the lookup in this case.
	 */
	if (blkcg == &blkio_root_cgroup)
		tg = &td->root_tg;
	else
		tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));

	/* Fill in device details for root group */
	if (tg && !tg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
		tg->blkg.dev = MKDEV(major, minor);
		goto done;
	}

	if (tg)
		goto done;

	tg = kzalloc_node(sizeof(*tg), GFP_ATOMIC, td->queue->node);
	if (!tg)
		goto done;

	INIT_HLIST_NODE(&tg->tg_node);
	RB_CLEAR_NODE(&tg->rb_node);
	bio_list_init(&tg->bio_lists[0]);
	bio_list_init(&tg->bio_lists[1]);

	/*
	 * Take the initial reference that will be released on destroy.
	 * This can be thought of as a joint reference by cgroup and
	 * request queue which will be dropped by either request queue
	 * exit or cgroup deletion path depending on who is exiting first.
	 */
	atomic_set(&tg->ref, 1);

	/* Add group onto cgroup list */
	sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
	blkiocg_add_blkio_group(blkcg, &tg->blkg, (void *)td,
				MKDEV(major, minor), BLKIO_POLICY_THROTL);

	tg->bps[READ] = blkcg_get_read_bps(blkcg, tg->blkg.dev);
	tg->bps[WRITE] = blkcg_get_write_bps(blkcg, tg->blkg.dev);
	tg->iops[READ] = blkcg_get_read_iops(blkcg, tg->blkg.dev);
	tg->iops[WRITE] = blkcg_get_write_iops(blkcg, tg->blkg.dev);

	hlist_add_head(&tg->tg_node, &td->tg_list);
	td->nr_undestroyed_grps++;
done:
	return tg;
}

static struct throtl_grp *throtl_get_tg(struct throtl_data *td)
{
	struct cgroup *cgroup;
	struct throtl_grp *tg = NULL;

	rcu_read_lock();
	cgroup = task_cgroup(current, blkio_subsys_id);
	tg = throtl_find_alloc_tg(td, cgroup);
	if (!tg)
		tg = &td->root_tg;
	rcu_read_unlock();
	return tg;
}

static struct throtl_grp *throtl_rb_first(struct throtl_rb_root *root)
{
	/* Service tree is empty */
	if (!root->count)
		return NULL;

	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry_tg(root->left);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void throtl_rb_erase(struct rb_node *n, struct throtl_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;
	rb_erase_init(n, &root->rb);
	--root->count;
}

static void update_min_dispatch_time(struct throtl_rb_root *st)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(st);
	if (!tg)
		return;

	st->min_disptime = tg->disptime;
}

static void
tg_service_tree_add(struct throtl_rb_root *st, struct throtl_grp *tg)
{
	struct rb_node **node = &st->rb.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		st->left = &tg->rb_node;

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color(&tg->rb_node, &st->rb);
}

static void __throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	struct throtl_rb_root *st = &td->tg_service_tree;

	tg_service_tree_add(st, tg);
	throtl_mark_tg_on_rr(tg);
	st->count++;
}

static void throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	if (!throtl_tg_on_rr(tg))
		__throtl_enqueue_tg(td, tg);
}

static void __throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	throtl_rb_erase(&tg->rb_node, &td->tg_service_tree);
	throtl_clear_tg_on_rr(tg);
}

static void throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	if (throtl_tg_on_rr(tg))
		__throtl_dequeue_tg(td, tg);
}
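
/*
 * The helpers above implement the service tree: active groups are kept in an
 * rbtree keyed by tg->disptime (the time at which the group may dispatch
 * next), and the leftmost/earliest node is cached in st->left so that
 * throtl_rb_first() and update_min_dispatch_time() do not have to walk the
 * tree on every lookup.
 */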

static void throtl_schedule_next_dispatch(struct throtl_data *td)
{
	struct throtl_rb_root *st = &td->tg_service_tree;

	/*
	 * If there are more bios pending, schedule more work.
	 */
	if (!total_nr_queued(td))
		return;

	BUG_ON(!st->count);

	update_min_dispatch_time(st);

	if (time_before_eq(st->min_disptime, jiffies))
		throtl_schedule_delayed_work(td->queue, 0);
	else
		throtl_schedule_delayed_work(td->queue,
				(st->min_disptime - jiffies));
}

static inline void
throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + throtl_slice;
	throtl_log_tg(td, tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}

static inline void throtl_set_slice_end(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
	throtl_log_tg(td, tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}

/* Determine if the previously allocated or extended slice is complete or not */
static bool
throtl_slice_used(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return 0;

	return 1;
}

/* Trim the used slices and adjust slice start accordingly */
static inline void
throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, time_elapsed, io_trim;
	u64 bytes_trim, tmp;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps are unlimited (-1), then the time slice doesn't get
	 * renewed. Don't try to trim the slice if the slice is used up.
	 * A new slice will start when appropriate.
	 */
	if (throtl_slice_used(td, tg, rw))
		return;

	/*
	 * A bio has been dispatched. Also adjust slice_end. It might happen
	 * that initially the cgroup limit was very low, resulting in a high
	 * slice_end, but later the limit was bumped up and the bio was
	 * dispatched sooner; then we need to reduce slice_end. A bogus high
	 * slice_end is bad because it does not allow a new slice to start.
	 */

	throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / throtl_slice;

	if (!nr_slices)
		return;
	tmp = tg->bps[rw] * throtl_slice * nr_slices;
	do_div(tmp, HZ);
	bytes_trim = tmp;

	io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * throtl_slice;

	throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
			" start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
			tg->slice_start[rw], tg->slice_end[rw], jiffies);
}
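
/*
 * A worked example of the trim above (illustrative numbers, assuming
 * HZ == 1000, so throtl_slice == 100 jiffies): with bps[rw] == 1048576
 * (1 MiB/s) and iops[rw] == 100, suppose 300 jiffies have elapsed since
 * slice_start, i.e. nr_slices == 3.  Then
 *
 *	bytes_trim = 1048576 * 100 * 3 / 1000 = 314572 bytes
 *	io_trim    = 100 * 100 * 3 / 1000     = 30 ios
 *
 * so roughly three slices worth of budget is subtracted from
 * bytes_disp/io_disp and slice_start is advanced by 300 jiffies, keeping only
 * the most recent (possibly partially used) slice worth of history.
 */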

static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	u64 tmp;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a big value: the minimum iops can
	 * be 1, in which case the elapsed jiffies are at most equivalent to
	 * 1 second, as we allow dispatch after 1 second and by then the
	 * slice should have been trimmed.
	 */

	tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;

	if (jiffy_wait > jiffy_elapsed)
		jiffy_wait = jiffy_wait - jiffy_elapsed;
	else
		jiffy_wait = 1;

	if (wait)
		*wait = jiffy_wait;
	return 0;
}

static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	tmp = tg->bps[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);
	bytes_allowed = tmp;

	if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time is without taking into consideration the rounding
	 * up we did. Add that time also.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return 0;
}
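
/*
 * A worked example of the bps check above (illustrative numbers, again
 * assuming HZ == 1000 and therefore throtl_slice == 100 jiffies): a group is
 * limited to bps[rw] == 1048576 (1 MiB/s), a new slice has just started
 * (jiffy_elapsed == 0, rounded up to jiffy_elapsed_rnd == 100) and a 1 MiB
 * bio arrives with bytes_disp[rw] == 0.
 *
 *	bytes_allowed = 1048576 * 100 / 1000	= 104857 bytes
 *	extra_bytes   = 0 + 1048576 - 104857	= 943719 bytes
 *	jiffy_wait    = 943719 * 1000 / 1048576	= 900 jiffies
 *	jiffy_wait   += (100 - 0)		= 1000 jiffies
 *
 * i.e. the bio may be dispatched roughly one second after the slice started,
 * which is what a 1 MiB/s limit should give for a 1 MiB bio.
 */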

/*
 * Returns whether one can dispatch a bio or not. Also returns the approx
 * number of jiffies to wait before this bio is within the IO rate and can
 * be dispatched.
 */
static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
				struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

	/*
	 * Currently the whole state machine of the group depends on the first
	 * bio queued in the group's bio list. So one should not be calling
	 * this function with a different bio if there are other bios queued.
	 */
	BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/*
	 * If the previous slice expired, start a new one, otherwise
	 * renew/extend the existing slice to make sure it is at least
	 * throtl_slice interval long since now.
	 */
	if (throtl_slice_used(td, tg, rw))
		throtl_start_new_slice(td, tg, rw);
	else {
		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
			throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);
	}

	if (tg_with_in_bps_limit(td, tg, bio, &bps_wait)
	    && tg_with_in_iops_limit(td, tg, bio, &iops_wait)) {
		if (wait)
			*wait = 0;
		return 1;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(td, tg, rw, jiffies + max_wait);

	return 0;
}

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);
	bool sync = bio->bi_rw & REQ_SYNC;

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio->bi_size;
	tg->io_disp[rw]++;

	/*
	 * TODO: This will take blkg->stats_lock. Figure out a way
	 * to avoid this cost.
	 */
	blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, rw, sync);
}

static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
			struct bio *bio)
{
	bool rw = bio_data_dir(bio);

	bio_list_add(&tg->bio_lists[rw], bio);
	/* Take a bio reference on tg */
	throtl_ref_get_tg(tg);
	tg->nr_queued[rw]++;
	td->nr_queued[rw]++;
	throtl_enqueue_tg(td, tg);
}

static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
{
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	if ((bio = bio_list_peek(&tg->bio_lists[READ])))
		tg_may_dispatch(td, tg, bio, &read_wait);

	if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
		tg_may_dispatch(td, tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(td, tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(td, tg);
}

static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
				bool rw, struct bio_list *bl)
{
	struct bio *bio;

	bio = bio_list_pop(&tg->bio_lists[rw]);
	tg->nr_queued[rw]--;
	/* Drop bio reference on tg */
	throtl_put_tg(tg);

	BUG_ON(td->nr_queued[rw] <= 0);
	td->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);
	bio_list_add(bl, bio);
	bio->bi_rw |= REQ_THROTTLED;

	throtl_trim_slice(td, tg, rw);
}
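
/*
 * Note on the REQ_THROTTLED flag set above: a bio dispatched from the
 * throttle queue is charged to the group here and later resubmitted via
 * generic_make_request(), where it would pass through the throttling check
 * again.  The flag tells that second pass in blk_throtl_bio() to skip
 * throttling (and is cleared there), so a bio is never charged or queued
 * twice.
 */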

static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
				struct bio_list *bl)
{
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */

	while ((bio = bio_list_peek(&tg->bio_lists[READ]))
		&& tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = bio_list_peek(&tg->bio_lists[WRITE]))
		&& tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}

static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
{
	unsigned int nr_disp = 0;
	struct throtl_grp *tg;
	struct throtl_rb_root *st = &td->tg_service_tree;

	while (1) {
		tg = throtl_rb_first(st);

		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		throtl_dequeue_tg(td, tg);

		nr_disp += throtl_dispatch_tg(td, tg, bl);

		if (tg->nr_queued[0] || tg->nr_queued[1]) {
			tg_update_disptime(td, tg);
			throtl_enqueue_tg(td, tg);
		}

		if (nr_disp >= throtl_quantum)
			break;
	}

	return nr_disp;
}
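
/*
 * With the default quantums defined at the top of this file the arithmetic
 * works out as follows: per group and per round, max_nr_reads = 8 * 3/4 = 6
 * and max_nr_writes = 8 - 6 = 2, and throtl_select_dispatch() stops once 32
 * bios in total have been dispatched in a round.  So after roughly four
 * fully loaded groups the loop breaks and the remaining work waits for the
 * next dispatch round.
 */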

static void throtl_process_limit_change(struct throtl_data *td)
{
	struct throtl_grp *tg;
	struct hlist_node *pos, *n;

	if (!atomic_read(&td->limits_changed))
		return;

	throtl_log(td, "limit changed =%d", atomic_read(&td->limits_changed));

	/*
	 * Make sure updates from the throtl_update_blkio_group_read_bps()
	 * group of functions to tg->limits_changed are visible. We do not
	 * want the update to td->limits_changed to be visible while the
	 * update to tg->limits_changed is not yet visible on this cpu.
	 * Hence the read barrier.
	 */
	smp_rmb();

	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
		if (throtl_tg_on_rr(tg) && tg->limits_changed) {
			throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
				" riops=%u wiops=%u", tg->bps[READ],
				tg->bps[WRITE], tg->iops[READ],
				tg->iops[WRITE]);
			tg_update_disptime(td, tg);
			tg->limits_changed = false;
		}
	}

	smp_mb__before_atomic_dec();
	atomic_dec(&td->limits_changed);
	smp_mb__after_atomic_dec();
}

/* Dispatch throttled bios. Should be called without queue lock held. */
static int throtl_dispatch(struct request_queue *q)
{
	struct throtl_data *td = q->td;
	unsigned int nr_disp = 0;
	struct bio_list bio_list_on_stack;
	struct bio *bio;

	spin_lock_irq(q->queue_lock);

	throtl_process_limit_change(td);

	if (!total_nr_queued(td))
		goto out;

	bio_list_init(&bio_list_on_stack);

	throtl_log(td, "dispatch nr_queued=%lu read=%u write=%u",
			total_nr_queued(td), td->nr_queued[READ],
			td->nr_queued[WRITE]);

	nr_disp = throtl_select_dispatch(td, &bio_list_on_stack);

	if (nr_disp)
		throtl_log(td, "bios disp=%u", nr_disp);

	throtl_schedule_next_dispatch(td);
out:
	spin_unlock_irq(q->queue_lock);

	/*
	 * If we dispatched some requests, unplug the queue to make sure
	 * of immediate dispatch.
	 */
	if (nr_disp) {
		while ((bio = bio_list_pop(&bio_list_on_stack)))
			generic_make_request(bio);
		blk_unplug(q);
	}
	return nr_disp;
}

void blk_throtl_work(struct work_struct *work)
{
	struct throtl_data *td = container_of(work, struct throtl_data,
					throtl_work.work);
	struct request_queue *q = td->queue;

	throtl_dispatch(q);
}

/* Call with queue lock held */
void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay)
{
	struct throtl_data *td = q->td;
	struct delayed_work *dwork = &td->throtl_work;

	if (total_nr_queued(td) > 0) {
		/*
		 * We might have a work scheduled to be executed in future.
		 * Cancel that and schedule a new one.
		 */
		__cancel_delayed_work(dwork);
		kblockd_schedule_delayed_work(q, dwork, delay);
		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
				delay, jiffies);
	}
}
EXPORT_SYMBOL(throtl_schedule_delayed_work);

static void
throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	/* Something is wrong if we are trying to remove the same group twice */
	BUG_ON(hlist_unhashed(&tg->tg_node));

	hlist_del_init(&tg->tg_node);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, the group can be destroyed.
	 */
	throtl_put_tg(tg);
	td->nr_undestroyed_grps--;
}

static void throtl_release_tgs(struct throtl_data *td)
{
	struct hlist_node *pos, *n;
	struct throtl_grp *tg;

	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
		/*
		 * If the cgroup removal path got to the blk_group first and
		 * removed it from the cgroup list, then it will take care of
		 * destroying the throtl group as well.
		 */
		if (!blkiocg_del_blkio_group(&tg->blkg))
			throtl_destroy_tg(td, tg);
	}
}

static void throtl_td_free(struct throtl_data *td)
{
	kfree(td);
}
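
/*
 * Group lifetime, in short: every throtl_grp starts with one reference
 * (taken in throtl_find_alloc_tg()) that is shared, conceptually, by the
 * request queue and the cgroup, plus one reference per queued bio.  The
 * creation reference is dropped either on queue exit via
 * throtl_release_tgs() above or from the cgroup removal path via
 * throtl_unlink_blkio_group() below; whichever throtl_put_tg() call drops
 * the last reference frees the group.
 */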

/*
 * Blk cgroup controller notification saying that the blkio_group object is
 * being delinked as the associated cgroup object is going away. That also
 * means that no new IO will come in this group. So get rid of this group as
 * soon as any pending IO in the group is finished.
 *
 * This function is called under rcu_read_lock(). "key" is the rcu protected
 * pointer. That means "key" is a valid throtl_data pointer as long as we
 * are under the rcu read lock.
 *
 * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
 * it should not be NULL as even if the queue was going away, the cgroup
 * deletion path got to it first.
 */
void throtl_unlink_blkio_group(void *key, struct blkio_group *blkg)
{
	unsigned long flags;
	struct throtl_data *td = key;

	spin_lock_irqsave(td->queue->queue_lock, flags);
	throtl_destroy_tg(td, tg_of_blkg(blkg));
	spin_unlock_irqrestore(td->queue->queue_lock, flags);
}

/*
 * For all update functions, "key" should be a valid pointer because these
 * update functions are called under blkcg_lock; that means blkg is valid
 * and in turn key is valid. The queue exit path cannot race because of
 * blkcg_lock.
 *
 * We cannot take the queue lock in the update functions, as taking the
 * queue lock under blkcg_lock is not allowed. On other paths we take
 * blkcg_lock under queue_lock.
 */
static void throtl_update_blkio_group_read_bps(void *key,
				struct blkio_group *blkg, u64 read_bps)
{
	struct throtl_data *td = key;

	tg_of_blkg(blkg)->bps[READ] = read_bps;
	/* Make sure read_bps is updated before setting limits_changed */
	smp_wmb();
	tg_of_blkg(blkg)->limits_changed = true;

	/* Make sure tg->limits_changed is updated before td->limits_changed */
	smp_mb__before_atomic_inc();
	atomic_inc(&td->limits_changed);
	smp_mb__after_atomic_inc();

	/* Schedule a work now to process the limit change */
	throtl_schedule_delayed_work(td->queue, 0);
}

static void throtl_update_blkio_group_write_bps(void *key,
				struct blkio_group *blkg, u64 write_bps)
{
	struct throtl_data *td = key;

	tg_of_blkg(blkg)->bps[WRITE] = write_bps;
	smp_wmb();
	tg_of_blkg(blkg)->limits_changed = true;
	smp_mb__before_atomic_inc();
	atomic_inc(&td->limits_changed);
	smp_mb__after_atomic_inc();
	throtl_schedule_delayed_work(td->queue, 0);
}

static void throtl_update_blkio_group_read_iops(void *key,
			struct blkio_group *blkg, unsigned int read_iops)
{
	struct throtl_data *td = key;

	tg_of_blkg(blkg)->iops[READ] = read_iops;
	smp_wmb();
	tg_of_blkg(blkg)->limits_changed = true;
	smp_mb__before_atomic_inc();
	atomic_inc(&td->limits_changed);
	smp_mb__after_atomic_inc();
	throtl_schedule_delayed_work(td->queue, 0);
}

static void throtl_update_blkio_group_write_iops(void *key,
			struct blkio_group *blkg, unsigned int write_iops)
{
	struct throtl_data *td = key;

	tg_of_blkg(blkg)->iops[WRITE] = write_iops;
	smp_wmb();
	tg_of_blkg(blkg)->limits_changed = true;
	smp_mb__before_atomic_inc();
	atomic_inc(&td->limits_changed);
	smp_mb__after_atomic_inc();
	throtl_schedule_delayed_work(td->queue, 0);
}

void throtl_shutdown_timer_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_delayed_work_sync(&td->throtl_work);
}

static struct blkio_policy_type blkio_policy_throtl = {
	.ops = {
		.blkio_unlink_group_fn = throtl_unlink_blkio_group,
		.blkio_update_group_read_bps_fn =
					throtl_update_blkio_group_read_bps,
		.blkio_update_group_write_bps_fn =
					throtl_update_blkio_group_write_bps,
		.blkio_update_group_read_iops_fn =
					throtl_update_blkio_group_read_iops,
		.blkio_update_group_write_iops_fn =
					throtl_update_blkio_group_write_iops,
	},
	.plid = BLKIO_POLICY_THROTL,
};
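
/*
 * Usage sketch (paths and numbers are examples only, assuming the blkio
 * controller is mounted at /sys/fs/cgroup/blkio): the update callbacks
 * registered above are invoked when an administrator writes a
 * "major:minor limit" pair to the per-cgroup throttle files, e.g.
 *
 *	# limit reads on device 8:16 to 1 MiB/s for cgroup "grp1"
 *	echo "8:16 1048576" > /sys/fs/cgroup/blkio/grp1/blkio.throttle.read_bps_device
 *
 *	# limit writes on the same device to 100 IOs per second
 *	echo "8:16 100" > /sys/fs/cgroup/blkio/grp1/blkio.throttle.write_iops_device
 *
 * The corresponding read_iops/write_bps files map to the remaining two
 * callbacks in the same way.
 */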

int blk_throtl_bio(struct request_queue *q, struct bio **biop)
{
	struct throtl_data *td = q->td;
	struct throtl_grp *tg;
	struct bio *bio = *biop;
	bool rw = bio_data_dir(bio), update_disptime = true;

	if (bio->bi_rw & REQ_THROTTLED) {
		bio->bi_rw &= ~REQ_THROTTLED;
		return 0;
	}

	spin_lock_irq(q->queue_lock);
	tg = throtl_get_tg(td);

	if (tg->nr_queued[rw]) {
		/*
		 * There is already another bio queued in the same direction.
		 * No need to update the dispatch time.
		 * Still update the disptime if rate limits on this group
		 * were changed.
		 */
		if (!tg->limits_changed)
			update_disptime = false;
		else
			tg->limits_changed = false;

		goto queue_bio;
	}

	/* Bio is within the rate limit of the group */
	if (tg_may_dispatch(td, tg, bio, NULL)) {
		throtl_charge_bio(tg, bio);
		goto out;
	}

queue_bio:
	throtl_log_tg(td, tg, "[%c] bio. bdisp=%u sz=%u bps=%llu"
			" iodisp=%u iops=%u queued=%d/%d",
			rw == READ ? 'R' : 'W',
			tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
			tg->io_disp[rw], tg->iops[rw],
			tg->nr_queued[READ], tg->nr_queued[WRITE]);

	throtl_add_bio_tg(q->td, tg, bio);
	*biop = NULL;

	if (update_disptime) {
		tg_update_disptime(td, tg);
		throtl_schedule_next_dispatch(td);
	}

out:
	spin_unlock_irq(q->queue_lock);
	return 0;
}

int blk_throtl_init(struct request_queue *q)
{
	struct throtl_data *td;
	struct throtl_grp *tg;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;

	INIT_HLIST_HEAD(&td->tg_list);
	td->tg_service_tree = THROTL_RB_ROOT;
	atomic_set(&td->limits_changed, 0);

	/* Init root group */
	tg = &td->root_tg;
	INIT_HLIST_NODE(&tg->tg_node);
	RB_CLEAR_NODE(&tg->rb_node);
	bio_list_init(&tg->bio_lists[0]);
	bio_list_init(&tg->bio_lists[1]);

	/* Practically unlimited BW */
	tg->bps[0] = tg->bps[1] = -1;
	tg->iops[0] = tg->iops[1] = -1;

	/*
	 * Set the root group reference to 2. One reference will be dropped
	 * when all groups on tg_list are being deleted during queue exit.
	 * The other reference will remain there as we don't want to delete
	 * this group: it is statically allocated and gets destroyed when
	 * throtl_data goes away.
	 */
	atomic_set(&tg->ref, 2);
	hlist_add_head(&tg->tg_node, &td->tg_list);
	td->nr_undestroyed_grps++;

	INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);

	rcu_read_lock();
	blkiocg_add_blkio_group(&blkio_root_cgroup, &tg->blkg, (void *)td,
					0, BLKIO_POLICY_THROTL);
	rcu_read_unlock();

	/* Attach throtl data to request queue */
	td->queue = q;
	q->td = td;
	return 0;
}

void blk_throtl_exit(struct request_queue *q)
{
	struct throtl_data *td = q->td;
	bool wait = false;

	BUG_ON(!td);

	throtl_shutdown_timer_wq(q);

	spin_lock_irq(q->queue_lock);
	throtl_release_tgs(td);

	/* If there are other groups */
	if (td->nr_undestroyed_grps > 0)
		wait = true;

	spin_unlock_irq(q->queue_lock);

	/*
	 * Wait for tg->blkg->key accessors to exit their grace periods.
	 * Do this wait only if there are other undestroyed groups out
	 * there (other than the root group). This can happen if the cgroup
	 * deletion path claimed the responsibility of cleaning up a group
	 * before the queue cleanup code got to the group.
	 *
	 * Do not call synchronize_rcu() unconditionally as there are drivers
	 * which create/delete request queues hundreds of times during scan/boot
	 * and synchronize_rcu() can take significant time and slow down boot.
	 */
	if (wait)
		synchronize_rcu();

	/*
	 * Just being safe: if, after the previous flush, somebody updated
	 * limits through the cgroup and another work got queued, cancel it.
	 */
	throtl_shutdown_timer_wq(q);
	throtl_td_free(td);
}

static int __init throtl_init(void)
{
	blkio_policy_register(&blkio_policy_throtl);
	return 0;
}

module_init(throtl_init);