// SPDX-License-Identifier: GPL-2.0
/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include <linux/blk-cgroup.h>
#include "blk.h"

/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over a slice and after that slice is renewed */
#define DFL_THROTL_SLICE_HD (HZ / 10)
#define DFL_THROTL_SLICE_SSD (HZ / 50)
#define MAX_THROTL_SLICE (HZ)
#define MAX_IDLE_TIME (5L * 1000 * 1000) /* 5 s */
#define MIN_THROTL_BPS (320 * 1024)
#define MIN_THROTL_IOPS (10)
#define DFL_LATENCY_TARGET (-1L)
#define DFL_IDLE_THRESHOLD (0)
#define DFL_HD_BASELINE_LATENCY (4000L) /* 4ms */
#define LATENCY_FILTERED_SSD (0)
/*
 * For HD, very small latency comes from sequential IO. Such IO tells us
 * nothing about whether the cgroup's IO is being impacted by others,
 * hence we ignore it.
 */
#define LATENCY_FILTERED_HD (1000L) /* 1ms */

static struct blkcg_policy blkcg_policy_throtl;

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;

/*
 * To implement hierarchical throttling, throtl_grps form a tree and bios
 * are dispatched upwards level by level until they reach the top and get
 * issued. When dispatching bios from the children and local group at each
 * level, if the bios are dispatched into a single bio_list, there's a risk
 * of a local or child group which can queue many bios at once filling up
 * the list starving others.
 *
 * To avoid such starvation, dispatched bios are queued separately
 * according to where they came from. When they are again dispatched to
 * the parent, they're popped in round-robin order so that no single source
 * hogs the dispatch window.
 *
 * throtl_qnode is used to keep the queued bios separated by their sources.
 * Bios are queued to throtl_qnode which in turn is queued to
 * throtl_service_queue and then dispatched in round-robin order.
 *
 * It's also used to track the reference counts on blkg's. A qnode always
 * belongs to a throtl_grp and gets queued on itself or the parent, so
 * incrementing the reference of the associated throtl_grp when a qnode is
 * queued and decrementing when dequeued is enough to keep the whole blkg
 * tree pinned while bios are in flight.
 */
struct throtl_qnode {
	struct list_head	node;		/* service_queue->queued[] */
	struct bio_list		bios;		/* queued bios */
	struct throtl_grp	*tg;		/* tg this qnode belongs to */
};
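
/*
 * Illustrative example: if groups A and B both feed parent P, A's bios
 * sit in A->qnode_on_parent and B's in B->qnode_on_parent on P's
 * queued[] list. throtl_pop_queued() pops one bio and rotates the qnode
 * to the tail of the list, so A and B alternate instead of one group
 * draining completely first.
 */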
struct throtl_service_queue {
	struct throtl_service_queue *parent_sq;	/* the parent service_queue */

	/*
	 * Bios queued directly to this service_queue or dispatched from
	 * children throtl_grp's.
	 */
	struct list_head	queued[2];	/* throtl_qnode [READ/WRITE] */
	unsigned int		nr_queued[2];	/* number of queued bios */

	/*
	 * RB tree of active children throtl_grp's, which are sorted by
	 * their ->disptime.
	 */
	struct rb_root		pending_tree;	/* RB tree of active tgs */
	struct rb_node		*first_pending;	/* first node in the tree */
	unsigned int		nr_pending;	/* # queued in the tree */
	unsigned long		first_pending_disptime;	/* disptime of the first tg */
	struct timer_list	pending_timer;	/* fires on first_pending_disptime */
};

enum tg_state_flags {
	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */
	THROTL_TG_WAS_EMPTY	= 1 << 1,	/* bio_lists[] became non-empty */
};

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

enum {
	LIMIT_LOW,
	LIMIT_MAX,
	LIMIT_CNT,
};

struct throtl_grp {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* active throtl group service_queue member */
	struct rb_node rb_node;

	/* throtl_data this group belongs to */
	struct throtl_data *td;

	/* this group's service queue */
	struct throtl_service_queue service_queue;

	/*
	 * qnode_on_self is used when bios are directly queued to this
	 * throtl_grp so that local bios compete fairly with bios
	 * dispatched from children. qnode_on_parent is used when bios are
	 * dispatched from this throtl_grp into its parent and will compete
	 * with the sibling qnode_on_parents and the parent's
	 * qnode_on_self.
	 */
	struct throtl_qnode qnode_on_self[2];
	struct throtl_qnode qnode_on_parent[2];

	/*
	 * Dispatch time in jiffies. This is the estimated time when group
	 * will unthrottle and is ready to dispatch more bio. It is used as
	 * key to sort active groups in service tree.
	 */
	unsigned long disptime;

	unsigned int flags;

	/* are there any throtl rules between this group and td? */
	bool has_rules[2];

	/* internally used bytes per second rate limits */
	uint64_t bps[2][LIMIT_CNT];
	/* user configured bps limits */
	uint64_t bps_conf[2][LIMIT_CNT];

	/* internally used IOPS limits */
	unsigned int iops[2][LIMIT_CNT];
	/* user configured IOPS limits */
	unsigned int iops_conf[2][LIMIT_CNT];

	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bio's dispatched in current slice */
	unsigned int io_disp[2];

	unsigned long last_low_overflow_time[2];

	uint64_t last_bytes_disp[2];
	unsigned int last_io_disp[2];

	unsigned long last_check_time;

	unsigned long latency_target;		/* us */
	unsigned long latency_target_conf;	/* us */
	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	unsigned long last_finish_time;		/* ns / 1024 */
	unsigned long checked_last_finish_time;	/* ns / 1024 */
	unsigned long avg_idletime;		/* ns / 1024 */
	unsigned long idletime_threshold;	/* us */
	unsigned long idletime_threshold_conf;	/* us */

	unsigned int bio_cnt;		/* total bios */
	unsigned int bad_bio_cnt;	/* bios exceeding latency threshold */
	unsigned long bio_cnt_reset_time;
};

/* We measure latency for request size from <= 4k to >= 1M */
#define LATENCY_BUCKET_SIZE 9

struct latency_bucket {
	unsigned long total_latency;	/* ns / 1024 */
	int samples;
};

struct avg_latency_bucket {
	unsigned long latency;	/* ns / 1024 */
	bool valid;
};
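
/*
 * Illustrative mapping: request_bucket_index() below computes
 * order_base_2(sectors) - 3 clamped to [0, LATENCY_BUCKET_SIZE - 1],
 * so <=4K (8 sectors) lands in bucket 0, 8K in bucket 1, ..., and
 * >=1M (2048 sectors) in bucket 8.
 */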
struct throtl_data {
	/* service tree for active throtl groups */
	struct throtl_service_queue service_queue;

	struct request_queue *queue;

	/* Total number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	unsigned int throtl_slice;

	/* Work for dispatching throttled bios */
	struct work_struct dispatch_work;
	unsigned int limit_index;
	bool limit_valid[LIMIT_CNT];

	unsigned long low_upgrade_time;
	unsigned long low_downgrade_time;

	unsigned int scale;

	struct latency_bucket tmp_buckets[2][LATENCY_BUCKET_SIZE];
	struct avg_latency_bucket avg_buckets[2][LATENCY_BUCKET_SIZE];
	struct latency_bucket __percpu *latency_buckets[2];
	unsigned long last_calculate_time;
	unsigned long filtered_latency;

	bool track_bio_latency;
};

static void throtl_pending_timer_fn(struct timer_list *t);

static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
}

static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
{
	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
}

static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
{
	return pd_to_blkg(&tg->pd);
}

/**
 * sq_to_tg - return the throtl_grp the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * Return the throtl_grp @sq belongs to. If @sq is the top-level one
 * embedded in throtl_data, %NULL is returned.
 */
static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
{
	if (sq && sq->parent_sq)
		return container_of(sq, struct throtl_grp, service_queue);
	else
		return NULL;
}

/**
 * sq_to_td - return throtl_data the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * A service_queue can be embedded in either a throtl_grp or throtl_data.
 * Determine the associated throtl_data accordingly and return it.
 */
static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
{
	struct throtl_grp *tg = sq_to_tg(sq);

	if (tg)
		return tg->td;
	else
		return container_of(sq, struct throtl_data, service_queue);
}
/*
 * A cgroup's limit in LIMIT_MAX is scaled if a low limit is set. This
 * scaling is meant to smooth out IO dispatch.
 * Scale up: linearly, according to the time elapsed since the upgrade. For
 *	every throtl_slice, the limit scales up by 1/2 of the .low limit
 *	till the limit hits the .max limit.
 * Scale down: exponentially, if a cgroup doesn't hit its .low limit.
 */
static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td)
{
	/* arbitrary value to avoid too big scale */
	if (td->scale < 4096 && time_after_eq(jiffies,
	    td->low_upgrade_time + td->scale * td->throtl_slice))
		td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice;

	return low + (low >> 1) * td->scale;
}

static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
{
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td;
	uint64_t ret;

	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
		return U64_MAX;

	td = tg->td;
	ret = tg->bps[rw][td->limit_index];
	if (ret == 0 && td->limit_index == LIMIT_LOW) {
		/* intermediate node or iops isn't 0 */
		if (!list_empty(&blkg->blkcg->css.children) ||
		    tg->iops[rw][td->limit_index])
			return U64_MAX;
		else
			return MIN_THROTL_BPS;
	}

	if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
	    tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
		uint64_t adjusted;

		adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td);
		ret = min(tg->bps[rw][LIMIT_MAX], adjusted);
	}
	return ret;
}

static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
{
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td;
	unsigned int ret;

	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
		return UINT_MAX;

	td = tg->td;
	ret = tg->iops[rw][td->limit_index];
	if (ret == 0 && tg->td->limit_index == LIMIT_LOW) {
		/* intermediate node or bps isn't 0 */
		if (!list_empty(&blkg->blkcg->css.children) ||
		    tg->bps[rw][td->limit_index])
			return UINT_MAX;
		else
			return MIN_THROTL_IOPS;
	}

	if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
	    tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
		uint64_t adjusted;

		adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td);
		if (adjusted > UINT_MAX)
			adjusted = UINT_MAX;
		ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted);
	}
	return ret;
}
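
/*
 * Worked example for throtl_adjusted_limit(): with .low = 100MB/s,
 * td->scale grows by one for every throtl_slice elapsed since the
 * upgrade, giving an effective limit of low + (low / 2) * scale --
 * 150MB/s after one slice, 200MB/s after two -- until it is capped by
 * .max in tg_bps_limit()/tg_iops_limit().
 */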
#define request_bucket_index(sectors) \
	clamp_t(int, order_base_2(sectors) - 3, 0, LATENCY_BUCKET_SIZE - 1)

/**
 * throtl_log - log debug message via blktrace
 * @sq: the service_queue being reported
 * @fmt: printf format string
 * @args: printf args
 *
 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
 * throtl_grp; otherwise, just "throtl".
 */
#define throtl_log(sq, fmt, args...)	do {				\
	struct throtl_grp *__tg = sq_to_tg((sq));			\
	struct throtl_data *__td = sq_to_td((sq));			\
									\
	(void)__td;							\
	if (likely(!blk_trace_note_message_enabled(__td->queue)))	\
		break;							\
	if ((__tg)) {							\
		blk_add_cgroup_trace_msg(__td->queue,			\
			tg_to_blkg(__tg)->blkcg, "throtl " fmt, ##args);\
	} else {							\
		blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);	\
	}								\
} while (0)

static inline unsigned int throtl_bio_data_size(struct bio *bio)
{
	/* assume it's one sector */
	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
		return 512;
	return bio->bi_iter.bi_size;
}

static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
{
	INIT_LIST_HEAD(&qn->node);
	bio_list_init(&qn->bios);
	qn->tg = tg;
}

/**
 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
 * @bio: bio being added
 * @qn: qnode to add bio to
 * @queued: the service_queue->queued[] list @qn belongs to
 *
 * Add @bio to @qn and put @qn on @queued if it's not already on.
 * @qn->tg's reference count is bumped when @qn is activated. See the
 * comment on top of throtl_qnode definition for details.
 */
static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
				 struct list_head *queued)
{
	bio_list_add(&qn->bios, bio);
	if (list_empty(&qn->node)) {
		list_add_tail(&qn->node, queued);
		blkg_get(tg_to_blkg(qn->tg));
	}
}

/**
 * throtl_peek_queued - peek the first bio on a qnode list
 * @queued: the qnode list to peek
 */
static struct bio *throtl_peek_queued(struct list_head *queued)
{
	struct throtl_qnode *qn;
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	qn = list_first_entry(queued, struct throtl_qnode, node);
	bio = bio_list_peek(&qn->bios);
	WARN_ON_ONCE(!bio);
	return bio;
}
/**
 * throtl_pop_queued - pop the first bio from a qnode list
 * @queued: the qnode list to pop a bio from
 * @tg_to_put: optional out argument for throtl_grp to put
 *
 * Pop the first bio from the qnode list @queued. After popping, the first
 * qnode is removed from @queued if empty or moved to the end of @queued so
 * that the popping order is round-robin.
 *
 * When the first qnode is removed, its associated throtl_grp should be put
 * too. If @tg_to_put is NULL, this function automatically puts it;
 * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
 * responsible for putting it.
 */
static struct bio *throtl_pop_queued(struct list_head *queued,
				     struct throtl_grp **tg_to_put)
{
	struct throtl_qnode *qn;
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	qn = list_first_entry(queued, struct throtl_qnode, node);
	bio = bio_list_pop(&qn->bios);
	WARN_ON_ONCE(!bio);

	if (bio_list_empty(&qn->bios)) {
		list_del_init(&qn->node);
		if (tg_to_put)
			*tg_to_put = qn->tg;
		else
			blkg_put(tg_to_blkg(qn->tg));
	} else {
		list_move_tail(&qn->node, queued);
	}

	return bio;
}

/* init a service_queue, assumes the caller zeroed it */
static void throtl_service_queue_init(struct throtl_service_queue *sq)
{
	INIT_LIST_HEAD(&sq->queued[0]);
	INIT_LIST_HEAD(&sq->queued[1]);
	sq->pending_tree = RB_ROOT;
	timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0);
}

static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
{
	struct throtl_grp *tg;
	int rw;

	tg = kzalloc_node(sizeof(*tg), gfp, node);
	if (!tg)
		return NULL;

	throtl_service_queue_init(&tg->service_queue);

	for (rw = READ; rw <= WRITE; rw++) {
		throtl_qnode_init(&tg->qnode_on_self[rw], tg);
		throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
	}

	RB_CLEAR_NODE(&tg->rb_node);
	tg->bps[READ][LIMIT_MAX] = U64_MAX;
	tg->bps[WRITE][LIMIT_MAX] = U64_MAX;
	tg->iops[READ][LIMIT_MAX] = UINT_MAX;
	tg->iops[WRITE][LIMIT_MAX] = UINT_MAX;
	tg->bps_conf[READ][LIMIT_MAX] = U64_MAX;
	tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
	tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
	tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
	/* LIMIT_LOW will have default value 0 */

	tg->latency_target = DFL_LATENCY_TARGET;
	tg->latency_target_conf = DFL_LATENCY_TARGET;
	tg->idletime_threshold = DFL_IDLE_THRESHOLD;
	tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD;

	return &tg->pd;
}

static void throtl_pd_init(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td = blkg->q->td;
	struct throtl_service_queue *sq = &tg->service_queue;

	/*
	 * If on the default hierarchy, we switch to properly hierarchical
	 * behavior where limits on a given throtl_grp are applied to the
	 * whole subtree rather than just the group itself. e.g. If 16M
	 * read_bps limit is set on the root group, the whole system can't
	 * exceed 16M for the device.
	 *
	 * If not on the default hierarchy, the broken flat hierarchy
	 * behavior is retained where all throtl_grps are treated as if
	 * they're all separate root groups right below throtl_data.
	 * Limits of a group don't interact with limits of other groups
	 * regardless of the position of the group in the hierarchy.
	 */
	sq->parent_sq = &td->service_queue;
	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
		sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
	tg->td = td;
}
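
/*
 * For example, on the default hierarchy the parent_sq chain of a tg for
 * cgroup "a/b" mirrors the cgroup tree: tg(a/b) -> tg(a) ->
 * td->service_queue, while on the legacy hierarchy every tg's parent_sq
 * points directly at td->service_queue.
 */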
/*
 * Set has_rules[] if @tg or any of its parents have limits configured.
 * This doesn't require walking up to the top of the hierarchy as the
 * parent's has_rules[] is guaranteed to be correct.
 */
static void tg_update_has_rules(struct throtl_grp *tg)
{
	struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
	struct throtl_data *td = tg->td;
	int rw;

	for (rw = READ; rw <= WRITE; rw++)
		tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
			(td->limit_valid[td->limit_index] &&
			 (tg_bps_limit(tg, rw) != U64_MAX ||
			  tg_iops_limit(tg, rw) != UINT_MAX));
}

static void throtl_pd_online(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	/*
	 * We don't want new groups to escape the limits of their ancestors.
	 * Update has_rules[] after a new group is brought online.
	 */
	tg_update_has_rules(tg);
}

static void blk_throtl_update_limit_valid(struct throtl_data *td)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;
	bool low_valid = false;

	rcu_read_lock();
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);

		if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
		    tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) {
			low_valid = true;
			break;
		}
	}
	rcu_read_unlock();

	td->limit_valid[LIMIT_LOW] = low_valid;
}

static void throtl_upgrade_state(struct throtl_data *td);

static void throtl_pd_offline(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);

	tg->bps[READ][LIMIT_LOW] = 0;
	tg->bps[WRITE][LIMIT_LOW] = 0;
	tg->iops[READ][LIMIT_LOW] = 0;
	tg->iops[WRITE][LIMIT_LOW] = 0;

	blk_throtl_update_limit_valid(tg->td);

	if (!tg->td->limit_valid[tg->td->limit_index])
		throtl_upgrade_state(tg->td);
}

static void throtl_pd_free(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);

	del_timer_sync(&tg->service_queue.pending_timer);
	kfree(tg);
}

static struct throtl_grp *
throtl_rb_first(struct throtl_service_queue *parent_sq)
{
	/* Service tree is empty */
	if (!parent_sq->nr_pending)
		return NULL;

	if (!parent_sq->first_pending)
		parent_sq->first_pending = rb_first(&parent_sq->pending_tree);

	if (parent_sq->first_pending)
		return rb_entry_tg(parent_sq->first_pending);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void throtl_rb_erase(struct rb_node *n,
			    struct throtl_service_queue *parent_sq)
{
	if (parent_sq->first_pending == n)
		parent_sq->first_pending = NULL;
	rb_erase_init(n, &parent_sq->pending_tree);
	--parent_sq->nr_pending;
}

static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(parent_sq);
	if (!tg)
		return;

	parent_sq->first_pending_disptime = tg->disptime;
}

static void tg_service_queue_add(struct throtl_grp *tg)
{
	struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
	struct rb_node **node = &parent_sq->pending_tree.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		parent_sq->first_pending = &tg->rb_node;

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color(&tg->rb_node, &parent_sq->pending_tree);
}

static void __throtl_enqueue_tg(struct throtl_grp *tg)
{
	tg_service_queue_add(tg);
	tg->flags |= THROTL_TG_PENDING;
	tg->service_queue.parent_sq->nr_pending++;
}

static void throtl_enqueue_tg(struct throtl_grp *tg)
{
	if (!(tg->flags & THROTL_TG_PENDING))
		__throtl_enqueue_tg(tg);
}

static void __throtl_dequeue_tg(struct throtl_grp *tg)
{
	throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
	tg->flags &= ~THROTL_TG_PENDING;
}

static void throtl_dequeue_tg(struct throtl_grp *tg)
{
	if (tg->flags & THROTL_TG_PENDING)
		__throtl_dequeue_tg(tg);
}

/* Call with queue lock held */
static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
					  unsigned long expires)
{
	unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;

	/*
	 * Since we are adjusting the throttle limit dynamically, the sleep
	 * time calculated according to previous limit might be invalid. It's
	 * possible the cgroup sleep time is very long and no other cgroups
	 * have IO running so notify the limit changes. Make sure the cgroup
	 * doesn't sleep too long to avoid missing the notification.
	 */
	if (time_after(expires, max_expire))
		expires = max_expire;
	mod_timer(&sq->pending_timer, expires);
	throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
		   expires - jiffies, jiffies);
}

/**
 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
 * @sq: the service_queue to schedule dispatch for
 * @force: force scheduling
 *
 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
 * dispatch time of the first pending child. Returns %true if either timer
 * is armed or there's no pending child left. %false if the current
 * dispatch window is still open and the caller should continue
 * dispatching.
 *
 * If @force is %true, the dispatch timer is always scheduled and this
 * function is guaranteed to return %true. This is to be used when the
 * caller can't dispatch itself and needs to invoke pending_timer
 * unconditionally. Note that forced scheduling is likely to induce a short
 * delay before dispatch starts even if @sq->first_pending_disptime is not
 * in the future and thus shouldn't be used in hot paths.
 */
static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
					  bool force)
{
	/* any pending children left? */
	if (!sq->nr_pending)
		return true;

	update_min_dispatch_time(sq);

	/* is the next dispatch time in the future? */
	if (force || time_after(sq->first_pending_disptime, jiffies)) {
		throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
		return true;
	}

	/* tell the caller to continue dispatching */
	return false;
}
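
/*
 * Illustrative numbers: with HZ=1000 and throtl_slice=HZ/10, a sleep
 * time of 30s computed under an old limit is clamped by
 * throtl_schedule_pending_timer() to jiffies + 8 * 100 jiffies, so a
 * limit change is noticed within ~0.8s rather than after 30s.
 */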
static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
						      bool rw, unsigned long start)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;

	/*
	 * Previous slice has expired. We must have trimmed it after last
	 * bio dispatch. That means since start of last slice, we never used
	 * that bandwidth. Do try to make use of that bandwidth while giving
	 * credit.
	 */
	if (time_after_eq(start, tg->slice_start[rw]))
		tg->slice_start[rw] = start;

	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
	throtl_log(&tg->service_queue,
		   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
	throtl_log(&tg->service_queue,
		   "[%c] new slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
					unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
				       unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
	throtl_log(&tg->service_queue,
		   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

/* Determine if previously allocated or extended slice is complete or not */
static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return false;

	return true;
}
/* Trim the used slices and adjust slice start accordingly */
static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, time_elapsed, io_trim;
	u64 bytes_trim, tmp;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps are unlimited (-1), then the time slice doesn't get
	 * renewed. Don't try to trim the slice if slice is used. A new
	 * slice will start when appropriate.
	 */
	if (throtl_slice_used(tg, rw))
		return;

	/*
	 * A bio has been dispatched. Also adjust slice_end. It might happen
	 * that initially cgroup limit was very low resulting in high
	 * slice_end, but later limit was bumped up and bio was dispatched
	 * sooner, then we need to reduce slice_end. A high bogus slice_end
	 * is bad because it does not allow new slice to start.
	 */

	throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / tg->td->throtl_slice;

	if (!nr_slices)
		return;
	tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
	do_div(tmp, HZ);
	bytes_trim = tmp;

	io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
		  HZ;

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * tg->td->throtl_slice;

	throtl_log(&tg->service_queue,
		   "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
		   tg->slice_start[rw], tg->slice_end[rw], jiffies);
}

static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
				  unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	u64 tmp;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = tg->td->throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a big value: the minimum iops is
	 * 1, so at most a second's worth of elapsed time matters; we allow
	 * dispatch after 1 second and by then the slice should have been
	 * trimmed.
	 */

	tmp = (u64)tg_iops_limit(tg, rw) * jiffy_elapsed_rnd;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = jiffy_elapsed_rnd - jiffy_elapsed;

	if (wait)
		*wait = jiffy_wait;
	return false;
}

static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
				 unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	unsigned int bio_size = throtl_bio_data_size(bio);

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = tg->td->throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);

	tmp = tg_bps_limit(tg, rw) * jiffy_elapsed_rnd;
	do_div(tmp, HZ);
	bytes_allowed = tmp;

	if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw));

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time is without taking into consideration the rounding
	 * up we did. Add that time also.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return false;
}
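
/*
 * Worked example (illustrative, HZ=1000): with a 1MB/s limit and 200
 * jiffies elapsed in the slice, bytes_allowed = 1048576 * 200 / 1000
 * ~= 205KB. A queued 256KB bio with 100KB already dispatched leaves
 * extra_bytes ~= 151KB, so jiffy_wait ~= 151KB * HZ / 1MB ~= 147
 * jiffies before tg_with_in_bps_limit() lets it through.
 */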
/*
 * Returns whether one can dispatch a bio or not. Also returns approx number
 * of jiffies to wait before this bio is within the IO rate and can be
 * dispatched.
 */
static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
			    unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

	/*
	 * Currently whole state machine of group depends on first bio
	 * queued in the group bio list. So one should not be calling
	 * this function with a different bio if there are other bios
	 * queued.
	 */
	BUG_ON(tg->service_queue.nr_queued[rw] &&
	       bio != throtl_peek_queued(&tg->service_queue.queued[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (tg_bps_limit(tg, rw) == U64_MAX &&
	    tg_iops_limit(tg, rw) == UINT_MAX) {
		if (wait)
			*wait = 0;
		return true;
	}

	/*
	 * If previous slice expired, start a new one otherwise renew/extend
	 * existing slice to make sure it is at least throtl_slice interval
	 * long since now. New slice is started only for empty throttle group.
	 * If there is queued bio, that means there should be an active
	 * slice and it should be extended instead.
	 */
	if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
		throtl_start_new_slice(tg, rw);
	else {
		if (time_before(tg->slice_end[rw],
				jiffies + tg->td->throtl_slice))
			throtl_extend_slice(tg, rw,
					    jiffies + tg->td->throtl_slice);
	}

	if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
	    tg_with_in_iops_limit(tg, bio, &iops_wait)) {
		if (wait)
			*wait = 0;
		return true;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(tg, rw, jiffies + max_wait);

	return false;
}

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);
	unsigned int bio_size = throtl_bio_data_size(bio);

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio_size;
	tg->io_disp[rw]++;
	tg->last_bytes_disp[rw] += bio_size;
	tg->last_io_disp[rw]++;

	/*
	 * BIO_THROTTLED is used to prevent the same bio from being
	 * throttled more than once, as a throttled bio will go through
	 * blk-throtl a second time when it eventually gets issued. Set it
	 * when a bio is being charged to a tg.
	 */
	if (!bio_flagged(bio, BIO_THROTTLED))
		bio_set_flag(bio, BIO_THROTTLED);
}

/**
 * throtl_add_bio_tg - add a bio to the specified throtl_grp
 * @bio: bio to add
 * @qn: qnode to use
 * @tg: the target throtl_grp
 *
 * Add @bio to @tg's service_queue using @qn. If @qn is not specified,
 * tg->qnode_on_self[] is used.
 */
static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
			      struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	bool rw = bio_data_dir(bio);

	if (!qn)
		qn = &tg->qnode_on_self[rw];

	/*
	 * If @tg doesn't currently have any bios queued in the same
	 * direction, queueing @bio can change when @tg should be
	 * dispatched. Mark that @tg was empty. This is automatically
	 * cleared on the next tg_update_disptime().
	 */
	if (!sq->nr_queued[rw])
		tg->flags |= THROTL_TG_WAS_EMPTY;

	throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);

	sq->nr_queued[rw]++;
	throtl_enqueue_tg(tg);
}

static void tg_update_disptime(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	bio = throtl_peek_queued(&sq->queued[READ]);
	if (bio)
		tg_may_dispatch(tg, bio, &read_wait);

	bio = throtl_peek_queued(&sq->queued[WRITE]);
	if (bio)
		tg_may_dispatch(tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(tg);

	/* see throtl_add_bio_tg() */
	tg->flags &= ~THROTL_TG_WAS_EMPTY;
}

static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
					   struct throtl_grp *parent_tg, bool rw)
{
	if (throtl_slice_used(parent_tg, rw)) {
		throtl_start_new_slice_with_credit(parent_tg, rw,
						   child_tg->slice_start[rw]);
	}
}

static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	struct throtl_service_queue *parent_sq = sq->parent_sq;
	struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
	struct throtl_grp *tg_to_put = NULL;
	struct bio *bio;

	/*
	 * @bio is being transferred from @tg to @parent_sq. Popping a bio
	 * from @tg may put its reference and @parent_sq might end up
	 * getting released prematurely. Remember the tg to put and put it
	 * after @bio is transferred to @parent_sq.
	 */
	bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
	sq->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);

	/*
	 * If our parent is another tg, we just need to transfer @bio to
	 * the parent using throtl_add_bio_tg(). If our parent is
	 * @td->service_queue, @bio is ready to be issued. Put it on its
	 * bio_lists[] and decrease total number queued. The caller is
	 * responsible for issuing these bios.
	 */
	if (parent_tg) {
		throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
		start_parent_slice_with_credit(tg, parent_tg, rw);
	} else {
		throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
				     &parent_sq->queued[rw]);
		BUG_ON(tg->td->nr_queued[rw] <= 0);
		tg->td->nr_queued[rw]--;
	}

	throtl_trim_slice(tg, rw);

	if (tg_to_put)
		blkg_put(tg_to_blkg(tg_to_put));
}

static int throtl_dispatch_tg(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = throtl_grp_quantum * 3 / 4;
	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */

	while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}

static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
{
	unsigned int nr_disp = 0;

	while (1) {
		struct throtl_grp *tg = throtl_rb_first(parent_sq);
		struct throtl_service_queue *sq;

		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		throtl_dequeue_tg(tg);

		nr_disp += throtl_dispatch_tg(tg);

		sq = &tg->service_queue;
		if (sq->nr_queued[0] || sq->nr_queued[1])
			tg_update_disptime(tg);

		if (nr_disp >= throtl_quantum)
			break;
	}

	return nr_disp;
}
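
/*
 * Example: with throtl_grp_quantum = 8, throtl_dispatch_tg() moves at
 * most 6 reads and 2 writes per group per round (the 75%/25% split),
 * and throtl_select_dispatch() stops a round once throtl_quantum (32)
 * bios have been dispatched across all groups.
 */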
static bool throtl_can_upgrade(struct throtl_data *td,
			       struct throtl_grp *this_tg);

/**
 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
 * @t: the pending_timer member of the throtl_service_queue being serviced
 *
 * This timer is armed when a child throtl_grp with active bio's becomes
 * pending and queued on the service_queue's pending_tree and expires when
 * the first child throtl_grp should be dispatched. This function
 * dispatches bio's from the children throtl_grps to the parent
 * service_queue.
 *
 * If the parent's parent is another throtl_grp, dispatching is propagated
 * by either arming its pending_timer or repeating dispatch directly. If
 * the top-level service_tree is reached, throtl_data->dispatch_work is
 * kicked so that the ready bio's are issued.
 */
static void throtl_pending_timer_fn(struct timer_list *t)
{
	struct throtl_service_queue *sq = from_timer(sq, t, pending_timer);
	struct throtl_grp *tg = sq_to_tg(sq);
	struct throtl_data *td = sq_to_td(sq);
	struct request_queue *q = td->queue;
	struct throtl_service_queue *parent_sq;
	bool dispatched;
	int ret;

	spin_lock_irq(q->queue_lock);
	if (throtl_can_upgrade(td, NULL))
		throtl_upgrade_state(td);

again:
	parent_sq = sq->parent_sq;
	dispatched = false;

	while (true) {
		throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
			   sq->nr_queued[READ] + sq->nr_queued[WRITE],
			   sq->nr_queued[READ], sq->nr_queued[WRITE]);

		ret = throtl_select_dispatch(sq);
		if (ret) {
			throtl_log(sq, "bios disp=%u", ret);
			dispatched = true;
		}

		if (throtl_schedule_next_dispatch(sq, false))
			break;

		/* this dispatch window is still open, relax and repeat */
		spin_unlock_irq(q->queue_lock);
		cpu_relax();
		spin_lock_irq(q->queue_lock);
	}

	if (!dispatched)
		goto out_unlock;

	if (parent_sq) {
		/* @parent_sq is another throtl_grp, propagate dispatch */
		if (tg->flags & THROTL_TG_WAS_EMPTY) {
			tg_update_disptime(tg);
			if (!throtl_schedule_next_dispatch(parent_sq, false)) {
				/* window is already open, repeat dispatching */
				sq = parent_sq;
				tg = sq_to_tg(sq);
				goto again;
			}
		}
	} else {
		/* reached the top-level, queue issuing */
		queue_work(kthrotld_workqueue, &td->dispatch_work);
	}
out_unlock:
	spin_unlock_irq(q->queue_lock);
}

/**
 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
 * @work: work item being executed
 *
 * This function is queued for execution when bio's reach the bio_lists[]
 * of throtl_data->service_queue. Those bio's are ready and issued by this
 * function.
 */
static void blk_throtl_dispatch_work_fn(struct work_struct *work)
{
	struct throtl_data *td = container_of(work, struct throtl_data,
					      dispatch_work);
	struct throtl_service_queue *td_sq = &td->service_queue;
	struct request_queue *q = td->queue;
	struct bio_list bio_list_on_stack;
	struct bio *bio;
	struct blk_plug plug;
	int rw;

	bio_list_init(&bio_list_on_stack);

	spin_lock_irq(q->queue_lock);
	for (rw = READ; rw <= WRITE; rw++)
		while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
			bio_list_add(&bio_list_on_stack, bio);
	spin_unlock_irq(q->queue_lock);

	if (!bio_list_empty(&bio_list_on_stack)) {
		blk_start_plug(&plug);
		while ((bio = bio_list_pop(&bio_list_on_stack)))
			generic_make_request(bio);
		blk_finish_plug(&plug);
	}
}

static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
			      int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	u64 v = *(u64 *)((void *)tg + off);

	if (v == U64_MAX)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	unsigned int v = *(unsigned int *)((void *)tg + off);

	if (v == UINT_MAX)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static int tg_print_conf_u64(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}

static int tg_print_conf_uint(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}
static void tg_conf_updated(struct throtl_grp *tg, bool global)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	throtl_log(&tg->service_queue,
		   "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
		   tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
		   tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));

	/*
	 * Update has_rules[] flags for the updated tg's subtree. A tg is
	 * considered to have rules if either the tg itself or any of its
	 * ancestors has rules. This identifies groups without any
	 * restrictions in the whole hierarchy and allows them to bypass
	 * blk-throttle.
	 */
	blkg_for_each_descendant_pre(blkg, pos_css,
			global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
		struct throtl_grp *this_tg = blkg_to_tg(blkg);
		struct throtl_grp *parent_tg;

		tg_update_has_rules(this_tg);
		/* ignore root/second level */
		if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
		    !blkg->parent->parent)
			continue;
		parent_tg = blkg_to_tg(blkg->parent);
		/*
		 * make sure all children have a lower idle time threshold
		 * and a higher latency target
		 */
		this_tg->idletime_threshold = min(this_tg->idletime_threshold,
				parent_tg->idletime_threshold);
		this_tg->latency_target = max(this_tg->latency_target,
				parent_tg->latency_target);
	}

	/*
	 * We're already holding queue_lock and know @tg is valid. Let's
	 * apply the new config directly.
	 *
	 * Restart the slices for both READ and WRITES. It might happen
	 * that a group's limits are dropped suddenly and we don't want to
	 * account recently dispatched IO with the new low rate.
	 */
	throtl_start_new_slice(tg, 0);
	throtl_start_new_slice(tg, 1);

	if (tg->flags & THROTL_TG_PENDING) {
		tg_update_disptime(tg);
		throtl_schedule_next_dispatch(sq->parent_sq, true);
	}
}

static ssize_t tg_set_conf(struct kernfs_open_file *of,
			   char *buf, size_t nbytes, loff_t off, bool is_u64)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	struct throtl_grp *tg;
	int ret;
	u64 v;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (sscanf(ctx.body, "%llu", &v) != 1)
		goto out_finish;
	if (!v)
		v = U64_MAX;

	tg = blkg_to_tg(ctx.blkg);

	if (is_u64)
		*(u64 *)((void *)tg + of_cft(of)->private) = v;
	else
		*(unsigned int *)((void *)tg + of_cft(of)->private) = v;

	tg_conf_updated(tg, false);
	ret = 0;
out_finish:
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}

static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
			       char *buf, size_t nbytes, loff_t off)
{
	return tg_set_conf(of, buf, nbytes, off, true);
}

static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	return tg_set_conf(of, buf, nbytes, off, false);
}

static struct cftype throtl_legacy_files[] = {
	{
		.name = "throttle.read_bps_device",
		.private = offsetof(struct throtl_grp, bps[READ][LIMIT_MAX]),
		.seq_show = tg_print_conf_u64,
		.write = tg_set_conf_u64,
	},
	{
		.name = "throttle.write_bps_device",
		.private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]),
		.seq_show = tg_print_conf_u64,
		.write = tg_set_conf_u64,
	},
	{
		.name = "throttle.read_iops_device",
		.private = offsetof(struct throtl_grp, iops[READ][LIMIT_MAX]),
		.seq_show = tg_print_conf_uint,
		.write = tg_set_conf_uint,
	},
	{
		.name = "throttle.write_iops_device",
		.private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]),
		.seq_show = tg_print_conf_uint,
		.write = tg_set_conf_uint,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = (unsigned long)&blkcg_policy_throtl,
		.seq_show = blkg_print_stat_bytes,
	},
	{
		.name = "throttle.io_service_bytes_recursive",
		.private = (unsigned long)&blkcg_policy_throtl,
		.seq_show = blkg_print_stat_bytes_recursive,
	},
	{
		.name = "throttle.io_serviced",
		.private = (unsigned long)&blkcg_policy_throtl,
		.seq_show = blkg_print_stat_ios,
	},
	{
		.name = "throttle.io_serviced_recursive",
		.private = (unsigned long)&blkcg_policy_throtl,
		.seq_show = blkg_print_stat_ios_recursive,
	},
	{ }	/* terminate */
};
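
/*
 * Legacy (cgroup v1) usage, for reference: each file above takes
 * "<major>:<minor> <value>", e.g.
 *
 *   echo "8:16 1048576" > blkio.throttle.read_bps_device
 *
 * caps reads on device 8:16 at 1MB/s; writing 0 removes the limit
 * (tg_set_conf() maps 0 back to "unlimited").
 */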
static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
			   int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	const char *dname = blkg_dev_name(pd->blkg);
	char bufs[4][21] = { "max", "max", "max", "max" };
	u64 bps_dft;
	unsigned int iops_dft;
	char idle_time[26] = "";
	char latency_time[26] = "";

	if (!dname)
		return 0;

	if (off == LIMIT_LOW) {
		bps_dft = 0;
		iops_dft = 0;
	} else {
		bps_dft = U64_MAX;
		iops_dft = UINT_MAX;
	}

	if (tg->bps_conf[READ][off] == bps_dft &&
	    tg->bps_conf[WRITE][off] == bps_dft &&
	    tg->iops_conf[READ][off] == iops_dft &&
	    tg->iops_conf[WRITE][off] == iops_dft &&
	    (off != LIMIT_LOW ||
	     (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD &&
	      tg->latency_target_conf == DFL_LATENCY_TARGET)))
		return 0;

	if (tg->bps_conf[READ][off] != U64_MAX)
		snprintf(bufs[0], sizeof(bufs[0]), "%llu",
			 tg->bps_conf[READ][off]);
	if (tg->bps_conf[WRITE][off] != U64_MAX)
		snprintf(bufs[1], sizeof(bufs[1]), "%llu",
			 tg->bps_conf[WRITE][off]);
	if (tg->iops_conf[READ][off] != UINT_MAX)
		snprintf(bufs[2], sizeof(bufs[2]), "%u",
			 tg->iops_conf[READ][off]);
	if (tg->iops_conf[WRITE][off] != UINT_MAX)
		snprintf(bufs[3], sizeof(bufs[3]), "%u",
			 tg->iops_conf[WRITE][off]);
	if (off == LIMIT_LOW) {
		if (tg->idletime_threshold_conf == ULONG_MAX)
			strcpy(idle_time, " idle=max");
		else
			snprintf(idle_time, sizeof(idle_time), " idle=%lu",
				 tg->idletime_threshold_conf);

		if (tg->latency_target_conf == ULONG_MAX)
			strcpy(latency_time, " latency=max");
		else
			snprintf(latency_time, sizeof(latency_time),
				 " latency=%lu", tg->latency_target_conf);
	}

	seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n",
		   dname, bufs[0], bufs[1], bufs[2], bufs[3], idle_time,
		   latency_time);
	return 0;
}

static int tg_print_limit(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}
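
/*
 * The cgroup v2 syntax accepted by tg_set_limit() below is
 * "<major>:<minor> [rbps=N] [wbps=N] [riops=N] [wiops=N]", plus
 * "idle=N" and "latency=N" for io.low, with "max" clearing a value,
 * e.g.
 *
 *   echo "8:16 rbps=2097152 wiops=120" > io.max
 */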
static ssize_t tg_set_limit(struct kernfs_open_file *of,
			    char *buf, size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	struct throtl_grp *tg;
	u64 v[4];
	unsigned long idle_time;
	unsigned long latency_time;
	int ret;
	int index = of_cft(of)->private;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
	if (ret)
		return ret;

	tg = blkg_to_tg(ctx.blkg);

	v[0] = tg->bps_conf[READ][index];
	v[1] = tg->bps_conf[WRITE][index];
	v[2] = tg->iops_conf[READ][index];
	v[3] = tg->iops_conf[WRITE][index];

	idle_time = tg->idletime_threshold_conf;
	latency_time = tg->latency_target_conf;
	while (true) {
		char tok[27];	/* wiops=18446744073709551616 */
		char *p;
		u64 val = U64_MAX;
		int len;

		if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
			break;
		if (tok[0] == '\0')
			break;
		ctx.body += len;

		ret = -EINVAL;
		p = tok;
		strsep(&p, "=");
		if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
			goto out_finish;

		ret = -ERANGE;
		if (!val)
			goto out_finish;

		ret = -EINVAL;
		if (!strcmp(tok, "rbps"))
			v[0] = val;
		else if (!strcmp(tok, "wbps"))
			v[1] = val;
		else if (!strcmp(tok, "riops"))
			v[2] = min_t(u64, val, UINT_MAX);
		else if (!strcmp(tok, "wiops"))
			v[3] = min_t(u64, val, UINT_MAX);
		else if (off == LIMIT_LOW && !strcmp(tok, "idle"))
			idle_time = val;
		else if (off == LIMIT_LOW && !strcmp(tok, "latency"))
			latency_time = val;
		else
			goto out_finish;
	}

	tg->bps_conf[READ][index] = v[0];
	tg->bps_conf[WRITE][index] = v[1];
	tg->iops_conf[READ][index] = v[2];
	tg->iops_conf[WRITE][index] = v[3];

	if (index == LIMIT_MAX) {
		tg->bps[READ][index] = v[0];
		tg->bps[WRITE][index] = v[1];
		tg->iops[READ][index] = v[2];
		tg->iops[WRITE][index] = v[3];
	}
	tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW],
		tg->bps_conf[READ][LIMIT_MAX]);
	tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],
		tg->bps_conf[WRITE][LIMIT_MAX]);
	tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW],
		tg->iops_conf[READ][LIMIT_MAX]);
	tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
		tg->iops_conf[WRITE][LIMIT_MAX]);
	tg->idletime_threshold_conf = idle_time;
	tg->latency_target_conf = latency_time;

	/* force user to configure all settings for low limit */
	if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] ||
	      tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) ||
	    tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD ||
	    tg->latency_target_conf == DFL_LATENCY_TARGET) {
		tg->bps[READ][LIMIT_LOW] = 0;
		tg->bps[WRITE][LIMIT_LOW] = 0;
		tg->iops[READ][LIMIT_LOW] = 0;
		tg->iops[WRITE][LIMIT_LOW] = 0;
		tg->idletime_threshold = DFL_IDLE_THRESHOLD;
		tg->latency_target = DFL_LATENCY_TARGET;
	} else if (index == LIMIT_LOW) {
		tg->idletime_threshold = tg->idletime_threshold_conf;
		tg->latency_target = tg->latency_target_conf;
	}

	blk_throtl_update_limit_valid(tg->td);
	if (tg->td->limit_valid[LIMIT_LOW]) {
		if (index == LIMIT_LOW)
			tg->td->limit_index = LIMIT_LOW;
	} else
		tg->td->limit_index = LIMIT_MAX;
	tg_conf_updated(tg, index == LIMIT_LOW &&
		tg->td->limit_valid[LIMIT_LOW]);
	ret = 0;
out_finish:
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}

static struct cftype throtl_files[] = {
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	{
		.name = "low",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = tg_print_limit,
		.write = tg_set_limit,
		.private = LIMIT_LOW,
	},
#endif
	{
		.name = "max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = tg_print_limit,
		.write = tg_set_limit,
		.private = LIMIT_MAX,
	},
	{ }	/* terminate */
};

static void throtl_shutdown_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_work_sync(&td->dispatch_work);
}

static struct blkcg_policy blkcg_policy_throtl = {
	.dfl_cftypes		= throtl_files,
	.legacy_cftypes		= throtl_legacy_files,

	.pd_alloc_fn		= throtl_pd_alloc,
	.pd_init_fn		= throtl_pd_init,
	.pd_online_fn		= throtl_pd_online,
	.pd_offline_fn		= throtl_pd_offline,
	.pd_free_fn		= throtl_pd_free,
};

static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg)
{
	unsigned long rtime = jiffies, wtime = jiffies;

	if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW])
		rtime = tg->last_low_overflow_time[READ];
	if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
		wtime = tg->last_low_overflow_time[WRITE];
	return min(rtime, wtime);
}
/* tg should not be an intermediate node */
static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg)
{
	struct throtl_service_queue *parent_sq;
	struct throtl_grp *parent = tg;
	unsigned long ret = __tg_last_low_overflow_time(tg);

	while (true) {
		parent_sq = parent->service_queue.parent_sq;
		parent = sq_to_tg(parent_sq);
		if (!parent)
			break;

		/*
		 * The parent doesn't have a low limit, so it always reaches
		 * its low limit. Its overflow time is useless for children.
		 */
		if (!parent->bps[READ][LIMIT_LOW] &&
		    !parent->iops[READ][LIMIT_LOW] &&
		    !parent->bps[WRITE][LIMIT_LOW] &&
		    !parent->iops[WRITE][LIMIT_LOW])
			continue;
		if (time_after(__tg_last_low_overflow_time(parent), ret))
			ret = __tg_last_low_overflow_time(parent);
	}
	return ret;
}

static bool throtl_tg_is_idle(struct throtl_grp *tg)
{
	/*
	 * cgroup is idle if:
	 * - single idle is too long, longer than a fixed value (in case user
	 *   configures a too big threshold) or 4 times of idletime threshold
	 * - average think time is more than threshold
	 * - IO latency is largely below threshold
	 */
	unsigned long time;
	bool ret;

	time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold);
	ret = tg->latency_target == DFL_LATENCY_TARGET ||
	      tg->idletime_threshold == DFL_IDLE_THRESHOLD ||
	      (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
	      tg->avg_idletime > tg->idletime_threshold ||
	      (tg->latency_target && tg->bio_cnt &&
	       tg->bad_bio_cnt * 5 < tg->bio_cnt);
	throtl_log(&tg->service_queue,
		   "avg_idle=%ld, idle_threshold=%ld, bad_bio=%d, total_bio=%d, is_idle=%d, scale=%d",
		   tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt,
		   tg->bio_cnt, ret, tg->td->scale);
	return ret;
}

static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	bool read_limit, write_limit;

	/*
	 * if cgroup reaches low limit (if low limit is 0, the cgroup always
	 * reaches), it's ok to upgrade to next limit
	 */
	read_limit = tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW];
	write_limit = tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW];
	if (!read_limit && !write_limit)
		return true;
	if (read_limit && sq->nr_queued[READ] &&
	    (!write_limit || sq->nr_queued[WRITE]))
		return true;
	if (write_limit && sq->nr_queued[WRITE] &&
	    (!read_limit || sq->nr_queued[READ]))
		return true;

	if (time_after_eq(jiffies,
		tg_last_low_overflow_time(tg) + tg->td->throtl_slice) &&
	    throtl_tg_is_idle(tg))
		return true;
	return false;
}

static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg)
{
	while (true) {
		if (throtl_tg_can_upgrade(tg))
			return true;
		tg = sq_to_tg(tg->service_queue.parent_sq);
		if (!tg || !tg_to_blkg(tg)->parent)
			return false;
	}
	return false;
}

static bool throtl_can_upgrade(struct throtl_data *td,
			       struct throtl_grp *this_tg)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	if (td->limit_index != LIMIT_LOW)
		return false;

	if (time_before(jiffies, td->low_downgrade_time + td->throtl_slice))
		return false;

	rcu_read_lock();
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);

		if (tg == this_tg)
			continue;
		if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
			continue;
		if (!throtl_hierarchy_can_upgrade(tg)) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}
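
/*
 * Putting the above together: the device leaves LIMIT_LOW only when
 * every leaf cgroup either still has bios queued on each of its
 * rate-limited directions (it met .low and wants more) or has gone
 * idle per throtl_tg_is_idle(). Roughly, one busy leaf cgroup that is
 * still below its .low and not idle (with no upgradable ancestor)
 * holds the whole device at the low limit.
 */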

static void throtl_upgrade_check(struct throtl_grp *tg)
{
	unsigned long now = jiffies;

	if (tg->td->limit_index != LIMIT_LOW)
		return;

	if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
		return;

	tg->last_check_time = now;

	if (!time_after_eq(now,
	     __tg_last_low_overflow_time(tg) + tg->td->throtl_slice))
		return;

	if (throtl_can_upgrade(tg->td, NULL))
		throtl_upgrade_state(tg->td);
}

static void throtl_upgrade_state(struct throtl_data *td)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	throtl_log(&td->service_queue, "upgrade to max");
	td->limit_index = LIMIT_MAX;
	td->low_upgrade_time = jiffies;
	td->scale = 0;
	rcu_read_lock();
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);
		struct throtl_service_queue *sq = &tg->service_queue;

		tg->disptime = jiffies - 1;
		throtl_select_dispatch(sq);
		throtl_schedule_next_dispatch(sq, true);
	}
	rcu_read_unlock();
	throtl_select_dispatch(&td->service_queue);
	throtl_schedule_next_dispatch(&td->service_queue, true);
	queue_work(kthrotld_workqueue, &td->dispatch_work);
}

static void throtl_downgrade_state(struct throtl_data *td, int new)
{
	td->scale /= 2;

	throtl_log(&td->service_queue, "downgrade, scale %d", td->scale);
	if (td->scale) {
		td->low_upgrade_time = jiffies - td->scale * td->throtl_slice;
		return;
	}

	td->limit_index = new;
	td->low_downgrade_time = jiffies;
}

static bool throtl_tg_can_downgrade(struct throtl_grp *tg)
{
	struct throtl_data *td = tg->td;
	unsigned long now = jiffies;

	/*
	 * If the cgroup is below its low limit, consider downgrading so
	 * that the other cgroups are throttled again.
	 */
	if (time_after_eq(now, td->low_upgrade_time + td->throtl_slice) &&
	    time_after_eq(now, tg_last_low_overflow_time(tg) +
					td->throtl_slice) &&
	    (!throtl_tg_is_idle(tg) ||
	     !list_empty(&tg_to_blkg(tg)->blkcg->css.children)))
		return true;
	return false;
}

static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg)
{
	while (true) {
		if (!throtl_tg_can_downgrade(tg))
			return false;
		tg = sq_to_tg(tg->service_queue.parent_sq);
		if (!tg || !tg_to_blkg(tg)->parent)
			break;
	}
	return true;
}
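
/*
 * throtl_downgrade_check() below estimates the rate actually achieved
 * since the last check as bytes * HZ / elapsed_jiffies.  A worked example
 * with illustrative numbers, assuming HZ=1000: 4MiB dispatched over 500
 * jiffies (0.5s) gives 4194304 * 1000 / 500 = 8388608B/s, i.e. 8MiB/s.
 * If the estimate still meets the group's low limit,
 * last_low_overflow_time is refreshed and no downgrade happens.
 */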

static void throtl_downgrade_check(struct throtl_grp *tg)
{
	uint64_t bps;
	unsigned int iops;
	unsigned long elapsed_time;
	unsigned long now = jiffies;

	if (tg->td->limit_index != LIMIT_MAX ||
	    !tg->td->limit_valid[LIMIT_LOW])
		return;
	if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
		return;
	if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
		return;

	elapsed_time = now - tg->last_check_time;
	tg->last_check_time = now;

	if (time_before(now, tg_last_low_overflow_time(tg) +
			tg->td->throtl_slice))
		return;

	if (tg->bps[READ][LIMIT_LOW]) {
		bps = tg->last_bytes_disp[READ] * HZ;
		do_div(bps, elapsed_time);
		if (bps >= tg->bps[READ][LIMIT_LOW])
			tg->last_low_overflow_time[READ] = now;
	}

	if (tg->bps[WRITE][LIMIT_LOW]) {
		bps = tg->last_bytes_disp[WRITE] * HZ;
		do_div(bps, elapsed_time);
		if (bps >= tg->bps[WRITE][LIMIT_LOW])
			tg->last_low_overflow_time[WRITE] = now;
	}

	if (tg->iops[READ][LIMIT_LOW]) {
		iops = tg->last_io_disp[READ] * HZ / elapsed_time;
		if (iops >= tg->iops[READ][LIMIT_LOW])
			tg->last_low_overflow_time[READ] = now;
	}

	if (tg->iops[WRITE][LIMIT_LOW]) {
		iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
		if (iops >= tg->iops[WRITE][LIMIT_LOW])
			tg->last_low_overflow_time[WRITE] = now;
	}

	/*
	 * If the cgroup is below its low limit, consider downgrading so
	 * that the other cgroups are throttled again.
	 */
	if (throtl_hierarchy_can_downgrade(tg))
		throtl_downgrade_state(tg->td, LIMIT_LOW);

	tg->last_bytes_disp[READ] = 0;
	tg->last_bytes_disp[WRITE] = 0;
	tg->last_io_disp[READ] = 0;
	tg->last_io_disp[WRITE] = 0;
}

static void blk_throtl_update_idletime(struct throtl_grp *tg)
{
	unsigned long now = ktime_get_ns() >> 10;
	unsigned long last_finish_time = tg->last_finish_time;

	if (now <= last_finish_time || last_finish_time == 0 ||
	    last_finish_time == tg->checked_last_finish_time)
		return;

	tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3;
	tg->checked_last_finish_time = last_finish_time;
}
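
/*
 * blk_throtl_update_idletime() above keeps avg_idletime as an
 * exponentially weighted moving average with a 7/8 decay:
 *
 *	avg = (7 * avg + sample) / 8
 *
 * where the sample is the gap (in ns/1024, roughly us) between the last
 * IO completion and the next submission, i.e. the cgroup's "think time".
 * After n samples an old contribution has weight (7/8)^n, so roughly the
 * last couple dozen samples dominate the average.
 */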

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static void throtl_update_latency_buckets(struct throtl_data *td)
{
	struct avg_latency_bucket avg_latency[2][LATENCY_BUCKET_SIZE];
	int i, cpu, rw;
	unsigned long last_latency[2] = { 0 };
	unsigned long latency[2];

	if (!blk_queue_nonrot(td->queue))
		return;
	if (time_before(jiffies, td->last_calculate_time + HZ))
		return;
	td->last_calculate_time = jiffies;

	memset(avg_latency, 0, sizeof(avg_latency));
	for (rw = READ; rw <= WRITE; rw++) {
		for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
			struct latency_bucket *tmp = &td->tmp_buckets[rw][i];

			for_each_possible_cpu(cpu) {
				struct latency_bucket *bucket;

				/* this isn't race free, but ok in practice */
				bucket = per_cpu_ptr(td->latency_buckets[rw],
					cpu);
				tmp->total_latency += bucket[i].total_latency;
				tmp->samples += bucket[i].samples;
				bucket[i].total_latency = 0;
				bucket[i].samples = 0;
			}

			if (tmp->samples >= 32) {
				int samples = tmp->samples;

				latency[rw] = tmp->total_latency;

				tmp->total_latency = 0;
				tmp->samples = 0;
				latency[rw] /= samples;
				if (latency[rw] == 0)
					continue;
				avg_latency[rw][i].latency = latency[rw];
			}
		}
	}

	for (rw = READ; rw <= WRITE; rw++) {
		for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
			if (!avg_latency[rw][i].latency) {
				if (td->avg_buckets[rw][i].latency < last_latency[rw])
					td->avg_buckets[rw][i].latency =
						last_latency[rw];
				continue;
			}

			if (!td->avg_buckets[rw][i].valid)
				latency[rw] = avg_latency[rw][i].latency;
			else
				latency[rw] = (td->avg_buckets[rw][i].latency * 7 +
					avg_latency[rw][i].latency) >> 3;

			td->avg_buckets[rw][i].latency = max(latency[rw],
				last_latency[rw]);
			td->avg_buckets[rw][i].valid = true;
			last_latency[rw] = td->avg_buckets[rw][i].latency;
		}
	}

	for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
		throtl_log(&td->service_queue,
			"Latency bucket %d: read latency=%ld, read valid=%d, "
			"write latency=%ld, write valid=%d", i,
			td->avg_buckets[READ][i].latency,
			td->avg_buckets[READ][i].valid,
			td->avg_buckets[WRITE][i].latency,
			td->avg_buckets[WRITE][i].valid);
}
#else
static inline void throtl_update_latency_buckets(struct throtl_data *td)
{
}
#endif

static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio)
{
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	/* fallback to root_blkg if we fail to get a blkg ref */
	if (bio->bi_css && (bio_associate_blkg(bio, tg_to_blkg(tg)) == -ENODEV))
		bio_associate_blkg(bio, bio->bi_disk->queue->root_blkg);
	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
#endif
}
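
/*
 * Main entry point from the bio submission path.  Returns true if @bio
 * was queued for later dispatch (the caller must not issue it), false if
 * it passed all limits and may be issued immediately.
 */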
bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
		    struct bio *bio)
{
	struct throtl_qnode *qn = NULL;
	struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
	struct throtl_service_queue *sq;
	bool rw = bio_data_dir(bio);
	bool throttled = false;
	struct throtl_data *td = tg->td;

	WARN_ON_ONCE(!rcu_read_lock_held());

	/* see throtl_charge_bio() */
	if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw])
		goto out;

	spin_lock_irq(q->queue_lock);

	throtl_update_latency_buckets(td);

	if (unlikely(blk_queue_bypass(q)))
		goto out_unlock;

	blk_throtl_assoc_bio(tg, bio);
	blk_throtl_update_idletime(tg);

	sq = &tg->service_queue;

again:
	while (true) {
		if (tg->last_low_overflow_time[rw] == 0)
			tg->last_low_overflow_time[rw] = jiffies;
		throtl_downgrade_check(tg);
		throtl_upgrade_check(tg);
		/* throtl is FIFO - if bios are already queued, should queue */
		if (sq->nr_queued[rw])
			break;

		/* if above limits, break to queue */
		if (!tg_may_dispatch(tg, bio, NULL)) {
			tg->last_low_overflow_time[rw] = jiffies;
			if (throtl_can_upgrade(td, tg)) {
				throtl_upgrade_state(td);
				goto again;
			}
			break;
		}

		/* within limits, let's charge and dispatch directly */
		throtl_charge_bio(tg, bio);

		/*
		 * We need to trim the slice even when bios are not being
		 * queued; otherwise it might happen that a bio is not
		 * queued for a long time and the slice keeps on extending
		 * while trim is never called.  If limits were then reduced
		 * suddenly, all the IO dispatched so far would be accounted
		 * at the new low rate and newly queued IO would get a
		 * really long dispatch time.
		 *
		 * So keep on trimming the slice even if no bio is queued.
		 */
		throtl_trim_slice(tg, rw);

		/*
		 * @bio passed through this layer without being throttled.
		 * Climb up the ladder.  If we're already at the top, it
		 * can be executed directly.
		 */
		qn = &tg->qnode_on_parent[rw];
		sq = sq->parent_sq;
		tg = sq_to_tg(sq);
		if (!tg)
			goto out_unlock;
	}

	/* out-of-limit, queue to @tg */
	throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
		   rw == READ ? 'R' : 'W',
		   tg->bytes_disp[rw], bio->bi_iter.bi_size,
		   tg_bps_limit(tg, rw),
		   tg->io_disp[rw], tg_iops_limit(tg, rw),
		   sq->nr_queued[READ], sq->nr_queued[WRITE]);

	tg->last_low_overflow_time[rw] = jiffies;

	td->nr_queued[rw]++;
	throtl_add_bio_tg(bio, qn, tg);
	throttled = true;

	/*
	 * Update @tg's dispatch time and force schedule dispatch if @tg
	 * was empty before @bio.  The forced scheduling isn't likely to
	 * cause undue delay as @bio is likely to be dispatched directly if
	 * its @tg's disptime is not in the future.
	 */
	if (tg->flags & THROTL_TG_WAS_EMPTY) {
		tg_update_disptime(tg);
		throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
	}

out_unlock:
	spin_unlock_irq(q->queue_lock);
out:
	bio_set_flag(bio, BIO_THROTTLED);

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	if (throttled || !td->track_bio_latency)
		bio->bi_issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY;
#endif
	return throttled;
}

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static void throtl_track_latency(struct throtl_data *td, sector_t size,
	int op, unsigned long time)
{
	struct latency_bucket *latency;
	int index;

	if (!td || td->limit_index != LIMIT_LOW ||
	    !(op == REQ_OP_READ || op == REQ_OP_WRITE) ||
	    !blk_queue_nonrot(td->queue))
		return;

	index = request_bucket_index(size);

	latency = get_cpu_ptr(td->latency_buckets[op]);
	latency[index].total_latency += time;
	latency[index].samples++;
	put_cpu_ptr(td->latency_buckets[op]);
}

void blk_throtl_stat_add(struct request *rq, u64 time_ns)
{
	struct request_queue *q = rq->q;
	struct throtl_data *td = q->td;

	throtl_track_latency(td, rq->throtl_size, req_op(rq), time_ns >> 10);
}

void blk_throtl_bio_endio(struct bio *bio)
{
	struct blkcg_gq *blkg;
	struct throtl_grp *tg;
	u64 finish_time_ns;
	unsigned long finish_time;
	unsigned long start_time;
	unsigned long lat;
	int rw = bio_data_dir(bio);

	blkg = bio->bi_blkg;
	if (!blkg)
		return;
	tg = blkg_to_tg(blkg);

	finish_time_ns = ktime_get_ns();
	tg->last_finish_time = finish_time_ns >> 10;

	start_time = bio_issue_time(&bio->bi_issue) >> 10;
	finish_time = __bio_issue_time(finish_time_ns) >> 10;
	if (!start_time || finish_time <= start_time)
		return;

	lat = finish_time - start_time;
	/* this is only for bio based driver */
	if (!(bio->bi_issue.value & BIO_ISSUE_THROTL_SKIP_LATENCY))
		throtl_track_latency(tg->td, bio_issue_size(&bio->bi_issue),
				     bio_op(bio), lat);

	if (tg->latency_target && lat >= tg->td->filtered_latency) {
		int bucket;
		unsigned int threshold;

		bucket = request_bucket_index(bio_issue_size(&bio->bi_issue));
		threshold = tg->td->avg_buckets[rw][bucket].latency +
			tg->latency_target;
		if (lat > threshold)
			tg->bad_bio_cnt++;
		/*
		 * Not race free: the count can be slightly off, which only
		 * means a cgroup may occasionally be throttled when it
		 * shouldn't be.
		 */
		tg->bio_cnt++;
	}

	if (time_after(jiffies, tg->bio_cnt_reset_time) || tg->bio_cnt > 1024) {
		tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies;
		tg->bio_cnt /= 2;
		tg->bad_bio_cnt /= 2;
	}
}
#endif
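
/*
 * A note on the accounting above: a bio counts as "bad" when its
 * completion latency exceeds the per-size-bucket average latency plus the
 * group's configured latency target.  throtl_tg_is_idle() treats the
 * group as meeting its target while bad bios stay under 20% of the total
 * (bad_bio_cnt * 5 < bio_cnt), and both counters are halved roughly every
 * throtl_slice (or once 1024 bios accumulate) so that old history decays.
 */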

/*
 * Dispatch all bios from all children tg's queued on @parent_sq.  On
 * return, @parent_sq is guaranteed to not have any active children tg's
 * and all bios from previously active tg's are on @parent_sq->bio_lists[].
 */
static void tg_drain_bios(struct throtl_service_queue *parent_sq)
{
	struct throtl_grp *tg;

	while ((tg = throtl_rb_first(parent_sq))) {
		struct throtl_service_queue *sq = &tg->service_queue;
		struct bio *bio;

		throtl_dequeue_tg(tg);

		while ((bio = throtl_peek_queued(&sq->queued[READ])))
			tg_dispatch_one_bio(tg, bio_data_dir(bio));
		while ((bio = throtl_peek_queued(&sq->queued[WRITE])))
			tg_dispatch_one_bio(tg, bio_data_dir(bio));
	}
}

/**
 * blk_throtl_drain - drain throttled bios
 * @q: request_queue to drain throttled bios for
 *
 * Dispatch all currently throttled bios on @q through ->make_request_fn().
 */
void blk_throtl_drain(struct request_queue *q)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct throtl_data *td = q->td;
	struct blkcg_gq *blkg;
	struct cgroup_subsys_state *pos_css;
	struct bio *bio;
	int rw;

	queue_lockdep_assert_held(q);
	rcu_read_lock();

	/*
	 * Drain each tg while doing post-order walk on the blkg tree, so
	 * that all bios are propagated to td->service_queue.  It'd be
	 * better to walk service_queue tree directly but blkg walk is
	 * easier.
	 */
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
		tg_drain_bios(&blkg_to_tg(blkg)->service_queue);

	/* finally, transfer bios from top-level tg's into the td */
	tg_drain_bios(&td->service_queue);

	rcu_read_unlock();
	spin_unlock_irq(q->queue_lock);

	/* all bios now should be in td->service_queue, issue them */
	for (rw = READ; rw <= WRITE; rw++)
		while ((bio = throtl_pop_queued(&td->service_queue.queued[rw],
						NULL)))
			generic_make_request(bio);

	spin_lock_irq(q->queue_lock);
}

int blk_throtl_init(struct request_queue *q)
{
	struct throtl_data *td;
	int ret;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;
	td->latency_buckets[READ] = __alloc_percpu(sizeof(struct latency_bucket) *
		LATENCY_BUCKET_SIZE, __alignof__(u64));
	if (!td->latency_buckets[READ]) {
		kfree(td);
		return -ENOMEM;
	}
	td->latency_buckets[WRITE] = __alloc_percpu(sizeof(struct latency_bucket) *
		LATENCY_BUCKET_SIZE, __alignof__(u64));
	if (!td->latency_buckets[WRITE]) {
		free_percpu(td->latency_buckets[READ]);
		kfree(td);
		return -ENOMEM;
	}

	INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
	throtl_service_queue_init(&td->service_queue);

	q->td = td;
	td->queue = q;

	td->limit_valid[LIMIT_MAX] = true;
	td->limit_index = LIMIT_MAX;
	td->low_upgrade_time = jiffies;
	td->low_downgrade_time = jiffies;

	/* activate policy */
	ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
	if (ret) {
		free_percpu(td->latency_buckets[READ]);
		free_percpu(td->latency_buckets[WRITE]);
		kfree(td);
	}
	return ret;
}

void blk_throtl_exit(struct request_queue *q)
{
	BUG_ON(!q->td);
	throtl_shutdown_wq(q);
	blkcg_deactivate_policy(q, &blkcg_policy_throtl);
	free_percpu(q->td->latency_buckets[READ]);
	free_percpu(q->td->latency_buckets[WRITE]);
	kfree(q->td);
}
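
/*
 * Queue registration hook: picks throttling defaults by medium type.  The
 * slice chosen here is what blk_throtl_sample_time_show()/store() below
 * read and update; on CONFIG_BLK_DEV_THROTTLING_LOW kernels those
 * handlers back the queue's throttle_sample_time sysfs attribute (the
 * attribute wiring itself lives outside this file).
 */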
void blk_throtl_register_queue(struct request_queue *q)
{
	struct throtl_data *td;
	int i;

	td = q->td;
	BUG_ON(!td);

	if (blk_queue_nonrot(q)) {
		td->throtl_slice = DFL_THROTL_SLICE_SSD;
		td->filtered_latency = LATENCY_FILTERED_SSD;
	} else {
		td->throtl_slice = DFL_THROTL_SLICE_HD;
		td->filtered_latency = LATENCY_FILTERED_HD;
		for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
			td->avg_buckets[READ][i].latency = DFL_HD_BASELINE_LATENCY;
			td->avg_buckets[WRITE][i].latency = DFL_HD_BASELINE_LATENCY;
		}
	}
#ifndef CONFIG_BLK_DEV_THROTTLING_LOW
	/* if no low limit, use the previous default */
	td->throtl_slice = DFL_THROTL_SLICE_HD;
#endif

	td->track_bio_latency = !queue_is_rq_based(q);
	if (!td->track_bio_latency)
		blk_stat_enable_accounting(q);
}

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)
{
	if (!q->td)
		return -EINVAL;
	return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice));
}

ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count)
{
	unsigned long v;
	unsigned long t;

	if (!q->td)
		return -EINVAL;
	if (kstrtoul(page, 10, &v))
		return -EINVAL;
	t = msecs_to_jiffies(v);
	if (t == 0 || t > MAX_THROTL_SLICE)
		return -EINVAL;
	q->td->throtl_slice = t;
	return count;
}
#endif

static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	return blkcg_policy_register(&blkcg_policy_throtl);
}

module_init(throtl_init);