// SPDX-License-Identifier: GPL-2.0
/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include <linux/blk-cgroup.h>
#include "blk.h"

/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over a slice and after that slice is renewed */
#define DFL_THROTL_SLICE_HD (HZ / 10)
#define DFL_THROTL_SLICE_SSD (HZ / 50)
#define MAX_THROTL_SLICE (HZ)
#define MAX_IDLE_TIME (5L * 1000 * 1000) /* 5 s */
#define MIN_THROTL_BPS (320 * 1024)
#define MIN_THROTL_IOPS (10)
#define DFL_LATENCY_TARGET (-1L)
#define DFL_IDLE_THRESHOLD (0)
#define DFL_HD_BASELINE_LATENCY (4000L) /* 4ms */
#define LATENCY_FILTERED_SSD (0)
/*
 * For HD, very small latency comes from sequential IO. Such IO is useless
 * for determining whether the group's IO is being impacted by others, hence
 * we ignore it.
 */
#define LATENCY_FILTERED_HD (1000L) /* 1ms */

static struct blkcg_policy blkcg_policy_throtl;

/* A workqueue to queue throttle-related work */
static struct workqueue_struct *kthrotld_workqueue;

/*
 * To implement hierarchical throttling, throtl_grps form a tree and bios
 * are dispatched upwards level by level until they reach the top and get
 * issued. When dispatching bios from the children and local group at each
 * level, if the bios are dispatched into a single bio_list, there's a risk
 * of a local or child group which can queue many bios at once filling up
 * the list starving others.
 *
 * To avoid such starvation, dispatched bios are queued separately
 * according to where they came from. When they are again dispatched to
 * the parent, they're popped in round-robin order so that no single source
 * hogs the dispatch window.
 *
 * throtl_qnode is used to keep the queued bios separated by their sources.
 * Bios are queued to throtl_qnode which in turn is queued to
 * throtl_service_queue and then dispatched in round-robin order.
 *
 * It's also used to track the reference counts on blkg's. A qnode always
 * belongs to a throtl_grp and gets queued on itself or the parent, so
 * incrementing the reference of the associated throtl_grp when a qnode is
 * queued and decrementing when dequeued is enough to keep the whole blkg
 * tree pinned while bios are in flight.
 */
struct throtl_qnode {
	struct list_head	node;		/* service_queue->queued[] */
	struct bio_list		bios;		/* queued bios */
	struct throtl_grp	*tg;		/* tg this qnode belongs to */
};

struct throtl_service_queue {
	struct throtl_service_queue *parent_sq;	/* the parent service_queue */

	/*
	 * Bios queued directly to this service_queue or dispatched from
	 * children throtl_grp's.
	 */
	struct list_head	queued[2];	/* throtl_qnode [READ/WRITE] */
	unsigned int		nr_queued[2];	/* number of queued bios */

	/*
	 * RB tree of active children throtl_grp's, which are sorted by
	 * their ->disptime.
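	 *
	 * Illustrative example (not from the original source): three pending
	 * children with disptime 95, 100 and 120 jiffies sort leftmost-first
	 * as 95 -> 100 -> 120, and first_pending caches the leftmost node so
	 * the next expiry can be read without re-walking the tree.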
	 */
	struct rb_root		pending_tree;	/* RB tree of active tgs */
	struct rb_node		*first_pending;	/* first node in the tree */
	unsigned int		nr_pending;	/* # queued in the tree */
	unsigned long		first_pending_disptime;	/* disptime of the first tg */
	struct timer_list	pending_timer;	/* fires on first_pending_disptime */
};

enum tg_state_flags {
	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */
	THROTL_TG_WAS_EMPTY	= 1 << 1,	/* bio_lists[] became non-empty */
};

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

enum {
	LIMIT_LOW,
	LIMIT_MAX,
	LIMIT_CNT,
};

struct throtl_grp {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* active throtl group service_queue member */
	struct rb_node rb_node;

	/* throtl_data this group belongs to */
	struct throtl_data *td;

	/* this group's service queue */
	struct throtl_service_queue service_queue;

	/*
	 * qnode_on_self is used when bios are directly queued to this
	 * throtl_grp so that local bios compete fairly with bios
	 * dispatched from children. qnode_on_parent is used when bios are
	 * dispatched from this throtl_grp into its parent and will compete
	 * with the sibling qnode_on_parents and the parent's
	 * qnode_on_self.
	 */
	struct throtl_qnode qnode_on_self[2];
	struct throtl_qnode qnode_on_parent[2];

	/*
	 * Dispatch time in jiffies. This is the estimated time when group
	 * will unthrottle and is ready to dispatch more bio. It is used as
	 * key to sort active groups in service tree.
	 */
	unsigned long disptime;

	unsigned int flags;

	/* are there any throtl rules between this group and td? */
	bool has_rules[2];

	/* internally used bytes per second rate limits */
	uint64_t bps[2][LIMIT_CNT];
	/* user configured bps limits */
	uint64_t bps_conf[2][LIMIT_CNT];

	/* internally used IOPS limits */
	unsigned int iops[2][LIMIT_CNT];
	/* user configured IOPS limits */
	unsigned int iops_conf[2][LIMIT_CNT];

	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bio's dispatched in current slice */
	unsigned int io_disp[2];

	unsigned long last_low_overflow_time[2];

	uint64_t last_bytes_disp[2];
	unsigned int last_io_disp[2];

	unsigned long last_check_time;

	unsigned long latency_target; /* us */
	unsigned long latency_target_conf; /* us */
	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	unsigned long last_finish_time; /* ns / 1024 */
	unsigned long checked_last_finish_time; /* ns / 1024 */
	unsigned long avg_idletime; /* ns / 1024 */
	unsigned long idletime_threshold; /* us */
	unsigned long idletime_threshold_conf; /* us */

	unsigned int bio_cnt; /* total bios */
	unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
	unsigned long bio_cnt_reset_time;
};

/* We measure latency for request size from <= 4k to >= 1M */
#define LATENCY_BUCKET_SIZE 9

struct latency_bucket {
	unsigned long total_latency; /* ns / 1024 */
	int samples;
};

struct avg_latency_bucket {
	unsigned long latency; /* ns / 1024 */
	bool valid;
};

struct throtl_data
{
	/* service tree for active throtl groups */
	struct throtl_service_queue service_queue;

	struct request_queue *queue;

	/* Total number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	unsigned int throtl_slice;

	/* Work for dispatching throttled bios */
	struct work_struct dispatch_work;
	unsigned int limit_index;
	bool limit_valid[LIMIT_CNT];

	unsigned long low_upgrade_time;
	unsigned long low_downgrade_time;

	unsigned int scale;

	struct latency_bucket tmp_buckets[2][LATENCY_BUCKET_SIZE];
	struct avg_latency_bucket avg_buckets[2][LATENCY_BUCKET_SIZE];
	struct latency_bucket __percpu *latency_buckets[2];
	unsigned long last_calculate_time;
	unsigned long filtered_latency;

	bool track_bio_latency;
};

static void throtl_pending_timer_fn(struct timer_list *t);

static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
}

static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
{
	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
}

static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
{
	return pd_to_blkg(&tg->pd);
}

/**
 * sq_to_tg - return the throtl_grp the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * Return the throtl_grp @sq belongs to. If @sq is the top-level one
 * embedded in throtl_data, %NULL is returned.
 */
static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
{
	if (sq && sq->parent_sq)
		return container_of(sq, struct throtl_grp, service_queue);
	else
		return NULL;
}

/**
 * sq_to_td - return throtl_data the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * A service_queue can be embedded in either a throtl_grp or throtl_data.
 * Determine the associated throtl_data accordingly and return it.
 */
static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
{
	struct throtl_grp *tg = sq_to_tg(sq);

	if (tg)
		return tg->td;
	else
		return container_of(sq, struct throtl_data, service_queue);
}

/*
 * cgroup's limit in LIMIT_MAX is scaled if low limit is set. This scale is to
 * make the IO dispatch smoother.
 * Scale up: linearly scale up according to elapsed time since upgrade. For
 * every throtl_slice, the limit scales up by half of the .low limit till the
 * limit hits the .max limit.
 * Scale down: exponentially scale down if a cgroup doesn't hit its .low limit
 */
static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td)
{
	/* arbitrary value to avoid too big scale */
	if (td->scale < 4096 && time_after_eq(jiffies,
	    td->low_upgrade_time + td->scale * td->throtl_slice))
		td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice;

	return low + (low >> 1) * td->scale;
}

static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
{
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td;
	uint64_t ret;

	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
		return U64_MAX;

	td = tg->td;
	ret = tg->bps[rw][td->limit_index];
	if (ret == 0 && td->limit_index == LIMIT_LOW) {
		/* intermediate node or iops isn't 0 */
		if (!list_empty(&blkg->blkcg->css.children) ||
		    tg->iops[rw][td->limit_index])
			return U64_MAX;
		else
			return MIN_THROTL_BPS;
	}

	if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
	    tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
		uint64_t adjusted;

		adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td);
		ret = min(tg->bps[rw][LIMIT_MAX], adjusted);
	}
	return ret;
}

static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
{
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td;
	unsigned int ret;

	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
		return UINT_MAX;

	td = tg->td;
	ret = tg->iops[rw][td->limit_index];
	if (ret == 0 && tg->td->limit_index == LIMIT_LOW) {
		/* intermediate node or bps isn't 0 */
		if (!list_empty(&blkg->blkcg->css.children) ||
		    tg->bps[rw][td->limit_index])
			return UINT_MAX;
		else
			return MIN_THROTL_IOPS;
	}

	if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
	    tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
		uint64_t adjusted;

		adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td);
		if (adjusted > UINT_MAX)
			adjusted = UINT_MAX;
		ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted);
	}
	return ret;
}

#define request_bucket_index(sectors) \
	clamp_t(int, order_base_2(sectors) - 3, 0, LATENCY_BUCKET_SIZE - 1)
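
/*
 * Worked example (illustrative, not from the original source): with a .low
 * limit of 100 MB/s and td->scale == 2, throtl_adjusted_limit() returns
 * 100 + 50 * 2 = 200 MB/s, which tg_bps_limit() then caps at the group's
 * .max limit. Likewise, request_bucket_index() maps a 4k request
 * (8 sectors) to bucket 0 and a 1M request (2048 sectors) to bucket 8.
 */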

/**
 * throtl_log - log debug message via blktrace
 * @sq: the service_queue being reported
 * @fmt: printf format string
 * @args: printf args
 *
 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
 * throtl_grp; otherwise, just "throtl".
 */
#define throtl_log(sq, fmt, args...)	do {				\
	struct throtl_grp *__tg = sq_to_tg((sq));			\
	struct throtl_data *__td = sq_to_td((sq));			\
									\
	(void)__td;							\
	if (likely(!blk_trace_note_message_enabled(__td->queue)))	\
		break;							\
	if ((__tg)) {							\
		blk_add_cgroup_trace_msg(__td->queue,			\
			tg_to_blkg(__tg)->blkcg, "throtl " fmt, ##args);\
	} else {							\
		blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);	\
	}								\
} while (0)

static inline unsigned int throtl_bio_data_size(struct bio *bio)
{
	/* assume it's one sector */
	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
		return 512;
	return bio->bi_iter.bi_size;
}

static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
{
	INIT_LIST_HEAD(&qn->node);
	bio_list_init(&qn->bios);
	qn->tg = tg;
}

/**
 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
 * @bio: bio being added
 * @qn: qnode to add bio to
 * @queued: the service_queue->queued[] list @qn belongs to
 *
 * Add @bio to @qn and put @qn on @queued if it's not already on.
 * @qn->tg's reference count is bumped when @qn is activated. See the
 * comment on top of throtl_qnode definition for details.
 */
static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
				 struct list_head *queued)
{
	bio_list_add(&qn->bios, bio);
	if (list_empty(&qn->node)) {
		list_add_tail(&qn->node, queued);
		blkg_get(tg_to_blkg(qn->tg));
	}
}

/**
 * throtl_peek_queued - peek the first bio on a qnode list
 * @queued: the qnode list to peek
 */
static struct bio *throtl_peek_queued(struct list_head *queued)
{
	struct throtl_qnode *qn;
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	qn = list_first_entry(queued, struct throtl_qnode, node);
	bio = bio_list_peek(&qn->bios);
	WARN_ON_ONCE(!bio);
	return bio;
}

/**
 * throtl_pop_queued - pop the first bio from a qnode list
 * @queued: the qnode list to pop a bio from
 * @tg_to_put: optional out argument for throtl_grp to put
 *
 * Pop the first bio from the qnode list @queued. After popping, the first
 * qnode is removed from @queued if empty or moved to the end of @queued so
 * that the popping order is round-robin.
 *
 * When the first qnode is removed, its associated throtl_grp should be put
 * too. If @tg_to_put is NULL, this function automatically puts it;
 * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
 * responsible for putting it.
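 *
 * Round-robin example (illustrative): with qnodes A and B on @queued
 * holding bios a1,a2 and b1 respectively, three successive pops return
 * a1, b1, a2 -- a qnode that still has bios is rotated to the tail after
 * yielding one.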
 */
static struct bio *throtl_pop_queued(struct list_head *queued,
				     struct throtl_grp **tg_to_put)
{
	struct throtl_qnode *qn;
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	qn = list_first_entry(queued, struct throtl_qnode, node);
	bio = bio_list_pop(&qn->bios);
	WARN_ON_ONCE(!bio);

	if (bio_list_empty(&qn->bios)) {
		list_del_init(&qn->node);
		if (tg_to_put)
			*tg_to_put = qn->tg;
		else
			blkg_put(tg_to_blkg(qn->tg));
	} else {
		list_move_tail(&qn->node, queued);
	}

	return bio;
}

/* init a service_queue, assumes the caller zeroed it */
static void throtl_service_queue_init(struct throtl_service_queue *sq)
{
	INIT_LIST_HEAD(&sq->queued[0]);
	INIT_LIST_HEAD(&sq->queued[1]);
	sq->pending_tree = RB_ROOT;
	timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0);
}

static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
{
	struct throtl_grp *tg;
	int rw;

	tg = kzalloc_node(sizeof(*tg), gfp, node);
	if (!tg)
		return NULL;

	throtl_service_queue_init(&tg->service_queue);

	for (rw = READ; rw <= WRITE; rw++) {
		throtl_qnode_init(&tg->qnode_on_self[rw], tg);
		throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
	}

	RB_CLEAR_NODE(&tg->rb_node);
	tg->bps[READ][LIMIT_MAX] = U64_MAX;
	tg->bps[WRITE][LIMIT_MAX] = U64_MAX;
	tg->iops[READ][LIMIT_MAX] = UINT_MAX;
	tg->iops[WRITE][LIMIT_MAX] = UINT_MAX;
	tg->bps_conf[READ][LIMIT_MAX] = U64_MAX;
	tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
	tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
	tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
	/* LIMIT_LOW will have default value 0 */

	tg->latency_target = DFL_LATENCY_TARGET;
	tg->latency_target_conf = DFL_LATENCY_TARGET;
	tg->idletime_threshold = DFL_IDLE_THRESHOLD;
	tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD;

	return &tg->pd;
}

static void throtl_pd_init(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td = blkg->q->td;
	struct throtl_service_queue *sq = &tg->service_queue;

	/*
	 * If on the default hierarchy, we switch to properly hierarchical
	 * behavior where limits on a given throtl_grp are applied to the
	 * whole subtree rather than just the group itself. e.g. If 16M
	 * read_bps limit is set on the root group, the whole system can't
	 * exceed 16M for the device.
	 *
	 * If not on the default hierarchy, the broken flat hierarchy
	 * behavior is retained where all throtl_grps are treated as if
	 * they're all separate root groups right below throtl_data.
	 * Limits of a group don't interact with limits of other groups
	 * regardless of the position of the group in the hierarchy.
	 */
	sq->parent_sq = &td->service_queue;
	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
		sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
	tg->td = td;
}

/*
 * Set has_rules[] if @tg or any of its parents have limits configured.
 * This doesn't require walking up to the top of the hierarchy as the
 * parent's has_rules[] is guaranteed to be correct.
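 *
 * E.g. (illustrative): if only the grandparent has a wbps limit, every
 * descendant still ends up with has_rules[WRITE] set, because each group
 * copies its parent's state when it is brought online.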
 */
static void tg_update_has_rules(struct throtl_grp *tg)
{
	struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
	struct throtl_data *td = tg->td;
	int rw;

	for (rw = READ; rw <= WRITE; rw++)
		tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
			(td->limit_valid[td->limit_index] &&
			 (tg_bps_limit(tg, rw) != U64_MAX ||
			  tg_iops_limit(tg, rw) != UINT_MAX));
}

static void throtl_pd_online(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	/*
	 * We don't want new groups to escape the limits of their ancestors.
	 * Update has_rules[] after a new group is brought online.
	 */
	tg_update_has_rules(tg);
}

static void blk_throtl_update_limit_valid(struct throtl_data *td)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;
	bool low_valid = false;

	rcu_read_lock();
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);

		if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
		    tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
			low_valid = true;
	}
	rcu_read_unlock();

	td->limit_valid[LIMIT_LOW] = low_valid;
}

static void throtl_upgrade_state(struct throtl_data *td);
static void throtl_pd_offline(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);

	tg->bps[READ][LIMIT_LOW] = 0;
	tg->bps[WRITE][LIMIT_LOW] = 0;
	tg->iops[READ][LIMIT_LOW] = 0;
	tg->iops[WRITE][LIMIT_LOW] = 0;

	blk_throtl_update_limit_valid(tg->td);

	if (!tg->td->limit_valid[tg->td->limit_index])
		throtl_upgrade_state(tg->td);
}

static void throtl_pd_free(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);

	del_timer_sync(&tg->service_queue.pending_timer);
	kfree(tg);
}

static struct throtl_grp *
throtl_rb_first(struct throtl_service_queue *parent_sq)
{
	/* Service tree is empty */
	if (!parent_sq->nr_pending)
		return NULL;

	if (!parent_sq->first_pending)
		parent_sq->first_pending = rb_first(&parent_sq->pending_tree);

	if (parent_sq->first_pending)
		return rb_entry_tg(parent_sq->first_pending);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void throtl_rb_erase(struct rb_node *n,
			    struct throtl_service_queue *parent_sq)
{
	if (parent_sq->first_pending == n)
		parent_sq->first_pending = NULL;
	rb_erase_init(n, &parent_sq->pending_tree);
	--parent_sq->nr_pending;
}

static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(parent_sq);
	if (!tg)
		return;

	parent_sq->first_pending_disptime = tg->disptime;
}

static void tg_service_queue_add(struct throtl_grp *tg)
{
	struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
	struct rb_node **node = &parent_sq->pending_tree.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		parent_sq->first_pending = &tg->rb_node;

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color(&tg->rb_node, &parent_sq->pending_tree);
}

static void __throtl_enqueue_tg(struct throtl_grp *tg)
{
	tg_service_queue_add(tg);
	tg->flags |= THROTL_TG_PENDING;
	tg->service_queue.parent_sq->nr_pending++;
}

static void throtl_enqueue_tg(struct throtl_grp *tg)
{
	if (!(tg->flags & THROTL_TG_PENDING))
		__throtl_enqueue_tg(tg);
}

static void __throtl_dequeue_tg(struct throtl_grp *tg)
{
	throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
	tg->flags &= ~THROTL_TG_PENDING;
}

static void throtl_dequeue_tg(struct throtl_grp *tg)
{
	if (tg->flags & THROTL_TG_PENDING)
		__throtl_dequeue_tg(tg);
}

/* Call with queue lock held */
static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
					  unsigned long expires)
{
	unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;

	/*
	 * Since we are adjusting the throttle limit dynamically, the sleep
	 * time calculated according to the previous limit might be invalid.
	 * It's possible the cgroup sleep time is very long and no other
	 * cgroups have IO running, so notify the limit changes. Make sure
	 * the cgroup doesn't sleep too long to avoid a missed notification.
	 */
	if (time_after(expires, max_expire))
		expires = max_expire;
	mod_timer(&sq->pending_timer, expires);
	throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
		   expires - jiffies, jiffies);
}

/**
 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
 * @sq: the service_queue to schedule dispatch for
 * @force: force scheduling
 *
 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
 * dispatch time of the first pending child. Returns %true if either timer
 * is armed or there's no pending child left. %false if the current
 * dispatch window is still open and the caller should continue
 * dispatching.
 *
 * If @force is %true, the dispatch timer is always scheduled and this
 * function is guaranteed to return %true. This is to be used when the
 * caller can't dispatch itself and needs to invoke pending_timer
 * unconditionally. Note that forced scheduling is likely to induce a short
 * delay before dispatch starts even if @sq->first_pending_disptime is not
 * in the future and thus shouldn't be used in hot paths.
 */
static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
					  bool force)
{
	/* any pending children left? */
	if (!sq->nr_pending)
		return true;

	update_min_dispatch_time(sq);

	/* is the next dispatch time in the future? */
	if (force || time_after(sq->first_pending_disptime, jiffies)) {
		throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
		return true;
	}

	/* tell the caller to continue dispatching */
	return false;
}

static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
		bool rw, unsigned long start)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;

	/*
	 * Previous slice has expired. We must have trimmed it after last
	 * bio dispatch. That means since start of last slice, we never used
	 * that bandwidth. Do try to make use of that bandwidth while giving
	 * credit.
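	 *
	 * (Illustrative) backdating slice_start to @start, which lies in
	 * the past relative to jiffies, lets the time already spent
	 * throttled count toward this slice's budget.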
	 */
	if (time_after_eq(start, tg->slice_start[rw]))
		tg->slice_start[rw] = start;

	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
	throtl_log(&tg->service_queue,
		   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
	throtl_log(&tg->service_queue,
		   "[%c] new slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
					unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
				       unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
	throtl_log(&tg->service_queue,
		   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

/* Determine if previously allocated or extended slice is complete or not */
static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return false;

	return true;
}

/* Trim the used slices and adjust slice start accordingly */
static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, time_elapsed, io_trim;
	u64 bytes_trim, tmp;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps is unlimited (-1), the time slice doesn't get renewed.
	 * Don't try to trim the slice if the slice is used up. A new
	 * slice will start when appropriate.
	 */
	if (throtl_slice_used(tg, rw))
		return;

	/*
	 * A bio has been dispatched. Also adjust slice_end. It might happen
	 * that initially the cgroup limit was very low, resulting in a high
	 * slice_end, but later the limit was bumped up and the bio was
	 * dispatched sooner; then we need to reduce slice_end. A high bogus
	 * slice_end is bad because it does not allow a new slice to start.
	 */

	throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / tg->td->throtl_slice;

	if (!nr_slices)
		return;
	tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
	do_div(tmp, HZ);
	bytes_trim = tmp;

	io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
		HZ;

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * tg->td->throtl_slice;

	throtl_log(&tg->service_queue,
		   "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
		   tg->slice_start[rw], tg->slice_end[rw], jiffies);
}

static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
				  unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	u64 tmp;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = tg->td->throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a big value: since the minimum
	 * iops is 1, at most the equivalent of 1 second can elapse before
	 * we allow a dispatch, and by then the slice should have been
	 * trimmed.
	 */

	tmp = (u64)tg_iops_limit(tg, rw) * jiffy_elapsed_rnd;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ) / tg_iops_limit(tg, rw) + 1;

	if (jiffy_wait > jiffy_elapsed)
		jiffy_wait = jiffy_wait - jiffy_elapsed;
	else
		jiffy_wait = 1;

	if (wait)
		*wait = jiffy_wait;
	return false;
}

static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
				 unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	unsigned int bio_size = throtl_bio_data_size(bio);

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = tg->td->throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);

	tmp = tg_bps_limit(tg, rw) * jiffy_elapsed_rnd;
	do_div(tmp, HZ);
	bytes_allowed = tmp;

	if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw));

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time is without taking into consideration the rounding
	 * up we did. Add that time also.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return false;
}

/*
 * Returns whether one can dispatch a bio or not. Also returns the approx
 * number of jiffies to wait before this bio is within the IO rate and can
 * be dispatched.
 */
static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
			    unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

	/*
	 * Currently the whole state machine of the group depends on the
	 * first bio queued in the group's bio list. So one should not be
	 * calling this function with a different bio if there are other
	 * bios queued.
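	 *
	 * (Illustrative) if bio A is already queued and this were called
	 * for a later bio B, the wait computed for B against the current
	 * slice would be meaningless -- A must be charged and dispatched
	 * first.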
	 */
	BUG_ON(tg->service_queue.nr_queued[rw] &&
	       bio != throtl_peek_queued(&tg->service_queue.queued[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (tg_bps_limit(tg, rw) == U64_MAX &&
	    tg_iops_limit(tg, rw) == UINT_MAX) {
		if (wait)
			*wait = 0;
		return true;
	}

	/*
	 * If the previous slice expired, start a new one; otherwise
	 * renew/extend the existing slice to make sure it is at least
	 * throtl_slice long from now. A new slice is started only for an
	 * empty throttle group. If there is a queued bio, there should be
	 * an active slice and it should be extended instead.
	 */
	if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
		throtl_start_new_slice(tg, rw);
	else {
		if (time_before(tg->slice_end[rw],
		    jiffies + tg->td->throtl_slice))
			throtl_extend_slice(tg, rw,
				jiffies + tg->td->throtl_slice);
	}

	if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
	    tg_with_in_iops_limit(tg, bio, &iops_wait)) {
		if (wait)
			*wait = 0;
		return true;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(tg, rw, jiffies + max_wait);

	return false;
}

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);
	unsigned int bio_size = throtl_bio_data_size(bio);

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio_size;
	tg->io_disp[rw]++;
	tg->last_bytes_disp[rw] += bio_size;
	tg->last_io_disp[rw]++;

	/*
	 * BIO_THROTTLED is used to prevent the same bio from being throttled
	 * more than once, as a throttled bio will go through blk-throtl a
	 * second time when it eventually gets issued. Set it when a bio
	 * is being charged to a tg.
	 */
	if (!bio_flagged(bio, BIO_THROTTLED))
		bio_set_flag(bio, BIO_THROTTLED);
}

/**
 * throtl_add_bio_tg - add a bio to the specified throtl_grp
 * @bio: bio to add
 * @qn: qnode to use
 * @tg: the target throtl_grp
 *
 * Add @bio to @tg's service_queue using @qn. If @qn is not specified,
 * tg->qnode_on_self[] is used.
 */
static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
			      struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	bool rw = bio_data_dir(bio);

	if (!qn)
		qn = &tg->qnode_on_self[rw];

	/*
	 * If @tg doesn't currently have any bios queued in the same
	 * direction, queueing @bio can change when @tg should be
	 * dispatched. Mark that @tg was empty. This is automatically
	 * cleared on the next tg_update_disptime().
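	 *
	 * (Illustrative) a group that was idle in one direction and
	 * suddenly queues a bio may deserve an earlier disptime than the
	 * one currently keyed into the parent's tree; the flag tells the
	 * dispatcher to re-evaluate.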
	 */
	if (!sq->nr_queued[rw])
		tg->flags |= THROTL_TG_WAS_EMPTY;

	throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);

	sq->nr_queued[rw]++;
	throtl_enqueue_tg(tg);
}

static void tg_update_disptime(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	bio = throtl_peek_queued(&sq->queued[READ]);
	if (bio)
		tg_may_dispatch(tg, bio, &read_wait);

	bio = throtl_peek_queued(&sq->queued[WRITE]);
	if (bio)
		tg_may_dispatch(tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(tg);

	/* see throtl_add_bio_tg() */
	tg->flags &= ~THROTL_TG_WAS_EMPTY;
}

static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
					struct throtl_grp *parent_tg, bool rw)
{
	if (throtl_slice_used(parent_tg, rw)) {
		throtl_start_new_slice_with_credit(parent_tg, rw,
				child_tg->slice_start[rw]);
	}
}

static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	struct throtl_service_queue *parent_sq = sq->parent_sq;
	struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
	struct throtl_grp *tg_to_put = NULL;
	struct bio *bio;

	/*
	 * @bio is being transferred from @tg to @parent_sq. Popping a bio
	 * from @tg may put its reference and @parent_sq might end up
	 * getting released prematurely. Remember the tg to put and put it
	 * after @bio is transferred to @parent_sq.
	 */
	bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
	sq->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);

	/*
	 * If our parent is another tg, we just need to transfer @bio to
	 * the parent using throtl_add_bio_tg(). If our parent is
	 * @td->service_queue, @bio is ready to be issued. Put it on its
	 * bio_lists[] and decrease total number queued. The caller is
	 * responsible for issuing these bios.
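	 *
	 * E.g. (illustrative) in a two-level hierarchy, a bio dispatched
	 * from a child first moves to the parent's queue and is handed to
	 * the caller for issue only after the parent lets it through too.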
	 */
	if (parent_tg) {
		throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
		start_parent_slice_with_credit(tg, parent_tg, rw);
	} else {
		throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
				     &parent_sq->queued[rw]);
		BUG_ON(tg->td->nr_queued[rw] <= 0);
		tg->td->nr_queued[rw]--;
	}

	throtl_trim_slice(tg, rw);

	if (tg_to_put)
		blkg_put(tg_to_blkg(tg_to_put));
}

static int throtl_dispatch_tg(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = throtl_grp_quantum * 3 / 4;
	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */

	while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}

static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
{
	unsigned int nr_disp = 0;

	while (1) {
		struct throtl_grp *tg = throtl_rb_first(parent_sq);
		struct throtl_service_queue *sq;

		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		throtl_dequeue_tg(tg);

		nr_disp += throtl_dispatch_tg(tg);

		sq = &tg->service_queue;
		if (sq->nr_queued[0] || sq->nr_queued[1])
			tg_update_disptime(tg);

		if (nr_disp >= throtl_quantum)
			break;
	}

	return nr_disp;
}

static bool throtl_can_upgrade(struct throtl_data *td,
			       struct throtl_grp *this_tg);
/**
 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
 * @t: the pending_timer member of the throtl_service_queue being serviced
 *
 * This timer is armed when a child throtl_grp with active bio's becomes
 * pending and queued on the service_queue's pending_tree and expires when
 * the first child throtl_grp should be dispatched. This function
 * dispatches bio's from the children throtl_grps to the parent
 * service_queue.
 *
 * If the parent's parent is another throtl_grp, dispatching is propagated
 * by either arming its pending_timer or repeating dispatch directly. If
 * the top-level service_tree is reached, throtl_data->dispatch_work is
 * kicked so that the ready bio's are issued.
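 *
 * (Illustrative) for a group /a/b with bios queued on b: b's timer moves
 * bios to a, a's timer (or a direct repeat of the dispatch loop) moves
 * them to the top-level service_queue, and dispatch_work finally issues
 * them.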
 */
static void throtl_pending_timer_fn(struct timer_list *t)
{
	struct throtl_service_queue *sq = from_timer(sq, t, pending_timer);
	struct throtl_grp *tg = sq_to_tg(sq);
	struct throtl_data *td = sq_to_td(sq);
	struct request_queue *q = td->queue;
	struct throtl_service_queue *parent_sq;
	bool dispatched;
	int ret;

	spin_lock_irq(q->queue_lock);
	if (throtl_can_upgrade(td, NULL))
		throtl_upgrade_state(td);

again:
	parent_sq = sq->parent_sq;
	dispatched = false;

	while (true) {
		throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
			   sq->nr_queued[READ] + sq->nr_queued[WRITE],
			   sq->nr_queued[READ], sq->nr_queued[WRITE]);

		ret = throtl_select_dispatch(sq);
		if (ret) {
			throtl_log(sq, "bios disp=%u", ret);
			dispatched = true;
		}

		if (throtl_schedule_next_dispatch(sq, false))
			break;

		/* this dispatch window is still open, relax and repeat */
		spin_unlock_irq(q->queue_lock);
		cpu_relax();
		spin_lock_irq(q->queue_lock);
	}

	if (!dispatched)
		goto out_unlock;

	if (parent_sq) {
		/* @parent_sq is another throtl_grp, propagate dispatch */
		if (tg->flags & THROTL_TG_WAS_EMPTY) {
			tg_update_disptime(tg);
			if (!throtl_schedule_next_dispatch(parent_sq, false)) {
				/* window is already open, repeat dispatching */
				sq = parent_sq;
				tg = sq_to_tg(sq);
				goto again;
			}
		}
	} else {
		/* reached the top-level, queue issuing */
		queue_work(kthrotld_workqueue, &td->dispatch_work);
	}
out_unlock:
	spin_unlock_irq(q->queue_lock);
}

/**
 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
 * @work: work item being executed
 *
 * This function is queued for execution when bio's reach the bio_lists[]
 * of throtl_data->service_queue. Those bio's are ready and issued by this
 * function.
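 *
 * (Illustrative) the queued bios are spliced onto an on-stack list under
 * the queue lock and submitted outside it, under a plug, so a long
 * backlog doesn't hold the lock across generic_make_request().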
 */
static void blk_throtl_dispatch_work_fn(struct work_struct *work)
{
	struct throtl_data *td = container_of(work, struct throtl_data,
					      dispatch_work);
	struct throtl_service_queue *td_sq = &td->service_queue;
	struct request_queue *q = td->queue;
	struct bio_list bio_list_on_stack;
	struct bio *bio;
	struct blk_plug plug;
	int rw;

	bio_list_init(&bio_list_on_stack);

	spin_lock_irq(q->queue_lock);
	for (rw = READ; rw <= WRITE; rw++)
		while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
			bio_list_add(&bio_list_on_stack, bio);
	spin_unlock_irq(q->queue_lock);

	if (!bio_list_empty(&bio_list_on_stack)) {
		blk_start_plug(&plug);
		while ((bio = bio_list_pop(&bio_list_on_stack)))
			generic_make_request(bio);
		blk_finish_plug(&plug);
	}
}

static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
			      int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	u64 v = *(u64 *)((void *)tg + off);

	if (v == U64_MAX)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	unsigned int v = *(unsigned int *)((void *)tg + off);

	if (v == UINT_MAX)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static int tg_print_conf_u64(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}

static int tg_print_conf_uint(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}

static void tg_conf_updated(struct throtl_grp *tg, bool global)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	throtl_log(&tg->service_queue,
		   "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
		   tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
		   tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));

	/*
	 * Update has_rules[] flags for the updated tg's subtree. A tg is
	 * considered to have rules if either the tg itself or any of its
	 * ancestors has rules. This identifies groups without any
	 * restrictions in the whole hierarchy and allows them to bypass
	 * blk-throttle.
	 */
	blkg_for_each_descendant_pre(blkg, pos_css,
			global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
		struct throtl_grp *this_tg = blkg_to_tg(blkg);
		struct throtl_grp *parent_tg;

		tg_update_has_rules(this_tg);
		/* ignore root/second level */
		if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
		    !blkg->parent->parent)
			continue;
		parent_tg = blkg_to_tg(blkg->parent);
		/*
		 * make sure all children have an idle time threshold at
		 * most the parent's and a latency target at least the
		 * parent's
		 */
		this_tg->idletime_threshold = min(this_tg->idletime_threshold,
				parent_tg->idletime_threshold);
		this_tg->latency_target = max(this_tg->latency_target,
				parent_tg->latency_target);
	}

	/*
	 * We're already holding queue_lock and know @tg is valid. Let's
	 * apply the new config directly.
1423 * 1424 * Restart the slices for both READ and WRITES. It might happen 1425 * that a group's limit are dropped suddenly and we don't want to 1426 * account recently dispatched IO with new low rate. 1427 */ 1428 throtl_start_new_slice(tg, 0); 1429 throtl_start_new_slice(tg, 1); 1430 1431 if (tg->flags & THROTL_TG_PENDING) { 1432 tg_update_disptime(tg); 1433 throtl_schedule_next_dispatch(sq->parent_sq, true); 1434 } 1435 } 1436 1437 static ssize_t tg_set_conf(struct kernfs_open_file *of, 1438 char *buf, size_t nbytes, loff_t off, bool is_u64) 1439 { 1440 struct blkcg *blkcg = css_to_blkcg(of_css(of)); 1441 struct blkg_conf_ctx ctx; 1442 struct throtl_grp *tg; 1443 int ret; 1444 u64 v; 1445 1446 ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx); 1447 if (ret) 1448 return ret; 1449 1450 ret = -EINVAL; 1451 if (sscanf(ctx.body, "%llu", &v) != 1) 1452 goto out_finish; 1453 if (!v) 1454 v = U64_MAX; 1455 1456 tg = blkg_to_tg(ctx.blkg); 1457 1458 if (is_u64) 1459 *(u64 *)((void *)tg + of_cft(of)->private) = v; 1460 else 1461 *(unsigned int *)((void *)tg + of_cft(of)->private) = v; 1462 1463 tg_conf_updated(tg, false); 1464 ret = 0; 1465 out_finish: 1466 blkg_conf_finish(&ctx); 1467 return ret ?: nbytes; 1468 } 1469 1470 static ssize_t tg_set_conf_u64(struct kernfs_open_file *of, 1471 char *buf, size_t nbytes, loff_t off) 1472 { 1473 return tg_set_conf(of, buf, nbytes, off, true); 1474 } 1475 1476 static ssize_t tg_set_conf_uint(struct kernfs_open_file *of, 1477 char *buf, size_t nbytes, loff_t off) 1478 { 1479 return tg_set_conf(of, buf, nbytes, off, false); 1480 } 1481 1482 static struct cftype throtl_legacy_files[] = { 1483 { 1484 .name = "throttle.read_bps_device", 1485 .private = offsetof(struct throtl_grp, bps[READ][LIMIT_MAX]), 1486 .seq_show = tg_print_conf_u64, 1487 .write = tg_set_conf_u64, 1488 }, 1489 { 1490 .name = "throttle.write_bps_device", 1491 .private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]), 1492 .seq_show = tg_print_conf_u64, 1493 .write = tg_set_conf_u64, 1494 }, 1495 { 1496 .name = "throttle.read_iops_device", 1497 .private = offsetof(struct throtl_grp, iops[READ][LIMIT_MAX]), 1498 .seq_show = tg_print_conf_uint, 1499 .write = tg_set_conf_uint, 1500 }, 1501 { 1502 .name = "throttle.write_iops_device", 1503 .private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]), 1504 .seq_show = tg_print_conf_uint, 1505 .write = tg_set_conf_uint, 1506 }, 1507 { 1508 .name = "throttle.io_service_bytes", 1509 .private = (unsigned long)&blkcg_policy_throtl, 1510 .seq_show = blkg_print_stat_bytes, 1511 }, 1512 { 1513 .name = "throttle.io_service_bytes_recursive", 1514 .private = (unsigned long)&blkcg_policy_throtl, 1515 .seq_show = blkg_print_stat_bytes_recursive, 1516 }, 1517 { 1518 .name = "throttle.io_serviced", 1519 .private = (unsigned long)&blkcg_policy_throtl, 1520 .seq_show = blkg_print_stat_ios, 1521 }, 1522 { 1523 .name = "throttle.io_serviced_recursive", 1524 .private = (unsigned long)&blkcg_policy_throtl, 1525 .seq_show = blkg_print_stat_ios_recursive, 1526 }, 1527 { } /* terminate */ 1528 }; 1529 1530 static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd, 1531 int off) 1532 { 1533 struct throtl_grp *tg = pd_to_tg(pd); 1534 const char *dname = blkg_dev_name(pd->blkg); 1535 char bufs[4][21] = { "max", "max", "max", "max" }; 1536 u64 bps_dft; 1537 unsigned int iops_dft; 1538 char idle_time[26] = ""; 1539 char latency_time[26] = ""; 1540 1541 if (!dname) 1542 return 0; 1543 1544 if (off == LIMIT_LOW) { 1545 bps_dft = 0; 1546 
		iops_dft = 0;
	} else {
		bps_dft = U64_MAX;
		iops_dft = UINT_MAX;
	}

	if (tg->bps_conf[READ][off] == bps_dft &&
	    tg->bps_conf[WRITE][off] == bps_dft &&
	    tg->iops_conf[READ][off] == iops_dft &&
	    tg->iops_conf[WRITE][off] == iops_dft &&
	    (off != LIMIT_LOW ||
	     (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD &&
	      tg->latency_target_conf == DFL_LATENCY_TARGET)))
		return 0;

	if (tg->bps_conf[READ][off] != U64_MAX)
		snprintf(bufs[0], sizeof(bufs[0]), "%llu",
			tg->bps_conf[READ][off]);
	if (tg->bps_conf[WRITE][off] != U64_MAX)
		snprintf(bufs[1], sizeof(bufs[1]), "%llu",
			tg->bps_conf[WRITE][off]);
	if (tg->iops_conf[READ][off] != UINT_MAX)
		snprintf(bufs[2], sizeof(bufs[2]), "%u",
			tg->iops_conf[READ][off]);
	if (tg->iops_conf[WRITE][off] != UINT_MAX)
		snprintf(bufs[3], sizeof(bufs[3]), "%u",
			tg->iops_conf[WRITE][off]);
	if (off == LIMIT_LOW) {
		if (tg->idletime_threshold_conf == ULONG_MAX)
			strcpy(idle_time, " idle=max");
		else
			snprintf(idle_time, sizeof(idle_time), " idle=%lu",
				tg->idletime_threshold_conf);

		if (tg->latency_target_conf == ULONG_MAX)
			strcpy(latency_time, " latency=max");
		else
			snprintf(latency_time, sizeof(latency_time),
				" latency=%lu", tg->latency_target_conf);
	}

	seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n",
		   dname, bufs[0], bufs[1], bufs[2], bufs[3], idle_time,
		   latency_time);
	return 0;
}

static int tg_print_limit(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}

static ssize_t tg_set_limit(struct kernfs_open_file *of,
			    char *buf, size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	struct throtl_grp *tg;
	u64 v[4];
	unsigned long idle_time;
	unsigned long latency_time;
	int ret;
	int index = of_cft(of)->private;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
	if (ret)
		return ret;

	tg = blkg_to_tg(ctx.blkg);

	v[0] = tg->bps_conf[READ][index];
	v[1] = tg->bps_conf[WRITE][index];
	v[2] = tg->iops_conf[READ][index];
	v[3] = tg->iops_conf[WRITE][index];

	idle_time = tg->idletime_threshold_conf;
	latency_time = tg->latency_target_conf;
	while (true) {
		char tok[27];	/* wiops=18446744073709551616 */
		char *p;
		u64 val = U64_MAX;
		int len;

		if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
			break;
		if (tok[0] == '\0')
			break;
		ctx.body += len;

		ret = -EINVAL;
		p = tok;
		strsep(&p, "=");
		if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
			goto out_finish;

		ret = -ERANGE;
		if (!val)
			goto out_finish;

		ret = -EINVAL;
		if (!strcmp(tok, "rbps"))
			v[0] = val;
		else if (!strcmp(tok, "wbps"))
			v[1] = val;
		else if (!strcmp(tok, "riops"))
			v[2] = min_t(u64, val, UINT_MAX);
		else if (!strcmp(tok, "wiops"))
			v[3] = min_t(u64, val, UINT_MAX);
		else if (off == LIMIT_LOW && !strcmp(tok, "idle"))
			idle_time = val;
		else if (off == LIMIT_LOW && !strcmp(tok, "latency"))
			latency_time = val;
		else
			goto out_finish;
	}

	tg->bps_conf[READ][index] = v[0];
	tg->bps_conf[WRITE][index] = v[1];
	tg->iops_conf[READ][index] = v[2];
	tg->iops_conf[WRITE][index] = v[3];

	if (index == LIMIT_MAX) {
		tg->bps[READ][index] = v[0];
		tg->bps[WRITE][index] = v[1];
		tg->iops[READ][index] = v[2];
		tg->iops[WRITE][index] = v[3];
	}
	tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW],
		tg->bps_conf[READ][LIMIT_MAX]);
	tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],
		tg->bps_conf[WRITE][LIMIT_MAX]);
	tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW],
		tg->iops_conf[READ][LIMIT_MAX]);
	tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
		tg->iops_conf[WRITE][LIMIT_MAX]);
	tg->idletime_threshold_conf = idle_time;
	tg->latency_target_conf = latency_time;

	/* force user to configure all settings for low limit */
	if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] ||
	      tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) ||
	    tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD ||
	    tg->latency_target_conf == DFL_LATENCY_TARGET) {
		tg->bps[READ][LIMIT_LOW] = 0;
		tg->bps[WRITE][LIMIT_LOW] = 0;
		tg->iops[READ][LIMIT_LOW] = 0;
		tg->iops[WRITE][LIMIT_LOW] = 0;
		tg->idletime_threshold = DFL_IDLE_THRESHOLD;
		tg->latency_target = DFL_LATENCY_TARGET;
	} else if (index == LIMIT_LOW) {
		tg->idletime_threshold = tg->idletime_threshold_conf;
		tg->latency_target = tg->latency_target_conf;
	}

	blk_throtl_update_limit_valid(tg->td);
	if (tg->td->limit_valid[LIMIT_LOW]) {
		if (index == LIMIT_LOW)
			tg->td->limit_index = LIMIT_LOW;
	} else
		tg->td->limit_index = LIMIT_MAX;
	tg_conf_updated(tg, index == LIMIT_LOW &&
		tg->td->limit_valid[LIMIT_LOW]);
	ret = 0;
out_finish:
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}

static struct cftype throtl_files[] = {
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	{
		.name = "low",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = tg_print_limit,
		.write = tg_set_limit,
		.private = LIMIT_LOW,
	},
#endif
	{
		.name = "max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = tg_print_limit,
		.write = tg_set_limit,
		.private = LIMIT_MAX,
	},
	{ }	/* terminate */
};

static void throtl_shutdown_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_work_sync(&td->dispatch_work);
}

static struct blkcg_policy blkcg_policy_throtl = {
	.dfl_cftypes		= throtl_files,
	.legacy_cftypes		= throtl_legacy_files,

	.pd_alloc_fn		= throtl_pd_alloc,
	.pd_init_fn		= throtl_pd_init,
	.pd_online_fn		= throtl_pd_online,
	.pd_offline_fn		= throtl_pd_offline,
	.pd_free_fn		= throtl_pd_free,
};

static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg)
{
	unsigned long rtime = jiffies, wtime = jiffies;

	if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW])
		rtime = tg->last_low_overflow_time[READ];
	if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
		wtime = tg->last_low_overflow_time[WRITE];
	return min(rtime, wtime);
}

/* tg should not be an intermediate node */
static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg)
{
	struct throtl_service_queue *parent_sq;
	struct throtl_grp *parent = tg;
	unsigned long ret = __tg_last_low_overflow_time(tg);

	while (true) {
		parent_sq = parent->service_queue.parent_sq;
		parent = sq_to_tg(parent_sq);
		if (!parent)
			break;

		/*
		 * The parent doesn't have a low limit, so it always reaches
		 * its low limit. Its overflow time is useless for its
		 * children.
		 */
		if (!parent->bps[READ][LIMIT_LOW] &&
		    !parent->iops[READ][LIMIT_LOW] &&
		    !parent->bps[WRITE][LIMIT_LOW] &&
		    !parent->iops[WRITE][LIMIT_LOW])
			continue;
		if (time_after(__tg_last_low_overflow_time(parent), ret))
			ret = __tg_last_low_overflow_time(parent);
	}
	return ret;
}

static bool throtl_tg_is_idle(struct throtl_grp *tg)
{
	/*
	 * cgroup is idle if:
	 * - a single idle period is too long: longer than a fixed value (in
	 *   case the user configures a too-big threshold) or 4 times the
	 *   idletime threshold
	 * - average think time is more than the threshold
	 * - IO latency is largely below the threshold
	 */
	unsigned long time;
	bool ret;

	time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold);
	ret = tg->latency_target == DFL_LATENCY_TARGET ||
	      tg->idletime_threshold == DFL_IDLE_THRESHOLD ||
	      (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
	      tg->avg_idletime > tg->idletime_threshold ||
	      (tg->latency_target && tg->bio_cnt &&
	       tg->bad_bio_cnt * 5 < tg->bio_cnt);
	throtl_log(&tg->service_queue,
		"avg_idle=%ld, idle_threshold=%ld, bad_bio=%d, total_bio=%d, is_idle=%d, scale=%d",
		tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt,
		tg->bio_cnt, ret, tg->td->scale);
	return ret;
}

static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	bool read_limit, write_limit;

	/*
	 * if cgroup reaches low limit (if low limit is 0, the cgroup always
	 * reaches), it's ok to upgrade to next limit
	 */
	read_limit = tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW];
	write_limit = tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW];
	if (!read_limit && !write_limit)
		return true;
	if (read_limit && sq->nr_queued[READ] &&
	    (!write_limit || sq->nr_queued[WRITE]))
		return true;
	if (write_limit && sq->nr_queued[WRITE] &&
	    (!read_limit || sq->nr_queued[READ]))
		return true;

	if (time_after_eq(jiffies,
		tg_last_low_overflow_time(tg) + tg->td->throtl_slice) &&
	    throtl_tg_is_idle(tg))
		return true;
	return false;
}

static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg)
{
	while (true) {
		if (throtl_tg_can_upgrade(tg))
			return true;
		tg = sq_to_tg(tg->service_queue.parent_sq);
		if (!tg || !tg_to_blkg(tg)->parent)
			return false;
	}
	return false;
}

static bool throtl_can_upgrade(struct throtl_data *td,
			       struct throtl_grp *this_tg)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	if (td->limit_index != LIMIT_LOW)
		return false;

	if (time_before(jiffies, td->low_downgrade_time + td->throtl_slice))
		return false;

	rcu_read_lock();
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);

		if (tg == this_tg)
			continue;
		if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
			continue;
		if (!throtl_hierarchy_can_upgrade(tg)) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

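/*
 * Illustrative timeline (not from the original source): once every group
 * either has IO queued against its .low limit or is idle,
 * throtl_upgrade_state() switches the device to LIMIT_MAX and resets
 * ->scale; if a group then stays below its .low for a throtl_slice,
 * throtl_downgrade_state() halves ->scale and eventually drops the whole
 * device back to LIMIT_LOW.
 */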

static void throtl_upgrade_check(struct throtl_grp *tg)
{
	unsigned long now = jiffies;

	if (tg->td->limit_index != LIMIT_LOW)
		return;

	if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
		return;

	tg->last_check_time = now;

	if (!time_after_eq(now,
	     __tg_last_low_overflow_time(tg) + tg->td->throtl_slice))
		return;

	if (throtl_can_upgrade(tg->td, NULL))
		throtl_upgrade_state(tg->td);
}

static void throtl_upgrade_state(struct throtl_data *td)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	throtl_log(&td->service_queue, "upgrade to max");
	td->limit_index = LIMIT_MAX;
	td->low_upgrade_time = jiffies;
	td->scale = 0;
	rcu_read_lock();
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);
		struct throtl_service_queue *sq = &tg->service_queue;

		tg->disptime = jiffies - 1;
		throtl_select_dispatch(sq);
		throtl_schedule_next_dispatch(sq, true);
	}
	rcu_read_unlock();
	throtl_select_dispatch(&td->service_queue);
	throtl_schedule_next_dispatch(&td->service_queue, true);
	queue_work(kthrotld_workqueue, &td->dispatch_work);
}
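
/*
 * Note on td->scale (a summary; the scale is grown in
 * throtl_adjusted_limit() earlier in this file): while the queue runs at
 * LIMIT_MAX, scale grows with the time since the last upgrade and
 * inflates the effective low limits.  Each downgrade attempt below
 * halves the scale, and only when it reaches zero does the queue
 * actually return to LIMIT_LOW; rewinding low_upgrade_time keeps the
 * inflated limits consistent with the halved scale.
 */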

static void throtl_downgrade_state(struct throtl_data *td, int new)
{
	td->scale /= 2;

	throtl_log(&td->service_queue, "downgrade, scale %d", td->scale);
	if (td->scale) {
		td->low_upgrade_time = jiffies - td->scale * td->throtl_slice;
		return;
	}

	td->limit_index = new;
	td->low_downgrade_time = jiffies;
}

static bool throtl_tg_can_downgrade(struct throtl_grp *tg)
{
	struct throtl_data *td = tg->td;
	unsigned long now = jiffies;

	/*
	 * If the cgroup is below its low limit, consider downgrading and
	 * throttling the other cgroups.
	 */
	if (time_after_eq(now, td->low_upgrade_time + td->throtl_slice) &&
	    time_after_eq(now, tg_last_low_overflow_time(tg) +
				td->throtl_slice) &&
	    (!throtl_tg_is_idle(tg) ||
	     !list_empty(&tg_to_blkg(tg)->blkcg->css.children)))
		return true;
	return false;
}

static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg)
{
	while (true) {
		if (!throtl_tg_can_downgrade(tg))
			return false;
		tg = sq_to_tg(tg->service_queue.parent_sq);
		if (!tg || !tg_to_blkg(tg)->parent)
			break;
	}
	return true;
}

static void throtl_downgrade_check(struct throtl_grp *tg)
{
	uint64_t bps;
	unsigned int iops;
	unsigned long elapsed_time;
	unsigned long now = jiffies;

	if (tg->td->limit_index != LIMIT_MAX ||
	    !tg->td->limit_valid[LIMIT_LOW])
		return;
	if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
		return;
	if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
		return;

	elapsed_time = now - tg->last_check_time;
	tg->last_check_time = now;

	if (time_before(now, tg_last_low_overflow_time(tg) +
			tg->td->throtl_slice))
		return;

	if (tg->bps[READ][LIMIT_LOW]) {
		bps = tg->last_bytes_disp[READ] * HZ;
		do_div(bps, elapsed_time);
		if (bps >= tg->bps[READ][LIMIT_LOW])
			tg->last_low_overflow_time[READ] = now;
	}

	if (tg->bps[WRITE][LIMIT_LOW]) {
		bps = tg->last_bytes_disp[WRITE] * HZ;
		do_div(bps, elapsed_time);
		if (bps >= tg->bps[WRITE][LIMIT_LOW])
			tg->last_low_overflow_time[WRITE] = now;
	}

	if (tg->iops[READ][LIMIT_LOW]) {
		iops = tg->last_io_disp[READ] * HZ / elapsed_time;
		if (iops >= tg->iops[READ][LIMIT_LOW])
			tg->last_low_overflow_time[READ] = now;
	}

	if (tg->iops[WRITE][LIMIT_LOW]) {
		iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
		if (iops >= tg->iops[WRITE][LIMIT_LOW])
			tg->last_low_overflow_time[WRITE] = now;
	}

	/*
	 * If the cgroup is below its low limit, consider downgrading and
	 * throttling the other cgroups.
	 */
	if (throtl_hierarchy_can_downgrade(tg))
		throtl_downgrade_state(tg->td, LIMIT_LOW);

	tg->last_bytes_disp[READ] = 0;
	tg->last_bytes_disp[WRITE] = 0;
	tg->last_io_disp[READ] = 0;
	tg->last_io_disp[WRITE] = 0;
}
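
/*
 * The per-cgroup think time below is an exponentially weighted moving
 * average with a 7/8 decay factor, in the same ns/1024 units as
 * last_finish_time:
 *
 *	avg_idletime = (avg_idletime * 7 + (now - last_finish_time)) / 8
 *
 * A single sample therefore contributes 1/8 of its value immediately and
 * decays geometrically on each further update.
 */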

static void blk_throtl_update_idletime(struct throtl_grp *tg)
{
	unsigned long now = ktime_get_ns() >> 10;
	unsigned long last_finish_time = tg->last_finish_time;

	if (now <= last_finish_time || last_finish_time == 0 ||
	    last_finish_time == tg->checked_last_finish_time)
		return;

	tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3;
	tg->checked_last_finish_time = last_finish_time;
}
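
/*
 * throtl_update_latency_buckets() below folds the per-cpu latency samples
 * into td->avg_buckets[] at most once per second.  A bucket needs at
 * least 32 fresh samples to produce a new average, which is then blended
 * with the old value using the same 7/8 weighting, and each bucket is
 * clamped to be no smaller than the previous (smaller request size)
 * bucket so that the latency thresholds grow monotonically with IO size.
 */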

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static void throtl_update_latency_buckets(struct throtl_data *td)
{
	struct avg_latency_bucket avg_latency[2][LATENCY_BUCKET_SIZE];
	int i, cpu, rw;
	unsigned long last_latency[2] = { 0 };
	unsigned long latency[2];

	if (!blk_queue_nonrot(td->queue))
		return;
	if (time_before(jiffies, td->last_calculate_time + HZ))
		return;
	td->last_calculate_time = jiffies;

	memset(avg_latency, 0, sizeof(avg_latency));
	for (rw = READ; rw <= WRITE; rw++) {
		for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
			struct latency_bucket *tmp = &td->tmp_buckets[rw][i];

			for_each_possible_cpu(cpu) {
				struct latency_bucket *bucket;

				/* this isn't race free, but ok in practice */
				bucket = per_cpu_ptr(td->latency_buckets[rw],
					cpu);
				tmp->total_latency += bucket[i].total_latency;
				tmp->samples += bucket[i].samples;
				bucket[i].total_latency = 0;
				bucket[i].samples = 0;
			}

			if (tmp->samples >= 32) {
				int samples = tmp->samples;

				latency[rw] = tmp->total_latency;

				tmp->total_latency = 0;
				tmp->samples = 0;
				latency[rw] /= samples;
				if (latency[rw] == 0)
					continue;
				avg_latency[rw][i].latency = latency[rw];
			}
		}
	}

	for (rw = READ; rw <= WRITE; rw++) {
		for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
			if (!avg_latency[rw][i].latency) {
				if (td->avg_buckets[rw][i].latency < last_latency[rw])
					td->avg_buckets[rw][i].latency =
						last_latency[rw];
				continue;
			}

			if (!td->avg_buckets[rw][i].valid)
				latency[rw] = avg_latency[rw][i].latency;
			else
				latency[rw] = (td->avg_buckets[rw][i].latency * 7 +
					avg_latency[rw][i].latency) >> 3;

			td->avg_buckets[rw][i].latency = max(latency[rw],
				last_latency[rw]);
			td->avg_buckets[rw][i].valid = true;
			last_latency[rw] = td->avg_buckets[rw][i].latency;
		}
	}

	for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
		throtl_log(&td->service_queue,
			"Latency bucket %d: read latency=%ld, read valid=%d, "
			"write latency=%ld, write valid=%d", i,
			td->avg_buckets[READ][i].latency,
			td->avg_buckets[READ][i].valid,
			td->avg_buckets[WRITE][i].latency,
			td->avg_buckets[WRITE][i].valid);
}
#else
static inline void throtl_update_latency_buckets(struct throtl_data *td)
{
}
#endif

static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio)
{
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	if (bio->bi_css) {
		if (bio->bi_cg_private)
			blkg_put(tg_to_blkg(bio->bi_cg_private));
		bio->bi_cg_private = tg;
		blkg_get(tg_to_blkg(tg));
	}
	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
#endif
}
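
/*
 * blk_throtl_bio() is the submission-path entry point, called with the
 * rcu read lock held.  A bio that fits within the limits at every level
 * of the hierarchy is charged and passed through; otherwise it is queued
 * at the first level that throttles it and will be propagated upwards
 * later by the pending timer and dispatch work.
 */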

bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
		    struct bio *bio)
{
	struct throtl_qnode *qn = NULL;
	struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
	struct throtl_service_queue *sq;
	bool rw = bio_data_dir(bio);
	bool throttled = false;
	struct throtl_data *td = tg->td;

	WARN_ON_ONCE(!rcu_read_lock_held());

	/* see throtl_charge_bio() */
	if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw])
		goto out;

	spin_lock_irq(q->queue_lock);

	throtl_update_latency_buckets(td);

	if (unlikely(blk_queue_bypass(q)))
		goto out_unlock;

	blk_throtl_assoc_bio(tg, bio);
	blk_throtl_update_idletime(tg);

	sq = &tg->service_queue;

again:
	while (true) {
		if (tg->last_low_overflow_time[rw] == 0)
			tg->last_low_overflow_time[rw] = jiffies;
		throtl_downgrade_check(tg);
		throtl_upgrade_check(tg);
		/* throtl is FIFO - if bios are already queued, should queue */
		if (sq->nr_queued[rw])
			break;

		/* if above limits, break to queue */
		if (!tg_may_dispatch(tg, bio, NULL)) {
			tg->last_low_overflow_time[rw] = jiffies;
			if (throtl_can_upgrade(td, tg)) {
				throtl_upgrade_state(td);
				goto again;
			}
			break;
		}

		/* within limits, let's charge and dispatch directly */
		throtl_charge_bio(tg, bio);

		/*
		 * We need to trim the slice even when bios are not being
		 * queued, otherwise it might happen that a bio is not queued
		 * for a long time, the slice keeps on extending and trim is
		 * not called for a long time.  Then, if limits are reduced
		 * suddenly, we take into account all the IO dispatched so
		 * far at the new low rate and newly queued IO gets a really
		 * long dispatch time.
		 *
		 * So keep on trimming the slice even if a bio is not queued.
		 */
		throtl_trim_slice(tg, rw);

		/*
		 * @bio passed through this layer without being throttled.
		 * Climb up the ladder. If we're already at the top, it
		 * can be executed directly.
		 */
		qn = &tg->qnode_on_parent[rw];
		sq = sq->parent_sq;
		tg = sq_to_tg(sq);
		if (!tg)
			goto out_unlock;
	}

	/* out-of-limit, queue to @tg */
	throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
		   rw == READ ? 'R' : 'W',
		   tg->bytes_disp[rw], bio->bi_iter.bi_size,
		   tg_bps_limit(tg, rw),
		   tg->io_disp[rw], tg_iops_limit(tg, rw),
		   sq->nr_queued[READ], sq->nr_queued[WRITE]);

	tg->last_low_overflow_time[rw] = jiffies;

	td->nr_queued[rw]++;
	throtl_add_bio_tg(bio, qn, tg);
	throttled = true;

	/*
	 * Update @tg's dispatch time and force schedule dispatch if @tg
	 * was empty before @bio.  The forced scheduling isn't likely to
	 * cause undue delay as @bio is likely to be dispatched directly if
	 * its @tg's disptime is not in the future.
	 */
	if (tg->flags & THROTL_TG_WAS_EMPTY) {
		tg_update_disptime(tg);
		throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
	}

out_unlock:
	spin_unlock_irq(q->queue_lock);
out:
	bio_set_flag(bio, BIO_THROTTLED);

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	if (throttled || !td->track_bio_latency)
		bio->bi_issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY;
#endif
	return throttled;
}
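
/*
 * Completion side of the latency tracking: throtl_track_latency() below
 * accumulates per-cpu samples for throtl_update_latency_buckets(), and
 * blk_throtl_bio_endio() compares each bio's latency against the bucket
 * average plus the cgroup's latency target, feeding the bad_bio_cnt /
 * bio_cnt counters that throtl_tg_is_idle() consults.
 */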

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static void throtl_track_latency(struct throtl_data *td, sector_t size,
	int op, unsigned long time)
{
	struct latency_bucket *latency;
	int index;

	if (!td || td->limit_index != LIMIT_LOW ||
	    !(op == REQ_OP_READ || op == REQ_OP_WRITE) ||
	    !blk_queue_nonrot(td->queue))
		return;

	index = request_bucket_index(size);

	latency = get_cpu_ptr(td->latency_buckets[op]);
	latency[index].total_latency += time;
	latency[index].samples++;
	put_cpu_ptr(td->latency_buckets[op]);
}

void blk_throtl_stat_add(struct request *rq, u64 time_ns)
{
	struct request_queue *q = rq->q;
	struct throtl_data *td = q->td;

	throtl_track_latency(td, rq->throtl_size, req_op(rq), time_ns >> 10);
}

void blk_throtl_bio_endio(struct bio *bio)
{
	struct throtl_grp *tg;
	u64 finish_time_ns;
	unsigned long finish_time;
	unsigned long start_time;
	unsigned long lat;
	int rw = bio_data_dir(bio);

	tg = bio->bi_cg_private;
	if (!tg)
		return;
	bio->bi_cg_private = NULL;

	finish_time_ns = ktime_get_ns();
	tg->last_finish_time = finish_time_ns >> 10;

	start_time = bio_issue_time(&bio->bi_issue) >> 10;
	finish_time = __bio_issue_time(finish_time_ns) >> 10;
	if (!start_time || finish_time <= start_time) {
		blkg_put(tg_to_blkg(tg));
		return;
	}

	lat = finish_time - start_time;
	/* this is only for bio based driver */
	if (!(bio->bi_issue.value & BIO_ISSUE_THROTL_SKIP_LATENCY))
		throtl_track_latency(tg->td, bio_issue_size(&bio->bi_issue),
				     bio_op(bio), lat);

	if (tg->latency_target && lat >= tg->td->filtered_latency) {
		int bucket;
		unsigned int threshold;

		bucket = request_bucket_index(bio_issue_size(&bio->bi_issue));
		threshold = tg->td->avg_buckets[rw][bucket].latency +
			tg->latency_target;
		if (lat > threshold)
			tg->bad_bio_cnt++;
		/*
		 * Not race free, so the count can be slightly wrong, which
		 * means a cgroup might be wrongly throttled.
		 */
		tg->bio_cnt++;
	}

	if (time_after(jiffies, tg->bio_cnt_reset_time) || tg->bio_cnt > 1024) {
		tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies;
		tg->bio_cnt /= 2;
		tg->bad_bio_cnt /= 2;
	}

	blkg_put(tg_to_blkg(tg));
}
#endif

/*
 * Dispatch all bios from all children tg's queued on @parent_sq.  On
 * return, @parent_sq is guaranteed to not have any active children tg's
 * and all bios from previously active tg's are on @parent_sq->bio_lists[].
 */
static void tg_drain_bios(struct throtl_service_queue *parent_sq)
{
	struct throtl_grp *tg;

	while ((tg = throtl_rb_first(parent_sq))) {
		struct throtl_service_queue *sq = &tg->service_queue;
		struct bio *bio;

		throtl_dequeue_tg(tg);

		while ((bio = throtl_peek_queued(&sq->queued[READ])))
			tg_dispatch_one_bio(tg, bio_data_dir(bio));
		while ((bio = throtl_peek_queued(&sq->queued[WRITE])))
			tg_dispatch_one_bio(tg, bio_data_dir(bio));
	}
}

/**
 * blk_throtl_drain - drain throttled bios
 * @q: request_queue to drain throttled bios for
 *
 * Dispatch all currently throttled bios on @q through ->make_request_fn().
 */
void blk_throtl_drain(struct request_queue *q)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct throtl_data *td = q->td;
	struct blkcg_gq *blkg;
	struct cgroup_subsys_state *pos_css;
	struct bio *bio;
	int rw;

	queue_lockdep_assert_held(q);
	rcu_read_lock();

	/*
	 * Drain each tg while doing post-order walk on the blkg tree, so
	 * that all bios are propagated to td->service_queue.  It'd be
	 * better to walk service_queue tree directly but blkg walk is
	 * easier.
	 */
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
		tg_drain_bios(&blkg_to_tg(blkg)->service_queue);

	/* finally, transfer bios from top-level tg's into the td */
	tg_drain_bios(&td->service_queue);

	rcu_read_unlock();
	spin_unlock_irq(q->queue_lock);

	/* all bios now should be in td->service_queue, issue them */
	for (rw = READ; rw <= WRITE; rw++)
		while ((bio = throtl_pop_queued(&td->service_queue.queued[rw],
						NULL)))
			generic_make_request(bio);

	spin_lock_irq(q->queue_lock);
}

int blk_throtl_init(struct request_queue *q)
{
	struct throtl_data *td;
	int ret;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;
	td->latency_buckets[READ] = __alloc_percpu(sizeof(struct latency_bucket) *
		LATENCY_BUCKET_SIZE, __alignof__(u64));
	if (!td->latency_buckets[READ]) {
		kfree(td);
		return -ENOMEM;
	}
	td->latency_buckets[WRITE] = __alloc_percpu(sizeof(struct latency_bucket) *
		LATENCY_BUCKET_SIZE, __alignof__(u64));
	if (!td->latency_buckets[WRITE]) {
		free_percpu(td->latency_buckets[READ]);
		kfree(td);
		return -ENOMEM;
	}

	INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
	throtl_service_queue_init(&td->service_queue);

	q->td = td;
	td->queue = q;

	td->limit_valid[LIMIT_MAX] = true;
	td->limit_index = LIMIT_MAX;
	td->low_upgrade_time = jiffies;
	td->low_downgrade_time = jiffies;

	/* activate policy */
	ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
	if (ret) {
		free_percpu(td->latency_buckets[READ]);
		free_percpu(td->latency_buckets[WRITE]);
		kfree(td);
	}
	return ret;
}

void blk_throtl_exit(struct request_queue *q)
{
	BUG_ON(!q->td);
	throtl_shutdown_wq(q);
	blkcg_deactivate_policy(q, &blkcg_policy_throtl);
	free_percpu(q->td->latency_buckets[READ]);
	free_percpu(q->td->latency_buckets[WRITE]);
	kfree(q->td);
}

void blk_throtl_register_queue(struct request_queue *q)
{
	struct throtl_data *td;
	int i;

	td = q->td;
	BUG_ON(!td);

	if (blk_queue_nonrot(q)) {
		td->throtl_slice = DFL_THROTL_SLICE_SSD;
		td->filtered_latency = LATENCY_FILTERED_SSD;
	} else {
		td->throtl_slice = DFL_THROTL_SLICE_HD;
		td->filtered_latency = LATENCY_FILTERED_HD;
		for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
			td->avg_buckets[READ][i].latency = DFL_HD_BASELINE_LATENCY;
			td->avg_buckets[WRITE][i].latency = DFL_HD_BASELINE_LATENCY;
		}
	}
#ifndef CONFIG_BLK_DEV_THROTTLING_LOW
	/* if no low limit, use previous default */
	td->throtl_slice = DFL_THROTL_SLICE_HD;
#endif

	td->track_bio_latency = !queue_is_rq_based(q);
	if (!td->track_bio_latency)
		blk_stat_enable_accounting(q);
}

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)
{
	if (!q->td)
		return -EINVAL;
	return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice));
}

ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count)
{
	unsigned long v;
	unsigned long t;

	if (!q->td)
		return -EINVAL;
	if (kstrtoul(page, 10, &v))
		return -EINVAL;
	t = msecs_to_jiffies(v);
	if (t == 0 || t > MAX_THROTL_SLICE)
		return -EINVAL;
	q->td->throtl_slice = t;
	return count;
}
#endif

static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	return blkcg_policy_register(&blkcg_policy_throtl);
}

module_init(throtl_init);