/* SPDX-License-Identifier: GPL-2.0
 *
 * IO cost model based controller.
 *
 * Copyright (C) 2019 Tejun Heo <tj@kernel.org>
 * Copyright (C) 2019 Andy Newell <newella@fb.com>
 * Copyright (C) 2019 Facebook
 *
 * One challenge of controlling IO resources is the lack of a trivially
 * observable cost metric. This is distinguished from CPU and memory where
 * wallclock time and the number of bytes can serve as accurate enough
 * approximations.
 *
 * Bandwidth and iops are the most commonly used metrics for IO devices but
 * depending on the type and specifics of the device, different IO patterns
 * easily lead to multiple orders of magnitude variations rendering them
 * useless for the purpose of IO capacity distribution. While on-device
 * time, with a lot of crutches, could serve as a useful approximation for
 * non-queued rotational devices, this is no longer viable with modern
 * devices, even the rotational ones.
 *
 * While there is no cost metric we can trivially observe, it isn't a
 * complete mystery. For example, on a rotational device, seek cost
 * dominates while a contiguous transfer contributes a smaller amount
 * proportional to the size. If we can characterize at least the relative
 * costs of these different types of IOs, it should be possible to
 * implement a reasonable work-conserving proportional IO resource
 * distribution.
 *
 * 1. IO Cost Model
 *
 * The IO cost model estimates the cost of an IO given its basic parameters
 * and history (e.g. the end sector of the last IO). The cost is measured
 * in device time. If a given IO is estimated to cost 10ms, the device
 * should be able to process ~100 of those IOs in a second.
 *
 * Currently, there's only one builtin cost model - linear. Each IO is
 * classified as sequential or random and given a base cost accordingly.
 * On top of that, a size cost proportional to the length of the IO is
 * added. While simple, this model captures the operational
 * characteristics of a wide variety of devices well enough. Default
 * parameters for several different classes of devices are provided and
 * the parameters can be configured from userspace via
 * /sys/fs/cgroup/io.cost.model.
 *
 * If needed, tools/cgroup/iocost_coef_gen.py can be used to generate
 * device-specific coefficients.
 *
 * 2. Control Strategy
 *
 * The device virtual time (vtime) is used as the primary control metric.
 * The control strategy is composed of the following three parts.
 *
 * 2-1. Vtime Distribution
 *
 * When a cgroup becomes active in terms of IOs, its hierarchical share is
 * calculated. Please consider the following hierarchy where the numbers
 * inside parentheses denote the configured weights.
 *
 *           root
 *         /      \
 *      A (w:100) B (w:300)
 *      /      \
 *  A0 (w:100) A1 (w:100)
 *
 * If B is idle and only A0 and A1 are actively issuing IOs, as the two are
 * of equal weight, each gets 50% share. If then B starts issuing IOs, B
 * gets 300/(100+300) or 75% share, and A0 and A1 equally split the rest,
 * 12.5% each. The distribution mechanism only cares about these flattened
 * shares. They're called hweights (hierarchical weights) and always add
 * up to 1 (HWEIGHT_WHOLE).
 *
 * A given cgroup's vtime runs slower in inverse proportion to its hweight.
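 * (As a sketch, A0's 12.5% is the product of its weight ratios walking up
 * the tree: 100/(100+100) * 100/(100+300) = 0.5 * 0.25 = 12.5%, i.e. 8192
 * in HWEIGHT_WHOLE terms.)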
 * For example, with 12.5% weight, A0's time runs 8 times slower (100/12.5)
 * against the device vtime - an IO which takes 10ms on the underlying
 * device is considered to take 80ms on A0.
 *
 * This constitutes the basis of IO capacity distribution. Each cgroup's
 * vtime is running at a rate determined by its hweight. A cgroup tracks
 * the vtime consumed by past IOs and can issue a new IO iff doing so
 * wouldn't outrun the current device vtime. Otherwise, the IO is
 * suspended until the vtime has progressed enough to cover it.
 *
 * 2-2. Vrate Adjustment
 *
 * It's unrealistic to expect the cost model to be perfect. There are too
 * many devices and even on the same device the overall performance
 * fluctuates depending on numerous factors such as IO mixture and device
 * internal garbage collection. The controller needs to adapt dynamically.
 *
 * This is achieved by adjusting the overall IO rate according to how busy
 * the device is. If the device becomes overloaded, we're sending down too
 * many IOs and should generally slow down. If there are waiting issuers
 * but the device isn't saturated, we're issuing too few and should
 * generally speed up.
 *
 * To slow down, we lower the vrate - the rate at which the device vtime
 * passes compared to the wall clock. For example, if the vtime is running
 * at the vrate of 75%, all cgroups added up would only be able to issue
 * 750ms worth of IOs per second, and vice-versa for speeding up.
 *
 * Device busyness is determined using two criteria - rq wait and
 * completion latencies.
 *
 * When a device gets saturated, the on-device and then the request queues
 * fill up and a bio which is ready to be issued has to wait for a request
 * to become available. When this delay becomes noticeable, it's a clear
 * indication that the device is saturated and we lower the vrate. This
 * saturation signal is fairly conservative as it only triggers when both
 * hardware and software queues are filled up, and is used as the default
 * busy signal.
 *
 * As devices can have deep queues and be unfair in how the queued commands
 * are executed, solely depending on rq wait may not result in satisfactory
 * control quality. For a better control quality, completion latency QoS
 * parameters can be configured so that the device is considered saturated
 * if the N'th percentile completion latency rises above the set point.
 *
 * The completion latency requirements are a function of both the
 * underlying device characteristics and the desired IO latency quality of
 * service. There is an inherent trade-off - the tighter the latency QoS,
 * the higher the bandwidth loss. Latency QoS is disabled by default and
 * can be set through /sys/fs/cgroup/io.cost.qos.
 *
 * 2-3. Work Conservation
 *
 * Imagine two cgroups A and B with equal weights. A is issuing a small IO
 * periodically while B is sending out enough parallel IOs to saturate the
 * device on its own. Let's say A's usage amounts to 100ms worth of IO
 * cost per second, i.e., 10% of the device capacity. The naive
 * distribution of half and half would lead to 60% utilization of the
 * device, a significant reduction in the total amount of work done
 * compared to free-for-all competition. This is too high a cost to pay
 * for IO control.
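 *
 * (The 60% figure: with a strict 50/50 split, B is capped at half the
 * device while A only ever uses its 10%, so the device ends up doing
 * 10% + 50% = 60% of the work it could.)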
 *
 * To conserve the total amount of work done, we keep track of how much
 * each active cgroup is actually using and yield part of its weight if
 * there are other cgroups which can make use of it. In the above case,
 * A's weight will be lowered so that it hovers above the actual usage and
 * B would be able to use the rest.
 *
 * As we don't want to penalize a cgroup for donating its weight, the
 * surplus weight adjustment factors in a margin and has an immediate
 * snapback mechanism in case the cgroup needs more IO vtime for itself.
 *
 * Note that adjusting down surplus weights has the same effects as
 * accelerating vtime for other cgroups and work conservation can also be
 * implemented by adjusting vrate dynamically. However, squaring who can
 * donate and should take back how much requires hweight propagations
 * anyway making it easier to implement and understand as a separate
 * mechanism.
 *
 * 3. Monitoring
 *
 * Instead of debugfs or other clumsy monitoring mechanisms, this
 * controller uses a drgn based monitoring script -
 * tools/cgroup/iocost_monitor.py. For details on drgn, please see
 * https://github.com/osandov/drgn. The output looks like the following.
 *
 *  sdb RUN   per=300ms cur_per=234.218:v203.695 busy= +1 vrate= 62.12%
 *                 active      weight      hweight% inflt% dbt  delay usages%
 *  test/a              *    50/   50  33.33/ 33.33  27.65   2  0*041 033:033:033
 *  test/b              *   100/  100  66.67/ 66.67  17.56   0  0*000 066:079:077
 *
 *  - per     : Timer period
 *  - cur_per : Internal wall and device vtime clock
 *  - vrate   : Device virtual time rate against wall clock
 *  - weight  : Surplus-adjusted and configured weights
 *  - hweight : Surplus-adjusted and configured hierarchical weights
 *  - inflt   : The percentage of in-flight IO cost at the end of last period
 *  - delay   : Deferred issuer delay induction level and duration
 *  - usages  : Usage history
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/time64.h>
#include <linux/parser.h>
#include <linux/sched/signal.h>
#include <linux/blk-cgroup.h>
#include "blk-rq-qos.h"
#include "blk-stat.h"
#include "blk-wbt.h"

#ifdef CONFIG_TRACEPOINTS

/* copied from TRACE_CGROUP_PATH, see cgroup-internal.h */
#define TRACE_IOCG_PATH_LEN 1024
static DEFINE_SPINLOCK(trace_iocg_path_lock);
static char trace_iocg_path[TRACE_IOCG_PATH_LEN];

#define TRACE_IOCG_PATH(type, iocg, ...)					\
	do {									\
		unsigned long flags;						\
		if (trace_iocost_##type##_enabled()) {				\
			spin_lock_irqsave(&trace_iocg_path_lock, flags);	\
			cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup,	\
				    trace_iocg_path, TRACE_IOCG_PATH_LEN);	\
			trace_iocost_##type(iocg, trace_iocg_path,		\
					    ##__VA_ARGS__);			\
			spin_unlock_irqrestore(&trace_iocg_path_lock, flags);	\
		}								\
	} while (0)

#else	/* CONFIG_TRACEPOINTS */
#define TRACE_IOCG_PATH(type, iocg, ...)	do { } while (0)
#endif	/* CONFIG_TRACEPOINTS */

enum {
	MILLION			= 1000000,

	/* timer period is calculated from latency requirements, bound it */
	MIN_PERIOD		= USEC_PER_MSEC,
	MAX_PERIOD		= USEC_PER_SEC,

	/*
	 * A cgroup's vtime can run 50% behind the device vtime, which
	 * serves as its IO credit buffer. Surplus weight adjustment is
	 * immediately canceled if the vtime margin runs below 10%.
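	 *
	 * For example, with a 300ms period, an iocg can buffer up to 150ms
	 * worth of vtime credit, and a donated weight is snapped back once
	 * less than 30ms worth of budget remains.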
	 */
	MARGIN_PCT		= 50,
	INUSE_MARGIN_PCT	= 10,

	/* Have some play in waitq timer operations */
	WAITQ_TIMER_MARGIN_PCT	= 5,

	/*
	 * vtime can wrap well within a reasonable uptime when vrate is
	 * consistently raised. Don't trust recorded cgroup vtime if the
	 * period counter indicates that it's older than 5mins.
	 */
	VTIME_VALID_DUR		= 300 * USEC_PER_SEC,

	/*
	 * Remember the past three non-zero usages and use the max for
	 * surplus calculation. Three slots guarantee that we remember one
	 * full period usage from the last active stretch even after
	 * partial deactivation and re-activation periods. Don't start
	 * giving away weight before collecting two data points to prevent
	 * hweight adjustments based on one partial activation period.
	 */
	NR_USAGE_SLOTS		= 3,
	MIN_VALID_USAGES	= 2,

	/* 1/64k is granular enough and can easily be handled w/ u32 */
	HWEIGHT_WHOLE		= 1 << 16,

	/*
	 * As vtime is used to calculate the cost of each IO, it needs to
	 * be fairly high precision. For example, it should be able to
	 * represent the cost of a single page worth of discard with
	 * sufficient accuracy. At the same time, it should be able to
	 * represent reasonably long enough durations to be useful and
	 * convenient during operation.
	 *
	 * 1s worth of vtime is 2^37. This gives us both sub-nanosecond
	 * granularity and days of wrap-around time even at extreme vrates.
	 */
	VTIME_PER_SEC_SHIFT	= 37,
	VTIME_PER_SEC		= 1LLU << VTIME_PER_SEC_SHIFT,
	VTIME_PER_USEC		= VTIME_PER_SEC / USEC_PER_SEC,

	/* bound vrate adjustments within two orders of magnitude */
	VRATE_MIN_PPM		= 10000,	/* 1% */
	VRATE_MAX_PPM		= 100000000,	/* 10000% */

	VRATE_MIN		= VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
	VRATE_CLAMP_ADJ_PCT	= 4,

	/* if IOs end up waiting for requests, issue less */
	RQ_WAIT_BUSY_PCT	= 5,

	/* unbusy hysteresis */
	UNBUSY_THR_PCT		= 75,

	/* don't let cmds which take a very long time pin lagging for too long */
	MAX_LAGGING_PERIODS	= 10,

	/*
	 * If usage% * 1.25 + 2% is lower than hweight% by more than 3%,
	 * donate the surplus.
	 */
	SURPLUS_SCALE_PCT	= 125,			/* * 125% */
	SURPLUS_SCALE_ABS	= HWEIGHT_WHOLE / 50,	/* + 2% */
	SURPLUS_MIN_ADJ_DELTA	= HWEIGHT_WHOLE / 33,	/* 3% */

	/* switch iff the conditions are met for longer than this */
	AUTOP_CYCLE_NSEC	= 10LLU * NSEC_PER_SEC,

	/*
	 * Count IO size in 4k pages. The 12bit shift helps keeping
	 * size-proportional components of cost calculation in closer
	 * numbers of digits to per-IO cost components.
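	 *
	 * For example, a 64k IO is counted as 16 pages and charged 16
	 * times the per-page coefficient on top of its per-IO base cost.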
	 */
	IOC_PAGE_SHIFT		= 12,
	IOC_PAGE_SIZE		= 1 << IOC_PAGE_SHIFT,
	IOC_SECT_TO_PAGE_SHIFT	= IOC_PAGE_SHIFT - SECTOR_SHIFT,

	/* if apart further than 16M, consider randio for linear model */
	LCOEF_RANDIO_PAGES	= 4096,
};

enum ioc_running {
	IOC_IDLE,
	IOC_RUNNING,
	IOC_STOP,
};

/* io.cost.qos controls including per-dev enable of the whole controller */
enum {
	QOS_ENABLE,
	QOS_CTRL,
	NR_QOS_CTRL_PARAMS,
};

/* io.cost.qos params */
enum {
	QOS_RPPM,
	QOS_RLAT,
	QOS_WPPM,
	QOS_WLAT,
	QOS_MIN,
	QOS_MAX,
	NR_QOS_PARAMS,
};

/* io.cost.model controls */
enum {
	COST_CTRL,
	COST_MODEL,
	NR_COST_CTRL_PARAMS,
};

/* builtin linear cost model coefficients */
enum {
	I_LCOEF_RBPS,
	I_LCOEF_RSEQIOPS,
	I_LCOEF_RRANDIOPS,
	I_LCOEF_WBPS,
	I_LCOEF_WSEQIOPS,
	I_LCOEF_WRANDIOPS,
	NR_I_LCOEFS,
};

enum {
	LCOEF_RPAGE,
	LCOEF_RSEQIO,
	LCOEF_RRANDIO,
	LCOEF_WPAGE,
	LCOEF_WSEQIO,
	LCOEF_WRANDIO,
	NR_LCOEFS,
};

enum {
	AUTOP_INVALID,
	AUTOP_HDD,
	AUTOP_SSD_QD1,
	AUTOP_SSD_DFL,
	AUTOP_SSD_FAST,
};

struct ioc_gq;

struct ioc_params {
	u32				qos[NR_QOS_PARAMS];
	u64				i_lcoefs[NR_I_LCOEFS];
	u64				lcoefs[NR_LCOEFS];
	u32				too_fast_vrate_pct;
	u32				too_slow_vrate_pct;
};

struct ioc_missed {
	u32				nr_met;
	u32				nr_missed;
	u32				last_met;
	u32				last_missed;
};

struct ioc_pcpu_stat {
	struct ioc_missed		missed[2];

	u64				rq_wait_ns;
	u64				last_rq_wait_ns;
};

/* per device */
struct ioc {
	struct rq_qos			rqos;

	bool				enabled;

	struct ioc_params		params;
	u32				period_us;
	u32				margin_us;
	u64				vrate_min;
	u64				vrate_max;

	spinlock_t			lock;
	struct timer_list		timer;
	struct list_head		active_iocgs;	/* active cgroups */
	struct ioc_pcpu_stat __percpu	*pcpu_stat;

	enum ioc_running		running;
	atomic64_t			vtime_rate;

	seqcount_t			period_seqcount;
	u32				period_at;	/* wallclock starttime */
	u64				period_at_vtime; /* vtime starttime */

	atomic64_t			cur_period;	/* inc'd each period */
	int				busy_level;	/* saturation history */

	u64				inuse_margin_vtime;
	bool				weights_updated;
	atomic_t			hweight_gen;	/* for lazy hweights */

	u64				autop_too_fast_at;
	u64				autop_too_slow_at;
	int				autop_idx;
	bool				user_qos_params:1;
	bool				user_cost_model:1;
};

/* per device-cgroup pair */
struct ioc_gq {
	struct blkg_policy_data		pd;
	struct ioc			*ioc;

	/*
	 * An iocg can get its weight from two sources - an explicit
	 * per-device-cgroup configuration or the default weight of the
	 * cgroup. `cfg_weight` is the explicit per-device-cgroup
	 * configuration. `weight` is the effective weight considering
	 * both sources.
	 *
	 * When an idle cgroup becomes active its `active` goes from 0 to
	 * `weight`. `inuse` is the surplus adjusted active weight.
	 * `active` and `inuse` are used to calculate `hweight_active` and
	 * `hweight_inuse`.
	 *
	 * `last_inuse` remembers `inuse` while an iocg is idle to persist
	 * surplus adjustments.
	 */
	u32				cfg_weight;
	u32				weight;
	u32				active;
	u32				inuse;
	u32				last_inuse;

	sector_t			cursor;		/* to detect randio */

	/*
	 * `vtime` is this iocg's vtime cursor which progresses as IOs are
	 * issued.
	 * If lagging behind device vtime, the delta represents the
	 * currently available IO budget. If running ahead, the overage.
	 *
	 * `done_vtime` is the same but progressed on completion rather
	 * than issue. The delta behind `vtime` represents the cost of
	 * currently in-flight IOs.
	 *
	 * `last_vtime` is used to remember `vtime` at the end of the last
	 * period to calculate utilization.
	 */
	atomic64_t			vtime;
	atomic64_t			done_vtime;
	atomic64_t			abs_vdebt;
	u64				last_vtime;

	/*
	 * The period this iocg was last active in. Used for deactivation
	 * and invalidating `vtime`.
	 */
	atomic64_t			active_period;
	struct list_head		active_list;

	/* see __propagate_active_weight() and current_hweight() for details */
	u64				child_active_sum;
	u64				child_inuse_sum;
	int				hweight_gen;
	u32				hweight_active;
	u32				hweight_inuse;
	bool				has_surplus;

	struct wait_queue_head		waitq;
	struct hrtimer			waitq_timer;
	struct hrtimer			delay_timer;

	/* usage is recorded as fractions of HWEIGHT_WHOLE */
	int				usage_idx;
	u32				usages[NR_USAGE_SLOTS];

	/* this iocg's depth in the hierarchy and ancestors including self */
	int				level;
	struct ioc_gq			*ancestors[];
};

/* per cgroup */
struct ioc_cgrp {
	struct blkcg_policy_data	cpd;
	unsigned int			dfl_weight;
};

struct ioc_now {
	u64				now_ns;
	u32				now;
	u64				vnow;
	u64				vrate;
};

struct iocg_wait {
	struct wait_queue_entry		wait;
	struct bio			*bio;
	u64				abs_cost;
	bool				committed;
};

struct iocg_wake_ctx {
	struct ioc_gq			*iocg;
	u32				hw_inuse;
	s64				vbudget;
};

static const struct ioc_params autop[] = {
	[AUTOP_HDD] = {
		.qos			= {
			[QOS_RLAT]		= 50000, /* 50ms */
			[QOS_WLAT]		= 50000,
			[QOS_MIN]		= VRATE_MIN_PPM,
			[QOS_MAX]		= VRATE_MAX_PPM,
		},
		.i_lcoefs		= {
			[I_LCOEF_RBPS]		= 174019176,
			[I_LCOEF_RSEQIOPS]	= 41708,
			[I_LCOEF_RRANDIOPS]	= 370,
			[I_LCOEF_WBPS]		= 178075866,
			[I_LCOEF_WSEQIOPS]	= 42705,
			[I_LCOEF_WRANDIOPS]	= 378,
		},
	},
	[AUTOP_SSD_QD1] = {
		.qos			= {
			[QOS_RLAT]		= 25000, /* 25ms */
			[QOS_WLAT]		= 25000,
			[QOS_MIN]		= VRATE_MIN_PPM,
			[QOS_MAX]		= VRATE_MAX_PPM,
		},
		.i_lcoefs		= {
			[I_LCOEF_RBPS]		= 245855193,
			[I_LCOEF_RSEQIOPS]	= 61575,
			[I_LCOEF_RRANDIOPS]	= 6946,
			[I_LCOEF_WBPS]		= 141365009,
			[I_LCOEF_WSEQIOPS]	= 33716,
			[I_LCOEF_WRANDIOPS]	= 26796,
		},
	},
	[AUTOP_SSD_DFL] = {
		.qos			= {
			[QOS_RLAT]		= 25000, /* 25ms */
			[QOS_WLAT]		= 25000,
			[QOS_MIN]		= VRATE_MIN_PPM,
			[QOS_MAX]		= VRATE_MAX_PPM,
		},
		.i_lcoefs		= {
			[I_LCOEF_RBPS]		= 488636629,
			[I_LCOEF_RSEQIOPS]	= 8932,
			[I_LCOEF_RRANDIOPS]	= 8518,
			[I_LCOEF_WBPS]		= 427891549,
			[I_LCOEF_WSEQIOPS]	= 28755,
			[I_LCOEF_WRANDIOPS]	= 21940,
		},
		.too_fast_vrate_pct	= 500,
	},
	[AUTOP_SSD_FAST] = {
		.qos			= {
			[QOS_RLAT]		= 5000, /* 5ms */
			[QOS_WLAT]		= 5000,
			[QOS_MIN]		= VRATE_MIN_PPM,
			[QOS_MAX]		= VRATE_MAX_PPM,
		},
		.i_lcoefs		= {
			[I_LCOEF_RBPS]		= 3102524156LLU,
			[I_LCOEF_RSEQIOPS]	= 724816,
			[I_LCOEF_RRANDIOPS]	= 778122,
			[I_LCOEF_WBPS]		= 1742780862LLU,
			[I_LCOEF_WSEQIOPS]	= 425702,
			[I_LCOEF_WRANDIOPS]	= 443193,
		},
		.too_slow_vrate_pct	= 10,
	},
};

/*
 * vrate adjust percentages indexed by ioc->busy_level. We adjust up on
 * vtime credit shortage and down on device saturation.
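 *
 * For example, six consecutive periods of shortage (busy_level == -6)
 * nudge vrate up by vrate_adj_pct[6] == 1% per period; sustained
 * saturation or shortage walks further into the table, up to 16% per
 * period.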
 */
static u32 vrate_adj_pct[] =
	{ 0, 0, 0, 0,
	  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	  4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 16 };

static struct blkcg_policy blkcg_policy_iocost;

/* accessors and helpers */
static struct ioc *rqos_to_ioc(struct rq_qos *rqos)
{
	return container_of(rqos, struct ioc, rqos);
}

static struct ioc *q_to_ioc(struct request_queue *q)
{
	return rqos_to_ioc(rq_qos_id(q, RQ_QOS_COST));
}

static const char *q_name(struct request_queue *q)
{
	if (test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
		return kobject_name(q->kobj.parent);
	else
		return "<unknown>";
}

static const char __maybe_unused *ioc_name(struct ioc *ioc)
{
	return q_name(ioc->rqos.q);
}

static struct ioc_gq *pd_to_iocg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct ioc_gq, pd) : NULL;
}

static struct ioc_gq *blkg_to_iocg(struct blkcg_gq *blkg)
{
	return pd_to_iocg(blkg_to_pd(blkg, &blkcg_policy_iocost));
}

static struct blkcg_gq *iocg_to_blkg(struct ioc_gq *iocg)
{
	return pd_to_blkg(&iocg->pd);
}

static struct ioc_cgrp *blkcg_to_iocc(struct blkcg *blkcg)
{
	return container_of(blkcg_to_cpd(blkcg, &blkcg_policy_iocost),
			    struct ioc_cgrp, cpd);
}

/*
 * Scale @abs_cost to the inverse of @hw_inuse. The lower the hierarchical
 * weight, the more expensive each IO. Must round up.
 */
static u64 abs_cost_to_cost(u64 abs_cost, u32 hw_inuse)
{
	return DIV64_U64_ROUND_UP(abs_cost * HWEIGHT_WHOLE, hw_inuse);
}

/*
 * The inverse of abs_cost_to_cost(). Must round up.
 */
static u64 cost_to_abs_cost(u64 cost, u32 hw_inuse)
{
	return DIV64_U64_ROUND_UP(cost * hw_inuse, HWEIGHT_WHOLE);
}

static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio, u64 cost)
{
	bio->bi_iocost_cost = cost;
	atomic64_add(cost, &iocg->vtime);
}

#define CREATE_TRACE_POINTS
#include <trace/events/iocost.h>

/* latency QoS params changed, update period_us and all the dependent params */
static void ioc_refresh_period_us(struct ioc *ioc)
{
	u32 ppm, lat, multi, period_us;

	lockdep_assert_held(&ioc->lock);

	/* pick the higher latency target */
	if (ioc->params.qos[QOS_RLAT] >= ioc->params.qos[QOS_WLAT]) {
		ppm = ioc->params.qos[QOS_RPPM];
		lat = ioc->params.qos[QOS_RLAT];
	} else {
		ppm = ioc->params.qos[QOS_WPPM];
		lat = ioc->params.qos[QOS_WLAT];
	}

	/*
	 * We want the period to be long enough to contain a healthy number
	 * of IOs while short enough for granular control. Define it as a
	 * multiple of the latency target. Ideally, the multiplier should
	 * be scaled according to the percentile so that it would nominally
	 * contain a certain number of requests. Let's be simpler and
	 * scale it linearly so that it's 2x >= pct(90) and 10x at pct(50).
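	 *
	 * For example, a latency target at the 90th percentile
	 * (ppm == 900000) gives multi == max((1000000 - 900000) / 50000, 2)
	 * == 2, while a 50th percentile target gives multi == 10.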
	 */
	if (ppm)
		multi = max_t(u32, (MILLION - ppm) / 50000, 2);
	else
		multi = 2;
	period_us = multi * lat;
	period_us = clamp_t(u32, period_us, MIN_PERIOD, MAX_PERIOD);

	/* calculate dependent params */
	ioc->period_us = period_us;
	ioc->margin_us = period_us * MARGIN_PCT / 100;
	ioc->inuse_margin_vtime = DIV64_U64_ROUND_UP(
			period_us * VTIME_PER_USEC * INUSE_MARGIN_PCT, 100);
}

static int ioc_autop_idx(struct ioc *ioc)
{
	int idx = ioc->autop_idx;
	const struct ioc_params *p = &autop[idx];
	u32 vrate_pct;
	u64 now_ns;

	/* rotational? */
	if (!blk_queue_nonrot(ioc->rqos.q))
		return AUTOP_HDD;

	/* handle SATA SSDs w/ broken NCQ */
	if (blk_queue_depth(ioc->rqos.q) == 1)
		return AUTOP_SSD_QD1;

	/* use one of the normal ssd sets */
	if (idx < AUTOP_SSD_DFL)
		return AUTOP_SSD_DFL;

	/* if user is overriding anything, maintain what was there */
	if (ioc->user_qos_params || ioc->user_cost_model)
		return idx;

	/* step up/down based on the vrate */
	vrate_pct = div64_u64(atomic64_read(&ioc->vtime_rate) * 100,
			      VTIME_PER_USEC);
	now_ns = ktime_get_ns();

	if (p->too_fast_vrate_pct && p->too_fast_vrate_pct <= vrate_pct) {
		if (!ioc->autop_too_fast_at)
			ioc->autop_too_fast_at = now_ns;
		if (now_ns - ioc->autop_too_fast_at >= AUTOP_CYCLE_NSEC)
			return idx + 1;
	} else {
		ioc->autop_too_fast_at = 0;
	}

	if (p->too_slow_vrate_pct && p->too_slow_vrate_pct >= vrate_pct) {
		if (!ioc->autop_too_slow_at)
			ioc->autop_too_slow_at = now_ns;
		if (now_ns - ioc->autop_too_slow_at >= AUTOP_CYCLE_NSEC)
			return idx - 1;
	} else {
		ioc->autop_too_slow_at = 0;
	}

	return idx;
}

/*
 * Take the following as input
 *
 *  @bps	maximum sequential throughput
 *  @seqiops	maximum sequential 4k iops
 *  @randiops	maximum random 4k iops
 *
 * and calculate the linear model cost coefficients.
 *
 *  *@page	per-page cost		1s / (@bps / 4096)
 *  *@seqio	base cost of a seq IO	max((1s / @seqiops) - *@page, 0)
 *  *@randio	base cost of a rand IO	max((1s / @randiops) - *@page, 0)
 */
static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops,
			u64 *page, u64 *seqio, u64 *randio)
{
	u64 v;

	*page = *seqio = *randio = 0;

	if (bps)
		*page = DIV64_U64_ROUND_UP(VTIME_PER_SEC,
					   DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE));

	if (seqiops) {
		v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops);
		if (v > *page)
			*seqio = v - *page;
	}

	if (randiops) {
		v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, randiops);
		if (v > *page)
			*randio = v - *page;
	}
}

static void ioc_refresh_lcoefs(struct ioc *ioc)
{
	u64 *u = ioc->params.i_lcoefs;
	u64 *c = ioc->params.lcoefs;

	calc_lcoefs(u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
		    &c[LCOEF_RPAGE], &c[LCOEF_RSEQIO], &c[LCOEF_RRANDIO]);
	calc_lcoefs(u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS],
		    &c[LCOEF_WPAGE], &c[LCOEF_WSEQIO], &c[LCOEF_WRANDIO]);
}

static bool ioc_refresh_params(struct ioc *ioc, bool force)
{
	const struct ioc_params *p;
	int idx;

	lockdep_assert_held(&ioc->lock);

	idx = ioc_autop_idx(ioc);
	p = &autop[idx];

	if (idx == ioc->autop_idx && !force)
		return false;

	if (idx != ioc->autop_idx)
		atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);

	ioc->autop_idx = idx;
	ioc->autop_too_fast_at = 0;
	ioc->autop_too_slow_at = 0;

	if (!ioc->user_qos_params)
		memcpy(ioc->params.qos, p->qos, sizeof(p->qos));
	if (!ioc->user_cost_model)
		memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs));

	ioc_refresh_period_us(ioc);
	ioc_refresh_lcoefs(ioc);

	ioc->vrate_min = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MIN] *
					    VTIME_PER_USEC, MILLION);
	ioc->vrate_max = div64_u64((u64)ioc->params.qos[QOS_MAX] *
				   VTIME_PER_USEC, MILLION);

	return true;
}

/* take a snapshot of the current [v]time and vrate */
static void ioc_now(struct ioc *ioc, struct ioc_now *now)
{
	unsigned seq;

	now->now_ns = ktime_get();
	now->now = ktime_to_us(now->now_ns);
	now->vrate = atomic64_read(&ioc->vtime_rate);

	/*
	 * The current vtime is
	 *
	 *   vtime at period start + (wallclock time since the start) * vrate
	 *
	 * As a consistent snapshot of `period_at_vtime` and `period_at` is
	 * needed, they're seqcount protected.
	 */
	do {
		seq = read_seqcount_begin(&ioc->period_seqcount);
		now->vnow = ioc->period_at_vtime +
			(now->now - ioc->period_at) * now->vrate;
	} while (read_seqcount_retry(&ioc->period_seqcount, seq));
}

static void ioc_start_period(struct ioc *ioc, struct ioc_now *now)
{
	lockdep_assert_held(&ioc->lock);
	WARN_ON_ONCE(ioc->running != IOC_RUNNING);

	write_seqcount_begin(&ioc->period_seqcount);
	ioc->period_at = now->now;
	ioc->period_at_vtime = now->vnow;
	write_seqcount_end(&ioc->period_seqcount);

	ioc->timer.expires = jiffies + usecs_to_jiffies(ioc->period_us);
	add_timer(&ioc->timer);
}

/*
 * Update @iocg's `active` and `inuse` to @active and @inuse, update level
 * weight sums and propagate upwards accordingly.
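 *
 * For example, in the hierarchy from the top comment, if A0 donates half
 * of its weight (active 100, inuse 50) while A1 stays at 100/100, A's
 * child_inuse_sum / child_active_sum becomes 150/200 and A's own inuse is
 * scaled to 100 * 150 / 200 == 75, which is then propagated to the root.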
 */
static void __propagate_active_weight(struct ioc_gq *iocg, u32 active, u32 inuse)
{
	struct ioc *ioc = iocg->ioc;
	int lvl;

	lockdep_assert_held(&ioc->lock);

	inuse = min(active, inuse);

	for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
		struct ioc_gq *parent = iocg->ancestors[lvl];
		struct ioc_gq *child = iocg->ancestors[lvl + 1];
		u32 parent_active = 0, parent_inuse = 0;

		/* update the level sums */
		parent->child_active_sum += (s32)(active - child->active);
		parent->child_inuse_sum += (s32)(inuse - child->inuse);
		/* apply the updates */
		child->active = active;
		child->inuse = inuse;

		/*
		 * The delta between inuse and active sums indicates how
		 * much of the weight is being given away. The parent's
		 * inuse and active should reflect the ratio.
		 */
		if (parent->child_active_sum) {
			parent_active = parent->weight;
			parent_inuse = DIV64_U64_ROUND_UP(
				parent_active * parent->child_inuse_sum,
				parent->child_active_sum);
		}

		/* do we need to keep walking up? */
		if (parent_active == parent->active &&
		    parent_inuse == parent->inuse)
			break;

		active = parent_active;
		inuse = parent_inuse;
	}

	ioc->weights_updated = true;
}

static void commit_active_weights(struct ioc *ioc)
{
	lockdep_assert_held(&ioc->lock);

	if (ioc->weights_updated) {
		/* paired with rmb in current_hweight(), see there */
		smp_wmb();
		atomic_inc(&ioc->hweight_gen);
		ioc->weights_updated = false;
	}
}

static void propagate_active_weight(struct ioc_gq *iocg, u32 active, u32 inuse)
{
	__propagate_active_weight(iocg, active, inuse);
	commit_active_weights(iocg->ioc);
}

static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep)
{
	struct ioc *ioc = iocg->ioc;
	int lvl;
	u32 hwa, hwi;
	int ioc_gen;

	/* hot path - if uptodate, use cached */
	ioc_gen = atomic_read(&ioc->hweight_gen);
	if (ioc_gen == iocg->hweight_gen)
		goto out;

	/*
	 * Paired with wmb in commit_active_weights(). If we saw the
	 * updated hweight_gen, all the weight updates from
	 * __propagate_active_weight() are visible too.
	 *
	 * We can race with weight updates during calculation and get it
	 * wrong. However, hweight_gen would have changed and a future
	 * reader will recalculate and we're guaranteed to discard the
	 * wrong result soon.
	 */
	smp_rmb();

	hwa = hwi = HWEIGHT_WHOLE;
	for (lvl = 0; lvl <= iocg->level - 1; lvl++) {
		struct ioc_gq *parent = iocg->ancestors[lvl];
		struct ioc_gq *child = iocg->ancestors[lvl + 1];
		u32 active_sum = READ_ONCE(parent->child_active_sum);
		u32 inuse_sum = READ_ONCE(parent->child_inuse_sum);
		u32 active = READ_ONCE(child->active);
		u32 inuse = READ_ONCE(child->inuse);

		/* we can race with deactivations and either may read as zero */
		if (!active_sum || !inuse_sum)
			continue;

		active_sum = max(active, active_sum);
		hwa = hwa * active / active_sum;	/* max 16bits * 10000 */

		inuse_sum = max(inuse, inuse_sum);
		hwi = hwi * inuse / inuse_sum;		/* max 16bits * 10000 */
	}

	iocg->hweight_active = max_t(u32, hwa, 1);
	iocg->hweight_inuse = max_t(u32, hwi, 1);
	iocg->hweight_gen = ioc_gen;
out:
	if (hw_activep)
		*hw_activep = iocg->hweight_active;
	if (hw_inusep)
		*hw_inusep = iocg->hweight_inuse;
}

static void weight_updated(struct ioc_gq *iocg)
{
	struct ioc *ioc = iocg->ioc;
	struct blkcg_gq *blkg = iocg_to_blkg(iocg);
	struct ioc_cgrp *iocc = blkcg_to_iocc(blkg->blkcg);
	u32 weight;

	lockdep_assert_held(&ioc->lock);

	weight = iocg->cfg_weight ?: iocc->dfl_weight;
	if (weight != iocg->weight && iocg->active)
		propagate_active_weight(iocg, weight,
			DIV64_U64_ROUND_UP(iocg->inuse * weight, iocg->weight));
	iocg->weight = weight;
}

static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
{
	struct ioc *ioc = iocg->ioc;
	u64 last_period, cur_period, max_period_delta;
	u64 vtime, vmargin, vmin;
	int i;

	/*
	 * If we seem to be already active, just update the stamp to tell
	 * the timer that we're still active. We don't mind occasional
	 * races.
	 */
	if (!list_empty(&iocg->active_list)) {
		ioc_now(ioc, now);
		cur_period = atomic64_read(&ioc->cur_period);
		if (atomic64_read(&iocg->active_period) != cur_period)
			atomic64_set(&iocg->active_period, cur_period);
		return true;
	}

	/* racy check on internal node IOs, treat as root level IOs */
	if (iocg->child_active_sum)
		return false;

	spin_lock_irq(&ioc->lock);

	ioc_now(ioc, now);

	/* update period */
	cur_period = atomic64_read(&ioc->cur_period);
	last_period = atomic64_read(&iocg->active_period);
	atomic64_set(&iocg->active_period, cur_period);

	/* already activated or breaking leaf-only constraint? */
	for (i = iocg->level; i > 0; i--)
		if (!list_empty(&iocg->active_list))
			goto fail_unlock;
	if (iocg->child_active_sum)
		goto fail_unlock;

	/*
	 * vtime may wrap when vrate is raised substantially due to
	 * underestimated IO costs. Look at the period and ignore its
	 * vtime if the iocg has been idle for too long. Also, cap the
	 * budget it can start with to the margin.
	 */
	max_period_delta = DIV64_U64_ROUND_UP(VTIME_VALID_DUR, ioc->period_us);
	vtime = atomic64_read(&iocg->vtime);
	vmargin = ioc->margin_us * now->vrate;
	vmin = now->vnow - vmargin;

	if (last_period + max_period_delta < cur_period ||
	    time_before64(vtime, vmin)) {
		atomic64_add(vmin - vtime, &iocg->vtime);
		atomic64_add(vmin - vtime, &iocg->done_vtime);
		vtime = vmin;
	}

	/*
	 * Activate, propagate weight and start period timer if not
	 * running.
	 * Reset hweight_gen to avoid accidental match from wrapping.
	 */
	iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1;
	list_add(&iocg->active_list, &ioc->active_iocgs);
	propagate_active_weight(iocg, iocg->weight,
				iocg->last_inuse ?: iocg->weight);

	TRACE_IOCG_PATH(iocg_activate, iocg, now,
			last_period, cur_period, vtime);

	iocg->last_vtime = vtime;

	if (ioc->running == IOC_IDLE) {
		ioc->running = IOC_RUNNING;
		ioc_start_period(ioc, now);
	}

	spin_unlock_irq(&ioc->lock);
	return true;

fail_unlock:
	spin_unlock_irq(&ioc->lock);
	return false;
}

static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
			int flags, void *key)
{
	struct iocg_wait *wait = container_of(wq_entry, struct iocg_wait, wait);
	struct iocg_wake_ctx *ctx = (struct iocg_wake_ctx *)key;
	u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse);

	ctx->vbudget -= cost;

	if (ctx->vbudget < 0)
		return -1;

	iocg_commit_bio(ctx->iocg, wait->bio, cost);

	/*
	 * autoremove_wake_function() removes the wait entry only when it
	 * actually changed the task state. We want the wait always
	 * removed. Remove explicitly and use default_wake_function().
	 */
	list_del_init(&wq_entry->entry);
	wait->committed = true;

	default_wake_function(wq_entry, mode, flags, key);
	return 0;
}

static void iocg_kick_waitq(struct ioc_gq *iocg, struct ioc_now *now)
{
	struct ioc *ioc = iocg->ioc;
	struct iocg_wake_ctx ctx = { .iocg = iocg };
	u64 margin_ns = (u64)(ioc->period_us *
			      WAITQ_TIMER_MARGIN_PCT / 100) * NSEC_PER_USEC;
	u64 abs_vdebt, vdebt, vshortage, expires, oexpires;
	s64 vbudget;
	u32 hw_inuse;

	lockdep_assert_held(&iocg->waitq.lock);

	current_hweight(iocg, NULL, &hw_inuse);
	vbudget = now->vnow - atomic64_read(&iocg->vtime);

	/* pay off debt */
	abs_vdebt = atomic64_read(&iocg->abs_vdebt);
	vdebt = abs_cost_to_cost(abs_vdebt, hw_inuse);
	if (vdebt && vbudget > 0) {
		u64 delta = min_t(u64, vbudget, vdebt);
		u64 abs_delta = min(cost_to_abs_cost(delta, hw_inuse),
				    abs_vdebt);

		atomic64_add(delta, &iocg->vtime);
		atomic64_add(delta, &iocg->done_vtime);
		atomic64_sub(abs_delta, &iocg->abs_vdebt);
		if (WARN_ON_ONCE(atomic64_read(&iocg->abs_vdebt) < 0))
			atomic64_set(&iocg->abs_vdebt, 0);
	}

	/*
	 * Wake up the ones which are due and see how much vtime we'll need
	 * for the next one.
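	 *
	 * For example, with waiters of cost 30, 40 and 50 and a remaining
	 * budget of 80, the first two are committed and woken while the
	 * third leaves a shortage of 40, which determines when the wakeup
	 * timer fires next.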
	 */
	ctx.hw_inuse = hw_inuse;
	ctx.vbudget = vbudget - vdebt;
	__wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx);
	if (!waitqueue_active(&iocg->waitq))
		return;
	if (WARN_ON_ONCE(ctx.vbudget >= 0))
		return;

	/* determine next wakeup, add a quarter margin to guarantee chunking */
	vshortage = -ctx.vbudget;
	expires = now->now_ns +
		DIV64_U64_ROUND_UP(vshortage, now->vrate) * NSEC_PER_USEC;
	expires += margin_ns / 4;

	/* if already active and close enough, don't bother */
	oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->waitq_timer));
	if (hrtimer_is_queued(&iocg->waitq_timer) &&
	    abs(oexpires - expires) <= margin_ns / 4)
		return;

	hrtimer_start_range_ns(&iocg->waitq_timer, ns_to_ktime(expires),
			       margin_ns / 4, HRTIMER_MODE_ABS);
}

static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
{
	struct ioc_gq *iocg = container_of(timer, struct ioc_gq, waitq_timer);
	struct ioc_now now;
	unsigned long flags;

	ioc_now(iocg->ioc, &now);

	spin_lock_irqsave(&iocg->waitq.lock, flags);
	iocg_kick_waitq(iocg, &now);
	spin_unlock_irqrestore(&iocg->waitq.lock, flags);

	return HRTIMER_NORESTART;
}

static void iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
{
	struct ioc *ioc = iocg->ioc;
	struct blkcg_gq *blkg = iocg_to_blkg(iocg);
	u64 vtime = atomic64_read(&iocg->vtime);
	u64 vmargin = ioc->margin_us * now->vrate;
	u64 margin_ns = ioc->margin_us * NSEC_PER_USEC;
	u64 expires, oexpires;
	u32 hw_inuse;

	/* debt-adjust vtime */
	current_hweight(iocg, NULL, &hw_inuse);
	vtime += abs_cost_to_cost(atomic64_read(&iocg->abs_vdebt), hw_inuse);

	/* clear or maintain depending on the overage */
	if (time_before_eq64(vtime, now->vnow)) {
		blkcg_clear_delay(blkg);
		return;
	}
	if (!atomic_read(&blkg->use_delay) &&
	    time_before_eq64(vtime, now->vnow + vmargin))
		return;

	/* use delay */
	if (cost) {
		u64 cost_ns = DIV64_U64_ROUND_UP(cost * NSEC_PER_USEC,
						 now->vrate);
		blkcg_add_delay(blkg, now->now_ns, cost_ns);
	}
	blkcg_use_delay(blkg);

	expires = now->now_ns + DIV64_U64_ROUND_UP(vtime - now->vnow,
						   now->vrate) * NSEC_PER_USEC;

	/* if already active and close enough, don't bother */
	oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->delay_timer));
	if (hrtimer_is_queued(&iocg->delay_timer) &&
	    abs(oexpires - expires) <= margin_ns / 4)
		return;

	hrtimer_start_range_ns(&iocg->delay_timer, ns_to_ktime(expires),
			       margin_ns / 4, HRTIMER_MODE_ABS);
}

static enum hrtimer_restart iocg_delay_timer_fn(struct hrtimer *timer)
{
	struct ioc_gq *iocg = container_of(timer, struct ioc_gq, delay_timer);
	struct ioc_now now;

	ioc_now(iocg->ioc, &now);
	iocg_kick_delay(iocg, &now, 0);

	return HRTIMER_NORESTART;
}

static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p)
{
	u32 nr_met[2] = { };
	u32 nr_missed[2] = { };
	u64 rq_wait_ns = 0;
	int cpu, rw;

	for_each_online_cpu(cpu) {
		struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu);
		u64 this_rq_wait_ns;

		for (rw = READ; rw <= WRITE; rw++) {
			u32 this_met = READ_ONCE(stat->missed[rw].nr_met);
			u32 this_missed =
				READ_ONCE(stat->missed[rw].nr_missed);

			nr_met[rw] += this_met - stat->missed[rw].last_met;
			nr_missed[rw] += this_missed - stat->missed[rw].last_missed;
			stat->missed[rw].last_met = this_met;
			stat->missed[rw].last_missed = this_missed;
		}

		this_rq_wait_ns = READ_ONCE(stat->rq_wait_ns);
		rq_wait_ns += this_rq_wait_ns - stat->last_rq_wait_ns;
		stat->last_rq_wait_ns = this_rq_wait_ns;
	}

	for (rw = READ; rw <= WRITE; rw++) {
		if (nr_met[rw] + nr_missed[rw])
			missed_ppm_ar[rw] =
				DIV64_U64_ROUND_UP((u64)nr_missed[rw] * MILLION,
						   nr_met[rw] + nr_missed[rw]);
		else
			missed_ppm_ar[rw] = 0;
	}

	*rq_wait_pct_p = div64_u64(rq_wait_ns * 100,
				   ioc->period_us * NSEC_PER_USEC);
}

/* was iocg idle this period? */
static bool iocg_is_idle(struct ioc_gq *iocg)
{
	struct ioc *ioc = iocg->ioc;

	/* did something get issued this period? */
	if (atomic64_read(&iocg->active_period) ==
	    atomic64_read(&ioc->cur_period))
		return false;

	/* is something in flight? */
	if (atomic64_read(&iocg->done_vtime) < atomic64_read(&iocg->vtime))
		return false;

	return true;
}

/* returns usage with margin added if surplus is large enough */
static u32 surplus_adjusted_hweight_inuse(u32 usage, u32 hw_inuse)
{
	/* add margin */
	usage = DIV_ROUND_UP(usage * SURPLUS_SCALE_PCT, 100);
	usage += SURPLUS_SCALE_ABS;

	/* don't bother if the surplus is too small */
	if (usage + SURPLUS_MIN_ADJ_DELTA > hw_inuse)
		return 0;

	return usage;
}

static void ioc_timer_fn(struct timer_list *timer)
{
	struct ioc *ioc = container_of(timer, struct ioc, timer);
	struct ioc_gq *iocg, *tiocg;
	struct ioc_now now;
	int nr_surpluses = 0, nr_shortages = 0, nr_lagging = 0;
	u32 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM];
	u32 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];
	u32 missed_ppm[2], rq_wait_pct;
	u64 period_vtime;
	int i;

	/* how were the latencies during the period? */
	ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct);

	/* take care of active iocgs */
	spin_lock_irq(&ioc->lock);

	ioc_now(ioc, &now);

	period_vtime = now.vnow - ioc->period_at_vtime;
	if (WARN_ON_ONCE(!period_vtime)) {
		spin_unlock_irq(&ioc->lock);
		return;
	}

	/*
	 * Waiters determine the sleep durations based on the vrate they
	 * saw at the time of sleep. If vrate has increased, some waiters
	 * could be sleeping for too long. Wake up tardy waiters which
	 * should have woken up in the last period and expire idle iocgs.
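	 *
	 * (For example, a waiter that armed a 100ms timer while vrate was
	 * at 50% only needs ~50ms of wall time once vrate is back at 100%,
	 * so the period timer kicks it rather than letting it oversleep.)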
	 */
	list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
		if (!waitqueue_active(&iocg->waitq) &&
		    !atomic64_read(&iocg->abs_vdebt) && !iocg_is_idle(iocg))
			continue;

		spin_lock(&iocg->waitq.lock);

		if (waitqueue_active(&iocg->waitq) ||
		    atomic64_read(&iocg->abs_vdebt)) {
			/* might be oversleeping vtime / hweight changes, kick */
			iocg_kick_waitq(iocg, &now);
			iocg_kick_delay(iocg, &now, 0);
		} else if (iocg_is_idle(iocg)) {
			/* no waiter and idle, deactivate */
			iocg->last_inuse = iocg->inuse;
			__propagate_active_weight(iocg, 0, 0);
			list_del_init(&iocg->active_list);
		}

		spin_unlock(&iocg->waitq.lock);
	}
	commit_active_weights(ioc);

	/* calc usages and see whether some weights need to be moved around */
	list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
		u64 vdone, vtime, vusage, vmargin, vmin;
		u32 hw_active, hw_inuse, usage;

		/*
		 * Collect unused and wind vtime closer to vnow to prevent
		 * iocgs from accumulating a large amount of budget.
		 */
		vdone = atomic64_read(&iocg->done_vtime);
		vtime = atomic64_read(&iocg->vtime);
		current_hweight(iocg, &hw_active, &hw_inuse);

		/*
		 * Latency QoS detection doesn't account for IOs which are
		 * in-flight for longer than a period. Detect them by
		 * comparing vdone against period start. If lagging behind
		 * IOs from past periods, don't increase vrate.
		 */
		if (!atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
		    time_after64(vtime, vdone) &&
		    time_after64(vtime, now.vnow -
				 MAX_LAGGING_PERIODS * period_vtime) &&
		    time_before64(vdone, now.vnow - period_vtime))
			nr_lagging++;

		if (waitqueue_active(&iocg->waitq))
			vusage = now.vnow - iocg->last_vtime;
		else if (time_before64(iocg->last_vtime, vtime))
			vusage = vtime - iocg->last_vtime;
		else
			vusage = 0;

		iocg->last_vtime += vusage;
		/*
		 * Factor in in-flight vtime into vusage to avoid
		 * high-latency completions appearing as idle. This should
		 * be done after the above ->last_vtime adjustment.
		 */
		vusage = max(vusage, vtime - vdone);

		/* calculate hweight based usage ratio and record */
		if (vusage) {
			usage = DIV64_U64_ROUND_UP(vusage * hw_inuse,
						   period_vtime);
			iocg->usage_idx = (iocg->usage_idx + 1) % NR_USAGE_SLOTS;
			iocg->usages[iocg->usage_idx] = usage;
		} else {
			usage = 0;
		}

		/* see whether there's surplus vtime */
		vmargin = ioc->margin_us * now.vrate;
		vmin = now.vnow - vmargin;

		iocg->has_surplus = false;

		if (!waitqueue_active(&iocg->waitq) &&
		    time_before64(vtime, vmin)) {
			u64 delta = vmin - vtime;

			/* throw away surplus vtime */
			atomic64_add(delta, &iocg->vtime);
			atomic64_add(delta, &iocg->done_vtime);
			iocg->last_vtime += delta;
			/* if usage is sufficiently low, maybe it can donate */
			if (surplus_adjusted_hweight_inuse(usage, hw_inuse)) {
				iocg->has_surplus = true;
				nr_surpluses++;
			}
		} else if (hw_inuse < hw_active) {
			u32 new_hwi, new_inuse;

			/* was donating but might need to take back some */
			if (waitqueue_active(&iocg->waitq)) {
				new_hwi = hw_active;
			} else {
				new_hwi = max(hw_inuse,
					      usage * SURPLUS_SCALE_PCT / 100 +
					      SURPLUS_SCALE_ABS);
			}

			new_inuse = div64_u64((u64)iocg->inuse * new_hwi,
					      hw_inuse);
			new_inuse = clamp_t(u32, new_inuse, 1, iocg->active);

			if (new_inuse > iocg->inuse) {
				TRACE_IOCG_PATH(inuse_takeback, iocg, &now,
						iocg->inuse, new_inuse,
						hw_inuse, new_hwi);
				__propagate_active_weight(iocg, iocg->weight,
							  new_inuse);
			}
		} else {
			/* genuinely out of vtime */
			nr_shortages++;
		}
	}

	if (!nr_shortages || !nr_surpluses)
		goto skip_surplus_transfers;

	/* there are both shortages and surpluses, transfer surpluses */
	list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
		u32 usage, hw_active, hw_inuse, new_hwi, new_inuse;
		int nr_valid = 0;

		if (!iocg->has_surplus)
			continue;

		/* base the decision on max historical usage */
		for (i = 0, usage = 0; i < NR_USAGE_SLOTS; i++) {
			if (iocg->usages[i]) {
				usage = max(usage, iocg->usages[i]);
				nr_valid++;
			}
		}
		if (nr_valid < MIN_VALID_USAGES)
			continue;

		current_hweight(iocg, &hw_active, &hw_inuse);
		new_hwi = surplus_adjusted_hweight_inuse(usage, hw_inuse);
		if (!new_hwi)
			continue;

		new_inuse = DIV64_U64_ROUND_UP((u64)iocg->inuse * new_hwi,
					       hw_inuse);
		if (new_inuse < iocg->inuse) {
			TRACE_IOCG_PATH(inuse_giveaway, iocg, &now,
					iocg->inuse, new_inuse,
					hw_inuse, new_hwi);
			__propagate_active_weight(iocg, iocg->weight, new_inuse);
		}
	}
skip_surplus_transfers:
	commit_active_weights(ioc);

	/*
	 * If q is getting clogged or we're missing too much, we're issuing
	 * too much IO and should lower vtime rate. If we're not missing
	 * and experiencing shortages but not surpluses, we're too stingy
	 * and should increase vtime rate.
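	 *
	 * For example, with QOS_RPPM set to 95% (950000), ppm_rthr is
	 * 50000 and the device counts as busy once more than 5% of reads
	 * miss their RLAT target in a period.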
	 */
	if (rq_wait_pct > RQ_WAIT_BUSY_PCT ||
	    missed_ppm[READ] > ppm_rthr ||
	    missed_ppm[WRITE] > ppm_wthr) {
		ioc->busy_level = max(ioc->busy_level, 0);
		ioc->busy_level++;
	} else if (nr_lagging) {
		ioc->busy_level = max(ioc->busy_level, 0);
	} else if (nr_shortages && !nr_surpluses &&
		   rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
		   missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 &&
		   missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) {
		ioc->busy_level = min(ioc->busy_level, 0);
		ioc->busy_level--;
	} else {
		ioc->busy_level = 0;
	}

	ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);

	if (ioc->busy_level) {
		u64 vrate = atomic64_read(&ioc->vtime_rate);
		u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;

		/* rq_wait signal is always reliable, ignore user vrate_min */
		if (rq_wait_pct > RQ_WAIT_BUSY_PCT)
			vrate_min = VRATE_MIN;

		/*
		 * If vrate is out of bounds, apply clamp gradually as the
		 * bounds can change abruptly. Otherwise, apply busy_level
		 * based adjustment.
		 */
		if (vrate < vrate_min) {
			vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT),
					  100);
			vrate = min(vrate, vrate_min);
		} else if (vrate > vrate_max) {
			vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT),
					  100);
			vrate = max(vrate, vrate_max);
		} else {
			int idx = min_t(int, abs(ioc->busy_level),
					ARRAY_SIZE(vrate_adj_pct) - 1);
			u32 adj_pct = vrate_adj_pct[idx];

			if (ioc->busy_level > 0)
				adj_pct = 100 - adj_pct;
			else
				adj_pct = 100 + adj_pct;

			vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
				      vrate_min, vrate_max);
		}

		trace_iocost_ioc_vrate_adj(ioc, vrate, &missed_ppm, rq_wait_pct,
					   nr_lagging, nr_shortages,
					   nr_surpluses);

		atomic64_set(&ioc->vtime_rate, vrate);
		ioc->inuse_margin_vtime = DIV64_U64_ROUND_UP(
			ioc->period_us * vrate * INUSE_MARGIN_PCT, 100);
	}

	ioc_refresh_params(ioc, false);

	/*
	 * This period is done. Move onto the next one. If nothing's
	 * going on with the device, stop the timer.
	 */
	atomic64_inc(&ioc->cur_period);

	if (ioc->running != IOC_STOP) {
		if (!list_empty(&ioc->active_iocgs)) {
			ioc_start_period(ioc, &now);
		} else {
			ioc->busy_level = 0;
			ioc->running = IOC_IDLE;
		}
	}

	spin_unlock_irq(&ioc->lock);
}

static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg,
				    bool is_merge, u64 *costp)
{
	struct ioc *ioc = iocg->ioc;
	u64 coef_seqio, coef_randio, coef_page;
	u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1);
	u64 seek_pages = 0;
	u64 cost = 0;

	switch (bio_op(bio)) {
	case REQ_OP_READ:
		coef_seqio	= ioc->params.lcoefs[LCOEF_RSEQIO];
		coef_randio	= ioc->params.lcoefs[LCOEF_RRANDIO];
		coef_page	= ioc->params.lcoefs[LCOEF_RPAGE];
		break;
	case REQ_OP_WRITE:
		coef_seqio	= ioc->params.lcoefs[LCOEF_WSEQIO];
		coef_randio	= ioc->params.lcoefs[LCOEF_WRANDIO];
		coef_page	= ioc->params.lcoefs[LCOEF_WPAGE];
		break;
	default:
		goto out;
	}

	if (iocg->cursor) {
		seek_pages = abs(bio->bi_iter.bi_sector - iocg->cursor);
		seek_pages >>= IOC_SECT_TO_PAGE_SHIFT;
	}

	if (!is_merge) {
		if (seek_pages > LCOEF_RANDIO_PAGES) {
			cost += coef_randio;
		} else {
			cost += coef_seqio;
		}
	}
	cost += pages * coef_page;
out:
	*costp = cost;
}

static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge)
{
	u64 cost;

	calc_vtime_cost_builtin(bio, iocg, is_merge, &cost);
	return cost;
}

static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
{
	struct blkcg_gq *blkg = bio->bi_blkg;
	struct ioc *ioc = rqos_to_ioc(rqos);
	struct ioc_gq *iocg = blkg_to_iocg(blkg);
	struct ioc_now now;
	struct iocg_wait wait;
	u32 hw_active, hw_inuse;
	u64 abs_cost, cost, vtime;

	/* bypass IOs if disabled or for root cgroup */
	if (!ioc->enabled || !iocg->level)
		return;

	/* always activate so that even 0 cost IOs get protected to some level */
	if (!iocg_activate(iocg, &now))
		return;

	/* calculate the absolute vtime cost */
	abs_cost = calc_vtime_cost(bio, iocg, false);
	if (!abs_cost)
		return;

	iocg->cursor = bio_end_sector(bio);

	vtime = atomic64_read(&iocg->vtime);
	current_hweight(iocg, &hw_active, &hw_inuse);

	if (hw_inuse < hw_active &&
	    time_after_eq64(vtime + ioc->inuse_margin_vtime, now.vnow)) {
		TRACE_IOCG_PATH(inuse_reset, iocg, &now,
				iocg->inuse, iocg->weight, hw_inuse, hw_active);
		spin_lock_irq(&ioc->lock);
		propagate_active_weight(iocg, iocg->weight, iocg->weight);
		spin_unlock_irq(&ioc->lock);
		current_hweight(iocg, &hw_active, &hw_inuse);
	}

	cost = abs_cost_to_cost(abs_cost, hw_inuse);

	/*
	 * If no one's waiting and within budget, issue right away. The
	 * tests are racy but the races aren't systemic - we only miss once
	 * in a while which is fine.
	 */
	if (!waitqueue_active(&iocg->waitq) &&
	    !atomic64_read(&iocg->abs_vdebt) &&
	    time_before_eq64(vtime + cost, now.vnow)) {
		iocg_commit_bio(iocg, bio, cost);
		return;
	}

	/*
	 * We're over budget. If @bio has to be issued regardless,
	 * remember the abs_cost instead of advancing vtime.
	 * iocg_kick_waitq() will pay off the debt before waking more IOs.
	 * This way, the debt is continuously paid off each period with the
	 * actual budget available to the cgroup. If we just wound vtime,
	 * we would incorrectly use the current hw_inuse for the entire
	 * amount which, for example, can lead to the cgroup staying
	 * blocked for a long time even with substantially raised hw_inuse.
	 */
	if (bio_issue_as_root_blkg(bio) || fatal_signal_pending(current)) {
		atomic64_add(abs_cost, &iocg->abs_vdebt);
		iocg_kick_delay(iocg, &now, cost);
		return;
	}

	/*
	 * Append self to the waitq and schedule the wakeup timer if we're
	 * the first waiter. The timer duration is calculated based on the
	 * current vrate. vtime and hweight changes can make it too short
	 * or too long. Each wait entry records the absolute cost it's
	 * waiting for to allow re-evaluation using a custom wait entry.
	 *
	 * If too short, the timer simply reschedules itself. If too long,
	 * the period timer will notice and trigger wakeups.
	 *
	 * All waiters are on iocg->waitq and the wait states are
	 * synchronized using waitq.lock.
	 */
	spin_lock_irq(&iocg->waitq.lock);

	/*
	 * We activated above but w/o any synchronization. Deactivation is
	 * synchronized with waitq.lock and we won't get deactivated as
	 * long as we're waiting, so we're good if we're activated here.
	 * In the unlikely case that we are deactivated, just issue the IO.
	 */
	if (unlikely(list_empty(&iocg->active_list))) {
		spin_unlock_irq(&iocg->waitq.lock);
		iocg_commit_bio(iocg, bio, cost);
		return;
	}

	init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
	wait.wait.private = current;
	wait.bio = bio;
	wait.abs_cost = abs_cost;
	wait.committed = false;	/* will be set true by waker */

	__add_wait_queue_entry_tail(&iocg->waitq, &wait.wait);
	iocg_kick_waitq(iocg, &now);

	spin_unlock_irq(&iocg->waitq.lock);

	while (true) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (wait.committed)
			break;
		io_schedule();
	}

	/* waker already committed us, proceed */
	finish_wait(&iocg->waitq, &wait.wait);
}

static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
			   struct bio *bio)
{
	struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
	struct ioc *ioc = iocg->ioc;
	sector_t bio_end = bio_end_sector(bio);
	struct ioc_now now;
	u32 hw_inuse;
	u64 abs_cost, cost;

	/* bypass if disabled or for root cgroup */
	if (!ioc->enabled || !iocg->level)
		return;

	abs_cost = calc_vtime_cost(bio, iocg, true);
	if (!abs_cost)
		return;

	ioc_now(ioc, &now);
	current_hweight(iocg, NULL, &hw_inuse);
	cost = abs_cost_to_cost(abs_cost, hw_inuse);

	/* update cursor if backmerging into the request at the cursor */
	if (blk_rq_pos(rq) < bio_end &&
	    blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor)
		iocg->cursor = bio_end;

	/*
	 * Charge if there's enough vtime budget and the existing request
	 * has cost assigned. Otherwise, account it as debt. See debt
	 * handling in ioc_rqos_throttle() for details.

static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
			   struct bio *bio)
{
	struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
	struct ioc *ioc = iocg->ioc;
	sector_t bio_end = bio_end_sector(bio);
	struct ioc_now now;
	u32 hw_inuse;
	u64 abs_cost, cost;

	/* bypass if disabled or for root cgroup */
	if (!ioc->enabled || !iocg->level)
		return;

	abs_cost = calc_vtime_cost(bio, iocg, true);
	if (!abs_cost)
		return;

	ioc_now(ioc, &now);
	current_hweight(iocg, NULL, &hw_inuse);
	cost = abs_cost_to_cost(abs_cost, hw_inuse);

	/* update cursor if backmerging into the request at the cursor */
	if (blk_rq_pos(rq) < bio_end &&
	    blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor)
		iocg->cursor = bio_end;

	/*
	 * Charge if there's enough vtime budget and the existing request
	 * has cost assigned.  Otherwise, account it as debt.  See debt
	 * handling in ioc_rqos_throttle() for details.
	 */
	if (rq->bio && rq->bio->bi_iocost_cost &&
	    time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow))
		iocg_commit_bio(iocg, bio, cost);
	else
		atomic64_add(abs_cost, &iocg->abs_vdebt);
}

static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
{
	struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);

	if (iocg && bio->bi_iocost_cost)
		atomic64_add(bio->bi_iocost_cost, &iocg->done_vtime);
}

static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
{
	struct ioc *ioc = rqos_to_ioc(rqos);
	u64 on_q_ns, rq_wait_ns;
	int pidx, rw;

	if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns)
		return;

	switch (req_op(rq) & REQ_OP_MASK) {
	case REQ_OP_READ:
		pidx = QOS_RLAT;
		rw = READ;
		break;
	case REQ_OP_WRITE:
		pidx = QOS_WLAT;
		rw = WRITE;
		break;
	default:
		return;
	}

	on_q_ns = ktime_get_ns() - rq->alloc_time_ns;
	rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;

	if (on_q_ns <= ioc->params.qos[pidx] * NSEC_PER_USEC)
		this_cpu_inc(ioc->pcpu_stat->missed[rw].nr_met);
	else
		this_cpu_inc(ioc->pcpu_stat->missed[rw].nr_missed);

	this_cpu_add(ioc->pcpu_stat->rq_wait_ns, rq_wait_ns);
}
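
/*
 * Illustrative numbers for the accounting above: with rlat=5000 (5ms),
 * a read completing 3ms after request allocation bumps nr_met while one
 * completing after 8ms bumps nr_missed.  rq_wait_ns accumulates the
 * allocation-to-dispatch delay.  The period timer folds these per-cpu
 * counters into the met/missed percentages and rq wait time it uses to
 * judge device saturation.
 */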

static void ioc_rqos_queue_depth_changed(struct rq_qos *rqos)
{
	struct ioc *ioc = rqos_to_ioc(rqos);

	spin_lock_irq(&ioc->lock);
	ioc_refresh_params(ioc, false);
	spin_unlock_irq(&ioc->lock);
}

static void ioc_rqos_exit(struct rq_qos *rqos)
{
	struct ioc *ioc = rqos_to_ioc(rqos);

	blkcg_deactivate_policy(rqos->q, &blkcg_policy_iocost);

	spin_lock_irq(&ioc->lock);
	ioc->running = IOC_STOP;
	spin_unlock_irq(&ioc->lock);

	del_timer_sync(&ioc->timer);
	free_percpu(ioc->pcpu_stat);
	kfree(ioc);
}

static struct rq_qos_ops ioc_rqos_ops = {
	.throttle = ioc_rqos_throttle,
	.merge = ioc_rqos_merge,
	.done_bio = ioc_rqos_done_bio,
	.done = ioc_rqos_done,
	.queue_depth_changed = ioc_rqos_queue_depth_changed,
	.exit = ioc_rqos_exit,
};

static int blk_iocost_init(struct request_queue *q)
{
	struct ioc *ioc;
	struct rq_qos *rqos;
	int ret;

	ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
	if (!ioc)
		return -ENOMEM;

	ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat);
	if (!ioc->pcpu_stat) {
		kfree(ioc);
		return -ENOMEM;
	}

	rqos = &ioc->rqos;
	rqos->id = RQ_QOS_COST;
	rqos->ops = &ioc_rqos_ops;
	rqos->q = q;

	spin_lock_init(&ioc->lock);
	timer_setup(&ioc->timer, ioc_timer_fn, 0);
	INIT_LIST_HEAD(&ioc->active_iocgs);

	ioc->running = IOC_IDLE;
	atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
	seqcount_init(&ioc->period_seqcount);
	ioc->period_at = ktime_to_us(ktime_get());
	atomic64_set(&ioc->cur_period, 0);
	atomic_set(&ioc->hweight_gen, 0);

	spin_lock_irq(&ioc->lock);
	ioc->autop_idx = AUTOP_INVALID;
	ioc_refresh_params(ioc, true);
	spin_unlock_irq(&ioc->lock);

	rq_qos_add(q, rqos);
	ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
	if (ret) {
		rq_qos_del(q, rqos);
		free_percpu(ioc->pcpu_stat);
		kfree(ioc);
		return ret;
	}
	return 0;
}

static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp)
{
	struct ioc_cgrp *iocc;

	iocc = kzalloc(sizeof(struct ioc_cgrp), gfp);
	if (!iocc)
		return NULL;

	iocc->dfl_weight = CGROUP_WEIGHT_DFL;
	return &iocc->cpd;
}

static void ioc_cpd_free(struct blkcg_policy_data *cpd)
{
	kfree(container_of(cpd, struct ioc_cgrp, cpd));
}

static struct blkg_policy_data *ioc_pd_alloc(gfp_t gfp, struct request_queue *q,
					     struct blkcg *blkcg)
{
	int levels = blkcg->css.cgroup->level + 1;
	struct ioc_gq *iocg;

	iocg = kzalloc_node(sizeof(*iocg) + levels * sizeof(iocg->ancestors[0]),
			    gfp, q->node);
	if (!iocg)
		return NULL;

	return &iocg->pd;
}

static void ioc_pd_init(struct blkg_policy_data *pd)
{
	struct ioc_gq *iocg = pd_to_iocg(pd);
	struct blkcg_gq *blkg = pd_to_blkg(&iocg->pd);
	struct ioc *ioc = q_to_ioc(blkg->q);
	struct ioc_now now;
	struct blkcg_gq *tblkg;
	unsigned long flags;

	ioc_now(ioc, &now);

	iocg->ioc = ioc;
	atomic64_set(&iocg->vtime, now.vnow);
	atomic64_set(&iocg->done_vtime, now.vnow);
	atomic64_set(&iocg->abs_vdebt, 0);
	atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
	INIT_LIST_HEAD(&iocg->active_list);
	iocg->hweight_active = HWEIGHT_WHOLE;
	iocg->hweight_inuse = HWEIGHT_WHOLE;

	init_waitqueue_head(&iocg->waitq);
	hrtimer_init(&iocg->waitq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	iocg->waitq_timer.function = iocg_waitq_timer_fn;
	hrtimer_init(&iocg->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	iocg->delay_timer.function = iocg_delay_timer_fn;

	iocg->level = blkg->blkcg->css.cgroup->level;

	for (tblkg = blkg; tblkg; tblkg = tblkg->parent) {
		struct ioc_gq *tiocg = blkg_to_iocg(tblkg);
		iocg->ancestors[tiocg->level] = tiocg;
	}

	spin_lock_irqsave(&ioc->lock, flags);
	weight_updated(iocg);
	spin_unlock_irqrestore(&ioc->lock, flags);
}

static void ioc_pd_free(struct blkg_policy_data *pd)
{
	struct ioc_gq *iocg = pd_to_iocg(pd);
	struct ioc *ioc = iocg->ioc;

	if (ioc) {
		spin_lock(&ioc->lock);
		if (!list_empty(&iocg->active_list)) {
			propagate_active_weight(iocg, 0, 0);
			list_del_init(&iocg->active_list);
		}
		spin_unlock(&ioc->lock);

		hrtimer_cancel(&iocg->waitq_timer);
		hrtimer_cancel(&iocg->delay_timer);
	}
	kfree(iocg);
}

static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
			     int off)
{
	const char *dname = blkg_dev_name(pd->blkg);
	struct ioc_gq *iocg = pd_to_iocg(pd);

	if (dname && iocg->cfg_weight)
		seq_printf(sf, "%s %u\n", dname, iocg->cfg_weight);
	return 0;
}

static int ioc_weight_show(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);

	seq_printf(sf, "default %u\n", iocc->dfl_weight);
	blkcg_print_blkgs(sf, blkcg, ioc_weight_prfill,
			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
	return 0;
}
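
/*
 * io.weight write formats accepted by ioc_weight_write() below (the
 * device numbers are illustrative):
 *
 *	echo "default 100" > io.weight		- set the cgroup default
 *	echo "200" > io.weight			- shorthand for the above
 *	echo "8:16 50" > io.weight		- per-device override
 *	echo "8:16 default" > io.weight		- clear the override
 *
 * Weights must be between CGROUP_WEIGHT_MIN and CGROUP_WEIGHT_MAX;
 * clearing an override sets cfg_weight to 0 so the cgroup falls back to
 * the blkcg-wide default weight.
 */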

static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
				size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
	struct blkg_conf_ctx ctx;
	struct ioc_gq *iocg;
	u32 v;
	int ret;

	if (!strchr(buf, ':')) {
		struct blkcg_gq *blkg;

		if (!sscanf(buf, "default %u", &v) && !sscanf(buf, "%u", &v))
			return -EINVAL;

		if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
			return -EINVAL;

		spin_lock(&blkcg->lock);
		iocc->dfl_weight = v;
		hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
			struct ioc_gq *iocg = blkg_to_iocg(blkg);

			if (iocg) {
				spin_lock_irq(&iocg->ioc->lock);
				weight_updated(iocg);
				spin_unlock_irq(&iocg->ioc->lock);
			}
		}
		spin_unlock(&blkcg->lock);

		return nbytes;
	}

	ret = blkg_conf_prep(blkcg, &blkcg_policy_iocost, buf, &ctx);
	if (ret)
		return ret;

	iocg = blkg_to_iocg(ctx.blkg);

	if (!strncmp(ctx.body, "default", 7)) {
		v = 0;
	} else {
		if (!sscanf(ctx.body, "%u", &v))
			goto einval;
		if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
			goto einval;
	}

	spin_lock_irq(&iocg->ioc->lock);
	iocg->cfg_weight = v;
	weight_updated(iocg);
	spin_unlock_irq(&iocg->ioc->lock);

	blkg_conf_finish(&ctx);
	return nbytes;

einval:
	blkg_conf_finish(&ctx);
	return -EINVAL;
}

static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
			  int off)
{
	const char *dname = blkg_dev_name(pd->blkg);
	struct ioc *ioc = pd_to_iocg(pd)->ioc;

	if (!dname)
		return 0;

	seq_printf(sf, "%s enable=%d ctrl=%s rpct=%u.%02u rlat=%u wpct=%u.%02u wlat=%u min=%u.%02u max=%u.%02u\n",
		   dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto",
		   ioc->params.qos[QOS_RPPM] / 10000,
		   ioc->params.qos[QOS_RPPM] % 10000 / 100,
		   ioc->params.qos[QOS_RLAT],
		   ioc->params.qos[QOS_WPPM] / 10000,
		   ioc->params.qos[QOS_WPPM] % 10000 / 100,
		   ioc->params.qos[QOS_WLAT],
		   ioc->params.qos[QOS_MIN] / 10000,
		   ioc->params.qos[QOS_MIN] % 10000 / 100,
		   ioc->params.qos[QOS_MAX] / 10000,
		   ioc->params.qos[QOS_MAX] % 10000 / 100);
	return 0;
}

static int ioc_qos_show(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));

	blkcg_print_blkgs(sf, blkcg, ioc_qos_prfill,
			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
	return 0;
}

static const match_table_t qos_ctrl_tokens = {
	{ QOS_ENABLE,		"enable=%u"	},
	{ QOS_CTRL,		"ctrl=%s"	},
	{ NR_QOS_CTRL_PARAMS,	NULL		},
};

static const match_table_t qos_tokens = {
	{ QOS_RPPM,		"rpct=%s"	},
	{ QOS_RLAT,		"rlat=%u"	},
	{ QOS_WPPM,		"wpct=%s"	},
	{ QOS_WLAT,		"wlat=%u"	},
	{ QOS_MIN,		"min=%s"	},
	{ QOS_MAX,		"max=%s"	},
	{ NR_QOS_PARAMS,	NULL		},
};
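
/*
 * Example writes for the two root-only interfaces handled below (device
 * numbers and values are illustrative):
 *
 *	echo "8:16 enable=1 ctrl=user rpct=95.00 rlat=10000 wpct=95.00 wlat=20000 min=50.00 max=150.00" > io.cost.qos
 *	echo "8:16 ctrl=user model=linear rbps=488636416 rseqiops=8932 rrandiops=8518 wbps=427891549 wseqiops=28755 wrandiops=21940" > io.cost.model
 *
 * For io.cost.qos, rpct/wpct are percentages with two decimals (stored
 * as ppm), rlat/wlat are in microseconds, and min/max bound the vrate
 * adjustment range.  For io.cost.model, "linear" is the only accepted
 * model.  In both files, writing any parameter switches ctrl to "user"
 * while "ctrl=auto" reverts to the built-in defaults.
 */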

static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
			     size_t nbytes, loff_t off)
{
	struct gendisk *disk;
	struct ioc *ioc;
	u32 qos[NR_QOS_PARAMS];
	bool enable, user;
	char *p;
	int ret;

	disk = blkcg_conf_get_disk(&input);
	if (IS_ERR(disk))
		return PTR_ERR(disk);

	ioc = q_to_ioc(disk->queue);
	if (!ioc) {
		ret = blk_iocost_init(disk->queue);
		if (ret)
			goto err;
		ioc = q_to_ioc(disk->queue);
	}

	spin_lock_irq(&ioc->lock);
	memcpy(qos, ioc->params.qos, sizeof(qos));
	enable = ioc->enabled;
	user = ioc->user_qos_params;
	spin_unlock_irq(&ioc->lock);

	while ((p = strsep(&input, " \t\n"))) {
		substring_t args[MAX_OPT_ARGS];
		char buf[32];
		int tok;
		s64 v;

		if (!*p)
			continue;

		switch (match_token(p, qos_ctrl_tokens, args)) {
		case QOS_ENABLE:
			match_u64(&args[0], &v);
			enable = v;
			continue;
		case QOS_CTRL:
			match_strlcpy(buf, &args[0], sizeof(buf));
			if (!strcmp(buf, "auto"))
				user = false;
			else if (!strcmp(buf, "user"))
				user = true;
			else
				goto einval;
			continue;
		}

		tok = match_token(p, qos_tokens, args);
		switch (tok) {
		case QOS_RPPM:
		case QOS_WPPM:
			if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
			    sizeof(buf))
				goto einval;
			if (cgroup_parse_float(buf, 2, &v))
				goto einval;
			if (v < 0 || v > 10000)
				goto einval;
			qos[tok] = v * 100;
			break;
		case QOS_RLAT:
		case QOS_WLAT:
			if (match_u64(&args[0], &v))
				goto einval;
			qos[tok] = v;
			break;
		case QOS_MIN:
		case QOS_MAX:
			if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
			    sizeof(buf))
				goto einval;
			if (cgroup_parse_float(buf, 2, &v))
				goto einval;
			if (v < 0)
				goto einval;
			qos[tok] = clamp_t(s64, v * 100,
					   VRATE_MIN_PPM, VRATE_MAX_PPM);
			break;
		default:
			goto einval;
		}
		user = true;
	}

	if (qos[QOS_MIN] > qos[QOS_MAX])
		goto einval;

	spin_lock_irq(&ioc->lock);

	if (enable) {
		blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
		ioc->enabled = true;
	} else {
		blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
		ioc->enabled = false;
	}

	if (user) {
		memcpy(ioc->params.qos, qos, sizeof(qos));
		ioc->user_qos_params = true;
	} else {
		ioc->user_qos_params = false;
	}

	ioc_refresh_params(ioc, true);
	spin_unlock_irq(&ioc->lock);

	put_disk_and_module(disk);
	return nbytes;
einval:
	ret = -EINVAL;
err:
	put_disk_and_module(disk);
	return ret;
}

static u64 ioc_cost_model_prfill(struct seq_file *sf,
				 struct blkg_policy_data *pd, int off)
{
	const char *dname = blkg_dev_name(pd->blkg);
	struct ioc *ioc = pd_to_iocg(pd)->ioc;
	u64 *u = ioc->params.i_lcoefs;

	if (!dname)
		return 0;

	seq_printf(sf, "%s ctrl=%s model=linear "
		   "rbps=%llu rseqiops=%llu rrandiops=%llu "
		   "wbps=%llu wseqiops=%llu wrandiops=%llu\n",
		   dname, ioc->user_cost_model ? "user" : "auto",
		   u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
		   u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS]);
	return 0;
}

static int ioc_cost_model_show(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));

	blkcg_print_blkgs(sf, blkcg, ioc_cost_model_prfill,
			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
	return 0;
}

static const match_table_t cost_ctrl_tokens = {
	{ COST_CTRL,		"ctrl=%s"	},
	{ COST_MODEL,		"model=%s"	},
	{ NR_COST_CTRL_PARAMS,	NULL		},
};

static const match_table_t i_lcoef_tokens = {
	{ I_LCOEF_RBPS,		"rbps=%u"	},
	{ I_LCOEF_RSEQIOPS,	"rseqiops=%u"	},
	{ I_LCOEF_RRANDIOPS,	"rrandiops=%u"	},
	{ I_LCOEF_WBPS,		"wbps=%u"	},
	{ I_LCOEF_WSEQIOPS,	"wseqiops=%u"	},
	{ I_LCOEF_WRANDIOPS,	"wrandiops=%u"	},
	{ NR_I_LCOEFS,		NULL		},
};

static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
				    size_t nbytes, loff_t off)
{
	struct gendisk *disk;
	struct ioc *ioc;
	u64 u[NR_I_LCOEFS];
	bool user;
	char *p;
	int ret;

	disk = blkcg_conf_get_disk(&input);
	if (IS_ERR(disk))
		return PTR_ERR(disk);

	ioc = q_to_ioc(disk->queue);
	if (!ioc) {
		ret = blk_iocost_init(disk->queue);
		if (ret)
			goto err;
		ioc = q_to_ioc(disk->queue);
	}

	spin_lock_irq(&ioc->lock);
	memcpy(u, ioc->params.i_lcoefs, sizeof(u));
	user = ioc->user_cost_model;
	spin_unlock_irq(&ioc->lock);

	while ((p = strsep(&input, " \t\n"))) {
		substring_t args[MAX_OPT_ARGS];
		char buf[32];
		int tok;
		u64 v;

		if (!*p)
			continue;

		switch (match_token(p, cost_ctrl_tokens, args)) {
		case COST_CTRL:
			match_strlcpy(buf, &args[0], sizeof(buf));
			if (!strcmp(buf, "auto"))
				user = false;
			else if (!strcmp(buf, "user"))
				user = true;
			else
				goto einval;
			continue;
		case COST_MODEL:
			match_strlcpy(buf, &args[0], sizeof(buf));
			if (strcmp(buf, "linear"))
				goto einval;
			continue;
		}

		tok = match_token(p, i_lcoef_tokens, args);
		if (tok == NR_I_LCOEFS)
			goto einval;
		if (match_u64(&args[0], &v))
			goto einval;
		u[tok] = v;
		user = true;
	}

	spin_lock_irq(&ioc->lock);
	if (user) {
		memcpy(ioc->params.i_lcoefs, u, sizeof(u));
		ioc->user_cost_model = true;
	} else {
		ioc->user_cost_model = false;
	}
	ioc_refresh_params(ioc, true);
	spin_unlock_irq(&ioc->lock);

	put_disk_and_module(disk);
	return nbytes;

einval:
	ret = -EINVAL;
err:
	put_disk_and_module(disk);
	return ret;
}

static struct cftype ioc_files[] = {
	{
		.name = "weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = ioc_weight_show,
		.write = ioc_weight_write,
	},
	{
		.name = "cost.qos",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = ioc_qos_show,
		.write = ioc_qos_write,
	},
	{
		.name = "cost.model",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = ioc_cost_model_show,
		.write = ioc_cost_model_write,
	},
	{}
};

static struct blkcg_policy blkcg_policy_iocost = {
	.dfl_cftypes = ioc_files,
	.cpd_alloc_fn = ioc_cpd_alloc,
	.cpd_free_fn = ioc_cpd_free,
	.pd_alloc_fn = ioc_pd_alloc,
	.pd_init_fn = ioc_pd_init,
	.pd_free_fn = ioc_pd_free,
};

static int __init ioc_init(void)
{
	return blkcg_policy_register(&blkcg_policy_iocost);
}

static void __exit ioc_exit(void)
{
	return blkcg_policy_unregister(&blkcg_policy_iocost);
}

module_init(ioc_init);
module_exit(ioc_exit);