/*
 * Budget Fair Queueing (BFQ) I/O scheduler.
 *
 * Based on ideas and code from CFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
 *		      Arianna Avanzini <avanzini@google.com>
 *
 * Copyright (C) 2017 Paolo Valente <paolo.valente@linaro.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BFQ is a proportional-share I/O scheduler, with some extra
 * low-latency capabilities. BFQ also supports full hierarchical
 * scheduling through cgroups. The next paragraphs provide an
 * introduction to BFQ's inner workings. Details on BFQ's benefits,
 * usage and limitations can be found in
 * Documentation/block/bfq-iosched.txt.
 *
 * BFQ is a proportional-share storage-I/O scheduling algorithm based
 * on the slice-by-slice service scheme of CFQ. But BFQ assigns
 * budgets, measured in number of sectors, to processes instead of
 * time slices. The device is not granted to the in-service process
 * for a given time slice, but until it has exhausted its assigned
 * budget. This change from the time to the service domain enables BFQ
 * to distribute the device throughput among processes as desired,
 * without any distortion due to throughput fluctuations, or to device
 * internal queueing. BFQ uses an ad hoc internal scheduler, called
 * B-WF2Q+, to schedule processes according to their budgets. More
 * precisely, BFQ schedules queues associated with processes. Each
 * process/queue is assigned a user-configurable weight, and B-WF2Q+
 * guarantees that each queue receives a fraction of the throughput
 * proportional to its weight. Thanks to the accurate policy of
 * B-WF2Q+, BFQ can afford to assign high budgets to I/O-bound
 * processes issuing sequential requests (to boost the throughput),
 * and yet guarantee a low latency to interactive and soft real-time
 * applications.
 *
 * In particular, to provide these low-latency guarantees, BFQ
 * explicitly privileges the I/O of two classes of time-sensitive
 * applications: interactive and soft real-time. In more detail, BFQ
 * behaves this way if the low_latency parameter is set (default
 * configuration). This feature enables BFQ to provide applications in
 * these classes with a very low latency.
 *
 * To implement this feature, BFQ constantly tries to detect whether
 * the I/O requests in a bfq_queue come from an interactive or a soft
 * real-time application. For brevity, in these cases, the queue is
 * said to be interactive or soft real-time. In both cases, BFQ
 * privileges the service of the queue, over that of non-interactive
 * and non-soft-real-time queues. This privileging is performed,
 * mainly, by raising the weight of the queue. So, for brevity, we
 * call weight-raising periods the time periods during which a queue
 * is privileged, because it is deemed interactive or soft real-time.
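 *
 * As a concrete example of the proportional-share guarantee mentioned
 * above (this example is not in the original text): with low_latency
 * disabled, so that no weight-raising interferes, two
 * continuously-backlogged queues with weights 100 and 300 are granted
 * about 25% and 75% of the device throughput, respectively.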
 *
 * The detection of soft real-time queues/applications is described in
 * detail in the comments on the function
 * bfq_bfqq_softrt_next_start. On the other hand, the detection of an
 * interactive queue works as follows: a queue is deemed interactive
 * if it is constantly non-empty only for a limited time interval,
 * after which it does become empty. The queue may be deemed
 * interactive again (for a limited time), if it restarts being
 * constantly non-empty, provided that this happens only after the
 * queue has remained empty for a given minimum idle time.
 *
 * By default, BFQ computes automatically the above maximum time
 * interval, i.e., the time interval after which a constantly
 * non-empty queue stops being deemed interactive. Since a queue is
 * weight-raised while it is deemed interactive, this maximum time
 * interval happens to coincide with the (maximum) duration of the
 * weight-raising for interactive queues.
 *
 * Finally, BFQ also features additional heuristics for
 * preserving both a low latency and a high throughput on NCQ-capable,
 * rotational or flash-based devices, and to get the job done quickly
 * for applications consisting of many I/O-bound processes.
 *
 * NOTE: if the main or only goal, with a given device, is to achieve
 * the maximum-possible throughput at all times, then do switch off
 * all low-latency heuristics for that device, by setting low_latency
 * to 0.
 *
 * BFQ is described in [1], which also contains a reference to the
 * initial, more theoretical paper on BFQ. The interested reader
 * can find in the latter paper full details on the main algorithm, as
 * well as formulas of the guarantees and formal proofs of all the
 * properties. With respect to the version of BFQ presented in these
 * papers, this implementation adds a few more heuristics, such as the
 * ones that guarantee a low latency to interactive and soft real-time
 * applications, and a hierarchical extension based on H-WF2Q+.
 *
 * B-WF2Q+ is based on WF2Q+, which is described in [2], together with
 * H-WF2Q+, while the augmented tree used here to implement B-WF2Q+
 * with O(log N) complexity derives from the one introduced with EEVDF
 * in [3].
 *
 * [1] P. Valente, A. Avanzini, "Evolution of the BFQ Storage I/O
 *     Scheduler", Proceedings of the First Workshop on Mobile System
 *     Technologies (MST-2015), May 2015.
 *     http://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf
 *
 * [2] Jon C.R. Bennett and H. Zhang, "Hierarchical Packet Fair Queueing
 *     Algorithms", IEEE/ACM Transactions on Networking, 5(5):675-689,
 *     Oct 1997.
 *
 *     http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz
 *
 * [3] I. Stoica and H. Abdel-Wahab, "Earliest Eligible Virtual Deadline
 *     First: A Flexible and Accurate Mechanism for Proportional Share
 *     Resource Allocation", technical report.
 *
 *     http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/elevator.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"
#include "bfq-iosched.h"
#include "blk-wbt.h"

#define BFQ_BFQQ_FNS(name)						\
void bfq_mark_bfqq_##name(struct bfq_queue *bfqq)			\
{									\
	__set_bit(BFQQF_##name, &(bfqq)->flags);			\
}									\
void bfq_clear_bfqq_##name(struct bfq_queue *bfqq)			\
{									\
	__clear_bit(BFQQF_##name, &(bfqq)->flags);			\
}									\
int bfq_bfqq_##name(const struct bfq_queue *bfqq)			\
{									\
	return test_bit(BFQQF_##name, &(bfqq)->flags);			\
}

BFQ_BFQQ_FNS(just_created);
BFQ_BFQQ_FNS(busy);
BFQ_BFQQ_FNS(wait_request);
BFQ_BFQQ_FNS(non_blocking_wait_rq);
BFQ_BFQQ_FNS(fifo_expire);
BFQ_BFQQ_FNS(has_short_ttime);
BFQ_BFQQ_FNS(sync);
BFQ_BFQQ_FNS(IO_bound);
BFQ_BFQQ_FNS(in_large_burst);
BFQ_BFQQ_FNS(coop);
BFQ_BFQQ_FNS(split_coop);
BFQ_BFQQ_FNS(softrt_update);
#undef BFQ_BFQQ_FNS
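
/*
 * As an example of what the macro above generates,
 * BFQ_BFQQ_FNS(busy) expands to three trivial helpers, equivalent to:
 *
 *	void bfq_mark_bfqq_busy(struct bfq_queue *bfqq)
 *	{ __set_bit(BFQQF_busy, &(bfqq)->flags); }
 *	void bfq_clear_bfqq_busy(struct bfq_queue *bfqq)
 *	{ __clear_bit(BFQQF_busy, &(bfqq)->flags); }
 *	int bfq_bfqq_busy(const struct bfq_queue *bfqq)
 *	{ return test_bit(BFQQF_busy, &(bfqq)->flags); }
 *
 * i.e., set/clear/test wrappers around the per-queue flag word.
 */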

/* Expiration time of sync (0) and async (1) requests, in ns. */
static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };

/* Maximum backwards seek (magic number lifted from CFQ), in KiB. */
static const int bfq_back_max = 16 * 1024;

/* Penalty of a backwards seek, in number of sectors. */
static const int bfq_back_penalty = 2;

/* Idling period duration, in ns. */
static u64 bfq_slice_idle = NSEC_PER_SEC / 125;

/* Minimum number of assigned budgets for which stats are safe to compute. */
static const int bfq_stats_min_budgets = 194;

/* Default maximum budget values, in sectors and number of requests. */
static const int bfq_default_max_budget = 16 * 1024;

/*
 * When a sync request is dispatched, the queue that contains that
 * request, and all the ancestor entities of that queue, are charged
 * with the number of sectors of the request. In contrast, if the
 * request is async, then the queue and its ancestor entities are
 * charged with the number of sectors of the request, multiplied by
 * the factor below. This throttles the bandwidth for async I/O,
 * w.r.t. sync I/O, and it is done to counter the tendency of async
 * writes to steal I/O throughput from reads.
 *
 * The current value of this parameter is the result of a tuning with
 * several hardware and software configurations. We tried to find the
 * lowest value for which writes do not cause noticeable problems to
 * reads. In fact, the lower this parameter, the more stable the I/O
 * control, in the following respect. The lower this parameter is,
 * the less the bandwidth enjoyed by a group decreases
 * - when the group does writes, w.r.t. when it does reads;
 * - when other groups do reads, w.r.t. when they do writes.
 */
static const int bfq_async_charge_factor = 3;

/* Default timeout values, in jiffies, approximating CFQ defaults. */
const int bfq_timeout = HZ / 8;

/*
 * Time limit for merging (see comments in bfq_setup_cooperator). Set
 * to the slowest value that, in our tests, proved to be effective in
 * removing false positives, while not causing true positives to miss
 * queue merging.
 *
 * As can be deduced from the low time limit below, queue merging, if
 * successful, happens at the very beginning of the I/O of the involved
 * cooperating processes, as a consequence of the arrival of the very
 * first requests from each cooperator. After that, there is very
 * little chance to find cooperators.
 */
static const unsigned long bfq_merge_time_limit = HZ/10;

static struct kmem_cache *bfq_pool;

/* Below this threshold (in ns), we consider thinktime immediate. */
#define BFQ_MIN_TT		(2 * NSEC_PER_MSEC)

/* hw_tag detection: parallel requests threshold and min samples needed. */
#define BFQ_HW_QUEUE_THRESHOLD	4
#define BFQ_HW_QUEUE_SAMPLES	32

#define BFQQ_SEEK_THR		(sector_t)(8 * 100)
#define BFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
#define BFQQ_CLOSE_THR		(sector_t)(8 * 1024)
#define BFQQ_SEEKY(bfqq)	(hweight32(bfqq->seek_history) > 19)

/* Min number of samples required to perform peak-rate update */
#define BFQ_RATE_MIN_SAMPLES	32
/* Min observation time interval required to perform a peak-rate update (ns) */
#define BFQ_RATE_MIN_INTERVAL	(300*NSEC_PER_MSEC)
/* Target observation time interval for a peak-rate update (ns) */
#define BFQ_RATE_REF_INTERVAL	NSEC_PER_SEC

/*
 * Shift used for peak-rate fixed precision calculations.
 * With
 * - the current shift: 16 positions
 * - the current type used to store rate: u32
 * - the current unit of measure for rate: [sectors/usec], or, more precisely,
 *   [(sectors/usec) / 2^BFQ_RATE_SHIFT] to take into account the shift,
 * the range of rates that can be stored is
 * [1 / 2^BFQ_RATE_SHIFT, 2^(32 - BFQ_RATE_SHIFT)] sectors/usec =
 * [1 / 2^16, 2^16] sectors/usec = [15e-6, 65536] sectors/usec =
 * [15, 65G] sectors/sec
 * Which, assuming a sector size of 512 B, corresponds to a range of
 * [7.5K, 33T] B/sec
 */
#define BFQ_RATE_SHIFT		16
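
/*
 * Worked example of the fixed-point representation above: a stored
 * rate of 14000 corresponds to 14000 / 2^16 ~= 0.21 sectors/usec,
 * i.e., roughly 214K sectors/sec, or about 109 MB/s with 512 B
 * sectors. The hypothetical helper below (illustrative only, not used
 * anywhere in this file) shows how a stored rate would be converted
 * back to sectors/sec.
 */
static inline u64 bfq_example_rate_to_sectors_per_sec(u32 rate)
{
	/* rate is in (sectors/usec) << BFQ_RATE_SHIFT */
	return ((u64)rate * USEC_PER_SEC) >> BFQ_RATE_SHIFT;
}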

/*
 * When configured for computing the duration of the weight-raising
 * for interactive queues automatically (see the comments at the
 * beginning of this file), BFQ does it using the following formula:
 * duration = (ref_rate / r) * ref_wr_duration,
 * where r is the peak rate of the device, and ref_rate and
 * ref_wr_duration are two reference parameters. In particular,
 * ref_rate is the peak rate of the reference storage device (see
 * below), and ref_wr_duration is about the maximum time needed, with
 * BFQ and while reading two files in parallel, to load typical large
 * applications on the reference device (see the comments on
 * max_service_from_wr below, for more details on how ref_wr_duration
 * is obtained). In practice, the slower/faster the device at hand
 * is, the more/less it takes to load applications with respect to the
 * reference device. Accordingly, the longer/shorter BFQ grants
 * weight raising to interactive applications.
 *
 * BFQ uses two different reference pairs (ref_rate, ref_wr_duration),
 * depending on whether the device is rotational or non-rotational.
 *
 * In the following definitions, ref_rate[0] and ref_wr_duration[0]
 * are the reference values for a rotational device, whereas
 * ref_rate[1] and ref_wr_duration[1] are the reference values for a
 * non-rotational device. The reference rates are not the actual peak
 * rates of the devices used as a reference, but slightly lower
 * values. The reason for using slightly lower values is that the
 * peak-rate estimator tends to yield slightly lower values than the
 * actual peak rate (it can yield the actual peak rate only if there
 * is only one process doing I/O, and the process does sequential
 * I/O).
 *
 * The reference peak rates are measured in sectors/usec, left-shifted
 * by BFQ_RATE_SHIFT.
 */
static int ref_rate[2] = {14000, 33000};
/*
 * Because a conversion function is needed to initialize the following
 * array, the array can be initialized only inside a function (a sketch
 * of such an initialization follows below).
 */
static int ref_wr_duration[2];
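
/*
 * A minimal sketch of the kind of initialization the comment above
 * refers to. The actual reference durations, and the function that
 * sets them, live elsewhere in this file; the values below are
 * placeholders, not the real ones:
 *
 *	static void bfq_example_init_ref_wr_durations(void)
 *	{
 *		ref_wr_duration[0] = msecs_to_jiffies(8000);  placeholder, rotational
 *		ref_wr_duration[1] = msecs_to_jiffies(3000);  placeholder, non-rotational
 *	}
 *
 * bfq_wr_duration() further below then scales such a reference
 * duration by ref_rate / peak_rate and clamps the result (see its
 * comments).
 */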

/*
 * BFQ uses the above-detailed, time-based weight-raising mechanism to
 * privilege interactive tasks. This mechanism is vulnerable to the
 * following false positives: I/O-bound applications that will go on
 * doing I/O for much longer than the duration of weight
 * raising. These applications have basically no benefit from being
 * weight-raised at the beginning of their I/O. On the opposite end,
 * while being weight-raised, these applications
 * a) unjustly steal throughput from applications that may actually need
 * low latency;
 * b) make BFQ uselessly perform device idling; device idling results
 * in loss of device throughput with most flash-based storage, and may
 * increase latencies when used purposelessly.
 *
 * BFQ tries to reduce these problems, by adopting the following
 * countermeasure. To introduce this countermeasure, we need first to
 * finish explaining how the duration of weight-raising for
 * interactive tasks is computed.
 *
 * For a bfq_queue deemed as interactive, the duration of weight
 * raising is dynamically adjusted, as a function of the estimated
 * peak rate of the device, so as to be equal to the time needed to
 * execute the 'largest' interactive task we benchmarked so far. By
 * largest task, we mean the task for which each involved process has
 * to do more I/O than for any of the other tasks we benchmarked. This
 * reference interactive task is the start-up of LibreOffice Writer,
 * and in this task each process/bfq_queue needs to have at most ~110K
 * sectors transferred.
 *
 * This last piece of information enables BFQ to reduce the actual
 * duration of weight-raising for at least one class of I/O-bound
 * applications: those doing sequential or quasi-sequential I/O. An
 * example is file copy. In fact, once started, the main I/O-bound
 * processes of these applications usually consume the above 110K
 * sectors in much less time than the processes of an application that
 * is starting, because these I/O-bound processes will greedily devote
 * almost all their CPU cycles only to their target,
 * throughput-friendly I/O operations. This is even more true if BFQ
 * happens to be underestimating the device peak rate, and thus
 * overestimating the duration of weight raising. But, according to
 * our measurements, once they have transferred 110K sectors, these
 * processes have no right to be weight-raised any longer.
 *
 * Based on the last consideration, BFQ ends weight-raising for a
 * bfq_queue if the latter happens to have received an amount of
 * service at least equal to the following constant. The constant is
 * set to slightly more than 110K, to have a minimum safety margin.
 *
 * This early ending of weight-raising reduces the amount of time
 * during which interactive false positives cause the two problems
 * described at the beginning of these comments.
 */
static const unsigned long max_service_from_wr = 120000;

#define RQ_BIC(rq)		icq_to_bic((rq)->elv.priv[0])
#define RQ_BFQQ(rq)		((rq)->elv.priv[1])

struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync)
{
	return bic->bfqq[is_sync];
}

void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync)
{
	bic->bfqq[is_sync] = bfqq;
}

struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
{
	return bic->icq.q->elevator->elevator_data;
}

/**
 * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
 * @icq: the iocontext queue.
 */
static struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
{
	/* bic->icq is the first member, %NULL will convert to %NULL */
	return container_of(icq, struct bfq_io_cq, icq);
}

/**
 * bfq_bic_lookup - search into @ioc a bic associated to @bfqd.
 * @bfqd: the lookup key.
 * @ioc: the io_context of the process doing I/O.
 * @q: the request queue.
 */
static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
					struct io_context *ioc,
					struct request_queue *q)
{
	if (ioc) {
		unsigned long flags;
		struct bfq_io_cq *icq;

		spin_lock_irqsave(&q->queue_lock, flags);
		icq = icq_to_bic(ioc_lookup_icq(ioc, q));
		spin_unlock_irqrestore(&q->queue_lock, flags);

		return icq;
	}

	return NULL;
}

/*
 * Scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing.
 */
void bfq_schedule_dispatch(struct bfq_data *bfqd)
{
	if (bfqd->queued != 0) {
		bfq_log(bfqd, "schedule dispatch");
		blk_mq_run_hw_queues(bfqd->queue, true);
	}
}

#define bfq_class_idle(bfqq)	((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define bfq_class_rt(bfqq)	((bfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define bfq_sample_valid(samples)	((samples) > 80)

/*
 * Lifted from AS - choose which of rq1 and rq2 is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *bfq_choose_req(struct bfq_data *bfqd,
				      struct request *rq1,
				      struct request *rq2,
				      sector_t last)
{
	sector_t s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define BFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define BFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned int wrap = 0; /* bit mask: requests behind the disk head? */

	if (!rq1 || rq1 == rq2)
		return rq2;
	if (!rq2)
		return rq1;

	if (rq_is_sync(rq1) && !rq_is_sync(rq2))
		return rq1;
	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
		return rq2;
	if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
		return rq1;
	else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META))
		return rq2;

	s1 = blk_rq_pos(rq1);
	s2 = blk_rq_pos(rq2);

	/*
	 * By definition, 1KiB is 2 sectors.
	 */
	back_max = bfqd->bfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * bfqd->bfq_back_penalty;
	else
		wrap |= BFQ_RQ1_WRAP;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * bfqd->bfq_back_penalty;
	else
		wrap |= BFQ_RQ2_WRAP;

	/* Found required data */

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
		if (d1 < d2)
			return rq1;
		else if (d2 < d1)
			return rq2;

		if (s1 >= s2)
			return rq1;
		else
			return rq2;

	case BFQ_RQ2_WRAP:
		return rq1;
	case BFQ_RQ1_WRAP:
		return rq2;
	case BFQ_RQ1_WRAP|BFQ_RQ2_WRAP: /* both rqs wrapped */
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}
}
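
/*
 * Worked example for the backward-seek logic above: with the default
 * back-seek penalty of 2 and last = 1000, a request at sector 1100
 * gets d = 100, while a request at sector 960 (40 sectors behind the
 * head, well within back_max) gets d = 40 * 2 = 80 and is therefore
 * preferred; a request farther behind the head than back_max would
 * instead be marked as wrapped, and would lose to any non-wrapped
 * request.
 */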

/*
 * Async I/O can easily starve sync I/O (both sync reads and sync
 * writes), by consuming all tags. Similarly, storms of sync writes,
 * such as those that sync(2) may trigger, can starve sync reads.
 * Limit depths of async I/O and sync writes so as to counter both
 * problems.
 */
static void bfq_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
{
	struct bfq_data *bfqd = data->q->elevator->elevator_data;

	if (op_is_sync(op) && !op_is_write(op))
		return;

	data->shallow_depth =
		bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(op)];

	bfq_log(bfqd, "[%s] wr_busy %d sync %d depth %u",
			__func__, bfqd->wr_busy_queues, op_is_sync(op),
			data->shallow_depth);
}

static struct bfq_queue *
bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
		       sector_t sector, struct rb_node **ret_parent,
		       struct rb_node ***rb_link)
{
	struct rb_node **p, *parent;
	struct bfq_queue *bfqq = NULL;

	parent = NULL;
	p = &root->rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		bfqq = rb_entry(parent, struct bfq_queue, pos_node);

		/*
		 * Sort strictly based on sector. Smallest to the left,
		 * largest to the right.
		 */
		if (sector > blk_rq_pos(bfqq->next_rq))
			n = &(*p)->rb_right;
		else if (sector < blk_rq_pos(bfqq->next_rq))
			n = &(*p)->rb_left;
		else
			break;
		p = n;
		bfqq = NULL;
	}

	*ret_parent = parent;
	if (rb_link)
		*rb_link = p;

	bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
		(unsigned long long)sector,
		bfqq ? bfqq->pid : 0);

	return bfqq;
}

static bool bfq_too_late_for_merging(struct bfq_queue *bfqq)
{
	return bfqq->service_from_backlogged > 0 &&
		time_is_before_jiffies(bfqq->first_IO_time +
				       bfq_merge_time_limit);
}

void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
	struct rb_node **p, *parent;
	struct bfq_queue *__bfqq;

	if (bfqq->pos_root) {
		rb_erase(&bfqq->pos_node, bfqq->pos_root);
		bfqq->pos_root = NULL;
	}

	/*
	 * bfqq cannot be merged any longer (see comments in
	 * bfq_setup_cooperator): no point in adding bfqq into the
	 * position tree.
	 */
	if (bfq_too_late_for_merging(bfqq))
		return;

	if (bfq_class_idle(bfqq))
		return;
	if (!bfqq->next_rq)
		return;

	bfqq->pos_root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
	__bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
			blk_rq_pos(bfqq->next_rq), &parent, &p);
	if (!__bfqq) {
		rb_link_node(&bfqq->pos_node, parent, p);
		rb_insert_color(&bfqq->pos_node, bfqq->pos_root);
	} else
		bfqq->pos_root = NULL;
}

/*
 * Tell whether there are active queues with different weights or
 * active groups.
 */
static bool bfq_varied_queue_weights_or_active_groups(struct bfq_data *bfqd)
{
	/*
	 * For queue weights to differ, queue_weights_tree must contain
	 * at least two nodes.
	 */
	return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
		(bfqd->queue_weights_tree.rb_node->rb_left ||
		 bfqd->queue_weights_tree.rb_node->rb_right)
#ifdef CONFIG_BFQ_GROUP_IOSCHED
	       ) ||
	       (bfqd->num_groups_with_pending_reqs > 0
#endif
	       );
}

/*
 * The following function returns true if every queue must receive the
 * same share of the throughput (this condition is used when deciding
 * whether idling may be disabled, see the comments in the function
 * bfq_better_to_idle()).
 *
 * Such a scenario occurs when:
 * 1) all active queues have the same weight,
 * 2) all active groups at the same level in the groups tree have the same
 *    weight,
 * 3) all active groups at the same level in the groups tree have the same
 *    number of children.
 *
 * Unfortunately, keeping the necessary state for evaluating exactly
 * the last two symmetry sub-conditions above would be quite complex
 * and time consuming. Therefore this function evaluates, instead,
 * only the following two stronger sub-conditions, for which it is
 * much easier to maintain the needed state:
 * 1) all active queues have the same weight,
 * 2) there are no active groups.
 * In particular, the last condition is always true if hierarchical
 * support or the cgroups interface is not enabled, so that no state
 * needs to be maintained in this case.
 */
static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
{
	return !bfq_varied_queue_weights_or_active_groups(bfqd);
}

/*
 * If the weight-counter tree passed as input contains no counter for
 * the weight of the input queue, then add that counter; otherwise just
 * increment the existing counter.
 *
 * Note that weight-counter trees contain few nodes in mostly symmetric
 * scenarios. For example, if all queues have the same weight, then the
 * weight-counter tree for the queues may contain at most one node.
 * This holds even if low_latency is on, because weight-raised queues
 * are not inserted in the tree.
 * In most scenarios, the rate at which nodes are created/destroyed
 * should be low too.
 */
void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
			  struct rb_root *root)
{
	struct bfq_entity *entity = &bfqq->entity;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/*
	 * Do not insert if the queue is already associated with a
	 * counter, which happens if:
	 * 1) a request arrival has caused the queue to become both
	 *    non-weight-raised, and hence change its weight, and
	 *    backlogged; in this respect, each of the two events
	 *    causes an invocation of this function,
	 * 2) this is the invocation of this function caused by the
	 *    second event. This second invocation is actually useless,
	 *    and we handle this fact by exiting immediately. More
	 *    efficient or clearer solutions might possibly be adopted.
	 */
	if (bfqq->weight_counter)
		return;

	while (*new) {
		struct bfq_weight_counter *__counter = container_of(*new,
						struct bfq_weight_counter,
						weights_node);
		parent = *new;

		if (entity->weight == __counter->weight) {
			bfqq->weight_counter = __counter;
			goto inc_counter;
		}
		if (entity->weight < __counter->weight)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	bfqq->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
				       GFP_ATOMIC);

	/*
	 * In the unlucky event of an allocation failure, we just
	 * exit. This will cause the weight of the queue not to be
	 * considered in bfq_varied_queue_weights_or_active_groups,
	 * which, in turn, causes the scenario to be deemed wrongly
	 * symmetric in case bfqq's weight would have been the only
	 * weight making the scenario asymmetric. On the bright side,
	 * no imbalance will occur, however, when bfqq becomes inactive
	 * again (the invocation of this function is triggered by an
	 * activation of the queue). In fact, bfq_weights_tree_remove
	 * does nothing if !bfqq->weight_counter.
	 */
	if (unlikely(!bfqq->weight_counter))
		return;

	bfqq->weight_counter->weight = entity->weight;
	rb_link_node(&bfqq->weight_counter->weights_node, parent, new);
	rb_insert_color(&bfqq->weight_counter->weights_node, root);

inc_counter:
	bfqq->weight_counter->num_active++;
}

/*
 * Decrement the weight counter associated with the queue, and, if the
 * counter reaches 0, remove the counter from the tree.
 * See the comments to the function bfq_weights_tree_add() for considerations
 * about overhead.
 */
void __bfq_weights_tree_remove(struct bfq_data *bfqd,
			       struct bfq_queue *bfqq,
			       struct rb_root *root)
{
	if (!bfqq->weight_counter)
		return;

	bfqq->weight_counter->num_active--;
	if (bfqq->weight_counter->num_active > 0)
		goto reset_entity_pointer;

	rb_erase(&bfqq->weight_counter->weights_node, root);
	kfree(bfqq->weight_counter);

reset_entity_pointer:
	bfqq->weight_counter = NULL;
}

/*
 * Invoke __bfq_weights_tree_remove on bfqq and decrement the number
 * of active groups for each of bfqq's ancestor entities that turns
 * out to be no longer active.
 */
void bfq_weights_tree_remove(struct bfq_data *bfqd,
			     struct bfq_queue *bfqq)
{
	struct bfq_entity *entity = bfqq->entity.parent;

	__bfq_weights_tree_remove(bfqd, bfqq,
				  &bfqd->queue_weights_tree);

	for_each_entity(entity) {
		struct bfq_sched_data *sd = entity->my_sched_data;

		if (sd->next_in_service || sd->in_service_entity) {
			/*
			 * entity is still active, because either
			 * next_in_service or in_service_entity is not
			 * NULL (see the comments on the definition of
			 * next_in_service for details on why
			 * in_service_entity must be checked too).
			 *
			 * As a consequence, its parent entities are
			 * active as well, and thus this loop must
			 * stop here.
			 */
			break;
		}

		/*
		 * The decrement of num_groups_with_pending_reqs is
		 * not performed immediately upon the deactivation of
		 * entity, but it is delayed to when it also happens
		 * that the first leaf descendant bfqq of entity gets
		 * all its pending requests completed. The following
		 * instructions perform this delayed decrement, if
		 * needed. See the comments on
		 * num_groups_with_pending_reqs for details.
		 */
		if (entity->in_groups_with_pending_reqs) {
			entity->in_groups_with_pending_reqs = false;
			bfqd->num_groups_with_pending_reqs--;
		}
	}
}

/*
 * Return expired entry, or NULL to just start from scratch in rbtree.
 */
static struct request *bfq_check_fifo(struct bfq_queue *bfqq,
				      struct request *last)
{
	struct request *rq;

	if (bfq_bfqq_fifo_expire(bfqq))
		return NULL;

	bfq_mark_bfqq_fifo_expire(bfqq);

	rq = rq_entry_fifo(bfqq->fifo.next);

	if (rq == last || ktime_get_ns() < rq->fifo_time)
		return NULL;

	bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
	return rq;
}

static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
					struct bfq_queue *bfqq,
					struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next, *prev = NULL;

	/* Follow expired path, else get first next available. */
	next = bfq_check_fifo(bfqq, last);
	if (next)
		return next;

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		rbnext = rb_first(&bfqq->sort_list);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
}

/* see the definition of bfq_async_charge_factor for details */
static unsigned long bfq_serv_to_charge(struct request *rq,
					struct bfq_queue *bfqq)
{
	if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1)
		return blk_rq_sectors(rq);

	return blk_rq_sectors(rq) * bfq_async_charge_factor;
}
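
/*
 * Example of the charging rule above: an 8-sector request is charged
 * 8 sectors if the queue is sync or weight-raised, and
 * 8 * bfq_async_charge_factor = 24 sectors otherwise, which is what
 * throttles async writes with respect to sync I/O (see the comments
 * on bfq_async_charge_factor).
 */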

/**
 * bfq_updated_next_req - update the queue after a new next_rq selection.
 * @bfqd: the device data the queue belongs to.
 * @bfqq: the queue to update.
 *
 * If the first request of a queue changes, we make sure that the queue
 * has enough budget to serve at least its first request (if the
 * request has grown). We do this because if the queue does not have
 * enough budget for its first request, it has to go through two
 * dispatch rounds to actually get it dispatched.
 */
static void bfq_updated_next_req(struct bfq_data *bfqd,
				 struct bfq_queue *bfqq)
{
	struct bfq_entity *entity = &bfqq->entity;
	struct request *next_rq = bfqq->next_rq;
	unsigned long new_budget;

	if (!next_rq)
		return;

	if (bfqq == bfqd->in_service_queue)
		/*
		 * In order not to break guarantees, budgets cannot be
		 * changed after an entity has been selected.
		 */
		return;

	new_budget = max_t(unsigned long, bfqq->max_budget,
			   bfq_serv_to_charge(next_rq, bfqq));
	if (entity->budget != new_budget) {
		entity->budget = new_budget;
		bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
					 new_budget);
		bfq_requeue_bfqq(bfqd, bfqq, false);
	}
}
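
/*
 * Example of the weight-raising duration computed by bfq_wr_duration()
 * below: since duration = (ref_rate / r) * ref_wr_duration, a device
 * whose estimated peak rate r is half the reference rate gets twice
 * the reference weight-raising duration, and the result is then
 * clamped to the [3 s, 25 s] range discussed in the function.
 */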

static unsigned int bfq_wr_duration(struct bfq_data *bfqd)
{
	u64 dur;

	if (bfqd->bfq_wr_max_time > 0)
		return bfqd->bfq_wr_max_time;

	dur = bfqd->rate_dur_prod;
	do_div(dur, bfqd->peak_rate);

	/*
	 * Limit duration between 3 and 25 seconds. The upper limit
	 * has been conservatively set after the following worst case:
	 * on a QEMU/KVM virtual machine
	 * - running in a slow PC
	 * - with a virtual disk stacked on a slow low-end 5400rpm HDD
	 * - serving a heavy I/O workload, such as the sequential reading
	 *   of several files
	 * mplayer took 23 seconds to start, if constantly weight-raised.
	 *
	 * As for values higher than the one accommodating the above bad
	 * scenario, tests show that they would often yield the opposite
	 * of the desired result, i.e., would worsen responsiveness by
	 * allowing non-interactive applications to preserve weight
	 * raising for too long.
	 *
	 * On the other end, values lower than 3 seconds make it
	 * difficult for most interactive tasks to complete their jobs
	 * before weight-raising finishes.
	 */
	return clamp_val(dur, msecs_to_jiffies(3000), msecs_to_jiffies(25000));
}

/* switch back from soft real-time to interactive weight raising */
static void switch_back_to_interactive_wr(struct bfq_queue *bfqq,
					  struct bfq_data *bfqd)
{
	bfqq->wr_coeff = bfqd->bfq_wr_coeff;
	bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
	bfqq->last_wr_start_finish = bfqq->wr_start_at_switch_to_srt;
}

static void
bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
		      struct bfq_io_cq *bic, bool bfq_already_existing)
{
	unsigned int old_wr_coeff = bfqq->wr_coeff;
	bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq);

	if (bic->saved_has_short_ttime)
		bfq_mark_bfqq_has_short_ttime(bfqq);
	else
		bfq_clear_bfqq_has_short_ttime(bfqq);

	if (bic->saved_IO_bound)
		bfq_mark_bfqq_IO_bound(bfqq);
	else
		bfq_clear_bfqq_IO_bound(bfqq);

	bfqq->ttime = bic->saved_ttime;
	bfqq->wr_coeff = bic->saved_wr_coeff;
	bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt;
	bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish;
	bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time;

	if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) ||
	    time_is_before_jiffies(bfqq->last_wr_start_finish +
				   bfqq->wr_cur_max_time))) {
		if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
		    !bfq_bfqq_in_large_burst(bfqq) &&
		    time_is_after_eq_jiffies(bfqq->wr_start_at_switch_to_srt +
					     bfq_wr_duration(bfqd))) {
			switch_back_to_interactive_wr(bfqq, bfqd);
		} else {
			bfqq->wr_coeff = 1;
			bfq_log_bfqq(bfqq->bfqd, bfqq,
				     "resume state: switching off wr");
		}
	}

	/* make sure weight will be updated, however we got here */
	bfqq->entity.prio_changed = 1;

	if (likely(!busy))
		return;

	if (old_wr_coeff == 1 && bfqq->wr_coeff > 1)
		bfqd->wr_busy_queues++;
	else if (old_wr_coeff > 1 && bfqq->wr_coeff == 1)
		bfqd->wr_busy_queues--;
}

static int bfqq_process_refs(struct bfq_queue *bfqq)
{
	return bfqq->ref - bfqq->allocated - bfqq->entity.on_st;
}

/* Empty burst list and add just bfqq (see comments on bfq_handle_burst) */
static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
	struct bfq_queue *item;
	struct hlist_node *n;

	hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node)
		hlist_del_init(&item->burst_list_node);
	hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
	bfqd->burst_size = 1;
	bfqd->burst_parent_entity = bfqq->entity.parent;
}

/* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */
static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
	/* Increment burst size to take into account also bfqq */
	bfqd->burst_size++;

	if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) {
		struct bfq_queue *pos, *bfqq_item;
		struct hlist_node *n;

		/*
		 * Enough queues have been activated shortly after each
		 * other to consider this burst as large.
		 */
		bfqd->large_burst = true;

		/*
		 * We can now mark all queues in the burst list as
		 * belonging to a large burst.
		 */
		hlist_for_each_entry(bfqq_item, &bfqd->burst_list,
				     burst_list_node)
			bfq_mark_bfqq_in_large_burst(bfqq_item);
		bfq_mark_bfqq_in_large_burst(bfqq);

		/*
		 * From now on, and until the current burst finishes, any
		 * new queue being activated shortly after the last queue
		 * was inserted in the burst can be immediately marked as
		 * belonging to a large burst. So the burst list is not
		 * needed any more. Remove it.
		 */
		hlist_for_each_entry_safe(pos, n, &bfqd->burst_list,
					  burst_list_node)
			hlist_del_init(&pos->burst_list_node);
	} else /*
		* Burst not yet large: add bfqq to the burst list. Do
		* not increment the ref counter for bfqq, because bfqq
		* is removed from the burst list before freeing bfqq
		* in put_queue.
		*/
		hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
}

/*
 * If many queues belonging to the same group happen to be created
 * shortly after each other, then the processes associated with these
 * queues have typically a common goal. In particular, bursts of queue
 * creations are usually caused by services or applications that spawn
 * many parallel threads/processes. Examples are systemd during boot,
 * or git grep. To help these processes get their job done as soon as
 * possible, it is usually better to not grant either weight-raising
 * or device idling to their queues.
 *
 * In this comment we describe, firstly, the reasons why this fact
 * holds, and, secondly, the next function, which implements the main
 * steps needed to properly mark these queues so that they can then be
 * treated in a different way.
 *
 * The above services or applications benefit mostly from a high
 * throughput: the quicker the requests of the activated queues are
 * cumulatively served, the sooner the target job of these queues gets
 * completed. As a consequence, weight-raising any of these queues,
 * which also implies idling the device for it, is almost always
 * counterproductive. In most cases it just lowers throughput.
 *
 * On the other hand, a burst of queue creations may be caused also by
 * the start of an application that does not consist of a lot of
 * parallel I/O-bound threads. In fact, with a complex application,
 * several short processes may need to be executed to start up the
 * application. In this respect, to start an application as quickly as
 * possible, the best thing to do is in any case to privilege the I/O
 * related to the application with respect to all other
 * I/O. Therefore, the best strategy to start as quickly as possible
 * an application that causes a burst of queue creations is to
 * weight-raise all the queues created during the burst. This is the
 * exact opposite of the best strategy for the other type of bursts.
 *
 * In the end, to take the best action for each of the two cases, the
 * two types of bursts need to be distinguished. Fortunately, this
 * seems relatively easy, by looking at the sizes of the bursts. In
 * particular, we found a threshold such that only bursts with a
 * larger size than that threshold are apparently caused by
 * services or commands such as systemd or git grep. For brevity,
 * hereafter we just call these bursts 'large'. BFQ *does not*
 * weight-raise queues whose creation occurs in a large burst.
 * In addition, for each of these queues BFQ performs or does not
 * perform idling depending on which choice boosts the throughput more.
 * The exact choice depends on the device and request pattern at hand.
 *
 * Unfortunately, false positives may occur while an interactive task
 * is starting (e.g., an application is being started). The
 * consequence is that the queues associated with the task do not
 * enjoy weight raising as expected. Fortunately these false positives
 * are very rare. They typically occur if some service happens to
 * start doing I/O exactly when the interactive task starts.
 *
 * Turning back to the next function, it implements all the steps
 * needed to detect the occurrence of a large burst and to properly
 * mark all the queues belonging to it (so that they can then be
 * treated in a different way). This goal is achieved by maintaining a
 * "burst list" that holds, temporarily, the queues that belong to the
 * burst in progress. The list is then used to mark these queues as
 * belonging to a large burst if the burst does become large. The main
 * steps are the following.
 *
 * . when the very first queue is created, the queue is inserted into the
 *   list (as it could be the first queue in a possible burst)
 *
 * . if the current burst has not yet become large, and a queue Q that does
 *   not yet belong to the burst is activated shortly after the last time
 *   at which a new queue entered the burst list, then the function appends
 *   Q to the burst list
 *
 * . if, as a consequence of the previous step, the burst size reaches
 *   the large-burst threshold, then
 *
 *     . all the queues in the burst list are marked as belonging to a
 *       large burst
 *
 *     . the burst list is deleted; in fact, the burst list already served
 *       its purpose (temporarily keeping track of the queues in a burst,
 *       so as to be able to mark them as belonging to a large burst in the
 *       previous sub-step), and now is not needed any more
 *
 *     . the device enters a large-burst mode
 *
 * . if a queue Q that does not belong to the burst is created while
 *   the device is in large-burst mode and shortly after the last time
 *   at which a queue either entered the burst list or was marked as
 *   belonging to the current large burst, then Q is immediately marked
 *   as belonging to a large burst.
 *
 * . if a queue Q that does not belong to the burst is created a while
 *   later, i.e., not shortly after, the last time at which a queue
 *   either entered the burst list or was marked as belonging to the
 *   current large burst, then the current burst is deemed as finished and:
 *
 *        . the large-burst mode is reset if set
 *
 *        . the burst list is emptied
 *
 *        . Q is inserted in the burst list, as Q may be the first queue
 *          in a possible new burst (then the burst list contains just Q
 *          after this step).
 */
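
/*
 * A concrete illustration of the steps above: if several queues are
 * created, each within bfq_burst_interval of the previous one, they
 * are appended to the burst list; as soon as their number reaches
 * bfq_large_burst_thresh, all of them are marked as in_large_burst and
 * the list is dropped; any queue created shortly afterwards is then
 * marked as in_large_burst directly, until a queue is created after a
 * long-enough pause, which starts a new, possibly small, burst.
 */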

static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
	/*
	 * If bfqq is already in the burst list or is part of a large
	 * burst, or finally has just been split, then there is
	 * nothing else to do.
	 */
	if (!hlist_unhashed(&bfqq->burst_list_node) ||
	    bfq_bfqq_in_large_burst(bfqq) ||
	    time_is_after_eq_jiffies(bfqq->split_time +
				     msecs_to_jiffies(10)))
		return;

	/*
	 * If bfqq's creation happens late enough, or bfqq belongs to
	 * a different group than the burst group, then the current
	 * burst is finished, and related data structures must be
	 * reset.
	 *
	 * In this respect, consider the special case where bfqq is
	 * the very first queue created after BFQ is selected for this
	 * device. In this case, last_ins_in_burst and
	 * burst_parent_entity are not yet significant when we get
	 * here. But it is easy to verify that, whether or not the
	 * following condition is true, bfqq will end up being
	 * inserted into the burst list. In particular the list will
	 * happen to contain only bfqq. And this is exactly what has
	 * to happen, as bfqq may be the first queue of the first
	 * burst.
	 */
	if (time_is_before_jiffies(bfqd->last_ins_in_burst +
	    bfqd->bfq_burst_interval) ||
	    bfqq->entity.parent != bfqd->burst_parent_entity) {
		bfqd->large_burst = false;
		bfq_reset_burst_list(bfqd, bfqq);
		goto end;
	}

	/*
	 * If we get here, then bfqq is being activated shortly after the
	 * last queue. So, if the current burst is also large, we can mark
	 * bfqq as belonging to this large burst immediately.
	 */
	if (bfqd->large_burst) {
		bfq_mark_bfqq_in_large_burst(bfqq);
		goto end;
	}

	/*
	 * If we get here, then a large-burst state has not yet been
	 * reached, but bfqq is being activated shortly after the last
	 * queue. Then we add bfqq to the burst.
	 */
	bfq_add_to_burst(bfqd, bfqq);
end:
	/*
	 * At this point, bfqq either has been added to the current
	 * burst or has caused the current burst to terminate and a
	 * possible new burst to start. In particular, in the second
	 * case, bfqq has become the first queue in the possible new
	 * burst. In both cases last_ins_in_burst needs to be moved
	 * forward.
	 */
	bfqd->last_ins_in_burst = jiffies;
}

static int bfq_bfqq_budget_left(struct bfq_queue *bfqq)
{
	struct bfq_entity *entity = &bfqq->entity;

	return entity->budget - entity->service;
}

/*
 * If enough samples have been computed, return the current max budget
 * stored in bfqd, which is dynamically updated according to the
 * estimated disk peak rate; otherwise return the default max budget.
 */
static int bfq_max_budget(struct bfq_data *bfqd)
{
	if (bfqd->budgets_assigned < bfq_stats_min_budgets)
		return bfq_default_max_budget;
	else
		return bfqd->bfq_max_budget;
}

/*
 * Return min budget, which is a fraction of the current or default
 * max budget (trying with 1/32).
 */
static int bfq_min_budget(struct bfq_data *bfqd)
{
	if (bfqd->budgets_assigned < bfq_stats_min_budgets)
		return bfq_default_max_budget / 32;
	else
		return bfqd->bfq_max_budget / 32;
}

/*
 * The next function, invoked after the input queue bfqq switches from
 * idle to busy, updates the budget of bfqq. The function also tells
 * whether the in-service queue should be expired, by returning true.
 * The purpose of expiring the in-service queue is to give bfqq
 * the chance to possibly preempt the in-service queue, and the reason
 * for preempting the in-service queue is to achieve one of the two
 * goals below.
 *
 * 1. Guarantee to bfqq its reserved bandwidth even if bfqq has
 * expired because it has remained idle. In particular, bfqq may have
 * expired for one of the following two reasons:
 *
 * - BFQQE_NO_MORE_REQUESTS bfqq did not enjoy any device idling
 *   and did not make it to issue a new request before its last
 *   request was served;
 *
 * - BFQQE_TOO_IDLE bfqq did enjoy device idling, but did not issue
 *   a new request before the expiration of the idling-time.
 *
 * Even if bfqq has expired for one of the above reasons, the process
 * associated with the queue may however be issuing requests greedily,
 * and thus be sensitive to the bandwidth it receives (bfqq may have
 * remained idle for other reasons: CPU high load, bfqq not enjoying
 * idling, I/O throttling somewhere in the path from the process to
 * the I/O scheduler, ...). But if, after every expiration for one of
 * the above two reasons, bfqq has to wait for the service of at least
 * one full budget of another queue before being served again, then
 * bfqq is likely to get a much lower bandwidth or resource time than
 * its reserved ones. To address this issue, two countermeasures need
 * to be taken.
 *
 * First, the budget and the timestamps of bfqq need to be updated in
 * a special way on bfqq reactivation: they need to be updated as if
 * bfqq did not remain idle and did not expire. In fact, if they are
 * computed as if bfqq expired and remained idle until reactivation,
 * then the process associated with bfqq is treated as if, instead of
 * being greedy, it stopped issuing requests when bfqq remained idle,
 * and restarts issuing requests only on this reactivation. In other
 * words, the scheduler does not help the process recover the "service
 * hole" between bfqq expiration and reactivation. As a consequence,
 * the process receives a lower bandwidth than its reserved one. In
 * contrast, to recover this hole, the budget must be updated as if
 * bfqq was not expired at all before this reactivation, i.e., it must
 * be set to the value of the remaining budget when bfqq was
 * expired. Along the same line, timestamps need to be assigned the
 * value they had the last time bfqq was selected for service, i.e.,
 * before last expiration. Thus timestamps need to be back-shifted
 * with respect to their normal computation (see [1] for more details
 * on this tricky aspect).
 *
 * Secondly, to allow the process to recover the hole, the in-service
 * queue must be expired too, to give bfqq the chance to preempt it
 * immediately. In fact, if bfqq has to wait for a full budget of the
 * in-service queue to be completed, then it may become impossible to
 * let the process recover the hole, even if the back-shifted
 * timestamps of bfqq are lower than those of the in-service queue. If
 * this happens for most or all of the holes, then the process may not
 * receive its reserved bandwidth.
 * In this respect, it is worth noting that, since the service of
 * outstanding requests is not preemptible, a small fraction of the
 * holes may however be unrecoverable, thereby causing a little loss
 * of bandwidth.
 *
 * The last important point is detecting whether bfqq does need this
 * bandwidth recovery. In this respect, the next function deems the
 * process associated with bfqq greedy, and thus allows it to recover
 * the hole, if: 1) the process is waiting for the arrival of a new
 * request (which implies that bfqq expired for one of the above two
 * reasons), and 2) such a request has arrived soon. The first
 * condition is controlled through the flag non_blocking_wait_rq,
 * while the second through the flag arrived_in_time. If both
 * conditions hold, then the function computes the budget in the
 * above-described special way, and signals that the in-service queue
 * should be expired. Timestamp back-shifting is done later in
 * __bfq_activate_entity.
 *
 * 2. Reduce latency. Even if timestamps are not backshifted to let
 * the process associated with bfqq recover a service hole, bfqq may
 * however happen to have, after being (re)activated, a lower finish
 * timestamp than the in-service queue. That is, the next budget of
 * bfqq may have to be completed before the one of the in-service
 * queue. If this is the case, then preempting the in-service queue
 * allows this goal to be achieved, apart from the unpreemptible,
 * outstanding requests mentioned above.
 *
 * Unfortunately, regardless of which of the above two goals one wants
 * to achieve, service trees need first to be updated to know whether
 * the in-service queue must be preempted. To have service trees
 * correctly updated, the in-service queue must be expired and
 * rescheduled, and bfqq must be scheduled too. This is one of the
 * most costly operations (in future versions, the scheduling
 * mechanism may be re-designed in such a way to make it possible to
 * know whether preemption is needed without needing to update service
 * trees). In addition, queue preemptions almost always cause random
 * I/O, and thus loss of throughput. Because of these facts, the next
 * function adopts the following simple scheme to avoid both costly
 * operations and too frequent preemptions: it requests the expiration
 * of the in-service queue (unconditionally) only for queues that need
 * to recover a hole, or that either are weight-raised or deserve to
 * be weight-raised.
 */
static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd,
						struct bfq_queue *bfqq,
						bool arrived_in_time,
						bool wr_or_deserves_wr)
{
	struct bfq_entity *entity = &bfqq->entity;

	if (bfq_bfqq_non_blocking_wait_rq(bfqq) && arrived_in_time) {
		/*
		 * We do not clear the flag non_blocking_wait_rq here, as
		 * the latter is used in bfq_activate_bfqq to signal
		 * that timestamps need to be back-shifted (and is
		 * cleared right after).
		 */

		/*
		 * In the next assignment we rely on the fact that
		 * neither entity->service nor entity->budget is
		 * updated on expiration if bfqq is empty (see
		 * __bfq_bfqq_recalc_budget). Thus both quantities
		 * remain unchanged after such an expiration, and the
		 * following statement therefore assigns to
		 * entity->budget the remaining budget on such an
		 * expiration.
		 */
		entity->budget = min_t(unsigned long,
				       bfq_bfqq_budget_left(bfqq),
				       bfqq->max_budget);

		/*
		 * At this point, we have used entity->service to get
		 * the budget left (needed for updating
		 * entity->budget). Thus we finally can, and have to,
		 * reset entity->service. The latter must be reset
		 * because bfqq would otherwise be charged again for
		 * the service it has received during its previous
		 * service slot(s).
		 */
		entity->service = 0;

		return true;
	}

	/*
	 * We can finally complete expiration, by setting service to 0.
	 */
	entity->service = 0;
	entity->budget = max_t(unsigned long, bfqq->max_budget,
			       bfq_serv_to_charge(bfqq->next_rq, bfqq));
	bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
	return wr_or_deserves_wr;
}

/*
 * Return the farthest past time instant according to jiffies
 * macros.
 */
static unsigned long bfq_smallest_from_now(void)
{
	return jiffies - MAX_JIFFY_OFFSET;
}

static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
					     struct bfq_queue *bfqq,
					     unsigned int old_wr_coeff,
					     bool wr_or_deserves_wr,
					     bool interactive,
					     bool in_burst,
					     bool soft_rt)
{
	if (old_wr_coeff == 1 && wr_or_deserves_wr) {
		/* start a weight-raising period */
		if (interactive) {
			bfqq->service_from_wr = 0;
			bfqq->wr_coeff = bfqd->bfq_wr_coeff;
			bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
		} else {
			/*
			 * No interactive weight raising in progress
			 * here: assign minus infinity to
			 * wr_start_at_switch_to_srt, to make sure
			 * that, at the end of the soft-real-time
			 * weight raising period that is starting
			 * now, no interactive weight-raising period
			 * may be wrongly considered as still in
			 * progress (and thus actually started by
			 * mistake).
			 */
			bfqq->wr_start_at_switch_to_srt =
				bfq_smallest_from_now();
			bfqq->wr_coeff = bfqd->bfq_wr_coeff *
				BFQ_SOFTRT_WEIGHT_FACTOR;
			bfqq->wr_cur_max_time =
				bfqd->bfq_wr_rt_max_time;
		}

		/*
		 * If needed, further reduce budget to make sure it is
		 * close to bfqq's backlog, so as to reduce the
		 * scheduling-error component due to a too large
		 * budget. Do not care about throughput consequences,
		 * but only about latency. Finally, do not assign a
		 * too small budget either, to avoid increasing
		 * latency by causing too frequent expirations.
		 */
		bfqq->entity.budget = min_t(unsigned long,
					    bfqq->entity.budget,
					    2 * bfq_min_budget(bfqd));
	} else if (old_wr_coeff > 1) {
		if (interactive) { /* update wr coeff and duration */
			bfqq->wr_coeff = bfqd->bfq_wr_coeff;
			bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
		} else if (in_burst)
			bfqq->wr_coeff = 1;
		else if (soft_rt) {
			/*
			 * The application is now or still meeting the
			 * requirements for being deemed soft rt. We
			 * can then correctly and safely (re)charge
			 * the weight-raising duration for the
			 * application with the weight-raising
			 * duration for soft rt applications.
1498 * 1499 * In particular, doing this recharge now, i.e., 1500 * before the weight-raising period for the 1501 * application finishes, reduces the probability 1502 * of the following negative scenario: 1503 * 1) the weight of a soft rt application is 1504 * raised at startup (as for any newly 1505 * created application), 1506 * 2) since the application is not interactive, 1507 * at a certain time weight-raising is 1508 * stopped for the application, 1509 * 3) at that time the application happens to 1510 * still have pending requests, and hence 1511 * is destined to not have a chance to be 1512 * deemed soft rt before these requests are 1513 * completed (see the comments to the 1514 * function bfq_bfqq_softrt_next_start() 1515 * for details on soft rt detection), 1516 * 4) these pending requests experience a high 1517 * latency because the application is not 1518 * weight-raised while they are pending. 1519 */ 1520 if (bfqq->wr_cur_max_time != 1521 bfqd->bfq_wr_rt_max_time) { 1522 bfqq->wr_start_at_switch_to_srt = 1523 bfqq->last_wr_start_finish; 1524 1525 bfqq->wr_cur_max_time = 1526 bfqd->bfq_wr_rt_max_time; 1527 bfqq->wr_coeff = bfqd->bfq_wr_coeff * 1528 BFQ_SOFTRT_WEIGHT_FACTOR; 1529 } 1530 bfqq->last_wr_start_finish = jiffies; 1531 } 1532 } 1533 } 1534 1535 static bool bfq_bfqq_idle_for_long_time(struct bfq_data *bfqd, 1536 struct bfq_queue *bfqq) 1537 { 1538 return bfqq->dispatched == 0 && 1539 time_is_before_jiffies( 1540 bfqq->budget_timeout + 1541 bfqd->bfq_wr_min_idle_time); 1542 } 1543 1544 static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd, 1545 struct bfq_queue *bfqq, 1546 int old_wr_coeff, 1547 struct request *rq, 1548 bool *interactive) 1549 { 1550 bool soft_rt, in_burst, wr_or_deserves_wr, 1551 bfqq_wants_to_preempt, 1552 idle_for_long_time = bfq_bfqq_idle_for_long_time(bfqd, bfqq), 1553 /* 1554 * See the comments on 1555 * bfq_bfqq_update_budg_for_activation for 1556 * details on the usage of the next variable. 1557 */ 1558 arrived_in_time = ktime_get_ns() <= 1559 bfqq->ttime.last_end_request + 1560 bfqd->bfq_slice_idle * 3; 1561 1562 1563 /* 1564 * bfqq deserves to be weight-raised if: 1565 * - it is sync, 1566 * - it does not belong to a large burst, 1567 * - it has been idle for enough time or is soft real-time, 1568 * - is linked to a bfq_io_cq (it is not shared in any sense). 1569 */ 1570 in_burst = bfq_bfqq_in_large_burst(bfqq); 1571 soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 && 1572 !in_burst && 1573 time_is_before_jiffies(bfqq->soft_rt_next_start) && 1574 bfqq->dispatched == 0; 1575 *interactive = !in_burst && idle_for_long_time; 1576 wr_or_deserves_wr = bfqd->low_latency && 1577 (bfqq->wr_coeff > 1 || 1578 (bfq_bfqq_sync(bfqq) && 1579 bfqq->bic && (*interactive || soft_rt))); 1580 1581 /* 1582 * Using the last flag, update budget and check whether bfqq 1583 * may want to preempt the in-service queue. 1584 */ 1585 bfqq_wants_to_preempt = 1586 bfq_bfqq_update_budg_for_activation(bfqd, bfqq, 1587 arrived_in_time, 1588 wr_or_deserves_wr); 1589 1590 /* 1591 * If bfqq happened to be activated in a burst, but has been 1592 * idle for much more than an interactive queue, then we 1593 * assume that, in the overall I/O initiated in the burst, the 1594 * I/O associated with bfqq is finished. So bfqq does not need 1595 * to be treated as a queue belonging to a burst 1596 * anymore. Accordingly, we reset bfqq's in_large_burst flag 1597 * if set, and remove bfqq from the burst list if it's 1598 * there. 
We do not decrement burst_size, because the fact 1599 * that bfqq does not need to belong to the burst list any 1600 * more does not invalidate the fact that bfqq was created in 1601 * a burst. 1602 */ 1603 if (likely(!bfq_bfqq_just_created(bfqq)) && 1604 idle_for_long_time && 1605 time_is_before_jiffies( 1606 bfqq->budget_timeout + 1607 msecs_to_jiffies(10000))) { 1608 hlist_del_init(&bfqq->burst_list_node); 1609 bfq_clear_bfqq_in_large_burst(bfqq); 1610 } 1611 1612 bfq_clear_bfqq_just_created(bfqq); 1613 1614 1615 if (!bfq_bfqq_IO_bound(bfqq)) { 1616 if (arrived_in_time) { 1617 bfqq->requests_within_timer++; 1618 if (bfqq->requests_within_timer >= 1619 bfqd->bfq_requests_within_timer) 1620 bfq_mark_bfqq_IO_bound(bfqq); 1621 } else 1622 bfqq->requests_within_timer = 0; 1623 } 1624 1625 if (bfqd->low_latency) { 1626 if (unlikely(time_is_after_jiffies(bfqq->split_time))) 1627 /* wraparound */ 1628 bfqq->split_time = 1629 jiffies - bfqd->bfq_wr_min_idle_time - 1; 1630 1631 if (time_is_before_jiffies(bfqq->split_time + 1632 bfqd->bfq_wr_min_idle_time)) { 1633 bfq_update_bfqq_wr_on_rq_arrival(bfqd, bfqq, 1634 old_wr_coeff, 1635 wr_or_deserves_wr, 1636 *interactive, 1637 in_burst, 1638 soft_rt); 1639 1640 if (old_wr_coeff != bfqq->wr_coeff) 1641 bfqq->entity.prio_changed = 1; 1642 } 1643 } 1644 1645 bfqq->last_idle_bklogged = jiffies; 1646 bfqq->service_from_backlogged = 0; 1647 bfq_clear_bfqq_softrt_update(bfqq); 1648 1649 bfq_add_bfqq_busy(bfqd, bfqq); 1650 1651 /* 1652 * Expire in-service queue only if preemption may be needed 1653 * for guarantees. In this respect, the function 1654 * next_queue_may_preempt just checks a simple, necessary 1655 * condition, and not a sufficient condition based on 1656 * timestamps. In fact, for the latter condition to be 1657 * evaluated, timestamps would need first to be updated, and 1658 * this operation is quite costly (see the comments on the 1659 * function bfq_bfqq_update_budg_for_activation). 1660 */ 1661 if (bfqd->in_service_queue && bfqq_wants_to_preempt && 1662 bfqd->in_service_queue->wr_coeff < bfqq->wr_coeff && 1663 next_queue_may_preempt(bfqd)) 1664 bfq_bfqq_expire(bfqd, bfqd->in_service_queue, 1665 false, BFQQE_PREEMPTED); 1666 } 1667 1668 static void bfq_add_request(struct request *rq) 1669 { 1670 struct bfq_queue *bfqq = RQ_BFQQ(rq); 1671 struct bfq_data *bfqd = bfqq->bfqd; 1672 struct request *next_rq, *prev; 1673 unsigned int old_wr_coeff = bfqq->wr_coeff; 1674 bool interactive = false; 1675 1676 bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq)); 1677 bfqq->queued[rq_is_sync(rq)]++; 1678 bfqd->queued++; 1679 1680 elv_rb_add(&bfqq->sort_list, rq); 1681 1682 /* 1683 * Check if this request is a better next-serve candidate. 1684 */ 1685 prev = bfqq->next_rq; 1686 next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position); 1687 bfqq->next_rq = next_rq; 1688 1689 /* 1690 * Adjust priority tree position, if next_rq changes. 1691 */ 1692 if (prev != bfqq->next_rq) 1693 bfq_pos_tree_add_move(bfqd, bfqq); 1694 1695 if (!bfq_bfqq_busy(bfqq)) /* switching to busy ... 
*/ 1696 bfq_bfqq_handle_idle_busy_switch(bfqd, bfqq, old_wr_coeff, 1697 rq, &interactive); 1698 else { 1699 if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) && 1700 time_is_before_jiffies( 1701 bfqq->last_wr_start_finish + 1702 bfqd->bfq_wr_min_inter_arr_async)) { 1703 bfqq->wr_coeff = bfqd->bfq_wr_coeff; 1704 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); 1705 1706 bfqd->wr_busy_queues++; 1707 bfqq->entity.prio_changed = 1; 1708 } 1709 if (prev != bfqq->next_rq) 1710 bfq_updated_next_req(bfqd, bfqq); 1711 } 1712 1713 /* 1714 * Assign jiffies to last_wr_start_finish in the following 1715 * cases: 1716 * 1717 * . if bfqq is not going to be weight-raised, because, for 1718 * non weight-raised queues, last_wr_start_finish stores the 1719 * arrival time of the last request; as of now, this piece 1720 * of information is used only for deciding whether to 1721 * weight-raise async queues 1722 * 1723 * . if bfqq is not weight-raised, because, if bfqq is now 1724 * switching to weight-raised, then last_wr_start_finish 1725 * stores the time when weight-raising starts 1726 * 1727 * . if bfqq is interactive, because, regardless of whether 1728 * bfqq is currently weight-raised, the weight-raising 1729 * period must start or restart (this case is considered 1730 * separately because it is not detected by the above 1731 * conditions, if bfqq is already weight-raised) 1732 * 1733 * last_wr_start_finish has to be updated also if bfqq is soft 1734 * real-time, because the weight-raising period is constantly 1735 * restarted on idle-to-busy transitions for these queues, but 1736 * this is already done in bfq_bfqq_handle_idle_busy_switch if 1737 * needed. 1738 */ 1739 if (bfqd->low_latency && 1740 (old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive)) 1741 bfqq->last_wr_start_finish = jiffies; 1742 } 1743 1744 static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd, 1745 struct bio *bio, 1746 struct request_queue *q) 1747 { 1748 struct bfq_queue *bfqq = bfqd->bio_bfqq; 1749 1750 1751 if (bfqq) 1752 return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio)); 1753 1754 return NULL; 1755 } 1756 1757 static sector_t get_sdist(sector_t last_pos, struct request *rq) 1758 { 1759 if (last_pos) 1760 return abs(blk_rq_pos(rq) - last_pos); 1761 1762 return 0; 1763 } 1764 1765 #if 0 /* Still not clear if we can do without next two functions */ 1766 static void bfq_activate_request(struct request_queue *q, struct request *rq) 1767 { 1768 struct bfq_data *bfqd = q->elevator->elevator_data; 1769 1770 bfqd->rq_in_driver++; 1771 } 1772 1773 static void bfq_deactivate_request(struct request_queue *q, struct request *rq) 1774 { 1775 struct bfq_data *bfqd = q->elevator->elevator_data; 1776 1777 bfqd->rq_in_driver--; 1778 } 1779 #endif 1780 1781 static void bfq_remove_request(struct request_queue *q, 1782 struct request *rq) 1783 { 1784 struct bfq_queue *bfqq = RQ_BFQQ(rq); 1785 struct bfq_data *bfqd = bfqq->bfqd; 1786 const int sync = rq_is_sync(rq); 1787 1788 if (bfqq->next_rq == rq) { 1789 bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq); 1790 bfq_updated_next_req(bfqd, bfqq); 1791 } 1792 1793 if (rq->queuelist.prev != &rq->queuelist) 1794 list_del_init(&rq->queuelist); 1795 bfqq->queued[sync]--; 1796 bfqd->queued--; 1797 elv_rb_del(&bfqq->sort_list, rq); 1798 1799 elv_rqhash_del(q, rq); 1800 if (q->last_merge == rq) 1801 q->last_merge = NULL; 1802 1803 if (RB_EMPTY_ROOT(&bfqq->sort_list)) { 1804 bfqq->next_rq = NULL; 1805 1806 if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) { 1807 
bfq_del_bfqq_busy(bfqd, bfqq, false); 1808 /* 1809 * bfqq emptied. In normal operation, when 1810 * bfqq is empty, bfqq->entity.service and 1811 * bfqq->entity.budget must contain, 1812 * respectively, the service received and the 1813 * budget used last time bfqq emptied. These 1814 * facts do not hold in this case, as at least 1815 * this last removal occurred while bfqq is 1816 * not in service. To avoid inconsistencies, 1817 * reset both bfqq->entity.service and 1818 * bfqq->entity.budget, if bfqq has still a 1819 * process that may issue I/O requests to it. 1820 */ 1821 bfqq->entity.budget = bfqq->entity.service = 0; 1822 } 1823 1824 /* 1825 * Remove queue from request-position tree as it is empty. 1826 */ 1827 if (bfqq->pos_root) { 1828 rb_erase(&bfqq->pos_node, bfqq->pos_root); 1829 bfqq->pos_root = NULL; 1830 } 1831 } else { 1832 bfq_pos_tree_add_move(bfqd, bfqq); 1833 } 1834 1835 if (rq->cmd_flags & REQ_META) 1836 bfqq->meta_pending--; 1837 1838 } 1839 1840 static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio) 1841 { 1842 struct request_queue *q = hctx->queue; 1843 struct bfq_data *bfqd = q->elevator->elevator_data; 1844 struct request *free = NULL; 1845 /* 1846 * bfq_bic_lookup grabs the queue_lock: invoke it now and 1847 * store its return value for later use, to avoid nesting 1848 * queue_lock inside the bfqd->lock. We assume that the bic 1849 * returned by bfq_bic_lookup does not go away before 1850 * bfqd->lock is taken. 1851 */ 1852 struct bfq_io_cq *bic = bfq_bic_lookup(bfqd, current->io_context, q); 1853 bool ret; 1854 1855 spin_lock_irq(&bfqd->lock); 1856 1857 if (bic) 1858 bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf)); 1859 else 1860 bfqd->bio_bfqq = NULL; 1861 bfqd->bio_bic = bic; 1862 1863 ret = blk_mq_sched_try_merge(q, bio, &free); 1864 1865 if (free) 1866 blk_mq_free_request(free); 1867 spin_unlock_irq(&bfqd->lock); 1868 1869 return ret; 1870 } 1871 1872 static int bfq_request_merge(struct request_queue *q, struct request **req, 1873 struct bio *bio) 1874 { 1875 struct bfq_data *bfqd = q->elevator->elevator_data; 1876 struct request *__rq; 1877 1878 __rq = bfq_find_rq_fmerge(bfqd, bio, q); 1879 if (__rq && elv_bio_merge_ok(__rq, bio)) { 1880 *req = __rq; 1881 return ELEVATOR_FRONT_MERGE; 1882 } 1883 1884 return ELEVATOR_NO_MERGE; 1885 } 1886 1887 static struct bfq_queue *bfq_init_rq(struct request *rq); 1888 1889 static void bfq_request_merged(struct request_queue *q, struct request *req, 1890 enum elv_merge type) 1891 { 1892 if (type == ELEVATOR_FRONT_MERGE && 1893 rb_prev(&req->rb_node) && 1894 blk_rq_pos(req) < 1895 blk_rq_pos(container_of(rb_prev(&req->rb_node), 1896 struct request, rb_node))) { 1897 struct bfq_queue *bfqq = bfq_init_rq(req); 1898 struct bfq_data *bfqd = bfqq->bfqd; 1899 struct request *prev, *next_rq; 1900 1901 /* Reposition request in its sort_list */ 1902 elv_rb_del(&bfqq->sort_list, req); 1903 elv_rb_add(&bfqq->sort_list, req); 1904 1905 /* Choose next request to be served for bfqq */ 1906 prev = bfqq->next_rq; 1907 next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req, 1908 bfqd->last_position); 1909 bfqq->next_rq = next_rq; 1910 /* 1911 * If next_rq changes, update both the queue's budget to 1912 * fit the new request and the queue's position in its 1913 * rq_pos_tree. 
1914 */ 1915 if (prev != bfqq->next_rq) { 1916 bfq_updated_next_req(bfqd, bfqq); 1917 bfq_pos_tree_add_move(bfqd, bfqq); 1918 } 1919 } 1920 } 1921 1922 /* 1923 * This function is called to notify the scheduler that the requests 1924 * rq and 'next' have been merged, with 'next' going away. BFQ 1925 * exploits this hook to address the following issue: if 'next' has a 1926 * fifo_time lower than rq, then the fifo_time of rq must be set to 1927 * the value of 'next', so as not to forget the greater age of 'next'. 1928 * 1929 * NOTE: in this function we assume that rq is in a bfq_queue, based 1930 * on the fact that rq is picked from the hash table q->elevator->hash, which, 1931 * in turn, is filled only with I/O requests present in 1932 * bfq_queues, while BFQ is in use for the request queue q. In fact, 1933 * the function that fills this hash table (elv_rqhash_add) is called 1934 * only by bfq_insert_request. 1935 */ 1936 static void bfq_requests_merged(struct request_queue *q, struct request *rq, 1937 struct request *next) 1938 { 1939 struct bfq_queue *bfqq = bfq_init_rq(rq), 1940 *next_bfqq = bfq_init_rq(next); 1941 1942 /* 1943 * If next and rq belong to the same bfq_queue and next is older 1944 * than rq, then reposition rq in the fifo (by substituting next 1945 * with rq). Otherwise, if next and rq belong to different 1946 * bfq_queues, never reposition rq: in fact, we would have to 1947 * reposition it with respect to next's position in its own fifo, 1948 * which would most certainly be too expensive with respect to 1949 * the benefits. 1950 */ 1951 if (bfqq == next_bfqq && 1952 !list_empty(&rq->queuelist) && !list_empty(&next->queuelist) && 1953 next->fifo_time < rq->fifo_time) { 1954 list_del_init(&rq->queuelist); 1955 list_replace_init(&next->queuelist, &rq->queuelist); 1956 rq->fifo_time = next->fifo_time; 1957 } 1958 1959 if (bfqq->next_rq == next) 1960 bfqq->next_rq = rq; 1961 1962 bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags); 1963 } 1964 1965 /* Must be called with bfqq != NULL */ 1966 static void bfq_bfqq_end_wr(struct bfq_queue *bfqq) 1967 { 1968 if (bfq_bfqq_busy(bfqq)) 1969 bfqq->bfqd->wr_busy_queues--; 1970 bfqq->wr_coeff = 1; 1971 bfqq->wr_cur_max_time = 0; 1972 bfqq->last_wr_start_finish = jiffies; 1973 /* 1974 * Trigger a weight change on the next invocation of 1975 * __bfq_entity_update_weight_prio.
1976 */ 1977 bfqq->entity.prio_changed = 1; 1978 } 1979 1980 void bfq_end_wr_async_queues(struct bfq_data *bfqd, 1981 struct bfq_group *bfqg) 1982 { 1983 int i, j; 1984 1985 for (i = 0; i < 2; i++) 1986 for (j = 0; j < IOPRIO_BE_NR; j++) 1987 if (bfqg->async_bfqq[i][j]) 1988 bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]); 1989 if (bfqg->async_idle_bfqq) 1990 bfq_bfqq_end_wr(bfqg->async_idle_bfqq); 1991 } 1992 1993 static void bfq_end_wr(struct bfq_data *bfqd) 1994 { 1995 struct bfq_queue *bfqq; 1996 1997 spin_lock_irq(&bfqd->lock); 1998 1999 list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) 2000 bfq_bfqq_end_wr(bfqq); 2001 list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) 2002 bfq_bfqq_end_wr(bfqq); 2003 bfq_end_wr_async(bfqd); 2004 2005 spin_unlock_irq(&bfqd->lock); 2006 } 2007 2008 static sector_t bfq_io_struct_pos(void *io_struct, bool request) 2009 { 2010 if (request) 2011 return blk_rq_pos(io_struct); 2012 else 2013 return ((struct bio *)io_struct)->bi_iter.bi_sector; 2014 } 2015 2016 static int bfq_rq_close_to_sector(void *io_struct, bool request, 2017 sector_t sector) 2018 { 2019 return abs(bfq_io_struct_pos(io_struct, request) - sector) <= 2020 BFQQ_CLOSE_THR; 2021 } 2022 2023 static struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd, 2024 struct bfq_queue *bfqq, 2025 sector_t sector) 2026 { 2027 struct rb_root *root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree; 2028 struct rb_node *parent, *node; 2029 struct bfq_queue *__bfqq; 2030 2031 if (RB_EMPTY_ROOT(root)) 2032 return NULL; 2033 2034 /* 2035 * First, if we find a request starting at the end of the last 2036 * request, choose it. 2037 */ 2038 __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL); 2039 if (__bfqq) 2040 return __bfqq; 2041 2042 /* 2043 * If the exact sector wasn't found, the parent of the NULL leaf 2044 * will contain the closest sector (rq_pos_tree sorted by 2045 * next_request position). 2046 */ 2047 __bfqq = rb_entry(parent, struct bfq_queue, pos_node); 2048 if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector)) 2049 return __bfqq; 2050 2051 if (blk_rq_pos(__bfqq->next_rq) < sector) 2052 node = rb_next(&__bfqq->pos_node); 2053 else 2054 node = rb_prev(&__bfqq->pos_node); 2055 if (!node) 2056 return NULL; 2057 2058 __bfqq = rb_entry(node, struct bfq_queue, pos_node); 2059 if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector)) 2060 return __bfqq; 2061 2062 return NULL; 2063 } 2064 2065 static struct bfq_queue *bfq_find_close_cooperator(struct bfq_data *bfqd, 2066 struct bfq_queue *cur_bfqq, 2067 sector_t sector) 2068 { 2069 struct bfq_queue *bfqq; 2070 2071 /* 2072 * We shall notice if some of the queues are cooperating, 2073 * e.g., working closely on the same area of the device. In 2074 * that case, we can group them together and: 1) don't waste 2075 * time idling, and 2) serve the union of their requests in 2076 * the best possible order for throughput. 2077 */ 2078 bfqq = bfqq_find_close(bfqd, cur_bfqq, sector); 2079 if (!bfqq || bfqq == cur_bfqq) 2080 return NULL; 2081 2082 return bfqq; 2083 } 2084 2085 static struct bfq_queue * 2086 bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) 2087 { 2088 int process_refs, new_process_refs; 2089 struct bfq_queue *__bfqq; 2090 2091 /* 2092 * If there are no process references on the new_bfqq, then it is 2093 * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain 2094 * may have dropped their last reference (not just their last process 2095 * reference). 
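 *
 * (A "process reference" is, roughly, a reference held on behalf of a
 * process that may still issue I/O to the queue, as opposed to
 * references held by the scheduler itself for internal bookkeeping;
 * bfqq_process_refs() computes this difference.)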
2096 */ 2097 if (!bfqq_process_refs(new_bfqq)) 2098 return NULL; 2099 2100 /* Avoid a circular list and skip interim queue merges. */ 2101 while ((__bfqq = new_bfqq->new_bfqq)) { 2102 if (__bfqq == bfqq) 2103 return NULL; 2104 new_bfqq = __bfqq; 2105 } 2106 2107 process_refs = bfqq_process_refs(bfqq); 2108 new_process_refs = bfqq_process_refs(new_bfqq); 2109 /* 2110 * If the process for the bfqq has gone away, there is no 2111 * sense in merging the queues. 2112 */ 2113 if (process_refs == 0 || new_process_refs == 0) 2114 return NULL; 2115 2116 bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d", 2117 new_bfqq->pid); 2118 2119 /* 2120 * Merging is just a redirection: the requests of the process 2121 * owning one of the two queues are redirected to the other queue. 2122 * The latter queue, in its turn, is set as shared if this is the 2123 * first time that the requests of some process are redirected to 2124 * it. 2125 * 2126 * We redirect bfqq to new_bfqq and not the opposite, because 2127 * we are in the context of the process owning bfqq, thus we 2128 * have the io_cq of this process. So we can immediately 2129 * configure this io_cq to redirect the requests of the 2130 * process to new_bfqq. In contrast, the io_cq of new_bfqq is 2131 * not available any more (new_bfqq->bic == NULL). 2132 * 2133 * Anyway, even in case new_bfqq coincides with the in-service 2134 * queue, redirecting requests the in-service queue is the 2135 * best option, as we feed the in-service queue with new 2136 * requests close to the last request served and, by doing so, 2137 * are likely to increase the throughput. 2138 */ 2139 bfqq->new_bfqq = new_bfqq; 2140 new_bfqq->ref += process_refs; 2141 return new_bfqq; 2142 } 2143 2144 static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq, 2145 struct bfq_queue *new_bfqq) 2146 { 2147 if (bfq_too_late_for_merging(new_bfqq)) 2148 return false; 2149 2150 if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) || 2151 (bfqq->ioprio_class != new_bfqq->ioprio_class)) 2152 return false; 2153 2154 /* 2155 * If either of the queues has already been detected as seeky, 2156 * then merging it with the other queue is unlikely to lead to 2157 * sequential I/O. 2158 */ 2159 if (BFQQ_SEEKY(bfqq) || BFQQ_SEEKY(new_bfqq)) 2160 return false; 2161 2162 /* 2163 * Interleaved I/O is known to be done by (some) applications 2164 * only for reads, so it does not make sense to merge async 2165 * queues. 2166 */ 2167 if (!bfq_bfqq_sync(bfqq) || !bfq_bfqq_sync(new_bfqq)) 2168 return false; 2169 2170 return true; 2171 } 2172 2173 /* 2174 * Attempt to schedule a merge of bfqq with the currently in-service 2175 * queue or with a close queue among the scheduled queues. Return 2176 * NULL if no merge was scheduled, a pointer to the shared bfq_queue 2177 * structure otherwise. 2178 * 2179 * The OOM queue is not allowed to participate to cooperation: in fact, since 2180 * the requests temporarily redirected to the OOM queue could be redirected 2181 * again to dedicated queues at any time, the state needed to correctly 2182 * handle merging with the OOM queue would be quite complex and expensive 2183 * to maintain. Besides, in such a critical condition as an out of memory, 2184 * the benefits of queue merging may be little relevant, or even negligible. 
2185 * 2186 * WARNING: queue merging may impair fairness among non-weight raised 2187 * queues, for at least two reasons: 1) the original weight of a 2188 * merged queue may change during the merged state, 2) even being the 2189 * weight the same, a merged queue may be bloated with many more 2190 * requests than the ones produced by its originally-associated 2191 * process. 2192 */ 2193 static struct bfq_queue * 2194 bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, 2195 void *io_struct, bool request) 2196 { 2197 struct bfq_queue *in_service_bfqq, *new_bfqq; 2198 2199 /* 2200 * Prevent bfqq from being merged if it has been created too 2201 * long ago. The idea is that true cooperating processes, and 2202 * thus their associated bfq_queues, are supposed to be 2203 * created shortly after each other. This is the case, e.g., 2204 * for KVM/QEMU and dump I/O threads. Basing on this 2205 * assumption, the following filtering greatly reduces the 2206 * probability that two non-cooperating processes, which just 2207 * happen to do close I/O for some short time interval, have 2208 * their queues merged by mistake. 2209 */ 2210 if (bfq_too_late_for_merging(bfqq)) 2211 return NULL; 2212 2213 if (bfqq->new_bfqq) 2214 return bfqq->new_bfqq; 2215 2216 if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq)) 2217 return NULL; 2218 2219 /* If there is only one backlogged queue, don't search. */ 2220 if (bfqd->busy_queues == 1) 2221 return NULL; 2222 2223 in_service_bfqq = bfqd->in_service_queue; 2224 2225 if (in_service_bfqq && in_service_bfqq != bfqq && 2226 likely(in_service_bfqq != &bfqd->oom_bfqq) && 2227 bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) && 2228 bfqq->entity.parent == in_service_bfqq->entity.parent && 2229 bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) { 2230 new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq); 2231 if (new_bfqq) 2232 return new_bfqq; 2233 } 2234 /* 2235 * Check whether there is a cooperator among currently scheduled 2236 * queues. The only thing we need is that the bio/request is not 2237 * NULL, as we need it to establish whether a cooperator exists. 2238 */ 2239 new_bfqq = bfq_find_close_cooperator(bfqd, bfqq, 2240 bfq_io_struct_pos(io_struct, request)); 2241 2242 if (new_bfqq && likely(new_bfqq != &bfqd->oom_bfqq) && 2243 bfq_may_be_close_cooperator(bfqq, new_bfqq)) 2244 return bfq_setup_merge(bfqq, new_bfqq); 2245 2246 return NULL; 2247 } 2248 2249 static void bfq_bfqq_save_state(struct bfq_queue *bfqq) 2250 { 2251 struct bfq_io_cq *bic = bfqq->bic; 2252 2253 /* 2254 * If !bfqq->bic, the queue is already shared or its requests 2255 * have already been redirected to a shared queue; both idle window 2256 * and weight raising state have already been saved. Do nothing. 2257 */ 2258 if (!bic) 2259 return; 2260 2261 bic->saved_ttime = bfqq->ttime; 2262 bic->saved_has_short_ttime = bfq_bfqq_has_short_ttime(bfqq); 2263 bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq); 2264 bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq); 2265 bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node); 2266 if (unlikely(bfq_bfqq_just_created(bfqq) && 2267 !bfq_bfqq_in_large_burst(bfqq) && 2268 bfqq->bfqd->low_latency)) { 2269 /* 2270 * bfqq being merged right after being created: bfqq 2271 * would have deserved interactive weight raising, but 2272 * did not make it to be set in a weight-raised state, 2273 * because of this early merge. 
Store directly the 2274 * weight-raising state that would have been assigned 2275 * to bfqq, so that to avoid that bfqq unjustly fails 2276 * to enjoy weight raising if split soon. 2277 */ 2278 bic->saved_wr_coeff = bfqq->bfqd->bfq_wr_coeff; 2279 bic->saved_wr_cur_max_time = bfq_wr_duration(bfqq->bfqd); 2280 bic->saved_last_wr_start_finish = jiffies; 2281 } else { 2282 bic->saved_wr_coeff = bfqq->wr_coeff; 2283 bic->saved_wr_start_at_switch_to_srt = 2284 bfqq->wr_start_at_switch_to_srt; 2285 bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish; 2286 bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time; 2287 } 2288 } 2289 2290 static void 2291 bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, 2292 struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) 2293 { 2294 bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu", 2295 (unsigned long)new_bfqq->pid); 2296 /* Save weight raising and idle window of the merged queues */ 2297 bfq_bfqq_save_state(bfqq); 2298 bfq_bfqq_save_state(new_bfqq); 2299 if (bfq_bfqq_IO_bound(bfqq)) 2300 bfq_mark_bfqq_IO_bound(new_bfqq); 2301 bfq_clear_bfqq_IO_bound(bfqq); 2302 2303 /* 2304 * If bfqq is weight-raised, then let new_bfqq inherit 2305 * weight-raising. To reduce false positives, neglect the case 2306 * where bfqq has just been created, but has not yet made it 2307 * to be weight-raised (which may happen because EQM may merge 2308 * bfqq even before bfq_add_request is executed for the first 2309 * time for bfqq). Handling this case would however be very 2310 * easy, thanks to the flag just_created. 2311 */ 2312 if (new_bfqq->wr_coeff == 1 && bfqq->wr_coeff > 1) { 2313 new_bfqq->wr_coeff = bfqq->wr_coeff; 2314 new_bfqq->wr_cur_max_time = bfqq->wr_cur_max_time; 2315 new_bfqq->last_wr_start_finish = bfqq->last_wr_start_finish; 2316 new_bfqq->wr_start_at_switch_to_srt = 2317 bfqq->wr_start_at_switch_to_srt; 2318 if (bfq_bfqq_busy(new_bfqq)) 2319 bfqd->wr_busy_queues++; 2320 new_bfqq->entity.prio_changed = 1; 2321 } 2322 2323 if (bfqq->wr_coeff > 1) { /* bfqq has given its wr to new_bfqq */ 2324 bfqq->wr_coeff = 1; 2325 bfqq->entity.prio_changed = 1; 2326 if (bfq_bfqq_busy(bfqq)) 2327 bfqd->wr_busy_queues--; 2328 } 2329 2330 bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d", 2331 bfqd->wr_busy_queues); 2332 2333 /* 2334 * Merge queues (that is, let bic redirect its requests to new_bfqq) 2335 */ 2336 bic_set_bfqq(bic, new_bfqq, 1); 2337 bfq_mark_bfqq_coop(new_bfqq); 2338 /* 2339 * new_bfqq now belongs to at least two bics (it is a shared queue): 2340 * set new_bfqq->bic to NULL. bfqq either: 2341 * - does not belong to any bic any more, and hence bfqq->bic must 2342 * be set to NULL, or 2343 * - is a queue whose owning bics have already been redirected to a 2344 * different queue, hence the queue is destined to not belong to 2345 * any bic soon and bfqq->bic is already NULL (therefore the next 2346 * assignment causes no harm). 2347 */ 2348 new_bfqq->bic = NULL; 2349 bfqq->bic = NULL; 2350 /* release process reference to bfqq */ 2351 bfq_put_queue(bfqq); 2352 } 2353 2354 static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq, 2355 struct bio *bio) 2356 { 2357 struct bfq_data *bfqd = q->elevator->elevator_data; 2358 bool is_sync = op_is_sync(bio->bi_opf); 2359 struct bfq_queue *bfqq = bfqd->bio_bfqq, *new_bfqq; 2360 2361 /* 2362 * Disallow merge of a sync bio into an async request. 2363 */ 2364 if (is_sync && !rq_is_sync(rq)) 2365 return false; 2366 2367 /* 2368 * Lookup the bfqq that this bio will be queued with. 
Allow 2369 * merge only if rq is queued there. 2370 */ 2371 if (!bfqq) 2372 return false; 2373 2374 /* 2375 * We take advantage of this function to perform an early merge 2376 * of the queues of possible cooperating processes. 2377 */ 2378 new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false); 2379 if (new_bfqq) { 2380 /* 2381 * bic still points to bfqq, i.e., it has not yet been 2382 * redirected to some other bfq_queue, so a queue 2383 * merge between bfqq and new_bfqq can be safely 2384 * fulfilled, i.e., bic can be redirected to new_bfqq 2385 * and bfqq can be put. 2386 */ 2387 bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq, 2388 new_bfqq); 2389 /* 2390 * If we get here, bio will be queued into new_bfqq, 2391 * so use new_bfqq to decide whether bio and rq can be 2392 * merged. 2393 */ 2394 bfqq = new_bfqq; 2395 2396 /* 2397 * Also change bfqd->bio_bfqq, as 2398 * bfqd->bio_bic now points to new_bfqq, and 2399 * this function may be invoked again (and then may 2400 * use bfqd->bio_bfqq again). 2401 */ 2402 bfqd->bio_bfqq = bfqq; 2403 } 2404 2405 return bfqq == RQ_BFQQ(rq); 2406 } 2407 2408 /* 2409 * Set the maximum time for the in-service queue to consume its 2410 * budget. This prevents seeky processes from lowering the throughput. 2411 * In practice, a time-slice service scheme is used with seeky 2412 * processes. 2413 */ 2414 static void bfq_set_budget_timeout(struct bfq_data *bfqd, 2415 struct bfq_queue *bfqq) 2416 { 2417 unsigned int timeout_coeff; 2418 2419 if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time) 2420 timeout_coeff = 1; 2421 else 2422 timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight; 2423 2424 bfqd->last_budget_start = ktime_get(); 2425 2426 bfqq->budget_timeout = jiffies + 2427 bfqd->bfq_timeout * timeout_coeff; 2428 } 2429 2430 static void __bfq_set_in_service_queue(struct bfq_data *bfqd, 2431 struct bfq_queue *bfqq) 2432 { 2433 if (bfqq) { 2434 bfq_clear_bfqq_fifo_expire(bfqq); 2435 2436 bfqd->budgets_assigned = (bfqd->budgets_assigned * 7 + 256) / 8; 2437 2438 if (time_is_before_jiffies(bfqq->last_wr_start_finish) && 2439 bfqq->wr_coeff > 1 && 2440 bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time && 2441 time_is_before_jiffies(bfqq->budget_timeout)) { 2442 /* 2443 * For soft real-time queues, move the start 2444 * of the weight-raising period forward by the 2445 * time the queue has not received any 2446 * service. Otherwise, a relatively long 2447 * service delay is likely to cause the 2448 * weight-raising period of the queue to end, 2449 * because of the short duration of the 2450 * weight-raising period of a soft real-time 2451 * queue. It is worth noting that this move 2452 * is not so dangerous for the other queues, 2453 * because soft real-time queues are not 2454 * greedy. 2455 * 2456 * To not add a further variable, we use the 2457 * overloaded field budget_timeout to 2458 * determine for how long the queue has not 2459 * received service, i.e., how much time has 2460 * elapsed since the queue expired. However, 2461 * this is a little imprecise, because 2462 * budget_timeout is set to jiffies if bfqq 2463 * not only expires, but also remains with no 2464 * request.
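 *
 * As a concrete illustration (hypothetical values): if weight-raising
 * started at last_wr_start_finish = 900 jiffies, the queue expired and
 * emptied at budget_timeout = 1000, and it is selected for service
 * again at jiffies = 1300, then the queue received no service for
 * roughly 300 jiffies, and the code below moves last_wr_start_finish
 * forward to 900 + (1300 - 1000) = 1200, effectively pausing the
 * weight-raising period while the queue was not served.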
2465 */ 2466 if (time_after(bfqq->budget_timeout, 2467 bfqq->last_wr_start_finish)) 2468 bfqq->last_wr_start_finish += 2469 jiffies - bfqq->budget_timeout; 2470 else 2471 bfqq->last_wr_start_finish = jiffies; 2472 } 2473 2474 bfq_set_budget_timeout(bfqd, bfqq); 2475 bfq_log_bfqq(bfqd, bfqq, 2476 "set_in_service_queue, cur-budget = %d", 2477 bfqq->entity.budget); 2478 } 2479 2480 bfqd->in_service_queue = bfqq; 2481 } 2482 2483 /* 2484 * Get and set a new queue for service. 2485 */ 2486 static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd) 2487 { 2488 struct bfq_queue *bfqq = bfq_get_next_queue(bfqd); 2489 2490 __bfq_set_in_service_queue(bfqd, bfqq); 2491 return bfqq; 2492 } 2493 2494 static void bfq_arm_slice_timer(struct bfq_data *bfqd) 2495 { 2496 struct bfq_queue *bfqq = bfqd->in_service_queue; 2497 u32 sl; 2498 2499 bfq_mark_bfqq_wait_request(bfqq); 2500 2501 /* 2502 * We don't want to idle for seeks, but we do want to allow 2503 * fair distribution of slice time for a process doing back-to-back 2504 * seeks. So allow a little bit of time for him to submit a new rq. 2505 */ 2506 sl = bfqd->bfq_slice_idle; 2507 /* 2508 * Unless the queue is being weight-raised or the scenario is 2509 * asymmetric, grant only minimum idle time if the queue 2510 * is seeky. A long idling is preserved for a weight-raised 2511 * queue, or, more in general, in an asymmetric scenario, 2512 * because a long idling is needed for guaranteeing to a queue 2513 * its reserved share of the throughput (in particular, it is 2514 * needed if the queue has a higher weight than some other 2515 * queue). 2516 */ 2517 if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 && 2518 bfq_symmetric_scenario(bfqd)) 2519 sl = min_t(u64, sl, BFQ_MIN_TT); 2520 2521 bfqd->last_idling_start = ktime_get(); 2522 hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl), 2523 HRTIMER_MODE_REL); 2524 bfqg_stats_set_start_idle_time(bfqq_group(bfqq)); 2525 } 2526 2527 /* 2528 * In autotuning mode, max_budget is dynamically recomputed as the 2529 * amount of sectors transferred in timeout at the estimated peak 2530 * rate. This enables BFQ to utilize a full timeslice with a full 2531 * budget, even if the in-service queue is served at peak rate. And 2532 * this maximises throughput with sequential workloads. 2533 */ 2534 static unsigned long bfq_calc_max_budget(struct bfq_data *bfqd) 2535 { 2536 return (u64)bfqd->peak_rate * USEC_PER_MSEC * 2537 jiffies_to_msecs(bfqd->bfq_timeout)>>BFQ_RATE_SHIFT; 2538 } 2539 2540 /* 2541 * Update parameters related to throughput and responsiveness, as a 2542 * function of the estimated peak rate. See comments on 2543 * bfq_calc_max_budget(), and on the ref_wr_duration array. 2544 */ 2545 static void update_thr_responsiveness_params(struct bfq_data *bfqd) 2546 { 2547 if (bfqd->bfq_user_max_budget == 0) { 2548 bfqd->bfq_max_budget = 2549 bfq_calc_max_budget(bfqd); 2550 bfq_log(bfqd, "new max_budget = %d", bfqd->bfq_max_budget); 2551 } 2552 } 2553 2554 static void bfq_reset_rate_computation(struct bfq_data *bfqd, 2555 struct request *rq) 2556 { 2557 if (rq != NULL) { /* new rq dispatch now, reset accordingly */ 2558 bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns(); 2559 bfqd->peak_rate_samples = 1; 2560 bfqd->sequential_samples = 0; 2561 bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size = 2562 blk_rq_sectors(rq); 2563 } else /* no new rq dispatched, just reset the number of samples */ 2564 bfqd->peak_rate_samples = 0; /* full re-init on next disp. 
*/ 2565 2566 bfq_log(bfqd, 2567 "reset_rate_computation at end, sample %u/%u tot_sects %llu", 2568 bfqd->peak_rate_samples, bfqd->sequential_samples, 2569 bfqd->tot_sectors_dispatched); 2570 } 2571 2572 static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq) 2573 { 2574 u32 rate, weight, divisor; 2575 2576 /* 2577 * For the convergence property to hold (see comments on 2578 * bfq_update_peak_rate()) and for the assessment to be 2579 * reliable, a minimum number of samples must be present, and 2580 * a minimum amount of time must have elapsed. If not so, do 2581 * not compute new rate. Just reset parameters, to get ready 2582 * for a new evaluation attempt. 2583 */ 2584 if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES || 2585 bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL) 2586 goto reset_computation; 2587 2588 /* 2589 * If a new request completion has occurred after last 2590 * dispatch, then, to approximate the rate at which requests 2591 * have been served by the device, it is more precise to 2592 * extend the observation interval to the last completion. 2593 */ 2594 bfqd->delta_from_first = 2595 max_t(u64, bfqd->delta_from_first, 2596 bfqd->last_completion - bfqd->first_dispatch); 2597 2598 /* 2599 * Rate computed in sects/usec, and not sects/nsec, for 2600 * precision issues. 2601 */ 2602 rate = div64_ul(bfqd->tot_sectors_dispatched<<BFQ_RATE_SHIFT, 2603 div_u64(bfqd->delta_from_first, NSEC_PER_USEC)); 2604 2605 /* 2606 * Peak rate not updated if: 2607 * - the percentage of sequential dispatches is below 3/4 of the 2608 * total, and rate is below the current estimated peak rate 2609 * - rate is unreasonably high (> 20M sectors/sec) 2610 */ 2611 if ((bfqd->sequential_samples < (3 * bfqd->peak_rate_samples)>>2 && 2612 rate <= bfqd->peak_rate) || 2613 rate > 20<<BFQ_RATE_SHIFT) 2614 goto reset_computation; 2615 2616 /* 2617 * We have to update the peak rate, at last! To this purpose, 2618 * we use a low-pass filter. We compute the smoothing constant 2619 * of the filter as a function of the 'weight' of the new 2620 * measured rate. 2621 * 2622 * As can be seen in next formulas, we define this weight as a 2623 * quantity proportional to how sequential the workload is, 2624 * and to how long the observation time interval is. 2625 * 2626 * The weight runs from 0 to 8. The maximum value of the 2627 * weight, 8, yields the minimum value for the smoothing 2628 * constant. At this minimum value for the smoothing constant, 2629 * the measured rate contributes for half of the next value of 2630 * the estimated peak rate. 2631 * 2632 * So, the first step is to compute the weight as a function 2633 * of how sequential the workload is. Note that the weight 2634 * cannot reach 9, because bfqd->sequential_samples cannot 2635 * become equal to bfqd->peak_rate_samples, which, in its 2636 * turn, holds true because bfqd->sequential_samples is not 2637 * incremented for the first sample. 2638 */ 2639 weight = (9 * bfqd->sequential_samples) / bfqd->peak_rate_samples; 2640 2641 /* 2642 * Second step: further refine the weight as a function of the 2643 * duration of the observation interval. 2644 */ 2645 weight = min_t(u32, 8, 2646 div_u64(weight * bfqd->delta_from_first, 2647 BFQ_RATE_REF_INTERVAL)); 2648 2649 /* 2650 * Divisor ranging from 10, for minimum weight, to 2, for 2651 * maximum weight. 
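 *
 * Worked example (purely illustrative figures): with 80 sequential
 * samples out of 100 total samples, the first step yields
 * weight = (9 * 80) / 100 = 7; assuming the observation interval is
 * just at BFQ_RATE_REF_INTERVAL, the second step leaves it at 7, so
 * divisor = 10 - 7 = 3 and the update below becomes
 * peak_rate = peak_rate * 2/3 + rate * 1/3, i.e., the new measurement
 * contributes one third of the new estimate.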
2652 */ 2653 divisor = 10 - weight; 2654 2655 /* 2656 * Finally, update peak rate: 2657 * 2658 * peak_rate = peak_rate * (divisor-1) / divisor + rate / divisor 2659 */ 2660 bfqd->peak_rate *= divisor-1; 2661 bfqd->peak_rate /= divisor; 2662 rate /= divisor; /* smoothing constant alpha = 1/divisor */ 2663 2664 bfqd->peak_rate += rate; 2665 2666 /* 2667 * For a very slow device, bfqd->peak_rate can reach 0 (see 2668 * the minimum representable values reported in the comments 2669 * on BFQ_RATE_SHIFT). Push to 1 if this happens, to avoid 2670 * divisions by zero where bfqd->peak_rate is used as a 2671 * divisor. 2672 */ 2673 bfqd->peak_rate = max_t(u32, 1, bfqd->peak_rate); 2674 2675 update_thr_responsiveness_params(bfqd); 2676 2677 reset_computation: 2678 bfq_reset_rate_computation(bfqd, rq); 2679 } 2680 2681 /* 2682 * Update the read/write peak rate (the main quantity used for 2683 * auto-tuning, see update_thr_responsiveness_params()). 2684 * 2685 * It is not trivial to estimate the peak rate (correctly): because of 2686 * the presence of sw and hw queues between the scheduler and the 2687 * device components that finally serve I/O requests, it is hard to 2688 * say exactly when a given dispatched request is served inside the 2689 * device, and for how long. As a consequence, it is hard to know 2690 * precisely at what rate a given set of requests is actually served 2691 * by the device. 2692 * 2693 * On the opposite end, the dispatch time of any request is trivially 2694 * available, and, from this piece of information, the "dispatch rate" 2695 * of requests can be immediately computed. So, the idea in the next 2696 * function is to use what is known, namely request dispatch times 2697 * (plus, when useful, request completion times), to estimate what is 2698 * unknown, namely in-device request service rate. 2699 * 2700 * The main issue is that, because of the above facts, the rate at 2701 * which a certain set of requests is dispatched over a certain time 2702 * interval can vary greatly with respect to the rate at which the 2703 * same requests are then served. But, since the size of any 2704 * intermediate queue is limited, and the service scheme is lossless 2705 * (no request is silently dropped), the following obvious convergence 2706 * property holds: the number of requests dispatched MUST become 2707 * closer and closer to the number of requests completed as the 2708 * observation interval grows. This is the key property used in 2709 * the next function to estimate the peak service rate as a function 2710 * of the observed dispatch rate. The function assumes to be invoked 2711 * on every request dispatch. 2712 */ 2713 static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq) 2714 { 2715 u64 now_ns = ktime_get_ns(); 2716 2717 if (bfqd->peak_rate_samples == 0) { /* first dispatch */ 2718 bfq_log(bfqd, "update_peak_rate: goto reset, samples %d", 2719 bfqd->peak_rate_samples); 2720 bfq_reset_rate_computation(bfqd, rq); 2721 goto update_last_values; /* will add one sample */ 2722 } 2723 2724 /* 2725 * Device idle for very long: the observation interval lasting 2726 * up to this dispatch cannot be a valid observation interval 2727 * for computing a new peak rate (similarly to the late- 2728 * completion event in bfq_completed_request()). 
Go to 2729 * update_rate_and_reset to have the following three steps 2730 * taken: 2731 * - close the observation interval at the last (previous) 2732 * request dispatch or completion 2733 * - compute rate, if possible, for that observation interval 2734 * - start a new observation interval with this dispatch 2735 */ 2736 if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC && 2737 bfqd->rq_in_driver == 0) 2738 goto update_rate_and_reset; 2739 2740 /* Update sampling information */ 2741 bfqd->peak_rate_samples++; 2742 2743 if ((bfqd->rq_in_driver > 0 || 2744 now_ns - bfqd->last_completion < BFQ_MIN_TT) 2745 && get_sdist(bfqd->last_position, rq) < BFQQ_SEEK_THR) 2746 bfqd->sequential_samples++; 2747 2748 bfqd->tot_sectors_dispatched += blk_rq_sectors(rq); 2749 2750 /* Reset max observed rq size every 32 dispatches */ 2751 if (likely(bfqd->peak_rate_samples % 32)) 2752 bfqd->last_rq_max_size = 2753 max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size); 2754 else 2755 bfqd->last_rq_max_size = blk_rq_sectors(rq); 2756 2757 bfqd->delta_from_first = now_ns - bfqd->first_dispatch; 2758 2759 /* Target observation interval not yet reached, go on sampling */ 2760 if (bfqd->delta_from_first < BFQ_RATE_REF_INTERVAL) 2761 goto update_last_values; 2762 2763 update_rate_and_reset: 2764 bfq_update_rate_reset(bfqd, rq); 2765 update_last_values: 2766 bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq); 2767 bfqd->last_dispatch = now_ns; 2768 } 2769 2770 /* 2771 * Remove request from internal lists. 2772 */ 2773 static void bfq_dispatch_remove(struct request_queue *q, struct request *rq) 2774 { 2775 struct bfq_queue *bfqq = RQ_BFQQ(rq); 2776 2777 /* 2778 * For consistency, the next instruction should have been 2779 * executed after removing the request from the queue and 2780 * dispatching it. We execute instead this instruction before 2781 * bfq_remove_request() (and hence introduce a temporary 2782 * inconsistency), for efficiency. In fact, should this 2783 * dispatch occur for a non in-service bfqq, this anticipated 2784 * increment prevents two counters related to bfqq->dispatched 2785 * from risking to be, first, uselessly decremented, and then 2786 * incremented again when the (new) value of bfqq->dispatched 2787 * happens to be taken into account. 2788 */ 2789 bfqq->dispatched++; 2790 bfq_update_peak_rate(q->elevator->elevator_data, rq); 2791 2792 bfq_remove_request(q, rq); 2793 } 2794 2795 static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq) 2796 { 2797 /* 2798 * If this bfqq is shared between multiple processes, check 2799 * to make sure that those processes are still issuing I/Os 2800 * within the mean seek distance. If not, it may be time to 2801 * break the queues apart again. 2802 */ 2803 if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq)) 2804 bfq_mark_bfqq_split_coop(bfqq); 2805 2806 if (RB_EMPTY_ROOT(&bfqq->sort_list)) { 2807 if (bfqq->dispatched == 0) 2808 /* 2809 * Overloading budget_timeout field to store 2810 * the time at which the queue remains with no 2811 * backlog and no outstanding request; used by 2812 * the weight-raising mechanism. 2813 */ 2814 bfqq->budget_timeout = jiffies; 2815 2816 bfq_del_bfqq_busy(bfqd, bfqq, true); 2817 } else { 2818 bfq_requeue_bfqq(bfqd, bfqq, true); 2819 /* 2820 * Resort priority tree of potential close cooperators. 
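 * (bfqq's head request may have changed while it was in service, and
 * the rq_pos_tree is keyed on the position of the next request to
 * serve, so bfqq may need to move within that tree.)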
2821 */ 2822 bfq_pos_tree_add_move(bfqd, bfqq); 2823 } 2824 2825 /* 2826 * All in-service entities must have been properly deactivated 2827 * or requeued before executing the next function, which 2828 * resets all in-service entites as no more in service. 2829 */ 2830 __bfq_bfqd_reset_in_service(bfqd); 2831 } 2832 2833 /** 2834 * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior. 2835 * @bfqd: device data. 2836 * @bfqq: queue to update. 2837 * @reason: reason for expiration. 2838 * 2839 * Handle the feedback on @bfqq budget at queue expiration. 2840 * See the body for detailed comments. 2841 */ 2842 static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd, 2843 struct bfq_queue *bfqq, 2844 enum bfqq_expiration reason) 2845 { 2846 struct request *next_rq; 2847 int budget, min_budget; 2848 2849 min_budget = bfq_min_budget(bfqd); 2850 2851 if (bfqq->wr_coeff == 1) 2852 budget = bfqq->max_budget; 2853 else /* 2854 * Use a constant, low budget for weight-raised queues, 2855 * to help achieve a low latency. Keep it slightly higher 2856 * than the minimum possible budget, to cause a little 2857 * bit fewer expirations. 2858 */ 2859 budget = 2 * min_budget; 2860 2861 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d", 2862 bfqq->entity.budget, bfq_bfqq_budget_left(bfqq)); 2863 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d", 2864 budget, bfq_min_budget(bfqd)); 2865 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d", 2866 bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue)); 2867 2868 if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) { 2869 switch (reason) { 2870 /* 2871 * Caveat: in all the following cases we trade latency 2872 * for throughput. 2873 */ 2874 case BFQQE_TOO_IDLE: 2875 /* 2876 * This is the only case where we may reduce 2877 * the budget: if there is no request of the 2878 * process still waiting for completion, then 2879 * we assume (tentatively) that the timer has 2880 * expired because the batch of requests of 2881 * the process could have been served with a 2882 * smaller budget. Hence, betting that 2883 * process will behave in the same way when it 2884 * becomes backlogged again, we reduce its 2885 * next budget. As long as we guess right, 2886 * this budget cut reduces the latency 2887 * experienced by the process. 2888 * 2889 * However, if there are still outstanding 2890 * requests, then the process may have not yet 2891 * issued its next request just because it is 2892 * still waiting for the completion of some of 2893 * the still outstanding ones. So in this 2894 * subcase we do not reduce its budget, on the 2895 * contrary we increase it to possibly boost 2896 * the throughput, as discussed in the 2897 * comments to the BUDGET_TIMEOUT case. 2898 */ 2899 if (bfqq->dispatched > 0) /* still outstanding reqs */ 2900 budget = min(budget * 2, bfqd->bfq_max_budget); 2901 else { 2902 if (budget > 5 * min_budget) 2903 budget -= 4 * min_budget; 2904 else 2905 budget = min_budget; 2906 } 2907 break; 2908 case BFQQE_BUDGET_TIMEOUT: 2909 /* 2910 * We double the budget here because it gives 2911 * the chance to boost the throughput if this 2912 * is not a seeky process (and has bumped into 2913 * this timeout because of, e.g., ZBR). 2914 */ 2915 budget = min(budget * 2, bfqd->bfq_max_budget); 2916 break; 2917 case BFQQE_BUDGET_EXHAUSTED: 2918 /* 2919 * The process still has backlog, and did not 2920 * let either the budget timeout or the disk 2921 * idling timeout expire. 
Hence it is not 2922 * seeky, has a short thinktime and may be 2923 * happy with a higher budget too. So 2924 * definitely increase the budget of this good 2925 * candidate to boost the disk throughput. 2926 */ 2927 budget = min(budget * 4, bfqd->bfq_max_budget); 2928 break; 2929 case BFQQE_NO_MORE_REQUESTS: 2930 /* 2931 * For queues that expire for this reason, it 2932 * is particularly important to keep the 2933 * budget close to the actual service they 2934 * need. Doing so reduces the timestamp 2935 * misalignment problem described in the 2936 * comments in the body of 2937 * __bfq_activate_entity. In fact, suppose 2938 * that a queue systematically expires for 2939 * BFQQE_NO_MORE_REQUESTS and presents a 2940 * new request in time to enjoy timestamp 2941 * back-shifting. The larger the budget of the 2942 * queue is with respect to the service the 2943 * queue actually requests in each service 2944 * slot, the more times the queue can be 2945 * reactivated with the same virtual finish 2946 * time. It follows that, even if this finish 2947 * time is pushed to the system virtual time 2948 * to reduce the consequent timestamp 2949 * misalignment, the queue unjustly enjoys for 2950 * many re-activations a lower finish time 2951 * than all newly activated queues. 2952 * 2953 * The service needed by bfqq is measured 2954 * quite precisely by bfqq->entity.service. 2955 * Since bfqq does not enjoy device idling, 2956 * bfqq->entity.service is equal to the number 2957 * of sectors that the process associated with 2958 * bfqq requested to read/write before waiting 2959 * for request completions, or blocking for 2960 * other reasons. 2961 */ 2962 budget = max_t(int, bfqq->entity.service, min_budget); 2963 break; 2964 default: 2965 return; 2966 } 2967 } else if (!bfq_bfqq_sync(bfqq)) { 2968 /* 2969 * Async queues get always the maximum possible 2970 * budget, as for them we do not care about latency 2971 * (in addition, their ability to dispatch is limited 2972 * by the charging factor). 2973 */ 2974 budget = bfqd->bfq_max_budget; 2975 } 2976 2977 bfqq->max_budget = budget; 2978 2979 if (bfqd->budgets_assigned >= bfq_stats_min_budgets && 2980 !bfqd->bfq_user_max_budget) 2981 bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget); 2982 2983 /* 2984 * If there is still backlog, then assign a new budget, making 2985 * sure that it is large enough for the next request. Since 2986 * the finish time of bfqq must be kept in sync with the 2987 * budget, be sure to call __bfq_bfqq_expire() *after* this 2988 * update. 2989 * 2990 * If there is no backlog, then no need to update the budget; 2991 * it will be updated on the arrival of a new request. 2992 */ 2993 next_rq = bfqq->next_rq; 2994 if (next_rq) 2995 bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget, 2996 bfq_serv_to_charge(next_rq, bfqq)); 2997 2998 bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %d", 2999 next_rq ? blk_rq_sectors(next_rq) : 0, 3000 bfqq->entity.budget); 3001 } 3002 3003 /* 3004 * Return true if the process associated with bfqq is "slow". The slow 3005 * flag is used, in addition to the budget timeout, to reduce the 3006 * amount of service provided to seeky processes, and thus reduce 3007 * their chances to lower the throughput. More details in the comments 3008 * on the function bfq_bfqq_expire(). 
3009 * 3010 * An important observation is in order: as discussed in the comments 3011 * on the function bfq_update_peak_rate(), with devices with internal 3012 * queues, it is hard if ever possible to know when and for how long 3013 * an I/O request is processed by the device (apart from the trivial 3014 * I/O pattern where a new request is dispatched only after the 3015 * previous one has been completed). This makes it hard to evaluate 3016 * the real rate at which the I/O requests of each bfq_queue are 3017 * served. In fact, for an I/O scheduler like BFQ, serving a 3018 * bfq_queue means just dispatching its requests during its service 3019 * slot (i.e., until the budget of the queue is exhausted, or the 3020 * queue remains idle, or, finally, a timeout fires). But, during the 3021 * service slot of a bfq_queue, around 100 ms at most, the device may 3022 * be even still processing requests of bfq_queues served in previous 3023 * service slots. On the opposite end, the requests of the in-service 3024 * bfq_queue may be completed after the service slot of the queue 3025 * finishes. 3026 * 3027 * Anyway, unless more sophisticated solutions are used 3028 * (where possible), the sum of the sizes of the requests dispatched 3029 * during the service slot of a bfq_queue is probably the only 3030 * approximation available for the service received by the bfq_queue 3031 * during its service slot. And this sum is the quantity used in this 3032 * function to evaluate the I/O speed of a process. 3033 */ 3034 static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq, 3035 bool compensate, enum bfqq_expiration reason, 3036 unsigned long *delta_ms) 3037 { 3038 ktime_t delta_ktime; 3039 u32 delta_usecs; 3040 bool slow = BFQQ_SEEKY(bfqq); /* if delta too short, use seekyness */ 3041 3042 if (!bfq_bfqq_sync(bfqq)) 3043 return false; 3044 3045 if (compensate) 3046 delta_ktime = bfqd->last_idling_start; 3047 else 3048 delta_ktime = ktime_get(); 3049 delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start); 3050 delta_usecs = ktime_to_us(delta_ktime); 3051 3052 /* don't use too short time intervals */ 3053 if (delta_usecs < 1000) { 3054 if (blk_queue_nonrot(bfqd->queue)) 3055 /* 3056 * give same worst-case guarantees as idling 3057 * for seeky 3058 */ 3059 *delta_ms = BFQ_MIN_TT / NSEC_PER_MSEC; 3060 else /* charge at least one seek */ 3061 *delta_ms = bfq_slice_idle / NSEC_PER_MSEC; 3062 3063 return slow; 3064 } 3065 3066 *delta_ms = delta_usecs / USEC_PER_MSEC; 3067 3068 /* 3069 * Use only long (> 20ms) intervals to filter out excessive 3070 * spikes in service rate estimation. 3071 */ 3072 if (delta_usecs > 20000) { 3073 /* 3074 * Caveat for rotational devices: processes doing I/O 3075 * in the slower disk zones tend to be slow(er) even 3076 * if not seeky. In this respect, the estimated peak 3077 * rate is likely to be an average over the disk 3078 * surface. Accordingly, to not be too harsh with 3079 * unlucky processes, a process is deemed slow only if 3080 * its rate has been lower than half of the estimated 3081 * peak rate. 3082 */ 3083 slow = bfqq->entity.service < bfqd->bfq_max_budget / 2; 3084 } 3085 3086 bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow); 3087 3088 return slow; 3089 } 3090 3091 /* 3092 * To be deemed as soft real-time, an application must meet two 3093 * requirements. First, the application must not require an average 3094 * bandwidth higher than the approximate bandwidth required to playback or 3095 * record a compressed high-definition video. 
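 * As a rough, purely illustrative example: with HZ = 250 and
 * bfqd->bfq_wr_max_softrt_rate = 7000 sectors/sec, an application that
 * has received 2800 sectors of service since it last became backlogged
 * must not issue its next request before
 * last_idle_bklogged + 250 * 2800 / 7000 = last_idle_bklogged + 100
 * jiffies (0.4 seconds) for its average bandwidth to stay below this
 * reference bandwidth; this is exactly the bound computed below.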
3096 * The next function is invoked on the completion of the last request of a 3097 * batch, to compute the next-start time instant, soft_rt_next_start, such 3098 * that, if the next request of the application does not arrive before 3099 * soft_rt_next_start, then the above requirement on the bandwidth is met. 3100 * 3101 * The second requirement is that the request pattern of the application is 3102 * isochronous, i.e., that, after issuing a request or a batch of requests, 3103 * the application stops issuing new requests until all its pending requests 3104 * have been completed. After that, the application may issue a new batch, 3105 * and so on. 3106 * For this reason the next function is invoked to compute 3107 * soft_rt_next_start only for applications that meet this requirement, 3108 * whereas soft_rt_next_start is set to infinity for applications that do 3109 * not. 3110 * 3111 * Unfortunately, even a greedy (i.e., I/O-bound) application may 3112 * happen to meet, occasionally or systematically, both the above 3113 * bandwidth and isochrony requirements. This may happen at least in 3114 * the following circumstances. First, if the CPU load is high. The 3115 * application may stop issuing requests while the CPUs are busy 3116 * serving other processes, then restart, then stop again for a while, 3117 * and so on. The other circumstances are related to the storage 3118 * device: the storage device is highly loaded or reaches a low-enough 3119 * throughput with the I/O of the application (e.g., because the I/O 3120 * is random and/or the device is slow). In all these cases, the 3121 * I/O of the application may be simply slowed down enough to meet 3122 * the bandwidth and isochrony requirements. To reduce the probability 3123 * that greedy applications are deemed as soft real-time in these 3124 * corner cases, a further rule is used in the computation of 3125 * soft_rt_next_start: the return value of this function is forced to 3126 * be higher than the maximum between the following two quantities. 3127 * 3128 * (a) Current time plus: (1) the maximum time for which the arrival 3129 * of a request is waited for when a sync queue becomes idle, 3130 * namely bfqd->bfq_slice_idle, and (2) a few extra jiffies. We 3131 * postpone for a moment the reason for adding a few extra 3132 * jiffies; we get back to it after next item (b). Lower-bounding 3133 * the return value of this function with the current time plus 3134 * bfqd->bfq_slice_idle tends to filter out greedy applications, 3135 * because the latter issue their next request as soon as possible 3136 * after the last one has been completed. In contrast, a soft 3137 * real-time application spends some time processing data, after a 3138 * batch of its requests has been completed. 3139 * 3140 * (b) Current value of bfqq->soft_rt_next_start. As pointed out 3141 * above, greedy applications may happen to meet both the 3142 * bandwidth and isochrony requirements under heavy CPU or 3143 * storage-device load. In more detail, in these scenarios, these 3144 * applications happen, only for limited time periods, to do I/O 3145 * slowly enough to meet all the requirements described so far, 3146 * including the filtering in above item (a). These slow-speed 3147 * time intervals are usually interspersed between other time 3148 * intervals during which these applications do I/O at a very high 3149 * speed. 
Fortunately, exactly because of the high speed of the
3150 * I/O in the high-speed intervals, the values returned by this
3151 * function happen to be so high, near the end of any such
3152 * high-speed interval, as to be likely to fall *after* the end of
3153 * the low-speed time interval that follows. These high values are
3154 * stored in bfqq->soft_rt_next_start after each invocation of
3155 * this function. As a consequence, if the last value of
3156 * bfqq->soft_rt_next_start is constantly used to lower-bound the
3157 * next value that this function may return, then, from the very
3158 * beginning of a low-speed interval, bfqq->soft_rt_next_start is
3159 * likely to be constantly kept so high that any I/O request
3160 * issued during the low-speed interval is considered as arriving
3161 * too soon for the application to be deemed as soft
3162 * real-time. Then, in the high-speed interval that follows, the
3163 * application will not be deemed as soft real-time, just because
3164 * it will do I/O at a high speed. And so on.
3165 *
3166 * Getting back to the filtering in item (a), in the following two
3167 * cases this filtering might be easily passed by a greedy
3168 * application, if the reference quantity was just
3169 * bfqd->bfq_slice_idle:
3170 * 1) HZ is so low that the duration of a jiffy is comparable to or
3171 * higher than bfqd->bfq_slice_idle. This happens, e.g., on slow
3172 * devices with HZ=100. The time granularity may be so coarse
3173 * that the approximation, in jiffies, of bfqd->bfq_slice_idle
3174 * is rather lower than the exact value.
3175 * 2) jiffies, instead of increasing at a constant rate, may stop increasing
3176 * for a while, then suddenly 'jump' by several units to recover the lost
3177 * increments. This seems to happen, e.g., inside virtual machines.
3178 * To address this issue, in the filtering in (a) we do not use as a
3179 * reference time interval just bfqd->bfq_slice_idle, but
3180 * bfqd->bfq_slice_idle plus a few jiffies. In particular, we add the
3181 * minimum number of jiffies for which the filter seems to be quite
3182 * precise also in embedded systems and KVM/QEMU virtual machines.
3183 */ 3184 static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
3185 struct bfq_queue *bfqq)
3186 { 3187 return max3(bfqq->soft_rt_next_start,
3188 bfqq->last_idle_bklogged +
3189 HZ * bfqq->service_from_backlogged /
3190 bfqd->bfq_wr_max_softrt_rate,
3191 jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
3192 }
3193 3194 static bool bfq_bfqq_injectable(struct bfq_queue *bfqq)
3195 { 3196 return BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 &&
3197 blk_queue_nonrot(bfqq->bfqd->queue) &&
3198 bfqq->bfqd->hw_tag;
3199 }
3200 3201 /**
3202 * bfq_bfqq_expire - expire a queue.
3203 * @bfqd: device owning the queue.
3204 * @bfqq: the queue to expire.
3205 * @compensate: if true, compensate for the time spent idling.
3206 * @reason: the reason causing the expiration.
3207 *
3208 * If the process associated with bfqq does slow I/O (e.g., because it
3209 * issues random requests), we charge bfqq with the time it has been
3210 * in service instead of the service it has received (see
3211 * bfq_bfqq_charge_time for details on how this goal is achieved). As
3212 * a consequence, bfqq will typically get higher timestamps upon
3213 * reactivation, and hence it will be rescheduled as if it had
3214 * received more service than what it has actually received.
In the 3215 * end, bfqq receives less service in proportion to how slowly its 3216 * associated process consumes its budgets (and hence how seriously it 3217 * tends to lower the throughput). In addition, this time-charging 3218 * strategy guarantees time fairness among slow processes. In 3219 * contrast, if the process associated with bfqq is not slow, we 3220 * charge bfqq exactly with the service it has received. 3221 * 3222 * Charging time to the first type of queues and the exact service to 3223 * the other has the effect of using the WF2Q+ policy to schedule the 3224 * former on a timeslice basis, without violating service domain 3225 * guarantees among the latter. 3226 */ 3227 void bfq_bfqq_expire(struct bfq_data *bfqd, 3228 struct bfq_queue *bfqq, 3229 bool compensate, 3230 enum bfqq_expiration reason) 3231 { 3232 bool slow; 3233 unsigned long delta = 0; 3234 struct bfq_entity *entity = &bfqq->entity; 3235 int ref; 3236 3237 /* 3238 * Check whether the process is slow (see bfq_bfqq_is_slow). 3239 */ 3240 slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta); 3241 3242 /* 3243 * As above explained, charge slow (typically seeky) and 3244 * timed-out queues with the time and not the service 3245 * received, to favor sequential workloads. 3246 * 3247 * Processes doing I/O in the slower disk zones will tend to 3248 * be slow(er) even if not seeky. Therefore, since the 3249 * estimated peak rate is actually an average over the disk 3250 * surface, these processes may timeout just for bad luck. To 3251 * avoid punishing them, do not charge time to processes that 3252 * succeeded in consuming at least 2/3 of their budget. This 3253 * allows BFQ to preserve enough elasticity to still perform 3254 * bandwidth, and not time, distribution with little unlucky 3255 * or quasi-sequential processes. 3256 */ 3257 if (bfqq->wr_coeff == 1 && 3258 (slow || 3259 (reason == BFQQE_BUDGET_TIMEOUT && 3260 bfq_bfqq_budget_left(bfqq) >= entity->budget / 3))) 3261 bfq_bfqq_charge_time(bfqd, bfqq, delta); 3262 3263 if (reason == BFQQE_TOO_IDLE && 3264 entity->service <= 2 * entity->budget / 10) 3265 bfq_clear_bfqq_IO_bound(bfqq); 3266 3267 if (bfqd->low_latency && bfqq->wr_coeff == 1) 3268 bfqq->last_wr_start_finish = jiffies; 3269 3270 if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 && 3271 RB_EMPTY_ROOT(&bfqq->sort_list)) { 3272 /* 3273 * If we get here, and there are no outstanding 3274 * requests, then the request pattern is isochronous 3275 * (see the comments on the function 3276 * bfq_bfqq_softrt_next_start()). Thus we can compute 3277 * soft_rt_next_start. If, instead, the queue still 3278 * has outstanding requests, then we have to wait for 3279 * the completion of all the outstanding requests to 3280 * discover whether the request pattern is actually 3281 * isochronous. 3282 */ 3283 if (bfqq->dispatched == 0) 3284 bfqq->soft_rt_next_start = 3285 bfq_bfqq_softrt_next_start(bfqd, bfqq); 3286 else { 3287 /* 3288 * Schedule an update of soft_rt_next_start to when 3289 * the task may be discovered to be isochronous. 3290 */ 3291 bfq_mark_bfqq_softrt_update(bfqq); 3292 } 3293 } 3294 3295 bfq_log_bfqq(bfqd, bfqq, 3296 "expire (%d, slow %d, num_disp %d, short_ttime %d)", reason, 3297 slow, bfqq->dispatched, bfq_bfqq_has_short_ttime(bfqq)); 3298 3299 /* 3300 * Increase, decrease or leave budget unchanged according to 3301 * reason. 
3302 */ 3303 __bfq_bfqq_recalc_budget(bfqd, bfqq, reason); 3304 ref = bfqq->ref; 3305 __bfq_bfqq_expire(bfqd, bfqq); 3306 3307 if (ref == 1) /* bfqq is gone, no more actions on it */ 3308 return; 3309 3310 bfqq->injected_service = 0; 3311 3312 /* mark bfqq as waiting a request only if a bic still points to it */ 3313 if (!bfq_bfqq_busy(bfqq) && 3314 reason != BFQQE_BUDGET_TIMEOUT && 3315 reason != BFQQE_BUDGET_EXHAUSTED) { 3316 bfq_mark_bfqq_non_blocking_wait_rq(bfqq); 3317 /* 3318 * Not setting service to 0, because, if the next rq 3319 * arrives in time, the queue will go on receiving 3320 * service with this same budget (as if it never expired) 3321 */ 3322 } else 3323 entity->service = 0; 3324 3325 /* 3326 * Reset the received-service counter for every parent entity. 3327 * Differently from what happens with bfqq->entity.service, 3328 * the resetting of this counter never needs to be postponed 3329 * for parent entities. In fact, in case bfqq may have a 3330 * chance to go on being served using the last, partially 3331 * consumed budget, bfqq->entity.service needs to be kept, 3332 * because if bfqq then actually goes on being served using 3333 * the same budget, the last value of bfqq->entity.service is 3334 * needed to properly decrement bfqq->entity.budget by the 3335 * portion already consumed. In contrast, it is not necessary 3336 * to keep entity->service for parent entities too, because 3337 * the bubble up of the new value of bfqq->entity.budget will 3338 * make sure that the budgets of parent entities are correct, 3339 * even in case bfqq and thus parent entities go on receiving 3340 * service with the same budget. 3341 */ 3342 entity = entity->parent; 3343 for_each_entity(entity) 3344 entity->service = 0; 3345 } 3346 3347 /* 3348 * Budget timeout is not implemented through a dedicated timer, but 3349 * just checked on request arrivals and completions, as well as on 3350 * idle timer expirations. 3351 */ 3352 static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq) 3353 { 3354 return time_is_before_eq_jiffies(bfqq->budget_timeout); 3355 } 3356 3357 /* 3358 * If we expire a queue that is actively waiting (i.e., with the 3359 * device idled) for the arrival of a new request, then we may incur 3360 * the timestamp misalignment problem described in the body of the 3361 * function __bfq_activate_entity. Hence we return true only if this 3362 * condition does not hold, or if the queue is slow enough to deserve 3363 * only to be kicked off for preserving a high throughput. 3364 */ 3365 static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq) 3366 { 3367 bfq_log_bfqq(bfqq->bfqd, bfqq, 3368 "may_budget_timeout: wait_request %d left %d timeout %d", 3369 bfq_bfqq_wait_request(bfqq), 3370 bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3, 3371 bfq_bfqq_budget_timeout(bfqq)); 3372 3373 return (!bfq_bfqq_wait_request(bfqq) || 3374 bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3) 3375 && 3376 bfq_bfqq_budget_timeout(bfqq); 3377 } 3378 3379 /* 3380 * For a queue that becomes empty, device idling is allowed only if 3381 * this function returns true for the queue. As a consequence, since 3382 * device idling plays a critical role in both throughput boosting and 3383 * service guarantees, the return value of this function plays a 3384 * critical role in both these aspects as well. 
3385 * 3386 * In a nutshell, this function returns true only if idling is 3387 * beneficial for throughput or, even if detrimental for throughput, 3388 * idling is however necessary to preserve service guarantees (low 3389 * latency, desired throughput distribution, ...). In particular, on 3390 * NCQ-capable devices, this function tries to return false, so as to 3391 * help keep the drives' internal queues full, whenever this helps the 3392 * device boost the throughput without causing any service-guarantee 3393 * issue. 3394 * 3395 * In more detail, the return value of this function is obtained by, 3396 * first, computing a number of boolean variables that take into 3397 * account throughput and service-guarantee issues, and, then, 3398 * combining these variables in a logical expression. Most of the 3399 * issues taken into account are not trivial. We discuss these issues 3400 * individually while introducing the variables. 3401 */ 3402 static bool bfq_better_to_idle(struct bfq_queue *bfqq) 3403 { 3404 struct bfq_data *bfqd = bfqq->bfqd; 3405 bool rot_without_queueing = 3406 !blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag, 3407 bfqq_sequential_and_IO_bound, 3408 idling_boosts_thr, idling_boosts_thr_without_issues, 3409 idling_needed_for_service_guarantees, 3410 asymmetric_scenario; 3411 3412 if (bfqd->strict_guarantees) 3413 return true; 3414 3415 /* 3416 * Idling is performed only if slice_idle > 0. In addition, we 3417 * do not idle if 3418 * (a) bfqq is async 3419 * (b) bfqq is in the idle io prio class: in this case we do 3420 * not idle because we want to minimize the bandwidth that 3421 * queues in this class can steal to higher-priority queues 3422 */ 3423 if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) || 3424 bfq_class_idle(bfqq)) 3425 return false; 3426 3427 bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) && 3428 bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq); 3429 3430 /* 3431 * The next variable takes into account the cases where idling 3432 * boosts the throughput. 3433 * 3434 * The value of the variable is computed considering, first, that 3435 * idling is virtually always beneficial for the throughput if: 3436 * (a) the device is not NCQ-capable and rotational, or 3437 * (b) regardless of the presence of NCQ, the device is rotational and 3438 * the request pattern for bfqq is I/O-bound and sequential, or 3439 * (c) regardless of whether it is rotational, the device is 3440 * not NCQ-capable and the request pattern for bfqq is 3441 * I/O-bound and sequential. 3442 * 3443 * Secondly, and in contrast to the above item (b), idling an 3444 * NCQ-capable flash-based device would not boost the 3445 * throughput even with sequential I/O; rather it would lower 3446 * the throughput in proportion to how fast the device 3447 * is. Accordingly, the next variable is true if any of the 3448 * above conditions (a), (b) or (c) is true, and, in 3449 * particular, happens to be false if bfqd is an NCQ-capable 3450 * flash-based device. 3451 */ 3452 idling_boosts_thr = rot_without_queueing || 3453 ((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) && 3454 bfqq_sequential_and_IO_bound); 3455 3456 /* 3457 * The value of the next variable, 3458 * idling_boosts_thr_without_issues, is equal to that of 3459 * idling_boosts_thr, unless a special case holds. In this 3460 * special case, described below, idling may cause problems to 3461 * weight-raised queues. 
3462 * 3463 * When the request pool is saturated (e.g., in the presence 3464 * of write hogs), if the processes associated with 3465 * non-weight-raised queues ask for requests at a lower rate, 3466 * then processes associated with weight-raised queues have a 3467 * higher probability to get a request from the pool 3468 * immediately (or at least soon) when they need one. Thus 3469 * they have a higher probability to actually get a fraction 3470 * of the device throughput proportional to their high 3471 * weight. This is especially true with NCQ-capable drives, 3472 * which enqueue several requests in advance, and further 3473 * reorder internally-queued requests. 3474 * 3475 * For this reason, we force to false the value of 3476 * idling_boosts_thr_without_issues if there are weight-raised 3477 * busy queues. In this case, and if bfqq is not weight-raised, 3478 * this guarantees that the device is not idled for bfqq (if, 3479 * instead, bfqq is weight-raised, then idling will be 3480 * guaranteed by another variable, see below). Combined with 3481 * the timestamping rules of BFQ (see [1] for details), this 3482 * behavior causes bfqq, and hence any sync non-weight-raised 3483 * queue, to get a lower number of requests served, and thus 3484 * to ask for a lower number of requests from the request 3485 * pool, before the busy weight-raised queues get served 3486 * again. This often mitigates starvation problems in the 3487 * presence of heavy write workloads and NCQ, thereby 3488 * guaranteeing a higher application and system responsiveness 3489 * in these hostile scenarios. 3490 */ 3491 idling_boosts_thr_without_issues = idling_boosts_thr && 3492 bfqd->wr_busy_queues == 0; 3493 3494 /* 3495 * There is then a case where idling must be performed not 3496 * for throughput concerns, but to preserve service 3497 * guarantees. 3498 * 3499 * To introduce this case, we can note that allowing the drive 3500 * to enqueue more than one request at a time, and hence 3501 * delegating de facto final scheduling decisions to the 3502 * drive's internal scheduler, entails loss of control on the 3503 * actual request service order. In particular, the critical 3504 * situation is when requests from different processes happen 3505 * to be present, at the same time, in the internal queue(s) 3506 * of the drive. In such a situation, the drive, by deciding 3507 * the service order of the internally-queued requests, does 3508 * determine also the actual throughput distribution among 3509 * these processes. But the drive typically has no notion or 3510 * concern about per-process throughput distribution, and 3511 * makes its decisions only on a per-request basis. Therefore, 3512 * the service distribution enforced by the drive's internal 3513 * scheduler is likely to coincide with the desired 3514 * device-throughput distribution only in a completely 3515 * symmetric scenario where: 3516 * (i) each of these processes must get the same throughput as 3517 * the others; 3518 * (ii) the I/O of each process has the same properties, in 3519 * terms of locality (sequential or random), direction 3520 * (reads or writes), request sizes, greediness 3521 * (from I/O-bound to sporadic), and so on. 3522 * In fact, in such a scenario, the drive tends to treat 3523 * the requests of each of these processes in about the same 3524 * way as the requests of the others, and thus to provide 3525 * each of these processes with about the same throughput 3526 * (which is exactly the desired throughput distribution). 
In
3527 * contrast, in any asymmetric scenario, device idling is
3528 * certainly needed to guarantee that bfqq receives its
3529 * assigned fraction of the device throughput (see [1] for
3530 * details).
3531 * The problem is that idling may significantly reduce
3532 * throughput with certain combinations of types of I/O and
3533 * devices. An important example is sync random I/O, on flash
3534 * storage with command queueing. So, unless bfqq falls in the
3535 * above cases where idling also boosts throughput, it would
3536 * be important to check conditions (i) and (ii) accurately,
3537 * so as to avoid idling when not strictly needed for service
3538 * guarantees.
3539 *
3540 * Unfortunately, it is extremely difficult to thoroughly
3541 * check condition (ii). And, in case there are active groups,
3542 * it becomes very difficult to check condition (i) too. In
3543 * fact, if there are active groups, then, for condition (i)
3544 * to become false, it is enough that an active group contains
3545 * more active processes or sub-groups than some other active
3546 * group. More precisely, for condition (i) to become false because of
3547 * such a group, it is not even necessary that the group is
3548 * (still) active: it is sufficient that, even if the group
3549 * has become inactive, some of its descendant processes still
3550 * have some request already dispatched but still waiting for
3551 * completion. In fact, requests still have to be guaranteed
3552 * their share of the throughput even after being
3553 * dispatched. In this respect, it is easy to show that, if a
3554 * group frequently becomes inactive while still having
3555 * in-flight requests, and if, when this happens, the group is
3556 * not considered in the calculation of whether the scenario
3557 * is asymmetric, then the group may fail to be guaranteed its
3558 * fair share of the throughput (basically because idling may
3559 * not be performed for the descendant processes of the group,
3560 * but it had to be). We address this issue with the
3561 * following bi-modal behavior, implemented in the function
3562 * bfq_symmetric_scenario().
3563 *
3564 * If there are groups with requests waiting for completion
3565 * (as commented above, some of these groups may even be
3566 * already inactive), then the scenario is tagged as
3567 * asymmetric, conservatively, without checking any of the
3568 * conditions (i) and (ii). So the device is idled for bfqq.
3569 * This behavior also matches the fact that groups are created
3570 * exactly when controlling I/O is a primary concern (to
3571 * preserve bandwidth and latency guarantees).
3572 *
3573 * On the opposite end, if there are no groups with requests
3574 * waiting for completion, then only condition (i) is actually
3575 * checked, i.e., provided that condition (i) holds, idling
3576 * is not performed, regardless of whether condition (ii)
3577 * holds. In other words, idling is allowed only if condition (i)
3578 * does not hold, and the device then tends to be
3579 * prevented from queueing many requests, possibly of several
3580 * processes. Since there are no groups with requests waiting
3581 * for completion, then, to check condition (i) it is enough
3582 * to check just whether all the queues with requests waiting
3583 * for completion also have the same weight.
3584 *
3585 * Not checking condition (ii) evidently exposes bfqq to the
3586 * risk of getting less throughput than its fair share.
3587 * However, for queues with the same weight, a further
3588 * mechanism, preemption, mitigates or even eliminates this
3589 * problem. And it does so without consequences on overall
3590 * throughput. This mechanism and its benefits are explained
3591 * in the next three paragraphs.
3592 *
3593 * Even if a queue, say Q, is expired when it remains idle, Q
3594 * can still preempt the new in-service queue if the next
3595 * request of Q arrives soon (see the comments on
3596 * bfq_bfqq_update_budg_for_activation). If all queues and
3597 * groups have the same weight, this form of preemption,
3598 * combined with the hole-recovery heuristic described in the
3599 * comments on function bfq_bfqq_update_budg_for_activation,
3600 * is enough to preserve a correct bandwidth distribution in
3601 * the mid term, even without idling. In fact, even if not
3602 * idling allows the internal queues of the device to contain
3603 * many requests, and thus to reorder requests, we can rather
3604 * safely assume that the internal scheduler still preserves a
3605 * minimum of mid-term fairness.
3606 *
3607 * More precisely, this preemption-based, idleless approach
3608 * provides fairness in terms of IOPS, and not sectors per
3609 * second. This can be seen with a simple example. Suppose
3610 * that there are two queues with the same weight, but that
3611 * the first queue receives requests of 8 sectors, while the
3612 * second queue receives requests of 1024 sectors. In
3613 * addition, suppose that each of the two queues contains at
3614 * most one request at a time, which implies that each queue
3615 * always remains idle after it is served. Finally, after
3616 * remaining idle, each queue very quickly receives a new
3617 * request. It follows that the two queues are served
3618 * alternately, preempting each other if needed. This
3619 * implies that, although both queues have the same weight,
3620 * the queue with large requests receives a service that is
3621 * 1024/8 times as high as the service received by the other
3622 * queue.
3623 *
3624 * The motivation for using preemption instead of idling (for
3625 * queues with the same weight) is that, by not idling,
3626 * service guarantees are preserved (completely or at least in
3627 * part) without sacrificing any throughput. And, if
3628 * there is no active group, then the primary expectation for
3629 * this device is probably a high throughput.
3630 *
3631 * We are now left only with explaining the additional
3632 * compound condition that is checked below for deciding
3633 * whether the scenario is asymmetric. To explain this
3634 * compound condition, we need to add that the function
3635 * bfq_symmetric_scenario checks the weights of only
3636 * non-weight-raised queues, for efficiency reasons (see
3637 * comments on bfq_weights_tree_add()). Then the fact that
3638 * bfqq is weight-raised is checked explicitly here. More
3639 * precisely, the compound condition below takes into account
3640 * also the fact that, even if bfqq is being weight-raised,
3641 * the scenario is still symmetric if all queues with requests
3642 * waiting for completion happen to be
3643 * weight-raised. Actually, we should be even more precise
3644 * here, and differentiate between interactive weight raising
3645 * and soft real-time weight raising.
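 * (Purely as an illustrative restatement of that compound condition,
 * which is computed a few lines below: the scenario is treated as
 * asymmetric either if bfqq is weight-raised while at least one busy
 * queue is not, i.e., if
 *   bfqq->wr_coeff > 1 && bfqd->wr_busy_queues < bfqd->busy_queues,
 * or if bfq_symmetric_scenario() itself reports asymmetry among the
 * non-weight-raised queues. In all other cases the scenario is treated
 * as symmetric, and idling is not imposed for service-guarantee
 * reasons.)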
3646 *
3647 * As a side note, it is worth considering that the above
3648 * device-idling countermeasures may however fail in the
3649 * following unlucky scenario: if idling is (correctly)
3650 * disabled in a time period during which all symmetry
3651 * sub-conditions hold, and hence the device is allowed to
3652 * enqueue many requests, but at some later point in time some
3653 * sub-condition stops holding, then it may become impossible
3654 * to let requests be served in the desired order until all
3655 * the requests already queued in the device have been served.
3656 */ 3657 asymmetric_scenario = (bfqq->wr_coeff > 1 &&
3658 bfqd->wr_busy_queues < bfqd->busy_queues) ||
3659 !bfq_symmetric_scenario(bfqd);
3660 3661 /*
3662 * Finally, there is a case where maximizing throughput is the
3663 * best choice even if it may cause unfairness toward
3664 * bfqq. Such a case is when bfqq became active in a burst of
3665 * queue activations. Queues that became active during a large
3666 * burst benefit only from throughput, as discussed in the
3667 * comments on bfq_handle_burst. Thus, if bfqq became active
3668 * in a burst and not idling the device maximizes throughput,
3669 * then the device must not be idled, because not idling the
3670 * device provides bfqq and all other queues in the burst with
3671 * maximum benefit. Combining this and the above case, we can
3672 * now establish when idling is actually needed to preserve
3673 * service guarantees.
3674 */ 3675 idling_needed_for_service_guarantees =
3676 asymmetric_scenario && !bfq_bfqq_in_large_burst(bfqq);
3677 3678 /*
3679 * We now have all the components we need to compute the
3680 * return value of the function, which is true only if idling
3681 * either boosts the throughput (without issues), or is
3682 * necessary to preserve service guarantees.
3683 */ 3684 return idling_boosts_thr_without_issues ||
3685 idling_needed_for_service_guarantees;
3686 }
3687 3688 /*
3689 * If the in-service queue is empty but the function bfq_better_to_idle
3690 * returns true, then:
3691 * 1) the queue must remain in service and cannot be expired, and
3692 * 2) the device must be idled to wait for the possible arrival of a new
3693 * request for the queue.
3694 * See the comments on the function bfq_better_to_idle for the reasons
3695 * why performing device idling is the best choice to boost the throughput
3696 * and preserve service guarantees when bfq_better_to_idle itself
3697 * returns true.
3698 */ 3699 static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
3700 { 3701 return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_better_to_idle(bfqq);
3702 }
3703 3704 static struct bfq_queue *bfq_choose_bfqq_for_injection(struct bfq_data *bfqd)
3705 { 3706 struct bfq_queue *bfqq;
3707 3708 /*
3709 * A linear search; but, with a high probability, very few
3710 * steps are needed to find a candidate queue, i.e., a queue
3711 * with enough budget left for its next request. In fact:
3712 * - BFQ dynamically updates the budget of every queue so as
3713 * to accommodate the expected backlog of the queue;
3714 * - if a queue gets all its requests dispatched as injected
3715 * service, then the queue is removed from the active list
3716 * (and re-added only if it gets new requests, but with
3717 * enough budget for its new backlog).
3718 */ 3719 list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) 3720 if (!RB_EMPTY_ROOT(&bfqq->sort_list) && 3721 bfq_serv_to_charge(bfqq->next_rq, bfqq) <= 3722 bfq_bfqq_budget_left(bfqq)) 3723 return bfqq; 3724 3725 return NULL; 3726 } 3727 3728 /* 3729 * Select a queue for service. If we have a current queue in service, 3730 * check whether to continue servicing it, or retrieve and set a new one. 3731 */ 3732 static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) 3733 { 3734 struct bfq_queue *bfqq; 3735 struct request *next_rq; 3736 enum bfqq_expiration reason = BFQQE_BUDGET_TIMEOUT; 3737 3738 bfqq = bfqd->in_service_queue; 3739 if (!bfqq) 3740 goto new_queue; 3741 3742 bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue"); 3743 3744 /* 3745 * Do not expire bfqq for budget timeout if bfqq may be about 3746 * to enjoy device idling. The reason why, in this case, we 3747 * prevent bfqq from expiring is the same as in the comments 3748 * on the case where bfq_bfqq_must_idle() returns true, in 3749 * bfq_completed_request(). 3750 */ 3751 if (bfq_may_expire_for_budg_timeout(bfqq) && 3752 !bfq_bfqq_must_idle(bfqq)) 3753 goto expire; 3754 3755 check_queue: 3756 /* 3757 * This loop is rarely executed more than once. Even when it 3758 * happens, it is much more convenient to re-execute this loop 3759 * than to return NULL and trigger a new dispatch to get a 3760 * request served. 3761 */ 3762 next_rq = bfqq->next_rq; 3763 /* 3764 * If bfqq has requests queued and it has enough budget left to 3765 * serve them, keep the queue, otherwise expire it. 3766 */ 3767 if (next_rq) { 3768 if (bfq_serv_to_charge(next_rq, bfqq) > 3769 bfq_bfqq_budget_left(bfqq)) { 3770 /* 3771 * Expire the queue for budget exhaustion, 3772 * which makes sure that the next budget is 3773 * enough to serve the next request, even if 3774 * it comes from the fifo expired path. 3775 */ 3776 reason = BFQQE_BUDGET_EXHAUSTED; 3777 goto expire; 3778 } else { 3779 /* 3780 * The idle timer may be pending because we may 3781 * not disable disk idling even when a new request 3782 * arrives. 3783 */ 3784 if (bfq_bfqq_wait_request(bfqq)) { 3785 /* 3786 * If we get here: 1) at least a new request 3787 * has arrived but we have not disabled the 3788 * timer because the request was too small, 3789 * 2) then the block layer has unplugged 3790 * the device, causing the dispatch to be 3791 * invoked. 3792 * 3793 * Since the device is unplugged, now the 3794 * requests are probably large enough to 3795 * provide a reasonable throughput. 3796 * So we disable idling. 3797 */ 3798 bfq_clear_bfqq_wait_request(bfqq); 3799 hrtimer_try_to_cancel(&bfqd->idle_slice_timer); 3800 } 3801 goto keep_queue; 3802 } 3803 } 3804 3805 /* 3806 * No requests pending. However, if the in-service queue is idling 3807 * for a new request, or has requests waiting for a completion and 3808 * may idle after their completion, then keep it anyway. 3809 * 3810 * Yet, to boost throughput, inject service from other queues if 3811 * possible. 
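 * (Worked illustration of the injection limit checked right below,
 * using the value that bfq_init_bfqq() currently assigns to
 * inject_coeff, namely 1: injection is then allowed while
 *   injected_service * 1 < entity.service * 10,
 * i.e., while the service injected on behalf of other queues is less
 * than ten times the service received by bfqq itself. Injected service
 * may thus amount to at most about 10/11, i.e., roughly 90%, of the
 * overall service provided during bfqq's slot, consistently with the
 * "up to 90%" comment on inject_coeff in bfq_init_bfqq().)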
3812 */ 3813 if (bfq_bfqq_wait_request(bfqq) || 3814 (bfqq->dispatched != 0 && bfq_better_to_idle(bfqq))) { 3815 if (bfq_bfqq_injectable(bfqq) && 3816 bfqq->injected_service * bfqq->inject_coeff < 3817 bfqq->entity.service * 10) 3818 bfqq = bfq_choose_bfqq_for_injection(bfqd); 3819 else 3820 bfqq = NULL; 3821 3822 goto keep_queue; 3823 } 3824 3825 reason = BFQQE_NO_MORE_REQUESTS; 3826 expire: 3827 bfq_bfqq_expire(bfqd, bfqq, false, reason); 3828 new_queue: 3829 bfqq = bfq_set_in_service_queue(bfqd); 3830 if (bfqq) { 3831 bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue"); 3832 goto check_queue; 3833 } 3834 keep_queue: 3835 if (bfqq) 3836 bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue"); 3837 else 3838 bfq_log(bfqd, "select_queue: no queue returned"); 3839 3840 return bfqq; 3841 } 3842 3843 static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq) 3844 { 3845 struct bfq_entity *entity = &bfqq->entity; 3846 3847 if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */ 3848 bfq_log_bfqq(bfqd, bfqq, 3849 "raising period dur %u/%u msec, old coeff %u, w %d(%d)", 3850 jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish), 3851 jiffies_to_msecs(bfqq->wr_cur_max_time), 3852 bfqq->wr_coeff, 3853 bfqq->entity.weight, bfqq->entity.orig_weight); 3854 3855 if (entity->prio_changed) 3856 bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change"); 3857 3858 /* 3859 * If the queue was activated in a burst, or too much 3860 * time has elapsed from the beginning of this 3861 * weight-raising period, then end weight raising. 3862 */ 3863 if (bfq_bfqq_in_large_burst(bfqq)) 3864 bfq_bfqq_end_wr(bfqq); 3865 else if (time_is_before_jiffies(bfqq->last_wr_start_finish + 3866 bfqq->wr_cur_max_time)) { 3867 if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time || 3868 time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt + 3869 bfq_wr_duration(bfqd))) 3870 bfq_bfqq_end_wr(bfqq); 3871 else { 3872 switch_back_to_interactive_wr(bfqq, bfqd); 3873 bfqq->entity.prio_changed = 1; 3874 } 3875 } 3876 if (bfqq->wr_coeff > 1 && 3877 bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time && 3878 bfqq->service_from_wr > max_service_from_wr) { 3879 /* see comments on max_service_from_wr */ 3880 bfq_bfqq_end_wr(bfqq); 3881 } 3882 } 3883 /* 3884 * To improve latency (for this or other queues), immediately 3885 * update weight both if it must be raised and if it must be 3886 * lowered. Since, entity may be on some active tree here, and 3887 * might have a pending change of its ioprio class, invoke 3888 * next function with the last parameter unset (see the 3889 * comments on the function). 3890 */ 3891 if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1)) 3892 __bfq_entity_update_weight_prio(bfq_entity_service_tree(entity), 3893 entity, false); 3894 } 3895 3896 /* 3897 * Dispatch next request from bfqq. 
3898 */ 3899 static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd, 3900 struct bfq_queue *bfqq) 3901 { 3902 struct request *rq = bfqq->next_rq; 3903 unsigned long service_to_charge; 3904 3905 service_to_charge = bfq_serv_to_charge(rq, bfqq); 3906 3907 bfq_bfqq_served(bfqq, service_to_charge); 3908 3909 bfq_dispatch_remove(bfqd->queue, rq); 3910 3911 if (bfqq != bfqd->in_service_queue) { 3912 if (likely(bfqd->in_service_queue)) 3913 bfqd->in_service_queue->injected_service += 3914 bfq_serv_to_charge(rq, bfqq); 3915 3916 goto return_rq; 3917 } 3918 3919 /* 3920 * If weight raising has to terminate for bfqq, then next 3921 * function causes an immediate update of bfqq's weight, 3922 * without waiting for next activation. As a consequence, on 3923 * expiration, bfqq will be timestamped as if has never been 3924 * weight-raised during this service slot, even if it has 3925 * received part or even most of the service as a 3926 * weight-raised queue. This inflates bfqq's timestamps, which 3927 * is beneficial, as bfqq is then more willing to leave the 3928 * device immediately to possible other weight-raised queues. 3929 */ 3930 bfq_update_wr_data(bfqd, bfqq); 3931 3932 /* 3933 * Expire bfqq, pretending that its budget expired, if bfqq 3934 * belongs to CLASS_IDLE and other queues are waiting for 3935 * service. 3936 */ 3937 if (!(bfqd->busy_queues > 1 && bfq_class_idle(bfqq))) 3938 goto return_rq; 3939 3940 bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED); 3941 3942 return_rq: 3943 return rq; 3944 } 3945 3946 static bool bfq_has_work(struct blk_mq_hw_ctx *hctx) 3947 { 3948 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; 3949 3950 /* 3951 * Avoiding lock: a race on bfqd->busy_queues should cause at 3952 * most a call to dispatch for nothing 3953 */ 3954 return !list_empty_careful(&bfqd->dispatch) || 3955 bfqd->busy_queues > 0; 3956 } 3957 3958 static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) 3959 { 3960 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; 3961 struct request *rq = NULL; 3962 struct bfq_queue *bfqq = NULL; 3963 3964 if (!list_empty(&bfqd->dispatch)) { 3965 rq = list_first_entry(&bfqd->dispatch, struct request, 3966 queuelist); 3967 list_del_init(&rq->queuelist); 3968 3969 bfqq = RQ_BFQQ(rq); 3970 3971 if (bfqq) { 3972 /* 3973 * Increment counters here, because this 3974 * dispatch does not follow the standard 3975 * dispatch flow (where counters are 3976 * incremented) 3977 */ 3978 bfqq->dispatched++; 3979 3980 goto inc_in_driver_start_rq; 3981 } 3982 3983 /* 3984 * We exploit the bfq_finish_requeue_request hook to 3985 * decrement rq_in_driver, but 3986 * bfq_finish_requeue_request will not be invoked on 3987 * this request. So, to avoid unbalance, just start 3988 * this request, without incrementing rq_in_driver. As 3989 * a negative consequence, rq_in_driver is deceptively 3990 * lower than it should be while this request is in 3991 * service. This may cause bfq_schedule_dispatch to be 3992 * invoked uselessly. 3993 * 3994 * As for implementing an exact solution, the 3995 * bfq_finish_requeue_request hook, if defined, is 3996 * probably invoked also on this request. So, by 3997 * exploiting this hook, we could 1) increment 3998 * rq_in_driver here, and 2) decrement it in 3999 * bfq_finish_requeue_request. Such a solution would 4000 * let the value of the counter be always accurate, 4001 * but it would entail using an extra interface 4002 * function. 
This cost seems higher than the benefit,
4003 * given that the frequency of non-elevator-private
4004 * requests is very low.
4005 */ 4006 goto start_rq;
4007 }
4008 4009 bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);
4010 4011 if (bfqd->busy_queues == 0)
4012 goto exit;
4013 4014 /*
4015 * Force device to serve one request at a time if
4016 * strict_guarantees is true. Forcing this service scheme is
4017 * currently the ONLY way to guarantee that the request
4018 * service order enforced by the scheduler is respected by a
4019 * queueing device. Otherwise the device is free even to make
4020 * some unlucky request wait for as long as the device
4021 * wishes.
4022 *
4023 * Of course, serving one request at a time may cause loss of
4024 * throughput.
4025 */ 4026 if (bfqd->strict_guarantees && bfqd->rq_in_driver > 0)
4027 goto exit;
4028 4029 bfqq = bfq_select_queue(bfqd);
4030 if (!bfqq)
4031 goto exit;
4032 4033 rq = bfq_dispatch_rq_from_bfqq(bfqd, bfqq);
4034 4035 if (rq) {
4036 inc_in_driver_start_rq:
4037 bfqd->rq_in_driver++;
4038 start_rq:
4039 rq->rq_flags |= RQF_STARTED;
4040 }
4041 exit:
4042 return rq;
4043 }
4044 4045 #if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
4046 static void bfq_update_dispatch_stats(struct request_queue *q,
4047 struct request *rq,
4048 struct bfq_queue *in_serv_queue,
4049 bool idle_timer_disabled)
4050 { 4051 struct bfq_queue *bfqq = rq ? RQ_BFQQ(rq) : NULL;
4052 4053 if (!idle_timer_disabled && !bfqq)
4054 return;
4055 4056 /*
4057 * rq and bfqq are guaranteed to exist until this function
4058 * ends, for the following reasons. First, rq can be
4059 * dispatched to the device, and then can be completed and
4060 * freed, only after this function ends. Second, rq cannot be
4061 * merged (and thus freed because of a merge) any longer,
4062 * because it has already started. Thus rq cannot be freed
4063 * before this function ends, and, since rq has a reference to
4064 * bfqq, the same guarantee holds for bfqq too.
4065 *
4066 * In addition, the following queue lock guarantees that
4067 * bfqq_group(bfqq) exists as well.
4068 */ 4069 spin_lock_irq(&q->queue_lock);
4070 if (idle_timer_disabled)
4071 /*
4072 * Since the idle timer has been disabled,
4073 * in_serv_queue contained some request when
4074 * __bfq_dispatch_request was invoked above, which
4075 * implies that rq was picked exactly from
4076 * in_serv_queue. Thus in_serv_queue == bfqq, and is
4077 * therefore guaranteed to exist because of the above
4078 * arguments.
*/ 4080 bfqg_stats_update_idle_time(bfqq_group(in_serv_queue));
4081 if (bfqq) {
4082 struct bfq_group *bfqg = bfqq_group(bfqq);
4083 4084 bfqg_stats_update_avg_queue_size(bfqg);
4085 bfqg_stats_set_start_empty_time(bfqg);
4086 bfqg_stats_update_io_remove(bfqg, rq->cmd_flags);
4087 }
4088 spin_unlock_irq(&q->queue_lock);
4089 }
4090 #else
4091 static inline void bfq_update_dispatch_stats(struct request_queue *q,
4092 struct request *rq,
4093 struct bfq_queue *in_serv_queue,
4094 bool idle_timer_disabled) {}
4095 #endif
4096 4097 static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
4098 { 4099 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
4100 struct request *rq;
4101 struct bfq_queue *in_serv_queue;
4102 bool waiting_rq, idle_timer_disabled;
4103 4104 spin_lock_irq(&bfqd->lock);
4105 4106 in_serv_queue = bfqd->in_service_queue;
4107 waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
4108 4109 rq = __bfq_dispatch_request(hctx);
4110 4111 idle_timer_disabled =
4112 waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
4113 4114 spin_unlock_irq(&bfqd->lock);
4115 4116 bfq_update_dispatch_stats(hctx->queue, rq, in_serv_queue,
4117 idle_timer_disabled);
4118 4119 return rq;
4120 }
4121 4122 /*
4123 * Task holds one reference to the queue, dropped when task exits. Each rq
4124 * in-flight on this queue also holds a reference, dropped when rq is freed.
4125 *
4126 * Scheduler lock must be held here. Recall not to use bfqq after calling
4127 * this function on it.
4128 */ 4129 void bfq_put_queue(struct bfq_queue *bfqq)
4130 { 4131 #ifdef CONFIG_BFQ_GROUP_IOSCHED
4132 struct bfq_group *bfqg = bfqq_group(bfqq);
4133 #endif
4134 4135 if (bfqq->bfqd)
4136 bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d",
4137 bfqq, bfqq->ref);
4138 4139 bfqq->ref--;
4140 if (bfqq->ref)
4141 return;
4142 4143 if (!hlist_unhashed(&bfqq->burst_list_node)) {
4144 hlist_del_init(&bfqq->burst_list_node);
4145 /*
4146 * Also decrement the burst size after the removal, if the
4147 * process associated with bfqq is exiting, and thus
4148 * does not contribute to the burst any longer. This
4149 * decrement helps filter out false positives of large
4150 * bursts, when some short-lived process (often due to
4151 * the execution of commands by some service) happens
4152 * to start and exit while a complex application is
4153 * starting, and thus spawning several processes that
4154 * do I/O (and that *must not* be treated as a large
4155 * burst, see comments on bfq_handle_burst).
4156 *
4157 * In particular, the decrement is performed only if:
4158 * 1) bfqq is not a merged queue, because, if it is,
4159 * then this free of bfqq is not triggered by the exit
4160 * of the process bfqq is associated with, but exactly
4161 * by the fact that bfqq has just been merged.
4162 * 2) burst_size is greater than 0, to handle
4163 * unbalanced decrements. Unbalanced decrements may
4164 * happen in the following case: bfqq is inserted into
4165 * the current burst list--without incrementing
4166 * burst_size--because of a split, but the current
4167 * burst list is not the burst list bfqq belonged to
4168 * (see comments on the case of a split in
4169 * bfq_set_request).
4170 */ 4171 if (bfqq->bic && bfqq->bfqd->burst_size > 0) 4172 bfqq->bfqd->burst_size--; 4173 } 4174 4175 kmem_cache_free(bfq_pool, bfqq); 4176 #ifdef CONFIG_BFQ_GROUP_IOSCHED 4177 bfqg_and_blkg_put(bfqg); 4178 #endif 4179 } 4180 4181 static void bfq_put_cooperator(struct bfq_queue *bfqq) 4182 { 4183 struct bfq_queue *__bfqq, *next; 4184 4185 /* 4186 * If this queue was scheduled to merge with another queue, be 4187 * sure to drop the reference taken on that queue (and others in 4188 * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs. 4189 */ 4190 __bfqq = bfqq->new_bfqq; 4191 while (__bfqq) { 4192 if (__bfqq == bfqq) 4193 break; 4194 next = __bfqq->new_bfqq; 4195 bfq_put_queue(__bfqq); 4196 __bfqq = next; 4197 } 4198 } 4199 4200 static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) 4201 { 4202 if (bfqq == bfqd->in_service_queue) { 4203 __bfq_bfqq_expire(bfqd, bfqq); 4204 bfq_schedule_dispatch(bfqd); 4205 } 4206 4207 bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref); 4208 4209 bfq_put_cooperator(bfqq); 4210 4211 bfq_put_queue(bfqq); /* release process reference */ 4212 } 4213 4214 static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync) 4215 { 4216 struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync); 4217 struct bfq_data *bfqd; 4218 4219 if (bfqq) 4220 bfqd = bfqq->bfqd; /* NULL if scheduler already exited */ 4221 4222 if (bfqq && bfqd) { 4223 unsigned long flags; 4224 4225 spin_lock_irqsave(&bfqd->lock, flags); 4226 bfq_exit_bfqq(bfqd, bfqq); 4227 bic_set_bfqq(bic, NULL, is_sync); 4228 spin_unlock_irqrestore(&bfqd->lock, flags); 4229 } 4230 } 4231 4232 static void bfq_exit_icq(struct io_cq *icq) 4233 { 4234 struct bfq_io_cq *bic = icq_to_bic(icq); 4235 4236 bfq_exit_icq_bfqq(bic, true); 4237 bfq_exit_icq_bfqq(bic, false); 4238 } 4239 4240 /* 4241 * Update the entity prio values; note that the new values will not 4242 * be used until the next (re)activation. 4243 */ 4244 static void 4245 bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic) 4246 { 4247 struct task_struct *tsk = current; 4248 int ioprio_class; 4249 struct bfq_data *bfqd = bfqq->bfqd; 4250 4251 if (!bfqd) 4252 return; 4253 4254 ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio); 4255 switch (ioprio_class) { 4256 default: 4257 dev_err(bfqq->bfqd->queue->backing_dev_info->dev, 4258 "bfq: bad prio class %d\n", ioprio_class); 4259 /* fall through */ 4260 case IOPRIO_CLASS_NONE: 4261 /* 4262 * No prio set, inherit CPU scheduling settings. 
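 * (For instance, and only as an illustration of the two helpers called
 * right below: a task in the normal scheduling class running at nice 0
 * is mapped by task_nice_ioclass() to IOPRIO_CLASS_BE and by
 * task_nice_ioprio() to priority 4.)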
4263 */ 4264 bfqq->new_ioprio = task_nice_ioprio(tsk); 4265 bfqq->new_ioprio_class = task_nice_ioclass(tsk); 4266 break; 4267 case IOPRIO_CLASS_RT: 4268 bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio); 4269 bfqq->new_ioprio_class = IOPRIO_CLASS_RT; 4270 break; 4271 case IOPRIO_CLASS_BE: 4272 bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio); 4273 bfqq->new_ioprio_class = IOPRIO_CLASS_BE; 4274 break; 4275 case IOPRIO_CLASS_IDLE: 4276 bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE; 4277 bfqq->new_ioprio = 7; 4278 break; 4279 } 4280 4281 if (bfqq->new_ioprio >= IOPRIO_BE_NR) { 4282 pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n", 4283 bfqq->new_ioprio); 4284 bfqq->new_ioprio = IOPRIO_BE_NR; 4285 } 4286 4287 bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio); 4288 bfqq->entity.prio_changed = 1; 4289 } 4290 4291 static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, 4292 struct bio *bio, bool is_sync, 4293 struct bfq_io_cq *bic); 4294 4295 static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio) 4296 { 4297 struct bfq_data *bfqd = bic_to_bfqd(bic); 4298 struct bfq_queue *bfqq; 4299 int ioprio = bic->icq.ioc->ioprio; 4300 4301 /* 4302 * This condition may trigger on a newly created bic, be sure to 4303 * drop the lock before returning. 4304 */ 4305 if (unlikely(!bfqd) || likely(bic->ioprio == ioprio)) 4306 return; 4307 4308 bic->ioprio = ioprio; 4309 4310 bfqq = bic_to_bfqq(bic, false); 4311 if (bfqq) { 4312 /* release process reference on this queue */ 4313 bfq_put_queue(bfqq); 4314 bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic); 4315 bic_set_bfqq(bic, bfqq, false); 4316 } 4317 4318 bfqq = bic_to_bfqq(bic, true); 4319 if (bfqq) 4320 bfq_set_next_ioprio_data(bfqq, bic); 4321 } 4322 4323 static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, 4324 struct bfq_io_cq *bic, pid_t pid, int is_sync) 4325 { 4326 RB_CLEAR_NODE(&bfqq->entity.rb_node); 4327 INIT_LIST_HEAD(&bfqq->fifo); 4328 INIT_HLIST_NODE(&bfqq->burst_list_node); 4329 4330 bfqq->ref = 0; 4331 bfqq->bfqd = bfqd; 4332 4333 if (bic) 4334 bfq_set_next_ioprio_data(bfqq, bic); 4335 4336 if (is_sync) { 4337 /* 4338 * No need to mark as has_short_ttime if in 4339 * idle_class, because no device idling is performed 4340 * for queues in idle class 4341 */ 4342 if (!bfq_class_idle(bfqq)) 4343 /* tentatively mark as has_short_ttime */ 4344 bfq_mark_bfqq_has_short_ttime(bfqq); 4345 bfq_mark_bfqq_sync(bfqq); 4346 bfq_mark_bfqq_just_created(bfqq); 4347 /* 4348 * Aggressively inject a lot of service: up to 90%. 4349 * This coefficient remains constant during bfqq life, 4350 * but this behavior might be changed, after enough 4351 * testing and tuning. 
4352 */ 4353 bfqq->inject_coeff = 1; 4354 } else 4355 bfq_clear_bfqq_sync(bfqq); 4356 4357 /* set end request to minus infinity from now */ 4358 bfqq->ttime.last_end_request = ktime_get_ns() + 1; 4359 4360 bfq_mark_bfqq_IO_bound(bfqq); 4361 4362 bfqq->pid = pid; 4363 4364 /* Tentative initial value to trade off between thr and lat */ 4365 bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3; 4366 bfqq->budget_timeout = bfq_smallest_from_now(); 4367 4368 bfqq->wr_coeff = 1; 4369 bfqq->last_wr_start_finish = jiffies; 4370 bfqq->wr_start_at_switch_to_srt = bfq_smallest_from_now(); 4371 bfqq->split_time = bfq_smallest_from_now(); 4372 4373 /* 4374 * To not forget the possibly high bandwidth consumed by a 4375 * process/queue in the recent past, 4376 * bfq_bfqq_softrt_next_start() returns a value at least equal 4377 * to the current value of bfqq->soft_rt_next_start (see 4378 * comments on bfq_bfqq_softrt_next_start). Set 4379 * soft_rt_next_start to now, to mean that bfqq has consumed 4380 * no bandwidth so far. 4381 */ 4382 bfqq->soft_rt_next_start = jiffies; 4383 4384 /* first request is almost certainly seeky */ 4385 bfqq->seek_history = 1; 4386 } 4387 4388 static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd, 4389 struct bfq_group *bfqg, 4390 int ioprio_class, int ioprio) 4391 { 4392 switch (ioprio_class) { 4393 case IOPRIO_CLASS_RT: 4394 return &bfqg->async_bfqq[0][ioprio]; 4395 case IOPRIO_CLASS_NONE: 4396 ioprio = IOPRIO_NORM; 4397 /* fall through */ 4398 case IOPRIO_CLASS_BE: 4399 return &bfqg->async_bfqq[1][ioprio]; 4400 case IOPRIO_CLASS_IDLE: 4401 return &bfqg->async_idle_bfqq; 4402 default: 4403 return NULL; 4404 } 4405 } 4406 4407 static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, 4408 struct bio *bio, bool is_sync, 4409 struct bfq_io_cq *bic) 4410 { 4411 const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio); 4412 const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio); 4413 struct bfq_queue **async_bfqq = NULL; 4414 struct bfq_queue *bfqq; 4415 struct bfq_group *bfqg; 4416 4417 rcu_read_lock(); 4418 4419 bfqg = bfq_find_set_group(bfqd, __bio_blkcg(bio)); 4420 if (!bfqg) { 4421 bfqq = &bfqd->oom_bfqq; 4422 goto out; 4423 } 4424 4425 if (!is_sync) { 4426 async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class, 4427 ioprio); 4428 bfqq = *async_bfqq; 4429 if (bfqq) 4430 goto out; 4431 } 4432 4433 bfqq = kmem_cache_alloc_node(bfq_pool, 4434 GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN, 4435 bfqd->queue->node); 4436 4437 if (bfqq) { 4438 bfq_init_bfqq(bfqd, bfqq, bic, current->pid, 4439 is_sync); 4440 bfq_init_entity(&bfqq->entity, bfqg); 4441 bfq_log_bfqq(bfqd, bfqq, "allocated"); 4442 } else { 4443 bfqq = &bfqd->oom_bfqq; 4444 bfq_log_bfqq(bfqd, bfqq, "using oom bfqq"); 4445 goto out; 4446 } 4447 4448 /* 4449 * Pin the queue now that it's allocated, scheduler exit will 4450 * prune it. 4451 */ 4452 if (async_bfqq) { 4453 bfqq->ref++; /* 4454 * Extra group reference, w.r.t. sync 4455 * queue. This extra reference is removed 4456 * only if bfqq->bfqg disappears, to 4457 * guarantee that this queue is not freed 4458 * until its group goes away. 
4459 */ 4460 bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d", 4461 bfqq, bfqq->ref); 4462 *async_bfqq = bfqq; 4463 } 4464 4465 out: 4466 bfqq->ref++; /* get a process reference to this queue */ 4467 bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref); 4468 rcu_read_unlock(); 4469 return bfqq; 4470 } 4471 4472 static void bfq_update_io_thinktime(struct bfq_data *bfqd, 4473 struct bfq_queue *bfqq) 4474 { 4475 struct bfq_ttime *ttime = &bfqq->ttime; 4476 u64 elapsed = ktime_get_ns() - bfqq->ttime.last_end_request; 4477 4478 elapsed = min_t(u64, elapsed, 2ULL * bfqd->bfq_slice_idle); 4479 4480 ttime->ttime_samples = (7*bfqq->ttime.ttime_samples + 256) / 8; 4481 ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8); 4482 ttime->ttime_mean = div64_ul(ttime->ttime_total + 128, 4483 ttime->ttime_samples); 4484 } 4485 4486 static void 4487 bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq, 4488 struct request *rq) 4489 { 4490 bfqq->seek_history <<= 1; 4491 bfqq->seek_history |= 4492 get_sdist(bfqq->last_request_pos, rq) > BFQQ_SEEK_THR && 4493 (!blk_queue_nonrot(bfqd->queue) || 4494 blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT); 4495 } 4496 4497 static void bfq_update_has_short_ttime(struct bfq_data *bfqd, 4498 struct bfq_queue *bfqq, 4499 struct bfq_io_cq *bic) 4500 { 4501 bool has_short_ttime = true; 4502 4503 /* 4504 * No need to update has_short_ttime if bfqq is async or in 4505 * idle io prio class, or if bfq_slice_idle is zero, because 4506 * no device idling is performed for bfqq in this case. 4507 */ 4508 if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq) || 4509 bfqd->bfq_slice_idle == 0) 4510 return; 4511 4512 /* Idle window just restored, statistics are meaningless. */ 4513 if (time_is_after_eq_jiffies(bfqq->split_time + 4514 bfqd->bfq_wr_min_idle_time)) 4515 return; 4516 4517 /* Think time is infinite if no process is linked to 4518 * bfqq. Otherwise check average think time to 4519 * decide whether to mark as has_short_ttime 4520 */ 4521 if (atomic_read(&bic->icq.ioc->active_ref) == 0 || 4522 (bfq_sample_valid(bfqq->ttime.ttime_samples) && 4523 bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle)) 4524 has_short_ttime = false; 4525 4526 bfq_log_bfqq(bfqd, bfqq, "update_has_short_ttime: has_short_ttime %d", 4527 has_short_ttime); 4528 4529 if (has_short_ttime) 4530 bfq_mark_bfqq_has_short_ttime(bfqq); 4531 else 4532 bfq_clear_bfqq_has_short_ttime(bfqq); 4533 } 4534 4535 /* 4536 * Called when a new fs request (rq) is added to bfqq. Check if there's 4537 * something we should do about it. 4538 */ 4539 static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, 4540 struct request *rq) 4541 { 4542 struct bfq_io_cq *bic = RQ_BIC(rq); 4543 4544 if (rq->cmd_flags & REQ_META) 4545 bfqq->meta_pending++; 4546 4547 bfq_update_io_thinktime(bfqd, bfqq); 4548 bfq_update_has_short_ttime(bfqd, bfqq, bic); 4549 bfq_update_io_seektime(bfqd, bfqq, rq); 4550 4551 bfq_log_bfqq(bfqd, bfqq, 4552 "rq_enqueued: has_short_ttime=%d (seeky %d)", 4553 bfq_bfqq_has_short_ttime(bfqq), BFQQ_SEEKY(bfqq)); 4554 4555 bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); 4556 4557 if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) { 4558 bool small_req = bfqq->queued[rq_is_sync(rq)] == 1 && 4559 blk_rq_sectors(rq) < 32; 4560 bool budget_timeout = bfq_bfqq_budget_timeout(bfqq); 4561 4562 /* 4563 * There is just this request queued: if the request 4564 * is small and the queue is not to be expired, then 4565 * just exit. 
4566 * 4567 * In this way, if the device is being idled to wait 4568 * for a new request from the in-service queue, we 4569 * avoid unplugging the device and committing the 4570 * device to serve just a small request. On the 4571 * contrary, we wait for the block layer to decide 4572 * when to unplug the device: hopefully, new requests 4573 * will be merged to this one quickly, then the device 4574 * will be unplugged and larger requests will be 4575 * dispatched. 4576 */ 4577 if (small_req && !budget_timeout) 4578 return; 4579 4580 /* 4581 * A large enough request arrived, or the queue is to 4582 * be expired: in both cases disk idling is to be 4583 * stopped, so clear wait_request flag and reset 4584 * timer. 4585 */ 4586 bfq_clear_bfqq_wait_request(bfqq); 4587 hrtimer_try_to_cancel(&bfqd->idle_slice_timer); 4588 4589 /* 4590 * The queue is not empty, because a new request just 4591 * arrived. Hence we can safely expire the queue, in 4592 * case of budget timeout, without risking that the 4593 * timestamps of the queue are not updated correctly. 4594 * See [1] for more details. 4595 */ 4596 if (budget_timeout) 4597 bfq_bfqq_expire(bfqd, bfqq, false, 4598 BFQQE_BUDGET_TIMEOUT); 4599 } 4600 } 4601 4602 /* returns true if it causes the idle timer to be disabled */ 4603 static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq) 4604 { 4605 struct bfq_queue *bfqq = RQ_BFQQ(rq), 4606 *new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true); 4607 bool waiting, idle_timer_disabled = false; 4608 4609 if (new_bfqq) { 4610 if (bic_to_bfqq(RQ_BIC(rq), 1) != bfqq) 4611 new_bfqq = bic_to_bfqq(RQ_BIC(rq), 1); 4612 /* 4613 * Release the request's reference to the old bfqq 4614 * and make sure one is taken to the shared queue. 4615 */ 4616 new_bfqq->allocated++; 4617 bfqq->allocated--; 4618 new_bfqq->ref++; 4619 /* 4620 * If the bic associated with the process 4621 * issuing this request still points to bfqq 4622 * (and thus has not been already redirected 4623 * to new_bfqq or even some other bfq_queue), 4624 * then complete the merge and redirect it to 4625 * new_bfqq. 4626 */ 4627 if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq) 4628 bfq_merge_bfqqs(bfqd, RQ_BIC(rq), 4629 bfqq, new_bfqq); 4630 4631 bfq_clear_bfqq_just_created(bfqq); 4632 /* 4633 * rq is about to be enqueued into new_bfqq, 4634 * release rq reference on bfqq 4635 */ 4636 bfq_put_queue(bfqq); 4637 rq->elv.priv[1] = new_bfqq; 4638 bfqq = new_bfqq; 4639 } 4640 4641 waiting = bfqq && bfq_bfqq_wait_request(bfqq); 4642 bfq_add_request(rq); 4643 idle_timer_disabled = waiting && !bfq_bfqq_wait_request(bfqq); 4644 4645 rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)]; 4646 list_add_tail(&rq->queuelist, &bfqq->fifo); 4647 4648 bfq_rq_enqueued(bfqd, bfqq, rq); 4649 4650 return idle_timer_disabled; 4651 } 4652 4653 #if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP) 4654 static void bfq_update_insert_stats(struct request_queue *q, 4655 struct bfq_queue *bfqq, 4656 bool idle_timer_disabled, 4657 unsigned int cmd_flags) 4658 { 4659 if (!bfqq) 4660 return; 4661 4662 /* 4663 * bfqq still exists, because it can disappear only after 4664 * either it is merged with another queue, or the process it 4665 * is associated with exits. But both actions must be taken by 4666 * the same process currently executing this flow of 4667 * instructions. 4668 * 4669 * In addition, the following queue lock guarantees that 4670 * bfqq_group(bfqq) exists as well. 
4671 */ 4672 spin_lock_irq(&q->queue_lock); 4673 bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags); 4674 if (idle_timer_disabled) 4675 bfqg_stats_update_idle_time(bfqq_group(bfqq)); 4676 spin_unlock_irq(&q->queue_lock); 4677 } 4678 #else 4679 static inline void bfq_update_insert_stats(struct request_queue *q, 4680 struct bfq_queue *bfqq, 4681 bool idle_timer_disabled, 4682 unsigned int cmd_flags) {} 4683 #endif 4684 4685 static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, 4686 bool at_head) 4687 { 4688 struct request_queue *q = hctx->queue; 4689 struct bfq_data *bfqd = q->elevator->elevator_data; 4690 struct bfq_queue *bfqq; 4691 bool idle_timer_disabled = false; 4692 unsigned int cmd_flags; 4693 4694 spin_lock_irq(&bfqd->lock); 4695 if (blk_mq_sched_try_insert_merge(q, rq)) { 4696 spin_unlock_irq(&bfqd->lock); 4697 return; 4698 } 4699 4700 spin_unlock_irq(&bfqd->lock); 4701 4702 blk_mq_sched_request_inserted(rq); 4703 4704 spin_lock_irq(&bfqd->lock); 4705 bfqq = bfq_init_rq(rq); 4706 if (at_head || blk_rq_is_passthrough(rq)) { 4707 if (at_head) 4708 list_add(&rq->queuelist, &bfqd->dispatch); 4709 else 4710 list_add_tail(&rq->queuelist, &bfqd->dispatch); 4711 } else { /* bfqq is assumed to be non null here */ 4712 idle_timer_disabled = __bfq_insert_request(bfqd, rq); 4713 /* 4714 * Update bfqq, because, if a queue merge has occurred 4715 * in __bfq_insert_request, then rq has been 4716 * redirected into a new queue. 4717 */ 4718 bfqq = RQ_BFQQ(rq); 4719 4720 if (rq_mergeable(rq)) { 4721 elv_rqhash_add(q, rq); 4722 if (!q->last_merge) 4723 q->last_merge = rq; 4724 } 4725 } 4726 4727 /* 4728 * Cache cmd_flags before releasing scheduler lock, because rq 4729 * may disappear afterwards (for example, because of a request 4730 * merge). 4731 */ 4732 cmd_flags = rq->cmd_flags; 4733 4734 spin_unlock_irq(&bfqd->lock); 4735 4736 bfq_update_insert_stats(q, bfqq, idle_timer_disabled, 4737 cmd_flags); 4738 } 4739 4740 static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx, 4741 struct list_head *list, bool at_head) 4742 { 4743 while (!list_empty(list)) { 4744 struct request *rq; 4745 4746 rq = list_first_entry(list, struct request, queuelist); 4747 list_del_init(&rq->queuelist); 4748 bfq_insert_request(hctx, rq, at_head); 4749 } 4750 } 4751 4752 static void bfq_update_hw_tag(struct bfq_data *bfqd) 4753 { 4754 bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver, 4755 bfqd->rq_in_driver); 4756 4757 if (bfqd->hw_tag == 1) 4758 return; 4759 4760 /* 4761 * This sample is valid if the number of outstanding requests 4762 * is large enough to allow a queueing behavior. Note that the 4763 * sum is not exact, as it's not taking into account deactivated 4764 * requests. 4765 */ 4766 if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD) 4767 return; 4768 4769 if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES) 4770 return; 4771 4772 bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD; 4773 bfqd->max_rq_in_driver = 0; 4774 bfqd->hw_tag_samples = 0; 4775 } 4776 4777 static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd) 4778 { 4779 u64 now_ns; 4780 u32 delta_us; 4781 4782 bfq_update_hw_tag(bfqd); 4783 4784 bfqd->rq_in_driver--; 4785 bfqq->dispatched--; 4786 4787 if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) { 4788 /* 4789 * Set budget_timeout (which we overload to store the 4790 * time at which the queue remains with no backlog and 4791 * no outstanding request; used by the weight-raising 4792 * mechanism). 
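 *
 * For instance, the interactive weight-raising heuristic presumably
 * compares this timestamp, plus bfqd->bfq_wr_min_idle_time, with
 * jiffies (see bfq_bfqq_idle_for_long_time()) to decide whether the
 * queue has then remained idle long enough to deserve being
 * weight-raised again when its next request arrives.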
4793 */ 4794 bfqq->budget_timeout = jiffies; 4795 4796 bfq_weights_tree_remove(bfqd, bfqq); 4797 } 4798 4799 now_ns = ktime_get_ns(); 4800 4801 bfqq->ttime.last_end_request = now_ns; 4802 4803 /* 4804 * Using us instead of ns, to get a reasonable precision in 4805 * computing rate in next check. 4806 */ 4807 delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC); 4808 4809 /* 4810 * If the request took rather long to complete, and, according 4811 * to the maximum request size recorded, this completion latency 4812 * implies that the request was certainly served at a very low 4813 * rate (less than 1M sectors/sec), then the whole observation 4814 * interval that lasts up to this time instant cannot be a 4815 * valid time interval for computing a new peak rate. Invoke 4816 * bfq_update_rate_reset to have the following three steps 4817 * taken: 4818 * - close the observation interval at the last (previous) 4819 * request dispatch or completion 4820 * - compute rate, if possible, for that observation interval 4821 * - reset to zero samples, which will trigger a proper 4822 * re-initialization of the observation interval on next 4823 * dispatch 4824 */ 4825 if (delta_us > BFQ_MIN_TT/NSEC_PER_USEC && 4826 (bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us < 4827 1UL<<(BFQ_RATE_SHIFT - 10)) 4828 bfq_update_rate_reset(bfqd, NULL); 4829 bfqd->last_completion = now_ns; 4830 4831 /* 4832 * If we are waiting to discover whether the request pattern 4833 * of the task associated with the queue is actually 4834 * isochronous, and both requisites for this condition to hold 4835 * are now satisfied, then compute soft_rt_next_start (see the 4836 * comments on the function bfq_bfqq_softrt_next_start()). We 4837 * schedule this delayed check when bfqq expires, if it still 4838 * has in-flight requests. 4839 */ 4840 if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 && 4841 RB_EMPTY_ROOT(&bfqq->sort_list)) 4842 bfqq->soft_rt_next_start = 4843 bfq_bfqq_softrt_next_start(bfqd, bfqq); 4844 4845 /* 4846 * If this is the in-service queue, check if it needs to be expired, 4847 * or if we want to idle in case it has no pending requests. 4848 */ 4849 if (bfqd->in_service_queue == bfqq) { 4850 if (bfq_bfqq_must_idle(bfqq)) { 4851 if (bfqq->dispatched == 0) 4852 bfq_arm_slice_timer(bfqd); 4853 /* 4854 * If we get here, we do not expire bfqq, even 4855 * if bfqq was in budget timeout or had no 4856 * more requests (as controlled in the next 4857 * conditional instructions). The reason for 4858 * not expiring bfqq is as follows. 4859 * 4860 * Here bfqq->dispatched > 0 holds, but 4861 * bfq_bfqq_must_idle() returned true. This 4862 * implies that, even if no request arrives 4863 * for bfqq before bfqq->dispatched reaches 0, 4864 * bfqq will, however, not be expired on the 4865 * completion event that causes bfqq->dispatch 4866 * to reach zero. In contrast, on this event, 4867 * bfqq will start enjoying device idling 4868 * (I/O-dispatch plugging). 4869 * 4870 * But, if we expired bfqq here, bfqq would 4871 * not have the chance to enjoy device idling 4872 * when bfqq->dispatched finally reaches 4873 * zero. This would expose bfqq to violation 4874 * of its reserved service guarantees. 
4875 */ 4876 return; 4877 } else if (bfq_may_expire_for_budg_timeout(bfqq)) 4878 bfq_bfqq_expire(bfqd, bfqq, false, 4879 BFQQE_BUDGET_TIMEOUT); 4880 else if (RB_EMPTY_ROOT(&bfqq->sort_list) && 4881 (bfqq->dispatched == 0 || 4882 !bfq_better_to_idle(bfqq))) 4883 bfq_bfqq_expire(bfqd, bfqq, false, 4884 BFQQE_NO_MORE_REQUESTS); 4885 } 4886 4887 if (!bfqd->rq_in_driver) 4888 bfq_schedule_dispatch(bfqd); 4889 } 4890 4891 static void bfq_finish_requeue_request_body(struct bfq_queue *bfqq) 4892 { 4893 bfqq->allocated--; 4894 4895 bfq_put_queue(bfqq); 4896 } 4897 4898 /* 4899 * Handle either a requeue or a finish for rq. The things to do are 4900 * the same in both cases: all references to rq are to be dropped. In 4901 * particular, rq is considered completed from the point of view of 4902 * the scheduler. 4903 */ 4904 static void bfq_finish_requeue_request(struct request *rq) 4905 { 4906 struct bfq_queue *bfqq = RQ_BFQQ(rq); 4907 struct bfq_data *bfqd; 4908 4909 /* 4910 * Requeue and finish hooks are invoked in blk-mq without 4911 * checking whether the involved request is actually still 4912 * referenced in the scheduler. To handle this fact, the 4913 * following two checks make this function exit in case of 4914 * spurious invocations, for which there is nothing to do. 4915 * 4916 * First, check whether rq has nothing to do with an elevator. 4917 */ 4918 if (unlikely(!(rq->rq_flags & RQF_ELVPRIV))) 4919 return; 4920 4921 /* 4922 * rq either is not associated with any icq, or is an already 4923 * requeued request that has not (yet) been re-inserted into 4924 * a bfq_queue. 4925 */ 4926 if (!rq->elv.icq || !bfqq) 4927 return; 4928 4929 bfqd = bfqq->bfqd; 4930 4931 if (rq->rq_flags & RQF_STARTED) 4932 bfqg_stats_update_completion(bfqq_group(bfqq), 4933 rq->start_time_ns, 4934 rq->io_start_time_ns, 4935 rq->cmd_flags); 4936 4937 if (likely(rq->rq_flags & RQF_STARTED)) { 4938 unsigned long flags; 4939 4940 spin_lock_irqsave(&bfqd->lock, flags); 4941 4942 bfq_completed_request(bfqq, bfqd); 4943 bfq_finish_requeue_request_body(bfqq); 4944 4945 spin_unlock_irqrestore(&bfqd->lock, flags); 4946 } else { 4947 /* 4948 * Request rq may be still/already in the scheduler, 4949 * in which case we need to remove it (this should 4950 * never happen in case of requeue). And we cannot 4951 * defer such a check and removal, to avoid 4952 * inconsistencies in the time interval from the end 4953 * of this function to the start of the deferred work. 4954 * This situation seems to occur only in process 4955 * context, as a consequence of a merge. In the 4956 * current version of the code, this implies that the 4957 * lock is held. 4958 */ 4959 4960 if (!RB_EMPTY_NODE(&rq->rb_node)) { 4961 bfq_remove_request(rq->q, rq); 4962 bfqg_stats_update_io_remove(bfqq_group(bfqq), 4963 rq->cmd_flags); 4964 } 4965 bfq_finish_requeue_request_body(bfqq); 4966 } 4967 4968 /* 4969 * Reset private fields. In case of a requeue, this allows 4970 * this function to correctly do nothing if it is spuriously 4971 * invoked again on this same request (see the check at the 4972 * beginning of the function). Probably, a better general 4973 * design would be to prevent blk-mq from invoking the requeue 4974 * or finish hooks of an elevator, for a request that is not 4975 * referred by that elevator. 4976 * 4977 * Resetting the following fields would break the 4978 * request-insertion logic if rq is re-inserted into a bfq 4979 * internal queue, without a re-preparation. 
Here we assume 4980 * that re-insertions of requeued requests, without 4981 * re-preparation, can happen only for passthrough or at_head 4982 * requests (which are not re-inserted into bfq internal 4983 * queues). 4984 */ 4985 rq->elv.priv[0] = NULL; 4986 rq->elv.priv[1] = NULL; 4987 } 4988 4989 /* 4990 * Returns NULL if a new bfqq should be allocated, or the old bfqq if this 4991 * was the last process referring to that bfqq. 4992 */ 4993 static struct bfq_queue * 4994 bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq) 4995 { 4996 bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue"); 4997 4998 if (bfqq_process_refs(bfqq) == 1) { 4999 bfqq->pid = current->pid; 5000 bfq_clear_bfqq_coop(bfqq); 5001 bfq_clear_bfqq_split_coop(bfqq); 5002 return bfqq; 5003 } 5004 5005 bic_set_bfqq(bic, NULL, 1); 5006 5007 bfq_put_cooperator(bfqq); 5008 5009 bfq_put_queue(bfqq); 5010 return NULL; 5011 } 5012 5013 static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd, 5014 struct bfq_io_cq *bic, 5015 struct bio *bio, 5016 bool split, bool is_sync, 5017 bool *new_queue) 5018 { 5019 struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync); 5020 5021 if (likely(bfqq && bfqq != &bfqd->oom_bfqq)) 5022 return bfqq; 5023 5024 if (new_queue) 5025 *new_queue = true; 5026 5027 if (bfqq) 5028 bfq_put_queue(bfqq); 5029 bfqq = bfq_get_queue(bfqd, bio, is_sync, bic); 5030 5031 bic_set_bfqq(bic, bfqq, is_sync); 5032 if (split && is_sync) { 5033 if ((bic->was_in_burst_list && bfqd->large_burst) || 5034 bic->saved_in_large_burst) 5035 bfq_mark_bfqq_in_large_burst(bfqq); 5036 else { 5037 bfq_clear_bfqq_in_large_burst(bfqq); 5038 if (bic->was_in_burst_list) 5039 /* 5040 * If bfqq was in the current 5041 * burst list before being 5042 * merged, then we have to add 5043 * it back. And we do not need 5044 * to increase burst_size, as 5045 * we did not decrement 5046 * burst_size when we removed 5047 * bfqq from the burst list as 5048 * a consequence of a merge 5049 * (see comments in 5050 * bfq_put_queue). In this 5051 * respect, it would be rather 5052 * costly to know whether the 5053 * current burst list is still 5054 * the same burst list from 5055 * which bfqq was removed on 5056 * the merge. To avoid this 5057 * cost, if bfqq was in a 5058 * burst list, then we add 5059 * bfqq to the current burst 5060 * list without any further 5061 * check. This can cause 5062 * inappropriate insertions, 5063 * but rarely enough to not 5064 * harm the detection of large 5065 * bursts significantly. 5066 */ 5067 hlist_add_head(&bfqq->burst_list_node, 5068 &bfqd->burst_list); 5069 } 5070 bfqq->split_time = jiffies; 5071 } 5072 5073 return bfqq; 5074 } 5075 5076 /* 5077 * Only reset private fields. The actual request preparation will be 5078 * performed by bfq_init_rq, when rq is either inserted or merged. See 5079 * comments on bfq_init_rq for the reason behind this delayed 5080 * preparation. 5081 */ 5082 static void bfq_prepare_request(struct request *rq, struct bio *bio) 5083 { 5084 /* 5085 * Regardless of whether we have an icq attached, we have to 5086 * clear the scheduler pointers, as they might point to 5087 * previously allocated bic/bfqq structs. 5088 */ 5089 rq->elv.priv[0] = rq->elv.priv[1] = NULL; 5090 } 5091 5092 /* 5093 * If needed, init rq, allocate bfq data structures associated with 5094 * rq, and increment reference counters in the destination bfq_queue 5095 * for rq. Return the destination bfq_queue for rq, or NULL if rq is 5096 * not associated with any bfq_queue.
5097 * 5098 * This function is invoked by the functions that perform rq insertion 5099 * or merging. One may have expected the above preparation operations 5100 * to be performed in bfq_prepare_request, and not delayed to when rq 5101 * is inserted or merged. The rationale behind this delayed 5102 * preparation is that, after the prepare_request hook is invoked for 5103 * rq, rq may still be transformed into a request with no icq, i.e., a 5104 * request not associated with any queue. No bfq hook is invoked to 5105 * signal this transformation. As a consequence, should these 5106 * preparation operations be performed when the prepare_request hook 5107 * is invoked, and should rq be transformed one moment later, bfq 5108 * would end up in an inconsistent state, because it would have 5109 * incremented some queue counters for an rq destined to 5110 * transformation, without any chance to correctly lower these 5111 * counters back. In contrast, no transformation can still happen for 5112 * rq after rq has been inserted or merged. So, it is safe to execute 5113 * these preparation operations when rq is finally inserted or merged. 5114 */ 5115 static struct bfq_queue *bfq_init_rq(struct request *rq) 5116 { 5117 struct request_queue *q = rq->q; 5118 struct bio *bio = rq->bio; 5119 struct bfq_data *bfqd = q->elevator->elevator_data; 5120 struct bfq_io_cq *bic; 5121 const int is_sync = rq_is_sync(rq); 5122 struct bfq_queue *bfqq; 5123 bool new_queue = false; 5124 bool bfqq_already_existing = false, split = false; 5125 5126 if (unlikely(!rq->elv.icq)) 5127 return NULL; 5128 5129 /* 5130 * Assuming that elv.priv[1] is set only if everything is set 5131 * for this rq. This holds true, because this function is 5132 * invoked only for insertion or merging, and, after such 5133 * events, a request cannot be manipulated any longer before 5134 * being removed from bfq. 5135 */ 5136 if (rq->elv.priv[1]) 5137 return rq->elv.priv[1]; 5138 5139 bic = icq_to_bic(rq->elv.icq); 5140 5141 bfq_check_ioprio_change(bic, bio); 5142 5143 bfq_bic_update_cgroup(bic, bio); 5144 5145 bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync, 5146 &new_queue); 5147 5148 if (likely(!new_queue)) { 5149 /* If the queue was seeky for too long, break it apart. */ 5150 if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) { 5151 bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq"); 5152 5153 /* Update bic before losing reference to bfqq */ 5154 if (bfq_bfqq_in_large_burst(bfqq)) 5155 bic->saved_in_large_burst = true; 5156 5157 bfqq = bfq_split_bfqq(bic, bfqq); 5158 split = true; 5159 5160 if (!bfqq) 5161 bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, 5162 true, is_sync, 5163 NULL); 5164 else 5165 bfqq_already_existing = true; 5166 } 5167 } 5168 5169 bfqq->allocated++; 5170 bfqq->ref++; 5171 bfq_log_bfqq(bfqd, bfqq, "get_request %p: bfqq %p, %d", 5172 rq, bfqq, bfqq->ref); 5173 5174 rq->elv.priv[0] = bic; 5175 rq->elv.priv[1] = bfqq; 5176 5177 /* 5178 * If a bfq_queue has only one process reference, it is owned 5179 * by only this bic: we can then set bfqq->bic = bic. In 5180 * addition, if the queue has also just been split, we have to 5181 * resume its state. 5182 */ 5183 if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) { 5184 bfqq->bic = bic; 5185 if (split) { 5186 /* 5187 * The queue has just been split from a shared 5188 * queue: restore the idle window and the 5189 * possible weight raising period.
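 *
 * (Presumably, "restore" here means reloading the bic->saved_*
 * fields, e.g. the saved weight-raising coefficient, that were
 * stored when the queues got merged; see bfq_bfqq_resume_state().)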
5190 */ 5191 bfq_bfqq_resume_state(bfqq, bfqd, bic, 5192 bfqq_already_existing); 5193 } 5194 } 5195 5196 if (unlikely(bfq_bfqq_just_created(bfqq))) 5197 bfq_handle_burst(bfqd, bfqq); 5198 5199 return bfqq; 5200 } 5201 5202 static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq) 5203 { 5204 struct bfq_data *bfqd = bfqq->bfqd; 5205 enum bfqq_expiration reason; 5206 unsigned long flags; 5207 5208 spin_lock_irqsave(&bfqd->lock, flags); 5209 bfq_clear_bfqq_wait_request(bfqq); 5210 5211 if (bfqq != bfqd->in_service_queue) { 5212 spin_unlock_irqrestore(&bfqd->lock, flags); 5213 return; 5214 } 5215 5216 if (bfq_bfqq_budget_timeout(bfqq)) 5217 /* 5218 * Also here the queue can be safely expired 5219 * for budget timeout without wasting 5220 * guarantees 5221 */ 5222 reason = BFQQE_BUDGET_TIMEOUT; 5223 else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0) 5224 /* 5225 * The queue may not be empty upon timer expiration, 5226 * because we may not disable the timer when the 5227 * first request of the in-service queue arrives 5228 * during disk idling. 5229 */ 5230 reason = BFQQE_TOO_IDLE; 5231 else 5232 goto schedule_dispatch; 5233 5234 bfq_bfqq_expire(bfqd, bfqq, true, reason); 5235 5236 schedule_dispatch: 5237 spin_unlock_irqrestore(&bfqd->lock, flags); 5238 bfq_schedule_dispatch(bfqd); 5239 } 5240 5241 /* 5242 * Handler of the expiration of the timer running if the in-service queue 5243 * is idling inside its time slice. 5244 */ 5245 static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer) 5246 { 5247 struct bfq_data *bfqd = container_of(timer, struct bfq_data, 5248 idle_slice_timer); 5249 struct bfq_queue *bfqq = bfqd->in_service_queue; 5250 5251 /* 5252 * Theoretical race here: the in-service queue can be NULL or 5253 * different from the queue that was idling if a new request 5254 * arrives for the current queue and there is a full dispatch 5255 * cycle that changes the in-service queue. This can hardly 5256 * happen, but in the worst case we just expire a queue too 5257 * early. 5258 */ 5259 if (bfqq) 5260 bfq_idle_slice_timer_body(bfqq); 5261 5262 return HRTIMER_NORESTART; 5263 } 5264 5265 static void __bfq_put_async_bfqq(struct bfq_data *bfqd, 5266 struct bfq_queue **bfqq_ptr) 5267 { 5268 struct bfq_queue *bfqq = *bfqq_ptr; 5269 5270 bfq_log(bfqd, "put_async_bfqq: %p", bfqq); 5271 if (bfqq) { 5272 bfq_bfqq_move(bfqd, bfqq, bfqd->root_group); 5273 5274 bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d", 5275 bfqq, bfqq->ref); 5276 bfq_put_queue(bfqq); 5277 *bfqq_ptr = NULL; 5278 } 5279 } 5280 5281 /* 5282 * Release all the bfqg references to its async queues. If we are 5283 * deallocating the group these queues may still contain requests, so 5284 * we reparent them to the root cgroup (i.e., the only one that will 5285 * exist for sure until all the requests on a device are gone). 5286 */ 5287 void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg) 5288 { 5289 int i, j; 5290 5291 for (i = 0; i < 2; i++) 5292 for (j = 0; j < IOPRIO_BE_NR; j++) 5293 __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]); 5294 5295 __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq); 5296 } 5297 5298 /* 5299 * See the comments on bfq_limit_depth for the purpose of 5300 * the depths set in the function. Return minimum shallow depth we'll use. 
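 *
 * Purely illustrative example (assumed word size, not from any
 * specific device): with a bitmap word of 1U << 6 = 64 tags, i.e.,
 * bt->sb.shift == 6, the formulas below give
 *   word_depths[0][0] = 64 >> 1       = 32 (50%, async, no wr)
 *   word_depths[0][1] = (64 * 3) >> 2 = 48 (75%, sync writes, no wr)
 *   word_depths[1][0] = (64 * 3) >> 4 = 12 (~18%, async, wr)
 *   word_depths[1][1] = (64 * 6) >> 4 = 24 (~37%, sync writes, wr)
 * so the minimum shallow depth returned here would be 12.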
5301 */ 5302 static unsigned int bfq_update_depths(struct bfq_data *bfqd, 5303 struct sbitmap_queue *bt) 5304 { 5305 unsigned int i, j, min_shallow = UINT_MAX; 5306 5307 /* 5308 * In-word depths if no bfq_queue is being weight-raised: 5309 * leaving 25% of tags only for sync reads. 5310 * 5311 * In next formulas, right-shift the value 5312 * (1U<<bt->sb.shift), instead of computing directly 5313 * (1U<<(bt->sb.shift - something)), to be robust against 5314 * any possible value of bt->sb.shift, without having to 5315 * limit 'something'. 5316 */ 5317 /* no more than 50% of tags for async I/O */ 5318 bfqd->word_depths[0][0] = max((1U << bt->sb.shift) >> 1, 1U); 5319 /* 5320 * no more than 75% of tags for sync writes (25% extra tags 5321 * w.r.t. async I/O, to prevent async I/O from starving sync 5322 * writes) 5323 */ 5324 bfqd->word_depths[0][1] = max(((1U << bt->sb.shift) * 3) >> 2, 1U); 5325 5326 /* 5327 * In-word depths in case some bfq_queue is being weight- 5328 * raised: leaving ~63% of tags for sync reads. This is the 5329 * highest percentage for which, in our tests, application 5330 * start-up times didn't suffer from any regression due to tag 5331 * shortage. 5332 */ 5333 /* no more than ~18% of tags for async I/O */ 5334 bfqd->word_depths[1][0] = max(((1U << bt->sb.shift) * 3) >> 4, 1U); 5335 /* no more than ~37% of tags for sync writes (~20% extra tags) */ 5336 bfqd->word_depths[1][1] = max(((1U << bt->sb.shift) * 6) >> 4, 1U); 5337 5338 for (i = 0; i < 2; i++) 5339 for (j = 0; j < 2; j++) 5340 min_shallow = min(min_shallow, bfqd->word_depths[i][j]); 5341 5342 return min_shallow; 5343 } 5344 5345 static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index) 5346 { 5347 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; 5348 struct blk_mq_tags *tags = hctx->sched_tags; 5349 unsigned int min_shallow; 5350 5351 min_shallow = bfq_update_depths(bfqd, &tags->bitmap_tags); 5352 sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, min_shallow); 5353 return 0; 5354 } 5355 5356 static void bfq_exit_queue(struct elevator_queue *e) 5357 { 5358 struct bfq_data *bfqd = e->elevator_data; 5359 struct bfq_queue *bfqq, *n; 5360 5361 hrtimer_cancel(&bfqd->idle_slice_timer); 5362 5363 spin_lock_irq(&bfqd->lock); 5364 list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list) 5365 bfq_deactivate_bfqq(bfqd, bfqq, false, false); 5366 spin_unlock_irq(&bfqd->lock); 5367 5368 hrtimer_cancel(&bfqd->idle_slice_timer); 5369 5370 #ifdef CONFIG_BFQ_GROUP_IOSCHED 5371 /* release oom-queue reference to root group */ 5372 bfqg_and_blkg_put(bfqd->root_group); 5373 5374 blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq); 5375 #else 5376 spin_lock_irq(&bfqd->lock); 5377 bfq_put_async_queues(bfqd, bfqd->root_group); 5378 kfree(bfqd->root_group); 5379 spin_unlock_irq(&bfqd->lock); 5380 #endif 5381 5382 kfree(bfqd); 5383 } 5384 5385 static void bfq_init_root_group(struct bfq_group *root_group, 5386 struct bfq_data *bfqd) 5387 { 5388 int i; 5389 5390 #ifdef CONFIG_BFQ_GROUP_IOSCHED 5391 root_group->entity.parent = NULL; 5392 root_group->my_entity = NULL; 5393 root_group->bfqd = bfqd; 5394 #endif 5395 root_group->rq_pos_tree = RB_ROOT; 5396 for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) 5397 root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT; 5398 root_group->sched_data.bfq_class_idle_last_service = jiffies; 5399 } 5400 5401 static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) 5402 { 5403 struct bfq_data *bfqd; 5404 struct elevator_queue *eq; 5405 5406 eq = 
elevator_alloc(q, e); 5407 if (!eq) 5408 return -ENOMEM; 5409 5410 bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node); 5411 if (!bfqd) { 5412 kobject_put(&eq->kobj); 5413 return -ENOMEM; 5414 } 5415 eq->elevator_data = bfqd; 5416 5417 spin_lock_irq(&q->queue_lock); 5418 q->elevator = eq; 5419 spin_unlock_irq(&q->queue_lock); 5420 5421 /* 5422 * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues. 5423 * Grab a permanent reference to it, so that the normal code flow 5424 * will not attempt to free it. 5425 */ 5426 bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0); 5427 bfqd->oom_bfqq.ref++; 5428 bfqd->oom_bfqq.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO; 5429 bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE; 5430 bfqd->oom_bfqq.entity.new_weight = 5431 bfq_ioprio_to_weight(bfqd->oom_bfqq.new_ioprio); 5432 5433 /* oom_bfqq does not participate to bursts */ 5434 bfq_clear_bfqq_just_created(&bfqd->oom_bfqq); 5435 5436 /* 5437 * Trigger weight initialization, according to ioprio, at the 5438 * oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio 5439 * class won't be changed any more. 5440 */ 5441 bfqd->oom_bfqq.entity.prio_changed = 1; 5442 5443 bfqd->queue = q; 5444 5445 INIT_LIST_HEAD(&bfqd->dispatch); 5446 5447 hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC, 5448 HRTIMER_MODE_REL); 5449 bfqd->idle_slice_timer.function = bfq_idle_slice_timer; 5450 5451 bfqd->queue_weights_tree = RB_ROOT; 5452 bfqd->num_groups_with_pending_reqs = 0; 5453 5454 INIT_LIST_HEAD(&bfqd->active_list); 5455 INIT_LIST_HEAD(&bfqd->idle_list); 5456 INIT_HLIST_HEAD(&bfqd->burst_list); 5457 5458 bfqd->hw_tag = -1; 5459 5460 bfqd->bfq_max_budget = bfq_default_max_budget; 5461 5462 bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0]; 5463 bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1]; 5464 bfqd->bfq_back_max = bfq_back_max; 5465 bfqd->bfq_back_penalty = bfq_back_penalty; 5466 bfqd->bfq_slice_idle = bfq_slice_idle; 5467 bfqd->bfq_timeout = bfq_timeout; 5468 5469 bfqd->bfq_requests_within_timer = 120; 5470 5471 bfqd->bfq_large_burst_thresh = 8; 5472 bfqd->bfq_burst_interval = msecs_to_jiffies(180); 5473 5474 bfqd->low_latency = true; 5475 5476 /* 5477 * Trade-off between responsiveness and fairness. 5478 */ 5479 bfqd->bfq_wr_coeff = 30; 5480 bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300); 5481 bfqd->bfq_wr_max_time = 0; 5482 bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000); 5483 bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500); 5484 bfqd->bfq_wr_max_softrt_rate = 7000; /* 5485 * Approximate rate required 5486 * to playback or record a 5487 * high-definition compressed 5488 * video. 5489 */ 5490 bfqd->wr_busy_queues = 0; 5491 5492 /* 5493 * Begin by assuming, optimistically, that the device peak 5494 * rate is equal to 2/3 of the highest reference rate. 5495 */ 5496 bfqd->rate_dur_prod = ref_rate[blk_queue_nonrot(bfqd->queue)] * 5497 ref_wr_duration[blk_queue_nonrot(bfqd->queue)]; 5498 bfqd->peak_rate = ref_rate[blk_queue_nonrot(bfqd->queue)] * 2 / 3; 5499 5500 spin_lock_init(&bfqd->lock); 5501 5502 /* 5503 * The invocation of the next bfq_create_group_hierarchy 5504 * function is the head of a chain of function calls 5505 * (bfq_create_group_hierarchy->blkcg_activate_policy-> 5506 * blk_mq_freeze_queue) that may lead to the invocation of the 5507 * has_work hook function. 
For this reason, 5508 * bfq_create_group_hierarchy is invoked only after all 5509 * scheduler data has been initialized, apart from the fields 5510 * that can be initialized only after invoking 5511 * bfq_create_group_hierarchy. This, in particular, enables 5512 * has_work to correctly return false. Of course, to avoid 5513 * other inconsistencies, the blk-mq stack must then refrain 5514 * from invoking further scheduler hooks before this init 5515 * function is finished. 5516 */ 5517 bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node); 5518 if (!bfqd->root_group) 5519 goto out_free; 5520 bfq_init_root_group(bfqd->root_group, bfqd); 5521 bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group); 5522 5523 wbt_disable_default(q); 5524 return 0; 5525 5526 out_free: 5527 kfree(bfqd); 5528 kobject_put(&eq->kobj); 5529 return -ENOMEM; 5530 } 5531 5532 static void bfq_slab_kill(void) 5533 { 5534 kmem_cache_destroy(bfq_pool); 5535 } 5536 5537 static int __init bfq_slab_setup(void) 5538 { 5539 bfq_pool = KMEM_CACHE(bfq_queue, 0); 5540 if (!bfq_pool) 5541 return -ENOMEM; 5542 return 0; 5543 } 5544 5545 static ssize_t bfq_var_show(unsigned int var, char *page) 5546 { 5547 return sprintf(page, "%u\n", var); 5548 } 5549 5550 static int bfq_var_store(unsigned long *var, const char *page) 5551 { 5552 unsigned long new_val; 5553 int ret = kstrtoul(page, 10, &new_val); 5554 5555 if (ret) 5556 return ret; 5557 *var = new_val; 5558 return 0; 5559 } 5560 5561 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ 5562 static ssize_t __FUNC(struct elevator_queue *e, char *page) \ 5563 { \ 5564 struct bfq_data *bfqd = e->elevator_data; \ 5565 u64 __data = __VAR; \ 5566 if (__CONV == 1) \ 5567 __data = jiffies_to_msecs(__data); \ 5568 else if (__CONV == 2) \ 5569 __data = div_u64(__data, NSEC_PER_MSEC); \ 5570 return bfq_var_show(__data, (page)); \ 5571 } 5572 SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 2); 5573 SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 2); 5574 SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0); 5575 SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0); 5576 SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2); 5577 SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0); 5578 SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout, 1); 5579 SHOW_FUNCTION(bfq_strict_guarantees_show, bfqd->strict_guarantees, 0); 5580 SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0); 5581 #undef SHOW_FUNCTION 5582 5583 #define USEC_SHOW_FUNCTION(__FUNC, __VAR) \ 5584 static ssize_t __FUNC(struct elevator_queue *e, char *page) \ 5585 { \ 5586 struct bfq_data *bfqd = e->elevator_data; \ 5587 u64 __data = __VAR; \ 5588 __data = div_u64(__data, NSEC_PER_USEC); \ 5589 return bfq_var_show(__data, (page)); \ 5590 } 5591 USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle); 5592 #undef USEC_SHOW_FUNCTION 5593 5594 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ 5595 static ssize_t \ 5596 __FUNC(struct elevator_queue *e, const char *page, size_t count) \ 5597 { \ 5598 struct bfq_data *bfqd = e->elevator_data; \ 5599 unsigned long __data, __min = (MIN), __max = (MAX); \ 5600 int ret; \ 5601 \ 5602 ret = bfq_var_store(&__data, (page)); \ 5603 if (ret) \ 5604 return ret; \ 5605 if (__data < __min) \ 5606 __data = __min; \ 5607 else if (__data > __max) \ 5608 __data = __max; \ 5609 if (__CONV == 1) \ 5610 *(__PTR) = msecs_to_jiffies(__data); \ 5611 else if (__CONV == 2) \ 5612 *(__PTR) = (u64)__data * 
NSEC_PER_MSEC; \ 5613 else \ 5614 *(__PTR) = __data; \ 5615 return count; \ 5616 } 5617 STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1, 5618 INT_MAX, 2); 5619 STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1, 5620 INT_MAX, 2); 5621 STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0); 5622 STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1, 5623 INT_MAX, 0); 5624 STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2); 5625 #undef STORE_FUNCTION 5626 5627 #define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \ 5628 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)\ 5629 { \ 5630 struct bfq_data *bfqd = e->elevator_data; \ 5631 unsigned long __data, __min = (MIN), __max = (MAX); \ 5632 int ret; \ 5633 \ 5634 ret = bfq_var_store(&__data, (page)); \ 5635 if (ret) \ 5636 return ret; \ 5637 if (__data < __min) \ 5638 __data = __min; \ 5639 else if (__data > __max) \ 5640 __data = __max; \ 5641 *(__PTR) = (u64)__data * NSEC_PER_USEC; \ 5642 return count; \ 5643 } 5644 USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0, 5645 UINT_MAX); 5646 #undef USEC_STORE_FUNCTION 5647 5648 static ssize_t bfq_max_budget_store(struct elevator_queue *e, 5649 const char *page, size_t count) 5650 { 5651 struct bfq_data *bfqd = e->elevator_data; 5652 unsigned long __data; 5653 int ret; 5654 5655 ret = bfq_var_store(&__data, (page)); 5656 if (ret) 5657 return ret; 5658 5659 if (__data == 0) 5660 bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd); 5661 else { 5662 if (__data > INT_MAX) 5663 __data = INT_MAX; 5664 bfqd->bfq_max_budget = __data; 5665 } 5666 5667 bfqd->bfq_user_max_budget = __data; 5668 5669 return count; 5670 } 5671 5672 /* 5673 * Leaving this name to preserve name compatibility with cfq 5674 * parameters, but this timeout is used for both sync and async. 
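 *
 * Illustrative usage (hypothetical value and device name): this
 * tunable is typically exposed as
 * /sys/block/<dev>/queue/iosched/timeout_sync, so, e.g.,
 *   echo 200 > /sys/block/sda/queue/iosched/timeout_sync
 * would set bfq_timeout to 200 ms (converted to jiffies below),
 * applied to both sync and async requests.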
5675 */ 5676 static ssize_t bfq_timeout_sync_store(struct elevator_queue *e, 5677 const char *page, size_t count) 5678 { 5679 struct bfq_data *bfqd = e->elevator_data; 5680 unsigned long __data; 5681 int ret; 5682 5683 ret = bfq_var_store(&__data, (page)); 5684 if (ret) 5685 return ret; 5686 5687 if (__data < 1) 5688 __data = 1; 5689 else if (__data > INT_MAX) 5690 __data = INT_MAX; 5691 5692 bfqd->bfq_timeout = msecs_to_jiffies(__data); 5693 if (bfqd->bfq_user_max_budget == 0) 5694 bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd); 5695 5696 return count; 5697 } 5698 5699 static ssize_t bfq_strict_guarantees_store(struct elevator_queue *e, 5700 const char *page, size_t count) 5701 { 5702 struct bfq_data *bfqd = e->elevator_data; 5703 unsigned long __data; 5704 int ret; 5705 5706 ret = bfq_var_store(&__data, (page)); 5707 if (ret) 5708 return ret; 5709 5710 if (__data > 1) 5711 __data = 1; 5712 if (!bfqd->strict_guarantees && __data == 1 5713 && bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC) 5714 bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC; 5715 5716 bfqd->strict_guarantees = __data; 5717 5718 return count; 5719 } 5720 5721 static ssize_t bfq_low_latency_store(struct elevator_queue *e, 5722 const char *page, size_t count) 5723 { 5724 struct bfq_data *bfqd = e->elevator_data; 5725 unsigned long __data; 5726 int ret; 5727 5728 ret = bfq_var_store(&__data, (page)); 5729 if (ret) 5730 return ret; 5731 5732 if (__data > 1) 5733 __data = 1; 5734 if (__data == 0 && bfqd->low_latency != 0) 5735 bfq_end_wr(bfqd); 5736 bfqd->low_latency = __data; 5737 5738 return count; 5739 } 5740 5741 #define BFQ_ATTR(name) \ 5742 __ATTR(name, 0644, bfq_##name##_show, bfq_##name##_store) 5743 5744 static struct elv_fs_entry bfq_attrs[] = { 5745 BFQ_ATTR(fifo_expire_sync), 5746 BFQ_ATTR(fifo_expire_async), 5747 BFQ_ATTR(back_seek_max), 5748 BFQ_ATTR(back_seek_penalty), 5749 BFQ_ATTR(slice_idle), 5750 BFQ_ATTR(slice_idle_us), 5751 BFQ_ATTR(max_budget), 5752 BFQ_ATTR(timeout_sync), 5753 BFQ_ATTR(strict_guarantees), 5754 BFQ_ATTR(low_latency), 5755 __ATTR_NULL 5756 }; 5757 5758 static struct elevator_type iosched_bfq_mq = { 5759 .ops = { 5760 .limit_depth = bfq_limit_depth, 5761 .prepare_request = bfq_prepare_request, 5762 .requeue_request = bfq_finish_requeue_request, 5763 .finish_request = bfq_finish_requeue_request, 5764 .exit_icq = bfq_exit_icq, 5765 .insert_requests = bfq_insert_requests, 5766 .dispatch_request = bfq_dispatch_request, 5767 .next_request = elv_rb_latter_request, 5768 .former_request = elv_rb_former_request, 5769 .allow_merge = bfq_allow_bio_merge, 5770 .bio_merge = bfq_bio_merge, 5771 .request_merge = bfq_request_merge, 5772 .requests_merged = bfq_requests_merged, 5773 .request_merged = bfq_request_merged, 5774 .has_work = bfq_has_work, 5775 .init_hctx = bfq_init_hctx, 5776 .init_sched = bfq_init_queue, 5777 .exit_sched = bfq_exit_queue, 5778 }, 5779 5780 .icq_size = sizeof(struct bfq_io_cq), 5781 .icq_align = __alignof__(struct bfq_io_cq), 5782 .elevator_attrs = bfq_attrs, 5783 .elevator_name = "bfq", 5784 .elevator_owner = THIS_MODULE, 5785 }; 5786 MODULE_ALIAS("bfq-iosched"); 5787 5788 static int __init bfq_init(void) 5789 { 5790 int ret; 5791 5792 #ifdef CONFIG_BFQ_GROUP_IOSCHED 5793 ret = blkcg_policy_register(&blkcg_policy_bfq); 5794 if (ret) 5795 return ret; 5796 #endif 5797 5798 ret = -ENOMEM; 5799 if (bfq_slab_setup()) 5800 goto err_pol_unreg; 5801 5802 /* 5803 * Times to load large popular applications for the typical 5804 * systems installed on the reference devices (see the 5805 * comments 
before the definition of the next 5806 * array). Actually, we use slightly lower values, as the 5807 * estimated peak rate tends to be smaller than the actual 5808 * peak rate. The reason for this last fact is that estimates 5809 * are computed over much shorter time intervals than the long 5810 * intervals typically used for benchmarking. Why? First, to 5811 * adapt more quickly to variations. Second, because an I/O 5812 * scheduler cannot rely on a peak-rate-evaluation workload to 5813 * be run for a long time. 5814 */ 5815 ref_wr_duration[0] = msecs_to_jiffies(7000); /* actually 8 sec */ 5816 ref_wr_duration[1] = msecs_to_jiffies(2500); /* actually 3 sec */ 5817 5818 ret = elv_register(&iosched_bfq_mq); 5819 if (ret) 5820 goto slab_kill; 5821 5822 return 0; 5823 5824 slab_kill: 5825 bfq_slab_kill(); 5826 err_pol_unreg: 5827 #ifdef CONFIG_BFQ_GROUP_IOSCHED 5828 blkcg_policy_unregister(&blkcg_policy_bfq); 5829 #endif 5830 return ret; 5831 } 5832 5833 static void __exit bfq_exit(void) 5834 { 5835 elv_unregister(&iosched_bfq_mq); 5836 #ifdef CONFIG_BFQ_GROUP_IOSCHED 5837 blkcg_policy_unregister(&blkcg_policy_bfq); 5838 #endif 5839 bfq_slab_kill(); 5840 } 5841 5842 module_init(bfq_init); 5843 module_exit(bfq_exit); 5844 5845 MODULE_AUTHOR("Paolo Valente"); 5846 MODULE_LICENSE("GPL"); 5847 MODULE_DESCRIPTION("MQ Budget Fair Queueing I/O Scheduler"); 5848
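/*
 * Illustrative usage sketch (example device name; standard blk-mq
 * sysfs interface assumed): once this scheduler is built in or
 * loaded, it can typically be selected for a request queue with
 *
 *	echo bfq > /sys/block/sda/queue/scheduler
 *
 * and the set of available schedulers, with the active one shown in
 * brackets, can be read back from the same file.
 */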