// SPDX-License-Identifier: GPL-2.0
/*
 *  MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
 *  for the blk-mq scheduling framework
 *
 *  Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/sbitmap.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/*
 * See Documentation/block/deadline-iosched.rst
 */
static const int read_expire = HZ / 2;	/* max time before a read is submitted. */
static const int write_expire = 5 * HZ;	/* ditto for writes, these limits are SOFT! */
/*
 * Time after which to dispatch lower priority requests even if higher
 * priority requests are pending.
 */
static const int aging_expire = 10 * HZ;
static const int writes_starved = 2;	/* max times reads can starve a write */
static const int fifo_batch = 16;	/* # of sequential requests treated as one
					   by the above parameters. For throughput. */

enum dd_data_dir {
	DD_READ		= READ,
	DD_WRITE	= WRITE,
};

enum { DD_DIR_COUNT = 2 };

enum dd_prio {
	DD_RT_PRIO	= 0,
	DD_BE_PRIO	= 1,
	DD_IDLE_PRIO	= 2,
	DD_PRIO_MAX	= 2,
};

enum { DD_PRIO_COUNT = 3 };

/* I/O statistics per I/O priority. */
struct io_stats_per_prio {
	local_t inserted;
	local_t merged;
	local_t dispatched;
	local_t completed;
};

/* I/O statistics for all I/O priorities (enum dd_prio). */
struct io_stats {
	struct io_stats_per_prio stats[DD_PRIO_COUNT];
};

/*
 * Deadline scheduler data per I/O priority (enum dd_prio). Requests are
 * present on both sort_list[] and fifo_list[].
 */
struct dd_per_prio {
	struct list_head dispatch;
	struct rb_root sort_list[DD_DIR_COUNT];
	struct list_head fifo_list[DD_DIR_COUNT];
	/* Next request in sector-sorted order. Read, write or both are NULL. */
	struct request *next_rq[DD_DIR_COUNT];
};

struct deadline_data {
	/*
	 * run time data
	 */

	struct dd_per_prio per_prio[DD_PRIO_COUNT];

	/* Data direction of latest dispatched request. */
	enum dd_data_dir last_dir;
	unsigned int batching;		/* number of sequential requests made */
	unsigned int starved;		/* times reads have starved writes */

	struct io_stats __percpu *stats;

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	int fifo_expire[DD_DIR_COUNT];
	int fifo_batch;
	int writes_starved;
	int front_merges;
	u32 async_depth;
	int aging_expire;

	spinlock_t lock;
	spinlock_t zone_lock;
};

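/*
 * For instance, a best-effort read inserted without the at_head flag is
 * tracked on both per_prio[DD_BE_PRIO].sort_list[DD_READ] (sector order, used
 * for batching and front merges) and per_prio[DD_BE_PRIO].fifo_list[DD_READ]
 * (arrival order, used for deadline expiry), while at_head insertions go
 * straight onto the per-priority dispatch list.
 */
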
/* Count one event of type 'event_type' and with I/O priority 'prio' */
#define dd_count(dd, event_type, prio) do {				\
	struct io_stats *io_stats = get_cpu_ptr((dd)->stats);		\
									\
	BUILD_BUG_ON(!__same_type((dd), struct deadline_data *));	\
	BUILD_BUG_ON(!__same_type((prio), enum dd_prio));		\
	local_inc(&io_stats->stats[(prio)].event_type);			\
	put_cpu_ptr(io_stats);						\
} while (0)

/*
 * Returns the total number of dd_count(dd, event_type, prio) calls across all
 * CPUs. No locking or barriers since it is fine if the returned sum is slightly
 * outdated.
 */
#define dd_sum(dd, event_type, prio) ({					\
	unsigned int cpu;						\
	u32 sum = 0;							\
									\
	BUILD_BUG_ON(!__same_type((dd), struct deadline_data *));	\
	BUILD_BUG_ON(!__same_type((prio), enum dd_prio));		\
	for_each_present_cpu(cpu)					\
		sum += local_read(&per_cpu_ptr((dd)->stats, cpu)->	\
				  stats[(prio)].event_type);		\
	sum;								\
})

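/*
 * For example, dd_queued() below computes inserted - completed and
 * dd_owned_by_driver() computes dispatched + merged - completed; both sums
 * may be slightly stale since the per-CPU counters are read without
 * synchronization.
 */
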
/* Maps an I/O priority class to a deadline scheduler priority. */
static const enum dd_prio ioprio_class_to_prio[] = {
	[IOPRIO_CLASS_NONE]	= DD_BE_PRIO,
	[IOPRIO_CLASS_RT]	= DD_RT_PRIO,
	[IOPRIO_CLASS_BE]	= DD_BE_PRIO,
	[IOPRIO_CLASS_IDLE]	= DD_IDLE_PRIO,
};

static inline struct rb_root *
deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)
{
	return &per_prio->sort_list[rq_data_dir(rq)];
}

/*
 * Returns the I/O priority class (IOPRIO_CLASS_*) that has been assigned to a
 * request.
 */
static u8 dd_rq_ioclass(struct request *rq)
{
	return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
}

/*
 * get the request after `rq' in sector-sorted order
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
	struct rb_node *node = rb_next(&rq->rb_node);

	if (node)
		return rb_entry_rq(node);

	return NULL;
}

static void
deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
{
	struct rb_root *root = deadline_rb_root(per_prio, rq);

	elv_rb_add(root, rq);
}

static inline void
deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
{
	const enum dd_data_dir data_dir = rq_data_dir(rq);

	if (per_prio->next_rq[data_dir] == rq)
		per_prio->next_rq[data_dir] = deadline_latter_request(rq);

	elv_rb_del(deadline_rb_root(per_prio, rq), rq);
}

/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q,
				    struct dd_per_prio *per_prio,
				    struct request *rq)
{
	list_del_init(&rq->queuelist);

	/*
	 * We might not be on the rbtree, if we are doing an insert merge
	 */
	if (!RB_EMPTY_NODE(&rq->rb_node))
		deadline_del_rq_rb(per_prio, rq);

	elv_rqhash_del(q, rq);
	if (q->last_merge == rq)
		q->last_merge = NULL;
}

static void dd_request_merged(struct request_queue *q, struct request *req,
			      enum elv_merge type)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = dd_rq_ioclass(req);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_per_prio *per_prio = &dd->per_prio[prio];

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (type == ELEVATOR_FRONT_MERGE) {
		elv_rb_del(deadline_rb_root(per_prio, req), req);
		deadline_add_rq_rb(per_prio, req);
	}
}

/*
 * Callback function that is invoked after @next has been merged into @req.
 */
static void dd_merged_requests(struct request_queue *q, struct request *req,
			       struct request *next)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = dd_rq_ioclass(next);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];

	dd_count(dd, merged, prio);

	/*
	 * if next expires before rq, assign its expire time to rq
	 * and move into next position (next will be deleted) in fifo
	 */
	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
		if (time_before((unsigned long)next->fifo_time,
				(unsigned long)req->fifo_time)) {
			list_move(&req->queuelist, &next->queuelist);
			req->fifo_time = next->fifo_time;
		}
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	deadline_remove_request(q, &dd->per_prio[prio], next);
}

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      struct request *rq)
{
	const enum dd_data_dir data_dir = rq_data_dir(rq);

	per_prio->next_rq[data_dir] = deadline_latter_request(rq);

	/*
	 * take it off the sort and fifo list
	 */
	deadline_remove_request(rq->q, per_prio, rq);
}

/* Number of requests queued for a given priority level. */
static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
{
	return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio);
}

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. Requires !list_empty(&per_prio->fifo_list[data_dir]).
 */
static inline int deadline_check_fifo(struct dd_per_prio *per_prio,
				      enum dd_data_dir data_dir)
{
	struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);

	/*
	 * rq is expired!
	 */
	if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
		return 1;

	return 0;
}

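/*
 * For instance, with the defaults above a read inserted at jiffies time t
 * gets rq->fifo_time = t + HZ / 2 and deadline_check_fifo() reports it as
 * expired once jiffies reaches that value; writes get the softer 5 * HZ
 * budget.
 */
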
/*
 * For the specified data direction, return the next request to
 * dispatch using arrival ordered lists.
 */
static struct request *
deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      enum dd_data_dir data_dir)
{
	struct request *rq;
	unsigned long flags;

	if (list_empty(&per_prio->fifo_list[data_dir]))
		return NULL;

	rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
	if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	list_for_each_entry(rq, &per_prio->fifo_list[DD_WRITE], queuelist) {
		if (blk_req_can_dispatch_to_zone(rq))
			goto out;
	}
	rq = NULL;
out:
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using sector position sorted lists.
 */
static struct request *
deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      enum dd_data_dir data_dir)
{
	struct request *rq;
	unsigned long flags;

	rq = per_prio->next_rq[data_dir];
	if (!rq)
		return NULL;

	if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	while (rq) {
		if (blk_req_can_dispatch_to_zone(rq))
			break;
		rq = deadline_latter_request(rq);
	}
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}

/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc and with a start time <= @latest_start_ns.
 */
static struct request *__dd_dispatch_request(struct deadline_data *dd,
					     struct dd_per_prio *per_prio,
					     u64 latest_start_ns)
{
	struct request *rq, *next_rq;
	enum dd_data_dir data_dir;
	enum dd_prio prio;
	u8 ioprio_class;

	lockdep_assert_held(&dd->lock);

	if (!list_empty(&per_prio->dispatch)) {
		rq = list_first_entry(&per_prio->dispatch, struct request,
				      queuelist);
		if (rq->start_time_ns > latest_start_ns)
			return NULL;
		list_del_init(&rq->queuelist);
		goto done;
	}

	/*
	 * batches are currently reads XOR writes
	 */
	rq = deadline_next_request(dd, per_prio, dd->last_dir);
	if (rq && dd->batching < dd->fifo_batch)
		/* we have a next request and are still entitled to batch */
		goto dispatch_request;

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (!list_empty(&per_prio->fifo_list[DD_READ])) {
		BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_READ]));

		if (deadline_fifo_request(dd, per_prio, DD_WRITE) &&
		    (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = DD_READ;

		goto dispatch_find_request;
	}

	/*
	 * there are either no reads or writes have been starved
	 */

	if (!list_empty(&per_prio->fifo_list[DD_WRITE])) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_WRITE]));

		dd->starved = 0;

		data_dir = DD_WRITE;

		goto dispatch_find_request;
	}

	return NULL;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected data_dir
	 */
	next_rq = deadline_next_request(dd, per_prio, data_dir);
	if (deadline_check_fifo(per_prio, data_dir) || !next_rq) {
		/*
		 * A deadline has expired, the last request was in the other
		 * direction, or we have run out of higher-sectored requests.
		 * Start again from the request with the earliest expiry time.
		 */
		rq = deadline_fifo_request(dd, per_prio, data_dir);
	} else {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		rq = next_rq;
	}

	/*
	 * For a zoned block device, if we only have writes queued and none of
	 * them can be dispatched, rq will be NULL.
	 */
	if (!rq)
		return NULL;

	dd->last_dir = data_dir;
	dd->batching = 0;

dispatch_request:
	if (rq->start_time_ns > latest_start_ns)
		return NULL;
	/*
	 * rq is the selected appropriate request.
	 */
	dd->batching++;
	deadline_move_request(dd, per_prio, rq);
done:
	ioprio_class = dd_rq_ioclass(rq);
	prio = ioprio_class_to_prio[ioprio_class];
	dd_count(dd, dispatched, prio);
	/*
	 * If the request needs its target zone locked, do it.
	 */
	blk_req_zone_write_lock(rq);
	rq->rq_flags |= RQF_STARTED;
	return rq;
}

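/*
 * Example with the default fifo_batch of 16: once a data direction has been
 * selected, up to 16 requests may be handed out back to back in sector order
 * (dd->batching < dd->fifo_batch above) before expired deadlines and the
 * writes_starved limit are considered again.
 */
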
/*
 * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests().
 *
 * One confusing aspect here is that we get called for a specific
 * hardware queue, but we may return a request that is for a
 * different hardware queue. This is because mq-deadline has shared
 * state for all hardware queues, in terms of sorting, FIFOs, etc.
 */
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	const u64 now_ns = ktime_get_ns();
	struct request *rq = NULL;
	enum dd_prio prio;

	spin_lock(&dd->lock);
	/*
	 * Start with dispatching requests whose deadline expired more than
	 * aging_expire jiffies ago.
	 */
	for (prio = DD_BE_PRIO; prio <= DD_PRIO_MAX; prio++) {
		rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now_ns -
					   jiffies_to_nsecs(dd->aging_expire));
		if (rq)
			goto unlock;
	}
	/*
	 * Next, dispatch requests in priority order. Ignore lower priority
	 * requests if any higher priority requests are pending.
	 */
	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now_ns);
		if (rq || dd_queued(dd, prio))
			break;
	}

unlock:
	spin_unlock(&dd->lock);

	return rq;
}

/*
 * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
 * function is used by __blk_mq_get_tag().
 */
static void dd_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
{
	struct deadline_data *dd = data->q->elevator->elevator_data;

	/* Do not throttle synchronous reads. */
	if (op_is_sync(op) && !op_is_write(op))
		return;

	/*
	 * Throttle asynchronous requests and writes such that these requests
	 * do not block the allocation of synchronous requests.
	 */
	data->shallow_depth = dd->async_depth;
}

/* Called by blk_mq_update_nr_requests(). */
static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	struct blk_mq_tags *tags = hctx->sched_tags;

	dd->async_depth = max(1UL, 3 * q->nr_requests / 4);

	sbitmap_queue_min_shallow_depth(tags->bitmap_tags, dd->async_depth);
}

/* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	dd_depth_updated(hctx);
	return 0;
}

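/*
 * Example: if q->nr_requests is 64, dd_depth_updated() sets async_depth to
 * 48, so asynchronous requests and writes may use at most 48 of the 64
 * scheduler tags, while synchronous reads are never throttled by
 * dd_limit_depth().
 */
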
static void dd_exit_sched(struct elevator_queue *e)
{
	struct deadline_data *dd = e->elevator_data;
	enum dd_prio prio;

	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		struct dd_per_prio *per_prio = &dd->per_prio[prio];

		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));
	}

	free_percpu(dd->stats);

	kfree(dd);
}

/*
 * initialize elevator private data (deadline_data).
 */
static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct deadline_data *dd;
	struct elevator_queue *eq;
	enum dd_prio prio;
	int ret = -ENOMEM;

	eq = elevator_alloc(q, e);
	if (!eq)
		return ret;

	dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
	if (!dd)
		goto put_eq;

	eq->elevator_data = dd;

	dd->stats = alloc_percpu_gfp(typeof(*dd->stats),
				     GFP_KERNEL | __GFP_ZERO);
	if (!dd->stats)
		goto free_dd;

	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		struct dd_per_prio *per_prio = &dd->per_prio[prio];

		INIT_LIST_HEAD(&per_prio->dispatch);
		INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]);
		INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]);
		per_prio->sort_list[DD_READ] = RB_ROOT;
		per_prio->sort_list[DD_WRITE] = RB_ROOT;
	}
	dd->fifo_expire[DD_READ] = read_expire;
	dd->fifo_expire[DD_WRITE] = write_expire;
	dd->writes_starved = writes_starved;
	dd->front_merges = 1;
	dd->last_dir = DD_WRITE;
	dd->fifo_batch = fifo_batch;
	dd->aging_expire = aging_expire;
	spin_lock_init(&dd->lock);
	spin_lock_init(&dd->zone_lock);

	q->elevator = eq;
	return 0;

free_dd:
	kfree(dd);

put_eq:
	kobject_put(&eq->kobj);
	return ret;
}

/*
 * Try to merge @bio into an existing request. If @bio has been merged into
 * an existing request, store the pointer to that request into *@rq.
 */
static int dd_request_merge(struct request_queue *q, struct request **rq,
			    struct bio *bio)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_per_prio *per_prio = &dd->per_prio[prio];
	sector_t sector = bio_end_sector(bio);
	struct request *__rq;

	if (!dd->front_merges)
		return ELEVATOR_NO_MERGE;

	__rq = elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector);
	if (__rq) {
		BUG_ON(sector != blk_rq_pos(__rq));

		if (elv_bio_merge_ok(__rq, bio)) {
			*rq = __rq;
			return ELEVATOR_FRONT_MERGE;
		}
	}

	return ELEVATOR_NO_MERGE;
}

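/*
 * Example: a 4 KiB bio covering sectors 1000-1007 can be front merged into a
 * queued request that starts at sector 1008, since elv_rb_find() above looks
 * up the sort tree by the bio's end sector; setting front_merges to 0
 * disables this lookup entirely.
 */
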
/*
 * Attempt to merge a bio into an existing request. This function is called
 * before @bio is associated with a request.
 */
static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *free = NULL;
	bool ret;

	spin_lock(&dd->lock);
	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
	spin_unlock(&dd->lock);

	if (free)
		blk_mq_free_request(free);

	return ret;
}

/*
 * add rq to rbtree and fifo
 */
static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			      bool at_head)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	const enum dd_data_dir data_dir = rq_data_dir(rq);
	u16 ioprio = req_get_ioprio(rq);
	u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
	struct dd_per_prio *per_prio;
	enum dd_prio prio;
	LIST_HEAD(free);

	lockdep_assert_held(&dd->lock);

	/*
	 * This may be a requeue of a write request that has locked its
	 * target zone. If it is the case, this releases the zone lock.
	 */
	blk_req_zone_write_unlock(rq);

	prio = ioprio_class_to_prio[ioprio_class];
	dd_count(dd, inserted, prio);

	if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
		blk_mq_free_requests(&free);
		return;
	}

	trace_block_rq_insert(rq);

	per_prio = &dd->per_prio[prio];
	if (at_head) {
		list_add(&rq->queuelist, &per_prio->dispatch);
	} else {
		deadline_add_rq_rb(per_prio, rq);

		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * set expire time and add to fifo list
		 */
		rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
		list_add_tail(&rq->queuelist, &per_prio->fifo_list[data_dir]);
	}
}

/*
 * Called from blk_mq_sched_insert_request() or blk_mq_sched_insert_requests().
 */
static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
			       struct list_head *list, bool at_head)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;

	spin_lock(&dd->lock);
	while (!list_empty(list)) {
		struct request *rq;

		rq = list_first_entry(list, struct request, queuelist);
		list_del_init(&rq->queuelist);
		dd_insert_request(hctx, rq, at_head);
	}
	spin_unlock(&dd->lock);
}

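/*
 * Note: a request inserted with at_head == true lands on the per-priority
 * dispatch list above and thus skips FIFO deadline tracking and sector
 * sorting; __dd_dispatch_request() hands it out before any request from the
 * sort/FIFO lists of the same priority.
 */
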
/*
 * Nothing to do here. This is defined only to ensure that .finish_request
 * method is called upon request completion.
 */
static void dd_prepare_request(struct request *rq)
{
}

/*
 * Callback from inside blk_mq_free_request().
 *
 * For zoned block devices, write unlock the target zone of
 * completed write requests. Do this while holding the zone lock
 * spinlock so that the zone is never unlocked while deadline_fifo_request()
 * or deadline_next_request() are executing. This function is called for
 * all requests, whether or not these requests complete successfully.
 *
 * For a zoned block device, __dd_dispatch_request() may have stopped
 * dispatching requests if all the queued requests are write requests directed
 * at zones that are already locked due to on-going write requests. To ensure
 * write request dispatch progress in this case, mark the queue as needing a
 * restart to ensure that the queue is run again after completion of the
 * request and zones being unlocked.
 */
static void dd_finish_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = dd_rq_ioclass(rq);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_per_prio *per_prio = &dd->per_prio[prio];

	dd_count(dd, completed, prio);

	if (blk_queue_is_zoned(q)) {
		unsigned long flags;

		spin_lock_irqsave(&dd->zone_lock, flags);
		blk_req_zone_write_unlock(rq);
		if (!list_empty(&per_prio->fifo_list[DD_WRITE]))
			blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
		spin_unlock_irqrestore(&dd->zone_lock, flags);
	}
}

static bool dd_has_work_for_prio(struct dd_per_prio *per_prio)
{
	return !list_empty_careful(&per_prio->dispatch) ||
		!list_empty_careful(&per_prio->fifo_list[DD_READ]) ||
		!list_empty_careful(&per_prio->fifo_list[DD_WRITE]);
}

static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	enum dd_prio prio;

	for (prio = 0; prio <= DD_PRIO_MAX; prio++)
		if (dd_has_work_for_prio(&dd->per_prio[prio]))
			return true;

	return false;
}

/*
 * sysfs parts below
 */
#define SHOW_INT(__FUNC, __VAR)						\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct deadline_data *dd = e->elevator_data;			\
									\
	return sysfs_emit(page, "%d\n", __VAR);				\
}
#define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))
SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
SHOW_JIFFIES(deadline_aging_expire_show, dd->aging_expire);
SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
SHOW_INT(deadline_front_merges_show, dd->front_merges);
SHOW_INT(deadline_async_depth_show, dd->async_depth);
SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
#undef SHOW_INT
#undef SHOW_JIFFIES

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data, __ret;						\
									\
	__ret = kstrtoint(page, 0, &__data);				\
	if (__ret < 0)							\
		return __ret;						\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	*(__PTR) = __CONV(__data);					\
	return count;							\
}
#define STORE_INT(__FUNC, __PTR, MIN, MAX)				\
	STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, )
#define STORE_JIFFIES(__FUNC, __PTR, MIN, MAX)				\
	STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies)
STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX);
STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
STORE_JIFFIES(deadline_aging_expire_store, &dd->aging_expire, 0, INT_MAX);
STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
#undef STORE_FUNCTION
#undef STORE_INT
#undef STORE_JIFFIES

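/*
 * Note: the sysfs attributes defined below expose read_expire, write_expire
 * and aging_expire in milliseconds (converted to and from jiffies by the
 * SHOW_JIFFIES/STORE_JIFFIES wrappers above), so the default read_expire of
 * HZ / 2 reads back as 500.
 */
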
#define DD_ATTR(name) \
	__ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
	DD_ATTR(read_expire),
	DD_ATTR(write_expire),
	DD_ATTR(writes_starved),
	DD_ATTR(front_merges),
	DD_ATTR(async_depth),
	DD_ATTR(fifo_batch),
	DD_ATTR(aging_expire),
	__ATTR_NULL
};

#ifdef CONFIG_BLK_DEBUG_FS
#define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name)		\
static void *deadline_##name##_fifo_start(struct seq_file *m,		\
					  loff_t *pos)			\
	__acquires(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	spin_lock(&dd->lock);						\
	return seq_list_start(&per_prio->fifo_list[data_dir], *pos);	\
}									\
									\
static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,	\
					 loff_t *pos)			\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	return seq_list_next(v, &per_prio->fifo_list[data_dir], pos);	\
}									\
									\
static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)	\
	__releases(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_unlock(&dd->lock);						\
}									\
									\
static const struct seq_operations deadline_##name##_fifo_seq_ops = {	\
	.start	= deadline_##name##_fifo_start,				\
	.next	= deadline_##name##_fifo_next,				\
	.stop	= deadline_##name##_fifo_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
};									\
									\
static int deadline_##name##_next_rq_show(void *data,			\
					  struct seq_file *m)		\
{									\
	struct request_queue *q = data;					\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
	struct request *rq = per_prio->next_rq[data_dir];		\
									\
	if (rq)								\
		__blk_mq_debugfs_rq_show(m, rq);			\
	return 0;							\
}

DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2);
#undef DEADLINE_DEBUGFS_DDIR_ATTRS

static int deadline_batching_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->batching);
	return 0;
}

static int deadline_starved_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->starved);
	return 0;
}

static int dd_async_depth_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->async_depth);
	return 0;
}

static int dd_queued_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u %u %u\n", dd_queued(dd, DD_RT_PRIO),
		   dd_queued(dd, DD_BE_PRIO),
		   dd_queued(dd, DD_IDLE_PRIO));
	return 0;
}

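/*
 * Note: in the read0/write0 ... read2/write2 and dispatch0..dispatch2 names
 * used here, the numeric suffix is the priority level (RT, BE, IDLE). The
 * resulting files typically appear under /sys/kernel/debug/block/<disk>/sched/
 * when CONFIG_BLK_DEBUG_FS is enabled.
 */
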
/* Number of requests owned by the block driver for a given priority. */
static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
{
	return dd_sum(dd, dispatched, prio) + dd_sum(dd, merged, prio)
		- dd_sum(dd, completed, prio);
}

static int dd_owned_by_driver_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u %u %u\n", dd_owned_by_driver(dd, DD_RT_PRIO),
		   dd_owned_by_driver(dd, DD_BE_PRIO),
		   dd_owned_by_driver(dd, DD_IDLE_PRIO));
	return 0;
}

#define DEADLINE_DISPATCH_ATTR(prio)					\
static void *deadline_dispatch##prio##_start(struct seq_file *m,	\
					     loff_t *pos)		\
	__acquires(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	spin_lock(&dd->lock);						\
	return seq_list_start(&per_prio->dispatch, *pos);		\
}									\
									\
static void *deadline_dispatch##prio##_next(struct seq_file *m,		\
					    void *v, loff_t *pos)	\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	return seq_list_next(v, &per_prio->dispatch, pos);		\
}									\
									\
static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v)	\
	__releases(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_unlock(&dd->lock);						\
}									\
									\
static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \
	.start	= deadline_dispatch##prio##_start,			\
	.next	= deadline_dispatch##prio##_next,			\
	.stop	= deadline_dispatch##prio##_stop,			\
	.show	= blk_mq_debugfs_rq_show,				\
}

DEADLINE_DISPATCH_ATTR(0);
DEADLINE_DISPATCH_ATTR(1);
DEADLINE_DISPATCH_ATTR(2);
#undef DEADLINE_DISPATCH_ATTR

#define DEADLINE_QUEUE_DDIR_ATTRS(name)					\
	{#name "_fifo_list", 0400,					\
	 .seq_ops = &deadline_##name##_fifo_seq_ops}
#define DEADLINE_NEXT_RQ_ATTR(name)					\
	{#name "_next_rq", 0400, deadline_##name##_next_rq_show}
static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
	DEADLINE_QUEUE_DDIR_ATTRS(read0),
	DEADLINE_QUEUE_DDIR_ATTRS(write0),
	DEADLINE_QUEUE_DDIR_ATTRS(read1),
	DEADLINE_QUEUE_DDIR_ATTRS(write1),
	DEADLINE_QUEUE_DDIR_ATTRS(read2),
	DEADLINE_QUEUE_DDIR_ATTRS(write2),
	DEADLINE_NEXT_RQ_ATTR(read0),
	DEADLINE_NEXT_RQ_ATTR(write0),
	DEADLINE_NEXT_RQ_ATTR(read1),
	DEADLINE_NEXT_RQ_ATTR(write1),
	DEADLINE_NEXT_RQ_ATTR(read2),
	DEADLINE_NEXT_RQ_ATTR(write2),
	{"batching", 0400, deadline_batching_show},
	{"starved", 0400, deadline_starved_show},
	{"async_depth", 0400, dd_async_depth_show},
	{"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops},
	{"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops},
	{"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops},
	{"owned_by_driver", 0400, dd_owned_by_driver_show},
	{"queued", 0400, dd_queued_show},
	{},
};
#undef DEADLINE_QUEUE_DDIR_ATTRS
#endif

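/*
 * Usage note: the scheduler is normally selected per queue via sysfs, e.g.
 * "echo mq-deadline > /sys/block/<disk>/queue/scheduler", after which the
 * tunables declared above appear under /sys/block/<disk>/queue/iosched/.
 */
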
static struct elevator_type mq_deadline = {
	.ops = {
		.depth_updated		= dd_depth_updated,
		.limit_depth		= dd_limit_depth,
		.insert_requests	= dd_insert_requests,
		.dispatch_request	= dd_dispatch_request,
		.prepare_request	= dd_prepare_request,
		.finish_request		= dd_finish_request,
		.next_request		= elv_rb_latter_request,
		.former_request		= elv_rb_former_request,
		.bio_merge		= dd_bio_merge,
		.request_merge		= dd_request_merge,
		.requests_merged	= dd_merged_requests,
		.request_merged		= dd_request_merged,
		.has_work		= dd_has_work,
		.init_sched		= dd_init_sched,
		.exit_sched		= dd_exit_sched,
		.init_hctx		= dd_init_hctx,
	},

#ifdef CONFIG_BLK_DEBUG_FS
	.queue_debugfs_attrs = deadline_queue_debugfs_attrs,
#endif
	.elevator_attrs = deadline_attrs,
	.elevator_name = "mq-deadline",
	.elevator_alias = "deadline",
	.elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
	.elevator_owner = THIS_MODULE,
};
MODULE_ALIAS("mq-deadline-iosched");

static int __init deadline_init(void)
{
	return elv_register(&mq_deadline);
}

static void __exit deadline_exit(void)
{
	elv_unregister(&mq_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe, Damien Le Moal and Bart Van Assche");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MQ deadline IO scheduler");