1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
4 * for the blk-mq scheduling framework
5 *
6 * Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
7 */
8 #include <linux/kernel.h>
9 #include <linux/fs.h>
10 #include <linux/blkdev.h>
11 #include <linux/bio.h>
12 #include <linux/module.h>
13 #include <linux/slab.h>
14 #include <linux/init.h>
15 #include <linux/compiler.h>
16 #include <linux/rbtree.h>
17 #include <linux/sbitmap.h>
18
19 #include <trace/events/block.h>
20
21 #include "elevator.h"
22 #include "blk.h"
23 #include "blk-mq.h"
24 #include "blk-mq-debugfs.h"
25 #include "blk-mq-sched.h"
26
27 /*
28 * See Documentation/block/deadline-iosched.rst
29 */
30 static const int read_expire = HZ / 2; /* max time before a read is submitted. */
31 static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
32 /*
33 * Time after which to dispatch lower priority requests even if higher
34 * priority requests are pending.
35 */
36 static const int prio_aging_expire = 10 * HZ;
37 static const int writes_starved = 2; /* max times reads can starve a write */
38 static const int fifo_batch = 16; /* # of sequential requests treated as one
39 by the above parameters. For throughput. */
40
41 enum dd_data_dir {
42 DD_READ = READ,
43 DD_WRITE = WRITE,
44 };
45
46 enum { DD_DIR_COUNT = 2 };
47
48 enum dd_prio {
49 DD_RT_PRIO = 0,
50 DD_BE_PRIO = 1,
51 DD_IDLE_PRIO = 2,
52 DD_PRIO_MAX = 2,
53 };
54
55 enum { DD_PRIO_COUNT = 3 };
56
57 /*
58 * I/O statistics per I/O priority. It is fine if these counters overflow:
59 * dd_queued() subtracts them with wraparound, which remains correct as long
60 * as the counters are at least as wide as log2(max_outstanding_requests).
61 */
62 struct io_stats_per_prio {
63 uint32_t inserted;
64 uint32_t merged;
65 uint32_t dispatched;
66 atomic_t completed;
67 };
68
69 /*
70 * Deadline scheduler data per I/O priority (enum dd_prio). Requests are
71 * present on both sort_list[] and fifo_list[].
72 */
73 struct dd_per_prio {
74 struct list_head dispatch;
75 struct rb_root sort_list[DD_DIR_COUNT];
76 struct list_head fifo_list[DD_DIR_COUNT];
77 /* Position of the most recently dispatched request. */
78 sector_t latest_pos[DD_DIR_COUNT];
79 struct io_stats_per_prio stats;
80 };
81
82 struct deadline_data {
83 /*
84 * run time data
85 */
86
87 struct dd_per_prio per_prio[DD_PRIO_COUNT];
88
89 /* Data direction of latest dispatched request. */
90 enum dd_data_dir last_dir;
91 unsigned int batching; /* number of sequential requests made */
92 unsigned int starved; /* times reads have starved writes */
93
94 /*
95 * settings that change how the i/o scheduler behaves
96 */
97 int fifo_expire[DD_DIR_COUNT];
98 int fifo_batch;
99 int writes_starved;
100 int front_merges;
101 u32 async_depth;
102 int prio_aging_expire;
103
104 spinlock_t lock;
105 spinlock_t zone_lock;
106 };
107
108 /* Maps an I/O priority class to a deadline scheduler priority. */
109 static const enum dd_prio ioprio_class_to_prio[] = {
110 [IOPRIO_CLASS_NONE] = DD_BE_PRIO,
111 [IOPRIO_CLASS_RT] = DD_RT_PRIO,
112 [IOPRIO_CLASS_BE] = DD_BE_PRIO,
113 [IOPRIO_CLASS_IDLE] = DD_IDLE_PRIO,
114 };
115
116 static inline struct rb_root *
117 deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)
118 {
119 return &per_prio->sort_list[rq_data_dir(rq)];
120 }
121
122 /*
123 * Returns the I/O priority class (IOPRIO_CLASS_*) that has been assigned to a
124 * request.
125 */
126 static u8 dd_rq_ioclass(struct request *rq)
127 {
128 return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
129 }
130
131 /*
132 * get the request before `rq' in sector-sorted order
133 */
134 static inline struct request *
135 deadline_earlier_request(struct request *rq)
136 {
137 struct rb_node *node = rb_prev(&rq->rb_node);
138
139 if (node)
140 return rb_entry_rq(node);
141
142 return NULL;
143 }
144
145 /*
146 * get the request after `rq' in sector-sorted order
147 */
148 static inline struct request *
149 deadline_latter_request(struct request *rq)
150 {
151 struct rb_node *node = rb_next(&rq->rb_node);
152
153 if (node)
154 return rb_entry_rq(node);
155
156 return NULL;
157 }
158
159 /*
160 * Return the first request for which blk_rq_pos() >= @pos. For zoned devices,
161 * return the first request after the start of the zone containing @pos.
162 */
163 static inline struct request *deadline_from_pos(struct dd_per_prio *per_prio,
164 enum dd_data_dir data_dir, sector_t pos)
165 {
166 struct rb_node *node = per_prio->sort_list[data_dir].rb_node;
167 struct request *rq, *res = NULL;
168
169 if (!node)
170 return NULL;
171
172 rq = rb_entry_rq(node);
173 /*
174 * A zoned write may have been requeued with a starting position that
175 * is below that of the most recently dispatched request. Hence, for
176 * zoned writes, start searching from the start of a zone.
177 */
178 if (blk_rq_is_seq_zoned_write(rq))
179 pos = round_down(pos, rq->q->limits.chunk_sectors);
180
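/*
 * Standard rbtree lower-bound search: remember the most recent node with
 * blk_rq_pos() >= pos and descend left to look for a smaller candidate,
 * otherwise descend right.
 */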
181 while (node) {
182 rq = rb_entry_rq(node);
183 if (blk_rq_pos(rq) >= pos) {
184 res = rq;
185 node = node->rb_left;
186 } else {
187 node = node->rb_right;
188 }
189 }
190 return res;
191 }
192
193 static void
194 deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
195 {
196 struct rb_root *root = deadline_rb_root(per_prio, rq);
197
198 elv_rb_add(root, rq);
199 }
200
201 static inline void
202 deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
203 {
204 elv_rb_del(deadline_rb_root(per_prio, rq), rq);
205 }
206
207 /*
208 * remove rq from rbtree and fifo.
209 */
210 static void deadline_remove_request(struct request_queue *q,
211 struct dd_per_prio *per_prio,
212 struct request *rq)
213 {
214 list_del_init(&rq->queuelist);
215
216 /*
217 * We might not be on the rbtree, if we are doing an insert merge
218 */
219 if (!RB_EMPTY_NODE(&rq->rb_node))
220 deadline_del_rq_rb(per_prio, rq);
221
222 elv_rqhash_del(q, rq);
223 if (q->last_merge == rq)
224 q->last_merge = NULL;
225 }
226
227 static void dd_request_merged(struct request_queue *q, struct request *req,
228 enum elv_merge type)
229 {
230 struct deadline_data *dd = q->elevator->elevator_data;
231 const u8 ioprio_class = dd_rq_ioclass(req);
232 const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
233 struct dd_per_prio *per_prio = &dd->per_prio[prio];
234
235 /*
236 * if the merge was a front merge, we need to reposition request
237 */
238 if (type == ELEVATOR_FRONT_MERGE) {
239 elv_rb_del(deadline_rb_root(per_prio, req), req);
240 deadline_add_rq_rb(per_prio, req);
241 }
242 }
243
244 /*
245 * Callback function that is invoked after @next has been merged into @req.
246 */
247 static void dd_merged_requests(struct request_queue *q, struct request *req,
248 struct request *next)
249 {
250 struct deadline_data *dd = q->elevator->elevator_data;
251 const u8 ioprio_class = dd_rq_ioclass(next);
252 const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
253
254 lockdep_assert_held(&dd->lock);
255
256 dd->per_prio[prio].stats.merged++;
257
258 /*
259 * If 'next' expires before 'req', assign the expire time of 'next' to 'req'
260 * and move 'req' into the fifo position of 'next' ('next' will be deleted).
261 */
262 if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
263 if (time_before((unsigned long)next->fifo_time,
264 (unsigned long)req->fifo_time)) {
265 list_move(&req->queuelist, &next->queuelist);
266 req->fifo_time = next->fifo_time;
267 }
268 }
269
270 /*
271 * kill knowledge of next, this one is a goner
272 */
273 deadline_remove_request(q, &dd->per_prio[prio], next);
274 }
275
276 /*
277 * move an entry to dispatch queue
278 */
279 static void
280 deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
281 struct request *rq)
282 {
283 /*
284 * take it off the sort and fifo list
285 */
286 deadline_remove_request(rq->q, per_prio, rq);
287 }
288
289 /* Number of requests queued for a given priority level. */
290 static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
291 {
292 const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;
293
294 lockdep_assert_held(&dd->lock);
295
296 return stats->inserted - atomic_read(&stats->completed);
297 }
298
299 /*
300 * deadline_check_fifo returns true if and only if there are expired requests
301 * in the FIFO list. Requires !list_empty(&per_prio->fifo_list[data_dir]).
302 */
303 static inline bool deadline_check_fifo(struct dd_per_prio *per_prio,
304 enum dd_data_dir data_dir)
305 {
306 struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
307
308 return time_is_before_eq_jiffies((unsigned long)rq->fifo_time);
309 }
310
311 /*
312 * Check if rq has a sequential request preceding it.
313 */
314 static bool deadline_is_seq_write(struct deadline_data *dd, struct request *rq)
315 {
316 struct request *prev = deadline_earlier_request(rq);
317
318 if (!prev)
319 return false;
320
321 return blk_rq_pos(prev) + blk_rq_sectors(prev) == blk_rq_pos(rq);
322 }
323
324 /*
325 * Skip all write requests that are sequential from @rq, even if we cross
326 * a zone boundary.
327 */
328 static struct request *deadline_skip_seq_writes(struct deadline_data *dd,
329 struct request *rq)
330 {
331 sector_t pos = blk_rq_pos(rq);
332
333 do {
334 pos += blk_rq_sectors(rq);
335 rq = deadline_latter_request(rq);
336 } while (rq && blk_rq_pos(rq) == pos);
337
338 return rq;
339 }
340
341 /*
342 * For the specified data direction, return the next request to
343 * dispatch using arrival ordered lists.
344 */
345 static struct request *
346 deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
347 enum dd_data_dir data_dir)
348 {
349 struct request *rq, *rb_rq, *next;
350 unsigned long flags;
351
352 if (list_empty(&per_prio->fifo_list[data_dir]))
353 return NULL;
354
355 rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
356 if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
357 return rq;
358
359 /*
360 * Look for a write request that can be dispatched, that is one with
361 * an unlocked target zone. For some HDDs, breaking a sequential
362 * write stream can lead to lower throughput, so make sure to preserve
363 * sequential write streams, even if that stream crosses into the next
364 * zones and these zones are unlocked.
365 */
366 spin_lock_irqsave(&dd->zone_lock, flags);
367 list_for_each_entry_safe(rq, next, &per_prio->fifo_list[DD_WRITE],
368 queuelist) {
369 /* Check whether a prior request exists for the same zone. */
370 rb_rq = deadline_from_pos(per_prio, data_dir, blk_rq_pos(rq));
371 if (rb_rq && blk_rq_pos(rb_rq) < blk_rq_pos(rq))
372 rq = rb_rq;
373 if (blk_req_can_dispatch_to_zone(rq) &&
374 (blk_queue_nonrot(rq->q) ||
375 !deadline_is_seq_write(dd, rq)))
376 goto out;
377 }
378 rq = NULL;
379 out:
380 spin_unlock_irqrestore(&dd->zone_lock, flags);
381
382 return rq;
383 }
384
385 /*
386 * For the specified data direction, return the next request to
387 * dispatch using sector position sorted lists.
388 */
389 static struct request *
390 deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
391 enum dd_data_dir data_dir)
392 {
393 struct request *rq;
394 unsigned long flags;
395
396 rq = deadline_from_pos(per_prio, data_dir,
397 per_prio->latest_pos[data_dir]);
398 if (!rq)
399 return NULL;
400
401 if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
402 return rq;
403
404 /*
405 * Look for a write request that can be dispatched, that is one with
406 * an unlocked target zone. For some HDDs, breaking a sequential
407 * write stream can lead to lower throughput, so make sure to preserve
408 * sequential write streams, even if that stream crosses into the next
409 * zones and these zones are unlocked.
410 */
411 spin_lock_irqsave(&dd->zone_lock, flags);
412 while (rq) {
413 if (blk_req_can_dispatch_to_zone(rq))
414 break;
415 if (blk_queue_nonrot(rq->q))
416 rq = deadline_latter_request(rq);
417 else
418 rq = deadline_skip_seq_writes(dd, rq);
419 }
420 spin_unlock_irqrestore(&dd->zone_lock, flags);
421
422 return rq;
423 }
424
425 /*
426 * Returns true if and only if @rq started after @latest_start where
427 * @latest_start is in jiffies.
428 */
429 static bool started_after(struct deadline_data *dd, struct request *rq,
430 unsigned long latest_start)
431 {
432 unsigned long start_time = (unsigned long)rq->fifo_time;
433
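/*
 * For requests on the FIFO lists, fifo_time is the insertion time plus
 * dd->fifo_expire[], so subtracting the expiry offset recovers the time at
 * which the request was inserted.
 */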
434 start_time -= dd->fifo_expire[rq_data_dir(rq)];
435
436 return time_after(start_time, latest_start);
437 }
438
439 /*
440 * deadline_dispatch_requests selects the best request according to
441 * read/write expire, fifo_batch, etc and with a start time <= @latest_start.
442 */
443 static struct request *__dd_dispatch_request(struct deadline_data *dd,
444 struct dd_per_prio *per_prio,
445 unsigned long latest_start)
446 {
447 struct request *rq, *next_rq;
448 enum dd_data_dir data_dir;
449 enum dd_prio prio;
450 u8 ioprio_class;
451
452 lockdep_assert_held(&dd->lock);
453
454 if (!list_empty(&per_prio->dispatch)) {
455 rq = list_first_entry(&per_prio->dispatch, struct request,
456 queuelist);
457 if (started_after(dd, rq, latest_start))
458 return NULL;
459 list_del_init(&rq->queuelist);
460 data_dir = rq_data_dir(rq);
461 goto done;
462 }
463
464 /*
465 * batches are currently reads XOR writes
466 */
467 rq = deadline_next_request(dd, per_prio, dd->last_dir);
468 if (rq && dd->batching < dd->fifo_batch) {
469 /* we have a next request and are still entitled to batch */
470 data_dir = rq_data_dir(rq);
471 goto dispatch_request;
472 }
473
474 /*
475 * at this point we are not running a batch. select the appropriate
476 * data direction (read / write)
477 */
478
479 if (!list_empty(&per_prio->fifo_list[DD_READ])) {
480 BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_READ]));
481
482 if (deadline_fifo_request(dd, per_prio, DD_WRITE) &&
483 (dd->starved++ >= dd->writes_starved))
484 goto dispatch_writes;
485
486 data_dir = DD_READ;
487
488 goto dispatch_find_request;
489 }
490
491 /*
492 * there are either no reads or writes have been starved
493 */
494
495 if (!list_empty(&per_prio->fifo_list[DD_WRITE])) {
496 dispatch_writes:
497 BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_WRITE]));
498
499 dd->starved = 0;
500
501 data_dir = DD_WRITE;
502
503 goto dispatch_find_request;
504 }
505
506 return NULL;
507
508 dispatch_find_request:
509 /*
510 * we are not running a batch, find best request for selected data_dir
511 */
512 next_rq = deadline_next_request(dd, per_prio, data_dir);
513 if (deadline_check_fifo(per_prio, data_dir) || !next_rq) {
514 /*
515 * A deadline has expired, the last request was in the other
516 * direction, or we have run out of higher-sectored requests.
517 * Start again from the request with the earliest expiry time.
518 */
519 rq = deadline_fifo_request(dd, per_prio, data_dir);
520 } else {
521 /*
522 * The last req was the same dir and we have a next request in
523 * sort order. No expired requests so continue on from here.
524 */
525 rq = next_rq;
526 }
527
528 /*
529 * For a zoned block device, if we only have writes queued and none of
530 * them can be dispatched, rq will be NULL.
531 */
532 if (!rq)
533 return NULL;
534
535 dd->last_dir = data_dir;
536 dd->batching = 0;
537
538 dispatch_request:
539 if (started_after(dd, rq, latest_start))
540 return NULL;
541
542 /*
543 * rq is the selected appropriate request.
544 */
545 dd->batching++;
546 deadline_move_request(dd, per_prio, rq);
547 done:
548 ioprio_class = dd_rq_ioclass(rq);
549 prio = ioprio_class_to_prio[ioprio_class];
550 dd->per_prio[prio].latest_pos[data_dir] = blk_rq_pos(rq);
551 dd->per_prio[prio].stats.dispatched++;
552 /*
553 * If the request needs its target zone locked, do it.
554 */
555 blk_req_zone_write_lock(rq);
556 rq->rq_flags |= RQF_STARTED;
557 return rq;
558 }
559
560 /*
561 * Check whether there are any requests with priority other than DD_RT_PRIO
562 * that were inserted more than prio_aging_expire jiffies ago.
563 */
564 static struct request *dd_dispatch_prio_aged_requests(struct deadline_data *dd,
565 unsigned long now)
566 {
567 struct request *rq;
568 enum dd_prio prio;
569 int prio_cnt;
570
571 lockdep_assert_held(&dd->lock);
572
573 prio_cnt = !!dd_queued(dd, DD_RT_PRIO) + !!dd_queued(dd, DD_BE_PRIO) +
574 !!dd_queued(dd, DD_IDLE_PRIO);
575 if (prio_cnt < 2)
576 return NULL;
577
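/*
 * Only age the non-realtime priority levels: DD_RT_PRIO is the highest
 * priority and hence can never be starved by a higher priority class.
 */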
578 for (prio = DD_BE_PRIO; prio <= DD_PRIO_MAX; prio++) {
579 rq = __dd_dispatch_request(dd, &dd->per_prio[prio],
580 now - dd->prio_aging_expire);
581 if (rq)
582 return rq;
583 }
584
585 return NULL;
586 }
587
588 /*
589 * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests().
590 *
591 * One confusing aspect here is that we get called for a specific
592 * hardware queue, but we may return a request that is for a
593 * different hardware queue. This is because mq-deadline has shared
594 * state for all hardware queues, in terms of sorting, FIFOs, etc.
595 */
596 static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
597 {
598 struct deadline_data *dd = hctx->queue->elevator->elevator_data;
599 const unsigned long now = jiffies;
600 struct request *rq;
601 enum dd_prio prio;
602
603 spin_lock(&dd->lock);
604 rq = dd_dispatch_prio_aged_requests(dd, now);
605 if (rq)
606 goto unlock;
607
608 /*
609 * Next, dispatch requests in priority order. Ignore lower priority
610 * requests if any higher priority requests are pending.
611 */
612 for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
613 rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now);
614 if (rq || dd_queued(dd, prio))
615 break;
616 }
617
618 unlock:
619 spin_unlock(&dd->lock);
620
621 return rq;
622 }
623
624 /*
625 * 'depth' is a number in the range 1..INT_MAX representing a number of
626 * requests. Scale it with a factor (1 << bt->sb.shift) / q->nr_requests since
627 * 1..(1 << bt->sb.shift) is the range expected by sbitmap_get_shallow().
628 * Values larger than q->nr_requests have the same effect as q->nr_requests.
629 */
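/*
 * Example with assumed values: if q->nr_requests == 256 and bt->sb.shift == 6
 * (64 bits per sbitmap word), a qdepth of 192 maps to
 * ((192 << 6) + 255) / 256 = 48, i.e. three quarters of each sbitmap word.
 */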
630 static int dd_to_word_depth(struct blk_mq_hw_ctx *hctx, unsigned int qdepth)
631 {
632 struct sbitmap_queue *bt = &hctx->sched_tags->bitmap_tags;
633 const unsigned int nrr = hctx->queue->nr_requests;
634
635 return ((qdepth << bt->sb.shift) + nrr - 1) / nrr;
636 }
637
638 /*
639 * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
640 * function is used by __blk_mq_get_tag().
641 */
642 static void dd_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
643 {
644 struct deadline_data *dd = data->q->elevator->elevator_data;
645
646 /* Do not throttle synchronous reads. */
647 if (op_is_sync(opf) && !op_is_write(opf))
648 return;
649
650 /*
651 * Throttle asynchronous requests and writes such that these requests
652 * do not block the allocation of synchronous requests.
653 */
654 data->shallow_depth = dd_to_word_depth(data->hctx, dd->async_depth);
655 }
656
657 /* Called by blk_mq_update_nr_requests(). */
658 static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
659 {
660 struct request_queue *q = hctx->queue;
661 struct deadline_data *dd = q->elevator->elevator_data;
662 struct blk_mq_tags *tags = hctx->sched_tags;
663
664 dd->async_depth = q->nr_requests;
665
666 sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, 1);
667 }
668
669 /* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
670 static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
671 {
672 dd_depth_updated(hctx);
673 return 0;
674 }
675
676 static void dd_exit_sched(struct elevator_queue *e)
677 {
678 struct deadline_data *dd = e->elevator_data;
679 enum dd_prio prio;
680
681 for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
682 struct dd_per_prio *per_prio = &dd->per_prio[prio];
683 const struct io_stats_per_prio *stats = &per_prio->stats;
684 uint32_t queued;
685
686 WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
687 WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));
688
689 spin_lock(&dd->lock);
690 queued = dd_queued(dd, prio);
691 spin_unlock(&dd->lock);
692
693 WARN_ONCE(queued != 0,
694 "statistics for priority %d: i %u m %u d %u c %u\n",
695 prio, stats->inserted, stats->merged,
696 stats->dispatched, atomic_read(&stats->completed));
697 }
698
699 kfree(dd);
700 }
701
702 /*
703 * initialize elevator private data (deadline_data).
704 */
705 static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
706 {
707 struct deadline_data *dd;
708 struct elevator_queue *eq;
709 enum dd_prio prio;
710 int ret = -ENOMEM;
711
712 eq = elevator_alloc(q, e);
713 if (!eq)
714 return ret;
715
716 dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
717 if (!dd)
718 goto put_eq;
719
720 eq->elevator_data = dd;
721
722 for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
723 struct dd_per_prio *per_prio = &dd->per_prio[prio];
724
725 INIT_LIST_HEAD(&per_prio->dispatch);
726 INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]);
727 INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]);
728 per_prio->sort_list[DD_READ] = RB_ROOT;
729 per_prio->sort_list[DD_WRITE] = RB_ROOT;
730 }
731 dd->fifo_expire[DD_READ] = read_expire;
732 dd->fifo_expire[DD_WRITE] = write_expire;
733 dd->writes_starved = writes_starved;
734 dd->front_merges = 1;
735 dd->last_dir = DD_WRITE;
736 dd->fifo_batch = fifo_batch;
737 dd->prio_aging_expire = prio_aging_expire;
738 spin_lock_init(&dd->lock);
739 spin_lock_init(&dd->zone_lock);
740
741 /* We dispatch requests queue-wide rather than per hardware queue. */
742 blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);
743
744 q->elevator = eq;
745 return 0;
746
747 put_eq:
748 kobject_put(&eq->kobj);
749 return ret;
750 }
751
752 /*
753 * Try to merge @bio into an existing request. If @bio has been merged into
754 * an existing request, store the pointer to that request into *@rq.
755 */
756 static int dd_request_merge(struct request_queue *q, struct request **rq,
757 struct bio *bio)
758 {
759 struct deadline_data *dd = q->elevator->elevator_data;
760 const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
761 const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
762 struct dd_per_prio *per_prio = &dd->per_prio[prio];
763 sector_t sector = bio_end_sector(bio);
764 struct request *__rq;
765
766 if (!dd->front_merges)
767 return ELEVATOR_NO_MERGE;
768
769 __rq = elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector);
770 if (__rq) {
771 BUG_ON(sector != blk_rq_pos(__rq));
772
773 if (elv_bio_merge_ok(__rq, bio)) {
774 *rq = __rq;
775 if (blk_discard_mergable(__rq))
776 return ELEVATOR_DISCARD_MERGE;
777 return ELEVATOR_FRONT_MERGE;
778 }
779 }
780
781 return ELEVATOR_NO_MERGE;
782 }
783
784 /*
785 * Attempt to merge a bio into an existing request. This function is called
786 * before @bio is associated with a request.
787 */
788 static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
789 unsigned int nr_segs)
790 {
791 struct deadline_data *dd = q->elevator->elevator_data;
792 struct request *free = NULL;
793 bool ret;
794
795 spin_lock(&dd->lock);
796 ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
797 spin_unlock(&dd->lock);
798
799 if (free)
800 blk_mq_free_request(free);
801
802 return ret;
803 }
804
805 /*
806 * add rq to rbtree and fifo
807 */
808 static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
809 blk_insert_t flags, struct list_head *free)
810 {
811 struct request_queue *q = hctx->queue;
812 struct deadline_data *dd = q->elevator->elevator_data;
813 const enum dd_data_dir data_dir = rq_data_dir(rq);
814 u16 ioprio = req_get_ioprio(rq);
815 u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
816 struct dd_per_prio *per_prio;
817 enum dd_prio prio;
818
819 lockdep_assert_held(&dd->lock);
820
821 /*
822 * This may be a requeue of a write request that has locked its
823 * target zone. If it is the case, this releases the zone lock.
824 */
825 blk_req_zone_write_unlock(rq);
826
827 prio = ioprio_class_to_prio[ioprio_class];
828 per_prio = &dd->per_prio[prio];
829 if (!rq->elv.priv[0]) {
830 per_prio->stats.inserted++;
831 rq->elv.priv[0] = (void *)(uintptr_t)1;
832 }
833
834 if (blk_mq_sched_try_insert_merge(q, rq, free))
835 return;
836
837 trace_block_rq_insert(rq);
838
839 if (flags & BLK_MQ_INSERT_AT_HEAD) {
840 list_add(&rq->queuelist, &per_prio->dispatch);
841 rq->fifo_time = jiffies;
842 } else {
843 struct list_head *insert_before;
844
845 deadline_add_rq_rb(per_prio, rq);
846
847 if (rq_mergeable(rq)) {
848 elv_rqhash_add(q, rq);
849 if (!q->last_merge)
850 q->last_merge = rq;
851 }
852
853 /*
854 * set expire time and add to fifo list
855 */
856 rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
857 insert_before = &per_prio->fifo_list[data_dir];
858 #ifdef CONFIG_BLK_DEV_ZONED
859 /*
860 * Insert zoned writes such that requests are sorted by
861 * position per zone.
862 */
863 if (blk_rq_is_seq_zoned_write(rq)) {
864 struct request *rq2 = deadline_latter_request(rq);
865
866 if (rq2 && blk_rq_zone_no(rq2) == blk_rq_zone_no(rq))
867 insert_before = &rq2->queuelist;
868 }
869 #endif
870 list_add_tail(&rq->queuelist, insert_before);
871 }
872 }
873
874 /*
875 * Called from blk_mq_insert_request() or blk_mq_dispatch_plug_list().
876 */
877 static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
878 struct list_head *list,
879 blk_insert_t flags)
880 {
881 struct request_queue *q = hctx->queue;
882 struct deadline_data *dd = q->elevator->elevator_data;
883 LIST_HEAD(free);
884
885 spin_lock(&dd->lock);
886 while (!list_empty(list)) {
887 struct request *rq;
888
889 rq = list_first_entry(list, struct request, queuelist);
890 list_del_init(&rq->queuelist);
891 dd_insert_request(hctx, rq, flags, &free);
892 }
893 spin_unlock(&dd->lock);
894
895 blk_mq_free_requests(&free);
896 }
897
898 /* Callback from inside blk_mq_rq_ctx_init(). */
899 static void dd_prepare_request(struct request *rq)
900 {
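/*
 * Clear the "inserted" marker: dd_insert_request() sets elv.priv[0] to a
 * non-NULL token exactly once so that dd_finish_request() can ignore
 * requests that bypassed the I/O scheduler.
 */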
901 rq->elv.priv[0] = NULL;
902 }
903
904 static bool dd_has_write_work(struct blk_mq_hw_ctx *hctx)
905 {
906 struct deadline_data *dd = hctx->queue->elevator->elevator_data;
907 enum dd_prio p;
908
909 for (p = 0; p <= DD_PRIO_MAX; p++)
910 if (!list_empty_careful(&dd->per_prio[p].fifo_list[DD_WRITE]))
911 return true;
912
913 return false;
914 }
915
916 /*
917 * Callback from inside blk_mq_free_request().
918 *
919 * For zoned block devices, write unlock the target zone of
920 * completed write requests. Do this while holding the zone lock
921 * spinlock so that the zone is never unlocked while deadline_fifo_request()
922 * or deadline_next_request() are executing. This function is called for
923 * all requests, whether or not these requests complete successfully.
924 *
925 * For a zoned block device, __dd_dispatch_request() may have stopped
926 * dispatching requests if all the queued requests are write requests directed
927 * at zones that are already locked due to on-going write requests. To ensure
928 * write request dispatch progress in this case, mark the queue as needing a
929 * restart to ensure that the queue is run again after completion of the
930 * request and zones being unlocked.
931 */
932 static void dd_finish_request(struct request *rq)
933 {
934 struct request_queue *q = rq->q;
935 struct deadline_data *dd = q->elevator->elevator_data;
936 const u8 ioprio_class = dd_rq_ioclass(rq);
937 const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
938 struct dd_per_prio *per_prio = &dd->per_prio[prio];
939
940 /*
941 * The block layer core may call dd_finish_request() without having
942 * called dd_insert_requests(). Skip requests that bypassed I/O
943 * scheduling. See also blk_mq_request_bypass_insert().
944 */
945 if (!rq->elv.priv[0])
946 return;
947
948 atomic_inc(&per_prio->stats.completed);
949
950 if (blk_queue_is_zoned(q)) {
951 unsigned long flags;
952
953 spin_lock_irqsave(&dd->zone_lock, flags);
954 blk_req_zone_write_unlock(rq);
955 spin_unlock_irqrestore(&dd->zone_lock, flags);
956
957 if (dd_has_write_work(rq->mq_hctx))
958 blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
959 }
960 }
961
962 static bool dd_has_work_for_prio(struct dd_per_prio *per_prio)
963 {
964 return !list_empty_careful(&per_prio->dispatch) ||
965 !list_empty_careful(&per_prio->fifo_list[DD_READ]) ||
966 !list_empty_careful(&per_prio->fifo_list[DD_WRITE]);
967 }
968
969 static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
970 {
971 struct deadline_data *dd = hctx->queue->elevator->elevator_data;
972 enum dd_prio prio;
973
974 for (prio = 0; prio <= DD_PRIO_MAX; prio++)
975 if (dd_has_work_for_prio(&dd->per_prio[prio]))
976 return true;
977
978 return false;
979 }
980
981 /*
982 * sysfs parts below
983 */
984 #define SHOW_INT(__FUNC, __VAR) \
985 static ssize_t __FUNC(struct elevator_queue *e, char *page) \
986 { \
987 struct deadline_data *dd = e->elevator_data; \
988 \
989 return sysfs_emit(page, "%d\n", __VAR); \
990 }
991 #define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))
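/*
 * For example, SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ])
 * below defines a show function that reports the read expiry converted from
 * jiffies to milliseconds.
 */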
992 SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
993 SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
994 SHOW_JIFFIES(deadline_prio_aging_expire_show, dd->prio_aging_expire);
995 SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
996 SHOW_INT(deadline_front_merges_show, dd->front_merges);
997 SHOW_INT(deadline_async_depth_show, dd->async_depth);
998 SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
999 #undef SHOW_INT
1000 #undef SHOW_JIFFIES
1001
1002 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
1003 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
1004 { \
1005 struct deadline_data *dd = e->elevator_data; \
1006 int __data, __ret; \
1007 \
1008 __ret = kstrtoint(page, 0, &__data); \
1009 if (__ret < 0) \
1010 return __ret; \
1011 if (__data < (MIN)) \
1012 __data = (MIN); \
1013 else if (__data > (MAX)) \
1014 __data = (MAX); \
1015 *(__PTR) = __CONV(__data); \
1016 return count; \
1017 }
1018 #define STORE_INT(__FUNC, __PTR, MIN, MAX) \
1019 STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, )
1020 #define STORE_JIFFIES(__FUNC, __PTR, MIN, MAX) \
1021 STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies)
1022 STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX);
1023 STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
1024 STORE_JIFFIES(deadline_prio_aging_expire_store, &dd->prio_aging_expire, 0, INT_MAX);
1025 STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
1026 STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
1027 STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
1028 STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
1029 #undef STORE_FUNCTION
1030 #undef STORE_INT
1031 #undef STORE_JIFFIES
1032
1033 #define DD_ATTR(name) \
1034 __ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)
1035
1036 static struct elv_fs_entry deadline_attrs[] = {
1037 DD_ATTR(read_expire),
1038 DD_ATTR(write_expire),
1039 DD_ATTR(writes_starved),
1040 DD_ATTR(front_merges),
1041 DD_ATTR(async_depth),
1042 DD_ATTR(fifo_batch),
1043 DD_ATTR(prio_aging_expire),
1044 __ATTR_NULL
1045 };
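/*
 * These attributes are exposed through sysfs, e.g. (illustrative paths, "sda"
 * is just an example device):
 *
 *   cat /sys/block/sda/queue/iosched/read_expire
 *   echo 200 > /sys/block/sda/queue/iosched/write_expire   (milliseconds)
 */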
1046
1047 #ifdef CONFIG_BLK_DEBUG_FS
1048 #define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name) \
1049 static void *deadline_##name##_fifo_start(struct seq_file *m, \
1050 loff_t *pos) \
1051 __acquires(&dd->lock) \
1052 { \
1053 struct request_queue *q = m->private; \
1054 struct deadline_data *dd = q->elevator->elevator_data; \
1055 struct dd_per_prio *per_prio = &dd->per_prio[prio]; \
1056 \
1057 spin_lock(&dd->lock); \
1058 return seq_list_start(&per_prio->fifo_list[data_dir], *pos); \
1059 } \
1060 \
1061 static void *deadline_##name##_fifo_next(struct seq_file *m, void *v, \
1062 loff_t *pos) \
1063 { \
1064 struct request_queue *q = m->private; \
1065 struct deadline_data *dd = q->elevator->elevator_data; \
1066 struct dd_per_prio *per_prio = &dd->per_prio[prio]; \
1067 \
1068 return seq_list_next(v, &per_prio->fifo_list[data_dir], pos); \
1069 } \
1070 \
1071 static void deadline_##name##_fifo_stop(struct seq_file *m, void *v) \
1072 __releases(&dd->lock) \
1073 { \
1074 struct request_queue *q = m->private; \
1075 struct deadline_data *dd = q->elevator->elevator_data; \
1076 \
1077 spin_unlock(&dd->lock); \
1078 } \
1079 \
1080 static const struct seq_operations deadline_##name##_fifo_seq_ops = { \
1081 .start = deadline_##name##_fifo_start, \
1082 .next = deadline_##name##_fifo_next, \
1083 .stop = deadline_##name##_fifo_stop, \
1084 .show = blk_mq_debugfs_rq_show, \
1085 }; \
1086 \
1087 static int deadline_##name##_next_rq_show(void *data, \
1088 struct seq_file *m) \
1089 { \
1090 struct request_queue *q = data; \
1091 struct deadline_data *dd = q->elevator->elevator_data; \
1092 struct dd_per_prio *per_prio = &dd->per_prio[prio]; \
1093 struct request *rq; \
1094 \
1095 rq = deadline_from_pos(per_prio, data_dir, \
1096 per_prio->latest_pos[data_dir]); \
1097 if (rq) \
1098 __blk_mq_debugfs_rq_show(m, rq); \
1099 return 0; \
1100 }
1101
1102 DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0);
1103 DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0);
1104 DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1);
1105 DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1);
1106 DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2);
1107 DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2);
1108 #undef DEADLINE_DEBUGFS_DDIR_ATTRS
1109
1110 static int deadline_batching_show(void *data, struct seq_file *m)
1111 {
1112 struct request_queue *q = data;
1113 struct deadline_data *dd = q->elevator->elevator_data;
1114
1115 seq_printf(m, "%u\n", dd->batching);
1116 return 0;
1117 }
1118
1119 static int deadline_starved_show(void *data, struct seq_file *m)
1120 {
1121 struct request_queue *q = data;
1122 struct deadline_data *dd = q->elevator->elevator_data;
1123
1124 seq_printf(m, "%u\n", dd->starved);
1125 return 0;
1126 }
1127
1128 static int dd_async_depth_show(void *data, struct seq_file *m)
1129 {
1130 struct request_queue *q = data;
1131 struct deadline_data *dd = q->elevator->elevator_data;
1132
1133 seq_printf(m, "%u\n", dd->async_depth);
1134 return 0;
1135 }
1136
1137 static int dd_queued_show(void *data, struct seq_file *m)
1138 {
1139 struct request_queue *q = data;
1140 struct deadline_data *dd = q->elevator->elevator_data;
1141 u32 rt, be, idle;
1142
1143 spin_lock(&dd->lock);
1144 rt = dd_queued(dd, DD_RT_PRIO);
1145 be = dd_queued(dd, DD_BE_PRIO);
1146 idle = dd_queued(dd, DD_IDLE_PRIO);
1147 spin_unlock(&dd->lock);
1148
1149 seq_printf(m, "%u %u %u\n", rt, be, idle);
1150
1151 return 0;
1152 }
1153
1154 /* Number of requests owned by the block driver for a given priority. */
1155 static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
1156 {
1157 const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;
1158
1159 lockdep_assert_held(&dd->lock);
1160
1161 return stats->dispatched + stats->merged -
1162 atomic_read(&stats->completed);
1163 }
1164
1165 static int dd_owned_by_driver_show(void *data, struct seq_file *m)
1166 {
1167 struct request_queue *q = data;
1168 struct deadline_data *dd = q->elevator->elevator_data;
1169 u32 rt, be, idle;
1170
1171 spin_lock(&dd->lock);
1172 rt = dd_owned_by_driver(dd, DD_RT_PRIO);
1173 be = dd_owned_by_driver(dd, DD_BE_PRIO);
1174 idle = dd_owned_by_driver(dd, DD_IDLE_PRIO);
1175 spin_unlock(&dd->lock);
1176
1177 seq_printf(m, "%u %u %u\n", rt, be, idle);
1178
1179 return 0;
1180 }
1181
1182 #define DEADLINE_DISPATCH_ATTR(prio) \
1183 static void *deadline_dispatch##prio##_start(struct seq_file *m, \
1184 loff_t *pos) \
1185 __acquires(&dd->lock) \
1186 { \
1187 struct request_queue *q = m->private; \
1188 struct deadline_data *dd = q->elevator->elevator_data; \
1189 struct dd_per_prio *per_prio = &dd->per_prio[prio]; \
1190 \
1191 spin_lock(&dd->lock); \
1192 return seq_list_start(&per_prio->dispatch, *pos); \
1193 } \
1194 \
1195 static void *deadline_dispatch##prio##_next(struct seq_file *m, \
1196 void *v, loff_t *pos) \
1197 { \
1198 struct request_queue *q = m->private; \
1199 struct deadline_data *dd = q->elevator->elevator_data; \
1200 struct dd_per_prio *per_prio = &dd->per_prio[prio]; \
1201 \
1202 return seq_list_next(v, &per_prio->dispatch, pos); \
1203 } \
1204 \
1205 static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v) \
1206 __releases(&dd->lock) \
1207 { \
1208 struct request_queue *q = m->private; \
1209 struct deadline_data *dd = q->elevator->elevator_data; \
1210 \
1211 spin_unlock(&dd->lock); \
1212 } \
1213 \
1214 static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \
1215 .start = deadline_dispatch##prio##_start, \
1216 .next = deadline_dispatch##prio##_next, \
1217 .stop = deadline_dispatch##prio##_stop, \
1218 .show = blk_mq_debugfs_rq_show, \
1219 }
1220
1221 DEADLINE_DISPATCH_ATTR(0);
1222 DEADLINE_DISPATCH_ATTR(1);
1223 DEADLINE_DISPATCH_ATTR(2);
1224 #undef DEADLINE_DISPATCH_ATTR
1225
1226 #define DEADLINE_QUEUE_DDIR_ATTRS(name) \
1227 {#name "_fifo_list", 0400, \
1228 .seq_ops = &deadline_##name##_fifo_seq_ops}
1229 #define DEADLINE_NEXT_RQ_ATTR(name) \
1230 {#name "_next_rq", 0400, deadline_##name##_next_rq_show}
1231 static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
1232 DEADLINE_QUEUE_DDIR_ATTRS(read0),
1233 DEADLINE_QUEUE_DDIR_ATTRS(write0),
1234 DEADLINE_QUEUE_DDIR_ATTRS(read1),
1235 DEADLINE_QUEUE_DDIR_ATTRS(write1),
1236 DEADLINE_QUEUE_DDIR_ATTRS(read2),
1237 DEADLINE_QUEUE_DDIR_ATTRS(write2),
1238 DEADLINE_NEXT_RQ_ATTR(read0),
1239 DEADLINE_NEXT_RQ_ATTR(write0),
1240 DEADLINE_NEXT_RQ_ATTR(read1),
1241 DEADLINE_NEXT_RQ_ATTR(write1),
1242 DEADLINE_NEXT_RQ_ATTR(read2),
1243 DEADLINE_NEXT_RQ_ATTR(write2),
1244 {"batching", 0400, deadline_batching_show},
1245 {"starved", 0400, deadline_starved_show},
1246 {"async_depth", 0400, dd_async_depth_show},
1247 {"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops},
1248 {"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops},
1249 {"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops},
1250 {"owned_by_driver", 0400, dd_owned_by_driver_show},
1251 {"queued", 0400, dd_queued_show},
1252 {},
1253 };
1254 #undef DEADLINE_QUEUE_DDIR_ATTRS
1255 #endif
1256
1257 static struct elevator_type mq_deadline = {
1258 .ops = {
1259 .depth_updated = dd_depth_updated,
1260 .limit_depth = dd_limit_depth,
1261 .insert_requests = dd_insert_requests,
1262 .dispatch_request = dd_dispatch_request,
1263 .prepare_request = dd_prepare_request,
1264 .finish_request = dd_finish_request,
1265 .next_request = elv_rb_latter_request,
1266 .former_request = elv_rb_former_request,
1267 .bio_merge = dd_bio_merge,
1268 .request_merge = dd_request_merge,
1269 .requests_merged = dd_merged_requests,
1270 .request_merged = dd_request_merged,
1271 .has_work = dd_has_work,
1272 .init_sched = dd_init_sched,
1273 .exit_sched = dd_exit_sched,
1274 .init_hctx = dd_init_hctx,
1275 },
1276
1277 #ifdef CONFIG_BLK_DEBUG_FS
1278 .queue_debugfs_attrs = deadline_queue_debugfs_attrs,
1279 #endif
1280 .elevator_attrs = deadline_attrs,
1281 .elevator_name = "mq-deadline",
1282 .elevator_alias = "deadline",
1283 .elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
1284 .elevator_owner = THIS_MODULE,
1285 };
1286 MODULE_ALIAS("mq-deadline-iosched");
1287
1288 static int __init deadline_init(void)
1289 {
1290 return elv_register(&mq_deadline);
1291 }
1292
1293 static void __exit deadline_exit(void)
1294 {
1295 elv_unregister(&mq_deadline);
1296 }
1297
1298 module_init(deadline_init);
1299 module_exit(deadline_exit);
1300
1301 MODULE_AUTHOR("Jens Axboe, Damien Le Moal and Bart Van Assche");
1302 MODULE_LICENSE("GPL");
1303 MODULE_DESCRIPTION("MQ deadline IO scheduler");
1304