xref: /openbmc/linux/block/mq-deadline.c (revision e368cd72)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
4  *  for the blk-mq scheduling framework
5  *
6  *  Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
7  */
8 #include <linux/kernel.h>
9 #include <linux/fs.h>
10 #include <linux/blkdev.h>
11 #include <linux/blk-mq.h>
12 #include <linux/elevator.h>
13 #include <linux/bio.h>
14 #include <linux/module.h>
15 #include <linux/slab.h>
16 #include <linux/init.h>
17 #include <linux/compiler.h>
18 #include <linux/rbtree.h>
19 #include <linux/sbitmap.h>
20 
21 #include <trace/events/block.h>
22 
23 #include "blk.h"
24 #include "blk-mq.h"
25 #include "blk-mq-debugfs.h"
26 #include "blk-mq-tag.h"
27 #include "blk-mq-sched.h"
28 
29 /*
30  * See Documentation/block/deadline-iosched.rst
31  */
32 static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
33 static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
34 static const int writes_starved = 2;    /* max times reads can starve a write */
35 static const int fifo_batch = 16;       /* # of sequential requests treated as one
36 				     by the above parameters. For throughput. */
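/*
 * With the defaults above a read is considered expired 500 ms (HZ / 2
 * jiffies) after insertion and a write after 5 s; both limits are soft.
 * fifo_batch caps how many sector-ordered requests are dispatched before
 * the expiry times are checked again. All four values seed the runtime
 * tunables in struct deadline_data, and the expiry times are exposed
 * through sysfs in milliseconds (see SHOW_JIFFIES()/STORE_JIFFIES() below).
 */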
37 
38 enum dd_data_dir {
39 	DD_READ		= READ,
40 	DD_WRITE	= WRITE,
41 };
42 
43 enum { DD_DIR_COUNT = 2 };
44 
45 enum dd_prio {
46 	DD_RT_PRIO	= 0,
47 	DD_BE_PRIO	= 1,
48 	DD_IDLE_PRIO	= 2,
49 	DD_PRIO_MAX	= 2,
50 };
51 
52 enum { DD_PRIO_COUNT = 3 };
53 
54 /* I/O statistics per I/O priority. */
55 struct io_stats_per_prio {
56 	local_t inserted;
57 	local_t merged;
58 	local_t dispatched;
59 	local_t completed;
60 };
61 
62 /* I/O statistics for all I/O priorities (enum dd_prio). */
63 struct io_stats {
64 	struct io_stats_per_prio stats[DD_PRIO_COUNT];
65 };
66 
67 /*
68  * Deadline scheduler data per I/O priority (enum dd_prio). Requests are
69  * present on both sort_list[] and fifo_list[].
70  */
71 struct dd_per_prio {
72 	struct list_head dispatch;
73 	struct rb_root sort_list[DD_DIR_COUNT];
74 	struct list_head fifo_list[DD_DIR_COUNT];
75 	/* Next request in sector-sorted order; the read, write or both pointers may be NULL. */
76 	struct request *next_rq[DD_DIR_COUNT];
77 };
78 
79 struct deadline_data {
80 	/*
81 	 * run time data
82 	 */
83 
84 	struct dd_per_prio per_prio[DD_PRIO_COUNT];
85 
86 	/* Data direction of latest dispatched request. */
87 	enum dd_data_dir last_dir;
88 	unsigned int batching;		/* number of sequential requests made */
89 	unsigned int starved;		/* times reads have starved writes */
90 
91 	struct io_stats __percpu *stats;
92 
93 	/*
94 	 * settings that change how the i/o scheduler behaves
95 	 */
96 	int fifo_expire[DD_DIR_COUNT];
97 	int fifo_batch;
98 	int writes_starved;
99 	int front_merges;
100 	u32 async_depth;
101 
102 	spinlock_t lock;
103 	spinlock_t zone_lock;
104 };
105 
106 /* Count one event of type 'event_type' with I/O priority 'prio'. */
107 #define dd_count(dd, event_type, prio) do {				\
108 	struct io_stats *io_stats = get_cpu_ptr((dd)->stats);		\
109 									\
110 	BUILD_BUG_ON(!__same_type((dd), struct deadline_data *));	\
111 	BUILD_BUG_ON(!__same_type((prio), enum dd_prio));		\
112 	local_inc(&io_stats->stats[(prio)].event_type);			\
113 	put_cpu_ptr(io_stats);						\
114 } while (0)
115 
116 /*
117  * Returns the total number of dd_count(dd, event_type, prio) calls across all
118  * CPUs. No locking or barriers since it is fine if the returned sum is slightly
119  * outdated.
120  */
121 #define dd_sum(dd, event_type, prio) ({					\
122 	unsigned int cpu;						\
123 	u32 sum = 0;							\
124 									\
125 	BUILD_BUG_ON(!__same_type((dd), struct deadline_data *));	\
126 	BUILD_BUG_ON(!__same_type((prio), enum dd_prio));		\
127 	for_each_present_cpu(cpu)					\
128 		sum += local_read(&per_cpu_ptr((dd)->stats, cpu)->	\
129 				  stats[(prio)].event_type);		\
130 	sum;								\
131 })
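/*
 * Together, dd_count() and dd_sum() implement cheap per-CPU event counters:
 * an update only disables preemption (get_cpu_ptr()) and readers such as
 * dd_queued() and dd_owned_by_driver() below sum the per-CPU values without
 * additional synchronisation, accepting a slightly stale total.
 */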
132 
133 /* Maps an I/O priority class to a deadline scheduler priority. */
134 static const enum dd_prio ioprio_class_to_prio[] = {
135 	[IOPRIO_CLASS_NONE]	= DD_BE_PRIO,
136 	[IOPRIO_CLASS_RT]	= DD_RT_PRIO,
137 	[IOPRIO_CLASS_BE]	= DD_BE_PRIO,
138 	[IOPRIO_CLASS_IDLE]	= DD_IDLE_PRIO,
139 };
140 
141 static inline struct rb_root *
142 deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)
143 {
144 	return &per_prio->sort_list[rq_data_dir(rq)];
145 }
146 
147 /*
148  * Returns the I/O priority class (IOPRIO_CLASS_*) that has been assigned to a
149  * request.
150  */
151 static u8 dd_rq_ioclass(struct request *rq)
152 {
153 	return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
154 }
155 
156 /*
157  * get the request after `rq' in sector-sorted order
158  */
159 static inline struct request *
160 deadline_latter_request(struct request *rq)
161 {
162 	struct rb_node *node = rb_next(&rq->rb_node);
163 
164 	if (node)
165 		return rb_entry_rq(node);
166 
167 	return NULL;
168 }
169 
170 static void
171 deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
172 {
173 	struct rb_root *root = deadline_rb_root(per_prio, rq);
174 
175 	elv_rb_add(root, rq);
176 }
177 
178 static inline void
179 deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
180 {
181 	const enum dd_data_dir data_dir = rq_data_dir(rq);
182 
183 	if (per_prio->next_rq[data_dir] == rq)
184 		per_prio->next_rq[data_dir] = deadline_latter_request(rq);
185 
186 	elv_rb_del(deadline_rb_root(per_prio, rq), rq);
187 }
188 
189 /*
190  * remove rq from rbtree and fifo.
191  */
192 static void deadline_remove_request(struct request_queue *q,
193 				    struct dd_per_prio *per_prio,
194 				    struct request *rq)
195 {
196 	list_del_init(&rq->queuelist);
197 
198 	/*
199 	 * We might not be on the rbtree, if we are doing an insert merge
200 	 */
201 	if (!RB_EMPTY_NODE(&rq->rb_node))
202 		deadline_del_rq_rb(per_prio, rq);
203 
204 	elv_rqhash_del(q, rq);
205 	if (q->last_merge == rq)
206 		q->last_merge = NULL;
207 }
208 
209 static void dd_request_merged(struct request_queue *q, struct request *req,
210 			      enum elv_merge type)
211 {
212 	struct deadline_data *dd = q->elevator->elevator_data;
213 	const u8 ioprio_class = dd_rq_ioclass(req);
214 	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
215 	struct dd_per_prio *per_prio = &dd->per_prio[prio];
216 
217 	/*
218 	 * if the merge was a front merge, we need to reposition request
219 	 */
220 	if (type == ELEVATOR_FRONT_MERGE) {
221 		elv_rb_del(deadline_rb_root(per_prio, req), req);
222 		deadline_add_rq_rb(per_prio, req);
223 	}
224 }
225 
226 /*
227  * Callback function that is invoked after @next has been merged into @req.
228  */
229 static void dd_merged_requests(struct request_queue *q, struct request *req,
230 			       struct request *next)
231 {
232 	struct deadline_data *dd = q->elevator->elevator_data;
233 	const u8 ioprio_class = dd_rq_ioclass(next);
234 	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
235 
236 	dd_count(dd, merged, prio);
237 
238 	/*
239 	 * if next expires before req, assign its expire time to req
240 	 * and move req into next's position in the fifo (next will be deleted)
241 	 */
242 	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
243 		if (time_before((unsigned long)next->fifo_time,
244 				(unsigned long)req->fifo_time)) {
245 			list_move(&req->queuelist, &next->queuelist);
246 			req->fifo_time = next->fifo_time;
247 		}
248 	}
249 
250 	/*
251 	 * kill knowledge of next, this one is a goner
252 	 */
253 	deadline_remove_request(q, &dd->per_prio[prio], next);
254 }
255 
256 /*
257  * move an entry to dispatch queue
258  */
259 static void
260 deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
261 		      struct request *rq)
262 {
263 	const enum dd_data_dir data_dir = rq_data_dir(rq);
264 
265 	per_prio->next_rq[data_dir] = deadline_latter_request(rq);
266 
267 	/*
268 	 * take it off the sort and fifo list
269 	 */
270 	deadline_remove_request(rq->q, per_prio, rq);
271 }
272 
273 /* Number of requests queued for a given priority level. */
274 static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
275 {
276 	return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio);
277 }
278 
279 /*
280  * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
281  * 1 otherwise. Requires !list_empty(&per_prio->fifo_list[data_dir])
282  */
283 static inline int deadline_check_fifo(struct dd_per_prio *per_prio,
284 				      enum dd_data_dir data_dir)
285 {
286 	struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
287 
288 	/*
289 	 * rq is expired!
290 	 */
291 	if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
292 		return 1;
293 
294 	return 0;
295 }
296 
297 /*
298  * For the specified data direction, return the next request to
299  * dispatch using arrival ordered lists.
300  */
301 static struct request *
302 deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
303 		      enum dd_data_dir data_dir)
304 {
305 	struct request *rq;
306 	unsigned long flags;
307 
308 	if (list_empty(&per_prio->fifo_list[data_dir]))
309 		return NULL;
310 
311 	rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
312 	if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
313 		return rq;
314 
315 	/*
316 	 * Look for a write request that can be dispatched, that is, one with
317 	 * an unlocked target zone.
318 	 */
319 	spin_lock_irqsave(&dd->zone_lock, flags);
320 	list_for_each_entry(rq, &per_prio->fifo_list[DD_WRITE], queuelist) {
321 		if (blk_req_can_dispatch_to_zone(rq))
322 			goto out;
323 	}
324 	rq = NULL;
325 out:
326 	spin_unlock_irqrestore(&dd->zone_lock, flags);
327 
328 	return rq;
329 }
330 
331 /*
332  * For the specified data direction, return the next request to
333  * dispatch using sector position sorted lists.
334  */
335 static struct request *
336 deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
337 		      enum dd_data_dir data_dir)
338 {
339 	struct request *rq;
340 	unsigned long flags;
341 
342 	rq = per_prio->next_rq[data_dir];
343 	if (!rq)
344 		return NULL;
345 
346 	if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
347 		return rq;
348 
349 	/*
350 	 * Look for a write request that can be dispatched, that is, one with
351 	 * an unlocked target zone.
352 	 */
353 	spin_lock_irqsave(&dd->zone_lock, flags);
354 	while (rq) {
355 		if (blk_req_can_dispatch_to_zone(rq))
356 			break;
357 		rq = deadline_latter_request(rq);
358 	}
359 	spin_unlock_irqrestore(&dd->zone_lock, flags);
360 
361 	return rq;
362 }
363 
364 /*
365  * deadline_dispatch_requests selects the best request according to
366  * read/write expire, fifo_batch, etc
367  */
368 static struct request *__dd_dispatch_request(struct deadline_data *dd,
369 					     struct dd_per_prio *per_prio)
370 {
371 	struct request *rq, *next_rq;
372 	enum dd_data_dir data_dir;
373 	enum dd_prio prio;
374 	u8 ioprio_class;
375 
376 	lockdep_assert_held(&dd->lock);
377 
378 	if (!list_empty(&per_prio->dispatch)) {
379 		rq = list_first_entry(&per_prio->dispatch, struct request,
380 				      queuelist);
381 		list_del_init(&rq->queuelist);
382 		goto done;
383 	}
384 
385 	/*
386 	 * batches are currently reads XOR writes
387 	 */
388 	rq = deadline_next_request(dd, per_prio, dd->last_dir);
389 	if (rq && dd->batching < dd->fifo_batch)
390 		/* we have a next request and are still entitled to batch */
391 		goto dispatch_request;
392 
393 	/*
394 	 * at this point we are not running a batch. select the appropriate
395 	 * data direction (read / write)
396 	 */
397 
398 	if (!list_empty(&per_prio->fifo_list[DD_READ])) {
399 		BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_READ]));
400 
401 		if (deadline_fifo_request(dd, per_prio, DD_WRITE) &&
402 		    (dd->starved++ >= dd->writes_starved))
403 			goto dispatch_writes;
404 
405 		data_dir = DD_READ;
406 
407 		goto dispatch_find_request;
408 	}
409 
410 	/*
411 	 * either there are no reads, or writes have been starved long enough
412 	 */
413 
414 	if (!list_empty(&per_prio->fifo_list[DD_WRITE])) {
415 dispatch_writes:
416 		BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_WRITE]));
417 
418 		dd->starved = 0;
419 
420 		data_dir = DD_WRITE;
421 
422 		goto dispatch_find_request;
423 	}
424 
425 	return NULL;
426 
427 dispatch_find_request:
428 	/*
429 	 * we are not running a batch, find best request for selected data_dir
430 	 */
431 	next_rq = deadline_next_request(dd, per_prio, data_dir);
432 	if (deadline_check_fifo(per_prio, data_dir) || !next_rq) {
433 		/*
434 		 * A deadline has expired, the last request was in the other
435 		 * direction, or we have run out of higher-sectored requests.
436 		 * Start again from the request with the earliest expiry time.
437 		 */
438 		rq = deadline_fifo_request(dd, per_prio, data_dir);
439 	} else {
440 		/*
441 		 * The last req was the same dir and we have a next request in
442 		 * sort order. No expired requests so continue on from here.
443 		 */
444 		rq = next_rq;
445 	}
446 
447 	/*
448 	 * For a zoned block device, if we only have writes queued and none of
449 	 * them can be dispatched, rq will be NULL.
450 	 */
451 	if (!rq)
452 		return NULL;
453 
454 	dd->last_dir = data_dir;
455 	dd->batching = 0;
456 
457 dispatch_request:
458 	/*
459 	 * rq is the request selected for dispatch.
460 	 */
461 	dd->batching++;
462 	deadline_move_request(dd, per_prio, rq);
463 done:
464 	ioprio_class = dd_rq_ioclass(rq);
465 	prio = ioprio_class_to_prio[ioprio_class];
466 	dd_count(dd, dispatched, prio);
467 	/*
468 	 * If the request needs its target zone locked, do it.
469 	 */
470 	blk_req_zone_write_lock(rq);
471 	rq->rq_flags |= RQF_STARTED;
472 	return rq;
473 }
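/*
 * Summary of the dispatch order implemented above:
 *  1. requests force-queued on per_prio->dispatch (head insertions) go first;
 *  2. otherwise the current batch in dd->last_dir continues while
 *     dd->batching < dd->fifo_batch;
 *  3. otherwise reads are preferred, unless dispatchable writes have already
 *     been passed over dd->writes_starved times;
 *  4. within the chosen direction, dispatch restarts from the earliest
 *     deadline when one has expired (or when there is no sector-ordered
 *     successor), and continues in sector order otherwise.
 */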
474 
475 /*
476  * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests().
477  *
478  * One confusing aspect here is that we get called for a specific
479  * hardware queue, but we may return a request that is for a
480  * different hardware queue. This is because mq-deadline has shared
481  * state for all hardware queues, in terms of sorting, FIFOs, etc.
482  */
483 static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
484 {
485 	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
486 	struct request *rq;
487 	enum dd_prio prio;
488 
489 	spin_lock(&dd->lock);
490 	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
491 		rq = __dd_dispatch_request(dd, &dd->per_prio[prio]);
492 		if (rq)
493 			break;
494 	}
495 	spin_unlock(&dd->lock);
496 
497 	return rq;
498 }
499 
500 /*
501  * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
502  * function is used by __blk_mq_get_tag().
503  */
504 static void dd_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
505 {
506 	struct deadline_data *dd = data->q->elevator->elevator_data;
507 
508 	/* Do not throttle synchronous reads. */
509 	if (op_is_sync(op) && !op_is_write(op))
510 		return;
511 
512 	/*
513 	 * Throttle asynchronous requests and writes such that these requests
514 	 * do not block the allocation of synchronous requests.
515 	 */
516 	data->shallow_depth = dd->async_depth;
517 }
518 
519 /* Called by blk_mq_update_nr_requests(). */
520 static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
521 {
522 	struct request_queue *q = hctx->queue;
523 	struct deadline_data *dd = q->elevator->elevator_data;
524 	struct blk_mq_tags *tags = hctx->sched_tags;
525 
526 	dd->async_depth = max(1UL, 3 * q->nr_requests / 4);
527 
528 	sbitmap_queue_min_shallow_depth(tags->bitmap_tags, dd->async_depth);
529 }
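/*
 * Worked example (the queue depth is illustrative only): with
 * q->nr_requests == 256, async_depth becomes 3 * 256 / 4 = 192, so
 * dd_limit_depth() lets asynchronous requests and writes consume at most
 * roughly 192 of the 256 scheduler tags, keeping the rest available for
 * synchronous reads.
 */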
530 
531 /* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
532 static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
533 {
534 	dd_depth_updated(hctx);
535 	return 0;
536 }
537 
538 static void dd_exit_sched(struct elevator_queue *e)
539 {
540 	struct deadline_data *dd = e->elevator_data;
541 	enum dd_prio prio;
542 
543 	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
544 		struct dd_per_prio *per_prio = &dd->per_prio[prio];
545 
546 		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
547 		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));
548 	}
549 
550 	free_percpu(dd->stats);
551 
552 	kfree(dd);
553 }
554 
555 /*
556  * initialize elevator private data (deadline_data).
557  */
558 static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
559 {
560 	struct deadline_data *dd;
561 	struct elevator_queue *eq;
562 	enum dd_prio prio;
563 	int ret = -ENOMEM;
564 
565 	eq = elevator_alloc(q, e);
566 	if (!eq)
567 		return ret;
568 
569 	dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
570 	if (!dd)
571 		goto put_eq;
572 
573 	eq->elevator_data = dd;
574 
575 	dd->stats = alloc_percpu_gfp(typeof(*dd->stats),
576 				     GFP_KERNEL | __GFP_ZERO);
577 	if (!dd->stats)
578 		goto free_dd;
579 
580 	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
581 		struct dd_per_prio *per_prio = &dd->per_prio[prio];
582 
583 		INIT_LIST_HEAD(&per_prio->dispatch);
584 		INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]);
585 		INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]);
586 		per_prio->sort_list[DD_READ] = RB_ROOT;
587 		per_prio->sort_list[DD_WRITE] = RB_ROOT;
588 	}
589 	dd->fifo_expire[DD_READ] = read_expire;
590 	dd->fifo_expire[DD_WRITE] = write_expire;
591 	dd->writes_starved = writes_starved;
592 	dd->front_merges = 1;
593 	dd->last_dir = DD_WRITE;
594 	dd->fifo_batch = fifo_batch;
595 	spin_lock_init(&dd->lock);
596 	spin_lock_init(&dd->zone_lock);
597 
598 	q->elevator = eq;
599 	return 0;
600 
601 free_dd:
602 	kfree(dd);
603 
604 put_eq:
605 	kobject_put(&eq->kobj);
606 	return ret;
607 }
608 
609 /*
610  * Try to merge @bio into an existing request. If @bio has been merged into
611  * an existing request, store the pointer to that request into *@rq.
612  */
613 static int dd_request_merge(struct request_queue *q, struct request **rq,
614 			    struct bio *bio)
615 {
616 	struct deadline_data *dd = q->elevator->elevator_data;
617 	const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
618 	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
619 	struct dd_per_prio *per_prio = &dd->per_prio[prio];
620 	sector_t sector = bio_end_sector(bio);
621 	struct request *__rq;
622 
623 	if (!dd->front_merges)
624 		return ELEVATOR_NO_MERGE;
625 
626 	__rq = elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector);
627 	if (__rq) {
628 		BUG_ON(sector != blk_rq_pos(__rq));
629 
630 		if (elv_bio_merge_ok(__rq, bio)) {
631 			*rq = __rq;
632 			if (blk_discard_mergable(__rq))
633 				return ELEVATOR_DISCARD_MERGE;
634 			return ELEVATOR_FRONT_MERGE;
635 		}
636 	}
637 
638 	return ELEVATOR_NO_MERGE;
639 }
640 
641 /*
642  * Attempt to merge a bio into an existing request. This function is called
643  * before @bio is associated with a request.
644  */
645 static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
646 		unsigned int nr_segs)
647 {
648 	struct deadline_data *dd = q->elevator->elevator_data;
649 	struct request *free = NULL;
650 	bool ret;
651 
652 	spin_lock(&dd->lock);
653 	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
654 	spin_unlock(&dd->lock);
655 
656 	if (free)
657 		blk_mq_free_request(free);
658 
659 	return ret;
660 }
661 
662 /*
663  * add rq to rbtree and fifo
664  */
665 static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
666 			      bool at_head)
667 {
668 	struct request_queue *q = hctx->queue;
669 	struct deadline_data *dd = q->elevator->elevator_data;
670 	const enum dd_data_dir data_dir = rq_data_dir(rq);
671 	u16 ioprio = req_get_ioprio(rq);
672 	u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
673 	struct dd_per_prio *per_prio;
674 	enum dd_prio prio;
675 	LIST_HEAD(free);
676 
677 	lockdep_assert_held(&dd->lock);
678 
679 	/*
680 	 * This may be a requeue of a write request that has locked its
681 	 * target zone. If so, the unlock below releases the zone lock.
682 	 */
683 	blk_req_zone_write_unlock(rq);
684 
685 	prio = ioprio_class_to_prio[ioprio_class];
686 	dd_count(dd, inserted, prio);
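	/*
	 * Mark the request so that dd_finish_request() knows it was accounted
	 * here; requests that bypass the I/O scheduler never get this flag and
	 * are therefore not counted as completed (see dd_finish_request()).
	 */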
687 	rq->elv.priv[0] = (void *)(uintptr_t)1;
688 
689 	if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
690 		blk_mq_free_requests(&free);
691 		return;
692 	}
693 
694 	trace_block_rq_insert(rq);
695 
696 	per_prio = &dd->per_prio[prio];
697 	if (at_head) {
698 		list_add(&rq->queuelist, &per_prio->dispatch);
699 	} else {
700 		deadline_add_rq_rb(per_prio, rq);
701 
702 		if (rq_mergeable(rq)) {
703 			elv_rqhash_add(q, rq);
704 			if (!q->last_merge)
705 				q->last_merge = rq;
706 		}
707 
708 		/*
709 		 * set expire time and add to fifo list
710 		 */
711 		rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
712 		list_add_tail(&rq->queuelist, &per_prio->fifo_list[data_dir]);
713 	}
714 }
715 
716 /*
717  * Called from blk_mq_sched_insert_request() or blk_mq_sched_insert_requests().
718  */
719 static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
720 			       struct list_head *list, bool at_head)
721 {
722 	struct request_queue *q = hctx->queue;
723 	struct deadline_data *dd = q->elevator->elevator_data;
724 
725 	spin_lock(&dd->lock);
726 	while (!list_empty(list)) {
727 		struct request *rq;
728 
729 		rq = list_first_entry(list, struct request, queuelist);
730 		list_del_init(&rq->queuelist);
731 		dd_insert_request(hctx, rq, at_head);
732 	}
733 	spin_unlock(&dd->lock);
734 }
735 
736 /* Callback from inside blk_mq_rq_ctx_init(). */
737 static void dd_prepare_request(struct request *rq)
738 {
739 	rq->elv.priv[0] = NULL;
740 }
741 
742 /*
743  * Callback from inside blk_mq_free_request().
744  *
745  * For zoned block devices, write unlock the target zone of
746  * completed write requests. Do this while holding the zone lock
747  * spinlock so that the zone is never unlocked while deadline_fifo_request()
748  * or deadline_next_request() are executing. This function is called for
749  * all requests, whether or not these requests complete successfully.
750  *
751  * For a zoned block device, __dd_dispatch_request() may have stopped
752  * dispatching requests if all the queued requests are write requests directed
753  * at zones that are already locked due to on-going write requests. To ensure
754  * write request dispatch progress in this case, mark the queue as needing a
755  * restart to ensure that the queue is run again after completion of the
756  * request and zones being unlocked.
757  */
758 static void dd_finish_request(struct request *rq)
759 {
760 	struct request_queue *q = rq->q;
761 	struct deadline_data *dd = q->elevator->elevator_data;
762 	const u8 ioprio_class = dd_rq_ioclass(rq);
763 	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
764 	struct dd_per_prio *per_prio = &dd->per_prio[prio];
765 
766 	/*
767 	 * The block layer core may call dd_finish_request() without having
768 	 * called dd_insert_requests(). Hence only update statistics for
769 	 * requests for which dd_insert_requests() has been called. See also
770 	 * blk_mq_request_bypass_insert().
771 	 */
772 	if (rq->elv.priv[0])
773 		dd_count(dd, completed, prio);
774 
775 	if (blk_queue_is_zoned(q)) {
776 		unsigned long flags;
777 
778 		spin_lock_irqsave(&dd->zone_lock, flags);
779 		blk_req_zone_write_unlock(rq);
780 		if (!list_empty(&per_prio->fifo_list[DD_WRITE]))
781 			blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
782 		spin_unlock_irqrestore(&dd->zone_lock, flags);
783 	}
784 }
785 
786 static bool dd_has_work_for_prio(struct dd_per_prio *per_prio)
787 {
788 	return !list_empty_careful(&per_prio->dispatch) ||
789 		!list_empty_careful(&per_prio->fifo_list[DD_READ]) ||
790 		!list_empty_careful(&per_prio->fifo_list[DD_WRITE]);
791 }
792 
793 static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
794 {
795 	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
796 	enum dd_prio prio;
797 
798 	for (prio = 0; prio <= DD_PRIO_MAX; prio++)
799 		if (dd_has_work_for_prio(&dd->per_prio[prio]))
800 			return true;
801 
802 	return false;
803 }
804 
805 /*
806  * sysfs parts below
807  */
808 #define SHOW_INT(__FUNC, __VAR)						\
809 static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
810 {									\
811 	struct deadline_data *dd = e->elevator_data;			\
812 									\
813 	return sysfs_emit(page, "%d\n", __VAR);				\
814 }
815 #define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))
816 SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
817 SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
818 SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
819 SHOW_INT(deadline_front_merges_show, dd->front_merges);
820 SHOW_INT(deadline_async_depth_show, dd->async_depth);
821 SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
822 #undef SHOW_INT
823 #undef SHOW_JIFFIES
824 
825 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
826 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
827 {									\
828 	struct deadline_data *dd = e->elevator_data;			\
829 	int __data, __ret;						\
830 									\
831 	__ret = kstrtoint(page, 0, &__data);				\
832 	if (__ret < 0)							\
833 		return __ret;						\
834 	if (__data < (MIN))						\
835 		__data = (MIN);						\
836 	else if (__data > (MAX))					\
837 		__data = (MAX);						\
838 	*(__PTR) = __CONV(__data);					\
839 	return count;							\
840 }
841 #define STORE_INT(__FUNC, __PTR, MIN, MAX)				\
842 	STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, )
843 #define STORE_JIFFIES(__FUNC, __PTR, MIN, MAX)				\
844 	STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies)
845 STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX);
846 STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
847 STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
848 STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
849 STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
850 STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
851 #undef STORE_FUNCTION
852 #undef STORE_INT
853 #undef STORE_JIFFIES
854 
855 #define DD_ATTR(name) \
856 	__ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)
857 
858 static struct elv_fs_entry deadline_attrs[] = {
859 	DD_ATTR(read_expire),
860 	DD_ATTR(write_expire),
861 	DD_ATTR(writes_starved),
862 	DD_ATTR(front_merges),
863 	DD_ATTR(async_depth),
864 	DD_ATTR(fifo_batch),
865 	__ATTR_NULL
866 };
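/*
 * While mq-deadline is the active scheduler, the attributes above appear
 * under the queue's "iosched" sysfs directory, typically e.g.:
 *
 *   cat /sys/block/<disk>/queue/iosched/write_expire      (reports 5000 ms)
 *   echo 32 > /sys/block/<disk>/queue/iosched/fifo_batch
 *
 * <disk> is a placeholder; the exact path can vary with kernel and config.
 */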
867 
868 #ifdef CONFIG_BLK_DEBUG_FS
869 #define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name)		\
870 static void *deadline_##name##_fifo_start(struct seq_file *m,		\
871 					  loff_t *pos)			\
872 	__acquires(&dd->lock)						\
873 {									\
874 	struct request_queue *q = m->private;				\
875 	struct deadline_data *dd = q->elevator->elevator_data;		\
876 	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
877 									\
878 	spin_lock(&dd->lock);						\
879 	return seq_list_start(&per_prio->fifo_list[data_dir], *pos);	\
880 }									\
881 									\
882 static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,	\
883 					 loff_t *pos)			\
884 {									\
885 	struct request_queue *q = m->private;				\
886 	struct deadline_data *dd = q->elevator->elevator_data;		\
887 	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
888 									\
889 	return seq_list_next(v, &per_prio->fifo_list[data_dir], pos);	\
890 }									\
891 									\
892 static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)	\
893 	__releases(&dd->lock)						\
894 {									\
895 	struct request_queue *q = m->private;				\
896 	struct deadline_data *dd = q->elevator->elevator_data;		\
897 									\
898 	spin_unlock(&dd->lock);						\
899 }									\
900 									\
901 static const struct seq_operations deadline_##name##_fifo_seq_ops = {	\
902 	.start	= deadline_##name##_fifo_start,				\
903 	.next	= deadline_##name##_fifo_next,				\
904 	.stop	= deadline_##name##_fifo_stop,				\
905 	.show	= blk_mq_debugfs_rq_show,				\
906 };									\
907 									\
908 static int deadline_##name##_next_rq_show(void *data,			\
909 					  struct seq_file *m)		\
910 {									\
911 	struct request_queue *q = data;					\
912 	struct deadline_data *dd = q->elevator->elevator_data;		\
913 	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
914 	struct request *rq = per_prio->next_rq[data_dir];		\
915 									\
916 	if (rq)								\
917 		__blk_mq_debugfs_rq_show(m, rq);			\
918 	return 0;							\
919 }
920 
921 DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0);
922 DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0);
923 DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1);
924 DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1);
925 DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2);
926 DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2);
927 #undef DEADLINE_DEBUGFS_DDIR_ATTRS
928 
929 static int deadline_batching_show(void *data, struct seq_file *m)
930 {
931 	struct request_queue *q = data;
932 	struct deadline_data *dd = q->elevator->elevator_data;
933 
934 	seq_printf(m, "%u\n", dd->batching);
935 	return 0;
936 }
937 
938 static int deadline_starved_show(void *data, struct seq_file *m)
939 {
940 	struct request_queue *q = data;
941 	struct deadline_data *dd = q->elevator->elevator_data;
942 
943 	seq_printf(m, "%u\n", dd->starved);
944 	return 0;
945 }
946 
947 static int dd_async_depth_show(void *data, struct seq_file *m)
948 {
949 	struct request_queue *q = data;
950 	struct deadline_data *dd = q->elevator->elevator_data;
951 
952 	seq_printf(m, "%u\n", dd->async_depth);
953 	return 0;
954 }
955 
956 static int dd_queued_show(void *data, struct seq_file *m)
957 {
958 	struct request_queue *q = data;
959 	struct deadline_data *dd = q->elevator->elevator_data;
960 
961 	seq_printf(m, "%u %u %u\n", dd_queued(dd, DD_RT_PRIO),
962 		   dd_queued(dd, DD_BE_PRIO),
963 		   dd_queued(dd, DD_IDLE_PRIO));
964 	return 0;
965 }
966 
967 /* Number of requests owned by the block driver for a given priority. */
968 static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
969 {
970 	return dd_sum(dd, dispatched, prio) + dd_sum(dd, merged, prio)
971 		- dd_sum(dd, completed, prio);
972 }
973 
974 static int dd_owned_by_driver_show(void *data, struct seq_file *m)
975 {
976 	struct request_queue *q = data;
977 	struct deadline_data *dd = q->elevator->elevator_data;
978 
979 	seq_printf(m, "%u %u %u\n", dd_owned_by_driver(dd, DD_RT_PRIO),
980 		   dd_owned_by_driver(dd, DD_BE_PRIO),
981 		   dd_owned_by_driver(dd, DD_IDLE_PRIO));
982 	return 0;
983 }
984 
985 #define DEADLINE_DISPATCH_ATTR(prio)					\
986 static void *deadline_dispatch##prio##_start(struct seq_file *m,	\
987 					     loff_t *pos)		\
988 	__acquires(&dd->lock)						\
989 {									\
990 	struct request_queue *q = m->private;				\
991 	struct deadline_data *dd = q->elevator->elevator_data;		\
992 	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
993 									\
994 	spin_lock(&dd->lock);						\
995 	return seq_list_start(&per_prio->dispatch, *pos);		\
996 }									\
997 									\
998 static void *deadline_dispatch##prio##_next(struct seq_file *m,		\
999 					    void *v, loff_t *pos)	\
1000 {									\
1001 	struct request_queue *q = m->private;				\
1002 	struct deadline_data *dd = q->elevator->elevator_data;		\
1003 	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
1004 									\
1005 	return seq_list_next(v, &per_prio->dispatch, pos);		\
1006 }									\
1007 									\
1008 static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v)	\
1009 	__releases(&dd->lock)						\
1010 {									\
1011 	struct request_queue *q = m->private;				\
1012 	struct deadline_data *dd = q->elevator->elevator_data;		\
1013 									\
1014 	spin_unlock(&dd->lock);						\
1015 }									\
1016 									\
1017 static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \
1018 	.start	= deadline_dispatch##prio##_start,			\
1019 	.next	= deadline_dispatch##prio##_next,			\
1020 	.stop	= deadline_dispatch##prio##_stop,			\
1021 	.show	= blk_mq_debugfs_rq_show,				\
1022 }
1023 
1024 DEADLINE_DISPATCH_ATTR(0);
1025 DEADLINE_DISPATCH_ATTR(1);
1026 DEADLINE_DISPATCH_ATTR(2);
1027 #undef DEADLINE_DISPATCH_ATTR
1028 
1029 #define DEADLINE_QUEUE_DDIR_ATTRS(name)					\
1030 	{#name "_fifo_list", 0400,					\
1031 			.seq_ops = &deadline_##name##_fifo_seq_ops}
1032 #define DEADLINE_NEXT_RQ_ATTR(name)					\
1033 	{#name "_next_rq", 0400, deadline_##name##_next_rq_show}
1034 static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
1035 	DEADLINE_QUEUE_DDIR_ATTRS(read0),
1036 	DEADLINE_QUEUE_DDIR_ATTRS(write0),
1037 	DEADLINE_QUEUE_DDIR_ATTRS(read1),
1038 	DEADLINE_QUEUE_DDIR_ATTRS(write1),
1039 	DEADLINE_QUEUE_DDIR_ATTRS(read2),
1040 	DEADLINE_QUEUE_DDIR_ATTRS(write2),
1041 	DEADLINE_NEXT_RQ_ATTR(read0),
1042 	DEADLINE_NEXT_RQ_ATTR(write0),
1043 	DEADLINE_NEXT_RQ_ATTR(read1),
1044 	DEADLINE_NEXT_RQ_ATTR(write1),
1045 	DEADLINE_NEXT_RQ_ATTR(read2),
1046 	DEADLINE_NEXT_RQ_ATTR(write2),
1047 	{"batching", 0400, deadline_batching_show},
1048 	{"starved", 0400, deadline_starved_show},
1049 	{"async_depth", 0400, dd_async_depth_show},
1050 	{"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops},
1051 	{"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops},
1052 	{"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops},
1053 	{"owned_by_driver", 0400, dd_owned_by_driver_show},
1054 	{"queued", 0400, dd_queued_show},
1055 	{},
1056 };
1057 #undef DEADLINE_QUEUE_DDIR_ATTRS
1058 #endif
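/*
 * With CONFIG_BLK_DEBUG_FS enabled, the attributes above are typically
 * reachable under /sys/kernel/debug/block/<disk>/sched/, e.g. "queued",
 * "owned_by_driver", "batching" and the per-priority fifo/dispatch lists.
 */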
1059 
1060 static struct elevator_type mq_deadline = {
1061 	.ops = {
1062 		.depth_updated		= dd_depth_updated,
1063 		.limit_depth		= dd_limit_depth,
1064 		.insert_requests	= dd_insert_requests,
1065 		.dispatch_request	= dd_dispatch_request,
1066 		.prepare_request	= dd_prepare_request,
1067 		.finish_request		= dd_finish_request,
1068 		.next_request		= elv_rb_latter_request,
1069 		.former_request		= elv_rb_former_request,
1070 		.bio_merge		= dd_bio_merge,
1071 		.request_merge		= dd_request_merge,
1072 		.requests_merged	= dd_merged_requests,
1073 		.request_merged		= dd_request_merged,
1074 		.has_work		= dd_has_work,
1075 		.init_sched		= dd_init_sched,
1076 		.exit_sched		= dd_exit_sched,
1077 		.init_hctx		= dd_init_hctx,
1078 	},
1079 
1080 #ifdef CONFIG_BLK_DEBUG_FS
1081 	.queue_debugfs_attrs = deadline_queue_debugfs_attrs,
1082 #endif
1083 	.elevator_attrs = deadline_attrs,
1084 	.elevator_name = "mq-deadline",
1085 	.elevator_alias = "deadline",
1086 	.elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
1087 	.elevator_owner = THIS_MODULE,
1088 };
1089 MODULE_ALIAS("mq-deadline-iosched");
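/*
 * Selecting this scheduler at run time is typically done with, e.g.:
 *
 *   echo mq-deadline > /sys/block/<disk>/queue/scheduler
 *
 * The "deadline" elevator_alias above lets the legacy name select it as well.
 */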
1090 
1091 static int __init deadline_init(void)
1092 {
1093 	return elv_register(&mq_deadline);
1094 }
1095 
1096 static void __exit deadline_exit(void)
1097 {
1098 	elv_unregister(&mq_deadline);
1099 }
1100 
1101 module_init(deadline_init);
1102 module_exit(deadline_exit);
1103 
1104 MODULE_AUTHOR("Jens Axboe, Damien Le Moal and Bart Van Assche");
1105 MODULE_LICENSE("GPL");
1106 MODULE_DESCRIPTION("MQ deadline IO scheduler");
1107