xref: /openbmc/linux/block/mq-deadline.c (revision 7e24a55b2122746c2eef192296fc84624354f895)
10f783995STejun Heo // SPDX-License-Identifier: GPL-2.0
20f783995STejun Heo /*
30f783995STejun Heo  *  MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
40f783995STejun Heo  *  for the blk-mq scheduling framework
50f783995STejun Heo  *
60f783995STejun Heo  *  Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
70f783995STejun Heo  */
80f783995STejun Heo #include <linux/kernel.h>
90f783995STejun Heo #include <linux/fs.h>
100f783995STejun Heo #include <linux/blkdev.h>
110f783995STejun Heo #include <linux/bio.h>
120f783995STejun Heo #include <linux/module.h>
130f783995STejun Heo #include <linux/slab.h>
140f783995STejun Heo #include <linux/init.h>
150f783995STejun Heo #include <linux/compiler.h>
160f783995STejun Heo #include <linux/rbtree.h>
170f783995STejun Heo #include <linux/sbitmap.h>
180f783995STejun Heo 
190f783995STejun Heo #include <trace/events/block.h>
200f783995STejun Heo 
212e9bc346SChristoph Hellwig #include "elevator.h"
220f783995STejun Heo #include "blk.h"
230f783995STejun Heo #include "blk-mq.h"
240f783995STejun Heo #include "blk-mq-debugfs.h"
250f783995STejun Heo #include "blk-mq-sched.h"
260f783995STejun Heo 
270f783995STejun Heo /*
280f783995STejun Heo  * See Documentation/block/deadline-iosched.rst
290f783995STejun Heo  */
300f783995STejun Heo static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
310f783995STejun Heo static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
32322cff70SBart Van Assche /*
33322cff70SBart Van Assche  * Time after which to dispatch lower priority requests even if higher
34322cff70SBart Van Assche  * priority requests are pending.
35322cff70SBart Van Assche  */
36322cff70SBart Van Assche static const int prio_aging_expire = 10 * HZ;
370f783995STejun Heo static const int writes_starved = 2;    /* max times reads can starve a write */
380f783995STejun Heo static const int fifo_batch = 16;       /* # of sequential requests treated as one
390f783995STejun Heo 				     by the above parameters. For throughput. */
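/*
 * Example values, assuming the common HZ == 1000 configuration: reads expire
 * 500 ms after insertion, writes after 5 s, and lower-priority requests are
 * force-dispatched once they are 10 s old. A batch covers up to 16 sequential
 * requests, and reads are preferred over waiting writes at most twice in a
 * row before a write batch is started.
 */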
400f783995STejun Heo 
410f783995STejun Heo enum dd_data_dir {
420f783995STejun Heo 	DD_READ		= READ,
430f783995STejun Heo 	DD_WRITE	= WRITE,
440f783995STejun Heo };
450f783995STejun Heo 
460f783995STejun Heo enum { DD_DIR_COUNT = 2 };
470f783995STejun Heo 
480f783995STejun Heo enum dd_prio {
490f783995STejun Heo 	DD_RT_PRIO	= 0,
500f783995STejun Heo 	DD_BE_PRIO	= 1,
510f783995STejun Heo 	DD_IDLE_PRIO	= 2,
520f783995STejun Heo 	DD_PRIO_MAX	= 2,
530f783995STejun Heo };
540f783995STejun Heo 
550f783995STejun Heo enum { DD_PRIO_COUNT = 3 };
560f783995STejun Heo 
57bce0363eSBart Van Assche /*
58bce0363eSBart Van Assche  * I/O statistics per I/O priority. It is fine if these counters overflow.
59bce0363eSBart Van Assche  * What matters is that these counters are at least as wide as
60bce0363eSBart Van Assche  * log2(max_outstanding_requests).
61bce0363eSBart Van Assche  */
620f783995STejun Heo struct io_stats_per_prio {
63bce0363eSBart Van Assche 	uint32_t inserted;
64bce0363eSBart Van Assche 	uint32_t merged;
65bce0363eSBart Van Assche 	uint32_t dispatched;
66bce0363eSBart Van Assche 	atomic_t completed;
670f783995STejun Heo };
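/*
 * Note on the counter width requirement above: dd_queued() below computes
 * "inserted - completed" in unsigned 32-bit arithmetic, so the difference
 * stays correct even after a counter wraps. For example, inserted == 3 after
 * a wrap and completed == 0xfffffffe still yield 3 - 0xfffffffe == 5 pending
 * requests.
 */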
680f783995STejun Heo 
690f783995STejun Heo /*
700f783995STejun Heo  * Deadline scheduler data per I/O priority (enum dd_prio). Requests are
710f783995STejun Heo  * present on both sort_list[] and fifo_list[].
720f783995STejun Heo  */
730f783995STejun Heo struct dd_per_prio {
740f783995STejun Heo 	struct list_head dispatch;
750f783995STejun Heo 	struct rb_root sort_list[DD_DIR_COUNT];
760f783995STejun Heo 	struct list_head fifo_list[DD_DIR_COUNT];
7783c46ed6SBart Van Assche 	/* Position of the most recently dispatched request. */
7883c46ed6SBart Van Assche 	sector_t latest_pos[DD_DIR_COUNT];
79bce0363eSBart Van Assche 	struct io_stats_per_prio stats;
800f783995STejun Heo };
810f783995STejun Heo 
820f783995STejun Heo struct deadline_data {
830f783995STejun Heo 	/*
840f783995STejun Heo 	 * run time data
850f783995STejun Heo 	 */
860f783995STejun Heo 
870f783995STejun Heo 	struct dd_per_prio per_prio[DD_PRIO_COUNT];
880f783995STejun Heo 
890f783995STejun Heo 	/* Data direction of latest dispatched request. */
900f783995STejun Heo 	enum dd_data_dir last_dir;
910f783995STejun Heo 	unsigned int batching;		/* number of sequential requests made */
920f783995STejun Heo 	unsigned int starved;		/* times reads have starved writes */
930f783995STejun Heo 
940f783995STejun Heo 	/*
950f783995STejun Heo 	 * settings that change how the i/o scheduler behaves
960f783995STejun Heo 	 */
970f783995STejun Heo 	int fifo_expire[DD_DIR_COUNT];
980f783995STejun Heo 	int fifo_batch;
990f783995STejun Heo 	int writes_starved;
1000f783995STejun Heo 	int front_merges;
1010f783995STejun Heo 	u32 async_depth;
102322cff70SBart Van Assche 	int prio_aging_expire;
1030f783995STejun Heo 
1040f783995STejun Heo 	spinlock_t lock;
1050f783995STejun Heo 	spinlock_t zone_lock;
1060f783995STejun Heo };
1070f783995STejun Heo 
1080f783995STejun Heo /* Maps an I/O priority class to a deadline scheduler priority. */
1090f783995STejun Heo static const enum dd_prio ioprio_class_to_prio[] = {
1100f783995STejun Heo 	[IOPRIO_CLASS_NONE]	= DD_BE_PRIO,
1110f783995STejun Heo 	[IOPRIO_CLASS_RT]	= DD_RT_PRIO,
1120f783995STejun Heo 	[IOPRIO_CLASS_BE]	= DD_BE_PRIO,
1130f783995STejun Heo 	[IOPRIO_CLASS_IDLE]	= DD_IDLE_PRIO,
1140f783995STejun Heo };
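/*
 * For example, a request submitted with I/O priority class IOPRIO_CLASS_RT is
 * tracked in dd->per_prio[DD_RT_PRIO], while requests without an explicit
 * priority (IOPRIO_CLASS_NONE) share the best-effort lists in
 * dd->per_prio[DD_BE_PRIO].
 */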
1150f783995STejun Heo 
1160f783995STejun Heo static inline struct rb_root *
1170f783995STejun Heo deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)
1180f783995STejun Heo {
1190f783995STejun Heo 	return &per_prio->sort_list[rq_data_dir(rq)];
1200f783995STejun Heo }
1210f783995STejun Heo 
1220f783995STejun Heo /*
1230f783995STejun Heo  * Returns the I/O priority class (IOPRIO_CLASS_*) that has been assigned to a
1240f783995STejun Heo  * request.
1250f783995STejun Heo  */
1260f783995STejun Heo static u8 dd_rq_ioclass(struct request *rq)
1270f783995STejun Heo {
1280f783995STejun Heo 	return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
1290f783995STejun Heo }
1300f783995STejun Heo 
1310f783995STejun Heo /*
132015d02f4SDamien Le Moal  * get the request before `rq' in sector-sorted order
133015d02f4SDamien Le Moal  */
134015d02f4SDamien Le Moal static inline struct request *
135015d02f4SDamien Le Moal deadline_earlier_request(struct request *rq)
136015d02f4SDamien Le Moal {
137015d02f4SDamien Le Moal 	struct rb_node *node = rb_prev(&rq->rb_node);
138015d02f4SDamien Le Moal 
139015d02f4SDamien Le Moal 	if (node)
140015d02f4SDamien Le Moal 		return rb_entry_rq(node);
141015d02f4SDamien Le Moal 
142015d02f4SDamien Le Moal 	return NULL;
143015d02f4SDamien Le Moal }
144015d02f4SDamien Le Moal 
145015d02f4SDamien Le Moal /*
1460f783995STejun Heo  * get the request after `rq' in sector-sorted order
1470f783995STejun Heo  */
1480f783995STejun Heo static inline struct request *
1490f783995STejun Heo deadline_latter_request(struct request *rq)
1500f783995STejun Heo {
1510f783995STejun Heo 	struct rb_node *node = rb_next(&rq->rb_node);
1520f783995STejun Heo 
1530f783995STejun Heo 	if (node)
1540f783995STejun Heo 		return rb_entry_rq(node);
1550f783995STejun Heo 
1560f783995STejun Heo 	return NULL;
1570f783995STejun Heo }
1580f783995STejun Heo 
1590effb390SBart Van Assche /*
1600effb390SBart Van Assche  * Return the first request for which blk_rq_pos() >= @pos. For zoned devices,
1610effb390SBart Van Assche  * return the first request after the start of the zone containing @pos.
1620effb390SBart Van Assche  */
16383c46ed6SBart Van Assche static inline struct request *deadline_from_pos(struct dd_per_prio *per_prio,
16483c46ed6SBart Van Assche 				enum dd_data_dir data_dir, sector_t pos)
16583c46ed6SBart Van Assche {
16683c46ed6SBart Van Assche 	struct rb_node *node = per_prio->sort_list[data_dir].rb_node;
16783c46ed6SBart Van Assche 	struct request *rq, *res = NULL;
16883c46ed6SBart Van Assche 
1690effb390SBart Van Assche 	if (!node)
1700effb390SBart Van Assche 		return NULL;
1710effb390SBart Van Assche 
1720effb390SBart Van Assche 	rq = rb_entry_rq(node);
1730effb390SBart Van Assche 	/*
1740effb390SBart Van Assche 	 * A zoned write may have been requeued with a starting position that
1750effb390SBart Van Assche 	 * is below that of the most recently dispatched request. Hence, for
1760effb390SBart Van Assche 	 * zoned writes, start searching from the start of a zone.
1770effb390SBart Van Assche 	 */
1780effb390SBart Van Assche 	if (blk_rq_is_seq_zoned_write(rq))
179f673b4f5SBart Van Assche 		pos = round_down(pos, rq->q->limits.chunk_sectors);
1800effb390SBart Van Assche 
18183c46ed6SBart Van Assche 	while (node) {
18283c46ed6SBart Van Assche 		rq = rb_entry_rq(node);
18383c46ed6SBart Van Assche 		if (blk_rq_pos(rq) >= pos) {
18483c46ed6SBart Van Assche 			res = rq;
18583c46ed6SBart Van Assche 			node = node->rb_left;
18683c46ed6SBart Van Assche 		} else {
18783c46ed6SBart Van Assche 			node = node->rb_right;
18883c46ed6SBart Van Assche 		}
18983c46ed6SBart Van Assche 	}
19083c46ed6SBart Van Assche 	return res;
19183c46ed6SBart Van Assche }
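/*
 * Illustration with assumed sector numbers: if the tree holds requests
 * starting at sectors 64, 128 and 192, a lookup with pos == 100 returns the
 * request at sector 128. For sequential zoned writes, pos is first rounded
 * down to the start of its zone so that writes requeued earlier in the zone
 * are not skipped.
 */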
19283c46ed6SBart Van Assche 
1930f783995STejun Heo static void
1940f783995STejun Heo deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
1950f783995STejun Heo {
1960f783995STejun Heo 	struct rb_root *root = deadline_rb_root(per_prio, rq);
1970f783995STejun Heo 
1980f783995STejun Heo 	elv_rb_add(root, rq);
1990f783995STejun Heo }
2000f783995STejun Heo 
2010f783995STejun Heo static inline void
2020f783995STejun Heo deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
2030f783995STejun Heo {
2040f783995STejun Heo 	elv_rb_del(deadline_rb_root(per_prio, rq), rq);
2050f783995STejun Heo }
2060f783995STejun Heo 
2070f783995STejun Heo /*
2080f783995STejun Heo  * remove rq from rbtree and fifo.
2090f783995STejun Heo  */
2100f783995STejun Heo static void deadline_remove_request(struct request_queue *q,
2110f783995STejun Heo 				    struct dd_per_prio *per_prio,
2120f783995STejun Heo 				    struct request *rq)
2130f783995STejun Heo {
2140f783995STejun Heo 	list_del_init(&rq->queuelist);
2150f783995STejun Heo 
2160f783995STejun Heo 	/*
2170f783995STejun Heo 	 * We might not be on the rbtree, if we are doing an insert merge
2180f783995STejun Heo 	 */
2190f783995STejun Heo 	if (!RB_EMPTY_NODE(&rq->rb_node))
2200f783995STejun Heo 		deadline_del_rq_rb(per_prio, rq);
2210f783995STejun Heo 
2220f783995STejun Heo 	elv_rqhash_del(q, rq);
2230f783995STejun Heo 	if (q->last_merge == rq)
2240f783995STejun Heo 		q->last_merge = NULL;
2250f783995STejun Heo }
2260f783995STejun Heo 
2270f783995STejun Heo static void dd_request_merged(struct request_queue *q, struct request *req,
2280f783995STejun Heo 			      enum elv_merge type)
2290f783995STejun Heo {
2300f783995STejun Heo 	struct deadline_data *dd = q->elevator->elevator_data;
2310f783995STejun Heo 	const u8 ioprio_class = dd_rq_ioclass(req);
2320f783995STejun Heo 	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
2330f783995STejun Heo 	struct dd_per_prio *per_prio = &dd->per_prio[prio];
2340f783995STejun Heo 
2350f783995STejun Heo 	/*
2360f783995STejun Heo 	 * if the merge was a front merge, we need to reposition request
2370f783995STejun Heo 	 */
2380f783995STejun Heo 	if (type == ELEVATOR_FRONT_MERGE) {
2390f783995STejun Heo 		elv_rb_del(deadline_rb_root(per_prio, req), req);
2400f783995STejun Heo 		deadline_add_rq_rb(per_prio, req);
2410f783995STejun Heo 	}
2420f783995STejun Heo }
2430f783995STejun Heo 
2440f783995STejun Heo /*
2450f783995STejun Heo  * Callback function that is invoked after @next has been merged into @req.
2460f783995STejun Heo  */
2470f783995STejun Heo static void dd_merged_requests(struct request_queue *q, struct request *req,
2480f783995STejun Heo 			       struct request *next)
2490f783995STejun Heo {
2500f783995STejun Heo 	struct deadline_data *dd = q->elevator->elevator_data;
2510f783995STejun Heo 	const u8 ioprio_class = dd_rq_ioclass(next);
2520f783995STejun Heo 	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
2530f783995STejun Heo 
254bce0363eSBart Van Assche 	lockdep_assert_held(&dd->lock);
255bce0363eSBart Van Assche 
256bce0363eSBart Van Assche 	dd->per_prio[prio].stats.merged++;
2570f783995STejun Heo 
2580f783995STejun Heo 	/*
2590f783995STejun Heo 	 * if next expires before rq, assign its expire time to rq
2600f783995STejun Heo 	 * and move into next position (next will be deleted) in fifo
2610f783995STejun Heo 	 */
2620f783995STejun Heo 	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
2630f783995STejun Heo 		if (time_before((unsigned long)next->fifo_time,
2640f783995STejun Heo 				(unsigned long)req->fifo_time)) {
2650f783995STejun Heo 			list_move(&req->queuelist, &next->queuelist);
2660f783995STejun Heo 			req->fifo_time = next->fifo_time;
2670f783995STejun Heo 		}
2680f783995STejun Heo 	}
2690f783995STejun Heo 
2700f783995STejun Heo 	/*
2710f783995STejun Heo 	 * kill knowledge of next, this one is a goner
2720f783995STejun Heo 	 */
2730f783995STejun Heo 	deadline_remove_request(q, &dd->per_prio[prio], next);
2740f783995STejun Heo }
2750f783995STejun Heo 
2760f783995STejun Heo /*
2770f783995STejun Heo  * move an entry to dispatch queue
2780f783995STejun Heo  */
2790f783995STejun Heo static void
2800f783995STejun Heo deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
2810f783995STejun Heo 		      struct request *rq)
2820f783995STejun Heo {
2830f783995STejun Heo 	/*
2840f783995STejun Heo 	 * take it off the sort and fifo list
2850f783995STejun Heo 	 */
2860f783995STejun Heo 	deadline_remove_request(rq->q, per_prio, rq);
2870f783995STejun Heo }
2880f783995STejun Heo 
28932f64cadSBart Van Assche /* Number of requests queued for a given priority level. */
29032f64cadSBart Van Assche static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
29132f64cadSBart Van Assche {
292bce0363eSBart Van Assche 	const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;
293bce0363eSBart Van Assche 
294bce0363eSBart Van Assche 	lockdep_assert_held(&dd->lock);
295bce0363eSBart Van Assche 
296bce0363eSBart Van Assche 	return stats->inserted - atomic_read(&stats->completed);
29732f64cadSBart Van Assche }
29832f64cadSBart Van Assche 
2990f783995STejun Heo /*
300e0d85cdeSBart Van Assche  * deadline_check_fifo returns true if and only if there are expired requests
301e0d85cdeSBart Van Assche  * in the FIFO list. Requires !list_empty(&per_prio->fifo_list[data_dir]).
3020f783995STejun Heo  */
303e0d85cdeSBart Van Assche static inline bool deadline_check_fifo(struct dd_per_prio *per_prio,
3040f783995STejun Heo 				       enum dd_data_dir data_dir)
3050f783995STejun Heo {
3060f783995STejun Heo 	struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
3070f783995STejun Heo 
308e0d85cdeSBart Van Assche 	return time_is_before_eq_jiffies((unsigned long)rq->fifo_time);
3090f783995STejun Heo }
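/*
 * Example: a read inserted at jiffies == J is assigned fifo_time == J +
 * fifo_expire[DD_READ]; once jiffies reaches that value, the check above
 * reports the FIFO head as expired and dispatch restarts from it.
 */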
3100f783995STejun Heo 
3110f783995STejun Heo /*
312015d02f4SDamien Le Moal  * Check if rq has a sequential request preceding it.
313015d02f4SDamien Le Moal  */
3143692fec8SDamien Le Moal static bool deadline_is_seq_write(struct deadline_data *dd, struct request *rq)
315015d02f4SDamien Le Moal {
316015d02f4SDamien Le Moal 	struct request *prev = deadline_earlier_request(rq);
317015d02f4SDamien Le Moal 
318015d02f4SDamien Le Moal 	if (!prev)
319015d02f4SDamien Le Moal 		return false;
320015d02f4SDamien Le Moal 
321015d02f4SDamien Le Moal 	return blk_rq_pos(prev) + blk_rq_sectors(prev) == blk_rq_pos(rq);
322015d02f4SDamien Le Moal }
323015d02f4SDamien Le Moal 
324015d02f4SDamien Le Moal /*
325015d02f4SDamien Le Moal  * Skip all write requests that are sequential from @rq, even if we cross
326015d02f4SDamien Le Moal  * a zone boundary.
327015d02f4SDamien Le Moal  */
328015d02f4SDamien Le Moal static struct request *deadline_skip_seq_writes(struct deadline_data *dd,
329015d02f4SDamien Le Moal 						struct request *rq)
330015d02f4SDamien Le Moal {
331015d02f4SDamien Le Moal 	sector_t pos = blk_rq_pos(rq);
332015d02f4SDamien Le Moal 
3333b463cbeSBart Van Assche 	do {
3343b463cbeSBart Van Assche 		pos += blk_rq_sectors(rq);
335015d02f4SDamien Le Moal 		rq = deadline_latter_request(rq);
3363b463cbeSBart Van Assche 	} while (rq && blk_rq_pos(rq) == pos);
337015d02f4SDamien Le Moal 
338015d02f4SDamien Le Moal 	return rq;
339015d02f4SDamien Le Moal }
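/*
 * Example with assumed positions: for queued writes covering sectors
 * 1000-1007, 1008-1015 and 1016-1023, starting from the first request the
 * loop above walks past all three contiguous writes and returns the next
 * request whose start sector is not 1024, or NULL if there is none.
 */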
340015d02f4SDamien Le Moal 
341015d02f4SDamien Le Moal /*
3420f783995STejun Heo  * For the specified data direction, return the next request to
3430f783995STejun Heo  * dispatch using arrival ordered lists.
3440f783995STejun Heo  */
3450f783995STejun Heo static struct request *
3460f783995STejun Heo deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
3470f783995STejun Heo 		      enum dd_data_dir data_dir)
3480f783995STejun Heo {
349a036e698SBart Van Assche 	struct request *rq, *rb_rq, *next;
3500f783995STejun Heo 	unsigned long flags;
3510f783995STejun Heo 
3520f783995STejun Heo 	if (list_empty(&per_prio->fifo_list[data_dir]))
3530f783995STejun Heo 		return NULL;
3540f783995STejun Heo 
3550f783995STejun Heo 	rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
3560f783995STejun Heo 	if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
3570f783995STejun Heo 		return rq;
3580f783995STejun Heo 
3590f783995STejun Heo 	/*
3600f783995STejun Heo 	 * Look for a write request that can be dispatched, that is one with
361015d02f4SDamien Le Moal 	 * an unlocked target zone. For some HDDs, breaking a sequential
362015d02f4SDamien Le Moal 	 * write stream can lead to lower throughput, so make sure to preserve
363015d02f4SDamien Le Moal 	 * sequential write streams, even if that stream crosses into the next
364015d02f4SDamien Le Moal 	 * zones and these zones are unlocked.
3650f783995STejun Heo 	 */
3660f783995STejun Heo 	spin_lock_irqsave(&dd->zone_lock, flags);
367a036e698SBart Van Assche 	list_for_each_entry_safe(rq, next, &per_prio->fifo_list[DD_WRITE],
368a036e698SBart Van Assche 				 queuelist) {
369a036e698SBart Van Assche 		/* Check whether a prior request exists for the same zone. */
370a036e698SBart Van Assche 		rb_rq = deadline_from_pos(per_prio, data_dir, blk_rq_pos(rq));
371a036e698SBart Van Assche 		if (rb_rq && blk_rq_pos(rb_rq) < blk_rq_pos(rq))
372a036e698SBart Van Assche 			rq = rb_rq;
373015d02f4SDamien Le Moal 		if (blk_req_can_dispatch_to_zone(rq) &&
374015d02f4SDamien Le Moal 		    (blk_queue_nonrot(rq->q) ||
3753692fec8SDamien Le Moal 		     !deadline_is_seq_write(dd, rq)))
3760f783995STejun Heo 			goto out;
3770f783995STejun Heo 	}
3780f783995STejun Heo 	rq = NULL;
3790f783995STejun Heo out:
3800f783995STejun Heo 	spin_unlock_irqrestore(&dd->zone_lock, flags);
3810f783995STejun Heo 
3820f783995STejun Heo 	return rq;
3830f783995STejun Heo }
3840f783995STejun Heo 
3850f783995STejun Heo /*
3860f783995STejun Heo  * For the specified data direction, return the next request to
3870f783995STejun Heo  * dispatch using sector position sorted lists.
3880f783995STejun Heo  */
3890f783995STejun Heo static struct request *
3900f783995STejun Heo deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
3910f783995STejun Heo 		      enum dd_data_dir data_dir)
3920f783995STejun Heo {
3930f783995STejun Heo 	struct request *rq;
3940f783995STejun Heo 	unsigned long flags;
3950f783995STejun Heo 
39683c46ed6SBart Van Assche 	rq = deadline_from_pos(per_prio, data_dir,
39783c46ed6SBart Van Assche 			       per_prio->latest_pos[data_dir]);
3980f783995STejun Heo 	if (!rq)
3990f783995STejun Heo 		return NULL;
4000f783995STejun Heo 
4010f783995STejun Heo 	if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
4020f783995STejun Heo 		return rq;
4030f783995STejun Heo 
4040f783995STejun Heo 	/*
4050f783995STejun Heo 	 * Look for a write request that can be dispatched, that is one with
406015d02f4SDamien Le Moal 	 * an unlocked target zone. For some HDDs, breaking a sequential
407015d02f4SDamien Le Moal 	 * write stream can lead to lower throughput, so make sure to preserve
408015d02f4SDamien Le Moal 	 * sequential write streams, even if that stream crosses into the next
409015d02f4SDamien Le Moal 	 * zones and these zones are unlocked.
4100f783995STejun Heo 	 */
4110f783995STejun Heo 	spin_lock_irqsave(&dd->zone_lock, flags);
4120f783995STejun Heo 	while (rq) {
4130f783995STejun Heo 		if (blk_req_can_dispatch_to_zone(rq))
4140f783995STejun Heo 			break;
415015d02f4SDamien Le Moal 		if (blk_queue_nonrot(rq->q))
4160f783995STejun Heo 			rq = deadline_latter_request(rq);
417015d02f4SDamien Le Moal 		else
418015d02f4SDamien Le Moal 			rq = deadline_skip_seq_writes(dd, rq);
4190f783995STejun Heo 	}
4200f783995STejun Heo 	spin_unlock_irqrestore(&dd->zone_lock, flags);
4210f783995STejun Heo 
4220f783995STejun Heo 	return rq;
4230f783995STejun Heo }
4240f783995STejun Heo 
4250f783995STejun Heo /*
426322cff70SBart Van Assche  * Returns true if and only if @rq started after @latest_start where
427322cff70SBart Van Assche  * @latest_start is in jiffies.
428322cff70SBart Van Assche  */
429322cff70SBart Van Assche static bool started_after(struct deadline_data *dd, struct request *rq,
430322cff70SBart Van Assche 			  unsigned long latest_start)
431322cff70SBart Van Assche {
432322cff70SBart Van Assche 	unsigned long start_time = (unsigned long)rq->fifo_time;
433322cff70SBart Van Assche 
434322cff70SBart Van Assche 	start_time -= dd->fifo_expire[rq_data_dir(rq)];
435322cff70SBart Van Assche 
436322cff70SBart Van Assche 	return time_after(start_time, latest_start);
437322cff70SBart Van Assche }
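/*
 * Note: for requests on the FIFO lists, rq->fifo_time is set at insertion
 * time to "jiffies + fifo_expire", so subtracting fifo_expire above recovers
 * the insertion time. The comparison therefore tests whether the request was
 * inserted after @latest_start.
 */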
438322cff70SBart Van Assche 
439322cff70SBart Van Assche /*
4400f783995STejun Heo  * deadline_dispatch_requests selects the best request according to
441322cff70SBart Van Assche  * read/write expire, fifo_batch, etc and with a start time <= @latest_start.
4420f783995STejun Heo  */
4430f783995STejun Heo static struct request *__dd_dispatch_request(struct deadline_data *dd,
444322cff70SBart Van Assche 					     struct dd_per_prio *per_prio,
445322cff70SBart Van Assche 					     unsigned long latest_start)
4460f783995STejun Heo {
4470f783995STejun Heo 	struct request *rq, *next_rq;
4480f783995STejun Heo 	enum dd_data_dir data_dir;
4490f783995STejun Heo 	enum dd_prio prio;
4500f783995STejun Heo 	u8 ioprio_class;
4510f783995STejun Heo 
4520f783995STejun Heo 	lockdep_assert_held(&dd->lock);
4530f783995STejun Heo 
4540f783995STejun Heo 	if (!list_empty(&per_prio->dispatch)) {
4550f783995STejun Heo 		rq = list_first_entry(&per_prio->dispatch, struct request,
4560f783995STejun Heo 				      queuelist);
457322cff70SBart Van Assche 		if (started_after(dd, rq, latest_start))
458322cff70SBart Van Assche 			return NULL;
4590f783995STejun Heo 		list_del_init(&rq->queuelist);
46083c46ed6SBart Van Assche 		data_dir = rq_data_dir(rq);
4610f783995STejun Heo 		goto done;
4620f783995STejun Heo 	}
4630f783995STejun Heo 
4640f783995STejun Heo 	/*
4650f783995STejun Heo 	 * batches are currently reads XOR writes
4660f783995STejun Heo 	 */
4670f783995STejun Heo 	rq = deadline_next_request(dd, per_prio, dd->last_dir);
46883c46ed6SBart Van Assche 	if (rq && dd->batching < dd->fifo_batch) {
46945b46b6fSBart Van Assche 		/* we have a next request and are still entitled to batch */
47083c46ed6SBart Van Assche 		data_dir = rq_data_dir(rq);
4710f783995STejun Heo 		goto dispatch_request;
47283c46ed6SBart Van Assche 	}
4730f783995STejun Heo 
4740f783995STejun Heo 	/*
4750f783995STejun Heo 	 * at this point we are not running a batch. select the appropriate
4760f783995STejun Heo 	 * data direction (read / write)
4770f783995STejun Heo 	 */
4780f783995STejun Heo 
4790f783995STejun Heo 	if (!list_empty(&per_prio->fifo_list[DD_READ])) {
4800f783995STejun Heo 		BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_READ]));
4810f783995STejun Heo 
4820f783995STejun Heo 		if (deadline_fifo_request(dd, per_prio, DD_WRITE) &&
4830f783995STejun Heo 		    (dd->starved++ >= dd->writes_starved))
4840f783995STejun Heo 			goto dispatch_writes;
4850f783995STejun Heo 
4860f783995STejun Heo 		data_dir = DD_READ;
4870f783995STejun Heo 
4880f783995STejun Heo 		goto dispatch_find_request;
4890f783995STejun Heo 	}
4900f783995STejun Heo 
4910f783995STejun Heo 	/*
4920f783995STejun Heo 	 * either there are no reads, or writes have been starved
4930f783995STejun Heo 	 */
4940f783995STejun Heo 
4950f783995STejun Heo 	if (!list_empty(&per_prio->fifo_list[DD_WRITE])) {
4960f783995STejun Heo dispatch_writes:
4970f783995STejun Heo 		BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_WRITE]));
4980f783995STejun Heo 
4990f783995STejun Heo 		dd->starved = 0;
5000f783995STejun Heo 
5010f783995STejun Heo 		data_dir = DD_WRITE;
5020f783995STejun Heo 
5030f783995STejun Heo 		goto dispatch_find_request;
5040f783995STejun Heo 	}
5050f783995STejun Heo 
5060f783995STejun Heo 	return NULL;
5070f783995STejun Heo 
5080f783995STejun Heo dispatch_find_request:
5090f783995STejun Heo 	/*
5100f783995STejun Heo 	 * we are not running a batch, find best request for selected data_dir
5110f783995STejun Heo 	 */
5120f783995STejun Heo 	next_rq = deadline_next_request(dd, per_prio, data_dir);
5130f783995STejun Heo 	if (deadline_check_fifo(per_prio, data_dir) || !next_rq) {
5140f783995STejun Heo 		/*
5150f783995STejun Heo 		 * A deadline has expired, the last request was in the other
5160f783995STejun Heo 		 * direction, or we have run out of higher-sectored requests.
5170f783995STejun Heo 		 * Start again from the request with the earliest expiry time.
5180f783995STejun Heo 		 */
5190f783995STejun Heo 		rq = deadline_fifo_request(dd, per_prio, data_dir);
5200f783995STejun Heo 	} else {
5210f783995STejun Heo 		/*
5220f783995STejun Heo 		 * The last req was the same dir and we have a next request in
5230f783995STejun Heo 		 * sort order. No expired requests so continue on from here.
5240f783995STejun Heo 		 */
5250f783995STejun Heo 		rq = next_rq;
5260f783995STejun Heo 	}
5270f783995STejun Heo 
5280f783995STejun Heo 	/*
5290f783995STejun Heo 	 * For a zoned block device, if we only have writes queued and none of
5300f783995STejun Heo 	 * them can be dispatched, rq will be NULL.
5310f783995STejun Heo 	 */
5320f783995STejun Heo 	if (!rq)
5330f783995STejun Heo 		return NULL;
5340f783995STejun Heo 
5350f783995STejun Heo 	dd->last_dir = data_dir;
5360f783995STejun Heo 	dd->batching = 0;
5370f783995STejun Heo 
5380f783995STejun Heo dispatch_request:
539322cff70SBart Van Assche 	if (started_after(dd, rq, latest_start))
540322cff70SBart Van Assche 		return NULL;
541322cff70SBart Van Assche 
5420f783995STejun Heo 	/*
5430f783995STejun Heo 	 * rq is the selected appropriate request.
5440f783995STejun Heo 	 */
5450f783995STejun Heo 	dd->batching++;
5460f783995STejun Heo 	deadline_move_request(dd, per_prio, rq);
5470f783995STejun Heo done:
5480f783995STejun Heo 	ioprio_class = dd_rq_ioclass(rq);
5490f783995STejun Heo 	prio = ioprio_class_to_prio[ioprio_class];
55083c46ed6SBart Van Assche 	dd->per_prio[prio].latest_pos[data_dir] = blk_rq_pos(rq);
551bce0363eSBart Van Assche 	dd->per_prio[prio].stats.dispatched++;
5520f783995STejun Heo 	/*
5530f783995STejun Heo 	 * If the request needs its target zone locked, do it.
5540f783995STejun Heo 	 */
5550f783995STejun Heo 	blk_req_zone_write_lock(rq);
5560f783995STejun Heo 	rq->rq_flags |= RQF_STARTED;
5570f783995STejun Heo 	return rq;
5580f783995STejun Heo }
5590f783995STejun Heo 
5600f783995STejun Heo /*
561322cff70SBart Van Assche  * Check whether there are any requests with priority other than DD_RT_PRIO
562322cff70SBart Van Assche  * that were inserted more than prio_aging_expire jiffies ago.
563322cff70SBart Van Assche  */
564322cff70SBart Van Assche static struct request *dd_dispatch_prio_aged_requests(struct deadline_data *dd,
565322cff70SBart Van Assche 						      unsigned long now)
566322cff70SBart Van Assche {
567322cff70SBart Van Assche 	struct request *rq;
568322cff70SBart Van Assche 	enum dd_prio prio;
569322cff70SBart Van Assche 	int prio_cnt;
570322cff70SBart Van Assche 
571322cff70SBart Van Assche 	lockdep_assert_held(&dd->lock);
572322cff70SBart Van Assche 
573322cff70SBart Van Assche 	prio_cnt = !!dd_queued(dd, DD_RT_PRIO) + !!dd_queued(dd, DD_BE_PRIO) +
574322cff70SBart Van Assche 		   !!dd_queued(dd, DD_IDLE_PRIO);
575322cff70SBart Van Assche 	if (prio_cnt < 2)
576322cff70SBart Van Assche 		return NULL;
577322cff70SBart Van Assche 
578322cff70SBart Van Assche 	for (prio = DD_BE_PRIO; prio <= DD_PRIO_MAX; prio++) {
579322cff70SBart Van Assche 		rq = __dd_dispatch_request(dd, &dd->per_prio[prio],
580322cff70SBart Van Assche 					   now - dd->prio_aging_expire);
581322cff70SBart Van Assche 		if (rq)
582322cff70SBart Van Assche 			return rq;
583322cff70SBart Van Assche 	}
584322cff70SBart Van Assche 
585322cff70SBart Van Assche 	return NULL;
586322cff70SBart Van Assche }
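/*
 * Example, assuming the default prio_aging_expire of 10 seconds: if both
 * real-time and best-effort requests are queued, a best-effort request that
 * was inserted more than 10 seconds ago is dispatched here even though
 * higher-priority real-time requests are still pending.
 */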
587322cff70SBart Van Assche 
588322cff70SBart Van Assche /*
5890f783995STejun Heo  * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests().
5900f783995STejun Heo  *
5910f783995STejun Heo  * One confusing aspect here is that we get called for a specific
5920f783995STejun Heo  * hardware queue, but we may return a request that is for a
5930f783995STejun Heo  * different hardware queue. This is because mq-deadline has shared
5940f783995STejun Heo  * state for all hardware queues, in terms of sorting, FIFOs, etc.
5950f783995STejun Heo  */
5960f783995STejun Heo static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
5970f783995STejun Heo {
5980f783995STejun Heo 	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
599322cff70SBart Van Assche 	const unsigned long now = jiffies;
6007b05bf77SJens Axboe 	struct request *rq;
6010f783995STejun Heo 	enum dd_prio prio;
6020f783995STejun Heo 
6030f783995STejun Heo 	spin_lock(&dd->lock);
604322cff70SBart Van Assche 	rq = dd_dispatch_prio_aged_requests(dd, now);
6057b05bf77SJens Axboe 	if (rq)
606322cff70SBart Van Assche 		goto unlock;
607322cff70SBart Van Assche 
608322cff70SBart Van Assche 	/*
609322cff70SBart Van Assche 	 * Next, dispatch requests in priority order. Ignore lower priority
610322cff70SBart Van Assche 	 * requests if any higher priority requests are pending.
611322cff70SBart Van Assche 	 */
612322cff70SBart Van Assche 	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
613322cff70SBart Van Assche 		rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now);
614322cff70SBart Van Assche 		if (rq || dd_queued(dd, prio))
6150f783995STejun Heo 			break;
6160f783995STejun Heo 	}
617322cff70SBart Van Assche 
618322cff70SBart Van Assche unlock:
6190f783995STejun Heo 	spin_unlock(&dd->lock);
6200f783995STejun Heo 
6210f783995STejun Heo 	return rq;
6220f783995STejun Heo }
6230f783995STejun Heo 
6240f783995STejun Heo /*
625*2a6849a2SBart Van Assche  * 'depth' is a number in the range 1..INT_MAX representing a number of
626*2a6849a2SBart Van Assche  * requests. Scale it with a factor (1 << bt->sb.shift) / q->nr_requests since
627*2a6849a2SBart Van Assche  * 1..(1 << bt->sb.shift) is the range expected by sbitmap_get_shallow().
628*2a6849a2SBart Van Assche  * Values larger than q->nr_requests have the same effect as q->nr_requests.
629*2a6849a2SBart Van Assche  */
630*2a6849a2SBart Van Assche static int dd_to_word_depth(struct blk_mq_hw_ctx *hctx, unsigned int qdepth)
631*2a6849a2SBart Van Assche {
632*2a6849a2SBart Van Assche 	struct sbitmap_queue *bt = &hctx->sched_tags->bitmap_tags;
633*2a6849a2SBart Van Assche 	const unsigned int nrr = hctx->queue->nr_requests;
634*2a6849a2SBart Van Assche 
635*2a6849a2SBart Van Assche 	return ((qdepth << bt->sb.shift) + nrr - 1) / nrr;
636*2a6849a2SBart Van Assche }
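/*
 * Worked example with assumed values: for q->nr_requests == 256 and 64-bit
 * sbitmap words (bt->sb.shift == 6), qdepth == 192 maps to
 * ((192 << 6) + 255) / 256 == 48, i.e. asynchronous requests may use up to 48
 * of the 64 bits in each sbitmap word.
 */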
637*2a6849a2SBart Van Assche 
638*2a6849a2SBart Van Assche /*
6390f783995STejun Heo  * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
6400f783995STejun Heo  * function is used by __blk_mq_get_tag().
6410f783995STejun Heo  */
642f8359efeSBart Van Assche static void dd_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
6430f783995STejun Heo {
6440f783995STejun Heo 	struct deadline_data *dd = data->q->elevator->elevator_data;
6450f783995STejun Heo 
6460f783995STejun Heo 	/* Do not throttle synchronous reads. */
647f8359efeSBart Van Assche 	if (op_is_sync(opf) && !op_is_write(opf))
6480f783995STejun Heo 		return;
6490f783995STejun Heo 
6500f783995STejun Heo 	/*
6510f783995STejun Heo 	 * Throttle asynchronous requests and writes such that these requests
6520f783995STejun Heo 	 * do not block the allocation of synchronous requests.
6530f783995STejun Heo 	 */
654*2a6849a2SBart Van Assche 	data->shallow_depth = dd_to_word_depth(data->hctx, dd->async_depth);
6550f783995STejun Heo }
6560f783995STejun Heo 
6570f783995STejun Heo /* Called by blk_mq_update_nr_requests(). */
6580f783995STejun Heo static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
6590f783995STejun Heo {
6600f783995STejun Heo 	struct request_queue *q = hctx->queue;
6610f783995STejun Heo 	struct deadline_data *dd = q->elevator->elevator_data;
6620f783995STejun Heo 	struct blk_mq_tags *tags = hctx->sched_tags;
6630f783995STejun Heo 
664*2a6849a2SBart Van Assche 	dd->async_depth = q->nr_requests;
6650f783995STejun Heo 
666*2a6849a2SBart Van Assche 	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, 1);
6670f783995STejun Heo }
6680f783995STejun Heo 
6690f783995STejun Heo /* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
6700f783995STejun Heo static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
6710f783995STejun Heo {
6720f783995STejun Heo 	dd_depth_updated(hctx);
6730f783995STejun Heo 	return 0;
6740f783995STejun Heo }
6750f783995STejun Heo 
6760f783995STejun Heo static void dd_exit_sched(struct elevator_queue *e)
6770f783995STejun Heo {
6780f783995STejun Heo 	struct deadline_data *dd = e->elevator_data;
6790f783995STejun Heo 	enum dd_prio prio;
6800f783995STejun Heo 
6810f783995STejun Heo 	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
6820f783995STejun Heo 		struct dd_per_prio *per_prio = &dd->per_prio[prio];
683bce0363eSBart Van Assche 		const struct io_stats_per_prio *stats = &per_prio->stats;
684bce0363eSBart Van Assche 		uint32_t queued;
6850f783995STejun Heo 
6860f783995STejun Heo 		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
6870f783995STejun Heo 		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));
6880f783995STejun Heo 
689bce0363eSBart Van Assche 		spin_lock(&dd->lock);
690bce0363eSBart Van Assche 		queued = dd_queued(dd, prio);
691bce0363eSBart Van Assche 		spin_unlock(&dd->lock);
692bce0363eSBart Van Assche 
693bce0363eSBart Van Assche 		WARN_ONCE(queued != 0,
694bce0363eSBart Van Assche 			  "statistics for priority %d: i %u m %u d %u c %u\n",
695bce0363eSBart Van Assche 			  prio, stats->inserted, stats->merged,
696bce0363eSBart Van Assche 			  stats->dispatched, atomic_read(&stats->completed));
697bce0363eSBart Van Assche 	}
6980f783995STejun Heo 
6990f783995STejun Heo 	kfree(dd);
7000f783995STejun Heo }
7010f783995STejun Heo 
7020f783995STejun Heo /*
7030f783995STejun Heo  * initialize elevator private data (deadline_data).
7040f783995STejun Heo  */
7050f783995STejun Heo static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
7060f783995STejun Heo {
7070f783995STejun Heo 	struct deadline_data *dd;
7080f783995STejun Heo 	struct elevator_queue *eq;
7090f783995STejun Heo 	enum dd_prio prio;
7100f783995STejun Heo 	int ret = -ENOMEM;
7110f783995STejun Heo 
7120f783995STejun Heo 	eq = elevator_alloc(q, e);
7130f783995STejun Heo 	if (!eq)
7140f783995STejun Heo 		return ret;
7150f783995STejun Heo 
7160f783995STejun Heo 	dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
7170f783995STejun Heo 	if (!dd)
7180f783995STejun Heo 		goto put_eq;
7190f783995STejun Heo 
7200f783995STejun Heo 	eq->elevator_data = dd;
7210f783995STejun Heo 
7220f783995STejun Heo 	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
7230f783995STejun Heo 		struct dd_per_prio *per_prio = &dd->per_prio[prio];
7240f783995STejun Heo 
7250f783995STejun Heo 		INIT_LIST_HEAD(&per_prio->dispatch);
7260f783995STejun Heo 		INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]);
7270f783995STejun Heo 		INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]);
7280f783995STejun Heo 		per_prio->sort_list[DD_READ] = RB_ROOT;
7290f783995STejun Heo 		per_prio->sort_list[DD_WRITE] = RB_ROOT;
7300f783995STejun Heo 	}
7310f783995STejun Heo 	dd->fifo_expire[DD_READ] = read_expire;
7320f783995STejun Heo 	dd->fifo_expire[DD_WRITE] = write_expire;
7330f783995STejun Heo 	dd->writes_starved = writes_starved;
7340f783995STejun Heo 	dd->front_merges = 1;
7350f783995STejun Heo 	dd->last_dir = DD_WRITE;
7360f783995STejun Heo 	dd->fifo_batch = fifo_batch;
737322cff70SBart Van Assche 	dd->prio_aging_expire = prio_aging_expire;
7380f783995STejun Heo 	spin_lock_init(&dd->lock);
7390f783995STejun Heo 	spin_lock_init(&dd->zone_lock);
7400f783995STejun Heo 
7414d337cebSMing Lei 	/* We dispatch requests queue-wide instead of per hw queue */
7424d337cebSMing Lei 	blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);
7434d337cebSMing Lei 
7440f783995STejun Heo 	q->elevator = eq;
7450f783995STejun Heo 	return 0;
7460f783995STejun Heo 
7470f783995STejun Heo put_eq:
7480f783995STejun Heo 	kobject_put(&eq->kobj);
7490f783995STejun Heo 	return ret;
7500f783995STejun Heo }
7510f783995STejun Heo 
7520f783995STejun Heo /*
7530f783995STejun Heo  * Try to merge @bio into an existing request. If @bio has been merged into
7540f783995STejun Heo  * an existing request, store the pointer to that request into *@rq.
7550f783995STejun Heo  */
7560f783995STejun Heo static int dd_request_merge(struct request_queue *q, struct request **rq,
7570f783995STejun Heo 			    struct bio *bio)
7580f783995STejun Heo {
7590f783995STejun Heo 	struct deadline_data *dd = q->elevator->elevator_data;
7600f783995STejun Heo 	const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
7610f783995STejun Heo 	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
7620f783995STejun Heo 	struct dd_per_prio *per_prio = &dd->per_prio[prio];
7630f783995STejun Heo 	sector_t sector = bio_end_sector(bio);
7640f783995STejun Heo 	struct request *__rq;
7650f783995STejun Heo 
7660f783995STejun Heo 	if (!dd->front_merges)
7670f783995STejun Heo 		return ELEVATOR_NO_MERGE;
7680f783995STejun Heo 
7690f783995STejun Heo 	__rq = elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector);
7700f783995STejun Heo 	if (__rq) {
7710f783995STejun Heo 		BUG_ON(sector != blk_rq_pos(__rq));
7720f783995STejun Heo 
7730f783995STejun Heo 		if (elv_bio_merge_ok(__rq, bio)) {
7740f783995STejun Heo 			*rq = __rq;
77567936911SLinus Torvalds 			if (blk_discard_mergable(__rq))
77667936911SLinus Torvalds 				return ELEVATOR_DISCARD_MERGE;
7770f783995STejun Heo 			return ELEVATOR_FRONT_MERGE;
7780f783995STejun Heo 		}
7790f783995STejun Heo 	}
7800f783995STejun Heo 
7810f783995STejun Heo 	return ELEVATOR_NO_MERGE;
7820f783995STejun Heo }
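/*
 * Example: a bio covering sectors 100..107 (bio_end_sector() == 108) can be
 * front-merged into a queued request that starts at sector 108, which
 * elv_rb_find() locates in the sort tree for the bio's data direction.
 */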
7830f783995STejun Heo 
7840f783995STejun Heo /*
7850f783995STejun Heo  * Attempt to merge a bio into an existing request. This function is called
7860f783995STejun Heo  * before @bio is associated with a request.
7870f783995STejun Heo  */
7880f783995STejun Heo static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
7890f783995STejun Heo 		unsigned int nr_segs)
7900f783995STejun Heo {
7910f783995STejun Heo 	struct deadline_data *dd = q->elevator->elevator_data;
7920f783995STejun Heo 	struct request *free = NULL;
7930f783995STejun Heo 	bool ret;
7940f783995STejun Heo 
7950f783995STejun Heo 	spin_lock(&dd->lock);
7960f783995STejun Heo 	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
7970f783995STejun Heo 	spin_unlock(&dd->lock);
7980f783995STejun Heo 
7990f783995STejun Heo 	if (free)
8000f783995STejun Heo 		blk_mq_free_request(free);
8010f783995STejun Heo 
8020f783995STejun Heo 	return ret;
8030f783995STejun Heo }
8040f783995STejun Heo 
8050f783995STejun Heo /*
8060f783995STejun Heo  * add rq to rbtree and fifo
8070f783995STejun Heo  */
8080f783995STejun Heo static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
809b2097bd2SBart Van Assche 			      blk_insert_t flags, struct list_head *free)
8100f783995STejun Heo {
8110f783995STejun Heo 	struct request_queue *q = hctx->queue;
8120f783995STejun Heo 	struct deadline_data *dd = q->elevator->elevator_data;
8130f783995STejun Heo 	const enum dd_data_dir data_dir = rq_data_dir(rq);
8140f783995STejun Heo 	u16 ioprio = req_get_ioprio(rq);
8150f783995STejun Heo 	u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
8160f783995STejun Heo 	struct dd_per_prio *per_prio;
8170f783995STejun Heo 	enum dd_prio prio;
8180f783995STejun Heo 
8190f783995STejun Heo 	lockdep_assert_held(&dd->lock);
8200f783995STejun Heo 
8210f783995STejun Heo 	/*
8220f783995STejun Heo 	 * This may be a requeue of a write request that has locked its
8230f783995STejun Heo 	 * target zone. If it is the case, this releases the zone lock.
8240f783995STejun Heo 	 */
8250f783995STejun Heo 	blk_req_zone_write_unlock(rq);
8260f783995STejun Heo 
8270f783995STejun Heo 	prio = ioprio_class_to_prio[ioprio_class];
828bce0363eSBart Van Assche 	per_prio = &dd->per_prio[prio];
829e2c7275dSBart Van Assche 	if (!rq->elv.priv[0]) {
830bce0363eSBart Van Assche 		per_prio->stats.inserted++;
831b6d2b054SBart Van Assche 		rq->elv.priv[0] = (void *)(uintptr_t)1;
832e2c7275dSBart Van Assche 	}
8330f783995STejun Heo 
834b2097bd2SBart Van Assche 	if (blk_mq_sched_try_insert_merge(q, rq, free))
8350f783995STejun Heo 		return;
8360f783995STejun Heo 
8370f783995STejun Heo 	trace_block_rq_insert(rq);
8380f783995STejun Heo 
83993fffe16SChristoph Hellwig 	if (flags & BLK_MQ_INSERT_AT_HEAD) {
8400f783995STejun Heo 		list_add(&rq->queuelist, &per_prio->dispatch);
841725f22a1SBart Van Assche 		rq->fifo_time = jiffies;
8420f783995STejun Heo 	} else {
8430effb390SBart Van Assche 		struct list_head *insert_before;
8440effb390SBart Van Assche 
8450f783995STejun Heo 		deadline_add_rq_rb(per_prio, rq);
8460f783995STejun Heo 
8470f783995STejun Heo 		if (rq_mergeable(rq)) {
8480f783995STejun Heo 			elv_rqhash_add(q, rq);
8490f783995STejun Heo 			if (!q->last_merge)
8500f783995STejun Heo 				q->last_merge = rq;
8510f783995STejun Heo 		}
8520f783995STejun Heo 
8530f783995STejun Heo 		/*
8540f783995STejun Heo 		 * set expire time and add to fifo list
8550f783995STejun Heo 		 */
8560f783995STejun Heo 		rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
8570effb390SBart Van Assche 		insert_before = &per_prio->fifo_list[data_dir];
8580effb390SBart Van Assche #ifdef CONFIG_BLK_DEV_ZONED
8590effb390SBart Van Assche 		/*
8600effb390SBart Van Assche 		 * Insert zoned writes such that requests are sorted by
8610effb390SBart Van Assche 		 * position per zone.
8620effb390SBart Van Assche 		 */
8630effb390SBart Van Assche 		if (blk_rq_is_seq_zoned_write(rq)) {
8640effb390SBart Van Assche 			struct request *rq2 = deadline_latter_request(rq);
8650effb390SBart Van Assche 
8660effb390SBart Van Assche 			if (rq2 && blk_rq_zone_no(rq2) == blk_rq_zone_no(rq))
8670effb390SBart Van Assche 				insert_before = &rq2->queuelist;
8680effb390SBart Van Assche 		}
8690effb390SBart Van Assche #endif
8700effb390SBart Van Assche 		list_add_tail(&rq->queuelist, insert_before);
8710f783995STejun Heo 	}
8720f783995STejun Heo }
8730f783995STejun Heo 
8740f783995STejun Heo /*
8752bd215dfSChristoph Hellwig  * Called from blk_mq_insert_request() or blk_mq_dispatch_plug_list().
8760f783995STejun Heo  */
8770f783995STejun Heo static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
87893fffe16SChristoph Hellwig 			       struct list_head *list,
87993fffe16SChristoph Hellwig 			       blk_insert_t flags)
8800f783995STejun Heo {
8810f783995STejun Heo 	struct request_queue *q = hctx->queue;
8820f783995STejun Heo 	struct deadline_data *dd = q->elevator->elevator_data;
883b2097bd2SBart Van Assche 	LIST_HEAD(free);
8840f783995STejun Heo 
8850f783995STejun Heo 	spin_lock(&dd->lock);
8860f783995STejun Heo 	while (!list_empty(list)) {
8870f783995STejun Heo 		struct request *rq;
8880f783995STejun Heo 
8890f783995STejun Heo 		rq = list_first_entry(list, struct request, queuelist);
8900f783995STejun Heo 		list_del_init(&rq->queuelist);
891b2097bd2SBart Van Assche 		dd_insert_request(hctx, rq, flags, &free);
8920f783995STejun Heo 	}
8930f783995STejun Heo 	spin_unlock(&dd->lock);
894b2097bd2SBart Van Assche 
895b2097bd2SBart Van Assche 	blk_mq_free_requests(&free);
8960f783995STejun Heo }
8970f783995STejun Heo 
898b6d2b054SBart Van Assche /* Callback from inside blk_mq_rq_ctx_init(). */
8990f783995STejun Heo static void dd_prepare_request(struct request *rq)
9000f783995STejun Heo {
901b6d2b054SBart Van Assche 	rq->elv.priv[0] = NULL;
9020f783995STejun Heo }
9030f783995STejun Heo 
9042820e5d0SDamien Le Moal static bool dd_has_write_work(struct blk_mq_hw_ctx *hctx)
9052820e5d0SDamien Le Moal {
9062820e5d0SDamien Le Moal 	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
9072820e5d0SDamien Le Moal 	enum dd_prio p;
9082820e5d0SDamien Le Moal 
9092820e5d0SDamien Le Moal 	for (p = 0; p <= DD_PRIO_MAX; p++)
9102820e5d0SDamien Le Moal 		if (!list_empty_careful(&dd->per_prio[p].fifo_list[DD_WRITE]))
9112820e5d0SDamien Le Moal 			return true;
9122820e5d0SDamien Le Moal 
9132820e5d0SDamien Le Moal 	return false;
9142820e5d0SDamien Le Moal }
9152820e5d0SDamien Le Moal 
9160f783995STejun Heo /*
9170f783995STejun Heo  * Callback from inside blk_mq_free_request().
9180f783995STejun Heo  *
9190f783995STejun Heo  * For zoned block devices, write unlock the target zone of
9200f783995STejun Heo  * completed write requests. Do this while holding the zone lock
9210f783995STejun Heo  * spinlock so that the zone is never unlocked while deadline_fifo_request()
9220f783995STejun Heo  * or deadline_next_request() are executing. This function is called for
9230f783995STejun Heo  * all requests, whether or not these requests complete successfully.
9240f783995STejun Heo  *
9250f783995STejun Heo  * For a zoned block device, __dd_dispatch_request() may have stopped
9260f783995STejun Heo  * dispatching requests if all the queued requests are write requests directed
9270f783995STejun Heo  * at zones that are already locked due to on-going write requests. To ensure
9280f783995STejun Heo  * write request dispatch progress in this case, mark the queue as needing a
9290f783995STejun Heo  * restart to ensure that the queue is run again after completion of the
9300f783995STejun Heo  * request and zones being unlocked.
9310f783995STejun Heo  */
9320f783995STejun Heo static void dd_finish_request(struct request *rq)
9330f783995STejun Heo {
9340f783995STejun Heo 	struct request_queue *q = rq->q;
9350f783995STejun Heo 	struct deadline_data *dd = q->elevator->elevator_data;
9360f783995STejun Heo 	const u8 ioprio_class = dd_rq_ioclass(rq);
9370f783995STejun Heo 	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
9380f783995STejun Heo 	struct dd_per_prio *per_prio = &dd->per_prio[prio];
9390f783995STejun Heo 
940b6d2b054SBart Van Assche 	/*
941b6d2b054SBart Van Assche 	 * The block layer core may call dd_finish_request() without having
942e2c7275dSBart Van Assche 	 * called dd_insert_requests(). Skip requests that bypassed I/O
943e2c7275dSBart Van Assche 	 * scheduling. See also blk_mq_request_bypass_insert().
944b6d2b054SBart Van Assche 	 */
945e2c7275dSBart Van Assche 	if (!rq->elv.priv[0])
946e2c7275dSBart Van Assche 		return;
947e2c7275dSBart Van Assche 
948bce0363eSBart Van Assche 	atomic_inc(&per_prio->stats.completed);
9490f783995STejun Heo 
9500f783995STejun Heo 	if (blk_queue_is_zoned(q)) {
9510f783995STejun Heo 		unsigned long flags;
9520f783995STejun Heo 
9530f783995STejun Heo 		spin_lock_irqsave(&dd->zone_lock, flags);
9540f783995STejun Heo 		blk_req_zone_write_unlock(rq);
9550f783995STejun Heo 		spin_unlock_irqrestore(&dd->zone_lock, flags);
9562820e5d0SDamien Le Moal 
9572820e5d0SDamien Le Moal 		if (dd_has_write_work(rq->mq_hctx))
9582820e5d0SDamien Le Moal 			blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
9590f783995STejun Heo 	}
9600f783995STejun Heo }
9610f783995STejun Heo 
9620f783995STejun Heo static bool dd_has_work_for_prio(struct dd_per_prio *per_prio)
9630f783995STejun Heo {
9640f783995STejun Heo 	return !list_empty_careful(&per_prio->dispatch) ||
9650f783995STejun Heo 		!list_empty_careful(&per_prio->fifo_list[DD_READ]) ||
9660f783995STejun Heo 		!list_empty_careful(&per_prio->fifo_list[DD_WRITE]);
9670f783995STejun Heo }
9680f783995STejun Heo 
9690f783995STejun Heo static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
9700f783995STejun Heo {
9710f783995STejun Heo 	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
9720f783995STejun Heo 	enum dd_prio prio;
9730f783995STejun Heo 
9740f783995STejun Heo 	for (prio = 0; prio <= DD_PRIO_MAX; prio++)
9750f783995STejun Heo 		if (dd_has_work_for_prio(&dd->per_prio[prio]))
9760f783995STejun Heo 			return true;
9770f783995STejun Heo 
9780f783995STejun Heo 	return false;
9790f783995STejun Heo }
9800f783995STejun Heo 
9810f783995STejun Heo /*
9820f783995STejun Heo  * sysfs parts below
9830f783995STejun Heo  */
9840f783995STejun Heo #define SHOW_INT(__FUNC, __VAR)						\
9850f783995STejun Heo static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
9860f783995STejun Heo {									\
9870f783995STejun Heo 	struct deadline_data *dd = e->elevator_data;			\
9880f783995STejun Heo 									\
9890f783995STejun Heo 	return sysfs_emit(page, "%d\n", __VAR);				\
9900f783995STejun Heo }
9910f783995STejun Heo #define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))
9920f783995STejun Heo SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
9930f783995STejun Heo SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
994322cff70SBart Van Assche SHOW_JIFFIES(deadline_prio_aging_expire_show, dd->prio_aging_expire);
9950f783995STejun Heo SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
9960f783995STejun Heo SHOW_INT(deadline_front_merges_show, dd->front_merges);
99746cdc45aSJens Axboe SHOW_INT(deadline_async_depth_show, dd->async_depth);
9980f783995STejun Heo SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
9990f783995STejun Heo #undef SHOW_INT
10000f783995STejun Heo #undef SHOW_JIFFIES
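/*
 * Sketch of what one SHOW_* invocation above roughly expands to (whitespace
 * rearranged for readability):
 *
 *	static ssize_t deadline_read_expire_show(struct elevator_queue *e,
 *						 char *page)
 *	{
 *		struct deadline_data *dd = e->elevator_data;
 *
 *		return sysfs_emit(page, "%d\n",
 *				  jiffies_to_msecs(dd->fifo_expire[DD_READ]));
 *	}
 */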
10010f783995STejun Heo 
10020f783995STejun Heo #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
10030f783995STejun Heo static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
10040f783995STejun Heo {									\
10050f783995STejun Heo 	struct deadline_data *dd = e->elevator_data;			\
10060f783995STejun Heo 	int __data, __ret;						\
10070f783995STejun Heo 									\
10080f783995STejun Heo 	__ret = kstrtoint(page, 0, &__data);				\
10090f783995STejun Heo 	if (__ret < 0)							\
10100f783995STejun Heo 		return __ret;						\
10110f783995STejun Heo 	if (__data < (MIN))						\
10120f783995STejun Heo 		__data = (MIN);						\
10130f783995STejun Heo 	else if (__data > (MAX))					\
10140f783995STejun Heo 		__data = (MAX);						\
10150f783995STejun Heo 	*(__PTR) = __CONV(__data);					\
10160f783995STejun Heo 	return count;							\
10170f783995STejun Heo }
10180f783995STejun Heo #define STORE_INT(__FUNC, __PTR, MIN, MAX)				\
10190f783995STejun Heo 	STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, )
10200f783995STejun Heo #define STORE_JIFFIES(__FUNC, __PTR, MIN, MAX)				\
10210f783995STejun Heo 	STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies)
10220f783995STejun Heo STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX);
10230f783995STejun Heo STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
1024322cff70SBart Van Assche STORE_JIFFIES(deadline_prio_aging_expire_store, &dd->prio_aging_expire, 0, INT_MAX);
10250f783995STejun Heo STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
10260f783995STejun Heo STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
102746cdc45aSJens Axboe STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
10280f783995STejun Heo STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
10290f783995STejun Heo #undef STORE_FUNCTION
10300f783995STejun Heo #undef STORE_INT
10310f783995STejun Heo #undef STORE_JIFFIES
10320f783995STejun Heo 
10330f783995STejun Heo #define DD_ATTR(name) \
10340f783995STejun Heo 	__ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)
10350f783995STejun Heo 
10360f783995STejun Heo static struct elv_fs_entry deadline_attrs[] = {
10370f783995STejun Heo 	DD_ATTR(read_expire),
10380f783995STejun Heo 	DD_ATTR(write_expire),
10390f783995STejun Heo 	DD_ATTR(writes_starved),
10400f783995STejun Heo 	DD_ATTR(front_merges),
10410f783995STejun Heo 	DD_ATTR(async_depth),
10420f783995STejun Heo 	DD_ATTR(fifo_batch),
1043322cff70SBart Van Assche 	DD_ATTR(prio_aging_expire),
10440f783995STejun Heo 	__ATTR_NULL
10450f783995STejun Heo };
10460f783995STejun Heo 
10470f783995STejun Heo #ifdef CONFIG_BLK_DEBUG_FS
10480f783995STejun Heo #define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name)		\
10490f783995STejun Heo static void *deadline_##name##_fifo_start(struct seq_file *m,		\
10500f783995STejun Heo 					  loff_t *pos)			\
10510f783995STejun Heo 	__acquires(&dd->lock)						\
10520f783995STejun Heo {									\
10530f783995STejun Heo 	struct request_queue *q = m->private;				\
10540f783995STejun Heo 	struct deadline_data *dd = q->elevator->elevator_data;		\
10550f783995STejun Heo 	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
10560f783995STejun Heo 									\
10570f783995STejun Heo 	spin_lock(&dd->lock);						\
10580f783995STejun Heo 	return seq_list_start(&per_prio->fifo_list[data_dir], *pos);	\
10590f783995STejun Heo }									\
10600f783995STejun Heo 									\
10610f783995STejun Heo static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,	\
10620f783995STejun Heo 					 loff_t *pos)			\
10630f783995STejun Heo {									\
10640f783995STejun Heo 	struct request_queue *q = m->private;				\
10650f783995STejun Heo 	struct deadline_data *dd = q->elevator->elevator_data;		\
10660f783995STejun Heo 	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
10670f783995STejun Heo 									\
10680f783995STejun Heo 	return seq_list_next(v, &per_prio->fifo_list[data_dir], pos);	\
10690f783995STejun Heo }									\
10700f783995STejun Heo 									\
10710f783995STejun Heo static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)	\
10720f783995STejun Heo 	__releases(&dd->lock)						\
10730f783995STejun Heo {									\
10740f783995STejun Heo 	struct request_queue *q = m->private;				\
10750f783995STejun Heo 	struct deadline_data *dd = q->elevator->elevator_data;		\
10760f783995STejun Heo 									\
10770f783995STejun Heo 	spin_unlock(&dd->lock);						\
10780f783995STejun Heo }									\
10790f783995STejun Heo 									\
10800f783995STejun Heo static const struct seq_operations deadline_##name##_fifo_seq_ops = {	\
10810f783995STejun Heo 	.start	= deadline_##name##_fifo_start,				\
10820f783995STejun Heo 	.next	= deadline_##name##_fifo_next,				\
10830f783995STejun Heo 	.stop	= deadline_##name##_fifo_stop,				\
10840f783995STejun Heo 	.show	= blk_mq_debugfs_rq_show,				\
10850f783995STejun Heo };									\
10860f783995STejun Heo 									\
10870f783995STejun Heo static int deadline_##name##_next_rq_show(void *data,			\
10880f783995STejun Heo 					  struct seq_file *m)		\
10890f783995STejun Heo {									\
10900f783995STejun Heo 	struct request_queue *q = data;					\
10910f783995STejun Heo 	struct deadline_data *dd = q->elevator->elevator_data;		\
10920f783995STejun Heo 	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
109383c46ed6SBart Van Assche 	struct request *rq;						\
10940f783995STejun Heo 									\
109583c46ed6SBart Van Assche 	rq = deadline_from_pos(per_prio, data_dir,			\
109683c46ed6SBart Van Assche 			       per_prio->latest_pos[data_dir]);		\
10970f783995STejun Heo 	if (rq)								\
10980f783995STejun Heo 		__blk_mq_debugfs_rq_show(m, rq);			\
10990f783995STejun Heo 	return 0;							\
11000f783995STejun Heo }
11010f783995STejun Heo 
11020f783995STejun Heo DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0);
11030f783995STejun Heo DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0);
11040f783995STejun Heo DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1);
11050f783995STejun Heo DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1);
11060f783995STejun Heo DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2);
11070f783995STejun Heo DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2);
11080f783995STejun Heo #undef DEADLINE_DEBUGFS_DDIR_ATTRS
11090f783995STejun Heo 
11100f783995STejun Heo static int deadline_batching_show(void *data, struct seq_file *m)
11110f783995STejun Heo {
11120f783995STejun Heo 	struct request_queue *q = data;
11130f783995STejun Heo 	struct deadline_data *dd = q->elevator->elevator_data;
11140f783995STejun Heo 
11150f783995STejun Heo 	seq_printf(m, "%u\n", dd->batching);
11160f783995STejun Heo 	return 0;
11170f783995STejun Heo }
11180f783995STejun Heo 
11190f783995STejun Heo static int deadline_starved_show(void *data, struct seq_file *m)
11200f783995STejun Heo {
11210f783995STejun Heo 	struct request_queue *q = data;
11220f783995STejun Heo 	struct deadline_data *dd = q->elevator->elevator_data;
11230f783995STejun Heo 
11240f783995STejun Heo 	seq_printf(m, "%u\n", dd->starved);
11250f783995STejun Heo 	return 0;
11260f783995STejun Heo }
11270f783995STejun Heo 
11280f783995STejun Heo static int dd_async_depth_show(void *data, struct seq_file *m)
11290f783995STejun Heo {
11300f783995STejun Heo 	struct request_queue *q = data;
11310f783995STejun Heo 	struct deadline_data *dd = q->elevator->elevator_data;
11320f783995STejun Heo 
11330f783995STejun Heo 	seq_printf(m, "%u\n", dd->async_depth);
11340f783995STejun Heo 	return 0;
11350f783995STejun Heo }
11360f783995STejun Heo 
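/* Number of requests queued per priority class, printed as "RT BE IDLE". */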
11370f783995STejun Heo static int dd_queued_show(void *data, struct seq_file *m)
11380f783995STejun Heo {
11390f783995STejun Heo 	struct request_queue *q = data;
11400f783995STejun Heo 	struct deadline_data *dd = q->elevator->elevator_data;
1141bce0363eSBart Van Assche 	u32 rt, be, idle;
11420f783995STejun Heo 
1143bce0363eSBart Van Assche 	spin_lock(&dd->lock);
1144bce0363eSBart Van Assche 	rt = dd_queued(dd, DD_RT_PRIO);
1145bce0363eSBart Van Assche 	be = dd_queued(dd, DD_BE_PRIO);
1146bce0363eSBart Van Assche 	idle = dd_queued(dd, DD_IDLE_PRIO);
1147bce0363eSBart Van Assche 	spin_unlock(&dd->lock);
1148bce0363eSBart Van Assche 
1149bce0363eSBart Van Assche 	seq_printf(m, "%u %u %u\n", rt, be, idle);
1150bce0363eSBart Van Assche 
11510f783995STejun Heo 	return 0;
11520f783995STejun Heo }
11530f783995STejun Heo 
11540f783995STejun Heo /* Number of requests owned by the block driver for a given priority. */
11550f783995STejun Heo static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
11560f783995STejun Heo {
1157bce0363eSBart Van Assche 	const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;
1158bce0363eSBart Van Assche 
1159bce0363eSBart Van Assche 	lockdep_assert_held(&dd->lock);
1160bce0363eSBart Van Assche 
1161bce0363eSBart Van Assche 	return stats->dispatched + stats->merged -
1162bce0363eSBart Van Assche 		atomic_read(&stats->completed);
11630f783995STejun Heo }
11640f783995STejun Heo 
11650f783995STejun Heo static int dd_owned_by_driver_show(void *data, struct seq_file *m)
11660f783995STejun Heo {
11670f783995STejun Heo 	struct request_queue *q = data;
11680f783995STejun Heo 	struct deadline_data *dd = q->elevator->elevator_data;
1169bce0363eSBart Van Assche 	u32 rt, be, idle;
11700f783995STejun Heo 
1171bce0363eSBart Van Assche 	spin_lock(&dd->lock);
1172bce0363eSBart Van Assche 	rt = dd_owned_by_driver(dd, DD_RT_PRIO);
1173bce0363eSBart Van Assche 	be = dd_owned_by_driver(dd, DD_BE_PRIO);
1174bce0363eSBart Van Assche 	idle = dd_owned_by_driver(dd, DD_IDLE_PRIO);
1175bce0363eSBart Van Assche 	spin_unlock(&dd->lock);
1176bce0363eSBart Van Assche 
1177bce0363eSBart Van Assche 	seq_printf(m, "%u %u %u\n", rt, be, idle);
1178bce0363eSBart Van Assche 
11790f783995STejun Heo 	return 0;
11800f783995STejun Heo }
11810f783995STejun Heo 
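/*
 * Per-priority "dispatchN" debugfs files: seq_file operations that iterate
 * the per_prio->dispatch list while holding dd->lock.
 */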
11820f783995STejun Heo #define DEADLINE_DISPATCH_ATTR(prio)					\
11830f783995STejun Heo static void *deadline_dispatch##prio##_start(struct seq_file *m,	\
11840f783995STejun Heo 					     loff_t *pos)		\
11850f783995STejun Heo 	__acquires(&dd->lock)						\
11860f783995STejun Heo {									\
11870f783995STejun Heo 	struct request_queue *q = m->private;				\
11880f783995STejun Heo 	struct deadline_data *dd = q->elevator->elevator_data;		\
11890f783995STejun Heo 	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
11900f783995STejun Heo 									\
11910f783995STejun Heo 	spin_lock(&dd->lock);						\
11920f783995STejun Heo 	return seq_list_start(&per_prio->dispatch, *pos);		\
11930f783995STejun Heo }									\
11940f783995STejun Heo 									\
11950f783995STejun Heo static void *deadline_dispatch##prio##_next(struct seq_file *m,		\
11960f783995STejun Heo 					    void *v, loff_t *pos)	\
11970f783995STejun Heo {									\
11980f783995STejun Heo 	struct request_queue *q = m->private;				\
11990f783995STejun Heo 	struct deadline_data *dd = q->elevator->elevator_data;		\
12000f783995STejun Heo 	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
12010f783995STejun Heo 									\
12020f783995STejun Heo 	return seq_list_next(v, &per_prio->dispatch, pos);		\
12030f783995STejun Heo }									\
12040f783995STejun Heo 									\
12050f783995STejun Heo static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v)	\
12060f783995STejun Heo 	__releases(&dd->lock)						\
12070f783995STejun Heo {									\
12080f783995STejun Heo 	struct request_queue *q = m->private;				\
12090f783995STejun Heo 	struct deadline_data *dd = q->elevator->elevator_data;		\
12100f783995STejun Heo 									\
12110f783995STejun Heo 	spin_unlock(&dd->lock);						\
12120f783995STejun Heo }									\
12130f783995STejun Heo 									\
12140f783995STejun Heo static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \
12150f783995STejun Heo 	.start	= deadline_dispatch##prio##_start,			\
12160f783995STejun Heo 	.next	= deadline_dispatch##prio##_next,			\
12170f783995STejun Heo 	.stop	= deadline_dispatch##prio##_stop,			\
12180f783995STejun Heo 	.show	= blk_mq_debugfs_rq_show,				\
12190f783995STejun Heo }
12200f783995STejun Heo 
12210f783995STejun Heo DEADLINE_DISPATCH_ATTR(0);
12220f783995STejun Heo DEADLINE_DISPATCH_ATTR(1);
12230f783995STejun Heo DEADLINE_DISPATCH_ATTR(2);
12240f783995STejun Heo #undef DEADLINE_DISPATCH_ATTR
12250f783995STejun Heo 
12260f783995STejun Heo #define DEADLINE_QUEUE_DDIR_ATTRS(name)					\
12270f783995STejun Heo 	{#name "_fifo_list", 0400,					\
12280f783995STejun Heo 			.seq_ops = &deadline_##name##_fifo_seq_ops}
12290f783995STejun Heo #define DEADLINE_NEXT_RQ_ATTR(name)					\
12300f783995STejun Heo 	{#name "_next_rq", 0400, deadline_##name##_next_rq_show}
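/*
 * debugfs files registered for the queue (typically under
 * /sys/kernel/debug/block/<disk>/sched/) while mq-deadline is active.
 */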
12310f783995STejun Heo static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
12320f783995STejun Heo 	DEADLINE_QUEUE_DDIR_ATTRS(read0),
12330f783995STejun Heo 	DEADLINE_QUEUE_DDIR_ATTRS(write0),
12340f783995STejun Heo 	DEADLINE_QUEUE_DDIR_ATTRS(read1),
12350f783995STejun Heo 	DEADLINE_QUEUE_DDIR_ATTRS(write1),
12360f783995STejun Heo 	DEADLINE_QUEUE_DDIR_ATTRS(read2),
12370f783995STejun Heo 	DEADLINE_QUEUE_DDIR_ATTRS(write2),
12380f783995STejun Heo 	DEADLINE_NEXT_RQ_ATTR(read0),
12390f783995STejun Heo 	DEADLINE_NEXT_RQ_ATTR(write0),
12400f783995STejun Heo 	DEADLINE_NEXT_RQ_ATTR(read1),
12410f783995STejun Heo 	DEADLINE_NEXT_RQ_ATTR(write1),
12420f783995STejun Heo 	DEADLINE_NEXT_RQ_ATTR(read2),
12430f783995STejun Heo 	DEADLINE_NEXT_RQ_ATTR(write2),
12440f783995STejun Heo 	{"batching", 0400, deadline_batching_show},
12450f783995STejun Heo 	{"starved", 0400, deadline_starved_show},
12460f783995STejun Heo 	{"async_depth", 0400, dd_async_depth_show},
12470f783995STejun Heo 	{"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops},
12480f783995STejun Heo 	{"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops},
12490f783995STejun Heo 	{"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops},
12500f783995STejun Heo 	{"owned_by_driver", 0400, dd_owned_by_driver_show},
12510f783995STejun Heo 	{"queued", 0400, dd_queued_show},
12520f783995STejun Heo 	{},
12530f783995STejun Heo };
12540f783995STejun Heo #undef DEADLINE_QUEUE_DDIR_ATTRS
12550f783995STejun Heo #endif
12560f783995STejun Heo 
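/*
 * The elevator_alias "deadline" allows selecting this scheduler under the
 * name of the legacy deadline scheduler, and ELEVATOR_F_ZBD_SEQ_WRITE
 * advertises support for the sequential write constraint of zoned block
 * devices.
 */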
12570f783995STejun Heo static struct elevator_type mq_deadline = {
12580f783995STejun Heo 	.ops = {
12590f783995STejun Heo 		.depth_updated		= dd_depth_updated,
12600f783995STejun Heo 		.limit_depth		= dd_limit_depth,
12610f783995STejun Heo 		.insert_requests	= dd_insert_requests,
12620f783995STejun Heo 		.dispatch_request	= dd_dispatch_request,
12630f783995STejun Heo 		.prepare_request	= dd_prepare_request,
12640f783995STejun Heo 		.finish_request		= dd_finish_request,
12650f783995STejun Heo 		.next_request		= elv_rb_latter_request,
12660f783995STejun Heo 		.former_request		= elv_rb_former_request,
12670f783995STejun Heo 		.bio_merge		= dd_bio_merge,
12680f783995STejun Heo 		.request_merge		= dd_request_merge,
12690f783995STejun Heo 		.requests_merged	= dd_merged_requests,
12700f783995STejun Heo 		.request_merged		= dd_request_merged,
12710f783995STejun Heo 		.has_work		= dd_has_work,
12720f783995STejun Heo 		.init_sched		= dd_init_sched,
12730f783995STejun Heo 		.exit_sched		= dd_exit_sched,
12740f783995STejun Heo 		.init_hctx		= dd_init_hctx,
12750f783995STejun Heo 	},
12760f783995STejun Heo 
12770f783995STejun Heo #ifdef CONFIG_BLK_DEBUG_FS
12780f783995STejun Heo 	.queue_debugfs_attrs = deadline_queue_debugfs_attrs,
12790f783995STejun Heo #endif
12800f783995STejun Heo 	.elevator_attrs = deadline_attrs,
12810f783995STejun Heo 	.elevator_name = "mq-deadline",
12820f783995STejun Heo 	.elevator_alias = "deadline",
12830f783995STejun Heo 	.elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
12840f783995STejun Heo 	.elevator_owner = THIS_MODULE,
12850f783995STejun Heo };
12860f783995STejun Heo MODULE_ALIAS("mq-deadline-iosched");
12870f783995STejun Heo 
12880f783995STejun Heo static int __init deadline_init(void)
12890f783995STejun Heo {
12900f783995STejun Heo 	return elv_register(&mq_deadline);
12910f783995STejun Heo }
12920f783995STejun Heo 
12930f783995STejun Heo static void __exit deadline_exit(void)
12940f783995STejun Heo {
12950f783995STejun Heo 	elv_unregister(&mq_deadline);
12960f783995STejun Heo }
12970f783995STejun Heo 
12980f783995STejun Heo module_init(deadline_init);
12990f783995STejun Heo module_exit(deadline_exit);
13000f783995STejun Heo 
13010f783995STejun Heo MODULE_AUTHOR("Jens Axboe, Damien Le Moal and Bart Van Assche");
13020f783995STejun Heo MODULE_LICENSE("GPL");
13030f783995STejun Heo MODULE_DESCRIPTION("MQ deadline IO scheduler");
1304