/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	}  ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

/*
 * Bits for request->gstate.  The lower two bits carry MQ_RQ_* state value
 * and the upper bits the generation number.
 */
enum mq_rq_state {
	MQ_RQ_IDLE		= 0,
	MQ_RQ_IN_FLIGHT		= 1,
	MQ_RQ_COMPLETE		= 2,

	MQ_RQ_STATE_BITS	= 2,
	MQ_RQ_STATE_MASK	= (1 << MQ_RQ_STATE_BITS) - 1,
	MQ_RQ_GEN_INC		= 1 << MQ_RQ_STATE_BITS,
};
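
/*
 * Illustrative sketch (not a kernel API): given a gstate value, the
 * state and generation halves are recovered as
 *
 *	state = gstate & MQ_RQ_STATE_MASK;
 *	gen   = gstate >> MQ_RQ_STATE_BITS;
 *
 * and moving to a new generation is a single add of MQ_RQ_GEN_INC,
 * which is what blk_mq_rq_update_state() below relies on.
 */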

void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
			     bool got_budget);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
				bool wait);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);

/*
 * Internal helpers for request insertion into software queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);

/* Used by blk_insert_cloned_request() to issue a request directly */
blk_status_t blk_mq_request_issue_directly(struct request *rq);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int cpu);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
		int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}
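
/*
 * Usage sketch (illustrative): resolving the hardware context for the
 * CPU a request was submitted on, the same lookup blk_mq_put_driver_tag()
 * below performs:
 *
 *	struct blk_mq_hw_ctx *hctx;
 *
 *	hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
 */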

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

void blk_mq_release(struct request_queue *q);

/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline int blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->gstate) & MQ_RQ_STATE_MASK;
}

/**
 * blk_mq_rq_update_state() - set the current MQ_RQ_* state of a request
 * @rq: target request.
 * @state: new state to set.
 *
 * Set @rq's state to @state.  The caller is responsible for ensuring that
 * there are no other updaters.  A request can transition into IN_FLIGHT
 * only from IDLE and doing so increments the generation number.
 */
static inline void blk_mq_rq_update_state(struct request *rq,
					  enum mq_rq_state state)
{
	u64 old_val = READ_ONCE(rq->gstate);
	u64 new_val = (old_val & ~MQ_RQ_STATE_MASK) | state;

	if (state == MQ_RQ_IN_FLIGHT) {
		WARN_ON_ONCE((old_val & MQ_RQ_STATE_MASK) != MQ_RQ_IDLE);
		new_val += MQ_RQ_GEN_INC;
	}

	/* avoid exposing interim values */
	WRITE_ONCE(rq->gstate, new_val);
}
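
/*
 * Usage sketch (assumed caller shape, not a definitive flow): starting a
 * request moves it from IDLE to IN_FLIGHT, which also bumps the
 * generation; completion changes only the state bits:
 *
 *	blk_mq_rq_update_state(rq, MQ_RQ_IN_FLIGHT);	(generation bumped)
 *	...
 *	blk_mq_rq_update_state(rq, MQ_RQ_COMPLETE);	(generation unchanged)
 */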

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues; they could be per-node as well,
 * for instance. For now this is hardcoded as-is. Note that we don't care
 * about preemption, since we know the ctxs are persistent. This does
 * mean that we can't rely on the ctx always matching the currently
 * running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
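
/*
 * Pairing sketch (illustrative): blk_mq_get_ctx() disables preemption
 * via get_cpu(), so each call must be matched with blk_mq_put_ctx()
 * once the ctx pointer is no longer needed:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *
 *	... use ctx ...
 *
 *	blk_mq_put_ctx(ctx);
 */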

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};
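
/*
 * Filling sketch (illustrative, mirroring how the allocation paths in
 * blk-mq.c set this up): callers populate the input fields and let the
 * allocator choose ctx/hctx:
 *
 *	struct blk_mq_alloc_data data = { .q = q, .flags = flags };
 *
 * blk_mq_tags_from_data() below then picks scheduler tags or driver
 * tags based on data.flags.
 */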

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
			unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(hctx);
}

static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(hctx);
	return true;
}
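
/*
 * Pairing sketch (assumed caller shape, not a definitive flow): dispatch
 * paths take a budget before handing a request to the driver, and must
 * return it if the request is not dispatched after all:
 *
 *	if (!blk_mq_get_dispatch_budget(hctx))
 *		return;
 *	...
 *	if (the request was not handed to ->queue_rq())
 *		blk_mq_put_dispatch_budget(hctx);
 */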

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	/* return the driver tag and forget it on the request */
	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = -1;

	/* undo the nr_active accounting done when the tag was acquired */
	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		atomic_dec(&hctx->nr_active);
	}
}

static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
				       struct request *rq)
{
	/*
	 * No driver tag to return, or the tag was not acquired separately
	 * from a scheduler tag.
	 */
	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	__blk_mq_put_driver_tag(hctx, rq);
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx;

	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
	__blk_mq_put_driver_tag(hctx, rq);
}
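
/*
 * Illustrative only: when a dispatch attempt backs off (e.g. the driver
 * returned BLK_STS_RESOURCE), the block core returns the driver tag
 * before the request is re-inserted, roughly:
 *
 *	blk_mq_put_driver_tag(rq);
 *	(re-insert rq and run the queue again later)
 */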

#endif