/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

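/*
 * Container for the per-CPU software queues: @kobj is the sysfs parent
 * of the individual ctx kobjects, and @queue_ctx is the percpu array of
 * struct blk_mq_ctx hanging off each request queue.
 */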
struct blk_mq_ctxs {
	struct kobject kobj;
	struct blk_mq_ctx __percpu	*queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	}  ____cacheline_aligned_in_smp;

	unsigned int		cpu;
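	/*
	 * Index of this ctx in each hctx type's ctx map
	 * (blk_mq_hw_ctx->ctxs), one entry per type.
	 */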
	unsigned short		index_hw[HCTX_MAX_TYPES];

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct blk_mq_ctxs      *ctxs;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_get_driver_tag(struct request *rq);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);
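
/*
 * Pairing sketch, simplified from how the core populates and tears down
 * a hardware queue's request map:
 *
 *	tags = blk_mq_alloc_rq_map(set, hctx_idx, nr_tags, reserved_tags);
 *	if (!tags)
 *		return -ENOMEM;
 *	if (blk_mq_alloc_rqs(set, tags, hctx_idx, depth) < 0) {
 *		blk_mq_free_rq_map(tags);
 *		return -ENOMEM;
 *	}
 *	...
 *	blk_mq_free_rqs(set, tags, hctx_idx);
 *	blk_mq_free_rq_map(tags);
 */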

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);

/* Used by blk_insert_cloned_request() to issue request directly */
blk_status_t blk_mq_request_issue_directly(struct request *rq);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
				    struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @q: request queue
 * @hctx_type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
							  unsigned int hctx_type,
							  unsigned int cpu)
{
	struct blk_mq_tag_set *set = q->tag_set;

	return q->queue_hw_ctx[set->map[hctx_type].mq_map[cpu]];
}
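
/*
 * Example (sketch): the hardware queue serving requests of type index 0
 * (the default map) submitted from the current CPU would be
 *
 *	hctx = blk_mq_map_queue_type(q, 0, raw_smp_processor_id());
 */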

/*
 * blk_mq_map_queue() - map (cmd_flags,cpu) to hardware queue
 * @q: request queue
 * @flags: request command flags
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     unsigned int flags,
						     unsigned int cpu)
{
	int hctx_type = 0;

	if (q->mq_ops->rq_flags_to_type)
		hctx_type = q->mq_ops->rq_flags_to_type(q, flags);

	return blk_mq_map_queue_type(q, hctx_type, cpu);
}
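
/*
 * Drivers that do not implement ->rq_flags_to_type() get type 0 for all
 * requests, i.e. a single queue map shared by every command type.
 */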
113ff2c5660SJens Axboe 
114e93ecf60SJens Axboe /*
11567aec14cSJens Axboe  * sysfs helpers
11667aec14cSJens Axboe  */
117737f98cfSMing Lei extern void blk_mq_sysfs_init(struct request_queue *q);
1187ea5fe31SMing Lei extern void blk_mq_sysfs_deinit(struct request_queue *q);
1192d0364c8SBart Van Assche extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
12067aec14cSJens Axboe extern int blk_mq_sysfs_register(struct request_queue *q);
12167aec14cSJens Axboe extern void blk_mq_sysfs_unregister(struct request_queue *q);
122868f2f0bSKeith Busch extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
12367aec14cSJens Axboe 
124e09aae7eSMing Lei void blk_mq_release(struct request_queue *q);
125e09aae7eSMing Lei 
/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
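 *
 * Return: the current state, one of MQ_RQ_IDLE, MQ_RQ_IN_FLIGHT or
 * MQ_RQ_COMPLETE.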
1291d9bd516STejun Heo  */
13012f5b931SKeith Busch static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
1311d9bd516STejun Heo {
13212f5b931SKeith Busch 	return READ_ONCE(rq->state);
1331d9bd516STejun Heo }

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues; they could be per-node as well,
 * for instance, but for now this is hardcoded. Note that we don't care
 * about preemption, since we know the ctxs are persistent. This does mean
 * that we can't rely on the ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
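
/*
 * blk_mq_get_ctx() runs with preemption disabled (get_cpu()) until the
 * matching blk_mq_put_ctx(). A minimal sketch:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *
 *	... short, non-sleeping work on ctx ...
 *
 *	blk_mq_put_ctx(ctx);
 */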

struct blk_mq_alloc_data {
	/* input parameters */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;
	unsigned int cmd_flags;

	/* input & output parameters */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

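/*
 * Scheduler-owned allocations (BLK_MQ_REQ_INTERNAL) are tracked in the
 * scheduler tags; everything else draws straight from the driver tags.
 */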
static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

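/*
 * A hardware queue is considered mapped once it has software ctxs and a
 * tag set attached; callers skip unmapped queues when running them.
 */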
static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
		      unsigned int inflight[2]);
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
			 unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(hctx);
}

static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(hctx);
	return true;
}

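/*
 * Budget get/put must stay balanced. A simplified dispatch sketch,
 * modeled on blk_mq_dispatch_rq_list():
 *
 *	if (!blk_mq_get_dispatch_budget(hctx))
 *		break;
 *	if (!blk_mq_get_driver_tag(rq)) {
 *		blk_mq_put_dispatch_budget(hctx);
 *		break;
 *	}
 *	ret = q->mq_ops->queue_rq(hctx, &bd);
 */

/*
 * Release the driver tag and, when the tag set is shared, drop this
 * request's contribution to the hctx's active count (RQF_MQ_INFLIGHT).
 */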
static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = -1;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		atomic_dec(&hctx->nr_active);
	}
}

static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
				       struct request *rq)
{
	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	__blk_mq_put_driver_tag(hctx, rq);
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

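/*
 * Point every possible CPU at hardware queue 0, typically before a
 * driver rebuilds the map (e.g. via blk_mq_map_queues()).
 */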
static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
	int cpu;

	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = 0;
}

#endif