xref: /openbmc/linux/block/blk-mq.h (revision ecc23d0a422a3118fcf6e4f0a46e17a6c2047b02)
1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
2320ae51fSJens Axboe #ifndef INT_BLK_MQ_H
3320ae51fSJens Axboe #define INT_BLK_MQ_H
4320ae51fSJens Axboe 
590110e04SChristoph Hellwig #include <linux/blk-mq.h>
6cf43e6beSJens Axboe #include "blk-stat.h"
7cf43e6beSJens Axboe 
824d2f903SChristoph Hellwig struct blk_mq_tag_set;
924d2f903SChristoph Hellwig 
101db4909eSMing Lei struct blk_mq_ctxs {
111db4909eSMing Lei 	struct kobject kobj;
121db4909eSMing Lei 	struct blk_mq_ctx __percpu	*queue_ctx;
131db4909eSMing Lei };
141db4909eSMing Lei 
15fe644072SLinus Walleij /**
16fe644072SLinus Walleij  * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
17fe644072SLinus Walleij  */
18320ae51fSJens Axboe struct blk_mq_ctx {
19320ae51fSJens Axboe 	struct {
20320ae51fSJens Axboe 		spinlock_t		lock;
21c16d6b5aSMing Lei 		struct list_head	rq_lists[HCTX_MAX_TYPES];
22320ae51fSJens Axboe 	} ____cacheline_aligned_in_smp;
23320ae51fSJens Axboe 
24320ae51fSJens Axboe 	unsigned int		cpu;
25f31967f0SJens Axboe 	unsigned short		index_hw[HCTX_MAX_TYPES];
268ccdf4a3SJianchao Wang 	struct blk_mq_hw_ctx 	*hctxs[HCTX_MAX_TYPES];
27320ae51fSJens Axboe 
28320ae51fSJens Axboe 	struct request_queue	*queue;
291db4909eSMing Lei 	struct blk_mq_ctxs      *ctxs;
30320ae51fSJens Axboe 	struct kobject		kobj;
314bb659b1SJens Axboe } ____cacheline_aligned_in_smp;
32320ae51fSJens Axboe 
33bebe84ebSChristoph Hellwig enum {
34bebe84ebSChristoph Hellwig 	BLK_MQ_NO_TAG		= -1U,
35bebe84ebSChristoph Hellwig 	BLK_MQ_TAG_MIN		= 1,
36bebe84ebSChristoph Hellwig 	BLK_MQ_TAG_MAX		= BLK_MQ_NO_TAG - 1,
37bebe84ebSChristoph Hellwig };
38bebe84ebSChristoph Hellwig 
39710fa378SChristoph Hellwig typedef unsigned int __bitwise blk_insert_t;
40710fa378SChristoph Hellwig #define BLK_MQ_INSERT_AT_HEAD		((__force blk_insert_t)0x01)
41710fa378SChristoph Hellwig 
423e08773cSChristoph Hellwig void blk_mq_submit_bio(struct bio *bio);
435a72e899SJens Axboe int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
445a72e899SJens Axboe 		unsigned int flags);
45c7e2d94bSMing Lei void blk_mq_exit_queue(struct request_queue *q);
46e3a2b3f9SJens Axboe int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
47aed3ea94SJens Axboe void blk_mq_wake_waiters(struct request_queue *q);
481fd40b5eSMing Lei bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
491fd40b5eSMing Lei 			     unsigned int);
502c3ad667SJens Axboe void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
51b347689fSMing Lei struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
52b347689fSMing Lei 					struct blk_mq_ctx *start);
532e315dc0SMing Lei void blk_mq_put_rq_ref(struct request *rq);
542c3ad667SJens Axboe 
552c3ad667SJens Axboe /*
562c3ad667SJens Axboe  * Internal helpers for allocating/freeing the request map
572c3ad667SJens Axboe  */
58cc71a6f4SJens Axboe void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
592c3ad667SJens Axboe 		     unsigned int hctx_idx);
60e155b0c2SJohn Garry void blk_mq_free_rq_map(struct blk_mq_tags *tags);
6163064be1SJohn Garry struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
62cc71a6f4SJens Axboe 				unsigned int hctx_idx, unsigned int depth);
63645db34eSJohn Garry void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
64645db34eSJohn Garry 			     struct blk_mq_tags *tags,
65645db34eSJohn Garry 			     unsigned int hctx_idx);
66396eaf21SMing Lei 
67320ae51fSJens Axboe /*
68320ae51fSJens Axboe  * CPU -> queue mappings
69320ae51fSJens Axboe  */
70ed76e329SJens Axboe extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);
71320ae51fSJens Axboe 
72b3c661b1SJens Axboe /*
73b3c661b1SJens Axboe  * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
74b3c661b1SJens Axboe  * @q: request queue
75e20ba6e1SChristoph Hellwig  * @type: the hctx type index
76b3c661b1SJens Axboe  * @cpu: CPU
77b3c661b1SJens Axboe  */
78ff2c5660SJens Axboe static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
79e20ba6e1SChristoph Hellwig 							  enum hctx_type type,
80ff2c5660SJens Axboe 							  unsigned int cpu)
81ff2c5660SJens Axboe {
824e5cc99eSMing Lei 	return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]);
83b3c661b1SJens Axboe }
84b3c661b1SJens Axboe 
8516458cf3SBart Van Assche static inline enum hctx_type blk_mq_get_hctx_type(blk_opf_t opf)
86b3c661b1SJens Axboe {
87e20ba6e1SChristoph Hellwig 	enum hctx_type type = HCTX_TYPE_DEFAULT;
88b3c661b1SJens Axboe 
89bb94aea1SJianchao Wang 	/*
906ce913feSChristoph Hellwig 	 * The caller ensures that if REQ_POLLED is set, polling is enabled.
91bb94aea1SJianchao Wang 	 */
927e923f40SBart Van Assche 	if (opf & REQ_POLLED)
93e20ba6e1SChristoph Hellwig 		type = HCTX_TYPE_POLL;
947e923f40SBart Van Assche 	else if ((opf & REQ_OP_MASK) == REQ_OP_READ)
95e20ba6e1SChristoph Hellwig 		type = HCTX_TYPE_READ;
96b637108aSMing Lei 	return type;
97b637108aSMing Lei }
98e20ba6e1SChristoph Hellwig 
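/*
 * Illustrative mapping added for exposition (not part of the upstream
 * header): how a few opf values resolve through blk_mq_get_hctx_type():
 *
 *	REQ_OP_READ | REQ_POLLED	-> HCTX_TYPE_POLL
 *	REQ_OP_READ			-> HCTX_TYPE_READ
 *	REQ_OP_WRITE			-> HCTX_TYPE_DEFAULT
 */
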
99b637108aSMing Lei /*
100b637108aSMing Lei  * blk_mq_map_queue() - map (opf, ctx) to hardware queue
101b637108aSMing Lei  * @q: request queue
1027e923f40SBart Van Assche  * @opf: operation type (REQ_OP_*) and flags (e.g. REQ_POLLED).
103b637108aSMing Lei  * @ctx: software queue cpu ctx
104b637108aSMing Lei  */
105b637108aSMing Lei static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
10616458cf3SBart Van Assche 						     blk_opf_t opf,
107b637108aSMing Lei 						     struct blk_mq_ctx *ctx)
108b637108aSMing Lei {
1097e923f40SBart Van Assche 	return ctx->hctxs[blk_mq_get_hctx_type(opf)];
110ff2c5660SJens Axboe }
111ff2c5660SJens Axboe 
112e93ecf60SJens Axboe /*
11367aec14cSJens Axboe  * sysfs helpers
11467aec14cSJens Axboe  */
115737f98cfSMing Lei extern void blk_mq_sysfs_init(struct request_queue *q);
1167ea5fe31SMing Lei extern void blk_mq_sysfs_deinit(struct request_queue *q);
1178682b92eSChristoph Hellwig int blk_mq_sysfs_register(struct gendisk *disk);
1188682b92eSChristoph Hellwig void blk_mq_sysfs_unregister(struct gendisk *disk);
119eaa870f9SChristoph Hellwig int blk_mq_sysfs_register_hctxs(struct request_queue *q);
120eaa870f9SChristoph Hellwig void blk_mq_sysfs_unregister_hctxs(struct request_queue *q);
121868f2f0bSKeith Busch extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
12247c122e3SJens Axboe void blk_mq_free_plug_rqs(struct blk_plug *plug);
123dbb6f764SChristoph Hellwig void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
12467aec14cSJens Axboe 
1252a19b28fSMing Lei void blk_mq_cancel_work_sync(struct request_queue *q);
1262a19b28fSMing Lei 
127e09aae7eSMing Lei void blk_mq_release(struct request_queue *q);
128e09aae7eSMing Lei 
1291aecfe48SMing Lei static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
1301aecfe48SMing Lei 					   unsigned int cpu)
1311aecfe48SMing Lei {
1321aecfe48SMing Lei 	return per_cpu_ptr(q->queue_ctx, cpu);
1331aecfe48SMing Lei }
1341aecfe48SMing Lei 
1351aecfe48SMing Lei /*
1361aecfe48SMing Lei  * This assumes per-cpu software queues. They could be per-node
1371aecfe48SMing Lei  * as well, for instance. For now this is hardcoded as-is. Note that we don't
1381aecfe48SMing Lei  * care about preemption, since we know the ctx's are persistent. This does
1391aecfe48SMing Lei  * mean that we can't rely on ctx always matching the currently running CPU.
1401aecfe48SMing Lei  */
1411aecfe48SMing Lei static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
1421aecfe48SMing Lei {
143c05f4220SBart Van Assche 	return __blk_mq_get_ctx(q, raw_smp_processor_id());
1441aecfe48SMing Lei }
1451aecfe48SMing Lei 
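/*
 * Illustrative sketch added for exposition (not part of the upstream
 * header): how a submission path could combine blk_mq_get_ctx() and
 * blk_mq_map_queue() to find the hardware queue for a polled read.
 * The function name is hypothetical.
 */
static inline struct blk_mq_hw_ctx *example_map_polled_read(struct request_queue *q)
{
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
	blk_opf_t opf = REQ_OP_READ | REQ_POLLED;

	/* REQ_POLLED takes precedence, so this resolves to the HCTX_TYPE_POLL queue */
	return blk_mq_map_queue(q, opf, ctx);
}
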
146cb96a42cSMing Lei struct blk_mq_alloc_data {
147cb96a42cSMing Lei 	/* input parameter */
148cb96a42cSMing Lei 	struct request_queue *q;
1499a95e4efSBart Van Assche 	blk_mq_req_flags_t flags;
150229a9287SOmar Sandoval 	unsigned int shallow_depth;
15116458cf3SBart Van Assche 	blk_opf_t cmd_flags;
152ecaf97f4SJens Axboe 	req_flags_t rq_flags;
153cb96a42cSMing Lei 
15447c122e3SJens Axboe 	/* allocate multiple requests/tags in one go */
15547c122e3SJens Axboe 	unsigned int nr_tags;
15647c122e3SJens Axboe 	struct request **cached_rq;
15747c122e3SJens Axboe 
158cb96a42cSMing Lei 	/* input & output parameter */
159cb96a42cSMing Lei 	struct blk_mq_ctx *ctx;
160cb96a42cSMing Lei 	struct blk_mq_hw_ctx *hctx;
161cb96a42cSMing Lei };
162cb96a42cSMing Lei 
163bebe84ebSChristoph Hellwig struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
164bebe84ebSChristoph Hellwig 		unsigned int reserved_tags, int node, int alloc_policy);
165bebe84ebSChristoph Hellwig void blk_mq_free_tags(struct blk_mq_tags *tags);
166bebe84ebSChristoph Hellwig int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
167bebe84ebSChristoph Hellwig 		struct sbitmap_queue *breserved_tags, unsigned int queue_depth,
168bebe84ebSChristoph Hellwig 		unsigned int reserved, int node, int alloc_policy);
169bebe84ebSChristoph Hellwig 
170bebe84ebSChristoph Hellwig unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
171bebe84ebSChristoph Hellwig unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
172bebe84ebSChristoph Hellwig 		unsigned int *offset);
173bebe84ebSChristoph Hellwig void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
174bebe84ebSChristoph Hellwig 		unsigned int tag);
175bebe84ebSChristoph Hellwig void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags);
176bebe84ebSChristoph Hellwig int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
177bebe84ebSChristoph Hellwig 		struct blk_mq_tags **tags, unsigned int depth, bool can_grow);
178bebe84ebSChristoph Hellwig void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set,
179bebe84ebSChristoph Hellwig 		unsigned int size);
180bebe84ebSChristoph Hellwig void blk_mq_tag_update_sched_shared_tags(struct request_queue *q);
181bebe84ebSChristoph Hellwig 
182bebe84ebSChristoph Hellwig void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
183bebe84ebSChristoph Hellwig void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
184bebe84ebSChristoph Hellwig 		void *priv);
185bebe84ebSChristoph Hellwig void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
186bebe84ebSChristoph Hellwig 		void *priv);
187bebe84ebSChristoph Hellwig 
188bebe84ebSChristoph Hellwig static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt,
189bebe84ebSChristoph Hellwig 						 struct blk_mq_hw_ctx *hctx)
190bebe84ebSChristoph Hellwig {
191bebe84ebSChristoph Hellwig 	if (!hctx)
192bebe84ebSChristoph Hellwig 		return &bt->ws[0];
193bebe84ebSChristoph Hellwig 	return sbq_wait_ptr(bt, &hctx->wait_index);
194bebe84ebSChristoph Hellwig }
195bebe84ebSChristoph Hellwig 
196bebe84ebSChristoph Hellwig void __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
197bebe84ebSChristoph Hellwig void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);
198bebe84ebSChristoph Hellwig 
199bebe84ebSChristoph Hellwig static inline void blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
200bebe84ebSChristoph Hellwig {
201bebe84ebSChristoph Hellwig 	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
202bebe84ebSChristoph Hellwig 		__blk_mq_tag_busy(hctx);
203bebe84ebSChristoph Hellwig }
204bebe84ebSChristoph Hellwig 
205bebe84ebSChristoph Hellwig static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
206bebe84ebSChristoph Hellwig {
207bebe84ebSChristoph Hellwig 	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
208bebe84ebSChristoph Hellwig 		__blk_mq_tag_idle(hctx);
209bebe84ebSChristoph Hellwig }
210bebe84ebSChristoph Hellwig 
211bebe84ebSChristoph Hellwig static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags,
212bebe84ebSChristoph Hellwig 					  unsigned int tag)
213bebe84ebSChristoph Hellwig {
214bebe84ebSChristoph Hellwig 	return tag < tags->nr_reserved_tags;
215bebe84ebSChristoph Hellwig }
216bebe84ebSChristoph Hellwig 
217079a2e3eSJohn Garry static inline bool blk_mq_is_shared_tags(unsigned int flags)
21832bc15afSJohn Garry {
21932bc15afSJohn Garry 	return flags & BLK_MQ_F_TAG_HCTX_SHARED;
22032bc15afSJohn Garry }
22132bc15afSJohn Garry 
2224941115bSJens Axboe static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
2234941115bSJens Axboe {
224dd6216bbSChristoph Hellwig 	if (data->rq_flags & RQF_SCHED_TAGS)
22556f8da64SJens Axboe 		return data->hctx->sched_tags;
226dd6216bbSChristoph Hellwig 	return data->hctx->tags;
2274941115bSJens Axboe }
2284941115bSJens Axboe 
2295d1b25c1SBart Van Assche static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
2305d1b25c1SBart Van Assche {
231*e95080fbSMuchun Song 	/* Fast path: hardware queue is not stopped most of the time. */
232*e95080fbSMuchun Song 	if (likely(!test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
233*e95080fbSMuchun Song 		return false;
234*e95080fbSMuchun Song 
235*e95080fbSMuchun Song 	/*
236*e95080fbSMuchun Song 	 * This barrier orders adding requests to the dispatch list before
237*e95080fbSMuchun Song 	 * the test of BLK_MQ_S_STOPPED below. It pairs with the memory barrier
238*e95080fbSMuchun Song 	 * in blk_mq_start_stopped_hw_queue() so that the dispatch code either
239*e95080fbSMuchun Song 	 * sees BLK_MQ_S_STOPPED cleared or sees a non-empty dispatch list,
240*e95080fbSMuchun Song 	 * avoiding missed dispatch of requests.
241*e95080fbSMuchun Song 	 */
242*e95080fbSMuchun Song 	smp_mb();
243*e95080fbSMuchun Song 
2445d1b25c1SBart Van Assche 	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
2455d1b25c1SBart Van Assche }
2465d1b25c1SBart Van Assche 
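/*
 * Illustrative ordering sketch added for exposition (not part of the
 * upstream header), following the pairing described above:
 *
 *	dispatch side				restart side
 *	add request to dispatch list		clear BLK_MQ_S_STOPPED
 *	smp_mb() (blk_mq_hctx_stopped)		smp_mb() (blk_mq_start_stopped_hw_queue)
 *	test BLK_MQ_S_STOPPED			check/run the dispatch list
 *
 * At least one side observes the other's store, so a request cannot sit
 * on the dispatch list of a stopped queue that is never rerun.
 */
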
24719c66e59SMing Lei static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
24819c66e59SMing Lei {
24919c66e59SMing Lei 	return hctx->nr_ctx && hctx->tags;
25019c66e59SMing Lei }
25119c66e59SMing Lei 
2528446fe92SChristoph Hellwig unsigned int blk_mq_in_flight(struct request_queue *q,
2538446fe92SChristoph Hellwig 		struct block_device *part);
2548446fe92SChristoph Hellwig void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
255bf0ddabaSOmar Sandoval 		unsigned int inflight[2]);
256f299b7c7SJens Axboe 
2572a5a24aaSMing Lei static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
2582a5a24aaSMing Lei 					      int budget_token)
259de148297SMing Lei {
260de148297SMing Lei 	if (q->mq_ops->put_budget)
2612a5a24aaSMing Lei 		q->mq_ops->put_budget(q, budget_token);
262de148297SMing Lei }
263de148297SMing Lei 
2642a5a24aaSMing Lei static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
265de148297SMing Lei {
266de148297SMing Lei 	if (q->mq_ops->get_budget)
26765c76369SMing Lei 		return q->mq_ops->get_budget(q);
2682a5a24aaSMing Lei 	return 0;
2692a5a24aaSMing Lei }
2702a5a24aaSMing Lei 
2712a5a24aaSMing Lei static inline void blk_mq_set_rq_budget_token(struct request *rq, int token)
2722a5a24aaSMing Lei {
2732a5a24aaSMing Lei 	if (token < 0)
2742a5a24aaSMing Lei 		return;
2752a5a24aaSMing Lei 
2762a5a24aaSMing Lei 	if (rq->q->mq_ops->set_rq_budget_token)
2772a5a24aaSMing Lei 		rq->q->mq_ops->set_rq_budget_token(rq, token);
2782a5a24aaSMing Lei }
2792a5a24aaSMing Lei 
2802a5a24aaSMing Lei static inline int blk_mq_get_rq_budget_token(struct request *rq)
2812a5a24aaSMing Lei {
2822a5a24aaSMing Lei 	if (rq->q->mq_ops->get_rq_budget_token)
2832a5a24aaSMing Lei 		return rq->q->mq_ops->get_rq_budget_token(rq);
2842a5a24aaSMing Lei 	return -1;
285de148297SMing Lei }
286de148297SMing Lei 
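/*
 * Illustrative sketch added for exposition (not part of the upstream
 * header): the pattern a dispatch path follows around the budget helpers.
 * The function name and the 'driver_was_busy' parameter are hypothetical
 * stand-ins for the real ->queue_rq() outcome.
 */
static inline void example_dispatch_with_budget(struct request_queue *q,
						struct request *rq,
						bool driver_was_busy)
{
	int budget_token = blk_mq_get_dispatch_budget(q);

	if (budget_token < 0)
		return;				/* no budget, retry dispatch later */

	blk_mq_set_rq_budget_token(rq, budget_token);

	/* ... the request would be handed to ->queue_rq() here ... */

	if (driver_was_busy)			/* e.g. BLK_STS_RESOURCE */
		blk_mq_put_dispatch_budget(q, budget_token);
}
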
287bccf5e26SJohn Garry static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
288bccf5e26SJohn Garry {
289079a2e3eSJohn Garry 	if (blk_mq_is_shared_tags(hctx->flags))
290079a2e3eSJohn Garry 		atomic_inc(&hctx->queue->nr_active_requests_shared_tags);
291bccf5e26SJohn Garry 	else
292bccf5e26SJohn Garry 		atomic_inc(&hctx->nr_active);
293bccf5e26SJohn Garry }
294bccf5e26SJohn Garry 
2953b87c6eaSMing Lei static inline void __blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
2963b87c6eaSMing Lei 		int val)
297bccf5e26SJohn Garry {
298079a2e3eSJohn Garry 	if (blk_mq_is_shared_tags(hctx->flags))
2993b87c6eaSMing Lei 		atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags);
300bccf5e26SJohn Garry 	else
3013b87c6eaSMing Lei 		atomic_sub(val, &hctx->nr_active);
3023b87c6eaSMing Lei }
3033b87c6eaSMing Lei 
3043b87c6eaSMing Lei static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
3053b87c6eaSMing Lei {
3063b87c6eaSMing Lei 	__blk_mq_sub_active_requests(hctx, 1);
307bccf5e26SJohn Garry }
308bccf5e26SJohn Garry 
309bccf5e26SJohn Garry static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
310bccf5e26SJohn Garry {
311079a2e3eSJohn Garry 	if (blk_mq_is_shared_tags(hctx->flags))
312079a2e3eSJohn Garry 		return atomic_read(&hctx->queue->nr_active_requests_shared_tags);
313bccf5e26SJohn Garry 	return atomic_read(&hctx->nr_active);
314bccf5e26SJohn Garry }
3154e2f62e5SJens Axboe static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
3164e2f62e5SJens Axboe 					   struct request *rq)
3174e2f62e5SJens Axboe {
3184e2f62e5SJens Axboe 	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
3194e2f62e5SJens Axboe 	rq->tag = BLK_MQ_NO_TAG;
3204e2f62e5SJens Axboe 
3214e2f62e5SJens Axboe 	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
3224e2f62e5SJens Axboe 		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
323bccf5e26SJohn Garry 		__blk_mq_dec_active_requests(hctx);
3244e2f62e5SJens Axboe 	}
3254e2f62e5SJens Axboe }
3264e2f62e5SJens Axboe 
3274e2f62e5SJens Axboe static inline void blk_mq_put_driver_tag(struct request *rq)
3284e2f62e5SJens Axboe {
3294e2f62e5SJens Axboe 	if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
3304e2f62e5SJens Axboe 		return;
3314e2f62e5SJens Axboe 
3324e2f62e5SJens Axboe 	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
3334e2f62e5SJens Axboe }
3344e2f62e5SJens Axboe 
335a808a9d5SJens Axboe bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq);
336a808a9d5SJens Axboe 
337a808a9d5SJens Axboe static inline bool blk_mq_get_driver_tag(struct request *rq)
338a808a9d5SJens Axboe {
339a808a9d5SJens Axboe 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
340a808a9d5SJens Axboe 
341a808a9d5SJens Axboe 	if (rq->tag != BLK_MQ_NO_TAG &&
342a808a9d5SJens Axboe 	    !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
343a808a9d5SJens Axboe 		hctx->tags->rqs[rq->tag] = rq;
344a808a9d5SJens Axboe 		return true;
345a808a9d5SJens Axboe 	}
346a808a9d5SJens Axboe 
347a808a9d5SJens Axboe 	return __blk_mq_get_driver_tag(hctx, rq);
348a808a9d5SJens Axboe }
34961347154SJan Kara 
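/*
 * Illustrative sketch added for exposition (not part of the upstream
 * header): the usual acquire/release pattern around a driver tag when
 * issuing a request. The function name and 'driver_was_busy' are
 * hypothetical stand-ins for the real ->queue_rq() outcome.
 */
static inline void example_issue_with_driver_tag(struct request *rq,
						 bool driver_was_busy)
{
	if (!blk_mq_get_driver_tag(rq))
		return;			/* no driver tag free, retry dispatch later */

	/* ... ->queue_rq() would be called here ... */

	if (driver_was_busy)		/* e.g. BLK_STS_RESOURCE */
		blk_mq_put_driver_tag(rq);
	/* otherwise the tag stays with the request until completion */
}
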
350ed76e329SJens Axboe static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
3510da73d00SMinwoo Im {
3520da73d00SMinwoo Im 	int cpu;
3530da73d00SMinwoo Im 
3540da73d00SMinwoo Im 	for_each_possible_cpu(cpu)
355ed76e329SJens Axboe 		qmap->mq_map[cpu] = 0;
3560da73d00SMinwoo Im }
3570da73d00SMinwoo Im 
358b49773e7SDamien Le Moal /*
359b49773e7SDamien Le Moal  * blk_mq_plug() - Get caller context plug
360b49773e7SDamien Le Moal  * @bio : the bio being submitted by the caller context
361b49773e7SDamien Le Moal  *
362b49773e7SDamien Le Moal  * Plugging, by design, may delay the insertion of BIOs into the elevator in
363b49773e7SDamien Le Moal  * order to increase BIO merging opportunities. This however can cause BIO
364b49773e7SDamien Le Moal  * insertion order to change from the order in which submit_bio() is being
365b49773e7SDamien Le Moal  * executed in the case of multiple contexts concurrently issuing BIOs to a
366b49773e7SDamien Le Moal  * device, even if these contexts are synchronized to tightly control BIO issuing
367b49773e7SDamien Le Moal  * order. While this is not a problem with regular block devices, this ordering
368b49773e7SDamien Le Moal  * change can cause write BIO failures with zoned block devices as these
369b49773e7SDamien Le Moal  * require sequential write patterns to zones. Prevent this from happening by
3706deacb3bSChristoph Hellwig  * ignoring the plug state of a BIO issuing context if it is for a zoned block
3716deacb3bSChristoph Hellwig  * device and the BIO to plug is a write operation.
372b49773e7SDamien Le Moal  *
373b49773e7SDamien Le Moal  * Return current->plug if the bio can be plugged and NULL otherwise
374b49773e7SDamien Le Moal  */
3756deacb3bSChristoph Hellwig static inline struct blk_plug *blk_mq_plug(struct bio *bio)
376b49773e7SDamien Le Moal {
3776deacb3bSChristoph Hellwig 	/* Zoned block device write operation case: do not plug the BIO */
3788cafdb5aSPankaj Raghav 	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
3798cafdb5aSPankaj Raghav 	    bdev_op_is_zoned_write(bio->bi_bdev, bio_op(bio)))
3806deacb3bSChristoph Hellwig 		return NULL;
3816deacb3bSChristoph Hellwig 
382b49773e7SDamien Le Moal 	/*
383b49773e7SDamien Le Moal 	 * For regular block devices or read operations, use the context plug
384b49773e7SDamien Le Moal 	 * which may be NULL if blk_start_plug() was not executed.
385b49773e7SDamien Le Moal 	 */
386b49773e7SDamien Le Moal 	return current->plug;
387b49773e7SDamien Le Moal }
388b49773e7SDamien Le Moal 
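/*
 * Illustrative sketch added for exposition (not part of the upstream
 * header): a submission path consulting blk_mq_plug() before deferring
 * a bio to the plug list. The function name is hypothetical.
 */
static inline bool example_bio_may_be_plugged(struct bio *bio)
{
	struct blk_plug *plug = blk_mq_plug(bio);

	/*
	 * NULL means either no blk_start_plug() is active in this context
	 * or the bio is a write to a zoned device and must not be reordered.
	 */
	return plug != NULL;
}
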
389fd2ef39cSJan Kara /* Free all requests on the list */
390fd2ef39cSJan Kara static inline void blk_mq_free_requests(struct list_head *list)
391fd2ef39cSJan Kara {
392fd2ef39cSJan Kara 	while (!list_empty(list)) {
393fd2ef39cSJan Kara 		struct request *rq = list_entry_rq(list->next);
394fd2ef39cSJan Kara 
395fd2ef39cSJan Kara 		list_del_init(&rq->queuelist);
396fd2ef39cSJan Kara 		blk_mq_free_request(rq);
397fd2ef39cSJan Kara 	}
398fd2ef39cSJan Kara }
399fd2ef39cSJan Kara 
400a0235d23SJohn Garry /*
401a0235d23SJohn Garry  * For shared tag users, we track the number of currently active users
402a0235d23SJohn Garry  * and attempt to provide a fair share of the tag depth for each of them.
403a0235d23SJohn Garry  */
404a0235d23SJohn Garry static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
405a0235d23SJohn Garry 				  struct sbitmap_queue *bt)
406a0235d23SJohn Garry {
407a0235d23SJohn Garry 	unsigned int depth, users;
408a0235d23SJohn Garry 
409a0235d23SJohn Garry 	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
410a0235d23SJohn Garry 		return true;
411a0235d23SJohn Garry 
412a0235d23SJohn Garry 	/*
413a0235d23SJohn Garry 	 * Don't try dividing an ant
414a0235d23SJohn Garry 	 */
415a0235d23SJohn Garry 	if (bt->sb.depth == 1)
416a0235d23SJohn Garry 		return true;
417a0235d23SJohn Garry 
418079a2e3eSJohn Garry 	if (blk_mq_is_shared_tags(hctx->flags)) {
419f1b49fdcSJohn Garry 		struct request_queue *q = hctx->queue;
420f1b49fdcSJohn Garry 
4212569063cSMing Lei 		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
422f1b49fdcSJohn Garry 			return true;
423f1b49fdcSJohn Garry 	} else {
424f1b49fdcSJohn Garry 		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
425f1b49fdcSJohn Garry 			return true;
426f1b49fdcSJohn Garry 	}
427f1b49fdcSJohn Garry 
4284f1731dfSYu Kuai 	users = READ_ONCE(hctx->tags->active_queues);
429a0235d23SJohn Garry 	if (!users)
430a0235d23SJohn Garry 		return true;
431a0235d23SJohn Garry 
432a0235d23SJohn Garry 	/*
433a0235d23SJohn Garry 	 * Allow at least some tags
434a0235d23SJohn Garry 	 */
435a0235d23SJohn Garry 	depth = max((bt->sb.depth + users - 1) / users, 4U);
436bccf5e26SJohn Garry 	return __blk_mq_active_requests(hctx) < depth;
437a0235d23SJohn Garry }
438a0235d23SJohn Garry 
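/*
 * Worked example added for exposition (not part of the upstream header):
 * with a shared bitmap depth of 256 and 8 active queues, each queue may
 * have up to max((256 + 8 - 1) / 8, 4) = 32 requests in flight before
 * hctx_may_queue() starts returning false for it.
 */
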
4392a904d00SMing Lei /* run the code block in @dispatch_ops with rcu/srcu read lock held */
44041adf531SMing Lei #define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops)	\
4412a904d00SMing Lei do {								\
44280bd4a7aSChristoph Hellwig 	if ((q)->tag_set->flags & BLK_MQ_F_BLOCKING) {		\
44300e885efSChris Leech 		struct blk_mq_tag_set *__tag_set = (q)->tag_set; \
4442a904d00SMing Lei 		int srcu_idx;					\
4452a904d00SMing Lei 								\
44641adf531SMing Lei 		might_sleep_if(check_sleep);			\
44700e885efSChris Leech 		srcu_idx = srcu_read_lock(__tag_set->srcu);	\
4482a904d00SMing Lei 		(dispatch_ops);					\
44900e885efSChris Leech 		srcu_read_unlock(__tag_set->srcu, srcu_idx);	\
45080bd4a7aSChristoph Hellwig 	} else {						\
45180bd4a7aSChristoph Hellwig 		rcu_read_lock();				\
45280bd4a7aSChristoph Hellwig 		(dispatch_ops);					\
45380bd4a7aSChristoph Hellwig 		rcu_read_unlock();				\
4542a904d00SMing Lei 	}							\
4552a904d00SMing Lei } while (0)
456a0235d23SJohn Garry 
45741adf531SMing Lei #define blk_mq_run_dispatch_ops(q, dispatch_ops)		\
45841adf531SMing Lei 	__blk_mq_run_dispatch_ops(q, true, dispatch_ops)	\
45941adf531SMing Lei 
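/*
 * Illustrative usage added for exposition (not part of the upstream
 * header): a dispatch-path caller typically wraps its work like
 *
 *	blk_mq_run_dispatch_ops(hctx->queue,
 *				blk_mq_sched_dispatch_requests(hctx));
 *
 * so the block runs under rcu_read_lock() for non-blocking tag sets and
 * under srcu_read_lock() when BLK_MQ_F_BLOCKING is set.
 * (blk_mq_sched_dispatch_requests() is declared in blk-mq-sched.h, not here.)
 */
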
460320ae51fSJens Axboe #endif
461