/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include <linux/blk-mq.h>
#include "blk-stat.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
	struct kobject kobj;
	struct blk_mq_ctx __percpu	*queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 * @lock: protects @rq_lists
 * @rq_lists: lists of requests pending in this software queue, one per hctx type
 * @cpu: CPU this software queue is mapped to
 * @index_hw: index of this ctx within hctx->ctxs[] of the mapped hardware
 *	queue, one entry per hctx type
 * @hctxs: hardware queues this ctx maps to, one per hctx type
 * @queue: request queue this ctx belongs to
 * @ctxs: parent blk_mq_ctxs container, used for the sysfs hierarchy
 * @kobj: sysfs object for this software queue
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_lists[HCTX_MAX_TYPES];
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned short		index_hw[HCTX_MAX_TYPES];
	struct blk_mq_hw_ctx	*hctxs[HCTX_MAX_TYPES];

	struct request_queue	*queue;
	struct blk_mq_ctxs	*ctxs;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

enum {
	BLK_MQ_NO_TAG		= -1U,
	BLK_MQ_TAG_MIN		= 1,
	BLK_MQ_TAG_MAX		= BLK_MQ_NO_TAG - 1,
};

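/*
 * Request insertion flags, passed to the internal insertion helpers in
 * blk-mq.c. BLK_MQ_INSERT_AT_HEAD asks for insertion at the head of the
 * target queue/list instead of the tail.
 */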
typedef unsigned int __bitwise blk_insert_t;
#define BLK_MQ_INSERT_AT_HEAD		((__force blk_insert_t)0x01)

void blk_mq_submit_bio(struct bio *bio);
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
		unsigned int flags);
void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
			     unsigned int);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);
void blk_mq_put_rq_ref(struct request *rq);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
				unsigned int hctx_idx, unsigned int depth);
void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
			     struct blk_mq_tags *tags,
			     unsigned int hctx_idx);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
							  enum hctx_type type,
							  unsigned int cpu)
{
	return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]);
}
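/*
 * The lookup above is a two-step translation: qmap->mq_map[] turns a CPU
 * number into a hardware queue index for the given hctx type, and the
 * queue's hctx_table xarray turns that index into the blk_mq_hw_ctx pointer.
 */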

static inline enum hctx_type blk_mq_get_hctx_type(blk_opf_t opf)
{
	enum hctx_type type = HCTX_TYPE_DEFAULT;

	/*
	 * The caller ensures that polling is enabled if REQ_POLLED is set.
	 */
	if (opf & REQ_POLLED)
		type = HCTX_TYPE_POLL;
	else if ((opf & REQ_OP_MASK) == REQ_OP_READ)
		type = HCTX_TYPE_READ;
	return type;
}

/*
 * blk_mq_map_queue() - map (opf,ctx) to hardware queue
 * @q: request queue
 * @opf: operation type (REQ_OP_*) and flags (e.g. REQ_POLLED).
 * @ctx: software queue cpu ctx
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     blk_opf_t opf,
						     struct blk_mq_ctx *ctx)
{
	return ctx->hctxs[blk_mq_get_hctx_type(opf)];
}
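/*
 * A typical allocation path resolves the mapping roughly like this
 * (illustrative):
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, opf, ctx);
 */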

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
int blk_mq_sysfs_register(struct gendisk *disk);
void blk_mq_sysfs_unregister(struct gendisk *disk);
int blk_mq_sysfs_register_hctxs(struct request_queue *q);
void blk_mq_sysfs_unregister_hctxs(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
void blk_mq_free_plug_rqs(struct blk_plug *plug);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_cancel_work_sync(struct request_queue *q);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues; they could be per-node as well, for
 * instance. For now this is hardcoded as-is. Note that we don't care about
 * preemption, since we know the ctxs are persistent. This does mean that we
 * can't rely on the ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, raw_smp_processor_id());
}

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;
	blk_opf_t cmd_flags;
	req_flags_t rq_flags;

	/* allocate multiple requests/tags in one go */
	unsigned int nr_tags;
	struct request **cached_rq;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};
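/*
 * The input fields describe what is being allocated: ctx and hctx are
 * resolved via the mapping helpers above before blk_mq_get_tag() (declared
 * below) picks a tag from the selected hardware queue, and nr_tags with
 * cached_rq let a plug allocate a batch of requests in one go.
 */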

struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
		unsigned int reserved_tags, int node, int alloc_policy);
void blk_mq_free_tags(struct blk_mq_tags *tags);
int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
		struct sbitmap_queue *breserved_tags, unsigned int queue_depth,
		unsigned int reserved, int node, int alloc_policy);

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
		unsigned int *offset);
void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
		unsigned int tag);
void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags);
int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
		struct blk_mq_tags **tags, unsigned int depth, bool can_grow);
void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set,
		unsigned int size);
void blk_mq_tag_update_sched_shared_tags(struct request_queue *q);

void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
		void *priv);
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv);

static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt,
						 struct blk_mq_hw_ctx *hctx)
{
	if (!hctx)
		return &bt->ws[0];
	return sbq_wait_ptr(bt, &hctx->wait_index);
}

void __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);

static inline void blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_tag_busy(hctx);
}

static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_tag_idle(hctx);
}

static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags,
					  unsigned int tag)
{
	return tag < tags->nr_reserved_tags;
}

static inline bool blk_mq_is_shared_tags(unsigned int flags)
{
	return flags & BLK_MQ_F_TAG_HCTX_SHARED;
}

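/*
 * RQF_SCHED_TAGS marks requests whose tag was allocated from the I/O
 * scheduler's sched_tags rather than from the driver tags.
 */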
static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->rq_flags & RQF_SCHED_TAGS)
		return data->hctx->sched_tags;
	return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
		struct block_device *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
		unsigned int inflight[2]);

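/*
 * Dispatch budget helpers. A dispatch loop typically uses them roughly as
 * follows (illustrative):
 *
 *	int budget_token = blk_mq_get_dispatch_budget(q);
 *
 *	if (budget_token < 0)
 *		return;				// device busy, retry later
 *	blk_mq_set_rq_budget_token(rq, budget_token);
 *	...
 *	if (dispatching rq failed)
 *		blk_mq_put_dispatch_budget(q, budget_token);
 */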
static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
					      int budget_token)
{
	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(q, budget_token);
}

static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
{
	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(q);
	return 0;
}

static inline void blk_mq_set_rq_budget_token(struct request *rq, int token)
{
	if (token < 0)
		return;

	if (rq->q->mq_ops->set_rq_budget_token)
		rq->q->mq_ops->set_rq_budget_token(rq, token);
}

static inline int blk_mq_get_rq_budget_token(struct request *rq)
{
	if (rq->q->mq_ops->get_rq_budget_token)
		return rq->q->mq_ops->get_rq_budget_token(rq);
	return -1;
}

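/*
 * Active-request accounting used for fair tag sharing: with shared tags the
 * counter lives in the request queue, otherwise it is kept per hardware
 * queue.
 */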
static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_inc(&hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_inc(&hctx->nr_active);
}

static inline void __blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
		int val)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_sub(val, &hctx->nr_active);
}

static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
	__blk_mq_sub_active_requests(hctx, 1);
}

static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		return atomic_read(&hctx->queue->nr_active_requests_shared_tags);
	return atomic_read(&hctx->nr_active);
}

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = BLK_MQ_NO_TAG;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		__blk_mq_dec_active_requests(hctx);
	}
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
		return;

	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq);

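/*
 * If the request already owns a driver tag and the tags are not shared
 * across queues, just record it in tags->rqs[]; otherwise fall back to the
 * slow path that allocates (and accounts) a driver tag.
 */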
static inline bool blk_mq_get_driver_tag(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	if (rq->tag != BLK_MQ_NO_TAG &&
	    !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
		hctx->tags->rqs[rq->tag] = rq;
		return true;
	}

	return __blk_mq_get_driver_tag(hctx, rq);
}

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
	int cpu;

	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = 0;
}

/*
 * blk_mq_plug() - Get the caller context plug
 * @bio: the bio being submitted by the caller context
 *
 * Plugging, by design, may delay the insertion of BIOs into the elevator in
 * order to increase BIO merging opportunities. This however can change the
 * BIO insertion order from the order in which submit_bio() is executed when
 * multiple contexts concurrently issue BIOs to a device, even if these
 * contexts are synchronized to tightly control the issuing order. While this
 * is not a problem for regular block devices, the ordering change can cause
 * write BIO failures on zoned block devices, which require sequential write
 * patterns within zones. Prevent this by ignoring the plug state of a BIO
 * issuing context if it is for a zoned block device and the BIO to plug is a
 * write operation.
 *
 * Return: current->plug if the bio can be plugged, NULL otherwise.
 */
static inline struct blk_plug *blk_mq_plug(struct bio *bio)
{
	/* Zoned block device write operation case: do not plug the BIO */
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
	    bdev_op_is_zoned_write(bio->bi_bdev, bio_op(bio)))
		return NULL;

	/*
	 * For regular block devices or read operations, use the context plug
	 * which may be NULL if blk_start_plug() was not executed.
	 */
	return current->plug;
}
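/*
 * Typical use in the submission path, roughly (illustrative):
 *
 *	struct blk_plug *plug = blk_mq_plug(bio);
 *
 *	if (plug)
 *		add the request to the plug list for batched insertion
 *	else
 *		insert/dispatch the request directly
 */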

/* Free all requests on the list */
static inline void blk_mq_free_requests(struct list_head *list)
{
	while (!list_empty(list)) {
		struct request *rq = list_entry_rq(list->next);

		list_del_init(&rq->queuelist);
		blk_mq_free_request(rq);
	}
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			return true;
	} else {
		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return true;
	}

	users = READ_ONCE(hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return __blk_mq_active_requests(hctx) < depth;
}
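/*
 * Example: with a shared depth of 256 tags and 8 active users, each user is
 * allowed up to max(DIV_ROUND_UP(256, 8), 4) = 32 in-flight tags before
 * hctx_may_queue() returns false.
 */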

/* run the code block in @dispatch_ops with rcu/srcu read lock held */
#define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops)	\
do {								\
	if ((q)->tag_set->flags & BLK_MQ_F_BLOCKING) {		\
		struct blk_mq_tag_set *__tag_set = (q)->tag_set; \
		int srcu_idx;					\
								\
		might_sleep_if(check_sleep);			\
		srcu_idx = srcu_read_lock(__tag_set->srcu);	\
		(dispatch_ops);					\
		srcu_read_unlock(__tag_set->srcu, srcu_idx);	\
	} else {						\
		rcu_read_lock();				\
		(dispatch_ops);					\
		rcu_read_unlock();				\
	}							\
} while (0)

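/*
 * Wrapper that additionally asserts the context may sleep when the tag set
 * is BLK_MQ_F_BLOCKING. Typical use (illustrative):
 *
 *	blk_mq_run_dispatch_ops(q,
 *		blk_mq_sched_dispatch_requests(hctx));
 */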
#define blk_mq_run_dispatch_ops(q, dispatch_ops)		\
	__blk_mq_run_dispatch_ops(q, true, dispatch_ops)

#endif