/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
	struct kobject kobj;
	struct blk_mq_ctx __percpu	*queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_lists[HCTX_MAX_TYPES];
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned short		index_hw[HCTX_MAX_TYPES];
	struct blk_mq_hw_ctx	*hctxs[HCTX_MAX_TYPES];

	struct request_queue	*queue;
	struct blk_mq_ctxs	*ctxs;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;
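
/*
 * Illustrative sketch (not part of the kernel tree): requests bound for a
 * given hardware queue type are appended to the matching software list
 * under the ctx lock, roughly:
 *
 *	spin_lock(&ctx->lock);
 *	list_add_tail(&rq->queuelist, &ctx->rq_lists[hctx->type]);
 *	spin_unlock(&ctx->lock);
 *
 * The anonymous struct keeps the lock and the lists it protects on one
 * cache line, separate from the read-mostly mapping fields below it.
 */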

void blk_mq_submit_bio(struct bio *bio);
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
		unsigned int flags);
void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
			     unsigned int);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);
void blk_mq_put_rq_ref(struct request *rq);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
				unsigned int hctx_idx, unsigned int depth);
void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
			     struct blk_mq_tags *tags,
			     unsigned int hctx_idx);
/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
				  bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);

/* Used by blk_insert_cloned_request() to issue request directly */
blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
				    struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
							  enum hctx_type type,
							  unsigned int cpu)
{
	return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
}
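
/*
 * Illustrative sketch (not part of the kernel tree): walking the hardware
 * queues of one type, e.g. the poll queues:
 *
 *	unsigned int cpu;
 *	for_each_possible_cpu(cpu) {
 *		struct blk_mq_hw_ctx *hctx =
 *			blk_mq_map_queue_type(q, HCTX_TYPE_POLL, cpu);
 *		...
 *	}
 *
 * mq_map[] may send several CPUs to the same hctx, so a real walk has to
 * deduplicate, as blk_mq_map_swqueue() does when it builds ctx->hctxs[].
 */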

static inline enum hctx_type blk_mq_get_hctx_type(unsigned int flags)
{
	enum hctx_type type = HCTX_TYPE_DEFAULT;

	/*
	 * The caller ensures that polling is enabled if REQ_POLLED is set.
	 */
	if (flags & REQ_POLLED)
		type = HCTX_TYPE_POLL;
	else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
		type = HCTX_TYPE_READ;
	return type;
}
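
/*
 * For illustration (not part of the kernel tree): a polled read
 * (REQ_OP_READ | REQ_POLLED) maps to HCTX_TYPE_POLL, a plain read to
 * HCTX_TYPE_READ, and everything else (writes, flushes, discards, ...) to
 * HCTX_TYPE_DEFAULT. Types a device does not expose fall back to the
 * default map when ctx->hctxs[] is built.
 */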

/*
 * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
 * @q: request queue
 * @flags: request command flags
 * @ctx: software queue cpu ctx
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     unsigned int flags,
						     struct blk_mq_ctx *ctx)
{
	return ctx->hctxs[blk_mq_get_hctx_type(flags)];
}
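
/*
 * Illustrative sketch (not part of the kernel tree): resolving the
 * hardware queue for a bio about to become a request:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
 *
 * This is two dependent loads: blk_mq_get_hctx_type() classifies the
 * command flags and ctx->hctxs[] holds the per-CPU answer precomputed at
 * map time, so no mq_map[] lookup happens in the hot path (note that @q
 * is not used by the body).
 */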

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
void blk_mq_free_plug_rqs(struct blk_plug *plug);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_cancel_work_sync(struct request_queue *q);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues; they could be per-node as well, for
 * instance. For now this is hardcoded as-is. Note that we don't care about
 * preemption, since we know the ctx's are persistent. This does mean that we
 * can't rely on the ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, raw_smp_processor_id());
}
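
/*
 * Illustrative note (not part of the kernel tree): since preemption stays
 * enabled, the task may migrate after the lookup:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);	// CPU 3's ctx
 *	// ...task migrates to CPU 5...
 *	spin_lock(&ctx->lock);				// still CPU 3's ctx
 *
 * That is safe: each ctx is protected by its own lock rather than by
 * "being on the owning CPU", so touching a remote CPU's ctx costs at
 * worst a cache miss, never correctness.
 */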

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;
	unsigned int cmd_flags;
	req_flags_t rq_flags;

	/* allocate multiple requests/tags in one go */
	unsigned int nr_tags;
	struct request **cached_rq;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};
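
/*
 * Illustrative sketch (not part of the kernel tree): a typical on-stack
 * initialization, in the style of the allocation paths in blk-mq.c:
 *
 *	struct blk_mq_alloc_data data = {
 *		.q		= q,
 *		.nr_tags	= 1,
 *		.cmd_flags	= bio->bi_opf,
 *	};
 *
 * The allocator fills in ctx and hctx while resolving a tag, which is
 * why they are marked input & output above.
 */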

static inline bool blk_mq_is_shared_tags(unsigned int flags)
{
	return flags & BLK_MQ_F_TAG_HCTX_SHARED;
}

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (!(data->rq_flags & RQF_ELV))
		return data->hctx->tags;
	return data->hctx->sched_tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
		struct block_device *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
		unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
					      int budget_token)
{
	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(q, budget_token);
}

static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
{
	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(q);
	return 0;
}

static inline void blk_mq_set_rq_budget_token(struct request *rq, int token)
{
	if (token < 0)
		return;

	if (rq->q->mq_ops->set_rq_budget_token)
		rq->q->mq_ops->set_rq_budget_token(rq, token);
}

static inline int blk_mq_get_rq_budget_token(struct request *rq)
{
	if (rq->q->mq_ops->get_rq_budget_token)
		return rq->q->mq_ops->get_rq_budget_token(rq);
	return -1;
}
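
/*
 * Sketch of the budget protocol the dispatch paths follow (not part of
 * the kernel tree): a budget is taken before a request is handed to the
 * driver, stashed in the request for completion to find, and returned
 * on failure:
 *
 *	int token = blk_mq_get_dispatch_budget(q);
 *	if (token < 0)
 *		return false;			// device busy, retry later
 *	blk_mq_set_rq_budget_token(rq, token);
 *	...
 *	blk_mq_put_dispatch_budget(q, token);	// on dispatch failure
 *
 * Drivers without ->get_budget() always "succeed" with token 0.
 */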

static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_inc(&hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_inc(&hctx->nr_active);
}

static inline void __blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
		int val)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_sub(val, &hctx->nr_active);
}

static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
	__blk_mq_sub_active_requests(hctx, 1);
}

static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		return atomic_read(&hctx->queue->nr_active_requests_shared_tags);
	return atomic_read(&hctx->nr_active);
}
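
/*
 * Note (not part of the kernel tree): the helpers above keep one counter
 * per hctx in the normal case but collapse to a single queue-wide
 * counter when all hctxs share one tag set (BLK_MQ_F_TAG_HCTX_SHARED),
 * where per-hctx counts would be meaningless. hctx_may_queue() reads
 * this count to enforce a fair share of the tag space.
 */
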
static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = BLK_MQ_NO_TAG;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		__blk_mq_dec_active_requests(hctx);
	}
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
		return;

	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq);

static inline bool blk_mq_get_driver_tag(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	if (rq->tag != BLK_MQ_NO_TAG &&
	    !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
		hctx->tags->rqs[rq->tag] = rq;
		return true;
	}

	return __blk_mq_get_driver_tag(hctx, rq);
}
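
/*
 * Note (not part of the kernel tree): blk_mq_get_driver_tag() is the
 * fast path. A request that already owns a driver tag (common when no
 * I/O scheduler is attached, so the scheduler tag is the driver tag)
 * only needs its tags->rqs[] slot refreshed; the out-of-line
 * __blk_mq_get_driver_tag() handles actual allocation, shared-tag
 * accounting and the RQF_MQ_INFLIGHT marking undone in
 * __blk_mq_put_driver_tag() above.
 */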

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
	int cpu;

	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = 0;
}

/*
 * blk_mq_plug() - Get caller context plug
 * @q: request queue
 * @bio: the bio being submitted by the caller context
 *
 * Plugging, by design, may delay the insertion of BIOs into the elevator in
 * order to increase BIO merging opportunities. This however can cause BIO
 * insertion order to change from the order in which submit_bio() is being
 * executed in the case of multiple contexts concurrently issuing BIOs to a
 * device, even if these contexts are synchronized to tightly control BIO
 * issuing order. While this is not a problem with regular block devices,
 * this ordering change can cause write BIO failures with zoned block devices
 * as these require sequential write patterns to zones. Prevent this from
 * happening by ignoring the plug state of a BIO issuing context if the
 * target request queue is for a zoned block device and the BIO to plug is a
 * write operation.
 *
 * Return current->plug if the bio can be plugged and NULL otherwise
 */
static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
					   struct bio *bio)
{
	/*
	 * For regular block devices or read operations, use the context plug
	 * which may be NULL if blk_start_plug() was not executed.
	 */
	if (!blk_queue_is_zoned(q) || !op_is_write(bio_op(bio)))
		return current->plug;

	/* Zoned block device write operation case: do not plug the BIO */
	return NULL;
}
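
/*
 * Illustrative sketch (not part of the kernel tree): callers use
 * blk_mq_plug() wherever they would otherwise read current->plug
 * directly, e.g. when deciding whether a bio may merge into a plugged
 * request:
 *
 *	struct blk_plug *plug = blk_mq_plug(q, bio);
 *
 *	if (plug && blk_attempt_plug_merge(q, bio, nr_segs))
 *		return;
 *
 * so zoned writes bypass plugging without every caller special-casing
 * them.
 */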

/* Free all requests on the list */
static inline void blk_mq_free_requests(struct list_head *list)
{
	while (!list_empty(list)) {
		struct request *rq = list_entry_rq(list->next);

		list_del_init(&rq->queuelist);
		blk_mq_free_request(rq);
	}
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			return true;
	} else {
		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return true;
	}

	users = atomic_read(&hctx->tags->active_queues);

	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return __blk_mq_active_requests(hctx) < depth;
}
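
/*
 * Worked example (not part of the kernel tree): with a shared bitmap
 * depth of 256 and 8 active users, each hctx may have
 * max((256 + 8 - 1) / 8, 4) = 32 requests in flight; the ceiling
 * division plus the floor of 4 guarantees every active user a few tags
 * even when depth / users would round down to zero.
 */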

#endif