/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
	struct kobject kobj;
	struct blk_mq_ctx __percpu	*queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_lists[HCTX_MAX_TYPES];
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned short		index_hw[HCTX_MAX_TYPES];
	struct blk_mq_hw_ctx	*hctxs[HCTX_MAX_TYPES];

	struct request_queue	*queue;
	struct blk_mq_ctxs	*ctxs;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_submit_bio(struct bio *bio);
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
		unsigned int flags);
void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
			     unsigned int);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);
void blk_mq_put_rq_ref(struct request *rq);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
					     unsigned int hctx_idx, unsigned int depth);
void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
			     struct blk_mq_tags *tags,
			     unsigned int hctx_idx);
/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			     bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
				  bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
			    struct list_head *list);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
				    struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
							  enum hctx_type type,
							  unsigned int cpu)
{
	return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]);
}

static inline enum hctx_type blk_mq_get_hctx_type(unsigned int flags)
{
	enum hctx_type type = HCTX_TYPE_DEFAULT;

	/*
	 * The caller ensures that polling is enabled if REQ_POLLED is set.
	 */
	if (flags & REQ_POLLED)
		type = HCTX_TYPE_POLL;
	else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
		type = HCTX_TYPE_READ;
	return type;
}

/*
 * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
 * @q: request queue
 * @flags: request command flags
 * @ctx: software queue cpu ctx
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     unsigned int flags,
						     struct blk_mq_ctx *ctx)
{
	return ctx->hctxs[blk_mq_get_hctx_type(flags)];
}
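/*
 * Usage sketch (illustrative, not a fixed API contract): request allocation
 * typically picks the per-cpu software queue first and then maps it to a
 * hardware queue, e.g.
 *
 *	ctx = blk_mq_get_ctx(q);
 *	hctx = blk_mq_map_queue(q, cmd_flags, ctx);
 */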
/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
void blk_mq_free_plug_rqs(struct blk_plug *plug);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_cancel_work_sync(struct request_queue *q);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
						  unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, raw_smp_processor_id());
}

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;
	unsigned int cmd_flags;
	req_flags_t rq_flags;

	/* allocate multiple requests/tags in one go */
	unsigned int nr_tags;
	struct request **cached_rq;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline bool blk_mq_is_shared_tags(unsigned int flags)
{
	return flags & BLK_MQ_F_TAG_HCTX_SHARED;
}

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (!(data->rq_flags & RQF_ELV))
		return data->hctx->tags;
	return data->hctx->sched_tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
			      struct block_device *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
			 unsigned int inflight[2]);
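/*
 * Dispatch budget helpers: thin wrappers around the optional ->get_budget()
 * and ->put_budget() callbacks in struct blk_mq_ops (used e.g. by SCSI to
 * limit dispatch). If a driver does not implement them, getting a budget
 * trivially succeeds and putting it back is a no-op.
 */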
static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
					      int budget_token)
{
	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(q, budget_token);
}

static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
{
	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(q);
	return 0;
}

static inline void blk_mq_set_rq_budget_token(struct request *rq, int token)
{
	if (token < 0)
		return;

	if (rq->q->mq_ops->set_rq_budget_token)
		rq->q->mq_ops->set_rq_budget_token(rq, token);
}

static inline int blk_mq_get_rq_budget_token(struct request *rq)
{
	if (rq->q->mq_ops->get_rq_budget_token)
		return rq->q->mq_ops->get_rq_budget_token(rq);
	return -1;
}

static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_inc(&hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_inc(&hctx->nr_active);
}

static inline void __blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
						int val)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_sub(val, &hctx->nr_active);
}

static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
	__blk_mq_sub_active_requests(hctx, 1);
}

static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		return atomic_read(&hctx->queue->nr_active_requests_shared_tags);
	return atomic_read(&hctx->nr_active);
}

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = BLK_MQ_NO_TAG;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		__blk_mq_dec_active_requests(hctx);
	}
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
		return;

	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq);
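/*
 * Make sure a request owns a driver tag before it is dispatched: the inline
 * fast path only covers requests that already hold a tag and whose hctx does
 * not share tags across queues; everything else goes through the out-of-line
 * slow path above.
 */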
static inline bool blk_mq_get_driver_tag(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	if (rq->tag != BLK_MQ_NO_TAG &&
	    !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
		hctx->tags->rqs[rq->tag] = rq;
		return true;
	}

	return __blk_mq_get_driver_tag(hctx, rq);
}

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
	int cpu;

	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = 0;
}

/*
 * blk_mq_plug() - Get caller context plug
 * @q: request queue
 * @bio: the bio being submitted by the caller context
 *
 * Plugging, by design, may delay the insertion of BIOs into the elevator in
 * order to increase BIO merging opportunities. This however can cause BIO
 * insertion order to change from the order in which submit_bio() is being
 * executed in the case of multiple contexts concurrently issuing BIOs to a
 * device, even if these contexts are synchronized to tightly control BIO
 * issuing order. While this is not a problem with regular block devices,
 * this ordering change can cause write BIO failures with zoned block devices
 * as these require sequential write patterns to zones. Prevent this from
 * happening by ignoring the plug state of a BIO issuing context if the
 * target request queue is for a zoned block device and the BIO to plug is a
 * write operation.
 *
 * Return current->plug if the bio can be plugged and NULL otherwise
 */
static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
					   struct bio *bio)
{
	/*
	 * For regular block devices or read operations, use the context plug
	 * which may be NULL if blk_start_plug() was not executed.
	 */
	if (!blk_queue_is_zoned(q) || !op_is_write(bio_op(bio)))
		return current->plug;

	/* Zoned block device write operation case: do not plug the BIO */
	return NULL;
}

/* Free all requests on the list */
static inline void blk_mq_free_requests(struct list_head *list)
{
	while (!list_empty(list)) {
		struct request *rq = list_entry_rq(list->next);

		list_del_init(&rq->queuelist);
		blk_mq_free_request(rq);
	}
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
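/*
 * For example (illustrative numbers only): with a tag depth of 256 and 4
 * active users, hctx_may_queue() below allows each user at most
 * max(256 / 4, 4) == 64 in-flight requests.
 */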
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			return true;
	} else {
		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return true;
	}

	users = atomic_read(&hctx->tags->active_queues);

	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return __blk_mq_active_requests(hctx) < depth;
}

/* run the code block in @dispatch_ops with rcu/srcu read lock held */
#define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops)	\
do {									\
	if (!blk_queue_has_srcu(q)) {					\
		rcu_read_lock();					\
		(dispatch_ops);						\
		rcu_read_unlock();					\
	} else {							\
		int srcu_idx;						\
									\
		might_sleep_if(check_sleep);				\
		srcu_idx = srcu_read_lock((q)->srcu);			\
		(dispatch_ops);						\
		srcu_read_unlock((q)->srcu, srcu_idx);			\
	}								\
} while (0)

#define blk_mq_run_dispatch_ops(q, dispatch_ops)		\
	__blk_mq_run_dispatch_ops(q, true, dispatch_ops)

#endif