/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
	struct kobject kobj;
	struct blk_mq_ctx __percpu	*queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_lists[HCTX_MAX_TYPES];
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned short		index_hw[HCTX_MAX_TYPES];
	struct blk_mq_hw_ctx	*hctxs[HCTX_MAX_TYPES];

	struct request_queue	*queue;
	struct blk_mq_ctxs	*ctxs;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

enum {
	BLK_MQ_NO_TAG		= -1U,
	BLK_MQ_TAG_MIN		= 1,
	BLK_MQ_TAG_MAX		= BLK_MQ_NO_TAG - 1,
};

void blk_mq_submit_bio(struct bio *bio);
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
		unsigned int flags);
void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
			     unsigned int);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);
void blk_mq_put_rq_ref(struct request *rq);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
				unsigned int hctx_idx, unsigned int depth);
void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
			     struct blk_mq_tags *tags,
			     unsigned int hctx_idx);
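
/*
 * Sketch (illustration only, not a real caller) of how the request map
 * helpers above pair up when a hardware queue's tags are created and torn
 * down again:
 *
 *	tags = blk_mq_alloc_map_and_rqs(set, hctx_idx, depth);
 *	if (!tags)
 *		return -ENOMEM;
 *	...
 *	blk_mq_free_map_and_rqs(set, tags, hctx_idx);
 */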

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
				  bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
				    struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
							  enum hctx_type type,
							  unsigned int cpu)
{
	return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]);
}

static inline enum hctx_type blk_mq_get_hctx_type(blk_opf_t opf)
{
	enum hctx_type type = HCTX_TYPE_DEFAULT;

	/*
	 * The caller ensures that if REQ_POLLED is set, polling is enabled.
	 */
	if (opf & REQ_POLLED)
		type = HCTX_TYPE_POLL;
	else if ((opf & REQ_OP_MASK) == REQ_OP_READ)
		type = HCTX_TYPE_READ;
	return type;
}
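
/*
 * Illustrative sketch (not part of the interface): how a few common
 * operations are classified by blk_mq_get_hctx_type() above.  A polled read
 * picks the poll type because the REQ_POLLED check comes first; a plain
 * write falls through to the default type:
 *
 *	blk_mq_get_hctx_type(REQ_OP_READ)		== HCTX_TYPE_READ
 *	blk_mq_get_hctx_type(REQ_OP_READ | REQ_POLLED)	== HCTX_TYPE_POLL
 *	blk_mq_get_hctx_type(REQ_OP_WRITE)		== HCTX_TYPE_DEFAULT
 *
 * Note that ctx->hctxs[], consumed by blk_mq_map_queue() below, may point
 * the READ/POLL types back at the default hardware queue when the driver
 * does not provide separate queue maps for them.
 */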

/*
 * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
 * @q: request queue
 * @opf: operation type (REQ_OP_*) and flags (e.g. REQ_POLLED).
 * @ctx: software queue cpu ctx
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     blk_opf_t opf,
						     struct blk_mq_ctx *ctx)
{
	return ctx->hctxs[blk_mq_get_hctx_type(opf)];
}

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
int blk_mq_sysfs_register(struct gendisk *disk);
void blk_mq_sysfs_unregister(struct gendisk *disk);
int blk_mq_sysfs_register_hctxs(struct request_queue *q);
void blk_mq_sysfs_unregister_hctxs(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
void blk_mq_free_plug_rqs(struct blk_plug *plug);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_cancel_work_sync(struct request_queue *q);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, raw_smp_processor_id());
}

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;
	blk_opf_t cmd_flags;
	req_flags_t rq_flags;

	/* allocate multiple requests/tags in one go */
	unsigned int nr_tags;
	struct request **cached_rq;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
		unsigned int reserved_tags, int node, int alloc_policy);
void blk_mq_free_tags(struct blk_mq_tags *tags);
int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
		struct sbitmap_queue *breserved_tags, unsigned int queue_depth,
		unsigned int reserved, int node, int alloc_policy);

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
		unsigned int *offset);
void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
		unsigned int tag);
void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags);
int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
		struct blk_mq_tags **tags, unsigned int depth, bool can_grow);
void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set,
		unsigned int size);
void blk_mq_tag_update_sched_shared_tags(struct request_queue *q);

void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
		void *priv);
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv);

static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt,
						 struct blk_mq_hw_ctx *hctx)
{
	if (!hctx)
		return &bt->ws[0];
	return sbq_wait_ptr(bt, &hctx->wait_index);
}

void __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);
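
/*
 * Rough sketch (illustration only, not a real caller) of how the tag
 * interfaces above fit together.  blk_mq_get_tag() allocates a free tag for
 * the hardware queue described by a struct blk_mq_alloc_data and returns
 * BLK_MQ_NO_TAG on failure; blk_mq_put_tag() releases it again:
 *
 *	struct blk_mq_alloc_data data = {
 *		.q		= q,
 *		.flags		= BLK_MQ_REQ_NOWAIT,
 *		.cmd_flags	= opf,
 *		.nr_tags	= 1,
 *	};
 *	unsigned int tag;
 *
 *	data.ctx = blk_mq_get_ctx(q);
 *	data.hctx = blk_mq_map_queue(q, data.cmd_flags, data.ctx);
 *	tag = blk_mq_get_tag(&data);
 *	if (tag != BLK_MQ_NO_TAG)
 *		blk_mq_put_tag(blk_mq_tags_from_data(&data), data.ctx, tag);
 */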

static inline void blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_tag_busy(hctx);
}

static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_tag_idle(hctx);
}

static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags,
					  unsigned int tag)
{
	return tag < tags->nr_reserved_tags;
}

static inline bool blk_mq_is_shared_tags(unsigned int flags)
{
	return flags & BLK_MQ_F_TAG_HCTX_SHARED;
}

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (!(data->rq_flags & RQF_ELV))
		return data->hctx->tags;
	return data->hctx->sched_tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
		struct block_device *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
		unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
					      int budget_token)
{
	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(q, budget_token);
}

static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
{
	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(q);
	return 0;
}

static inline void blk_mq_set_rq_budget_token(struct request *rq, int token)
{
	if (token < 0)
		return;

	if (rq->q->mq_ops->set_rq_budget_token)
		rq->q->mq_ops->set_rq_budget_token(rq, token);
}
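
/*
 * Sketch of the dispatch budget protocol as used by the dispatch paths
 * (illustration only; see the real callers in blk-mq.c and blk-mq-sched.c):
 *
 *	budget_token = blk_mq_get_dispatch_budget(q);
 *	if (budget_token < 0)
 *		break;		(device out of budget, stop dispatching)
 *	blk_mq_set_rq_budget_token(rq, budget_token);
 *	...
 *	blk_mq_put_dispatch_budget(q, budget_token);
 *				(only if the request was not dispatched)
 *
 * blk_mq_get_rq_budget_token() below reads the stored token back so a
 * failed dispatch can return its budget to the device.
 */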

static inline int blk_mq_get_rq_budget_token(struct request *rq)
{
	if (rq->q->mq_ops->get_rq_budget_token)
		return rq->q->mq_ops->get_rq_budget_token(rq);
	return -1;
}

static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_inc(&hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_inc(&hctx->nr_active);
}

static inline void __blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
		int val)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_sub(val, &hctx->nr_active);
}

static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
	__blk_mq_sub_active_requests(hctx, 1);
}

static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		return atomic_read(&hctx->queue->nr_active_requests_shared_tags);
	return atomic_read(&hctx->nr_active);
}

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = BLK_MQ_NO_TAG;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		__blk_mq_dec_active_requests(hctx);
	}
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
		return;

	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq);

static inline bool blk_mq_get_driver_tag(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	if (rq->tag != BLK_MQ_NO_TAG &&
	    !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
		hctx->tags->rqs[rq->tag] = rq;
		return true;
	}

	return __blk_mq_get_driver_tag(hctx, rq);
}
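
/*
 * Note on the two-level tagging above: a request allocated through an I/O
 * scheduler initially owns only rq->internal_tag (from hctx->sched_tags)
 * and rq->tag stays BLK_MQ_NO_TAG; a device-visible tag from hctx->tags is
 * assigned at dispatch time via blk_mq_get_driver_tag() and handed back
 * with blk_mq_put_driver_tag() if the request cannot be issued after all.
 */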

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
	int cpu;

	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = 0;
}

/*
 * blk_mq_plug() - Get caller context plug
 * @bio : the bio being submitted by the caller context
 *
 * Plugging, by design, may delay the insertion of BIOs into the elevator in
 * order to increase BIO merging opportunities. This however can cause BIO
 * insertion order to change from the order in which submit_bio() is being
 * executed in the case of multiple contexts concurrently issuing BIOs to a
 * device, even if these contexts are synchronized to tightly control BIO
 * issuing order. While this is not a problem with regular block devices,
 * this ordering change can cause write BIO failures with zoned block devices
 * as these require sequential write patterns to zones. Prevent this from
 * happening by ignoring the plug state of a BIO issuing context if it is for
 * a zoned block device and the BIO to plug is a write operation.
 *
 * Return current->plug if the bio can be plugged and NULL otherwise
 */
static inline struct blk_plug *blk_mq_plug(struct bio *bio)
{
	/* Zoned block device write operation case: do not plug the BIO */
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
	    bdev_op_is_zoned_write(bio->bi_bdev, bio_op(bio)))
		return NULL;

	/*
	 * For regular block devices or read operations, use the context plug
	 * which may be NULL if blk_start_plug() was not executed.
	 */
	return current->plug;
}

/* Free all requests on the list */
static inline void blk_mq_free_requests(struct list_head *list)
{
	while (!list_empty(list)) {
		struct request *rq = list_entry_rq(list->next);

		list_del_init(&rq->queuelist);
		blk_mq_free_request(rq);
	}
}
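
/*
 * Worked example (illustration only) of the fair-share limit computed by
 * hctx_may_queue() below: with a tag bitmap depth of 256 and 8 active
 * queues, each queue may have up to max(DIV_ROUND_UP(256, 8), 4) = 32
 * requests in flight; with 128 active queues the floor of 4 tags applies.
 */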

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			return true;
	} else {
		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return true;
	}

	users = atomic_read(&hctx->tags->active_queues);

	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return __blk_mq_active_requests(hctx) < depth;
}

/* run the code block in @dispatch_ops with rcu/srcu read lock held */
#define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops)	\
do {								\
	if ((q)->tag_set->flags & BLK_MQ_F_BLOCKING) {		\
		int srcu_idx;					\
								\
		might_sleep_if(check_sleep);			\
		srcu_idx = srcu_read_lock((q)->tag_set->srcu);	\
		(dispatch_ops);					\
		srcu_read_unlock((q)->tag_set->srcu, srcu_idx);	\
	} else {						\
		rcu_read_lock();				\
		(dispatch_ops);					\
		rcu_read_unlock();				\
	}							\
} while (0)

#define blk_mq_run_dispatch_ops(q, dispatch_ops)		\
	__blk_mq_run_dispatch_ops(q, true, dispatch_ops)	\

#endif