/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
	struct kobject kobj;
	struct blk_mq_ctx __percpu *queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t lock;
		struct list_head rq_lists[HCTX_MAX_TYPES];
	} ____cacheline_aligned_in_smp;

	unsigned int cpu;
	unsigned short index_hw[HCTX_MAX_TYPES];
	struct blk_mq_hw_ctx *hctxs[HCTX_MAX_TYPES];

	struct request_queue *queue;
	struct blk_mq_ctxs *ctxs;
	struct kobject kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_submit_bio(struct bio *bio);
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
		unsigned int flags);
void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
			     unsigned int);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);
void blk_mq_put_rq_ref(struct request *rq);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
					     unsigned int hctx_idx,
					     unsigned int depth);
void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
			     struct blk_mq_tags *tags,
			     unsigned int hctx_idx);

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			     bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
				  bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
			    struct list_head *list);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
				    struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
							  enum hctx_type type,
							  unsigned int cpu)
{
	return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]);
}

static inline enum hctx_type blk_mq_get_hctx_type(blk_opf_t opf)
{
	enum hctx_type type = HCTX_TYPE_DEFAULT;

	/*
	 * The caller ensures that if REQ_POLLED is set, polling is enabled
	 * for the queue.
	 */
	if (opf & REQ_POLLED)
		type = HCTX_TYPE_POLL;
	else if ((opf & REQ_OP_MASK) == REQ_OP_READ)
		type = HCTX_TYPE_READ;
	return type;
}
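/*
 * Illustrative examples only (not part of the kernel API): how an opf value
 * resolves per blk_mq_get_hctx_type() above. REQ_POLLED takes precedence over
 * the operation type, reads without REQ_POLLED map to the read type, and
 * everything else falls back to HCTX_TYPE_DEFAULT:
 *
 *	blk_mq_get_hctx_type(REQ_OP_READ)               == HCTX_TYPE_READ
 *	blk_mq_get_hctx_type(REQ_OP_WRITE)              == HCTX_TYPE_DEFAULT
 *	blk_mq_get_hctx_type(REQ_OP_READ | REQ_POLLED)  == HCTX_TYPE_POLL
 */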
/*
 * blk_mq_map_queue() - map (opf, ctx) to hardware queue
 * @q: request queue
 * @opf: operation type (REQ_OP_*) and flags (e.g. REQ_POLLED).
 * @ctx: software queue cpu ctx
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     blk_opf_t opf,
						     struct blk_mq_ctx *ctx)
{
	return ctx->hctxs[blk_mq_get_hctx_type(opf)];
}

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
int blk_mq_sysfs_register(struct gendisk *disk);
void blk_mq_sysfs_unregister(struct gendisk *disk);
int blk_mq_sysfs_register_hctxs(struct request_queue *q);
void blk_mq_sysfs_unregister_hctxs(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
void blk_mq_free_plug_rqs(struct blk_plug *plug);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_cancel_work_sync(struct request_queue *q);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
						  unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, raw_smp_processor_id());
}

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;
	blk_opf_t cmd_flags;
	req_flags_t rq_flags;

	/* allocate multiple requests/tags in one go */
	unsigned int nr_tags;
	struct request **cached_rq;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline bool blk_mq_is_shared_tags(unsigned int flags)
{
	return flags & BLK_MQ_F_TAG_HCTX_SHARED;
}

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (!(data->rq_flags & RQF_ELV))
		return data->hctx->tags;
	return data->hctx->sched_tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
			      struct block_device *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
			 unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
					      int budget_token)
{
	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(q, budget_token);
}

static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
{
	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(q);
	return 0;
}

static inline void blk_mq_set_rq_budget_token(struct request *rq, int token)
{
	if (token < 0)
		return;

	if (rq->q->mq_ops->set_rq_budget_token)
		rq->q->mq_ops->set_rq_budget_token(rq, token);
}
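/*
 * Minimal sketch (illustrative only, not a dispatch path from this file) of
 * how the budget helpers above and below are typically paired by a
 * dispatcher: a budget token is taken before handing a request to the driver,
 * stored on the request, and released again if the request cannot be
 * dispatched. blk_mq_get_driver_tag() is defined further down in this header.
 *
 *	int token = blk_mq_get_dispatch_budget(q);
 *
 *	if (token < 0)
 *		return false;			// no budget, back off
 *	blk_mq_set_rq_budget_token(rq, token);	// remember it on the request
 *	if (!blk_mq_get_driver_tag(rq)) {
 *		blk_mq_put_dispatch_budget(q, token);	// undo on failure
 *		return false;
 *	}
 *	// ... issue rq to the driver ...
 */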
static inline int blk_mq_get_rq_budget_token(struct request *rq)
{
	if (rq->q->mq_ops->get_rq_budget_token)
		return rq->q->mq_ops->get_rq_budget_token(rq);
	return -1;
}

static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_inc(&hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_inc(&hctx->nr_active);
}

static inline void __blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
						int val)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_sub(val, &hctx->nr_active);
}

static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
	__blk_mq_sub_active_requests(hctx, 1);
}

static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		return atomic_read(&hctx->queue->nr_active_requests_shared_tags);
	return atomic_read(&hctx->nr_active);
}

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = BLK_MQ_NO_TAG;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		__blk_mq_dec_active_requests(hctx);
	}
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
		return;

	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq);

static inline bool blk_mq_get_driver_tag(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	if (rq->tag != BLK_MQ_NO_TAG &&
	    !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
		hctx->tags->rqs[rq->tag] = rq;
		return true;
	}

	return __blk_mq_get_driver_tag(hctx, rq);
}

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
	int cpu;

	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = 0;
}
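/*
 * Sketch of the driver-tag lifetime implied by the helpers above (illustrative
 * only): a request needs a driver tag before it can be issued, and the tag is
 * returned once the driver no longer owns the request. On
 * BLK_MQ_F_TAG_QUEUE_SHARED queues the tag also carries the RQF_MQ_INFLIGHT
 * accounting that feeds __blk_mq_active_requests() and, through it,
 * hctx_may_queue() below.
 *
 *	if (!blk_mq_get_driver_tag(rq))
 *		return false;		// out of tags, retry dispatch later
 *	// ... hand rq to the driver via ->queue_rq() ...
 *	blk_mq_put_driver_tag(rq);	// e.g. if the driver returned
 *					// BLK_STS_RESOURCE
 */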
/*
 * blk_mq_plug() - Get caller context plug
 * @bio: the bio being submitted by the caller context
 *
 * Plugging, by design, may delay the insertion of BIOs into the elevator in
 * order to increase BIO merging opportunities. This however can cause BIO
 * insertion order to change from the order in which submit_bio() is being
 * executed in the case of multiple contexts concurrently issuing BIOs to a
 * device, even if these contexts are synchronized to tightly control BIO
 * issuing order. While this is not a problem with regular block devices,
 * this ordering change can cause write BIO failures with zoned block devices
 * as these require sequential write patterns to zones. Prevent this from
 * happening by ignoring the plug state of a BIO issuing context if it is for
 * a zoned block device and the BIO to plug is a write operation.
 *
 * Return current->plug if the bio can be plugged and NULL otherwise
 */
static inline struct blk_plug *blk_mq_plug(struct bio *bio)
{
	/* Zoned block device write operation case: do not plug the BIO */
	if (bdev_is_zoned(bio->bi_bdev) && op_is_write(bio_op(bio)))
		return NULL;

	/*
	 * For regular block devices or read operations, use the context plug
	 * which may be NULL if blk_start_plug() was not executed.
	 */
	return current->plug;
}

/* Free all requests on the list */
static inline void blk_mq_free_requests(struct list_head *list)
{
	while (!list_empty(list)) {
		struct request *rq = list_entry_rq(list->next);

		list_del_init(&rq->queuelist);
		blk_mq_free_request(rq);
	}
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			return true;
	} else {
		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return true;
	}

	users = atomic_read(&hctx->tags->active_queues);

	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return __blk_mq_active_requests(hctx) < depth;
}

/* run the code block in @dispatch_ops with rcu/srcu read lock held */
#define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops)	\
do {									\
	if (!blk_queue_has_srcu(q)) {					\
		rcu_read_lock();					\
		(dispatch_ops);						\
		rcu_read_unlock();					\
	} else {							\
		int srcu_idx;						\
									\
		might_sleep_if(check_sleep);				\
		srcu_idx = srcu_read_lock((q)->srcu);			\
		(dispatch_ops);						\
		srcu_read_unlock((q)->srcu, srcu_idx);			\
	}								\
} while (0)

#define blk_mq_run_dispatch_ops(q, dispatch_ops)		\
	__blk_mq_run_dispatch_ops(q, true, dispatch_ops)

#endif
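/*
 * Usage sketch for blk_mq_run_dispatch_ops() above (illustrative only): the
 * dispatch code it wraps runs under rcu_read_lock() for non-blocking queues,
 * or under srcu_read_lock() when the queue was set up with BLK_MQ_F_BLOCKING
 * and therefore has an srcu struct. A typical caller wraps a single dispatch
 * expression, e.g.:
 *
 *	blk_mq_run_dispatch_ops(hctx->queue,
 *			blk_mq_sched_dispatch_requests(hctx));
 */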