/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
	if (!tags)
		return true;

	return sbitmap_any_bit_clear(&tags->bitmap_tags.sb);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		atomic_inc(&hctx->tags->active_queues);

	return true;
}

/*
 * Wake up all waiters potentially sleeping on tags
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	sbitmap_queue_wake_all(&tags->bitmap_tags);
	if (include_reserve)
		sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return;

	atomic_dec(&tags->active_queues);

	blk_mq_tag_wakeup_all(tags, false);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;
}

static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
			    struct sbitmap_queue *bt)
{
	if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&
	    !hctx_may_queue(data->hctx, bt))
		return -1;
	if (data->shallow_depth)
		return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
	else
		return __sbitmap_queue_get(bt);
}

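/*
 * Allocate a tag for the request described by @data, from the reserved
 * pool if BLK_MQ_REQ_RESERVED is set and from the normal pool otherwise.
 * If no tag is available and BLK_MQ_REQ_NOWAIT is not set, kick the
 * hardware queue and sleep until a tag is freed.
 */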
unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt;
	struct sbq_wait_state *ws;
	DEFINE_WAIT(wait);
	unsigned int tag_offset;
	bool drop_ctx;
	int tag;

	if (data->flags & BLK_MQ_REQ_RESERVED) {
		if (unlikely(!tags->nr_reserved_tags)) {
			WARN_ON_ONCE(1);
			return BLK_MQ_TAG_FAIL;
		}
		bt = &tags->breserved_tags;
		tag_offset = 0;
	} else {
		bt = &tags->bitmap_tags;
		tag_offset = tags->nr_reserved_tags;
	}

	tag = __blk_mq_get_tag(data, bt);
	if (tag != -1)
		goto found_tag;

	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return BLK_MQ_TAG_FAIL;

	ws = bt_wait_ptr(bt, data->hctx);
	drop_ctx = data->ctx == NULL;
	do {
		struct sbitmap_queue *bt_prev;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete.
		 */
		blk_mq_run_hw_queue(data->hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __blk_mq_get_tag(data, bt);
		if (tag != -1)
			break;

		prepare_to_wait_exclusive(&ws->wait, &wait,
					  TASK_UNINTERRUPTIBLE);

		tag = __blk_mq_get_tag(data, bt);
		if (tag != -1)
			break;

		if (data->ctx)
			blk_mq_put_ctx(data->ctx);

		bt_prev = bt;
		io_schedule();

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = blk_mq_map_queue(data->q, data->ctx->cpu);
		tags = blk_mq_tags_from_data(data);
		if (data->flags & BLK_MQ_REQ_RESERVED)
			bt = &tags->breserved_tags;
		else
			bt = &tags->bitmap_tags;

		finish_wait(&ws->wait, &wait);

		/*
		 * If the destination hw queue changed, do a fake wake up on
		 * the previous queue to compensate for the missed wake up,
		 * so other allocations on the previous queue won't be
		 * starved.
		 */
		if (bt != bt_prev)
			sbitmap_queue_wake_up(bt_prev);

		ws = bt_wait_ptr(bt, data->hctx);
	} while (1);

	if (drop_ctx && data->ctx)
		blk_mq_put_ctx(data->ctx);

	finish_wait(&ws->wait, &wait);

found_tag:
	return tag + tag_offset;
}

void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
		    struct blk_mq_ctx *ctx, unsigned int tag)
{
	if (!blk_mq_tag_is_reserved(tags, tag)) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
	} else {
		BUG_ON(tag >= tags->nr_reserved_tags);
		sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
	}
}

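/*
 * Iteration helpers for walking busy tags: bt_for_each() visits requests
 * that belong to one hardware queue, while bt_tags_for_each() walks a
 * whole tag map and only considers requests that are in flight.
 */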
struct bt_iter_data {
	struct blk_mq_hw_ctx *hctx;
	busy_iter_fn *fn;
	void *data;
	bool reserved;
};

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_iter_data *iter_data = data;
	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
	struct blk_mq_tags *tags = hctx->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;
	rq = tags->rqs[bitnr];

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	if (rq && rq->q == hctx->queue)
		iter_data->fn(hctx, rq, iter_data->data, reserved);
	return true;
}

static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
			busy_iter_fn *fn, void *data, bool reserved)
{
	struct bt_iter_data iter_data = {
		.hctx = hctx,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
	struct blk_mq_tags *tags;
	busy_tag_iter_fn *fn;
	void *data;
	bool reserved;
};

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_tags_iter_data *iter_data = data;
	struct blk_mq_tags *tags = iter_data->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	rq = tags->rqs[bitnr];
	if (rq && blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
		iter_data->fn(rq, iter_data->data, reserved);

	return true;
}

static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
			     busy_tag_iter_fn *fn, void *data, bool reserved)
{
	struct bt_tags_iter_data iter_data = {
		.tags = tags,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	if (tags->rqs)
		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
				     busy_tag_iter_fn *fn, void *priv)
{
	if (tags->nr_reserved_tags)
		bt_tags_for_each(tags, &tags->breserved_tags, fn, priv, true);
	bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, false);
}

void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
			     busy_tag_iter_fn *fn, void *priv)
{
	int i;

	for (i = 0; i < tagset->nr_hw_queues; i++) {
		if (tagset->tags && tagset->tags[i])
			blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);

int blk_mq_tagset_iter(struct blk_mq_tag_set *set, void *data,
		       int (fn)(void *, struct request *))
{
	int i, j, ret = 0;

	if (WARN_ON_ONCE(!fn))
		goto out;

	for (i = 0; i < set->nr_hw_queues; i++) {
		struct blk_mq_tags *tags = set->tags[i];

		if (!tags)
			continue;

		for (j = 0; j < tags->nr_tags; j++) {
			if (!tags->static_rqs[j])
				continue;

			ret = fn(data, tags->static_rqs[j]);
			if (ret)
				goto out;
		}
	}

out:
	return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_tagset_iter);

void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
				void *priv)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		struct blk_mq_tags *tags = hctx->tags;

		/*
		 * If no software queues are currently mapped to this
		 * hardware queue, there's nothing to check
		 */
		if (!blk_mq_hw_queue_mapped(hctx))
			continue;

		if (tags->nr_reserved_tags)
			bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
	}
}

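/*
 * Tag map setup: blk_mq_init_tags() allocates the blk_mq_tags structure
 * and initializes the normal and reserved sbitmap queues for the
 * requested depth and allocation policy.
 */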
static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
		    bool round_robin, int node)
{
	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
				       node);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node, int alloc_policy)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

	if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
		goto free_tags;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, round_robin,
		     node))
		goto free_bitmap_tags;

	return tags;
free_bitmap_tags:
	sbitmap_queue_free(&tags->bitmap_tags);
free_tags:
	kfree(tags);
	return NULL;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, int alloc_policy)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;

	return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	sbitmap_queue_free(&tags->bitmap_tags);
	sbitmap_queue_free(&tags->breserved_tags);
	kfree(tags);
}

int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
			    struct blk_mq_tags **tagsptr, unsigned int tdepth,
			    bool can_grow)
{
	struct blk_mq_tags *tags = *tagsptr;

	if (tdepth <= tags->nr_reserved_tags)
		return -EINVAL;

	tdepth -= tags->nr_reserved_tags;

	/*
	 * If we are allowed to grow beyond the original size, allocate
	 * a new set of tags before freeing the old one.
	 */
	if (tdepth > tags->nr_tags) {
		struct blk_mq_tag_set *set = hctx->queue->tag_set;
		struct blk_mq_tags *new;
		bool ret;

		if (!can_grow)
			return -EINVAL;

		/*
		 * We need some sort of upper limit, set it high enough that
		 * no valid use cases should require more.
		 */
		if (tdepth > 16 * BLKDEV_MAX_RQ)
			return -EINVAL;

		new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth, 0);
		if (!new)
			return -ENOMEM;
		ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
		if (ret) {
			blk_mq_free_rq_map(new);
			return -ENOMEM;
		}

		blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
		blk_mq_free_rq_map(*tagsptr);
		*tagsptr = new;
	} else {
		/*
		 * We don't need to (and can't) update the reserved tags
		 * here; they remain static and should never need resizing.
		 */
		sbitmap_queue_resize(&tags->bitmap_tags, tdepth);
	}

	return 0;
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function returns a tag with the hardware
 * context index in the upper bits and the per-hardware-queue tag in the
 * lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	int hwq = 0;

	if (q->mq_ops) {
		hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
		hwq = hctx->queue_num;
	}

	return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);