xref: /openbmc/linux/block/blk-mq-tag.c (revision 867a0e05)
/*
 * Fast and scalable bitmap tagging variant. Uses sparser bitmaps spread
 * over multiple cachelines to avoid ping-pong between multiple submitters
 * or submitter and completer. Uses rolling wakeups to avoid falling off
 * the scaling cliff when we run out of tags and have to start putting
 * submitters to sleep.
 *
 * Uses active queue tracking to support fairer distribution of tags
 * between multiple submitters when a shared tag map is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

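/*
 * Return true if at least one word in the bitmap still has a clear
 * (i.e. free) bit.
 */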
static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
{
	int i;

	for (i = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];
		int ret;

		ret = find_first_zero_bit(&bm->word, bm->depth);
		if (ret < bm->depth)
			return true;
	}

	return false;
}

bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
	if (!tags)
		return true;

	return bt_has_free_tags(&tags->bitmap_tags);
}

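/*
 * Advance a wait queue index, wrapping at BT_WAIT_QUEUES (a power of
 * two). The atomic variant may lose a race and leave the index
 * unchanged, which is fine: the index is only a round-robin hint.
 */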
static inline int bt_index_inc(int index)
{
	return (index + 1) & (BT_WAIT_QUEUES - 1);
}

static inline void bt_index_atomic_inc(atomic_t *index)
{
	int old = atomic_read(index);
	int new = bt_index_inc(old);
	atomic_cmpxchg(index, old, new);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		atomic_inc(&hctx->tags->active_queues);

	return true;
}

/*
 * Wake up all tasks potentially sleeping on normal (non-reserved) tags.
 */
static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
{
	struct blk_mq_bitmap_tags *bt;
	int i, wake_index;

	bt = &tags->bitmap_tags;
	wake_index = atomic_read(&bt->wake_index);
	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		struct bt_wait_state *bs = &bt->bs[wake_index];

		if (waitqueue_active(&bs->wait))
			wake_up(&bs->wait);

		wake_index = bt_index_inc(wake_index);
	}
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return;

	atomic_dec(&tags->active_queues);

	blk_mq_tag_wakeup_all(tags);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_bitmap_tags *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;
}

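/*
 * Find and claim a free bit within a single bitmap word. The search
 * starts at @last_tag and wraps around to the start of the word once,
 * so the whole word is covered. Returns the bit number on success, or
 * -1 if no free bit could be claimed.
 */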
static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag)
{
	int tag, org_last_tag, end;
	bool wrap = last_tag != 0;

	org_last_tag = last_tag;
	end = bm->depth;
	do {
restart:
		tag = find_next_zero_bit(&bm->word, end, last_tag);
		if (unlikely(tag >= end)) {
			/*
			 * We started with an offset, start from 0 to
			 * exhaust the map.
			 */
			if (wrap) {
				wrap = false;
				end = org_last_tag;
				last_tag = 0;
				goto restart;
			}
			return -1;
		}
		last_tag = tag + 1;
	} while (test_and_set_bit(tag, &bm->word));

	return tag;
}

/*
 * Straightforward bitmap tag implementation, where each bit is a tag
 * (cleared == free, and set == busy). The small twist is using per-cpu
 * last_tag caches, which blk-mq stores in the blk_mq_ctx software queue
 * contexts. This enables us to drastically limit the space searched,
 * without dirtying an extra shared cacheline like we would if we stored
 * the cache value inside the shared blk_mq_bitmap_tags structure. On top
 * of that, each word of tags is in a separate cacheline. This means that
 * multiple users will tend to stick to different cachelines, at least
 * until the map is exhausted.
 */
static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
		    unsigned int *tag_cache)
{
	unsigned int last_tag, org_last_tag;
	int index, i, tag;

	if (!hctx_may_queue(hctx, bt))
		return -1;

	last_tag = org_last_tag = *tag_cache;
	index = TAG_TO_INDEX(bt, last_tag);

	for (i = 0; i < bt->map_nr; i++) {
		tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag));
		if (tag != -1) {
			tag += (index << bt->bits_per_word);
			goto done;
		}

		last_tag = 0;
		if (++index >= bt->map_nr)
			index = 0;
	}

	*tag_cache = 0;
	return -1;

	/*
	 * Only update the cache from the allocation path if we ended
	 * up using the specific cached tag.
	 */
done:
	if (tag == org_last_tag) {
		last_tag = tag + 1;
		if (last_tag >= bt->depth - 1)
			last_tag = 0;

		*tag_cache = last_tag;
	}

	return tag;
}

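/*
 * Pick the wait queue to sleep on. Per-hctx allocations round-robin
 * over the hctx's wait_index; reserved-tag allocations (no hctx) always
 * use the first wait queue.
 */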
static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
					 struct blk_mq_hw_ctx *hctx)
{
	struct bt_wait_state *bs;
	int wait_index;

	if (!hctx)
		return &bt->bs[0];

	wait_index = atomic_read(&hctx->wait_index);
	bs = &bt->bs[wait_index];
	bt_index_atomic_inc(&hctx->wait_index);
	return bs;
}

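/*
 * Allocate a tag, sleeping if necessary. Non-blocking allocations
 * (no __GFP_WAIT) fail immediately when the map is exhausted; blocking
 * allocations kick the hardware queue, retry, and finally sleep on one
 * of the wait queues until a tag is freed. Note that data->ctx and
 * data->hctx are re-mapped after sleeping, since we may wake up on a
 * different CPU.
 */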
static int bt_get(struct blk_mq_alloc_data *data,
		struct blk_mq_bitmap_tags *bt,
		struct blk_mq_hw_ctx *hctx,
		unsigned int *last_tag)
{
	struct bt_wait_state *bs;
	DEFINE_WAIT(wait);
	int tag;

	tag = __bt_get(hctx, bt, last_tag);
	if (tag != -1)
		return tag;

	if (!(data->gfp & __GFP_WAIT))
		return -1;

	bs = bt_wait_ptr(bt, hctx);
	do {
		prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);

		tag = __bt_get(hctx, bt, last_tag);
		if (tag != -1)
			break;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete.
		 */
		blk_mq_run_hw_queue(hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __bt_get(hctx, bt, last_tag);
		if (tag != -1)
			break;

		blk_mq_put_ctx(data->ctx);

		io_schedule();

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = data->q->mq_ops->map_queue(data->q,
				data->ctx->cpu);
		if (data->reserved) {
			bt = &data->hctx->tags->breserved_tags;
		} else {
			last_tag = &data->ctx->last_tag;
			hctx = data->hctx;
			bt = &hctx->tags->bitmap_tags;
		}
		finish_wait(&bs->wait, &wait);
		bs = bt_wait_ptr(bt, hctx);
	} while (1);

	finish_wait(&bs->wait, &wait);
	return tag;
}

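/*
 * Allocate a normal tag. The value handed back to the caller is offset
 * by the number of reserved tags, so reserved and normal tags share one
 * contiguous numbering. Returns BLK_MQ_TAG_FAIL on failure.
 */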
static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	int tag;

	tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
			&data->ctx->last_tag);
	if (tag >= 0)
		return tag + data->hctx->tags->nr_reserved_tags;

	return BLK_MQ_TAG_FAIL;
}

static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
{
	int tag, zero = 0;

	if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
		WARN_ON_ONCE(1);
		return BLK_MQ_TAG_FAIL;
	}

	tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero);
	if (tag < 0)
		return BLK_MQ_TAG_FAIL;

	return tag;
}

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	if (!data->reserved)
		return __blk_mq_get_tag(data);

	return __blk_mq_get_reserved_tag(data);
}

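/*
 * Find the first wait queue, starting at the current wake_index, that
 * has active waiters, and update wake_index to point at it so the next
 * caller continues from the same place. Returns NULL if nobody is
 * waiting.
 */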
static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
{
	int i, wake_index;

	wake_index = atomic_read(&bt->wake_index);
	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		struct bt_wait_state *bs = &bt->bs[wake_index];

		if (waitqueue_active(&bs->wait)) {
			int o = atomic_read(&bt->wake_index);
			if (wake_index != o)
				atomic_cmpxchg(&bt->wake_index, o, wake_index);

			return bs;
		}

		wake_index = bt_index_inc(wake_index);
	}

	return NULL;
}

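/*
 * Free a tag bit. Once a full batch of bt->wake_cnt tags has been freed
 * against the current wait queue, wake its sleepers and advance the
 * wake index to the next queue (the rolling wakeups mentioned at the
 * top of this file).
 */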
static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
{
	const int index = TAG_TO_INDEX(bt, tag);
	struct bt_wait_state *bs;
	int wait_cnt;

	clear_bit(TAG_TO_BIT(bt, tag), &bt->map[index].word);

	/* Ensure that the wait list checks occur after clear_bit(). */
	smp_mb();

	bs = bt_wake_ptr(bt);
	if (!bs)
		return;

	wait_cnt = atomic_dec_return(&bs->wait_cnt);
	if (unlikely(wait_cnt < 0))
		wait_cnt = atomic_inc_return(&bs->wait_cnt);
	if (wait_cnt == 0) {
		atomic_add(bt->wake_cnt, &bs->wait_cnt);
		bt_index_atomic_inc(&bt->wake_index);
		wake_up(&bs->wait);
	}
}

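/*
 * Release a tag. Normal tags are offset by the reserved count, so map
 * the value back before clearing the bit and remember it as the
 * caller's last_tag hint. Reserved tags are cleared directly.
 */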
void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
		    unsigned int *last_tag)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (tag >= tags->nr_reserved_tags) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		bt_clear_tag(&tags->bitmap_tags, real_tag);
		*last_tag = real_tag;
	} else {
		BUG_ON(tag >= tags->nr_reserved_tags);
		bt_clear_tag(&tags->breserved_tags, tag);
	}
}

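/*
 * Walk all allocated (set) bits in @bt and invoke @fn for each busy
 * request that belongs to this hctx's queue. @off translates bit
 * positions into tag values.
 */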
static void bt_for_each(struct blk_mq_hw_ctx *hctx,
		struct blk_mq_bitmap_tags *bt, unsigned int off,
		busy_iter_fn *fn, void *data, bool reserved)
{
	struct request *rq;
	int bit, i;

	for (i = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];

		for (bit = find_first_bit(&bm->word, bm->depth);
		     bit < bm->depth;
		     bit = find_next_bit(&bm->word, bm->depth, bit + 1)) {
			rq = blk_mq_tag_to_rq(hctx->tags, off + bit);
			if (rq->q == hctx->queue)
				fn(hctx, rq, data, reserved);
		}

		off += (1 << bt->bits_per_word);
	}
}

void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
		void *priv)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (tags->nr_reserved_tags)
		bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
	bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
			false);
}
EXPORT_SYMBOL(blk_mq_tag_busy_iter);

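/*
 * Count the set (in-use) bits across all words and return how many tags
 * remain free.
 */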
static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
{
	unsigned int i, used;

	for (i = 0, used = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];

		used += bitmap_weight(&bm->word, bm->depth);
	}

	return bt->depth - used;
}

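/*
 * Distribute @depth tags across the bitmap words and recompute the
 * wakeup batch size: at most BT_WAIT_BATCH, never more than
 * depth / BT_WAIT_QUEUES, and at least 1.
 */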
static void bt_update_count(struct blk_mq_bitmap_tags *bt,
			    unsigned int depth)
{
	unsigned int tags_per_word = 1U << bt->bits_per_word;
	unsigned int map_depth = depth;

	if (depth) {
		int i;

		for (i = 0; i < bt->map_nr; i++) {
			bt->map[i].depth = min(map_depth, tags_per_word);
			map_depth -= bt->map[i].depth;
		}
	}

	bt->wake_cnt = BT_WAIT_BATCH;
	if (bt->wake_cnt > depth / BT_WAIT_QUEUES)
		bt->wake_cnt = max(1U, depth / BT_WAIT_QUEUES);

	bt->depth = depth;
}

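/*
 * Allocate the bitmap words and wait queues for one tag map. The number
 * of bits per word is shrunk for small tag spaces so the tags still
 * spread over a few cachelines.
 */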
static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
			int node, bool reserved)
{
	int i;

	bt->bits_per_word = ilog2(BITS_PER_LONG);

	/*
	 * Depth can be zero for reserved tags, that's not a failure
	 * condition.
	 */
	if (depth) {
		unsigned int nr, tags_per_word;

		tags_per_word = (1 << bt->bits_per_word);

		/*
		 * If the tag space is small, shrink the number of tags
		 * per word so we spread over a few cachelines, at least.
		 * If there are fewer than 4 tags, just forget about it;
		 * it's not going to work optimally anyway.
		 */
		if (depth >= 4) {
			while (tags_per_word * 4 > depth) {
				bt->bits_per_word--;
				tags_per_word = (1 << bt->bits_per_word);
			}
		}

		nr = ALIGN(depth, tags_per_word) / tags_per_word;
		bt->map = kzalloc_node(nr * sizeof(struct blk_align_bitmap),
						GFP_KERNEL, node);
		if (!bt->map)
			return -ENOMEM;

		bt->map_nr = nr;
	}

	bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL);
	if (!bt->bs) {
		kfree(bt->map);
		return -ENOMEM;
	}

	bt_update_count(bt, depth);

	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		init_waitqueue_head(&bt->bs[i].wait);
		atomic_set(&bt->bs[i].wait_cnt, bt->wake_cnt);
	}

	return 0;
}

static void bt_free(struct blk_mq_bitmap_tags *bt)
{
	kfree(bt->map);
	kfree(bt->bs);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	if (bt_alloc(&tags->bitmap_tags, depth, node, false))
		goto enomem;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true))
		goto enomem;

	return tags;
enomem:
	bt_free(&tags->bitmap_tags);
	kfree(tags);
	return NULL;
}

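/*
 * Allocate and initialize a tag set with @total_tags tags, of which
 * @reserved_tags are kept in a separate reserved map.
 */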
struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags, int node)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;

	return blk_mq_init_bitmap_tags(tags, node);
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	bt_free(&tags->bitmap_tags);
	bt_free(&tags->breserved_tags);
	kfree(tags);
}

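/*
 * Seed a software queue's last_tag hint with a random offset, so that
 * different contexts start their searches in different parts of the
 * map (and thus different cachelines).
 */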
void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	*tag = prandom_u32() % depth;
}

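/*
 * Resize the normal tag map. @tdepth is the total depth including the
 * reserved tags, which themselves are never resized. Waiters are woken
 * so they can re-check against the new depth.
 */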
int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
{
	tdepth -= tags->nr_reserved_tags;
	if (tdepth > tags->nr_tags)
		return -EINVAL;

	/*
	 * We don't need to (and can't) update the reserved tags here;
	 * they remain static and should never need resizing.
	 */
	bt_update_count(&tags->bitmap_tags, tdepth);
	blk_mq_tag_wakeup_all(tags);
	return 0;
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function that returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	int hwq = 0;

	if (q->mq_ops) {
		hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
		hwq = hctx->queue_num;
	}

	return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);

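/*
 * Format tag map statistics (sizes, free counts, active queue count)
 * into @page for display via sysfs.
 */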
ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
	char *orig_page = page;
	unsigned int free, res;

	if (!tags)
		return 0;

	page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
			"bits_per_word=%u\n",
			tags->nr_tags, tags->nr_reserved_tags,
			tags->bitmap_tags.bits_per_word);

	free = bt_unused_tags(&tags->bitmap_tags);
	res = bt_unused_tags(&tags->breserved_tags);

	page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
	page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues));

	return page - orig_page;
}