xref: /openbmc/linux/block/blk-mq-tag.c (revision 4161b450)
/*
 * Fast and scalable bitmap tagging variant. Uses sparser bitmaps spread
 * over multiple cachelines to avoid ping-pong between multiple submitters
 * or submitter and completer. Uses rolling wakeups to avoid falling off
 * the scaling cliff when we run out of tags and have to start putting
 * submitters to sleep.
 *
 * Uses active queue tracking to support fairer distribution of tags
 * between multiple submitters when a shared tag map is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
{
	int i;

	for (i = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];
		int ret;

		ret = find_first_zero_bit(&bm->word, bm->depth);
		if (ret < bm->depth)
			return true;
	}

	return false;
}

bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
	if (!tags)
		return true;

	return bt_has_free_tags(&tags->bitmap_tags);
}

static inline int bt_index_inc(int index)
{
	return (index + 1) & (BT_WAIT_QUEUES - 1);
}

static inline void bt_index_atomic_inc(atomic_t *index)
{
	int old = atomic_read(index);
	int new = bt_index_inc(old);
	atomic_cmpxchg(index, old, new);
}
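
/*
 * Note: the mask above relies on BT_WAIT_QUEUES being a power of two
 * (e.g. with BT_WAIT_QUEUES == 8, index 7 wraps back to 0). The increment
 * is intentionally racy: if the cmpxchg() loses to another CPU, that CPU
 * has already advanced the index for us, so there is no need to retry.
 */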

/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		atomic_inc(&hctx->tags->active_queues);

	return true;
}

/*
 * Wake up all tasks potentially sleeping on tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	struct blk_mq_bitmap_tags *bt;
	int i, wake_index;

	bt = &tags->bitmap_tags;
	wake_index = atomic_read(&bt->wake_index);
	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		struct bt_wait_state *bs = &bt->bs[wake_index];

		if (waitqueue_active(&bs->wait))
			wake_up(&bs->wait);

		wake_index = bt_index_inc(wake_index);
	}

	if (include_reserve) {
		bt = &tags->breserved_tags;
		if (waitqueue_active(&bt->bs[0].wait))
			wake_up(&bt->bs[0].wait);
	}
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return;

	atomic_dec(&tags->active_queues);

	blk_mq_tag_wakeup_all(tags, false);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_bitmap_tags *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;
}
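
/*
 * Rough illustration of the fair-share math above: with a depth of 128
 * and 3 active shared users, each queue may have up to
 * (128 + 3 - 1) / 3 = 43 tags in flight; with 64 active users the
 * per-queue share would round down to 2, so the max(..., 4U) floor
 * guarantees every queue can still get at least 4 tags.
 */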

static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag)
{
	int tag, org_last_tag, end;
	bool wrap = last_tag != 0;

	org_last_tag = last_tag;
	end = bm->depth;
	do {
restart:
		tag = find_next_zero_bit(&bm->word, end, last_tag);
		if (unlikely(tag >= end)) {
			/*
			 * We started with an offset; restart from 0 to
			 * exhaust the map.
			 */
			if (wrap) {
				wrap = false;
				end = org_last_tag;
				last_tag = 0;
				goto restart;
			}
			return -1;
		}
		last_tag = tag + 1;
	} while (test_and_set_bit(tag, &bm->word));

	return tag;
}
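
/*
 * The scan above is circular: it searches [last_tag, depth) first and,
 * if that fails, wraps around to search [0, org_last_tag). A zero bit
 * found by find_next_zero_bit() can still be grabbed by another CPU
 * before our test_and_set_bit(), in which case the loop simply keeps
 * scanning from the next position.
 */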

/*
 * Straightforward bitmap tag implementation, where each bit is a tag
 * (cleared == free, and set == busy). The small twist is using per-cpu
 * last_tag caches, which blk-mq stores in the blk_mq_ctx software queue
 * contexts. This enables us to drastically limit the space searched,
 * without dirtying an extra shared cacheline like we would if we stored
 * the cache value inside the shared blk_mq_bitmap_tags structure. On top
 * of that, each word of tags is in a separate cacheline. This means that
 * multiple users will tend to stick to different cachelines, at least
 * until the map is exhausted.
 */
static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
		    unsigned int *tag_cache)
{
	unsigned int last_tag, org_last_tag;
	int index, i, tag;

	if (!hctx_may_queue(hctx, bt))
		return -1;

	last_tag = org_last_tag = *tag_cache;
	index = TAG_TO_INDEX(bt, last_tag);

	for (i = 0; i < bt->map_nr; i++) {
		tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag));
		if (tag != -1) {
			tag += (index << bt->bits_per_word);
			goto done;
		}

		last_tag = 0;
		if (++index >= bt->map_nr)
			index = 0;
	}

	*tag_cache = 0;
	return -1;

	/*
	 * Only update the cache from the allocation path if we ended
	 * up using the specific cached tag.
	 */
done:
	if (tag == org_last_tag) {
		last_tag = tag + 1;
		if (last_tag >= bt->depth - 1)
			last_tag = 0;

		*tag_cache = last_tag;
	}

	return tag;
}
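
/*
 * Sketch of the tag layout assumed above: a tag is split into a word
 * index (the high bits) and a bit offset within that word (the low
 * bits_per_word bits). For example, with bits_per_word == 6 (64 tags
 * per word), tag 70 lives in map[1] at bit 6, and the reverse mapping
 * is tag = (index << bits_per_word) + bit, as done in the loop above.
 */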

static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
					 struct blk_mq_hw_ctx *hctx)
{
	struct bt_wait_state *bs;
	int wait_index;

	if (!hctx)
		return &bt->bs[0];

	wait_index = atomic_read(&hctx->wait_index);
	bs = &bt->bs[wait_index];
	bt_index_atomic_inc(&hctx->wait_index);
	return bs;
}

static int bt_get(struct blk_mq_alloc_data *data,
		struct blk_mq_bitmap_tags *bt,
		struct blk_mq_hw_ctx *hctx,
		unsigned int *last_tag)
{
	struct bt_wait_state *bs;
	DEFINE_WAIT(wait);
	int tag;

	tag = __bt_get(hctx, bt, last_tag);
	if (tag != -1)
		return tag;

	if (!(data->gfp & __GFP_WAIT))
		return -1;

	bs = bt_wait_ptr(bt, hctx);
	do {
		prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);

		tag = __bt_get(hctx, bt, last_tag);
		if (tag != -1)
			break;

		/*
		 * We're out of tags on this hardware queue; kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete.
		 */
		blk_mq_run_hw_queue(hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __bt_get(hctx, bt, last_tag);
		if (tag != -1)
			break;

		blk_mq_put_ctx(data->ctx);

		io_schedule();

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = data->q->mq_ops->map_queue(data->q,
				data->ctx->cpu);
		if (data->reserved) {
			bt = &data->hctx->tags->breserved_tags;
		} else {
			last_tag = &data->ctx->last_tag;
			hctx = data->hctx;
			bt = &hctx->tags->bitmap_tags;
		}
		finish_wait(&bs->wait, &wait);
		bs = bt_wait_ptr(bt, hctx);
	} while (1);

	finish_wait(&bs->wait, &wait);
	return tag;
}
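
/*
 * Note on the loop above: after io_schedule() the task may resume on a
 * different CPU, so the software queue (ctx), hardware queue (hctx),
 * bitmap and last_tag hint are all re-derived before retrying, and the
 * wait entry is moved to the wait queue that matches the new hctx.
 */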

static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	int tag;

	tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
			&data->ctx->last_tag);
	if (tag >= 0)
		return tag + data->hctx->tags->nr_reserved_tags;

	return BLK_MQ_TAG_FAIL;
}

static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
{
	int tag, zero = 0;

	if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
		WARN_ON_ONCE(1);
		return BLK_MQ_TAG_FAIL;
	}

	tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero);
	if (tag < 0)
		return BLK_MQ_TAG_FAIL;

	return tag;
}

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	if (!data->reserved)
		return __blk_mq_get_tag(data);

	return __blk_mq_get_reserved_tag(data);
}
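
/*
 * Layout of the externally visible tag space: reserved tags occupy
 * values [0, nr_reserved_tags) and come from breserved_tags, while
 * normal tags come from bitmap_tags and are returned offset by
 * nr_reserved_tags. For example, with 2 reserved tags, bitmap_tags
 * bit 0 is handed out as tag 2; blk_mq_put_tag() below undoes the
 * offset before clearing the bit.
 */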

static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
{
	int i, wake_index;

	wake_index = atomic_read(&bt->wake_index);
	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		struct bt_wait_state *bs = &bt->bs[wake_index];

		if (waitqueue_active(&bs->wait)) {
			int o = atomic_read(&bt->wake_index);
			if (wake_index != o)
				atomic_cmpxchg(&bt->wake_index, o, wake_index);

			return bs;
		}

		wake_index = bt_index_inc(wake_index);
	}

	return NULL;
}

static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
{
	const int index = TAG_TO_INDEX(bt, tag);
	struct bt_wait_state *bs;
	int wait_cnt;

	clear_bit(TAG_TO_BIT(bt, tag), &bt->map[index].word);

	/* Ensure that the wait list checks occur after clear_bit(). */
	smp_mb();

	bs = bt_wake_ptr(bt);
	if (!bs)
		return;

	wait_cnt = atomic_dec_return(&bs->wait_cnt);
	if (unlikely(wait_cnt < 0))
		wait_cnt = atomic_inc_return(&bs->wait_cnt);
	if (wait_cnt == 0) {
		atomic_add(bt->wake_cnt, &bs->wait_cnt);
		bt_index_atomic_inc(&bt->wake_index);
		wake_up(&bs->wait);
	}
}
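
/*
 * This is the "rolling wakeup" mentioned at the top of the file: each
 * freed tag decrements the wait_cnt of the current wait queue, and only
 * when it reaches zero is that queue woken and wake_index advanced to
 * the next one. As a rough example, with wake_cnt == 8 a burst of 32
 * completions wakes (at most) 4 of the BT_WAIT_QUEUES wait queues
 * instead of issuing 32 individual wakeups.
 */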

void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
		    unsigned int *last_tag)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (tag >= tags->nr_reserved_tags) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		bt_clear_tag(&tags->bitmap_tags, real_tag);
		*last_tag = real_tag;
	} else {
		BUG_ON(tag >= tags->nr_reserved_tags);
		bt_clear_tag(&tags->breserved_tags, tag);
	}
}

static void bt_for_each(struct blk_mq_hw_ctx *hctx,
		struct blk_mq_bitmap_tags *bt, unsigned int off,
		busy_iter_fn *fn, void *data, bool reserved)
{
	struct request *rq;
	int bit, i;

	for (i = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];

		for (bit = find_first_bit(&bm->word, bm->depth);
		     bit < bm->depth;
		     bit = find_next_bit(&bm->word, bm->depth, bit + 1)) {
			rq = blk_mq_tag_to_rq(hctx->tags, off + bit);
			if (rq->q == hctx->queue)
				fn(hctx, rq, data, reserved);
		}

		off += (1 << bt->bits_per_word);
	}
}
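
/*
 * In the walk above, 'off' converts a bit position back into a tag in
 * the caller's tag space: each map word covers 1 << bits_per_word tags,
 * so a set bit b in word i maps to tag = initial off + i * (1 << bits_per_word) + b,
 * where blk_mq_tag_busy_iter() passes nr_reserved_tags as the initial
 * offset for the normal bitmap.
 */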

void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
		void *priv)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (tags->nr_reserved_tags)
		bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
	bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
			false);
}
EXPORT_SYMBOL(blk_mq_tag_busy_iter);

static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
{
	unsigned int i, used;

	for (i = 0, used = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];

		used += bitmap_weight(&bm->word, bm->depth);
	}

	return bt->depth - used;
}

static void bt_update_count(struct blk_mq_bitmap_tags *bt,
			    unsigned int depth)
{
	unsigned int tags_per_word = 1U << bt->bits_per_word;
	unsigned int map_depth = depth;

	if (depth) {
		int i;

		for (i = 0; i < bt->map_nr; i++) {
			bt->map[i].depth = min(map_depth, tags_per_word);
			map_depth -= bt->map[i].depth;
		}
	}

	bt->wake_cnt = BT_WAIT_BATCH;
	if (bt->wake_cnt > depth / BT_WAIT_QUEUES)
		bt->wake_cnt = max(1U, depth / BT_WAIT_QUEUES);

	bt->depth = depth;
}
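
/*
 * Example of the sizing above, assuming BT_WAIT_BATCH and BT_WAIT_QUEUES
 * are both 8: a depth of 100 with 64 tags per word gives map[0].depth = 64
 * and map[1].depth = 36, and wake_cnt stays at BT_WAIT_BATCH (8), since
 * 100 / 8 = 12 is not smaller than 8. A shallow depth of 32 would clamp
 * wake_cnt to 32 / 8 = 4, so waiters are still woken reasonably often
 * when few tags exist.
 */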

static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
			int node, bool reserved)
{
	int i;

	bt->bits_per_word = ilog2(BITS_PER_LONG);

	/*
	 * Depth can be zero for reserved tags; that's not a failure
	 * condition.
	 */
	if (depth) {
		unsigned int nr, tags_per_word;

		tags_per_word = (1 << bt->bits_per_word);

		/*
		 * If the tag space is small, shrink the number of tags
		 * per word so we spread over at least a few cachelines.
		 * If there are fewer than 4 tags, don't bother; it's not
		 * going to work optimally anyway.
		 */
		if (depth >= 4) {
			while (tags_per_word * 4 > depth) {
				bt->bits_per_word--;
				tags_per_word = (1 << bt->bits_per_word);
			}
		}

		nr = ALIGN(depth, tags_per_word) / tags_per_word;
		bt->map = kzalloc_node(nr * sizeof(struct blk_align_bitmap),
						GFP_KERNEL, node);
		if (!bt->map)
			return -ENOMEM;

		bt->map_nr = nr;
	}

	bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL);
	if (!bt->bs) {
		kfree(bt->map);
		return -ENOMEM;
	}

	bt_update_count(bt, depth);

	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		init_waitqueue_head(&bt->bs[i].wait);
		atomic_set(&bt->bs[i].wait_cnt, bt->wake_cnt);
	}

	return 0;
}
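
/*
 * Sketch of the shrinking loop above on a 64-bit machine (BITS_PER_LONG
 * == 64, so bits_per_word starts at 6): for a depth of 32 tags, the loop
 * halves the word size while 4 * tags_per_word > 32, ending at
 * tags_per_word == 8 (bits_per_word == 3). That yields
 * ALIGN(32, 8) / 8 = 4 map words, so the 32 tags are spread across four
 * cachelines instead of sharing one.
 */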

static void bt_free(struct blk_mq_bitmap_tags *bt)
{
	kfree(bt->map);
	kfree(bt->bs);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	if (bt_alloc(&tags->bitmap_tags, depth, node, false))
		goto enomem;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true))
		goto enomem;

	return tags;
enomem:
	bt_free(&tags->bitmap_tags);
	kfree(tags);
	return NULL;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags, int node)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;

	return blk_mq_init_bitmap_tags(tags, node);
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	bt_free(&tags->bitmap_tags);
	bt_free(&tags->breserved_tags);
	kfree(tags);
}

void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	*tag = prandom_u32() % depth;
}
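
/*
 * The random starting hint spreads the per-ctx last_tag values over the
 * whole non-reserved tag space, so different software queues tend to
 * begin allocating from different bitmap words (and hence different
 * cachelines) rather than all contending on word 0.
 */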

int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
{
	tdepth -= tags->nr_reserved_tags;
	if (tdepth > tags->nr_tags)
		return -EINVAL;

	/*
	 * We don't need to (and can't) update reserved tags here; they
	 * remain static and should never need resizing.
	 */
	bt_update_count(&tags->bitmap_tags, tdepth);
	blk_mq_tag_wakeup_all(tags, false);
	return 0;
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function, which returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	int hwq = 0;

	if (q->mq_ops) {
		hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
		hwq = hctx->queue_num;
	}

	return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
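
/*
 * Example of the encoding above, assuming BLK_MQ_UNIQUE_TAG_BITS == 16
 * and BLK_MQ_UNIQUE_TAG_MASK == 0xffff: hardware queue 2, per-queue tag 5
 * becomes the unique tag 0x00020005. The blk_mq_unique_tag_to_hwq() and
 * blk_mq_unique_tag_to_tag() helpers recover the two halves.
 */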

ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
	char *orig_page = page;
	unsigned int free, res;

	if (!tags)
		return 0;

	page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
			"bits_per_word=%u\n",
			tags->nr_tags, tags->nr_reserved_tags,
			tags->bitmap_tags.bits_per_word);

	free = bt_unused_tags(&tags->bitmap_tags);
	res = bt_unused_tags(&tags->breserved_tags);

	page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
	page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues));

	return page - orig_page;
}
646