xref: /openbmc/linux/block/blk-mq-tag.c (revision 0199e993)
/*
 * Fast and scalable bitmap tagging variant. Uses sparser bitmaps spread
 * over multiple cachelines to avoid ping-pong between multiple submitters
 * or submitter and completer. Uses rolling wakeups to avoid falling off
 * the scaling cliff when we run out of tags and have to start putting
 * submitters to sleep.
 *
 * Uses active queue tracking to support fairer distribution of tags
 * between multiple submitters when a shared tag map is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
{
	int i;

	for (i = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];
		int ret;

		ret = find_first_zero_bit(&bm->word, bm->depth);
		if (ret < bm->depth)
			return true;
	}

	return false;
}

bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
	if (!tags)
		return true;

	return bt_has_free_tags(&tags->bitmap_tags);
}

static inline int bt_index_inc(int index)
{
	return (index + 1) & (BT_WAIT_QUEUES - 1);
}

static inline void bt_index_atomic_inc(atomic_t *index)
{
	int old = atomic_read(index);
	int new = bt_index_inc(old);
	atomic_cmpxchg(index, old, new);
}
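
/*
 * Example (illustrative, assuming BT_WAIT_QUEUES == 8): the power-of-two
 * mask wraps the index in place, so bt_index_inc(6) == 7 and
 * bt_index_inc(7) == 0. The lone cmpxchg in bt_index_atomic_inc() may
 * lose a racing increment; that is acceptable here, since the index
 * merely spreads waiters across the wait queues and needs no strict
 * accuracy.
 */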

/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		atomic_inc(&hctx->tags->active_queues);

	return true;
}

/*
 * Wake up all tasks potentially sleeping on normal (non-reserved) tags.
 */
static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
{
	struct blk_mq_bitmap_tags *bt;
	int i, wake_index;

	bt = &tags->bitmap_tags;
	wake_index = atomic_read(&bt->wake_index);
	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		struct bt_wait_state *bs = &bt->bs[wake_index];

		if (waitqueue_active(&bs->wait))
			wake_up(&bs->wait);

		wake_index = bt_index_inc(wake_index);
	}
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return;

	atomic_dec(&tags->active_queues);

	blk_mq_tag_wakeup_all(tags);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_bitmap_tags *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant: a depth of 1 can't be shared.
	 */
	if (bt->depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;
}
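
/*
 * Worked example for the fair-share limit above (illustrative numbers):
 * with bt->depth == 128 and 3 active queues, each queue may use up to
 * max((128 + 2) / 3, 4) == 43 tags; with depth == 8 and 4 active queues,
 * the 4U floor wins and each queue may still use 4 tags.
 */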

static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag)
{
	int tag, org_last_tag, end;

	org_last_tag = last_tag;
	end = bm->depth;
	do {
restart:
		tag = find_next_zero_bit(&bm->word, end, last_tag);
		if (unlikely(tag >= end)) {
			/*
			 * We started the search at an offset; wrap around
			 * and start from 0 to exhaust the rest of the map.
			 */
			if (org_last_tag && last_tag) {
				end = last_tag;
				last_tag = 0;
				goto restart;
			}
			return -1;
		}
		last_tag = tag + 1;
	} while (test_and_set_bit_lock(tag, &bm->word));

	return tag;
}
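
/*
 * Search example (illustrative): with bm->depth == 32 and last_tag == 20,
 * the first find_next_zero_bit() covers bits [20, 32). If those are all
 * set, end is pulled back to 20 and the scan restarts at bit 0, so bits
 * [0, 20) are covered exactly once before the function gives up with -1.
 */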

/*
 * Straightforward bitmap tag implementation, where each bit is a tag
 * (cleared == free, and set == busy). The small twist is using per-cpu
 * last_tag caches, which blk-mq stores in the blk_mq_ctx software queue
 * contexts. This enables us to drastically limit the space searched,
 * without dirtying an extra shared cacheline like we would if we stored
 * the cache value inside the shared blk_mq_bitmap_tags structure. On top
 * of that, each word of tags is in a separate cacheline. This means that
 * multiple users will tend to stick to different cachelines, at least
 * until the map is exhausted.
 */
static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
		    unsigned int *tag_cache)
{
	unsigned int last_tag, org_last_tag;
	int index, i, tag;

	if (!hctx_may_queue(hctx, bt))
		return -1;

	last_tag = org_last_tag = *tag_cache;
	index = TAG_TO_INDEX(bt, last_tag);

	for (i = 0; i < bt->map_nr; i++) {
		tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag));
		if (tag != -1) {
			tag += (index << bt->bits_per_word);
			goto done;
		}

		last_tag = 0;
		if (++index >= bt->map_nr)
			index = 0;
	}

	*tag_cache = 0;
	return -1;

	/*
	 * Only update the cache from the allocation path if we ended
	 * up using the specific cached tag.
	 */
done:
	if (tag == org_last_tag) {
		last_tag = tag + 1;
		if (last_tag >= bt->depth - 1)
			last_tag = 0;

		*tag_cache = last_tag;
	}

	return tag;
}
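
/*
 * Tag decomposition example (illustrative): with bt->bits_per_word == 5
 * (32 tags per word), TAG_TO_INDEX(bt, 70) == 70 >> 5 == 2 and
 * TAG_TO_BIT(bt, 70) == 70 & 31 == 6, i.e. tag 70 lives in bit 6 of map
 * word 2. The "tag += index << bits_per_word" above is the inverse step.
 */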

static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
					 struct blk_mq_hw_ctx *hctx)
{
	struct bt_wait_state *bs;
	int wait_index;

	if (!hctx)
		return &bt->bs[0];

	wait_index = atomic_read(&hctx->wait_index);
	bs = &bt->bs[wait_index];
	bt_index_atomic_inc(&hctx->wait_index);
	return bs;
}

static int bt_get(struct blk_mq_alloc_data *data,
		struct blk_mq_bitmap_tags *bt,
		struct blk_mq_hw_ctx *hctx,
		unsigned int *last_tag)
{
	struct bt_wait_state *bs;
	DEFINE_WAIT(wait);
	int tag;

	tag = __bt_get(hctx, bt, last_tag);
	if (tag != -1)
		return tag;

	if (!(data->gfp & __GFP_WAIT))
		return -1;

	bs = bt_wait_ptr(bt, hctx);
	do {
		prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);

		tag = __bt_get(hctx, bt, last_tag);
		if (tag != -1)
			break;

		blk_mq_put_ctx(data->ctx);

		io_schedule();

		/*
		 * We may have been moved to a different CPU while we
		 * slept, so remap to the current software and hardware
		 * queues before retrying.
		 */
		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = data->q->mq_ops->map_queue(data->q,
				data->ctx->cpu);
		if (data->reserved) {
			bt = &data->hctx->tags->breserved_tags;
		} else {
			last_tag = &data->ctx->last_tag;
			hctx = data->hctx;
			bt = &hctx->tags->bitmap_tags;
		}
		finish_wait(&bs->wait, &wait);
		bs = bt_wait_ptr(bt, hctx);
	} while (1);

	finish_wait(&bs->wait, &wait);
	return tag;
}

static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	int tag;

	tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
			&data->ctx->last_tag);
	if (tag >= 0)
		return tag + data->hctx->tags->nr_reserved_tags;

	return BLK_MQ_TAG_FAIL;
}

static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
{
	unsigned int zero = 0;
	int tag;

	if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
		WARN_ON_ONCE(1);
		return BLK_MQ_TAG_FAIL;
	}

	tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero);
	if (tag < 0)
		return BLK_MQ_TAG_FAIL;

	return tag;
}

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	if (!data->reserved)
		return __blk_mq_get_tag(data);

	return __blk_mq_get_reserved_tag(data);
}
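
/*
 * Allocation sketch (illustrative, not a complete caller): assuming the
 * blk_mq_set_alloc_data() helper from blk-mq.h and a ctx/hctx pair the
 * caller already holds, as in blk_mq_map_request():
 *
 *	struct blk_mq_alloc_data data;
 *	unsigned int tag;
 *
 *	blk_mq_set_alloc_data(&data, q, GFP_KERNEL, false, ctx, hctx);
 *	tag = blk_mq_get_tag(&data);
 *	if (tag != BLK_MQ_TAG_FAIL) {
 *		(use the tag; it already includes the nr_reserved_tags offset)
 *		blk_mq_put_tag(data.hctx, tag, &data.ctx->last_tag);
 *	}
 *
 * Note that bt_get() may have remapped data.ctx/data.hctx while waiting,
 * so the tag must be released against data.hctx, not the original hctx.
 */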

static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
{
	int i, wake_index;

	wake_index = atomic_read(&bt->wake_index);
	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		struct bt_wait_state *bs = &bt->bs[wake_index];

		if (waitqueue_active(&bs->wait)) {
			int o = atomic_read(&bt->wake_index);
			if (wake_index != o)
				atomic_cmpxchg(&bt->wake_index, o, wake_index);

			return bs;
		}

		wake_index = bt_index_inc(wake_index);
	}

	return NULL;
}

static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
{
	const int index = TAG_TO_INDEX(bt, tag);
	struct bt_wait_state *bs;
	int wait_cnt;

	/*
	 * The unlock memory barrier needs to order access to the request
	 * in the free path against the clearing of the tag bit.
	 */
	clear_bit_unlock(TAG_TO_BIT(bt, tag), &bt->map[index].word);

	bs = bt_wake_ptr(bt);
	if (!bs)
		return;

	wait_cnt = atomic_dec_return(&bs->wait_cnt);
	if (unlikely(wait_cnt < 0))
		wait_cnt = atomic_inc_return(&bs->wait_cnt);
	if (wait_cnt == 0) {
		atomic_add(bt->wake_cnt, &bs->wait_cnt);
		bt_index_atomic_inc(&bt->wake_index);
		wake_up(&bs->wait);
	}
}
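
/*
 * Batching example (illustrative, assuming bt->wake_cnt == 8): only every
 * 8th freed tag reaches wake_up(), and each wakeup advances wake_index to
 * the next queue. A burst of 16 frees thus wakes two consecutive wait
 * queues instead of hammering one queue 16 times: the "rolling wakeups"
 * scheme from the comment at the top of this file.
 */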

static void __blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
{
	BUG_ON(tag >= tags->nr_tags);

	bt_clear_tag(&tags->bitmap_tags, tag);
}

static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags,
				      unsigned int tag)
{
	BUG_ON(tag >= tags->nr_reserved_tags);

	bt_clear_tag(&tags->breserved_tags, tag);
}

void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
		    unsigned int *last_tag)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (tag >= tags->nr_reserved_tags) {
		const int real_tag = tag - tags->nr_reserved_tags;

		__blk_mq_put_tag(tags, real_tag);
		*last_tag = real_tag;
	} else
		__blk_mq_put_reserved_tag(tags, tag);
}
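
/*
 * Offset example (illustrative): with tags->nr_reserved_tags == 2, the
 * reserved tags occupy the externally visible values 0 and 1, while an
 * external tag of 5 maps to bit 3 of the normal bitmap. This mirrors the
 * "+ nr_reserved_tags" adjustment made in __blk_mq_get_tag().
 */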

static void bt_for_each(struct blk_mq_hw_ctx *hctx,
		struct blk_mq_bitmap_tags *bt, unsigned int off,
		busy_iter_fn *fn, void *data, bool reserved)
{
	struct request *rq;
	int bit, i;

	for (i = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];

		for (bit = find_first_bit(&bm->word, bm->depth);
		     bit < bm->depth;
		     bit = find_next_bit(&bm->word, bm->depth, bit + 1)) {
			rq = blk_mq_tag_to_rq(hctx->tags, off + bit);
			if (rq->q == hctx->queue)
				fn(hctx, rq, data, reserved);
		}

		off += (1 << bt->bits_per_word);
	}
}

void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
		void *priv)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (tags->nr_reserved_tags)
		bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
	bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
			false);
}
EXPORT_SYMBOL(blk_mq_tag_busy_iter);

static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
{
	unsigned int i, used;

	for (i = 0, used = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];

		used += bitmap_weight(&bm->word, bm->depth);
	}

	return bt->depth - used;
}

static void bt_update_count(struct blk_mq_bitmap_tags *bt,
			    unsigned int depth)
{
	unsigned int tags_per_word = 1U << bt->bits_per_word;
	unsigned int map_depth = depth;

	if (depth) {
		int i;

		for (i = 0; i < bt->map_nr; i++) {
			bt->map[i].depth = min(map_depth, tags_per_word);
			map_depth -= bt->map[i].depth;
		}
	}

	bt->wake_cnt = BT_WAIT_BATCH;
	if (bt->wake_cnt > depth / BT_WAIT_QUEUES)
		bt->wake_cnt = max(1U, depth / BT_WAIT_QUEUES);

	bt->depth = depth;
}
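
/*
 * Worked example (illustrative): depth == 42 with 32 tags per word fills
 * map[0].depth == 32 and map[1].depth == 10. Assuming BT_WAIT_QUEUES == 8
 * and BT_WAIT_BATCH == 8, 42 / 8 == 5 is below the batch size, so
 * wake_cnt becomes max(1, 5) == 5.
 */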

static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
			int node, bool reserved)
{
	int i;

	bt->bits_per_word = ilog2(BITS_PER_LONG);

	/*
	 * Depth can be zero for reserved tags, that's not a failure
	 * condition.
	 */
	if (depth) {
		unsigned int nr, tags_per_word;

		tags_per_word = (1 << bt->bits_per_word);

		/*
		 * If the tag space is small, shrink the number of tags
		 * per word so we spread over at least a few cachelines.
		 * With fewer than 4 tags, don't bother: it's not going
		 * to work optimally anyway.
		 */
		if (depth >= 4) {
			while (tags_per_word * 4 > depth) {
				bt->bits_per_word--;
				tags_per_word = (1 << bt->bits_per_word);
			}
		}

		nr = ALIGN(depth, tags_per_word) / tags_per_word;
		bt->map = kzalloc_node(nr * sizeof(struct blk_align_bitmap),
						GFP_KERNEL, node);
		if (!bt->map)
			return -ENOMEM;

		bt->map_nr = nr;
	}

	bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL);
	if (!bt->bs) {
		kfree(bt->map);
		return -ENOMEM;
	}

	bt_update_count(bt, depth);

	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		init_waitqueue_head(&bt->bs[i].wait);
		atomic_set(&bt->bs[i].wait_cnt, bt->wake_cnt);
	}

	return 0;
}
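
/*
 * Sizing example (illustrative, 64-bit kernel): for depth == 48,
 * tags_per_word starts at 64 and is halved while tags_per_word * 4 > 48,
 * settling at 8 tags per word (bits_per_word == 3). That yields
 * ALIGN(48, 8) / 8 == 6 map words, so the 48 tags span six cachelines
 * rather than packing into a single long, per the file header comment.
 */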

static void bt_free(struct blk_mq_bitmap_tags *bt)
{
	kfree(bt->map);
	kfree(bt->bs);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	if (bt_alloc(&tags->bitmap_tags, depth, node, false))
		goto enomem;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true))
		goto enomem;

	return tags;
enomem:
	bt_free(&tags->bitmap_tags);
	kfree(tags);
	return NULL;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags, int node)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;

	return blk_mq_init_bitmap_tags(tags, node);
}
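
/*
 * Setup sketch (illustrative): a driver with a 64-entry hardware queue
 * that holds one tag back for internal commands ends up with the
 * equivalent of:
 *
 *	struct blk_mq_tags *tags;
 *
 *	tags = blk_mq_init_tags(64, 1, numa_node);
 *	if (!tags)
 *		return -ENOMEM;
 *
 * blk-mq drives this from the queue_depth and reserved_tags fields of
 * struct blk_mq_tag_set when it sets up each hardware queue.
 */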

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	bt_free(&tags->bitmap_tags);
	bt_free(&tags->breserved_tags);
	kfree(tags);
}

void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	*tag = prandom_u32() % depth;
}

int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
{
	tdepth -= tags->nr_reserved_tags;
	if (tdepth > tags->nr_tags)
		return -EINVAL;

	/*
	 * We don't need to (and can't) update reserved tags here; they
	 * remain static and should never need resizing.
	 */
	bt_update_count(&tags->bitmap_tags, tdepth);
	blk_mq_tag_wakeup_all(tags);
	return 0;
}

ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
	char *orig_page = page;
	unsigned int free, res;

	if (!tags)
		return 0;

	page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
			"bits_per_word=%u\n",
			tags->nr_tags, tags->nr_reserved_tags,
			tags->bitmap_tags.bits_per_word);

	free = bt_unused_tags(&tags->bitmap_tags);
	res = bt_unused_tags(&tags->breserved_tags);

	page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
	page += sprintf(page, "active_queues=%u\n",
			atomic_read(&tags->active_queues));

	return page - orig_page;
}
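
/*
 * Sample output of the sysfs show above (illustrative values):
 *
 *	nr_tags=128, reserved_tags=1, bits_per_word=5
 *	nr_free=128, nr_reserved=1
 *	active_queues=0
 */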
608