// SPDX-License-Identifier: GPL-2.0
/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/delay.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"

/*
 * Recalculate the wakeup batch when the tag set is shared by multiple hctxs.
 */
static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,
		unsigned int users)
{
	if (!users)
		return;

	sbitmap_queue_recalculate_wake_batch(&tags->bitmap_tags,
			users);
	sbitmap_queue_recalculate_wake_batch(&tags->breserved_tags,
			users);
}
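
/*
 * Editorial note (added; not in the original source): sbitmap_queue wakes
 * waiters in batches sized from the bitmap depth. When several hctxs share
 * one tag set, each active queue effectively owns only a fraction of the
 * tags, so the batch is rescaled by the number of active users above;
 * otherwise a waiter could be left sleeping even though its queue will
 * never free enough tags to fill a full-sized batch.
 */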

/*
 * If a previously inactive queue goes active, bump the active user count.
 * We need to do this before trying to allocate a driver tag, so that even
 * if the first allocation attempt fails, the other shared-tag users can
 * reserve budget for it.
 */
void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	unsigned int users;

	/*
	 * Calling test_bit() prior to test_and_set_bit() is intentional;
	 * it avoids dirtying the cacheline if the queue is already active.
	 */
	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) ||
		    test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			return;
	} else {
		if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) ||
		    test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return;
	}

	users = atomic_inc_return(&hctx->tags->active_queues);

	blk_mq_update_wake_batch(hctx->tags, users);
}
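
/*
 * Editorial note (added; not in the original source): the active_queues
 * count maintained here feeds the fairness check in hctx_may_queue(),
 * called from __blk_mq_get_tag() below. Roughly, each active shared-tag
 * user is allowed about nr_tags / active_queues tags at a time, so marking
 * the queue active before the first allocation attempt makes sure it is
 * counted when the other users compute their share.
 */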

/*
 * Wake up all tasks potentially sleeping on tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	sbitmap_queue_wake_all(&tags->bitmap_tags);
	if (include_reserve)
		sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;
	unsigned int users;

	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE,
					&q->queue_flags))
			return;
	} else {
		if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return;
	}

	users = atomic_dec_return(&tags->active_queues);

	blk_mq_update_wake_batch(tags, users);

	blk_mq_tag_wakeup_all(tags, false);
}

static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
			    struct sbitmap_queue *bt)
{
	if (!data->q->elevator && !(data->flags & BLK_MQ_REQ_RESERVED) &&
			!hctx_may_queue(data->hctx, bt))
		return BLK_MQ_NO_TAG;

	if (data->shallow_depth)
		return sbitmap_queue_get_shallow(bt, data->shallow_depth);
	else
		return __sbitmap_queue_get(bt);
}
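
/*
 * Editorial note (added; not in the original source): shallow_depth is set
 * by I/O schedulers that want to throttle some allocations (e.g. async
 * writes); when it is non-zero, sbitmap_queue_get_shallow() restricts how
 * much of the bitmap those allocations may consume, leaving headroom for
 * other requests.
 */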

unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
			      unsigned int *offset)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt = &tags->bitmap_tags;
	unsigned long ret;

	if (data->shallow_depth || data->flags & BLK_MQ_REQ_RESERVED ||
	    data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		return 0;
	ret = __sbitmap_queue_get_batch(bt, nr_tags, offset);
	*offset += tags->nr_reserved_tags;
	return ret;
}
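
/*
 * Editorial sketch (added; hypothetical caller, not part of this file): the
 * batch allocator above returns a bitmask of freshly allocated bits plus the
 * offset of bit 0, so a caller can recover the absolute tag values roughly
 * like this:
 *
 *	unsigned int offset;
 *	unsigned long mask = blk_mq_get_tags(data, nr, &offset);
 *	int i;
 *
 *	for_each_set_bit(i, &mask, BITS_PER_LONG)
 *		use_tag(offset + i);	(use_tag is a made-up helper)
 */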

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt;
	struct sbq_wait_state *ws;
	DEFINE_SBQ_WAIT(wait);
	unsigned int tag_offset;
	int tag;

	if (data->flags & BLK_MQ_REQ_RESERVED) {
		if (unlikely(!tags->nr_reserved_tags)) {
			WARN_ON_ONCE(1);
			return BLK_MQ_NO_TAG;
		}
		bt = &tags->breserved_tags;
		tag_offset = 0;
	} else {
		bt = &tags->bitmap_tags;
		tag_offset = tags->nr_reserved_tags;
	}

	tag = __blk_mq_get_tag(data, bt);
	if (tag != BLK_MQ_NO_TAG)
		goto found_tag;

	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return BLK_MQ_NO_TAG;

	ws = bt_wait_ptr(bt, data->hctx);
	do {
		struct sbitmap_queue *bt_prev;

		/*
		 * We're out of tags on this hardware queue; kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete.
		 */
		blk_mq_run_hw_queue(data->hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);

		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		bt_prev = bt;
		io_schedule();

		sbitmap_finish_wait(bt, ws, &wait);

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
						data->ctx);
		tags = blk_mq_tags_from_data(data);
		if (data->flags & BLK_MQ_REQ_RESERVED)
			bt = &tags->breserved_tags;
		else
			bt = &tags->bitmap_tags;

		/*
		 * If the destination hw queue has changed, issue a fake
		 * wake up on the previous queue to compensate for the
		 * missed wake up, so other allocations on the previous
		 * queue won't be starved.
		 */
		if (bt != bt_prev)
			sbitmap_queue_wake_up(bt_prev, 1);

		ws = bt_wait_ptr(bt, data->hctx);
	} while (1);

	sbitmap_finish_wait(bt, ws, &wait);

found_tag:
	/*
	 * Give up this allocation if the hctx is inactive.  The caller will
	 * retry on an active hctx.
	 */
	if (unlikely(test_bit(BLK_MQ_S_INACTIVE, &data->hctx->state))) {
		blk_mq_put_tag(tags, data->ctx, tag + tag_offset);
		return BLK_MQ_NO_TAG;
	}
	return tag + tag_offset;
}

void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
		    unsigned int tag)
{
	if (!blk_mq_tag_is_reserved(tags, tag)) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
	} else {
		sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
	}
}

void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags)
{
	sbitmap_queue_clear_batch(&tags->bitmap_tags, tags->nr_reserved_tags,
					tag_array, nr_tags);
}

struct bt_iter_data {
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	busy_tag_iter_fn *fn;
	void *data;
	bool reserved;
};

static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
		unsigned int bitnr)
{
	struct request *rq;
	unsigned long flags;

	spin_lock_irqsave(&tags->lock, flags);
	rq = tags->rqs[bitnr];
	if (!rq || rq->tag != bitnr || !req_ref_inc_not_zero(rq))
		rq = NULL;
	spin_unlock_irqrestore(&tags->lock, flags);
	return rq;
}

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_iter_data *iter_data = data;
	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
	struct request_queue *q = iter_data->q;
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_tags *tags;
	struct request *rq;
	bool ret = true;

	if (blk_mq_is_shared_tags(set->flags))
		tags = set->shared_tags;
	else
		tags = hctx->tags;

	if (!iter_data->reserved)
		bitnr += tags->nr_reserved_tags;
	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	rq = blk_mq_find_and_get_req(tags, bitnr);
	if (!rq)
		return true;

	if (rq->q == q && (!hctx || rq->mq_hctx == hctx))
		ret = iter_data->fn(rq, iter_data->data);
	blk_mq_put_rq_ref(rq);
	return ret;
}

/**
 * bt_for_each - iterate over the requests associated with a hardware queue
 * @hctx:	Hardware queue to examine.
 * @q:		Request queue to examine.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each request
 *		associated with @hctx that has been assigned a driver tag.
 *		@fn will be called as follows: @fn(rq, @data) where rq is a
 *		pointer to a request. Return true to continue iterating tags,
 *		false to stop.
 * @data:	Will be passed as second argument to @fn.
 * @reserved:	Indicates whether @bt is the breserved_tags member or the
 *		bitmap_tags member of struct blk_mq_tags.
 */
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct request_queue *q,
			struct sbitmap_queue *bt, busy_tag_iter_fn *fn,
			void *data, bool reserved)
{
	struct bt_iter_data iter_data = {
		.hctx = hctx,
		.fn = fn,
		.data = data,
		.reserved = reserved,
		.q = q,
	};

	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
	struct blk_mq_tags *tags;
	busy_tag_iter_fn *fn;
	void *data;
	unsigned int flags;
};

#define BT_TAG_ITER_RESERVED		(1 << 0)
#define BT_TAG_ITER_STARTED		(1 << 1)
#define BT_TAG_ITER_STATIC_RQS		(1 << 2)

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_tags_iter_data *iter_data = data;
	struct blk_mq_tags *tags = iter_data->tags;
	struct request *rq;
	bool ret = true;
	bool iter_static_rqs = !!(iter_data->flags & BT_TAG_ITER_STATIC_RQS);

	if (!(iter_data->flags & BT_TAG_ITER_RESERVED))
		bitnr += tags->nr_reserved_tags;

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	if (iter_static_rqs)
		rq = tags->static_rqs[bitnr];
	else
		rq = blk_mq_find_and_get_req(tags, bitnr);
	if (!rq)
		return true;

	if (!(iter_data->flags & BT_TAG_ITER_STARTED) ||
	    blk_mq_request_started(rq))
		ret = iter_data->fn(rq, iter_data->data);
	if (!iter_static_rqs)
		blk_mq_put_rq_ref(rq);
	return ret;
}

/**
 * bt_tags_for_each - iterate over the requests in a tag map
 * @tags:	Tag map to iterate over.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @data) where rq
 *		is a pointer to a request. Return true to continue iterating
 *		tags, false to stop.
 * @data:	Will be passed as second argument to @fn.
 * @flags:	BT_TAG_ITER_*
 */
static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
			     busy_tag_iter_fn *fn, void *data, unsigned int flags)
{
	struct bt_tags_iter_data iter_data = {
		.tags = tags,
		.fn = fn,
		.data = data,
		.flags = flags,
	};

	if (tags->rqs)
		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags,
		busy_tag_iter_fn *fn, void *priv, unsigned int flags)
{
	WARN_ON_ONCE(flags & BT_TAG_ITER_RESERVED);

	if (tags->nr_reserved_tags)
		bt_tags_for_each(tags, &tags->breserved_tags, fn, priv,
				 flags | BT_TAG_ITER_RESERVED);
	bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, flags);
}

/**
 * blk_mq_all_tag_iter - iterate over all requests in a tag map
 * @tags:	Tag map to iterate over.
 * @fn:		Pointer to the function that will be called for each
 *		request. @fn will be called as follows: @fn(rq, @priv) where rq
 *		is a pointer to a request. Return true to continue iterating
 *		tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * Caller has to pass the tag map from which requests are allocated.
 */
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv)
{
	__blk_mq_all_tag_iter(tags, fn, priv, BT_TAG_ITER_STATIC_RQS);
}

/**
 * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set
 * @tagset:	Tag set to iterate over.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @priv) where rq
 *		is a pointer to a request. Return true to continue iterating
 *		tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * We grab one request reference before calling @fn and release it after
 * @fn returns.
 */
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv)
{
	unsigned int flags = tagset->flags;
	int i, nr_tags;

	nr_tags = blk_mq_is_shared_tags(flags) ? 1 : tagset->nr_hw_queues;

	for (i = 0; i < nr_tags; i++) {
		if (tagset->tags && tagset->tags[i])
			__blk_mq_all_tag_iter(tagset->tags[i], fn, priv,
					      BT_TAG_ITER_STARTED);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
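
/*
 * Editorial sketch (added; hypothetical driver code, not part of this file):
 * a common use of blk_mq_tagset_busy_iter() is counting in-flight requests.
 * The callback matches the busy_tag_iter_fn signature used above:
 *
 *	static bool my_count_inflight(struct request *rq, void *data)
 *	{
 *		unsigned int *inflight = data;
 *
 *		(*inflight)++;
 *		return true;	(keep iterating)
 *	}
 *
 *	unsigned int inflight = 0;
 *
 *	blk_mq_tagset_busy_iter(&my_dev->tag_set, my_count_inflight, &inflight);
 */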

static bool blk_mq_tagset_count_completed_rqs(struct request *rq, void *data)
{
	unsigned *count = data;

	if (blk_mq_request_completed(rq))
		(*count)++;
	return true;
}

/**
 * blk_mq_tagset_wait_completed_request - Wait until all scheduled request
 * completions have finished.
 * @tagset:	Tag set to drain completed requests from.
 *
 * Note: This function has to be run after all IO queues are shut down.
 */
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset)
{
	while (true) {
		unsigned count = 0;

		blk_mq_tagset_busy_iter(tagset,
				blk_mq_tagset_count_completed_rqs, &count);
		if (!count)
			break;
		msleep(5);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request);

/**
 * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag
 * @q:		Request queue to examine.
 * @fn:		Pointer to the function that will be called for each request
 *		on @q. @fn will be called as follows: @fn(rq, @priv) where rq
 *		is a pointer to a request.
 * @priv:	Will be passed as second argument to @fn.
 *
 * Note: if @q->tag_set is shared with other request queues then @fn will be
 * called for all requests on all queues that share that tag set and not only
 * for requests associated with @q.
 */
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
		void *priv)
{
	/*
	 * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and hctx_table
	 * while the queue is frozen. So we can use q_usage_counter to avoid
	 * racing with it.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
		struct blk_mq_tags *tags = q->tag_set->shared_tags;
		struct sbitmap_queue *bresv = &tags->breserved_tags;
		struct sbitmap_queue *btags = &tags->bitmap_tags;

		if (tags->nr_reserved_tags)
			bt_for_each(NULL, q, bresv, fn, priv, true);
		bt_for_each(NULL, q, btags, fn, priv, false);
	} else {
		struct blk_mq_hw_ctx *hctx;
		unsigned long i;

		queue_for_each_hw_ctx(q, hctx, i) {
			struct blk_mq_tags *tags = hctx->tags;
			struct sbitmap_queue *bresv = &tags->breserved_tags;
			struct sbitmap_queue *btags = &tags->bitmap_tags;

			/*
			 * If no software queues are currently mapped to this
			 * hardware queue, there's nothing to check.
			 */
			if (!blk_mq_hw_queue_mapped(hctx))
				continue;

			if (tags->nr_reserved_tags)
				bt_for_each(hctx, q, bresv, fn, priv, true);
			bt_for_each(hctx, q, btags, fn, priv, false);
		}
	}
	blk_queue_exit(q);
}

static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
		    bool round_robin, int node)
{
	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
				       node);
}

int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
			struct sbitmap_queue *breserved_tags,
			unsigned int queue_depth, unsigned int reserved,
			int node, int alloc_policy)
{
	unsigned int depth = queue_depth - reserved;
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

	if (bt_alloc(bitmap_tags, depth, round_robin, node))
		return -ENOMEM;
	if (bt_alloc(breserved_tags, reserved, round_robin, node))
		goto free_bitmap_tags;

	return 0;

free_bitmap_tags:
	sbitmap_queue_free(bitmap_tags);
	return -ENOMEM;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, int alloc_policy)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;
	spin_lock_init(&tags->lock);

	if (blk_mq_init_bitmaps(&tags->bitmap_tags, &tags->breserved_tags,
				total_tags, reserved_tags, node,
				alloc_policy) < 0) {
		kfree(tags);
		return NULL;
	}
	return tags;
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	sbitmap_queue_free(&tags->bitmap_tags);
	sbitmap_queue_free(&tags->breserved_tags);
	kfree(tags);
}

int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
			    struct blk_mq_tags **tagsptr, unsigned int tdepth,
			    bool can_grow)
{
	struct blk_mq_tags *tags = *tagsptr;

	if (tdepth <= tags->nr_reserved_tags)
		return -EINVAL;

	/*
	 * If we are allowed to grow beyond the original size, allocate
	 * a new set of tags before freeing the old one.
	 */
	if (tdepth > tags->nr_tags) {
		struct blk_mq_tag_set *set = hctx->queue->tag_set;
		struct blk_mq_tags *new;

		if (!can_grow)
			return -EINVAL;

		/*
		 * We need some sort of upper limit; set it high enough that
		 * no valid use case should require more.
		 */
		if (tdepth > MAX_SCHED_RQ)
			return -EINVAL;

		/*
		 * Only the sbitmap needs resizing since we allocated the max
		 * initially.
		 */
		if (blk_mq_is_shared_tags(set->flags))
			return 0;

		new = blk_mq_alloc_map_and_rqs(set, hctx->queue_num, tdepth);
		if (!new)
			return -ENOMEM;

		blk_mq_free_map_and_rqs(set, *tagsptr, hctx->queue_num);
		*tagsptr = new;
	} else {
		/*
		 * We don't need to (and can't) update reserved tags here;
		 * they remain static and should never need resizing.
		 */
		sbitmap_queue_resize(&tags->bitmap_tags,
				tdepth - tags->nr_reserved_tags);
	}

	return 0;
}

void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set, unsigned int size)
{
	struct blk_mq_tags *tags = set->shared_tags;

	sbitmap_queue_resize(&tags->bitmap_tags, size - set->reserved_tags);
}

void blk_mq_tag_update_sched_shared_tags(struct request_queue *q)
{
	sbitmap_queue_resize(&q->sched_shared_tags->bitmap_tags,
			     q->nr_requests - q->tag_set->reserved_tags);
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function, which returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
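
/*
 * Editorial note (added; not part of the original file): the encoding above
 * can be undone with the helpers declared in include/linux/blk-mq.h, e.g.:
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);	(upper bits)
 *	u16 tag = blk_mq_unique_tag_to_tag(unique);	(lower bits)
 *
 * SCSI LLDs commonly use these to map a completed request back to a
 * per-hardware-queue context.
 */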
679