xref: /openbmc/linux/block/blk-mq-tag.c (revision 6e10e219)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Tag allocation using scalable bitmaps. Uses active queue tracking to support
4  * fairer distribution of tags between multiple submitters when a shared tag map
5  * is used.
6  *
7  * Copyright (C) 2013-2014 Jens Axboe
8  */
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 
12 #include <linux/blk-mq.h>
13 #include <linux/delay.h>
14 #include "blk.h"
15 #include "blk-mq.h"
16 #include "blk-mq-sched.h"
17 #include "blk-mq-tag.h"
18 
19 /*
20  * If a previously inactive queue goes active, bump the active user count.
21  * We need to do this before trying to allocate a driver tag, so that even
22  * if the first attempt to get a tag fails, the other shared-tag users can
23  * still reserve budget for it.
24  */
25 bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
26 {
27 	if (blk_mq_is_shared_tags(hctx->flags)) {
28 		struct request_queue *q = hctx->queue;
29 
30 		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) &&
31 		    !test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
32 			atomic_inc(&hctx->tags->active_queues);
33 	} else {
34 		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
35 		    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
36 			atomic_inc(&hctx->tags->active_queues);
37 	}
38 
39 	return true;
40 }
41 
42 /*
43  * Wake up all tasks that are potentially sleeping on tags
44  */
45 void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
46 {
47 	sbitmap_queue_wake_all(&tags->bitmap_tags);
48 	if (include_reserve)
49 		sbitmap_queue_wake_all(&tags->breserved_tags);
50 }
51 
52 /*
53  * If a previously busy queue goes inactive, potential waiters could now
54  * be allowed to queue. Wake them up and check.
55  */
56 void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
57 {
58 	struct blk_mq_tags *tags = hctx->tags;
59 
60 	if (blk_mq_is_shared_tags(hctx->flags)) {
61 		struct request_queue *q = hctx->queue;
62 
63 		if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE,
64 					&q->queue_flags))
65 			return;
66 	} else {
67 		if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
68 			return;
69 	}
70 
71 	atomic_dec(&tags->active_queues);
72 
73 	blk_mq_tag_wakeup_all(tags, false);
74 }
75 
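/*
 * Fetch a free tag from @bt. Unless the request is reserved or the queue has
 * an elevator attached, hctx_may_queue() first enforces this hctx's fair
 * share of a shared tag map; a non-zero ->shallow_depth additionally limits
 * how deep into the bitmap the allocation may go (e.g. when an I/O scheduler
 * restricts the usable depth).
 */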
76 static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
77 			    struct sbitmap_queue *bt)
78 {
79 	if (!data->q->elevator && !(data->flags & BLK_MQ_REQ_RESERVED) &&
80 			!hctx_may_queue(data->hctx, bt))
81 		return BLK_MQ_NO_TAG;
82 
83 	if (data->shallow_depth)
84 		return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
85 	else
86 		return __sbitmap_queue_get(bt);
87 }
88 
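/*
 * Batched tag allocation for batch submission. Only attempted in the simple
 * case (no shallow-depth limit, no reserved tag requested, no shared tag
 * map); otherwise return 0 so the caller falls back to allocating tags one
 * at a time. On success the return value is a bitmask of the allocated tags
 * and *offset is the first tag number, adjusted past the reserved range.
 */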
89 unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
90 			      unsigned int *offset)
91 {
92 	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
93 	struct sbitmap_queue *bt = &tags->bitmap_tags;
94 	unsigned long ret;
95 
96 	if (data->shallow_depth || data->flags & BLK_MQ_REQ_RESERVED ||
97 	    data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
98 		return 0;
99 	ret = __sbitmap_queue_get_batch(bt, nr_tags, offset);
100 	*offset += tags->nr_reserved_tags;
101 	return ret;
102 }
103 
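/*
 * Allocate a single driver tag. Reserved allocations come from
 * breserved_tags, everything else from bitmap_tags. If no tag is available
 * and BLK_MQ_REQ_NOWAIT is not set, kick the hardware queue and sleep on the
 * sbitmap waitqueue until a tag is freed, re-resolving the ctx/hctx after
 * each sleep in case the task woke up on another CPU. Returns a tag number
 * in the combined tag space, or BLK_MQ_NO_TAG on failure.
 */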
104 unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
105 {
106 	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
107 	struct sbitmap_queue *bt;
108 	struct sbq_wait_state *ws;
109 	DEFINE_SBQ_WAIT(wait);
110 	unsigned int tag_offset;
111 	int tag;
112 
113 	if (data->flags & BLK_MQ_REQ_RESERVED) {
114 		if (unlikely(!tags->nr_reserved_tags)) {
115 			WARN_ON_ONCE(1);
116 			return BLK_MQ_NO_TAG;
117 		}
118 		bt = &tags->breserved_tags;
119 		tag_offset = 0;
120 	} else {
121 		bt = &tags->bitmap_tags;
122 		tag_offset = tags->nr_reserved_tags;
123 	}
124 
125 	tag = __blk_mq_get_tag(data, bt);
126 	if (tag != BLK_MQ_NO_TAG)
127 		goto found_tag;
128 
129 	if (data->flags & BLK_MQ_REQ_NOWAIT)
130 		return BLK_MQ_NO_TAG;
131 
132 	ws = bt_wait_ptr(bt, data->hctx);
133 	do {
134 		struct sbitmap_queue *bt_prev;
135 
136 		/*
137 		 * We're out of tags on this hardware queue; kick any
138 		 * pending IO submissions before going to sleep waiting for
139 		 * some to complete.
140 		 */
141 		blk_mq_run_hw_queue(data->hctx, false);
142 
143 		/*
144 		 * Retry tag allocation after running the hardware queue,
145 		 * as running the queue may also have found completions.
146 		 */
147 		tag = __blk_mq_get_tag(data, bt);
148 		if (tag != BLK_MQ_NO_TAG)
149 			break;
150 
151 		sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);
152 
153 		tag = __blk_mq_get_tag(data, bt);
154 		if (tag != BLK_MQ_NO_TAG)
155 			break;
156 
157 		bt_prev = bt;
158 		io_schedule();
159 
160 		sbitmap_finish_wait(bt, ws, &wait);
161 
162 		data->ctx = blk_mq_get_ctx(data->q);
163 		data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
164 						data->ctx);
165 		tags = blk_mq_tags_from_data(data);
166 		if (data->flags & BLK_MQ_REQ_RESERVED)
167 			bt = &tags->breserved_tags;
168 		else
169 			bt = &tags->bitmap_tags;
170 
171 		/*
172 		 * If the destination hw queue has changed, issue a fake wake up
173 		 * on the previous queue to compensate for the missed wakeup, so
174 		 * that other allocations on the previous queue won't be starved.
175 		 */
176 		if (bt != bt_prev)
177 			sbitmap_queue_wake_up(bt_prev);
178 
179 		ws = bt_wait_ptr(bt, data->hctx);
180 	} while (1);
181 
182 	sbitmap_finish_wait(bt, ws, &wait);
183 
184 found_tag:
185 	/*
186 	 * Give up this allocation if the hctx is inactive.  The caller will
187 	 * retry on an active hctx.
188 	 */
189 	if (unlikely(test_bit(BLK_MQ_S_INACTIVE, &data->hctx->state))) {
190 		blk_mq_put_tag(tags, data->ctx, tag + tag_offset);
191 		return BLK_MQ_NO_TAG;
192 	}
193 	return tag + tag_offset;
194 }
195 
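/*
 * Return a tag to the bitmap it was allocated from. Regular tags are stored
 * at an offset of nr_reserved_tags within bitmap_tags, while reserved tags
 * index breserved_tags directly; @ctx->cpu updates the per-CPU allocation
 * hint.
 */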
196 void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
197 		    unsigned int tag)
198 {
199 	if (!blk_mq_tag_is_reserved(tags, tag)) {
200 		const int real_tag = tag - tags->nr_reserved_tags;
201 
202 		BUG_ON(real_tag >= tags->nr_tags);
203 		sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
204 	} else {
205 		BUG_ON(tag >= tags->nr_reserved_tags);
206 		sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
207 	}
208 }
209 
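/* Batch variant of blk_mq_put_tag() for regular (non-reserved) tags. */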
210 void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags)
211 {
212 	sbitmap_queue_clear_batch(&tags->bitmap_tags, tags->nr_reserved_tags,
213 					tag_array, nr_tags);
214 }
215 
216 struct bt_iter_data {
217 	struct blk_mq_hw_ctx *hctx;
218 	struct request_queue *q;
219 	busy_tag_iter_fn *fn;
220 	void *data;
221 	bool reserved;
222 };
223 
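/*
 * Look up the request that currently owns @bitnr and take a reference on it.
 * tags->lock serializes the lookup against ->rqs[] being updated while the
 * request pool is freed, and the reference keeps the request alive while an
 * iterator inspects it. Returns NULL if the slot is empty, stale, or the
 * request is already being freed.
 */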
224 static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
225 		unsigned int bitnr)
226 {
227 	struct request *rq;
228 	unsigned long flags;
229 
230 	spin_lock_irqsave(&tags->lock, flags);
231 	rq = tags->rqs[bitnr];
232 	if (!rq || rq->tag != bitnr || !req_ref_inc_not_zero(rq))
233 		rq = NULL;
234 	spin_unlock_irqrestore(&tags->lock, flags);
235 	return rq;
236 }
237 
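/*
 * Per-bit callback for bt_for_each(). Resolves the bit number to a request
 * (taking a reference on it), skips requests that belong to a different
 * request queue or hardware context, and passes the rest to the caller's @fn.
 */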
238 static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
239 {
240 	struct bt_iter_data *iter_data = data;
241 	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
242 	struct request_queue *q = iter_data->q;
243 	struct blk_mq_tag_set *set = q->tag_set;
244 	bool reserved = iter_data->reserved;
245 	struct blk_mq_tags *tags;
246 	struct request *rq;
247 	bool ret = true;
248 
249 	if (blk_mq_is_shared_tags(set->flags))
250 		tags = set->shared_tags;
251 	else
252 		tags = hctx->tags;
253 
254 	if (!reserved)
255 		bitnr += tags->nr_reserved_tags;
256 	/*
257 	 * We can hit rq == NULL here, because the tagging functions
258 	 * test and set the bit before assigning ->rqs[].
259 	 */
260 	rq = blk_mq_find_and_get_req(tags, bitnr);
261 	if (!rq)
262 		return true;
263 
264 	if (rq->q == q && (!hctx || rq->mq_hctx == hctx))
265 		ret = iter_data->fn(rq, iter_data->data, reserved);
266 	blk_mq_put_rq_ref(rq);
267 	return ret;
268 }
269 
270 /**
271  * bt_for_each - iterate over the requests associated with a hardware queue
272  * @hctx:	Hardware queue to examine.
273  * @q:		Request queue to examine.
274  * @bt:		sbitmap to examine. This is either the breserved_tags member
275  *		or the bitmap_tags member of struct blk_mq_tags.
276  * @fn:		Pointer to the function that will be called for each request
277  *		associated with @hctx that has been assigned a driver tag.
278  *		@fn will be called as follows: @fn(rq, @data, @reserved)
279  *		where rq is a pointer to a request. Return true to continue
280  *		iterating tags, false to stop.
281  * @data:	Will be passed as second argument to @fn.
282  * @reserved:	Indicates whether @bt is the breserved_tags member or the
283  *		bitmap_tags member of struct blk_mq_tags.
284  */
285 static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct request_queue *q,
286 			struct sbitmap_queue *bt, busy_tag_iter_fn *fn,
287 			void *data, bool reserved)
288 {
289 	struct bt_iter_data iter_data = {
290 		.hctx = hctx,
291 		.fn = fn,
292 		.data = data,
293 		.reserved = reserved,
294 		.q = q,
295 	};
296 
297 	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
298 }
299 
300 struct bt_tags_iter_data {
301 	struct blk_mq_tags *tags;
302 	busy_tag_iter_fn *fn;
303 	void *data;
304 	unsigned int flags;
305 };
306 
307 #define BT_TAG_ITER_RESERVED		(1 << 0)
308 #define BT_TAG_ITER_STARTED		(1 << 1)
309 #define BT_TAG_ITER_STATIC_RQS		(1 << 2)
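/*
 * BT_TAG_ITER_RESERVED:   iterating the reserved bitmap, so bit numbers are
 *                         not offset by nr_reserved_tags.
 * BT_TAG_ITER_STARTED:    only invoke @fn for requests that have been started.
 * BT_TAG_ITER_STATIC_RQS: walk tags->static_rqs[] (every allocated request)
 *                         instead of tags->rqs[] (requests holding a driver
 *                         tag), without taking request references.
 */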
310 
311 static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
312 {
313 	struct bt_tags_iter_data *iter_data = data;
314 	struct blk_mq_tags *tags = iter_data->tags;
315 	bool reserved = iter_data->flags & BT_TAG_ITER_RESERVED;
316 	struct request *rq;
317 	bool ret = true;
318 	bool iter_static_rqs = !!(iter_data->flags & BT_TAG_ITER_STATIC_RQS);
319 
320 	if (!reserved)
321 		bitnr += tags->nr_reserved_tags;
322 
323 	/*
324 	 * We can hit rq == NULL here, because the tagging functions
325 	 * test and set the bit before assigning ->rqs[].
326 	 */
327 	if (iter_static_rqs)
328 		rq = tags->static_rqs[bitnr];
329 	else
330 		rq = blk_mq_find_and_get_req(tags, bitnr);
331 	if (!rq)
332 		return true;
333 
334 	if (!(iter_data->flags & BT_TAG_ITER_STARTED) ||
335 	    blk_mq_request_started(rq))
336 		ret = iter_data->fn(rq, iter_data->data, reserved);
337 	if (!iter_static_rqs)
338 		blk_mq_put_rq_ref(rq);
339 	return ret;
340 }
341 
342 /**
343  * bt_tags_for_each - iterate over the requests in a tag map
344  * @tags:	Tag map to iterate over.
345  * @bt:		sbitmap to examine. This is either the breserved_tags member
346  *		or the bitmap_tags member of struct blk_mq_tags.
347  * @fn:		Pointer to the function that will be called for each started
348  *		request. @fn will be called as follows: @fn(rq, @data,
349  *		@reserved) where rq is a pointer to a request. Return true
350  *		to continue iterating tags, false to stop.
351  * @data:	Will be passed as second argument to @fn.
352  * @flags:	BT_TAG_ITER_*
353  */
354 static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
355 			     busy_tag_iter_fn *fn, void *data, unsigned int flags)
356 {
357 	struct bt_tags_iter_data iter_data = {
358 		.tags = tags,
359 		.fn = fn,
360 		.data = data,
361 		.flags = flags,
362 	};
363 
364 	if (tags->rqs)
365 		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
366 }
367 
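/*
 * Iterate over both the reserved and the regular bitmap of @tags, calling
 * @fn on each request found. @flags must not include BT_TAG_ITER_RESERVED;
 * it is added internally for the reserved pass.
 */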
368 static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags,
369 		busy_tag_iter_fn *fn, void *priv, unsigned int flags)
370 {
371 	WARN_ON_ONCE(flags & BT_TAG_ITER_RESERVED);
372 
373 	if (tags->nr_reserved_tags)
374 		bt_tags_for_each(tags, &tags->breserved_tags, fn, priv,
375 				 flags | BT_TAG_ITER_RESERVED);
376 	bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, flags);
377 }
378 
379 /**
380  * blk_mq_all_tag_iter - iterate over all requests in a tag map
381  * @tags:	Tag map to iterate over.
382  * @fn:		Pointer to the function that will be called for each
383  *		request. @fn will be called as follows: @fn(rq, @priv,
384  *		reserved) where rq is a pointer to a request. 'reserved'
385  *		indicates whether or not @rq is a reserved request. Return
386  *		true to continue iterating tags, false to stop.
387  * @priv:	Will be passed as second argument to @fn.
388  *
389  * Caller has to pass the tag map from which requests are allocated.
390  */
391 void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
392 		void *priv)
393 {
394 	__blk_mq_all_tag_iter(tags, fn, priv, BT_TAG_ITER_STATIC_RQS);
395 }
396 
397 /**
398  * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set
399  * @tagset:	Tag set to iterate over.
400  * @fn:		Pointer to the function that will be called for each started
401  *		request. @fn will be called as follows: @fn(rq, @priv,
402  *		reserved) where rq is a pointer to a request. 'reserved'
403  *		indicates whether or not @rq is a reserved request. Return
404  *		true to continue iterating tags, false to stop.
405  * @priv:	Will be passed as second argument to @fn.
406  *
407  * We grab one request reference before calling @fn and release it after
408  * @fn returns.
409  */
410 void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
411 		busy_tag_iter_fn *fn, void *priv)
412 {
413 	unsigned int flags = tagset->flags;
414 	int i, nr_tags;
415 
416 	nr_tags = blk_mq_is_shared_tags(flags) ? 1 : tagset->nr_hw_queues;
417 
418 	for (i = 0; i < nr_tags; i++) {
419 		if (tagset->tags && tagset->tags[i])
420 			__blk_mq_all_tag_iter(tagset->tags[i], fn, priv,
421 					      BT_TAG_ITER_STARTED);
422 	}
423 }
424 EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
425 
426 static bool blk_mq_tagset_count_completed_rqs(struct request *rq,
427 		void *data, bool reserved)
428 {
429 	unsigned *count = data;
430 
431 	if (blk_mq_request_completed(rq))
432 		(*count)++;
433 	return true;
434 }
435 
436 /**
437  * blk_mq_tagset_wait_completed_request - Wait until all scheduled request
438  * completions have finished.
439  * @tagset:	Tag set to drain completed requests from
440  *
441  * Note: This function has to be run after all IO queues are shut down
442  */
443 void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset)
444 {
445 	while (true) {
446 		unsigned count = 0;
447 
448 		blk_mq_tagset_busy_iter(tagset,
449 				blk_mq_tagset_count_completed_rqs, &count);
450 		if (!count)
451 			break;
452 		msleep(5);
453 	}
454 }
455 EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request);
456 
457 /**
458  * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag
459  * @q:		Request queue to examine.
460  * @fn:		Pointer to the function that will be called for each request
461  *		on @q. @fn will be called as follows: @fn(rq, @priv,
462  *		reserved) where rq is a pointer to a request. 'reserved'
463  *		indicates whether or not @rq is a reserved request. Return
464  *		true to continue iterating tags, false to stop.
465  * @priv:	Will be passed as second argument to @fn.
466  *
467  * Note: if @q->tag_set is shared with other request queues then @fn will be
468  * called for all requests on all queues that share that tag set and not only
469  * for requests associated with @q.
470  */
471 void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
472 		void *priv)
473 {
474 	/*
475 	 * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and queue_hw_ctx
476 	 * while the queue is frozen. So we can use q_usage_counter to avoid
477 	 * racing with it.
478 	 */
479 	if (!percpu_ref_tryget(&q->q_usage_counter))
480 		return;
481 
482 	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
483 		struct blk_mq_tags *tags = q->tag_set->shared_tags;
484 		struct sbitmap_queue *bresv = &tags->breserved_tags;
485 		struct sbitmap_queue *btags = &tags->bitmap_tags;
486 
487 		if (tags->nr_reserved_tags)
488 			bt_for_each(NULL, q, bresv, fn, priv, true);
489 		bt_for_each(NULL, q, btags, fn, priv, false);
490 	} else {
491 		struct blk_mq_hw_ctx *hctx;
492 		int i;
493 
494 		queue_for_each_hw_ctx(q, hctx, i) {
495 			struct blk_mq_tags *tags = hctx->tags;
496 			struct sbitmap_queue *bresv = &tags->breserved_tags;
497 			struct sbitmap_queue *btags = &tags->bitmap_tags;
498 
499 			/*
500 			 * If no software queues are currently mapped to this
501 			 * hardware queue, there's nothing to check
502 			 */
503 			if (!blk_mq_hw_queue_mapped(hctx))
504 				continue;
505 
506 			if (tags->nr_reserved_tags)
507 				bt_for_each(hctx, q, bresv, fn, priv, true);
508 			bt_for_each(hctx, q, btags, fn, priv, false);
509 		}
510 	}
511 	blk_queue_exit(q);
512 }
513 
514 static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
515 		    bool round_robin, int node)
516 {
517 	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
518 				       node);
519 }
520 
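/*
 * Initialize the regular and reserved sbitmaps of a tag map. The regular
 * bitmap covers queue_depth - reserved tags; BLK_TAG_ALLOC_RR selects strict
 * round-robin tag allocation instead of the default hint-based allocation.
 */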
521 int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
522 			struct sbitmap_queue *breserved_tags,
523 			unsigned int queue_depth, unsigned int reserved,
524 			int node, int alloc_policy)
525 {
526 	unsigned int depth = queue_depth - reserved;
527 	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;
528 
529 	if (bt_alloc(bitmap_tags, depth, round_robin, node))
530 		return -ENOMEM;
531 	if (bt_alloc(breserved_tags, reserved, round_robin, node))
532 		goto free_bitmap_tags;
533 
534 	return 0;
535 
536 free_bitmap_tags:
537 	sbitmap_queue_free(bitmap_tags);
538 	return -ENOMEM;
539 }
540 
541 struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
542 				     unsigned int reserved_tags,
543 				     int node, int alloc_policy)
544 {
545 	struct blk_mq_tags *tags;
546 
547 	if (total_tags > BLK_MQ_TAG_MAX) {
548 		pr_err("blk-mq: tag depth too large\n");
549 		return NULL;
550 	}
551 
552 	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
553 	if (!tags)
554 		return NULL;
555 
556 	tags->nr_tags = total_tags;
557 	tags->nr_reserved_tags = reserved_tags;
558 	spin_lock_init(&tags->lock);
559 
560 	if (blk_mq_init_bitmaps(&tags->bitmap_tags, &tags->breserved_tags,
561 				total_tags, reserved_tags, node,
562 				alloc_policy) < 0) {
563 		kfree(tags);
564 		return NULL;
565 	}
566 	return tags;
567 }
568 
569 void blk_mq_free_tags(struct blk_mq_tags *tags)
570 {
571 	sbitmap_queue_free(&tags->bitmap_tags);
572 	sbitmap_queue_free(&tags->breserved_tags);
573 	kfree(tags);
574 }
575 
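/*
 * Adjust the depth of a tag map. Shrinking, or growing within the originally
 * allocated size, only resizes the regular sbitmap. Growing past the original
 * size requires @can_grow, is capped at MAX_SCHED_RQ, and replaces the request
 * pool; shared tag maps are allocated at full size up front and need no
 * reallocation here. Returns 0 on success or a negative errno.
 */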
576 int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
577 			    struct blk_mq_tags **tagsptr, unsigned int tdepth,
578 			    bool can_grow)
579 {
580 	struct blk_mq_tags *tags = *tagsptr;
581 
582 	if (tdepth <= tags->nr_reserved_tags)
583 		return -EINVAL;
584 
585 	/*
586 	 * If we are allowed to grow beyond the original size, allocate
587 	 * a new set of tags before freeing the old one.
588 	 */
589 	if (tdepth > tags->nr_tags) {
590 		struct blk_mq_tag_set *set = hctx->queue->tag_set;
591 		struct blk_mq_tags *new;
592 
593 		if (!can_grow)
594 			return -EINVAL;
595 
596 		/*
597 		 * We need some sort of upper limit; set it high enough that
598 		 * no valid use case should require more.
599 		 */
600 		if (tdepth > MAX_SCHED_RQ)
601 			return -EINVAL;
602 
603 		/*
604 		 * Only the sbitmap needs resizing since we allocated the max
605 		 * initially.
606 		 */
607 		if (blk_mq_is_shared_tags(set->flags))
608 			return 0;
609 
610 		new = blk_mq_alloc_map_and_rqs(set, hctx->queue_num, tdepth);
611 		if (!new)
612 			return -ENOMEM;
613 
614 		blk_mq_free_map_and_rqs(set, *tagsptr, hctx->queue_num);
615 		*tagsptr = new;
616 	} else {
617 		/*
618 		 * We don't need to (and can't) update reserved tags here; they
619 		 * remain static and should never need resizing.
620 		 */
621 		sbitmap_queue_resize(&tags->bitmap_tags,
622 				tdepth - tags->nr_reserved_tags);
623 	}
624 
625 	return 0;
626 }
627 
628 void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set, unsigned int size)
629 {
630 	struct blk_mq_tags *tags = set->shared_tags;
631 
632 	sbitmap_queue_resize(&tags->bitmap_tags, size - set->reserved_tags);
633 }
634 
635 void blk_mq_tag_update_sched_shared_tags(struct request_queue *q)
636 {
637 	sbitmap_queue_resize(&q->sched_shared_tags->bitmap_tags,
638 			     q->nr_requests - q->tag_set->reserved_tags);
639 }
640 
641 /**
642  * blk_mq_unique_tag() - return a tag that is unique queue-wide
643  * @rq: request for which to compute a unique tag
644  *
645  * The tag field in struct request is unique per hardware queue but not over
646  * all hardware queues. Hence this function, which returns a tag with the
647  * hardware context index in the upper bits and the per-hardware-queue tag
648  * in the lower bits.
649  *
650  * Note: When called for a request that is queued on a non-multiqueue request
651  * queue, the hardware context index is set to zero.
652  */
653 u32 blk_mq_unique_tag(struct request *rq)
654 {
655 	return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
656 		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
657 }
658 EXPORT_SYMBOL(blk_mq_unique_tag);
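/*
 * Example (illustrative only): a driver can split the cookie back into its
 * parts with the helpers from <linux/blk-mq.h>:
 *
 *	u32 cookie = blk_mq_unique_tag(rq);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(cookie);
 *	u16 tag = blk_mq_unique_tag_to_tag(cookie);
 */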
659