Lines Matching +full:input +full:-depth (selected lines from the Linux kernel's block/blk-mq.h)
1 /* SPDX-License-Identifier: GPL-2.0 */
5 #include <linux/blk-mq.h>
6 #include "blk-stat.h"
16 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
34 BLK_MQ_NO_TAG = -1U,
36 BLK_MQ_TAG_MAX = BLK_MQ_NO_TAG - 1,
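The two constants above build the "no tag" sentinel out of unsigned wrap-around: -1 converted to unsigned int is UINT_MAX, so BLK_MQ_NO_TAG can never collide with a real tag and BLK_MQ_TAG_MAX sits one below it. A standalone sketch of just that arithmetic, in plain userspace C rather than kernel code (the NO_TAG/TAG_MAX names are made up here):

#include <limits.h>
#include <stdio.h>

/* Userspace model of the sentinel arithmetic behind BLK_MQ_NO_TAG. */
#define NO_TAG	(-1U)		/* -1 as unsigned int == UINT_MAX */
#define TAG_MAX	(NO_TAG - 1)	/* highest value that is not the sentinel */

int main(void)
{
	printf("NO_TAG  = %u (UINT_MAX = %u)\n", NO_TAG, UINT_MAX);
	printf("TAG_MAX = %u\n", TAG_MAX);
	return 0;
}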
62 unsigned int hctx_idx, unsigned int depth);
68 * CPU -> queue mappings
73 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
82 return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]); in blk_mq_map_queue_type()
100 * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
109 return ctx->hctxs[blk_mq_get_hctx_type(opf)]; in blk_mq_map_queue()
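Taken together, the two mapping helpers above resolve a submitting CPU to a hardware queue in two steps: the command flags pick an hctx type (default, read or poll), and that type's per-CPU table mq_map[] yields the hardware-queue index. Below is a minimal userspace sketch of that two-level lookup; struct queue_map, map_queue_type() and the 4-CPU, 2-queue layout are invented for illustration and are not the kernel API.

#include <stdio.h>

#define NR_CPUS 4

enum hctx_type { TYPE_DEFAULT, TYPE_READ, TYPE_POLL, NR_TYPES };

/* One cpu -> hardware-queue-index table per hctx type. */
struct queue_map {
	unsigned int mq_map[NR_CPUS];
};

static unsigned int map_queue_type(const struct queue_map maps[NR_TYPES],
				   enum hctx_type type, unsigned int cpu)
{
	return maps[type].mq_map[cpu];
}

int main(void)
{
	struct queue_map maps[NR_TYPES] = {
		[TYPE_DEFAULT]	= { .mq_map = { 0, 0, 1, 1 } },	/* 4 CPUs, 2 hw queues */
		[TYPE_READ]	= { .mq_map = { 0, 0, 1, 1 } },
		[TYPE_POLL]	= { .mq_map = { 0, 0, 0, 0 } },	/* single poll queue */
	};

	printf("cpu 2, default -> hctx %u\n", map_queue_type(maps, TYPE_DEFAULT, 2));
	printf("cpu 3, poll    -> hctx %u\n", map_queue_type(maps, TYPE_POLL, 3));
	return 0;
}

blk_mq_map_queue_type() performs the same lookup against q->tag_set->map[type].mq_map[cpu] and then uses the result to index the queue's hctx_table, as the xa_load() line above shows.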
132 return per_cpu_ptr(q->queue_ctx, cpu); in __blk_mq_get_ctx()
136 * This assumes per-cpu software queueing queues. They could be per-node
137 * as well, for instance. For now this is hardcoded as-is. Note that we don't
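The comment above explains why __blk_mq_get_ctx() is so cheap: there is one software context per possible CPU, so the lookup is just an index by CPU number and takes the CPU as an explicit parameter rather than depending on where the caller happens to run. A small userspace stand-in for that lookup (struct sw_ctx and struct queue are invented for the sketch; the kernel uses a real per-cpu allocation accessed with per_cpu_ptr()):

#include <stdio.h>

#define NR_CPUS 4

/* Stand-in for struct blk_mq_ctx: one instance per possible CPU. */
struct sw_ctx {
	unsigned int cpu;
};

struct queue {
	struct sw_ctx queue_ctx[NR_CPUS];	/* models the per-cpu allocation */
};

/* Models per_cpu_ptr(q->queue_ctx, cpu): a plain index by CPU number. */
static struct sw_ctx *get_ctx(struct queue *q, unsigned int cpu)
{
	return &q->queue_ctx[cpu];
}

int main(void)
{
	struct queue q;

	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
		q.queue_ctx[cpu].cpu = cpu;

	printf("ctx for cpu 3 belongs to cpu %u\n", get_ctx(&q, 3)->cpu);
	return 0;
}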
147 /* input parameter */
158 /* input & output parameter */
177 struct blk_mq_tags **tags, unsigned int depth, bool can_grow);
192 return &bt->ws[0]; in bt_wait_ptr()
193 return sbq_wait_ptr(bt, &hctx->wait_index); in bt_wait_ptr()
201 if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) in blk_mq_tag_busy()
207 if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) in blk_mq_tag_idle()
214 return tag < tags->nr_reserved_tags; in blk_mq_tag_is_reserved()
224 if (data->rq_flags & RQF_SCHED_TAGS) in blk_mq_tags_from_data()
225 return data->hctx->sched_tags; in blk_mq_tags_from_data()
226 return data->hctx->tags; in blk_mq_tags_from_data()
232 if (likely(!test_bit(BLK_MQ_S_STOPPED, &hctx->state))) in blk_mq_hctx_stopped()
244 return test_bit(BLK_MQ_S_STOPPED, &hctx->state); in blk_mq_hctx_stopped()
249 return hctx->nr_ctx && hctx->tags; in blk_mq_hw_queue_mapped()
260 if (q->mq_ops->put_budget) in blk_mq_put_dispatch_budget()
261 q->mq_ops->put_budget(q, budget_token); in blk_mq_put_dispatch_budget()
266 if (q->mq_ops->get_budget) in blk_mq_get_dispatch_budget()
267 return q->mq_ops->get_budget(q); in blk_mq_get_dispatch_budget()
276 if (rq->q->mq_ops->set_rq_budget_token) in blk_mq_set_rq_budget_token()
277 rq->q->mq_ops->set_rq_budget_token(rq, token); in blk_mq_set_rq_budget_token()
282 if (rq->q->mq_ops->get_rq_budget_token) in blk_mq_get_rq_budget_token()
283 return rq->q->mq_ops->get_rq_budget_token(rq); in blk_mq_get_rq_budget_token()
284 return -1; in blk_mq_get_rq_budget_token()
289 if (blk_mq_is_shared_tags(hctx->flags)) in __blk_mq_inc_active_requests()
290 atomic_inc(&hctx->queue->nr_active_requests_shared_tags); in __blk_mq_inc_active_requests()
292 atomic_inc(&hctx->nr_active); in __blk_mq_inc_active_requests()
298 if (blk_mq_is_shared_tags(hctx->flags)) in __blk_mq_sub_active_requests()
299 atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags); in __blk_mq_sub_active_requests()
301 atomic_sub(val, &hctx->nr_active); in __blk_mq_sub_active_requests()
311 if (blk_mq_is_shared_tags(hctx->flags)) in __blk_mq_active_requests()
312 return atomic_read(&hctx->queue->nr_active_requests_shared_tags); in __blk_mq_active_requests()
313 return atomic_read(&hctx->nr_active); in __blk_mq_active_requests()
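The accounting helpers above pick one of two counters depending on whether the tag set is shared queue-wide: with shared tags the active-request count lives on the request queue, otherwise on the individual hardware context. A hedged plain-C sketch of that selection (the kernel uses atomic_t; the structure and function names below are invented and the atomicity is deliberately omitted):

#include <stdbool.h>
#include <stdio.h>

struct queue {
	unsigned int nr_active_shared;	/* queue-wide count for shared tags */
};

struct hw_ctx {
	struct queue *queue;
	bool shared_tags;
	unsigned int nr_active;		/* per-hctx count otherwise */
};

static void inc_active(struct hw_ctx *hctx)
{
	if (hctx->shared_tags)
		hctx->queue->nr_active_shared++;
	else
		hctx->nr_active++;
}

static unsigned int active_requests(const struct hw_ctx *hctx)
{
	if (hctx->shared_tags)
		return hctx->queue->nr_active_shared;
	return hctx->nr_active;
}

int main(void)
{
	struct queue q = { 0 };
	struct hw_ctx a = { .queue = &q, .shared_tags = true };
	struct hw_ctx b = { .queue = &q, .shared_tags = true };

	/* Two shared-tag hctxs feed the same queue-wide counter. */
	inc_active(&a);
	inc_active(&b);
	printf("active seen from a: %u\n", active_requests(&a));	/* 2 */
	return 0;
}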
318 blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag); in __blk_mq_put_driver_tag()
319 rq->tag = BLK_MQ_NO_TAG; in __blk_mq_put_driver_tag()
321 if (rq->rq_flags & RQF_MQ_INFLIGHT) { in __blk_mq_put_driver_tag()
322 rq->rq_flags &= ~RQF_MQ_INFLIGHT; in __blk_mq_put_driver_tag()
329 if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG) in blk_mq_put_driver_tag()
332 __blk_mq_put_driver_tag(rq->mq_hctx, rq); in blk_mq_put_driver_tag()
339 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in blk_mq_get_driver_tag()
341 if (rq->tag != BLK_MQ_NO_TAG && in blk_mq_get_driver_tag()
342 !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) { in blk_mq_get_driver_tag()
343 hctx->tags->rqs[rq->tag] = rq; in blk_mq_get_driver_tag()
355 qmap->mq_map[cpu] = 0; in blk_mq_clear_mq_map()
359 * blk_mq_plug() - Get caller context plug
373 * Return current->plug if the bio can be plugged and NULL otherwise
379 bdev_op_is_zoned_write(bio->bi_bdev, bio_op(bio))) in blk_mq_plug()
386 return current->plug; in blk_mq_plug()
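The blk_mq_plug() fragment above implements a single decision: hand back the caller's current plug so bios can be batched, except for write operations to zoned block devices, where plugging could reorder writes that must stay sequential. A small userspace model of that decision (struct bio_model and get_plug() are invented for the sketch, not kernel interfaces):

#include <stdbool.h>
#include <stdio.h>

struct plug { int unused; };

struct bio_model {
	bool is_write;
	bool bdev_is_zoned;
};

/* Models blk_mq_plug(): NULL for zoned writes, the caller's plug otherwise. */
static struct plug *get_plug(const struct bio_model *bio, struct plug *current_plug)
{
	if (bio->is_write && bio->bdev_is_zoned)
		return NULL;		/* zoned writes bypass plugging */
	return current_plug;		/* models returning current->plug */
}

int main(void)
{
	struct plug p;
	struct bio_model zoned_write = { .is_write = true,  .bdev_is_zoned = true };
	struct bio_model plain_read  = { .is_write = false, .bdev_is_zoned = false };

	printf("zoned write plugged? %s\n", get_plug(&zoned_write, &p) ? "yes" : "no");
	printf("plain read  plugged? %s\n", get_plug(&plain_read,  &p) ? "yes" : "no");
	return 0;
}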
393 struct request *rq = list_entry_rq(list->next); in blk_mq_free_requests()
395 list_del_init(&rq->queuelist); in blk_mq_free_requests()
402 * and attempt to provide a fair share of the tag depth for each of them.
407 unsigned int depth, users; in hctx_may_queue() local
409 if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) in hctx_may_queue()
415 if (bt->sb.depth == 1) in hctx_may_queue()
418 if (blk_mq_is_shared_tags(hctx->flags)) { in hctx_may_queue()
419 struct request_queue *q = hctx->queue; in hctx_may_queue()
421 if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags)) in hctx_may_queue()
424 if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) in hctx_may_queue()
428 users = READ_ONCE(hctx->tags->active_queues); in hctx_may_queue()
435 depth = max((bt->sb.depth + users - 1) / users, 4U); in hctx_may_queue()
436 return __blk_mq_active_requests(hctx) < depth; in hctx_may_queue()
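The tail of hctx_may_queue() above is the fair-share policy for a shared tag set: divide the bitmap depth evenly among the active users, rounding up, never allow fewer than 4 tags, and admit the request only while the hctx's active count stays below that share. A standalone sketch of just the arithmetic, in plain C outside the kernel:

#include <stdio.h>

/*
 * Model of the depth calculation in hctx_may_queue(): each of 'users'
 * active queues sharing 'total_depth' tags gets roughly
 * ceil(total_depth / users) of them, with a floor of 4.
 */
static unsigned int fair_share_depth(unsigned int total_depth, unsigned int users)
{
	unsigned int depth;

	if (!users)
		return total_depth;	/* no active sharers: full depth available */

	depth = (total_depth + users - 1) / users;	/* ceiling division */
	return depth > 4 ? depth : 4;			/* max(depth, 4U) */
}

int main(void)
{
	/* 256 tags shared by 10 active queues -> 26 tags each */
	printf("%u\n", fair_share_depth(256, 10));
	/* 8 tags shared by 10 active queues -> the floor of 4 applies */
	printf("%u\n", fair_share_depth(8, 10));
	return 0;
}

With 256 tags and 10 active queues each user is allowed 26 tags; with only 8 tags the floor of 4 deliberately over-commits the set so that no single user is throttled down to one tag.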
442 if ((q)->tag_set->flags & BLK_MQ_F_BLOCKING) { \
443 struct blk_mq_tag_set *__tag_set = (q)->tag_set; \
447 srcu_idx = srcu_read_lock(__tag_set->srcu); \
449 srcu_read_unlock(__tag_set->srcu, srcu_idx); \