Lines matching refs: q (cross-reference hits in the Linux block layer scheduler glue, blk-mq-sched.c)

89 struct request_queue *q = hctx->queue; in __blk_mq_do_dispatch_sched() local
90 struct elevator_queue *e = q->elevator; in __blk_mq_do_dispatch_sched()
114 budget_token = blk_mq_get_dispatch_budget(q); in __blk_mq_do_dispatch_sched()
120 blk_mq_put_dispatch_budget(q, budget_token); in __blk_mq_do_dispatch_sched()
156 blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY); in __blk_mq_do_dispatch_sched()
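
Taken together, lines 114, 120 and 156 show the dispatch-budget discipline in __blk_mq_do_dispatch_sched(): a budget token is taken from the queue before a request is pulled from the elevator, returned if the dispatch cannot go ahead, and when budget runs out the hardware queues are re-run after BLK_MQ_BUDGET_DELAY rather than spun on. A minimal user-space model of that acquire/return shape follows; the fake_* names are hypothetical stand-ins for the kernel helpers, not kernel APIs.

#include <stdio.h>
#include <stdbool.h>

static int budget = 2;                  /* tokens the device can absorb */

/* stands in for blk_mq_get_dispatch_budget(q) */
static int fake_get_budget(void)
{
        if (budget == 0)
                return -1;              /* no budget: caller backs off */
        return --budget;                /* hand out a token */
}

/* stands in for blk_mq_put_dispatch_budget(q, token) */
static void fake_put_budget(int token)
{
        (void)token;
        budget++;                       /* dispatch failed: give it back */
}

static bool fake_dispatch(int i)
{
        return i % 2 == 0;              /* pretend odd requests fail */
}

int main(void)
{
        for (int i = 0; i < 4; i++) {
                int token = fake_get_budget();

                if (token < 0) {
                        puts("out of budget: re-run queue after a delay");
                        break;
                }
                if (fake_dispatch(i)) {
                        puts("dispatched");
                } else {
                        fake_put_budget(token);  /* undo on failure */
                        puts("dispatch failed: budget returned");
                }
        }
        return 0;
}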
217 struct request_queue *q = hctx->queue; in blk_mq_do_dispatch_ctx() local
234 budget_token = blk_mq_get_dispatch_budget(q); in blk_mq_do_dispatch_ctx()
240 blk_mq_put_dispatch_budget(q, budget_token); in blk_mq_do_dispatch_ctx()
248 blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY); in blk_mq_do_dispatch_ctx()
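
blk_mq_do_dispatch_ctx() (lines 217-248) repeats the same get/put/delayed-rerun budget discipline sketched above; the difference, not visible in a refs:q listing, is that it pulls requests from the per-CPU software contexts rather than from the elevator, which is presumably why the two dispatch paths exist side by side.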
321 struct request_queue *q = hctx->queue; in blk_mq_sched_dispatch_requests() local
324 if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q))) in blk_mq_sched_dispatch_requests()
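
Line 324 is the dispatch gate: if the hardware context has been stopped or the whole queue quiesced, nothing may reach the driver. A small sketch of the same guard; unlikely() is the standard kernel wrapper around GCC's __builtin_expect(), and the two flags here are stand-ins for blk_mq_hctx_stopped()/blk_queue_quiesced().

#include <stdbool.h>
#include <stdio.h>

#define unlikely(x) __builtin_expect(!!(x), 0)

static bool stopped, quiesced;          /* stand-in state flags */

static void dispatch_requests(void)
{
        /* fast path is "neither flag set", so hint the compiler */
        if (unlikely(stopped || quiesced))
                return;                 /* queue must stay idle */
        puts("dispatching");
}

int main(void)
{
        dispatch_requests();            /* prints "dispatching" */
        quiesced = true;
        dispatch_requests();            /* silently returns */
        return 0;
}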
339 bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio, in blk_mq_sched_bio_merge() argument
342 struct elevator_queue *e = q->elevator; in blk_mq_sched_bio_merge()
349 ret = e->type->ops.bio_merge(q, bio, nr_segs); in blk_mq_sched_bio_merge()
353 ctx = blk_mq_get_ctx(q); in blk_mq_sched_bio_merge()
354 hctx = blk_mq_map_queue(q, bio->bi_opf, ctx); in blk_mq_sched_bio_merge()
367 if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) in blk_mq_sched_bio_merge()
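
Lines 342-367 show the two-tier merge policy in blk_mq_sched_bio_merge(): if the active elevator supplies a bio_merge hook it gets first refusal, otherwise the bio is matched against the software context's pending request lists. A simplified model of that dispatch-with-fallback shape; struct sched_ops and the opaque pointers are stand-ins, not the kernel types.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct sched_ops {
        /* mirrors e->type->ops.bio_merge(q, bio, nr_segs) */
        bool (*bio_merge)(void *q, void *bio, unsigned int nr_segs);
};

/* stands in for the generic blk_bio_list_merge() fallback */
static bool generic_list_merge(void *q, void *bio, unsigned int nr_segs)
{
        (void)q; (void)bio; (void)nr_segs;
        return false;                   /* pretend nothing matched */
}

static bool sched_bio_merge(const struct sched_ops *e, void *q,
                            void *bio, unsigned int nr_segs)
{
        if (e && e->bio_merge)          /* elevator gets first refusal */
                return e->bio_merge(q, bio, nr_segs);
        return generic_list_merge(q, bio, nr_segs);
}

static bool always_merge(void *q, void *bio, unsigned int nr_segs)
{
        (void)q; (void)bio; (void)nr_segs;
        return true;
}

int main(void)
{
        struct sched_ops e = { .bio_merge = always_merge };

        printf("%d %d\n",
               sched_bio_merge(&e, NULL, NULL, 1),     /* 1: hook ran */
               sched_bio_merge(NULL, NULL, NULL, 1));  /* 0: fallback */
        return 0;
}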
375 bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq, in blk_mq_sched_try_insert_merge() argument
378 return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq, free); in blk_mq_sched_try_insert_merge()
382 static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q, in blk_mq_sched_alloc_map_and_rqs() argument
386 if (blk_mq_is_shared_tags(q->tag_set->flags)) { in blk_mq_sched_alloc_map_and_rqs()
387 hctx->sched_tags = q->sched_shared_tags; in blk_mq_sched_alloc_map_and_rqs()
391 hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx, in blk_mq_sched_alloc_map_and_rqs()
392 q->nr_requests); in blk_mq_sched_alloc_map_and_rqs()
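
Lines 386-392 are the tag-allocation fork: when the tag set uses shared tags, every hardware context just points at the queue-wide q->sched_shared_tags; otherwise each context gets its own map and request pool sized to q->nr_requests. A toy version of that reference-vs-allocate split, with all types simplified stand-ins for the kernel structures:

#include <stdbool.h>
#include <stdlib.h>

struct tags { unsigned int depth; };
struct hw_ctx { struct tags *sched_tags; };

static struct tags shared_tags = { .depth = 256 };  /* queue-wide pool */

static int alloc_sched_tags(struct hw_ctx *hctx, bool is_shared,
                            unsigned int nr_requests)
{
        if (is_shared) {
                hctx->sched_tags = &shared_tags;    /* share, don't copy */
                return 0;
        }
        hctx->sched_tags = malloc(sizeof(*hctx->sched_tags));
        if (!hctx->sched_tags)
                return -1;                          /* -ENOMEM upstream */
        hctx->sched_tags->depth = nr_requests;      /* private pool */
        return 0;
}

int main(void)
{
        struct hw_ctx a = { 0 }, b = { 0 };

        alloc_sched_tags(&a, true, 0);
        alloc_sched_tags(&b, false, 128);
        free(b.sched_tags);                 /* only b owns its memory */
        return a.sched_tags == &shared_tags ? 0 : 1;
}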
406 static void blk_mq_sched_tags_teardown(struct request_queue *q, unsigned int flags) in blk_mq_sched_tags_teardown() argument
411 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_sched_tags_teardown()
420 blk_mq_exit_sched_shared_tags(q); in blk_mq_sched_tags_teardown()
443 int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) in blk_mq_init_sched() argument
445 unsigned int flags = q->tag_set->flags; in blk_mq_init_sched()
456 q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth, in blk_mq_init_sched()
460 ret = blk_mq_init_sched_shared_tags(q); in blk_mq_init_sched()
465 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_init_sched()
466 ret = blk_mq_sched_alloc_map_and_rqs(q, hctx, i); in blk_mq_init_sched()
471 ret = e->ops.init_sched(q, e); in blk_mq_init_sched()
475 mutex_lock(&q->debugfs_mutex); in blk_mq_init_sched()
476 blk_mq_debugfs_register_sched(q); in blk_mq_init_sched()
477 mutex_unlock(&q->debugfs_mutex); in blk_mq_init_sched()
479 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_init_sched()
483 eq = q->elevator; in blk_mq_init_sched()
484 blk_mq_sched_free_rqs(q); in blk_mq_init_sched()
485 blk_mq_exit_sched(q, eq); in blk_mq_init_sched()
490 mutex_lock(&q->debugfs_mutex); in blk_mq_init_sched()
491 blk_mq_debugfs_register_sched_hctx(q, hctx); in blk_mq_init_sched()
492 mutex_unlock(&q->debugfs_mutex); in blk_mq_init_sched()
498 blk_mq_sched_free_rqs(q); in blk_mq_init_sched()
499 blk_mq_sched_tags_teardown(q, flags); in blk_mq_init_sched()
501 q->elevator = NULL; in blk_mq_init_sched()
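
Two things stand out in blk_mq_init_sched(). Line 456 sizes the scheduler's request pool at twice the smaller of the driver's queue depth and a cap whose name is cut off in this listing, giving the elevator headroom to queue ahead of the device. And lines 466-501 show classic kernel error unwinding: a failure mid-initialization jumps to cleanup that releases everything acquired so far and finishes with q->elevator = NULL. A compact model of that goto-ladder shape, with hypothetical setup_*/undo_* steps:

#include <stdio.h>

static int setup_tags(void)  { puts("tags allocated");   return 0; }
static int setup_sched(void) { puts("sched init fails"); return -1; }
static void undo_tags(void)  { puts("tags torn down"); }

static int init_sched(void)
{
        int ret;

        ret = setup_tags();
        if (ret)
                goto out;               /* nothing to undo yet */

        ret = setup_sched();
        if (ret)
                goto err_free_tags;     /* unwind in reverse order */

        return 0;

err_free_tags:
        undo_tags();
out:
        return ret;
}

int main(void)
{
        printf("init_sched() = %d\n", init_sched());
        return 0;
}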
509 void blk_mq_sched_free_rqs(struct request_queue *q) in blk_mq_sched_free_rqs() argument
514 if (blk_mq_is_shared_tags(q->tag_set->flags)) { in blk_mq_sched_free_rqs()
515 blk_mq_free_rqs(q->tag_set, q->sched_shared_tags, in blk_mq_sched_free_rqs()
518 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_sched_free_rqs()
520 blk_mq_free_rqs(q->tag_set, in blk_mq_sched_free_rqs()
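
blk_mq_sched_free_rqs() (lines 509-520) is the mirror image of the allocation fork above: with shared tags the request pool is freed exactly once through q->sched_shared_tags, and only in the per-hctx case does the queue_for_each_hw_ctx() loop free anything, which keeps ownership of the memory unambiguous.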
526 void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e) in blk_mq_exit_sched() argument
532 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_exit_sched()
533 mutex_lock(&q->debugfs_mutex); in blk_mq_exit_sched()
535 mutex_unlock(&q->debugfs_mutex); in blk_mq_exit_sched()
544 mutex_lock(&q->debugfs_mutex); in blk_mq_exit_sched()
545 blk_mq_debugfs_unregister_sched(q); in blk_mq_exit_sched()
546 mutex_unlock(&q->debugfs_mutex); in blk_mq_exit_sched()
550 blk_mq_sched_tags_teardown(q, flags); in blk_mq_exit_sched()
551 q->elevator = NULL; in blk_mq_exit_sched()
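
blk_mq_exit_sched() (lines 526-551) tears things down in a deliberate order: debugfs entries go first, each removal bracketed by q->debugfs_mutex, then the scheduler's per-hctx and queue-wide state, then the tag structures, and q->elevator is cleared only at the very end so nothing can observe a half-torn-down scheduler. A small POSIX-threads sketch of that lock-bracketed, pointer-cleared-last shape:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t debugfs_mutex = PTHREAD_MUTEX_INITIALIZER;
static int elevator_state;
static int *elevator = &elevator_state;   /* non-NULL: sched active */

static void exit_sched(void)
{
        /* debugfs removal is serialized, as at lines 533-546 */
        pthread_mutex_lock(&debugfs_mutex);
        puts("unregister per-hctx and queue debugfs entries");
        pthread_mutex_unlock(&debugfs_mutex);

        puts("free scheduler state, then tags");

        elevator = NULL;                  /* cleared last, as at line 551 */
}

int main(void)
{
        exit_sched();
        return elevator == NULL ? 0 : 1;
}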