/* xref: /openbmc/linux/block/blk-mq-sched.h (revision dd6216bb) */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_SCHED_H
#define BLK_MQ_SCHED_H

#include "elevator.h"
#include "blk-mq.h"

#define MAX_SCHED_RQ (16 * BLKDEV_DEFAULT_RQ)

bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **merged_request);
bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
				   struct list_head *free);
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);

void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);

int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
void blk_mq_sched_free_rqs(struct request_queue *q);

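/*
 * Re-run the hardware queue, but only if a scheduler restart was previously
 * flagged via BLK_MQ_S_SCHED_RESTART; otherwise this is a no-op.
 */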
static inline void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		__blk_mq_sched_restart(hctx);
}

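/* A bio is mergeable unless any of the REQ_NOMERGE_FLAGS bits are set. */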
static inline bool bio_mergeable(struct bio *bio)
{
	return !(bio->bi_opf & REQ_NOMERGE_FLAGS);
}

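/*
 * For requests that use an I/O scheduler (RQF_USE_SCHED), defer the merge
 * decision for @bio to the elevator's ->allow_merge() hook if one is
 * provided; all other requests are allowed to merge.
 */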
static inline bool
blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
			 struct bio *bio)
{
	if (rq->rq_flags & RQF_USE_SCHED) {
		struct elevator_queue *e = q->elevator;

		if (e->type->ops.allow_merge)
			return e->type->ops.allow_merge(q, rq, bio);
	}
	return true;
}

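/*
 * Forward completion of a scheduler-issued request, along with the
 * completion timestamp @now, to the elevator's ->completed_request() hook.
 */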
static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
{
	if (rq->rq_flags & RQF_USE_SCHED) {
		struct elevator_queue *e = rq->q->elevator;

		if (e->type->ops.completed_request)
			e->type->ops.completed_request(rq, now);
	}
}

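/*
 * Notify the elevator, via its ->requeue_request() hook, that a
 * scheduler-issued request is being requeued.
 */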
static inline void blk_mq_sched_requeue_request(struct request *rq)
{
	if (rq->rq_flags & RQF_USE_SCHED) {
		struct request_queue *q = rq->q;
		struct elevator_queue *e = q->elevator;

		if (e->type->ops.requeue_request)
			e->type->ops.requeue_request(rq);
	}
}

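/*
 * A hardware queue has scheduler work pending only if an elevator is
 * attached and its ->has_work() hook reports work for this hctx.
 */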
static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct elevator_queue *e = hctx->queue->elevator;

	if (e && e->type->ops.has_work)
		return e->type->ops.has_work(hctx);

	return false;
}

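/* True if a scheduler restart has been marked for this hardware queue. */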
static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

#endif