/* xref: /openbmc/linux/block/blk-mq-sched.h (revision 7aacf86b) */
#ifndef BLK_MQ_SCHED_H
#define BLK_MQ_SCHED_H

#include "blk-mq.h"
#include "blk-mq-tag.h"

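/*
 * Tear down scheduler-private per-hctx data; the optional @exit callback
 * is invoked for each hardware queue before its data is released.
 */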
void blk_mq_sched_free_hctx_data(struct request_queue *q,
				 void (*exit)(struct blk_mq_hw_ctx *));

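/*
 * Associate the submitting task's I/O context with @rq, for schedulers
 * that keep per-process state.
 */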
void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio);

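/*
 * Merge handling: try to fold a bio (or a whole request) into a request
 * already queued at the scheduler, plus the restart hook that reruns a
 * hardware queue flagged with BLK_MQ_S_SCHED_RESTART.
 */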
void blk_mq_sched_request_inserted(struct request *rq);
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
				struct request **merged_request);
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);

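/*
 * Insertion: hand a single request, or a per-ctx list of requests, to the
 * attached scheduler (or to the software queues when none is attached).
 */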
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async, bool can_block);
void blk_mq_sched_insert_requests(struct request_queue *q,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async);

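/* Main dispatch path: pull requests from the scheduler and issue them. */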
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);

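/* Attach/detach an elevator to/from a blk-mq request queue. */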
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);

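/* Set up / tear down scheduler state for an individual hardware queue. */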
int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
			   unsigned int hctx_idx);
void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
			    unsigned int hctx_idx);

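/* Pick and initialize the default scheduler for a freshly created queue. */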
int blk_mq_sched_init(struct request_queue *q);

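/*
 * Fast-path merge check: skip the scheduler entirely when the queue has
 * merging disabled or the bio is not mergeable.
 */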
static inline bool
blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
{
	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
		return false;

	return __blk_mq_sched_bio_merge(q, bio);
}

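/*
 * Illustrative sketch (an assumption, not a quote of the submission path):
 * callers are expected to attempt a scheduler merge before allocating a
 * new request, roughly:
 *
 *	if (blk_mq_sched_bio_merge(q, bio))
 *		return BLK_QC_T_NONE;
 *
 * so a merged bio never consumes a fresh request or driver tag.
 */

/*
 * Ask the attached elevator whether @bio may be merged into @rq; merging
 * is allowed by default when no scheduler (or no ->allow_merge hook) is
 * present.
 */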
static inline bool
blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
			 struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (e && e->type->ops.mq.allow_merge)
		return e->type->ops.mq.allow_merge(q, rq, bio);

	return true;
}

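/*
 * Illustrative sketch, hypothetical "foo" elevator (the name and policy
 * are assumptions): an ->allow_merge hook can veto merges, for example
 * by refusing to mix I/O priorities:
 *
 *	static bool foo_allow_bio_merge(struct request_queue *q,
 *					struct request *rq, struct bio *bio)
 *	{
 *		return req_get_ioprio(rq) == bio_prio(bio);
 *	}
 *
 * wired up via .allow_merge in struct elevator_mq_ops.
 */

/* Notify the scheduler, if one is attached, that @rq has completed. */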
static inline void blk_mq_sched_completed_request(struct request *rq)
{
	struct elevator_queue *e = rq->q->elevator;

	if (e && e->type->ops.mq.completed_request)
		e->type->ops.mq.completed_request(rq);
}

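/* Notify the scheduler, if one is attached, that the driver started @rq. */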
static inline void blk_mq_sched_started_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e && e->type->ops.mq.started_request)
		e->type->ops.mq.started_request(rq);
}

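/* Notify the scheduler, if one is attached, that @rq is being requeued. */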
static inline void blk_mq_sched_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e && e->type->ops.mq.requeue_request)
		e->type->ops.mq.requeue_request(rq);
}

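/* True when the attached scheduler reports requests left to dispatch. */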
static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct elevator_queue *e = hctx->queue->elevator;

	if (e && e->type->ops.mq.has_work)
		return e->type->ops.mq.has_work(hctx);

	return false;
}

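/*
 * Illustrative sketch, hypothetical "foo" elevator ("struct foo_data" and
 * its "dispatch" list are assumptions): a ->has_work hook typically just
 * peeks at the scheduler's internal lists:
 *
 *	static bool foo_has_work(struct blk_mq_hw_ctx *hctx)
 *	{
 *		struct foo_data *fd = hctx->queue->elevator->elevator_data;
 *
 *		return !list_empty_careful(&fd->dispatch);
 *	}
 */

/* True if this hctx was flagged for a rerun via BLK_MQ_S_SCHED_RESTART. */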
static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

#endif /* BLK_MQ_SCHED_H */