/* block/blk-mq.h (revision de1482974080ec9ef414bf048b2646b246b63f6e) */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"

struct blk_mq_tag_set;

struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	}  ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx);
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
				bool wait);
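
/*
 * Illustrative sketch (not part of this header): a request allocated from a
 * scheduler's sched_tags does not yet own a driver tag, so the dispatch path
 * acquires one with blk_mq_get_driver_tag() before the request can be handed
 * to ->queue_rq(). The surrounding loop is hypothetical:
 *
 *	if (!blk_mq_get_driver_tag(rq, &hctx, false))
 *		break;		(no driver tag free, stop dispatching for now)
 */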

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);
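
/*
 * Illustrative sketch (not part of this header): setup pairs
 * blk_mq_alloc_rq_map() with blk_mq_alloc_rqs(), and teardown pairs
 * blk_mq_free_rqs() with blk_mq_free_rq_map(), per hardware queue.
 * Error handling is elided:
 *
 *	tags = blk_mq_alloc_rq_map(set, hctx_idx, set->queue_depth,
 *				   set->reserved_tags);
 *	if (tags && blk_mq_alloc_rqs(set, tags, hctx_idx, set->queue_depth) < 0) {
 *		blk_mq_free_rq_map(tags);
 *		tags = NULL;
 *	}
 *	...
 *	blk_mq_free_rqs(set, tags, hctx_idx);
 *	blk_mq_free_rq_map(tags);
 */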

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);
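
/*
 * Illustrative sketch: __blk_mq_insert_request() adds one request to its
 * software queue (at the head when at_head is true) and expects the caller
 * to serialize on the ctx lock, as the in-tree callers do, while
 * blk_mq_request_bypass_insert() skips the software queues and puts the
 * request directly on the hctx dispatch list:
 *
 *	spin_lock(&rq->mq_ctx->lock);
 *	__blk_mq_insert_request(hctx, rq, false);
 *	spin_unlock(&rq->mq_ctx->lock);
 *
 *	blk_mq_request_bypass_insert(rq);
 */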

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
		int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}
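
/*
 * Illustrative sketch: q->mq_map[] translates a CPU number to a hardware
 * queue index, so resolving the hctx for the current CPU (in a context
 * where the CPU cannot change under us) looks like:
 *
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, smp_processor_id());
 */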

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues; they could just as well be per-node,
 * for instance, but for now this is hardcoded. Note that we don't care about
 * preemption, since we know the ctx's are persistent. This does mean that we
 * can't rely on the ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
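
/*
 * Illustrative sketch: blk_mq_get_ctx() disables preemption via get_cpu(),
 * so every call must be balanced with blk_mq_put_ctx(), and the code in
 * between must not sleep:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	(use ctx; atomic context, no sleeping)
 *	blk_mq_put_ctx(ctx);
 */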

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	unsigned int flags;
	unsigned int shallow_depth;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}
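
/*
 * Illustrative sketch: an allocation path fills in a blk_mq_alloc_data and
 * lets blk_mq_tags_from_data() select the tag space; with BLK_MQ_REQ_INTERNAL
 * set, tags come from the scheduler's sched_tags instead of the driver tags:
 *
 *	struct blk_mq_alloc_data data = { .q = q, .flags = BLK_MQ_REQ_INTERNAL };
 *
 *	data.ctx = blk_mq_get_ctx(q);
 *	data.hctx = blk_mq_map_queue(q, data.ctx->cpu);
 *	tags = blk_mq_tags_from_data(&data);
 */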

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
			unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(hctx);
}

static inline blk_status_t blk_mq_get_dispatch_budget(
		struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(hctx);
	return BLK_STS_OK;
}
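
/*
 * Illustrative sketch: a dispatch loop obtains a budget before handing a
 * request to the driver and must return it if the request is not dispatched
 * after all; queues whose mq_ops lack ->get_budget() always see BLK_STS_OK.
 * dispatch_one() below is a hypothetical stand-in for the actual dispatch:
 *
 *	if (blk_mq_get_dispatch_budget(hctx) != BLK_STS_OK)
 *		return;
 *	if (!dispatch_one(hctx, rq))
 *		blk_mq_put_dispatch_budget(hctx);
 */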

#endif