/* xref: /openbmc/linux/block/blk-mq.h (revision 4941115bef2bc891aa00a2f0edeaf06dc982325a) */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"

struct blk_mq_tag_set;

struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	}  ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];
	struct blk_rq_stat	stat[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;
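
/*
 * rq_completed is aligned to its own cacheline above so that
 * completion-time updates don't share (and bounce) the line that the
 * dispatch-side lock, list and counters live on.
 */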

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *, struct list_head *);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
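
/*
 * Typical use of the two dispatch helpers above (a sketch of the pattern
 * in blk-mq.c, not an API defined here): drain the busy sw queues into a
 * local list, then hand that list to the driver.
 *
 *	LIST_HEAD(rq_list);
 *
 *	blk_mq_flush_busy_ctxs(hctx, &rq_list);
 *	blk_mq_dispatch_rq_list(hctx, &rq_list);
 */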

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rq_map(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
			unsigned int hctx_idx);
struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx);
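
/*
 * Pairing sketch (illustrative; assumes the usual tag-set setup path):
 * blk_mq_init_rq_map() builds the tags and static requests for one hw
 * queue, and a successful call is balanced by blk_mq_free_rq_map().
 *
 *	struct blk_mq_tags *tags;
 *
 *	tags = blk_mq_init_rq_map(set, hctx_idx);
 *	if (!tags)
 *		return -ENOMEM;
 *	...
 *	blk_mq_free_rq_map(set, tags, hctx_idx);
 */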

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
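
/*
 * Caller pattern (a sketch, mirroring the insert path in blk-mq.c):
 * __blk_mq_insert_request() only queues rq on the sw queue, so the
 * caller holds ctx->lock and kicks the hw queue afterwards if it wants
 * the request dispatched.
 *
 *	spin_lock(&ctx->lock);
 *	__blk_mq_insert_request(hctx, rq, at_head);
 *	spin_unlock(&ctx->lock);
 *	blk_mq_run_hw_queue(hctx, false);
 */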

/*
 * CPU hotplug helpers
 */
void blk_mq_enable_hotplug(void);
void blk_mq_disable_hotplug(void);
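
/*
 * Usage sketch (hedged; the implementations live in blk-mq.c): these
 * bracket updates of the ctx<->hctx mappings so they cannot race with
 * CPU hotplug.
 *
 *	blk_mq_disable_hotplug();
 *	... update mappings ...
 *	blk_mq_enable_hotplug();
 */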

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
		int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}
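
/*
 * Lookup example (a sketch of the common pattern in blk-mq.c): resolve
 * the hw queue serving a sw queue via the ctx's CPU.
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 */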

/*
 * sysfs helpers
 */
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}
/*
 * This assumes the software queues are per-cpu. They could just as well be
 * per-node, for instance, but for now that is hardcoded. Note that we don't
 * care about preemption here, since we know the ctx's are persistent. This
 * does mean that we can't rely on the ctx always matching the CPU we are
 * currently running on.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
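
/*
 * The usual pairing (sketch): blk_mq_get_ctx() disables preemption via
 * get_cpu(), so each call must be matched with blk_mq_put_ctx() once
 * the caller no longer needs a stable ctx reference.
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	... use ctx ...
 *	blk_mq_put_ctx(ctx);
 */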

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	unsigned int flags;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data,
		struct request_queue *q, unsigned int flags,
		struct blk_mq_ctx *ctx, struct blk_mq_hw_ctx *hctx)
{
	data->q = q;
	data->flags = flags;
	data->ctx = ctx;
	data->hctx = hctx;
}

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	return data->hctx->tags;
}

/*
 * Internal helpers for request allocation/init/free
 */
void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
			struct request *rq, unsigned int op);
void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct request *rq);
struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
					unsigned int op);
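
/*
 * How the allocation pieces fit together (a hedged sketch of the path
 * in blk-mq.c, not a definition from this file): pack the queue, flags
 * and mapping into a blk_mq_alloc_data, then let __blk_mq_alloc_request()
 * grab a tag (via blk_mq_tags_from_data()) and initialize the request.
 *
 *	struct blk_mq_alloc_data alloc_data;
 *	struct request *rq;
 *
 *	blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
 *	rq = __blk_mq_alloc_request(&alloc_data, op);
 */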

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}
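
/*
 * Typical guard before poking the dispatch path (illustrative): skip hw
 * queues that are stopped or that never had ctx's/tags assigned.
 *
 *	if (blk_mq_hctx_stopped(hctx) || !blk_mq_hw_queue_mapped(hctx))
 *		return;
 *	blk_mq_run_hw_queue(hctx, async);
 */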

#endif