xref: /openbmc/linux/block/blk-mq.h (revision cf43e6be865a582ba66ee4747ae27a0513f6bba1)
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"

struct blk_mq_tag_set;

struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	}  ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];
	struct blk_rq_stat	stat[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;
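
/*
 * Illustrative sketch (not part of the original header), assuming the [2]
 * arrays above are indexed by whether a request is synchronous, as the
 * blk-mq core does when accounting requests against a software queue:
 *
 *	ctx->rq_dispatched[rq_is_sync(rq)]++;
 *	...
 *	ctx->rq_completed[rq_is_sync(rq)]++;
 *
 * rq_is_sync() comes from the core block headers; index 1 is sync, 0 async.
 */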

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);

/*
 * CPU hotplug helpers
 */
void blk_mq_enable_hotplug(void);
void blk_mq_disable_hotplug(void);

/*
 * CPU -> queue mappings
 */
int blk_mq_map_queues(struct blk_mq_tag_set *set);
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
		int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}
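
/*
 * Illustrative sketch (not part of the original header): blk_mq_map_queue()
 * resolves the hardware queue backing a given CPU's software queue, e.g.
 * when finishing a request whose ctx records the CPU it was queued on:
 *
 *	struct blk_mq_ctx *ctx = rq->mq_ctx;
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);
 *
 * The mapping itself is just the q->mq_map[] table built by
 * blk_mq_map_queues() above.
 */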

/*
 * sysfs helpers
 */
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
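
/*
 * Illustrative sketch (not part of the original header): blk_mq_get_ctx()
 * disables preemption via get_cpu(), so each call must be paired with
 * blk_mq_put_ctx() once the caller is done with the software queue:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *
 *	spin_lock(&ctx->lock);
 *	... add to or inspect ctx->rq_list ...
 *	spin_unlock(&ctx->lock);
 *
 *	blk_mq_put_ctx(ctx);
 */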

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	unsigned int flags;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data,
		struct request_queue *q, unsigned int flags,
		struct blk_mq_ctx *ctx, struct blk_mq_hw_ctx *hctx)
{
	data->q = q;
	data->flags = flags;
	data->ctx = ctx;
	data->hctx = hctx;
}
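
/*
 * Illustrative sketch (not part of the original header): request allocation
 * paths typically fill a blk_mq_alloc_data on the stack and pass it to the
 * tag/request allocator, which may update ctx and hctx; roughly:
 *
 *	struct blk_mq_alloc_data alloc_data;
 *
 *	blk_mq_set_alloc_data(&alloc_data, q, BLK_MQ_REQ_NOWAIT, ctx, hctx);
 *	rq = __blk_mq_alloc_request(&alloc_data, rw);
 *
 * __blk_mq_alloc_request() stands in for whichever allocator consumes this
 * struct; the exact callee and its arguments vary by kernel version.
 */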

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}
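
/*
 * Illustrative sketch (not part of the original header): run-queue style
 * loops typically combine the two predicates above, skipping hardware
 * queues that are stopped or have no software queues/tags mapped to them:
 *
 *	queue_for_each_hw_ctx(q, hctx, i) {
 *		if (blk_mq_hctx_stopped(hctx) || !blk_mq_hw_queue_mapped(hctx))
 *			continue;
 *		blk_mq_run_hw_queue(hctx, async);
 *	}
 *
 * queue_for_each_hw_ctx() is the iterator from include/linux/blk-mq.h.
 */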

#endif