/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"

struct blk_mq_tag_set;

struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;
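
/*
 * Illustrative sketch, not part of the original header: the dispatch
 * and completion counters above are kept per-ctx, so a reader has to
 * sum them across CPUs. example_total_dispatched() is an assumed name;
 * the per_cpu_ptr() walk mirrors __blk_mq_get_ctx() below.
 */
static inline unsigned long example_total_dispatched(struct request_queue *q)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blk_mq_ctx *ctx = per_cpu_ptr(q->queue_ctx, cpu);

		/* the two slots split the count by request type (async/sync) */
		sum += ctx->rq_dispatched[0] + ctx->rq_dispatched[1];
	}
	return sum;
}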

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx);
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
				bool wait);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);
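
/*
 * Usage sketch, hypothetical rather than taken from this file: how the
 * helpers above pair up when populating one hardware queue. queue_depth
 * and reserved_tags are real struct blk_mq_tag_set fields, but
 * example_setup_hctx_rqs() itself is an assumed name.
 */
static inline int example_setup_hctx_rqs(struct blk_mq_tag_set *set,
					 unsigned int hctx_idx)
{
	struct blk_mq_tags *tags;
	int ret;

	/* allocate the tag map first, then the requests it indexes */
	tags = blk_mq_alloc_rq_map(set, hctx_idx, set->queue_depth,
				   set->reserved_tags);
	if (!tags)
		return -ENOMEM;

	ret = blk_mq_alloc_rqs(set, tags, hctx_idx, set->queue_depth);
	if (ret) {
		/* unwind the tag map if request allocation fails */
		blk_mq_free_rq_map(tags);
		return ret;
	}
	return 0;
}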

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);
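
/*
 * Caller sketch, hypothetical: __blk_mq_insert_request() manipulates the
 * per-ctx rq_list, and this sketch assumes ctx->lock must be held around
 * it, based on how that lock guards rq_list in struct blk_mq_ctx above.
 * example_insert() is an assumed name.
 */
static inline void example_insert(struct blk_mq_hw_ctx *hctx,
				  struct request *rq, bool at_head)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	spin_lock(&ctx->lock);
	__blk_mq_insert_request(hctx, rq, at_head);
	spin_unlock(&ctx->lock);
}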

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
		int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}
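
/*
 * Hypothetical example of the mapping in use: find the hardware queue
 * backing the CPU that a software ctx lives on. example_ctx_to_hctx()
 * is an assumed name for illustration only.
 */
static inline struct blk_mq_hw_ctx *
example_ctx_to_hctx(struct request_queue *q, struct blk_mq_ctx *ctx)
{
	return blk_mq_map_queue(q, ctx->cpu);
}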

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues; they could be per-node as well,
 * for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This
 * does mean that we can't rely on the ctx always matching the currently
 * running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
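
/*
 * Pairing sketch, hypothetical: blk_mq_get_ctx() pins the caller to a
 * CPU via get_cpu() and must always be balanced by blk_mq_put_ctx().
 * example_count_merge() is an assumed name for illustration.
 */
static inline void example_count_merge(struct request_queue *q)
{
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);	/* disables preemption */

	ctx->rq_merged++;
	blk_mq_put_ctx(ctx);				/* re-enables preemption */
}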

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	unsigned int flags;
	unsigned int shallow_depth;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

/*
 * Requests allocated on behalf of an I/O scheduler (BLK_MQ_REQ_INTERNAL)
 * draw their tag from the hctx's scheduler tags; all others use the
 * driver tags.
 */
static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}
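
/*
 * Hypothetical setup sketch: how a caller might populate
 * blk_mq_alloc_data using the ctx/hctx helpers defined earlier before
 * selecting a tag set. example_pick_tags() is an assumed name.
 */
static inline struct blk_mq_tags *
example_pick_tags(struct request_queue *q, unsigned int flags)
{
	struct blk_mq_alloc_data data = { .q = q, .flags = flags };
	struct blk_mq_tags *tags;

	data.ctx = blk_mq_get_ctx(q);
	data.hctx = blk_mq_map_queue(q, data.ctx->cpu);
	tags = blk_mq_tags_from_data(&data);
	blk_mq_put_ctx(data.ctx);

	return tags;
}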

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}
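
/*
 * Hypothetical guard built from the two predicates above: only kick a
 * hardware queue that is both mapped and not stopped. example_maybe_run()
 * is an assumed name; blk_mq_run_hw_queue() is declared at the top of
 * this header.
 */
static inline void example_maybe_run(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_hw_queue_mapped(hctx) && !blk_mq_hctx_stopped(hctx))
		blk_mq_run_hw_queue(hctx, true);
}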

void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
			unsigned int inflight[2]);

#endif