#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"

struct blk_mq_tag_set;

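/*
 * Per-CPU software queue context: submitted requests are queued on
 * rq_list under the ctx lock and later flushed to a hardware queue
 * (blk_mq_hw_ctx) for dispatch. The completion-side counters sit on
 * their own cacheline to avoid false sharing with the submission side.
 */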
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	}  ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];
	struct blk_rq_stat	stat[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *, struct list_head *);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx);
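/*
 * Assign a driver tag (from hctx->tags) to @rq before dispatch; with an
 * I/O scheduler attached, requests initially carry only a scheduler tag.
 */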
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
				bool wait);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);
/*
 * CPU hotplug helpers
 */
void blk_mq_enable_hotplug(void);
void blk_mq_disable_hotplug(void);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

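/*
 * Look up the hardware queue serving @cpu: q->mq_map[] holds the
 * per-CPU hardware queue index built from the tag set's mapping.
 */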
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
		int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}

/*
 * sysfs helpers
 */
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

/*
 * debugfs helpers
 */
#ifdef CONFIG_BLK_DEBUG_FS
int blk_mq_debugfs_register(struct request_queue *q, const char *name);
void blk_mq_debugfs_unregister(struct request_queue *q);
int blk_mq_debugfs_register_hctxs(struct request_queue *q);
void blk_mq_debugfs_unregister_hctxs(struct request_queue *q);
#else
static inline int blk_mq_debugfs_register(struct request_queue *q,
					  const char *name)
{
	return 0;
}

static inline void blk_mq_debugfs_unregister(struct request_queue *q)
{
}

static inline int blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	return 0;
}

static inline void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
}
#endif

extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

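/*
 * Final blk-mq teardown for a queue, called once the request_queue
 * itself is being released.
 */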
void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues; they could be per-node as well,
 * for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This
 * does mean that we can't rely on ctx always matching the currently
 * running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
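
/*
 * Typical usage (a sketch of the pattern on the submission path):
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);	(pins the CPU)
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 *	... queue or dispatch work against ctx/hctx ...
 *	blk_mq_put_ctx(ctx);				(re-enables preemption)
 */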

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	unsigned int flags;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

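/*
 * BLK_MQ_REQ_INTERNAL allocations are made on behalf of the I/O
 * scheduler and draw from sched_tags; everything else uses the
 * hardware (driver) tags directly.
 */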
static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}

/*
 * Internal helpers for request allocation/init/free
 */
void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
			struct request *rq, unsigned int op);
void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct request *rq);
void blk_mq_finish_request(struct request *rq);
struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
					unsigned int op);

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

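/*
 * A hardware queue only counts as mapped if it has software contexts
 * assigned to it and a tag set to allocate from.
 */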
static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

#endif