/* xref: /openbmc/linux/block/blk-mq.h (revision 7211ec63) */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"

struct blk_mq_tag_set;

struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;
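
/*
 * Illustrative sketch (not in-tree code): a software queue is always
 * manipulated under its own lock, e.g. when queueing a request:
 *
 *	spin_lock(&ctx->lock);
 *	list_add_tail(&rq->queuelist, &ctx->rq_list);
 *	spin_unlock(&ctx->lock);
 *
 * The two-element statistics arrays are indexed by sync vs async,
 * e.g. rq_dispatched[op_is_sync(op)]++.
 */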

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx);
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
				bool wait);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);
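
/*
 * Illustrative pairing (sketch only, not in-tree code): the tag map is
 * allocated before its backing requests, and torn down in reverse order:
 *
 *	struct blk_mq_tags *tags;
 *
 *	tags = blk_mq_alloc_rq_map(set, hctx_idx, set->queue_depth,
 *				   set->reserved_tags);
 *	if (!tags)
 *		return -ENOMEM;
 *	if (blk_mq_alloc_rqs(set, tags, hctx_idx, set->queue_depth)) {
 *		blk_mq_free_rq_map(tags);
 *		return -ENOMEM;
 *	}
 *	...
 *	blk_mq_free_rqs(set, tags, hctx_idx);
 *	blk_mq_free_rq_map(tags);
 */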

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int index);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
		int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}
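
/*
 * Example (illustrative sketch): resolving the hardware context that
 * serves a given CPU; callers must either run in a non-preemptible
 * section or not care about migration, as with blk_mq_get_ctx() below:
 *
 *	struct blk_mq_hw_ctx *hctx;
 *
 *	hctx = blk_mq_map_queue(q, raw_smp_processor_id());
 */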

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues; they could just as well be
 * per-node, for instance. For now this is hardcoded as-is. Note that we
 * don't care about preemption, since we know the ctx's are persistent.
 * This does mean that we can't rely on ctx always matching the currently
 * running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
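
/*
 * Typical usage (illustrative sketch): blk_mq_get_ctx() disables
 * preemption via get_cpu(), so the section between get and put must be
 * short and must not sleep:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *
 *	... short, non-sleeping work against ctx ...
 *
 *	blk_mq_put_ctx(ctx);
 */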

struct blk_mq_alloc_data {
	/* input parameters */
	struct request_queue *q;
	unsigned int flags;
	unsigned int shallow_depth;

	/* input & output parameters */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}
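
/*
 * Sketch (not in-tree code): an allocation path fills in the input
 * fields, resolves ctx/hctx, and then picks the tag space;
 * BLK_MQ_REQ_INTERNAL routes the allocation to the scheduler tags
 * instead of the driver tags:
 *
 *	struct blk_mq_alloc_data data = {
 *		.q	= q,
 *		.flags	= BLK_MQ_REQ_INTERNAL,
 *	};
 *	struct blk_mq_tags *tags;
 *
 *	data.ctx  = blk_mq_get_ctx(q);
 *	data.hctx = blk_mq_map_queue(q, data.ctx->cpu);
 *	tags = blk_mq_tags_from_data(&data);
 *	...
 *	blk_mq_put_ctx(data.ctx);
 *
 * Here blk_mq_tags_from_data() returns data.hctx->sched_tags because of
 * BLK_MQ_REQ_INTERNAL.
 */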

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}
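
/*
 * Both predicates typically gate a queue run; a sketch of the pattern
 * (mirroring the run-queue paths rather than any single function):
 *
 *	if (!blk_mq_hw_queue_mapped(hctx) || blk_mq_hctx_stopped(hctx))
 *		return;
 *	... dispatch from hctx ...
 */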

void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
			unsigned int inflight[2]);

#endif /* INT_BLK_MQ_H */