xref: /openbmc/linux/block/blk.h (revision ec2da07c)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <xen/xen.h>
#include "blk-mq.h"
#include "blk-mq-sched.h"

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

#ifdef CONFIG_DEBUG_FS
extern struct dentry *blk_debugfs_root;
#endif

struct blk_flush_queue {
	unsigned int		flush_queue_delayed:1;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	/*
	 * flush_rq shares its tag with this request; the two cannot be
	 * active at the same time.
	 */
	struct request		*orig_rq;
	spinlock_t		mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

static inline struct blk_flush_queue *
blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
{
	return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
}

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
		int node, int cmd_size, gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

void blk_freeze_queue(struct request_queue *q);

static inline void blk_queue_enter_live(struct request_queue *q)
{
	/*
	 * Given that running in generic_make_request() context
	 * guarantees that a live reference against q_usage_counter has
	 * been established, further references under that same context
	 * need not check that the queue has been frozen (marked dead).
	 */
	percpu_ref_get(&q->q_usage_counter);
}
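
/*
 * Illustrative pairing sketch (an assumption for clarity, not a call site
 * taken from this file): a reference obtained with blk_queue_enter_live()
 * is expected to be dropped again once the caller is done with the queue,
 * typically via blk_queue_exit(), which performs the matching
 * percpu_ref_put(&q->q_usage_counter):
 *
 *	blk_queue_enter_live(q);
 *	// ... submit work against q ...
 *	blk_queue_exit(q);
 */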

static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
	phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}
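
/*
 * Worked example (illustrative numbers, not taken from any device): with a
 * 64 KiB segment boundary, mask == 0xffff.  Take vec1 at physical address
 * 0xf000 with bv_len 0x1000 and vec2 at 0x10000 with bv_len 0x200.  The two
 * vectors are physically contiguous (0xf000 + 0x1000 == 0x10000), but
 * (0xf000 | 0xffff) == 0xffff while ((0x10000 + 0x200 - 1) | 0xffff) ==
 * 0x1ffff, so a merged segment would straddle the boundary and
 * biovec_phys_mergeable() returns false.
 */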

static inline bool __bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & queue_virt_boundary(q)) ||
		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!queue_virt_boundary(q))
		return false;
	return __bvec_gap_to_prev(q, bprv, offset);
}
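
/*
 * Worked example (illustrative numbers only): with a 4 KiB virtual
 * boundary, queue_virt_boundary() == 0xfff.  If the previous vector has
 * bv_offset 0 and bv_len 0x800, it ends mid-page (0x800 & 0xfff != 0), so
 * appending any further vector would leave a hole in the SG list and
 * bvec_gap_to_prev() returns true.  If instead bprv ends exactly on the
 * boundary and the new vector starts at offset 0, both terms are zero and
 * the vectors can share one segment.
 */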

static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
		unsigned int nr_segs)
{
	rq->nr_phys_segments = nr_segs;
	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;
	rq->ioprio = bio_prio(bio);

	if (bio->bi_disk)
		rq->rq_disk = bio->bi_disk;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);

bool bio_attempt_front_merge(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool bio_attempt_back_merge(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct bio *bio);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **same_queue_rq);

void blk_account_io_start(struct request *req, bool new_io);
void blk_account_io_completion(struct request *req, unsigned int bytes);
void blk_account_io_done(struct request *req, u64 now);

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

int elevator_init_mq(struct request_queue *q);
int elevator_switch_mq(struct request_queue *q,
			      struct elevator_type *new_e);
void __elevator_exit(struct request_queue *, struct elevator_queue *);
int elv_register_queue(struct request_queue *q);
void elv_unregister_queue(struct request_queue *q);

static inline void elevator_exit(struct request_queue *q,
		struct elevator_queue *e)
{
	blk_mq_sched_free_requests(q);
	__elevator_exit(q, e);
}

struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);

#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif

void __blk_queue_split(struct request_queue *q, struct bio **bio,
		unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
int ll_front_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
struct request *attempt_back_merge(struct request_queue *q, struct request *rq);
struct request *attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_dev_init(void);

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started, and
 *	c) it's a file system request
 */
static inline bool blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk &&
	       (rq->rq_flags & RQF_IO_STAT) &&
		!blk_rq_is_passthrough(rq);
}

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
 * is defined as 'unsigned int', and it also has to be aligned to the
 * logical block size, which is the minimum unit accepted by the hardware.
 */
static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
{
	return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
}
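
/*
 * Worked example (illustrative, not derived from any particular device):
 * with a 4096-byte logical block size, round_down(UINT_MAX, 4096) is
 * 4294963200 bytes, so bio_allowed_max_sectors() returns
 * 4294963200 >> 9 == 8388600 512-byte sectors.  With 512-byte logical
 * blocks the result is round_down(4294967295, 512) >> 9 == 8388607.
 */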

/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

/**
 * create_io_context - try to create task->io_context
 * @gfp_mask: allocation mask
 * @node: allocation node
 *
 * If %current->io_context is %NULL, allocate a new io_context and install
 * it.  Returns %current->io_context, which may be %NULL if the allocation
 * failed.
 *
 * Note that this function can't be called with IRQs disabled because
 * task_lock(), which protects %current->io_context, is IRQ-unsafe.
 */
static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
{
	WARN_ON_ONCE(irqs_disabled());
	if (unlikely(!current->io_context))
		create_task_io_context(current, gfp_mask, node);
	return current->io_context;
}
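
/*
 * Hypothetical caller sketch (for illustration only; the parameters shown
 * are assumptions, not taken from an in-tree call site):
 *
 *	struct io_context *ioc;
 *
 *	ioc = create_io_context(GFP_KERNEL, NUMA_NO_NODE);
 *	if (!ioc)
 *		return -ENOMEM;	// allocation failed, no io_context available
 */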

/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern void blk_throtl_register_queue(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_register_queue(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

#ifdef CONFIG_BLK_CGROUP_IOLATENCY
extern int blk_iolatency_init(struct request_queue *q);
#else
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif

struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);

#ifdef CONFIG_BLK_DEV_ZONED
void blk_queue_free_zone_bitmaps(struct request_queue *q);
#else
static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
#endif

#endif /* BLK_INTERNAL_H */