/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/blk-crypto.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <xen/xen.h>
#include "blk-crypto-internal.h"

struct elevator_type;

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern struct dentry *blk_debugfs_root;

struct blk_flush_queue {
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	spinlock_t		mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kmem_cache *blk_requestq_srcu_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

void blk_freeze_queue(struct request_queue *q);
void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
void blk_queue_start_drain(struct request_queue *q);
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
void submit_bio_noacct_nocheck(struct bio *bio);

static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
	rcu_read_lock();
	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
		goto fail;

	/*
	 * The code that increments the pm_only counter must ensure that the
	 * counter is globally visible before the queue is unfrozen.
	 */
	if (blk_queue_pm_only(q) &&
	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
		goto fail_put;

	rcu_read_unlock();
	return true;

fail_put:
	blk_queue_exit(q);
fail:
	rcu_read_unlock();
	return false;
}

static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);

	if (blk_try_enter_queue(q, false))
		return 0;
	return __bio_queue_enter(q, bio);
}
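
/*
 * Usage sketch (illustrative; do_submit() is a hypothetical stand-in,
 * not a real helper): a caller pairs a successful bio_queue_enter()
 * with blk_queue_exit() once it is done with the queue:
 *
 *	if (bio_queue_enter(bio))
 *		return;
 *	do_submit(q, bio);
 *	blk_queue_exit(q);
 */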

#define BIO_INLINE_VECS 4
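/*
 * A bio that needs at most BIO_INLINE_VECS vectors uses the bvecs
 * embedded in struct bio itself; bvec_alloc() and bvec_free() below
 * manage the larger, pool-backed bvec arrays.
 */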
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);

static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
	phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}
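
/*
 * Worked example (illustrative): with a segment boundary mask of 0xfff
 * (4 KiB segments), vec1 = { phys 0x10000, len 0x800 } and
 * vec2 = { phys 0x10800, len 0x800 } merge: they are physically
 * contiguous, and both ORs above yield 0x10fff.  Growing vec2 to 0xa00
 * bytes would push the merged end to 0x111ff, across the 4 KiB
 * boundary, so the merge would be rejected.
 */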

static inline bool __bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & queue_virt_boundary(q)) ||
		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!queue_virt_boundary(q))
		return false;
	return __bvec_gap_to_prev(q, bprv, offset);
}
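
/*
 * Example (illustrative): with a virt boundary mask of 0xfff, a new
 * vector starting at offset 0x200 after one ending at offset 0x1000
 * reports a gap: a device with a virtual boundary needs both the
 * previous end and the next start to be 4 KiB aligned so the vectors
 * can be mapped back to back.
 */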

static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}
/*
 * There are two different ways to handle DISCARD merges:
 *  1) If max_discard_segments > 1, the driver treats every bio as a range
 *     and sends the bios to the controller together.  The ranges don't
 *     need to be contiguous.
 *  2) Otherwise, the bios are merged into normal read/write requests, so
 *     the ranges need to be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}
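
/*
 * For example, a controller that accepts multi-range discard commands
 * advertises max_discard_segments > 1, so even discontiguous discard
 * bios can be merged into a single request.
 */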

static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     int op)
{
	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors,
			   UINT_MAX >> SECTOR_SHIFT);

	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

	return q->limits.max_sectors;
}
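
/*
 * The UINT_MAX >> SECTOR_SHIFT clamp above keeps a discard's byte count
 * representable in an unsigned int: 8388607 sectors of 512 bytes is
 * 4294966784 bytes, just under UINT_MAX.
 */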

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
void bio_integrity_free(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

int blk_integrity_add(struct gendisk *disk);
void blk_integrity_del(struct gendisk *);
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
static inline int blk_integrity_add(struct gendisk *disk)
{
	return 0;
}
static inline void blk_integrity_del(struct gendisk *disk)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);
const char *blk_status_to_str(blk_status_t status);

bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
			struct bio *bio, unsigned int nr_segs);

/*
 * Plug flush limits
 */
#define BLK_MAX_REQUEST_COUNT	32
#define BLK_PLUG_FLUSH_SIZE	(128 * 1024)
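
/*
 * A plug is flushed once it holds BLK_MAX_REQUEST_COUNT requests, or
 * earlier when the last queued request has grown to at least
 * BLK_PLUG_FLUSH_SIZE bytes (see blk_add_rq_to_plug()).
 */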

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

int elevator_switch_mq(struct request_queue *q,
			      struct elevator_type *new_e);
void elevator_exit(struct request_queue *q);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);

static inline bool blk_may_split(struct request_queue *q, struct bio *bio)
{
	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return true; /* non-trivial splitting decisions */
	default:
		break;
	}

	/*
	 * All drivers must accept single-segment bios that are <= PAGE_SIZE.
	 * This is a quick and dirty check that relies on the fact that
	 * bi_io_vec[0] is always valid if a bio has data.  The check might
	 * lead to occasional false negatives when bios are cloned, but compared
	 * to the performance impact of cloned bios themselves the loop below
	 * doesn't matter anyway.
	 */
	return q->limits.chunk_sectors || bio->bi_vcnt != 1 ||
		bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
}
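
/*
 * In other words, a bio whose single vector fits within one page can
 * skip __blk_queue_split() on queues without chunk_sectors; everything
 * else goes through the full split path.
 */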

void __blk_queue_split(struct request_queue *q, struct bio **bio,
			unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_dev_init(void);

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started
 */
static inline bool blk_do_io_stat(struct request *rq)
{
	return (rq->rq_flags & RQF_IO_STAT) && !blk_rq_is_passthrough(rq);
}

void update_io_ticks(struct block_device *part, unsigned long now, bool end);

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
 * is defined as 'unsigned int'; in addition it has to be aligned to the
 * logical block size, which is the minimum unit accepted by the hardware.
 */
static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
{
	return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
}
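
/*
 * For example, with 4096-byte logical blocks this is
 * round_down(4294967295, 4096) >> 9 = 4294963200 >> 9 = 8388600
 * sectors: the largest bio stays block-aligned and below UINT_MAX bytes.
 */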

/*
 * Internal io_context interface
 */
struct io_cq *ioc_find_get_icq(struct request_queue *q);
struct io_cq *ioc_lookup_icq(struct request_queue *q);
#ifdef CONFIG_BLK_ICQ
void ioc_clear_queue(struct request_queue *q);
#else
static inline void ioc_clear_queue(struct request_queue *q)
{
}
#endif /* CONFIG_BLK_ICQ */

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

void __blk_queue_bounce(struct request_queue *q, struct bio **bio);

static inline bool blk_queue_may_bounce(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BOUNCE) &&
		q->limits.bounce == BLK_BOUNCE_HIGH &&
		max_low_pfn >= max_pfn;
}

static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(*bio)))
		__blk_queue_bounce(q, bio);
}

#ifdef CONFIG_BLK_CGROUP_IOLATENCY
extern int blk_iolatency_init(struct request_queue *q);
#else
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif

#ifdef CONFIG_BLK_DEV_ZONED
void blk_queue_free_zone_bitmaps(struct request_queue *q);
void blk_queue_clear_zone_settings(struct request_queue *q);
#else
static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
static inline void blk_queue_clear_zone_settings(struct request_queue *q) {}
#endif

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
void blk_drop_partitions(struct gendisk *disk);

struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
		struct lock_class_key *lkclass);

int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page);

static inline struct kmem_cache *blk_get_queue_kmem_cache(bool srcu)
{
	if (srcu)
		return blk_requestq_srcu_cachep;
	return blk_requestq_cachep;
}
struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu);

int disk_scan_partitions(struct gendisk *disk, fmode_t mode);

int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);
void disk_block_events(struct gendisk *disk);
void disk_unblock_events(struct gendisk *disk);
void disk_flush_events(struct gendisk *disk, unsigned int mask);
extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

extern struct attribute_group blk_trace_attr_group;

long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);

extern const struct address_space_operations def_blk_aops;

int disk_register_independent_access_ranges(struct gendisk *disk);
void disk_unregister_independent_access_ranges(struct gendisk *disk);

#ifdef CONFIG_FAIL_MAKE_REQUEST
bool should_fail_request(struct block_device *part, unsigned int bytes);
#else /* CONFIG_FAIL_MAKE_REQUEST */
static inline bool should_fail_request(struct block_device *part,
					unsigned int bytes)
{
	return false;
}
#endif /* CONFIG_FAIL_MAKE_REQUEST */

/*
 * Optimized request reference counting. Ideally we'd make the timeout
 * handling more clever, as that's the only reason we need references at
 * all... But until this happens, this is faster than using refcount_t.
 * Also see:
 *
 * abc54d634334 ("io_uring: switch to atomic_t for io_kiocb reference count")
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)
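
/*
 * The unsigned trick above catches both a zero count and one about to
 * wrap: (unsigned int)x + 127u <= 127u holds exactly when x == 0 or
 * x >= UINT_MAX - 126.
 */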

static inline bool req_ref_inc_not_zero(struct request *req)
{
	return atomic_inc_not_zero(&req->ref);
}

static inline bool req_ref_put_and_test(struct request *req)
{
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_dec_and_test(&req->ref);
}

static inline void req_ref_set(struct request *req, int value)
{
	atomic_set(&req->ref, value);
}

static inline int req_ref_read(struct request *req)
{
	return atomic_read(&req->ref);
}
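
/*
 * Usage sketch (illustrative): a path that may race with completion,
 * such as timeout handling, takes a reference with
 * req_ref_inc_not_zero() before touching the request; whichever side
 * sees req_ref_put_and_test() return true frees the request.
 */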

#endif /* BLK_INTERNAL_H */