xref: /openbmc/linux/block/blk.h (revision 83daab06252ee5d0e1f4373ff28b79304945fc19)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/blk-crypto.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <xen/xen.h>
#include "blk-crypto-internal.h"

struct elevator_type;

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern struct dentry *blk_debugfs_root;

struct blk_flush_queue {
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	spinlock_t		mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kmem_cache *blk_requestq_srcu_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

void blk_freeze_queue(struct request_queue *q);
void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
void blk_queue_start_drain(struct request_queue *q);
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
void submit_bio_noacct_nocheck(struct bio *bio);

static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
	rcu_read_lock();
	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
		goto fail;

	/*
	 * The code that increments the pm_only counter must ensure that the
	 * counter is globally visible before the queue is unfrozen.
	 */
	if (blk_queue_pm_only(q) &&
	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
		goto fail_put;

	rcu_read_unlock();
	return true;

fail_put:
	blk_queue_exit(q);
fail:
	rcu_read_unlock();
	return false;
}

static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);

	if (blk_try_enter_queue(q, false))
		return 0;
	return __bio_queue_enter(q, bio);
}
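
/*
 * Usage sketch (illustrative only, not an API defined in this header): a
 * submission path pairs a successful queue-enter with blk_queue_exit().  The
 * fast path above only falls back to __bio_queue_enter() when the percpu-ref
 * tryget fails, e.g. while the queue is frozen or being drained:
 *
 *	if (bio_queue_enter(bio))
 *		return;		// bio was not admitted to the queue
 *	...dispatch the bio...
 *	blk_queue_exit(bdev_get_queue(bio->bi_bdev));
 */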

#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);

static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
	phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}
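
/*
 * Worked example (values assumed for illustration): with a 4 KiB segment
 * boundary, queue_segment_boundary() is 0xfff.  Two physically contiguous
 * 2 KiB vecs at 0x10000 and 0x10800 merge, since (0x10000 | 0xfff) and
 * ((0x10800 + 0x7ff) | 0xfff) are both 0x10fff.  The same two vecs at
 * 0x10800 and 0x11000 do not merge: the combined segment would straddle
 * the 4 KiB boundary at 0x11000, so the last check returns false.
 */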

static inline bool __bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & queue_virt_boundary(q)) ||
		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!queue_virt_boundary(q))
		return false;
	return __bvec_gap_to_prev(q, bprv, offset);
}
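
/*
 * Worked example (assumed values): for a device with a 4 KiB virtual
 * boundary, queue_virt_boundary() is 0xfff.  If bprv has bv_offset 0 and
 * bv_len 4096 it ends exactly on the boundary, so a following vec starting
 * at offset 0 leaves no gap.  If bprv had bv_len 2048 instead, then
 * (0 + 2048) & 0xfff != 0 and any following vec would create a gap that
 * such a device cannot express in one segment.
 */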

static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

/*
 * There are two different ways to handle DISCARD merges:
 *  1) If max_discard_segments > 1, the driver treats every bio as a range and
 *     sends the bios to the controller together. The ranges don't need to be
 *     contiguous.
 *  2) Otherwise, discards are merged like normal read/write requests, so the
 *     ranges need to be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
void bio_integrity_free(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

int blk_integrity_add(struct gendisk *disk);
void blk_integrity_del(struct gendisk *);
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
static inline int blk_integrity_add(struct gendisk *disk)
{
	return 0;
}
static inline void blk_integrity_del(struct gendisk *disk)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);
const char *blk_status_to_str(blk_status_t status);

bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
			struct bio *bio, unsigned int nr_segs);

/*
 * Plug flush limits
 */
#define BLK_MAX_REQUEST_COUNT	32
#define BLK_PLUG_FLUSH_SIZE	(128 * 1024)

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

int elevator_switch_mq(struct request_queue *q,
			      struct elevator_type *new_e);
void elevator_exit(struct request_queue *q);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);

static inline bool blk_may_split(struct request_queue *q, struct bio *bio)
{
	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return true; /* non-trivial splitting decisions */
	default:
		break;
	}

	/*
	 * All drivers must accept single-segment bios that are <= PAGE_SIZE.
	 * This is a quick and dirty check that relies on the fact that
	 * bi_io_vec[0] is always valid if a bio has data.  The check might
	 * lead to occasional false negatives when bios are cloned, but compared
	 * to the performance impact of cloned bios themselves the check below
	 * doesn't matter anyway.
	 */
	return q->limits.chunk_sectors || bio->bi_vcnt != 1 ||
		bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
}
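
/*
 * Example (illustrative values, assuming 4 KiB pages): a bio with a single
 * bvec of bv_len 4096 and bv_offset 0 on a queue without chunk_sectors
 * returns false here and skips __blk_queue_split() entirely.  The same bio
 * with bv_offset 512 (4096 + 512 > PAGE_SIZE), or any multi-segment bio,
 * falls through to the full split path.
 */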

void __blk_queue_split(struct request_queue *q, struct bio **bio,
			unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_dev_init(void);

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started
 */
static inline bool blk_do_io_stat(struct request *rq)
{
	return (rq->rq_flags & RQF_IO_STAT) && !blk_rq_is_passthrough(rq);
}

void update_io_ticks(struct block_device *part, unsigned long now, bool end);

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
 * is defined as 'unsigned int'.  In addition it has to be aligned to the
 * logical block size, which is the minimum unit accepted by the hardware.
 */
static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
{
	return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
}
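
/*
 * Worked example (assumed 4096-byte logical blocks): round_down(UINT_MAX,
 * 4096) == 4294963200 bytes, and 4294963200 >> 9 == 8388600 sectors, i.e.
 * the largest whole number of logical blocks that still fits in bi_size.
 */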

/*
 * The max bio size which is aligned to q->limits.discard_granularity. This
 * is a hint for splitting large discard bios in the generic block layer, so
 * that if a device driver needs to split a discard bio further, the resulting
 * bi_size can easily stay aligned to the discard_granularity of the device's
 * queue.
 */
static inline unsigned int bio_aligned_discard_max_sectors(
					struct request_queue *q)
{
	return round_down(UINT_MAX, q->limits.discard_granularity) >>
			SECTOR_SHIFT;
}
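
/*
 * Worked example (assumed 1 MiB discard_granularity): round_down(UINT_MAX,
 * 1048576) == 4293918720 bytes, which is 8386560 sectors; a discard bio
 * capped at that size can be split on any 1 MiB boundary without leaving a
 * misaligned remainder.
 */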

/*
 * Internal io_context interface
 */
struct io_cq *ioc_find_get_icq(struct request_queue *q);
struct io_cq *ioc_lookup_icq(struct request_queue *q);
#ifdef CONFIG_BLK_ICQ
void ioc_clear_queue(struct request_queue *q);
#else
static inline void ioc_clear_queue(struct request_queue *q)
{
}
#endif /* CONFIG_BLK_ICQ */

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

void __blk_queue_bounce(struct request_queue *q, struct bio **bio);

static inline bool blk_queue_may_bounce(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BOUNCE) &&
		q->limits.bounce == BLK_BOUNCE_HIGH &&
		max_low_pfn >= max_pfn;
}

static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(*bio)))
		__blk_queue_bounce(q, bio);
}

#ifdef CONFIG_BLK_CGROUP_IOLATENCY
extern int blk_iolatency_init(struct request_queue *q);
#else
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif

#ifdef CONFIG_BLK_DEV_ZONED
void blk_queue_free_zone_bitmaps(struct request_queue *q);
void blk_queue_clear_zone_settings(struct request_queue *q);
#else
static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
static inline void blk_queue_clear_zone_settings(struct request_queue *q) {}
#endif

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
void blk_drop_partitions(struct gendisk *disk);

int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page);

static inline struct kmem_cache *blk_get_queue_kmem_cache(bool srcu)
{
	if (srcu)
		return blk_requestq_srcu_cachep;
	return blk_requestq_cachep;
}
struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu);

int disk_scan_partitions(struct gendisk *disk, fmode_t mode);

int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);
void disk_block_events(struct gendisk *disk);
void disk_unblock_events(struct gendisk *disk);
void disk_flush_events(struct gendisk *disk, unsigned int mask);
extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

static inline void bio_clear_polled(struct bio *bio)
{
	/* can't support alloc cache if we turn off polling */
	bio_clear_flag(bio, BIO_PERCPU_CACHE);
	bio->bi_opf &= ~REQ_POLLED;
}

long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);

extern const struct address_space_operations def_blk_aops;

int disk_register_independent_access_ranges(struct gendisk *disk,
				struct blk_independent_access_ranges *new_iars);
void disk_unregister_independent_access_ranges(struct gendisk *disk);

#ifdef CONFIG_FAIL_MAKE_REQUEST
bool should_fail_request(struct block_device *part, unsigned int bytes);
#else /* CONFIG_FAIL_MAKE_REQUEST */
static inline bool should_fail_request(struct block_device *part,
					unsigned int bytes)
{
	return false;
}
#endif /* CONFIG_FAIL_MAKE_REQUEST */

/*
 * Optimized request reference counting. Ideally we'd make timeouts more
 * clever, as that's the only reason we need references at all... But until
 * this happens, this is faster than using refcount_t. Also see:
 *
 * abc54d634334 ("io_uring: switch to atomic_t for io_kiocb reference count")
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)
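
/*
 * The unsigned arithmetic above is a branch-free way of catching broken
 * refcounts: for any value v in [-127, 0], (unsigned int)v + 127u wraps (or
 * stays) into [0, 127], so the macro fires; for a healthy count of 1 the
 * result is 128 and the WARN below stays quiet.  Worked cases, assuming
 * 32-bit int: v == 0 -> 127, v == 1 -> 128, v == -5 -> 122.
 */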

static inline bool req_ref_inc_not_zero(struct request *req)
{
	return atomic_inc_not_zero(&req->ref);
}

static inline bool req_ref_put_and_test(struct request *req)
{
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_dec_and_test(&req->ref);
}

static inline void req_ref_set(struct request *req, int value)
{
	atomic_set(&req->ref, value);
}

static inline int req_ref_read(struct request *req)
{
	return atomic_read(&req->ref);
}

#endif /* BLK_INTERNAL_H */