/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/blk-crypto.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <xen/xen.h>
#include "blk-crypto-internal.h"

struct elevator_type;

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern struct dentry *blk_debugfs_root;

struct blk_flush_queue {
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	spinlock_t		mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kmem_cache *blk_requestq_srcu_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

void blk_freeze_queue(struct request_queue *q);
void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
void blk_queue_start_drain(struct request_queue *q);
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
bool submit_bio_checks(struct bio *bio);

static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
	rcu_read_lock();
	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
		goto fail;

	/*
	 * The code that increments the pm_only counter must ensure that the
	 * counter is globally visible before the queue is unfrozen.
	 */
	if (blk_queue_pm_only(q) &&
	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
		goto fail_put;

	rcu_read_unlock();
	return true;

fail_put:
	blk_queue_exit(q);
fail:
	rcu_read_unlock();
	return false;
}

static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);

	if (blk_try_enter_queue(q, false))
		return 0;
	return __bio_queue_enter(q, bio);
}

#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);

static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
	phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}

static inline bool __bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & queue_virt_boundary(q)) ||
		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

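/*
 * A quick worked example of the check above: with queue_virt_boundary(q)
 * equal to 0xfff (a 4K virtual boundary), a gap is reported unless the
 * previous bvec ends on a 4K boundary (bv_offset + bv_len is a multiple
 * of 4096) and the new bvec starts on one (offset & 0xfff == 0).  So a
 * 512-byte bvec followed by a bvec at offset 512 creates a gap, while a
 * full 4K bvec followed by a bvec at offset 0 does not.
 */
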
/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!queue_virt_boundary(q))
		return false;
	return __bvec_gap_to_prev(q, bprv, offset);
}

static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

/*
 * There are two different ways to handle DISCARD merges:
 *  1) If max_discard_segments > 1, the driver treats every bio as a range
 *     and sends the bios to the controller together. The ranges don't need
 *     to be contiguous.
 *  2) Otherwise, the bios are merged into a normal read/write request, so
 *     the ranges need to be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
void bio_integrity_free(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

int blk_integrity_add(struct gendisk *disk);
void blk_integrity_del(struct gendisk *);
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
static inline int blk_integrity_add(struct gendisk *disk)
{
	return 0;
}
static inline void blk_integrity_del(struct gendisk *disk)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
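
/*
 * A rough sketch of how the completion path is expected to use
 * bio_integrity_endio(); this is simplified and not a verbatim copy of
 * bio_endio():
 *
 *	void bio_endio(struct bio *bio)
 *	{
 *		if (!bio_integrity_endio(bio))
 *			return;
 *		...
 *	}
 *
 * A false return means verification was deferred to the integrity
 * workqueue, which calls bio_endio() again once it is done.  The
 * !CONFIG_BLK_DEV_INTEGRITY stub above always returns true, so the
 * completion is never deferred in that case.
 */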

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);
const char *blk_status_to_str(blk_status_t status);

bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
			struct bio *bio, unsigned int nr_segs);

/*
 * Plug flush limits
 */
#define BLK_MAX_REQUEST_COUNT 32
#define BLK_PLUG_FLUSH_SIZE (128 * 1024)

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

int elevator_switch_mq(struct request_queue *q,
			      struct elevator_type *new_e);
void elevator_exit(struct request_queue *q);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);

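/*
 * A quick worked example for the fast-path check in blk_may_split()
 * below, assuming PAGE_SIZE == 4096: a bio with a single bvec of
 * bv_offset == 3072 and bv_len == 2048 spans two pages (3072 + 2048 >
 * 4096), so it may still need splitting even though it only carries 2K
 * of data, while a single 4096-byte bvec at offset 0 never does (unless
 * chunk_sectors is set).
 */
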
static inline bool blk_may_split(struct request_queue *q, struct bio *bio)
{
	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
	case REQ_OP_WRITE_SAME:
		return true; /* non-trivial splitting decisions */
	default:
		break;
	}

	/*
	 * All drivers must accept single-segment bios that are <= PAGE_SIZE.
	 * This is a quick and dirty check that relies on the fact that
	 * bi_io_vec[0] is always valid if a bio has data.  The check might
	 * lead to occasional false negatives when bios are cloned, but
	 * compared to the performance impact of cloned bios themselves the
	 * extra work in the split path doesn't matter anyway.
	 */
	return q->limits.chunk_sectors || bio->bi_vcnt != 1 ||
		bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
}

void __blk_queue_split(struct request_queue *q, struct bio **bio,
			unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_dev_init(void);

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started
 */
static inline bool blk_do_io_stat(struct request *rq)
{
	return (rq->rq_flags & RQF_IO_STAT) && rq->q->disk;
}

void update_io_ticks(struct block_device *part, unsigned long now, bool end);

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
 * is defined as 'unsigned int', and it also has to be aligned to the
 * logical block size, which is the minimum unit accepted by the hardware.
 */
static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
{
	return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
}

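/*
 * For example, with a 4096-byte logical block size bio_allowed_max_sectors()
 * works out to round_down(4294967295, 4096) >> 9 == 8388600 sectors, i.e.
 * just under 4 GiB per bio.
 */
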
/*
 * The max bio size that is aligned to q->limits.discard_granularity. This
 * is a hint for splitting large discard bios in the generic block layer, so
 * that if the device driver needs to split them into smaller ones, their
 * bi_size can easily be aligned to the discard_granularity of the device's
 * queue.
 */
static inline unsigned int bio_aligned_discard_max_sectors(
					struct request_queue *q)
{
	return round_down(UINT_MAX, q->limits.discard_granularity) >>
			SECTOR_SHIFT;
}

/*
 * Internal io_context interface
 */
struct io_cq *ioc_find_get_icq(struct request_queue *q);
struct io_cq *ioc_lookup_icq(struct request_queue *q);
#ifdef CONFIG_BLK_ICQ
void ioc_clear_queue(struct request_queue *q);
#else
static inline void ioc_clear_queue(struct request_queue *q)
{
}
#endif /* CONFIG_BLK_ICQ */

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

void __blk_queue_bounce(struct request_queue *q, struct bio **bio);

static inline bool blk_queue_may_bounce(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BOUNCE) &&
		q->limits.bounce == BLK_BOUNCE_HIGH &&
		max_low_pfn >= max_pfn;
}

static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(*bio)))
		__blk_queue_bounce(q, bio);
}

#ifdef CONFIG_BLK_CGROUP_IOLATENCY
extern int blk_iolatency_init(struct request_queue *q);
#else
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif

struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);

#ifdef CONFIG_BLK_DEV_ZONED
void blk_queue_free_zone_bitmaps(struct request_queue *q);
void blk_queue_clear_zone_settings(struct request_queue *q);
#else
static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
static inline void blk_queue_clear_zone_settings(struct request_queue *q) {}
#endif

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);

int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page);

static inline struct kmem_cache *blk_get_queue_kmem_cache(bool srcu)
{
	if (srcu)
		return blk_requestq_srcu_cachep;
	return blk_requestq_cachep;
}
struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu);

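/*
 * Illustrative pairing (a sketch, not a verbatim caller): a queue
 * allocated with blk_alloc_queue(node, true) is expected to come from
 * blk_requestq_srcu_cachep, so the code that eventually frees it has to
 * pick the same cache via blk_get_queue_kmem_cache(true); the srcu flag
 * must agree on both sides.
 */
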
int disk_scan_partitions(struct gendisk *disk, fmode_t mode);

int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);
extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

static inline void bio_clear_polled(struct bio *bio)
{
	/* can't support alloc cache if we turn off polling */
	bio_clear_flag(bio, BIO_PERCPU_CACHE);
	bio->bi_opf &= ~REQ_POLLED;
}

long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);

extern const struct address_space_operations def_blk_aops;

int disk_register_independent_access_ranges(struct gendisk *disk,
				struct blk_independent_access_ranges *new_iars);
void disk_unregister_independent_access_ranges(struct gendisk *disk);

#ifdef CONFIG_FAIL_MAKE_REQUEST
bool should_fail_request(struct block_device *part, unsigned int bytes);
#else /* CONFIG_FAIL_MAKE_REQUEST */
static inline bool should_fail_request(struct block_device *part,
					unsigned int bytes)
{
	return false;
}
#endif /* CONFIG_FAIL_MAKE_REQUEST */

/*
 * Optimized request reference counting. Ideally we'd make timeouts be more
 * clever, as that's the only reason we need references at all... But until
 * this happens, this is faster than using refcount_t. Also see:
 *
 * abc54d634334 ("io_uring: switch to atomic_t for io_kiocb reference count")
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)

static inline bool req_ref_inc_not_zero(struct request *req)
{
	return atomic_inc_not_zero(&req->ref);
}

static inline bool req_ref_put_and_test(struct request *req)
{
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_dec_and_test(&req->ref);
}

static inline void req_ref_set(struct request *req, int value)
{
	atomic_set(&req->ref, value);
}

static inline int req_ref_read(struct request *req)
{
	return atomic_read(&req->ref);
}

#endif /* BLK_INTERNAL_H */