/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <xen/xen.h>
#include "blk-crypto-internal.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern struct dentry *blk_debugfs_root;

struct blk_flush_queue {
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	spinlock_t		mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

static inline struct blk_flush_queue *
blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
{
	return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
}

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

void blk_freeze_queue(struct request_queue *q);

#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);

static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
	phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}

static inline bool __bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & queue_virt_boundary(q)) ||
		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!queue_virt_boundary(q))
		return false;
	return __bvec_gap_to_prev(q, bprv, offset);
}
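
/*
 * Illustrative example (not part of the original header): a caller that walks
 * consecutive bio_vecs and wants to know whether @cur can share a hardware
 * segment with @prev might combine the helpers above roughly like this
 * (simplified sketch; the real logic lives in blk-merge.c):
 *
 *	if (biovec_phys_mergeable(q, &prev, &cur))
 *		;	// physically contiguous and within the segment
 *			// boundary: extend the previous SG entry
 *	if (bvec_gap_to_prev(q, &prev, cur.bv_offset))
 *		;	// violates the queue's virt boundary: the bios or
 *			// requests holding these vecs must not be merged
 */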

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
void bio_integrity_free(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

int blk_integrity_add(struct gendisk *disk);
void blk_integrity_del(struct gendisk *);
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
static inline int blk_integrity_add(struct gendisk *disk)
{
	return 0;
}
static inline void blk_integrity_del(struct gendisk *disk)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
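
/*
 * Illustrative note (not part of the original header): the integrity gap
 * helpers above apply the same virt-boundary rule as bvec_gap_to_prev(), but
 * to the protection-information vectors. As a sketch, a back-merge path
 * might refuse a merge roughly like this (simplified, hypothetical caller):
 *
 *	if (blk_integrity_rq(req) && integrity_req_gap_back_merge(req, bio))
 *		return 0;	// merging would create a PI gap, refuse it
 */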

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);

bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **same_queue_rq);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
			struct bio *bio, unsigned int nr_segs);

void blk_account_io_start(struct request *req);
void blk_account_io_done(struct request *req, u64 now);

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

int elevator_switch_mq(struct request_queue *q,
			      struct elevator_type *new_e);
void __elevator_exit(struct request_queue *, struct elevator_queue *);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);

static inline void elevator_exit(struct request_queue *q,
		struct elevator_queue *e)
{
	lockdep_assert_held(&q->sysfs_lock);

	blk_mq_sched_free_requests(q);
	__elevator_exit(q, e);
}

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);

void __blk_queue_split(struct bio **bio, unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_dev_init(void);

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started
 */
static inline bool blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk && (rq->rq_flags & RQF_IO_STAT);
}

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
 * is defined as 'unsigned int', and it also has to be aligned to the logical
 * block size, which is the minimum unit accepted by the hardware.
 */
static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
{
	return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
}

/*
 * The max bio size which is aligned to q->limits.discard_granularity. This
 * is a hint for splitting large discard bios in the generic block layer; if
 * the device driver then needs to split the discard bio into smaller ones,
 * their bi_size can very probably be aligned to the discard_granularity of
 * the device's queue.
 */
static inline unsigned int bio_aligned_discard_max_sectors(
					struct request_queue *q)
{
	return round_down(UINT_MAX, q->limits.discard_granularity) >>
			SECTOR_SHIFT;
}
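
/*
 * Worked example (illustrative, not from the original header): with a
 * 4096-byte logical block size, round_down(UINT_MAX, 4096) = 4294963200
 * bytes, so bio_allowed_max_sectors() yields 4294963200 >> 9 = 8388600
 * 512-byte sectors. Likewise, with a discard_granularity of 1 MiB,
 * bio_aligned_discard_max_sectors() yields
 * round_down(UINT_MAX, 1048576) >> 9 = 4293918720 >> 9 = 8386560 sectors.
 */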

/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern void blk_throtl_register_queue(struct request_queue *q);
extern void blk_throtl_charge_bio_split(struct bio *bio);
bool blk_throtl_bio(struct bio *bio);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_register_queue(struct request_queue *q) { }
static inline void blk_throtl_charge_bio_split(struct bio *bio) { }
static inline bool blk_throtl_bio(struct bio *bio) { return false; }
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

void __blk_queue_bounce(struct request_queue *q, struct bio **bio);

static inline bool blk_queue_may_bounce(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BOUNCE) &&
		q->limits.bounce == BLK_BOUNCE_HIGH &&
		max_low_pfn < max_pfn;
}

static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(*bio)))
		__blk_queue_bounce(q, bio);
}
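
/*
 * Illustrative note (not part of the original header): on a configuration
 * without highmem (e.g. a typical 64-bit build), max_low_pfn equals max_pfn,
 * so blk_queue_may_bounce() is false and blk_queue_bounce() is a no-op. Only
 * a highmem-capable kernel driving a device limited to BLK_BOUNCE_HIGH takes
 * the slow __blk_queue_bounce() path, and even then only for bios that
 * actually carry data.
 */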

#ifdef CONFIG_BLK_CGROUP_IOLATENCY
extern int blk_iolatency_init(struct request_queue *q);
#else
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif

struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);

#ifdef CONFIG_BLK_DEV_ZONED
void blk_queue_free_zone_bitmaps(struct request_queue *q);
void blk_queue_clear_zone_settings(struct request_queue *q);
#else
static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
static inline void blk_queue_clear_zone_settings(struct request_queue *q) {}
#endif

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);

int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page);

struct request_queue *blk_alloc_queue(int node_id);

int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);
extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

static inline void bio_clear_hipri(struct bio *bio)
{
	/* can't support alloc cache if we turn off polling */
	bio_clear_flag(bio, BIO_PERCPU_CACHE);
	bio->bi_opf &= ~REQ_HIPRI;
}

extern const struct address_space_operations def_blk_aops;

#endif /* BLK_INTERNAL_H */