/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <xen/xen.h>
#include "blk-mq.h"
#include "blk-mq-sched.h"

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

#ifdef CONFIG_DEBUG_FS
extern struct dentry *blk_debugfs_root;
#endif

struct blk_flush_queue {
	unsigned int		flush_queue_delayed:1;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	/*
	 * flush_rq shares tag with this rq, both can't be active
	 * at the same time
	 */
	struct request		*orig_rq;
	spinlock_t		mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

static inline struct blk_flush_queue *
blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
{
	return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
}

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

static inline bool
is_flush_rq(struct request *req, struct blk_mq_hw_ctx *hctx)
{
	return hctx->fq->flush_rq == req;
}

struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
		int node, int cmd_size, gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

void blk_freeze_queue(struct request_queue *q);

static inline void blk_queue_enter_live(struct request_queue *q)
{
	/*
	 * Given that running in generic_make_request() context
	 * guarantees that a live reference against q_usage_counter has
	 * been established, further references under that same context
	 * need not check that the queue has been frozen (marked dead).
	 */
	percpu_ref_get(&q->q_usage_counter);
}

static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
	phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}

static inline bool __bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & queue_virt_boundary(q)) ||
		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!queue_virt_boundary(q))
		return false;
	return __bvec_gap_to_prev(q, bprv, offset);
}
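
/*
 * Illustrative example (values are hypothetical, not from any specific
 * driver): with queue_virt_boundary(q) == 0xfff (a 4KiB boundary mask),
 * a previous bvec whose data ends at bv_offset + bv_len == 0x800 is not
 * aligned to the boundary, so bvec_gap_to_prev() reports a gap and the
 * new bio_vec cannot be added without creating an SG-list hole that
 * such hardware cannot handle.
 */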

static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
		unsigned int nr_segs)
{
	rq->nr_phys_segments = nr_segs;
	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;
	rq->ioprio = bio_prio(bio);

	if (bio->bi_disk)
		rq->rq_disk = bio->bi_disk;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);

bool bio_attempt_front_merge(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool bio_attempt_back_merge(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct bio *bio);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **same_queue_rq);

void blk_account_io_start(struct request *req, bool new_io);
void blk_account_io_completion(struct request *req, unsigned int bytes);
void blk_account_io_done(struct request *req, u64 now);

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

void elevator_init_mq(struct request_queue *q);
int elevator_switch_mq(struct request_queue *q,
		struct elevator_type *new_e);
void __elevator_exit(struct request_queue *, struct elevator_queue *);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);

static inline void elevator_exit(struct request_queue *q,
		struct elevator_queue *e)
{
	lockdep_assert_held(&q->sysfs_lock);

	blk_mq_sched_free_requests(q);
	__elevator_exit(q, e);
}

struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);

#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif

void __blk_queue_split(struct request_queue *q, struct bio **bio,
		unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
int ll_front_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
struct request *attempt_back_merge(struct request_queue *q, struct request *rq);
struct request *attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_dev_init(void);

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started
 */
static inline bool blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk && (rq->rq_flags & RQF_IO_STAT);
}

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
 * is defined as 'unsigned int', and it also has to be aligned to the
 * logical block size, which is the minimum unit accepted by the hardware.
 */
static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
{
	return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
}
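
/*
 * Worked example (hypothetical queue, for illustration only): with a
 * 4096-byte logical block size, round_down(UINT_MAX, 4096) is
 * 4294963200 bytes, so bio_allowed_max_sectors() returns
 * 4294963200 >> 9 == 8388600 512-byte sectors.
 */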

/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

/**
 * create_io_context - try to create task->io_context
 * @gfp_mask: allocation mask
 * @node: allocation node
 *
 * If %current->io_context is %NULL, allocate a new io_context and install
 * it.  Returns the current %current->io_context, which may be %NULL if
 * allocation failed.
 *
 * Note that this function can't be called with IRQs disabled because
 * task_lock, which protects %current->io_context, is IRQ-unsafe.
 */
static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
{
	WARN_ON_ONCE(irqs_disabled());
	if (unlikely(!current->io_context))
		create_task_io_context(current, gfp_mask, node);
	return current->io_context;
}
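
/*
 * Usage sketch (illustrative only, not taken from a specific caller): a
 * submission path that wants per-task IO state before setting up a
 * request might do
 *
 *	struct io_context *ioc = create_io_context(GFP_NOIO, q->node);
 *
 *	if (!ioc)
 *		... continue without per-task IO context ...
 *
 * and must run with IRQs enabled, as noted in the kerneldoc above.
 */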

/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern void blk_throtl_register_queue(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_register_queue(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

#ifdef CONFIG_BLK_CGROUP_IOLATENCY
extern int blk_iolatency_init(struct request_queue *q);
#else
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif

struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);

#ifdef CONFIG_BLK_DEV_ZONED
void blk_queue_free_zone_bitmaps(struct request_queue *q);
#else
static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
#endif

#endif /* BLK_INTERNAL_H */