/*
 * 2.5 block I/O model
 *
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>
#include <linux/bug.h>

#ifdef CONFIG_BLOCK

#include <asm/io.h>

/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON	BUG_ON
#else
#define BIO_BUG_ON
#endif

#define BIO_MAX_PAGES		256
#define BIO_MAX_SIZE		(BIO_MAX_PAGES << PAGE_CACHE_SHIFT)
#define BIO_MAX_SECTORS		(BIO_MAX_SIZE >> 9)

/*
 * the upper 16 bits of bi_rw define the io priority of this bio
 */
#define BIO_PRIO_SHIFT		(8 * sizeof(unsigned long) - IOPRIO_BITS)
#define bio_prio(bio)		((bio)->bi_rw >> BIO_PRIO_SHIFT)
#define bio_prio_valid(bio)	ioprio_valid(bio_prio(bio))

#define bio_set_prio(bio, prio)		do {				\
	WARN_ON((prio) >= (1 << IOPRIO_BITS));				\
	(bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1);			\
	(bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT);	\
} while (0)

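/*
 * Illustrative sketch (caller-side code, not part of this header): stamp a
 * bio with a best-effort priority of 4 and read it back.  IOPRIO_PRIO_VALUE()
 * and IOPRIO_CLASS_BE come from <linux/ioprio.h>.
 *
 *	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4));
 *	if (bio_prio_valid(bio))
 *		pr_debug("bio prio %u\n", bio_prio(bio));
 */
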
/*
 * various member access, note that bio_data should of course not be used
 * on highmem page vectors
 */
#define bio_iovec_idx(bio, idx)	(&((bio)->bi_io_vec[(idx)]))
#define bio_iovec(bio)		bio_iovec_idx((bio), (bio)->bi_idx)
#define bio_page(bio)		bio_iovec((bio))->bv_page
#define bio_offset(bio)		bio_iovec((bio))->bv_offset
#define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)
#define bio_sectors(bio)	((bio)->bi_size >> 9)

static inline unsigned int bio_cur_bytes(struct bio *bio)
{
	if (bio->bi_vcnt)
		return bio_iovec(bio)->bv_len;
	else /* dataless requests such as discard */
		return bio->bi_size;
}

static inline void *bio_data(struct bio *bio)
{
	if (bio->bi_vcnt)
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}

static inline int bio_has_allocated_vec(struct bio *bio)
{
	return bio->bi_io_vec && bio->bi_io_vec != bio->bi_inline_vecs;
}

/*
 * will die
 */
#define bio_to_phys(bio)	(page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
#define bvec_to_phys(bv)	(page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)

/*
 * queues that have highmem support enabled may still need to revert to
 * PIO transfers occasionally and thus map high pages temporarily. For
 * permanent PIO fall back, the user is probably better off disabling highmem
 * I/O completely on that queue (see ide-dma for example)
 */
#define __bio_kmap_atomic(bio, idx, kmtype)				\
	(kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page) +		\
		bio_iovec_idx((bio), (idx))->bv_offset)

#define __bio_kunmap_atomic(addr, kmtype) kunmap_atomic(addr)

/*
 * merge helpers etc
 */

#define __BVEC_END(bio)		bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
#define __BVEC_START(bio)	bio_iovec_idx((bio), (bio)->bi_idx)

/* Default implementation of BIOVEC_PHYS_MERGEABLE */
#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))

/*
 * allow arch override, for eg virtualized architectures (put in asm/io.h)
 */
#ifndef BIOVEC_PHYS_MERGEABLE
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	__BIOVEC_PHYS_MERGEABLE(vec1, vec2)
#endif

#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
	(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
#define BIO_SEG_BOUNDARY(q, b1, b2) \
	BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))

#define bio_io_error(bio) bio_endio((bio), -EIO)

/*
 * drivers should not use the __ version unless they _really_ want to
 * run through the entire bio and not just pending pieces
 */
#define __bio_for_each_segment(bvl, bio, i, start_idx)			\
	for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx);	\
	     i < (bio)->bi_vcnt;					\
	     bvl++, i++)

#define bio_for_each_segment(bvl, bio, i)				\
	__bio_for_each_segment(bvl, bio, i, (bio)->bi_idx)

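/*
 * Illustrative sketch (caller-side code, not part of this header): walk the
 * pending segments of a bio and total up their lengths.
 *
 *	struct bio_vec *bvec;
 *	unsigned int bytes = 0;
 *	int i;
 *
 *	bio_for_each_segment(bvec, bio, i)
 *		bytes += bvec->bv_len;
 */
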
/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(rw, bio);
 * if (bio->bi_flags ...)
 *	do_something
 * bio_put(bio);
 *
 * without the bio_get(), the I/O could complete before submit_bio returns,
 * and the bio would already be freed memory by the time the
 * if (bio->bi_flags ...) test runs
 */
#define bio_get(bio)	atomic_inc(&(bio)->bi_cnt)

#if defined(CONFIG_BLK_DEV_INTEGRITY)
/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */

	sector_t		bip_sector;	/* virtual start sector */

	void			*bip_buf;	/* generated integrity data */
	bio_end_io_t		*bip_end_io;	/* saved I/O completion fn */

	unsigned int		bip_size;

	unsigned short		bip_slab;	/* slab the bip came from */
	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_idx;	/* current bip_vec index */

	struct work_struct	bip_work;	/* I/O completion */
	struct bio_vec		bip_vec[0];	/* embedded bvec array */
};
#endif /* CONFIG_BLK_DEV_INTEGRITY */

/*
 * A bio_pair is used when we need to split a bio.
 * This can only happen for a bio that refers to just one
 * page of data, and in the unusual situation when the
 * page crosses a chunk/device boundary
 *
 * The address of the master bio is stored in bio1.bi_private
 * The address of the pool the pair was allocated from is stored
 * in bio2.bi_private
 */
struct bio_pair {
	struct bio			bio1, bio2;
	struct bio_vec			bv1, bv2;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	struct bio_integrity_payload	bip1, bip2;
	struct bio_vec			iv1, iv2;
#endif
	atomic_t			cnt;
	int				error;
};
extern struct bio_pair *bio_split(struct bio *bi, int first_sectors);
extern void bio_pair_release(struct bio_pair *dbio);

extern struct bio_set *bioset_create(unsigned int, unsigned int);
extern void bioset_free(struct bio_set *);

extern struct bio *bio_alloc(gfp_t, unsigned int);
extern struct bio *bio_kmalloc(gfp_t, unsigned int);
extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
extern void bio_put(struct bio *);
extern void bio_free(struct bio *, struct bio_set *);

extern void bio_endio(struct bio *, int);
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);

extern void __bio_clone(struct bio *, struct bio *);
extern struct bio *bio_clone(struct bio *, gfp_t);

extern void bio_init(struct bio *);

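/*
 * Illustrative sketch (caller-side code, not part of this header): allocate
 * a single-vec bio, fill it in, and submit it for reading.  my_end_io is a
 * hypothetical completion callback; bdev, sector and page are assumed to
 * have been set up by the caller.
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	if (!bio)
 *		return -ENOMEM;
 *	bio->bi_bdev = bdev;
 *	bio->bi_sector = sector;
 *	bio->bi_end_io = my_end_io;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	submit_bio(READ, bio);
 */
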
extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
extern int bio_get_nr_vecs(struct block_device *);
extern sector_t bio_sector_offset(struct bio *, unsigned short, unsigned int);
extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
				unsigned long, unsigned int, int, gfp_t);
struct sg_iovec;
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
				    struct block_device *,
				    struct sg_iovec *, int, int, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
				gfp_t);
extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
				 gfp_t, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void bio_flush_dcache_pages(struct bio *bi);
#else
static inline void bio_flush_dcache_pages(struct bio *bi)
{
}
#endif

extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *,
				 unsigned long, unsigned int, int, gfp_t);
extern struct bio *bio_copy_user_iov(struct request_queue *,
				     struct rq_map_data *, struct sg_iovec *,
				     int, int, gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *);
extern void bvec_free_bs(struct bio_set *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);

#ifdef CONFIG_BLK_CGROUP
int bio_associate_current(struct bio *bio);
void bio_disassociate_task(struct bio *bio);
#else	/* CONFIG_BLK_CGROUP */
static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
static inline void bio_disassociate_task(struct bio *bio) { }
#endif	/* CONFIG_BLK_CGROUP */

/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2
#define BIOVEC_NR_POOLS 6
#define BIOVEC_MAX_IDX	(BIOVEC_NR_POOLS - 1)

struct bio_set {
	struct kmem_cache	*bio_slab;
	unsigned int		front_pad;

	mempool_t		*bio_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t		*bio_integrity_pool;
#endif
	mempool_t		*bvec_pool;
};

struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
};

extern struct bio_set *fs_bio_set;

/*
 * a small number of entries is fine, not going to be performance critical.
 * basically we just need to survive
 */
#define BIO_SPLIT_ENTRIES 2

#ifdef CONFIG_HIGHMEM
/*
 * remember to never re-enable interrupts between a bvec_kmap_irq and
 * bvec_kunmap_irq!
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	unsigned long addr;

	/*
	 * might not be a highmem page, but the preempt/irq count
	 * balancing is a lot nicer this way
	 */
	local_irq_save(*flags);
	addr = (unsigned long) kmap_atomic(bvec->bv_page);

	BUG_ON(addr & ~PAGE_MASK);

	return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

	kunmap_atomic((void *) ptr);
	local_irq_restore(*flags);
}

#else
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	return page_address(bvec->bv_page) + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	*flags = 0;
}
#endif

static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
				   unsigned long *flags)
{
	return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags);
}
#define __bio_kunmap_irq(buf, flags)	bvec_kunmap_irq(buf, flags)

#define bio_kmap_irq(bio, flags) \
	__bio_kmap_irq((bio), (bio)->bi_idx, (flags))
#define bio_kunmap_irq(buf, flags)	__bio_kunmap_irq(buf, flags)

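/*
 * Illustrative sketch (caller-side code, not part of this header): zero the
 * current segment of a bio with the IRQ-safe kmap helpers.  Interrupts must
 * stay disabled between the map and the unmap.
 *
 *	unsigned long flags;
 *	char *buf = bio_kmap_irq(bio, &flags);
 *
 *	memset(buf, 0, bio_cur_bytes(bio));
 *	bio_kunmap_irq(buf, &flags);
 */
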
/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline int bio_has_data(struct bio *bio)
{
	return bio && bio->bi_io_vec != NULL;
}

/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio.  The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
	struct bio *head;
	struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
	return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
	unsigned sz = 0;
	struct bio *bio;

	bio_list_for_each(bio, bl)
		sz++;

	return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;

	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;

	bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = bl->head;

	bl->head = bio;

	if (!bl->tail)
		bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;

	bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
				       struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->head)
		bl2->tail->bi_next = bl->head;
	else
		bl->tail = bl2->tail;

	bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bl->head->bi_next;
		if (!bl->head)
			bl->tail = NULL;

		bio->bi_next = NULL;
	}

	return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	bl->head = bl->tail = NULL;

	return bio;
}

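/*
 * Illustrative sketch (assumed remapping-driver code, not part of this
 * header): queue deferred bios on a bio_list and drain it later in
 * submission order.
 *
 *	struct bio_list deferred;
 *	struct bio *bio;
 *
 *	bio_list_init(&deferred);
 *	bio_list_add(&deferred, bio);
 *	...
 *	while ((bio = bio_list_pop(&deferred)))
 *		generic_make_request(bio);
 */
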
#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_vec_idx(bip, idx)	(&((bip)->bip_vec[(idx)]))
#define bip_vec(bip)		bip_vec_idx(bip, 0)

#define __bip_for_each_vec(bvl, bip, i, start_idx)			\
	for (bvl = bip_vec_idx((bip), (start_idx)), i = (start_idx);	\
	     i < (bip)->bip_vcnt;					\
	     bvl++, i++)

#define bip_for_each_vec(bvl, bip, i)					\
	__bip_for_each_vec(bvl, bip, i, (bip)->bip_idx)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
	for_each_bio(_bio)						\
		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

#define bio_integrity(bio) ((bio)->bi_integrity != NULL)

extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *);
extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern void bio_integrity_free(struct bio *, struct bio_set *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_set_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_get_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_prep(struct bio *);
extern void bio_integrity_endio(struct bio *, int);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern void bio_integrity_split(struct bio *, struct bio_pair *, int);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t, struct bio_set *);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);

#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline int bio_integrity(struct bio *bio)
{
	return 0;
}

static inline int bio_integrity_enabled(struct bio *bio)
{
	return 0;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
	return;
}

static inline int bio_integrity_prep(struct bio *bio)
{
	return 0;
}

static inline void bio_integrity_free(struct bio *bio, struct bio_set *bs)
{
	return;
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
				      gfp_t gfp_mask, struct bio_set *bs)
{
	return 0;
}

static inline void bio_integrity_split(struct bio *bio, struct bio_pair *bp,
				       int sectors)
{
	return;
}

static inline void bio_integrity_advance(struct bio *bio,
					 unsigned int bytes_done)
{
	return;
}

static inline void bio_integrity_trim(struct bio *bio, unsigned int offset,
				      unsigned int sectors)
{
	return;
}

static inline void bio_integrity_init(void)
{
	return;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_H */