/*
 * 2.5 block I/O model
 *
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>
#include <linux/bug.h>

#ifdef CONFIG_BLOCK

#include <asm/io.h>

/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON      BUG_ON
#else
#define BIO_BUG_ON
#endif

#define BIO_MAX_PAGES           256

#define bio_prio(bio)                   (bio)->bi_ioprio
#define bio_set_prio(bio, prio)         ((bio)->bi_ioprio = prio)

#define bio_iter_iovec(bio, iter)                               \
        bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter)                                \
        bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter)                                 \
        bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter)                              \
        bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio)           bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)         bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)          bio_iter_iovec((bio), (bio)->bi_iter)

#define bio_multiple_segments(bio)                              \
        ((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
#define bio_sectors(bio)        ((bio)->bi_iter.bi_size >> 9)
#define bio_end_sector(bio)     ((bio)->bi_iter.bi_sector + bio_sectors((bio)))
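/*
 * Worked example (illustrative only): the accessors above do plain
 * 512-byte-sector arithmetic. For a bio with bi_iter.bi_sector == 1024
 * and bi_iter.bi_size == 4096, bio_sectors(bio) is 8 and
 * bio_end_sector(bio) is 1032.
 */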
/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
        if (bio &&
            bio->bi_iter.bi_size &&
            bio_op(bio) != REQ_OP_DISCARD &&
            bio_op(bio) != REQ_OP_SECURE_ERASE)
                return true;

        return false;
}

static inline bool bio_no_advance_iter(struct bio *bio)
{
        return bio_op(bio) == REQ_OP_DISCARD ||
               bio_op(bio) == REQ_OP_SECURE_ERASE ||
               bio_op(bio) == REQ_OP_WRITE_SAME;
}

static inline bool bio_is_rw(struct bio *bio)
{
        if (!bio_has_data(bio))
                return false;

        if (bio_no_advance_iter(bio))
                return false;

        return true;
}

static inline bool bio_mergeable(struct bio *bio)
{
        if (bio->bi_opf & REQ_NOMERGE_FLAGS)
                return false;

        return true;
}

static inline unsigned int bio_cur_bytes(struct bio *bio)
{
        if (bio_has_data(bio))
                return bio_iovec(bio).bv_len;
        else /* dataless requests such as discard */
                return bio->bi_iter.bi_size;
}

static inline void *bio_data(struct bio *bio)
{
        if (bio_has_data(bio))
                return page_address(bio_page(bio)) + bio_offset(bio);

        return NULL;
}

/*
 * will die
 */
#define bio_to_phys(bio)        (page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
#define bvec_to_phys(bv)        (page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)

/*
 * queues that have highmem support enabled may still need to revert to
 * PIO transfers occasionally and thus map high pages temporarily. For
 * permanent PIO fall back, user is probably better off disabling highmem
 * I/O completely on that queue (see ide-dma for example)
 */
#define __bio_kmap_atomic(bio, iter)                            \
        (kmap_atomic(bio_iter_iovec((bio), (iter)).bv_page) +   \
                bio_iter_iovec((bio), (iter)).bv_offset)

#define __bio_kunmap_atomic(addr)       kunmap_atomic(addr)

/*
 * merge helpers etc
 */

/* Default implementation of BIOVEC_PHYS_MERGEABLE */
#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2)     \
        ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))

/*
 * allow arch override, e.g. for virtualized architectures (put in asm/io.h)
 */
#ifndef BIOVEC_PHYS_MERGEABLE
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)       \
        __BIOVEC_PHYS_MERGEABLE(vec1, vec2)
#endif

#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
        (((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
        __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))

/*
 * drivers should _never_ use the all version - the bio may have been split
 * before it got to the driver and the driver won't own all of it
 */
#define bio_for_each_segment_all(bvl, bio, i)                           \
        for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++)

static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
                                    unsigned bytes)
{
        iter->bi_sector += bytes >> 9;

        if (bio_no_advance_iter(bio))
                iter->bi_size -= bytes;
        else
                bvec_iter_advance(bio->bi_io_vec, iter, bytes);
}

#define __bio_for_each_segment(bvl, bio, iter, start)                   \
        for (iter = (start);                                            \
             (iter).bi_size &&                                          \
                ((bvl = bio_iter_iovec((bio), (iter))), 1);             \
             bio_advance_iter((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter)                            \
        __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)

#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
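/*
 * Typical iteration pattern (illustrative): walk the data segments of a
 * bio with a private iterator, leaving bio->bi_iter untouched:
 *
 *      struct bio_vec bv;
 *      struct bvec_iter iter;
 *      unsigned bytes = 0;
 *
 *      bio_for_each_segment(bv, bio, iter)
 *              bytes += bv.bv_len;
 *
 * For a data-carrying bio, bytes ends up equal to bio->bi_iter.bi_size.
 */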
static inline unsigned bio_segments(struct bio *bio)
{
        unsigned segs = 0;
        struct bio_vec bv;
        struct bvec_iter iter;

        /*
         * We special case discard/write same, because they interpret bi_size
         * differently:
         */

        if (bio_op(bio) == REQ_OP_DISCARD)
                return 1;

        if (bio_op(bio) == REQ_OP_SECURE_ERASE)
                return 1;

        if (bio_op(bio) == REQ_OP_WRITE_SAME)
                return 1;

        bio_for_each_segment(bv, bio, iter)
                segs++;

        return segs;
}

/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(bio);
 * if (bio->bi_flags ...)
 *      do_something
 * bio_put(bio);
 *
 * Without the bio_get(), the I/O could complete before submit_bio()
 * returns, and the bio would then already be freed memory by the time
 * the if (bio->bi_flags ...) test runs.
 */
static inline void bio_get(struct bio *bio)
{
        bio->bi_flags |= (1 << BIO_REFFED);
        smp_mb__before_atomic();
        atomic_inc(&bio->__bi_cnt);
}

static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
        if (count != 1) {
                bio->bi_flags |= (1 << BIO_REFFED);
                smp_mb__before_atomic();
        }
        atomic_set(&bio->__bi_cnt, count);
}

static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
        return (bio->bi_flags & (1U << bit)) != 0;
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
        bio->bi_flags |= (1U << bit);
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
        bio->bi_flags &= ~(1U << bit);
}

static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
{
        *bv = bio_iovec(bio);
}

static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
{
        struct bvec_iter iter = bio->bi_iter;
        int idx;

        if (unlikely(!bio_multiple_segments(bio))) {
                *bv = bio_iovec(bio);
                return;
        }

        bio_advance_iter(bio, &iter, iter.bi_size);

        if (!iter.bi_bvec_done)
                idx = iter.bi_idx - 1;
        else    /* in the middle of bvec */
                idx = iter.bi_idx;

        *bv = bio->bi_io_vec[idx];

        /*
         * iter.bi_bvec_done records actual length of the last bvec
         * if this bio ends in the middle of one io vector
         */
        if (iter.bi_bvec_done)
                bv->bv_len = iter.bi_bvec_done;
}

enum bip_flags {
        BIP_BLOCK_INTEGRITY     = 1 << 0, /* block layer owns integrity data */
        BIP_MAPPED_INTEGRITY    = 1 << 1, /* ref tag has been remapped */
        BIP_CTRL_NOCHECK        = 1 << 2, /* disable HBA integrity checking */
        BIP_DISK_NOCHECK        = 1 << 3, /* disable disk integrity checking */
        BIP_IP_CHECKSUM         = 1 << 4, /* IP checksum */
};

/*
 * bio integrity payload
 */
struct bio_integrity_payload {
        struct bio              *bip_bio;       /* parent bio */

        struct bvec_iter        bip_iter;

        bio_end_io_t            *bip_end_io;    /* saved I/O completion fn */

        unsigned short          bip_slab;       /* slab the bip came from */
        unsigned short          bip_vcnt;       /* # of integrity bio_vecs */
        unsigned short          bip_max_vcnt;   /* integrity bio_vec slots */
        unsigned short          bip_flags;      /* control flags */

        struct work_struct      bip_work;       /* I/O completion */

        struct bio_vec          *bip_vec;
        struct bio_vec          bip_inline_vecs[0];/* embedded bvec array */
};
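/*
 * Illustrative use of the flag helpers above; BIO_QUIET (defined in
 * blk_types.h) suppresses error reporting for this bio:
 *
 *      if (!bio_flagged(bio, BIO_QUIET))
 *              bio_set_flag(bio, BIO_QUIET);
 */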
#if defined(CONFIG_BLK_DEV_INTEGRITY)

static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
{
        if (bio->bi_opf & REQ_INTEGRITY)
                return bio->bi_integrity;

        return NULL;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
        struct bio_integrity_payload *bip = bio_integrity(bio);

        if (bip)
                return bip->bip_flags & flag;

        return false;
}

static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
{
        return bip->bip_iter.bi_sector;
}

static inline void bip_set_seed(struct bio_integrity_payload *bip,
                                sector_t seed)
{
        bip->bip_iter.bi_sector = seed;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

extern void bio_trim(struct bio *bio, int offset, int size);
extern struct bio *bio_split(struct bio *bio, int sectors,
                             gfp_t gfp, struct bio_set *bs);

/**
 * bio_next_split - get next @sectors from a bio, splitting if necessary
 * @bio:        bio to split
 * @sectors:    number of sectors to split from the front of @bio
 * @gfp:        gfp mask
 * @bs:         bio set to allocate from
 *
 * Returns a bio representing the next @sectors of @bio - if the bio is smaller
 * than @sectors, returns the original bio unchanged.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
                                         gfp_t gfp, struct bio_set *bs)
{
        if (sectors >= bio_sectors(bio))
                return bio;

        return bio_split(bio, sectors, gfp, bs);
}

extern struct bio_set *bioset_create(unsigned int, unsigned int);
extern struct bio_set *bioset_create_nobvec(unsigned int, unsigned int);
extern void bioset_free(struct bio_set *);
extern mempool_t *biovec_create_pool(int pool_entries);

extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
extern void bio_put(struct bio *);

extern void __bio_clone_fast(struct bio *, struct bio *);
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);

extern struct bio_set *fs_bio_set;

static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
        return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
}

static inline struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
{
        return bio_clone_bioset(bio, gfp_mask, fs_bio_set);
}

static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
        return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
}

static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
{
        return bio_clone_bioset(bio, gfp_mask, NULL);
}

extern void bio_endio(struct bio *);

static inline void bio_io_error(struct bio *bio)
{
        bio->bi_error = -EIO;
        bio_endio(bio);
}

struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);

extern int submit_bio_wait(struct bio *bio);
extern void bio_advance(struct bio *, unsigned);

extern void bio_init(struct bio *);
extern void bio_reset(struct bio *);
void bio_chain(struct bio *, struct bio *);
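/*
 * Illustrative sketch of a synchronous one-page read built from helpers
 * declared in this header (error handling elided; bdev, sector and page
 * are assumed to be set up by the caller):
 *
 *      struct bio *bio = bio_alloc(GFP_KERNEL, 1);
 *
 *      bio->bi_bdev = bdev;
 *      bio->bi_iter.bi_sector = sector;
 *      bio_add_page(bio, page, PAGE_SIZE, 0);
 *      bio_set_op_attrs(bio, REQ_OP_READ, 0);
 *      if (submit_bio_wait(bio))
 *              pr_err("read failed: %d\n", bio->bi_error);
 *      bio_put(bio);
 */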
extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
                           unsigned int, unsigned int);
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
                                    const struct iov_iter *, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
                                gfp_t);
extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
                                 gfp_t, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

void generic_start_io_acct(int rw, unsigned long sectors,
                           struct hd_struct *part);
void generic_end_io_acct(int rw, struct hd_struct *part,
                         unsigned long start_time);

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void bio_flush_dcache_pages(struct bio *bi);
#else
static inline void bio_flush_dcache_pages(struct bio *bi)
{
}
#endif

extern void bio_copy_data(struct bio *dst, struct bio *src);
extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);

extern struct bio *bio_copy_user_iov(struct request_queue *,
                                     struct rq_map_data *,
                                     const struct iov_iter *,
                                     gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);
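/*
 * Illustrative: give a bio freshly allocated, zeroed backing pages, a
 * pattern used by stacking drivers (assumes bi_vcnt/bi_io_vec have
 * already been set up for the intended size):
 *
 *      if (bio_alloc_pages(bio, GFP_NOIO))
 *              return -ENOMEM;
 *      zero_fill_bio(bio);
 */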
#ifdef CONFIG_BLK_CGROUP
int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
int bio_associate_current(struct bio *bio);
void bio_disassociate_task(struct bio *bio);
void bio_clone_blkcg_association(struct bio *dst, struct bio *src);
#else   /* CONFIG_BLK_CGROUP */
static inline int bio_associate_blkcg(struct bio *bio,
                        struct cgroup_subsys_state *blkcg_css) { return 0; }
static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
static inline void bio_disassociate_task(struct bio *bio) { }
static inline void bio_clone_blkcg_association(struct bio *dst,
                        struct bio *src) { }
#endif  /* CONFIG_BLK_CGROUP */

#ifdef CONFIG_HIGHMEM
/*
 * remember never ever reenable interrupts between a bvec_kmap_irq and
 * bvec_kunmap_irq!
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
        unsigned long addr;

        /*
         * might not be a highmem page, but the preempt/irq count
         * balancing is a lot nicer this way
         */
        local_irq_save(*flags);
        addr = (unsigned long) kmap_atomic(bvec->bv_page);

        BUG_ON(addr & ~PAGE_MASK);

        return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
        unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

        kunmap_atomic((void *) ptr);
        local_irq_restore(*flags);
}

#else
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
        return page_address(bvec->bv_page) + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
        *flags = 0;
}
#endif

static inline char *__bio_kmap_irq(struct bio *bio, struct bvec_iter iter,
                                   unsigned long *flags)
{
        return bvec_kmap_irq(&bio_iter_iovec(bio, iter), flags);
}
#define __bio_kunmap_irq(buf, flags)    bvec_kunmap_irq(buf, flags)

#define bio_kmap_irq(bio, flags) \
        __bio_kmap_irq((bio), (bio)->bi_iter, (flags))
#define bio_kunmap_irq(buf, flags)      __bio_kunmap_irq(buf, flags)

/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio.  The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
        struct bio *head;
        struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
        return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
        bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST  { NULL, NULL }

#define bio_list_for_each(bio, bl) \
        for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
        unsigned sz = 0;
        struct bio *bio;

        bio_list_for_each(bio, bl)
                sz++;

        return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
        bio->bi_next = NULL;

        if (bl->tail)
                bl->tail->bi_next = bio;
        else
                bl->head = bio;

        bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
        bio->bi_next = bl->head;

        bl->head = bio;

        if (!bl->tail)
                bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
        if (!bl2->head)
                return;

        if (bl->tail)
                bl->tail->bi_next = bl2->head;
        else
                bl->head = bl2->head;

        bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
                                       struct bio_list *bl2)
{
        if (!bl2->head)
                return;

        if (bl->head)
                bl2->tail->bi_next = bl->head;
        else
                bl->tail = bl2->tail;

        bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
        return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
        struct bio *bio = bl->head;

        if (bio) {
                bl->head = bl->head->bi_next;
                if (!bl->head)
                        bl->tail = NULL;

                bio->bi_next = NULL;
        }

        return bio;
}
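/*
 * Illustrative: drain a bio_list in FIFO order and resubmit each bio,
 * the shape of the rescue/requeue loops in remapping drivers
 * (generic_make_request() is declared in blkdev.h):
 *
 *      struct bio *bio;
 *
 *      while ((bio = bio_list_pop(&list)))
 *              generic_make_request(bio);
 */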
static inline struct bio *bio_list_get(struct bio_list *bl)
{
        struct bio *bio = bl->head;

        bl->head = bl->tail = NULL;

        return bio;
}

/*
 * Increment chain count for the bio. Make sure the CHAIN flag update
 * is visible before the raised count.
 */
static inline void bio_inc_remaining(struct bio *bio)
{
        bio_set_flag(bio, BIO_CHAIN);
        smp_mb__before_atomic();
        atomic_inc(&bio->__bi_remaining);
}

/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2

struct bio_set {
        struct kmem_cache *bio_slab;
        unsigned int front_pad;

        mempool_t *bio_pool;
        mempool_t *bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
        mempool_t *bio_integrity_pool;
        mempool_t *bvec_integrity_pool;
#endif

        /*
         * Deadlock avoidance for stacking block drivers: see comments in
         * bio_alloc_bioset() for details
         */
        spinlock_t              rescue_lock;
        struct bio_list         rescue_list;
        struct work_struct      rescue_work;
        struct workqueue_struct *rescue_workqueue;
};

struct biovec_slab {
        int nr_vecs;
        char *name;
        struct kmem_cache *slab;
};

/*
 * a small number of entries is fine, not going to be performance critical.
 * basically we just need to survive
 */
#define BIO_SPLIT_ENTRIES 2

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_for_each_vec(bvl, bip, iter)                                \
        for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)                   \
        for_each_bio(_bio)                                              \
                bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern void bio_integrity_free(struct bio *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_prep(struct bio *);
extern void bio_integrity_endio(struct bio *);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);
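/*
 * Illustrative: the block layer attaches protection information before
 * dispatch, roughly:
 *
 *      if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
 *              bio->bi_error = -EIO;
 *              bio_endio(bio);
 *              return;
 *      }
 */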
#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline void *bio_integrity(struct bio *bio)
{
        return NULL;
}

static inline bool bio_integrity_enabled(struct bio *bio)
{
        return false;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
        return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
        return;
}

static inline int bio_integrity_prep(struct bio *bio)
{
        return 0;
}

static inline void bio_integrity_free(struct bio *bio)
{
        return;
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
                                      gfp_t gfp_mask)
{
        return 0;
}

static inline void bio_integrity_advance(struct bio *bio,
                                         unsigned int bytes_done)
{
        return;
}

static inline void bio_integrity_trim(struct bio *bio, unsigned int offset,
                                      unsigned int sectors)
{
        return;
}

static inline void bio_integrity_init(void)
{
        return;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
        return false;
}

static inline void *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
                                        unsigned int nr)
{
        return ERR_PTR(-EINVAL);
}

static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
                                         unsigned int len, unsigned int offset)
{
        return 0;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_H */