/*
 * 2.5 block I/O model
 *
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>
#include <linux/bug.h>

#ifdef CONFIG_BLOCK

#include <asm/io.h>

/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON	BUG_ON
#else
#define BIO_BUG_ON
#endif

#define BIO_MAX_PAGES		256
#define BIO_MAX_SIZE		(BIO_MAX_PAGES << PAGE_CACHE_SHIFT)
#define BIO_MAX_SECTORS		(BIO_MAX_SIZE >> 9)

/*
 * upper 16 bits of bi_rw define the io priority of this bio
 */
#define BIO_PRIO_SHIFT	(8 * sizeof(unsigned long) - IOPRIO_BITS)
#define bio_prio(bio)	((bio)->bi_rw >> BIO_PRIO_SHIFT)
#define bio_prio_valid(bio)	ioprio_valid(bio_prio(bio))

#define bio_set_prio(bio, prio)		do {				\
	WARN_ON(prio >= (1 << IOPRIO_BITS));				\
	(bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1);			\
	(bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT);	\
} while (0)

/*
 * various member access, note that bio_data should of course not be used
 * on highmem page vectors
 */
#define __bvec_iter_bvec(bvec, iter)	(&(bvec)[(iter).bi_idx])

#define bvec_iter_page(bvec, iter)				\
	(__bvec_iter_bvec((bvec), (iter))->bv_page)

#define bvec_iter_len(bvec, iter)				\
	min((iter).bi_size,					\
	    __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done)

#define bvec_iter_offset(bvec, iter)				\
	(__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done)

#define bvec_iter_bvec(bvec, iter)				\
((struct bio_vec) {						\
	.bv_page	= bvec_iter_page((bvec), (iter)),	\
	.bv_len		= bvec_iter_len((bvec), (iter)),	\
	.bv_offset	= bvec_iter_offset((bvec), (iter)),	\
})

#define bio_iter_iovec(bio, iter)				\
	bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter)				\
	bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter)					\
	bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter)				\
	bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio)		bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)		bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)

#define bio_multiple_segments(bio)				\
	((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
#define bio_sectors(bio)	((bio)->bi_iter.bi_size >> 9)
#define bio_end_sector(bio)	((bio)->bi_iter.bi_sector + bio_sectors((bio)))

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
	if (bio &&
	    bio->bi_iter.bi_size &&
	    !(bio->bi_rw & REQ_DISCARD))
		return true;

	return false;
}
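/*
 * Example (illustrative sketch, not part of this API): callers commonly
 * gate payload access on bio_has_data() before using the accessors above.
 * Assumes the page is not in highmem, so page_address() is valid; 'bio'
 * and 'scratch' are hypothetical.
 *
 *	if (bio_has_data(bio)) {
 *		struct bio_vec bv = bio_iovec(bio);
 *		void *buf = page_address(bv.bv_page) + bv.bv_offset;
 *
 *		memcpy(scratch, buf, bv.bv_len);
 *	}
 */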
static inline bool bio_is_rw(struct bio *bio)
{
	if (!bio_has_data(bio))
		return false;

	if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
		return false;

	return true;
}

static inline bool bio_mergeable(struct bio *bio)
{
	if (bio->bi_rw & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline unsigned int bio_cur_bytes(struct bio *bio)
{
	if (bio_has_data(bio))
		return bio_iovec(bio).bv_len;
	else /* dataless requests such as discard */
		return bio->bi_iter.bi_size;
}

static inline void *bio_data(struct bio *bio)
{
	if (bio_has_data(bio))
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}

/*
 * will die
 */
#define bio_to_phys(bio)	(page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
#define bvec_to_phys(bv)	(page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)

/*
 * queues that have highmem support enabled may still need to revert to
 * PIO transfers occasionally and thus map high pages temporarily. For
 * permanent PIO fall back, user is probably better off disabling highmem
 * I/O completely on that queue (see ide-dma for example)
 */
#define __bio_kmap_atomic(bio, iter)				\
	(kmap_atomic(bio_iter_iovec((bio), (iter)).bv_page) +	\
	 bio_iter_iovec((bio), (iter)).bv_offset)

#define __bio_kunmap_atomic(addr)	kunmap_atomic(addr)

/*
 * merge helpers etc
 */

/* Default implementation of BIOVEC_PHYS_MERGEABLE */
#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2)			\
	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))

/*
 * allow arch override, for eg virtualized architectures (put in asm/io.h)
 */
#ifndef BIOVEC_PHYS_MERGEABLE
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	__BIOVEC_PHYS_MERGEABLE(vec1, vec2)
#endif

#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
	(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))

#define bio_io_error(bio) bio_endio((bio), -EIO)

/*
 * drivers should _never_ use the all version - the bio may have been split
 * before it got to the driver and the driver won't own all of it
 */
#define bio_for_each_segment_all(bvl, bio, i)				\
	for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++)

static inline void bvec_iter_advance(struct bio_vec *bv, struct bvec_iter *iter,
				     unsigned bytes)
{
	WARN_ONCE(bytes > iter->bi_size,
		  "Attempted to advance past end of bvec iter\n");

	while (bytes) {
		unsigned len = min(bytes, bvec_iter_len(bv, *iter));

		bytes -= len;
		iter->bi_size -= len;
		iter->bi_bvec_done += len;

		if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) {
			iter->bi_bvec_done = 0;
			iter->bi_idx++;
		}
	}
}

#define for_each_bvec(bvl, bio_vec, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = bvec_iter_bvec((bio_vec), (iter))), 1);		\
	     bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
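/*
 * Example (sketch): walking an arbitrary bvec array with for_each_bvec(),
 * e.g. to total the bytes a bvec_iter covers. 'bvecs' and 'start' are
 * hypothetical; only the on-stack iterator copy is advanced, the array
 * itself is untouched.
 *
 *	struct bio_vec bv;
 *	struct bvec_iter iter;
 *	unsigned bytes = 0;
 *
 *	for_each_bvec(bv, bvecs, iter, start)
 *		bytes += bv.bv_len;
 */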
static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
				    unsigned bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
		iter->bi_size -= bytes;
	else
		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
}

#define __bio_for_each_segment(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = bio_iter_iovec((bio), (iter))), 1);		\
	     bio_advance_iter((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter)				\
	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)

#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)

static inline unsigned bio_segments(struct bio *bio)
{
	unsigned segs = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	/*
	 * We special case discard/write same, because they interpret bi_size
	 * differently:
	 */

	if (bio->bi_rw & REQ_DISCARD)
		return 1;

	if (bio->bi_rw & REQ_WRITE_SAME)
		return 1;

	bio_for_each_segment(bv, bio, iter)
		segs++;

	return segs;
}

/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(rw, bio);
 * if (bio->bi_flags ...)
 *	do_something
 * bio_put(bio);
 *
 * without the bio_get(), the I/O could complete before submit_bio returns,
 * and the bio would already be freed memory by the time the
 * if (bio->bi_flags ...) test runs
 */
#define bio_get(bio)	atomic_inc(&(bio)->bi_cnt)

#if defined(CONFIG_BLK_DEV_INTEGRITY)
/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */

	struct bvec_iter	bip_iter;

	/* kill - should just use bip_vec */
	void			*bip_buf;	/* generated integrity data */

	bio_end_io_t		*bip_end_io;	/* saved I/O completion fn */

	unsigned short		bip_slab;	/* slab the bip came from */
	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned		bip_owns_buf:1;	/* should free bip_buf */

	struct work_struct	bip_work;	/* I/O completion */

	struct bio_vec		*bip_vec;
	struct bio_vec		bip_inline_vecs[0];	/* embedded bvec array */
};
#endif /* CONFIG_BLK_DEV_INTEGRITY */

extern void bio_trim(struct bio *bio, int offset, int size);
extern struct bio *bio_split(struct bio *bio, int sectors,
			     gfp_t gfp, struct bio_set *bs);

/**
 * bio_next_split - get next @sectors from a bio, splitting if necessary
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Returns a bio representing the next @sectors of @bio - if the bio is smaller
 * than @sectors, returns the original bio unchanged.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
					 gfp_t gfp, struct bio_set *bs)
{
	if (sectors >= bio_sectors(bio))
		return bio;

	return bio_split(bio, sectors, gfp, bs);
}
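/*
 * Example (sketch): a driver limited to 'max_sectors' per request might
 * repeatedly carve that much off the front of an incoming bio.
 * 'max_sectors', 'bs' and handle_one() are hypothetical; bio_chain()
 * keeps the parent's completion pending until the split child finishes.
 *
 *	while (1) {
 *		struct bio *split = bio_next_split(bio, max_sectors,
 *						   GFP_NOIO, bs);
 *
 *		if (split == bio) {
 *			handle_one(split);
 *			break;
 *		}
 *
 *		bio_chain(split, bio);
 *		handle_one(split);
 *	}
 */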
extern struct bio_set *bioset_create(unsigned int, unsigned int);
extern void bioset_free(struct bio_set *);
extern mempool_t *biovec_create_pool(struct bio_set *bs, int pool_entries);

extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
extern void bio_put(struct bio *);

extern void __bio_clone_fast(struct bio *, struct bio *);
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);

extern struct bio_set *fs_bio_set;
unsigned int bio_integrity_tag_size(struct bio *bio);

static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
}

static inline struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
{
	return bio_clone_bioset(bio, gfp_mask, fs_bio_set);
}

static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
}

static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
{
	return bio_clone_bioset(bio, gfp_mask, NULL);
}

extern void bio_endio(struct bio *, int);
extern void bio_endio_nodec(struct bio *, int);
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);

extern int submit_bio_wait(int rw, struct bio *bio);
extern void bio_advance(struct bio *, unsigned);

extern void bio_init(struct bio *);
extern void bio_reset(struct bio *);
void bio_chain(struct bio *, struct bio *);

extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
extern int bio_get_nr_vecs(struct block_device *);
extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
				unsigned long, unsigned int, int, gfp_t);
struct sg_iovec;
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
				    struct block_device *,
				    const struct sg_iovec *, int, int, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
				gfp_t);
extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
				 gfp_t, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void bio_flush_dcache_pages(struct bio *bi);
#else
static inline void bio_flush_dcache_pages(struct bio *bi)
{
}
#endif

extern void bio_copy_data(struct bio *dst, struct bio *src);
extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);
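/*
 * Example (sketch): synchronously reading one page with the helpers
 * above. 'bdev', 'sector' and 'page' are hypothetical and error handling
 * is abbreviated.
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *	int ret;
 *
 *	bio->bi_bdev = bdev;
 *	bio->bi_iter.bi_sector = sector;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *
 *	ret = submit_bio_wait(READ, bio);
 *	bio_put(bio);
 */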
extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *,
				 unsigned long, unsigned int, int, gfp_t);
extern struct bio *bio_copy_user_iov(struct request_queue *,
				     struct rq_map_data *,
				     const struct sg_iovec *,
				     int, int, gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);

#ifdef CONFIG_BLK_CGROUP
int bio_associate_current(struct bio *bio);
void bio_disassociate_task(struct bio *bio);
#else	/* CONFIG_BLK_CGROUP */
static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
static inline void bio_disassociate_task(struct bio *bio) { }
#endif	/* CONFIG_BLK_CGROUP */

#ifdef CONFIG_HIGHMEM
/*
 * remember never ever reenable interrupts between a bvec_kmap_irq and
 * bvec_kunmap_irq!
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	unsigned long addr;

	/*
	 * might not be a highmem page, but the preempt/irq count
	 * balancing is a lot nicer this way
	 */
	local_irq_save(*flags);
	addr = (unsigned long) kmap_atomic(bvec->bv_page);

	BUG_ON(addr & ~PAGE_MASK);

	return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

	kunmap_atomic((void *) ptr);
	local_irq_restore(*flags);
}

#else
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	return page_address(bvec->bv_page) + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	*flags = 0;
}
#endif

static inline char *__bio_kmap_irq(struct bio *bio, struct bvec_iter iter,
				   unsigned long *flags)
{
	return bvec_kmap_irq(&bio_iter_iovec(bio, iter), flags);
}
#define __bio_kunmap_irq(buf, flags)	bvec_kunmap_irq(buf, flags)

#define bio_kmap_irq(bio, flags) \
	__bio_kmap_irq((bio), (bio)->bi_iter, (flags))
#define bio_kunmap_irq(buf, flags)	__bio_kunmap_irq(buf, flags)
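/*
 * Example (sketch): zeroing one segment from a context that cannot sleep,
 * using the irq-safe mapping helpers above. 'bvec' is hypothetical;
 * interrupts stay disabled between the map and the unmap.
 *
 *	unsigned long flags;
 *	char *buf = bvec_kmap_irq(bvec, &flags);
 *
 *	memset(buf, 0, bvec->bv_len);
 *	bvec_kunmap_irq(buf, &flags);
 */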
/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio. The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
	struct bio *head;
	struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
	return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST	{ NULL, NULL }

#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
	unsigned sz = 0;
	struct bio *bio;

	bio_list_for_each(bio, bl)
		sz++;

	return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;

	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;

	bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = bl->head;

	bl->head = bio;

	if (!bl->tail)
		bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;

	bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
				       struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->head)
		bl2->tail->bi_next = bl->head;
	else
		bl->tail = bl2->tail;

	bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bl->head->bi_next;
		if (!bl->head)
			bl->tail = NULL;

		bio->bi_next = NULL;
	}

	return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	bl->head = bl->tail = NULL;

	return bio;
}

/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2
#define BIOVEC_NR_POOLS 6
#define BIOVEC_MAX_IDX	(BIOVEC_NR_POOLS - 1)

struct bio_set {
	struct kmem_cache *bio_slab;
	unsigned int front_pad;

	mempool_t *bio_pool;
	mempool_t *bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t *bio_integrity_pool;
	mempool_t *bvec_integrity_pool;
#endif

	/*
	 * Deadlock avoidance for stacking block drivers: see comments in
	 * bio_alloc_bioset() for details
	 */
	spinlock_t		rescue_lock;
	struct bio_list		rescue_list;
	struct work_struct	rescue_work;
	struct workqueue_struct	*rescue_workqueue;
};

struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
};
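/*
 * Example (sketch): a stacking driver typically creates a private bio_set
 * at init time so its clones don't compete with fs_bio_set; the pool size
 * and zero front_pad shown here are illustrative, not required values.
 *
 *	struct bio_set *bs = bioset_create(BIO_POOL_SIZE, 0);
 *
 *	if (!bs)
 *		return -ENOMEM;
 *
 *	clone = bio_clone_bioset(bio, GFP_NOIO, bs);
 *	...
 *	bioset_free(bs);
 */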
/*
 * a small number of entries is fine, not going to be performance critical.
 * basically we just need to survive
 */
#define BIO_SPLIT_ENTRIES 2

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_vec_idx(bip, idx)	(&(bip->bip_vec[(idx)]))

#define bip_for_each_vec(bvl, bip, iter)				\
	for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
	for_each_bio(_bio)						\
		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

#define bio_integrity(bio) (bio->bi_integrity != NULL)

extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern void bio_integrity_free(struct bio *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_set_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_get_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_prep(struct bio *);
extern void bio_integrity_endio(struct bio *, int);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);

#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline int bio_integrity(struct bio *bio)
{
	return 0;
}

static inline int bio_integrity_enabled(struct bio *bio)
{
	return 0;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
	return;
}

static inline int bio_integrity_prep(struct bio *bio)
{
	return 0;
}

static inline void bio_integrity_free(struct bio *bio)
{
	return;
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
				      gfp_t gfp_mask)
{
	return 0;
}

static inline void bio_integrity_advance(struct bio *bio,
					 unsigned int bytes_done)
{
	return;
}

static inline void bio_integrity_trim(struct bio *bio, unsigned int offset,
				      unsigned int sectors)
{
	return;
}

static inline void bio_integrity_init(void)
{
	return;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_H */