1 /* 2 * Copyright (C) 2001 Jens Axboe <axboe@suse.de> 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of the GNU General Public License version 2 as 6 * published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope that it will be useful, 9 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * GNU General Public License for more details. 13 * 14 * You should have received a copy of the GNU General Public Licens 15 * along with this program; if not, write to the Free Software 16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111- 17 */ 18 #ifndef __LINUX_BIO_H 19 #define __LINUX_BIO_H 20 21 #include <linux/highmem.h> 22 #include <linux/mempool.h> 23 #include <linux/ioprio.h> 24 #include <linux/bug.h> 25 26 #ifdef CONFIG_BLOCK 27 28 #include <asm/io.h> 29 30 /* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */ 31 #include <linux/blk_types.h> 32 33 #define BIO_DEBUG 34 35 #ifdef BIO_DEBUG 36 #define BIO_BUG_ON BUG_ON 37 #else 38 #define BIO_BUG_ON 39 #endif 40 41 #ifdef CONFIG_THP_SWAP 42 #if HPAGE_PMD_NR > 256 43 #define BIO_MAX_PAGES HPAGE_PMD_NR 44 #else 45 #define BIO_MAX_PAGES 256 46 #endif 47 #else 48 #define BIO_MAX_PAGES 256 49 #endif 50 51 #define bio_prio(bio) (bio)->bi_ioprio 52 #define bio_set_prio(bio, prio) ((bio)->bi_ioprio = prio) 53 54 #define bio_iter_iovec(bio, iter) \ 55 bvec_iter_bvec((bio)->bi_io_vec, (iter)) 56 57 #define bio_iter_page(bio, iter) \ 58 bvec_iter_page((bio)->bi_io_vec, (iter)) 59 #define bio_iter_len(bio, iter) \ 60 bvec_iter_len((bio)->bi_io_vec, (iter)) 61 #define bio_iter_offset(bio, iter) \ 62 bvec_iter_offset((bio)->bi_io_vec, (iter)) 63 64 #define bio_page(bio) bio_iter_page((bio), (bio)->bi_iter) 65 #define bio_offset(bio) bio_iter_offset((bio), (bio)->bi_iter) 66 #define bio_iovec(bio) bio_iter_iovec((bio), (bio)->bi_iter) 67 68 #define bio_multiple_segments(bio) \ 69 ((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len) 70 #define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9) 71 #define bio_end_sector(bio) ((bio)->bi_iter.bi_sector + bio_sectors((bio))) 72 73 /* 74 * Return the data direction, READ or WRITE. 75 */ 76 #define bio_data_dir(bio) \ 77 (op_is_write(bio_op(bio)) ? WRITE : READ) 78 79 /* 80 * Check whether this bio carries any data or not. A NULL bio is allowed. 81 */ 82 static inline bool bio_has_data(struct bio *bio) 83 { 84 if (bio && 85 bio->bi_iter.bi_size && 86 bio_op(bio) != REQ_OP_DISCARD && 87 bio_op(bio) != REQ_OP_SECURE_ERASE && 88 bio_op(bio) != REQ_OP_WRITE_ZEROES) 89 return true; 90 91 return false; 92 } 93 94 static inline bool bio_no_advance_iter(struct bio *bio) 95 { 96 return bio_op(bio) == REQ_OP_DISCARD || 97 bio_op(bio) == REQ_OP_SECURE_ERASE || 98 bio_op(bio) == REQ_OP_WRITE_SAME || 99 bio_op(bio) == REQ_OP_WRITE_ZEROES; 100 } 101 102 static inline bool bio_mergeable(struct bio *bio) 103 { 104 if (bio->bi_opf & REQ_NOMERGE_FLAGS) 105 return false; 106 107 return true; 108 } 109 110 static inline unsigned int bio_cur_bytes(struct bio *bio) 111 { 112 if (bio_has_data(bio)) 113 return bio_iovec(bio).bv_len; 114 else /* dataless requests such as discard */ 115 return bio->bi_iter.bi_size; 116 } 117 118 static inline void *bio_data(struct bio *bio) 119 { 120 if (bio_has_data(bio)) 121 return page_address(bio_page(bio)) + bio_offset(bio); 122 123 return NULL; 124 } 125 126 /* 127 * will die 128 */ 129 #define bvec_to_phys(bv) (page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset) 130 131 /* 132 * merge helpers etc 133 */ 134 135 /* Default implementation of BIOVEC_PHYS_MERGEABLE */ 136 #define __BIOVEC_PHYS_MERGEABLE(vec1, vec2) \ 137 ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) 138 139 /* 140 * allow arch override, for eg virtualized architectures (put in asm/io.h) 141 */ 142 #ifndef BIOVEC_PHYS_MERGEABLE 143 #define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \ 144 __BIOVEC_PHYS_MERGEABLE(vec1, vec2) 145 #endif 146 147 #define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \ 148 (((addr1) | (mask)) == (((addr2) - 1) | (mask))) 149 #define BIOVEC_SEG_BOUNDARY(q, b1, b2) \ 150 __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q))) 151 152 /* 153 * drivers should _never_ use the all version - the bio may have been split 154 * before it got to the driver and the driver won't own all of it 155 */ 156 #define bio_for_each_segment_all(bvl, bio, i) \ 157 for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++) 158 159 static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter, 160 unsigned bytes) 161 { 162 iter->bi_sector += bytes >> 9; 163 164 if (bio_no_advance_iter(bio)) { 165 iter->bi_size -= bytes; 166 iter->bi_done += bytes; 167 } else { 168 bvec_iter_advance(bio->bi_io_vec, iter, bytes); 169 /* TODO: It is reasonable to complete bio with error here. */ 170 } 171 } 172 173 static inline bool bio_rewind_iter(struct bio *bio, struct bvec_iter *iter, 174 unsigned int bytes) 175 { 176 iter->bi_sector -= bytes >> 9; 177 178 if (bio_no_advance_iter(bio)) { 179 iter->bi_size += bytes; 180 iter->bi_done -= bytes; 181 return true; 182 } 183 184 return bvec_iter_rewind(bio->bi_io_vec, iter, bytes); 185 } 186 187 #define __bio_for_each_segment(bvl, bio, iter, start) \ 188 for (iter = (start); \ 189 (iter).bi_size && \ 190 ((bvl = bio_iter_iovec((bio), (iter))), 1); \ 191 bio_advance_iter((bio), &(iter), (bvl).bv_len)) 192 193 #define bio_for_each_segment(bvl, bio, iter) \ 194 __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter) 195 196 #define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len) 197 198 static inline unsigned bio_segments(struct bio *bio) 199 { 200 unsigned segs = 0; 201 struct bio_vec bv; 202 struct bvec_iter iter; 203 204 /* 205 * We special case discard/write same/write zeroes, because they 206 * interpret bi_size differently: 207 */ 208 209 switch (bio_op(bio)) { 210 case REQ_OP_DISCARD: 211 case REQ_OP_SECURE_ERASE: 212 case REQ_OP_WRITE_ZEROES: 213 return 0; 214 case REQ_OP_WRITE_SAME: 215 return 1; 216 default: 217 break; 218 } 219 220 bio_for_each_segment(bv, bio, iter) 221 segs++; 222 223 return segs; 224 } 225 226 /* 227 * get a reference to a bio, so it won't disappear. the intended use is 228 * something like: 229 * 230 * bio_get(bio); 231 * submit_bio(rw, bio); 232 * if (bio->bi_flags ...) 233 * do_something 234 * bio_put(bio); 235 * 236 * without the bio_get(), it could potentially complete I/O before submit_bio 237 * returns. and then bio would be freed memory when if (bio->bi_flags ...) 238 * runs 239 */ 240 static inline void bio_get(struct bio *bio) 241 { 242 bio->bi_flags |= (1 << BIO_REFFED); 243 smp_mb__before_atomic(); 244 atomic_inc(&bio->__bi_cnt); 245 } 246 247 static inline void bio_cnt_set(struct bio *bio, unsigned int count) 248 { 249 if (count != 1) { 250 bio->bi_flags |= (1 << BIO_REFFED); 251 smp_mb__before_atomic(); 252 } 253 atomic_set(&bio->__bi_cnt, count); 254 } 255 256 static inline bool bio_flagged(struct bio *bio, unsigned int bit) 257 { 258 return (bio->bi_flags & (1U << bit)) != 0; 259 } 260 261 static inline void bio_set_flag(struct bio *bio, unsigned int bit) 262 { 263 bio->bi_flags |= (1U << bit); 264 } 265 266 static inline void bio_clear_flag(struct bio *bio, unsigned int bit) 267 { 268 bio->bi_flags &= ~(1U << bit); 269 } 270 271 static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv) 272 { 273 *bv = bio_iovec(bio); 274 } 275 276 static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv) 277 { 278 struct bvec_iter iter = bio->bi_iter; 279 int idx; 280 281 if (unlikely(!bio_multiple_segments(bio))) { 282 *bv = bio_iovec(bio); 283 return; 284 } 285 286 bio_advance_iter(bio, &iter, iter.bi_size); 287 288 if (!iter.bi_bvec_done) 289 idx = iter.bi_idx - 1; 290 else /* in the middle of bvec */ 291 idx = iter.bi_idx; 292 293 *bv = bio->bi_io_vec[idx]; 294 295 /* 296 * iter.bi_bvec_done records actual length of the last bvec 297 * if this bio ends in the middle of one io vector 298 */ 299 if (iter.bi_bvec_done) 300 bv->bv_len = iter.bi_bvec_done; 301 } 302 303 static inline unsigned bio_pages_all(struct bio *bio) 304 { 305 WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); 306 return bio->bi_vcnt; 307 } 308 309 static inline struct bio_vec *bio_first_bvec_all(struct bio *bio) 310 { 311 WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); 312 return bio->bi_io_vec; 313 } 314 315 static inline struct page *bio_first_page_all(struct bio *bio) 316 { 317 return bio_first_bvec_all(bio)->bv_page; 318 } 319 320 static inline struct bio_vec *bio_last_bvec_all(struct bio *bio) 321 { 322 WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); 323 return &bio->bi_io_vec[bio->bi_vcnt - 1]; 324 } 325 326 enum bip_flags { 327 BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */ 328 BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */ 329 BIP_CTRL_NOCHECK = 1 << 2, /* disable HBA integrity checking */ 330 BIP_DISK_NOCHECK = 1 << 3, /* disable disk integrity checking */ 331 BIP_IP_CHECKSUM = 1 << 4, /* IP checksum */ 332 }; 333 334 /* 335 * bio integrity payload 336 */ 337 struct bio_integrity_payload { 338 struct bio *bip_bio; /* parent bio */ 339 340 struct bvec_iter bip_iter; 341 342 unsigned short bip_slab; /* slab the bip came from */ 343 unsigned short bip_vcnt; /* # of integrity bio_vecs */ 344 unsigned short bip_max_vcnt; /* integrity bio_vec slots */ 345 unsigned short bip_flags; /* control flags */ 346 347 struct work_struct bip_work; /* I/O completion */ 348 349 struct bio_vec *bip_vec; 350 struct bio_vec bip_inline_vecs[0];/* embedded bvec array */ 351 }; 352 353 #if defined(CONFIG_BLK_DEV_INTEGRITY) 354 355 static inline struct bio_integrity_payload *bio_integrity(struct bio *bio) 356 { 357 if (bio->bi_opf & REQ_INTEGRITY) 358 return bio->bi_integrity; 359 360 return NULL; 361 } 362 363 static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag) 364 { 365 struct bio_integrity_payload *bip = bio_integrity(bio); 366 367 if (bip) 368 return bip->bip_flags & flag; 369 370 return false; 371 } 372 373 static inline sector_t bip_get_seed(struct bio_integrity_payload *bip) 374 { 375 return bip->bip_iter.bi_sector; 376 } 377 378 static inline void bip_set_seed(struct bio_integrity_payload *bip, 379 sector_t seed) 380 { 381 bip->bip_iter.bi_sector = seed; 382 } 383 384 #endif /* CONFIG_BLK_DEV_INTEGRITY */ 385 386 extern void bio_trim(struct bio *bio, int offset, int size); 387 extern struct bio *bio_split(struct bio *bio, int sectors, 388 gfp_t gfp, struct bio_set *bs); 389 390 /** 391 * bio_next_split - get next @sectors from a bio, splitting if necessary 392 * @bio: bio to split 393 * @sectors: number of sectors to split from the front of @bio 394 * @gfp: gfp mask 395 * @bs: bio set to allocate from 396 * 397 * Returns a bio representing the next @sectors of @bio - if the bio is smaller 398 * than @sectors, returns the original bio unchanged. 399 */ 400 static inline struct bio *bio_next_split(struct bio *bio, int sectors, 401 gfp_t gfp, struct bio_set *bs) 402 { 403 if (sectors >= bio_sectors(bio)) 404 return bio; 405 406 return bio_split(bio, sectors, gfp, bs); 407 } 408 409 extern struct bio_set *bioset_create(unsigned int, unsigned int, int flags); 410 enum { 411 BIOSET_NEED_BVECS = BIT(0), 412 BIOSET_NEED_RESCUER = BIT(1), 413 }; 414 extern void bioset_free(struct bio_set *); 415 extern mempool_t *biovec_create_pool(int pool_entries); 416 417 extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *); 418 extern void bio_put(struct bio *); 419 420 extern void __bio_clone_fast(struct bio *, struct bio *); 421 extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *); 422 extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs); 423 424 extern struct bio_set *fs_bio_set; 425 426 static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs) 427 { 428 return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set); 429 } 430 431 static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs) 432 { 433 return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL); 434 } 435 436 static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask) 437 { 438 return bio_clone_bioset(bio, gfp_mask, NULL); 439 440 } 441 442 extern blk_qc_t submit_bio(struct bio *); 443 444 extern void bio_endio(struct bio *); 445 446 static inline void bio_io_error(struct bio *bio) 447 { 448 bio->bi_status = BLK_STS_IOERR; 449 bio_endio(bio); 450 } 451 452 static inline void bio_wouldblock_error(struct bio *bio) 453 { 454 bio->bi_status = BLK_STS_AGAIN; 455 bio_endio(bio); 456 } 457 458 struct request_queue; 459 extern int bio_phys_segments(struct request_queue *, struct bio *); 460 461 extern int submit_bio_wait(struct bio *bio); 462 extern void bio_advance(struct bio *, unsigned); 463 464 extern void bio_init(struct bio *bio, struct bio_vec *table, 465 unsigned short max_vecs); 466 extern void bio_uninit(struct bio *); 467 extern void bio_reset(struct bio *); 468 void bio_chain(struct bio *, struct bio *); 469 470 extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int); 471 extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *, 472 unsigned int, unsigned int); 473 int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter); 474 struct rq_map_data; 475 extern struct bio *bio_map_user_iov(struct request_queue *, 476 struct iov_iter *, gfp_t); 477 extern void bio_unmap_user(struct bio *); 478 extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int, 479 gfp_t); 480 extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int, 481 gfp_t, int); 482 extern void bio_set_pages_dirty(struct bio *bio); 483 extern void bio_check_pages_dirty(struct bio *bio); 484 485 void generic_start_io_acct(struct request_queue *q, int rw, 486 unsigned long sectors, struct hd_struct *part); 487 void generic_end_io_acct(struct request_queue *q, int rw, 488 struct hd_struct *part, 489 unsigned long start_time); 490 491 #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 492 # error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform" 493 #endif 494 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 495 extern void bio_flush_dcache_pages(struct bio *bi); 496 #else 497 static inline void bio_flush_dcache_pages(struct bio *bi) 498 { 499 } 500 #endif 501 502 extern void bio_copy_data(struct bio *dst, struct bio *src); 503 extern void bio_free_pages(struct bio *bio); 504 505 extern struct bio *bio_copy_user_iov(struct request_queue *, 506 struct rq_map_data *, 507 struct iov_iter *, 508 gfp_t); 509 extern int bio_uncopy_user(struct bio *); 510 void zero_fill_bio(struct bio *bio); 511 extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *); 512 extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int); 513 extern unsigned int bvec_nr_vecs(unsigned short idx); 514 extern const char *bio_devname(struct bio *bio, char *buffer); 515 516 #define bio_set_dev(bio, bdev) \ 517 do { \ 518 if ((bio)->bi_disk != (bdev)->bd_disk) \ 519 bio_clear_flag(bio, BIO_THROTTLED);\ 520 (bio)->bi_disk = (bdev)->bd_disk; \ 521 (bio)->bi_partno = (bdev)->bd_partno; \ 522 } while (0) 523 524 #define bio_copy_dev(dst, src) \ 525 do { \ 526 (dst)->bi_disk = (src)->bi_disk; \ 527 (dst)->bi_partno = (src)->bi_partno; \ 528 } while (0) 529 530 #define bio_dev(bio) \ 531 disk_devt((bio)->bi_disk) 532 533 #ifdef CONFIG_BLK_CGROUP 534 int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css); 535 void bio_disassociate_task(struct bio *bio); 536 void bio_clone_blkcg_association(struct bio *dst, struct bio *src); 537 #else /* CONFIG_BLK_CGROUP */ 538 static inline int bio_associate_blkcg(struct bio *bio, 539 struct cgroup_subsys_state *blkcg_css) { return 0; } 540 static inline void bio_disassociate_task(struct bio *bio) { } 541 static inline void bio_clone_blkcg_association(struct bio *dst, 542 struct bio *src) { } 543 #endif /* CONFIG_BLK_CGROUP */ 544 545 #ifdef CONFIG_HIGHMEM 546 /* 547 * remember never ever reenable interrupts between a bvec_kmap_irq and 548 * bvec_kunmap_irq! 549 */ 550 static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags) 551 { 552 unsigned long addr; 553 554 /* 555 * might not be a highmem page, but the preempt/irq count 556 * balancing is a lot nicer this way 557 */ 558 local_irq_save(*flags); 559 addr = (unsigned long) kmap_atomic(bvec->bv_page); 560 561 BUG_ON(addr & ~PAGE_MASK); 562 563 return (char *) addr + bvec->bv_offset; 564 } 565 566 static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags) 567 { 568 unsigned long ptr = (unsigned long) buffer & PAGE_MASK; 569 570 kunmap_atomic((void *) ptr); 571 local_irq_restore(*flags); 572 } 573 574 #else 575 static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags) 576 { 577 return page_address(bvec->bv_page) + bvec->bv_offset; 578 } 579 580 static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags) 581 { 582 *flags = 0; 583 } 584 #endif 585 586 /* 587 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop. 588 * 589 * A bio_list anchors a singly-linked list of bios chained through the bi_next 590 * member of the bio. The bio_list also caches the last list member to allow 591 * fast access to the tail. 592 */ 593 struct bio_list { 594 struct bio *head; 595 struct bio *tail; 596 }; 597 598 static inline int bio_list_empty(const struct bio_list *bl) 599 { 600 return bl->head == NULL; 601 } 602 603 static inline void bio_list_init(struct bio_list *bl) 604 { 605 bl->head = bl->tail = NULL; 606 } 607 608 #define BIO_EMPTY_LIST { NULL, NULL } 609 610 #define bio_list_for_each(bio, bl) \ 611 for (bio = (bl)->head; bio; bio = bio->bi_next) 612 613 static inline unsigned bio_list_size(const struct bio_list *bl) 614 { 615 unsigned sz = 0; 616 struct bio *bio; 617 618 bio_list_for_each(bio, bl) 619 sz++; 620 621 return sz; 622 } 623 624 static inline void bio_list_add(struct bio_list *bl, struct bio *bio) 625 { 626 bio->bi_next = NULL; 627 628 if (bl->tail) 629 bl->tail->bi_next = bio; 630 else 631 bl->head = bio; 632 633 bl->tail = bio; 634 } 635 636 static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio) 637 { 638 bio->bi_next = bl->head; 639 640 bl->head = bio; 641 642 if (!bl->tail) 643 bl->tail = bio; 644 } 645 646 static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2) 647 { 648 if (!bl2->head) 649 return; 650 651 if (bl->tail) 652 bl->tail->bi_next = bl2->head; 653 else 654 bl->head = bl2->head; 655 656 bl->tail = bl2->tail; 657 } 658 659 static inline void bio_list_merge_head(struct bio_list *bl, 660 struct bio_list *bl2) 661 { 662 if (!bl2->head) 663 return; 664 665 if (bl->head) 666 bl2->tail->bi_next = bl->head; 667 else 668 bl->tail = bl2->tail; 669 670 bl->head = bl2->head; 671 } 672 673 static inline struct bio *bio_list_peek(struct bio_list *bl) 674 { 675 return bl->head; 676 } 677 678 static inline struct bio *bio_list_pop(struct bio_list *bl) 679 { 680 struct bio *bio = bl->head; 681 682 if (bio) { 683 bl->head = bl->head->bi_next; 684 if (!bl->head) 685 bl->tail = NULL; 686 687 bio->bi_next = NULL; 688 } 689 690 return bio; 691 } 692 693 static inline struct bio *bio_list_get(struct bio_list *bl) 694 { 695 struct bio *bio = bl->head; 696 697 bl->head = bl->tail = NULL; 698 699 return bio; 700 } 701 702 /* 703 * Increment chain count for the bio. Make sure the CHAIN flag update 704 * is visible before the raised count. 705 */ 706 static inline void bio_inc_remaining(struct bio *bio) 707 { 708 bio_set_flag(bio, BIO_CHAIN); 709 smp_mb__before_atomic(); 710 atomic_inc(&bio->__bi_remaining); 711 } 712 713 /* 714 * bio_set is used to allow other portions of the IO system to 715 * allocate their own private memory pools for bio and iovec structures. 716 * These memory pools in turn all allocate from the bio_slab 717 * and the bvec_slabs[]. 718 */ 719 #define BIO_POOL_SIZE 2 720 721 struct bio_set { 722 struct kmem_cache *bio_slab; 723 unsigned int front_pad; 724 725 mempool_t *bio_pool; 726 mempool_t *bvec_pool; 727 #if defined(CONFIG_BLK_DEV_INTEGRITY) 728 mempool_t *bio_integrity_pool; 729 mempool_t *bvec_integrity_pool; 730 #endif 731 732 /* 733 * Deadlock avoidance for stacking block drivers: see comments in 734 * bio_alloc_bioset() for details 735 */ 736 spinlock_t rescue_lock; 737 struct bio_list rescue_list; 738 struct work_struct rescue_work; 739 struct workqueue_struct *rescue_workqueue; 740 }; 741 742 struct biovec_slab { 743 int nr_vecs; 744 char *name; 745 struct kmem_cache *slab; 746 }; 747 748 /* 749 * a small number of entries is fine, not going to be performance critical. 750 * basically we just need to survive 751 */ 752 #define BIO_SPLIT_ENTRIES 2 753 754 #if defined(CONFIG_BLK_DEV_INTEGRITY) 755 756 #define bip_for_each_vec(bvl, bip, iter) \ 757 for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter) 758 759 #define bio_for_each_integrity_vec(_bvl, _bio, _iter) \ 760 for_each_bio(_bio) \ 761 bip_for_each_vec(_bvl, _bio->bi_integrity, _iter) 762 763 extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int); 764 extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int); 765 extern bool bio_integrity_prep(struct bio *); 766 extern void bio_integrity_advance(struct bio *, unsigned int); 767 extern void bio_integrity_trim(struct bio *); 768 extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t); 769 extern int bioset_integrity_create(struct bio_set *, int); 770 extern void bioset_integrity_free(struct bio_set *); 771 extern void bio_integrity_init(void); 772 773 #else /* CONFIG_BLK_DEV_INTEGRITY */ 774 775 static inline void *bio_integrity(struct bio *bio) 776 { 777 return NULL; 778 } 779 780 static inline int bioset_integrity_create(struct bio_set *bs, int pool_size) 781 { 782 return 0; 783 } 784 785 static inline void bioset_integrity_free (struct bio_set *bs) 786 { 787 return; 788 } 789 790 static inline bool bio_integrity_prep(struct bio *bio) 791 { 792 return true; 793 } 794 795 static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src, 796 gfp_t gfp_mask) 797 { 798 return 0; 799 } 800 801 static inline void bio_integrity_advance(struct bio *bio, 802 unsigned int bytes_done) 803 { 804 return; 805 } 806 807 static inline void bio_integrity_trim(struct bio *bio) 808 { 809 return; 810 } 811 812 static inline void bio_integrity_init(void) 813 { 814 return; 815 } 816 817 static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag) 818 { 819 return false; 820 } 821 822 static inline void *bio_integrity_alloc(struct bio * bio, gfp_t gfp, 823 unsigned int nr) 824 { 825 return ERR_PTR(-EINVAL); 826 } 827 828 static inline int bio_integrity_add_page(struct bio *bio, struct page *page, 829 unsigned int len, unsigned int offset) 830 { 831 return 0; 832 } 833 834 #endif /* CONFIG_BLK_DEV_INTEGRITY */ 835 836 #endif /* CONFIG_BLOCK */ 837 #endif /* __LINUX_BIO_H */ 838