/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/mempool.h>
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>
#include <linux/uio.h>

#define BIO_MAX_VECS		256U

struct queue_limits;

static inline unsigned int bio_max_segs(unsigned int nr_segs)
{
	return min(nr_segs, BIO_MAX_VECS);
}

#define bio_prio(bio)			(bio)->bi_ioprio
#define bio_set_prio(bio, prio)		((bio)->bi_ioprio = prio)

#define bio_iter_iovec(bio, iter)				\
	bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter)				\
	bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter)					\
	bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter)				\
	bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio)		bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)		bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)

#define bvec_iter_sectors(iter)	((iter).bi_size >> 9)
#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))

#define bio_sectors(bio)	bvec_iter_sectors((bio)->bi_iter)
#define bio_end_sector(bio)	bvec_iter_end_sector((bio)->bi_iter)

/*
 * Return the data direction, READ or WRITE.
 */
#define bio_data_dir(bio) \
	(op_is_write(bio_op(bio)) ? WRITE : READ)

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
	if (bio &&
	    bio->bi_iter.bi_size &&
	    bio_op(bio) != REQ_OP_DISCARD &&
	    bio_op(bio) != REQ_OP_SECURE_ERASE &&
	    bio_op(bio) != REQ_OP_WRITE_ZEROES)
		return true;

	return false;
}

static inline bool bio_no_advance_iter(const struct bio *bio)
{
	return bio_op(bio) == REQ_OP_DISCARD ||
	       bio_op(bio) == REQ_OP_SECURE_ERASE ||
	       bio_op(bio) == REQ_OP_WRITE_ZEROES;
}

static inline void *bio_data(struct bio *bio)
{
	if (bio_has_data(bio))
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}

static inline bool bio_next_segment(const struct bio *bio,
				    struct bvec_iter_all *iter)
{
	if (iter->idx >= bio->bi_vcnt)
		return false;

	bvec_advance(&bio->bi_io_vec[iter->idx], iter);
	return true;
}

/*
 * drivers should _never_ use the all version - the bio may have been split
 * before it got to the driver and the driver won't own all of it
 */
#define bio_for_each_segment_all(bvl, bio, iter) \
	for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); )

static inline void bio_advance_iter(const struct bio *bio,
				    struct bvec_iter *iter, unsigned int bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
		/* TODO: It is reasonable to complete bio with error here. */
}
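
/*
 * Illustrative sketch (the chunk size and handle_chunk() helper are
 * hypothetical, not part of this header): walk a bio in fixed-size pieces by
 * advancing a private copy of its iterator, leaving bio->bi_iter untouched.
 *
 *	struct bvec_iter iter = bio->bi_iter;
 *
 *	while (iter.bi_size) {
 *		unsigned int len = min(iter.bi_size, 4096U);
 *
 *		handle_chunk(bio, &iter, len);
 *		bio_advance_iter(bio, &iter, len);
 *	}
 */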

/* @bytes should be less than or equal to bvec[i->bi_idx].bv_len */
static inline void bio_advance_iter_single(const struct bio *bio,
					   struct bvec_iter *iter,
					   unsigned int bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance_single(bio->bi_io_vec, iter, bytes);
}

void __bio_advance(struct bio *, unsigned bytes);

/**
 * bio_advance - increment/complete a bio by some number of bytes
 * @bio: bio to advance
 * @nbytes: number of bytes to complete
 *
 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
 * be updated on the last bvec as well.
 *
 * @bio will then represent the remaining, uncompleted portion of the io.
 */
static inline void bio_advance(struct bio *bio, unsigned int nbytes)
{
	if (nbytes == bio->bi_iter.bi_size) {
		bio->bi_iter.bi_size = 0;
		return;
	}
	__bio_advance(bio, nbytes);
}

#define __bio_for_each_segment(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = bio_iter_iovec((bio), (iter))), 1);		\
	     bio_advance_iter_single((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter)				\
	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)

#define __bio_for_each_bvec(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
	     bio_advance_iter_single((bio), &(iter), (bvl).bv_len))

/* iterate over multi-page bvec */
#define bio_for_each_bvec(bvl, bio, iter)				\
	__bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter)

/*
 * Iterate over all multi-page bvecs. Drivers shouldn't use this version for
 * the same reasons as bio_for_each_segment_all().
 */
#define bio_for_each_bvec_all(bvl, bio, i)		\
	for (i = 0, bvl = bio_first_bvec_all(bio);	\
	     i < (bio)->bi_vcnt; i++, bvl++)

#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)

static inline unsigned bio_segments(struct bio *bio)
{
	unsigned segs = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	/*
	 * We special case discard/secure erase/write zeroes, because they
	 * interpret bi_size differently:
	 */

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	default:
		break;
	}

	bio_for_each_segment(bv, bio, iter)
		segs++;

	return segs;
}
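
/*
 * Illustrative sketch (the helper name is hypothetical): walk every
 * single-page segment of a bio and sum up its payload bytes.
 * bio_for_each_bvec() could be used the same way to see multi-page bvecs
 * instead.
 *
 *	static unsigned int example_bio_payload_bytes(struct bio *bio)
 *	{
 *		struct bio_vec bv;
 *		struct bvec_iter iter;
 *		unsigned int bytes = 0;
 *
 *		bio_for_each_segment(bv, bio, iter)
 *			bytes += bv.bv_len;
 *
 *		return bytes;
 *	}
 */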

/*
 * Get a reference to a bio, so it won't disappear. The intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(bio);
 * if (bio->bi_flags ...)
 *	do_something
 * bio_put(bio);
 *
 * Without the bio_get(), the I/O could complete before submit_bio() returns,
 * and the bio would then be freed memory by the time the
 * if (bio->bi_flags ...) check runs.
 */
static inline void bio_get(struct bio *bio)
{
	bio->bi_flags |= (1 << BIO_REFFED);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_cnt);
}

static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
	if (count != 1) {
		bio->bi_flags |= (1 << BIO_REFFED);
		smp_mb();
	}
	atomic_set(&bio->__bi_cnt, count);
}

static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
	return (bio->bi_flags & (1U << bit)) != 0;
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags |= (1U << bit);
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags &= ~(1U << bit);
}

static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return bio->bi_io_vec;
}

static inline struct page *bio_first_page_all(struct bio *bio)
{
	return bio_first_bvec_all(bio)->bv_page;
}

static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return &bio->bi_io_vec[bio->bi_vcnt - 1];
}

/**
 * struct folio_iter - State for iterating all folios in a bio.
 * @folio: The current folio we're iterating.  NULL after the last folio.
 * @offset: The byte offset within the current folio.
 * @length: The number of bytes in this iteration (will not cross folio
 *	boundary).
 */
struct folio_iter {
	struct folio *folio;
	size_t offset;
	size_t length;
	/* private: for use by the iterator */
	struct folio *_next;
	size_t _seg_count;
	int _i;
};

static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio,
				   int i)
{
	struct bio_vec *bvec = bio_first_bvec_all(bio) + i;

	fi->folio = page_folio(bvec->bv_page);
	fi->offset = bvec->bv_offset +
			PAGE_SIZE * (bvec->bv_page - &fi->folio->page);
	fi->_seg_count = bvec->bv_len;
	fi->length = min(folio_size(fi->folio) - fi->offset, fi->_seg_count);
	fi->_next = folio_next(fi->folio);
	fi->_i = i;
}

static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
{
	fi->_seg_count -= fi->length;
	if (fi->_seg_count) {
		fi->folio = fi->_next;
		fi->offset = 0;
		fi->length = min(folio_size(fi->folio), fi->_seg_count);
		fi->_next = folio_next(fi->folio);
	} else if (fi->_i + 1 < bio->bi_vcnt) {
		bio_first_folio(fi, bio, fi->_i + 1);
	} else {
		fi->folio = NULL;
	}
}

/**
 * bio_for_each_folio_all - Iterate over each folio in a bio.
 * @fi: struct folio_iter which is updated for each folio.
 * @bio: struct bio to iterate over.
 */
#define bio_for_each_folio_all(fi, bio)				\
	for (bio_first_folio(&fi, bio, 0); fi.folio; bio_next_folio(&fi, bio))
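
/*
 * Illustrative sketch (the helper name is hypothetical): a filesystem
 * completion path marking every folio covered by a bio up to date.
 *
 *	static void example_bio_mark_uptodate(struct bio *bio)
 *	{
 *		struct folio_iter fi;
 *
 *		bio_for_each_folio_all(fi, bio)
 *			folio_mark_uptodate(fi.folio);
 *	}
 */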

enum bip_flags {
	BIP_BLOCK_INTEGRITY	= 1 << 0, /* block layer owns integrity data */
	BIP_MAPPED_INTEGRITY	= 1 << 1, /* ref tag has been remapped */
	BIP_CTRL_NOCHECK	= 1 << 2, /* disable HBA integrity checking */
	BIP_DISK_NOCHECK	= 1 << 3, /* disable disk integrity checking */
	BIP_IP_CHECKSUM		= 1 << 4, /* IP checksum */
};

/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */

	struct bvec_iter	bip_iter;

	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_max_vcnt;	/* integrity bio_vec slots */
	unsigned short		bip_flags;	/* control flags */

	struct bvec_iter	bio_iter;	/* for rewinding parent bio */

	struct work_struct	bip_work;	/* I/O completion */

	struct bio_vec		*bip_vec;
	struct bio_vec		bip_inline_vecs[];/* embedded bvec array */
};

#if defined(CONFIG_BLK_DEV_INTEGRITY)

static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
{
	if (bio->bi_opf & REQ_INTEGRITY)
		return bio->bi_integrity;

	return NULL;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip)
		return bip->bip_flags & flag;

	return false;
}

static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
{
	return bip->bip_iter.bi_sector;
}

static inline void bip_set_seed(struct bio_integrity_payload *bip,
				sector_t seed)
{
	bip->bip_iter.bi_sector = seed;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

void bio_trim(struct bio *bio, sector_t offset, sector_t size);
extern struct bio *bio_split(struct bio *bio, int sectors,
			     gfp_t gfp, struct bio_set *bs);
struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
		unsigned *segs, struct bio_set *bs, unsigned max_bytes);

/**
 * bio_next_split - get next @sectors from a bio, splitting if necessary
 * @bio: bio to split
 * @sectors: number of sectors to split from the front of @bio
 * @gfp: gfp mask
 * @bs: bio set to allocate from
 *
 * Return: a bio representing the next @sectors of @bio - if the bio is
 * smaller than @sectors, returns the original bio unchanged.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
					 gfp_t gfp, struct bio_set *bs)
{
	if (sectors >= bio_sectors(bio))
		return bio;

	return bio_split(bio, sectors, gfp, bs);
}
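
/*
 * Illustrative sketch (max_sectors and my_bio_set are hypothetical;
 * bio_chain() is declared further down in this header): chop a bio into
 * pieces of at most max_sectors, submitting each front piece as it is split
 * off.  Once the remainder fits, bio_next_split() returns the original bio
 * and the loop terminates.
 *
 *	struct bio *split;
 *
 *	do {
 *		split = bio_next_split(bio, max_sectors, GFP_NOIO,
 *				       &my_bio_set);
 *		if (split != bio)
 *			bio_chain(split, bio);
 *		submit_bio(split);
 *	} while (split != bio);
 */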

enum {
	BIOSET_NEED_BVECS	= BIT(0),
	BIOSET_NEED_RESCUER	= BIT(1),
	BIOSET_PERCPU_CACHE	= BIT(2),
};
extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
extern void bioset_exit(struct bio_set *);
extern int biovec_init_pool(mempool_t *pool, int pool_entries);

struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
			     blk_opf_t opf, gfp_t gfp_mask,
			     struct bio_set *bs);
struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask);
extern void bio_put(struct bio *);

struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
		gfp_t gfp, struct bio_set *bs);
int bio_init_clone(struct block_device *bdev, struct bio *bio,
		struct bio *bio_src, gfp_t gfp);

extern struct bio_set fs_bio_set;

static inline struct bio *bio_alloc(struct block_device *bdev,
		unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp_mask)
{
	return bio_alloc_bioset(bdev, nr_vecs, opf, gfp_mask, &fs_bio_set);
}

void submit_bio(struct bio *bio);

extern void bio_endio(struct bio *);

static inline void bio_io_error(struct bio *bio)
{
	bio->bi_status = BLK_STS_IOERR;
	bio_endio(bio);
}

static inline void bio_wouldblock_error(struct bio *bio)
{
	bio_set_flag(bio, BIO_QUIET);
	bio->bi_status = BLK_STS_AGAIN;
	bio_endio(bio);
}
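
/*
 * Illustrative sketch (the helper name is hypothetical; __bio_add_page() and
 * submit_bio_wait() are declared further down in this header): read one page
 * synchronously from a block device using the shared fs_bio_set allocator.
 *
 *	static int example_read_page(struct block_device *bdev,
 *				     sector_t sector, struct page *page)
 *	{
 *		struct bio *bio;
 *		int ret;
 *
 *		bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
 *		bio->bi_iter.bi_sector = sector;
 *		__bio_add_page(bio, page, PAGE_SIZE, 0);
 *		ret = submit_bio_wait(bio);
 *		bio_put(bio);
 *		return ret;
 *	}
 */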

/*
 * Calculate number of bvec segments that should be allocated to fit data
 * pointed to by @iter. If @iter is backed by bvec it's going to be reused
 * instead of allocating a new one.
 */
static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs)
{
	if (iov_iter_is_bvec(iter))
		return 0;
	return iov_iter_npages(iter, max_segs);
}

struct request_queue;

extern int submit_bio_wait(struct bio *bio);
void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
	      unsigned short max_vecs, blk_opf_t opf);
extern void bio_uninit(struct bio *);
void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf);
void bio_chain(struct bio *, struct bio *);

int bio_add_page(struct bio *, struct page *, unsigned len, unsigned off);
bool bio_add_folio(struct bio *, struct folio *, size_t len, size_t off);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
int bio_add_zone_append_page(struct bio *bio, struct page *page,
			     unsigned int len, unsigned int offset);
void __bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter);
void __bio_release_pages(struct bio *bio, bool mark_dirty);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
			       struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern void bio_free_pages(struct bio *bio);
void guard_bio_eod(struct bio *bio);
void zero_fill_bio(struct bio *bio);

static inline void bio_release_pages(struct bio *bio, bool mark_dirty)
{
	if (!bio_flagged(bio, BIO_NO_PAGE_REF))
		__bio_release_pages(bio, mark_dirty);
}

#define bio_dev(bio) \
	disk_devt((bio)->bi_bdev->bd_disk)

#ifdef CONFIG_BLK_CGROUP
void bio_associate_blkg(struct bio *bio);
void bio_associate_blkg_from_css(struct bio *bio,
				 struct cgroup_subsys_state *css);
void bio_clone_blkg_association(struct bio *dst, struct bio *src);
void blkcg_punt_bio_submit(struct bio *bio);
#else	/* CONFIG_BLK_CGROUP */
static inline void bio_associate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg_from_css(struct bio *bio,
					       struct cgroup_subsys_state *css)
{ }
static inline void bio_clone_blkg_association(struct bio *dst,
					      struct bio *src) { }
static inline void blkcg_punt_bio_submit(struct bio *bio)
{
	submit_bio(bio);
}
#endif	/* CONFIG_BLK_CGROUP */

static inline void bio_set_dev(struct bio *bio, struct block_device *bdev)
{
	bio_clear_flag(bio, BIO_REMAPPED);
	if (bio->bi_bdev != bdev)
		bio_clear_flag(bio, BIO_BPS_THROTTLED);
	bio->bi_bdev = bdev;
	bio_associate_blkg(bio);
}
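
/*
 * Illustrative sketch of a remapping (stacking) driver's map path: the
 * "target" structure is hypothetical and submit_bio_noacct() is declared in
 * <linux/blkdev.h>.  Redirect a bio to another device/offset and resubmit it:
 *
 *	bio_set_dev(bio, target->bdev);
 *	bio->bi_iter.bi_sector += target->sector_offset;
 *	submit_bio_noacct(bio);
 */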

/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio.  The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
	struct bio *head;
	struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
	return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST	{ NULL, NULL }

#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
	unsigned sz = 0;
	struct bio *bio;

	bio_list_for_each(bio, bl)
		sz++;

	return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;

	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;

	bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = bl->head;

	bl->head = bio;

	if (!bl->tail)
		bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;

	bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
				       struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->head)
		bl2->tail->bi_next = bl->head;
	else
		bl->tail = bl2->tail;

	bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bl->head->bi_next;
		if (!bl->head)
			bl->tail = NULL;

		bio->bi_next = NULL;
	}

	return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	bl->head = bl->tail = NULL;

	return bio;
}

/*
 * Increment chain count for the bio. Make sure the CHAIN flag update
 * is visible before the raised count.
 */
static inline void bio_inc_remaining(struct bio *bio)
{
	bio_set_flag(bio, BIO_CHAIN);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_remaining);
}
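
/*
 * Illustrative sketch (the variable names are hypothetical): a driver that
 * collects deferred bios on a bio_list and drains them later, preserving
 * submission order.
 *
 *	struct bio_list deferred = BIO_EMPTY_LIST;
 *	struct bio *bio;
 *
 *	bio_list_add(&deferred, some_bio);
 *	...
 *	while ((bio = bio_list_pop(&deferred)))
 *		submit_bio(bio);
 */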

/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2

struct bio_set {
	struct kmem_cache *bio_slab;
	unsigned int front_pad;

	/*
	 * per-cpu bio alloc cache
	 */
	struct bio_alloc_cache __percpu *cache;

	mempool_t bio_pool;
	mempool_t bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t bio_integrity_pool;
	mempool_t bvec_integrity_pool;
#endif

	unsigned int back_pad;
	/*
	 * Deadlock avoidance for stacking block drivers: see comments in
	 * bio_alloc_bioset() for details
	 */
	spinlock_t		rescue_lock;
	struct bio_list		rescue_list;
	struct work_struct	rescue_work;
	struct workqueue_struct	*rescue_workqueue;

	/*
	 * Hot un-plug notifier for the per-cpu cache, if used
	 */
	struct hlist_node cpuhp_dead;
};

static inline bool bioset_initialized(struct bio_set *bs)
{
	return bs->bio_slab != NULL;
}

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_for_each_vec(bvl, bip, iter)				\
	for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
	for_each_bio(_bio)						\
		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_prep(struct bio *);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);

#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline void *bio_integrity(struct bio *bio)
{
	return NULL;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
	return;
}

static inline bool bio_integrity_prep(struct bio *bio)
{
	return true;
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
				      gfp_t gfp_mask)
{
	return 0;
}

static inline void bio_integrity_advance(struct bio *bio,
					 unsigned int bytes_done)
{
	return;
}

static inline void bio_integrity_trim(struct bio *bio)
{
	return;
}

static inline void bio_integrity_init(void)
{
	return;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	return false;
}

static inline void *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
					unsigned int nr)
{
	return ERR_PTR(-EINVAL);
}

static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
					 unsigned int len, unsigned int offset)
{
	return 0;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */
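
/*
 * Illustrative sketch (my_bio_set and the call sites are hypothetical): a
 * driver that embeds a private bio_set so bios on its I/O path are allocated
 * from its own mempool instead of fs_bio_set.
 *
 *	static struct bio_set my_bio_set;
 *
 *	driver init:	bioset_init(&my_bio_set, BIO_POOL_SIZE, 0,
 *				    BIOSET_NEED_BVECS);
 *	I/O path:	bio = bio_alloc_bioset(bdev, nr_vecs, REQ_OP_WRITE,
 *					       GFP_NOIO, &my_bio_set);
 *	driver exit:	bioset_exit(&my_bio_set);
 */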

/*
 * Mark a bio as polled. Note that for async polled IO, the caller must
 * expect -EWOULDBLOCK if we cannot allocate a request (or other resources).
 * We cannot block waiting for requests on polled IO, as those completions
 * must be found by the caller. This is different from IRQ-driven IO, where
 * it's safe to wait for IO to complete.
 */
static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
{
	bio->bi_opf |= REQ_POLLED;
	if (!is_sync_kiocb(kiocb))
		bio->bi_opf |= REQ_NOWAIT;
}

static inline void bio_clear_polled(struct bio *bio)
{
	bio->bi_opf &= ~REQ_POLLED;
}

struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
		unsigned int nr_pages, blk_opf_t opf, gfp_t gfp);

#endif /* __LINUX_BIO_H */