/openbmc/linux/include/linux/
bio.h
     9  /* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
    22  #define bio_prio(bio)            (bio)->bi_ioprio
    23  #define bio_set_prio(bio, prio)  ((bio)->bi_ioprio = prio)
    25  #define bio_iter_iovec(bio, iter)                \
    26      bvec_iter_bvec((bio)->bi_io_vec, (iter))
    28  #define bio_iter_page(bio, iter)                 \
    29      bvec_iter_page((bio)->bi_io_vec, (iter))
    30  #define bio_iter_len(bio, iter)                  \
    31      bvec_iter_len((bio)->bi_io_vec, (iter))
    32  #define bio_iter_offset(bio, iter)               \
[all …]

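The iterator macros above are the building blocks for the bio_for_each_segment() family. A minimal sketch (not taken from bio.h; the helper name is hypothetical) of how a caller typically walks a bio's payload:

    #include <linux/bio.h>

    /* Sum the payload bytes of a bio by walking its segments. */
    static unsigned int count_bio_bytes(struct bio *bio)
    {
        struct bio_vec bv;
        struct bvec_iter iter;
        unsigned int bytes = 0;

        bio_for_each_segment(bv, bio, iter)
            bytes += bv.bv_len;

        return bytes;
    }
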
/openbmc/linux/block/
bio.c
     7  #include <linux/bio.h>
    32      struct bio *free_list;
    33      struct bio *free_list_irq;
    68   * fs_bio_set is the bio_set containing bio and iovec memory pools used by
    93      snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size);
   115      return bs->front_pad + sizeof(struct bio) + bs->back_pad;
   144      if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
   214  void bio_uninit(struct bio *bio)
   217      if (bio->bi_blkg) {
   218          blkg_put(bio->bi_blkg);
[all …]

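fs_bio_set and the slab/mempool code above back bio_alloc_bioset(). A hedged sketch of the usual pattern for a driver that wants its own pool (the names my_bio_set, my_pool_init and my_alloc_bio are hypothetical):

    #include <linux/bio.h>

    static struct bio_set my_bio_set;   /* hypothetical private pool */

    static int my_pool_init(void)
    {
        /* Preallocate 16 bios plus inline bvecs for forward progress. */
        return bioset_init(&my_bio_set, 16, 0, BIOSET_NEED_BVECS);
    }

    static struct bio *my_alloc_bio(struct block_device *bdev,
                                    unsigned short nr_vecs)
    {
        /* May sleep; falls back to the mempool under memory pressure. */
        return bio_alloc_bioset(bdev, nr_vecs, REQ_OP_READ, GFP_NOIO,
                                &my_bio_set);
    }

The matching teardown (bioset_exit()) is omitted here for brevity.
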
blk-map.c
     8  #include <linux/bio.h>
    41   * bio_copy_from_iter - copy all pages from iov_iter to bio
    42   * @bio: The &struct bio which describes the I/O as destination
    45   * Copy all pages from iov_iter to bio.
    48  static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
    53      bio_for_each_segment_all(bvec, bio, iter_all) {
    72   * bio_copy_to_iter - copy all pages from bio to iov_iter
    73   * @bio: The &struct bio which describes the I/O as source
    76   * Copy all pages from bio to iov_iter.
    79  static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
[all …]

blk-merge.c
     7  #include <linux/bio.h>
    21  static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
    23      *bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
    26  static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
    28      struct bvec_iter iter = bio->bi_iter;
    31      bio_get_first_bvec(bio, bv);
    32      if (bv->bv_len == bio->bi_iter.bi_size)
    33          return;     /* this bio only has a single bvec */
    35      bio_advance_iter(bio, &iter, iter.bi_size);
    42      *bv = bio->bi_io_vec[idx];
[all …]

bio-integrity.c
     3   * bio-integrity.c - bio data integrity extensions
    12  #include <linux/bio.h>
    39   * bio_integrity_alloc - Allocate integrity payload and attach it to bio
    40   * @bio: bio to attach integrity metadata to
    44   * Description: This function prepares a bio for attaching integrity
    48  struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
    53      struct bio_set *bs = bio->bi_pool;
    56      if (WARN_ON_ONCE(bio_has_crypt_ctx(bio)))
    83      bip->bip_bio = bio;
    84      bio->bi_integrity = bip;
[all …]

bounce.c
    13  #include <linux/bio.h>
    76  static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
    81       * The bio of @from is created by bounce, so we can iterate
   102  static void bounce_end_io(struct bio *bio)
   104      struct bio *bio_orig = bio->bi_private;
   112      bio_for_each_segment_all(bvec, bio, iter_all) {
   121      bio_orig->bi_status = bio->bi_status;
   123      bio_put(bio);
   126  static void bounce_end_io_write(struct bio *bio)
   128      bounce_end_io(bio);
[all …]

blk-crypto-internal.h
     9  #include <linux/bio.h>
    31  bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio);
    37                                       struct bio *bio)
    40                                     bio->bi_crypt_context);
    44                                        struct bio *bio)
    46      return bio_crypt_ctx_mergeable(bio->bi_crypt_context,
    47                                     bio->bi_iter.bi_size, req->crypt_ctx);
    97                                           struct bio *bio)
   103                                                struct bio *bio)
   109                                               struct bio *bio)
[all …]

blk-core.c
     9   * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
    17  #include <linux/bio.h>
   135   * string format. Useful in the debugging and tracing bio or request. For
   339  int __bio_queue_enter(struct request_queue *q, struct bio *bio)
   342          struct gendisk *disk = bio->bi_bdev->bd_disk;
   344          if (bio->bi_opf & REQ_NOWAIT) {
   347              bio_wouldblock_error(bio);
   369              bio_io_error(bio);
   502  static inline void bio_check_ro(struct bio *bio)
   504      if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
[all …]

blk-crypto-fallback.c
    38  …"Number of preallocated bio fallback crypto contexts for blk-crypto to use during crypto API fallb…
    43       * Copy of the bvec_iter when this bio was submitted.
    44       * We only want to en/decrypt the part of the bio as described by the
    45       * bvec_iter upon submission because bio might be split before being
    52      struct bio *bio;
   144  static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)
   146      struct bio *src_bio = enc_bio->bi_private;
   160  static struct bio *blk_crypto_fallback_clone_bio(struct bio *bio_src)
   165      struct bio *bio;
   167      bio = bio_kmalloc(nr_segs, GFP_NOIO);
[all …]

blk-lib.c
     7  #include <linux/bio.h>
    32       * Align the bio size to the discard granularity to make splitting the bio
    39          sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)
    41      struct bio *bio = *biop;
    67          bio = blk_next_bio(bio, bdev, 0, REQ_OP_DISCARD, gfp_mask);
    68          bio->bi_iter.bi_sector = sector;
    69          bio->bi_iter.bi_size = req_sects << 9;
    82      *biop = bio;
   100      struct bio *bio = NULL;
   105      ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, &bio);
[all …]

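__blkdev_issue_discard() appends chained bios to *biop rather than submitting them itself, which is why the caller at line 105 above passes &bio and submits the chain afterwards. A hedged sketch of that calling convention (discard_range is a hypothetical wrapper; error-mapping and plugging done by the real blkdev_issue_discard() are omitted):

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    /* Build a discard bio chain, then submit it and wait for completion. */
    static int discard_range(struct block_device *bdev, sector_t sector,
                             sector_t nr_sects)
    {
        struct bio *bio = NULL;
        int ret;

        ret = __blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL, &bio);
        if (!ret && bio) {
            ret = submit_bio_wait(bio);
            bio_put(bio);
        }
        return ret;
    }
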
blk-rq-qos.h
    38      void (*throttle)(struct rq_qos *, struct bio *);
    39      void (*track)(struct rq_qos *, struct request *, struct bio *);
    40      void (*merge)(struct rq_qos *, struct request *, struct bio *);
    44      void (*done_bio)(struct rq_qos *, struct bio *);
    45      void (*cleanup)(struct rq_qos *, struct bio *);
   103  void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
   107  void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
   108  void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
   109  void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio);
   110  void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);
[all …]

blk.h
    37  int __bio_queue_enter(struct request_queue *q, struct bio *bio);
    38  void submit_bio_noacct_nocheck(struct bio *bio);
    64  static inline int bio_queue_enter(struct bio *bio)
    66      struct request_queue *q = bdev_get_queue(bio->bi_bdev);
    70      return __bio_queue_enter(q, bio);
   148   * 1) If max_discard_segments > 1, the driver treats every bio as a range and
   184  bool __bio_integrity_endio(struct bio *);
   185  void bio_integrity_free(struct bio *bio);
   186  static inline bool bio_integrity_endio(struct bio *bio)
   188      if (bio_integrity(bio))
[all …]

/openbmc/linux/fs/btrfs/
bio.c
     7  #include <linux/bio.h>
     8  #include "bio.h"
    38      return is_data_bbio(bbio) && btrfs_op(&bbio->bio) == BTRFS_MAP_WRITE;
    42   * Initialize a btrfs_bio structure. This skips the embedded bio itself as it
    48      memset(bbio, 0, offsetof(struct btrfs_bio, bio));
    67      struct bio *bio;
    69      bio = bio_alloc_bioset(NULL, nr_vecs, opf, GFP_NOFS, &btrfs_bioset);
    70      bbio = btrfs_bio(bio);
    80      struct bio *bio;
    85      bio = bio_split_rw(&orig_bbio->bio, &fs_info->limits, &nr_segs,
[all …]

/openbmc/linux/drivers/md/bcache/
request.c
    40  static void bio_csum(struct bio *bio, struct bkey *k)
    46      bio_for_each_segment(bv, bio, iter) {
   111      struct bio *bio = op->bio;
   114           bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
   116      while (bio_sectors(bio)) {
   117          unsigned int sectors = min(bio_sectors(bio),
   123          bio->bi_iter.bi_sector += sectors;
   124          bio->bi_iter.bi_size -= sectors << 9;
   128                      bio->bi_iter.bi_sector,
   134      bio_put(bio);
[all …]

io.c
    17  void bch_bbio_free(struct bio *bio, struct cache_set *c)
    19      struct bbio *b = container_of(bio, struct bbio, bio);
    24  struct bio *bch_bbio_alloc(struct cache_set *c)
    27      struct bio *bio = &b->bio;
    29      bio_init(bio, NULL, bio->bi_inline_vecs,
    32      return bio;
    35  void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
    37      struct bbio *b = container_of(bio, struct bbio, bio);
    39      bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
    40      bio_set_dev(bio, c->cache->bdev);
[all …]

movinggc.c
    19      struct bbio     bio;
    48      struct bio *bio = &io->bio.bio;
    50      bio_free_pages(bio);
    62  static void read_moving_endio(struct bio *bio)
    64      struct bbio *b = container_of(bio, struct bbio, bio);
    65      struct moving_io *io = container_of(bio->bi_private,
    68      if (bio->bi_status)
    69          io->op.status = bio->bi_status;
    75      bch_bbio_endio(io->op.c, bio, bio->bi_status, "reading data to move");
    80      struct bio *bio = &io->bio.bio;
[all …]

/openbmc/linux/drivers/md/
dm-io-rewind.c
     6  #include <linux/bio.h>
    49   * @bio: bio whose integrity vector to update
    56  static void dm_bio_integrity_rewind(struct bio *bio, unsigned int bytes_done)
    58      struct bio_integrity_payload *bip = bio_integrity(bio);
    59      struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
    68  static inline void dm_bio_integrity_rewind(struct bio *bio,
    94  static void dm_bio_crypt_rewind(struct bio *bio, unsigned int bytes)
    96      struct bio_crypt_ctx *bc = bio->bi_crypt_context;
   104  static inline void dm_bio_crypt_rewind(struct bio *bio, unsigned int bytes)
   110  static inline void dm_bio_rewind_iter(const struct bio *bio,
[all …]

dm-bio-record.h
    11  #include <linux/bio.h>
    15   * There are lots of mutable fields in the bio struct that get
    17   * such as multipath, may wish to resubmit a bio on error. The
    19   * original bio state.
    33  static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
    35      bd->bi_bdev = bio->bi_bdev;
    36      bd->bi_flags = bio->bi_flags;
    37      bd->bi_iter = bio->bi_iter;
    38      bd->__bi_remaining = atomic_read(&bio->__bi_remaining);
    39      bd->bi_end_io = bio->bi_end_io;
[all …]

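The header records a bio's mutable fields so a target can roll them back before a retry, as the comment at lines 15-19 explains. A hedged sketch of how a hypothetical dm target would pair dm_bio_record() with dm_bio_restore() (my_per_bio_data, my_map and my_retry are illustrative names, not dm API):

    #include <linux/bio.h>
    #include "dm-bio-record.h"

    /* Hypothetical per-bio data kept by a dm target that may resubmit. */
    struct my_per_bio_data {
        struct dm_bio_details details;
    };

    static void my_map(struct my_per_bio_data *pb, struct bio *bio)
    {
        /* Snapshot bi_bdev, bi_iter, bi_end_io, ... before lower layers change them. */
        dm_bio_record(&pb->details, bio);
        /* ... remap and submit the bio ... */
    }

    static void my_retry(struct my_per_bio_data *pb, struct bio *bio)
    {
        /* Put the bio back into its recorded state, then resubmit it. */
        dm_bio_restore(&pb->details, bio);
        /* ... resubmit the bio ... */
    }
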
dm-zoned-target.c
    17   * Zone BIO context.
    22      struct bio      *bio;
    73   * Target BIO completion.
    75  static inline void dmz_bio_endio(struct bio *bio, blk_status_t status)
    78          dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
    80      if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK)
    81          bio->bi_status = status;
    82      if (bioctx->dev && bio->bi_status != BLK_STS_OK)
    89      if (bio->bi_status != BLK_STS_OK &&
    90          bio_op(bio) == REQ_OP_WRITE &&
[all …]

raid1-10.c
    13   * correct the read error. To keep track of bad blocks on a per-bio
    16  #define IO_BLOCKED ((struct bio *)1)
    19   * the success by setting devs[n].bio to IO_MADE_GOOD
    21  #define IO_MADE_GOOD ((struct bio *)2)
    23  #define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
    88   * IO, and it is per-bio, so make .bi_private points to it.
    90  static inline struct resync_pages *get_resync_pages(struct bio *bio)
    92      return bio->bi_private;
    96  static void md_bio_reset_resync_pages(struct bio *bio, struct resync_pages *rp,
   106      if (WARN_ON(!bio_add_page(bio, page, len, 0))) {
[all …]

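Because devs[n].bio may hold the IO_BLOCKED or IO_MADE_GOOD sentinel instead of a real bio, consumers filter with BIO_SPECIAL() before dereferencing or freeing the pointer. A minimal sketch of that check (put_dev_bio is a hypothetical helper, not raid1/raid10 code):

    /* Only treat the pointer as a real bio if it is not a sentinel value. */
    static void put_dev_bio(struct bio *bio)
    {
        if (bio && !BIO_SPECIAL(bio))
            bio_put(bio);
    }
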
/openbmc/linux/fs/ext4/
readpage.c
    36  #include <linux/bio.h>
    63      struct bio *bio;
    69  static void __read_end_io(struct bio *bio)
    73      bio_for_each_folio_all(fi, bio) {
    76          if (bio->bi_status)
    82      if (bio->bi_private)
    83          mempool_free(bio->bi_private, bio_post_read_ctx_pool);
    84      bio_put(bio);
    93      struct bio *bio = ctx->bio;
    95      if (fscrypt_decrypt_bio(bio))
[all …]

/openbmc/linux/fs/squashfs/
block.c
    22  #include <linux/bio.h>
    33  static int copy_bio_to_actor(struct bio *bio,
    46      if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all)))
    70          if (!bio_next_segment(bio, &iter_all))
    79  static int squashfs_bio_read_cached(struct bio *fullbio,
    87      struct bio *bio = NULL;
   114          if (!bio || idx != end_idx) {
   115              struct bio *new = bio_alloc_clone(bdev, fullbio,
   118              if (bio) {
   119                  bio_trim(bio, start_idx * PAGE_SECTORS,
[all …]

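squashfs_bio_read_cached() clones the parent bio and trims each clone down to the sub-range it still needs to read. A hedged sketch of that clone-and-trim pattern in isolation (clone_subrange is a hypothetical helper; fs_bio_set is used here only for illustration):

    #include <linux/bio.h>

    /* Clone @parent and restrict the clone to a sector sub-range. */
    static struct bio *clone_subrange(struct block_device *bdev,
                                      struct bio *parent,
                                      sector_t offset, sector_t nr_sectors)
    {
        struct bio *clone = bio_alloc_clone(bdev, parent, GFP_NOIO,
                                            &fs_bio_set);

        if (clone)
            bio_trim(clone, offset, nr_sectors);
        return clone;
    }
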
/openbmc/linux/fs/
mpage.c
    13   * use bio_add_page() to build bio's just the right size
    21  #include <linux/bio.h>
    37   * The mpage code never puts partial pages into a BIO (except for end-of-file).
    46  static void mpage_read_end_io(struct bio *bio)
    49      int err = blk_status_to_errno(bio->bi_status);
    51      bio_for_each_folio_all(fi, bio) {
    59      bio_put(bio);
    62  static void mpage_write_end_io(struct bio *bio)
    65      int err = blk_status_to_errno(bio->bi_status);
    67      bio_for_each_folio_all(fi, bio) {
[all …]

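Both end_io handlers walk the completed bio folio by folio. A hedged sketch of the read-side shape only (the folio helpers used here are an assumption about what this kernel provides, not a copy of mpage_read_end_io()):

    #include <linux/bio.h>
    #include <linux/pagemap.h>

    /* Mark each folio up to date (or not) and unlock it, then drop the bio. */
    static void my_read_end_io(struct bio *bio)
    {
        struct folio_iter fi;
        int err = blk_status_to_errno(bio->bi_status);

        bio_for_each_folio_all(fi, bio) {
            if (!err)
                folio_mark_uptodate(fi.folio);
            folio_unlock(fi.folio);
        }

        bio_put(bio);
    }
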
/openbmc/linux/drivers/nvme/target/
io-cmd-bdev.c
   179  static void nvmet_bio_done(struct bio *bio)
   181      struct nvmet_req *req = bio->bi_private;
   183      nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
   184      nvmet_req_bio_put(req, bio);
   188  static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
   202      bip = bio_integrity_alloc(bio, GFP_NOIO,
   210      bip_set_seed(bip, bio->bi_iter.bi_sector >>
   213      resid = bio_integrity_bytes(bi, bio_sectors(bio));
   216          rc = bio_integrity_add_page(bio, miter->page, len,
   233  static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
[all …]

/openbmc/linux/include/trace/events/
block.h
   145   * the @rq->bio is %NULL, then there is absolutely no additional work to
   146   * do for the request. If @rq->bio is non-NULL then there is
   279   * @bio: block operation completed
   282   * block IO operation @bio.
   286      TP_PROTO(struct request_queue *q, struct bio *bio),
   288      TP_ARGS(q, bio),
   299          __entry->dev       = bio_dev(bio);
   300          __entry->sector    = bio->bi_iter.bi_sector;
   301          __entry->nr_sector = bio_sectors(bio);
   302          __entry->error     = blk_status_to_errno(bio->bi_status);
[all …]

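Tracepoints declared this way can be hooked from a module by registering a probe whose signature is the TP_PROTO arguments with a void *data cookie prepended. A hedged sketch for block_bio_complete (the register_trace_block_bio_complete() helper follows the usual generated register_trace_<name>() convention and assumes the tracepoint is built into this kernel):

    #include <linux/blkdev.h>
    #include <trace/events/block.h>

    /* Probe matching the TP_PROTO above, with the data cookie prepended. */
    static void my_bio_complete_probe(void *data, struct request_queue *q,
                                      struct bio *bio)
    {
        pr_info("bio complete: sector %llu, %u sectors, status %d\n",
                (unsigned long long)bio->bi_iter.bi_sector,
                bio_sectors(bio), blk_status_to_errno(bio->bi_status));
    }

    static int my_probe_init(void)
    {
        return register_trace_block_bio_complete(my_bio_complete_probe, NULL);
    }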