18c16567dSChristoph Hellwig /* SPDX-License-Identifier: GPL-2.0 */
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
41da177e4SLinus Torvalds */
51da177e4SLinus Torvalds #ifndef __LINUX_BIO_H
61da177e4SLinus Torvalds #define __LINUX_BIO_H
71da177e4SLinus Torvalds
81da177e4SLinus Torvalds #include <linux/mempool.h>
97cc01581STejun Heo /* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
107cc01581STejun Heo #include <linux/blk_types.h>
113e1a88ecSPavel Begunkov #include <linux/uio.h>
127cc01581STejun Heo
13a8affc03SChristoph Hellwig #define BIO_MAX_VECS 256U
145f7136dbSMatthew Wilcox (Oracle)
15fd8f8edeSChristoph Hellwig struct queue_limits;
16fd8f8edeSChristoph Hellwig
/* Clamp a segment count to the bio layer's hard limit of BIO_MAX_VECS. */
static inline unsigned int bio_max_segs(unsigned int nr_segs)
{
	return min(nr_segs, BIO_MAX_VECS);
}
211da177e4SLinus Torvalds
2243b62ce3SMike Christie #define bio_prio(bio) (bio)->bi_ioprio
2343b62ce3SMike Christie #define bio_set_prio(bio, prio) ((bio)->bi_ioprio = prio)
2422e2c507SJens Axboe
254550dd6cSKent Overstreet #define bio_iter_iovec(bio, iter) \
264550dd6cSKent Overstreet bvec_iter_bvec((bio)->bi_io_vec, (iter))
274550dd6cSKent Overstreet
284550dd6cSKent Overstreet #define bio_iter_page(bio, iter) \
294550dd6cSKent Overstreet bvec_iter_page((bio)->bi_io_vec, (iter))
304550dd6cSKent Overstreet #define bio_iter_len(bio, iter) \
314550dd6cSKent Overstreet bvec_iter_len((bio)->bi_io_vec, (iter))
324550dd6cSKent Overstreet #define bio_iter_offset(bio, iter) \
334550dd6cSKent Overstreet bvec_iter_offset((bio)->bi_io_vec, (iter))
344550dd6cSKent Overstreet
354550dd6cSKent Overstreet #define bio_page(bio) bio_iter_page((bio), (bio)->bi_iter)
364550dd6cSKent Overstreet #define bio_offset(bio) bio_iter_offset((bio), (bio)->bi_iter)
374550dd6cSKent Overstreet #define bio_iovec(bio) bio_iter_iovec((bio), (bio)->bi_iter)
387988613bSKent Overstreet
3938a72dacSKent Overstreet #define bvec_iter_sectors(iter) ((iter).bi_size >> 9)
4038a72dacSKent Overstreet #define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))
4138a72dacSKent Overstreet
4238a72dacSKent Overstreet #define bio_sectors(bio) bvec_iter_sectors((bio)->bi_iter)
4338a72dacSKent Overstreet #define bio_end_sector(bio) bvec_iter_end_sector((bio)->bi_iter)
44bf2de6f5SJens Axboe
45458b76edSKent Overstreet /*
46d3849953SChristoph Hellwig * Return the data direction, READ or WRITE.
47d3849953SChristoph Hellwig */
48d3849953SChristoph Hellwig #define bio_data_dir(bio) \
49d3849953SChristoph Hellwig (op_is_write(bio_op(bio)) ? WRITE : READ)
50d3849953SChristoph Hellwig
51d3849953SChristoph Hellwig /*
52458b76edSKent Overstreet * Check whether this bio carries any data or not. A NULL bio is allowed.
53458b76edSKent Overstreet */
bio_has_data(struct bio * bio)54458b76edSKent Overstreet static inline bool bio_has_data(struct bio *bio)
55458b76edSKent Overstreet {
56458b76edSKent Overstreet if (bio &&
57458b76edSKent Overstreet bio->bi_iter.bi_size &&
587afafc8aSAdrian Hunter bio_op(bio) != REQ_OP_DISCARD &&
59a6f0788eSChaitanya Kulkarni bio_op(bio) != REQ_OP_SECURE_ERASE &&
60a6f0788eSChaitanya Kulkarni bio_op(bio) != REQ_OP_WRITE_ZEROES)
61458b76edSKent Overstreet return true;
62458b76edSKent Overstreet
63458b76edSKent Overstreet return false;
64458b76edSKent Overstreet }
65458b76edSKent Overstreet
bio_no_advance_iter(const struct bio * bio)66c1527c0eSBart Van Assche static inline bool bio_no_advance_iter(const struct bio *bio)
6795fe6c1aSMike Christie {
687afafc8aSAdrian Hunter return bio_op(bio) == REQ_OP_DISCARD ||
697afafc8aSAdrian Hunter bio_op(bio) == REQ_OP_SECURE_ERASE ||
70a6f0788eSChaitanya Kulkarni bio_op(bio) == REQ_OP_WRITE_ZEROES;
7195fe6c1aSMike Christie }
7295fe6c1aSMike Christie
bio_data(struct bio * bio)73bf2de6f5SJens Axboe static inline void *bio_data(struct bio *bio)
74bf2de6f5SJens Axboe {
75458b76edSKent Overstreet if (bio_has_data(bio))
76bf2de6f5SJens Axboe return page_address(bio_page(bio)) + bio_offset(bio);
77bf2de6f5SJens Axboe
78bf2de6f5SJens Axboe return NULL;
79bf2de6f5SJens Axboe }
801da177e4SLinus Torvalds
bio_next_segment(const struct bio * bio,struct bvec_iter_all * iter)811200e07fSMing Lei static inline bool bio_next_segment(const struct bio *bio,
821200e07fSMing Lei struct bvec_iter_all *iter)
831200e07fSMing Lei {
841200e07fSMing Lei if (iter->idx >= bio->bi_vcnt)
851200e07fSMing Lei return false;
861200e07fSMing Lei
871200e07fSMing Lei bvec_advance(&bio->bi_io_vec[iter->idx], iter);
881200e07fSMing Lei return true;
891200e07fSMing Lei }
906dc4f100SMing Lei
911da177e4SLinus Torvalds /*
92d74c6d51SKent Overstreet * drivers should _never_ use the all version - the bio may have been split
93d74c6d51SKent Overstreet * before it got to the driver and the driver won't own all of it
94d74c6d51SKent Overstreet */
952b070cfeSChristoph Hellwig #define bio_for_each_segment_all(bvl, bio, iter) \
962b070cfeSChristoph Hellwig for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); )
97d74c6d51SKent Overstreet
/*
 * Advance @iter by @bytes within @bio.  For ops without a payload (see
 * bio_no_advance_iter()) only bi_size is meaningful, so just shrink it;
 * otherwise walk the bvec array as well.
 */
static inline void bio_advance_iter(const struct bio *bio,
				    struct bvec_iter *iter, unsigned int bytes)
{
	iter->bi_sector += bytes >> 9;	/* bytes to 512-byte sectors */

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
	/* TODO: It is reasonable to complete bio with error here. */
}
109f9df1cd9SDmitry Monakhov
11022b56c29SPavel Begunkov /* @bytes should be less or equal to bvec[i->bi_idx].bv_len */
/* @bytes should be less or equal to bvec[i->bi_idx].bv_len */
static inline void bio_advance_iter_single(const struct bio *bio,
					   struct bvec_iter *iter,
					   unsigned int bytes)
{
	/*
	 * Single-bvec variant of bio_advance_iter(): because @bytes never
	 * crosses a bvec boundary, the cheaper bvec_iter_advance_single()
	 * can be used.
	 */
	iter->bi_sector += bytes >> 9;	/* bytes to 512-byte sectors */

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance_single(bio->bi_io_vec, iter, bytes);
}
12222b56c29SPavel Begunkov
123d4aa57a1SJens Axboe void __bio_advance(struct bio *, unsigned bytes);
124d4aa57a1SJens Axboe
125d4aa57a1SJens Axboe /**
126d4aa57a1SJens Axboe * bio_advance - increment/complete a bio by some number of bytes
127d4aa57a1SJens Axboe * @bio: bio to advance
1286fd3c510SRandy Dunlap * @nbytes: number of bytes to complete
129d4aa57a1SJens Axboe *
130d4aa57a1SJens Axboe * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
131d4aa57a1SJens Axboe * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
132d4aa57a1SJens Axboe * be updated on the last bvec as well.
133d4aa57a1SJens Axboe *
134d4aa57a1SJens Axboe * @bio will then represent the remaining, uncompleted portion of the io.
135d4aa57a1SJens Axboe */
static inline void bio_advance(struct bio *bio, unsigned int nbytes)
{
	/*
	 * Fast path: completing the whole remaining bio only needs the
	 * size zeroed; no per-bvec bookkeeping is required.
	 */
	if (nbytes == bio->bi_iter.bi_size) {
		bio->bi_iter.bi_size = 0;
		return;
	}
	__bio_advance(bio, nbytes);
}
144d4aa57a1SJens Axboe
1457988613bSKent Overstreet #define __bio_for_each_segment(bvl, bio, iter, start) \
1467988613bSKent Overstreet for (iter = (start); \
1474550dd6cSKent Overstreet (iter).bi_size && \
1484550dd6cSKent Overstreet ((bvl = bio_iter_iovec((bio), (iter))), 1); \
14922b56c29SPavel Begunkov bio_advance_iter_single((bio), &(iter), (bvl).bv_len))
1507988613bSKent Overstreet
1517988613bSKent Overstreet #define bio_for_each_segment(bvl, bio, iter) \
1527988613bSKent Overstreet __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
1537988613bSKent Overstreet
154d18d9174SMing Lei #define __bio_for_each_bvec(bvl, bio, iter, start) \
155d18d9174SMing Lei for (iter = (start); \
156d18d9174SMing Lei (iter).bi_size && \
157d18d9174SMing Lei ((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
15822b56c29SPavel Begunkov bio_advance_iter_single((bio), &(iter), (bvl).bv_len))
159d18d9174SMing Lei
160d18d9174SMing Lei /* iterate over multi-page bvec */
161d18d9174SMing Lei #define bio_for_each_bvec(bvl, bio, iter) \
162d18d9174SMing Lei __bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter)
163d18d9174SMing Lei
1641072c12dSOmar Sandoval /*
1651072c12dSOmar Sandoval * Iterate over all multi-page bvecs. Drivers shouldn't use this version for the
1661072c12dSOmar Sandoval * same reasons as bio_for_each_segment_all().
1671072c12dSOmar Sandoval */
1681072c12dSOmar Sandoval #define bio_for_each_bvec_all(bvl, bio, i) \
1691072c12dSOmar Sandoval for (i = 0, bvl = bio_first_bvec_all(bio); \
170640d1930SMatthew Wilcox (Oracle) i < (bio)->bi_vcnt; i++, bvl++)
1711072c12dSOmar Sandoval
1724550dd6cSKent Overstreet #define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
1731da177e4SLinus Torvalds
/* Count the single-page segments remaining in @bio's current iterator. */
static inline unsigned bio_segments(struct bio *bio)
{
	unsigned segs = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	/*
	 * We special case discard/write same/write zeroes, because they
	 * interpret bi_size differently:
	 */

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		/* no payload, hence no segments */
		return 0;
	default:
		break;
	}

	bio_for_each_segment(bv, bio, iter)
		segs++;

	return segs;
}
199458b76edSKent Overstreet
2001da177e4SLinus Torvalds /*
2011da177e4SLinus Torvalds * get a reference to a bio, so it won't disappear. the intended use is
2021da177e4SLinus Torvalds * something like:
2031da177e4SLinus Torvalds *
2041da177e4SLinus Torvalds * bio_get(bio);
2051da177e4SLinus Torvalds * submit_bio(rw, bio);
2061da177e4SLinus Torvalds * if (bio->bi_flags ...)
2071da177e4SLinus Torvalds * do_something
2081da177e4SLinus Torvalds * bio_put(bio);
2091da177e4SLinus Torvalds *
2101da177e4SLinus Torvalds * without the bio_get(), it could potentially complete I/O before submit_bio
2111da177e4SLinus Torvalds * returns. and then bio would be freed memory when if (bio->bi_flags ...)
2121da177e4SLinus Torvalds * runs
2131da177e4SLinus Torvalds */
static inline void bio_get(struct bio *bio)
{
	/*
	 * BIO_REFFED tells bio_put() that __bi_cnt is live and must be
	 * dropped; the barrier orders the flag store before the atomic
	 * increment.
	 */
	bio->bi_flags |= (1 << BIO_REFFED);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_cnt);
}
220dac56212SJens Axboe
/*
 * Initialise @bio's reference count to @count.  Any count other than 1
 * means references will really be taken/dropped, so BIO_REFFED is set,
 * with a full barrier ordering the flag store before the count store.
 */
static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
	if (count != 1) {
		bio->bi_flags |= (1 << BIO_REFFED);
		smp_mb();
	}
	atomic_set(&bio->__bi_cnt, count);
}
2291da177e4SLinus Torvalds
/* Test whether flag @bit is set in bio->bi_flags. */
static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
	return bio->bi_flags & (1U << bit);
}
234b7c44ed9SJens Axboe
/* Set flag @bit in bio->bi_flags (plain, non-atomic read-modify-write). */
static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags |= (1U << bit);
}
239b7c44ed9SJens Axboe
/* Clear flag @bit in bio->bi_flags (plain, non-atomic read-modify-write). */
static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags &= ~(1U << bit);
}
244b7c44ed9SJens Axboe
/*
 * First bvec of @bio's vector.  A cloned bio does not own the full
 * bi_io_vec, hence the WARN.
 */
static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return bio->bi_io_vec;
}
25086292abcSMing Lei
/* Page of the first bvec of @bio (same ownership caveat as above). */
static inline struct page *bio_first_page_all(struct bio *bio)
{
	return bio_first_bvec_all(bio)->bv_page;
}
25586292abcSMing Lei
/* Folio containing the first page of @bio. */
static inline struct folio *bio_first_folio_all(struct bio *bio)
{
	return page_folio(bio_first_page_all(bio));
}
2606d2790d9SZhangPeng
/*
 * Last populated bvec of @bio's vector.  As with bio_first_bvec_all(),
 * not valid on cloned bios.
 */
static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return &bio->bi_io_vec[bio->bi_vcnt - 1];
}
26686292abcSMing Lei
267640d1930SMatthew Wilcox (Oracle) /**
268640d1930SMatthew Wilcox (Oracle) * struct folio_iter - State for iterating all folios in a bio.
269640d1930SMatthew Wilcox (Oracle) * @folio: The current folio we're iterating. NULL after the last folio.
270640d1930SMatthew Wilcox (Oracle) * @offset: The byte offset within the current folio.
271640d1930SMatthew Wilcox (Oracle) * @length: The number of bytes in this iteration (will not cross folio
272640d1930SMatthew Wilcox (Oracle) * boundary).
273640d1930SMatthew Wilcox (Oracle) */
/**
 * struct folio_iter - State for iterating all folios in a bio.
 * @folio: The current folio we're iterating. NULL after the last folio.
 * @offset: The byte offset within the current folio.
 * @length: The number of bytes in this iteration (will not cross folio
 *	    boundary).
 */
struct folio_iter {
	struct folio *folio;
	size_t offset;
	size_t length;
	/* private: for use by the iterator */
	struct folio *_next;		/* next folio within the current bvec */
	size_t _seg_count;		/* bytes left in the current bvec */
	int _i;				/* index of the current bvec */
};
283640d1930SMatthew Wilcox (Oracle)
/*
 * Load the first folio of bvec @i of @bio into @fi.  Sets fi->folio to
 * NULL when @i is past the last bvec, which terminates
 * bio_for_each_folio_all().
 */
static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio,
				   int i)
{
	struct bio_vec *bvec = bio_first_bvec_all(bio) + i;

	if (unlikely(i >= bio->bi_vcnt)) {
		fi->folio = NULL;
		return;
	}

	fi->folio = page_folio(bvec->bv_page);
	/*
	 * bv_page may not be the folio's head page; fold the page's
	 * distance from the head into the byte offset.
	 */
	fi->offset = bvec->bv_offset +
			PAGE_SIZE * (bvec->bv_page - &fi->folio->page);
	fi->_seg_count = bvec->bv_len;
	/* Report at most one folio's worth of the bvec per iteration. */
	fi->length = min(folio_size(fi->folio) - fi->offset, fi->_seg_count);
	fi->_next = folio_next(fi->folio);
	fi->_i = i;
}
302640d1930SMatthew Wilcox (Oracle)
/* Advance @fi to the next folio of @bio; fi->folio becomes NULL at the end. */
static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
{
	/* Consume the bytes reported by the previous iteration. */
	fi->_seg_count -= fi->length;
	if (fi->_seg_count) {
		/* Same bvec, next folio. */
		fi->folio = fi->_next;
		fi->offset = 0;
		fi->length = min(folio_size(fi->folio), fi->_seg_count);
		fi->_next = folio_next(fi->folio);
	} else {
		/* Current bvec exhausted: move to the next one. */
		bio_first_folio(fi, bio, fi->_i + 1);
	}
}
315640d1930SMatthew Wilcox (Oracle)
316640d1930SMatthew Wilcox (Oracle) /**
317640d1930SMatthew Wilcox (Oracle) * bio_for_each_folio_all - Iterate over each folio in a bio.
318640d1930SMatthew Wilcox (Oracle) * @fi: struct folio_iter which is updated for each folio.
319640d1930SMatthew Wilcox (Oracle) * @bio: struct bio to iterate over.
320640d1930SMatthew Wilcox (Oracle) */
321640d1930SMatthew Wilcox (Oracle) #define bio_for_each_folio_all(fi, bio) \
322640d1930SMatthew Wilcox (Oracle) for (bio_first_folio(&fi, bio, 0); fi.folio; bio_next_folio(&fi, bio))
323640d1930SMatthew Wilcox (Oracle)
/* Flags for bio_integrity_payload.bip_flags. */
enum bip_flags {
	BIP_BLOCK_INTEGRITY	= 1 << 0, /* block layer owns integrity data */
	BIP_MAPPED_INTEGRITY	= 1 << 1, /* ref tag has been remapped */
	BIP_CTRL_NOCHECK	= 1 << 2, /* disable HBA integrity checking */
	BIP_DISK_NOCHECK	= 1 << 3, /* disable disk integrity checking */
	BIP_IP_CHECKSUM		= 1 << 4, /* IP checksum */
};
331c611529eSMartin K. Petersen
3327ba1ba12SMartin K. Petersen /*
3337ba1ba12SMartin K. Petersen * bio integrity payload
3347ba1ba12SMartin K. Petersen */
/*
 * bio integrity payload: integrity metadata attached to a bio via
 * bio->bi_integrity (valid when REQ_INTEGRITY is set in bi_opf).
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */

	struct bvec_iter	bip_iter;

	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_max_vcnt;	/* integrity bio_vec slots */
	unsigned short		bip_flags;	/* control flags */

	struct bvec_iter	bio_iter;	/* for rewinding parent bio */

	struct work_struct	bip_work;	/* I/O completion */

	struct bio_vec		*bip_vec;
	struct bio_vec		bip_inline_vecs[];/* embedded bvec array */
};
35118593088SMartin K. Petersen
35206c1e390SKeith Busch #if defined(CONFIG_BLK_DEV_INTEGRITY)
35306c1e390SKeith Busch
bio_integrity(struct bio * bio)35406c1e390SKeith Busch static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
35506c1e390SKeith Busch {
3561eff9d32SJens Axboe if (bio->bi_opf & REQ_INTEGRITY)
35706c1e390SKeith Busch return bio->bi_integrity;
35806c1e390SKeith Busch
35906c1e390SKeith Busch return NULL;
36006c1e390SKeith Busch }
36106c1e390SKeith Busch
bio_integrity_flagged(struct bio * bio,enum bip_flags flag)362c611529eSMartin K. Petersen static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
363c611529eSMartin K. Petersen {
364c611529eSMartin K. Petersen struct bio_integrity_payload *bip = bio_integrity(bio);
365c611529eSMartin K. Petersen
366c611529eSMartin K. Petersen if (bip)
367c611529eSMartin K. Petersen return bip->bip_flags & flag;
368c611529eSMartin K. Petersen
369c611529eSMartin K. Petersen return false;
370c611529eSMartin K. Petersen }
371b1f01388SMartin K. Petersen
/* The integrity seed is kept in bip_iter.bi_sector. */
static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
{
	return bip->bip_iter.bi_sector;
}
37618593088SMartin K. Petersen
/* Store @seed as the integrity seed (see bip_get_seed()). */
static inline void bip_set_seed(struct bio_integrity_payload *bip,
				sector_t seed)
{
	bip->bip_iter.bi_sector = seed;
}
38218593088SMartin K. Petersen
3837ba1ba12SMartin K. Petersen #endif /* CONFIG_BLK_DEV_INTEGRITY */
3841da177e4SLinus Torvalds
385e83502caSChaitanya Kulkarni void bio_trim(struct bio *bio, sector_t offset, sector_t size);
38620d0189bSKent Overstreet extern struct bio *bio_split(struct bio *bio, int sectors,
38720d0189bSKent Overstreet gfp_t gfp, struct bio_set *bs);
388fd8f8edeSChristoph Hellwig struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
389fd8f8edeSChristoph Hellwig unsigned *segs, struct bio_set *bs, unsigned max_bytes);
39020d0189bSKent Overstreet
39120d0189bSKent Overstreet /**
39220d0189bSKent Overstreet * bio_next_split - get next @sectors from a bio, splitting if necessary
39320d0189bSKent Overstreet * @bio: bio to split
39420d0189bSKent Overstreet * @sectors: number of sectors to split from the front of @bio
39520d0189bSKent Overstreet * @gfp: gfp mask
39620d0189bSKent Overstreet * @bs: bio set to allocate from
39720d0189bSKent Overstreet *
3986fd3c510SRandy Dunlap * Return: a bio representing the next @sectors of @bio - if the bio is smaller
39920d0189bSKent Overstreet * than @sectors, returns the original bio unchanged.
40020d0189bSKent Overstreet */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
					 gfp_t gfp, struct bio_set *bs)
{
	/* Nothing to split off: hand back @bio unchanged. */
	if (sectors >= bio_sectors(bio))
		return bio;

	return bio_split(bio, sectors, gfp, bs);
}
40920d0189bSKent Overstreet
410011067b0SNeilBrown enum {
411011067b0SNeilBrown BIOSET_NEED_BVECS = BIT(0),
41247e0fb46SNeilBrown BIOSET_NEED_RESCUER = BIT(1),
413be4d234dSJens Axboe BIOSET_PERCPU_CACHE = BIT(2),
414011067b0SNeilBrown };
415dad08527SKent Overstreet extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
416dad08527SKent Overstreet extern void bioset_exit(struct bio_set *);
4178aa6ba2fSKent Overstreet extern int biovec_init_pool(mempool_t *pool, int pool_entries);
4181da177e4SLinus Torvalds
419609be106SChristoph Hellwig struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
42016458cf3SBart Van Assche blk_opf_t opf, gfp_t gfp_mask,
4210f2e6ab8SChristoph Hellwig struct bio_set *bs);
422066ff571SChristoph Hellwig struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask);
4231da177e4SLinus Torvalds extern void bio_put(struct bio *);
4241da177e4SLinus Torvalds
425abfc426dSChristoph Hellwig struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
426abfc426dSChristoph Hellwig gfp_t gfp, struct bio_set *bs);
427abfc426dSChristoph Hellwig int bio_init_clone(struct block_device *bdev, struct bio *bio,
428abfc426dSChristoph Hellwig struct bio *bio_src, gfp_t gfp);
429bf800ef1SKent Overstreet
430f4f8154aSKent Overstreet extern struct bio_set fs_bio_set;
4313f86a82aSKent Overstreet
/**
 * bio_alloc - allocate a bio from the global fs_bio_set
 * @bdev: block device the bio targets
 * @nr_vecs: number of bvec slots to reserve
 * @opf: operation and flags
 * @gfp_mask: allocation mask
 *
 * Convenience wrapper around bio_alloc_bioset() using &fs_bio_set.
 */
static inline struct bio *bio_alloc(struct block_device *bdev,
		unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp_mask)
{
	return bio_alloc_bioset(bdev, nr_vecs, opf, gfp_mask, &fs_bio_set);
}
4373f86a82aSKent Overstreet
4383e08773cSChristoph Hellwig void submit_bio(struct bio *bio);
4391e3914d4SChristoph Hellwig
4404246a0b6SChristoph Hellwig extern void bio_endio(struct bio *);
4414246a0b6SChristoph Hellwig
/* Complete @bio with an I/O error status. */
static inline void bio_io_error(struct bio *bio)
{
	bio->bi_status = BLK_STS_IOERR;
	bio_endio(bio);
}
4474246a0b6SChristoph Hellwig
/*
 * Complete @bio with -EAGAIN semantics (nowait submission would have
 * blocked).  BIO_QUIET suppresses error logging for this expected case.
 */
static inline void bio_wouldblock_error(struct bio *bio)
{
	bio_set_flag(bio, BIO_QUIET);
	bio->bi_status = BLK_STS_AGAIN;
	bio_endio(bio);
}
4546712ecf8SNeilBrown
4553e1a88ecSPavel Begunkov /*
4563e1a88ecSPavel Begunkov * Calculate number of bvec segments that should be allocated to fit data
457c42bca92SPavel Begunkov * pointed by @iter. If @iter is backed by bvec it's going to be reused
458c42bca92SPavel Begunkov * instead of allocating a new one.
4593e1a88ecSPavel Begunkov */
static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs)
{
	/* A bvec-backed iterator reuses its existing bvec array. */
	if (iov_iter_is_bvec(iter))
		return 0;
	return iov_iter_npages(iter, max_segs);
}
4663e1a88ecSPavel Begunkov
4671da177e4SLinus Torvalds struct request_queue;
4681da177e4SLinus Torvalds
4694e49ea4aSMike Christie extern int submit_bio_wait(struct bio *bio);
47049add496SChristoph Hellwig void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
47116458cf3SBart Van Assche unsigned short max_vecs, blk_opf_t opf);
4729ae3b3f5SJens Axboe extern void bio_uninit(struct bio *);
47316458cf3SBart Van Assche void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf);
474196d38bcSKent Overstreet void bio_chain(struct bio *, struct bio *);
4751da177e4SLinus Torvalds
47683f2caaaSJohannes Thumshirn int __must_check bio_add_page(struct bio *bio, struct page *page, unsigned len,
47783f2caaaSJohannes Thumshirn unsigned off);
4786c500000SJohannes Thumshirn bool __must_check bio_add_folio(struct bio *bio, struct folio *folio,
4796c500000SJohannes Thumshirn size_t len, size_t off);
4806e68af66SMike Christie extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
4816e68af66SMike Christie unsigned int, unsigned int);
482ae29333fSJohannes Thumshirn int bio_add_zone_append_page(struct bio *bio, struct page *page,
483ae29333fSJohannes Thumshirn unsigned int len, unsigned int offset);
4840aa69fd3SChristoph Hellwig void __bio_add_page(struct bio *bio, struct page *page,
4850aa69fd3SChristoph Hellwig unsigned int len, unsigned int off);
4867a150f1eSJohannes Thumshirn void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,
4877a150f1eSJohannes Thumshirn size_t off);
4882cefe4dbSKent Overstreet int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
4891bb6b810SPavel Begunkov void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter);
490c809084aSPavel Begunkov void __bio_release_pages(struct bio *bio, bool mark_dirty);
4911da177e4SLinus Torvalds extern void bio_set_pages_dirty(struct bio *bio);
4921da177e4SLinus Torvalds extern void bio_check_pages_dirty(struct bio *bio);
4932d4dc890SIlya Loginov
494ee4b4e22SJens Axboe extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
495ee4b4e22SJens Axboe struct bio *src, struct bvec_iter *src_iter);
49616ac3d63SKent Overstreet extern void bio_copy_data(struct bio *dst, struct bio *src);
497491221f8SGuoqing Jiang extern void bio_free_pages(struct bio *bio);
49829125ed6SChristoph Hellwig void guard_bio_eod(struct bio *bio);
499649f070eSKent Overstreet void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);
500649f070eSKent Overstreet
/* Zero all data covered by @bio's current iterator. */
static inline void zero_fill_bio(struct bio *bio)
{
	zero_fill_bio_iter(bio, bio->bi_iter);
}
50538a72dacSKent Overstreet
/*
 * Release the bio's pages, but only if they were pinned by the bio
 * itself (BIO_PAGE_PINNED); otherwise ownership lies elsewhere.
 */
static inline void bio_release_pages(struct bio *bio, bool mark_dirty)
{
	if (bio_flagged(bio, BIO_PAGE_PINNED))
		__bio_release_pages(bio, mark_dirty);
}
511c809084aSPavel Begunkov
51274d46992SChristoph Hellwig #define bio_dev(bio) \
513309dca30SChristoph Hellwig disk_devt((bio)->bi_bdev->bd_disk)
51474d46992SChristoph Hellwig
515852c788fSTejun Heo #ifdef CONFIG_BLK_CGROUP
5162268c0feSDennis Zhou void bio_associate_blkg(struct bio *bio);
517fd42df30SDennis Zhou void bio_associate_blkg_from_css(struct bio *bio,
518fd42df30SDennis Zhou struct cgroup_subsys_state *css);
519db6638d7SDennis Zhou void bio_clone_blkg_association(struct bio *dst, struct bio *src);
5203480373eSChristoph Hellwig void blkcg_punt_bio_submit(struct bio *bio);
521852c788fSTejun Heo #else /* CONFIG_BLK_CGROUP */
bio_associate_blkg(struct bio * bio)5222268c0feSDennis Zhou static inline void bio_associate_blkg(struct bio *bio) { }
bio_associate_blkg_from_css(struct bio * bio,struct cgroup_subsys_state * css)523fd42df30SDennis Zhou static inline void bio_associate_blkg_from_css(struct bio *bio,
524fd42df30SDennis Zhou struct cgroup_subsys_state *css)
525fd42df30SDennis Zhou { }
bio_clone_blkg_association(struct bio * dst,struct bio * src)526db6638d7SDennis Zhou static inline void bio_clone_blkg_association(struct bio *dst,
52720bd723eSPaolo Valente struct bio *src) { }
/* Without CONFIG_BLK_CGROUP there is nowhere to punt to: submit directly. */
static inline void blkcg_punt_bio_submit(struct bio *bio)
{
	submit_bio(bio);
}
532852c788fSTejun Heo #endif /* CONFIG_BLK_CGROUP */
533852c788fSTejun Heo
/*
 * Point @bio at @bdev and (re)associate its blkcg.  The remap flag is
 * always invalidated; NOTE(review): BIO_BPS_THROTTLED is cleared only
 * when the device actually changes — presumably because throttling
 * state belongs to the old device's queue; confirm against blk-throttle.
 */
static inline void bio_set_dev(struct bio *bio, struct block_device *bdev)
{
	bio_clear_flag(bio, BIO_REMAPPED);
	if (bio->bi_bdev != bdev)
		bio_clear_flag(bio, BIO_BPS_THROTTLED);
	bio->bi_bdev = bdev;
	bio_associate_blkg(bio);
}
542cf6d6238SPavel Begunkov
5437a67f63bSJens Axboe /*
544e686307fSAkinobu Mita * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
5458f3d8ba2SChristoph Hellwig *
5468f3d8ba2SChristoph Hellwig * A bio_list anchors a singly-linked list of bios chained through the bi_next
5478f3d8ba2SChristoph Hellwig * member of the bio. The bio_list also caches the last list member to allow
5488f3d8ba2SChristoph Hellwig * fast access to the tail.
5498f3d8ba2SChristoph Hellwig */
struct bio_list {
	struct bio *head;	/* first bio in the list */
	struct bio *tail;	/* last bio, cached for O(1) append */
};
5548f3d8ba2SChristoph Hellwig
bio_list_empty(const struct bio_list * bl)5558f3d8ba2SChristoph Hellwig static inline int bio_list_empty(const struct bio_list *bl)
5568f3d8ba2SChristoph Hellwig {
5578f3d8ba2SChristoph Hellwig return bl->head == NULL;
5588f3d8ba2SChristoph Hellwig }
5598f3d8ba2SChristoph Hellwig
bio_list_init(struct bio_list * bl)5608f3d8ba2SChristoph Hellwig static inline void bio_list_init(struct bio_list *bl)
5618f3d8ba2SChristoph Hellwig {
5628f3d8ba2SChristoph Hellwig bl->head = bl->tail = NULL;
5638f3d8ba2SChristoph Hellwig }
5648f3d8ba2SChristoph Hellwig
/* Static initializer for an empty bio_list. */
#define BIO_EMPTY_LIST	{ NULL, NULL }

/* Walk every bio on @bl without removing any; @bio is the cursor. */
#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)
5698f3d8ba2SChristoph Hellwig
bio_list_size(const struct bio_list * bl)5708f3d8ba2SChristoph Hellwig static inline unsigned bio_list_size(const struct bio_list *bl)
5718f3d8ba2SChristoph Hellwig {
5728f3d8ba2SChristoph Hellwig unsigned sz = 0;
5738f3d8ba2SChristoph Hellwig struct bio *bio;
5748f3d8ba2SChristoph Hellwig
5758f3d8ba2SChristoph Hellwig bio_list_for_each(bio, bl)
5768f3d8ba2SChristoph Hellwig sz++;
5778f3d8ba2SChristoph Hellwig
5788f3d8ba2SChristoph Hellwig return sz;
5798f3d8ba2SChristoph Hellwig }
5808f3d8ba2SChristoph Hellwig
bio_list_add(struct bio_list * bl,struct bio * bio)5818f3d8ba2SChristoph Hellwig static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
5828f3d8ba2SChristoph Hellwig {
5838f3d8ba2SChristoph Hellwig bio->bi_next = NULL;
5848f3d8ba2SChristoph Hellwig
5858f3d8ba2SChristoph Hellwig if (bl->tail)
5868f3d8ba2SChristoph Hellwig bl->tail->bi_next = bio;
5878f3d8ba2SChristoph Hellwig else
5888f3d8ba2SChristoph Hellwig bl->head = bio;
5898f3d8ba2SChristoph Hellwig
5908f3d8ba2SChristoph Hellwig bl->tail = bio;
5918f3d8ba2SChristoph Hellwig }
5928f3d8ba2SChristoph Hellwig
bio_list_add_head(struct bio_list * bl,struct bio * bio)5938f3d8ba2SChristoph Hellwig static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
5948f3d8ba2SChristoph Hellwig {
5958f3d8ba2SChristoph Hellwig bio->bi_next = bl->head;
5968f3d8ba2SChristoph Hellwig
5978f3d8ba2SChristoph Hellwig bl->head = bio;
5988f3d8ba2SChristoph Hellwig
5998f3d8ba2SChristoph Hellwig if (!bl->tail)
6008f3d8ba2SChristoph Hellwig bl->tail = bio;
6018f3d8ba2SChristoph Hellwig }
6028f3d8ba2SChristoph Hellwig
bio_list_merge(struct bio_list * bl,struct bio_list * bl2)6038f3d8ba2SChristoph Hellwig static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
6048f3d8ba2SChristoph Hellwig {
6058f3d8ba2SChristoph Hellwig if (!bl2->head)
6068f3d8ba2SChristoph Hellwig return;
6078f3d8ba2SChristoph Hellwig
6088f3d8ba2SChristoph Hellwig if (bl->tail)
6098f3d8ba2SChristoph Hellwig bl->tail->bi_next = bl2->head;
6108f3d8ba2SChristoph Hellwig else
6118f3d8ba2SChristoph Hellwig bl->head = bl2->head;
6128f3d8ba2SChristoph Hellwig
6138f3d8ba2SChristoph Hellwig bl->tail = bl2->tail;
6148f3d8ba2SChristoph Hellwig }
6158f3d8ba2SChristoph Hellwig
bio_list_merge_head(struct bio_list * bl,struct bio_list * bl2)6168f3d8ba2SChristoph Hellwig static inline void bio_list_merge_head(struct bio_list *bl,
6178f3d8ba2SChristoph Hellwig struct bio_list *bl2)
6188f3d8ba2SChristoph Hellwig {
6198f3d8ba2SChristoph Hellwig if (!bl2->head)
6208f3d8ba2SChristoph Hellwig return;
6218f3d8ba2SChristoph Hellwig
6228f3d8ba2SChristoph Hellwig if (bl->head)
6238f3d8ba2SChristoph Hellwig bl2->tail->bi_next = bl->head;
6248f3d8ba2SChristoph Hellwig else
6258f3d8ba2SChristoph Hellwig bl->tail = bl2->tail;
6268f3d8ba2SChristoph Hellwig
6278f3d8ba2SChristoph Hellwig bl->head = bl2->head;
6288f3d8ba2SChristoph Hellwig }
6298f3d8ba2SChristoph Hellwig
/* Return the first bio on @bl without removing it (NULL if empty). */
static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}
63413685a16SGeert Uytterhoeven
bio_list_pop(struct bio_list * bl)6358f3d8ba2SChristoph Hellwig static inline struct bio *bio_list_pop(struct bio_list *bl)
6368f3d8ba2SChristoph Hellwig {
6378f3d8ba2SChristoph Hellwig struct bio *bio = bl->head;
6388f3d8ba2SChristoph Hellwig
6398f3d8ba2SChristoph Hellwig if (bio) {
6408f3d8ba2SChristoph Hellwig bl->head = bl->head->bi_next;
6418f3d8ba2SChristoph Hellwig if (!bl->head)
6428f3d8ba2SChristoph Hellwig bl->tail = NULL;
6438f3d8ba2SChristoph Hellwig
6448f3d8ba2SChristoph Hellwig bio->bi_next = NULL;
6458f3d8ba2SChristoph Hellwig }
6468f3d8ba2SChristoph Hellwig
6478f3d8ba2SChristoph Hellwig return bio;
6488f3d8ba2SChristoph Hellwig }
6498f3d8ba2SChristoph Hellwig
bio_list_get(struct bio_list * bl)6508f3d8ba2SChristoph Hellwig static inline struct bio *bio_list_get(struct bio_list *bl)
6518f3d8ba2SChristoph Hellwig {
6528f3d8ba2SChristoph Hellwig struct bio *bio = bl->head;
6538f3d8ba2SChristoph Hellwig
6548f3d8ba2SChristoph Hellwig bl->head = bl->tail = NULL;
6558f3d8ba2SChristoph Hellwig
6568f3d8ba2SChristoph Hellwig return bio;
6578f3d8ba2SChristoph Hellwig }
6588f3d8ba2SChristoph Hellwig
/*
 * Increment chain count for the bio. Make sure the BIO_CHAIN flag update
 * is visible before the raised count: smp_mb__before_atomic() orders the
 * flag store against the atomic increment.
 */
static inline void bio_inc_remaining(struct bio *bio)
{
	bio_set_flag(bio, BIO_CHAIN);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_remaining);
}
6690ef5a50cSMike Snitzer
/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2

struct bio_set {
	struct kmem_cache *bio_slab;	/* slab bios are carved from */
	unsigned int front_pad;		/* bytes reserved in front of each bio */

	/*
	 * per-cpu bio alloc cache
	 */
	struct bio_alloc_cache __percpu *cache;

	mempool_t bio_pool;		/* mempool backing bio allocations */
	mempool_t bvec_pool;		/* mempool backing biovec allocations */
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t bio_integrity_pool;
	mempool_t bvec_integrity_pool;
#endif

	unsigned int back_pad;		/* bytes reserved behind each bio */
	/*
	 * Deadlock avoidance for stacking block drivers: see comments in
	 * bio_alloc_bioset() for details
	 */
	spinlock_t rescue_lock;
	struct bio_list rescue_list;
	struct work_struct rescue_work;
	struct workqueue_struct *rescue_workqueue;

	/*
	 * Hot un-plug notifier for the per-cpu cache, if used
	 */
	struct hlist_node cpuhp_dead;
};
70957fb233fSKent Overstreet
/* Return true if @bs has been set up (its bio slab exists). */
static inline bool bioset_initialized(struct bio_set *bs)
{
	return bs->bio_slab != NULL;
}
714338aa96dSKent Overstreet
#if defined(CONFIG_BLK_DEV_INTEGRITY)

/* Walk the integrity bvecs of one bio_integrity_payload. */
#define bip_for_each_vec(bvl, bip, iter)				\
	for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)

/* Walk the integrity bvecs of every bio in a chain. */
#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
	for_each_bio(_bio)						\
		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

/* Implemented in block/bio-integrity.c. */
extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_prep(struct bio *);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);
7347ba1ba12SMartin K. Petersen #else /* CONFIG_BLK_DEV_INTEGRITY */
7357ba1ba12SMartin K. Petersen
/* Stub: no bio can carry an integrity payload in this configuration. */
static inline void *bio_integrity(struct bio *bio)
{
	return NULL;
}
7406898e3bdSMartin K. Petersen
/* Stub: report success; there are no integrity pools to create. */
static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	return 0;
}
7456898e3bdSMartin K. Petersen
/* Stub: nothing to free when integrity support is compiled out. */
static inline void bioset_integrity_free(struct bio_set *bs)
{
}
7506898e3bdSMartin K. Petersen
/* Stub: nothing to prepare; report success so submission proceeds. */
static inline bool bio_integrity_prep(struct bio *bio)
{
	return true;
}
7556898e3bdSMartin K. Petersen
/* Stub: no integrity payload exists to clone; report success. */
static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
				      gfp_t gfp_mask)
{
	return 0;
}
7616898e3bdSMartin K. Petersen
/* Stub: no integrity payload to advance when support is compiled out. */
static inline void bio_integrity_advance(struct bio *bio,
					 unsigned int bytes_done)
{
}
7676898e3bdSMartin K. Petersen
/* Stub: no integrity payload to trim when support is compiled out. */
static inline void bio_integrity_trim(struct bio *bio)
{
}
7726898e3bdSMartin K. Petersen
/* Stub: no integrity infrastructure to initialize in this configuration. */
static inline void bio_integrity_init(void)
{
}
7777ba1ba12SMartin K. Petersen
/* Stub: no integrity flags can ever be set in this configuration. */
static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	return false;
}
782c611529eSMartin K. Petersen
/*
 * Stub: integrity payloads cannot be allocated when support is compiled
 * out; callers get -EINVAL.
 */
static inline void *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
					unsigned int nr)
{
	return ERR_PTR(-EINVAL);
}
78806c1e390SKeith Busch
/* Stub for !CONFIG_BLK_DEV_INTEGRITY: returns 0 (no bytes added). */
static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
					 unsigned int len, unsigned int offset)
{
	return 0;
}
79406c1e390SKeith Busch
7957ba1ba12SMartin K. Petersen #endif /* CONFIG_BLK_DEV_INTEGRITY */
7967ba1ba12SMartin K. Petersen
/*
 * Mark a bio as polled. Note that for async polled IO, the caller must
 * expect -EWOULDBLOCK if we cannot allocate a request (or other resources).
 * We cannot block waiting for requests on polled IO, as those completions
 * must be found by the caller. This is different than IRQ driven IO, where
 * it's safe to wait for IO to complete.
 */
static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
{
	bio->bi_opf |= REQ_POLLED;
	/* async (IOCB_NOWAIT) submitters must not block on allocation */
	if (kiocb->ki_flags & IOCB_NOWAIT)
		bio->bi_opf |= REQ_NOWAIT;
}
8100bbb280dSJens Axboe
/*
 * Clear REQ_POLLED so the bio completes via the normal IRQ-driven path.
 * REQ_NOWAIT, if set by bio_set_polled(), is deliberately left alone.
 */
static inline void bio_clear_polled(struct bio *bio)
{
	bio->bi_opf &= ~REQ_POLLED;
}
815b53f3dcdSMike Snitzer
/*
 * NOTE(review): presumably chains @bio to a freshly allocated successor
 * and returns the new bio — confirm against the definition in
 * block/blk-lib.c.
 */
struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
		unsigned int nr_pages, blk_opf_t opf, gfp_t gfp);
818c28a6147SChaitanya Kulkarni
8191da177e4SLinus Torvalds #endif /* __LINUX_BIO_H */
820