/* xref: /openbmc/linux/block/bio.c (revision 609be106) */
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <linux/blk-cgroup.h>
#include <linux/highmem.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>
#include <linux/xarray.h>

#include <trace/events/block.h>
#include "blk.h"
#include "blk-rq-qos.h"

struct bio_alloc_cache {
	struct bio		*free_list;
	unsigned int		nr;
};

static struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
} bvec_slabs[] __read_mostly = {
	{ .nr_vecs = 16, .name = "biovec-16" },
	{ .nr_vecs = 64, .name = "biovec-64" },
	{ .nr_vecs = 128, .name = "biovec-128" },
	{ .nr_vecs = BIO_MAX_VECS, .name = "biovec-max" },
};

static struct biovec_slab *biovec_slab(unsigned short nr_vecs)
{
	switch (nr_vecs) {
	/* smaller bios use inline vecs */
	case 5 ... 16:
		return &bvec_slabs[0];
	case 17 ... 64:
		return &bvec_slabs[1];
	case 65 ... 128:
		return &bvec_slabs[2];
	case 129 ... BIO_MAX_VECS:
		return &bvec_slabs[3];
	default:
		BUG();
		return NULL;
	}
}

/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
struct bio_set fs_bio_set;
EXPORT_SYMBOL(fs_bio_set);

/*
 * Our slab pool management
 */
struct bio_slab {
	struct kmem_cache *slab;
	unsigned int slab_ref;
	unsigned int slab_size;
	char name[8];
};
static DEFINE_MUTEX(bio_slab_lock);
static DEFINE_XARRAY(bio_slabs);

static struct bio_slab *create_bio_slab(unsigned int size)
{
	struct bio_slab *bslab = kzalloc(sizeof(*bslab), GFP_KERNEL);

	if (!bslab)
		return NULL;

	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size);
	bslab->slab = kmem_cache_create(bslab->name, size,
			ARCH_KMALLOC_MINALIGN,
			SLAB_HWCACHE_ALIGN | SLAB_TYPESAFE_BY_RCU, NULL);
	if (!bslab->slab)
		goto fail_alloc_slab;

	bslab->slab_ref = 1;
	bslab->slab_size = size;

	if (!xa_err(xa_store(&bio_slabs, size, bslab, GFP_KERNEL)))
		return bslab;

	kmem_cache_destroy(bslab->slab);

fail_alloc_slab:
	kfree(bslab);
	return NULL;
}

static inline unsigned int bs_bio_slab_size(struct bio_set *bs)
{
	return bs->front_pad + sizeof(struct bio) + bs->back_pad;
}

static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs)
{
	unsigned int size = bs_bio_slab_size(bs);
	struct bio_slab *bslab;

	mutex_lock(&bio_slab_lock);
	bslab = xa_load(&bio_slabs, size);
	if (bslab)
		bslab->slab_ref++;
	else
		bslab = create_bio_slab(size);
	mutex_unlock(&bio_slab_lock);

	if (bslab)
		return bslab->slab;
	return NULL;
}

static void bio_put_slab(struct bio_set *bs)
{
	struct bio_slab *bslab = NULL;
	unsigned int slab_size = bs_bio_slab_size(bs);

	mutex_lock(&bio_slab_lock);

	bslab = xa_load(&bio_slabs, slab_size);
	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
		goto out;

	WARN_ON_ONCE(bslab->slab != bs->bio_slab);

	WARN_ON(!bslab->slab_ref);

	if (--bslab->slab_ref)
		goto out;

	xa_erase(&bio_slabs, slab_size);

	kmem_cache_destroy(bslab->slab);
	kfree(bslab);

out:
	mutex_unlock(&bio_slab_lock);
}

void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs)
{
	BUG_ON(nr_vecs > BIO_MAX_VECS);

	if (nr_vecs == BIO_MAX_VECS)
		mempool_free(bv, pool);
	else if (nr_vecs > BIO_INLINE_VECS)
		kmem_cache_free(biovec_slab(nr_vecs)->slab, bv);
}

/*
 * Make the first allocation restricted and don't dump info on allocation
 * failures, since we'll fall back to the mempool in case of failure.
 */
static inline gfp_t bvec_alloc_gfp(gfp_t gfp)
{
	return (gfp & ~(__GFP_DIRECT_RECLAIM | __GFP_IO)) |
		__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
}

struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask)
{
	struct biovec_slab *bvs = biovec_slab(*nr_vecs);

	if (WARN_ON_ONCE(!bvs))
		return NULL;

	/*
	 * Upgrade the nr_vecs request to take full advantage of the allocation.
	 * We also rely on this in the bvec_free path.
	 */
	*nr_vecs = bvs->nr_vecs;

	/*
	 * Try a slab allocation first for all smaller allocations.  If that
	 * fails and __GFP_DIRECT_RECLAIM is set retry with the mempool.
	 * The mempool is sized to handle up to BIO_MAX_VECS entries.
	 */
	if (*nr_vecs < BIO_MAX_VECS) {
		struct bio_vec *bvl;

		bvl = kmem_cache_alloc(bvs->slab, bvec_alloc_gfp(gfp_mask));
		if (likely(bvl) || !(gfp_mask & __GFP_DIRECT_RECLAIM))
			return bvl;
		*nr_vecs = BIO_MAX_VECS;
	}

	return mempool_alloc(pool, gfp_mask);
}
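
/*
 * Illustrative note (not part of the original source): because of the
 * upgrade above, a caller asking bvec_alloc() for, say, 5 vectors has the
 * request bumped to 16 and served from the "biovec-16" slab.  bvec_free()
 * relies on being passed that rounded-up count (via bi_max_vecs) to find
 * the same slab again.
 */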

void bio_uninit(struct bio *bio)
{
#ifdef CONFIG_BLK_CGROUP
	if (bio->bi_blkg) {
		blkg_put(bio->bi_blkg);
		bio->bi_blkg = NULL;
	}
#endif
	if (bio_integrity(bio))
		bio_integrity_free(bio);

	bio_crypt_free_ctx(bio);
}
EXPORT_SYMBOL(bio_uninit);

static void bio_free(struct bio *bio)
{
	struct bio_set *bs = bio->bi_pool;
	void *p;

	bio_uninit(bio);

	if (bs) {
		bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs);

		/*
		 * If we have front padding, adjust the bio pointer before freeing
		 */
		p = bio;
		p -= bs->front_pad;

		mempool_free(p, &bs->bio_pool);
	} else {
		/* Bio was allocated by bio_kmalloc() */
		kfree(bio);
	}
}

/*
 * Users of this function have their own bio allocation. Subsequently,
 * they must remember to pair any call to bio_init() with bio_uninit()
 * when IO has completed, or when the bio is released.
 */
void bio_init(struct bio *bio, struct bio_vec *table,
	      unsigned short max_vecs)
{
	bio->bi_next = NULL;
	bio->bi_bdev = NULL;
	bio->bi_opf = 0;
	bio->bi_flags = 0;
	bio->bi_ioprio = 0;
	bio->bi_write_hint = 0;
	bio->bi_status = 0;
	bio->bi_iter.bi_sector = 0;
	bio->bi_iter.bi_size = 0;
	bio->bi_iter.bi_idx = 0;
	bio->bi_iter.bi_bvec_done = 0;
	bio->bi_end_io = NULL;
	bio->bi_private = NULL;
#ifdef CONFIG_BLK_CGROUP
	bio->bi_blkg = NULL;
	bio->bi_issue.value = 0;
#ifdef CONFIG_BLK_CGROUP_IOCOST
	bio->bi_iocost_cost = 0;
#endif
#endif
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	bio->bi_crypt_context = NULL;
#endif
#ifdef CONFIG_BLK_DEV_INTEGRITY
	bio->bi_integrity = NULL;
#endif
	bio->bi_vcnt = 0;

	atomic_set(&bio->__bi_remaining, 1);
	atomic_set(&bio->__bi_cnt, 1);
	bio->bi_cookie = BLK_QC_T_NONE;

	bio->bi_max_vecs = max_vecs;
	bio->bi_io_vec = table;
	bio->bi_pool = NULL;
}
EXPORT_SYMBOL(bio_init);
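
/*
 * Sketch of the pairing described above (illustrative only; struct my_cmd
 * and its fields are hypothetical, not part of this file):
 *
 *	struct my_cmd {
 *		struct bio_vec bvec;
 *		struct bio bio;		// embedded, not mempool-backed
 *	};
 *
 *	bio_init(&cmd->bio, &cmd->bvec, 1);
 *	...
 *	bio_uninit(&cmd->bio);		// on completion or release
 */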

/**
 * bio_reset - reinitialize a bio
 * @bio:	bio to reset
 *
 * Description:
 *   After calling bio_reset(), @bio will be in the same state as a freshly
 *   allocated bio returned by bio_alloc_bioset() - the only fields that are
 *   preserved are the ones that are initialized by bio_alloc_bioset(). See
 *   comment in struct bio.
 */
void bio_reset(struct bio *bio)
{
	bio_uninit(bio);
	memset(bio, 0, BIO_RESET_BYTES);
	atomic_set(&bio->__bi_remaining, 1);
}
EXPORT_SYMBOL(bio_reset);
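
/*
 * Illustrative reuse pattern (not from this file): callers that recycle a
 * bio reset it and then fill in the fields they need again:
 *
 *	bio_reset(bio);
 *	bio_set_dev(bio, bdev);
 *	bio->bi_opf = REQ_OP_READ;
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_end_io = my_end_io;	// hypothetical completion handler
 */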

static struct bio *__bio_chain_endio(struct bio *bio)
{
	struct bio *parent = bio->bi_private;

	if (bio->bi_status && !parent->bi_status)
		parent->bi_status = bio->bi_status;
	bio_put(bio);
	return parent;
}

static void bio_chain_endio(struct bio *bio)
{
	bio_endio(__bio_chain_endio(bio));
}

/**
 * bio_chain - chain bio completions
 * @bio: the target bio
 * @parent: the parent bio of @bio
 *
 * The caller won't have a bi_end_io called when @bio completes - instead,
 * @parent's bi_end_io won't be called until both @parent and @bio have
 * completed; the chained bio will also be freed when it completes.
 *
 * The caller must not set bi_private or bi_end_io in @bio.
 */
void bio_chain(struct bio *bio, struct bio *parent)
{
	BUG_ON(bio->bi_private || bio->bi_end_io);

	bio->bi_private = parent;
	bio->bi_end_io	= bio_chain_endio;
	bio_inc_remaining(parent);
}
EXPORT_SYMBOL(bio_chain);
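
/*
 * blk_next_bio() below is a canonical in-file user of bio_chain(): it
 * chains the previous bio to a freshly allocated one and submits the old
 * bio, so the most recently returned bio acts as the parent whose
 * completion waits for every earlier bio in the chain.
 */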
struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
		unsigned int nr_pages, unsigned int opf, gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	bio_set_dev(new, bdev);
	new->bi_opf = opf;

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}
EXPORT_SYMBOL_GPL(blk_next_bio);

static void bio_alloc_rescue(struct work_struct *work)
{
	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
	struct bio *bio;

	while (1) {
		spin_lock(&bs->rescue_lock);
		bio = bio_list_pop(&bs->rescue_list);
		spin_unlock(&bs->rescue_lock);

		if (!bio)
			break;

		submit_bio_noacct(bio);
	}
}

static void punt_bios_to_rescuer(struct bio_set *bs)
{
	struct bio_list punt, nopunt;
	struct bio *bio;

	if (WARN_ON_ONCE(!bs->rescue_workqueue))
		return;
	/*
	 * In order to guarantee forward progress we must punt only bios that
	 * were allocated from this bio_set; otherwise, if there was a bio on
	 * there for a stacking driver higher up in the stack, processing it
	 * could require allocating bios from this bio_set, and doing that from
	 * our own rescuer would be bad.
	 *
	 * Since bio lists are singly linked, pop them all instead of trying to
	 * remove from the middle of the list:
	 */

	bio_list_init(&punt);
	bio_list_init(&nopunt);

	while ((bio = bio_list_pop(&current->bio_list[0])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[0] = nopunt;

	bio_list_init(&nopunt);
	while ((bio = bio_list_pop(&current->bio_list[1])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[1] = nopunt;

	spin_lock(&bs->rescue_lock);
	bio_list_merge(&bs->rescue_list, &punt);
	spin_unlock(&bs->rescue_lock);

	queue_work(bs->rescue_workqueue, &bs->rescue_work);
}

/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @bdev:	block device to allocate the bio for (can be %NULL)
 * @nr_vecs:	number of bvecs to pre-allocate
 * @opf:	operation and flags for bio
 * @gfp_mask:   the GFP_* mask given to the slab allocator
 * @bs:		the bio_set to allocate from.
 *
 * Allocate a bio from the mempools in @bs.
 *
 * If %__GFP_DIRECT_RECLAIM is set then bio_alloc will always be able to
 * allocate a bio.  This is due to the mempool guarantees.  To make this work,
 * callers must never allocate more than 1 bio at a time from the general pool.
 * Callers that need to allocate more than 1 bio must always submit the
 * previously allocated bio for IO before attempting to allocate a new one.
 * Failure to do so can cause deadlocks under memory pressure.
 *
 * Note that when running under submit_bio_noacct() (i.e. any block driver),
 * bios are not submitted until after you return - see the code in
 * submit_bio_noacct() that converts recursion into iteration, to prevent
 * stack overflows.
 *
 * This would normally mean allocating multiple bios under submit_bio_noacct()
 * would be susceptible to deadlocks, but we have deadlock avoidance code that
 * resubmits any blocked bios from a rescuer thread.
 *
 * However, we do not guarantee forward progress for allocations from other
 * mempools. Doing multiple allocations from the same mempool under
 * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
 * for per bio allocations.
 *
 * Returns: Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
			     unsigned int opf, gfp_t gfp_mask,
			     struct bio_set *bs)
{
	gfp_t saved_gfp = gfp_mask;
	struct bio *bio;
	void *p;

	/* should not use nobvec bioset for nr_vecs > 0 */
	if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0))
		return NULL;

	/*
	 * submit_bio_noacct() converts recursion to iteration; this means if
	 * we're running beneath it, any bios we allocate and submit will not be
	 * submitted (and thus freed) until after we return.
	 *
	 * This exposes us to a potential deadlock if we allocate multiple bios
	 * from the same bio_set() while running underneath submit_bio_noacct().
	 * If we were to allocate multiple bios (say a stacking block driver
	 * that was splitting bios), we would deadlock if we exhausted the
	 * mempool's reserve.
	 *
	 * We solve this, and guarantee forward progress, with a rescuer
	 * workqueue per bio_set. If we go to allocate and there are bios on
	 * current->bio_list, we first try the allocation without
	 * __GFP_DIRECT_RECLAIM; if that fails, we punt those bios we would be
	 * blocking to the rescuer workqueue before we retry with the original
	 * gfp_flags.
	 */
	if (current->bio_list &&
	    (!bio_list_empty(&current->bio_list[0]) ||
	     !bio_list_empty(&current->bio_list[1])) &&
	    bs->rescue_workqueue)
		gfp_mask &= ~__GFP_DIRECT_RECLAIM;

	p = mempool_alloc(&bs->bio_pool, gfp_mask);
	if (!p && gfp_mask != saved_gfp) {
		punt_bios_to_rescuer(bs);
		gfp_mask = saved_gfp;
		p = mempool_alloc(&bs->bio_pool, gfp_mask);
	}
	if (unlikely(!p))
		return NULL;

	bio = p + bs->front_pad;
	if (nr_vecs > BIO_INLINE_VECS) {
		struct bio_vec *bvl = NULL;

		bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask);
		if (!bvl && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask);
		}
		if (unlikely(!bvl))
			goto err_free;

		bio_init(bio, bvl, nr_vecs);
	} else if (nr_vecs) {
		bio_init(bio, bio->bi_inline_vecs, BIO_INLINE_VECS);
	} else {
		bio_init(bio, NULL, 0);
	}

	bio->bi_pool = bs;
	if (bdev)
		bio_set_dev(bio, bdev);
	bio->bi_opf = opf;
	return bio;

err_free:
	mempool_free(p, &bs->bio_pool);
	return NULL;
}
EXPORT_SYMBOL(bio_alloc_bioset);
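
/*
 * Illustrative call (assumed caller, not from this file): a filesystem
 * reading a single page might do:
 *
 *	bio = bio_alloc_bioset(bdev, 1, REQ_OP_READ, GFP_NOIO, &fs_bio_set);
 *	bio->bi_iter.bi_sector = sector;
 *	__bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_end_io = my_end_io;	// hypothetical completion handler
 *	submit_bio(bio);
 *
 * With __GFP_DIRECT_RECLAIM set (as in GFP_NOIO), the allocation cannot
 * fail per the mempool guarantee documented above, so no NULL check is
 * shown.
 */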

/**
 * bio_kmalloc - kmalloc a bio for I/O
 * @gfp_mask:   the GFP_* mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 *
 * Use kmalloc to allocate and initialize a bio.
 *
 * Returns: Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs)
{
	struct bio *bio;

	if (nr_iovecs > UIO_MAXIOV)
		return NULL;

	bio = kmalloc(struct_size(bio, bi_inline_vecs, nr_iovecs), gfp_mask);
	if (unlikely(!bio))
		return NULL;
	bio_init(bio, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs);
	bio->bi_pool = NULL;
	return bio;
}
EXPORT_SYMBOL(bio_kmalloc);
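
/*
 * Note (illustrative): a bio from bio_kmalloc() has no bio_set, so the
 * final reference drop frees it through bio_free()'s kfree() path:
 *
 *	bio = bio_kmalloc(GFP_KERNEL, nr_pages);
 *	...
 *	bio_put(bio);
 */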

void zero_fill_bio(struct bio *bio)
{
	struct bio_vec bv;
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter)
		memzero_bvec(&bv);
}
EXPORT_SYMBOL(zero_fill_bio);

/**
 * bio_truncate - truncate the bio to the new size @new_size
 * @bio:	the bio to be truncated
 * @new_size:	new size for truncating the bio
 *
 * Description:
 *   Truncate the bio to the new size @new_size. If bio_op(bio) is
 *   REQ_OP_READ, zero the truncated part. This function should only
 *   be used for handling corner cases, such as bio eod.
 */
static void bio_truncate(struct bio *bio, unsigned new_size)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned int done = 0;
	bool truncated = false;

	if (new_size >= bio->bi_iter.bi_size)
		return;

	if (bio_op(bio) != REQ_OP_READ)
		goto exit;

	bio_for_each_segment(bv, bio, iter) {
		if (done + bv.bv_len > new_size) {
			unsigned offset;

			if (!truncated)
				offset = new_size - done;
			else
				offset = 0;
			zero_user(bv.bv_page, bv.bv_offset + offset,
				  bv.bv_len - offset);
			truncated = true;
		}
		done += bv.bv_len;
	}

 exit:
	/*
	 * Don't touch the bvec table here and make it really immutable, since
	 * the fs bio user has to retrieve all pages via
	 * bio_for_each_segment_all in its ->bi_end_io() callback.
	 *
	 * It is enough to truncate the bio by updating .bi_size since we can
	 * make a correct bvec with the updated .bi_size for drivers.
	 */
	bio->bi_iter.bi_size = new_size;
}

/**
 * guard_bio_eod - truncate a BIO to fit the block device
 * @bio:	bio to truncate
 *
 * This allows us to do IO even on the odd last sectors of a device, even if the
 * block size is some multiple of the physical sector size.
 *
 * We'll just truncate the bio to the size of the device, and clear the end of
 * the buffer head manually.  Truly out-of-range accesses will turn into actual
 * I/O errors, this only handles the "we need to be able to do I/O at the final
 * sector" case.
 */
void guard_bio_eod(struct bio *bio)
{
	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);

	if (!maxsector)
		return;

	/*
	 * If the *whole* IO is past the end of the device,
	 * let it through, and the IO layer will turn it into
	 * an EIO.
	 */
	if (unlikely(bio->bi_iter.bi_sector >= maxsector))
		return;

	maxsector -= bio->bi_iter.bi_sector;
	if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
		return;

	bio_truncate(bio, maxsector << 9);
}

#define ALLOC_CACHE_MAX		512
#define ALLOC_CACHE_SLACK	 64

static void bio_alloc_cache_prune(struct bio_alloc_cache *cache,
				  unsigned int nr)
{
	unsigned int i = 0;
	struct bio *bio;

	while ((bio = cache->free_list) != NULL) {
		cache->free_list = bio->bi_next;
		cache->nr--;
		bio_free(bio);
		if (++i == nr)
			break;
	}
}

static int bio_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct bio_set *bs;

	bs = hlist_entry_safe(node, struct bio_set, cpuhp_dead);
	if (bs->cache) {
		struct bio_alloc_cache *cache = per_cpu_ptr(bs->cache, cpu);

		bio_alloc_cache_prune(cache, -1U);
	}
	return 0;
}

static void bio_alloc_cache_destroy(struct bio_set *bs)
{
	int cpu;

	if (!bs->cache)
		return;

	cpuhp_state_remove_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead);
	for_each_possible_cpu(cpu) {
		struct bio_alloc_cache *cache;

		cache = per_cpu_ptr(bs->cache, cpu);
		bio_alloc_cache_prune(cache, -1U);
	}
	free_percpu(bs->cache);
}

/**
 * bio_put - release a reference to a bio
 * @bio:   bio to release reference to
 *
 * Description:
 *   Put a reference to a &struct bio, either one you have gotten with
 *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
 **/
void bio_put(struct bio *bio)
{
	if (unlikely(bio_flagged(bio, BIO_REFFED))) {
		BUG_ON(!atomic_read(&bio->__bi_cnt));
		if (!atomic_dec_and_test(&bio->__bi_cnt))
			return;
	}

	if (bio_flagged(bio, BIO_PERCPU_CACHE)) {
		struct bio_alloc_cache *cache;

		bio_uninit(bio);
		cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu());
		bio->bi_next = cache->free_list;
		cache->free_list = bio;
		if (++cache->nr > ALLOC_CACHE_MAX + ALLOC_CACHE_SLACK)
			bio_alloc_cache_prune(cache, ALLOC_CACHE_SLACK);
		put_cpu();
	} else {
		bio_free(bio);
	}
}
EXPORT_SYMBOL(bio_put);

/**
 * 	__bio_clone_fast - clone a bio that shares the original bio's biovec
 * 	@bio: destination bio
 * 	@bio_src: bio to clone
 *
 *	Clone a &bio. Caller will own the returned bio, but not
 *	the actual data it points to. Reference count of returned
 * 	bio will be one.
 *
 * 	Caller must ensure that @bio_src is not freed before @bio.
 */
void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
{
	WARN_ON_ONCE(bio->bi_pool && bio->bi_max_vecs);

	/*
	 * most users will be overriding ->bi_bdev with a new target,
	 * so we don't set nor calculate new physical/hw segment counts here
	 */
	bio->bi_bdev = bio_src->bi_bdev;
	bio_set_flag(bio, BIO_CLONED);
	if (bio_flagged(bio_src, BIO_THROTTLED))
		bio_set_flag(bio, BIO_THROTTLED);
	if (bio_flagged(bio_src, BIO_REMAPPED))
		bio_set_flag(bio, BIO_REMAPPED);
	bio->bi_opf = bio_src->bi_opf;
	bio->bi_ioprio = bio_src->bi_ioprio;
	bio->bi_write_hint = bio_src->bi_write_hint;
	bio->bi_iter = bio_src->bi_iter;
	bio->bi_io_vec = bio_src->bi_io_vec;

	bio_clone_blkg_association(bio, bio_src);
	blkcg_bio_issue_init(bio);
}
EXPORT_SYMBOL(__bio_clone_fast);

/**
 *	bio_clone_fast - clone a bio that shares the original bio's biovec
 *	@bio: bio to clone
 *	@gfp_mask: allocation priority
 *	@bs: bio_set to allocate from
 *
 * 	Like __bio_clone_fast, only also allocates the returned bio
 */
struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
{
	struct bio *b;

	b = bio_alloc_bioset(NULL, 0, 0, gfp_mask, bs);
	if (!b)
		return NULL;

	__bio_clone_fast(b, bio);

	if (bio_crypt_clone(b, bio, gfp_mask) < 0)
		goto err_put;

	if (bio_integrity(bio) &&
	    bio_integrity_clone(b, bio, gfp_mask) < 0)
		goto err_put;

	return b;

err_put:
	bio_put(b);
	return NULL;
}
EXPORT_SYMBOL(bio_clone_fast);

const char *bio_devname(struct bio *bio, char *buf)
{
	return bdevname(bio->bi_bdev, buf);
}
EXPORT_SYMBOL(bio_devname);

/**
 * bio_full - check if the bio is full
 * @bio:	bio to check
 * @len:	length of one segment to be added
 *
 * Return true if @bio is full and one segment with @len bytes can't be
 * added to the bio, otherwise return false
 */
static inline bool bio_full(struct bio *bio, unsigned len)
{
	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return true;
	if (bio->bi_iter.bi_size > UINT_MAX - len)
		return true;
	return false;
}

static inline bool page_is_mergeable(const struct bio_vec *bv,
		struct page *page, unsigned int len, unsigned int off,
		bool *same_page)
{
	size_t bv_end = bv->bv_offset + bv->bv_len;
	phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
	phys_addr_t page_addr = page_to_phys(page);

	if (vec_end_addr + 1 != page_addr + off)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
		return false;

	*same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
	if (*same_page)
		return true;
	return (bv->bv_page + bv_end / PAGE_SIZE) == (page + off / PAGE_SIZE);
}

/**
 * __bio_try_merge_page - try appending data to an existing bvec.
 * @bio: destination bio
 * @page: start page to add
 * @len: length of the data to add
 * @off: offset of the data relative to @page
 * @same_page: return if the segment has been merged inside the same page
 *
 * Try to add the data at @page + @off to the last bvec of @bio.  This is a
 * useful optimisation for file systems with a block size smaller than the
 * page size.
 *
 * Warn if (@len, @off) crosses pages in case that @same_page is true.
 *
 * Return %true on success or %false on failure.
 */
static bool __bio_try_merge_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off, bool *same_page)
{
	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return false;

	if (bio->bi_vcnt > 0) {
		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page_is_mergeable(bv, page, len, off, same_page)) {
			if (bio->bi_iter.bi_size > UINT_MAX - len) {
				*same_page = false;
				return false;
			}
			bv->bv_len += len;
			bio->bi_iter.bi_size += len;
			return true;
		}
	}
	return false;
}

/*
 * Try to merge a page into a segment, while obeying the hardware segment
 * size limit.  This is not for normal read/write bios, but for passthrough
 * or Zone Append operations that we can't split.
 */
static bool bio_try_merge_hw_seg(struct request_queue *q, struct bio *bio,
				 struct page *page, unsigned len,
				 unsigned offset, bool *same_page)
{
	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
	phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;

	if ((addr1 | mask) != (addr2 | mask))
		return false;
	if (bv->bv_len + len > queue_max_segment_size(q))
		return false;
	return __bio_try_merge_page(bio, page, len, offset, same_page);
}

/**
 * bio_add_hw_page - attempt to add a page to a bio with hw constraints
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 * @max_sectors: maximum number of sectors that can be added
 * @same_page: return if the segment has been merged inside the same page
 *
 * Add a page to a bio while respecting the hardware max_sectors, max_segment
 * and gap limitations.
 */
int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page)
{
	struct bio_vec *bvec;

	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return 0;

	if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors)
		return 0;

	if (bio->bi_vcnt > 0) {
		if (bio_try_merge_hw_seg(q, bio, page, len, offset, same_page))
			return len;

		/*
		 * If the queue doesn't support SG gaps and adding this segment
		 * would create a gap, disallow it.
		 */
		bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
		if (bvec_gap_to_prev(q, bvec, offset))
			return 0;
	}

	if (bio_full(bio, len))
		return 0;

	if (bio->bi_vcnt >= queue_max_segments(q))
		return 0;

	bvec = &bio->bi_io_vec[bio->bi_vcnt];
	bvec->bv_page = page;
	bvec->bv_len = len;
	bvec->bv_offset = offset;
	bio->bi_vcnt++;
	bio->bi_iter.bi_size += len;
	return len;
}

/**
 * bio_add_pc_page	- attempt to add page to passthrough bio
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist. This can fail for a
 * number of reasons, such as the bio being full or target block device
 * limitations. The target block device must allow bio's up to PAGE_SIZE,
 * so it is always possible to add a single page to an empty bio.
 *
 * This should only be used by passthrough bios.
 */
int bio_add_pc_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset)
{
	bool same_page = false;
	return bio_add_hw_page(q, bio, page, len, offset,
			queue_max_hw_sectors(q), &same_page);
}
EXPORT_SYMBOL(bio_add_pc_page);

/**
 * bio_add_zone_append_page - attempt to add page to zone-append bio
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist of a bio that will be submitted
 * for a zone-append request. This can fail for a number of reasons, such as the
 * bio being full or the target block device is not a zoned block device or
 * other limitations of the target block device. The target block device must
 * allow bio's up to PAGE_SIZE, so it is always possible to add a single page
 * to an empty bio.
 *
 * Returns: number of bytes added to the bio, or 0 in case of a failure.
 */
int bio_add_zone_append_page(struct bio *bio, struct page *page,
			     unsigned int len, unsigned int offset)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	bool same_page = false;

	if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_ZONE_APPEND))
		return 0;

	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
		return 0;

	return bio_add_hw_page(q, bio, page, len, offset,
			       queue_max_zone_append_sectors(q), &same_page);
}
EXPORT_SYMBOL_GPL(bio_add_zone_append_page);

/**
 * __bio_add_page - add page(s) to a bio in a new segment
 * @bio: destination bio
 * @page: start page to add
 * @len: length of the data to add, may cross pages
 * @off: offset of the data relative to @page, may cross pages
 *
 * Add the data at @page + @off to @bio as a new bvec.  The caller must ensure
 * that @bio has space for another bvec.
 */
void __bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off)
{
	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];

	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	WARN_ON_ONCE(bio_full(bio, len));

	bv->bv_page = page;
	bv->bv_offset = off;
	bv->bv_len = len;

	bio->bi_iter.bi_size += len;
	bio->bi_vcnt++;

	if (!bio_flagged(bio, BIO_WORKINGSET) && unlikely(PageWorkingset(page)))
		bio_set_flag(bio, BIO_WORKINGSET);
}
EXPORT_SYMBOL_GPL(__bio_add_page);
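
/*
 * Callers of __bio_add_page() are expected to have checked capacity
 * themselves, e.g. with bio_full(), since the helper performs no checks;
 * bio_add_page() below is the canonical in-file pairing of the two.
 */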

/**
 *	bio_add_page	-	attempt to add page(s) to bio
 *	@bio: destination bio
 *	@page: start page to add
 *	@len: vec entry length, may cross pages
 *	@offset: vec entry offset relative to @page, may cross pages
 *
 *	Attempt to add page(s) to the bio_vec maplist. This will only fail
 *	if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
 */
int bio_add_page(struct bio *bio, struct page *page,
		 unsigned int len, unsigned int offset)
{
	bool same_page = false;

	if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
		if (bio_full(bio, len))
			return 0;
		__bio_add_page(bio, page, len, offset);
	}
	return len;
}
EXPORT_SYMBOL(bio_add_page);
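
/*
 * Sketch of a typical caller loop (illustrative, not from this file):
 *
 *	for (i = 0; i < nr_pages; i++) {
 *		while (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE) {
 *			submit_bio(bio);
 *			bio = ...;	// allocate a fresh bio, then retry the page
 *		}
 *	}
 */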

/**
 * bio_add_folio - Attempt to add part of a folio to a bio.
 * @bio: BIO to add to.
 * @folio: Folio to add.
 * @len: How many bytes from the folio to add.
 * @off: First byte in this folio to add.
 *
 * Filesystems that use folios can call this function instead of calling
 * bio_add_page() for each page in the folio.  If @off is bigger than
 * PAGE_SIZE, this function can create a bio_vec that starts in a page
 * after the bv_page.  BIOs do not support folios that are 4GiB or larger.
 *
 * Return: Whether the addition was successful.
 */
bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len,
		   size_t off)
{
	if (len > UINT_MAX || off > UINT_MAX)
		return 0;
	return bio_add_page(bio, &folio->page, len, off) > 0;
}

void __bio_release_pages(struct bio *bio, bool mark_dirty)
{
	struct bvec_iter_all iter_all;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		if (mark_dirty && !PageCompound(bvec->bv_page))
			set_page_dirty_lock(bvec->bv_page);
		put_page(bvec->bv_page);
	}
}
EXPORT_SYMBOL_GPL(__bio_release_pages);

void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
{
	size_t size = iov_iter_count(iter);

	WARN_ON_ONCE(bio->bi_max_vecs);

	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
		size_t max_sectors = queue_max_zone_append_sectors(q);

		size = min(size, max_sectors << SECTOR_SHIFT);
	}

	bio->bi_vcnt = iter->nr_segs;
	bio->bi_io_vec = (struct bio_vec *)iter->bvec;
	bio->bi_iter.bi_bvec_done = iter->iov_offset;
	bio->bi_iter.bi_size = size;
	bio_set_flag(bio, BIO_NO_PAGE_REF);
	bio_set_flag(bio, BIO_CLONED);
}

static void bio_put_pages(struct page **pages, size_t size, size_t off)
{
	size_t i, nr = DIV_ROUND_UP(size + (off & ~PAGE_MASK), PAGE_SIZE);

	for (i = 0; i < nr; i++)
		put_page(pages[i]);
}

#define PAGE_PTRS_PER_BVEC     (sizeof(struct bio_vec) / sizeof(struct page *))

/**
 * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be mapped
 *
 * Pins pages from *iter and appends them to @bio's bvec array. The
 * pages will have to be released using put_page() when done.
 * For multi-segment *iter, this function only adds pages from the
 * next non-empty segment of the iov iterator.
 */
static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
	struct page **pages = (struct page **)bv;
	bool same_page = false;
	ssize_t size, left;
	unsigned len, i;
	size_t offset;
1148576ed913SChristoph Hellwig 
1149576ed913SChristoph Hellwig 	/*
1150576ed913SChristoph Hellwig 	 * Move page array up in the allocated memory for the bio vecs as far as
1151576ed913SChristoph Hellwig 	 * possible so that we can start filling biovecs from the beginning
1152576ed913SChristoph Hellwig 	 * without overwriting the temporary page array.
1153576ed913SChristoph Hellwig 	 */
1154576ed913SChristoph Hellwig 	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
1155576ed913SChristoph Hellwig 	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
11562cefe4dbSKent Overstreet 
115735c820e7SJens Axboe 	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
11582cefe4dbSKent Overstreet 	if (unlikely(size <= 0))
11592cefe4dbSKent Overstreet 		return size ? size : -EFAULT;
11602cefe4dbSKent Overstreet 
1161576ed913SChristoph Hellwig 	for (left = size, i = 0; left > 0; left -= len, i++) {
1162576ed913SChristoph Hellwig 		struct page *page = pages[i];
11632cefe4dbSKent Overstreet 
1164576ed913SChristoph Hellwig 		len = min_t(size_t, PAGE_SIZE - offset, left);
116545691804SChristoph Hellwig 
116645691804SChristoph Hellwig 		if (__bio_try_merge_page(bio, page, len, offset, &same_page)) {
116745691804SChristoph Hellwig 			if (same_page)
116845691804SChristoph Hellwig 				put_page(page);
116945691804SChristoph Hellwig 		} else {
1170d9cf3bd5SPavel Begunkov 			if (WARN_ON_ONCE(bio_full(bio, len))) {
1171d9cf3bd5SPavel Begunkov 				bio_put_pages(pages + i, left, offset);
1172576ed913SChristoph Hellwig 				return -EINVAL;
1173d9cf3bd5SPavel Begunkov 			}
117445691804SChristoph Hellwig 			__bio_add_page(bio, page, len, offset);
117545691804SChristoph Hellwig 		}
1176576ed913SChristoph Hellwig 		offset = 0;
11772cefe4dbSKent Overstreet 	}
11782cefe4dbSKent Overstreet 
11792cefe4dbSKent Overstreet 	iov_iter_advance(iter, size);
11802cefe4dbSKent Overstreet 	return 0;
11812cefe4dbSKent Overstreet }
118217d51b10SMartin Wilck 
11830512a75bSKeith Busch static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
11840512a75bSKeith Busch {
11850512a75bSKeith Busch 	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
11860512a75bSKeith Busch 	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
11873caee463SPavel Begunkov 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
11880512a75bSKeith Busch 	unsigned int max_append_sectors = queue_max_zone_append_sectors(q);
11890512a75bSKeith Busch 	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
11900512a75bSKeith Busch 	struct page **pages = (struct page **)bv;
11910512a75bSKeith Busch 	ssize_t size, left;
11920512a75bSKeith Busch 	unsigned len, i;
11930512a75bSKeith Busch 	size_t offset;
11944977d121SNaohiro Aota 	int ret = 0;
11950512a75bSKeith Busch 
11960512a75bSKeith Busch 	if (WARN_ON_ONCE(!max_append_sectors))
11970512a75bSKeith Busch 		return 0;
11980512a75bSKeith Busch 
11990512a75bSKeith Busch 	/*
12000512a75bSKeith Busch 	 * Move page array up in the allocated memory for the bio vecs as far as
12010512a75bSKeith Busch 	 * possible so that we can start filling biovecs from the beginning
12020512a75bSKeith Busch 	 * without overwriting the temporary page array.
12030512a75bSKeith Busch 	 */
12040512a75bSKeith Busch 	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
12050512a75bSKeith Busch 	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
12060512a75bSKeith Busch 
12070512a75bSKeith Busch 	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
12080512a75bSKeith Busch 	if (unlikely(size <= 0))
12090512a75bSKeith Busch 		return size ? size : -EFAULT;
12100512a75bSKeith Busch 
12110512a75bSKeith Busch 	for (left = size, i = 0; left > 0; left -= len, i++) {
12120512a75bSKeith Busch 		struct page *page = pages[i];
12130512a75bSKeith Busch 		bool same_page = false;
12140512a75bSKeith Busch 
12150512a75bSKeith Busch 		len = min_t(size_t, PAGE_SIZE - offset, left);
12160512a75bSKeith Busch 		if (bio_add_hw_page(q, bio, page, len, offset,
12174977d121SNaohiro Aota 				max_append_sectors, &same_page) != len) {
1218d9cf3bd5SPavel Begunkov 			bio_put_pages(pages + i, left, offset);
12194977d121SNaohiro Aota 			ret = -EINVAL;
12204977d121SNaohiro Aota 			break;
12214977d121SNaohiro Aota 		}
12220512a75bSKeith Busch 		if (same_page)
12230512a75bSKeith Busch 			put_page(page);
12240512a75bSKeith Busch 		offset = 0;
12250512a75bSKeith Busch 	}
12260512a75bSKeith Busch 
12274977d121SNaohiro Aota 	iov_iter_advance(iter, size - left);
12284977d121SNaohiro Aota 	return ret;
12290512a75bSKeith Busch }
12300512a75bSKeith Busch 
123117d51b10SMartin Wilck /**
12326d0c48aeSJens Axboe  * bio_iov_iter_get_pages - add user or kernel pages to a bio
123317d51b10SMartin Wilck  * @bio: bio to add pages to
12346d0c48aeSJens Axboe  * @iter: iov iterator describing the region to be added
123517d51b10SMartin Wilck  *
12366d0c48aeSJens Axboe  * This takes either an iterator pointing to user memory, or one pointing to
12376d0c48aeSJens Axboe  * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
12386d0c48aeSJens Axboe  * map them into the kernel. On IO completion, the caller should put those
1239c42bca92SPavel Begunkov  * pages. For bvec based iterators bio_iov_iter_get_pages() uses the provided
1240c42bca92SPavel Begunkov  * bvecs rather than copying them. Hence anyone issuing kiocb based IO needs
1241c42bca92SPavel Begunkov  * to ensure the bvecs and pages stay referenced until the submitted I/O is
1242c42bca92SPavel Begunkov  * completed by a call to ->ki_complete() or returns with an error other than
1243c42bca92SPavel Begunkov  * -EIOCBQUEUED. The caller needs to check if the bio is flagged BIO_NO_PAGE_REF
1244c42bca92SPavel Begunkov  * on IO completion. If it isn't, then pages should be released.
12456d0c48aeSJens Axboe  *
124617d51b10SMartin Wilck  * The function tries, but does not guarantee, to pin as many pages as
12475cd3ddc1SMauro Carvalho Chehab  * fit into the bio, or are requested in @iter, whichever is smaller. If
12486d0c48aeSJens Axboe  * MM encounters an error pinning the requested pages, it stops. An error
12496d0c48aeSJens Axboe  * is returned only if no pages could be pinned.
12500cf41e5eSPavel Begunkov  *
12510cf41e5eSPavel Begunkov  * It's intended for direct IO, so doesn't do PSI tracking, the caller is
12520cf41e5eSPavel Begunkov  * responsible for setting BIO_WORKINGSET if necessary.
125317d51b10SMartin Wilck  */
125417d51b10SMartin Wilck int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
125517d51b10SMartin Wilck {
1256c42bca92SPavel Begunkov 	int ret = 0;
125714eacf12SChristoph Hellwig 
1258c42bca92SPavel Begunkov 	if (iov_iter_is_bvec(iter)) {
1259fa5fa8ecSPavel Begunkov 		bio_iov_bvec_set(bio, iter);
1260fa5fa8ecSPavel Begunkov 		iov_iter_advance(iter, bio->bi_iter.bi_size);
1261fa5fa8ecSPavel Begunkov 		return 0;
126286004515SChristoph Hellwig 	}
126317d51b10SMartin Wilck 
126417d51b10SMartin Wilck 	do {
1265c42bca92SPavel Begunkov 		if (bio_op(bio) == REQ_OP_ZONE_APPEND)
12660512a75bSKeith Busch 			ret = __bio_iov_append_get_pages(bio, iter);
12676d0c48aeSJens Axboe 		else
12686d0c48aeSJens Axboe 			ret = __bio_iov_iter_get_pages(bio, iter);
126979d08f89SMing Lei 	} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));
127017d51b10SMartin Wilck 
12710cf41e5eSPavel Begunkov 	/* don't account direct I/O as memory stall */
12720cf41e5eSPavel Begunkov 	bio_clear_flag(bio, BIO_WORKINGSET);
127314eacf12SChristoph Hellwig 	return bio->bi_vcnt ? 0 : ret;
127417d51b10SMartin Wilck }
127529b2a3aaSJohannes Thumshirn EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
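
/*
 * Illustrative sketch (editor's addition): the completion-side rule from
 * the kernel-doc above.  bio_release_pages() is a no-op for bios flagged
 * BIO_NO_PAGE_REF, whose pages were borrowed from a caller-supplied bvec
 * iterator rather than pinned here.  The function name is hypothetical.
 */
static void example_dio_bio_done(struct bio *bio)
{
	/* harmless for BIO_NO_PAGE_REF bios; dirty the pages on a read */
	bio_release_pages(bio, bio_data_dir(bio) == READ);
	bio_put(bio);
}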
12762cefe4dbSKent Overstreet 
12774246a0b6SChristoph Hellwig static void submit_bio_wait_endio(struct bio *bio)
1278f9c78b2bSJens Axboe {
127965e53aabSChristoph Hellwig 	complete(bio->bi_private);
1280f9c78b2bSJens Axboe }
1281f9c78b2bSJens Axboe 
1282f9c78b2bSJens Axboe /**
1283f9c78b2bSJens Axboe  * submit_bio_wait - submit a bio, and wait until it completes
1284f9c78b2bSJens Axboe  * @bio: The &struct bio which describes the I/O
1285f9c78b2bSJens Axboe  *
1286f9c78b2bSJens Axboe  * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
1287f9c78b2bSJens Axboe  * bio_endio() on failure.
12883d289d68SJan Kara  *
12893d289d68SJan Kara  * WARNING: Unlike how submit_bio() is usually used, this function does not
12903d289d68SJan Kara  * consume the bio reference. The caller must drop the reference
12913d289d68SJan Kara  * itself.
1292f9c78b2bSJens Axboe  */
12934e49ea4aSMike Christie int submit_bio_wait(struct bio *bio)
1294f9c78b2bSJens Axboe {
1295309dca30SChristoph Hellwig 	DECLARE_COMPLETION_ONSTACK_MAP(done,
1296309dca30SChristoph Hellwig 			bio->bi_bdev->bd_disk->lockdep_map);
1297de6a78b6SMing Lei 	unsigned long hang_check;
1298f9c78b2bSJens Axboe 
129965e53aabSChristoph Hellwig 	bio->bi_private = &done;
1300f9c78b2bSJens Axboe 	bio->bi_end_io = submit_bio_wait_endio;
13011eff9d32SJens Axboe 	bio->bi_opf |= REQ_SYNC;
13024e49ea4aSMike Christie 	submit_bio(bio);
1303de6a78b6SMing Lei 
1304de6a78b6SMing Lei 	/* Prevent hang_check timer from firing at us during very long I/O */
1305de6a78b6SMing Lei 	hang_check = sysctl_hung_task_timeout_secs;
1306de6a78b6SMing Lei 	if (hang_check)
1307de6a78b6SMing Lei 		while (!wait_for_completion_io_timeout(&done,
1308de6a78b6SMing Lei 					hang_check * (HZ/2)))
1309de6a78b6SMing Lei 			;
1310de6a78b6SMing Lei 	else
131165e53aabSChristoph Hellwig 		wait_for_completion_io(&done);
1312f9c78b2bSJens Axboe 
131365e53aabSChristoph Hellwig 	return blk_status_to_errno(bio->bi_status);
1314f9c78b2bSJens Axboe }
1315f9c78b2bSJens Axboe EXPORT_SYMBOL(submit_bio_wait);
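
/*
 * Illustrative sketch (editor's addition): a synchronous one-page read
 * via submit_bio_wait().  Note the explicit bio_put(): per the WARNING
 * above, submit_bio_wait() does not consume the caller's reference.
 * The function name is hypothetical.
 */
static int example_sync_read_page(struct block_device *bdev, sector_t sector,
				  struct page *page)
{
	struct bio *bio;
	int ret;

	bio = bio_alloc_bioset(bdev, 1, REQ_OP_READ, GFP_KERNEL, &fs_bio_set);
	if (!bio)
		return -ENOMEM;
	bio->bi_iter.bi_sector = sector;
	__bio_add_page(bio, page, PAGE_SIZE, 0);
	ret = submit_bio_wait(bio);
	bio_put(bio);
	return ret;
}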
1316f9c78b2bSJens Axboe 
1317d4aa57a1SJens Axboe void __bio_advance(struct bio *bio, unsigned bytes)
1318f9c78b2bSJens Axboe {
1319f9c78b2bSJens Axboe 	if (bio_integrity(bio))
1320f9c78b2bSJens Axboe 		bio_integrity_advance(bio, bytes);
1321f9c78b2bSJens Axboe 
1322a892c8d5SSatya Tangirala 	bio_crypt_advance(bio, bytes);
1323f9c78b2bSJens Axboe 	bio_advance_iter(bio, &bio->bi_iter, bytes);
1324f9c78b2bSJens Axboe }
1325d4aa57a1SJens Axboe EXPORT_SYMBOL(__bio_advance);
1326f9c78b2bSJens Axboe 
132745db54d5SKent Overstreet void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
132845db54d5SKent Overstreet 			struct bio *src, struct bvec_iter *src_iter)
1329f9c78b2bSJens Axboe {
133045db54d5SKent Overstreet 	while (src_iter->bi_size && dst_iter->bi_size) {
1331f8b679a0SChristoph Hellwig 		struct bio_vec src_bv = bio_iter_iovec(src, *src_iter);
1332f8b679a0SChristoph Hellwig 		struct bio_vec dst_bv = bio_iter_iovec(dst, *dst_iter);
1333f8b679a0SChristoph Hellwig 		unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len);
1334f8b679a0SChristoph Hellwig 		void *src_buf;
133545db54d5SKent Overstreet 
1336f8b679a0SChristoph Hellwig 		src_buf = bvec_kmap_local(&src_bv);
1337f8b679a0SChristoph Hellwig 		memcpy_to_bvec(&dst_bv, src_buf);
1338f8b679a0SChristoph Hellwig 		kunmap_local(src_buf);
13396e6e811dSKent Overstreet 
134022b56c29SPavel Begunkov 		bio_advance_iter_single(src, src_iter, bytes);
134122b56c29SPavel Begunkov 		bio_advance_iter_single(dst, dst_iter, bytes);
134245db54d5SKent Overstreet 	}
134345db54d5SKent Overstreet }
134445db54d5SKent Overstreet EXPORT_SYMBOL(bio_copy_data_iter);
134545db54d5SKent Overstreet 
134645db54d5SKent Overstreet /**
134745db54d5SKent Overstreet  * bio_copy_data - copy contents of data buffers from one bio to another
134845db54d5SKent Overstreet  * @src: source bio
134945db54d5SKent Overstreet  * @dst: destination bio
135045db54d5SKent Overstreet  *
135145db54d5SKent Overstreet  * Stops when it reaches the end of either @src or @dst - that is, copies
135245db54d5SKent Overstreet  * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
135345db54d5SKent Overstreet  */
135445db54d5SKent Overstreet void bio_copy_data(struct bio *dst, struct bio *src)
135545db54d5SKent Overstreet {
135645db54d5SKent Overstreet 	struct bvec_iter src_iter = src->bi_iter;
135745db54d5SKent Overstreet 	struct bvec_iter dst_iter = dst->bi_iter;
135845db54d5SKent Overstreet 
135945db54d5SKent Overstreet 	bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
136045db54d5SKent Overstreet }
136145db54d5SKent Overstreet EXPORT_SYMBOL(bio_copy_data);
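
/*
 * Illustrative sketch (editor's addition): the bounce-buffer completion
 * pattern bio_copy_data() is typically used for — copy read data from a
 * private clone back into the original bio before completing it.  The
 * ->bi_private linkage and the function name are hypothetical.
 */
static void example_bounce_end_io(struct bio *bounce)
{
	struct bio *orig = bounce->bi_private;

	if (!bounce->bi_status && bio_data_dir(orig) == READ)
		bio_copy_data(orig, bounce);
	orig->bi_status = bounce->bi_status;
	bio_free_pages(bounce);
	bio_put(bounce);
	bio_endio(orig);
}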
136245db54d5SKent Overstreet 
1363491221f8SGuoqing Jiang void bio_free_pages(struct bio *bio)
13641dfa0f68SChristoph Hellwig {
13651dfa0f68SChristoph Hellwig 	struct bio_vec *bvec;
13666dc4f100SMing Lei 	struct bvec_iter_all iter_all;
13671dfa0f68SChristoph Hellwig 
13682b070cfeSChristoph Hellwig 	bio_for_each_segment_all(bvec, bio, iter_all)
13691dfa0f68SChristoph Hellwig 		__free_page(bvec->bv_page);
13701dfa0f68SChristoph Hellwig }
1371491221f8SGuoqing Jiang EXPORT_SYMBOL(bio_free_pages);
13721dfa0f68SChristoph Hellwig 
1373f9c78b2bSJens Axboe /*
1374f9c78b2bSJens Axboe  * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
1375f9c78b2bSJens Axboe  * for performing direct-IO in BIOs.
1376f9c78b2bSJens Axboe  *
1377f9c78b2bSJens Axboe  * The problem is that we cannot run set_page_dirty() from interrupt context
1378f9c78b2bSJens Axboe  * because the required locks are not interrupt-safe.  So what we can do is to
1379f9c78b2bSJens Axboe  * mark the pages dirty _before_ performing IO.  And in interrupt context,
1380f9c78b2bSJens Axboe  * check that the pages are still dirty.   If so, fine.  If not, redirty them
1381f9c78b2bSJens Axboe  * in process context.
1382f9c78b2bSJens Axboe  *
1383f9c78b2bSJens Axboe  * We special-case compound pages here: normally this means reads into hugetlb
1384f9c78b2bSJens Axboe  * pages.  The logic in here doesn't really work right for compound pages
1385f9c78b2bSJens Axboe  * because the VM does not uniformly chase down the head page in all cases.
1386f9c78b2bSJens Axboe  * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
1387f9c78b2bSJens Axboe  * handle them at all.  So we skip compound pages here at an early stage.
1388f9c78b2bSJens Axboe  *
1389f9c78b2bSJens Axboe  * Note that this code is very hard to test under normal circumstances because
1390f9c78b2bSJens Axboe  * direct-io pins the pages with get_user_pages().  This makes
1391f9c78b2bSJens Axboe  * is_page_cache_freeable return false, and the VM will not clean the pages.
1392f9c78b2bSJens Axboe  * But other code (e.g., flusher threads) could clean the pages if they are mapped
1393f9c78b2bSJens Axboe  * pagecache.
1394f9c78b2bSJens Axboe  *
1395f9c78b2bSJens Axboe  * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
1396f9c78b2bSJens Axboe  * deferred bio dirtying paths.
1397f9c78b2bSJens Axboe  */
1398f9c78b2bSJens Axboe 
1399f9c78b2bSJens Axboe /*
1400f9c78b2bSJens Axboe  * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1401f9c78b2bSJens Axboe  */
1402f9c78b2bSJens Axboe void bio_set_pages_dirty(struct bio *bio)
1403f9c78b2bSJens Axboe {
1404f9c78b2bSJens Axboe 	struct bio_vec *bvec;
14056dc4f100SMing Lei 	struct bvec_iter_all iter_all;
1406f9c78b2bSJens Axboe 
14072b070cfeSChristoph Hellwig 	bio_for_each_segment_all(bvec, bio, iter_all) {
14083bb50983SChristoph Hellwig 		if (!PageCompound(bvec->bv_page))
14093bb50983SChristoph Hellwig 			set_page_dirty_lock(bvec->bv_page);
1410f9c78b2bSJens Axboe 	}
1411f9c78b2bSJens Axboe }
1412f9c78b2bSJens Axboe 
1413f9c78b2bSJens Axboe /*
1414f9c78b2bSJens Axboe  * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
1415f9c78b2bSJens Axboe  * If they are, then fine.  If, however, some pages are clean then they must
1416f9c78b2bSJens Axboe  * have been written out during the direct-IO read.  So we take another ref on
141724d5493fSChristoph Hellwig  * the BIO and re-dirty the pages in process context.
1418f9c78b2bSJens Axboe  *
1419f9c78b2bSJens Axboe  * It is expected that bio_check_pages_dirty() will wholly own the BIO from
1420ea1754a0SKirill A. Shutemov  * here on.  It will run one put_page() against each page and will run one
1421ea1754a0SKirill A. Shutemov  * bio_put() against the BIO.
1422f9c78b2bSJens Axboe  */
1423f9c78b2bSJens Axboe 
1424f9c78b2bSJens Axboe static void bio_dirty_fn(struct work_struct *work);
1425f9c78b2bSJens Axboe 
1426f9c78b2bSJens Axboe static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
1427f9c78b2bSJens Axboe static DEFINE_SPINLOCK(bio_dirty_lock);
1428f9c78b2bSJens Axboe static struct bio *bio_dirty_list;
1429f9c78b2bSJens Axboe 
1430f9c78b2bSJens Axboe /*
1431f9c78b2bSJens Axboe  * This runs in process context
1432f9c78b2bSJens Axboe  */
1433f9c78b2bSJens Axboe static void bio_dirty_fn(struct work_struct *work)
1434f9c78b2bSJens Axboe {
143524d5493fSChristoph Hellwig 	struct bio *bio, *next;
1436f9c78b2bSJens Axboe 
143724d5493fSChristoph Hellwig 	spin_lock_irq(&bio_dirty_lock);
143824d5493fSChristoph Hellwig 	next = bio_dirty_list;
1439f9c78b2bSJens Axboe 	bio_dirty_list = NULL;
144024d5493fSChristoph Hellwig 	spin_unlock_irq(&bio_dirty_lock);
1441f9c78b2bSJens Axboe 
144224d5493fSChristoph Hellwig 	while ((bio = next) != NULL) {
144324d5493fSChristoph Hellwig 		next = bio->bi_private;
1444f9c78b2bSJens Axboe 
1445d241a95fSChristoph Hellwig 		bio_release_pages(bio, true);
1446f9c78b2bSJens Axboe 		bio_put(bio);
1447f9c78b2bSJens Axboe 	}
1448f9c78b2bSJens Axboe }
1449f9c78b2bSJens Axboe 
1450f9c78b2bSJens Axboe void bio_check_pages_dirty(struct bio *bio)
1451f9c78b2bSJens Axboe {
1452f9c78b2bSJens Axboe 	struct bio_vec *bvec;
145324d5493fSChristoph Hellwig 	unsigned long flags;
14546dc4f100SMing Lei 	struct bvec_iter_all iter_all;
1455f9c78b2bSJens Axboe 
14562b070cfeSChristoph Hellwig 	bio_for_each_segment_all(bvec, bio, iter_all) {
145724d5493fSChristoph Hellwig 		if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
145824d5493fSChristoph Hellwig 			goto defer;
1459f9c78b2bSJens Axboe 	}
1460f9c78b2bSJens Axboe 
1461d241a95fSChristoph Hellwig 	bio_release_pages(bio, false);
146224d5493fSChristoph Hellwig 	bio_put(bio);
146324d5493fSChristoph Hellwig 	return;
146424d5493fSChristoph Hellwig defer:
1465f9c78b2bSJens Axboe 	spin_lock_irqsave(&bio_dirty_lock, flags);
1466f9c78b2bSJens Axboe 	bio->bi_private = bio_dirty_list;
1467f9c78b2bSJens Axboe 	bio_dirty_list = bio;
1468f9c78b2bSJens Axboe 	spin_unlock_irqrestore(&bio_dirty_lock, flags);
1469f9c78b2bSJens Axboe 	schedule_work(&bio_dirty_work);
1470f9c78b2bSJens Axboe }
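
/*
 * Illustrative sketch (editor's addition): how a direct-IO read path uses
 * the two helpers above — dirty the pages before submission, then hand
 * ownership of the bio to bio_check_pages_dirty() at completion time,
 * which re-dirties anything the VM cleaned in the meantime.  The function
 * names are hypothetical.
 */
static void example_dio_read_end_io(struct bio *bio)
{
	bio_check_pages_dirty(bio);	/* puts the pages and the bio */
}

static void example_submit_dio_read(struct bio *bio)
{
	bio_set_pages_dirty(bio);	/* mark dirty before the IO runs */
	bio->bi_end_io = example_dio_read_end_io;
	submit_bio(bio);
}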
1471f9c78b2bSJens Axboe 
1472c4cf5261SJens Axboe static inline bool bio_remaining_done(struct bio *bio)
1473c4cf5261SJens Axboe {
1474c4cf5261SJens Axboe 	/*
1475c4cf5261SJens Axboe 	 * If we're not chaining, then ->__bi_remaining is always 1 and
1476c4cf5261SJens Axboe 	 * we always end io on the first invocation.
1477c4cf5261SJens Axboe 	 */
1478c4cf5261SJens Axboe 	if (!bio_flagged(bio, BIO_CHAIN))
1479c4cf5261SJens Axboe 		return true;
1480c4cf5261SJens Axboe 
1481c4cf5261SJens Axboe 	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
1482c4cf5261SJens Axboe 
1483326e1dbbSMike Snitzer 	if (atomic_dec_and_test(&bio->__bi_remaining)) {
1484b7c44ed9SJens Axboe 		bio_clear_flag(bio, BIO_CHAIN);
1485c4cf5261SJens Axboe 		return true;
1486326e1dbbSMike Snitzer 	}
1487c4cf5261SJens Axboe 
1488c4cf5261SJens Axboe 	return false;
1489c4cf5261SJens Axboe }
1490c4cf5261SJens Axboe 
1491f9c78b2bSJens Axboe /**
1492f9c78b2bSJens Axboe  * bio_endio - end I/O on a bio
1493f9c78b2bSJens Axboe  * @bio:	bio
1494f9c78b2bSJens Axboe  *
1495f9c78b2bSJens Axboe  * Description:
14964246a0b6SChristoph Hellwig  *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
14974246a0b6SChristoph Hellwig  *   way to end I/O on a bio. No one should call bi_end_io() directly on a
14984246a0b6SChristoph Hellwig  *   bio unless they own it and thus know that it has an end_io function.
1499fbbaf700SNeilBrown  *
1500fbbaf700SNeilBrown  *   bio_endio() can be called several times on a bio that has been chained
1501fbbaf700SNeilBrown  *   using bio_chain().  The ->bi_end_io() function will only be called the
150260b6a7e6SEdward Hsieh  *   last time.
1503f9c78b2bSJens Axboe  **/
15044246a0b6SChristoph Hellwig void bio_endio(struct bio *bio)
1505f9c78b2bSJens Axboe {
1506ba8c6967SChristoph Hellwig again:
15072b885517SChristoph Hellwig 	if (!bio_remaining_done(bio))
1508ba8c6967SChristoph Hellwig 		return;
15097c20f116SChristoph Hellwig 	if (!bio_integrity_endio(bio))
15107c20f116SChristoph Hellwig 		return;
1511f9c78b2bSJens Axboe 
1512a647a524SMing Lei 	if (bio->bi_bdev && bio_flagged(bio, BIO_TRACKED))
15133caee463SPavel Begunkov 		rq_qos_done_bio(bdev_get_queue(bio->bi_bdev), bio);
151467b42d0bSJosef Bacik 
151560b6a7e6SEdward Hsieh 	if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
15163caee463SPavel Begunkov 		trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio);
151760b6a7e6SEdward Hsieh 		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
151860b6a7e6SEdward Hsieh 	}
151960b6a7e6SEdward Hsieh 
1520f9c78b2bSJens Axboe 	/*
1521ba8c6967SChristoph Hellwig 	 * Need to have a real endio function for chained bios, otherwise
1522ba8c6967SChristoph Hellwig 	 * various corner cases will break (like stacking block devices that
1523ba8c6967SChristoph Hellwig 	 * save/restore bi_end_io) - however, we want to avoid unbounded
1524ba8c6967SChristoph Hellwig 	 * recursion and blowing the stack. Tail call optimization would
1525ba8c6967SChristoph Hellwig 	 * handle this, but compiling with frame pointers also disables
1526ba8c6967SChristoph Hellwig 	 * gcc's sibling call optimization.
1527f9c78b2bSJens Axboe 	 */
1528f9c78b2bSJens Axboe 	if (bio->bi_end_io == bio_chain_endio) {
152938f8baaeSChristoph Hellwig 		bio = __bio_chain_endio(bio);
1530ba8c6967SChristoph Hellwig 		goto again;
1531ba8c6967SChristoph Hellwig 	}
1532ba8c6967SChristoph Hellwig 
15339e234eeaSShaohua Li 	blk_throtl_bio_endio(bio);
1534b222dd2fSShaohua Li 	/* release cgroup info */
1535b222dd2fSShaohua Li 	bio_uninit(bio);
1536f9c78b2bSJens Axboe 	if (bio->bi_end_io)
15374246a0b6SChristoph Hellwig 		bio->bi_end_io(bio);
1538f9c78b2bSJens Axboe }
1539f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_endio);
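
/*
 * Illustrative sketch (editor's addition): chaining per the kernel-doc
 * above.  After bio_chain(child, parent), the parent's ->bi_end_io runs
 * only once both bios have passed through bio_endio().  The function
 * name is hypothetical.
 */
static void example_submit_chained(struct bio *parent, struct bio *child)
{
	bio_chain(child, parent);	/* child's completion folds into parent */
	submit_bio(child);
	submit_bio(parent);
}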
1540f9c78b2bSJens Axboe 
1541f9c78b2bSJens Axboe /**
1542f9c78b2bSJens Axboe  * bio_split - split a bio
1543f9c78b2bSJens Axboe  * @bio:	bio to split
1544f9c78b2bSJens Axboe  * @sectors:	number of sectors to split from the front of @bio
1545f9c78b2bSJens Axboe  * @gfp:	gfp mask
1546f9c78b2bSJens Axboe  * @bs:		bio set to allocate from
1547f9c78b2bSJens Axboe  *
1548f9c78b2bSJens Axboe  * Allocates and returns a new bio which represents @sectors from the start of
1549f9c78b2bSJens Axboe  * @bio, and updates @bio to represent the remaining sectors.
1550f9c78b2bSJens Axboe  *
1551f3f5da62SMartin K. Petersen  * Unless this is a discard request the newly allocated bio will point
1552dad77584SBart Van Assche  * to @bio's bi_io_vec. It is the caller's responsibility to ensure that
1553dad77584SBart Van Assche  * neither @bio nor @bs are freed before the split bio.
1554f9c78b2bSJens Axboe  */
1555f9c78b2bSJens Axboe struct bio *bio_split(struct bio *bio, int sectors,
1556f9c78b2bSJens Axboe 		      gfp_t gfp, struct bio_set *bs)
1557f9c78b2bSJens Axboe {
1558f341a4d3SMikulas Patocka 	struct bio *split;
1559f9c78b2bSJens Axboe 
1560f9c78b2bSJens Axboe 	BUG_ON(sectors <= 0);
1561f9c78b2bSJens Axboe 	BUG_ON(sectors >= bio_sectors(bio));
1562f9c78b2bSJens Axboe 
15630512a75bSKeith Busch 	/* Zone append commands cannot be split */
15640512a75bSKeith Busch 	if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
15650512a75bSKeith Busch 		return NULL;
15660512a75bSKeith Busch 
1567f9c78b2bSJens Axboe 	split = bio_clone_fast(bio, gfp, bs);
1568f9c78b2bSJens Axboe 	if (!split)
1569f9c78b2bSJens Axboe 		return NULL;
1570f9c78b2bSJens Axboe 
1571f9c78b2bSJens Axboe 	split->bi_iter.bi_size = sectors << 9;
1572f9c78b2bSJens Axboe 
1573f9c78b2bSJens Axboe 	if (bio_integrity(split))
1574fbd08e76SDmitry Monakhov 		bio_integrity_trim(split);
1575f9c78b2bSJens Axboe 
1576f9c78b2bSJens Axboe 	bio_advance(bio, split->bi_iter.bi_size);
1577f9c78b2bSJens Axboe 
1578fbbaf700SNeilBrown 	if (bio_flagged(bio, BIO_TRACE_COMPLETION))
157920d59023SGoldwyn Rodrigues 		bio_set_flag(split, BIO_TRACE_COMPLETION);
1580fbbaf700SNeilBrown 
1581f9c78b2bSJens Axboe 	return split;
1582f9c78b2bSJens Axboe }
1583f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_split);
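
/*
 * Illustrative sketch (editor's addition): the split-and-chain pattern
 * used when a bio exceeds some limit — carve @sectors off the front,
 * chain the remainder to the new front half, and resubmit the remainder.
 * The function name is hypothetical.
 */
static struct bio *example_split_front(struct bio *bio, int sectors)
{
	struct bio *split;

	split = bio_split(bio, sectors, GFP_NOIO, &fs_bio_set);
	if (!split)
		return bio;
	bio_chain(split, bio);		/* @bio completes only after @split */
	submit_bio_noacct(bio);		/* resubmit the remaining sectors */
	return split;
}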
1584f9c78b2bSJens Axboe 
1585f9c78b2bSJens Axboe /**
1586f9c78b2bSJens Axboe  * bio_trim - trim a bio
1587f9c78b2bSJens Axboe  * @bio:	bio to trim
1588f9c78b2bSJens Axboe  * @offset:	number of sectors to trim from the front of @bio
1589f9c78b2bSJens Axboe  * @size:	size we want to trim @bio to, in sectors
1590e83502caSChaitanya Kulkarni  *
1591e83502caSChaitanya Kulkarni  * This function is typically used for bios that are cloned and submitted
1592e83502caSChaitanya Kulkarni  * to the underlying device in parts.
1593f9c78b2bSJens Axboe  */
1594e83502caSChaitanya Kulkarni void bio_trim(struct bio *bio, sector_t offset, sector_t size)
1595f9c78b2bSJens Axboe {
1596e83502caSChaitanya Kulkarni 	if (WARN_ON_ONCE(offset > BIO_MAX_SECTORS || size > BIO_MAX_SECTORS ||
1597e83502caSChaitanya Kulkarni 			 offset + size > bio->bi_iter.bi_size))
1598e83502caSChaitanya Kulkarni 		return;
1599f9c78b2bSJens Axboe 
1600f9c78b2bSJens Axboe 	size <<= 9;
1601f9c78b2bSJens Axboe 	if (offset == 0 && size == bio->bi_iter.bi_size)
1602f9c78b2bSJens Axboe 		return;
1603f9c78b2bSJens Axboe 
1604f9c78b2bSJens Axboe 	bio_advance(bio, offset << 9);
1605f9c78b2bSJens Axboe 	bio->bi_iter.bi_size = size;
1606376a78abSDmitry Monakhov 
1607376a78abSDmitry Monakhov 	if (bio_integrity(bio))
1608fbd08e76SDmitry Monakhov 		bio_integrity_trim(bio);
1609f9c78b2bSJens Axboe }
1610f9c78b2bSJens Axboe EXPORT_SYMBOL_GPL(bio_trim);
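
/*
 * Illustrative sketch (editor's addition): a stacking driver trimming a
 * clone down to the region it will remap — here the first half of the
 * original.  The function name is hypothetical.
 */
static void example_trim_to_front_half(struct bio *clone)
{
	bio_trim(clone, 0, bio_sectors(clone) / 2);
}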
1611f9c78b2bSJens Axboe 
1612f9c78b2bSJens Axboe /*
1613f9c78b2bSJens Axboe  * create memory pools for biovec's in a bio_set.
1614f9c78b2bSJens Axboe  * use the global biovec slabs created for general use.
1615f9c78b2bSJens Axboe  */
16168aa6ba2fSKent Overstreet int biovec_init_pool(mempool_t *pool, int pool_entries)
1617f9c78b2bSJens Axboe {
16187a800a20SChristoph Hellwig 	struct biovec_slab *bp = bvec_slabs + ARRAY_SIZE(bvec_slabs) - 1;
1619f9c78b2bSJens Axboe 
16208aa6ba2fSKent Overstreet 	return mempool_init_slab_pool(pool, pool_entries, bp->slab);
1621f9c78b2bSJens Axboe }
1622f9c78b2bSJens Axboe 
1623917a38c7SKent Overstreet /*
1624917a38c7SKent Overstreet  * bioset_exit - exit a bioset initialized with bioset_init()
1625917a38c7SKent Overstreet  *
1626917a38c7SKent Overstreet  * May be called on a zeroed but uninitialized bioset (i.e. allocated with
1627917a38c7SKent Overstreet  * kzalloc()).
1628917a38c7SKent Overstreet  */
1629917a38c7SKent Overstreet void bioset_exit(struct bio_set *bs)
1630f9c78b2bSJens Axboe {
1631be4d234dSJens Axboe 	bio_alloc_cache_destroy(bs);
1632f9c78b2bSJens Axboe 	if (bs->rescue_workqueue)
1633f9c78b2bSJens Axboe 		destroy_workqueue(bs->rescue_workqueue);
1634917a38c7SKent Overstreet 	bs->rescue_workqueue = NULL;
1635f9c78b2bSJens Axboe 
16368aa6ba2fSKent Overstreet 	mempool_exit(&bs->bio_pool);
16378aa6ba2fSKent Overstreet 	mempool_exit(&bs->bvec_pool);
1638f9c78b2bSJens Axboe 
1639f9c78b2bSJens Axboe 	bioset_integrity_free(bs);
1640917a38c7SKent Overstreet 	if (bs->bio_slab)
1641f9c78b2bSJens Axboe 		bio_put_slab(bs);
1642917a38c7SKent Overstreet 	bs->bio_slab = NULL;
1643917a38c7SKent Overstreet }
1644917a38c7SKent Overstreet EXPORT_SYMBOL(bioset_exit);
1645f9c78b2bSJens Axboe 
1646011067b0SNeilBrown /**
1647917a38c7SKent Overstreet  * bioset_init - Initialize a bio_set
1648dad08527SKent Overstreet  * @bs:		pool to initialize
1649917a38c7SKent Overstreet  * @pool_size:	Number of bio and bio_vecs to cache in the mempool
1650917a38c7SKent Overstreet  * @front_pad:	Number of bytes to allocate in front of the returned bio
1651917a38c7SKent Overstreet  * @flags:	Flags to modify behavior, currently %BIOSET_NEED_BVECS
1652917a38c7SKent Overstreet  *              and %BIOSET_NEED_RESCUER
1653917a38c7SKent Overstreet  *
1654dad08527SKent Overstreet  * Description:
1655dad08527SKent Overstreet  *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
1656dad08527SKent Overstreet  *    to ask for a number of bytes to be allocated in front of the bio.
1657dad08527SKent Overstreet  *    Front pad allocation is useful for embedding the bio inside
1658dad08527SKent Overstreet  *    another structure, to avoid allocating extra data to go with the bio.
1659dad08527SKent Overstreet  *    Note that the bio must be embedded at the END of that structure always,
1660dad08527SKent Overstreet  *    or things will break badly.
1661dad08527SKent Overstreet  *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
1662dad08527SKent Overstreet  *    for allocating iovecs.  This pool is not needed e.g. for bio_clone_fast().
1663dad08527SKent Overstreet  *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used to
1664dad08527SKent Overstreet  *    dispatch queued requests when the mempool runs out of space.
1665dad08527SKent Overstreet  *
1666917a38c7SKent Overstreet  */
1667917a38c7SKent Overstreet int bioset_init(struct bio_set *bs,
1668917a38c7SKent Overstreet 		unsigned int pool_size,
1669917a38c7SKent Overstreet 		unsigned int front_pad,
1670917a38c7SKent Overstreet 		int flags)
1671917a38c7SKent Overstreet {
1672917a38c7SKent Overstreet 	bs->front_pad = front_pad;
16739f180e31SMing Lei 	if (flags & BIOSET_NEED_BVECS)
16749f180e31SMing Lei 		bs->back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
16759f180e31SMing Lei 	else
16769f180e31SMing Lei 		bs->back_pad = 0;
1677917a38c7SKent Overstreet 
1678917a38c7SKent Overstreet 	spin_lock_init(&bs->rescue_lock);
1679917a38c7SKent Overstreet 	bio_list_init(&bs->rescue_list);
1680917a38c7SKent Overstreet 	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
1681917a38c7SKent Overstreet 
168249d1ec85SMing Lei 	bs->bio_slab = bio_find_or_create_slab(bs);
1683917a38c7SKent Overstreet 	if (!bs->bio_slab)
1684917a38c7SKent Overstreet 		return -ENOMEM;
1685917a38c7SKent Overstreet 
1686917a38c7SKent Overstreet 	if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
1687917a38c7SKent Overstreet 		goto bad;
1688917a38c7SKent Overstreet 
1689917a38c7SKent Overstreet 	if ((flags & BIOSET_NEED_BVECS) &&
1690917a38c7SKent Overstreet 	    biovec_init_pool(&bs->bvec_pool, pool_size))
1691917a38c7SKent Overstreet 		goto bad;
1692917a38c7SKent Overstreet 
1693be4d234dSJens Axboe 	if (flags & BIOSET_NEED_RESCUER) {
1694be4d234dSJens Axboe 		bs->rescue_workqueue = alloc_workqueue("bioset",
1695be4d234dSJens Axboe 							WQ_MEM_RECLAIM, 0);
1696917a38c7SKent Overstreet 		if (!bs->rescue_workqueue)
1697917a38c7SKent Overstreet 			goto bad;
1698be4d234dSJens Axboe 	}
1699be4d234dSJens Axboe 	if (flags & BIOSET_PERCPU_CACHE) {
1700be4d234dSJens Axboe 		bs->cache = alloc_percpu(struct bio_alloc_cache);
1701be4d234dSJens Axboe 		if (!bs->cache)
1702be4d234dSJens Axboe 			goto bad;
1703be4d234dSJens Axboe 		cpuhp_state_add_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead);
1704be4d234dSJens Axboe 	}
1705917a38c7SKent Overstreet 
1706917a38c7SKent Overstreet 	return 0;
1707917a38c7SKent Overstreet bad:
1708917a38c7SKent Overstreet 	bioset_exit(bs);
1709917a38c7SKent Overstreet 	return -ENOMEM;
1710917a38c7SKent Overstreet }
1711917a38c7SKent Overstreet EXPORT_SYMBOL(bioset_init);
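
/*
 * Illustrative sketch (editor's addition): embedding driver state in
 * front of the bio via @front_pad, per the kernel-doc above.  The
 * structure and names are hypothetical; note the bio must be the last
 * member, as the description warns.
 */
struct example_io {
	void			*driver_data;
	struct bio		bio;	/* must come last */
};

static struct bio_set example_bio_set;

static int __init example_bioset_setup(void)
{
	return bioset_init(&example_bio_set, BIO_POOL_SIZE,
			   offsetof(struct example_io, bio),
			   BIOSET_NEED_BVECS);
}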
1712917a38c7SKent Overstreet 
171328e89fd9SJens Axboe /*
171428e89fd9SJens Axboe  * Initialize and setup a new bio_set, based on the settings from
171528e89fd9SJens Axboe  * another bio_set.
171628e89fd9SJens Axboe  */
171728e89fd9SJens Axboe int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
171828e89fd9SJens Axboe {
171928e89fd9SJens Axboe 	int flags;
172028e89fd9SJens Axboe 
172128e89fd9SJens Axboe 	flags = 0;
172228e89fd9SJens Axboe 	if (src->bvec_pool.min_nr)
172328e89fd9SJens Axboe 		flags |= BIOSET_NEED_BVECS;
172428e89fd9SJens Axboe 	if (src->rescue_workqueue)
172528e89fd9SJens Axboe 		flags |= BIOSET_NEED_RESCUER;
172628e89fd9SJens Axboe 
172728e89fd9SJens Axboe 	return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags);
172828e89fd9SJens Axboe }
172928e89fd9SJens Axboe EXPORT_SYMBOL(bioset_init_from_src);
173028e89fd9SJens Axboe 
1731be4d234dSJens Axboe /**
1732be4d234dSJens Axboe  * bio_alloc_kiocb - Allocate a bio from bio_set based on kiocb
1733be4d234dSJens Axboe  * @kiocb:	kiocb describing the IO
17340ef47db1SJens Axboe  * @nr_vecs:	number of iovecs to pre-allocate
1735be4d234dSJens Axboe  * @bs:		bio_set to allocate from
1736be4d234dSJens Axboe  *
1737be4d234dSJens Axboe  * Description:
1738be4d234dSJens Axboe  *    Like @bio_alloc_bioset, but pass in the kiocb. The kiocb is only
1739be4d234dSJens Axboe  *    used to check if we should dip into the per-cpu bio_set allocation
17403d5b3fbeSJens Axboe  *    cache. The allocation uses GFP_KERNEL internally. On return, the
17413d5b3fbeSJens Axboe  *    bio is marked BIO_PERCPU_CACHEABLE, and the final put of the bio
17423d5b3fbeSJens Axboe  *    MUST be done from process context, not hard/soft IRQ.
1743be4d234dSJens Axboe  *
1744be4d234dSJens Axboe  */
1745be4d234dSJens Axboe struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs,
1746be4d234dSJens Axboe 			    struct bio_set *bs)
1747be4d234dSJens Axboe {
1748be4d234dSJens Axboe 	struct bio_alloc_cache *cache;
1749be4d234dSJens Axboe 	struct bio *bio;
1750be4d234dSJens Axboe 
1751be4d234dSJens Axboe 	if (!(kiocb->ki_flags & IOCB_ALLOC_CACHE) || nr_vecs > BIO_INLINE_VECS)
1752*609be106SChristoph Hellwig 		return bio_alloc_bioset(NULL, nr_vecs, 0, GFP_KERNEL, bs);
1753be4d234dSJens Axboe 
1754be4d234dSJens Axboe 	cache = per_cpu_ptr(bs->cache, get_cpu());
1755fcade2ceSJens Axboe 	if (cache->free_list) {
1756fcade2ceSJens Axboe 		bio = cache->free_list;
1757fcade2ceSJens Axboe 		cache->free_list = bio->bi_next;
1758be4d234dSJens Axboe 		cache->nr--;
1759be4d234dSJens Axboe 		put_cpu();
1760be4d234dSJens Axboe 		bio_init(bio, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs);
1761be4d234dSJens Axboe 		bio->bi_pool = bs;
1762be4d234dSJens Axboe 		bio_set_flag(bio, BIO_PERCPU_CACHE);
1763be4d234dSJens Axboe 		return bio;
1764be4d234dSJens Axboe 	}
1765be4d234dSJens Axboe 	put_cpu();
1766*609be106SChristoph Hellwig 	bio = bio_alloc_bioset(NULL, nr_vecs, 0, GFP_KERNEL, bs);
1767be4d234dSJens Axboe 	bio_set_flag(bio, BIO_PERCPU_CACHE);
1768be4d234dSJens Axboe 	return bio;
1769be4d234dSJens Axboe }
1770be4d234dSJens Axboe EXPORT_SYMBOL_GPL(bio_alloc_kiocb);
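
/*
 * Illustrative sketch (editor's addition): a polled direct-IO style
 * allocation.  The bioset must have been initialized with
 * BIOSET_PERCPU_CACHE for the cache path to be usable, and the kiocb
 * opts in via IOCB_ALLOC_CACHE.  Names are hypothetical.
 */
static struct bio_set example_cached_bio_set;	/* BIOSET_PERCPU_CACHE */

static struct bio *example_alloc_dio_bio(struct kiocb *iocb,
					 unsigned short nr_vecs)
{
	return bio_alloc_kiocb(iocb, nr_vecs, &example_cached_bio_set);
}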
1771be4d234dSJens Axboe 
1772de76fd89SChristoph Hellwig static int __init init_bio(void)
1773f9c78b2bSJens Axboe {
1774f9c78b2bSJens Axboe 	int i;
1775f9c78b2bSJens Axboe 
1776f9c78b2bSJens Axboe 	bio_integrity_init();
1777de76fd89SChristoph Hellwig 
1778de76fd89SChristoph Hellwig 	for (i = 0; i < ARRAY_SIZE(bvec_slabs); i++) {
1779f9c78b2bSJens Axboe 		struct biovec_slab *bvs = bvec_slabs + i;
1780f9c78b2bSJens Axboe 
1781de76fd89SChristoph Hellwig 		bvs->slab = kmem_cache_create(bvs->name,
1782de76fd89SChristoph Hellwig 				bvs->nr_vecs * sizeof(struct bio_vec), 0,
1783f9c78b2bSJens Axboe 				SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
1784f9c78b2bSJens Axboe 	}
1785f9c78b2bSJens Axboe 
1786be4d234dSJens Axboe 	cpuhp_setup_state_multi(CPUHP_BIO_DEAD, "block/bio:dead", NULL,
1787be4d234dSJens Axboe 					bio_cpu_dead);
1788be4d234dSJens Axboe 
1789f4f8154aSKent Overstreet 	if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS))
1790f9c78b2bSJens Axboe 		panic("bio: can't allocate bios\n");
1791f9c78b2bSJens Axboe 
1792f4f8154aSKent Overstreet 	if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE))
1793f9c78b2bSJens Axboe 		panic("bio: can't create integrity pool\n");
1794f9c78b2bSJens Axboe 
1795f9c78b2bSJens Axboe 	return 0;
1796f9c78b2bSJens Axboe }
1797f9c78b2bSJens Axboe subsys_initcall(init_bio);
1798