// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <linux/blk-cgroup.h>
#include <linux/highmem.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>
#include <linux/xarray.h>

#include <trace/events/block.h>
#include "blk.h"
#include "blk-rq-qos.h"

static struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
} bvec_slabs[] __read_mostly = {
	{ .nr_vecs = 16, .name = "biovec-16" },
	{ .nr_vecs = 64, .name = "biovec-64" },
	{ .nr_vecs = 128, .name = "biovec-128" },
	{ .nr_vecs = BIO_MAX_VECS, .name = "biovec-max" },
};

static struct biovec_slab *biovec_slab(unsigned short nr_vecs)
{
	switch (nr_vecs) {
	/* smaller bios use inline vecs */
	case 5 ... 16:
		return &bvec_slabs[0];
	case 17 ... 64:
		return &bvec_slabs[1];
	case 65 ... 128:
		return &bvec_slabs[2];
	case 129 ... BIO_MAX_VECS:
		return &bvec_slabs[3];
	default:
		BUG();
		return NULL;
	}
}
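
/*
 * Example (illustrative, not part of the original file): a request for 80
 * vectors falls into the "65 ... 128" case above and is served from the
 * biovec-128 slab; bvec_alloc() below then rounds the caller's *nr_vecs up
 * to 128 so the full allocation is usable.
 */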

/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
struct bio_set fs_bio_set;
EXPORT_SYMBOL(fs_bio_set);

/*
 * Our slab pool management
 */
struct bio_slab {
	struct kmem_cache *slab;
	unsigned int slab_ref;
	unsigned int slab_size;
	char name[8];
};
static DEFINE_MUTEX(bio_slab_lock);
static DEFINE_XARRAY(bio_slabs);

static struct bio_slab *create_bio_slab(unsigned int size)
{
	struct bio_slab *bslab = kzalloc(sizeof(*bslab), GFP_KERNEL);

	if (!bslab)
		return NULL;

	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size);
	bslab->slab = kmem_cache_create(bslab->name, size,
			ARCH_KMALLOC_MINALIGN, SLAB_HWCACHE_ALIGN, NULL);
	if (!bslab->slab)
		goto fail_alloc_slab;

	bslab->slab_ref = 1;
	bslab->slab_size = size;

	if (!xa_err(xa_store(&bio_slabs, size, bslab, GFP_KERNEL)))
		return bslab;

	kmem_cache_destroy(bslab->slab);

fail_alloc_slab:
	kfree(bslab);
	return NULL;
}

static inline unsigned int bs_bio_slab_size(struct bio_set *bs)
{
	return bs->front_pad + sizeof(struct bio) + bs->back_pad;
}

static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs)
{
	unsigned int size = bs_bio_slab_size(bs);
	struct bio_slab *bslab;

	mutex_lock(&bio_slab_lock);
	bslab = xa_load(&bio_slabs, size);
	if (bslab)
		bslab->slab_ref++;
	else
		bslab = create_bio_slab(size);
	mutex_unlock(&bio_slab_lock);

	if (bslab)
		return bslab->slab;
	return NULL;
}

static void bio_put_slab(struct bio_set *bs)
{
	struct bio_slab *bslab = NULL;
	unsigned int slab_size = bs_bio_slab_size(bs);

	mutex_lock(&bio_slab_lock);

	bslab = xa_load(&bio_slabs, slab_size);
	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
		goto out;

	WARN_ON_ONCE(bslab->slab != bs->bio_slab);

	WARN_ON(!bslab->slab_ref);

	if (--bslab->slab_ref)
		goto out;

	xa_erase(&bio_slabs, slab_size);

	kmem_cache_destroy(bslab->slab);
	kfree(bslab);

out:
	mutex_unlock(&bio_slab_lock);
}

void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs)
{
	BIO_BUG_ON(nr_vecs > BIO_MAX_VECS);

	if (nr_vecs == BIO_MAX_VECS)
		mempool_free(bv, pool);
	else if (nr_vecs > BIO_INLINE_VECS)
		kmem_cache_free(biovec_slab(nr_vecs)->slab, bv);
}

/*
 * Make the first allocation restricted and don't dump info on allocation
 * failures, since we'll fall back to the mempool in case of failure.
 */
static inline gfp_t bvec_alloc_gfp(gfp_t gfp)
{
	return (gfp & ~(__GFP_DIRECT_RECLAIM | __GFP_IO)) |
		__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
}

struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask)
{
	struct biovec_slab *bvs = biovec_slab(*nr_vecs);

	if (WARN_ON_ONCE(!bvs))
		return NULL;

	/*
	 * Upgrade the nr_vecs request to take full advantage of the allocation.
	 * We also rely on this in the bvec_free path.
	 */
	*nr_vecs = bvs->nr_vecs;

	/*
	 * Try a slab allocation first for all smaller allocations.  If that
	 * fails and __GFP_DIRECT_RECLAIM is set retry with the mempool.
	 * The mempool is sized to handle up to BIO_MAX_VECS entries.
	 */
	if (*nr_vecs < BIO_MAX_VECS) {
		struct bio_vec *bvl;

		bvl = kmem_cache_alloc(bvs->slab, bvec_alloc_gfp(gfp_mask));
		if (likely(bvl) || !(gfp_mask & __GFP_DIRECT_RECLAIM))
			return bvl;
		*nr_vecs = BIO_MAX_VECS;
	}

	return mempool_alloc(pool, gfp_mask);
}

void bio_uninit(struct bio *bio)
{
#ifdef CONFIG_BLK_CGROUP
	if (bio->bi_blkg) {
		blkg_put(bio->bi_blkg);
		bio->bi_blkg = NULL;
	}
#endif
	if (bio_integrity(bio))
		bio_integrity_free(bio);

	bio_crypt_free_ctx(bio);
}
EXPORT_SYMBOL(bio_uninit);

static void bio_free(struct bio *bio)
{
	struct bio_set *bs = bio->bi_pool;
	void *p;

	bio_uninit(bio);

	if (bs) {
		bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs);

		/*
		 * If we have front padding, adjust the bio pointer before freeing
		 */
		p = bio;
		p -= bs->front_pad;

		mempool_free(p, &bs->bio_pool);
	} else {
		/* Bio was allocated by bio_kmalloc() */
		kfree(bio);
	}
}

/*
 * Users of this function have their own bio allocation. Subsequently,
 * they must remember to pair any call to bio_init() with bio_uninit()
 * when IO has completed, or when the bio is released.
 */
void bio_init(struct bio *bio, struct bio_vec *table,
	      unsigned short max_vecs)
{
	memset(bio, 0, sizeof(*bio));
	atomic_set(&bio->__bi_remaining, 1);
	atomic_set(&bio->__bi_cnt, 1);

	bio->bi_io_vec = table;
	bio->bi_max_vecs = max_vecs;
}
EXPORT_SYMBOL(bio_init);
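
/*
 * Illustrative sketch (not part of the original file): a caller-owned bio
 * with a single inline vec, following the pairing rule documented above
 * bio_init().  All names here are hypothetical.
 */
static int __maybe_unused bio_init_onstack_example(struct block_device *bdev,
						   struct page *page)
{
	struct bio bio;
	struct bio_vec bvec;
	int ret;

	bio_init(&bio, &bvec, 1);
	bio_set_dev(&bio, bdev);
	bio.bi_opf = REQ_OP_READ;
	bio.bi_iter.bi_sector = 0;
	__bio_add_page(&bio, page, PAGE_SIZE, 0);
	ret = submit_bio_wait(&bio);
	bio_uninit(&bio);
	return ret;
}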

/**
 * bio_reset - reinitialize a bio
 * @bio:	bio to reset
 *
 * Description:
 *   After calling bio_reset(), @bio will be in the same state as a freshly
 *   allocated bio returned by bio_alloc_bioset() - the only fields that are
 *   preserved are the ones that are initialized by bio_alloc_bioset(). See
 *   comment in struct bio.
 */
void bio_reset(struct bio *bio)
{
	bio_uninit(bio);
	memset(bio, 0, BIO_RESET_BYTES);
	atomic_set(&bio->__bi_remaining, 1);
}
EXPORT_SYMBOL(bio_reset);
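
/*
 * Illustrative sketch (not part of the original file): reusing one
 * caller-owned bio for a sequence of synchronous reads.  bio_reset()
 * preserves bi_io_vec and bi_max_vecs, so pages can be re-added after each
 * reset.  All names here are hypothetical.
 */
static int __maybe_unused bio_reset_loop_example(struct block_device *bdev,
						 struct page *page, int nr)
{
	struct bio bio;
	struct bio_vec bvec;
	int i, ret = 0;

	bio_init(&bio, &bvec, 1);
	for (i = 0; i < nr && !ret; i++) {
		bio_reset(&bio);
		bio_set_dev(&bio, bdev);
		bio.bi_opf = REQ_OP_READ;
		bio.bi_iter.bi_sector = i * (PAGE_SIZE >> 9);
		__bio_add_page(&bio, page, PAGE_SIZE, 0);
		ret = submit_bio_wait(&bio);
	}
	bio_uninit(&bio);
	return ret;
}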

static struct bio *__bio_chain_endio(struct bio *bio)
{
	struct bio *parent = bio->bi_private;

	if (!parent->bi_status)
		parent->bi_status = bio->bi_status;
	bio_put(bio);
	return parent;
}

static void bio_chain_endio(struct bio *bio)
{
	bio_endio(__bio_chain_endio(bio));
}

/**
 * bio_chain - chain bio completions
 * @bio: the target bio
 * @parent: the parent bio of @bio
 *
 * The caller won't have a bi_end_io called when @bio completes - instead,
 * @parent's bi_end_io won't be called until both @parent and @bio have
 * completed; the chained bio will also be freed when it completes.
 *
 * The caller must not set bi_private or bi_end_io in @bio.
 */
void bio_chain(struct bio *bio, struct bio *parent)
{
	BUG_ON(bio->bi_private || bio->bi_end_io);

	bio->bi_private = parent;
	bio->bi_end_io	= bio_chain_endio;
	bio_inc_remaining(parent);
}
EXPORT_SYMBOL(bio_chain);
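
/*
 * Illustrative sketch (not part of the original file): the typical
 * split-and-chain pattern used by drivers, assuming @bs is a bio_set owned
 * by the caller.  The parent's bi_end_io runs only once both halves have
 * completed.
 */
static void __maybe_unused bio_chain_split_example(struct bio *bio,
		int sectors, struct bio_set *bs)
{
	struct bio *split = bio_split(bio, sectors, GFP_NOIO, bs);

	bio_chain(split, bio);
	submit_bio_noacct(split);
	submit_bio_noacct(bio);
}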

static void bio_alloc_rescue(struct work_struct *work)
{
	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
	struct bio *bio;

	while (1) {
		spin_lock(&bs->rescue_lock);
		bio = bio_list_pop(&bs->rescue_list);
		spin_unlock(&bs->rescue_lock);

		if (!bio)
			break;

		submit_bio_noacct(bio);
	}
}

static void punt_bios_to_rescuer(struct bio_set *bs)
{
	struct bio_list punt, nopunt;
	struct bio *bio;

	if (WARN_ON_ONCE(!bs->rescue_workqueue))
		return;
	/*
	 * In order to guarantee forward progress we must punt only bios that
	 * were allocated from this bio_set; otherwise, if there was a bio on
	 * there for a stacking driver higher up in the stack, processing it
	 * could require allocating bios from this bio_set, and doing that from
	 * our own rescuer would be bad.
	 *
	 * Since bio lists are singly linked, pop them all instead of trying to
	 * remove from the middle of the list:
	 */

	bio_list_init(&punt);
	bio_list_init(&nopunt);

	while ((bio = bio_list_pop(&current->bio_list[0])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[0] = nopunt;

	bio_list_init(&nopunt);
	while ((bio = bio_list_pop(&current->bio_list[1])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[1] = nopunt;

	spin_lock(&bs->rescue_lock);
	bio_list_merge(&bs->rescue_list, &punt);
	spin_unlock(&bs->rescue_lock);

	queue_work(bs->rescue_workqueue, &bs->rescue_work);
}

/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask:   the GFP_* mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 * @bs:		the bio_set to allocate from.
 *
 * Allocate a bio from the mempools in @bs.
 *
 * If %__GFP_DIRECT_RECLAIM is set then bio_alloc will always be able to
 * allocate a bio.  This is due to the mempool guarantees.  To make this work,
 * callers must never allocate more than 1 bio at a time from the general pool.
 * Callers that need to allocate more than 1 bio must always submit the
 * previously allocated bio for IO before attempting to allocate a new one.
 * Failure to do so can cause deadlocks under memory pressure.
 *
 * Note that when running under submit_bio_noacct() (i.e. any block driver),
 * bios are not submitted until after you return - see the code in
 * submit_bio_noacct() that converts recursion into iteration, to prevent
 * stack overflows.
 *
 * This would normally mean allocating multiple bios under submit_bio_noacct()
 * would be susceptible to deadlocks, but we have deadlock avoidance code that
 * resubmits any blocked bios from a rescuer thread.
 *
 * However, we do not guarantee forward progress for allocations from other
 * mempools. Doing multiple allocations from the same mempool under
 * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
 * for per bio allocations.
 *
 * Returns: Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned short nr_iovecs,
			     struct bio_set *bs)
{
	gfp_t saved_gfp = gfp_mask;
	struct bio *bio;
	void *p;

	/* should not use nobvec bioset for nr_iovecs > 0 */
	if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_iovecs > 0))
		return NULL;

	/*
	 * submit_bio_noacct() converts recursion to iteration; this means if
	 * we're running beneath it, any bios we allocate and submit will not be
	 * submitted (and thus freed) until after we return.
	 *
	 * This exposes us to a potential deadlock if we allocate multiple bios
	 * from the same bio_set() while running underneath submit_bio_noacct().
	 * If we were to allocate multiple bios (say a stacking block driver
	 * that was splitting bios), we would deadlock if we exhausted the
	 * mempool's reserve.
	 *
	 * We solve this, and guarantee forward progress, with a rescuer
	 * workqueue per bio_set. If we go to allocate and there are bios on
	 * current->bio_list, we first try the allocation without
	 * __GFP_DIRECT_RECLAIM; if that fails, we punt those bios we would be
	 * blocking to the rescuer workqueue before we retry with the original
	 * gfp_flags.
	 */
	if (current->bio_list &&
	    (!bio_list_empty(&current->bio_list[0]) ||
	     !bio_list_empty(&current->bio_list[1])) &&
	    bs->rescue_workqueue)
		gfp_mask &= ~__GFP_DIRECT_RECLAIM;

	p = mempool_alloc(&bs->bio_pool, gfp_mask);
	if (!p && gfp_mask != saved_gfp) {
		punt_bios_to_rescuer(bs);
		gfp_mask = saved_gfp;
		p = mempool_alloc(&bs->bio_pool, gfp_mask);
	}
	if (unlikely(!p))
		return NULL;

	bio = p + bs->front_pad;
	if (nr_iovecs > BIO_INLINE_VECS) {
		struct bio_vec *bvl = NULL;

		bvl = bvec_alloc(&bs->bvec_pool, &nr_iovecs, gfp_mask);
		if (!bvl && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			bvl = bvec_alloc(&bs->bvec_pool, &nr_iovecs, gfp_mask);
		}
		if (unlikely(!bvl))
			goto err_free;

		bio_init(bio, bvl, nr_iovecs);
	} else if (nr_iovecs) {
		bio_init(bio, bio->bi_inline_vecs, BIO_INLINE_VECS);
	} else {
		bio_init(bio, NULL, 0);
	}

	bio->bi_pool = bs;
	return bio;

err_free:
	mempool_free(p, &bs->bio_pool);
	return NULL;
}
EXPORT_SYMBOL(bio_alloc_bioset);
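
/*
 * Illustrative sketch (not part of the original file): allocating from a
 * driver-owned bio_set.  With __GFP_DIRECT_RECLAIM set (as in GFP_NOIO) the
 * mempool guarantee applies, so the NULL check only matters for atomic
 * allocations.  All names here are hypothetical.
 */
static __maybe_unused struct bio *bio_alloc_bioset_example(struct bio_set *bs)
{
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, 4, bs);

	if (!bio)	/* cannot happen for GFP_NOIO, kept for clarity */
		return NULL;
	bio->bi_opf = REQ_OP_WRITE;
	return bio;
}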

/**
 * bio_kmalloc - kmalloc a bio for I/O
 * @gfp_mask:   the GFP_* mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 *
 * Use kmalloc to allocate and initialize a bio.
 *
 * Returns: Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs)
{
	struct bio *bio;

	if (nr_iovecs > UIO_MAXIOV)
		return NULL;

	bio = kmalloc(struct_size(bio, bi_inline_vecs, nr_iovecs), gfp_mask);
	if (unlikely(!bio))
		return NULL;
	bio_init(bio, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs);
	bio->bi_pool = NULL;
	return bio;
}
EXPORT_SYMBOL(bio_kmalloc);
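
/*
 * Illustrative sketch (not part of the original file): a bio_kmalloc() user.
 * The bio has no bio_set, so the final bio_put() takes the kfree() branch in
 * bio_free() above.  All names here are hypothetical.
 */
static void __maybe_unused bio_kmalloc_example(struct page *page)
{
	struct bio *bio = bio_kmalloc(GFP_KERNEL, 1);

	if (!bio)
		return;
	__bio_add_page(bio, page, PAGE_SIZE, 0);
	bio_put(bio);
}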

void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
{
	unsigned long flags;
	struct bio_vec bv;
	struct bvec_iter iter;

	__bio_for_each_segment(bv, bio, iter, start) {
		char *data = bvec_kmap_irq(&bv, &flags);
		memset(data, 0, bv.bv_len);
		flush_dcache_page(bv.bv_page);
		bvec_kunmap_irq(data, &flags);
	}
}
EXPORT_SYMBOL(zero_fill_bio_iter);

/**
 * bio_truncate - truncate the bio to the new size @new_size
 * @bio:	the bio to be truncated
 * @new_size:	new size for truncating the bio
 *
 * Description:
 *   Truncate the bio to the new size @new_size. If bio_op(bio) is
 *   REQ_OP_READ, zero the truncated part. This function should only
 *   be used for handling corner cases, such as bio eod.
 */
void bio_truncate(struct bio *bio, unsigned new_size)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned int done = 0;
	bool truncated = false;

	if (new_size >= bio->bi_iter.bi_size)
		return;

	if (bio_op(bio) != REQ_OP_READ)
		goto exit;

	bio_for_each_segment(bv, bio, iter) {
		if (done + bv.bv_len > new_size) {
			unsigned offset;

			if (!truncated)
				offset = new_size - done;
			else
				offset = 0;
			zero_user(bv.bv_page, offset, bv.bv_len - offset);
			truncated = true;
		}
		done += bv.bv_len;
	}

 exit:
	/*
	 * Don't touch the bvec table here and make it really immutable, since
	 * fs bio users have to retrieve all pages via bio_for_each_segment_all
	 * in their ->bi_end_io() callback.
	 *
	 * It is enough to truncate the bio by updating .bi_size, since drivers
	 * can build correct bvecs from the updated .bi_size.
	 */
	bio->bi_iter.bi_size = new_size;
}

/**
 * guard_bio_eod - truncate a BIO to fit the block device
 * @bio:	bio to truncate
 *
 * This allows us to do IO even on the odd last sectors of a device, even if the
 * block size is some multiple of the physical sector size.
 *
 * We'll just truncate the bio to the size of the device, and clear the end of
 * the buffer head manually.  Truly out-of-range accesses will turn into actual
 * I/O errors, this only handles the "we need to be able to do I/O at the final
 * sector" case.
 */
void guard_bio_eod(struct bio *bio)
{
	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);

	if (!maxsector)
		return;

	/*
	 * If the *whole* IO is past the end of the device,
	 * let it through, and the IO layer will turn it into
	 * an EIO.
	 */
	if (unlikely(bio->bi_iter.bi_sector >= maxsector))
		return;

	maxsector -= bio->bi_iter.bi_sector;
	if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
		return;

	bio_truncate(bio, maxsector << 9);
}

/**
 * bio_put - release a reference to a bio
 * @bio:   bio to release reference to
 *
 * Description:
 *   Put a reference to a &struct bio, either one you have gotten with
 *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
 **/
void bio_put(struct bio *bio)
{
	if (!bio_flagged(bio, BIO_REFFED))
		bio_free(bio);
	else {
		BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));

		/*
		 * last put frees it
		 */
		if (atomic_dec_and_test(&bio->__bi_cnt))
			bio_free(bio);
	}
}
EXPORT_SYMBOL(bio_put);
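
/*
 * Illustrative sketch (not part of the original file): holding an extra
 * reference across submission.  bio_get() sets BIO_REFFED, so the bio_put()
 * calls above take the refcounted path and the bio stays valid until our
 * own put, even if the completion path has already dropped its reference.
 */
static void __maybe_unused bio_get_put_example(struct bio *bio)
{
	bio_get(bio);
	submit_bio(bio);
	/* the bio may have completed already; our reference keeps it alive */
	bio_put(bio);
}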

/**
 * 	__bio_clone_fast - clone a bio that shares the original bio's biovec
 * 	@bio: destination bio
 * 	@bio_src: bio to clone
 *
 *	Clone a &bio into @bio. The caller owns @bio, but not the actual data
 *	it points to, which stays shared with @bio_src.
 *
 * 	Caller must ensure that @bio_src is not freed before @bio.
 */
void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
{
	WARN_ON_ONCE(bio->bi_pool && bio->bi_max_vecs);

	/*
	 * most users will be overriding ->bi_bdev with a new target,
	 * so we don't set nor calculate new physical/hw segment counts here
	 */
	bio->bi_bdev = bio_src->bi_bdev;
	bio_set_flag(bio, BIO_CLONED);
	if (bio_flagged(bio_src, BIO_THROTTLED))
		bio_set_flag(bio, BIO_THROTTLED);
	if (bio_flagged(bio_src, BIO_REMAPPED))
		bio_set_flag(bio, BIO_REMAPPED);
	bio->bi_opf = bio_src->bi_opf;
	bio->bi_ioprio = bio_src->bi_ioprio;
	bio->bi_write_hint = bio_src->bi_write_hint;
	bio->bi_iter = bio_src->bi_iter;
	bio->bi_io_vec = bio_src->bi_io_vec;

	bio_clone_blkg_association(bio, bio_src);
	blkcg_bio_issue_init(bio);
}
EXPORT_SYMBOL(__bio_clone_fast);

/**
 *	bio_clone_fast - clone a bio that shares the original bio's biovec
 *	@bio: bio to clone
 *	@gfp_mask: allocation priority
 *	@bs: bio_set to allocate from
 *
 * 	Like __bio_clone_fast, only also allocates the returned bio
 */
struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
{
	struct bio *b;

	b = bio_alloc_bioset(gfp_mask, 0, bs);
	if (!b)
		return NULL;

	__bio_clone_fast(b, bio);

	if (bio_crypt_clone(b, bio, gfp_mask) < 0)
		goto err_put;

	if (bio_integrity(bio) &&
	    bio_integrity_clone(b, bio, gfp_mask) < 0)
		goto err_put;

	return b;

err_put:
	bio_put(b);
	return NULL;
}
EXPORT_SYMBOL(bio_clone_fast);
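
/*
 * Illustrative sketch (not part of the original file): how a stacking driver
 * might clone an incoming bio and redirect it at a different device, the way
 * device mapper targets do.  @target, @bs and both helpers are hypothetical.
 */
static void example_clone_endio(struct bio *clone)
{
	struct bio *orig = clone->bi_private;

	orig->bi_status = clone->bi_status;
	bio_put(clone);
	bio_endio(orig);
}

static void __maybe_unused bio_clone_fast_example(struct bio *bio,
		struct block_device *target, struct bio_set *bs)
{
	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, bs);

	bio_set_dev(clone, target);
	clone->bi_private = bio;
	clone->bi_end_io = example_clone_endio;
	submit_bio_noacct(clone);
}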

const char *bio_devname(struct bio *bio, char *buf)
{
	return bdevname(bio->bi_bdev, buf);
}
EXPORT_SYMBOL(bio_devname);

static inline bool page_is_mergeable(const struct bio_vec *bv,
		struct page *page, unsigned int len, unsigned int off,
		bool *same_page)
{
	size_t bv_end = bv->bv_offset + bv->bv_len;
	phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
	phys_addr_t page_addr = page_to_phys(page);

	if (vec_end_addr + 1 != page_addr + off)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
		return false;

	*same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
	if (*same_page)
		return true;
	return (bv->bv_page + bv_end / PAGE_SIZE) == (page + off / PAGE_SIZE);
}

/*
 * Try to merge a page into a segment, while obeying the hardware segment
 * size limit.  This is not for normal read/write bios, but for passthrough
 * or Zone Append operations that we can't split.
 */
static bool bio_try_merge_hw_seg(struct request_queue *q, struct bio *bio,
				 struct page *page, unsigned len,
				 unsigned offset, bool *same_page)
{
	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
	phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;

	if ((addr1 | mask) != (addr2 | mask))
		return false;
	if (bv->bv_len + len > queue_max_segment_size(q))
		return false;
	return __bio_try_merge_page(bio, page, len, offset, same_page);
}

/**
 * bio_add_hw_page - attempt to add a page to a bio with hw constraints
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 * @max_sectors: maximum number of sectors that can be added
 * @same_page: return if the segment has been merged inside the same page
 *
 * Add a page to a bio while respecting the hardware max_sectors, max_segment
 * and gap limitations.
 */
int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page)
{
	struct bio_vec *bvec;

	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return 0;

	if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors)
		return 0;

	if (bio->bi_vcnt > 0) {
		if (bio_try_merge_hw_seg(q, bio, page, len, offset, same_page))
			return len;

		/*
		 * If the queue doesn't support SG gaps and adding this segment
		 * would create a gap, disallow it.
		 */
		bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
		if (bvec_gap_to_prev(q, bvec, offset))
			return 0;
	}

	if (bio_full(bio, len))
		return 0;

	if (bio->bi_vcnt >= queue_max_segments(q))
		return 0;

	bvec = &bio->bi_io_vec[bio->bi_vcnt];
	bvec->bv_page = page;
	bvec->bv_len = len;
	bvec->bv_offset = offset;
	bio->bi_vcnt++;
	bio->bi_iter.bi_size += len;
	return len;
}

/**
 * bio_add_pc_page	- attempt to add page to passthrough bio
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist. This can fail for a
 * number of reasons, such as the bio being full or target block device
 * limitations. The target block device must allow bio's up to PAGE_SIZE,
 * so it is always possible to add a single page to an empty bio.
 *
 * This should only be used by passthrough bios.
 */
int bio_add_pc_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset)
{
	bool same_page = false;
	return bio_add_hw_page(q, bio, page, len, offset,
			queue_max_hw_sectors(q), &same_page);
}
EXPORT_SYMBOL(bio_add_pc_page);

/**
 * bio_add_zone_append_page - attempt to add page to zone-append bio
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist of a bio that will be submitted
 * for a zone-append request. This can fail for a number of reasons, such as
 * the bio being full, the target block device not being a zoned block device,
 * or other limitations of the target block device. The target block device
 * must allow bio's up to PAGE_SIZE, so it is always possible to add a single
 * page to an empty bio.
 *
 * Returns: number of bytes added to the bio, or 0 in case of a failure.
 */
int bio_add_zone_append_page(struct bio *bio, struct page *page,
			     unsigned int len, unsigned int offset)
{
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
	bool same_page = false;

	if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_ZONE_APPEND))
		return 0;

	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
		return 0;

	return bio_add_hw_page(q, bio, page, len, offset,
			       queue_max_zone_append_sectors(q), &same_page);
}
EXPORT_SYMBOL_GPL(bio_add_zone_append_page);

/**
 * __bio_try_merge_page - try appending data to an existing bvec.
 * @bio: destination bio
 * @page: start page to add
 * @len: length of the data to add
 * @off: offset of the data relative to @page
 * @same_page: return if the segment has been merged inside the same page
 *
 * Try to add the data at @page + @off to the last bvec of @bio.  This is a
 * useful optimisation for file systems with a block size smaller than the
 * page size.
 *
 * Warn if (@len, @off) crosses pages when @same_page is true.
 *
 * Return %true on success or %false on failure.
 */
bool __bio_try_merge_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off, bool *same_page)
{
	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return false;

	if (bio->bi_vcnt > 0) {
		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page_is_mergeable(bv, page, len, off, same_page)) {
			if (bio->bi_iter.bi_size > UINT_MAX - len) {
				*same_page = false;
				return false;
			}
			bv->bv_len += len;
			bio->bi_iter.bi_size += len;
			return true;
		}
	}
	return false;
}
EXPORT_SYMBOL_GPL(__bio_try_merge_page);

/**
 * __bio_add_page - add page(s) to a bio in a new segment
 * @bio: destination bio
 * @page: start page to add
 * @len: length of the data to add, may cross pages
 * @off: offset of the data relative to @page, may cross pages
 *
 * Add the data at @page + @off to @bio as a new bvec.  The caller must ensure
 * that @bio has space for another bvec.
 */
void __bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off)
{
	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];

	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	WARN_ON_ONCE(bio_full(bio, len));

	bv->bv_page = page;
	bv->bv_offset = off;
	bv->bv_len = len;

	bio->bi_iter.bi_size += len;
	bio->bi_vcnt++;

	if (!bio_flagged(bio, BIO_WORKINGSET) && unlikely(PageWorkingset(page)))
		bio_set_flag(bio, BIO_WORKINGSET);
}
EXPORT_SYMBOL_GPL(__bio_add_page);

/**
 *	bio_add_page	-	attempt to add page(s) to bio
 *	@bio: destination bio
 *	@page: start page to add
 *	@len: vec entry length, may cross pages
 *	@offset: vec entry offset relative to @page, may cross pages
 *
 *	Attempt to add page(s) to the bio_vec maplist. This will only fail
 *	if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
 */
int bio_add_page(struct bio *bio, struct page *page,
		 unsigned int len, unsigned int offset)
{
	bool same_page = false;

	if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
		if (bio_full(bio, len))
			return 0;
		__bio_add_page(bio, page, len, offset);
	}
	return len;
}
EXPORT_SYMBOL(bio_add_page);
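
/*
 * Illustrative sketch (not part of the original file): filling a bio from an
 * array of pages until either the pages or the bio's vec slots run out.
 * All names here are hypothetical.
 */
static unsigned int __maybe_unused bio_add_page_loop_example(struct bio *bio,
		struct page **pages, unsigned int nr_pages)
{
	unsigned int i;

	for (i = 0; i < nr_pages; i++)
		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
			break;
	return i;	/* number of pages actually added */
}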

void bio_release_pages(struct bio *bio, bool mark_dirty)
{
	struct bvec_iter_all iter_all;
	struct bio_vec *bvec;

	if (bio_flagged(bio, BIO_NO_PAGE_REF))
		return;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		if (mark_dirty && !PageCompound(bvec->bv_page))
			set_page_dirty_lock(bvec->bv_page);
		put_page(bvec->bv_page);
	}
}
EXPORT_SYMBOL_GPL(bio_release_pages);

static void __bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
{
	WARN_ON_ONCE(bio->bi_max_vecs);

	bio->bi_vcnt = iter->nr_segs;
	bio->bi_io_vec = (struct bio_vec *)iter->bvec;
	bio->bi_iter.bi_bvec_done = iter->iov_offset;
	bio->bi_iter.bi_size = iter->count;
	bio_set_flag(bio, BIO_NO_PAGE_REF);
	bio_set_flag(bio, BIO_CLONED);
}

static int bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
{
	__bio_iov_bvec_set(bio, iter);
	iov_iter_advance(iter, iter->count);
	return 0;
}

static int bio_iov_bvec_set_append(struct bio *bio, struct iov_iter *iter)
{
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
	struct iov_iter i = *iter;

	iov_iter_truncate(&i, queue_max_zone_append_sectors(q) << 9);
	__bio_iov_bvec_set(bio, &i);
	iov_iter_advance(iter, i.count);
	return 0;
}

#define PAGE_PTRS_PER_BVEC     (sizeof(struct bio_vec) / sizeof(struct page *))

/**
 * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be mapped
 *
 * Pins pages from *iter and appends them to @bio's bvec array. The
 * pages will have to be released using put_page() when done.
 * For multi-segment *iter, this function only adds pages from the
 * next non-empty segment of the iov iterator.
 */
static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
	struct page **pages = (struct page **)bv;
	bool same_page = false;
	ssize_t size, left;
	unsigned len, i;
	size_t offset;

	/*
	 * Move page array up in the allocated memory for the bio vecs as far as
	 * possible so that we can start filling biovecs from the beginning
	 * without overwriting the temporary page array.
	 */
	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);

	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
	if (unlikely(size <= 0))
		return size ? size : -EFAULT;

	for (left = size, i = 0; left > 0; left -= len, i++) {
		struct page *page = pages[i];

		len = min_t(size_t, PAGE_SIZE - offset, left);

		if (__bio_try_merge_page(bio, page, len, offset, &same_page)) {
			if (same_page)
				put_page(page);
		} else {
			if (WARN_ON_ONCE(bio_full(bio, len)))
				return -EINVAL;
			__bio_add_page(bio, page, len, offset);
		}
		offset = 0;
	}

	iov_iter_advance(iter, size);
	return 0;
}

static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
{
	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
	unsigned int max_append_sectors = queue_max_zone_append_sectors(q);
	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
	struct page **pages = (struct page **)bv;
	ssize_t size, left;
	unsigned len, i;
	size_t offset;
	int ret = 0;

	if (WARN_ON_ONCE(!max_append_sectors))
		return 0;

	/*
	 * Move page array up in the allocated memory for the bio vecs as far as
	 * possible so that we can start filling biovecs from the beginning
	 * without overwriting the temporary page array.
	 */
	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);

	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
	if (unlikely(size <= 0))
		return size ? size : -EFAULT;

	for (left = size, i = 0; left > 0; left -= len, i++) {
		struct page *page = pages[i];
		bool same_page = false;

		len = min_t(size_t, PAGE_SIZE - offset, left);
		if (bio_add_hw_page(q, bio, page, len, offset,
				max_append_sectors, &same_page) != len) {
			ret = -EINVAL;
			break;
		}
		if (same_page)
			put_page(page);
		offset = 0;
	}

	iov_iter_advance(iter, size - left);
	return ret;
}
10830512a75bSKeith Busch 
108417d51b10SMartin Wilck /**
10856d0c48aeSJens Axboe  * bio_iov_iter_get_pages - add user or kernel pages to a bio
108617d51b10SMartin Wilck  * @bio: bio to add pages to
10876d0c48aeSJens Axboe  * @iter: iov iterator describing the region to be added
108817d51b10SMartin Wilck  *
10896d0c48aeSJens Axboe  * This takes either an iterator pointing to user memory, or one pointing to
10906d0c48aeSJens Axboe  * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
10916d0c48aeSJens Axboe  * map them into the kernel. On IO completion, the caller should put those
1092c42bca92SPavel Begunkov  * pages. For bvec-based iterators, bio_iov_iter_get_pages() uses the provided
1093c42bca92SPavel Begunkov  * bvecs rather than copying them, so anyone issuing kiocb-based IO needs to
1094c42bca92SPavel Begunkov  * ensure the bvecs and pages stay referenced until the submitted I/O is
1095c42bca92SPavel Begunkov  * completed by a call to ->ki_complete() or until submission returns with an
1096c42bca92SPavel Begunkov  * error other than -EIOCBQUEUED. The caller needs to check whether the bio is
1097c42bca92SPavel Begunkov  * flagged BIO_NO_PAGE_REF on IO completion; if not, the pages should be released.
10986d0c48aeSJens Axboe  *
109917d51b10SMartin Wilck  * The function tries, but does not guarantee, to pin as many pages as
11005cd3ddc1SMauro Carvalho Chehab  * fit into the bio, or are requested in @iter, whichever is smaller. If
11016d0c48aeSJens Axboe  * MM encounters an error pinning the requested pages, it stops. An error
11026d0c48aeSJens Axboe  * is returned only if no pages could be pinned.
11030cf41e5eSPavel Begunkov  *
11040cf41e5eSPavel Begunkov  * It's intended for direct IO, so doesn't do PSI tracking, the caller is
11050cf41e5eSPavel Begunkov  * responsible for setting BIO_WORKINGSET if necessary.
110617d51b10SMartin Wilck  */
110717d51b10SMartin Wilck int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
110817d51b10SMartin Wilck {
1109c42bca92SPavel Begunkov 	int ret = 0;
111014eacf12SChristoph Hellwig 
1111c42bca92SPavel Begunkov 	if (iov_iter_is_bvec(iter)) {
1112*7de55b7dSJohannes Thumshirn 		if (bio_op(bio) == REQ_OP_ZONE_APPEND)
1113*7de55b7dSJohannes Thumshirn 			return bio_iov_bvec_set_append(bio, iter);
1114ed97ce5eSChristoph Hellwig 		return bio_iov_bvec_set(bio, iter);
111586004515SChristoph Hellwig 	}
111617d51b10SMartin Wilck 
111717d51b10SMartin Wilck 	do {
1118c42bca92SPavel Begunkov 		if (bio_op(bio) == REQ_OP_ZONE_APPEND)
11190512a75bSKeith Busch 			ret = __bio_iov_append_get_pages(bio, iter);
11206d0c48aeSJens Axboe 		else
11216d0c48aeSJens Axboe 			ret = __bio_iov_iter_get_pages(bio, iter);
112279d08f89SMing Lei 	} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));
112317d51b10SMartin Wilck 
11240cf41e5eSPavel Begunkov 	/* don't account direct I/O as memory stall */
11250cf41e5eSPavel Begunkov 	bio_clear_flag(bio, BIO_WORKINGSET);
112614eacf12SChristoph Hellwig 	return bio->bi_vcnt ? 0 : ret;
112717d51b10SMartin Wilck }
112829b2a3aaSJohannes Thumshirn EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
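
/*
 * Usage sketch (illustration only, not part of bio.c): one way a direct-IO
 * style caller might feed a user buffer to bio_iov_iter_get_pages() and
 * write it out synchronously.  The helper name, the single-bio assumption
 * (the buffer is assumed to fit in one bio) and the error handling are all
 * hypothetical.
 */
static int example_dio_write(struct block_device *bdev, void __user *ubuf,
			     size_t len, sector_t sector)
{
	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
	struct iov_iter iter;
	struct bio *bio;
	int ret;

	iov_iter_init(&iter, WRITE, &iov, 1, len);

	bio = bio_alloc(GFP_KERNEL, iov_iter_npages(&iter, BIO_MAX_VECS));
	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;

	/* Pin the user pages and add them to the bio as bvecs. */
	ret = bio_iov_iter_get_pages(bio, &iter);
	if (!ret)
		ret = submit_bio_wait(bio);

	/* Drop the page references taken above (no-op for BIO_NO_PAGE_REF). */
	bio_release_pages(bio, false);
	bio_put(bio);
	return ret;
}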
11292cefe4dbSKent Overstreet 
11304246a0b6SChristoph Hellwig static void submit_bio_wait_endio(struct bio *bio)
1131f9c78b2bSJens Axboe {
113265e53aabSChristoph Hellwig 	complete(bio->bi_private);
1133f9c78b2bSJens Axboe }
1134f9c78b2bSJens Axboe 
1135f9c78b2bSJens Axboe /**
1136f9c78b2bSJens Axboe  * submit_bio_wait - submit a bio, and wait until it completes
1137f9c78b2bSJens Axboe  * @bio: The &struct bio which describes the I/O
1138f9c78b2bSJens Axboe  *
1139f9c78b2bSJens Axboe  * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
1140f9c78b2bSJens Axboe  * bio_endio() on failure.
11413d289d68SJan Kara  *
11423d289d68SJan Kara  * WARNING: Unlike how submit_bio() is usually used, this function does not
11433d289d68SJan Kara  * consume the bio reference. The caller must drop the reference on their
11443d289d68SJan Kara  * own.
1145f9c78b2bSJens Axboe  */
11464e49ea4aSMike Christie int submit_bio_wait(struct bio *bio)
1147f9c78b2bSJens Axboe {
1148309dca30SChristoph Hellwig 	DECLARE_COMPLETION_ONSTACK_MAP(done,
1149309dca30SChristoph Hellwig 			bio->bi_bdev->bd_disk->lockdep_map);
1150de6a78b6SMing Lei 	unsigned long hang_check;
1151f9c78b2bSJens Axboe 
115265e53aabSChristoph Hellwig 	bio->bi_private = &done;
1153f9c78b2bSJens Axboe 	bio->bi_end_io = submit_bio_wait_endio;
11541eff9d32SJens Axboe 	bio->bi_opf |= REQ_SYNC;
11554e49ea4aSMike Christie 	submit_bio(bio);
1156de6a78b6SMing Lei 
1157de6a78b6SMing Lei 	/* Prevent hang_check timer from firing at us during very long I/O */
1158de6a78b6SMing Lei 	hang_check = sysctl_hung_task_timeout_secs;
1159de6a78b6SMing Lei 	if (hang_check)
1160de6a78b6SMing Lei 		while (!wait_for_completion_io_timeout(&done,
1161de6a78b6SMing Lei 					hang_check * (HZ/2)))
1162de6a78b6SMing Lei 			;
1163de6a78b6SMing Lei 	else
116465e53aabSChristoph Hellwig 		wait_for_completion_io(&done);
1165f9c78b2bSJens Axboe 
116665e53aabSChristoph Hellwig 	return blk_status_to_errno(bio->bi_status);
1167f9c78b2bSJens Axboe }
1168f9c78b2bSJens Axboe EXPORT_SYMBOL(submit_bio_wait);
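
/*
 * Usage sketch (illustration only): a synchronous single-page read.  Note
 * that, per the warning above, submit_bio_wait() does not consume the bio
 * reference - the caller still owns the bio and must put it.  The function
 * name and parameters are hypothetical.
 */
static int example_read_page(struct block_device *bdev, sector_t sector,
			     struct page *page)
{
	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
	int ret;

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_READ;
	bio_add_page(bio, page, PAGE_SIZE, 0);

	ret = submit_bio_wait(bio);
	bio_put(bio);		/* our reference; the wait did not drop it */
	return ret;
}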
1169f9c78b2bSJens Axboe 
1170f9c78b2bSJens Axboe /**
1171f9c78b2bSJens Axboe  * bio_advance - increment/complete a bio by some number of bytes
1172f9c78b2bSJens Axboe  * @bio:	bio to advance
1173f9c78b2bSJens Axboe  * @bytes:	number of bytes to complete
1174f9c78b2bSJens Axboe  *
1175f9c78b2bSJens Axboe  * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
1176f9c78b2bSJens Axboe  * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
1177f9c78b2bSJens Axboe  * be updated on the last bvec as well.
1178f9c78b2bSJens Axboe  *
1179f9c78b2bSJens Axboe  * @bio will then represent the remaining, uncompleted portion of the io.
1180f9c78b2bSJens Axboe  */
1181f9c78b2bSJens Axboe void bio_advance(struct bio *bio, unsigned bytes)
1182f9c78b2bSJens Axboe {
1183f9c78b2bSJens Axboe 	if (bio_integrity(bio))
1184f9c78b2bSJens Axboe 		bio_integrity_advance(bio, bytes);
1185f9c78b2bSJens Axboe 
1186a892c8d5SSatya Tangirala 	bio_crypt_advance(bio, bytes);
1187f9c78b2bSJens Axboe 	bio_advance_iter(bio, &bio->bi_iter, bytes);
1188f9c78b2bSJens Axboe }
1189f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_advance);
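
/*
 * Usage sketch (illustration only): a driver-style partial completion.
 * After 'done' bytes have been transferred, bio_advance() makes the bio
 * describe only the remaining portion, which can then be completed or
 * resubmitted.  The helper name is hypothetical.
 */
static void example_complete_partial(struct bio *bio, unsigned int done)
{
	bio_advance(bio, done);
	if (!bio->bi_iter.bi_size)
		bio_endio(bio);		/* everything transferred */
	else
		submit_bio(bio);	/* retry the remainder */
}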
1190f9c78b2bSJens Axboe 
119145db54d5SKent Overstreet void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
119245db54d5SKent Overstreet 			struct bio *src, struct bvec_iter *src_iter)
1193f9c78b2bSJens Axboe {
1194f9c78b2bSJens Axboe 	struct bio_vec src_bv, dst_bv;
1195f9c78b2bSJens Axboe 	void *src_p, *dst_p;
1196f9c78b2bSJens Axboe 	unsigned bytes;
1197f9c78b2bSJens Axboe 
119845db54d5SKent Overstreet 	while (src_iter->bi_size && dst_iter->bi_size) {
119945db54d5SKent Overstreet 		src_bv = bio_iter_iovec(src, *src_iter);
120045db54d5SKent Overstreet 		dst_bv = bio_iter_iovec(dst, *dst_iter);
120145db54d5SKent Overstreet 
120245db54d5SKent Overstreet 		bytes = min(src_bv.bv_len, dst_bv.bv_len);
120345db54d5SKent Overstreet 
120445db54d5SKent Overstreet 		src_p = kmap_atomic(src_bv.bv_page);
120545db54d5SKent Overstreet 		dst_p = kmap_atomic(dst_bv.bv_page);
120645db54d5SKent Overstreet 
120745db54d5SKent Overstreet 		memcpy(dst_p + dst_bv.bv_offset,
120845db54d5SKent Overstreet 		       src_p + src_bv.bv_offset,
120945db54d5SKent Overstreet 		       bytes);
121045db54d5SKent Overstreet 
121145db54d5SKent Overstreet 		kunmap_atomic(dst_p);
121245db54d5SKent Overstreet 		kunmap_atomic(src_p);
121345db54d5SKent Overstreet 
12146e6e811dSKent Overstreet 		flush_dcache_page(dst_bv.bv_page);
12156e6e811dSKent Overstreet 
121622b56c29SPavel Begunkov 		bio_advance_iter_single(src, src_iter, bytes);
121722b56c29SPavel Begunkov 		bio_advance_iter_single(dst, dst_iter, bytes);
121845db54d5SKent Overstreet 	}
121945db54d5SKent Overstreet }
122045db54d5SKent Overstreet EXPORT_SYMBOL(bio_copy_data_iter);
122145db54d5SKent Overstreet 
122245db54d5SKent Overstreet /**
122345db54d5SKent Overstreet  * bio_copy_data - copy contents of data buffers from one bio to another
122445db54d5SKent Overstreet  * @src: source bio
122545db54d5SKent Overstreet  * @dst: destination bio
122645db54d5SKent Overstreet  *
122745db54d5SKent Overstreet  * Stops when it reaches the end of either @src or @dst - that is, copies
122845db54d5SKent Overstreet  * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
122945db54d5SKent Overstreet  */
123045db54d5SKent Overstreet void bio_copy_data(struct bio *dst, struct bio *src)
123145db54d5SKent Overstreet {
123245db54d5SKent Overstreet 	struct bvec_iter src_iter = src->bi_iter;
123345db54d5SKent Overstreet 	struct bvec_iter dst_iter = dst->bi_iter;
123445db54d5SKent Overstreet 
123545db54d5SKent Overstreet 	bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
123645db54d5SKent Overstreet }
123745db54d5SKent Overstreet EXPORT_SYMBOL(bio_copy_data);
123845db54d5SKent Overstreet 
123945db54d5SKent Overstreet /**
124045db54d5SKent Overstreet  * bio_list_copy_data - copy contents of data buffers from one chain of bios to
124145db54d5SKent Overstreet  * another
124245db54d5SKent Overstreet  * @src: source bio list
124345db54d5SKent Overstreet  * @dst: destination bio list
124445db54d5SKent Overstreet  *
124545db54d5SKent Overstreet  * Stops when it reaches the end of either the @src list or @dst list - that is,
124645db54d5SKent Overstreet  * copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of
124745db54d5SKent Overstreet  * bios).
124845db54d5SKent Overstreet  */
124945db54d5SKent Overstreet void bio_list_copy_data(struct bio *dst, struct bio *src)
125045db54d5SKent Overstreet {
125145db54d5SKent Overstreet 	struct bvec_iter src_iter = src->bi_iter;
125245db54d5SKent Overstreet 	struct bvec_iter dst_iter = dst->bi_iter;
125345db54d5SKent Overstreet 
1254f9c78b2bSJens Axboe 	while (1) {
1255f9c78b2bSJens Axboe 		if (!src_iter.bi_size) {
1256f9c78b2bSJens Axboe 			src = src->bi_next;
1257f9c78b2bSJens Axboe 			if (!src)
1258f9c78b2bSJens Axboe 				break;
1259f9c78b2bSJens Axboe 
1260f9c78b2bSJens Axboe 			src_iter = src->bi_iter;
1261f9c78b2bSJens Axboe 		}
1262f9c78b2bSJens Axboe 
1263f9c78b2bSJens Axboe 		if (!dst_iter.bi_size) {
1264f9c78b2bSJens Axboe 			dst = dst->bi_next;
1265f9c78b2bSJens Axboe 			if (!dst)
1266f9c78b2bSJens Axboe 				break;
1267f9c78b2bSJens Axboe 
1268f9c78b2bSJens Axboe 			dst_iter = dst->bi_iter;
1269f9c78b2bSJens Axboe 		}
1270f9c78b2bSJens Axboe 
127145db54d5SKent Overstreet 		bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
1272f9c78b2bSJens Axboe 	}
1273f9c78b2bSJens Axboe }
127445db54d5SKent Overstreet EXPORT_SYMBOL(bio_list_copy_data);
1275f9c78b2bSJens Axboe 
1276491221f8SGuoqing Jiang void bio_free_pages(struct bio *bio)
12771dfa0f68SChristoph Hellwig {
12781dfa0f68SChristoph Hellwig 	struct bio_vec *bvec;
12796dc4f100SMing Lei 	struct bvec_iter_all iter_all;
12801dfa0f68SChristoph Hellwig 
12812b070cfeSChristoph Hellwig 	bio_for_each_segment_all(bvec, bio, iter_all)
12821dfa0f68SChristoph Hellwig 		__free_page(bvec->bv_page);
12831dfa0f68SChristoph Hellwig }
1284491221f8SGuoqing Jiang EXPORT_SYMBOL(bio_free_pages);
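
/*
 * Usage sketch (illustration only): the completion side of a bounce-buffer
 * pattern, exercising bio_copy_data() and bio_free_pages().  It assumes the
 * submission path cloned the original bio into 'bounce', populated it with
 * alloc_page() pages and stashed the original in ->bi_private; all names
 * are hypothetical.
 */
static void example_bounce_end_io(struct bio *bounce)
{
	struct bio *orig = bounce->bi_private;

	/* The read completed into the bounce pages; copy back to the caller. */
	if (!bounce->bi_status && bio_data_dir(orig) == READ)
		bio_copy_data(orig, bounce);

	orig->bi_status = bounce->bi_status;
	bio_free_pages(bounce);
	bio_put(bounce);
	bio_endio(orig);
}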
12851dfa0f68SChristoph Hellwig 
1286f9c78b2bSJens Axboe /*
1287f9c78b2bSJens Axboe  * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
1288f9c78b2bSJens Axboe  * for performing direct-IO in BIOs.
1289f9c78b2bSJens Axboe  *
1290f9c78b2bSJens Axboe  * The problem is that we cannot run set_page_dirty() from interrupt context
1291f9c78b2bSJens Axboe  * because the required locks are not interrupt-safe.  So what we can do is to
1292f9c78b2bSJens Axboe  * mark the pages dirty _before_ performing IO.  And in interrupt context,
1293f9c78b2bSJens Axboe  * check that the pages are still dirty.   If so, fine.  If not, redirty them
1294f9c78b2bSJens Axboe  * in process context.
1295f9c78b2bSJens Axboe  *
1296f9c78b2bSJens Axboe  * We special-case compound pages here: normally this means reads into hugetlb
1297f9c78b2bSJens Axboe  * pages.  The logic in here doesn't really work right for compound pages
1298f9c78b2bSJens Axboe  * because the VM does not uniformly chase down the head page in all cases.
1299f9c78b2bSJens Axboe  * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
1300f9c78b2bSJens Axboe  * handle them at all.  So we skip compound pages here at an early stage.
1301f9c78b2bSJens Axboe  *
1302f9c78b2bSJens Axboe  * Note that this code is very hard to test under normal circumstances because
1303f9c78b2bSJens Axboe  * direct-io pins the pages with get_user_pages().  This makes
1304f9c78b2bSJens Axboe  * is_page_cache_freeable return false, and the VM will not clean the pages.
1305f9c78b2bSJens Axboe  * But other code (e.g. flusher threads) could clean the pages if they are mapped
1306f9c78b2bSJens Axboe  * pagecache.
1307f9c78b2bSJens Axboe  *
1308f9c78b2bSJens Axboe  * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
1309f9c78b2bSJens Axboe  * deferred bio dirtying paths.
1310f9c78b2bSJens Axboe  */
1311f9c78b2bSJens Axboe 
1312f9c78b2bSJens Axboe /*
1313f9c78b2bSJens Axboe  * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1314f9c78b2bSJens Axboe  */
1315f9c78b2bSJens Axboe void bio_set_pages_dirty(struct bio *bio)
1316f9c78b2bSJens Axboe {
1317f9c78b2bSJens Axboe 	struct bio_vec *bvec;
13186dc4f100SMing Lei 	struct bvec_iter_all iter_all;
1319f9c78b2bSJens Axboe 
13202b070cfeSChristoph Hellwig 	bio_for_each_segment_all(bvec, bio, iter_all) {
13213bb50983SChristoph Hellwig 		if (!PageCompound(bvec->bv_page))
13223bb50983SChristoph Hellwig 			set_page_dirty_lock(bvec->bv_page);
1323f9c78b2bSJens Axboe 	}
1324f9c78b2bSJens Axboe }
1325f9c78b2bSJens Axboe 
1326f9c78b2bSJens Axboe /*
1327f9c78b2bSJens Axboe  * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
1328f9c78b2bSJens Axboe  * If they are, then fine.  If, however, some pages are clean then they must
1329f9c78b2bSJens Axboe  * have been written out during the direct-IO read.  So we take another ref on
133024d5493fSChristoph Hellwig  * the BIO and re-dirty the pages in process context.
1331f9c78b2bSJens Axboe  *
1332f9c78b2bSJens Axboe  * It is expected that bio_check_pages_dirty() will wholly own the BIO from
1333ea1754a0SKirill A. Shutemov  * here on.  It will run one put_page() against each page and will run one
1334ea1754a0SKirill A. Shutemov  * bio_put() against the BIO.
1335f9c78b2bSJens Axboe  */
1336f9c78b2bSJens Axboe 
1337f9c78b2bSJens Axboe static void bio_dirty_fn(struct work_struct *work);
1338f9c78b2bSJens Axboe 
1339f9c78b2bSJens Axboe static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
1340f9c78b2bSJens Axboe static DEFINE_SPINLOCK(bio_dirty_lock);
1341f9c78b2bSJens Axboe static struct bio *bio_dirty_list;
1342f9c78b2bSJens Axboe 
1343f9c78b2bSJens Axboe /*
1344f9c78b2bSJens Axboe  * This runs in process context
1345f9c78b2bSJens Axboe  */
1346f9c78b2bSJens Axboe static void bio_dirty_fn(struct work_struct *work)
1347f9c78b2bSJens Axboe {
134824d5493fSChristoph Hellwig 	struct bio *bio, *next;
1349f9c78b2bSJens Axboe 
135024d5493fSChristoph Hellwig 	spin_lock_irq(&bio_dirty_lock);
135124d5493fSChristoph Hellwig 	next = bio_dirty_list;
1352f9c78b2bSJens Axboe 	bio_dirty_list = NULL;
135324d5493fSChristoph Hellwig 	spin_unlock_irq(&bio_dirty_lock);
1354f9c78b2bSJens Axboe 
135524d5493fSChristoph Hellwig 	while ((bio = next) != NULL) {
135624d5493fSChristoph Hellwig 		next = bio->bi_private;
1357f9c78b2bSJens Axboe 
1358d241a95fSChristoph Hellwig 		bio_release_pages(bio, true);
1359f9c78b2bSJens Axboe 		bio_put(bio);
1360f9c78b2bSJens Axboe 	}
1361f9c78b2bSJens Axboe }
1362f9c78b2bSJens Axboe 
1363f9c78b2bSJens Axboe void bio_check_pages_dirty(struct bio *bio)
1364f9c78b2bSJens Axboe {
1365f9c78b2bSJens Axboe 	struct bio_vec *bvec;
136624d5493fSChristoph Hellwig 	unsigned long flags;
13676dc4f100SMing Lei 	struct bvec_iter_all iter_all;
1368f9c78b2bSJens Axboe 
13692b070cfeSChristoph Hellwig 	bio_for_each_segment_all(bvec, bio, iter_all) {
137024d5493fSChristoph Hellwig 		if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
137124d5493fSChristoph Hellwig 			goto defer;
1372f9c78b2bSJens Axboe 	}
1373f9c78b2bSJens Axboe 
1374d241a95fSChristoph Hellwig 	bio_release_pages(bio, false);
137524d5493fSChristoph Hellwig 	bio_put(bio);
137624d5493fSChristoph Hellwig 	return;
137724d5493fSChristoph Hellwig defer:
1378f9c78b2bSJens Axboe 	spin_lock_irqsave(&bio_dirty_lock, flags);
1379f9c78b2bSJens Axboe 	bio->bi_private = bio_dirty_list;
1380f9c78b2bSJens Axboe 	bio_dirty_list = bio;
1381f9c78b2bSJens Axboe 	spin_unlock_irqrestore(&bio_dirty_lock, flags);
1382f9c78b2bSJens Axboe 	schedule_work(&bio_dirty_work);
1383f9c78b2bSJens Axboe }
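
/*
 * Usage sketch (illustration only): how a direct-IO read path might use the
 * two helpers above.  Pages are dirtied before submission; the completion
 * handler, which may run in interrupt context, defers any re-dirtying to
 * process context via bio_check_pages_dirty().  Function names are
 * hypothetical.
 */
static void example_dio_read_end_io(struct bio *bio)
{
	/* Releases the pages and the bio, re-dirtying in process context. */
	bio_check_pages_dirty(bio);
}

static void example_submit_dio_read(struct bio *bio)
{
	bio->bi_end_io = example_dio_read_end_io;
	bio_set_pages_dirty(bio);	/* dirty _before_ the IO starts */
	submit_bio(bio);
}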
1384f9c78b2bSJens Axboe 
1385c4cf5261SJens Axboe static inline bool bio_remaining_done(struct bio *bio)
1386c4cf5261SJens Axboe {
1387c4cf5261SJens Axboe 	/*
1388c4cf5261SJens Axboe 	 * If we're not chaining, then ->__bi_remaining is always 1 and
1389c4cf5261SJens Axboe 	 * we always end io on the first invocation.
1390c4cf5261SJens Axboe 	 */
1391c4cf5261SJens Axboe 	if (!bio_flagged(bio, BIO_CHAIN))
1392c4cf5261SJens Axboe 		return true;
1393c4cf5261SJens Axboe 
1394c4cf5261SJens Axboe 	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
1395c4cf5261SJens Axboe 
1396326e1dbbSMike Snitzer 	if (atomic_dec_and_test(&bio->__bi_remaining)) {
1397b7c44ed9SJens Axboe 		bio_clear_flag(bio, BIO_CHAIN);
1398c4cf5261SJens Axboe 		return true;
1399326e1dbbSMike Snitzer 	}
1400c4cf5261SJens Axboe 
1401c4cf5261SJens Axboe 	return false;
1402c4cf5261SJens Axboe }
1403c4cf5261SJens Axboe 
1404f9c78b2bSJens Axboe /**
1405f9c78b2bSJens Axboe  * bio_endio - end I/O on a bio
1406f9c78b2bSJens Axboe  * @bio:	bio
1407f9c78b2bSJens Axboe  *
1408f9c78b2bSJens Axboe  * Description:
14094246a0b6SChristoph Hellwig  *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
14104246a0b6SChristoph Hellwig  *   way to end I/O on a bio. No one should call bi_end_io() directly on a
14114246a0b6SChristoph Hellwig  *   bio unless they own it and thus know that it has an end_io function.
1412fbbaf700SNeilBrown  *
1413fbbaf700SNeilBrown  *   bio_endio() can be called several times on a bio that has been chained
1414fbbaf700SNeilBrown  *   using bio_chain().  The ->bi_end_io() function will only be called the
1415fbbaf700SNeilBrown  *   last time.  At this point the BLK_TA_COMPLETE tracing event will be
1416fbbaf700SNeilBrown  *   generated if BIO_TRACE_COMPLETION is set.
1417f9c78b2bSJens Axboe  **/
14184246a0b6SChristoph Hellwig void bio_endio(struct bio *bio)
1419f9c78b2bSJens Axboe {
1420ba8c6967SChristoph Hellwig again:
14212b885517SChristoph Hellwig 	if (!bio_remaining_done(bio))
1422ba8c6967SChristoph Hellwig 		return;
14237c20f116SChristoph Hellwig 	if (!bio_integrity_endio(bio))
14247c20f116SChristoph Hellwig 		return;
1425f9c78b2bSJens Axboe 
1426309dca30SChristoph Hellwig 	if (bio->bi_bdev)
1427309dca30SChristoph Hellwig 		rq_qos_done_bio(bio->bi_bdev->bd_disk->queue, bio);
142867b42d0bSJosef Bacik 
1429f9c78b2bSJens Axboe 	/*
1430ba8c6967SChristoph Hellwig 	 * Need to have a real endio function for chained bios, otherwise
1431ba8c6967SChristoph Hellwig 	 * various corner cases will break (like stacking block devices that
1432ba8c6967SChristoph Hellwig 	 * save/restore bi_end_io) - however, we want to avoid unbounded
1433ba8c6967SChristoph Hellwig 	 * recursion and blowing the stack. Tail call optimization would
1434ba8c6967SChristoph Hellwig 	 * handle this, but compiling with frame pointers also disables
1435ba8c6967SChristoph Hellwig 	 * gcc's sibling call optimization.
1436f9c78b2bSJens Axboe 	 */
1437f9c78b2bSJens Axboe 	if (bio->bi_end_io == bio_chain_endio) {
143838f8baaeSChristoph Hellwig 		bio = __bio_chain_endio(bio);
1439ba8c6967SChristoph Hellwig 		goto again;
1440ba8c6967SChristoph Hellwig 	}
1441ba8c6967SChristoph Hellwig 
1442309dca30SChristoph Hellwig 	if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
1443309dca30SChristoph Hellwig 		trace_block_bio_complete(bio->bi_bdev->bd_disk->queue, bio);
1444fbbaf700SNeilBrown 		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
1445fbbaf700SNeilBrown 	}
1446fbbaf700SNeilBrown 
14479e234eeaSShaohua Li 	blk_throtl_bio_endio(bio);
1448b222dd2fSShaohua Li 	/* release cgroup info */
1449b222dd2fSShaohua Li 	bio_uninit(bio);
1450f9c78b2bSJens Axboe 	if (bio->bi_end_io)
14514246a0b6SChristoph Hellwig 		bio->bi_end_io(bio);
1452f9c78b2bSJens Axboe }
1453f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_endio);
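
/*
 * Usage sketch (illustration only): a stacking-driver style completion.
 * The clone's ->bi_end_io propagates any error to the original bio and then
 * ends it, letting bio_endio() run the upper layer's completion in turn.
 * Names are hypothetical and the clone is assumed to carry the original in
 * ->bi_private.
 */
static void example_clone_end_io(struct bio *clone)
{
	struct bio *orig = clone->bi_private;

	if (clone->bi_status)
		orig->bi_status = clone->bi_status;
	bio_put(clone);
	bio_endio(orig);
}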
1454f9c78b2bSJens Axboe 
1455f9c78b2bSJens Axboe /**
1456f9c78b2bSJens Axboe  * bio_split - split a bio
1457f9c78b2bSJens Axboe  * @bio:	bio to split
1458f9c78b2bSJens Axboe  * @sectors:	number of sectors to split from the front of @bio
1459f9c78b2bSJens Axboe  * @gfp:	gfp mask
1460f9c78b2bSJens Axboe  * @bs:		bio set to allocate from
1461f9c78b2bSJens Axboe  *
1462f9c78b2bSJens Axboe  * Allocates and returns a new bio which represents @sectors from the start of
1463f9c78b2bSJens Axboe  * @bio, and updates @bio to represent the remaining sectors.
1464f9c78b2bSJens Axboe  *
1465f3f5da62SMartin K. Petersen  * Unless this is a discard request, the newly allocated bio will point
1466dad77584SBart Van Assche  * to @bio's bi_io_vec. It is the caller's responsibility to ensure that
1467dad77584SBart Van Assche  * neither @bio nor @bs are freed before the split bio.
1468f9c78b2bSJens Axboe  */
1469f9c78b2bSJens Axboe struct bio *bio_split(struct bio *bio, int sectors,
1470f9c78b2bSJens Axboe 		      gfp_t gfp, struct bio_set *bs)
1471f9c78b2bSJens Axboe {
1472f341a4d3SMikulas Patocka 	struct bio *split;
1473f9c78b2bSJens Axboe 
1474f9c78b2bSJens Axboe 	BUG_ON(sectors <= 0);
1475f9c78b2bSJens Axboe 	BUG_ON(sectors >= bio_sectors(bio));
1476f9c78b2bSJens Axboe 
14770512a75bSKeith Busch 	/* Zone append commands cannot be split */
14780512a75bSKeith Busch 	if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
14790512a75bSKeith Busch 		return NULL;
14800512a75bSKeith Busch 
1481f9c78b2bSJens Axboe 	split = bio_clone_fast(bio, gfp, bs);
1482f9c78b2bSJens Axboe 	if (!split)
1483f9c78b2bSJens Axboe 		return NULL;
1484f9c78b2bSJens Axboe 
1485f9c78b2bSJens Axboe 	split->bi_iter.bi_size = sectors << 9;
1486f9c78b2bSJens Axboe 
1487f9c78b2bSJens Axboe 	if (bio_integrity(split))
1488fbd08e76SDmitry Monakhov 		bio_integrity_trim(split);
1489f9c78b2bSJens Axboe 
1490f9c78b2bSJens Axboe 	bio_advance(bio, split->bi_iter.bi_size);
1491f9c78b2bSJens Axboe 
1492fbbaf700SNeilBrown 	if (bio_flagged(bio, BIO_TRACE_COMPLETION))
149320d59023SGoldwyn Rodrigues 		bio_set_flag(split, BIO_TRACE_COMPLETION);
1494fbbaf700SNeilBrown 
1495f9c78b2bSJens Axboe 	return split;
1496f9c78b2bSJens Axboe }
1497f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_split);
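
/*
 * Usage sketch (illustration only): splitting at a hardware limit, roughly
 * the pattern used by the block layer's own splitting code.  The front half
 * is chained to the remainder so the original bio completes only once both
 * halves have; 'max_sectors' and the bio_set are assumptions of the caller.
 */
static struct bio *example_split_front(struct bio *bio, int max_sectors,
				       struct bio_set *bs)
{
	struct bio *split;

	if (bio_sectors(bio) <= max_sectors)
		return bio;			/* no split needed */

	split = bio_split(bio, max_sectors, GFP_NOIO, bs);
	if (!split)
		return bio;

	bio_chain(split, bio);			/* @bio completes after @split */
	submit_bio_noacct(bio);			/* requeue the remainder */
	return split;
}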
1498f9c78b2bSJens Axboe 
1499f9c78b2bSJens Axboe /**
1500f9c78b2bSJens Axboe  * bio_trim - trim a bio
1501f9c78b2bSJens Axboe  * @bio:	bio to trim
1502f9c78b2bSJens Axboe  * @offset:	number of sectors to trim from the front of @bio
1503f9c78b2bSJens Axboe  * @size:	size we want to trim @bio to, in sectors
1504f9c78b2bSJens Axboe  */
1505f9c78b2bSJens Axboe void bio_trim(struct bio *bio, int offset, int size)
1506f9c78b2bSJens Axboe {
1507f9c78b2bSJens Axboe 	/* 'bio' is a cloned bio which we need to trim to match
1508f9c78b2bSJens Axboe 	 * the given offset and size.
1509f9c78b2bSJens Axboe 	 */
1510f9c78b2bSJens Axboe 
1511f9c78b2bSJens Axboe 	size <<= 9;
1512f9c78b2bSJens Axboe 	if (offset == 0 && size == bio->bi_iter.bi_size)
1513f9c78b2bSJens Axboe 		return;
1514f9c78b2bSJens Axboe 
1515f9c78b2bSJens Axboe 	bio_advance(bio, offset << 9);
1516f9c78b2bSJens Axboe 	bio->bi_iter.bi_size = size;
1517376a78abSDmitry Monakhov 
1518376a78abSDmitry Monakhov 	if (bio_integrity(bio))
1519fbd08e76SDmitry Monakhov 		bio_integrity_trim(bio);
1520376a78abSDmitry Monakhov 
1521f9c78b2bSJens Axboe }
1522f9c78b2bSJens Axboe EXPORT_SYMBOL_GPL(bio_trim);
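
/*
 * Usage sketch (illustration only): clone-and-trim, roughly how stacking
 * drivers remap part of a bio.  @offset and @size are in sectors, relative
 * to the clone's current start; the helper name is hypothetical.
 */
static struct bio *example_clone_region(struct bio *bio, int offset, int size,
					struct bio_set *bs)
{
	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, bs);

	if (clone)
		bio_trim(clone, offset, size);	/* keep [offset, offset+size) */
	return clone;
}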
1523f9c78b2bSJens Axboe 
1524f9c78b2bSJens Axboe /*
1525f9c78b2bSJens Axboe  * create memory pools for biovec's in a bio_set.
1526f9c78b2bSJens Axboe  * use the global biovec slabs created for general use.
1527f9c78b2bSJens Axboe  */
15288aa6ba2fSKent Overstreet int biovec_init_pool(mempool_t *pool, int pool_entries)
1529f9c78b2bSJens Axboe {
15307a800a20SChristoph Hellwig 	struct biovec_slab *bp = bvec_slabs + ARRAY_SIZE(bvec_slabs) - 1;
1531f9c78b2bSJens Axboe 
15328aa6ba2fSKent Overstreet 	return mempool_init_slab_pool(pool, pool_entries, bp->slab);
1533f9c78b2bSJens Axboe }
1534f9c78b2bSJens Axboe 
1535917a38c7SKent Overstreet /*
1536917a38c7SKent Overstreet  * bioset_exit - exit a bioset initialized with bioset_init()
1537917a38c7SKent Overstreet  *
1538917a38c7SKent Overstreet  * May be called on a zeroed but uninitialized bioset (i.e. allocated with
1539917a38c7SKent Overstreet  * kzalloc()).
1540917a38c7SKent Overstreet  */
1541917a38c7SKent Overstreet void bioset_exit(struct bio_set *bs)
1542f9c78b2bSJens Axboe {
1543f9c78b2bSJens Axboe 	if (bs->rescue_workqueue)
1544f9c78b2bSJens Axboe 		destroy_workqueue(bs->rescue_workqueue);
1545917a38c7SKent Overstreet 	bs->rescue_workqueue = NULL;
1546f9c78b2bSJens Axboe 
15478aa6ba2fSKent Overstreet 	mempool_exit(&bs->bio_pool);
15488aa6ba2fSKent Overstreet 	mempool_exit(&bs->bvec_pool);
1549f9c78b2bSJens Axboe 
1550f9c78b2bSJens Axboe 	bioset_integrity_free(bs);
1551917a38c7SKent Overstreet 	if (bs->bio_slab)
1552f9c78b2bSJens Axboe 		bio_put_slab(bs);
1553917a38c7SKent Overstreet 	bs->bio_slab = NULL;
1554917a38c7SKent Overstreet }
1555917a38c7SKent Overstreet EXPORT_SYMBOL(bioset_exit);
1556f9c78b2bSJens Axboe 
1557011067b0SNeilBrown /**
1558917a38c7SKent Overstreet  * bioset_init - Initialize a bio_set
1559dad08527SKent Overstreet  * @bs:		pool to initialize
1560917a38c7SKent Overstreet  * @pool_size:	Number of bio and bio_vecs to cache in the mempool
1561917a38c7SKent Overstreet  * @front_pad:	Number of bytes to allocate in front of the returned bio
1562917a38c7SKent Overstreet  * @flags:	Flags to modify behavior, currently %BIOSET_NEED_BVECS
1563917a38c7SKent Overstreet  *              and %BIOSET_NEED_RESCUER
1564917a38c7SKent Overstreet  *
1565dad08527SKent Overstreet  * Description:
1566dad08527SKent Overstreet  *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
1567dad08527SKent Overstreet  *    to ask for a number of bytes to be allocated in front of the bio.
1568dad08527SKent Overstreet  *    Front pad allocation is useful for embedding the bio inside
1569dad08527SKent Overstreet  *    another structure, to avoid allocating extra data to go with the bio.
1570dad08527SKent Overstreet  *    Note that the bio must always be embedded at the END of that structure,
1571dad08527SKent Overstreet  *    or things will break badly.
1572dad08527SKent Overstreet  *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
1573dad08527SKent Overstreet  *    for allocating iovecs.  This pool is not needed e.g. for bio_clone_fast().
1574dad08527SKent Overstreet  *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used to
1575dad08527SKent Overstreet  *    dispatch queued requests when the mempool runs out of space.
1576dad08527SKent Overstreet  *
1577917a38c7SKent Overstreet  */
1578917a38c7SKent Overstreet int bioset_init(struct bio_set *bs,
1579917a38c7SKent Overstreet 		unsigned int pool_size,
1580917a38c7SKent Overstreet 		unsigned int front_pad,
1581917a38c7SKent Overstreet 		int flags)
1582917a38c7SKent Overstreet {
1583917a38c7SKent Overstreet 	bs->front_pad = front_pad;
15849f180e31SMing Lei 	if (flags & BIOSET_NEED_BVECS)
15859f180e31SMing Lei 		bs->back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
15869f180e31SMing Lei 	else
15879f180e31SMing Lei 		bs->back_pad = 0;
1588917a38c7SKent Overstreet 
1589917a38c7SKent Overstreet 	spin_lock_init(&bs->rescue_lock);
1590917a38c7SKent Overstreet 	bio_list_init(&bs->rescue_list);
1591917a38c7SKent Overstreet 	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
1592917a38c7SKent Overstreet 
159349d1ec85SMing Lei 	bs->bio_slab = bio_find_or_create_slab(bs);
1594917a38c7SKent Overstreet 	if (!bs->bio_slab)
1595917a38c7SKent Overstreet 		return -ENOMEM;
1596917a38c7SKent Overstreet 
1597917a38c7SKent Overstreet 	if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
1598917a38c7SKent Overstreet 		goto bad;
1599917a38c7SKent Overstreet 
1600917a38c7SKent Overstreet 	if ((flags & BIOSET_NEED_BVECS) &&
1601917a38c7SKent Overstreet 	    biovec_init_pool(&bs->bvec_pool, pool_size))
1602917a38c7SKent Overstreet 		goto bad;
1603917a38c7SKent Overstreet 
1604917a38c7SKent Overstreet 	if (!(flags & BIOSET_NEED_RESCUER))
1605917a38c7SKent Overstreet 		return 0;
1606917a38c7SKent Overstreet 
1607917a38c7SKent Overstreet 	bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
1608917a38c7SKent Overstreet 	if (!bs->rescue_workqueue)
1609917a38c7SKent Overstreet 		goto bad;
1610917a38c7SKent Overstreet 
1611917a38c7SKent Overstreet 	return 0;
1612917a38c7SKent Overstreet bad:
1613917a38c7SKent Overstreet 	bioset_exit(bs);
1614917a38c7SKent Overstreet 	return -ENOMEM;
1615917a38c7SKent Overstreet }
1616917a38c7SKent Overstreet EXPORT_SYMBOL(bioset_init);
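
/*
 * Usage sketch (illustration only): a driver-private bio_set using
 * front_pad to embed per-IO state in front of the bio, as described above.
 * Per the note, the bio must be the LAST member of the containing
 * structure.  All names are hypothetical.
 */
struct example_io {
	void *private_data;
	struct bio bio;		/* must come last */
};

static struct bio_set example_bio_set;

static int example_setup(void)
{
	return bioset_init(&example_bio_set, BIO_POOL_SIZE,
			   offsetof(struct example_io, bio),
			   BIOSET_NEED_BVECS);
}

static struct example_io *example_alloc(unsigned short nr_vecs)
{
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_vecs,
					   &example_bio_set);

	return bio ? container_of(bio, struct example_io, bio) : NULL;
}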
1617917a38c7SKent Overstreet 
161828e89fd9SJens Axboe /*
161928e89fd9SJens Axboe  * Initialize and set up a new bio_set, based on the settings from
162028e89fd9SJens Axboe  * another bio_set.
162128e89fd9SJens Axboe  */
162228e89fd9SJens Axboe int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
162328e89fd9SJens Axboe {
162428e89fd9SJens Axboe 	int flags;
162528e89fd9SJens Axboe 
162628e89fd9SJens Axboe 	flags = 0;
162728e89fd9SJens Axboe 	if (src->bvec_pool.min_nr)
162828e89fd9SJens Axboe 		flags |= BIOSET_NEED_BVECS;
162928e89fd9SJens Axboe 	if (src->rescue_workqueue)
163028e89fd9SJens Axboe 		flags |= BIOSET_NEED_RESCUER;
163128e89fd9SJens Axboe 
163228e89fd9SJens Axboe 	return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags);
163328e89fd9SJens Axboe }
163428e89fd9SJens Axboe EXPORT_SYMBOL(bioset_init_from_src);
163528e89fd9SJens Axboe 
1636de76fd89SChristoph Hellwig static int __init init_bio(void)
1637f9c78b2bSJens Axboe {
1638f9c78b2bSJens Axboe 	int i;
1639f9c78b2bSJens Axboe 
1640f9c78b2bSJens Axboe 	bio_integrity_init();
1641de76fd89SChristoph Hellwig 
1642de76fd89SChristoph Hellwig 	for (i = 0; i < ARRAY_SIZE(bvec_slabs); i++) {
1643f9c78b2bSJens Axboe 		struct biovec_slab *bvs = bvec_slabs + i;
1644f9c78b2bSJens Axboe 
1645de76fd89SChristoph Hellwig 		bvs->slab = kmem_cache_create(bvs->name,
1646de76fd89SChristoph Hellwig 				bvs->nr_vecs * sizeof(struct bio_vec), 0,
1647f9c78b2bSJens Axboe 				SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
1648f9c78b2bSJens Axboe 	}
1649f9c78b2bSJens Axboe 
1650f4f8154aSKent Overstreet 	if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS))
1651f9c78b2bSJens Axboe 		panic("bio: can't allocate bios\n");
1652f9c78b2bSJens Axboe 
1653f4f8154aSKent Overstreet 	if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE))
1654f9c78b2bSJens Axboe 		panic("bio: can't create integrity pool\n");
1655f9c78b2bSJens Axboe 
1656f9c78b2bSJens Axboe 	return 0;
1657f9c78b2bSJens Axboe }
1658f9c78b2bSJens Axboe subsys_initcall(init_bio);