// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <linux/blk-cgroup.h>

#include <trace/events/block.h>
#include "blk.h"
#include "blk-rq-qos.h"

/*
 * Test patch to inline a certain number of bi_io_vec's inside the bio
 * itself, to shrink a bio data allocation from two mempool calls to one
 */
#define BIO_INLINE_VECS		4

/*
 * if you change this list, also change bvec_alloc or things will
 * break badly! cannot be bigger than what you can fit into an
 * unsigned short
 */
#define BV(x, n) { .nr_vecs = x, .name = "biovec-"#n }
static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
	BV(1, 1), BV(4, 4), BV(16, 16), BV(64, 64), BV(128, 128), BV(BIO_MAX_PAGES, max),
};
#undef BV

/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
struct bio_set fs_bio_set;
EXPORT_SYMBOL(fs_bio_set);

/*
 * Our slab pool management
 */
struct bio_slab {
	struct kmem_cache *slab;
	unsigned int slab_ref;
	unsigned int slab_size;
	char name[8];
};
static DEFINE_MUTEX(bio_slab_lock);
static struct bio_slab *bio_slabs;
static unsigned int bio_slab_nr, bio_slab_max;

static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
{
	unsigned int sz = sizeof(struct bio) + extra_size;
	struct kmem_cache *slab = NULL;
	struct bio_slab *bslab, *new_bio_slabs;
	unsigned int new_bio_slab_max;
	unsigned int i, entry = -1;

	mutex_lock(&bio_slab_lock);

	i = 0;
	while (i < bio_slab_nr) {
		bslab = &bio_slabs[i];

		if (!bslab->slab && entry == -1)
			entry = i;
		else if (bslab->slab_size == sz) {
			slab = bslab->slab;
			bslab->slab_ref++;
			break;
		}
		i++;
	}

	if (slab)
		goto out_unlock;

	if (bio_slab_nr == bio_slab_max && entry == -1) {
		new_bio_slab_max = bio_slab_max << 1;
		new_bio_slabs = krealloc(bio_slabs,
					 new_bio_slab_max * sizeof(struct bio_slab),
					 GFP_KERNEL);
		if (!new_bio_slabs)
			goto out_unlock;
		bio_slab_max = new_bio_slab_max;
		bio_slabs = new_bio_slabs;
	}
	if (entry == -1)
		entry = bio_slab_nr++;

	bslab = &bio_slabs[entry];

	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
	slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
				 SLAB_HWCACHE_ALIGN, NULL);
	if (!slab)
		goto out_unlock;

	bslab->slab = slab;
	bslab->slab_ref = 1;
	bslab->slab_size = sz;
out_unlock:
	mutex_unlock(&bio_slab_lock);
	return slab;
}

static void bio_put_slab(struct bio_set *bs)
{
	struct bio_slab *bslab = NULL;
	unsigned int i;

	mutex_lock(&bio_slab_lock);

	for (i = 0; i < bio_slab_nr; i++) {
		if (bs->bio_slab == bio_slabs[i].slab) {
			bslab = &bio_slabs[i];
			break;
		}
	}

	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
		goto out;

	WARN_ON(!bslab->slab_ref);

	if (--bslab->slab_ref)
		goto out;

	kmem_cache_destroy(bslab->slab);
	bslab->slab = NULL;

out:
	mutex_unlock(&bio_slab_lock);
}

unsigned int bvec_nr_vecs(unsigned short idx)
{
	return bvec_slabs[--idx].nr_vecs;
}

void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
{
	if (!idx)
		return;
	idx--;

	BIO_BUG_ON(idx >= BVEC_POOL_NR);

	if (idx == BVEC_POOL_MAX) {
		mempool_free(bv, pool);
	} else {
		struct biovec_slab *bvs = bvec_slabs + idx;

		kmem_cache_free(bvs->slab, bv);
	}
}

struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
			   mempool_t *pool)
{
	struct bio_vec *bvl;

	/*
	 * see comment near the bvec_slabs define!
	 */
	switch (nr) {
	case 1:
		*idx = 0;
		break;
	case 2 ... 4:
		*idx = 1;
		break;
	case 5 ... 16:
		*idx = 2;
		break;
	case 17 ... 64:
		*idx = 3;
		break;
	case 65 ... 128:
		*idx = 4;
		break;
	case 129 ... BIO_MAX_PAGES:
		*idx = 5;
		break;
	default:
		return NULL;
	}

	/*
	 * idx now points to the pool we want to allocate from. Only the
	 * BVEC_POOL_MAX (BIO_MAX_PAGES) entry pool is mempool backed.
	 */
	if (*idx == BVEC_POOL_MAX) {
fallback:
		bvl = mempool_alloc(pool, gfp_mask);
	} else {
		struct biovec_slab *bvs = bvec_slabs + *idx;
		gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);

		/*
		 * Make this allocation restricted and don't dump info on
		 * allocation failures, since we'll fallback to the mempool
		 * in case of failure.
		 */
		__gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;

		/*
		 * Try a slab allocation. If this fails and __GFP_DIRECT_RECLAIM
		 * is set, retry with the 1-entry mempool
		 */
		bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
		if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
			*idx = BVEC_POOL_MAX;
			goto fallback;
		}
	}

	(*idx)++;
	return bvl;
}
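
/*
 * A minimal sketch of the allocate/free pairing the two helpers above
 * expect, assuming a caller with its own mempool (as bio_alloc_bioset()
 * and bio_free() have below). Asking for 5 vecs rounds up to the 16-entry
 * "biovec-16" slab, and @idx comes back biased by one so that bvec_free()
 * can locate the same pool again.
 */
static inline void example_bvec_roundtrip(mempool_t *pool)
{
	unsigned long idx = 0;
	struct bio_vec *bvl = bvec_alloc(GFP_NOIO, 5, &idx, pool);

	if (bvl)
		bvec_free(pool, bvl, idx);
}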

void bio_uninit(struct bio *bio)
{
	bio_disassociate_blkg(bio);
}
EXPORT_SYMBOL(bio_uninit);

static void bio_free(struct bio *bio)
{
	struct bio_set *bs = bio->bi_pool;
	void *p;

	bio_uninit(bio);

	if (bs) {
		bvec_free(&bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));

		/*
		 * If we have front padding, adjust the bio pointer before freeing
		 */
		p = bio;
		p -= bs->front_pad;

		mempool_free(p, &bs->bio_pool);
	} else {
		/* Bio was allocated by bio_kmalloc() */
		kfree(bio);
	}
}

/*
 * Users of this function have their own bio allocation. Subsequently,
 * they must remember to pair any call to bio_init() with bio_uninit()
 * when IO has completed, or when the bio is released.
 */
void bio_init(struct bio *bio, struct bio_vec *table,
	      unsigned short max_vecs)
{
	memset(bio, 0, sizeof(*bio));
	atomic_set(&bio->__bi_remaining, 1);
	atomic_set(&bio->__bi_cnt, 1);

	bio->bi_io_vec = table;
	bio->bi_max_vecs = max_vecs;
}
EXPORT_SYMBOL(bio_init);
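
/*
 * Illustrative sketch of the bio_init()/bio_uninit() pairing described
 * above, for a driver embedding a bio in its own structure instead of
 * allocating from a bio_set; "example_request" is a hypothetical type.
 */
struct example_request {
	struct bio bio;
	struct bio_vec inline_vecs[4];
};

static inline void example_request_init(struct example_request *req)
{
	bio_init(&req->bio, req->inline_vecs, ARRAY_SIZE(req->inline_vecs));
}

static inline void example_request_done(struct example_request *req)
{
	/* must pair with the bio_init() above once IO has completed */
	bio_uninit(&req->bio);
}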

/**
 * bio_reset - reinitialize a bio
 * @bio:	bio to reset
 *
 * Description:
 *   After calling bio_reset(), @bio will be in the same state as a freshly
 *   allocated bio returned by bio_alloc_bioset() - the only fields that are
 *   preserved are the ones that are initialized by bio_alloc_bioset(). See
 *   comment in struct bio.
 */
void bio_reset(struct bio *bio)
{
	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);

	bio_uninit(bio);

	memset(bio, 0, BIO_RESET_BYTES);
	bio->bi_flags = flags;
	atomic_set(&bio->__bi_remaining, 1);
}
EXPORT_SYMBOL(bio_reset);

static struct bio *__bio_chain_endio(struct bio *bio)
{
	struct bio *parent = bio->bi_private;

	if (!parent->bi_status)
		parent->bi_status = bio->bi_status;
	bio_put(bio);
	return parent;
}

static void bio_chain_endio(struct bio *bio)
{
	bio_endio(__bio_chain_endio(bio));
}

/**
 * bio_chain - chain bio completions
 * @bio: the target bio
 * @parent: the @bio's parent bio
 *
 * The caller won't have a bi_end_io called when @bio completes - instead,
 * @parent's bi_end_io won't be called until both @parent and @bio have
 * completed; the chained bio will also be freed when it completes.
 *
 * The caller must not set bi_private or bi_end_io in @bio.
 */
void bio_chain(struct bio *bio, struct bio *parent)
{
	BUG_ON(bio->bi_private || bio->bi_end_io);

	bio->bi_private = parent;
	bio->bi_end_io	= bio_chain_endio;
	bio_inc_remaining(parent);
}
EXPORT_SYMBOL(bio_chain);
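
/*
 * A sketch of the split-and-chain pattern bio_chain() enables, loosely
 * modelled on what md/raid0-style drivers do: carve off the front of an
 * oversized bio, chain the remainder to it, and resubmit the remainder.
 * @bs is assumed to be the caller's own bio_set; error handling for a
 * failed split is elided.
 */
static inline struct bio *example_split_front(struct bio *bio, int sectors,
					      struct bio_set *bs)
{
	struct bio *split = bio_split(bio, sectors, GFP_NOIO, bs);

	bio_chain(split, bio);		/* bio now completes only after split */
	generic_make_request(bio);	/* requeue the remainder */
	return split;			/* caller continues with the front */
}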

static void bio_alloc_rescue(struct work_struct *work)
{
	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
	struct bio *bio;

	while (1) {
		spin_lock(&bs->rescue_lock);
		bio = bio_list_pop(&bs->rescue_list);
		spin_unlock(&bs->rescue_lock);

		if (!bio)
			break;

		generic_make_request(bio);
	}
}

static void punt_bios_to_rescuer(struct bio_set *bs)
{
	struct bio_list punt, nopunt;
	struct bio *bio;

	if (WARN_ON_ONCE(!bs->rescue_workqueue))
		return;
	/*
	 * In order to guarantee forward progress we must punt only bios that
	 * were allocated from this bio_set; otherwise, if there was a bio on
	 * there for a stacking driver higher up in the stack, processing it
	 * could require allocating bios from this bio_set, and doing that from
	 * our own rescuer would be bad.
	 *
	 * Since bio lists are singly linked, pop them all instead of trying to
	 * remove from the middle of the list:
	 */

	bio_list_init(&punt);
	bio_list_init(&nopunt);

	while ((bio = bio_list_pop(&current->bio_list[0])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[0] = nopunt;

	bio_list_init(&nopunt);
	while ((bio = bio_list_pop(&current->bio_list[1])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[1] = nopunt;

	spin_lock(&bs->rescue_lock);
	bio_list_merge(&bs->rescue_list, &punt);
	spin_unlock(&bs->rescue_lock);

	queue_work(bs->rescue_workqueue, &bs->rescue_work);
}

/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask:   the GFP_* mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 * @bs:		the bio_set to allocate from.
 *
 * Description:
 *   If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
 *   backed by @bs's mempool.
 *
 *   When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc will
 *   always be able to allocate a bio. This is due to the mempool guarantees.
 *   To make this work, callers must never allocate more than 1 bio at a time
 *   from this pool. Callers that need to allocate more than 1 bio must always
 *   submit the previously allocated bio for IO before attempting to allocate
 *   a new one. Failure to do so can cause deadlocks under memory pressure.
 *
 *   Note that when running under generic_make_request() (i.e. any block
 *   driver), bios are not submitted until after you return - see the code in
 *   generic_make_request() that converts recursion into iteration, to prevent
 *   stack overflows.
 *
 *   This would normally mean allocating multiple bios under
 *   generic_make_request() would be susceptible to deadlocks, but we have
 *   deadlock avoidance code that resubmits any blocked bios from a rescuer
 *   thread.
 *
 *   However, we do not guarantee forward progress for allocations from other
 *   mempools. Doing multiple allocations from the same mempool under
 *   generic_make_request() should be avoided - instead, use bio_set's front_pad
 *   for per-bio allocations.
 *
 *   RETURNS:
 *   Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
			     struct bio_set *bs)
{
	gfp_t saved_gfp = gfp_mask;
	unsigned front_pad;
	unsigned inline_vecs;
	struct bio_vec *bvl = NULL;
	struct bio *bio;
	void *p;

	if (!bs) {
		if (nr_iovecs > UIO_MAXIOV)
			return NULL;

		p = kmalloc(sizeof(struct bio) +
			    nr_iovecs * sizeof(struct bio_vec),
			    gfp_mask);
		front_pad = 0;
		inline_vecs = nr_iovecs;
	} else {
		/* should not use nobvec bioset for nr_iovecs > 0 */
		if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) &&
				 nr_iovecs > 0))
			return NULL;
		/*
		 * generic_make_request() converts recursion to iteration; this
		 * means if we're running beneath it, any bios we allocate and
		 * submit will not be submitted (and thus freed) until after we
		 * return.
		 *
		 * This exposes us to a potential deadlock if we allocate
		 * multiple bios from the same bio_set() while running
		 * underneath generic_make_request(). If we were to allocate
		 * multiple bios (say a stacking block driver that was splitting
		 * bios), we would deadlock if we exhausted the mempool's
		 * reserve.
		 *
		 * We solve this, and guarantee forward progress, with a rescuer
		 * workqueue per bio_set. If we go to allocate and there are
		 * bios on current->bio_list, we first try the allocation
		 * without __GFP_DIRECT_RECLAIM; if that fails, we punt those
		 * bios we would be blocking to the rescuer workqueue before
		 * we retry with the original gfp_flags.
		 */

		if (current->bio_list &&
		    (!bio_list_empty(&current->bio_list[0]) ||
		     !bio_list_empty(&current->bio_list[1])) &&
		    bs->rescue_workqueue)
			gfp_mask &= ~__GFP_DIRECT_RECLAIM;

		p = mempool_alloc(&bs->bio_pool, gfp_mask);
		if (!p && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			p = mempool_alloc(&bs->bio_pool, gfp_mask);
		}

		front_pad = bs->front_pad;
		inline_vecs = BIO_INLINE_VECS;
	}

	if (unlikely(!p))
		return NULL;

	bio = p + front_pad;
	bio_init(bio, NULL, 0);

	if (nr_iovecs > inline_vecs) {
		unsigned long idx = 0;

		bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
		if (!bvl && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
		}

		if (unlikely(!bvl))
			goto err_free;

		bio->bi_flags |= idx << BVEC_POOL_OFFSET;
	} else if (nr_iovecs) {
		bvl = bio->bi_inline_vecs;
	}

	bio->bi_pool = bs;
	bio->bi_max_vecs = nr_iovecs;
	bio->bi_io_vec = bvl;
	return bio;

err_free:
	mempool_free(p, &bs->bio_pool);
	return NULL;
}
EXPORT_SYMBOL(bio_alloc_bioset);
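
/*
 * A minimal sketch of allocating from fs_bio_set while honouring the rule
 * documented above: with a reclaiming mask such as GFP_NOIO the allocation
 * cannot fail, but the caller must submit this bio before allocating the
 * next one. The device and sector are assumed to come from the caller,
 * which still has to add pages and submit.
 */
static inline struct bio *example_alloc_read_bio(struct block_device *bdev,
						 sector_t sector,
						 unsigned short nr_vecs)
{
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, &fs_bio_set);

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_READ;
	return bio;
}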

void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
{
	unsigned long flags;
	struct bio_vec bv;
	struct bvec_iter iter;

	__bio_for_each_segment(bv, bio, iter, start) {
		char *data = bvec_kmap_irq(&bv, &flags);
		memset(data, 0, bv.bv_len);
		flush_dcache_page(bv.bv_page);
		bvec_kunmap_irq(data, &flags);
	}
}
EXPORT_SYMBOL(zero_fill_bio_iter);

/**
 * bio_put - release a reference to a bio
 * @bio:   bio to release reference to
 *
 * Description:
 *   Put a reference to a &struct bio, either one you have gotten with
 *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
 **/
void bio_put(struct bio *bio)
{
	if (!bio_flagged(bio, BIO_REFFED))
		bio_free(bio);
	else {
		BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));

		/*
		 * last put frees it
		 */
		if (atomic_dec_and_test(&bio->__bi_cnt))
			bio_free(bio);
	}
}
EXPORT_SYMBOL(bio_put);

int bio_phys_segments(struct request_queue *q, struct bio *bio)
{
	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
		blk_recount_segments(q, bio);

	return bio->bi_phys_segments;
}

/**
 * 	__bio_clone_fast - clone a bio that shares the original bio's biovec
 * 	@bio: destination bio
 * 	@bio_src: bio to clone
 *
 *	Clone a &bio. Caller will own the returned bio, but not
 *	the actual data it points to. Reference count of returned
 * 	bio will be one.
 *
 * 	Caller must ensure that @bio_src is not freed before @bio.
 */
void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
{
	BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));

	/*
	 * most users will be overriding ->bi_disk with a new target,
	 * so we don't set nor calculate new physical/hw segment counts here
	 */
	bio->bi_disk = bio_src->bi_disk;
	bio->bi_partno = bio_src->bi_partno;
	bio_set_flag(bio, BIO_CLONED);
	if (bio_flagged(bio_src, BIO_THROTTLED))
		bio_set_flag(bio, BIO_THROTTLED);
	bio->bi_opf = bio_src->bi_opf;
	bio->bi_ioprio = bio_src->bi_ioprio;
	bio->bi_write_hint = bio_src->bi_write_hint;
	bio->bi_iter = bio_src->bi_iter;
	bio->bi_io_vec = bio_src->bi_io_vec;

	bio_clone_blkg_association(bio, bio_src);
	blkcg_bio_issue_init(bio);
}
EXPORT_SYMBOL(__bio_clone_fast);

/**
 *	bio_clone_fast - clone a bio that shares the original bio's biovec
 *	@bio: bio to clone
 *	@gfp_mask: allocation priority
 *	@bs: bio_set to allocate from
 *
 * 	Like __bio_clone_fast, only also allocates the returned bio
 */
struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
{
	struct bio *b;

	b = bio_alloc_bioset(gfp_mask, 0, bs);
	if (!b)
		return NULL;

	__bio_clone_fast(b, bio);

	if (bio_integrity(bio)) {
		int ret;

		ret = bio_integrity_clone(b, bio, gfp_mask);

		if (ret < 0) {
			bio_put(b);
			return NULL;
		}
	}

	return b;
}
EXPORT_SYMBOL(bio_clone_fast);
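
/*
 * Sketch of the usual bio_clone_fast() pattern in a stacking driver:
 * clone the bio, point the clone at a lower device, and let the clone's
 * completion handler finish the original. "example_remap_endio" and
 * @lower_bdev are hypothetical; remapping of bi_sector is elided.
 */
static void example_remap_endio(struct bio *clone)
{
	struct bio *orig = clone->bi_private;

	orig->bi_status = clone->bi_status;
	bio_put(clone);
	bio_endio(orig);
}

static inline void example_remap(struct bio *bio,
				 struct block_device *lower_bdev,
				 struct bio_set *bs)
{
	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, bs);

	if (!clone) {
		bio_io_error(bio);
		return;
	}
	clone->bi_private = bio;
	clone->bi_end_io = example_remap_endio;
	bio_set_dev(clone, lower_bdev);
	generic_make_request(clone);
}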

static inline bool page_is_mergeable(const struct bio_vec *bv,
		struct page *page, unsigned int len, unsigned int off,
		bool same_page)
{
	phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) +
		bv->bv_offset + bv->bv_len - 1;
	phys_addr_t page_addr = page_to_phys(page);

	if (vec_end_addr + 1 != page_addr + off)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
		return false;

	if ((vec_end_addr & PAGE_MASK) != page_addr) {
		if (same_page)
			return false;
		if (pfn_to_page(PFN_DOWN(vec_end_addr)) + 1 != page)
			return false;
	}

	WARN_ON_ONCE(same_page && (len + off) > PAGE_SIZE);

	return true;
}

/*
 * Check if the @page can be added to the current segment (@bv), and make
 * sure to call it only if page_is_mergeable(@bv, @page) is true.
 */
static bool can_add_page_to_seg(struct request_queue *q,
		struct bio_vec *bv, struct page *page, unsigned len,
		unsigned offset)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
	phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;

	if ((addr1 | mask) != (addr2 | mask))
		return false;

	if (bv->bv_len + len > queue_max_segment_size(q))
		return false;

	return true;
}

/**
 *	__bio_add_pc_page	- attempt to add page to passthrough bio
 *	@q: the target queue
 *	@bio: destination bio
 *	@page: page to add
 *	@len: vec entry length
 *	@offset: vec entry offset
 *	@put_same_page: put the page if it is the same as the last added page
 *
 *	Attempt to add a page to the bio_vec maplist. This can fail for a
 *	number of reasons, such as the bio being full or target block device
 *	limitations. The target block device must allow bios up to PAGE_SIZE,
 *	so it is always possible to add a single page to an empty bio.
 *
 *	This should only be used by passthrough bios.
 */
static int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		bool put_same_page)
{
	struct bio_vec *bvec;

	/*
	 * cloned bio must not modify vec list
	 */
	if (unlikely(bio_flagged(bio, BIO_CLONED)))
		return 0;

	if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
		return 0;

	if (bio->bi_vcnt > 0) {
		bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page == bvec->bv_page &&
		    offset == bvec->bv_offset + bvec->bv_len) {
			if (put_same_page)
				put_page(page);
			bvec->bv_len += len;
			goto done;
		}

		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvec_gap_to_prev(q, bvec, offset))
			return 0;

		if (page_is_mergeable(bvec, page, len, offset, false) &&
		    can_add_page_to_seg(q, bvec, page, len, offset)) {
			bvec->bv_len += len;
			goto done;
		}
	}

	if (bio_full(bio))
		return 0;

	if (bio->bi_phys_segments >= queue_max_segments(q))
		return 0;

	bvec = &bio->bi_io_vec[bio->bi_vcnt];
	bvec->bv_page = page;
	bvec->bv_len = len;
	bvec->bv_offset = offset;
	bio->bi_vcnt++;
 done:
	bio->bi_iter.bi_size += len;
	bio->bi_phys_segments = bio->bi_vcnt;
	bio_set_flag(bio, BIO_SEG_VALID);
	return len;
}

int bio_add_pc_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset)
{
	return __bio_add_pc_page(q, bio, page, len, offset, false);
}
EXPORT_SYMBOL(bio_add_pc_page);

/**
 * __bio_try_merge_page - try appending data to an existing bvec.
 * @bio: destination bio
 * @page: start page to add
 * @len: length of the data to add
 * @off: offset of the data relative to @page
 * @same_page: if %true only merge if the new data is in the same physical
 *		page as the last segment of the bio.
 *
 * Try to add the data at @page + @off to the last bvec of @bio.  This is a
 * useful optimisation for file systems with a block size smaller than the
 * page size.
 *
 * Warn if (@len, @off) crosses pages in case that @same_page is true.
 *
 * Return %true on success or %false on failure.
 */
bool __bio_try_merge_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off, bool same_page)
{
	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return false;

	if (bio->bi_vcnt > 0) {
		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page_is_mergeable(bv, page, len, off, same_page)) {
			bv->bv_len += len;
			bio->bi_iter.bi_size += len;
			return true;
		}
	}
	return false;
}
EXPORT_SYMBOL_GPL(__bio_try_merge_page);
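
/*
 * Sketch of the try-merge-then-add pattern this helper was built for
 * (filesystems with blocks smaller than a page, as in the iomap code):
 * attempt a same-page merge first and only then consume a fresh bvec.
 */
static inline bool example_add_block(struct bio *bio, struct page *page,
				     unsigned int len, unsigned int off)
{
	if (__bio_try_merge_page(bio, page, len, off, true))
		return true;
	if (bio_full(bio))
		return false;	/* caller must submit and start a new bio */
	__bio_add_page(bio, page, len, off);
	return true;
}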

/**
 * __bio_add_page - add page(s) to a bio in a new segment
 * @bio: destination bio
 * @page: start page to add
 * @len: length of the data to add, may cross pages
 * @off: offset of the data relative to @page, may cross pages
 *
 * Add the data at @page + @off to @bio as a new bvec.  The caller must ensure
 * that @bio has space for another bvec.
 */
void __bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off)
{
	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];

	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	WARN_ON_ONCE(bio_full(bio));

	bv->bv_page = page;
	bv->bv_offset = off;
	bv->bv_len = len;

	bio->bi_iter.bi_size += len;
	bio->bi_vcnt++;
}
EXPORT_SYMBOL_GPL(__bio_add_page);

/**
 *	bio_add_page	-	attempt to add page(s) to bio
 *	@bio: destination bio
 *	@page: start page to add
 *	@len: vec entry length, may cross pages
 *	@offset: vec entry offset relative to @page, may cross pages
 *
 *	Attempt to add page(s) to the bio_vec maplist. This will only fail
 *	if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
 */
int bio_add_page(struct bio *bio, struct page *page,
		 unsigned int len, unsigned int offset)
{
	if (!__bio_try_merge_page(bio, page, len, offset, false)) {
		if (bio_full(bio))
			return 0;
		__bio_add_page(bio, page, len, offset);
	}
	return len;
}
EXPORT_SYMBOL(bio_add_page);
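
/*
 * A sketch of driving bio_add_page() from a page array. The caller is
 * assumed to own @pages and to have set up the device, sector and
 * operation already; when the bio fills up early, the caller would
 * submit it and carry on with a fresh one.
 */
static inline unsigned int example_fill_bio(struct bio *bio,
					    struct page **pages,
					    unsigned int nr_pages)
{
	unsigned int i;

	for (i = 0; i < nr_pages; i++)
		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
			break;	/* bio full (or cloned): stop adding */
	return i;		/* number of pages actually added */
}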

static void bio_get_pages(struct bio *bio)
{
	struct bvec_iter_all iter_all;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, iter_all)
		get_page(bvec->bv_page);
}

static void bio_release_pages(struct bio *bio)
{
	struct bvec_iter_all iter_all;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, iter_all)
		put_page(bvec->bv_page);
}

static int __bio_iov_bvec_add_pages(struct bio *bio, struct iov_iter *iter)
{
	const struct bio_vec *bv = iter->bvec;
	unsigned int len;
	size_t size;

	if (WARN_ON_ONCE(iter->iov_offset > bv->bv_len))
		return -EINVAL;

	len = min_t(size_t, bv->bv_len - iter->iov_offset, iter->count);
	size = bio_add_page(bio, bv->bv_page, len,
				bv->bv_offset + iter->iov_offset);
	if (unlikely(size != len))
		return -EINVAL;
	iov_iter_advance(iter, size);
	return 0;
}

#define PAGE_PTRS_PER_BVEC     (sizeof(struct bio_vec) / sizeof(struct page *))

8872cefe4dbSKent Overstreet /**
88817d51b10SMartin Wilck  * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
8892cefe4dbSKent Overstreet  * @bio: bio to add pages to
8902cefe4dbSKent Overstreet  * @iter: iov iterator describing the region to be mapped
8912cefe4dbSKent Overstreet  *
89217d51b10SMartin Wilck  * Pins pages from *iter and appends them to @bio's bvec array. The
8932cefe4dbSKent Overstreet  * pages will have to be released using put_page() when done.
89417d51b10SMartin Wilck  * For multi-segment *iter, this function only adds pages from the
89517d51b10SMartin Wilck  * the next non-empty segment of the iov iterator.
8962cefe4dbSKent Overstreet  */
static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
	struct page **pages = (struct page **)bv;
	ssize_t size, left;
	unsigned len, i;
	size_t offset;

	/*
	 * Move page array up in the allocated memory for the bio vecs as far as
	 * possible so that we can start filling biovecs from the beginning
	 * without overwriting the temporary page array.
	*/
	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);

	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
	if (unlikely(size <= 0))
		return size ? size : -EFAULT;

	for (left = size, i = 0; left > 0; left -= len, i++) {
		struct page *page = pages[i];

		len = min_t(size_t, PAGE_SIZE - offset, left);
		if (WARN_ON_ONCE(bio_add_page(bio, page, len, offset) != len))
			return -EINVAL;
		offset = 0;
	}

	iov_iter_advance(iter, size);
	return 0;
}

/**
 * bio_iov_iter_get_pages - add user or kernel pages to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be added
 *
 * This takes either an iterator pointing to user memory, or one pointing to
 * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
 * map them into the kernel. On IO completion, the caller should put those
 * pages. If we're adding kernel pages, and the caller told us it's safe to
 * do so, we just have to add the pages to the bio directly. We don't grab an
 * extra reference to those pages (the user should already have that), and we
 * don't put the page on IO completion. The caller needs to check if the bio is
 * flagged BIO_NO_PAGE_REF on IO completion. If it isn't, then pages should be
 * released.
 *
 * The function tries, but does not guarantee, to pin as many pages as
 * fit into the bio, or are requested in *iter, whichever is smaller. If
 * MM encounters an error pinning the requested pages, it stops. Error
 * is returned only if 0 pages could be pinned.
 */
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
	const bool is_bvec = iov_iter_is_bvec(iter);
	int ret;

	if (WARN_ON_ONCE(bio->bi_vcnt))
		return -EINVAL;

	do {
		if (is_bvec)
			ret = __bio_iov_bvec_add_pages(bio, iter);
		else
			ret = __bio_iov_iter_get_pages(bio, iter);
	} while (!ret && iov_iter_count(iter) && !bio_full(bio));

	if (iov_iter_bvec_no_ref(iter))
		bio_set_flag(bio, BIO_NO_PAGE_REF);
	else if (is_bvec)
		bio_get_pages(bio);

	return bio->bi_vcnt ? 0 : ret;
}

static void submit_bio_wait_endio(struct bio *bio)
{
	complete(bio->bi_private);
}

/**
 * submit_bio_wait - submit a bio, and wait until it completes
 * @bio: The &struct bio which describes the I/O
 *
 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
 * bio_endio() on failure.
 *
 * WARNING: Unlike how submit_bio() is usually used, this function does not
 * result in the bio reference being consumed. The caller must drop the
 * reference on their own.
 */
int submit_bio_wait(struct bio *bio)
{
	DECLARE_COMPLETION_ONSTACK_MAP(done, bio->bi_disk->lockdep_map);

	bio->bi_private = &done;
	bio->bi_end_io = submit_bio_wait_endio;
	bio->bi_opf |= REQ_SYNC;
	submit_bio(bio);
	wait_for_completion_io(&done);

	return blk_status_to_errno(bio->bi_status);
}
EXPORT_SYMBOL(submit_bio_wait);
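
/*
 * Sketch of a simple synchronous one-page read built on the helpers
 * above. Note the explicit bio_put(): as the warning on submit_bio_wait()
 * says, the reference is not consumed for us. @bdev and @page are assumed
 * to come from the caller; GFP_NOIO keeps the mempool guarantee, so the
 * allocation cannot fail.
 */
static inline int example_sync_read(struct block_device *bdev,
				    sector_t sector, struct page *page)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	int ret;

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_READ;
	__bio_add_page(bio, page, PAGE_SIZE, 0);

	ret = submit_bio_wait(bio);
	bio_put(bio);
	return ret;
}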

/**
 * bio_advance - increment/complete a bio by some number of bytes
 * @bio:	bio to advance
 * @bytes:	number of bytes to complete
 *
 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
 * be updated on the last bvec as well.
 *
 * @bio will then represent the remaining, uncompleted portion of the io.
 */
void bio_advance(struct bio *bio, unsigned bytes)
{
	if (bio_integrity(bio))
		bio_integrity_advance(bio, bytes);

	bio_advance_iter(bio, &bio->bi_iter, bytes);
}
EXPORT_SYMBOL(bio_advance);

void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
			struct bio *src, struct bvec_iter *src_iter)
{
	struct bio_vec src_bv, dst_bv;
	void *src_p, *dst_p;
	unsigned bytes;

	while (src_iter->bi_size && dst_iter->bi_size) {
		src_bv = bio_iter_iovec(src, *src_iter);
		dst_bv = bio_iter_iovec(dst, *dst_iter);

		bytes = min(src_bv.bv_len, dst_bv.bv_len);

		src_p = kmap_atomic(src_bv.bv_page);
		dst_p = kmap_atomic(dst_bv.bv_page);

		memcpy(dst_p + dst_bv.bv_offset,
		       src_p + src_bv.bv_offset,
		       bytes);

		kunmap_atomic(dst_p);
		kunmap_atomic(src_p);

		flush_dcache_page(dst_bv.bv_page);

		bio_advance_iter(src, src_iter, bytes);
		bio_advance_iter(dst, dst_iter, bytes);
	}
}
EXPORT_SYMBOL(bio_copy_data_iter);

/**
 * bio_copy_data - copy contents of data buffers from one bio to another
 * @src: source bio
 * @dst: destination bio
 *
 * Stops when it reaches the end of either @src or @dst - that is, copies
 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
 */
void bio_copy_data(struct bio *dst, struct bio *src)
{
	struct bvec_iter src_iter = src->bi_iter;
	struct bvec_iter dst_iter = dst->bi_iter;

	bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
}
EXPORT_SYMBOL(bio_copy_data);

/**
 * bio_list_copy_data - copy contents of data buffers from one chain of bios to
 * another
 * @src: source bio list
 * @dst: destination bio list
 *
 * Stops when it reaches the end of either the @src list or @dst list - that is,
 * copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of
 * bios).
 */
void bio_list_copy_data(struct bio *dst, struct bio *src)
{
	struct bvec_iter src_iter = src->bi_iter;
	struct bvec_iter dst_iter = dst->bi_iter;

	while (1) {
		if (!src_iter.bi_size) {
			src = src->bi_next;
			if (!src)
				break;

			src_iter = src->bi_iter;
		}

		if (!dst_iter.bi_size) {
			dst = dst->bi_next;
			if (!dst)
				break;

			dst_iter = dst->bi_iter;
		}

		bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
	}
}
EXPORT_SYMBOL(bio_list_copy_data);

1110f9c78b2bSJens Axboe struct bio_map_data {
1111f9c78b2bSJens Axboe 	int is_our_pages;
111226e49cfcSKent Overstreet 	struct iov_iter iter;
111326e49cfcSKent Overstreet 	struct iovec iov[];
1114f9c78b2bSJens Axboe };
1115f9c78b2bSJens Axboe 
11160e5b935dSAl Viro static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
1117f9c78b2bSJens Axboe 					       gfp_t gfp_mask)
1118f9c78b2bSJens Axboe {
11190e5b935dSAl Viro 	struct bio_map_data *bmd;
11200e5b935dSAl Viro 	if (data->nr_segs > UIO_MAXIOV)
1121f9c78b2bSJens Axboe 		return NULL;
1122f9c78b2bSJens Axboe 
11230e5b935dSAl Viro 	bmd = kmalloc(sizeof(struct bio_map_data) +
11240e5b935dSAl Viro 		       sizeof(struct iovec) * data->nr_segs, gfp_mask);
11250e5b935dSAl Viro 	if (!bmd)
11260e5b935dSAl Viro 		return NULL;
11270e5b935dSAl Viro 	memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
11280e5b935dSAl Viro 	bmd->iter = *data;
11290e5b935dSAl Viro 	bmd->iter.iov = bmd->iov;
11300e5b935dSAl Viro 	return bmd;
1131f9c78b2bSJens Axboe }
1132f9c78b2bSJens Axboe 
11339124d3feSDongsu Park /**
11349124d3feSDongsu Park  * bio_copy_from_iter - copy all pages from iov_iter to bio
11359124d3feSDongsu Park  * @bio: The &struct bio which describes the I/O as destination
11369124d3feSDongsu Park  * @iter: iov_iter as source
11379124d3feSDongsu Park  *
11389124d3feSDongsu Park  * Copy all pages from iov_iter to bio.
11399124d3feSDongsu Park  * Returns 0 on success, or error on failure.
11409124d3feSDongsu Park  */
114198a09d61SAl Viro static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
1142f9c78b2bSJens Axboe {
1143f9c78b2bSJens Axboe 	struct bio_vec *bvec;
11446dc4f100SMing Lei 	struct bvec_iter_all iter_all;
1145f9c78b2bSJens Axboe 
11462b070cfeSChristoph Hellwig 	bio_for_each_segment_all(bvec, bio, iter_all) {
11479124d3feSDongsu Park 		ssize_t ret;
1148f9c78b2bSJens Axboe 
11499124d3feSDongsu Park 		ret = copy_page_from_iter(bvec->bv_page,
11509124d3feSDongsu Park 					  bvec->bv_offset,
11519124d3feSDongsu Park 					  bvec->bv_len,
115298a09d61SAl Viro 					  iter);
1153f9c78b2bSJens Axboe 
115498a09d61SAl Viro 		if (!iov_iter_count(iter))
11559124d3feSDongsu Park 			break;
1156f9c78b2bSJens Axboe 
11579124d3feSDongsu Park 		if (ret < bvec->bv_len)
11589124d3feSDongsu Park 			return -EFAULT;
1159f9c78b2bSJens Axboe 	}
1160f9c78b2bSJens Axboe 
11619124d3feSDongsu Park 	return 0;
1162f9c78b2bSJens Axboe }
1163f9c78b2bSJens Axboe 
11649124d3feSDongsu Park /**
11659124d3feSDongsu Park  * bio_copy_to_iter - copy all pages from bio to iov_iter
11669124d3feSDongsu Park  * @bio: The &struct bio which describes the I/O as source
11679124d3feSDongsu Park  * @iter: iov_iter as destination
11689124d3feSDongsu Park  *
11699124d3feSDongsu Park  * Copy all pages from bio to iov_iter.
11709124d3feSDongsu Park  * Returns 0 on success, or error on failure.
11719124d3feSDongsu Park  */
11729124d3feSDongsu Park static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
11739124d3feSDongsu Park {
11749124d3feSDongsu Park 	struct bio_vec *bvec;
11756dc4f100SMing Lei 	struct bvec_iter_all iter_all;
11769124d3feSDongsu Park 
11772b070cfeSChristoph Hellwig 	bio_for_each_segment_all(bvec, bio, iter_all) {
11789124d3feSDongsu Park 		ssize_t ret;
11799124d3feSDongsu Park 
11809124d3feSDongsu Park 		ret = copy_page_to_iter(bvec->bv_page,
11819124d3feSDongsu Park 					bvec->bv_offset,
11829124d3feSDongsu Park 					bvec->bv_len,
11839124d3feSDongsu Park 					&iter);
11849124d3feSDongsu Park 
11859124d3feSDongsu Park 		if (!iov_iter_count(&iter))
11869124d3feSDongsu Park 			break;
11879124d3feSDongsu Park 
11889124d3feSDongsu Park 		if (ret < bvec->bv_len)
11899124d3feSDongsu Park 			return -EFAULT;
11909124d3feSDongsu Park 	}
11919124d3feSDongsu Park 
11929124d3feSDongsu Park 	return 0;
1193f9c78b2bSJens Axboe }
1194f9c78b2bSJens Axboe 
1195491221f8SGuoqing Jiang void bio_free_pages(struct bio *bio)
11961dfa0f68SChristoph Hellwig {
11971dfa0f68SChristoph Hellwig 	struct bio_vec *bvec;
11986dc4f100SMing Lei 	struct bvec_iter_all iter_all;
11991dfa0f68SChristoph Hellwig 
12002b070cfeSChristoph Hellwig 	bio_for_each_segment_all(bvec, bio, iter_all)
12011dfa0f68SChristoph Hellwig 		__free_page(bvec->bv_page);
12021dfa0f68SChristoph Hellwig }
1203491221f8SGuoqing Jiang EXPORT_SYMBOL(bio_free_pages);
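
/*
 * Example (sketch): a bio whose pages were allocated by the driver itself
 * can release them all in its completion handler; "my_endio" here is a
 * hypothetical bi_end_io callback:
 *
 *	static void my_endio(struct bio *bio)
 *	{
 *		bio_free_pages(bio);
 *		bio_put(bio);
 *	}
 */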
12041dfa0f68SChristoph Hellwig 
1205f9c78b2bSJens Axboe /**
1206f9c78b2bSJens Axboe  *	bio_uncopy_user	-	finish previously mapped bio
1207f9c78b2bSJens Axboe  *	@bio: bio being terminated
1208f9c78b2bSJens Axboe  *
1209ddad8dd0SChristoph Hellwig  *	Free pages allocated from bio_copy_user_iov() and write back data
1210f9c78b2bSJens Axboe  *	to user space in case of a read.
1211f9c78b2bSJens Axboe  */
1212f9c78b2bSJens Axboe int bio_uncopy_user(struct bio *bio)
1213f9c78b2bSJens Axboe {
1214f9c78b2bSJens Axboe 	struct bio_map_data *bmd = bio->bi_private;
12151dfa0f68SChristoph Hellwig 	int ret = 0;
1216f9c78b2bSJens Axboe 
1217f9c78b2bSJens Axboe 	if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
1218f9c78b2bSJens Axboe 		/*
1219f9c78b2bSJens Axboe 		 * If we're in a workqueue, the request is orphaned, so don't
12202d99b55dSHannes Reinecke 		 * copy into a random user address space; just free the pages
12212d99b55dSHannes Reinecke 		 * and return -EINTR so user space doesn't expect any data.
1222f9c78b2bSJens Axboe 		 */
12232d99b55dSHannes Reinecke 		if (!current->mm)
12242d99b55dSHannes Reinecke 			ret = -EINTR;
12252d99b55dSHannes Reinecke 		else if (bio_data_dir(bio) == READ)
12269124d3feSDongsu Park 			ret = bio_copy_to_iter(bio, bmd->iter);
12271dfa0f68SChristoph Hellwig 		if (bmd->is_our_pages)
12281dfa0f68SChristoph Hellwig 			bio_free_pages(bio);
1229f9c78b2bSJens Axboe 	}
1230f9c78b2bSJens Axboe 	kfree(bmd);
1231f9c78b2bSJens Axboe 	bio_put(bio);
1232f9c78b2bSJens Axboe 	return ret;
1233f9c78b2bSJens Axboe }
1234f9c78b2bSJens Axboe 
1235f9c78b2bSJens Axboe /**
1236f9c78b2bSJens Axboe  *	bio_copy_user_iov	-	copy user data to bio
1237f9c78b2bSJens Axboe  *	@q:		destination block queue
1238f9c78b2bSJens Axboe  *	@map_data:	pointer to the rq_map_data holding pages (if necessary)
123926e49cfcSKent Overstreet  *	@iter:		iovec iterator
1240f9c78b2bSJens Axboe  *	@gfp_mask:	memory allocation flags
1241f9c78b2bSJens Axboe  *
1242f9c78b2bSJens Axboe  *	Prepares and returns a bio for indirect user io, bouncing data
1243f9c78b2bSJens Axboe  *	to/from kernel pages as necessary. Must be paired with a
1244f9c78b2bSJens Axboe  *	call to bio_uncopy_user() on io completion.
1245f9c78b2bSJens Axboe  */
1246f9c78b2bSJens Axboe struct bio *bio_copy_user_iov(struct request_queue *q,
1247f9c78b2bSJens Axboe 			      struct rq_map_data *map_data,
1248e81cef5dSAl Viro 			      struct iov_iter *iter,
124926e49cfcSKent Overstreet 			      gfp_t gfp_mask)
1250f9c78b2bSJens Axboe {
1251f9c78b2bSJens Axboe 	struct bio_map_data *bmd;
1252f9c78b2bSJens Axboe 	struct page *page;
1253f9c78b2bSJens Axboe 	struct bio *bio;
1254d16d44ebSAl Viro 	int i = 0, ret;
1255d16d44ebSAl Viro 	int nr_pages;
125626e49cfcSKent Overstreet 	unsigned int len = iter->count;
1257bd5ceceaSGeliang Tang 	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;
1258f9c78b2bSJens Axboe 
12590e5b935dSAl Viro 	bmd = bio_alloc_map_data(iter, gfp_mask);
1260f9c78b2bSJens Axboe 	if (!bmd)
1261f9c78b2bSJens Axboe 		return ERR_PTR(-ENOMEM);
1262f9c78b2bSJens Axboe 
126326e49cfcSKent Overstreet 	/*
126426e49cfcSKent Overstreet 	 * We need to do a deep copy of the iov_iter including the iovecs.
126526e49cfcSKent Overstreet 	 * The caller-provided iov might point to an on-stack or otherwise
126626e49cfcSKent Overstreet 	 * short-lived one.
126726e49cfcSKent Overstreet 	 */
126826e49cfcSKent Overstreet 	bmd->is_our_pages = map_data ? 0 : 1;
126926e49cfcSKent Overstreet 
1270d16d44ebSAl Viro 	nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
1271d16d44ebSAl Viro 	if (nr_pages > BIO_MAX_PAGES)
1272d16d44ebSAl Viro 		nr_pages = BIO_MAX_PAGES;
1273f9c78b2bSJens Axboe 
1274f9c78b2bSJens Axboe 	ret = -ENOMEM;
1275f9c78b2bSJens Axboe 	bio = bio_kmalloc(gfp_mask, nr_pages);
1276f9c78b2bSJens Axboe 	if (!bio)
1277f9c78b2bSJens Axboe 		goto out_bmd;
1278f9c78b2bSJens Axboe 
1279f9c78b2bSJens Axboe 	ret = 0;
1280f9c78b2bSJens Axboe 
1281f9c78b2bSJens Axboe 	if (map_data) {
1282f9c78b2bSJens Axboe 		nr_pages = 1 << map_data->page_order;
1283f9c78b2bSJens Axboe 		i = map_data->offset / PAGE_SIZE;
1284f9c78b2bSJens Axboe 	}
1285f9c78b2bSJens Axboe 	while (len) {
1286f9c78b2bSJens Axboe 		unsigned int bytes = PAGE_SIZE;
1287f9c78b2bSJens Axboe 
1288f9c78b2bSJens Axboe 		bytes -= offset;
1289f9c78b2bSJens Axboe 
1290f9c78b2bSJens Axboe 		if (bytes > len)
1291f9c78b2bSJens Axboe 			bytes = len;
1292f9c78b2bSJens Axboe 
1293f9c78b2bSJens Axboe 		if (map_data) {
1294f9c78b2bSJens Axboe 			if (i == map_data->nr_entries * nr_pages) {
1295f9c78b2bSJens Axboe 				ret = -ENOMEM;
1296f9c78b2bSJens Axboe 				break;
1297f9c78b2bSJens Axboe 			}
1298f9c78b2bSJens Axboe 
1299f9c78b2bSJens Axboe 			page = map_data->pages[i / nr_pages];
1300f9c78b2bSJens Axboe 			page += (i % nr_pages);
1301f9c78b2bSJens Axboe 
1302f9c78b2bSJens Axboe 			i++;
1303f9c78b2bSJens Axboe 		} else {
1304f9c78b2bSJens Axboe 			page = alloc_page(q->bounce_gfp | gfp_mask);
1305f9c78b2bSJens Axboe 			if (!page) {
1306f9c78b2bSJens Axboe 				ret = -ENOMEM;
1307f9c78b2bSJens Axboe 				break;
1308f9c78b2bSJens Axboe 			}
1309f9c78b2bSJens Axboe 		}
1310f9c78b2bSJens Axboe 
1311a3761c3cSJérôme Glisse 		if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
1312a3761c3cSJérôme Glisse 			if (!map_data)
1313a3761c3cSJérôme Glisse 				__free_page(page);
1314f9c78b2bSJens Axboe 			break;
1315a3761c3cSJérôme Glisse 		}
1316f9c78b2bSJens Axboe 
1317f9c78b2bSJens Axboe 		len -= bytes;
1318f9c78b2bSJens Axboe 		offset = 0;
1319f9c78b2bSJens Axboe 	}
1320f9c78b2bSJens Axboe 
1321f9c78b2bSJens Axboe 	if (ret)
1322f9c78b2bSJens Axboe 		goto cleanup;
1323f9c78b2bSJens Axboe 
13242884d0beSAl Viro 	if (map_data)
13252884d0beSAl Viro 		map_data->offset += bio->bi_iter.bi_size;
13262884d0beSAl Viro 
1327f9c78b2bSJens Axboe 	/*
1328f9c78b2bSJens Axboe 	 * success
1329f9c78b2bSJens Axboe 	 */
133000e23707SDavid Howells 	if ((iov_iter_rw(iter) == WRITE && (!map_data || !map_data->null_mapped)) ||
1331f9c78b2bSJens Axboe 	    (map_data && map_data->from_user)) {
133298a09d61SAl Viro 		ret = bio_copy_from_iter(bio, iter);
1333f9c78b2bSJens Axboe 		if (ret)
1334f9c78b2bSJens Axboe 			goto cleanup;
133598a09d61SAl Viro 	} else {
1336f55adad6SKeith Busch 		if (bmd->is_our_pages)
1337f3587d76SKeith Busch 			zero_fill_bio(bio);
1338e81cef5dSAl Viro 		iov_iter_advance(iter, bio->bi_iter.bi_size);
1339f9c78b2bSJens Axboe 	}
1340f9c78b2bSJens Axboe 
134126e49cfcSKent Overstreet 	bio->bi_private = bmd;
13422884d0beSAl Viro 	if (map_data && map_data->null_mapped)
13432884d0beSAl Viro 		bio_set_flag(bio, BIO_NULL_MAPPED);
1344f9c78b2bSJens Axboe 	return bio;
1345f9c78b2bSJens Axboe cleanup:
1346f9c78b2bSJens Axboe 	if (!map_data)
13471dfa0f68SChristoph Hellwig 		bio_free_pages(bio);
1348f9c78b2bSJens Axboe 	bio_put(bio);
1349f9c78b2bSJens Axboe out_bmd:
1350f9c78b2bSJens Axboe 	kfree(bmd);
1351f9c78b2bSJens Axboe 	return ERR_PTR(ret);
1352f9c78b2bSJens Axboe }
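
/*
 * Example (sketch of how the request-mapping code drives this; error
 * handling elided): bounce the user data in, do the I/O, then copy back
 * and free everything via bio_uncopy_user():
 *
 *	struct bio *bio = bio_copy_user_iov(q, NULL, iter, GFP_KERNEL);
 *
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *	... attach to a request and wait for completion ...
 *	ret = bio_uncopy_user(bio);
 */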
1353f9c78b2bSJens Axboe 
135437f19e57SChristoph Hellwig /**
135537f19e57SChristoph Hellwig  *	bio_map_user_iov - map user iovec into bio
135637f19e57SChristoph Hellwig  *	@q:		the struct request_queue for the bio
135737f19e57SChristoph Hellwig  *	@iter:		iovec iterator
135837f19e57SChristoph Hellwig  *	@gfp_mask:	memory allocation flags
135937f19e57SChristoph Hellwig  *
136037f19e57SChristoph Hellwig  *	Map the user space address into a bio suitable for io to a block
136137f19e57SChristoph Hellwig  *	device. Returns an error pointer in case of error.
136237f19e57SChristoph Hellwig  */
136337f19e57SChristoph Hellwig struct bio *bio_map_user_iov(struct request_queue *q,
1364e81cef5dSAl Viro 			     struct iov_iter *iter,
136526e49cfcSKent Overstreet 			     gfp_t gfp_mask)
1366f9c78b2bSJens Axboe {
136726e49cfcSKent Overstreet 	int j;
1368f9c78b2bSJens Axboe 	struct bio *bio;
1369076098e5SAl Viro 	int ret;
13702b04e8f6SAl Viro 	struct bio_vec *bvec;
13716dc4f100SMing Lei 	struct bvec_iter_all iter_all;
1372f9c78b2bSJens Axboe 
1373b282cc76SAl Viro 	if (!iov_iter_count(iter))
1374f9c78b2bSJens Axboe 		return ERR_PTR(-EINVAL);
1375f9c78b2bSJens Axboe 
1376b282cc76SAl Viro 	bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES));
1377f9c78b2bSJens Axboe 	if (!bio)
1378f9c78b2bSJens Axboe 		return ERR_PTR(-ENOMEM);
1379f9c78b2bSJens Axboe 
13800a0f1513SAl Viro 	while (iov_iter_count(iter)) {
1381629e42bcSAl Viro 		struct page **pages;
1382076098e5SAl Viro 		ssize_t bytes;
1383076098e5SAl Viro 		size_t offs, added = 0;
1384076098e5SAl Viro 		int npages;
1385f9c78b2bSJens Axboe 
13860a0f1513SAl Viro 		bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
1387076098e5SAl Viro 		if (unlikely(bytes <= 0)) {
1388076098e5SAl Viro 			ret = bytes ? bytes : -EFAULT;
1389f9c78b2bSJens Axboe 			goto out_unmap;
1390f9c78b2bSJens Axboe 		}
1391f9c78b2bSJens Axboe 
1392076098e5SAl Viro 		npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);
1393076098e5SAl Viro 
139498f0bc99SAl Viro 		if (unlikely(offs & queue_dma_alignment(q))) {
139598f0bc99SAl Viro 			ret = -EINVAL;
139698f0bc99SAl Viro 			j = 0;
139798f0bc99SAl Viro 		} else {
1398629e42bcSAl Viro 			for (j = 0; j < npages; j++) {
139998f0bc99SAl Viro 				struct page *page = pages[j];
1400076098e5SAl Viro 				unsigned int n = PAGE_SIZE - offs;
1401f9c78b2bSJens Axboe 
1402076098e5SAl Viro 				if (n > bytes)
1403076098e5SAl Viro 					n = bytes;
1404f9c78b2bSJens Axboe 
140519047087SMing Lei 				if (!__bio_add_pc_page(q, bio, page, n, offs,
140619047087SMing Lei 							true))
1407f9c78b2bSJens Axboe 					break;
1408f9c78b2bSJens Axboe 
1409076098e5SAl Viro 				added += n;
1410076098e5SAl Viro 				bytes -= n;
1411076098e5SAl Viro 				offs = 0;
1412f9c78b2bSJens Axboe 			}
14130a0f1513SAl Viro 			iov_iter_advance(iter, added);
141498f0bc99SAl Viro 		}
1415f9c78b2bSJens Axboe 		/*
1416f9c78b2bSJens Axboe 		 * release the pages we didn't map into the bio, if any
1417f9c78b2bSJens Axboe 		 */
1418629e42bcSAl Viro 		while (j < npages)
141909cbfeafSKirill A. Shutemov 			put_page(pages[j++]);
1420629e42bcSAl Viro 		kvfree(pages);
1421e2e115d1SAl Viro 		/* couldn't stuff something into bio? */
1422e2e115d1SAl Viro 		if (bytes)
1423e2e115d1SAl Viro 			break;
1424f9c78b2bSJens Axboe 	}
1425f9c78b2bSJens Axboe 
1426b7c44ed9SJens Axboe 	bio_set_flag(bio, BIO_USER_MAPPED);
142737f19e57SChristoph Hellwig 
142837f19e57SChristoph Hellwig 	/*
14295fad1b64SBart Van Assche 	 * subtle -- if bio_map_user_iov() ended up bouncing a bio,
143037f19e57SChristoph Hellwig 	 * it would normally disappear when its bi_end_io is run.
143137f19e57SChristoph Hellwig 	 * however, we need it for the unmap, so grab an extra
143237f19e57SChristoph Hellwig 	 * reference to it
143337f19e57SChristoph Hellwig 	 */
143437f19e57SChristoph Hellwig 	bio_get(bio);
1435f9c78b2bSJens Axboe 	return bio;
1436f9c78b2bSJens Axboe 
1437f9c78b2bSJens Axboe  out_unmap:
14382b070cfeSChristoph Hellwig 	bio_for_each_segment_all(bvec, bio, iter_all) {
14392b04e8f6SAl Viro 		put_page(bvec->bv_page);
1440f9c78b2bSJens Axboe 	}
1441f9c78b2bSJens Axboe 	bio_put(bio);
1442f9c78b2bSJens Axboe 	return ERR_PTR(ret);
1443f9c78b2bSJens Axboe }
1444f9c78b2bSJens Axboe 
1445f9c78b2bSJens Axboe static void __bio_unmap_user(struct bio *bio)
1446f9c78b2bSJens Axboe {
1447f9c78b2bSJens Axboe 	struct bio_vec *bvec;
14486dc4f100SMing Lei 	struct bvec_iter_all iter_all;
1449f9c78b2bSJens Axboe 
1450f9c78b2bSJens Axboe 	/*
1451f9c78b2bSJens Axboe 	 * make sure we dirty pages we wrote to
1452f9c78b2bSJens Axboe 	 */
14532b070cfeSChristoph Hellwig 	bio_for_each_segment_all(bvec, bio, iter_all) {
1454f9c78b2bSJens Axboe 		if (bio_data_dir(bio) == READ)
1455f9c78b2bSJens Axboe 			set_page_dirty_lock(bvec->bv_page);
1456f9c78b2bSJens Axboe 
145709cbfeafSKirill A. Shutemov 		put_page(bvec->bv_page);
1458f9c78b2bSJens Axboe 	}
1459f9c78b2bSJens Axboe 
1460f9c78b2bSJens Axboe 	bio_put(bio);
1461f9c78b2bSJens Axboe }
1462f9c78b2bSJens Axboe 
1463f9c78b2bSJens Axboe /**
1464f9c78b2bSJens Axboe  *	bio_unmap_user	-	unmap a bio
1465f9c78b2bSJens Axboe  *	@bio:		the bio being unmapped
1466f9c78b2bSJens Axboe  *
14675fad1b64SBart Van Assche  *	Unmap a bio previously mapped by bio_map_user_iov(). Must be called from
14685fad1b64SBart Van Assche  *	process context.
1469f9c78b2bSJens Axboe  *
1470f9c78b2bSJens Axboe  *	bio_unmap_user() may sleep.
1471f9c78b2bSJens Axboe  */
1472f9c78b2bSJens Axboe void bio_unmap_user(struct bio *bio)
1473f9c78b2bSJens Axboe {
1474f9c78b2bSJens Axboe 	__bio_unmap_user(bio);
1475f9c78b2bSJens Axboe 	bio_put(bio);
1476f9c78b2bSJens Axboe }
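
/*
 * Example (sketch, simplified from the blk_rq_map_user_iov() path): the
 * extra reference taken at the end of bio_map_user_iov() is what keeps
 * the bio alive until the unmap:
 *
 *	struct bio *bio = bio_map_user_iov(q, iter, GFP_KERNEL);
 *
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *	... submit and wait for completion ...
 *	bio_unmap_user(bio);	// dirties read pages, drops both refs
 */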
1477f9c78b2bSJens Axboe 
14784246a0b6SChristoph Hellwig static void bio_map_kern_endio(struct bio *bio)
1479f9c78b2bSJens Axboe {
1480f9c78b2bSJens Axboe 	bio_put(bio);
1481f9c78b2bSJens Axboe }
1482f9c78b2bSJens Axboe 
148375c72b83SChristoph Hellwig /**
148475c72b83SChristoph Hellwig  *	bio_map_kern	-	map kernel address into bio
148575c72b83SChristoph Hellwig  *	@q: the struct request_queue for the bio
148675c72b83SChristoph Hellwig  *	@data: pointer to buffer to map
148775c72b83SChristoph Hellwig  *	@len: length in bytes
148875c72b83SChristoph Hellwig  *	@gfp_mask: allocation flags for bio allocation
148975c72b83SChristoph Hellwig  *
149075c72b83SChristoph Hellwig  *	Map the kernel address into a bio suitable for io to a block
149175c72b83SChristoph Hellwig  *	device. Returns an error pointer in case of error.
149275c72b83SChristoph Hellwig  */
149375c72b83SChristoph Hellwig struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
149475c72b83SChristoph Hellwig 			 gfp_t gfp_mask)
1495f9c78b2bSJens Axboe {
1496f9c78b2bSJens Axboe 	unsigned long kaddr = (unsigned long)data;
1497f9c78b2bSJens Axboe 	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1498f9c78b2bSJens Axboe 	unsigned long start = kaddr >> PAGE_SHIFT;
1499f9c78b2bSJens Axboe 	const int nr_pages = end - start;
1500f9c78b2bSJens Axboe 	int offset, i;
1501f9c78b2bSJens Axboe 	struct bio *bio;
1502f9c78b2bSJens Axboe 
1503f9c78b2bSJens Axboe 	bio = bio_kmalloc(gfp_mask, nr_pages);
1504f9c78b2bSJens Axboe 	if (!bio)
1505f9c78b2bSJens Axboe 		return ERR_PTR(-ENOMEM);
1506f9c78b2bSJens Axboe 
1507f9c78b2bSJens Axboe 	offset = offset_in_page(kaddr);
1508f9c78b2bSJens Axboe 	for (i = 0; i < nr_pages; i++) {
1509f9c78b2bSJens Axboe 		unsigned int bytes = PAGE_SIZE - offset;
1510f9c78b2bSJens Axboe 
1511f9c78b2bSJens Axboe 		if (len <= 0)
1512f9c78b2bSJens Axboe 			break;
1513f9c78b2bSJens Axboe 
1514f9c78b2bSJens Axboe 		if (bytes > len)
1515f9c78b2bSJens Axboe 			bytes = len;
1516f9c78b2bSJens Axboe 
1517f9c78b2bSJens Axboe 		if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
151875c72b83SChristoph Hellwig 				    offset) < bytes) {
151975c72b83SChristoph Hellwig 			/* we don't support partial mappings */
152075c72b83SChristoph Hellwig 			bio_put(bio);
152175c72b83SChristoph Hellwig 			return ERR_PTR(-EINVAL);
152275c72b83SChristoph Hellwig 		}
1523f9c78b2bSJens Axboe 
1524f9c78b2bSJens Axboe 		data += bytes;
1525f9c78b2bSJens Axboe 		len -= bytes;
1526f9c78b2bSJens Axboe 		offset = 0;
1527f9c78b2bSJens Axboe 	}
1528f9c78b2bSJens Axboe 
1529f9c78b2bSJens Axboe 	bio->bi_end_io = bio_map_kern_endio;
1530f9c78b2bSJens Axboe 	return bio;
1531f9c78b2bSJens Axboe }
1532f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_map_kern);
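
/*
 * Example (sketch): wrapping a kmalloc'ed buffer for block I/O.  The
 * buffer must be physically contiguous lowmem (kmalloc, not vmalloc),
 * since virt_to_page() is applied page by page, and it must stay alive
 * until the bio completes because the pages are mapped, not copied:
 *
 *	struct bio *bio = bio_map_kern(q, buf, len, GFP_KERNEL);
 *
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 */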
1533f9c78b2bSJens Axboe 
15344246a0b6SChristoph Hellwig static void bio_copy_kern_endio(struct bio *bio)
1535f9c78b2bSJens Axboe {
15361dfa0f68SChristoph Hellwig 	bio_free_pages(bio);
15371dfa0f68SChristoph Hellwig 	bio_put(bio);
15381dfa0f68SChristoph Hellwig }
15391dfa0f68SChristoph Hellwig 
15404246a0b6SChristoph Hellwig static void bio_copy_kern_endio_read(struct bio *bio)
15411dfa0f68SChristoph Hellwig {
154242d2683aSChristoph Hellwig 	char *p = bio->bi_private;
15431dfa0f68SChristoph Hellwig 	struct bio_vec *bvec;
15446dc4f100SMing Lei 	struct bvec_iter_all iter_all;
1545f9c78b2bSJens Axboe 
15462b070cfeSChristoph Hellwig 	bio_for_each_segment_all(bvec, bio, iter_all) {
15471dfa0f68SChristoph Hellwig 		memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
1548f9c78b2bSJens Axboe 		p += bvec->bv_len;
1549f9c78b2bSJens Axboe 	}
1550f9c78b2bSJens Axboe 
15514246a0b6SChristoph Hellwig 	bio_copy_kern_endio(bio);
1552f9c78b2bSJens Axboe }
1553f9c78b2bSJens Axboe 
1554f9c78b2bSJens Axboe /**
1555f9c78b2bSJens Axboe  *	bio_copy_kern	-	copy kernel address into bio
1556f9c78b2bSJens Axboe  *	@q: the struct request_queue for the bio
1557f9c78b2bSJens Axboe  *	@data: pointer to buffer to copy
1558f9c78b2bSJens Axboe  *	@len: length in bytes
1559f9c78b2bSJens Axboe  *	@gfp_mask: allocation flags for bio and page allocation
1560f9c78b2bSJens Axboe  *	@reading: data direction is READ
1561f9c78b2bSJens Axboe  *
1562f9c78b2bSJens Axboe  *	Copy the kernel address into a bio suitable for io to a block
1563f9c78b2bSJens Axboe  *	device. Returns an error pointer in case of error.
1564f9c78b2bSJens Axboe  */
1565f9c78b2bSJens Axboe struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
1566f9c78b2bSJens Axboe 			  gfp_t gfp_mask, int reading)
1567f9c78b2bSJens Axboe {
156842d2683aSChristoph Hellwig 	unsigned long kaddr = (unsigned long)data;
156942d2683aSChristoph Hellwig 	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
157042d2683aSChristoph Hellwig 	unsigned long start = kaddr >> PAGE_SHIFT;
157142d2683aSChristoph Hellwig 	struct bio *bio;
1572f9c78b2bSJens Axboe 	void *p = data;
15731dfa0f68SChristoph Hellwig 	int nr_pages = 0;
1574f9c78b2bSJens Axboe 
157542d2683aSChristoph Hellwig 	/*
157642d2683aSChristoph Hellwig 	 * Overflow, abort
157742d2683aSChristoph Hellwig 	 */
157842d2683aSChristoph Hellwig 	if (end < start)
157942d2683aSChristoph Hellwig 		return ERR_PTR(-EINVAL);
1580f9c78b2bSJens Axboe 
158142d2683aSChristoph Hellwig 	nr_pages = end - start;
158242d2683aSChristoph Hellwig 	bio = bio_kmalloc(gfp_mask, nr_pages);
158342d2683aSChristoph Hellwig 	if (!bio)
158442d2683aSChristoph Hellwig 		return ERR_PTR(-ENOMEM);
158542d2683aSChristoph Hellwig 
158642d2683aSChristoph Hellwig 	while (len) {
158742d2683aSChristoph Hellwig 		struct page *page;
158842d2683aSChristoph Hellwig 		unsigned int bytes = PAGE_SIZE;
158942d2683aSChristoph Hellwig 
159042d2683aSChristoph Hellwig 		if (bytes > len)
159142d2683aSChristoph Hellwig 			bytes = len;
159242d2683aSChristoph Hellwig 
159342d2683aSChristoph Hellwig 		page = alloc_page(q->bounce_gfp | gfp_mask);
159442d2683aSChristoph Hellwig 		if (!page)
159542d2683aSChristoph Hellwig 			goto cleanup;
159642d2683aSChristoph Hellwig 
159742d2683aSChristoph Hellwig 		if (!reading)
159842d2683aSChristoph Hellwig 			memcpy(page_address(page), p, bytes);
159942d2683aSChristoph Hellwig 
160042d2683aSChristoph Hellwig 		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
160142d2683aSChristoph Hellwig 			break;
160242d2683aSChristoph Hellwig 
160342d2683aSChristoph Hellwig 		len -= bytes;
160442d2683aSChristoph Hellwig 		p += bytes;
1605f9c78b2bSJens Axboe 	}
1606f9c78b2bSJens Axboe 
16071dfa0f68SChristoph Hellwig 	if (reading) {
16081dfa0f68SChristoph Hellwig 		bio->bi_end_io = bio_copy_kern_endio_read;
160942d2683aSChristoph Hellwig 		bio->bi_private = data;
16101dfa0f68SChristoph Hellwig 	} else {
1611f9c78b2bSJens Axboe 		bio->bi_end_io = bio_copy_kern_endio;
16121dfa0f68SChristoph Hellwig 	}
16131dfa0f68SChristoph Hellwig 
1614f9c78b2bSJens Axboe 	return bio;
161542d2683aSChristoph Hellwig 
161642d2683aSChristoph Hellwig cleanup:
16171dfa0f68SChristoph Hellwig 	bio_free_pages(bio);
161842d2683aSChristoph Hellwig 	bio_put(bio);
161942d2683aSChristoph Hellwig 	return ERR_PTR(-ENOMEM);
1620f9c78b2bSJens Axboe }
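
/*
 * Example (sketch): unlike bio_map_kern(), the data is bounced through
 * freshly allocated pages, so @data may be short-lived or vmalloc'ed.
 * For a read, bio_copy_kern_endio_read() copies the pages back on
 * completion:
 *
 *	struct bio *bio = bio_copy_kern(q, buf, len, GFP_KERNEL, 1);
 */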
1621f9c78b2bSJens Axboe 
1622f9c78b2bSJens Axboe /*
1623f9c78b2bSJens Axboe  * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
1624f9c78b2bSJens Axboe  * for performing direct-IO in BIOs.
1625f9c78b2bSJens Axboe  *
1626f9c78b2bSJens Axboe  * The problem is that we cannot run set_page_dirty() from interrupt context
1627f9c78b2bSJens Axboe  * because the required locks are not interrupt-safe.  So what we can do is to
1628f9c78b2bSJens Axboe  * mark the pages dirty _before_ performing IO.  And in interrupt context,
1629f9c78b2bSJens Axboe  * check that the pages are still dirty.   If so, fine.  If not, redirty them
1630f9c78b2bSJens Axboe  * in process context.
1631f9c78b2bSJens Axboe  *
1632f9c78b2bSJens Axboe  * We special-case compound pages here: normally this means reads into hugetlb
1633f9c78b2bSJens Axboe  * pages.  The logic in here doesn't really work right for compound pages
1634f9c78b2bSJens Axboe  * because the VM does not uniformly chase down the head page in all cases.
1635f9c78b2bSJens Axboe  * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
1636f9c78b2bSJens Axboe  * handle them at all.  So we skip compound pages here at an early stage.
1637f9c78b2bSJens Axboe  *
1638f9c78b2bSJens Axboe  * Note that this code is very hard to test under normal circumstances because
1639f9c78b2bSJens Axboe  * direct-io pins the pages with get_user_pages().  This makes
1640f9c78b2bSJens Axboe  * is_page_cache_freeable return false, and the VM will not clean the pages.
1641f9c78b2bSJens Axboe  * But other code (e.g. flusher threads) could clean the pages if they are mapped
1642f9c78b2bSJens Axboe  * pagecache.
1643f9c78b2bSJens Axboe  *
1644f9c78b2bSJens Axboe  * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
1645f9c78b2bSJens Axboe  * deferred bio dirtying paths.
1646f9c78b2bSJens Axboe  */
1647f9c78b2bSJens Axboe 
1648f9c78b2bSJens Axboe /*
1649f9c78b2bSJens Axboe  * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1650f9c78b2bSJens Axboe  */
1651f9c78b2bSJens Axboe void bio_set_pages_dirty(struct bio *bio)
1652f9c78b2bSJens Axboe {
1653f9c78b2bSJens Axboe 	struct bio_vec *bvec;
16546dc4f100SMing Lei 	struct bvec_iter_all iter_all;
1655f9c78b2bSJens Axboe 
16562b070cfeSChristoph Hellwig 	bio_for_each_segment_all(bvec, bio, iter_all) {
16573bb50983SChristoph Hellwig 		if (!PageCompound(bvec->bv_page))
16583bb50983SChristoph Hellwig 			set_page_dirty_lock(bvec->bv_page);
1659f9c78b2bSJens Axboe 	}
1660f9c78b2bSJens Axboe }
1661f9c78b2bSJens Axboe 
1662f9c78b2bSJens Axboe /*
1663f9c78b2bSJens Axboe  * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
1664f9c78b2bSJens Axboe  * If they are, then fine.  If, however, some pages are clean then they must
1665f9c78b2bSJens Axboe  * have been written out during the direct-IO read.  So we take another ref on
166624d5493fSChristoph Hellwig  * the BIO and re-dirty the pages in process context.
1667f9c78b2bSJens Axboe  *
1668f9c78b2bSJens Axboe  * It is expected that bio_check_pages_dirty() will wholly own the BIO from
1669ea1754a0SKirill A. Shutemov  * here on.  It will run one put_page() against each page and will run one
1670ea1754a0SKirill A. Shutemov  * bio_put() against the BIO.
1671f9c78b2bSJens Axboe  */
1672f9c78b2bSJens Axboe 
1673f9c78b2bSJens Axboe static void bio_dirty_fn(struct work_struct *work);
1674f9c78b2bSJens Axboe 
1675f9c78b2bSJens Axboe static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
1676f9c78b2bSJens Axboe static DEFINE_SPINLOCK(bio_dirty_lock);
1677f9c78b2bSJens Axboe static struct bio *bio_dirty_list;
1678f9c78b2bSJens Axboe 
1679f9c78b2bSJens Axboe /*
1680f9c78b2bSJens Axboe  * This runs in process context
1681f9c78b2bSJens Axboe  */
1682f9c78b2bSJens Axboe static void bio_dirty_fn(struct work_struct *work)
1683f9c78b2bSJens Axboe {
168424d5493fSChristoph Hellwig 	struct bio *bio, *next;
1685f9c78b2bSJens Axboe 
168624d5493fSChristoph Hellwig 	spin_lock_irq(&bio_dirty_lock);
168724d5493fSChristoph Hellwig 	next = bio_dirty_list;
1688f9c78b2bSJens Axboe 	bio_dirty_list = NULL;
168924d5493fSChristoph Hellwig 	spin_unlock_irq(&bio_dirty_lock);
1690f9c78b2bSJens Axboe 
169124d5493fSChristoph Hellwig 	while ((bio = next) != NULL) {
169224d5493fSChristoph Hellwig 		next = bio->bi_private;
1693f9c78b2bSJens Axboe 
1694f9c78b2bSJens Axboe 		bio_set_pages_dirty(bio);
1695399254aaSJens Axboe 		if (!bio_flagged(bio, BIO_NO_PAGE_REF))
1696f9c78b2bSJens Axboe 			bio_release_pages(bio);
1697f9c78b2bSJens Axboe 		bio_put(bio);
1698f9c78b2bSJens Axboe 	}
1699f9c78b2bSJens Axboe }
1700f9c78b2bSJens Axboe 
1701f9c78b2bSJens Axboe void bio_check_pages_dirty(struct bio *bio)
1702f9c78b2bSJens Axboe {
1703f9c78b2bSJens Axboe 	struct bio_vec *bvec;
170424d5493fSChristoph Hellwig 	unsigned long flags;
17056dc4f100SMing Lei 	struct bvec_iter_all iter_all;
1706f9c78b2bSJens Axboe 
17072b070cfeSChristoph Hellwig 	bio_for_each_segment_all(bvec, bio, iter_all) {
170824d5493fSChristoph Hellwig 		if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
170924d5493fSChristoph Hellwig 			goto defer;
1710f9c78b2bSJens Axboe 	}
1711f9c78b2bSJens Axboe 
1712399254aaSJens Axboe 	if (!bio_flagged(bio, BIO_NO_PAGE_REF))
171324d5493fSChristoph Hellwig 		bio_release_pages(bio);
171424d5493fSChristoph Hellwig 	bio_put(bio);
171524d5493fSChristoph Hellwig 	return;
171624d5493fSChristoph Hellwig defer:
1717f9c78b2bSJens Axboe 	spin_lock_irqsave(&bio_dirty_lock, flags);
1718f9c78b2bSJens Axboe 	bio->bi_private = bio_dirty_list;
1719f9c78b2bSJens Axboe 	bio_dirty_list = bio;
1720f9c78b2bSJens Axboe 	spin_unlock_irqrestore(&bio_dirty_lock, flags);
1721f9c78b2bSJens Axboe 	schedule_work(&bio_dirty_work);
1722f9c78b2bSJens Axboe }
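
/*
 * Example (sketch of the protocol above, as a direct-IO read path would
 * use it; "dio_read_endio" is hypothetical):
 *
 *	bio_set_pages_dirty(bio);	// before submission
 *	submit_bio(bio);
 *	...
 *	static void dio_read_endio(struct bio *bio)
 *	{
 *		bio_check_pages_dirty(bio);	// owns the bio from here on
 *	}
 */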
1723f9c78b2bSJens Axboe 
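/*
 * Charge one tick of io_ticks to @part for each jiffy in which at least
 * one request started or completed, then repeat the charge against the
 * whole-disk part0 statistics.  The cmpxchg() keeps the stamp update
 * lockless; callers hold part_stat_lock().
 */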
17245b18b5a7SMikulas Patocka void update_io_ticks(struct hd_struct *part, unsigned long now)
17255b18b5a7SMikulas Patocka {
17265b18b5a7SMikulas Patocka 	unsigned long stamp;
17275b18b5a7SMikulas Patocka again:
17285b18b5a7SMikulas Patocka 	stamp = READ_ONCE(part->stamp);
17295b18b5a7SMikulas Patocka 	if (unlikely(stamp != now)) {
17305b18b5a7SMikulas Patocka 		if (likely(cmpxchg(&part->stamp, stamp, now) == stamp)) {
17315b18b5a7SMikulas Patocka 			__part_stat_add(part, io_ticks, 1);
17325b18b5a7SMikulas Patocka 		}
17335b18b5a7SMikulas Patocka 	}
17345b18b5a7SMikulas Patocka 	if (part->partno) {
17355b18b5a7SMikulas Patocka 		part = &part_to_disk(part)->part0;
17365b18b5a7SMikulas Patocka 		goto again;
17375b18b5a7SMikulas Patocka 	}
17385b18b5a7SMikulas Patocka }
1739f9c78b2bSJens Axboe 
1740ddcf35d3SMichael Callahan void generic_start_io_acct(struct request_queue *q, int op,
1741d62e26b3SJens Axboe 			   unsigned long sectors, struct hd_struct *part)
1742394ffa50SGu Zheng {
1743ddcf35d3SMichael Callahan 	const int sgrp = op_stat_group(op);
1744394ffa50SGu Zheng 
1745112f158fSMike Snitzer 	part_stat_lock();
1746112f158fSMike Snitzer 
17475b18b5a7SMikulas Patocka 	update_io_ticks(part, jiffies);
1748112f158fSMike Snitzer 	part_stat_inc(part, ios[sgrp]);
1749112f158fSMike Snitzer 	part_stat_add(part, sectors[sgrp], sectors);
1750ddcf35d3SMichael Callahan 	part_inc_in_flight(q, part, op_is_write(op));
1751394ffa50SGu Zheng 
1752394ffa50SGu Zheng 	part_stat_unlock();
1753394ffa50SGu Zheng }
1754394ffa50SGu Zheng EXPORT_SYMBOL(generic_start_io_acct);
1755394ffa50SGu Zheng 
1756ddcf35d3SMichael Callahan void generic_end_io_acct(struct request_queue *q, int req_op,
1757d62e26b3SJens Axboe 			 struct hd_struct *part, unsigned long start_time)
1758394ffa50SGu Zheng {
17595b18b5a7SMikulas Patocka 	unsigned long now = jiffies;
17605b18b5a7SMikulas Patocka 	unsigned long duration = now - start_time;
1761ddcf35d3SMichael Callahan 	const int sgrp = op_stat_group(req_op);
1762394ffa50SGu Zheng 
1763112f158fSMike Snitzer 	part_stat_lock();
1764112f158fSMike Snitzer 
17655b18b5a7SMikulas Patocka 	update_io_ticks(part, now);
1766112f158fSMike Snitzer 	part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
17675b18b5a7SMikulas Patocka 	part_stat_add(part, time_in_queue, duration);
1768ddcf35d3SMichael Callahan 	part_dec_in_flight(q, part, op_is_write(req_op));
1769394ffa50SGu Zheng 
1770394ffa50SGu Zheng 	part_stat_unlock();
1771394ffa50SGu Zheng }
1772394ffa50SGu Zheng EXPORT_SYMBOL(generic_end_io_acct);
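
/*
 * Example (sketch; @q, @disk and @bio as in the caller): a bio-based
 * driver brackets each bio with these two helpers so that the numbers
 * show up in /proc/diskstats:
 *
 *	unsigned long start = jiffies;
 *
 *	generic_start_io_acct(q, bio_op(bio), bio_sectors(bio),
 *			      &disk->part0);
 *	... perform the I/O ...
 *	generic_end_io_acct(q, bio_op(bio), &disk->part0, start);
 */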
1773394ffa50SGu Zheng 
1774f9c78b2bSJens Axboe #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
1775f9c78b2bSJens Axboe void bio_flush_dcache_pages(struct bio *bi)
1776f9c78b2bSJens Axboe {
1777f9c78b2bSJens Axboe 	struct bio_vec bvec;
1778f9c78b2bSJens Axboe 	struct bvec_iter iter;
1779f9c78b2bSJens Axboe 
1780f9c78b2bSJens Axboe 	bio_for_each_segment(bvec, bi, iter)
1781f9c78b2bSJens Axboe 		flush_dcache_page(bvec.bv_page);
1782f9c78b2bSJens Axboe }
1783f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_flush_dcache_pages);
1784f9c78b2bSJens Axboe #endif
1785f9c78b2bSJens Axboe 
1786c4cf5261SJens Axboe static inline bool bio_remaining_done(struct bio *bio)
1787c4cf5261SJens Axboe {
1788c4cf5261SJens Axboe 	/*
1789c4cf5261SJens Axboe 	 * If we're not chaining, then ->__bi_remaining is always 1 and
1790c4cf5261SJens Axboe 	 * we always end io on the first invocation.
1791c4cf5261SJens Axboe 	 */
1792c4cf5261SJens Axboe 	if (!bio_flagged(bio, BIO_CHAIN))
1793c4cf5261SJens Axboe 		return true;
1794c4cf5261SJens Axboe 
1795c4cf5261SJens Axboe 	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
1796c4cf5261SJens Axboe 
1797326e1dbbSMike Snitzer 	if (atomic_dec_and_test(&bio->__bi_remaining)) {
1798b7c44ed9SJens Axboe 		bio_clear_flag(bio, BIO_CHAIN);
1799c4cf5261SJens Axboe 		return true;
1800326e1dbbSMike Snitzer 	}
1801c4cf5261SJens Axboe 
1802c4cf5261SJens Axboe 	return false;
1803c4cf5261SJens Axboe }
1804c4cf5261SJens Axboe 
1805f9c78b2bSJens Axboe /**
1806f9c78b2bSJens Axboe  * bio_endio - end I/O on a bio
1807f9c78b2bSJens Axboe  * @bio:	bio
1808f9c78b2bSJens Axboe  *
1809f9c78b2bSJens Axboe  * Description:
18104246a0b6SChristoph Hellwig  *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
18114246a0b6SChristoph Hellwig  *   way to end I/O on a bio. No one should call bi_end_io() directly on a
18124246a0b6SChristoph Hellwig  *   bio unless they own it and thus know that it has an end_io function.
1813fbbaf700SNeilBrown  *
1814fbbaf700SNeilBrown  *   bio_endio() can be called several times on a bio that has been chained
1815fbbaf700SNeilBrown  *   using bio_chain().  The ->bi_end_io() function will only be called the
1816fbbaf700SNeilBrown  *   last time.  At this point the BLK_TA_COMPLETE tracing event will be
1817fbbaf700SNeilBrown  *   generated if BIO_TRACE_COMPLETION is set.
1818f9c78b2bSJens Axboe  **/
18194246a0b6SChristoph Hellwig void bio_endio(struct bio *bio)
1820f9c78b2bSJens Axboe {
1821ba8c6967SChristoph Hellwig again:
18222b885517SChristoph Hellwig 	if (!bio_remaining_done(bio))
1823ba8c6967SChristoph Hellwig 		return;
18247c20f116SChristoph Hellwig 	if (!bio_integrity_endio(bio))
18257c20f116SChristoph Hellwig 		return;
1826f9c78b2bSJens Axboe 
182767b42d0bSJosef Bacik 	if (bio->bi_disk)
182867b42d0bSJosef Bacik 		rq_qos_done_bio(bio->bi_disk->queue, bio);
182967b42d0bSJosef Bacik 
1830f9c78b2bSJens Axboe 	/*
1831ba8c6967SChristoph Hellwig 	 * Need to have a real endio function for chained bios, otherwise
1832ba8c6967SChristoph Hellwig 	 * various corner cases will break (like stacking block devices that
1833ba8c6967SChristoph Hellwig 	 * save/restore bi_end_io) - however, we want to avoid unbounded
1834ba8c6967SChristoph Hellwig 	 * recursion and blowing the stack. Tail call optimization would
1835ba8c6967SChristoph Hellwig 	 * handle this, but compiling with frame pointers also disables
1836ba8c6967SChristoph Hellwig 	 * gcc's sibling call optimization.
1837f9c78b2bSJens Axboe 	 */
1838f9c78b2bSJens Axboe 	if (bio->bi_end_io == bio_chain_endio) {
183938f8baaeSChristoph Hellwig 		bio = __bio_chain_endio(bio);
1840ba8c6967SChristoph Hellwig 		goto again;
1841ba8c6967SChristoph Hellwig 	}
1842ba8c6967SChristoph Hellwig 
184374d46992SChristoph Hellwig 	if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
184474d46992SChristoph Hellwig 		trace_block_bio_complete(bio->bi_disk->queue, bio,
1845a462b950SBart Van Assche 					 blk_status_to_errno(bio->bi_status));
1846fbbaf700SNeilBrown 		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
1847fbbaf700SNeilBrown 	}
1848fbbaf700SNeilBrown 
18499e234eeaSShaohua Li 	blk_throtl_bio_endio(bio);
1850b222dd2fSShaohua Li 	/* release cgroup info */
1851b222dd2fSShaohua Li 	bio_uninit(bio);
1852f9c78b2bSJens Axboe 	if (bio->bi_end_io)
18534246a0b6SChristoph Hellwig 		bio->bi_end_io(bio);
1854f9c78b2bSJens Axboe }
1855f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_endio);
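
/*
 * Example (sketch): a driver typically completes a bio by recording the
 * status and letting bio_endio() deal with chaining and tracing:
 *
 *	bio->bi_status = errno_to_blk_status(error);
 *	bio_endio(bio);
 */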
1856f9c78b2bSJens Axboe 
1857f9c78b2bSJens Axboe /**
1858f9c78b2bSJens Axboe  * bio_split - split a bio
1859f9c78b2bSJens Axboe  * @bio:	bio to split
1860f9c78b2bSJens Axboe  * @sectors:	number of sectors to split from the front of @bio
1861f9c78b2bSJens Axboe  * @gfp:	gfp mask
1862f9c78b2bSJens Axboe  * @bs:		bio set to allocate from
1863f9c78b2bSJens Axboe  *
1864f9c78b2bSJens Axboe  * Allocates and returns a new bio which represents @sectors from the start of
1865f9c78b2bSJens Axboe  * @bio, and updates @bio to represent the remaining sectors.
1866f9c78b2bSJens Axboe  *
1867f3f5da62SMartin K. Petersen  * Unless this is a discard request the newly allocated bio will point
1868f3f5da62SMartin K. Petersen  * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
1869f3f5da62SMartin K. Petersen  * @bio is not freed before the split.
1870f9c78b2bSJens Axboe  */
1871f9c78b2bSJens Axboe struct bio *bio_split(struct bio *bio, int sectors,
1872f9c78b2bSJens Axboe 		      gfp_t gfp, struct bio_set *bs)
1873f9c78b2bSJens Axboe {
1874f341a4d3SMikulas Patocka 	struct bio *split;
1875f9c78b2bSJens Axboe 
1876f9c78b2bSJens Axboe 	BUG_ON(sectors <= 0);
1877f9c78b2bSJens Axboe 	BUG_ON(sectors >= bio_sectors(bio));
1878f9c78b2bSJens Axboe 
1879f9c78b2bSJens Axboe 	split = bio_clone_fast(bio, gfp, bs);
1880f9c78b2bSJens Axboe 	if (!split)
1881f9c78b2bSJens Axboe 		return NULL;
1882f9c78b2bSJens Axboe 
1883f9c78b2bSJens Axboe 	split->bi_iter.bi_size = sectors << 9;
1884f9c78b2bSJens Axboe 
1885f9c78b2bSJens Axboe 	if (bio_integrity(split))
1886fbd08e76SDmitry Monakhov 		bio_integrity_trim(split);
1887f9c78b2bSJens Axboe 
1888f9c78b2bSJens Axboe 	bio_advance(bio, split->bi_iter.bi_size);
1889f9c78b2bSJens Axboe 
1890fbbaf700SNeilBrown 	if (bio_flagged(bio, BIO_TRACE_COMPLETION))
189120d59023SGoldwyn Rodrigues 		bio_set_flag(split, BIO_TRACE_COMPLETION);
1892fbbaf700SNeilBrown 
1893f9c78b2bSJens Axboe 	return split;
1894f9c78b2bSJens Axboe }
1895f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_split);
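
/*
 * Example (a sketch following the md/raid usage): split the front
 * @sectors off an oversized bio, chain it so the parent only completes
 * once both pieces do, requeue the remainder and keep working on the
 * front piece:
 *
 *	struct bio *split = bio_split(bio, sectors, GFP_NOIO, bs);
 *
 *	bio_chain(split, bio);
 *	generic_make_request(bio);	// requeue the remainder
 *	bio = split;			// continue with the front piece
 */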
1896f9c78b2bSJens Axboe 
1897f9c78b2bSJens Axboe /**
1898f9c78b2bSJens Axboe  * bio_trim - trim a bio
1899f9c78b2bSJens Axboe  * @bio:	bio to trim
1900f9c78b2bSJens Axboe  * @offset:	number of sectors to trim from the front of @bio
1901f9c78b2bSJens Axboe  * @size:	size we want to trim @bio to, in sectors
1902f9c78b2bSJens Axboe  */
1903f9c78b2bSJens Axboe void bio_trim(struct bio *bio, int offset, int size)
1904f9c78b2bSJens Axboe {
1905f9c78b2bSJens Axboe 	/* 'bio' is a cloned bio which we need to trim to match
1906f9c78b2bSJens Axboe 	 * the given offset and size.
1907f9c78b2bSJens Axboe 	 */
1908f9c78b2bSJens Axboe 
1909f9c78b2bSJens Axboe 	size <<= 9;
1910f9c78b2bSJens Axboe 	if (offset == 0 && size == bio->bi_iter.bi_size)
1911f9c78b2bSJens Axboe 		return;
1912f9c78b2bSJens Axboe 
1913b7c44ed9SJens Axboe 	bio_clear_flag(bio, BIO_SEG_VALID);
1914f9c78b2bSJens Axboe 
1915f9c78b2bSJens Axboe 	bio_advance(bio, offset << 9);
1916f9c78b2bSJens Axboe 
1917f9c78b2bSJens Axboe 	bio->bi_iter.bi_size = size;
1918376a78abSDmitry Monakhov 
1919376a78abSDmitry Monakhov 	if (bio_integrity(bio))
1920fbd08e76SDmitry Monakhov 		bio_integrity_trim(bio);
1921376a78abSDmitry Monakhov 
1922f9c78b2bSJens Axboe }
1923f9c78b2bSJens Axboe EXPORT_SYMBOL_GPL(bio_trim);
1924f9c78b2bSJens Axboe 
1925f9c78b2bSJens Axboe /*
1926f9c78b2bSJens Axboe  * Create memory pools for biovecs in a bio_set.
1927f9c78b2bSJens Axboe  * Use the global biovec slabs created for general use.
1928f9c78b2bSJens Axboe  */
19298aa6ba2fSKent Overstreet int biovec_init_pool(mempool_t *pool, int pool_entries)
1930f9c78b2bSJens Axboe {
1931ed996a52SChristoph Hellwig 	struct biovec_slab *bp = bvec_slabs + BVEC_POOL_MAX;
1932f9c78b2bSJens Axboe 
19338aa6ba2fSKent Overstreet 	return mempool_init_slab_pool(pool, pool_entries, bp->slab);
1934f9c78b2bSJens Axboe }
1935f9c78b2bSJens Axboe 
1936917a38c7SKent Overstreet /*
1937917a38c7SKent Overstreet  * bioset_exit - exit a bioset initialized with bioset_init()
1938917a38c7SKent Overstreet  *
1939917a38c7SKent Overstreet  * May be called on a zeroed but uninitialized bioset (i.e. allocated with
1940917a38c7SKent Overstreet  * kzalloc()).
1941917a38c7SKent Overstreet  */
1942917a38c7SKent Overstreet void bioset_exit(struct bio_set *bs)
1943f9c78b2bSJens Axboe {
1944f9c78b2bSJens Axboe 	if (bs->rescue_workqueue)
1945f9c78b2bSJens Axboe 		destroy_workqueue(bs->rescue_workqueue);
1946917a38c7SKent Overstreet 	bs->rescue_workqueue = NULL;
1947f9c78b2bSJens Axboe 
19488aa6ba2fSKent Overstreet 	mempool_exit(&bs->bio_pool);
19498aa6ba2fSKent Overstreet 	mempool_exit(&bs->bvec_pool);
1950f9c78b2bSJens Axboe 
1951f9c78b2bSJens Axboe 	bioset_integrity_free(bs);
1952917a38c7SKent Overstreet 	if (bs->bio_slab)
1953f9c78b2bSJens Axboe 		bio_put_slab(bs);
1954917a38c7SKent Overstreet 	bs->bio_slab = NULL;
1955917a38c7SKent Overstreet }
1956917a38c7SKent Overstreet EXPORT_SYMBOL(bioset_exit);
1957f9c78b2bSJens Axboe 
1958011067b0SNeilBrown /**
1959917a38c7SKent Overstreet  * bioset_init - Initialize a bio_set
1960dad08527SKent Overstreet  * @bs:		pool to initialize
1961917a38c7SKent Overstreet  * @pool_size:	Number of bio and bio_vecs to cache in the mempool
1962917a38c7SKent Overstreet  * @front_pad:	Number of bytes to allocate in front of the returned bio
1963917a38c7SKent Overstreet  * @flags:	Flags to modify behavior, currently %BIOSET_NEED_BVECS
1964917a38c7SKent Overstreet  *              and %BIOSET_NEED_RESCUER
1965917a38c7SKent Overstreet  *
1966dad08527SKent Overstreet  * Description:
1967dad08527SKent Overstreet  *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
1968dad08527SKent Overstreet  *    to ask for a number of bytes to be allocated in front of the bio.
1969dad08527SKent Overstreet  *    Front pad allocation is useful for embedding the bio inside
1970dad08527SKent Overstreet  *    another structure, to avoid allocating extra data to go with the bio.
1971dad08527SKent Overstreet  *    Note that the bio must always be embedded at the END of that structure,
1972dad08527SKent Overstreet  *    or things will break badly.
1973dad08527SKent Overstreet  *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
1974dad08527SKent Overstreet  *    for allocating iovecs.  This pool is not needed e.g. for bio_clone_fast().
1975dad08527SKent Overstreet  *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used to
1976dad08527SKent Overstreet  *    dispatch queued requests when the mempool runs out of space.
1977dad08527SKent Overstreet  *
1978917a38c7SKent Overstreet  */
1979917a38c7SKent Overstreet int bioset_init(struct bio_set *bs,
1980917a38c7SKent Overstreet 		unsigned int pool_size,
1981917a38c7SKent Overstreet 		unsigned int front_pad,
1982917a38c7SKent Overstreet 		int flags)
1983917a38c7SKent Overstreet {
1984917a38c7SKent Overstreet 	unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
1985917a38c7SKent Overstreet 
1986917a38c7SKent Overstreet 	bs->front_pad = front_pad;
1987917a38c7SKent Overstreet 
1988917a38c7SKent Overstreet 	spin_lock_init(&bs->rescue_lock);
1989917a38c7SKent Overstreet 	bio_list_init(&bs->rescue_list);
1990917a38c7SKent Overstreet 	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
1991917a38c7SKent Overstreet 
1992917a38c7SKent Overstreet 	bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
1993917a38c7SKent Overstreet 	if (!bs->bio_slab)
1994917a38c7SKent Overstreet 		return -ENOMEM;
1995917a38c7SKent Overstreet 
1996917a38c7SKent Overstreet 	if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
1997917a38c7SKent Overstreet 		goto bad;
1998917a38c7SKent Overstreet 
1999917a38c7SKent Overstreet 	if ((flags & BIOSET_NEED_BVECS) &&
2000917a38c7SKent Overstreet 	    biovec_init_pool(&bs->bvec_pool, pool_size))
2001917a38c7SKent Overstreet 		goto bad;
2002917a38c7SKent Overstreet 
2003917a38c7SKent Overstreet 	if (!(flags & BIOSET_NEED_RESCUER))
2004917a38c7SKent Overstreet 		return 0;
2005917a38c7SKent Overstreet 
2006917a38c7SKent Overstreet 	bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
2007917a38c7SKent Overstreet 	if (!bs->rescue_workqueue)
2008917a38c7SKent Overstreet 		goto bad;
2009917a38c7SKent Overstreet 
2010917a38c7SKent Overstreet 	return 0;
2011917a38c7SKent Overstreet bad:
2012917a38c7SKent Overstreet 	bioset_exit(bs);
2013917a38c7SKent Overstreet 	return -ENOMEM;
2014917a38c7SKent Overstreet }
2015917a38c7SKent Overstreet EXPORT_SYMBOL(bioset_init);
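
/*
 * Example (sketch; "struct my_io" is hypothetical): using @front_pad to
 * embed a per-I/O context in front of the bio, dm-style:
 *
 *	struct my_io {
 *		void *ctx;
 *		struct bio bio;		// must stay the last member
 *	};
 *
 *	bioset_init(&my_bs, BIO_POOL_SIZE, offsetof(struct my_io, bio),
 *		    BIOSET_NEED_BVECS);
 *	...
 *	bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, &my_bs);
 *	io = container_of(bio, struct my_io, bio);
 */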
2016917a38c7SKent Overstreet 
201728e89fd9SJens Axboe /*
201828e89fd9SJens Axboe  * Initialize and setup a new bio_set, based on the settings from
201928e89fd9SJens Axboe  * another bio_set.
202028e89fd9SJens Axboe  */
202128e89fd9SJens Axboe int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
202228e89fd9SJens Axboe {
202328e89fd9SJens Axboe 	int flags;
202428e89fd9SJens Axboe 
202528e89fd9SJens Axboe 	flags = 0;
202628e89fd9SJens Axboe 	if (src->bvec_pool.min_nr)
202728e89fd9SJens Axboe 		flags |= BIOSET_NEED_BVECS;
202828e89fd9SJens Axboe 	if (src->rescue_workqueue)
202928e89fd9SJens Axboe 		flags |= BIOSET_NEED_RESCUER;
203028e89fd9SJens Axboe 
203128e89fd9SJens Axboe 	return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags);
203228e89fd9SJens Axboe }
203328e89fd9SJens Axboe EXPORT_SYMBOL(bioset_init_from_src);
203428e89fd9SJens Axboe 
2035f9c78b2bSJens Axboe #ifdef CONFIG_BLK_CGROUP
20361d933cf0STejun Heo 
20371d933cf0STejun Heo /**
20382268c0feSDennis Zhou  * bio_disassociate_blkg - puts back the blkg reference if associated
2039b5f2954dSDennis Zhou  * @bio: target bio
2040b5f2954dSDennis Zhou  *
20412268c0feSDennis Zhou  * Helper to disassociate the blkg from @bio if a blkg is associated.
2042b5f2954dSDennis Zhou  */
20432268c0feSDennis Zhou void bio_disassociate_blkg(struct bio *bio)
2044b5f2954dSDennis Zhou {
204508e18eabSJosef Bacik 	if (bio->bi_blkg) {
204608e18eabSJosef Bacik 		blkg_put(bio->bi_blkg);
204708e18eabSJosef Bacik 		bio->bi_blkg = NULL;
204808e18eabSJosef Bacik 	}
2049f9c78b2bSJens Axboe }
2050892ad71fSDennis Zhou EXPORT_SYMBOL_GPL(bio_disassociate_blkg);
2051f9c78b2bSJens Axboe 
205220bd723eSPaolo Valente /**
20532268c0feSDennis Zhou  * __bio_associate_blkg - associate a bio with a blkg
2054f9c78b2bSJens Axboe  * @bio: target bio
2055f9c78b2bSJens Axboe  * @blkg: the blkg to associate
2056f9c78b2bSJens Axboe  *
2057beea9da0SDennis Zhou  * This tries to associate @bio with the specified @blkg.  Association failure
2058beea9da0SDennis Zhou  * is handled by walking up the blkg tree.  Therefore, the blkg associated can
2059beea9da0SDennis Zhou  * be anything between @blkg and the root_blkg.  This situation only happens
2060beea9da0SDennis Zhou  * when a cgroup is dying and then the remaining bios will spill to the closest
2061beea9da0SDennis Zhou  * alive blkg.
2062beea9da0SDennis Zhou  *
2063beea9da0SDennis Zhou  * A reference will be taken on the @blkg and will be released when @bio is
2064beea9da0SDennis Zhou  * freed.
2065f9c78b2bSJens Axboe  */
20662268c0feSDennis Zhou static void __bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
2067f9c78b2bSJens Axboe {
20682268c0feSDennis Zhou 	bio_disassociate_blkg(bio);
20692268c0feSDennis Zhou 
20707754f669SDennis Zhou 	bio->bi_blkg = blkg_tryget_closest(blkg);
20712268c0feSDennis Zhou }
20722268c0feSDennis Zhou 
2073fd42df30SDennis Zhou /**
2074fd42df30SDennis Zhou  * bio_associate_blkg_from_css - associate a bio with a specified css
2075fd42df30SDennis Zhou  * @bio: target bio
2076fd42df30SDennis Zhou  * @css: target css
2077fd42df30SDennis Zhou  *
2078fd42df30SDennis Zhou  * Associate @bio with the blkg found by combining the css's blkg and the
2079fc5a828bSDennis Zhou  * request_queue of the @bio.  This falls back to the queue's root_blkg if
2080fc5a828bSDennis Zhou  * the association fails with the css.
2081fd42df30SDennis Zhou  */
2082fd42df30SDennis Zhou void bio_associate_blkg_from_css(struct bio *bio,
2083fd42df30SDennis Zhou 				 struct cgroup_subsys_state *css)
2084fd42df30SDennis Zhou {
2085fc5a828bSDennis Zhou 	struct request_queue *q = bio->bi_disk->queue;
2086fc5a828bSDennis Zhou 	struct blkcg_gq *blkg;
2087fc5a828bSDennis Zhou 
2088fc5a828bSDennis Zhou 	rcu_read_lock();
2089fc5a828bSDennis Zhou 
2090fc5a828bSDennis Zhou 	if (!css || !css->parent)
2091fc5a828bSDennis Zhou 		blkg = q->root_blkg;
2092fc5a828bSDennis Zhou 	else
2093fc5a828bSDennis Zhou 		blkg = blkg_lookup_create(css_to_blkcg(css), q);
2094fc5a828bSDennis Zhou 
2095fc5a828bSDennis Zhou 	__bio_associate_blkg(bio, blkg);
2096fc5a828bSDennis Zhou 
2097fc5a828bSDennis Zhou 	rcu_read_unlock();
2098fd42df30SDennis Zhou }
2099fd42df30SDennis Zhou EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
2100fd42df30SDennis Zhou 
21016a7f6d86SDennis Zhou #ifdef CONFIG_MEMCG
21026a7f6d86SDennis Zhou /**
21036a7f6d86SDennis Zhou  * bio_associate_blkg_from_page - associate a bio with the page's blkg
21046a7f6d86SDennis Zhou  * @bio: target bio
21056a7f6d86SDennis Zhou  * @page: the page to lookup the blkcg from
21066a7f6d86SDennis Zhou  *
21076a7f6d86SDennis Zhou  * Associate @bio with the blkg from @page's owning memcg and the respective
2108fc5a828bSDennis Zhou  * request_queue.  If cgroup_e_css returns %NULL, fall back to the queue's
2109fc5a828bSDennis Zhou  * root_blkg.
21106a7f6d86SDennis Zhou  */
21116a7f6d86SDennis Zhou void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
21126a7f6d86SDennis Zhou {
21136a7f6d86SDennis Zhou 	struct cgroup_subsys_state *css;
21146a7f6d86SDennis Zhou 
21156a7f6d86SDennis Zhou 	if (!page->mem_cgroup)
21166a7f6d86SDennis Zhou 		return;
21176a7f6d86SDennis Zhou 
2118fc5a828bSDennis Zhou 	rcu_read_lock();
2119fc5a828bSDennis Zhou 
2120fc5a828bSDennis Zhou 	css = cgroup_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys);
2121fc5a828bSDennis Zhou 	bio_associate_blkg_from_css(bio, css);
2122fc5a828bSDennis Zhou 
2123fc5a828bSDennis Zhou 	rcu_read_unlock();
21246a7f6d86SDennis Zhou }
21256a7f6d86SDennis Zhou #endif /* CONFIG_MEMCG */
21266a7f6d86SDennis Zhou 
21272268c0feSDennis Zhou /**
21282268c0feSDennis Zhou  * bio_associate_blkg - associate a bio with a blkg
21292268c0feSDennis Zhou  * @bio: target bio
21302268c0feSDennis Zhou  *
21312268c0feSDennis Zhou  * Associate @bio with the blkg found from the bio's css and request_queue.
21322268c0feSDennis Zhou  * If one is not found, blkg_lookup_create() creates the blkg.  If a blkg is
21332268c0feSDennis Zhou  * already associated, the css is reused and association redone as the
21342268c0feSDennis Zhou  * request_queue may have changed.
21352268c0feSDennis Zhou  */
21362268c0feSDennis Zhou void bio_associate_blkg(struct bio *bio)
21372268c0feSDennis Zhou {
2138fc5a828bSDennis Zhou 	struct cgroup_subsys_state *css;
21392268c0feSDennis Zhou 
21402268c0feSDennis Zhou 	rcu_read_lock();
21412268c0feSDennis Zhou 
2142db6638d7SDennis Zhou 	if (bio->bi_blkg)
2143fc5a828bSDennis Zhou 		css = &bio_blkcg(bio)->css;
2144db6638d7SDennis Zhou 	else
2145fc5a828bSDennis Zhou 		css = blkcg_css();
21462268c0feSDennis Zhou 
2147fc5a828bSDennis Zhou 	bio_associate_blkg_from_css(bio, css);
21482268c0feSDennis Zhou 
21492268c0feSDennis Zhou 	rcu_read_unlock();
2150f9c78b2bSJens Axboe }
21515cdf2e3fSDennis Zhou EXPORT_SYMBOL_GPL(bio_associate_blkg);
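
/*
 * Example (sketch): at this point in the tree the submission path does
 * the association lazily; blkcg_bio_issue_check() runs, roughly:
 *
 *	if (!bio->bi_blkg)
 *		bio_associate_blkg(bio);
 */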
2152f9c78b2bSJens Axboe 
215320bd723eSPaolo Valente /**
2154db6638d7SDennis Zhou  * bio_clone_blkg_association - clone blkg association from src to dst bio
215520bd723eSPaolo Valente  * @dst: destination bio
215620bd723eSPaolo Valente  * @src: source bio
215720bd723eSPaolo Valente  */
2158db6638d7SDennis Zhou void bio_clone_blkg_association(struct bio *dst, struct bio *src)
215920bd723eSPaolo Valente {
21606ab21879SDennis Zhou 	rcu_read_lock();
21616ab21879SDennis Zhou 
2162fc5a828bSDennis Zhou 	if (src->bi_blkg)
21632268c0feSDennis Zhou 		__bio_associate_blkg(dst, src->bi_blkg);
21646ab21879SDennis Zhou 
21656ab21879SDennis Zhou 	rcu_read_unlock();
216620bd723eSPaolo Valente }
2167db6638d7SDennis Zhou EXPORT_SYMBOL_GPL(bio_clone_blkg_association);
2168f9c78b2bSJens Axboe #endif /* CONFIG_BLK_CGROUP */
2169f9c78b2bSJens Axboe 
2170f9c78b2bSJens Axboe static void __init biovec_init_slabs(void)
2171f9c78b2bSJens Axboe {
2172f9c78b2bSJens Axboe 	int i;
2173f9c78b2bSJens Axboe 
2174ed996a52SChristoph Hellwig 	for (i = 0; i < BVEC_POOL_NR; i++) {
2175f9c78b2bSJens Axboe 		int size;
2176f9c78b2bSJens Axboe 		struct biovec_slab *bvs = bvec_slabs + i;
2177f9c78b2bSJens Axboe 
2178f9c78b2bSJens Axboe 		if (bvs->nr_vecs <= BIO_INLINE_VECS) {
2179f9c78b2bSJens Axboe 			bvs->slab = NULL;
2180f9c78b2bSJens Axboe 			continue;
2181f9c78b2bSJens Axboe 		}
2182f9c78b2bSJens Axboe 
2183f9c78b2bSJens Axboe 		size = bvs->nr_vecs * sizeof(struct bio_vec);
2184f9c78b2bSJens Axboe 		bvs->slab = kmem_cache_create(bvs->name, size, 0,
2185f9c78b2bSJens Axboe                                 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
2186f9c78b2bSJens Axboe 	}
2187f9c78b2bSJens Axboe }
2188f9c78b2bSJens Axboe 
2189f9c78b2bSJens Axboe static int __init init_bio(void)
2190f9c78b2bSJens Axboe {
2191f9c78b2bSJens Axboe 	bio_slab_max = 2;
2192f9c78b2bSJens Axboe 	bio_slab_nr = 0;
21936396bb22SKees Cook 	bio_slabs = kcalloc(bio_slab_max, sizeof(struct bio_slab),
21946396bb22SKees Cook 			    GFP_KERNEL);
21952b24e6f6SJohannes Thumshirn 
21962b24e6f6SJohannes Thumshirn 	BUILD_BUG_ON(BIO_FLAG_LAST > BVEC_POOL_OFFSET);
21972b24e6f6SJohannes Thumshirn 
2198f9c78b2bSJens Axboe 	if (!bio_slabs)
2199f9c78b2bSJens Axboe 		panic("bio: can't allocate bios\n");
2200f9c78b2bSJens Axboe 
2201f9c78b2bSJens Axboe 	bio_integrity_init();
2202f9c78b2bSJens Axboe 	biovec_init_slabs();
2203f9c78b2bSJens Axboe 
2204f4f8154aSKent Overstreet 	if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS))
2205f9c78b2bSJens Axboe 		panic("bio: can't allocate bios\n");
2206f9c78b2bSJens Axboe 
2207f4f8154aSKent Overstreet 	if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE))
2208f9c78b2bSJens Axboe 		panic("bio: can't create integrity pool\n");
2209f9c78b2bSJens Axboe 
2210f9c78b2bSJens Axboe 	return 0;
2211f9c78b2bSJens Axboe }
2212f9c78b2bSJens Axboe subsys_initcall(init_bio);
2213