xref: /openbmc/linux/block/bio.c (revision 2268c0fe)
1f9c78b2bSJens Axboe /*
2f9c78b2bSJens Axboe  * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
3f9c78b2bSJens Axboe  *
4f9c78b2bSJens Axboe  * This program is free software; you can redistribute it and/or modify
5f9c78b2bSJens Axboe  * it under the terms of the GNU General Public License version 2 as
6f9c78b2bSJens Axboe  * published by the Free Software Foundation.
7f9c78b2bSJens Axboe  *
8f9c78b2bSJens Axboe  * This program is distributed in the hope that it will be useful,
9f9c78b2bSJens Axboe  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10f9c78b2bSJens Axboe  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11f9c78b2bSJens Axboe  * GNU General Public License for more details.
12f9c78b2bSJens Axboe  *
13f9c78b2bSJens Axboe  * You should have received a copy of the GNU General Public License
14f9c78b2bSJens Axboe  * along with this program; if not, write to the Free Software
15f9c78b2bSJens Axboe  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
16f9c78b2bSJens Axboe  *
17f9c78b2bSJens Axboe  */
18f9c78b2bSJens Axboe #include <linux/mm.h>
19f9c78b2bSJens Axboe #include <linux/swap.h>
20f9c78b2bSJens Axboe #include <linux/bio.h>
21f9c78b2bSJens Axboe #include <linux/blkdev.h>
22f9c78b2bSJens Axboe #include <linux/uio.h>
23f9c78b2bSJens Axboe #include <linux/iocontext.h>
24f9c78b2bSJens Axboe #include <linux/slab.h>
25f9c78b2bSJens Axboe #include <linux/init.h>
26f9c78b2bSJens Axboe #include <linux/kernel.h>
27f9c78b2bSJens Axboe #include <linux/export.h>
28f9c78b2bSJens Axboe #include <linux/mempool.h>
29f9c78b2bSJens Axboe #include <linux/workqueue.h>
30f9c78b2bSJens Axboe #include <linux/cgroup.h>
3108e18eabSJosef Bacik #include <linux/blk-cgroup.h>
32f9c78b2bSJens Axboe 
33f9c78b2bSJens Axboe #include <trace/events/block.h>
349e234eeaSShaohua Li #include "blk.h"
3567b42d0bSJosef Bacik #include "blk-rq-qos.h"
36f9c78b2bSJens Axboe 
37f9c78b2bSJens Axboe /*
38f9c78b2bSJens Axboe  * Inline a certain number of bi_io_vecs inside the bio itself, to shrink
39f9c78b2bSJens Axboe  * a bio data allocation from two mempool calls to one
40f9c78b2bSJens Axboe  */
41f9c78b2bSJens Axboe #define BIO_INLINE_VECS		4
42f9c78b2bSJens Axboe 
43f9c78b2bSJens Axboe /*
44f9c78b2bSJens Axboe  * if you change this list, also change bvec_alloc or things will
45f9c78b2bSJens Axboe  * break badly! nr_vecs cannot be bigger than what you can fit into an
46f9c78b2bSJens Axboe  * unsigned short
47f9c78b2bSJens Axboe  */
48bd5c4facSMikulas Patocka #define BV(x, n) { .nr_vecs = x, .name = "biovec-"#n }
49ed996a52SChristoph Hellwig static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
50bd5c4facSMikulas Patocka 	BV(1, 1), BV(4, 4), BV(16, 16), BV(64, 64), BV(128, 128), BV(BIO_MAX_PAGES, max),
51f9c78b2bSJens Axboe };
52f9c78b2bSJens Axboe #undef BV
53f9c78b2bSJens Axboe 
54f9c78b2bSJens Axboe /*
55f9c78b2bSJens Axboe  * fs_bio_set is the bio_set containing bio and iovec memory pools used by
56f9c78b2bSJens Axboe  * IO code that does not need private memory pools.
57f9c78b2bSJens Axboe  */
58f4f8154aSKent Overstreet struct bio_set fs_bio_set;
59f9c78b2bSJens Axboe EXPORT_SYMBOL(fs_bio_set);
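
/*
 * Editor's illustrative sketch (not part of the original source): in this
 * kernel the bio_alloc() helper in <linux/bio.h> simply wraps
 * bio_alloc_bioset() with &fs_bio_set, so generic filesystem code can do:
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *
 * Stacking drivers in the I/O path should allocate from a private bio_set
 * instead, so they cannot exhaust the shared fs_bio_set reserve.
 */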
60f9c78b2bSJens Axboe 
61f9c78b2bSJens Axboe /*
62f9c78b2bSJens Axboe  * Our slab pool management
63f9c78b2bSJens Axboe  */
64f9c78b2bSJens Axboe struct bio_slab {
65f9c78b2bSJens Axboe 	struct kmem_cache *slab;
66f9c78b2bSJens Axboe 	unsigned int slab_ref;
67f9c78b2bSJens Axboe 	unsigned int slab_size;
68f9c78b2bSJens Axboe 	char name[8];
69f9c78b2bSJens Axboe };
70f9c78b2bSJens Axboe static DEFINE_MUTEX(bio_slab_lock);
71f9c78b2bSJens Axboe static struct bio_slab *bio_slabs;
72f9c78b2bSJens Axboe static unsigned int bio_slab_nr, bio_slab_max;
73f9c78b2bSJens Axboe 
74f9c78b2bSJens Axboe static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
75f9c78b2bSJens Axboe {
76f9c78b2bSJens Axboe 	unsigned int sz = sizeof(struct bio) + extra_size;
77f9c78b2bSJens Axboe 	struct kmem_cache *slab = NULL;
78f9c78b2bSJens Axboe 	struct bio_slab *bslab, *new_bio_slabs;
79f9c78b2bSJens Axboe 	unsigned int new_bio_slab_max;
80f9c78b2bSJens Axboe 	unsigned int i, entry = -1;
81f9c78b2bSJens Axboe 
82f9c78b2bSJens Axboe 	mutex_lock(&bio_slab_lock);
83f9c78b2bSJens Axboe 
84f9c78b2bSJens Axboe 	i = 0;
85f9c78b2bSJens Axboe 	while (i < bio_slab_nr) {
86f9c78b2bSJens Axboe 		bslab = &bio_slabs[i];
87f9c78b2bSJens Axboe 
88f9c78b2bSJens Axboe 		if (!bslab->slab && entry == -1)
89f9c78b2bSJens Axboe 			entry = i;
90f9c78b2bSJens Axboe 		else if (bslab->slab_size == sz) {
91f9c78b2bSJens Axboe 			slab = bslab->slab;
92f9c78b2bSJens Axboe 			bslab->slab_ref++;
93f9c78b2bSJens Axboe 			break;
94f9c78b2bSJens Axboe 		}
95f9c78b2bSJens Axboe 		i++;
96f9c78b2bSJens Axboe 	}
97f9c78b2bSJens Axboe 
98f9c78b2bSJens Axboe 	if (slab)
99f9c78b2bSJens Axboe 		goto out_unlock;
100f9c78b2bSJens Axboe 
101f9c78b2bSJens Axboe 	if (bio_slab_nr == bio_slab_max && entry == -1) {
102f9c78b2bSJens Axboe 		new_bio_slab_max = bio_slab_max << 1;
103f9c78b2bSJens Axboe 		new_bio_slabs = krealloc(bio_slabs,
104f9c78b2bSJens Axboe 					 new_bio_slab_max * sizeof(struct bio_slab),
105f9c78b2bSJens Axboe 					 GFP_KERNEL);
106f9c78b2bSJens Axboe 		if (!new_bio_slabs)
107f9c78b2bSJens Axboe 			goto out_unlock;
108f9c78b2bSJens Axboe 		bio_slab_max = new_bio_slab_max;
109f9c78b2bSJens Axboe 		bio_slabs = new_bio_slabs;
110f9c78b2bSJens Axboe 	}
111f9c78b2bSJens Axboe 	if (entry == -1)
112f9c78b2bSJens Axboe 		entry = bio_slab_nr++;
113f9c78b2bSJens Axboe 
114f9c78b2bSJens Axboe 	bslab = &bio_slabs[entry];
115f9c78b2bSJens Axboe 
116f9c78b2bSJens Axboe 	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
1176a241483SMikulas Patocka 	slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
1186a241483SMikulas Patocka 				 SLAB_HWCACHE_ALIGN, NULL);
119f9c78b2bSJens Axboe 	if (!slab)
120f9c78b2bSJens Axboe 		goto out_unlock;
121f9c78b2bSJens Axboe 
122f9c78b2bSJens Axboe 	bslab->slab = slab;
123f9c78b2bSJens Axboe 	bslab->slab_ref = 1;
124f9c78b2bSJens Axboe 	bslab->slab_size = sz;
125f9c78b2bSJens Axboe out_unlock:
126f9c78b2bSJens Axboe 	mutex_unlock(&bio_slab_lock);
127f9c78b2bSJens Axboe 	return slab;
128f9c78b2bSJens Axboe }
129f9c78b2bSJens Axboe 
130f9c78b2bSJens Axboe static void bio_put_slab(struct bio_set *bs)
131f9c78b2bSJens Axboe {
132f9c78b2bSJens Axboe 	struct bio_slab *bslab = NULL;
133f9c78b2bSJens Axboe 	unsigned int i;
134f9c78b2bSJens Axboe 
135f9c78b2bSJens Axboe 	mutex_lock(&bio_slab_lock);
136f9c78b2bSJens Axboe 
137f9c78b2bSJens Axboe 	for (i = 0; i < bio_slab_nr; i++) {
138f9c78b2bSJens Axboe 		if (bs->bio_slab == bio_slabs[i].slab) {
139f9c78b2bSJens Axboe 			bslab = &bio_slabs[i];
140f9c78b2bSJens Axboe 			break;
141f9c78b2bSJens Axboe 		}
142f9c78b2bSJens Axboe 	}
143f9c78b2bSJens Axboe 
144f9c78b2bSJens Axboe 	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
145f9c78b2bSJens Axboe 		goto out;
146f9c78b2bSJens Axboe 
147f9c78b2bSJens Axboe 	WARN_ON(!bslab->slab_ref);
148f9c78b2bSJens Axboe 
149f9c78b2bSJens Axboe 	if (--bslab->slab_ref)
150f9c78b2bSJens Axboe 		goto out;
151f9c78b2bSJens Axboe 
152f9c78b2bSJens Axboe 	kmem_cache_destroy(bslab->slab);
153f9c78b2bSJens Axboe 	bslab->slab = NULL;
154f9c78b2bSJens Axboe 
155f9c78b2bSJens Axboe out:
156f9c78b2bSJens Axboe 	mutex_unlock(&bio_slab_lock);
157f9c78b2bSJens Axboe }
158f9c78b2bSJens Axboe 
159f9c78b2bSJens Axboe unsigned int bvec_nr_vecs(unsigned short idx)
160f9c78b2bSJens Axboe {
161d6c02a9bSGreg Edwards 	return bvec_slabs[--idx].nr_vecs;
162f9c78b2bSJens Axboe }
163f9c78b2bSJens Axboe 
164f9c78b2bSJens Axboe void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
165f9c78b2bSJens Axboe {
166ed996a52SChristoph Hellwig 	if (!idx)
167ed996a52SChristoph Hellwig 		return;
168ed996a52SChristoph Hellwig 	idx--;
169f9c78b2bSJens Axboe 
170ed996a52SChristoph Hellwig 	BIO_BUG_ON(idx >= BVEC_POOL_NR);
171ed996a52SChristoph Hellwig 
172ed996a52SChristoph Hellwig 	if (idx == BVEC_POOL_MAX) {
173f9c78b2bSJens Axboe 		mempool_free(bv, pool);
174ed996a52SChristoph Hellwig 	} else {
175f9c78b2bSJens Axboe 		struct biovec_slab *bvs = bvec_slabs + idx;
176f9c78b2bSJens Axboe 
177f9c78b2bSJens Axboe 		kmem_cache_free(bvs->slab, bv);
178f9c78b2bSJens Axboe 	}
179f9c78b2bSJens Axboe }
180f9c78b2bSJens Axboe 
181f9c78b2bSJens Axboe struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
182f9c78b2bSJens Axboe 			   mempool_t *pool)
183f9c78b2bSJens Axboe {
184f9c78b2bSJens Axboe 	struct bio_vec *bvl;
185f9c78b2bSJens Axboe 
186f9c78b2bSJens Axboe 	/*
187f9c78b2bSJens Axboe 	 * see the comment near the bvec_slabs array definition!
188f9c78b2bSJens Axboe 	 */
189f9c78b2bSJens Axboe 	switch (nr) {
190f9c78b2bSJens Axboe 	case 1:
191f9c78b2bSJens Axboe 		*idx = 0;
192f9c78b2bSJens Axboe 		break;
193f9c78b2bSJens Axboe 	case 2 ... 4:
194f9c78b2bSJens Axboe 		*idx = 1;
195f9c78b2bSJens Axboe 		break;
196f9c78b2bSJens Axboe 	case 5 ... 16:
197f9c78b2bSJens Axboe 		*idx = 2;
198f9c78b2bSJens Axboe 		break;
199f9c78b2bSJens Axboe 	case 17 ... 64:
200f9c78b2bSJens Axboe 		*idx = 3;
201f9c78b2bSJens Axboe 		break;
202f9c78b2bSJens Axboe 	case 65 ... 128:
203f9c78b2bSJens Axboe 		*idx = 4;
204f9c78b2bSJens Axboe 		break;
205f9c78b2bSJens Axboe 	case 129 ... BIO_MAX_PAGES:
206f9c78b2bSJens Axboe 		*idx = 5;
207f9c78b2bSJens Axboe 		break;
208f9c78b2bSJens Axboe 	default:
209f9c78b2bSJens Axboe 		return NULL;
210f9c78b2bSJens Axboe 	}
211f9c78b2bSJens Axboe 
212f9c78b2bSJens Axboe 	/*
213f9c78b2bSJens Axboe 	 * idx now points to the pool we want to allocate from. only the
214f9c78b2bSJens Axboe 	 * BIO_MAX_PAGES pool (BVEC_POOL_MAX) is mempool backed.
215f9c78b2bSJens Axboe 	 */
216ed996a52SChristoph Hellwig 	if (*idx == BVEC_POOL_MAX) {
217f9c78b2bSJens Axboe fallback:
218f9c78b2bSJens Axboe 		bvl = mempool_alloc(pool, gfp_mask);
219f9c78b2bSJens Axboe 	} else {
220f9c78b2bSJens Axboe 		struct biovec_slab *bvs = bvec_slabs + *idx;
221d0164adcSMel Gorman 		gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);
222f9c78b2bSJens Axboe 
223f9c78b2bSJens Axboe 		/*
224f9c78b2bSJens Axboe 		 * Make this allocation restricted and don't dump info on
225f9c78b2bSJens Axboe 		 * allocation failures, since we'll fallback to the mempool
226f9c78b2bSJens Axboe 		 * in case of failure.
227f9c78b2bSJens Axboe 		 */
228f9c78b2bSJens Axboe 		__gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
229f9c78b2bSJens Axboe 
230f9c78b2bSJens Axboe 		/*
231d0164adcSMel Gorman 		 * Try a slab allocation. If this fails and __GFP_DIRECT_RECLAIM
232f9c78b2bSJens Axboe 		 * is set, retry with the mempool-backed BIO_MAX_PAGES pool
233f9c78b2bSJens Axboe 		 */
234f9c78b2bSJens Axboe 		bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
235d0164adcSMel Gorman 		if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
236ed996a52SChristoph Hellwig 			*idx = BVEC_POOL_MAX;
237f9c78b2bSJens Axboe 			goto fallback;
238f9c78b2bSJens Axboe 		}
239f9c78b2bSJens Axboe 	}
240f9c78b2bSJens Axboe 
241ed996a52SChristoph Hellwig 	(*idx)++;
242f9c78b2bSJens Axboe 	return bvl;
243f9c78b2bSJens Axboe }
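
/*
 * Editor's note (illustrative, not part of the original source): the
 * switch above rounds a request up to the next pool size, and the final
 * (*idx)++ offsets the stored index by one so that 0 can mean "no
 * external bvec array".  E.g. nr = 3 is served from the "biovec-4" slab,
 * with *idx = 1 before the increment and 2 afterwards; bvec_free()
 * undoes the same offset with its initial idx--.
 */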
244f9c78b2bSJens Axboe 
2459ae3b3f5SJens Axboe void bio_uninit(struct bio *bio)
246f9c78b2bSJens Axboe {
247f9c78b2bSJens Axboe 	bio_disassociate_task(bio);
248f9c78b2bSJens Axboe }
2499ae3b3f5SJens Axboe EXPORT_SYMBOL(bio_uninit);
250f9c78b2bSJens Axboe 
251f9c78b2bSJens Axboe static void bio_free(struct bio *bio)
252f9c78b2bSJens Axboe {
253f9c78b2bSJens Axboe 	struct bio_set *bs = bio->bi_pool;
254f9c78b2bSJens Axboe 	void *p;
255f9c78b2bSJens Axboe 
2569ae3b3f5SJens Axboe 	bio_uninit(bio);
257f9c78b2bSJens Axboe 
258f9c78b2bSJens Axboe 	if (bs) {
2598aa6ba2fSKent Overstreet 		bvec_free(&bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));
260f9c78b2bSJens Axboe 
261f9c78b2bSJens Axboe 		/*
262f9c78b2bSJens Axboe 		 * If we have front padding, adjust the bio pointer before freeing
263f9c78b2bSJens Axboe 		 */
264f9c78b2bSJens Axboe 		p = bio;
265f9c78b2bSJens Axboe 		p -= bs->front_pad;
266f9c78b2bSJens Axboe 
2678aa6ba2fSKent Overstreet 		mempool_free(p, &bs->bio_pool);
268f9c78b2bSJens Axboe 	} else {
269f9c78b2bSJens Axboe 		/* Bio was allocated by bio_kmalloc() */
270f9c78b2bSJens Axboe 		kfree(bio);
271f9c78b2bSJens Axboe 	}
272f9c78b2bSJens Axboe }
273f9c78b2bSJens Axboe 
2749ae3b3f5SJens Axboe /*
2759ae3b3f5SJens Axboe  * Users of this function have their own bio allocation. Subsequently,
2769ae3b3f5SJens Axboe  * they must remember to pair any call to bio_init() with bio_uninit()
2779ae3b3f5SJens Axboe  * when IO has completed, or when the bio is released.
2789ae3b3f5SJens Axboe  */
2793a83f467SMing Lei void bio_init(struct bio *bio, struct bio_vec *table,
2803a83f467SMing Lei 	      unsigned short max_vecs)
281f9c78b2bSJens Axboe {
282f9c78b2bSJens Axboe 	memset(bio, 0, sizeof(*bio));
283c4cf5261SJens Axboe 	atomic_set(&bio->__bi_remaining, 1);
284dac56212SJens Axboe 	atomic_set(&bio->__bi_cnt, 1);
2853a83f467SMing Lei 
2863a83f467SMing Lei 	bio->bi_io_vec = table;
2873a83f467SMing Lei 	bio->bi_max_vecs = max_vecs;
288f9c78b2bSJens Axboe }
289f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_init);
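
/*
 * Editor's illustrative sketch (not part of the original source): a
 * typical on-stack user of bio_init(), paired with bio_uninit() as the
 * comment above requires.  The single-page synchronous read is just an
 * example, not a requirement of the API:
 *
 *	struct bio bio;
 *	struct bio_vec bvec;
 *	int ret;
 *
 *	bio_init(&bio, &bvec, 1);
 *	bio_set_dev(&bio, bdev);
 *	bio.bi_iter.bi_sector = sector;
 *	bio.bi_opf = REQ_OP_READ;
 *	bio_add_page(&bio, page, PAGE_SIZE, 0);
 *	ret = submit_bio_wait(&bio);
 *	bio_uninit(&bio);
 */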
290f9c78b2bSJens Axboe 
291f9c78b2bSJens Axboe /**
292f9c78b2bSJens Axboe  * bio_reset - reinitialize a bio
293f9c78b2bSJens Axboe  * @bio:	bio to reset
294f9c78b2bSJens Axboe  *
295f9c78b2bSJens Axboe  * Description:
296f9c78b2bSJens Axboe  *   After calling bio_reset(), @bio will be in the same state as a freshly
297f9c78b2bSJens Axboe  *   allocated bio returned by bio_alloc_bioset() - the only fields that are
298f9c78b2bSJens Axboe  *   preserved are the ones that are initialized by bio_alloc_bioset(). See
299f9c78b2bSJens Axboe  *   comment in struct bio.
300f9c78b2bSJens Axboe  */
301f9c78b2bSJens Axboe void bio_reset(struct bio *bio)
302f9c78b2bSJens Axboe {
303f9c78b2bSJens Axboe 	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
304f9c78b2bSJens Axboe 
3059ae3b3f5SJens Axboe 	bio_uninit(bio);
306f9c78b2bSJens Axboe 
307f9c78b2bSJens Axboe 	memset(bio, 0, BIO_RESET_BYTES);
3084246a0b6SChristoph Hellwig 	bio->bi_flags = flags;
309c4cf5261SJens Axboe 	atomic_set(&bio->__bi_remaining, 1);
310f9c78b2bSJens Axboe }
311f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_reset);
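
/*
 * Editor's illustrative sketch (not part of the original source): a
 * driver that embeds a bio in a longer-lived structure can reuse it for
 * a second I/O instead of allocating a fresh one ("my_end_io" is a
 * hypothetical completion handler):
 *
 *	bio_reset(bio);
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = next_sector;
 *	bio->bi_opf = REQ_OP_WRITE;
 *	bio->bi_end_io = my_end_io;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	submit_bio(bio);
 */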
312f9c78b2bSJens Axboe 
31338f8baaeSChristoph Hellwig static struct bio *__bio_chain_endio(struct bio *bio)
314f9c78b2bSJens Axboe {
3154246a0b6SChristoph Hellwig 	struct bio *parent = bio->bi_private;
3164246a0b6SChristoph Hellwig 
3174e4cbee9SChristoph Hellwig 	if (!parent->bi_status)
3184e4cbee9SChristoph Hellwig 		parent->bi_status = bio->bi_status;
319f9c78b2bSJens Axboe 	bio_put(bio);
32038f8baaeSChristoph Hellwig 	return parent;
32138f8baaeSChristoph Hellwig }
32238f8baaeSChristoph Hellwig 
32338f8baaeSChristoph Hellwig static void bio_chain_endio(struct bio *bio)
32438f8baaeSChristoph Hellwig {
32538f8baaeSChristoph Hellwig 	bio_endio(__bio_chain_endio(bio));
326f9c78b2bSJens Axboe }
327f9c78b2bSJens Axboe 
328f9c78b2bSJens Axboe /**
329f9c78b2bSJens Axboe  * bio_chain - chain bio completions
330f9c78b2bSJens Axboe  * @bio: the target bio
331f9c78b2bSJens Axboe  * @parent: the @bio's parent bio
332f9c78b2bSJens Axboe  *
333f9c78b2bSJens Axboe  * The caller won't have a bi_end_io called when @bio completes - instead,
334f9c78b2bSJens Axboe  * @parent's bi_end_io won't be called until both @parent and @bio have
335f9c78b2bSJens Axboe  * completed; the chained bio will also be freed when it completes.
336f9c78b2bSJens Axboe  *
337f9c78b2bSJens Axboe  * The caller must not set bi_private or bi_end_io in @bio.
338f9c78b2bSJens Axboe  */
339f9c78b2bSJens Axboe void bio_chain(struct bio *bio, struct bio *parent)
340f9c78b2bSJens Axboe {
341f9c78b2bSJens Axboe 	BUG_ON(bio->bi_private || bio->bi_end_io);
342f9c78b2bSJens Axboe 
343f9c78b2bSJens Axboe 	bio->bi_private = parent;
344f9c78b2bSJens Axboe 	bio->bi_end_io	= bio_chain_endio;
345c4cf5261SJens Axboe 	bio_inc_remaining(parent);
346f9c78b2bSJens Axboe }
347f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_chain);
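
/*
 * Editor's illustrative sketch (not part of the original source): the
 * common split-and-chain pattern used by stacking drivers such as md.
 * The front part is split off and chained to the remainder, the
 * remainder is resubmitted, and the parent's bi_end_io only runs once
 * both halves have completed:
 *
 *	if (sectors < bio_sectors(bio)) {
 *		struct bio *split = bio_split(bio, sectors, GFP_NOIO, bs);
 *
 *		bio_chain(split, bio);
 *		generic_make_request(bio);
 *		bio = split;
 *	}
 */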
348f9c78b2bSJens Axboe 
349f9c78b2bSJens Axboe static void bio_alloc_rescue(struct work_struct *work)
350f9c78b2bSJens Axboe {
351f9c78b2bSJens Axboe 	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
352f9c78b2bSJens Axboe 	struct bio *bio;
353f9c78b2bSJens Axboe 
354f9c78b2bSJens Axboe 	while (1) {
355f9c78b2bSJens Axboe 		spin_lock(&bs->rescue_lock);
356f9c78b2bSJens Axboe 		bio = bio_list_pop(&bs->rescue_list);
357f9c78b2bSJens Axboe 		spin_unlock(&bs->rescue_lock);
358f9c78b2bSJens Axboe 
359f9c78b2bSJens Axboe 		if (!bio)
360f9c78b2bSJens Axboe 			break;
361f9c78b2bSJens Axboe 
362f9c78b2bSJens Axboe 		generic_make_request(bio);
363f9c78b2bSJens Axboe 	}
364f9c78b2bSJens Axboe }
365f9c78b2bSJens Axboe 
366f9c78b2bSJens Axboe static void punt_bios_to_rescuer(struct bio_set *bs)
367f9c78b2bSJens Axboe {
368f9c78b2bSJens Axboe 	struct bio_list punt, nopunt;
369f9c78b2bSJens Axboe 	struct bio *bio;
370f9c78b2bSJens Axboe 
37147e0fb46SNeilBrown 	if (WARN_ON_ONCE(!bs->rescue_workqueue))
37247e0fb46SNeilBrown 		return;
373f9c78b2bSJens Axboe 	/*
374f9c78b2bSJens Axboe 	 * In order to guarantee forward progress we must punt only bios that
375f9c78b2bSJens Axboe 	 * were allocated from this bio_set; otherwise, if there was a bio on
376f9c78b2bSJens Axboe 	 * there for a stacking driver higher up in the stack, processing it
377f9c78b2bSJens Axboe 	 * could require allocating bios from this bio_set, and doing that from
378f9c78b2bSJens Axboe 	 * our own rescuer would be bad.
379f9c78b2bSJens Axboe 	 *
380f9c78b2bSJens Axboe 	 * Since bio lists are singly linked, pop them all instead of trying to
381f9c78b2bSJens Axboe 	 * remove from the middle of the list:
382f9c78b2bSJens Axboe 	 */
383f9c78b2bSJens Axboe 
384f9c78b2bSJens Axboe 	bio_list_init(&punt);
385f9c78b2bSJens Axboe 	bio_list_init(&nopunt);
386f9c78b2bSJens Axboe 
387f5fe1b51SNeilBrown 	while ((bio = bio_list_pop(&current->bio_list[0])))
388f9c78b2bSJens Axboe 		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
389f5fe1b51SNeilBrown 	current->bio_list[0] = nopunt;
390f9c78b2bSJens Axboe 
391f5fe1b51SNeilBrown 	bio_list_init(&nopunt);
392f5fe1b51SNeilBrown 	while ((bio = bio_list_pop(&current->bio_list[1])))
393f5fe1b51SNeilBrown 		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
394f5fe1b51SNeilBrown 	current->bio_list[1] = nopunt;
395f9c78b2bSJens Axboe 
396f9c78b2bSJens Axboe 	spin_lock(&bs->rescue_lock);
397f9c78b2bSJens Axboe 	bio_list_merge(&bs->rescue_list, &punt);
398f9c78b2bSJens Axboe 	spin_unlock(&bs->rescue_lock);
399f9c78b2bSJens Axboe 
400f9c78b2bSJens Axboe 	queue_work(bs->rescue_workqueue, &bs->rescue_work);
401f9c78b2bSJens Axboe }
402f9c78b2bSJens Axboe 
403f9c78b2bSJens Axboe /**
404f9c78b2bSJens Axboe  * bio_alloc_bioset - allocate a bio for I/O
405519c8e9fSRandy Dunlap  * @gfp_mask:   the GFP_* mask given to the slab allocator
406f9c78b2bSJens Axboe  * @nr_iovecs:	number of iovecs to pre-allocate
407f9c78b2bSJens Axboe  * @bs:		the bio_set to allocate from.
408f9c78b2bSJens Axboe  *
409f9c78b2bSJens Axboe  * Description:
410f9c78b2bSJens Axboe  *   If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
411f9c78b2bSJens Axboe  *   backed by the @bs's mempool.
412f9c78b2bSJens Axboe  *
413d0164adcSMel Gorman  *   When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc will
414d0164adcSMel Gorman  *   always be able to allocate a bio. This is due to the mempool guarantees.
415d0164adcSMel Gorman  *   To make this work, callers must never allocate more than 1 bio at a time
416d0164adcSMel Gorman  *   from this pool. Callers that need to allocate more than 1 bio must always
417d0164adcSMel Gorman  *   submit the previously allocated bio for IO before attempting to allocate
418d0164adcSMel Gorman  *   a new one. Failure to do so can cause deadlocks under memory pressure.
419f9c78b2bSJens Axboe  *
420f9c78b2bSJens Axboe  *   Note that when running under generic_make_request() (i.e. any block
421f9c78b2bSJens Axboe  *   driver), bios are not submitted until after you return - see the code in
422f9c78b2bSJens Axboe  *   generic_make_request() that converts recursion into iteration, to prevent
423f9c78b2bSJens Axboe  *   stack overflows.
424f9c78b2bSJens Axboe  *
425f9c78b2bSJens Axboe  *   This would normally mean allocating multiple bios under
426f9c78b2bSJens Axboe  *   generic_make_request() would be susceptible to deadlocks, but we have
427f9c78b2bSJens Axboe  *   deadlock avoidance code that resubmits any blocked bios from a rescuer
428f9c78b2bSJens Axboe  *   thread.
429f9c78b2bSJens Axboe  *
430f9c78b2bSJens Axboe  *   However, we do not guarantee forward progress for allocations from other
431f9c78b2bSJens Axboe  *   mempools. Doing multiple allocations from the same mempool under
432f9c78b2bSJens Axboe  *   generic_make_request() should be avoided - instead, use bio_set's front_pad
433f9c78b2bSJens Axboe  *   for per-bio allocations.
434f9c78b2bSJens Axboe  *
435f9c78b2bSJens Axboe  *   RETURNS:
436f9c78b2bSJens Axboe  *   Pointer to new bio on success, NULL on failure.
437f9c78b2bSJens Axboe  */
4387a88fa19SDan Carpenter struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
4397a88fa19SDan Carpenter 			     struct bio_set *bs)
440f9c78b2bSJens Axboe {
441f9c78b2bSJens Axboe 	gfp_t saved_gfp = gfp_mask;
442f9c78b2bSJens Axboe 	unsigned front_pad;
443f9c78b2bSJens Axboe 	unsigned inline_vecs;
444f9c78b2bSJens Axboe 	struct bio_vec *bvl = NULL;
445f9c78b2bSJens Axboe 	struct bio *bio;
446f9c78b2bSJens Axboe 	void *p;
447f9c78b2bSJens Axboe 
448f9c78b2bSJens Axboe 	if (!bs) {
449f9c78b2bSJens Axboe 		if (nr_iovecs > UIO_MAXIOV)
450f9c78b2bSJens Axboe 			return NULL;
451f9c78b2bSJens Axboe 
452f9c78b2bSJens Axboe 		p = kmalloc(sizeof(struct bio) +
453f9c78b2bSJens Axboe 			    nr_iovecs * sizeof(struct bio_vec),
454f9c78b2bSJens Axboe 			    gfp_mask);
455f9c78b2bSJens Axboe 		front_pad = 0;
456f9c78b2bSJens Axboe 		inline_vecs = nr_iovecs;
457f9c78b2bSJens Axboe 	} else {
458d8f429e1SJunichi Nomura 		/* should not use nobvec bioset for nr_iovecs > 0 */
4598aa6ba2fSKent Overstreet 		if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) &&
4608aa6ba2fSKent Overstreet 				 nr_iovecs > 0))
461d8f429e1SJunichi Nomura 			return NULL;
462f9c78b2bSJens Axboe 		/*
463f9c78b2bSJens Axboe 		 * generic_make_request() converts recursion to iteration; this
464f9c78b2bSJens Axboe 		 * means if we're running beneath it, any bios we allocate and
465f9c78b2bSJens Axboe 		 * submit will not be submitted (and thus freed) until after we
466f9c78b2bSJens Axboe 		 * return.
467f9c78b2bSJens Axboe 		 *
468f9c78b2bSJens Axboe 		 * This exposes us to a potential deadlock if we allocate
469f9c78b2bSJens Axboe 		 * multiple bios from the same bio_set() while running
470f9c78b2bSJens Axboe 		 * underneath generic_make_request(). If we were to allocate
471f9c78b2bSJens Axboe 		 * multiple bios (say a stacking block driver that was splitting
472f9c78b2bSJens Axboe 		 * bios), we would deadlock if we exhausted the mempool's
473f9c78b2bSJens Axboe 		 * reserve.
474f9c78b2bSJens Axboe 		 *
475f9c78b2bSJens Axboe 		 * We solve this, and guarantee forward progress, with a rescuer
476f9c78b2bSJens Axboe 		 * workqueue per bio_set. If we go to allocate and there are
477f9c78b2bSJens Axboe 		 * bios on current->bio_list, we first try the allocation
478d0164adcSMel Gorman 		 * without __GFP_DIRECT_RECLAIM; if that fails, we punt those
479d0164adcSMel Gorman 		 * bios we would be blocking to the rescuer workqueue before
480d0164adcSMel Gorman 		 * we retry with the original gfp_flags.
481f9c78b2bSJens Axboe 		 */
482f9c78b2bSJens Axboe 
483f5fe1b51SNeilBrown 		if (current->bio_list &&
484f5fe1b51SNeilBrown 		    (!bio_list_empty(&current->bio_list[0]) ||
48547e0fb46SNeilBrown 		     !bio_list_empty(&current->bio_list[1])) &&
48647e0fb46SNeilBrown 		    bs->rescue_workqueue)
487d0164adcSMel Gorman 			gfp_mask &= ~__GFP_DIRECT_RECLAIM;
488f9c78b2bSJens Axboe 
4898aa6ba2fSKent Overstreet 		p = mempool_alloc(&bs->bio_pool, gfp_mask);
490f9c78b2bSJens Axboe 		if (!p && gfp_mask != saved_gfp) {
491f9c78b2bSJens Axboe 			punt_bios_to_rescuer(bs);
492f9c78b2bSJens Axboe 			gfp_mask = saved_gfp;
4938aa6ba2fSKent Overstreet 			p = mempool_alloc(&bs->bio_pool, gfp_mask);
494f9c78b2bSJens Axboe 		}
495f9c78b2bSJens Axboe 
496f9c78b2bSJens Axboe 		front_pad = bs->front_pad;
497f9c78b2bSJens Axboe 		inline_vecs = BIO_INLINE_VECS;
498f9c78b2bSJens Axboe 	}
499f9c78b2bSJens Axboe 
500f9c78b2bSJens Axboe 	if (unlikely(!p))
501f9c78b2bSJens Axboe 		return NULL;
502f9c78b2bSJens Axboe 
503f9c78b2bSJens Axboe 	bio = p + front_pad;
5043a83f467SMing Lei 	bio_init(bio, NULL, 0);
505f9c78b2bSJens Axboe 
506f9c78b2bSJens Axboe 	if (nr_iovecs > inline_vecs) {
507ed996a52SChristoph Hellwig 		unsigned long idx = 0;
508ed996a52SChristoph Hellwig 
5098aa6ba2fSKent Overstreet 		bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
510f9c78b2bSJens Axboe 		if (!bvl && gfp_mask != saved_gfp) {
511f9c78b2bSJens Axboe 			punt_bios_to_rescuer(bs);
512f9c78b2bSJens Axboe 			gfp_mask = saved_gfp;
5138aa6ba2fSKent Overstreet 			bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
514f9c78b2bSJens Axboe 		}
515f9c78b2bSJens Axboe 
516f9c78b2bSJens Axboe 		if (unlikely(!bvl))
517f9c78b2bSJens Axboe 			goto err_free;
518f9c78b2bSJens Axboe 
519ed996a52SChristoph Hellwig 		bio->bi_flags |= idx << BVEC_POOL_OFFSET;
520f9c78b2bSJens Axboe 	} else if (nr_iovecs) {
521f9c78b2bSJens Axboe 		bvl = bio->bi_inline_vecs;
522f9c78b2bSJens Axboe 	}
523f9c78b2bSJens Axboe 
524f9c78b2bSJens Axboe 	bio->bi_pool = bs;
525f9c78b2bSJens Axboe 	bio->bi_max_vecs = nr_iovecs;
526f9c78b2bSJens Axboe 	bio->bi_io_vec = bvl;
527f9c78b2bSJens Axboe 	return bio;
528f9c78b2bSJens Axboe 
529f9c78b2bSJens Axboe err_free:
5308aa6ba2fSKent Overstreet 	mempool_free(p, &bs->bio_pool);
531f9c78b2bSJens Axboe 	return NULL;
532f9c78b2bSJens Axboe }
533f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_alloc_bioset);
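
/*
 * Editor's illustrative sketch (not part of the original source):
 * honouring the mempool rule documented above - a caller that needs many
 * bios from one bio_set submits each bio before allocating the next, so
 * the reserve can be recycled and the allocation cannot deadlock:
 *
 *	while (nr_sects) {
 *		struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, bs);
 *
 *		(fill bio->bi_iter and add pages for this chunk)
 *		submit_bio(bio);
 *		nr_sects -= chunk_sects;
 *	}
 */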
534f9c78b2bSJens Axboe 
53538a72dacSKent Overstreet void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
536f9c78b2bSJens Axboe {
537f9c78b2bSJens Axboe 	unsigned long flags;
538f9c78b2bSJens Axboe 	struct bio_vec bv;
539f9c78b2bSJens Axboe 	struct bvec_iter iter;
540f9c78b2bSJens Axboe 
54138a72dacSKent Overstreet 	__bio_for_each_segment(bv, bio, iter, start) {
542f9c78b2bSJens Axboe 		char *data = bvec_kmap_irq(&bv, &flags);
543f9c78b2bSJens Axboe 		memset(data, 0, bv.bv_len);
544f9c78b2bSJens Axboe 		flush_dcache_page(bv.bv_page);
545f9c78b2bSJens Axboe 		bvec_kunmap_irq(data, &flags);
546f9c78b2bSJens Axboe 	}
547f9c78b2bSJens Axboe }
54838a72dacSKent Overstreet EXPORT_SYMBOL(zero_fill_bio_iter);
549f9c78b2bSJens Axboe 
550f9c78b2bSJens Axboe /**
551f9c78b2bSJens Axboe  * bio_put - release a reference to a bio
552f9c78b2bSJens Axboe  * @bio:   bio to release reference to
553f9c78b2bSJens Axboe  *
554f9c78b2bSJens Axboe  * Description:
555f9c78b2bSJens Axboe  *   Put a reference to a &struct bio, either one you have gotten with
5569b10f6a9SNeilBrown  *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
557f9c78b2bSJens Axboe  **/
558f9c78b2bSJens Axboe void bio_put(struct bio *bio)
559f9c78b2bSJens Axboe {
560dac56212SJens Axboe 	if (!bio_flagged(bio, BIO_REFFED))
561dac56212SJens Axboe 		bio_free(bio);
562dac56212SJens Axboe 	else {
563dac56212SJens Axboe 		BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));
564f9c78b2bSJens Axboe 
565f9c78b2bSJens Axboe 		/*
566f9c78b2bSJens Axboe 		 * last put frees it
567f9c78b2bSJens Axboe 		 */
568dac56212SJens Axboe 		if (atomic_dec_and_test(&bio->__bi_cnt))
569f9c78b2bSJens Axboe 			bio_free(bio);
570f9c78b2bSJens Axboe 	}
571dac56212SJens Axboe }
572f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_put);
573f9c78b2bSJens Axboe 
574f9c78b2bSJens Axboe inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
575f9c78b2bSJens Axboe {
576f9c78b2bSJens Axboe 	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
577f9c78b2bSJens Axboe 		blk_recount_segments(q, bio);
578f9c78b2bSJens Axboe 
579f9c78b2bSJens Axboe 	return bio->bi_phys_segments;
580f9c78b2bSJens Axboe }
581f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_phys_segments);
582f9c78b2bSJens Axboe 
583f9c78b2bSJens Axboe /**
584f9c78b2bSJens Axboe  * 	__bio_clone_fast - clone a bio that shares the original bio's biovec
585f9c78b2bSJens Axboe  * 	@bio: destination bio
586f9c78b2bSJens Axboe  * 	@bio_src: bio to clone
587f9c78b2bSJens Axboe  *
588f9c78b2bSJens Axboe  *	Clone a &bio. Caller will own the cloned bio, but not
589f9c78b2bSJens Axboe  *	the actual data it points to. Reference count of the cloned
590f9c78b2bSJens Axboe  * 	bio will be one.
591f9c78b2bSJens Axboe  *
592f9c78b2bSJens Axboe  * 	Caller must ensure that @bio_src is not freed before @bio.
593f9c78b2bSJens Axboe  */
594f9c78b2bSJens Axboe void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
595f9c78b2bSJens Axboe {
596ed996a52SChristoph Hellwig 	BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));
597f9c78b2bSJens Axboe 
598f9c78b2bSJens Axboe 	/*
59974d46992SChristoph Hellwig 	 * most users will be overriding ->bi_disk with a new target,
600f9c78b2bSJens Axboe 	 * so we don't set or calculate new physical/hw segment counts here
601f9c78b2bSJens Axboe 	 */
60274d46992SChristoph Hellwig 	bio->bi_disk = bio_src->bi_disk;
60362530ed8SMichael Lyle 	bio->bi_partno = bio_src->bi_partno;
604b7c44ed9SJens Axboe 	bio_set_flag(bio, BIO_CLONED);
605111be883SShaohua Li 	if (bio_flagged(bio_src, BIO_THROTTLED))
606111be883SShaohua Li 		bio_set_flag(bio, BIO_THROTTLED);
6071eff9d32SJens Axboe 	bio->bi_opf = bio_src->bi_opf;
608ca474b73SHannes Reinecke 	bio->bi_ioprio = bio_src->bi_ioprio;
609cb6934f8SJens Axboe 	bio->bi_write_hint = bio_src->bi_write_hint;
610f9c78b2bSJens Axboe 	bio->bi_iter = bio_src->bi_iter;
611f9c78b2bSJens Axboe 	bio->bi_io_vec = bio_src->bi_io_vec;
61220bd723eSPaolo Valente 
613b5f2954dSDennis Zhou 	bio_clone_blkcg_association(bio, bio_src);
614f9c78b2bSJens Axboe }
615f9c78b2bSJens Axboe EXPORT_SYMBOL(__bio_clone_fast);
616f9c78b2bSJens Axboe 
617f9c78b2bSJens Axboe /**
618f9c78b2bSJens Axboe  *	bio_clone_fast - clone a bio that shares the original bio's biovec
619f9c78b2bSJens Axboe  *	@bio: bio to clone
620f9c78b2bSJens Axboe  *	@gfp_mask: allocation priority
621f9c78b2bSJens Axboe  *	@bs: bio_set to allocate from
622f9c78b2bSJens Axboe  *
623f9c78b2bSJens Axboe  * 	Like __bio_clone_fast, only also allocates the returned bio
624f9c78b2bSJens Axboe  */
625f9c78b2bSJens Axboe struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
626f9c78b2bSJens Axboe {
627f9c78b2bSJens Axboe 	struct bio *b;
628f9c78b2bSJens Axboe 
629f9c78b2bSJens Axboe 	b = bio_alloc_bioset(gfp_mask, 0, bs);
630f9c78b2bSJens Axboe 	if (!b)
631f9c78b2bSJens Axboe 		return NULL;
632f9c78b2bSJens Axboe 
633f9c78b2bSJens Axboe 	__bio_clone_fast(b, bio);
634f9c78b2bSJens Axboe 
635f9c78b2bSJens Axboe 	if (bio_integrity(bio)) {
636f9c78b2bSJens Axboe 		int ret;
637f9c78b2bSJens Axboe 
638f9c78b2bSJens Axboe 		ret = bio_integrity_clone(b, bio, gfp_mask);
639f9c78b2bSJens Axboe 
640f9c78b2bSJens Axboe 		if (ret < 0) {
641f9c78b2bSJens Axboe 			bio_put(b);
642f9c78b2bSJens Axboe 			return NULL;
643f9c78b2bSJens Axboe 		}
644f9c78b2bSJens Axboe 	}
645f9c78b2bSJens Axboe 
646f9c78b2bSJens Axboe 	return b;
647f9c78b2bSJens Axboe }
648f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_clone_fast);
649f9c78b2bSJens Axboe 
650f4595875SShaohua Li /**
651c66a14d0SKent Overstreet  *	bio_add_pc_page	-	attempt to add page to bio
652c66a14d0SKent Overstreet  *	@q: the target queue
653c66a14d0SKent Overstreet  *	@bio: destination bio
654c66a14d0SKent Overstreet  *	@page: page to add
655c66a14d0SKent Overstreet  *	@len: vec entry length
656c66a14d0SKent Overstreet  *	@offset: vec entry offset
657f9c78b2bSJens Axboe  *
658c66a14d0SKent Overstreet  *	Attempt to add a page to the bio_vec maplist. This can fail for a
659c66a14d0SKent Overstreet  *	number of reasons, such as the bio being full or target block device
660c66a14d0SKent Overstreet  *	limitations. The target block device must allow bios up to PAGE_SIZE,
661c66a14d0SKent Overstreet  *	so it is always possible to add a single page to an empty bio.
662c66a14d0SKent Overstreet  *
663c66a14d0SKent Overstreet  *	This should only be used by REQ_PC bios.
664f9c78b2bSJens Axboe  */
665c66a14d0SKent Overstreet int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
666c66a14d0SKent Overstreet 		    *page, unsigned int len, unsigned int offset)
667f9c78b2bSJens Axboe {
668f9c78b2bSJens Axboe 	int retried_segments = 0;
669f9c78b2bSJens Axboe 	struct bio_vec *bvec;
670f9c78b2bSJens Axboe 
671f9c78b2bSJens Axboe 	/*
672f9c78b2bSJens Axboe 	 * cloned bio must not modify vec list
673f9c78b2bSJens Axboe 	 */
674f9c78b2bSJens Axboe 	if (unlikely(bio_flagged(bio, BIO_CLONED)))
675f9c78b2bSJens Axboe 		return 0;
676f9c78b2bSJens Axboe 
677c66a14d0SKent Overstreet 	if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
678f9c78b2bSJens Axboe 		return 0;
679f9c78b2bSJens Axboe 
680f9c78b2bSJens Axboe 	/*
681f9c78b2bSJens Axboe 	 * For filesystems with a blocksize smaller than the pagesize
682f9c78b2bSJens Axboe 	 * we will often be called with the same page as last time and
683f9c78b2bSJens Axboe 	 * a consecutive offset.  Optimize this special case.
684f9c78b2bSJens Axboe 	 */
685f9c78b2bSJens Axboe 	if (bio->bi_vcnt > 0) {
686f9c78b2bSJens Axboe 		struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
687f9c78b2bSJens Axboe 
688f9c78b2bSJens Axboe 		if (page == prev->bv_page &&
689f9c78b2bSJens Axboe 		    offset == prev->bv_offset + prev->bv_len) {
690f9c78b2bSJens Axboe 			prev->bv_len += len;
691fcbf6a08SMaurizio Lombardi 			bio->bi_iter.bi_size += len;
692f9c78b2bSJens Axboe 			goto done;
693f9c78b2bSJens Axboe 		}
69466cb45aaSJens Axboe 
69566cb45aaSJens Axboe 		/*
69666cb45aaSJens Axboe 		 * If the queue doesn't support SG gaps and adding this
69766cb45aaSJens Axboe 		 * offset would create a gap, disallow it.
69866cb45aaSJens Axboe 		 */
69903100aadSKeith Busch 		if (bvec_gap_to_prev(q, prev, offset))
70066cb45aaSJens Axboe 			return 0;
701f9c78b2bSJens Axboe 	}
702f9c78b2bSJens Axboe 
7030aa69fd3SChristoph Hellwig 	if (bio_full(bio))
704f9c78b2bSJens Axboe 		return 0;
705f9c78b2bSJens Axboe 
706f9c78b2bSJens Axboe 	/*
707f9c78b2bSJens Axboe 	 * setup the new entry, we might clear it again later if we
708f9c78b2bSJens Axboe 	 * cannot add the page
709f9c78b2bSJens Axboe 	 */
710f9c78b2bSJens Axboe 	bvec = &bio->bi_io_vec[bio->bi_vcnt];
711f9c78b2bSJens Axboe 	bvec->bv_page = page;
712f9c78b2bSJens Axboe 	bvec->bv_len = len;
713f9c78b2bSJens Axboe 	bvec->bv_offset = offset;
714fcbf6a08SMaurizio Lombardi 	bio->bi_vcnt++;
715fcbf6a08SMaurizio Lombardi 	bio->bi_phys_segments++;
716fcbf6a08SMaurizio Lombardi 	bio->bi_iter.bi_size += len;
717fcbf6a08SMaurizio Lombardi 
718fcbf6a08SMaurizio Lombardi 	/*
719fcbf6a08SMaurizio Lombardi 	 * Perform a recount if the number of segments is greater
720fcbf6a08SMaurizio Lombardi 	 * than queue_max_segments(q).
721fcbf6a08SMaurizio Lombardi 	 */
722fcbf6a08SMaurizio Lombardi 
723fcbf6a08SMaurizio Lombardi 	while (bio->bi_phys_segments > queue_max_segments(q)) {
724fcbf6a08SMaurizio Lombardi 
725fcbf6a08SMaurizio Lombardi 		if (retried_segments)
726fcbf6a08SMaurizio Lombardi 			goto failed;
727fcbf6a08SMaurizio Lombardi 
728fcbf6a08SMaurizio Lombardi 		retried_segments = 1;
729fcbf6a08SMaurizio Lombardi 		blk_recount_segments(q, bio);
730fcbf6a08SMaurizio Lombardi 	}
731f9c78b2bSJens Axboe 
732f9c78b2bSJens Axboe 	/* If we may be able to merge these biovecs, force a recount */
7333dccdae5SChristoph Hellwig 	if (bio->bi_vcnt > 1 && biovec_phys_mergeable(q, bvec - 1, bvec))
734b7c44ed9SJens Axboe 		bio_clear_flag(bio, BIO_SEG_VALID);
735f9c78b2bSJens Axboe 
736f9c78b2bSJens Axboe  done:
737f9c78b2bSJens Axboe 	return len;
738fcbf6a08SMaurizio Lombardi 
739fcbf6a08SMaurizio Lombardi  failed:
740fcbf6a08SMaurizio Lombardi 	bvec->bv_page = NULL;
741fcbf6a08SMaurizio Lombardi 	bvec->bv_len = 0;
742fcbf6a08SMaurizio Lombardi 	bvec->bv_offset = 0;
743fcbf6a08SMaurizio Lombardi 	bio->bi_vcnt--;
744fcbf6a08SMaurizio Lombardi 	bio->bi_iter.bi_size -= len;
745fcbf6a08SMaurizio Lombardi 	blk_recount_segments(q, bio);
746fcbf6a08SMaurizio Lombardi 	return 0;
747f9c78b2bSJens Axboe }
748f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_add_pc_page);
749f9c78b2bSJens Axboe 
750f9c78b2bSJens Axboe /**
7510aa69fd3SChristoph Hellwig  * __bio_try_merge_page - try appending data to an existing bvec.
7520aa69fd3SChristoph Hellwig  * @bio: destination bio
7530aa69fd3SChristoph Hellwig  * @page: page to add
7540aa69fd3SChristoph Hellwig  * @len: length of the data to add
7550aa69fd3SChristoph Hellwig  * @off: offset of the data in @page
7560aa69fd3SChristoph Hellwig  *
7570aa69fd3SChristoph Hellwig  * Try to add the data at @page + @off to the last bvec of @bio.  This is
7580aa69fd3SChristoph Hellwig  * a useful optimisation for file systems with a block size smaller than the
7590aa69fd3SChristoph Hellwig  * page size.
7600aa69fd3SChristoph Hellwig  *
7610aa69fd3SChristoph Hellwig  * Return %true on success or %false on failure.
7620aa69fd3SChristoph Hellwig  */
7630aa69fd3SChristoph Hellwig bool __bio_try_merge_page(struct bio *bio, struct page *page,
7640aa69fd3SChristoph Hellwig 		unsigned int len, unsigned int off)
7650aa69fd3SChristoph Hellwig {
7660aa69fd3SChristoph Hellwig 	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
7670aa69fd3SChristoph Hellwig 		return false;
7680aa69fd3SChristoph Hellwig 
7690aa69fd3SChristoph Hellwig 	if (bio->bi_vcnt > 0) {
7700aa69fd3SChristoph Hellwig 		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
7710aa69fd3SChristoph Hellwig 
7720aa69fd3SChristoph Hellwig 		if (page == bv->bv_page && off == bv->bv_offset + bv->bv_len) {
7730aa69fd3SChristoph Hellwig 			bv->bv_len += len;
7740aa69fd3SChristoph Hellwig 			bio->bi_iter.bi_size += len;
7750aa69fd3SChristoph Hellwig 			return true;
7760aa69fd3SChristoph Hellwig 		}
7770aa69fd3SChristoph Hellwig 	}
7780aa69fd3SChristoph Hellwig 	return false;
7790aa69fd3SChristoph Hellwig }
7800aa69fd3SChristoph Hellwig EXPORT_SYMBOL_GPL(__bio_try_merge_page);
7810aa69fd3SChristoph Hellwig 
7820aa69fd3SChristoph Hellwig /**
7830aa69fd3SChristoph Hellwig  * __bio_add_page - add page to a bio in a new segment
7840aa69fd3SChristoph Hellwig  * @bio: destination bio
7850aa69fd3SChristoph Hellwig  * @page: page to add
7860aa69fd3SChristoph Hellwig  * @len: length of the data to add
7870aa69fd3SChristoph Hellwig  * @off: offset of the data in @page
7880aa69fd3SChristoph Hellwig  *
7890aa69fd3SChristoph Hellwig  * Add the data at @page + @off to @bio as a new bvec.  The caller must ensure
7900aa69fd3SChristoph Hellwig  * that @bio has space for another bvec.
7910aa69fd3SChristoph Hellwig  */
7920aa69fd3SChristoph Hellwig void __bio_add_page(struct bio *bio, struct page *page,
7930aa69fd3SChristoph Hellwig 		unsigned int len, unsigned int off)
7940aa69fd3SChristoph Hellwig {
7950aa69fd3SChristoph Hellwig 	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];
7960aa69fd3SChristoph Hellwig 
7970aa69fd3SChristoph Hellwig 	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
7980aa69fd3SChristoph Hellwig 	WARN_ON_ONCE(bio_full(bio));
7990aa69fd3SChristoph Hellwig 
8000aa69fd3SChristoph Hellwig 	bv->bv_page = page;
8010aa69fd3SChristoph Hellwig 	bv->bv_offset = off;
8020aa69fd3SChristoph Hellwig 	bv->bv_len = len;
8030aa69fd3SChristoph Hellwig 
8040aa69fd3SChristoph Hellwig 	bio->bi_iter.bi_size += len;
8050aa69fd3SChristoph Hellwig 	bio->bi_vcnt++;
8060aa69fd3SChristoph Hellwig }
8070aa69fd3SChristoph Hellwig EXPORT_SYMBOL_GPL(__bio_add_page);
8080aa69fd3SChristoph Hellwig 
8090aa69fd3SChristoph Hellwig /**
810f9c78b2bSJens Axboe  *	bio_add_page	-	attempt to add page to bio
811f9c78b2bSJens Axboe  *	@bio: destination bio
812f9c78b2bSJens Axboe  *	@page: page to add
813f9c78b2bSJens Axboe  *	@len: vec entry length
814f9c78b2bSJens Axboe  *	@offset: vec entry offset
815f9c78b2bSJens Axboe  *
816c66a14d0SKent Overstreet  *	Attempt to add a page to the bio_vec maplist. This will only fail
817c66a14d0SKent Overstreet  *	if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
818f9c78b2bSJens Axboe  */
819c66a14d0SKent Overstreet int bio_add_page(struct bio *bio, struct page *page,
820c66a14d0SKent Overstreet 		 unsigned int len, unsigned int offset)
821f9c78b2bSJens Axboe {
8220aa69fd3SChristoph Hellwig 	if (!__bio_try_merge_page(bio, page, len, offset)) {
8230aa69fd3SChristoph Hellwig 		if (bio_full(bio))
824c66a14d0SKent Overstreet 			return 0;
8250aa69fd3SChristoph Hellwig 		__bio_add_page(bio, page, len, offset);
826c66a14d0SKent Overstreet 	}
827c66a14d0SKent Overstreet 	return len;
828f9c78b2bSJens Axboe }
829f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_add_page);
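
/*
 * Editor's illustrative sketch (not part of the original source): filling
 * a bio one page at a time and starting a new bio when it fills up.
 * "alloc_next_bio" is a hypothetical helper standing in for a driver's
 * own allocation routine:
 *
 *	for (i = 0; i < nr_pages; i++) {
 *		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE) {
 *			submit_bio(bio);
 *			bio = alloc_next_bio();
 *			bio_add_page(bio, pages[i], PAGE_SIZE, 0);
 *		}
 *	}
 */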
830f9c78b2bSJens Axboe 
831576ed913SChristoph Hellwig #define PAGE_PTRS_PER_BVEC     (sizeof(struct bio_vec) / sizeof(struct page *))
832576ed913SChristoph Hellwig 
8332cefe4dbSKent Overstreet /**
83417d51b10SMartin Wilck  * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
8352cefe4dbSKent Overstreet  * @bio: bio to add pages to
8362cefe4dbSKent Overstreet  * @iter: iov iterator describing the region to be mapped
8372cefe4dbSKent Overstreet  *
83817d51b10SMartin Wilck  * Pins pages from *iter and appends them to @bio's bvec array. The
8392cefe4dbSKent Overstreet  * pages will have to be released using put_page() when done.
84017d51b10SMartin Wilck  * For multi-segment *iter, this function only adds pages from
84117d51b10SMartin Wilck  * the next non-empty segment of the iov iterator.
8422cefe4dbSKent Overstreet  */
84317d51b10SMartin Wilck static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
8442cefe4dbSKent Overstreet {
845576ed913SChristoph Hellwig 	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
846576ed913SChristoph Hellwig 	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
8472cefe4dbSKent Overstreet 	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
8482cefe4dbSKent Overstreet 	struct page **pages = (struct page **)bv;
849576ed913SChristoph Hellwig 	ssize_t size, left;
850576ed913SChristoph Hellwig 	unsigned len, i;
851b403ea24SMartin Wilck 	size_t offset;
852576ed913SChristoph Hellwig 
853576ed913SChristoph Hellwig 	/*
854576ed913SChristoph Hellwig 	 * Move page array up in the allocated memory for the bio vecs as far as
855576ed913SChristoph Hellwig 	 * possible so that we can start filling biovecs from the beginning
856576ed913SChristoph Hellwig 	 * without overwriting the temporary page array.
857576ed913SChristoph Hellwig 	 */
858576ed913SChristoph Hellwig 	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
859576ed913SChristoph Hellwig 	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
8602cefe4dbSKent Overstreet 
8612cefe4dbSKent Overstreet 	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
8622cefe4dbSKent Overstreet 	if (unlikely(size <= 0))
8632cefe4dbSKent Overstreet 		return size ? size : -EFAULT;
8642cefe4dbSKent Overstreet 
865576ed913SChristoph Hellwig 	for (left = size, i = 0; left > 0; left -= len, i++) {
866576ed913SChristoph Hellwig 		struct page *page = pages[i];
8672cefe4dbSKent Overstreet 
868576ed913SChristoph Hellwig 		len = min_t(size_t, PAGE_SIZE - offset, left);
869576ed913SChristoph Hellwig 		if (WARN_ON_ONCE(bio_add_page(bio, page, len, offset) != len))
870576ed913SChristoph Hellwig 			return -EINVAL;
871576ed913SChristoph Hellwig 		offset = 0;
8722cefe4dbSKent Overstreet 	}
8732cefe4dbSKent Overstreet 
8742cefe4dbSKent Overstreet 	iov_iter_advance(iter, size);
8752cefe4dbSKent Overstreet 	return 0;
8762cefe4dbSKent Overstreet }
87717d51b10SMartin Wilck 
87817d51b10SMartin Wilck /**
87917d51b10SMartin Wilck  * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
88017d51b10SMartin Wilck  * @bio: bio to add pages to
88117d51b10SMartin Wilck  * @iter: iov iterator describing the region to be mapped
88217d51b10SMartin Wilck  *
88317d51b10SMartin Wilck  * Pins pages from *iter and appends them to @bio's bvec array. The
88417d51b10SMartin Wilck  * pages will have to be released using put_page() when done.
88517d51b10SMartin Wilck  * The function tries, but does not guarantee, to pin as many pages as
88617d51b10SMartin Wilck  * fit into the bio, or are requested in *iter, whichever is smaller.
88717d51b10SMartin Wilck  * If MM encounters an error pinning the requested pages, it stops.
88817d51b10SMartin Wilck  * Error is returned only if 0 pages could be pinned.
88917d51b10SMartin Wilck  */
89017d51b10SMartin Wilck int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
89117d51b10SMartin Wilck {
89217d51b10SMartin Wilck 	unsigned short orig_vcnt = bio->bi_vcnt;
89317d51b10SMartin Wilck 
89417d51b10SMartin Wilck 	do {
89517d51b10SMartin Wilck 		int ret = __bio_iov_iter_get_pages(bio, iter);
89617d51b10SMartin Wilck 
89717d51b10SMartin Wilck 		if (unlikely(ret))
89817d51b10SMartin Wilck 			return bio->bi_vcnt > orig_vcnt ? 0 : ret;
89917d51b10SMartin Wilck 
90017d51b10SMartin Wilck 	} while (iov_iter_count(iter) && !bio_full(bio));
90117d51b10SMartin Wilck 
90217d51b10SMartin Wilck 	return 0;
90317d51b10SMartin Wilck }
9042cefe4dbSKent Overstreet EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
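
/*
 * Editor's illustrative sketch (not part of the original source): a
 * direct-I/O style caller maps a user buffer straight into the bio and
 * releases the pinned pages on completion (error handling trimmed):
 *
 *	ret = bio_iov_iter_get_pages(bio, iter);
 *	if (ret)
 *		return ret;
 *	submit_bio(bio);
 *
 * and later, in the completion path:
 *
 *	bio_for_each_segment_all(bvec, bio, i)
 *		put_page(bvec->bv_page);
 */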
9052cefe4dbSKent Overstreet 
9064246a0b6SChristoph Hellwig static void submit_bio_wait_endio(struct bio *bio)
907f9c78b2bSJens Axboe {
90865e53aabSChristoph Hellwig 	complete(bio->bi_private);
909f9c78b2bSJens Axboe }
910f9c78b2bSJens Axboe 
911f9c78b2bSJens Axboe /**
912f9c78b2bSJens Axboe  * submit_bio_wait - submit a bio, and wait until it completes
913f9c78b2bSJens Axboe  * @bio: The &struct bio which describes the I/O
914f9c78b2bSJens Axboe  *
915f9c78b2bSJens Axboe  * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
916f9c78b2bSJens Axboe  * bio_endio() on failure.
9173d289d68SJan Kara  *
9183d289d68SJan Kara  * WARNING: Unlike how submit_bio() is usually used, this function does not
9193d289d68SJan Kara  * consume the bio reference. The caller must drop the reference
9203d289d68SJan Kara  * on their own.
921f9c78b2bSJens Axboe  */
9224e49ea4aSMike Christie int submit_bio_wait(struct bio *bio)
923f9c78b2bSJens Axboe {
924e319e1fbSByungchul Park 	DECLARE_COMPLETION_ONSTACK_MAP(done, bio->bi_disk->lockdep_map);
925f9c78b2bSJens Axboe 
92665e53aabSChristoph Hellwig 	bio->bi_private = &done;
927f9c78b2bSJens Axboe 	bio->bi_end_io = submit_bio_wait_endio;
9281eff9d32SJens Axboe 	bio->bi_opf |= REQ_SYNC;
9294e49ea4aSMike Christie 	submit_bio(bio);
93065e53aabSChristoph Hellwig 	wait_for_completion_io(&done);
931f9c78b2bSJens Axboe 
93265e53aabSChristoph Hellwig 	return blk_status_to_errno(bio->bi_status);
933f9c78b2bSJens Axboe }
934f9c78b2bSJens Axboe EXPORT_SYMBOL(submit_bio_wait);
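
/*
 * Editor's illustrative sketch (not part of the original source): a
 * synchronous one-page read.  Because submit_bio_wait() does not consume
 * the bio reference, the caller still owns the final bio_put():
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_READ;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	ret = submit_bio_wait(bio);
 *	bio_put(bio);
 */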
935f9c78b2bSJens Axboe 
936f9c78b2bSJens Axboe /**
937f9c78b2bSJens Axboe  * bio_advance - increment/complete a bio by some number of bytes
938f9c78b2bSJens Axboe  * @bio:	bio to advance
939f9c78b2bSJens Axboe  * @bytes:	number of bytes to complete
940f9c78b2bSJens Axboe  *
941f9c78b2bSJens Axboe  * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
942f9c78b2bSJens Axboe  * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
943f9c78b2bSJens Axboe  * be updated on the last bvec as well.
944f9c78b2bSJens Axboe  *
945f9c78b2bSJens Axboe  * @bio will then represent the remaining, uncompleted portion of the io.
946f9c78b2bSJens Axboe  */
947f9c78b2bSJens Axboe void bio_advance(struct bio *bio, unsigned bytes)
948f9c78b2bSJens Axboe {
949f9c78b2bSJens Axboe 	if (bio_integrity(bio))
950f9c78b2bSJens Axboe 		bio_integrity_advance(bio, bytes);
951f9c78b2bSJens Axboe 
952f9c78b2bSJens Axboe 	bio_advance_iter(bio, &bio->bi_iter, bytes);
953f9c78b2bSJens Axboe }
954f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_advance);
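
/*
 * Editor's note (illustrative, not part of the original source): a
 * stacking driver that has finished the first "done" bytes of a bio can
 * call
 *
 *	bio_advance(bio, done);
 *
 * after which bio->bi_iter describes only the uncompleted tail;
 * bi_sector has moved forward by done >> 9 and bi_size has shrunk by
 * done bytes.
 */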
955f9c78b2bSJens Axboe 
95645db54d5SKent Overstreet void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
95745db54d5SKent Overstreet 			struct bio *src, struct bvec_iter *src_iter)
958f9c78b2bSJens Axboe {
959f9c78b2bSJens Axboe 	struct bio_vec src_bv, dst_bv;
960f9c78b2bSJens Axboe 	void *src_p, *dst_p;
961f9c78b2bSJens Axboe 	unsigned bytes;
962f9c78b2bSJens Axboe 
96345db54d5SKent Overstreet 	while (src_iter->bi_size && dst_iter->bi_size) {
96445db54d5SKent Overstreet 		src_bv = bio_iter_iovec(src, *src_iter);
96545db54d5SKent Overstreet 		dst_bv = bio_iter_iovec(dst, *dst_iter);
96645db54d5SKent Overstreet 
96745db54d5SKent Overstreet 		bytes = min(src_bv.bv_len, dst_bv.bv_len);
96845db54d5SKent Overstreet 
96945db54d5SKent Overstreet 		src_p = kmap_atomic(src_bv.bv_page);
97045db54d5SKent Overstreet 		dst_p = kmap_atomic(dst_bv.bv_page);
97145db54d5SKent Overstreet 
97245db54d5SKent Overstreet 		memcpy(dst_p + dst_bv.bv_offset,
97345db54d5SKent Overstreet 		       src_p + src_bv.bv_offset,
97445db54d5SKent Overstreet 		       bytes);
97545db54d5SKent Overstreet 
97645db54d5SKent Overstreet 		kunmap_atomic(dst_p);
97745db54d5SKent Overstreet 		kunmap_atomic(src_p);
97845db54d5SKent Overstreet 
9796e6e811dSKent Overstreet 		flush_dcache_page(dst_bv.bv_page);
9806e6e811dSKent Overstreet 
98145db54d5SKent Overstreet 		bio_advance_iter(src, src_iter, bytes);
98245db54d5SKent Overstreet 		bio_advance_iter(dst, dst_iter, bytes);
98345db54d5SKent Overstreet 	}
98445db54d5SKent Overstreet }
98545db54d5SKent Overstreet EXPORT_SYMBOL(bio_copy_data_iter);
98645db54d5SKent Overstreet 
98745db54d5SKent Overstreet /**
98845db54d5SKent Overstreet  * bio_copy_data - copy contents of data buffers from one bio to another
98945db54d5SKent Overstreet  * @src: source bio
99045db54d5SKent Overstreet  * @dst: destination bio
99145db54d5SKent Overstreet  *
99245db54d5SKent Overstreet  * Stops when it reaches the end of either @src or @dst - that is, copies
99345db54d5SKent Overstreet  * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
99445db54d5SKent Overstreet  */
99545db54d5SKent Overstreet void bio_copy_data(struct bio *dst, struct bio *src)
99645db54d5SKent Overstreet {
99745db54d5SKent Overstreet 	struct bvec_iter src_iter = src->bi_iter;
99845db54d5SKent Overstreet 	struct bvec_iter dst_iter = dst->bi_iter;
99945db54d5SKent Overstreet 
100045db54d5SKent Overstreet 	bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
100145db54d5SKent Overstreet }
100245db54d5SKent Overstreet EXPORT_SYMBOL(bio_copy_data);
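
/*
 * Editor's note (illustrative, not part of the original source): both
 * bios must already have their own pages attached - bio_copy_data()
 * moves bytes, it does not allocate.  A bounce-style caller therefore
 * builds a second bio with fresh pages first, then does:
 *
 *	bio_copy_data(bounce_bio, orig_bio);
 *	submit_bio(bounce_bio);
 *
 * and copies back with the arguments swapped when a read completes.
 */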
100345db54d5SKent Overstreet 
100445db54d5SKent Overstreet /**
100545db54d5SKent Overstreet  * bio_list_copy_data - copy contents of data buffers from one chain of bios to
100645db54d5SKent Overstreet  * another
100745db54d5SKent Overstreet  * @src: source bio list
100845db54d5SKent Overstreet  * @dst: destination bio list
100945db54d5SKent Overstreet  *
101045db54d5SKent Overstreet  * Stops when it reaches the end of either the @src list or @dst list - that is,
101145db54d5SKent Overstreet  * copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of
101245db54d5SKent Overstreet  * bios).
101345db54d5SKent Overstreet  */
101445db54d5SKent Overstreet void bio_list_copy_data(struct bio *dst, struct bio *src)
101545db54d5SKent Overstreet {
101645db54d5SKent Overstreet 	struct bvec_iter src_iter = src->bi_iter;
101745db54d5SKent Overstreet 	struct bvec_iter dst_iter = dst->bi_iter;
101845db54d5SKent Overstreet 
1019f9c78b2bSJens Axboe 	while (1) {
1020f9c78b2bSJens Axboe 		if (!src_iter.bi_size) {
1021f9c78b2bSJens Axboe 			src = src->bi_next;
1022f9c78b2bSJens Axboe 			if (!src)
1023f9c78b2bSJens Axboe 				break;
1024f9c78b2bSJens Axboe 
1025f9c78b2bSJens Axboe 			src_iter = src->bi_iter;
1026f9c78b2bSJens Axboe 		}
1027f9c78b2bSJens Axboe 
1028f9c78b2bSJens Axboe 		if (!dst_iter.bi_size) {
1029f9c78b2bSJens Axboe 			dst = dst->bi_next;
1030f9c78b2bSJens Axboe 			if (!dst)
1031f9c78b2bSJens Axboe 				break;
1032f9c78b2bSJens Axboe 
1033f9c78b2bSJens Axboe 			dst_iter = dst->bi_iter;
1034f9c78b2bSJens Axboe 		}
1035f9c78b2bSJens Axboe 
103645db54d5SKent Overstreet 		bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
1037f9c78b2bSJens Axboe 	}
1038f9c78b2bSJens Axboe }
103945db54d5SKent Overstreet EXPORT_SYMBOL(bio_list_copy_data);
1040f9c78b2bSJens Axboe 
1041f9c78b2bSJens Axboe struct bio_map_data {
1042f9c78b2bSJens Axboe 	int is_our_pages;
104326e49cfcSKent Overstreet 	struct iov_iter iter;
104426e49cfcSKent Overstreet 	struct iovec iov[];
1045f9c78b2bSJens Axboe };
1046f9c78b2bSJens Axboe 
10470e5b935dSAl Viro static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
1048f9c78b2bSJens Axboe 					       gfp_t gfp_mask)
1049f9c78b2bSJens Axboe {
10500e5b935dSAl Viro 	struct bio_map_data *bmd;
10510e5b935dSAl Viro 	if (data->nr_segs > UIO_MAXIOV)
1052f9c78b2bSJens Axboe 		return NULL;
1053f9c78b2bSJens Axboe 
10540e5b935dSAl Viro 	bmd = kmalloc(sizeof(struct bio_map_data) +
10550e5b935dSAl Viro 		       sizeof(struct iovec) * data->nr_segs, gfp_mask);
10560e5b935dSAl Viro 	if (!bmd)
10570e5b935dSAl Viro 		return NULL;
10580e5b935dSAl Viro 	memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
10590e5b935dSAl Viro 	bmd->iter = *data;
10600e5b935dSAl Viro 	bmd->iter.iov = bmd->iov;
10610e5b935dSAl Viro 	return bmd;
1062f9c78b2bSJens Axboe }
1063f9c78b2bSJens Axboe 
10649124d3feSDongsu Park /**
10659124d3feSDongsu Park  * bio_copy_from_iter - copy all pages from iov_iter to bio
10669124d3feSDongsu Park  * @bio: The &struct bio which describes the I/O as destination
10679124d3feSDongsu Park  * @iter: iov_iter as source
10689124d3feSDongsu Park  *
10699124d3feSDongsu Park  * Copy all pages from iov_iter to bio.
10709124d3feSDongsu Park  * Returns 0 on success, or error on failure.
10719124d3feSDongsu Park  */
107298a09d61SAl Viro static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
1073f9c78b2bSJens Axboe {
10749124d3feSDongsu Park 	int i;
1075f9c78b2bSJens Axboe 	struct bio_vec *bvec;
1076f9c78b2bSJens Axboe 
1077f9c78b2bSJens Axboe 	bio_for_each_segment_all(bvec, bio, i) {
10789124d3feSDongsu Park 		ssize_t ret;
1079f9c78b2bSJens Axboe 
10809124d3feSDongsu Park 		ret = copy_page_from_iter(bvec->bv_page,
10819124d3feSDongsu Park 					  bvec->bv_offset,
10829124d3feSDongsu Park 					  bvec->bv_len,
108398a09d61SAl Viro 					  iter);
1084f9c78b2bSJens Axboe 
108598a09d61SAl Viro 		if (!iov_iter_count(iter))
10869124d3feSDongsu Park 			break;
1087f9c78b2bSJens Axboe 
10889124d3feSDongsu Park 		if (ret < bvec->bv_len)
10899124d3feSDongsu Park 			return -EFAULT;
1090f9c78b2bSJens Axboe 	}
1091f9c78b2bSJens Axboe 
10929124d3feSDongsu Park 	return 0;
1093f9c78b2bSJens Axboe }
1094f9c78b2bSJens Axboe 
10959124d3feSDongsu Park /**
10969124d3feSDongsu Park  * bio_copy_to_iter - copy all pages from bio to iov_iter
10979124d3feSDongsu Park  * @bio: The &struct bio which describes the I/O as source
10989124d3feSDongsu Park  * @iter: iov_iter as destination
10999124d3feSDongsu Park  *
11009124d3feSDongsu Park  * Copy all pages from bio to iov_iter.
11019124d3feSDongsu Park  * Returns 0 on success, or error on failure.
11029124d3feSDongsu Park  */
11039124d3feSDongsu Park static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
11049124d3feSDongsu Park {
11059124d3feSDongsu Park 	int i;
11069124d3feSDongsu Park 	struct bio_vec *bvec;
11079124d3feSDongsu Park 
11089124d3feSDongsu Park 	bio_for_each_segment_all(bvec, bio, i) {
11099124d3feSDongsu Park 		ssize_t ret;
11109124d3feSDongsu Park 
11119124d3feSDongsu Park 		ret = copy_page_to_iter(bvec->bv_page,
11129124d3feSDongsu Park 					bvec->bv_offset,
11139124d3feSDongsu Park 					bvec->bv_len,
11149124d3feSDongsu Park 					&iter);
11159124d3feSDongsu Park 
11169124d3feSDongsu Park 		if (!iov_iter_count(&iter))
11179124d3feSDongsu Park 			break;
11189124d3feSDongsu Park 
11199124d3feSDongsu Park 		if (ret < bvec->bv_len)
11209124d3feSDongsu Park 			return -EFAULT;
11219124d3feSDongsu Park 	}
11229124d3feSDongsu Park 
11239124d3feSDongsu Park 	return 0;
1124f9c78b2bSJens Axboe }
1125f9c78b2bSJens Axboe 
1126491221f8SGuoqing Jiang void bio_free_pages(struct bio *bio)
11271dfa0f68SChristoph Hellwig {
11281dfa0f68SChristoph Hellwig 	struct bio_vec *bvec;
11291dfa0f68SChristoph Hellwig 	int i;
11301dfa0f68SChristoph Hellwig 
11311dfa0f68SChristoph Hellwig 	bio_for_each_segment_all(bvec, bio, i)
11321dfa0f68SChristoph Hellwig 		__free_page(bvec->bv_page);
11331dfa0f68SChristoph Hellwig }
1134491221f8SGuoqing Jiang EXPORT_SYMBOL(bio_free_pages);
11351dfa0f68SChristoph Hellwig 
1136f9c78b2bSJens Axboe /**
1137f9c78b2bSJens Axboe  *	bio_uncopy_user	-	finish previously mapped bio
1138f9c78b2bSJens Axboe  *	@bio: bio being terminated
1139f9c78b2bSJens Axboe  *
1140ddad8dd0SChristoph Hellwig  *	Free pages allocated from bio_copy_user_iov() and write back data
1141f9c78b2bSJens Axboe  *	to user space in case of a read.
1142f9c78b2bSJens Axboe  */
1143f9c78b2bSJens Axboe int bio_uncopy_user(struct bio *bio)
1144f9c78b2bSJens Axboe {
1145f9c78b2bSJens Axboe 	struct bio_map_data *bmd = bio->bi_private;
11461dfa0f68SChristoph Hellwig 	int ret = 0;
1147f9c78b2bSJens Axboe 
1148f9c78b2bSJens Axboe 	if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
1149f9c78b2bSJens Axboe 		/*
1150f9c78b2bSJens Axboe 		 * if we're in a workqueue, the request is orphaned, so
11512d99b55dSHannes Reinecke 		 * don't copy into a random user address space, just free
11522d99b55dSHannes Reinecke 		 * and return -EINTR so user space doesn't expect any data.
1153f9c78b2bSJens Axboe 		 */
11542d99b55dSHannes Reinecke 		if (!current->mm)
11552d99b55dSHannes Reinecke 			ret = -EINTR;
11562d99b55dSHannes Reinecke 		else if (bio_data_dir(bio) == READ)
11579124d3feSDongsu Park 			ret = bio_copy_to_iter(bio, bmd->iter);
11581dfa0f68SChristoph Hellwig 		if (bmd->is_our_pages)
11591dfa0f68SChristoph Hellwig 			bio_free_pages(bio);
1160f9c78b2bSJens Axboe 	}
1161f9c78b2bSJens Axboe 	kfree(bmd);
1162f9c78b2bSJens Axboe 	bio_put(bio);
1163f9c78b2bSJens Axboe 	return ret;
1164f9c78b2bSJens Axboe }
1165f9c78b2bSJens Axboe 
1166f9c78b2bSJens Axboe /**
1167f9c78b2bSJens Axboe  *	bio_copy_user_iov	-	copy user data to bio
1168f9c78b2bSJens Axboe  *	@q:		destination block queue
1169f9c78b2bSJens Axboe  *	@map_data:	pointer to the rq_map_data holding pages (if necessary)
117026e49cfcSKent Overstreet  *	@iter:		iovec iterator
1171f9c78b2bSJens Axboe  *	@gfp_mask:	memory allocation flags
1172f9c78b2bSJens Axboe  *
1173f9c78b2bSJens Axboe  *	Prepares and returns a bio for indirect user io, bouncing data
1174f9c78b2bSJens Axboe  *	to/from kernel pages as necessary. Must be paired with a
1175f9c78b2bSJens Axboe  *	call to bio_uncopy_user() on io completion.
1176f9c78b2bSJens Axboe  */
1177f9c78b2bSJens Axboe struct bio *bio_copy_user_iov(struct request_queue *q,
1178f9c78b2bSJens Axboe 			      struct rq_map_data *map_data,
1179e81cef5dSAl Viro 			      struct iov_iter *iter,
118026e49cfcSKent Overstreet 			      gfp_t gfp_mask)
1181f9c78b2bSJens Axboe {
1182f9c78b2bSJens Axboe 	struct bio_map_data *bmd;
1183f9c78b2bSJens Axboe 	struct page *page;
1184f9c78b2bSJens Axboe 	struct bio *bio;
1185d16d44ebSAl Viro 	int i = 0, ret;
1186d16d44ebSAl Viro 	int nr_pages;
118726e49cfcSKent Overstreet 	unsigned int len = iter->count;
1188bd5ceceaSGeliang Tang 	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;
1189f9c78b2bSJens Axboe 
11900e5b935dSAl Viro 	bmd = bio_alloc_map_data(iter, gfp_mask);
1191f9c78b2bSJens Axboe 	if (!bmd)
1192f9c78b2bSJens Axboe 		return ERR_PTR(-ENOMEM);
1193f9c78b2bSJens Axboe 
119426e49cfcSKent Overstreet 	/*
119526e49cfcSKent Overstreet 	 * We need to do a deep copy of the iov_iter including the iovecs.
119626e49cfcSKent Overstreet 	 * The caller provided iov might point to an on-stack or otherwise
119726e49cfcSKent Overstreet 	 * shortlived one.
119826e49cfcSKent Overstreet 	 */
119926e49cfcSKent Overstreet 	bmd->is_our_pages = map_data ? 0 : 1;
120026e49cfcSKent Overstreet 
1201d16d44ebSAl Viro 	nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
1202d16d44ebSAl Viro 	if (nr_pages > BIO_MAX_PAGES)
1203d16d44ebSAl Viro 		nr_pages = BIO_MAX_PAGES;
1204f9c78b2bSJens Axboe 
1205f9c78b2bSJens Axboe 	ret = -ENOMEM;
1206f9c78b2bSJens Axboe 	bio = bio_kmalloc(gfp_mask, nr_pages);
1207f9c78b2bSJens Axboe 	if (!bio)
1208f9c78b2bSJens Axboe 		goto out_bmd;
1209f9c78b2bSJens Axboe 
1210f9c78b2bSJens Axboe 	ret = 0;
1211f9c78b2bSJens Axboe 
1212f9c78b2bSJens Axboe 	if (map_data) {
1213f9c78b2bSJens Axboe 		nr_pages = 1 << map_data->page_order;
1214f9c78b2bSJens Axboe 		i = map_data->offset / PAGE_SIZE;
1215f9c78b2bSJens Axboe 	}
1216f9c78b2bSJens Axboe 	while (len) {
1217f9c78b2bSJens Axboe 		unsigned int bytes = PAGE_SIZE;
1218f9c78b2bSJens Axboe 
1219f9c78b2bSJens Axboe 		bytes -= offset;
1220f9c78b2bSJens Axboe 
1221f9c78b2bSJens Axboe 		if (bytes > len)
1222f9c78b2bSJens Axboe 			bytes = len;
1223f9c78b2bSJens Axboe 
1224f9c78b2bSJens Axboe 		if (map_data) {
1225f9c78b2bSJens Axboe 			if (i == map_data->nr_entries * nr_pages) {
1226f9c78b2bSJens Axboe 				ret = -ENOMEM;
1227f9c78b2bSJens Axboe 				break;
1228f9c78b2bSJens Axboe 			}
1229f9c78b2bSJens Axboe 
1230f9c78b2bSJens Axboe 			page = map_data->pages[i / nr_pages];
1231f9c78b2bSJens Axboe 			page += (i % nr_pages);
1232f9c78b2bSJens Axboe 
1233f9c78b2bSJens Axboe 			i++;
1234f9c78b2bSJens Axboe 		} else {
1235f9c78b2bSJens Axboe 			page = alloc_page(q->bounce_gfp | gfp_mask);
1236f9c78b2bSJens Axboe 			if (!page) {
1237f9c78b2bSJens Axboe 				ret = -ENOMEM;
1238f9c78b2bSJens Axboe 				break;
1239f9c78b2bSJens Axboe 			}
1240f9c78b2bSJens Axboe 		}
1241f9c78b2bSJens Axboe 
1242f9c78b2bSJens Axboe 		if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
1243f9c78b2bSJens Axboe 			break;
1244f9c78b2bSJens Axboe 
1245f9c78b2bSJens Axboe 		len -= bytes;
1246f9c78b2bSJens Axboe 		offset = 0;
1247f9c78b2bSJens Axboe 	}
1248f9c78b2bSJens Axboe 
1249f9c78b2bSJens Axboe 	if (ret)
1250f9c78b2bSJens Axboe 		goto cleanup;
1251f9c78b2bSJens Axboe 
12522884d0beSAl Viro 	if (map_data)
12532884d0beSAl Viro 		map_data->offset += bio->bi_iter.bi_size;
12542884d0beSAl Viro 
1255f9c78b2bSJens Axboe 	/*
1256f9c78b2bSJens Axboe 	 * success
1257f9c78b2bSJens Axboe 	 */
125800e23707SDavid Howells 	if ((iov_iter_rw(iter) == WRITE && (!map_data || !map_data->null_mapped)) ||
1259f9c78b2bSJens Axboe 	    (map_data && map_data->from_user)) {
126098a09d61SAl Viro 		ret = bio_copy_from_iter(bio, iter);
1261f9c78b2bSJens Axboe 		if (ret)
1262f9c78b2bSJens Axboe 			goto cleanup;
126398a09d61SAl Viro 	} else {
1264f3587d76SKeith Busch 		zero_fill_bio(bio);
1265e81cef5dSAl Viro 		iov_iter_advance(iter, bio->bi_iter.bi_size);
1266f9c78b2bSJens Axboe 	}
1267f9c78b2bSJens Axboe 
126826e49cfcSKent Overstreet 	bio->bi_private = bmd;
12692884d0beSAl Viro 	if (map_data && map_data->null_mapped)
12702884d0beSAl Viro 		bio_set_flag(bio, BIO_NULL_MAPPED);
1271f9c78b2bSJens Axboe 	return bio;
1272f9c78b2bSJens Axboe cleanup:
1273f9c78b2bSJens Axboe 	if (!map_data)
12741dfa0f68SChristoph Hellwig 		bio_free_pages(bio);
1275f9c78b2bSJens Axboe 	bio_put(bio);
1276f9c78b2bSJens Axboe out_bmd:
1277f9c78b2bSJens Axboe 	kfree(bmd);
1278f9c78b2bSJens Axboe 	return ERR_PTR(ret);
1279f9c78b2bSJens Axboe }
1280f9c78b2bSJens Axboe 
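/*
 * Illustrative sketch, not part of this file: how a caller might pair
 * bio_copy_user_iov() with bio_uncopy_user().  'q' and 'iter' are assumed
 * to be a live request_queue and a user iov_iter set up by the caller.
 *
 *	struct bio *bio;
 *	int ret;
 *
 *	bio = bio_copy_user_iov(q, NULL, iter, GFP_KERNEL);
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *	... submit the bio and wait for completion ...
 *	ret = bio_uncopy_user(bio);
 *
 * bio_uncopy_user() copies the bounce pages back to user space for a READ
 * and frees them.
 */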
128137f19e57SChristoph Hellwig /**
128237f19e57SChristoph Hellwig  *	bio_map_user_iov - map user iovec into bio
128337f19e57SChristoph Hellwig  *	@q:		the struct request_queue for the bio
128437f19e57SChristoph Hellwig  *	@iter:		iovec iterator
128537f19e57SChristoph Hellwig  *	@gfp_mask:	memory allocation flags
128637f19e57SChristoph Hellwig  *
128737f19e57SChristoph Hellwig  *	Map the user space address into a bio suitable for io to a block
128837f19e57SChristoph Hellwig  *	device. Returns an error pointer in case of error.
128937f19e57SChristoph Hellwig  */
129037f19e57SChristoph Hellwig struct bio *bio_map_user_iov(struct request_queue *q,
1291e81cef5dSAl Viro 			     struct iov_iter *iter,
129226e49cfcSKent Overstreet 			     gfp_t gfp_mask)
1293f9c78b2bSJens Axboe {
129426e49cfcSKent Overstreet 	int j;
1295f9c78b2bSJens Axboe 	struct bio *bio;
1296076098e5SAl Viro 	int ret;
12972b04e8f6SAl Viro 	struct bio_vec *bvec;
1298f9c78b2bSJens Axboe 
1299b282cc76SAl Viro 	if (!iov_iter_count(iter))
1300f9c78b2bSJens Axboe 		return ERR_PTR(-EINVAL);
1301f9c78b2bSJens Axboe 
1302b282cc76SAl Viro 	bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES));
1303f9c78b2bSJens Axboe 	if (!bio)
1304f9c78b2bSJens Axboe 		return ERR_PTR(-ENOMEM);
1305f9c78b2bSJens Axboe 
13060a0f1513SAl Viro 	while (iov_iter_count(iter)) {
1307629e42bcSAl Viro 		struct page **pages;
1308076098e5SAl Viro 		ssize_t bytes;
1309076098e5SAl Viro 		size_t offs, added = 0;
1310076098e5SAl Viro 		int npages;
1311f9c78b2bSJens Axboe 
13120a0f1513SAl Viro 		bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
1313076098e5SAl Viro 		if (unlikely(bytes <= 0)) {
1314076098e5SAl Viro 			ret = bytes ? bytes : -EFAULT;
1315f9c78b2bSJens Axboe 			goto out_unmap;
1316f9c78b2bSJens Axboe 		}
1317f9c78b2bSJens Axboe 
1318076098e5SAl Viro 		npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);
1319076098e5SAl Viro 
132098f0bc99SAl Viro 		if (unlikely(offs & queue_dma_alignment(q))) {
132198f0bc99SAl Viro 			ret = -EINVAL;
132298f0bc99SAl Viro 			j = 0;
132398f0bc99SAl Viro 		} else {
1324629e42bcSAl Viro 			for (j = 0; j < npages; j++) {
132598f0bc99SAl Viro 				struct page *page = pages[j];
1326076098e5SAl Viro 				unsigned int n = PAGE_SIZE - offs;
132795d78c28SVitaly Mayatskikh 				unsigned short prev_bi_vcnt = bio->bi_vcnt;
1328f9c78b2bSJens Axboe 
1329076098e5SAl Viro 				if (n > bytes)
1330076098e5SAl Viro 					n = bytes;
1331f9c78b2bSJens Axboe 
133298f0bc99SAl Viro 				if (!bio_add_pc_page(q, bio, page, n, offs))
1333f9c78b2bSJens Axboe 					break;
1334f9c78b2bSJens Axboe 
133595d78c28SVitaly Mayatskikh 				/*
133695d78c28SVitaly Mayatskikh 				 * check if the vector was merged with the
133795d78c28SVitaly Mayatskikh 				 * previous one; if so, drop the now-redundant
133795d78c28SVitaly Mayatskikh 				 * page reference
133895d78c28SVitaly Mayatskikh 				 */
133995d78c28SVitaly Mayatskikh 				if (bio->bi_vcnt == prev_bi_vcnt)
134098f0bc99SAl Viro 					put_page(page);
134195d78c28SVitaly Mayatskikh 
1342076098e5SAl Viro 				added += n;
1343076098e5SAl Viro 				bytes -= n;
1344076098e5SAl Viro 				offs = 0;
1345f9c78b2bSJens Axboe 			}
13460a0f1513SAl Viro 			iov_iter_advance(iter, added);
134798f0bc99SAl Viro 		}
1348f9c78b2bSJens Axboe 		/*
1349f9c78b2bSJens Axboe 		 * release the pages we didn't map into the bio, if any
1350f9c78b2bSJens Axboe 		 */
1351629e42bcSAl Viro 		while (j < npages)
135209cbfeafSKirill A. Shutemov 			put_page(pages[j++]);
1353629e42bcSAl Viro 		kvfree(pages);
1354e2e115d1SAl Viro 		/* couldn't stuff something into bio? */
1355e2e115d1SAl Viro 		if (bytes)
1356e2e115d1SAl Viro 			break;
1357f9c78b2bSJens Axboe 	}
1358f9c78b2bSJens Axboe 
1359b7c44ed9SJens Axboe 	bio_set_flag(bio, BIO_USER_MAPPED);
136037f19e57SChristoph Hellwig 
136137f19e57SChristoph Hellwig 	/*
13625fad1b64SBart Van Assche 	 * subtle -- if bio_map_user_iov() ended up bouncing a bio,
136337f19e57SChristoph Hellwig 	 * it would normally disappear when its bi_end_io is run.
136437f19e57SChristoph Hellwig 	 * however, we need it for the unmap, so grab an extra
136537f19e57SChristoph Hellwig 	 * reference to it
136637f19e57SChristoph Hellwig 	 */
136737f19e57SChristoph Hellwig 	bio_get(bio);
1368f9c78b2bSJens Axboe 	return bio;
1369f9c78b2bSJens Axboe 
1370f9c78b2bSJens Axboe  out_unmap:
13712b04e8f6SAl Viro 	bio_for_each_segment_all(bvec, bio, j) {
13722b04e8f6SAl Viro 		put_page(bvec->bv_page);
1373f9c78b2bSJens Axboe 	}
1374f9c78b2bSJens Axboe 	bio_put(bio);
1375f9c78b2bSJens Axboe 	return ERR_PTR(ret);
1376f9c78b2bSJens Axboe }
1377f9c78b2bSJens Axboe 
1378f9c78b2bSJens Axboe static void __bio_unmap_user(struct bio *bio)
1379f9c78b2bSJens Axboe {
1380f9c78b2bSJens Axboe 	struct bio_vec *bvec;
1381f9c78b2bSJens Axboe 	int i;
1382f9c78b2bSJens Axboe 
1383f9c78b2bSJens Axboe 	/*
1384f9c78b2bSJens Axboe 	 * make sure we dirty pages we wrote to
1385f9c78b2bSJens Axboe 	 */
1386f9c78b2bSJens Axboe 	bio_for_each_segment_all(bvec, bio, i) {
1387f9c78b2bSJens Axboe 		if (bio_data_dir(bio) == READ)
1388f9c78b2bSJens Axboe 			set_page_dirty_lock(bvec->bv_page);
1389f9c78b2bSJens Axboe 
139009cbfeafSKirill A. Shutemov 		put_page(bvec->bv_page);
1391f9c78b2bSJens Axboe 	}
1392f9c78b2bSJens Axboe 
1393f9c78b2bSJens Axboe 	bio_put(bio);
1394f9c78b2bSJens Axboe }
1395f9c78b2bSJens Axboe 
1396f9c78b2bSJens Axboe /**
1397f9c78b2bSJens Axboe  *	bio_unmap_user	-	unmap a bio
1398f9c78b2bSJens Axboe  *	@bio:		the bio being unmapped
1399f9c78b2bSJens Axboe  *
14005fad1b64SBart Van Assche  *	Unmap a bio previously mapped by bio_map_user_iov(). Must be called from
14015fad1b64SBart Van Assche  *	process context.
1402f9c78b2bSJens Axboe  *
1403f9c78b2bSJens Axboe  *	bio_unmap_user() may sleep.
1404f9c78b2bSJens Axboe  */
1405f9c78b2bSJens Axboe void bio_unmap_user(struct bio *bio)
1406f9c78b2bSJens Axboe {
1407f9c78b2bSJens Axboe 	__bio_unmap_user(bio);
1408f9c78b2bSJens Axboe 	bio_put(bio);
1409f9c78b2bSJens Axboe }
1410f9c78b2bSJens Axboe 
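/*
 * Illustrative sketch, not part of this file: mapping user memory for
 * zero-copy I/O and unmapping it afterwards.  'q' and 'iter' are assumed
 * to exist in the caller.
 *
 *	struct bio *bio;
 *
 *	bio = bio_map_user_iov(q, iter, GFP_KERNEL);
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *	... submit the bio and wait for completion ...
 *	bio_unmap_user(bio);
 *
 * bio_unmap_user() dirties the pages on a READ, drops the page references,
 * and releases the extra bio reference taken by bio_map_user_iov().
 */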
14114246a0b6SChristoph Hellwig static void bio_map_kern_endio(struct bio *bio)
1412f9c78b2bSJens Axboe {
1413f9c78b2bSJens Axboe 	bio_put(bio);
1414f9c78b2bSJens Axboe }
1415f9c78b2bSJens Axboe 
141675c72b83SChristoph Hellwig /**
141775c72b83SChristoph Hellwig  *	bio_map_kern	-	map kernel address into bio
141875c72b83SChristoph Hellwig  *	@q: the struct request_queue for the bio
141975c72b83SChristoph Hellwig  *	@data: pointer to buffer to map
142075c72b83SChristoph Hellwig  *	@len: length in bytes
142175c72b83SChristoph Hellwig  *	@gfp_mask: allocation flags for bio allocation
142275c72b83SChristoph Hellwig  *
142375c72b83SChristoph Hellwig  *	Map the kernel address into a bio suitable for io to a block
142475c72b83SChristoph Hellwig  *	device. Returns an error pointer in case of error.
142575c72b83SChristoph Hellwig  */
142675c72b83SChristoph Hellwig struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
142775c72b83SChristoph Hellwig 			 gfp_t gfp_mask)
1428f9c78b2bSJens Axboe {
1429f9c78b2bSJens Axboe 	unsigned long kaddr = (unsigned long)data;
1430f9c78b2bSJens Axboe 	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1431f9c78b2bSJens Axboe 	unsigned long start = kaddr >> PAGE_SHIFT;
1432f9c78b2bSJens Axboe 	const int nr_pages = end - start;
1433f9c78b2bSJens Axboe 	int offset, i;
1434f9c78b2bSJens Axboe 	struct bio *bio;
1435f9c78b2bSJens Axboe 
1436f9c78b2bSJens Axboe 	bio = bio_kmalloc(gfp_mask, nr_pages);
1437f9c78b2bSJens Axboe 	if (!bio)
1438f9c78b2bSJens Axboe 		return ERR_PTR(-ENOMEM);
1439f9c78b2bSJens Axboe 
1440f9c78b2bSJens Axboe 	offset = offset_in_page(kaddr);
1441f9c78b2bSJens Axboe 	for (i = 0; i < nr_pages; i++) {
1442f9c78b2bSJens Axboe 		unsigned int bytes = PAGE_SIZE - offset;
1443f9c78b2bSJens Axboe 
1444f9c78b2bSJens Axboe 		if (len <= 0)
1445f9c78b2bSJens Axboe 			break;
1446f9c78b2bSJens Axboe 
1447f9c78b2bSJens Axboe 		if (bytes > len)
1448f9c78b2bSJens Axboe 			bytes = len;
1449f9c78b2bSJens Axboe 
1450f9c78b2bSJens Axboe 		if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
145175c72b83SChristoph Hellwig 				    offset) < bytes) {
145275c72b83SChristoph Hellwig 			/* we don't support partial mappings */
145375c72b83SChristoph Hellwig 			bio_put(bio);
145475c72b83SChristoph Hellwig 			return ERR_PTR(-EINVAL);
145575c72b83SChristoph Hellwig 		}
1456f9c78b2bSJens Axboe 
1457f9c78b2bSJens Axboe 		data += bytes;
1458f9c78b2bSJens Axboe 		len -= bytes;
1459f9c78b2bSJens Axboe 		offset = 0;
1460f9c78b2bSJens Axboe 	}
1461f9c78b2bSJens Axboe 
1462f9c78b2bSJens Axboe 	bio->bi_end_io = bio_map_kern_endio;
1463f9c78b2bSJens Axboe 	return bio;
1464f9c78b2bSJens Axboe }
1465f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_map_kern);
1466f9c78b2bSJens Axboe 
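/*
 * Illustrative sketch, not part of this file: wrapping a kernel buffer in
 * a bio.  'buf' must not be a vmalloc address, since bio_map_kern() relies
 * on virt_to_page().
 *
 *	struct bio *bio;
 *
 *	bio = bio_map_kern(q, buf, len, GFP_KERNEL);
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *	... submit the bio; bio_map_kern_endio() drops the reference ...
 */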
14674246a0b6SChristoph Hellwig static void bio_copy_kern_endio(struct bio *bio)
1468f9c78b2bSJens Axboe {
14691dfa0f68SChristoph Hellwig 	bio_free_pages(bio);
14701dfa0f68SChristoph Hellwig 	bio_put(bio);
14711dfa0f68SChristoph Hellwig }
14721dfa0f68SChristoph Hellwig 
14734246a0b6SChristoph Hellwig static void bio_copy_kern_endio_read(struct bio *bio)
14741dfa0f68SChristoph Hellwig {
147542d2683aSChristoph Hellwig 	char *p = bio->bi_private;
14761dfa0f68SChristoph Hellwig 	struct bio_vec *bvec;
1477f9c78b2bSJens Axboe 	int i;
1478f9c78b2bSJens Axboe 
1479f9c78b2bSJens Axboe 	bio_for_each_segment_all(bvec, bio, i) {
14801dfa0f68SChristoph Hellwig 		memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
1481f9c78b2bSJens Axboe 		p += bvec->bv_len;
1482f9c78b2bSJens Axboe 	}
1483f9c78b2bSJens Axboe 
14844246a0b6SChristoph Hellwig 	bio_copy_kern_endio(bio);
1485f9c78b2bSJens Axboe }
1486f9c78b2bSJens Axboe 
1487f9c78b2bSJens Axboe /**
1488f9c78b2bSJens Axboe  *	bio_copy_kern	-	copy kernel address into bio
1489f9c78b2bSJens Axboe  *	@q: the struct request_queue for the bio
1490f9c78b2bSJens Axboe  *	@data: pointer to buffer to copy
1491f9c78b2bSJens Axboe  *	@len: length in bytes
1492f9c78b2bSJens Axboe  *	@gfp_mask: allocation flags for bio and page allocation
1493f9c78b2bSJens Axboe  *	@reading: data direction is READ
1494f9c78b2bSJens Axboe  *
1495f9c78b2bSJens Axboe  *	Copy the kernel address into a bio suitable for io to a block
1496f9c78b2bSJens Axboe  *	device. Returns an error pointer in case of error.
1497f9c78b2bSJens Axboe  */
1498f9c78b2bSJens Axboe struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
1499f9c78b2bSJens Axboe 			  gfp_t gfp_mask, int reading)
1500f9c78b2bSJens Axboe {
150142d2683aSChristoph Hellwig 	unsigned long kaddr = (unsigned long)data;
150242d2683aSChristoph Hellwig 	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
150342d2683aSChristoph Hellwig 	unsigned long start = kaddr >> PAGE_SHIFT;
150442d2683aSChristoph Hellwig 	struct bio *bio;
1505f9c78b2bSJens Axboe 	void *p = data;
15061dfa0f68SChristoph Hellwig 	int nr_pages = 0;
1507f9c78b2bSJens Axboe 
150842d2683aSChristoph Hellwig 	/*
150942d2683aSChristoph Hellwig 	 * Overflow, abort
151042d2683aSChristoph Hellwig 	 */
151142d2683aSChristoph Hellwig 	if (end < start)
151242d2683aSChristoph Hellwig 		return ERR_PTR(-EINVAL);
1513f9c78b2bSJens Axboe 
151442d2683aSChristoph Hellwig 	nr_pages = end - start;
151542d2683aSChristoph Hellwig 	bio = bio_kmalloc(gfp_mask, nr_pages);
151642d2683aSChristoph Hellwig 	if (!bio)
151742d2683aSChristoph Hellwig 		return ERR_PTR(-ENOMEM);
151842d2683aSChristoph Hellwig 
151942d2683aSChristoph Hellwig 	while (len) {
152042d2683aSChristoph Hellwig 		struct page *page;
152142d2683aSChristoph Hellwig 		unsigned int bytes = PAGE_SIZE;
152242d2683aSChristoph Hellwig 
152342d2683aSChristoph Hellwig 		if (bytes > len)
152442d2683aSChristoph Hellwig 			bytes = len;
152542d2683aSChristoph Hellwig 
152642d2683aSChristoph Hellwig 		page = alloc_page(q->bounce_gfp | gfp_mask);
152742d2683aSChristoph Hellwig 		if (!page)
152842d2683aSChristoph Hellwig 			goto cleanup;
152942d2683aSChristoph Hellwig 
153042d2683aSChristoph Hellwig 		if (!reading)
153142d2683aSChristoph Hellwig 			memcpy(page_address(page), p, bytes);
153242d2683aSChristoph Hellwig 
153342d2683aSChristoph Hellwig 		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
153442d2683aSChristoph Hellwig 			break;
153542d2683aSChristoph Hellwig 
153642d2683aSChristoph Hellwig 		len -= bytes;
153742d2683aSChristoph Hellwig 		p += bytes;
1538f9c78b2bSJens Axboe 	}
1539f9c78b2bSJens Axboe 
15401dfa0f68SChristoph Hellwig 	if (reading) {
15411dfa0f68SChristoph Hellwig 		bio->bi_end_io = bio_copy_kern_endio_read;
154242d2683aSChristoph Hellwig 		bio->bi_private = data;
15431dfa0f68SChristoph Hellwig 	} else {
1544f9c78b2bSJens Axboe 		bio->bi_end_io = bio_copy_kern_endio;
15451dfa0f68SChristoph Hellwig 	}
15461dfa0f68SChristoph Hellwig 
1547f9c78b2bSJens Axboe 	return bio;
154842d2683aSChristoph Hellwig 
154942d2683aSChristoph Hellwig cleanup:
15501dfa0f68SChristoph Hellwig 	bio_free_pages(bio);
155142d2683aSChristoph Hellwig 	bio_put(bio);
155242d2683aSChristoph Hellwig 	return ERR_PTR(-ENOMEM);
1553f9c78b2bSJens Axboe }
1554f9c78b2bSJens Axboe 
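/*
 * Illustrative sketch, not part of this file: bouncing a kernel buffer
 * through freshly allocated pages when it cannot be mapped directly.
 *
 *	bio = bio_copy_kern(q, buf, len, GFP_KERNEL, reading);
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *
 * For a read, bio_copy_kern_endio_read() copies the data back into 'buf'
 * on completion; in both directions the endio handler frees the bounce
 * pages.
 */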
1555f9c78b2bSJens Axboe /*
1556f9c78b2bSJens Axboe  * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
1557f9c78b2bSJens Axboe  * for performing direct-IO in BIOs.
1558f9c78b2bSJens Axboe  *
1559f9c78b2bSJens Axboe  * The problem is that we cannot run set_page_dirty() from interrupt context
1560f9c78b2bSJens Axboe  * because the required locks are not interrupt-safe.  So what we can do is to
1561f9c78b2bSJens Axboe  * mark the pages dirty _before_ performing IO.  And in interrupt context,
1562f9c78b2bSJens Axboe  * check that the pages are still dirty.   If so, fine.  If not, redirty them
1563f9c78b2bSJens Axboe  * in process context.
1564f9c78b2bSJens Axboe  *
1565f9c78b2bSJens Axboe  * We special-case compound pages here: normally this means reads into hugetlb
1566f9c78b2bSJens Axboe  * pages.  The logic in here doesn't really work right for compound pages
1567f9c78b2bSJens Axboe  * because the VM does not uniformly chase down the head page in all cases.
1568f9c78b2bSJens Axboe  * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
1569f9c78b2bSJens Axboe  * handle them at all.  So we skip compound pages here at an early stage.
1570f9c78b2bSJens Axboe  *
1571f9c78b2bSJens Axboe  * Note that this code is very hard to test under normal circumstances because
1572f9c78b2bSJens Axboe  * direct-io pins the pages with get_user_pages().  This makes
1573f9c78b2bSJens Axboe  * is_page_cache_freeable return false, and the VM will not clean the pages.
1574f9c78b2bSJens Axboe  * But other code (e.g. flusher threads) could clean the pages if they are mapped
1575f9c78b2bSJens Axboe  * pagecache.
1576f9c78b2bSJens Axboe  *
1577f9c78b2bSJens Axboe  * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
1578f9c78b2bSJens Axboe  * deferred bio dirtying paths.
1579f9c78b2bSJens Axboe  */
1580f9c78b2bSJens Axboe 
1581f9c78b2bSJens Axboe /*
1582f9c78b2bSJens Axboe  * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1583f9c78b2bSJens Axboe  */
1584f9c78b2bSJens Axboe void bio_set_pages_dirty(struct bio *bio)
1585f9c78b2bSJens Axboe {
1586f9c78b2bSJens Axboe 	struct bio_vec *bvec;
1587f9c78b2bSJens Axboe 	int i;
1588f9c78b2bSJens Axboe 
1589f9c78b2bSJens Axboe 	bio_for_each_segment_all(bvec, bio, i) {
15903bb50983SChristoph Hellwig 		if (!PageCompound(bvec->bv_page))
15913bb50983SChristoph Hellwig 			set_page_dirty_lock(bvec->bv_page);
1592f9c78b2bSJens Axboe 	}
1593f9c78b2bSJens Axboe }
15941900fcc4SKent Overstreet EXPORT_SYMBOL_GPL(bio_set_pages_dirty);
1595f9c78b2bSJens Axboe 
1596f9c78b2bSJens Axboe static void bio_release_pages(struct bio *bio)
1597f9c78b2bSJens Axboe {
1598f9c78b2bSJens Axboe 	struct bio_vec *bvec;
1599f9c78b2bSJens Axboe 	int i;
1600f9c78b2bSJens Axboe 
160124d5493fSChristoph Hellwig 	bio_for_each_segment_all(bvec, bio, i)
160224d5493fSChristoph Hellwig 		put_page(bvec->bv_page);
1603f9c78b2bSJens Axboe }
1604f9c78b2bSJens Axboe 
1605f9c78b2bSJens Axboe /*
1606f9c78b2bSJens Axboe  * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
1607f9c78b2bSJens Axboe  * If they are, then fine.  If, however, some pages are clean then they must
1608f9c78b2bSJens Axboe  * have been written out during the direct-IO read.  So we take another ref on
160924d5493fSChristoph Hellwig  * the BIO and re-dirty the pages in process context.
1610f9c78b2bSJens Axboe  *
1611f9c78b2bSJens Axboe  * It is expected that bio_check_pages_dirty() will wholly own the BIO from
1612ea1754a0SKirill A. Shutemov  * here on.  It will run one put_page() against each page and will run one
1613ea1754a0SKirill A. Shutemov  * bio_put() against the BIO.
1614f9c78b2bSJens Axboe  */
1615f9c78b2bSJens Axboe 
1616f9c78b2bSJens Axboe static void bio_dirty_fn(struct work_struct *work);
1617f9c78b2bSJens Axboe 
1618f9c78b2bSJens Axboe static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
1619f9c78b2bSJens Axboe static DEFINE_SPINLOCK(bio_dirty_lock);
1620f9c78b2bSJens Axboe static struct bio *bio_dirty_list;
1621f9c78b2bSJens Axboe 
1622f9c78b2bSJens Axboe /*
1623f9c78b2bSJens Axboe  * This runs in process context
1624f9c78b2bSJens Axboe  */
1625f9c78b2bSJens Axboe static void bio_dirty_fn(struct work_struct *work)
1626f9c78b2bSJens Axboe {
162724d5493fSChristoph Hellwig 	struct bio *bio, *next;
1628f9c78b2bSJens Axboe 
162924d5493fSChristoph Hellwig 	spin_lock_irq(&bio_dirty_lock);
163024d5493fSChristoph Hellwig 	next = bio_dirty_list;
1631f9c78b2bSJens Axboe 	bio_dirty_list = NULL;
163224d5493fSChristoph Hellwig 	spin_unlock_irq(&bio_dirty_lock);
1633f9c78b2bSJens Axboe 
163424d5493fSChristoph Hellwig 	while ((bio = next) != NULL) {
163524d5493fSChristoph Hellwig 		next = bio->bi_private;
1636f9c78b2bSJens Axboe 
1637f9c78b2bSJens Axboe 		bio_set_pages_dirty(bio);
1638f9c78b2bSJens Axboe 		bio_release_pages(bio);
1639f9c78b2bSJens Axboe 		bio_put(bio);
1640f9c78b2bSJens Axboe 	}
1641f9c78b2bSJens Axboe }
1642f9c78b2bSJens Axboe 
1643f9c78b2bSJens Axboe void bio_check_pages_dirty(struct bio *bio)
1644f9c78b2bSJens Axboe {
1645f9c78b2bSJens Axboe 	struct bio_vec *bvec;
164624d5493fSChristoph Hellwig 	unsigned long flags;
1647f9c78b2bSJens Axboe 	int i;
1648f9c78b2bSJens Axboe 
1649f9c78b2bSJens Axboe 	bio_for_each_segment_all(bvec, bio, i) {
165024d5493fSChristoph Hellwig 		if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
165124d5493fSChristoph Hellwig 			goto defer;
1652f9c78b2bSJens Axboe 	}
1653f9c78b2bSJens Axboe 
165424d5493fSChristoph Hellwig 	bio_release_pages(bio);
165524d5493fSChristoph Hellwig 	bio_put(bio);
165624d5493fSChristoph Hellwig 	return;
165724d5493fSChristoph Hellwig defer:
1658f9c78b2bSJens Axboe 	spin_lock_irqsave(&bio_dirty_lock, flags);
1659f9c78b2bSJens Axboe 	bio->bi_private = bio_dirty_list;
1660f9c78b2bSJens Axboe 	bio_dirty_list = bio;
1661f9c78b2bSJens Axboe 	spin_unlock_irqrestore(&bio_dirty_lock, flags);
1662f9c78b2bSJens Axboe 	schedule_work(&bio_dirty_work);
1663f9c78b2bSJens Axboe }
16641900fcc4SKent Overstreet EXPORT_SYMBOL_GPL(bio_check_pages_dirty);
1665f9c78b2bSJens Axboe 
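/*
 * Illustrative sketch, not part of this file: the direct-IO read pattern
 * the two helpers above support.
 *
 *	bio_set_pages_dirty(bio);	(before submitting a READ)
 *	submit_bio(bio);
 *
 * and then, from the bio's ->bi_end_io(), possibly in interrupt context:
 *
 *	bio_check_pages_dirty(bio);
 *
 * bio_check_pages_dirty() consumes the bio: it either releases the pages
 * and puts the bio directly, or defers the redirtying to process context
 * via bio_dirty_work.
 */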
1666ddcf35d3SMichael Callahan void generic_start_io_acct(struct request_queue *q, int op,
1667d62e26b3SJens Axboe 			   unsigned long sectors, struct hd_struct *part)
1668394ffa50SGu Zheng {
1669ddcf35d3SMichael Callahan 	const int sgrp = op_stat_group(op);
1670394ffa50SGu Zheng 	int cpu = part_stat_lock();
1671394ffa50SGu Zheng 
1672d62e26b3SJens Axboe 	part_round_stats(q, cpu, part);
1673ddcf35d3SMichael Callahan 	part_stat_inc(cpu, part, ios[sgrp]);
1674ddcf35d3SMichael Callahan 	part_stat_add(cpu, part, sectors[sgrp], sectors);
1675ddcf35d3SMichael Callahan 	part_inc_in_flight(q, part, op_is_write(op));
1676394ffa50SGu Zheng 
1677394ffa50SGu Zheng 	part_stat_unlock();
1678394ffa50SGu Zheng }
1679394ffa50SGu Zheng EXPORT_SYMBOL(generic_start_io_acct);
1680394ffa50SGu Zheng 
1681ddcf35d3SMichael Callahan void generic_end_io_acct(struct request_queue *q, int req_op,
1682d62e26b3SJens Axboe 			 struct hd_struct *part, unsigned long start_time)
1683394ffa50SGu Zheng {
1684394ffa50SGu Zheng 	unsigned long duration = jiffies - start_time;
1685ddcf35d3SMichael Callahan 	const int sgrp = op_stat_group(req_op);
1686394ffa50SGu Zheng 	int cpu = part_stat_lock();
1687394ffa50SGu Zheng 
1688b57e99b4SOmar Sandoval 	part_stat_add(cpu, part, nsecs[sgrp], jiffies_to_nsecs(duration));
1689d62e26b3SJens Axboe 	part_round_stats(q, cpu, part);
1690ddcf35d3SMichael Callahan 	part_dec_in_flight(q, part, op_is_write(req_op));
1691394ffa50SGu Zheng 
1692394ffa50SGu Zheng 	part_stat_unlock();
1693394ffa50SGu Zheng }
1694394ffa50SGu Zheng EXPORT_SYMBOL(generic_end_io_acct);
1695394ffa50SGu Zheng 
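/*
 * Illustrative sketch, not part of this file: how a bio-based driver might
 * use the accounting pair above.  'disk' is a hypothetical struct gendisk.
 *
 *	unsigned long start = jiffies;
 *
 *	generic_start_io_acct(q, bio_op(bio), bio_sectors(bio),
 *			      &disk->part0);
 *	... perform the I/O ...
 *	generic_end_io_acct(q, bio_op(bio), &disk->part0, start);
 */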
1696f9c78b2bSJens Axboe #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
1697f9c78b2bSJens Axboe void bio_flush_dcache_pages(struct bio *bi)
1698f9c78b2bSJens Axboe {
1699f9c78b2bSJens Axboe 	struct bio_vec bvec;
1700f9c78b2bSJens Axboe 	struct bvec_iter iter;
1701f9c78b2bSJens Axboe 
1702f9c78b2bSJens Axboe 	bio_for_each_segment(bvec, bi, iter)
1703f9c78b2bSJens Axboe 		flush_dcache_page(bvec.bv_page);
1704f9c78b2bSJens Axboe }
1705f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_flush_dcache_pages);
1706f9c78b2bSJens Axboe #endif
1707f9c78b2bSJens Axboe 
1708c4cf5261SJens Axboe static inline bool bio_remaining_done(struct bio *bio)
1709c4cf5261SJens Axboe {
1710c4cf5261SJens Axboe 	/*
1711c4cf5261SJens Axboe 	 * If we're not chaining, then ->__bi_remaining is always 1 and
1712c4cf5261SJens Axboe 	 * we always end io on the first invocation.
1713c4cf5261SJens Axboe 	 */
1714c4cf5261SJens Axboe 	if (!bio_flagged(bio, BIO_CHAIN))
1715c4cf5261SJens Axboe 		return true;
1716c4cf5261SJens Axboe 
1717c4cf5261SJens Axboe 	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
1718c4cf5261SJens Axboe 
1719326e1dbbSMike Snitzer 	if (atomic_dec_and_test(&bio->__bi_remaining)) {
1720b7c44ed9SJens Axboe 		bio_clear_flag(bio, BIO_CHAIN);
1721c4cf5261SJens Axboe 		return true;
1722326e1dbbSMike Snitzer 	}
1723c4cf5261SJens Axboe 
1724c4cf5261SJens Axboe 	return false;
1725c4cf5261SJens Axboe }
1726c4cf5261SJens Axboe 
1727f9c78b2bSJens Axboe /**
1728f9c78b2bSJens Axboe  * bio_endio - end I/O on a bio
1729f9c78b2bSJens Axboe  * @bio:	bio
1730f9c78b2bSJens Axboe  *
1731f9c78b2bSJens Axboe  * Description:
17324246a0b6SChristoph Hellwig  *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
17334246a0b6SChristoph Hellwig  *   way to end I/O on a bio. No one should call bi_end_io() directly on a
17344246a0b6SChristoph Hellwig  *   bio unless they own it and thus know that it has an end_io function.
1735fbbaf700SNeilBrown  *
1736fbbaf700SNeilBrown  *   bio_endio() can be called several times on a bio that has been chained
1737fbbaf700SNeilBrown  *   using bio_chain().  The ->bi_end_io() function will only be called the
1738fbbaf700SNeilBrown  *   last time.  At this point the BLK_TA_COMPLETE tracing event will be
1739fbbaf700SNeilBrown  *   generated if BIO_TRACE_COMPLETION is set.
1740f9c78b2bSJens Axboe  **/
17414246a0b6SChristoph Hellwig void bio_endio(struct bio *bio)
1742f9c78b2bSJens Axboe {
1743ba8c6967SChristoph Hellwig again:
17442b885517SChristoph Hellwig 	if (!bio_remaining_done(bio))
1745ba8c6967SChristoph Hellwig 		return;
17467c20f116SChristoph Hellwig 	if (!bio_integrity_endio(bio))
17477c20f116SChristoph Hellwig 		return;
1748f9c78b2bSJens Axboe 
174967b42d0bSJosef Bacik 	if (bio->bi_disk)
175067b42d0bSJosef Bacik 		rq_qos_done_bio(bio->bi_disk->queue, bio);
175167b42d0bSJosef Bacik 
1752f9c78b2bSJens Axboe 	/*
1753ba8c6967SChristoph Hellwig 	 * Need to have a real endio function for chained bios, otherwise
1754ba8c6967SChristoph Hellwig 	 * various corner cases will break (like stacking block devices that
1755ba8c6967SChristoph Hellwig 	 * save/restore bi_end_io) - however, we want to avoid unbounded
1756ba8c6967SChristoph Hellwig 	 * recursion and blowing the stack. Tail call optimization would
1757ba8c6967SChristoph Hellwig 	 * handle this, but compiling with frame pointers also disables
1758ba8c6967SChristoph Hellwig 	 * gcc's sibling call optimization.
1759f9c78b2bSJens Axboe 	 */
1760f9c78b2bSJens Axboe 	if (bio->bi_end_io == bio_chain_endio) {
176138f8baaeSChristoph Hellwig 		bio = __bio_chain_endio(bio);
1762ba8c6967SChristoph Hellwig 		goto again;
1763ba8c6967SChristoph Hellwig 	}
1764ba8c6967SChristoph Hellwig 
176574d46992SChristoph Hellwig 	if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
176674d46992SChristoph Hellwig 		trace_block_bio_complete(bio->bi_disk->queue, bio,
1767a462b950SBart Van Assche 					 blk_status_to_errno(bio->bi_status));
1768fbbaf700SNeilBrown 		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
1769fbbaf700SNeilBrown 	}
1770fbbaf700SNeilBrown 
17719e234eeaSShaohua Li 	blk_throtl_bio_endio(bio);
1772b222dd2fSShaohua Li 	/* release cgroup info */
1773b222dd2fSShaohua Li 	bio_uninit(bio);
1774f9c78b2bSJens Axboe 	if (bio->bi_end_io)
17754246a0b6SChristoph Hellwig 		bio->bi_end_io(bio);
1776f9c78b2bSJens Axboe }
1777f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_endio);
1778f9c78b2bSJens Axboe 
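/*
 * Illustrative sketch, not part of this file: chained completion.  After
 *
 *	bio_chain(new, parent);
 *	submit_bio(new);
 *
 * bio_endio() on 'new' only decrements parent->__bi_remaining via
 * bio_chain_endio(); the parent's own ->bi_end_io() runs once every
 * chained bio and the parent itself have completed.
 */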
1779f9c78b2bSJens Axboe /**
1780f9c78b2bSJens Axboe  * bio_split - split a bio
1781f9c78b2bSJens Axboe  * @bio:	bio to split
1782f9c78b2bSJens Axboe  * @sectors:	number of sectors to split from the front of @bio
1783f9c78b2bSJens Axboe  * @gfp:	gfp mask
1784f9c78b2bSJens Axboe  * @bs:		bio set to allocate from
1785f9c78b2bSJens Axboe  *
1786f9c78b2bSJens Axboe  * Allocates and returns a new bio which represents @sectors from the start of
1787f9c78b2bSJens Axboe  * @bio, and updates @bio to represent the remaining sectors.
1788f9c78b2bSJens Axboe  *
1789f3f5da62SMartin K. Petersen  * Unless this is a discard request, the newly allocated bio will point
1790f3f5da62SMartin K. Petersen  * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
1791f3f5da62SMartin K. Petersen  * @bio is not freed before the split.
1792f9c78b2bSJens Axboe  */
1793f9c78b2bSJens Axboe struct bio *bio_split(struct bio *bio, int sectors,
1794f9c78b2bSJens Axboe 		      gfp_t gfp, struct bio_set *bs)
1795f9c78b2bSJens Axboe {
1796f341a4d3SMikulas Patocka 	struct bio *split;
1797f9c78b2bSJens Axboe 
1798f9c78b2bSJens Axboe 	BUG_ON(sectors <= 0);
1799f9c78b2bSJens Axboe 	BUG_ON(sectors >= bio_sectors(bio));
1800f9c78b2bSJens Axboe 
1801f9c78b2bSJens Axboe 	split = bio_clone_fast(bio, gfp, bs);
1802f9c78b2bSJens Axboe 	if (!split)
1803f9c78b2bSJens Axboe 		return NULL;
1804f9c78b2bSJens Axboe 
1805f9c78b2bSJens Axboe 	split->bi_iter.bi_size = sectors << 9;
1806f9c78b2bSJens Axboe 
1807f9c78b2bSJens Axboe 	if (bio_integrity(split))
1808fbd08e76SDmitry Monakhov 		bio_integrity_trim(split);
1809f9c78b2bSJens Axboe 
1810f9c78b2bSJens Axboe 	bio_advance(bio, split->bi_iter.bi_size);
1811f9c78b2bSJens Axboe 
1812fbbaf700SNeilBrown 	if (bio_flagged(bio, BIO_TRACE_COMPLETION))
181320d59023SGoldwyn Rodrigues 		bio_set_flag(split, BIO_TRACE_COMPLETION);
1814fbbaf700SNeilBrown 
1815f9c78b2bSJens Axboe 	return split;
1816f9c78b2bSJens Axboe }
1817f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_split);
1818f9c78b2bSJens Axboe 
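/*
 * Illustrative sketch, not part of this file: the common split-and-chain
 * pattern.  'max_sectors' is a hypothetical per-device limit.
 *
 *	if (bio_sectors(bio) > max_sectors) {
 *		struct bio *split;
 *
 *		split = bio_split(bio, max_sectors, GFP_NOIO, &fs_bio_set);
 *		bio_chain(split, bio);
 *		generic_make_request(bio);
 *		bio = split;
 *	}
 *
 * The remainder is requeued and the caller continues with the front part;
 * completion propagates through the chain as described for bio_endio().
 */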
1819f9c78b2bSJens Axboe /**
1820f9c78b2bSJens Axboe  * bio_trim - trim a bio
1821f9c78b2bSJens Axboe  * @bio:	bio to trim
1822f9c78b2bSJens Axboe  * @offset:	number of sectors to trim from the front of @bio
1823f9c78b2bSJens Axboe  * @size:	size we want to trim @bio to, in sectors
1824f9c78b2bSJens Axboe  */
1825f9c78b2bSJens Axboe void bio_trim(struct bio *bio, int offset, int size)
1826f9c78b2bSJens Axboe {
1827f9c78b2bSJens Axboe 	/* 'bio' is a cloned bio which we need to trim to match
1828f9c78b2bSJens Axboe 	 * the given offset and size.
1829f9c78b2bSJens Axboe 	 */
1830f9c78b2bSJens Axboe 
1831f9c78b2bSJens Axboe 	size <<= 9;
1832f9c78b2bSJens Axboe 	if (offset == 0 && size == bio->bi_iter.bi_size)
1833f9c78b2bSJens Axboe 		return;
1834f9c78b2bSJens Axboe 
1835b7c44ed9SJens Axboe 	bio_clear_flag(bio, BIO_SEG_VALID);
1836f9c78b2bSJens Axboe 
1837f9c78b2bSJens Axboe 	bio_advance(bio, offset << 9);
1838f9c78b2bSJens Axboe 
1839f9c78b2bSJens Axboe 	bio->bi_iter.bi_size = size;
1840376a78abSDmitry Monakhov 
1841376a78abSDmitry Monakhov 	if (bio_integrity(bio))
1842fbd08e76SDmitry Monakhov 		bio_integrity_trim(bio);
1843376a78abSDmitry Monakhov 
1844f9c78b2bSJens Axboe }
1845f9c78b2bSJens Axboe EXPORT_SYMBOL_GPL(bio_trim);
1846f9c78b2bSJens Axboe 
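/*
 * Illustrative sketch, not part of this file: a stacking driver carving a
 * region out of a cloned bio.  'offset' and 'sectors' are assumed to lie
 * within the clone.
 *
 *	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, &fs_bio_set);
 *
 *	if (clone)
 *		bio_trim(clone, offset, sectors);
 */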
1847f9c78b2bSJens Axboe /*
1848f9c78b2bSJens Axboe  * create memory pools for biovecs in a bio_set.
1849f9c78b2bSJens Axboe  * use the global biovec slabs created for general use.
1850f9c78b2bSJens Axboe  */
18518aa6ba2fSKent Overstreet int biovec_init_pool(mempool_t *pool, int pool_entries)
1852f9c78b2bSJens Axboe {
1853ed996a52SChristoph Hellwig 	struct biovec_slab *bp = bvec_slabs + BVEC_POOL_MAX;
1854f9c78b2bSJens Axboe 
18558aa6ba2fSKent Overstreet 	return mempool_init_slab_pool(pool, pool_entries, bp->slab);
1856f9c78b2bSJens Axboe }
1857f9c78b2bSJens Axboe 
1858917a38c7SKent Overstreet /*
1859917a38c7SKent Overstreet  * bioset_exit - exit a bioset initialized with bioset_init()
1860917a38c7SKent Overstreet  *
1861917a38c7SKent Overstreet  * May be called on a zeroed but uninitialized bioset (i.e. allocated with
1862917a38c7SKent Overstreet  * kzalloc()).
1863917a38c7SKent Overstreet  */
1864917a38c7SKent Overstreet void bioset_exit(struct bio_set *bs)
1865f9c78b2bSJens Axboe {
1866f9c78b2bSJens Axboe 	if (bs->rescue_workqueue)
1867f9c78b2bSJens Axboe 		destroy_workqueue(bs->rescue_workqueue);
1868917a38c7SKent Overstreet 	bs->rescue_workqueue = NULL;
1869f9c78b2bSJens Axboe 
18708aa6ba2fSKent Overstreet 	mempool_exit(&bs->bio_pool);
18718aa6ba2fSKent Overstreet 	mempool_exit(&bs->bvec_pool);
1872f9c78b2bSJens Axboe 
1873f9c78b2bSJens Axboe 	bioset_integrity_free(bs);
1874917a38c7SKent Overstreet 	if (bs->bio_slab)
1875f9c78b2bSJens Axboe 		bio_put_slab(bs);
1876917a38c7SKent Overstreet 	bs->bio_slab = NULL;
1877917a38c7SKent Overstreet }
1878917a38c7SKent Overstreet EXPORT_SYMBOL(bioset_exit);
1879f9c78b2bSJens Axboe 
1880011067b0SNeilBrown /**
1881917a38c7SKent Overstreet  * bioset_init - Initialize a bio_set
1882dad08527SKent Overstreet  * @bs:		pool to initialize
1883917a38c7SKent Overstreet  * @pool_size:	Number of bio and bio_vecs to cache in the mempool
1884917a38c7SKent Overstreet  * @front_pad:	Number of bytes to allocate in front of the returned bio
1885917a38c7SKent Overstreet  * @flags:	Flags to modify behavior, currently %BIOSET_NEED_BVECS
1886917a38c7SKent Overstreet  *              and %BIOSET_NEED_RESCUER
1887917a38c7SKent Overstreet  *
1888dad08527SKent Overstreet  * Description:
1889dad08527SKent Overstreet  *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
1890dad08527SKent Overstreet  *    to ask for a number of bytes to be allocated in front of the bio.
1891dad08527SKent Overstreet  *    Front pad allocation is useful for embedding the bio inside
1892dad08527SKent Overstreet  *    another structure, to avoid allocating extra data to go with the bio.
1893dad08527SKent Overstreet  *    Note that the bio must be embedded at the END of that structure always,
1894dad08527SKent Overstreet  *    or things will break badly.
1895dad08527SKent Overstreet  *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
1896dad08527SKent Overstreet  *    for allocating iovecs.  This pool is not needed e.g. for bio_clone_fast().
1897dad08527SKent Overstreet  *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used to
1898dad08527SKent Overstreet  *    dispatch queued requests when the mempool runs out of space.
1899dad08527SKent Overstreet  *
1900917a38c7SKent Overstreet  */
1901917a38c7SKent Overstreet int bioset_init(struct bio_set *bs,
1902917a38c7SKent Overstreet 		unsigned int pool_size,
1903917a38c7SKent Overstreet 		unsigned int front_pad,
1904917a38c7SKent Overstreet 		int flags)
1905917a38c7SKent Overstreet {
1906917a38c7SKent Overstreet 	unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
1907917a38c7SKent Overstreet 
1908917a38c7SKent Overstreet 	bs->front_pad = front_pad;
1909917a38c7SKent Overstreet 
1910917a38c7SKent Overstreet 	spin_lock_init(&bs->rescue_lock);
1911917a38c7SKent Overstreet 	bio_list_init(&bs->rescue_list);
1912917a38c7SKent Overstreet 	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
1913917a38c7SKent Overstreet 
1914917a38c7SKent Overstreet 	bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
1915917a38c7SKent Overstreet 	if (!bs->bio_slab)
1916917a38c7SKent Overstreet 		return -ENOMEM;
1917917a38c7SKent Overstreet 
1918917a38c7SKent Overstreet 	if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
1919917a38c7SKent Overstreet 		goto bad;
1920917a38c7SKent Overstreet 
1921917a38c7SKent Overstreet 	if ((flags & BIOSET_NEED_BVECS) &&
1922917a38c7SKent Overstreet 	    biovec_init_pool(&bs->bvec_pool, pool_size))
1923917a38c7SKent Overstreet 		goto bad;
1924917a38c7SKent Overstreet 
1925917a38c7SKent Overstreet 	if (!(flags & BIOSET_NEED_RESCUER))
1926917a38c7SKent Overstreet 		return 0;
1927917a38c7SKent Overstreet 
1928917a38c7SKent Overstreet 	bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
1929917a38c7SKent Overstreet 	if (!bs->rescue_workqueue)
1930917a38c7SKent Overstreet 		goto bad;
1931917a38c7SKent Overstreet 
1932917a38c7SKent Overstreet 	return 0;
1933917a38c7SKent Overstreet bad:
1934917a38c7SKent Overstreet 	bioset_exit(bs);
1935917a38c7SKent Overstreet 	return -ENOMEM;
1936917a38c7SKent Overstreet }
1937917a38c7SKent Overstreet EXPORT_SYMBOL(bioset_init);
1938917a38c7SKent Overstreet 
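/*
 * Illustrative sketch, not part of this file: embedding a bio at the end
 * of a driver structure via front_pad.  'struct my_io' and 'my_bio_set'
 * are hypothetical.
 *
 *	struct my_io {
 *		void *private;
 *		struct bio bio;		(must be the last member)
 *	};
 *
 *	ret = bioset_init(&my_bio_set, BIO_POOL_SIZE,
 *			  offsetof(struct my_io, bio), BIOSET_NEED_BVECS);
 *
 * A bio allocated from my_bio_set can then be mapped back to its
 * containing struct my_io with container_of().
 */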
193928e89fd9SJens Axboe /*
194028e89fd9SJens Axboe  * Initialize and setup a new bio_set, based on the settings from
194128e89fd9SJens Axboe  * another bio_set.
194228e89fd9SJens Axboe  */
194328e89fd9SJens Axboe int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
194428e89fd9SJens Axboe {
194528e89fd9SJens Axboe 	int flags;
194628e89fd9SJens Axboe 
194728e89fd9SJens Axboe 	flags = 0;
194828e89fd9SJens Axboe 	if (src->bvec_pool.min_nr)
194928e89fd9SJens Axboe 		flags |= BIOSET_NEED_BVECS;
195028e89fd9SJens Axboe 	if (src->rescue_workqueue)
195128e89fd9SJens Axboe 		flags |= BIOSET_NEED_RESCUER;
195228e89fd9SJens Axboe 
195328e89fd9SJens Axboe 	return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags);
195428e89fd9SJens Axboe }
195528e89fd9SJens Axboe EXPORT_SYMBOL(bioset_init_from_src);
195628e89fd9SJens Axboe 
1957f9c78b2bSJens Axboe #ifdef CONFIG_BLK_CGROUP
19581d933cf0STejun Heo 
1959b5f2954dSDennis Zhou #ifdef CONFIG_MEMCG
19601d933cf0STejun Heo /**
1961b5f2954dSDennis Zhou  * bio_associate_blkcg_from_page - associate a bio with the page's blkcg
1962b5f2954dSDennis Zhou  * @bio: target bio
1963b5f2954dSDennis Zhou  * @page: the page to lookup the blkcg from
1964b5f2954dSDennis Zhou  *
1965b5f2954dSDennis Zhou  * Associate @bio with the blkcg from @page's owning memcg.  This works like
1966b5f2954dSDennis Zhou  * every other associate function wrt references.
1967b5f2954dSDennis Zhou  */
1968b5f2954dSDennis Zhou int bio_associate_blkcg_from_page(struct bio *bio, struct page *page)
1969b5f2954dSDennis Zhou {
1970b5f2954dSDennis Zhou 	struct cgroup_subsys_state *blkcg_css;
1971b5f2954dSDennis Zhou 
1972b5f2954dSDennis Zhou 	if (unlikely(bio->bi_css))
1973b5f2954dSDennis Zhou 		return -EBUSY;
1974b5f2954dSDennis Zhou 	if (!page->mem_cgroup)
1975b5f2954dSDennis Zhou 		return 0;
1976b5f2954dSDennis Zhou 	blkcg_css = cgroup_get_e_css(page->mem_cgroup->css.cgroup,
1977b5f2954dSDennis Zhou 				     &io_cgrp_subsys);
1978b5f2954dSDennis Zhou 	bio->bi_css = blkcg_css;
1979b5f2954dSDennis Zhou 	return 0;
1980b5f2954dSDennis Zhou }
1981b5f2954dSDennis Zhou #endif /* CONFIG_MEMCG */
1982b5f2954dSDennis Zhou 
1983b5f2954dSDennis Zhou /**
1984b5f2954dSDennis Zhou  * bio_associate_blkcg - associate a bio with the specified blkcg
1985b5f2954dSDennis Zhou  * @bio: target bio
1986b5f2954dSDennis Zhou  * @blkcg_css: css of the blkcg to associate
1987b5f2954dSDennis Zhou  *
1988b5f2954dSDennis Zhou  * Associate @bio with the blkcg specified by @blkcg_css.  Block layer will
1989b5f2954dSDennis Zhou  * treat @bio as if it were issued by a task which belongs to the blkcg.
1990b5f2954dSDennis Zhou  *
1991b5f2954dSDennis Zhou  * This function takes an extra reference of @blkcg_css which will be put
1992b5f2954dSDennis Zhou  * when @bio is released.  The caller must own @bio and is responsible for
19930fe061b9SDennis Zhou  * synchronizing calls to this function.  If @blkcg_css is %NULL, a call to
19940fe061b9SDennis Zhou  * blkcg_get_css() finds the current css from the kthread or task.
1995b5f2954dSDennis Zhou  */
1996b5f2954dSDennis Zhou int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
1997b5f2954dSDennis Zhou {
1998b5f2954dSDennis Zhou 	if (unlikely(bio->bi_css))
1999b5f2954dSDennis Zhou 		return -EBUSY;
20000fe061b9SDennis Zhou 
20010fe061b9SDennis Zhou 	if (blkcg_css)
2002b5f2954dSDennis Zhou 		css_get(blkcg_css);
20030fe061b9SDennis Zhou 	else
20040fe061b9SDennis Zhou 		blkcg_css = blkcg_get_css();
20050fe061b9SDennis Zhou 
2006b5f2954dSDennis Zhou 	bio->bi_css = blkcg_css;
2007b5f2954dSDennis Zhou 	return 0;
2008b5f2954dSDennis Zhou }
2009b5f2954dSDennis Zhou EXPORT_SYMBOL_GPL(bio_associate_blkcg);
2010b5f2954dSDennis Zhou 
2011b5f2954dSDennis Zhou /**
20122268c0feSDennis Zhou  * bio_disassociate_blkg - puts back the blkg reference if associated
20132268c0feSDennis Zhou  * @bio: target bio
20142268c0feSDennis Zhou  *
20152268c0feSDennis Zhou  * Helper to disassociate the blkg from @bio if a blkg is associated.
20162268c0feSDennis Zhou  */
20172268c0feSDennis Zhou void bio_disassociate_blkg(struct bio *bio)
20182268c0feSDennis Zhou {
20192268c0feSDennis Zhou 	if (bio->bi_blkg) {
20202268c0feSDennis Zhou 		blkg_put(bio->bi_blkg);
20212268c0feSDennis Zhou 		bio->bi_blkg = NULL;
20222268c0feSDennis Zhou 	}
20232268c0feSDennis Zhou }
20242268c0feSDennis Zhou 
20252268c0feSDennis Zhou /**
20262268c0feSDennis Zhou  * __bio_associate_blkg - associate a bio with a blkg
202708e18eabSJosef Bacik  * @bio: target bio
202808e18eabSJosef Bacik  * @blkg: the blkg to associate
202908e18eabSJosef Bacik  *
2030beea9da0SDennis Zhou  * This tries to associate @bio with the specified @blkg.  Association failure
2031beea9da0SDennis Zhou  * is handled by walking up the blkg tree.  Therefore, the blkg associated can
2032beea9da0SDennis Zhou  * be anything between @blkg and the root_blkg.  This situation only happens
2033beea9da0SDennis Zhou  * when a cgroup is dying and then the remaining bios will spill to the closest
2034beea9da0SDennis Zhou  * alive blkg.
2035beea9da0SDennis Zhou  *
2036beea9da0SDennis Zhou  * A reference will be taken on the @blkg and will be released when @bio is
2037beea9da0SDennis Zhou  * freed.
203808e18eabSJosef Bacik  */
20392268c0feSDennis Zhou static void __bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
204008e18eabSJosef Bacik {
20412268c0feSDennis Zhou 	bio_disassociate_blkg(bio);
20422268c0feSDennis Zhou 
2043beea9da0SDennis Zhou 	bio->bi_blkg = blkg_try_get_closest(blkg);
20442268c0feSDennis Zhou }
20452268c0feSDennis Zhou 
20462268c0feSDennis Zhou /**
20472268c0feSDennis Zhou  * bio_associate_blkg - associate a bio with a blkg
20482268c0feSDennis Zhou  * @bio: target bio
20492268c0feSDennis Zhou  *
20502268c0feSDennis Zhou  * Associate @bio with the blkg found from the bio's css and request_queue.
20512268c0feSDennis Zhou  * If one is not found, blkg_lookup_create() creates the blkg.  If a blkg is
20522268c0feSDennis Zhou  * already associated, the css is reused and association redone as the
20532268c0feSDennis Zhou  * request_queue may have changed.
20542268c0feSDennis Zhou  */
20552268c0feSDennis Zhou void bio_associate_blkg(struct bio *bio)
20562268c0feSDennis Zhou {
20572268c0feSDennis Zhou 	struct request_queue *q = bio->bi_disk->queue;
20582268c0feSDennis Zhou 	struct blkcg *blkcg;
20592268c0feSDennis Zhou 	struct blkcg_gq *blkg;
20602268c0feSDennis Zhou 
20612268c0feSDennis Zhou 	rcu_read_lock();
20622268c0feSDennis Zhou 
20632268c0feSDennis Zhou 	bio_associate_blkcg(bio, NULL);
20642268c0feSDennis Zhou 	blkcg = bio_blkcg(bio);
20652268c0feSDennis Zhou 
20662268c0feSDennis Zhou 	if (!blkcg->css.parent) {
20672268c0feSDennis Zhou 		__bio_associate_blkg(bio, q->root_blkg);
20682268c0feSDennis Zhou 	} else {
20692268c0feSDennis Zhou 		blkg = blkg_lookup_create(blkcg, q);
20702268c0feSDennis Zhou 
20712268c0feSDennis Zhou 		__bio_associate_blkg(bio, blkg);
20722268c0feSDennis Zhou 	}
20732268c0feSDennis Zhou 
20742268c0feSDennis Zhou 	rcu_read_unlock();
207508e18eabSJosef Bacik }
207608e18eabSJosef Bacik 
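/*
 * Illustrative sketch, not part of this file: a submitter typically
 * associates the blkg right after pointing the bio at a device.
 *
 *	bio_set_dev(bio, bdev);
 *	bio_associate_blkg(bio);
 *
 * The lookup falls back toward the root blkg if the exact blkg cannot be
 * used, so the bio ends up with a valid reference either way.
 */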
2077f0fcb3ecSDennis Zhou (Facebook) /**
2078f9c78b2bSJens Axboe  * bio_disassociate_task - undo bio_associate_current()
2079f9c78b2bSJens Axboe  * @bio: target bio
2080f9c78b2bSJens Axboe  */
2081f9c78b2bSJens Axboe void bio_disassociate_task(struct bio *bio)
2082f9c78b2bSJens Axboe {
2083b5f2954dSDennis Zhou 	if (bio->bi_css) {
2084b5f2954dSDennis Zhou 		css_put(bio->bi_css);
2085b5f2954dSDennis Zhou 		bio->bi_css = NULL;
2086b5f2954dSDennis Zhou 	}
20872268c0feSDennis Zhou 	bio_disassociate_blkg(bio);
2088f9c78b2bSJens Axboe }
2089f9c78b2bSJens Axboe 
209020bd723eSPaolo Valente /**
2091b5f2954dSDennis Zhou  * bio_clone_blkcg_association - clone blkcg association from src to dst bio
209220bd723eSPaolo Valente  * @dst: destination bio
209320bd723eSPaolo Valente  * @src: source bio
209420bd723eSPaolo Valente  */
2095b5f2954dSDennis Zhou void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
209620bd723eSPaolo Valente {
2097b5f2954dSDennis Zhou 	if (src->bi_css)
2098b5f2954dSDennis Zhou 		WARN_ON(bio_associate_blkcg(dst, src->bi_css));
20992268c0feSDennis Zhou 
21002268c0feSDennis Zhou 	if (src->bi_blkg)
21012268c0feSDennis Zhou 		__bio_associate_blkg(dst, src->bi_blkg);
210220bd723eSPaolo Valente }
2103b5f2954dSDennis Zhou EXPORT_SYMBOL_GPL(bio_clone_blkcg_association);
2104f9c78b2bSJens Axboe #endif /* CONFIG_BLK_CGROUP */
2105f9c78b2bSJens Axboe 
2106f9c78b2bSJens Axboe static void __init biovec_init_slabs(void)
2107f9c78b2bSJens Axboe {
2108f9c78b2bSJens Axboe 	int i;
2109f9c78b2bSJens Axboe 
2110ed996a52SChristoph Hellwig 	for (i = 0; i < BVEC_POOL_NR; i++) {
2111f9c78b2bSJens Axboe 		int size;
2112f9c78b2bSJens Axboe 		struct biovec_slab *bvs = bvec_slabs + i;
2113f9c78b2bSJens Axboe 
2114f9c78b2bSJens Axboe 		if (bvs->nr_vecs <= BIO_INLINE_VECS) {
2115f9c78b2bSJens Axboe 			bvs->slab = NULL;
2116f9c78b2bSJens Axboe 			continue;
2117f9c78b2bSJens Axboe 		}
2118f9c78b2bSJens Axboe 
2119f9c78b2bSJens Axboe 		size = bvs->nr_vecs * sizeof(struct bio_vec);
2120f9c78b2bSJens Axboe 		bvs->slab = kmem_cache_create(bvs->name, size, 0,
2121f9c78b2bSJens Axboe                                 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
2122f9c78b2bSJens Axboe 	}
2123f9c78b2bSJens Axboe }
2124f9c78b2bSJens Axboe 
2125f9c78b2bSJens Axboe static int __init init_bio(void)
2126f9c78b2bSJens Axboe {
2127f9c78b2bSJens Axboe 	bio_slab_max = 2;
2128f9c78b2bSJens Axboe 	bio_slab_nr = 0;
21296396bb22SKees Cook 	bio_slabs = kcalloc(bio_slab_max, sizeof(struct bio_slab),
21306396bb22SKees Cook 			    GFP_KERNEL);
2131f9c78b2bSJens Axboe 	if (!bio_slabs)
2132f9c78b2bSJens Axboe 		panic("bio: can't allocate bios\n");
2133f9c78b2bSJens Axboe 
2134f9c78b2bSJens Axboe 	bio_integrity_init();
2135f9c78b2bSJens Axboe 	biovec_init_slabs();
2136f9c78b2bSJens Axboe 
2137f4f8154aSKent Overstreet 	if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS))
2138f9c78b2bSJens Axboe 		panic("bio: can't allocate bios\n");
2139f9c78b2bSJens Axboe 
2140f4f8154aSKent Overstreet 	if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE))
2141f9c78b2bSJens Axboe 		panic("bio: can't create integrity pool\n");
2142f9c78b2bSJens Axboe 
2143f9c78b2bSJens Axboe 	return 0;
2144f9c78b2bSJens Axboe }
2145f9c78b2bSJens Axboe subsys_initcall(init_bio);
2146