/* block/bio.c (revision fd42df30) */
/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 *
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <linux/blk-cgroup.h>

#include <trace/events/block.h>
#include "blk.h"
#include "blk-rq-qos.h"

/*
 * Test patch to inline a certain number of bi_io_vec's inside the bio
 * itself, to shrink a bio data allocation from two mempool calls to one
 */
#define BIO_INLINE_VECS		4

/*
 * If you change this list, also change bvec_alloc() or things will break
 * badly! The nr_vecs values cannot be bigger than what fits into an
 * unsigned short.
 */
#define BV(x, n) { .nr_vecs = x, .name = "biovec-"#n }
static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
	BV(1, 1), BV(4, 4), BV(16, 16), BV(64, 64), BV(128, 128), BV(BIO_MAX_PAGES, max),
};
#undef BV

/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
struct bio_set fs_bio_set;
EXPORT_SYMBOL(fs_bio_set);

/*
 * Our slab pool management
 */
struct bio_slab {
	struct kmem_cache *slab;
	unsigned int slab_ref;
	unsigned int slab_size;
	char name[8];
};
static DEFINE_MUTEX(bio_slab_lock);
static struct bio_slab *bio_slabs;
static unsigned int bio_slab_nr, bio_slab_max;

static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
{
	unsigned int sz = sizeof(struct bio) + extra_size;
	struct kmem_cache *slab = NULL;
	struct bio_slab *bslab, *new_bio_slabs;
	unsigned int new_bio_slab_max;
	unsigned int i, entry = -1;

	mutex_lock(&bio_slab_lock);

	i = 0;
	while (i < bio_slab_nr) {
		bslab = &bio_slabs[i];

		if (!bslab->slab && entry == -1)
			entry = i;
		else if (bslab->slab_size == sz) {
			slab = bslab->slab;
			bslab->slab_ref++;
			break;
		}
		i++;
	}

	if (slab)
		goto out_unlock;

	if (bio_slab_nr == bio_slab_max && entry == -1) {
		new_bio_slab_max = bio_slab_max << 1;
		new_bio_slabs = krealloc(bio_slabs,
					 new_bio_slab_max * sizeof(struct bio_slab),
					 GFP_KERNEL);
		if (!new_bio_slabs)
			goto out_unlock;
		bio_slab_max = new_bio_slab_max;
		bio_slabs = new_bio_slabs;
	}
	if (entry == -1)
		entry = bio_slab_nr++;

	bslab = &bio_slabs[entry];

	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
	slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
				 SLAB_HWCACHE_ALIGN, NULL);
	if (!slab)
		goto out_unlock;

	bslab->slab = slab;
	bslab->slab_ref = 1;
	bslab->slab_size = sz;
out_unlock:
	mutex_unlock(&bio_slab_lock);
	return slab;
}

static void bio_put_slab(struct bio_set *bs)
{
	struct bio_slab *bslab = NULL;
	unsigned int i;

	mutex_lock(&bio_slab_lock);

	for (i = 0; i < bio_slab_nr; i++) {
		if (bs->bio_slab == bio_slabs[i].slab) {
			bslab = &bio_slabs[i];
			break;
		}
	}

	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
		goto out;

	WARN_ON(!bslab->slab_ref);

	if (--bslab->slab_ref)
		goto out;

	kmem_cache_destroy(bslab->slab);
	bslab->slab = NULL;

out:
	mutex_unlock(&bio_slab_lock);
}

unsigned int bvec_nr_vecs(unsigned short idx)
{
	return bvec_slabs[--idx].nr_vecs;
}

void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
{
	if (!idx)
		return;
	idx--;

	BIO_BUG_ON(idx >= BVEC_POOL_NR);

	if (idx == BVEC_POOL_MAX) {
		mempool_free(bv, pool);
	} else {
		struct biovec_slab *bvs = bvec_slabs + idx;

		kmem_cache_free(bvs->slab, bv);
	}
}

struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
			   mempool_t *pool)
{
	struct bio_vec *bvl;

	/*
	 * see comment near the bvec_slabs definition above!
	 */
	switch (nr) {
	case 1:
		*idx = 0;
		break;
	case 2 ... 4:
		*idx = 1;
		break;
	case 5 ... 16:
		*idx = 2;
		break;
	case 17 ... 64:
		*idx = 3;
		break;
	case 65 ... 128:
		*idx = 4;
		break;
	case 129 ... BIO_MAX_PAGES:
		*idx = 5;
		break;
	default:
		return NULL;
	}

	/*
	 * idx now points to the pool we want to allocate from. Only the
	 * last (BVEC_POOL_MAX) pool is mempool backed.
	 */
	if (*idx == BVEC_POOL_MAX) {
fallback:
		bvl = mempool_alloc(pool, gfp_mask);
	} else {
		struct biovec_slab *bvs = bvec_slabs + *idx;
		gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);

		/*
		 * Make this allocation restricted and don't dump info on
		 * allocation failures, since we'll fall back to the mempool
		 * in case of failure.
		 */
		__gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;

		/*
		 * Try a slab allocation. If this fails and __GFP_DIRECT_RECLAIM
		 * is set, retry with the mempool.
		 */
		bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
		if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
			*idx = BVEC_POOL_MAX;
			goto fallback;
		}
	}

	(*idx)++;
	return bvl;
}

void bio_uninit(struct bio *bio)
{
	bio_disassociate_task(bio);
}
EXPORT_SYMBOL(bio_uninit);

static void bio_free(struct bio *bio)
{
	struct bio_set *bs = bio->bi_pool;
	void *p;

	bio_uninit(bio);

	if (bs) {
		bvec_free(&bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));

		/*
		 * If we have front padding, adjust the bio pointer before freeing
		 */
		p = bio;
		p -= bs->front_pad;

		mempool_free(p, &bs->bio_pool);
	} else {
		/* Bio was allocated by bio_kmalloc() */
		kfree(bio);
	}
}

/*
 * Users of this function have their own bio allocation. Subsequently,
 * they must remember to pair any call to bio_init() with bio_uninit()
 * when IO has completed, or when the bio is released.
 */
void bio_init(struct bio *bio, struct bio_vec *table,
	      unsigned short max_vecs)
{
	memset(bio, 0, sizeof(*bio));
	atomic_set(&bio->__bi_remaining, 1);
	atomic_set(&bio->__bi_cnt, 1);

	bio->bi_io_vec = table;
	bio->bi_max_vecs = max_vecs;
}
EXPORT_SYMBOL(bio_init);
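
/*
 * Illustrative sketch, not part of this file: a driver embedding a bio for
 * flushes, pairing bio_init() with bio_uninit() as the comment above
 * requires. The names "my_dev" and "my_flush_endio" are hypothetical.
 *
 *	struct my_dev {
 *		struct block_device	*bdev;
 *		struct bio		flush_bio;
 *		struct bio_vec		flush_bvec;
 *	};
 *
 *	static void my_dev_issue_flush(struct my_dev *dev)
 *	{
 *		struct bio *bio = &dev->flush_bio;
 *
 *		bio_init(bio, &dev->flush_bvec, 1);
 *		bio_set_dev(bio, dev->bdev);
 *		bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 *		bio->bi_end_io = my_flush_endio;
 *		submit_bio(bio);
 *	}
 *
 * my_flush_endio() must call bio_uninit() (or bio_reset(), which does so
 * internally) before the embedded bio is reused.
 */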

/**
 * bio_reset - reinitialize a bio
 * @bio:	bio to reset
 *
 * Description:
 *   After calling bio_reset(), @bio will be in the same state as a freshly
 *   allocated bio returned by bio_alloc_bioset() - the only fields that are
 *   preserved are the ones that are initialized by bio_alloc_bioset(). See
 *   comment in struct bio.
 */
void bio_reset(struct bio *bio)
{
	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);

	bio_uninit(bio);

	memset(bio, 0, BIO_RESET_BYTES);
	bio->bi_flags = flags;
	atomic_set(&bio->__bi_remaining, 1);
}
EXPORT_SYMBOL(bio_reset);
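
/*
 * Illustrative sketch, not part of this file: reusing one on-stack bio for
 * several synchronous reads by resetting it between submissions. Since
 * bi_io_vec and bi_max_vecs sit past BIO_RESET_BYTES, the table set up by
 * bio_init() survives the reset. "bdev", "sect" and "page" are hypothetical.
 *
 *	struct bio bio;
 *	struct bio_vec bvec;
 *	int i;
 *
 *	bio_init(&bio, &bvec, 1);
 *	for (i = 0; i < 2; i++) {
 *		bio_reset(&bio);
 *		bio_set_dev(&bio, bdev);
 *		bio.bi_iter.bi_sector = sect + i * (PAGE_SIZE >> 9);
 *		bio.bi_opf = REQ_OP_READ;
 *		bio_add_page(&bio, page, PAGE_SIZE, 0);
 *		submit_bio_wait(&bio);
 *	}
 *	bio_uninit(&bio);
 */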

static struct bio *__bio_chain_endio(struct bio *bio)
{
	struct bio *parent = bio->bi_private;

	if (!parent->bi_status)
		parent->bi_status = bio->bi_status;
	bio_put(bio);
	return parent;
}

static void bio_chain_endio(struct bio *bio)
{
	bio_endio(__bio_chain_endio(bio));
}

/**
 * bio_chain - chain bio completions
 * @bio: the target bio
 * @parent: the @bio's parent bio
 *
 * The caller won't have a bi_end_io called when @bio completes - instead,
 * @parent's bi_end_io won't be called until both @parent and @bio have
 * completed; the chained bio will also be freed when it completes.
 *
 * The caller must not set bi_private or bi_end_io in @bio.
 */
void bio_chain(struct bio *bio, struct bio *parent)
{
	BUG_ON(bio->bi_private || bio->bi_end_io);

	bio->bi_private = parent;
	bio->bi_end_io = bio_chain_endio;
	bio_inc_remaining(parent);
}
EXPORT_SYMBOL(bio_chain);
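
/*
 * Illustrative sketch, not part of this file: the split-and-chain pattern
 * used by stacking drivers and blk_queue_split()-style code. The parent's
 * completion is held back until the front split has also completed.
 * "max_sectors" and "my_bioset" are hypothetical.
 *
 *	struct bio *split;
 *
 *	split = bio_split(bio, max_sectors, GFP_NOIO, &my_bioset);
 *	bio_chain(split, bio);
 *	generic_make_request(split);
 *	generic_make_request(bio);	// remainder, completes after split
 */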

static void bio_alloc_rescue(struct work_struct *work)
{
	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
	struct bio *bio;

	while (1) {
		spin_lock(&bs->rescue_lock);
		bio = bio_list_pop(&bs->rescue_list);
		spin_unlock(&bs->rescue_lock);

		if (!bio)
			break;

		generic_make_request(bio);
	}
}

static void punt_bios_to_rescuer(struct bio_set *bs)
{
	struct bio_list punt, nopunt;
	struct bio *bio;

	if (WARN_ON_ONCE(!bs->rescue_workqueue))
		return;
	/*
	 * In order to guarantee forward progress we must punt only bios that
	 * were allocated from this bio_set; otherwise, if there was a bio on
	 * there for a stacking driver higher up in the stack, processing it
	 * could require allocating bios from this bio_set, and doing that from
	 * our own rescuer would be bad.
	 *
	 * Since bio lists are singly linked, pop them all instead of trying to
	 * remove from the middle of the list:
	 */

	bio_list_init(&punt);
	bio_list_init(&nopunt);

	while ((bio = bio_list_pop(&current->bio_list[0])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[0] = nopunt;

	bio_list_init(&nopunt);
	while ((bio = bio_list_pop(&current->bio_list[1])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[1] = nopunt;

	spin_lock(&bs->rescue_lock);
	bio_list_merge(&bs->rescue_list, &punt);
	spin_unlock(&bs->rescue_lock);

	queue_work(bs->rescue_workqueue, &bs->rescue_work);
}

/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask:   the GFP_* mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 * @bs:		the bio_set to allocate from.
 *
 * Description:
 *   If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
 *   backed by the @bs's mempool.
 *
 *   When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc will
 *   always be able to allocate a bio. This is due to the mempool guarantees.
 *   To make this work, callers must never allocate more than 1 bio at a time
 *   from this pool. Callers that need to allocate more than 1 bio must always
 *   submit the previously allocated bio for IO before attempting to allocate
 *   a new one. Failure to do so can cause deadlocks under memory pressure.
 *
 *   Note that when running under generic_make_request() (i.e. any block
 *   driver), bios are not submitted until after you return - see the code in
 *   generic_make_request() that converts recursion into iteration, to prevent
 *   stack overflows.
 *
 *   This would normally mean allocating multiple bios under
 *   generic_make_request() would be susceptible to deadlocks, but we have
 *   deadlock avoidance code that resubmits any blocked bios from a rescuer
 *   thread.
 *
 *   However, we do not guarantee forward progress for allocations from other
 *   mempools. Doing multiple allocations from the same mempool under
 *   generic_make_request() should be avoided - instead, use bio_set's front_pad
 *   for per bio allocations.
 *
 *   RETURNS:
 *   Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
			     struct bio_set *bs)
{
	gfp_t saved_gfp = gfp_mask;
	unsigned front_pad;
	unsigned inline_vecs;
	struct bio_vec *bvl = NULL;
	struct bio *bio;
	void *p;

	if (!bs) {
		if (nr_iovecs > UIO_MAXIOV)
			return NULL;

		p = kmalloc(sizeof(struct bio) +
			    nr_iovecs * sizeof(struct bio_vec),
			    gfp_mask);
		front_pad = 0;
		inline_vecs = nr_iovecs;
	} else {
		/* should not use nobvec bioset for nr_iovecs > 0 */
		if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) &&
				 nr_iovecs > 0))
			return NULL;
		/*
		 * generic_make_request() converts recursion to iteration; this
		 * means if we're running beneath it, any bios we allocate and
		 * submit will not be submitted (and thus freed) until after we
		 * return.
		 *
		 * This exposes us to a potential deadlock if we allocate
		 * multiple bios from the same bio_set while running
		 * underneath generic_make_request(). If we were to allocate
		 * multiple bios (say a stacking block driver that was splitting
		 * bios), we would deadlock if we exhausted the mempool's
		 * reserve.
		 *
		 * We solve this, and guarantee forward progress, with a rescuer
		 * workqueue per bio_set. If we go to allocate and there are
		 * bios on current->bio_list, we first try the allocation
		 * without __GFP_DIRECT_RECLAIM; if that fails, we punt those
		 * bios we would be blocking to the rescuer workqueue before
		 * we retry with the original gfp_flags.
		 */

		if (current->bio_list &&
		    (!bio_list_empty(&current->bio_list[0]) ||
		     !bio_list_empty(&current->bio_list[1])) &&
		    bs->rescue_workqueue)
			gfp_mask &= ~__GFP_DIRECT_RECLAIM;

		p = mempool_alloc(&bs->bio_pool, gfp_mask);
		if (!p && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			p = mempool_alloc(&bs->bio_pool, gfp_mask);
		}

		front_pad = bs->front_pad;
		inline_vecs = BIO_INLINE_VECS;
	}

	if (unlikely(!p))
		return NULL;

	bio = p + front_pad;
	bio_init(bio, NULL, 0);

	if (nr_iovecs > inline_vecs) {
		unsigned long idx = 0;

		bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
		if (!bvl && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
		}

		if (unlikely(!bvl))
			goto err_free;

		bio->bi_flags |= idx << BVEC_POOL_OFFSET;
	} else if (nr_iovecs) {
		bvl = bio->bi_inline_vecs;
	}

	bio->bi_pool = bs;
	bio->bi_max_vecs = nr_iovecs;
	bio->bi_io_vec = bvl;
	return bio;

err_free:
	mempool_free(p, &bs->bio_pool);
	return NULL;
}
EXPORT_SYMBOL(bio_alloc_bioset);
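
/*
 * Illustrative sketch, not part of this file: the common case of allocating
 * from the global fs_bio_set through the bio_alloc() inline wrapper in
 * <linux/bio.h>. With __GFP_DIRECT_RECLAIM set (as in GFP_NOIO), the mempool
 * guarantee described above means the allocation cannot fail, provided only
 * one unsubmitted bio is held at a time. "bdev", "sector" and "page" are
 * hypothetical.
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_READ;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	submit_bio(bio);
 */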

void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
{
	unsigned long flags;
	struct bio_vec bv;
	struct bvec_iter iter;

	__bio_for_each_segment(bv, bio, iter, start) {
		char *data = bvec_kmap_irq(&bv, &flags);
		memset(data, 0, bv.bv_len);
		flush_dcache_page(bv.bv_page);
		bvec_kunmap_irq(data, &flags);
	}
}
EXPORT_SYMBOL(zero_fill_bio_iter);
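
/*
 * Illustrative sketch, not part of this file: zeroing a bio's entire payload
 * after a failed read, via the zero_fill_bio() wrapper in <linux/bio.h>,
 * which passes bio->bi_iter as the start iterator. Passing a saved iterator
 * instead zeroes only from that position onward.
 *
 *	if (read_failed)		// hypothetical condition
 *		zero_fill_bio(bio);
 */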

/**
 * bio_put - release a reference to a bio
 * @bio:   bio to release reference to
 *
 * Description:
 *   Put a reference to a &struct bio, either one you have gotten with
 *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
 **/
void bio_put(struct bio *bio)
{
	if (!bio_flagged(bio, BIO_REFFED))
		bio_free(bio);
	else {
		BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));

		/*
		 * last put frees it
		 */
		if (atomic_dec_and_test(&bio->__bi_cnt))
			bio_free(bio);
	}
}
EXPORT_SYMBOL(bio_put);
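
/*
 * Illustrative sketch, not part of this file: taking an extra reference so
 * the bio can still be inspected after submission; this mirrors the pattern
 * documented at bio_get() in <linux/bio.h>. Without the bio_get(), the bio
 * could already have been freed once its endio handler ran.
 *
 *	bio_get(bio);
 *	submit_bio(bio);
 *	// ... after completion has been waited for ...
 *	if (bio->bi_status)
 *		handle_error();		// hypothetical
 *	bio_put(bio);			// drop our extra reference
 */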

inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
{
	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
		blk_recount_segments(q, bio);

	return bio->bi_phys_segments;
}
EXPORT_SYMBOL(bio_phys_segments);

/**
 *	__bio_clone_fast - clone a bio that shares the original bio's biovec
 *	@bio: destination bio
 *	@bio_src: bio to clone
 *
 *	Clone a &bio. Caller will own the returned bio, but not
 *	the actual data it points to. Reference count of returned
 *	bio will be one.
 *
 *	Caller must ensure that @bio_src is not freed before @bio.
 */
void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
{
	BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));

	/*
	 * most users will be overriding ->bi_disk with a new target,
	 * so we don't set nor calculate new physical/hw segment counts here
	 */
	bio->bi_disk = bio_src->bi_disk;
	bio->bi_partno = bio_src->bi_partno;
	bio_set_flag(bio, BIO_CLONED);
	if (bio_flagged(bio_src, BIO_THROTTLED))
		bio_set_flag(bio, BIO_THROTTLED);
	bio->bi_opf = bio_src->bi_opf;
	bio->bi_ioprio = bio_src->bi_ioprio;
	bio->bi_write_hint = bio_src->bi_write_hint;
	bio->bi_iter = bio_src->bi_iter;
	bio->bi_io_vec = bio_src->bi_io_vec;

	bio_clone_blkcg_association(bio, bio_src);
	blkcg_bio_issue_init(bio);
}
EXPORT_SYMBOL(__bio_clone_fast);

/**
 *	bio_clone_fast - clone a bio that shares the original bio's biovec
 *	@bio: bio to clone
 *	@gfp_mask: allocation priority
 *	@bs: bio_set to allocate from
 *
 *	Like __bio_clone_fast, only also allocates the returned bio
 */
struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
{
	struct bio *b;

	b = bio_alloc_bioset(gfp_mask, 0, bs);
	if (!b)
		return NULL;

	__bio_clone_fast(b, bio);

	if (bio_integrity(bio)) {
		int ret;

		ret = bio_integrity_clone(b, bio, gfp_mask);

		if (ret < 0) {
			bio_put(b);
			return NULL;
		}
	}

	return b;
}
EXPORT_SYMBOL(bio_clone_fast);
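
/*
 * Illustrative sketch, not part of this file: the redirect pattern used by
 * stacking drivers (cf. bcache, dm): clone the bio, point the clone at the
 * backing device, and complete the original from the clone's endio.
 * "my_bioset", "my_clone_endio" and "backing_bdev" are hypothetical.
 *
 *	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, &my_bioset);
 *
 *	if (!clone)
 *		return -ENOMEM;
 *	clone->bi_private = bio;
 *	clone->bi_end_io = my_clone_endio;
 *	bio_set_dev(clone, backing_bdev);
 *	generic_make_request(clone);
 *
 * The clone shares @bio's biovec, so @bio must stay alive until the clone
 * completes, per the __bio_clone_fast() rules above.
 */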

/**
 *	bio_add_pc_page	-	attempt to add page to bio
 *	@q: the target queue
 *	@bio: destination bio
 *	@page: page to add
 *	@len: vec entry length
 *	@offset: vec entry offset
 *
 *	Attempt to add a page to the bio_vec maplist. This can fail for a
 *	number of reasons, such as the bio being full or target block device
 *	limitations. The target block device must allow bios up to PAGE_SIZE,
 *	so it is always possible to add a single page to an empty bio.
 *
 *	This should only be used by REQ_PC bios.
 */
int bio_add_pc_page(struct request_queue *q, struct bio *bio,
		    struct page *page, unsigned int len, unsigned int offset)
{
	int retried_segments = 0;
	struct bio_vec *bvec;

	/*
	 * cloned bio must not modify vec list
	 */
	if (unlikely(bio_flagged(bio, BIO_CLONED)))
		return 0;

	if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
		return 0;

	/*
	 * For filesystems with a blocksize smaller than the pagesize
	 * we will often be called with the same page as last time and
	 * a consecutive offset.  Optimize this special case.
	 */
	if (bio->bi_vcnt > 0) {
		struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page == prev->bv_page &&
		    offset == prev->bv_offset + prev->bv_len) {
			prev->bv_len += len;
			bio->bi_iter.bi_size += len;
			goto done;
		}

		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvec_gap_to_prev(q, prev, offset))
			return 0;
	}

	if (bio_full(bio))
		return 0;

	/*
	 * setup the new entry, we might clear it again later if we
	 * cannot add the page
	 */
	bvec = &bio->bi_io_vec[bio->bi_vcnt];
	bvec->bv_page = page;
	bvec->bv_len = len;
	bvec->bv_offset = offset;
	bio->bi_vcnt++;
	bio->bi_phys_segments++;
	bio->bi_iter.bi_size += len;

	/*
	 * Perform a recount if the number of segments is greater
	 * than queue_max_segments(q).
	 */
	while (bio->bi_phys_segments > queue_max_segments(q)) {
		if (retried_segments)
			goto failed;

		retried_segments = 1;
		blk_recount_segments(q, bio);
	}

	/* If we may be able to merge these biovecs, force a recount */
	if (bio->bi_vcnt > 1 && biovec_phys_mergeable(q, bvec - 1, bvec))
		bio_clear_flag(bio, BIO_SEG_VALID);

 done:
	return len;

 failed:
	bvec->bv_page = NULL;
	bvec->bv_len = 0;
	bvec->bv_offset = 0;
	bio->bi_vcnt--;
	bio->bi_iter.bi_size -= len;
	blk_recount_segments(q, bio);
	return 0;
}
EXPORT_SYMBOL(bio_add_pc_page);

/**
 * __bio_try_merge_page - try appending data to an existing bvec.
 * @bio: destination bio
 * @page: page to add
 * @len: length of the data to add
 * @off: offset of the data in @page
 *
 * Try to add the data at @page + @off to the last bvec of @bio.  This is
 * a useful optimisation for file systems with a block size smaller than the
 * page size.
 *
 * Return %true on success or %false on failure.
 */
bool __bio_try_merge_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off)
{
	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return false;

	if (bio->bi_vcnt > 0) {
		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page == bv->bv_page && off == bv->bv_offset + bv->bv_len) {
			bv->bv_len += len;
			bio->bi_iter.bi_size += len;
			return true;
		}
	}
	return false;
}
EXPORT_SYMBOL_GPL(__bio_try_merge_page);

/**
 * __bio_add_page - add page to a bio in a new segment
 * @bio: destination bio
 * @page: page to add
 * @len: length of the data to add
 * @off: offset of the data in @page
 *
 * Add the data at @page + @off to @bio as a new bvec.  The caller must ensure
 * that @bio has space for another bvec.
 */
void __bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off)
{
	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];

	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	WARN_ON_ONCE(bio_full(bio));

	bv->bv_page = page;
	bv->bv_offset = off;
	bv->bv_len = len;

	bio->bi_iter.bi_size += len;
	bio->bi_vcnt++;
}
EXPORT_SYMBOL_GPL(__bio_add_page);

/**
 *	bio_add_page	-	attempt to add page to bio
 *	@bio: destination bio
 *	@page: page to add
 *	@len: vec entry length
 *	@offset: vec entry offset
 *
 *	Attempt to add a page to the bio_vec maplist. This will only fail
 *	if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
 */
int bio_add_page(struct bio *bio, struct page *page,
		 unsigned int len, unsigned int offset)
{
	if (!__bio_try_merge_page(bio, page, len, offset)) {
		if (bio_full(bio))
			return 0;
		__bio_add_page(bio, page, len, offset);
	}
	return len;
}
EXPORT_SYMBOL(bio_add_page);
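
/*
 * Illustrative sketch, not part of this file: a writeback-style loop that
 * fills a bio page by page and stops when it is full. "bdev",
 * "first_sector", "pages" and "nr_pages" are hypothetical.
 *
 *	struct bio *bio = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
 *	int i;
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = first_sector;
 *	bio->bi_opf = REQ_OP_WRITE;
 *	for (i = 0; i < nr_pages; i++)
 *		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
 *			break;		// bio full: submit, allocate another
 *	submit_bio(bio);
 */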

#define PAGE_PTRS_PER_BVEC     (sizeof(struct bio_vec) / sizeof(struct page *))

/**
 * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be mapped
 *
 * Pins pages from *iter and appends them to @bio's bvec array. The
 * pages will have to be released using put_page() when done.
 * For a multi-segment *iter, this function only adds pages from the
 * next non-empty segment of the iov iterator.
 */
static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
	struct page **pages = (struct page **)bv;
	ssize_t size, left;
	unsigned len, i;
	size_t offset;

	/*
	 * Move page array up in the allocated memory for the bio vecs as far as
	 * possible so that we can start filling biovecs from the beginning
	 * without overwriting the temporary page array.
	 */
	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);

	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
	if (unlikely(size <= 0))
		return size ? size : -EFAULT;

	for (left = size, i = 0; left > 0; left -= len, i++) {
		struct page *page = pages[i];

		len = min_t(size_t, PAGE_SIZE - offset, left);
		if (WARN_ON_ONCE(bio_add_page(bio, page, len, offset) != len))
			return -EINVAL;
		offset = 0;
	}

	iov_iter_advance(iter, size);
	return 0;
}

/**
 * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be mapped
 *
 * Pins pages from *iter and appends them to @bio's bvec array. The
 * pages will have to be released using put_page() when done.
 * The function tries, but does not guarantee, to pin as many pages as
 * fit into the bio, or are requested in *iter, whichever is smaller.
 * If MM encounters an error pinning the requested pages, it stops.
 * Error is returned only if 0 pages could be pinned.
 */
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
	unsigned short orig_vcnt = bio->bi_vcnt;

	do {
		int ret = __bio_iov_iter_get_pages(bio, iter);

		if (unlikely(ret))
			return bio->bi_vcnt > orig_vcnt ? 0 : ret;

	} while (iov_iter_count(iter) && !bio_full(bio));

	return 0;
}
EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
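
/*
 * Illustrative sketch, not part of this file: how a direct-I/O path might
 * pin the pages of a user iov_iter into a bio, in the style of
 * __blkdev_direct_IO(). Error unwinding is elided; on completion each
 * pinned page must be released with put_page().
 *
 *	struct bio *bio;
 *	int ret;
 *
 *	bio = bio_alloc(GFP_KERNEL, iov_iter_npages(iter, BIO_MAX_PAGES));
 *	ret = bio_iov_iter_get_pages(bio, iter);
 *	if (unlikely(ret)) {
 *		bio_put(bio);
 *		return ret;
 *	}
 */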

static void submit_bio_wait_endio(struct bio *bio)
{
	complete(bio->bi_private);
}

/**
 * submit_bio_wait - submit a bio, and wait until it completes
 * @bio: The &struct bio which describes the I/O
 *
 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
 * bio_endio() on failure.
 *
 * WARNING: Unlike how submit_bio() is usually used, this function does not
 * consume the bio reference. The caller must drop the reference on their
 * own.
 */
int submit_bio_wait(struct bio *bio)
{
	DECLARE_COMPLETION_ONSTACK_MAP(done, bio->bi_disk->lockdep_map);

	bio->bi_private = &done;
	bio->bi_end_io = submit_bio_wait_endio;
	bio->bi_opf |= REQ_SYNC;
	submit_bio(bio);
	wait_for_completion_io(&done);

	return blk_status_to_errno(bio->bi_status);
}
EXPORT_SYMBOL(submit_bio_wait);
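
/*
 * Illustrative sketch, not part of this file: synchronously reading one
 * page, with the caller dropping the bio reference itself as the WARNING
 * above requires. "bdev", "sector" and "page" are hypothetical.
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *	int ret;
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_READ;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	ret = submit_bio_wait(bio);
 *	bio_put(bio);		// not consumed by submit_bio_wait()
 */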

/**
 * bio_advance - increment/complete a bio by some number of bytes
 * @bio:	bio to advance
 * @bytes:	number of bytes to complete
 *
 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
 * be updated on the last bvec as well.
 *
 * @bio will then represent the remaining, uncompleted portion of the io.
 */
void bio_advance(struct bio *bio, unsigned bytes)
{
	if (bio_integrity(bio))
		bio_integrity_advance(bio, bytes);

	bio_advance_iter(bio, &bio->bi_iter, bytes);
}
EXPORT_SYMBOL(bio_advance);
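
/*
 * Illustrative sketch, not part of this file: a driver that transfers a
 * request in pieces can advance the bio past each finished chunk so that
 * @bio always describes the remaining I/O. "my_transfer_chunk" is
 * hypothetical and returns the number of bytes it completed.
 *
 *	while (bio->bi_iter.bi_size) {
 *		unsigned int done = my_transfer_chunk(bio);
 *
 *		bio_advance(bio, done);
 *	}
 *	bio_endio(bio);
 */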

void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
			struct bio *src, struct bvec_iter *src_iter)
{
	struct bio_vec src_bv, dst_bv;
	void *src_p, *dst_p;
	unsigned bytes;

	while (src_iter->bi_size && dst_iter->bi_size) {
		src_bv = bio_iter_iovec(src, *src_iter);
		dst_bv = bio_iter_iovec(dst, *dst_iter);

		bytes = min(src_bv.bv_len, dst_bv.bv_len);

		src_p = kmap_atomic(src_bv.bv_page);
		dst_p = kmap_atomic(dst_bv.bv_page);

		memcpy(dst_p + dst_bv.bv_offset,
		       src_p + src_bv.bv_offset,
		       bytes);

		kunmap_atomic(dst_p);
		kunmap_atomic(src_p);

		flush_dcache_page(dst_bv.bv_page);

		bio_advance_iter(src, src_iter, bytes);
		bio_advance_iter(dst, dst_iter, bytes);
	}
}
EXPORT_SYMBOL(bio_copy_data_iter);

/**
 * bio_copy_data - copy contents of data buffers from one bio to another
 * @src: source bio
 * @dst: destination bio
 *
 * Stops when it reaches the end of either @src or @dst - that is, copies
 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
 */
void bio_copy_data(struct bio *dst, struct bio *src)
{
	struct bvec_iter src_iter = src->bi_iter;
	struct bvec_iter dst_iter = dst->bi_iter;

	bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
}
EXPORT_SYMBOL(bio_copy_data);
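
/*
 * Illustrative sketch, not part of this file: the bounce-buffer pattern
 * (cf. block/bounce.c). For a write, the payload is copied into a
 * driver-owned bio so the device never touches the original pages.
 * "bounce" is assumed to be a bio with matching-size pages already added.
 *
 *	if (op_is_write(bio_op(bio)))
 *		bio_copy_data(bounce, bio);	// dst = bounce, src = bio
 *	submit_bio(bounce);
 */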

/**
 * bio_list_copy_data - copy contents of data buffers from one chain of bios to
 * another
 * @src: source bio list
 * @dst: destination bio list
 *
 * Stops when it reaches the end of either the @src list or @dst list - that is,
 * copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of
 * bios).
 */
void bio_list_copy_data(struct bio *dst, struct bio *src)
{
	struct bvec_iter src_iter = src->bi_iter;
	struct bvec_iter dst_iter = dst->bi_iter;

	while (1) {
		if (!src_iter.bi_size) {
			src = src->bi_next;
			if (!src)
				break;

			src_iter = src->bi_iter;
		}

		if (!dst_iter.bi_size) {
			dst = dst->bi_next;
			if (!dst)
				break;

			dst_iter = dst->bi_iter;
		}

		bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
	}
}
EXPORT_SYMBOL(bio_list_copy_data);

struct bio_map_data {
	int is_our_pages;
	struct iov_iter iter;
	struct iovec iov[];
};

static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
					       gfp_t gfp_mask)
{
	struct bio_map_data *bmd;

	if (data->nr_segs > UIO_MAXIOV)
		return NULL;

	bmd = kmalloc(sizeof(struct bio_map_data) +
		      sizeof(struct iovec) * data->nr_segs, gfp_mask);
	if (!bmd)
		return NULL;
	memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
	bmd->iter = *data;
	bmd->iter.iov = bmd->iov;
	return bmd;
}

/**
 * bio_copy_from_iter - copy all pages from iov_iter to bio
 * @bio: The &struct bio which describes the I/O as destination
 * @iter: iov_iter as source
 *
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
{
	int i;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, i) {
		ssize_t ret;

		ret = copy_page_from_iter(bvec->bv_page,
					  bvec->bv_offset,
					  bvec->bv_len,
					  iter);

		if (!iov_iter_count(iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}
10969124d3feSDongsu Park /**
10979124d3feSDongsu Park  * bio_copy_to_iter - copy all pages from bio to iov_iter
10989124d3feSDongsu Park  * @bio: The &struct bio which describes the I/O as source
10999124d3feSDongsu Park  * @iter: iov_iter as destination
11009124d3feSDongsu Park  *
11019124d3feSDongsu Park  * Copy all pages from bio to iov_iter.
11029124d3feSDongsu Park  * Returns 0 on success, or error on failure.
11039124d3feSDongsu Park  */
11049124d3feSDongsu Park static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
11059124d3feSDongsu Park {
11069124d3feSDongsu Park 	int i;
11079124d3feSDongsu Park 	struct bio_vec *bvec;
11089124d3feSDongsu Park 
11099124d3feSDongsu Park 	bio_for_each_segment_all(bvec, bio, i) {
11109124d3feSDongsu Park 		ssize_t ret;
11119124d3feSDongsu Park 
11129124d3feSDongsu Park 		ret = copy_page_to_iter(bvec->bv_page,
11139124d3feSDongsu Park 					bvec->bv_offset,
11149124d3feSDongsu Park 					bvec->bv_len,
11159124d3feSDongsu Park 					&iter);
11169124d3feSDongsu Park 
11179124d3feSDongsu Park 		if (!iov_iter_count(&iter))
11189124d3feSDongsu Park 			break;
11199124d3feSDongsu Park 
11209124d3feSDongsu Park 		if (ret < bvec->bv_len)
11219124d3feSDongsu Park 			return -EFAULT;
11229124d3feSDongsu Park 	}
11239124d3feSDongsu Park 
11249124d3feSDongsu Park 	return 0;
1125f9c78b2bSJens Axboe }
1126f9c78b2bSJens Axboe 
void bio_free_pages(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i)
		__free_page(bvec->bv_page);
}
EXPORT_SYMBOL(bio_free_pages);

1137f9c78b2bSJens Axboe /**
1138f9c78b2bSJens Axboe  *	bio_uncopy_user	-	finish a bio previously set up by bio_copy_user_iov()
1139f9c78b2bSJens Axboe  *	@bio: bio being terminated
1140f9c78b2bSJens Axboe  *
1141ddad8dd0SChristoph Hellwig  *	Free pages allocated from bio_copy_user_iov() and write back data
1142f9c78b2bSJens Axboe  *	to user space in case of a read.
1143f9c78b2bSJens Axboe  */
1144f9c78b2bSJens Axboe int bio_uncopy_user(struct bio *bio)
1145f9c78b2bSJens Axboe {
1146f9c78b2bSJens Axboe 	struct bio_map_data *bmd = bio->bi_private;
11471dfa0f68SChristoph Hellwig 	int ret = 0;
1148f9c78b2bSJens Axboe 
1149f9c78b2bSJens Axboe 	if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
1150f9c78b2bSJens Axboe 		/*
1151f9c78b2bSJens Axboe 		 * if we're in a workqueue, the request is orphaned, so
11522d99b55dSHannes Reinecke 		 * don't copy into a random user address space, just free
11532d99b55dSHannes Reinecke 		 * and return -EINTR so user space doesn't expect any data.
1154f9c78b2bSJens Axboe 		 */
11552d99b55dSHannes Reinecke 		if (!current->mm)
11562d99b55dSHannes Reinecke 			ret = -EINTR;
11572d99b55dSHannes Reinecke 		else if (bio_data_dir(bio) == READ)
11589124d3feSDongsu Park 			ret = bio_copy_to_iter(bio, bmd->iter);
11591dfa0f68SChristoph Hellwig 		if (bmd->is_our_pages)
11601dfa0f68SChristoph Hellwig 			bio_free_pages(bio);
1161f9c78b2bSJens Axboe 	}
1162f9c78b2bSJens Axboe 	kfree(bmd);
1163f9c78b2bSJens Axboe 	bio_put(bio);
1164f9c78b2bSJens Axboe 	return ret;
1165f9c78b2bSJens Axboe }
1166f9c78b2bSJens Axboe 
1167f9c78b2bSJens Axboe /**
1168f9c78b2bSJens Axboe  *	bio_copy_user_iov	-	copy user data to bio
1169f9c78b2bSJens Axboe  *	@q:		destination block queue
1170f9c78b2bSJens Axboe  *	@map_data:	pointer to the rq_map_data holding pages (if necessary)
117126e49cfcSKent Overstreet  *	@iter:		iovec iterator
1172f9c78b2bSJens Axboe  *	@gfp_mask:	memory allocation flags
1173f9c78b2bSJens Axboe  *
1174f9c78b2bSJens Axboe  *	Prepares and returns a bio for indirect user io, bouncing data
1175f9c78b2bSJens Axboe  *	to/from kernel pages as necessary. Must be paired with a
1176f9c78b2bSJens Axboe  *	call to bio_uncopy_user() on io completion.
1177f9c78b2bSJens Axboe  */
1178f9c78b2bSJens Axboe struct bio *bio_copy_user_iov(struct request_queue *q,
1179f9c78b2bSJens Axboe 			      struct rq_map_data *map_data,
1180e81cef5dSAl Viro 			      struct iov_iter *iter,
118126e49cfcSKent Overstreet 			      gfp_t gfp_mask)
1182f9c78b2bSJens Axboe {
1183f9c78b2bSJens Axboe 	struct bio_map_data *bmd;
1184f9c78b2bSJens Axboe 	struct page *page;
1185f9c78b2bSJens Axboe 	struct bio *bio;
1186d16d44ebSAl Viro 	int i = 0, ret;
1187d16d44ebSAl Viro 	int nr_pages;
118826e49cfcSKent Overstreet 	unsigned int len = iter->count;
1189bd5ceceaSGeliang Tang 	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;
1190f9c78b2bSJens Axboe 
11910e5b935dSAl Viro 	bmd = bio_alloc_map_data(iter, gfp_mask);
1192f9c78b2bSJens Axboe 	if (!bmd)
1193f9c78b2bSJens Axboe 		return ERR_PTR(-ENOMEM);
1194f9c78b2bSJens Axboe 
119526e49cfcSKent Overstreet 	/*
119626e49cfcSKent Overstreet 	 * We need to do a deep copy of the iov_iter including the iovecs.
119726e49cfcSKent Overstreet 	 * The caller-provided iov might point to an on-stack or otherwise
119826e49cfcSKent Overstreet 	 * short-lived one.
119926e49cfcSKent Overstreet 	 */
120026e49cfcSKent Overstreet 	bmd->is_our_pages = map_data ? 0 : 1;
120126e49cfcSKent Overstreet 
1202d16d44ebSAl Viro 	nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
1203d16d44ebSAl Viro 	if (nr_pages > BIO_MAX_PAGES)
1204d16d44ebSAl Viro 		nr_pages = BIO_MAX_PAGES;
1205f9c78b2bSJens Axboe 
1206f9c78b2bSJens Axboe 	ret = -ENOMEM;
1207f9c78b2bSJens Axboe 	bio = bio_kmalloc(gfp_mask, nr_pages);
1208f9c78b2bSJens Axboe 	if (!bio)
1209f9c78b2bSJens Axboe 		goto out_bmd;
1210f9c78b2bSJens Axboe 
1211f9c78b2bSJens Axboe 	ret = 0;
1212f9c78b2bSJens Axboe 
1213f9c78b2bSJens Axboe 	if (map_data) {
1214f9c78b2bSJens Axboe 		nr_pages = 1 << map_data->page_order;
1215f9c78b2bSJens Axboe 		i = map_data->offset / PAGE_SIZE;
1216f9c78b2bSJens Axboe 	}
1217f9c78b2bSJens Axboe 	while (len) {
1218f9c78b2bSJens Axboe 		unsigned int bytes = PAGE_SIZE;
1219f9c78b2bSJens Axboe 
1220f9c78b2bSJens Axboe 		bytes -= offset;
1221f9c78b2bSJens Axboe 
1222f9c78b2bSJens Axboe 		if (bytes > len)
1223f9c78b2bSJens Axboe 			bytes = len;
1224f9c78b2bSJens Axboe 
1225f9c78b2bSJens Axboe 		if (map_data) {
1226f9c78b2bSJens Axboe 			if (i == map_data->nr_entries * nr_pages) {
1227f9c78b2bSJens Axboe 				ret = -ENOMEM;
1228f9c78b2bSJens Axboe 				break;
1229f9c78b2bSJens Axboe 			}
1230f9c78b2bSJens Axboe 
1231f9c78b2bSJens Axboe 			page = map_data->pages[i / nr_pages];
1232f9c78b2bSJens Axboe 			page += (i % nr_pages);
1233f9c78b2bSJens Axboe 
1234f9c78b2bSJens Axboe 			i++;
1235f9c78b2bSJens Axboe 		} else {
1236f9c78b2bSJens Axboe 			page = alloc_page(q->bounce_gfp | gfp_mask);
1237f9c78b2bSJens Axboe 			if (!page) {
1238f9c78b2bSJens Axboe 				ret = -ENOMEM;
1239f9c78b2bSJens Axboe 				break;
1240f9c78b2bSJens Axboe 			}
1241f9c78b2bSJens Axboe 		}
1242f9c78b2bSJens Axboe 
1243f9c78b2bSJens Axboe 		if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
1244f9c78b2bSJens Axboe 			break;
1245f9c78b2bSJens Axboe 
1246f9c78b2bSJens Axboe 		len -= bytes;
1247f9c78b2bSJens Axboe 		offset = 0;
1248f9c78b2bSJens Axboe 	}
1249f9c78b2bSJens Axboe 
1250f9c78b2bSJens Axboe 	if (ret)
1251f9c78b2bSJens Axboe 		goto cleanup;
1252f9c78b2bSJens Axboe 
12532884d0beSAl Viro 	if (map_data)
12542884d0beSAl Viro 		map_data->offset += bio->bi_iter.bi_size;
12552884d0beSAl Viro 
1256f9c78b2bSJens Axboe 	/*
1257f9c78b2bSJens Axboe 	 * success
1258f9c78b2bSJens Axboe 	 */
125900e23707SDavid Howells 	if ((iov_iter_rw(iter) == WRITE && (!map_data || !map_data->null_mapped)) ||
1260f9c78b2bSJens Axboe 	    (map_data && map_data->from_user)) {
126198a09d61SAl Viro 		ret = bio_copy_from_iter(bio, iter);
1262f9c78b2bSJens Axboe 		if (ret)
1263f9c78b2bSJens Axboe 			goto cleanup;
126498a09d61SAl Viro 	} else {
1265f3587d76SKeith Busch 		zero_fill_bio(bio);
1266e81cef5dSAl Viro 		iov_iter_advance(iter, bio->bi_iter.bi_size);
1267f9c78b2bSJens Axboe 	}
1268f9c78b2bSJens Axboe 
126926e49cfcSKent Overstreet 	bio->bi_private = bmd;
12702884d0beSAl Viro 	if (map_data && map_data->null_mapped)
12712884d0beSAl Viro 		bio_set_flag(bio, BIO_NULL_MAPPED);
1272f9c78b2bSJens Axboe 	return bio;
1273f9c78b2bSJens Axboe cleanup:
1274f9c78b2bSJens Axboe 	if (!map_data)
12751dfa0f68SChristoph Hellwig 		bio_free_pages(bio);
1276f9c78b2bSJens Axboe 	bio_put(bio);
1277f9c78b2bSJens Axboe out_bmd:
1278f9c78b2bSJens Axboe 	kfree(bmd);
1279f9c78b2bSJens Axboe 	return ERR_PTR(ret);
1280f9c78b2bSJens Axboe }
1281f9c78b2bSJens Axboe 
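/*
 * Illustrative sketch, not part of the original file: a minimal caller
 * pairing bio_copy_user_iov() with bio_uncopy_user().  The function name
 * is hypothetical and error handling is reduced to the essentials; most
 * real users reach this path through blk_rq_map_user_iov().
 */
static __maybe_unused int example_copy_user_io(struct request_queue *q,
					       struct iov_iter *iter)
{
	struct bio *bio = bio_copy_user_iov(q, NULL, iter, GFP_KERNEL);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* ... issue the bio and wait for completion here ... */

	/*
	 * For a READ this copies the bounce pages back to user space;
	 * in all cases it frees the pages and the bio_map_data.
	 */
	return bio_uncopy_user(bio);
}
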
128237f19e57SChristoph Hellwig /**
128337f19e57SChristoph Hellwig  *	bio_map_user_iov - map user iovec into bio
128437f19e57SChristoph Hellwig  *	@q:		the struct request_queue for the bio
128537f19e57SChristoph Hellwig  *	@iter:		iovec iterator
128637f19e57SChristoph Hellwig  *	@gfp_mask:	memory allocation flags
128737f19e57SChristoph Hellwig  *
128837f19e57SChristoph Hellwig  *	Map the user space address into a bio suitable for io to a block
128937f19e57SChristoph Hellwig  *	device. Returns an error pointer in case of error.
129037f19e57SChristoph Hellwig  */
129137f19e57SChristoph Hellwig struct bio *bio_map_user_iov(struct request_queue *q,
1292e81cef5dSAl Viro 			     struct iov_iter *iter,
129326e49cfcSKent Overstreet 			     gfp_t gfp_mask)
1294f9c78b2bSJens Axboe {
129526e49cfcSKent Overstreet 	int j;
1296f9c78b2bSJens Axboe 	struct bio *bio;
1297076098e5SAl Viro 	int ret;
12982b04e8f6SAl Viro 	struct bio_vec *bvec;
1299f9c78b2bSJens Axboe 
1300b282cc76SAl Viro 	if (!iov_iter_count(iter))
1301f9c78b2bSJens Axboe 		return ERR_PTR(-EINVAL);
1302f9c78b2bSJens Axboe 
1303b282cc76SAl Viro 	bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES));
1304f9c78b2bSJens Axboe 	if (!bio)
1305f9c78b2bSJens Axboe 		return ERR_PTR(-ENOMEM);
1306f9c78b2bSJens Axboe 
13070a0f1513SAl Viro 	while (iov_iter_count(iter)) {
1308629e42bcSAl Viro 		struct page **pages;
1309076098e5SAl Viro 		ssize_t bytes;
1310076098e5SAl Viro 		size_t offs, added = 0;
1311076098e5SAl Viro 		int npages;
1312f9c78b2bSJens Axboe 
13130a0f1513SAl Viro 		bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
1314076098e5SAl Viro 		if (unlikely(bytes <= 0)) {
1315076098e5SAl Viro 			ret = bytes ? bytes : -EFAULT;
1316f9c78b2bSJens Axboe 			goto out_unmap;
1317f9c78b2bSJens Axboe 		}
1318f9c78b2bSJens Axboe 
1319076098e5SAl Viro 		npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);
1320076098e5SAl Viro 
132198f0bc99SAl Viro 		if (unlikely(offs & queue_dma_alignment(q))) {
132298f0bc99SAl Viro 			ret = -EINVAL;
132398f0bc99SAl Viro 			j = 0;
132498f0bc99SAl Viro 		} else {
1325629e42bcSAl Viro 			for (j = 0; j < npages; j++) {
132698f0bc99SAl Viro 				struct page *page = pages[j];
1327076098e5SAl Viro 				unsigned int n = PAGE_SIZE - offs;
132895d78c28SVitaly Mayatskikh 				unsigned short prev_bi_vcnt = bio->bi_vcnt;
1329f9c78b2bSJens Axboe 
1330076098e5SAl Viro 				if (n > bytes)
1331076098e5SAl Viro 					n = bytes;
1332f9c78b2bSJens Axboe 
133398f0bc99SAl Viro 				if (!bio_add_pc_page(q, bio, page, n, offs))
1334f9c78b2bSJens Axboe 					break;
1335f9c78b2bSJens Axboe 
133695d78c28SVitaly Mayatskikh 				/*
133795d78c28SVitaly Mayatskikh 				 * check if the vector was merged with the
133895d78c28SVitaly Mayatskikh 				 * previous one; drop the page reference if so
133995d78c28SVitaly Mayatskikh 				 */
134095d78c28SVitaly Mayatskikh 				if (bio->bi_vcnt == prev_bi_vcnt)
134198f0bc99SAl Viro 					put_page(page);
134295d78c28SVitaly Mayatskikh 
1343076098e5SAl Viro 				added += n;
1344076098e5SAl Viro 				bytes -= n;
1345076098e5SAl Viro 				offs = 0;
1346f9c78b2bSJens Axboe 			}
13470a0f1513SAl Viro 			iov_iter_advance(iter, added);
134898f0bc99SAl Viro 		}
1349f9c78b2bSJens Axboe 		/*
1350f9c78b2bSJens Axboe 		 * release the pages we didn't map into the bio, if any
1351f9c78b2bSJens Axboe 		 */
1352629e42bcSAl Viro 		while (j < npages)
135309cbfeafSKirill A. Shutemov 			put_page(pages[j++]);
1354629e42bcSAl Viro 		kvfree(pages);
1355e2e115d1SAl Viro 		/* couldn't stuff something into bio? */
1356e2e115d1SAl Viro 		if (bytes)
1357e2e115d1SAl Viro 			break;
1358f9c78b2bSJens Axboe 	}
1359f9c78b2bSJens Axboe 
1360b7c44ed9SJens Axboe 	bio_set_flag(bio, BIO_USER_MAPPED);
136137f19e57SChristoph Hellwig 
136237f19e57SChristoph Hellwig 	/*
13635fad1b64SBart Van Assche 	 * subtle -- if bio_map_user_iov() ended up bouncing a bio,
136437f19e57SChristoph Hellwig 	 * it would normally disappear when its bi_end_io is run.
136537f19e57SChristoph Hellwig 	 * however, we need it for the unmap, so grab an extra
136637f19e57SChristoph Hellwig 	 * reference to it
136737f19e57SChristoph Hellwig 	 */
136837f19e57SChristoph Hellwig 	bio_get(bio);
1369f9c78b2bSJens Axboe 	return bio;
1370f9c78b2bSJens Axboe 
1371f9c78b2bSJens Axboe  out_unmap:
13722b04e8f6SAl Viro 	bio_for_each_segment_all(bvec, bio, j) {
13732b04e8f6SAl Viro 		put_page(bvec->bv_page);
1374f9c78b2bSJens Axboe 	}
1375f9c78b2bSJens Axboe 	bio_put(bio);
1376f9c78b2bSJens Axboe 	return ERR_PTR(ret);
1377f9c78b2bSJens Axboe }
1378f9c78b2bSJens Axboe 
1379f9c78b2bSJens Axboe static void __bio_unmap_user(struct bio *bio)
1380f9c78b2bSJens Axboe {
1381f9c78b2bSJens Axboe 	struct bio_vec *bvec;
1382f9c78b2bSJens Axboe 	int i;
1383f9c78b2bSJens Axboe 
1384f9c78b2bSJens Axboe 	/*
1385f9c78b2bSJens Axboe 	 * make sure we dirty pages we wrote to
1386f9c78b2bSJens Axboe 	 */
1387f9c78b2bSJens Axboe 	bio_for_each_segment_all(bvec, bio, i) {
1388f9c78b2bSJens Axboe 		if (bio_data_dir(bio) == READ)
1389f9c78b2bSJens Axboe 			set_page_dirty_lock(bvec->bv_page);
1390f9c78b2bSJens Axboe 
139109cbfeafSKirill A. Shutemov 		put_page(bvec->bv_page);
1392f9c78b2bSJens Axboe 	}
1393f9c78b2bSJens Axboe 
1394f9c78b2bSJens Axboe 	bio_put(bio);
1395f9c78b2bSJens Axboe }
1396f9c78b2bSJens Axboe 
1397f9c78b2bSJens Axboe /**
1398f9c78b2bSJens Axboe  *	bio_unmap_user	-	unmap a bio
1399f9c78b2bSJens Axboe  *	@bio:		the bio being unmapped
1400f9c78b2bSJens Axboe  *
14015fad1b64SBart Van Assche  *	Unmap a bio previously mapped by bio_map_user_iov(). Must be called from
14025fad1b64SBart Van Assche  *	process context.
1403f9c78b2bSJens Axboe  *
1404f9c78b2bSJens Axboe  *	bio_unmap_user() may sleep.
1405f9c78b2bSJens Axboe  */
1406f9c78b2bSJens Axboe void bio_unmap_user(struct bio *bio)
1407f9c78b2bSJens Axboe {
1408f9c78b2bSJens Axboe 	__bio_unmap_user(bio);
1409f9c78b2bSJens Axboe 	bio_put(bio);
1410f9c78b2bSJens Axboe }
1411f9c78b2bSJens Axboe 
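/*
 * Illustrative sketch, not part of the original file: mapping user pages
 * with bio_map_user_iov() and releasing them with bio_unmap_user().  The
 * function name is hypothetical; the extra reference bio_map_user_iov()
 * takes is what keeps the bio alive until the unmap.
 */
static __maybe_unused int example_map_user_io(struct request_queue *q,
					      struct iov_iter *iter)
{
	struct bio *bio = bio_map_user_iov(q, iter, GFP_KERNEL);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* ... issue the bio and wait for completion here ... */

	/* dirties the pages of a READ, then drops the page and bio refs */
	bio_unmap_user(bio);
	return 0;
}
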
14124246a0b6SChristoph Hellwig static void bio_map_kern_endio(struct bio *bio)
1413f9c78b2bSJens Axboe {
1414f9c78b2bSJens Axboe 	bio_put(bio);
1415f9c78b2bSJens Axboe }
1416f9c78b2bSJens Axboe 
141775c72b83SChristoph Hellwig /**
141875c72b83SChristoph Hellwig  *	bio_map_kern	-	map kernel address into bio
141975c72b83SChristoph Hellwig  *	@q: the struct request_queue for the bio
142075c72b83SChristoph Hellwig  *	@data: pointer to buffer to map
142175c72b83SChristoph Hellwig  *	@len: length in bytes
142275c72b83SChristoph Hellwig  *	@gfp_mask: allocation flags for bio allocation
142375c72b83SChristoph Hellwig  *
142475c72b83SChristoph Hellwig  *	Map the kernel address into a bio suitable for io to a block
142575c72b83SChristoph Hellwig  *	device. Returns an error pointer in case of error.
142675c72b83SChristoph Hellwig  */
142775c72b83SChristoph Hellwig struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
142875c72b83SChristoph Hellwig 			 gfp_t gfp_mask)
1429f9c78b2bSJens Axboe {
1430f9c78b2bSJens Axboe 	unsigned long kaddr = (unsigned long)data;
1431f9c78b2bSJens Axboe 	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1432f9c78b2bSJens Axboe 	unsigned long start = kaddr >> PAGE_SHIFT;
1433f9c78b2bSJens Axboe 	const int nr_pages = end - start;
1434f9c78b2bSJens Axboe 	int offset, i;
1435f9c78b2bSJens Axboe 	struct bio *bio;
1436f9c78b2bSJens Axboe 
1437f9c78b2bSJens Axboe 	bio = bio_kmalloc(gfp_mask, nr_pages);
1438f9c78b2bSJens Axboe 	if (!bio)
1439f9c78b2bSJens Axboe 		return ERR_PTR(-ENOMEM);
1440f9c78b2bSJens Axboe 
1441f9c78b2bSJens Axboe 	offset = offset_in_page(kaddr);
1442f9c78b2bSJens Axboe 	for (i = 0; i < nr_pages; i++) {
1443f9c78b2bSJens Axboe 		unsigned int bytes = PAGE_SIZE - offset;
1444f9c78b2bSJens Axboe 
1445f9c78b2bSJens Axboe 		if (len <= 0)
1446f9c78b2bSJens Axboe 			break;
1447f9c78b2bSJens Axboe 
1448f9c78b2bSJens Axboe 		if (bytes > len)
1449f9c78b2bSJens Axboe 			bytes = len;
1450f9c78b2bSJens Axboe 
1451f9c78b2bSJens Axboe 		if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
145275c72b83SChristoph Hellwig 				    offset) < bytes) {
145375c72b83SChristoph Hellwig 			/* we don't support partial mappings */
145475c72b83SChristoph Hellwig 			bio_put(bio);
145575c72b83SChristoph Hellwig 			return ERR_PTR(-EINVAL);
145675c72b83SChristoph Hellwig 		}
1457f9c78b2bSJens Axboe 
1458f9c78b2bSJens Axboe 		data += bytes;
1459f9c78b2bSJens Axboe 		len -= bytes;
1460f9c78b2bSJens Axboe 		offset = 0;
1461f9c78b2bSJens Axboe 	}
1462f9c78b2bSJens Axboe 
1463f9c78b2bSJens Axboe 	bio->bi_end_io = bio_map_kern_endio;
1464f9c78b2bSJens Axboe 	return bio;
1465f9c78b2bSJens Axboe }
1466f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_map_kern);
1467f9c78b2bSJens Axboe 
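/*
 * Illustrative sketch, not part of the original file: bio_map_kern()
 * requires a virtually contiguous buffer whose pages can be mapped
 * directly, e.g. from kmalloc().  The helper name is hypothetical.
 */
static __maybe_unused struct bio *example_map_kmalloc_buf(
		struct request_queue *q, unsigned int len)
{
	void *buf = kmalloc(len, GFP_KERNEL);
	struct bio *bio;

	if (!buf)
		return ERR_PTR(-ENOMEM);

	bio = bio_map_kern(q, buf, len, GFP_KERNEL);
	if (IS_ERR(bio))
		kfree(buf);
	/* on success, free buf only after the bio has completed */
	return bio;
}
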
14684246a0b6SChristoph Hellwig static void bio_copy_kern_endio(struct bio *bio)
1469f9c78b2bSJens Axboe {
14701dfa0f68SChristoph Hellwig 	bio_free_pages(bio);
14711dfa0f68SChristoph Hellwig 	bio_put(bio);
14721dfa0f68SChristoph Hellwig }
14731dfa0f68SChristoph Hellwig 
14744246a0b6SChristoph Hellwig static void bio_copy_kern_endio_read(struct bio *bio)
14751dfa0f68SChristoph Hellwig {
147642d2683aSChristoph Hellwig 	char *p = bio->bi_private;
14771dfa0f68SChristoph Hellwig 	struct bio_vec *bvec;
1478f9c78b2bSJens Axboe 	int i;
1479f9c78b2bSJens Axboe 
1480f9c78b2bSJens Axboe 	bio_for_each_segment_all(bvec, bio, i) {
14811dfa0f68SChristoph Hellwig 		memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
1482f9c78b2bSJens Axboe 		p += bvec->bv_len;
1483f9c78b2bSJens Axboe 	}
1484f9c78b2bSJens Axboe 
14854246a0b6SChristoph Hellwig 	bio_copy_kern_endio(bio);
1486f9c78b2bSJens Axboe }
1487f9c78b2bSJens Axboe 
1488f9c78b2bSJens Axboe /**
1489f9c78b2bSJens Axboe  *	bio_copy_kern	-	copy kernel address into bio
1490f9c78b2bSJens Axboe  *	@q: the struct request_queue for the bio
1491f9c78b2bSJens Axboe  *	@data: pointer to buffer to copy
1492f9c78b2bSJens Axboe  *	@len: length in bytes
1493f9c78b2bSJens Axboe  *	@gfp_mask: allocation flags for bio and page allocation
1494f9c78b2bSJens Axboe  *	@reading: data direction is READ
1495f9c78b2bSJens Axboe  *
1496f9c78b2bSJens Axboe  *	Copy the kernel address into a bio suitable for io to a block
1497f9c78b2bSJens Axboe  *	device. Returns an error pointer in case of error.
1498f9c78b2bSJens Axboe  */
1499f9c78b2bSJens Axboe struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
1500f9c78b2bSJens Axboe 			  gfp_t gfp_mask, int reading)
1501f9c78b2bSJens Axboe {
150242d2683aSChristoph Hellwig 	unsigned long kaddr = (unsigned long)data;
150342d2683aSChristoph Hellwig 	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
150442d2683aSChristoph Hellwig 	unsigned long start = kaddr >> PAGE_SHIFT;
150542d2683aSChristoph Hellwig 	struct bio *bio;
1506f9c78b2bSJens Axboe 	void *p = data;
15071dfa0f68SChristoph Hellwig 	int nr_pages = 0;
1508f9c78b2bSJens Axboe 
150942d2683aSChristoph Hellwig 	/*
151042d2683aSChristoph Hellwig 	 * Overflow, abort
151142d2683aSChristoph Hellwig 	 */
151242d2683aSChristoph Hellwig 	if (end < start)
151342d2683aSChristoph Hellwig 		return ERR_PTR(-EINVAL);
1514f9c78b2bSJens Axboe 
151542d2683aSChristoph Hellwig 	nr_pages = end - start;
151642d2683aSChristoph Hellwig 	bio = bio_kmalloc(gfp_mask, nr_pages);
151742d2683aSChristoph Hellwig 	if (!bio)
151842d2683aSChristoph Hellwig 		return ERR_PTR(-ENOMEM);
151942d2683aSChristoph Hellwig 
152042d2683aSChristoph Hellwig 	while (len) {
152142d2683aSChristoph Hellwig 		struct page *page;
152242d2683aSChristoph Hellwig 		unsigned int bytes = PAGE_SIZE;
152342d2683aSChristoph Hellwig 
152442d2683aSChristoph Hellwig 		if (bytes > len)
152542d2683aSChristoph Hellwig 			bytes = len;
152642d2683aSChristoph Hellwig 
152742d2683aSChristoph Hellwig 		page = alloc_page(q->bounce_gfp | gfp_mask);
152842d2683aSChristoph Hellwig 		if (!page)
152942d2683aSChristoph Hellwig 			goto cleanup;
153042d2683aSChristoph Hellwig 
153142d2683aSChristoph Hellwig 		if (!reading)
153242d2683aSChristoph Hellwig 			memcpy(page_address(page), p, bytes);
153342d2683aSChristoph Hellwig 
153442d2683aSChristoph Hellwig 		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
153542d2683aSChristoph Hellwig 			break;
153642d2683aSChristoph Hellwig 
153742d2683aSChristoph Hellwig 		len -= bytes;
153842d2683aSChristoph Hellwig 		p += bytes;
1539f9c78b2bSJens Axboe 	}
1540f9c78b2bSJens Axboe 
15411dfa0f68SChristoph Hellwig 	if (reading) {
15421dfa0f68SChristoph Hellwig 		bio->bi_end_io = bio_copy_kern_endio_read;
154342d2683aSChristoph Hellwig 		bio->bi_private = data;
15441dfa0f68SChristoph Hellwig 	} else {
1545f9c78b2bSJens Axboe 		bio->bi_end_io = bio_copy_kern_endio;
15461dfa0f68SChristoph Hellwig 	}
15471dfa0f68SChristoph Hellwig 
1548f9c78b2bSJens Axboe 	return bio;
154942d2683aSChristoph Hellwig 
155042d2683aSChristoph Hellwig cleanup:
15511dfa0f68SChristoph Hellwig 	bio_free_pages(bio);
155242d2683aSChristoph Hellwig 	bio_put(bio);
155342d2683aSChristoph Hellwig 	return ERR_PTR(-ENOMEM);
1554f9c78b2bSJens Axboe }
1555f9c78b2bSJens Axboe 
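/*
 * Illustrative sketch, not part of the original file: bio_copy_kern()
 * bounces a kernel buffer that must not be mapped directly (for example
 * stack or vmalloc memory).  With reading=1 the device fills the bounce
 * pages and bio_copy_kern_endio_read() copies them back into @data on
 * completion.  The helper name is hypothetical.
 */
static __maybe_unused struct bio *example_copy_kern_read(
		struct request_queue *q, void *data, unsigned int len)
{
	return bio_copy_kern(q, data, len, GFP_KERNEL, 1);
}
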
1556f9c78b2bSJens Axboe /*
1557f9c78b2bSJens Axboe  * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
1558f9c78b2bSJens Axboe  * for performing direct-IO in BIOs.
1559f9c78b2bSJens Axboe  *
1560f9c78b2bSJens Axboe  * The problem is that we cannot run set_page_dirty() from interrupt context
1561f9c78b2bSJens Axboe  * because the required locks are not interrupt-safe.  So what we can do is to
1562f9c78b2bSJens Axboe  * mark the pages dirty _before_ performing IO.  And in interrupt context,
1563f9c78b2bSJens Axboe  * check that the pages are still dirty.   If so, fine.  If not, redirty them
1564f9c78b2bSJens Axboe  * in process context.
1565f9c78b2bSJens Axboe  *
1566f9c78b2bSJens Axboe  * We special-case compound pages here: normally this means reads into hugetlb
1567f9c78b2bSJens Axboe  * pages.  The logic in here doesn't really work right for compound pages
1568f9c78b2bSJens Axboe  * because the VM does not uniformly chase down the head page in all cases.
1569f9c78b2bSJens Axboe  * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
1570f9c78b2bSJens Axboe  * handle them at all.  So we skip compound pages here at an early stage.
1571f9c78b2bSJens Axboe  *
1572f9c78b2bSJens Axboe  * Note that this code is very hard to test under normal circumstances because
1573f9c78b2bSJens Axboe  * direct-io pins the pages with get_user_pages().  This makes
1574f9c78b2bSJens Axboe  * is_page_cache_freeable return false, and the VM will not clean the pages.
1575f9c78b2bSJens Axboe  * But other code (e.g., flusher threads) could clean the pages if they are
1576f9c78b2bSJens Axboe  * mapped pagecache.
1577f9c78b2bSJens Axboe  *
1578f9c78b2bSJens Axboe  * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
1579f9c78b2bSJens Axboe  * deferred bio dirtying paths.
1580f9c78b2bSJens Axboe  */
1581f9c78b2bSJens Axboe 
1582f9c78b2bSJens Axboe /*
1583f9c78b2bSJens Axboe  * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1584f9c78b2bSJens Axboe  */
1585f9c78b2bSJens Axboe void bio_set_pages_dirty(struct bio *bio)
1586f9c78b2bSJens Axboe {
1587f9c78b2bSJens Axboe 	struct bio_vec *bvec;
1588f9c78b2bSJens Axboe 	int i;
1589f9c78b2bSJens Axboe 
1590f9c78b2bSJens Axboe 	bio_for_each_segment_all(bvec, bio, i) {
15913bb50983SChristoph Hellwig 		if (!PageCompound(bvec->bv_page))
15923bb50983SChristoph Hellwig 			set_page_dirty_lock(bvec->bv_page);
1593f9c78b2bSJens Axboe 	}
1594f9c78b2bSJens Axboe }
15951900fcc4SKent Overstreet EXPORT_SYMBOL_GPL(bio_set_pages_dirty);
1596f9c78b2bSJens Axboe 
1597f9c78b2bSJens Axboe static void bio_release_pages(struct bio *bio)
1598f9c78b2bSJens Axboe {
1599f9c78b2bSJens Axboe 	struct bio_vec *bvec;
1600f9c78b2bSJens Axboe 	int i;
1601f9c78b2bSJens Axboe 
160224d5493fSChristoph Hellwig 	bio_for_each_segment_all(bvec, bio, i)
160324d5493fSChristoph Hellwig 		put_page(bvec->bv_page);
1604f9c78b2bSJens Axboe }
1605f9c78b2bSJens Axboe 
1606f9c78b2bSJens Axboe /*
1607f9c78b2bSJens Axboe  * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
1608f9c78b2bSJens Axboe  * If they are, then fine.  If, however, some pages are clean then they must
1609f9c78b2bSJens Axboe  * have been written out during the direct-IO read.  So we defer the BIO to a
161024d5493fSChristoph Hellwig  * workqueue and re-dirty the pages in process context.
1611f9c78b2bSJens Axboe  *
1612f9c78b2bSJens Axboe  * It is expected that bio_check_pages_dirty() will wholly own the BIO from
1613ea1754a0SKirill A. Shutemov  * here on.  It will run one put_page() against each page and will run one
1614ea1754a0SKirill A. Shutemov  * bio_put() against the BIO.
1615f9c78b2bSJens Axboe  */
1616f9c78b2bSJens Axboe 
1617f9c78b2bSJens Axboe static void bio_dirty_fn(struct work_struct *work);
1618f9c78b2bSJens Axboe 
1619f9c78b2bSJens Axboe static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
1620f9c78b2bSJens Axboe static DEFINE_SPINLOCK(bio_dirty_lock);
1621f9c78b2bSJens Axboe static struct bio *bio_dirty_list;
1622f9c78b2bSJens Axboe 
1623f9c78b2bSJens Axboe /*
1624f9c78b2bSJens Axboe  * This runs in process context
1625f9c78b2bSJens Axboe  */
1626f9c78b2bSJens Axboe static void bio_dirty_fn(struct work_struct *work)
1627f9c78b2bSJens Axboe {
162824d5493fSChristoph Hellwig 	struct bio *bio, *next;
1629f9c78b2bSJens Axboe 
163024d5493fSChristoph Hellwig 	spin_lock_irq(&bio_dirty_lock);
163124d5493fSChristoph Hellwig 	next = bio_dirty_list;
1632f9c78b2bSJens Axboe 	bio_dirty_list = NULL;
163324d5493fSChristoph Hellwig 	spin_unlock_irq(&bio_dirty_lock);
1634f9c78b2bSJens Axboe 
163524d5493fSChristoph Hellwig 	while ((bio = next) != NULL) {
163624d5493fSChristoph Hellwig 		next = bio->bi_private;
1637f9c78b2bSJens Axboe 
1638f9c78b2bSJens Axboe 		bio_set_pages_dirty(bio);
1639f9c78b2bSJens Axboe 		bio_release_pages(bio);
1640f9c78b2bSJens Axboe 		bio_put(bio);
1641f9c78b2bSJens Axboe 	}
1642f9c78b2bSJens Axboe }
1643f9c78b2bSJens Axboe 
1644f9c78b2bSJens Axboe void bio_check_pages_dirty(struct bio *bio)
1645f9c78b2bSJens Axboe {
1646f9c78b2bSJens Axboe 	struct bio_vec *bvec;
164724d5493fSChristoph Hellwig 	unsigned long flags;
1648f9c78b2bSJens Axboe 	int i;
1649f9c78b2bSJens Axboe 
1650f9c78b2bSJens Axboe 	bio_for_each_segment_all(bvec, bio, i) {
165124d5493fSChristoph Hellwig 		if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
165224d5493fSChristoph Hellwig 			goto defer;
1653f9c78b2bSJens Axboe 	}
1654f9c78b2bSJens Axboe 
165524d5493fSChristoph Hellwig 	bio_release_pages(bio);
165624d5493fSChristoph Hellwig 	bio_put(bio);
165724d5493fSChristoph Hellwig 	return;
165824d5493fSChristoph Hellwig defer:
1659f9c78b2bSJens Axboe 	spin_lock_irqsave(&bio_dirty_lock, flags);
1660f9c78b2bSJens Axboe 	bio->bi_private = bio_dirty_list;
1661f9c78b2bSJens Axboe 	bio_dirty_list = bio;
1662f9c78b2bSJens Axboe 	spin_unlock_irqrestore(&bio_dirty_lock, flags);
1663f9c78b2bSJens Axboe 	schedule_work(&bio_dirty_work);
1664f9c78b2bSJens Axboe }
16651900fcc4SKent Overstreet EXPORT_SYMBOL_GPL(bio_check_pages_dirty);
1666f9c78b2bSJens Axboe 
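/*
 * Illustrative sketch, not part of the original file: the direct-IO read
 * pattern described in the comment block above.  Function names are
 * hypothetical and submission is assumed to happen in process context.
 */
static __maybe_unused void example_dio_read_submit(struct bio *bio)
{
	/* dirty the pages _before_ the read is issued */
	bio_set_pages_dirty(bio);
	submit_bio(bio);
}

static __maybe_unused void example_dio_read_end_io(struct bio *bio)
{
	/*
	 * called from ->bi_end_io: any page cleaned during the read is
	 * re-dirtied from a workqueue, then the page and bio references
	 * are dropped.
	 */
	bio_check_pages_dirty(bio);
}
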
1667ddcf35d3SMichael Callahan void generic_start_io_acct(struct request_queue *q, int op,
1668d62e26b3SJens Axboe 			   unsigned long sectors, struct hd_struct *part)
1669394ffa50SGu Zheng {
1670ddcf35d3SMichael Callahan 	const int sgrp = op_stat_group(op);
1671394ffa50SGu Zheng 	int cpu = part_stat_lock();
1672394ffa50SGu Zheng 
1673d62e26b3SJens Axboe 	part_round_stats(q, cpu, part);
1674ddcf35d3SMichael Callahan 	part_stat_inc(cpu, part, ios[sgrp]);
1675ddcf35d3SMichael Callahan 	part_stat_add(cpu, part, sectors[sgrp], sectors);
1676ddcf35d3SMichael Callahan 	part_inc_in_flight(q, part, op_is_write(op));
1677394ffa50SGu Zheng 
1678394ffa50SGu Zheng 	part_stat_unlock();
1679394ffa50SGu Zheng }
1680394ffa50SGu Zheng EXPORT_SYMBOL(generic_start_io_acct);
1681394ffa50SGu Zheng 
1682ddcf35d3SMichael Callahan void generic_end_io_acct(struct request_queue *q, int req_op,
1683d62e26b3SJens Axboe 			 struct hd_struct *part, unsigned long start_time)
1684394ffa50SGu Zheng {
1685394ffa50SGu Zheng 	unsigned long duration = jiffies - start_time;
1686ddcf35d3SMichael Callahan 	const int sgrp = op_stat_group(req_op);
1687394ffa50SGu Zheng 	int cpu = part_stat_lock();
1688394ffa50SGu Zheng 
1689b57e99b4SOmar Sandoval 	part_stat_add(cpu, part, nsecs[sgrp], jiffies_to_nsecs(duration));
1690d62e26b3SJens Axboe 	part_round_stats(q, cpu, part);
1691ddcf35d3SMichael Callahan 	part_dec_in_flight(q, part, op_is_write(req_op));
1692394ffa50SGu Zheng 
1693394ffa50SGu Zheng 	part_stat_unlock();
1694394ffa50SGu Zheng }
1695394ffa50SGu Zheng EXPORT_SYMBOL(generic_end_io_acct);
1696394ffa50SGu Zheng 
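/*
 * Illustrative sketch, not part of the original file: how a bio-based
 * driver might bracket an I/O with the generic accounting helpers.  The
 * function name and the hd_struct argument are hypothetical.
 */
static __maybe_unused void example_account_io(struct request_queue *q,
					      struct bio *bio,
					      struct hd_struct *part)
{
	unsigned long start_time = jiffies;

	generic_start_io_acct(q, bio_op(bio), bio_sectors(bio), part);
	/* ... perform the I/O ... */
	generic_end_io_acct(q, bio_op(bio), part, start_time);
}
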
1697f9c78b2bSJens Axboe #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
1698f9c78b2bSJens Axboe void bio_flush_dcache_pages(struct bio *bi)
1699f9c78b2bSJens Axboe {
1700f9c78b2bSJens Axboe 	struct bio_vec bvec;
1701f9c78b2bSJens Axboe 	struct bvec_iter iter;
1702f9c78b2bSJens Axboe 
1703f9c78b2bSJens Axboe 	bio_for_each_segment(bvec, bi, iter)
1704f9c78b2bSJens Axboe 		flush_dcache_page(bvec.bv_page);
1705f9c78b2bSJens Axboe }
1706f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_flush_dcache_pages);
1707f9c78b2bSJens Axboe #endif
1708f9c78b2bSJens Axboe 
1709c4cf5261SJens Axboe static inline bool bio_remaining_done(struct bio *bio)
1710c4cf5261SJens Axboe {
1711c4cf5261SJens Axboe 	/*
1712c4cf5261SJens Axboe 	 * If we're not chaining, then ->__bi_remaining is always 1 and
1713c4cf5261SJens Axboe 	 * we always end io on the first invocation.
1714c4cf5261SJens Axboe 	 */
1715c4cf5261SJens Axboe 	if (!bio_flagged(bio, BIO_CHAIN))
1716c4cf5261SJens Axboe 		return true;
1717c4cf5261SJens Axboe 
1718c4cf5261SJens Axboe 	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
1719c4cf5261SJens Axboe 
1720326e1dbbSMike Snitzer 	if (atomic_dec_and_test(&bio->__bi_remaining)) {
1721b7c44ed9SJens Axboe 		bio_clear_flag(bio, BIO_CHAIN);
1722c4cf5261SJens Axboe 		return true;
1723326e1dbbSMike Snitzer 	}
1724c4cf5261SJens Axboe 
1725c4cf5261SJens Axboe 	return false;
1726c4cf5261SJens Axboe }
1727c4cf5261SJens Axboe 
1728f9c78b2bSJens Axboe /**
1729f9c78b2bSJens Axboe  * bio_endio - end I/O on a bio
1730f9c78b2bSJens Axboe  * @bio:	bio
1731f9c78b2bSJens Axboe  *
1732f9c78b2bSJens Axboe  * Description:
17334246a0b6SChristoph Hellwig  *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
17344246a0b6SChristoph Hellwig  *   way to end I/O on a bio. No one should call bi_end_io() directly on a
17354246a0b6SChristoph Hellwig  *   bio unless they own it and thus know that it has an end_io function.
1736fbbaf700SNeilBrown  *
1737fbbaf700SNeilBrown  *   bio_endio() can be called several times on a bio that has been chained
1738fbbaf700SNeilBrown  *   using bio_chain().  The ->bi_end_io() function will only be called the
1739fbbaf700SNeilBrown  *   last time.  At this point the BLK_TA_COMPLETE tracing event will be
1740fbbaf700SNeilBrown  *   generated if BIO_TRACE_COMPLETION is set.
1741f9c78b2bSJens Axboe  **/
17424246a0b6SChristoph Hellwig void bio_endio(struct bio *bio)
1743f9c78b2bSJens Axboe {
1744ba8c6967SChristoph Hellwig again:
17452b885517SChristoph Hellwig 	if (!bio_remaining_done(bio))
1746ba8c6967SChristoph Hellwig 		return;
17477c20f116SChristoph Hellwig 	if (!bio_integrity_endio(bio))
17487c20f116SChristoph Hellwig 		return;
1749f9c78b2bSJens Axboe 
175067b42d0bSJosef Bacik 	if (bio->bi_disk)
175167b42d0bSJosef Bacik 		rq_qos_done_bio(bio->bi_disk->queue, bio);
175267b42d0bSJosef Bacik 
1753f9c78b2bSJens Axboe 	/*
1754ba8c6967SChristoph Hellwig 	 * Need to have a real endio function for chained bios, otherwise
1755ba8c6967SChristoph Hellwig 	 * various corner cases will break (like stacking block devices that
1756ba8c6967SChristoph Hellwig 	 * save/restore bi_end_io) - however, we want to avoid unbounded
1757ba8c6967SChristoph Hellwig 	 * recursion and blowing the stack. Tail call optimization would
1758ba8c6967SChristoph Hellwig 	 * handle this, but compiling with frame pointers also disables
1759ba8c6967SChristoph Hellwig 	 * gcc's sibling call optimization.
1760f9c78b2bSJens Axboe 	 */
1761f9c78b2bSJens Axboe 	if (bio->bi_end_io == bio_chain_endio) {
176238f8baaeSChristoph Hellwig 		bio = __bio_chain_endio(bio);
1763ba8c6967SChristoph Hellwig 		goto again;
1764ba8c6967SChristoph Hellwig 	}
1765ba8c6967SChristoph Hellwig 
176674d46992SChristoph Hellwig 	if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
176774d46992SChristoph Hellwig 		trace_block_bio_complete(bio->bi_disk->queue, bio,
1768a462b950SBart Van Assche 					 blk_status_to_errno(bio->bi_status));
1769fbbaf700SNeilBrown 		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
1770fbbaf700SNeilBrown 	}
1771fbbaf700SNeilBrown 
17729e234eeaSShaohua Li 	blk_throtl_bio_endio(bio);
1773b222dd2fSShaohua Li 	/* release cgroup info */
1774b222dd2fSShaohua Li 	bio_uninit(bio);
1775f9c78b2bSJens Axboe 	if (bio->bi_end_io)
17764246a0b6SChristoph Hellwig 		bio->bi_end_io(bio);
1777f9c78b2bSJens Axboe }
1778f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_endio);
1779f9c78b2bSJens Axboe 
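/*
 * Illustrative sketch, not part of the original file: a driver-style
 * completion that records the status and ends the bio.  The function
 * name is hypothetical.
 */
static __maybe_unused void example_complete_bio(struct bio *bio,
						blk_status_t status)
{
	bio->bi_status = status;
	bio_endio(bio);		/* honors chaining via __bi_remaining */
}
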
1780f9c78b2bSJens Axboe /**
1781f9c78b2bSJens Axboe  * bio_split - split a bio
1782f9c78b2bSJens Axboe  * @bio:	bio to split
1783f9c78b2bSJens Axboe  * @sectors:	number of sectors to split from the front of @bio
1784f9c78b2bSJens Axboe  * @gfp:	gfp mask
1785f9c78b2bSJens Axboe  * @bs:		bio set to allocate from
1786f9c78b2bSJens Axboe  *
1787f9c78b2bSJens Axboe  * Allocates and returns a new bio which represents @sectors from the start of
1788f9c78b2bSJens Axboe  * @bio, and updates @bio to represent the remaining sectors.
1789f9c78b2bSJens Axboe  *
1790f3f5da62SMartin K. Petersen  * Unless this is a discard request, the newly allocated bio will point
1791f3f5da62SMartin K. Petersen  * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
1792f3f5da62SMartin K. Petersen  * @bio is not freed before the split.
1793f9c78b2bSJens Axboe  */
1794f9c78b2bSJens Axboe struct bio *bio_split(struct bio *bio, int sectors,
1795f9c78b2bSJens Axboe 		      gfp_t gfp, struct bio_set *bs)
1796f9c78b2bSJens Axboe {
1797f341a4d3SMikulas Patocka 	struct bio *split;
1798f9c78b2bSJens Axboe 
1799f9c78b2bSJens Axboe 	BUG_ON(sectors <= 0);
1800f9c78b2bSJens Axboe 	BUG_ON(sectors >= bio_sectors(bio));
1801f9c78b2bSJens Axboe 
1802f9c78b2bSJens Axboe 	split = bio_clone_fast(bio, gfp, bs);
1803f9c78b2bSJens Axboe 	if (!split)
1804f9c78b2bSJens Axboe 		return NULL;
1805f9c78b2bSJens Axboe 
1806f9c78b2bSJens Axboe 	split->bi_iter.bi_size = sectors << 9;
1807f9c78b2bSJens Axboe 
1808f9c78b2bSJens Axboe 	if (bio_integrity(split))
1809fbd08e76SDmitry Monakhov 		bio_integrity_trim(split);
1810f9c78b2bSJens Axboe 
1811f9c78b2bSJens Axboe 	bio_advance(bio, split->bi_iter.bi_size);
1812f9c78b2bSJens Axboe 
1813fbbaf700SNeilBrown 	if (bio_flagged(bio, BIO_TRACE_COMPLETION))
181420d59023SGoldwyn Rodrigues 		bio_set_flag(split, BIO_TRACE_COMPLETION);
1815fbbaf700SNeilBrown 
1816f9c78b2bSJens Axboe 	return split;
1817f9c78b2bSJens Axboe }
1818f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_split);
1819f9c78b2bSJens Axboe 
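/*
 * Illustrative sketch, not part of the original file: carving a large
 * bio into max_sectors-sized pieces with bio_split() and bio_chain(),
 * similar in spirit to what blk_queue_split() does.  The function name
 * is hypothetical.
 */
static __maybe_unused void example_split_and_submit(struct bio *bio,
						    int max_sectors)
{
	while (bio_sectors(bio) > max_sectors) {
		struct bio *split = bio_split(bio, max_sectors, GFP_NOIO,
					      &fs_bio_set);

		if (!split)
			break;	/* allocation failed; submit what's left */

		bio_chain(split, bio);	/* parent completes after child */
		submit_bio(split);
	}
	submit_bio(bio);		/* the remainder */
}
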
1820f9c78b2bSJens Axboe /**
1821f9c78b2bSJens Axboe  * bio_trim - trim a bio
1822f9c78b2bSJens Axboe  * @bio:	bio to trim
1823f9c78b2bSJens Axboe  * @offset:	number of sectors to trim from the front of @bio
1824f9c78b2bSJens Axboe  * @size:	size we want to trim @bio to, in sectors
1825f9c78b2bSJens Axboe  */
1826f9c78b2bSJens Axboe void bio_trim(struct bio *bio, int offset, int size)
1827f9c78b2bSJens Axboe {
1828f9c78b2bSJens Axboe 	/* 'bio' is a cloned bio which we need to trim to match
1829f9c78b2bSJens Axboe 	 * the given offset and size.
1830f9c78b2bSJens Axboe 	 */
1831f9c78b2bSJens Axboe 
1832f9c78b2bSJens Axboe 	size <<= 9;
1833f9c78b2bSJens Axboe 	if (offset == 0 && size == bio->bi_iter.bi_size)
1834f9c78b2bSJens Axboe 		return;
1835f9c78b2bSJens Axboe 
1836b7c44ed9SJens Axboe 	bio_clear_flag(bio, BIO_SEG_VALID);
1837f9c78b2bSJens Axboe 
1838f9c78b2bSJens Axboe 	bio_advance(bio, offset << 9);
1839f9c78b2bSJens Axboe 
1840f9c78b2bSJens Axboe 	bio->bi_iter.bi_size = size;
1841376a78abSDmitry Monakhov 
1842376a78abSDmitry Monakhov 	if (bio_integrity(bio))
1843fbd08e76SDmitry Monakhov 		bio_integrity_trim(bio);
1844376a78abSDmitry Monakhov 
1845f9c78b2bSJens Axboe }
1846f9c78b2bSJens Axboe EXPORT_SYMBOL_GPL(bio_trim);
1847f9c78b2bSJens Axboe 
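/*
 * Illustrative sketch, not part of the original file: cloning a bio and
 * trimming the clone to a sector range, a common stacking-driver
 * pattern.  The function name is hypothetical and the range is assumed
 * to lie within the original bio.
 */
static __maybe_unused struct bio *example_clone_range(struct bio *bio,
						      int offset, int sectors)
{
	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, &fs_bio_set);

	if (clone)
		bio_trim(clone, offset, sectors);
	return clone;
}
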
1848f9c78b2bSJens Axboe /*
1849f9c78b2bSJens Axboe  * create memory pools for biovecs in a bio_set.
1850f9c78b2bSJens Axboe  * use the global biovec slabs created for general use.
1851f9c78b2bSJens Axboe  */
18528aa6ba2fSKent Overstreet int biovec_init_pool(mempool_t *pool, int pool_entries)
1853f9c78b2bSJens Axboe {
1854ed996a52SChristoph Hellwig 	struct biovec_slab *bp = bvec_slabs + BVEC_POOL_MAX;
1855f9c78b2bSJens Axboe 
18568aa6ba2fSKent Overstreet 	return mempool_init_slab_pool(pool, pool_entries, bp->slab);
1857f9c78b2bSJens Axboe }
1858f9c78b2bSJens Axboe 
1859917a38c7SKent Overstreet /*
1860917a38c7SKent Overstreet  * bioset_exit - exit a bioset initialized with bioset_init()
1861917a38c7SKent Overstreet  *
1862917a38c7SKent Overstreet  * May be called on a zeroed but uninitialized bioset (i.e. allocated with
1863917a38c7SKent Overstreet  * kzalloc()).
1864917a38c7SKent Overstreet  */
1865917a38c7SKent Overstreet void bioset_exit(struct bio_set *bs)
1866f9c78b2bSJens Axboe {
1867f9c78b2bSJens Axboe 	if (bs->rescue_workqueue)
1868f9c78b2bSJens Axboe 		destroy_workqueue(bs->rescue_workqueue);
1869917a38c7SKent Overstreet 	bs->rescue_workqueue = NULL;
1870f9c78b2bSJens Axboe 
18718aa6ba2fSKent Overstreet 	mempool_exit(&bs->bio_pool);
18728aa6ba2fSKent Overstreet 	mempool_exit(&bs->bvec_pool);
1873f9c78b2bSJens Axboe 
1874f9c78b2bSJens Axboe 	bioset_integrity_free(bs);
1875917a38c7SKent Overstreet 	if (bs->bio_slab)
1876f9c78b2bSJens Axboe 		bio_put_slab(bs);
1877917a38c7SKent Overstreet 	bs->bio_slab = NULL;
1878917a38c7SKent Overstreet }
1879917a38c7SKent Overstreet EXPORT_SYMBOL(bioset_exit);
1880f9c78b2bSJens Axboe 
1881011067b0SNeilBrown /**
1882917a38c7SKent Overstreet  * bioset_init - Initialize a bio_set
1883dad08527SKent Overstreet  * @bs:		pool to initialize
1884917a38c7SKent Overstreet  * @pool_size:	Number of bio and bio_vecs to cache in the mempool
1885917a38c7SKent Overstreet  * @front_pad:	Number of bytes to allocate in front of the returned bio
1886917a38c7SKent Overstreet  * @flags:	Flags to modify behavior, currently %BIOSET_NEED_BVECS
1887917a38c7SKent Overstreet  *              and %BIOSET_NEED_RESCUER
1888917a38c7SKent Overstreet  *
1889dad08527SKent Overstreet  * Description:
1890dad08527SKent Overstreet  *    Set up a bio_set to be used with bio_alloc_bioset(). Allows the caller
1891dad08527SKent Overstreet  *    to ask for a number of bytes to be allocated in front of the bio.
1892dad08527SKent Overstreet  *    Front pad allocation is useful for embedding the bio inside
1893dad08527SKent Overstreet  *    another structure, to avoid allocating extra data to go with the bio.
1894dad08527SKent Overstreet  *    Note that the bio must be embedded at the END of that structure always,
1895dad08527SKent Overstreet  *    or things will break badly.
1896dad08527SKent Overstreet  *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
1897dad08527SKent Overstreet  *    for allocating iovecs.  This pool is not needed e.g. for bio_clone_fast().
1898dad08527SKent Overstreet  *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used to
1899dad08527SKent Overstreet  *    dispatch queued requests when the mempool runs out of space.
1900dad08527SKent Overstreet  *
1901917a38c7SKent Overstreet  */
1902917a38c7SKent Overstreet int bioset_init(struct bio_set *bs,
1903917a38c7SKent Overstreet 		unsigned int pool_size,
1904917a38c7SKent Overstreet 		unsigned int front_pad,
1905917a38c7SKent Overstreet 		int flags)
1906917a38c7SKent Overstreet {
1907917a38c7SKent Overstreet 	unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
1908917a38c7SKent Overstreet 
1909917a38c7SKent Overstreet 	bs->front_pad = front_pad;
1910917a38c7SKent Overstreet 
1911917a38c7SKent Overstreet 	spin_lock_init(&bs->rescue_lock);
1912917a38c7SKent Overstreet 	bio_list_init(&bs->rescue_list);
1913917a38c7SKent Overstreet 	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
1914917a38c7SKent Overstreet 
1915917a38c7SKent Overstreet 	bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
1916917a38c7SKent Overstreet 	if (!bs->bio_slab)
1917917a38c7SKent Overstreet 		return -ENOMEM;
1918917a38c7SKent Overstreet 
1919917a38c7SKent Overstreet 	if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
1920917a38c7SKent Overstreet 		goto bad;
1921917a38c7SKent Overstreet 
1922917a38c7SKent Overstreet 	if ((flags & BIOSET_NEED_BVECS) &&
1923917a38c7SKent Overstreet 	    biovec_init_pool(&bs->bvec_pool, pool_size))
1924917a38c7SKent Overstreet 		goto bad;
1925917a38c7SKent Overstreet 
1926917a38c7SKent Overstreet 	if (!(flags & BIOSET_NEED_RESCUER))
1927917a38c7SKent Overstreet 		return 0;
1928917a38c7SKent Overstreet 
1929917a38c7SKent Overstreet 	bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
1930917a38c7SKent Overstreet 	if (!bs->rescue_workqueue)
1931917a38c7SKent Overstreet 		goto bad;
1932917a38c7SKent Overstreet 
1933917a38c7SKent Overstreet 	return 0;
1934917a38c7SKent Overstreet bad:
1935917a38c7SKent Overstreet 	bioset_exit(bs);
1936917a38c7SKent Overstreet 	return -ENOMEM;
1937917a38c7SKent Overstreet }
1938917a38c7SKent Overstreet EXPORT_SYMBOL(bioset_init);
1939917a38c7SKent Overstreet 
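/*
 * Illustrative sketch, not part of the original file: embedding a bio at
 * the end of a private structure and sizing front_pad accordingly, as
 * required by the description above.  struct example_io and its fields
 * are hypothetical.
 */
struct example_io {
	void *private_cookie;	/* per-I/O driver state */
	struct bio bio;		/* must be the last member */
};

static __maybe_unused int example_bioset_setup(struct bio_set *bs)
{
	return bioset_init(bs, BIO_POOL_SIZE,
			   offsetof(struct example_io, bio),
			   BIOSET_NEED_BVECS);
}
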
194028e89fd9SJens Axboe /*
194128e89fd9SJens Axboe  * Initialize and set up a new bio_set, based on the settings from
194228e89fd9SJens Axboe  * another bio_set.
194328e89fd9SJens Axboe  */
194428e89fd9SJens Axboe int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
194528e89fd9SJens Axboe {
194628e89fd9SJens Axboe 	int flags;
194728e89fd9SJens Axboe 
194828e89fd9SJens Axboe 	flags = 0;
194928e89fd9SJens Axboe 	if (src->bvec_pool.min_nr)
195028e89fd9SJens Axboe 		flags |= BIOSET_NEED_BVECS;
195128e89fd9SJens Axboe 	if (src->rescue_workqueue)
195228e89fd9SJens Axboe 		flags |= BIOSET_NEED_RESCUER;
195328e89fd9SJens Axboe 
195428e89fd9SJens Axboe 	return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags);
195528e89fd9SJens Axboe }
195628e89fd9SJens Axboe EXPORT_SYMBOL(bioset_init_from_src);
195728e89fd9SJens Axboe 
1958f9c78b2bSJens Axboe #ifdef CONFIG_BLK_CGROUP
19591d933cf0STejun Heo 
1960b5f2954dSDennis Zhou /**
1961b5f2954dSDennis Zhou  * bio_associate_blkcg - associate a bio with the specified blkcg
1962b5f2954dSDennis Zhou  * @bio: target bio
1963b5f2954dSDennis Zhou  * @blkcg_css: css of the blkcg to associate
1964b5f2954dSDennis Zhou  *
1965b5f2954dSDennis Zhou  * Associate @bio with the blkcg specified by @blkcg_css.  Block layer will
1966b5f2954dSDennis Zhou  * treat @bio as if it were issued by a task which belongs to the blkcg.
1967b5f2954dSDennis Zhou  *
1968b5f2954dSDennis Zhou  * This function takes an extra reference of @blkcg_css which will be put
1969b5f2954dSDennis Zhou  * when @bio is released.  The caller must own @bio and is responsible for
19700fe061b9SDennis Zhou  * synchronizing calls to this function.  If @blkcg_css is %NULL, a call to
19710fe061b9SDennis Zhou  * blkcg_get_css() finds the current css from the kthread or task.
1972b5f2954dSDennis Zhou  */
1973b5f2954dSDennis Zhou int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
1974b5f2954dSDennis Zhou {
1975b5f2954dSDennis Zhou 	if (unlikely(bio->bi_css))
1976b5f2954dSDennis Zhou 		return -EBUSY;
19770fe061b9SDennis Zhou 
19780fe061b9SDennis Zhou 	if (blkcg_css)
1979b5f2954dSDennis Zhou 		css_get(blkcg_css);
19800fe061b9SDennis Zhou 	else
19810fe061b9SDennis Zhou 		blkcg_css = blkcg_get_css();
19820fe061b9SDennis Zhou 
1983b5f2954dSDennis Zhou 	bio->bi_css = blkcg_css;
1984b5f2954dSDennis Zhou 	return 0;
1985b5f2954dSDennis Zhou }
1986b5f2954dSDennis Zhou EXPORT_SYMBOL_GPL(bio_associate_blkcg);
1987b5f2954dSDennis Zhou 
1988b5f2954dSDennis Zhou /**
19892268c0feSDennis Zhou  * bio_disassociate_blkg - puts back the blkg reference if associated
19902268c0feSDennis Zhou  * @bio: target bio
19912268c0feSDennis Zhou  *
19922268c0feSDennis Zhou  * Helper to disassociate the blkg from @bio if a blkg is associated.
19932268c0feSDennis Zhou  */
19942268c0feSDennis Zhou void bio_disassociate_blkg(struct bio *bio)
19952268c0feSDennis Zhou {
19962268c0feSDennis Zhou 	if (bio->bi_blkg) {
19972268c0feSDennis Zhou 		blkg_put(bio->bi_blkg);
19982268c0feSDennis Zhou 		bio->bi_blkg = NULL;
19992268c0feSDennis Zhou 	}
20002268c0feSDennis Zhou }
2001892ad71fSDennis Zhou EXPORT_SYMBOL_GPL(bio_disassociate_blkg);
20022268c0feSDennis Zhou 
20032268c0feSDennis Zhou /**
20042268c0feSDennis Zhou  * __bio_associate_blkg - associate a bio with the a blkg
200508e18eabSJosef Bacik  * @bio: target bio
200608e18eabSJosef Bacik  * @blkg: the blkg to associate
200708e18eabSJosef Bacik  *
2008beea9da0SDennis Zhou  * This tries to associate @bio with the specified @blkg.  Association failure
2009beea9da0SDennis Zhou  * is handled by walking up the blkg tree.  Therefore, the blkg associated can
2010beea9da0SDennis Zhou  * be anything between @blkg and the root_blkg.  This situation only happens
2011beea9da0SDennis Zhou  * when a cgroup is dying and then the remaining bios will spill to the closest
2012beea9da0SDennis Zhou  * alive blkg.
2013beea9da0SDennis Zhou  *
2014beea9da0SDennis Zhou  * A reference will be taken on the @blkg and will be released when @bio is
2015beea9da0SDennis Zhou  * freed.
201608e18eabSJosef Bacik  */
20172268c0feSDennis Zhou static void __bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
201808e18eabSJosef Bacik {
20192268c0feSDennis Zhou 	bio_disassociate_blkg(bio);
20202268c0feSDennis Zhou 
2021beea9da0SDennis Zhou 	bio->bi_blkg = blkg_try_get_closest(blkg);
20222268c0feSDennis Zhou }
20232268c0feSDennis Zhou 
20246a7f6d86SDennis Zhou static void __bio_associate_blkg_from_css(struct bio *bio,
20256a7f6d86SDennis Zhou 					  struct cgroup_subsys_state *css)
20266a7f6d86SDennis Zhou {
20276a7f6d86SDennis Zhou 	struct blkcg_gq *blkg;
20286a7f6d86SDennis Zhou 
20296a7f6d86SDennis Zhou 	rcu_read_lock();
20306a7f6d86SDennis Zhou 
20316a7f6d86SDennis Zhou 	blkg = blkg_lookup_create(css_to_blkcg(css), bio->bi_disk->queue);
20326a7f6d86SDennis Zhou 	__bio_associate_blkg(bio, blkg);
20336a7f6d86SDennis Zhou 
20346a7f6d86SDennis Zhou 	rcu_read_unlock();
20356a7f6d86SDennis Zhou }
20366a7f6d86SDennis Zhou 
2037fd42df30SDennis Zhou /**
2038fd42df30SDennis Zhou  * bio_associate_blkg_from_css - associate a bio with a specified css
2039fd42df30SDennis Zhou  * @bio: target bio
2040fd42df30SDennis Zhou  * @css: target css
2041fd42df30SDennis Zhou  *
2042fd42df30SDennis Zhou  * Associate @bio with the blkg found by combining the css's blkg and the
2043fd42df30SDennis Zhou  * request_queue of the @bio.  This takes a reference on the css that will
2044fd42df30SDennis Zhou  * be put upon freeing of @bio.
2045fd42df30SDennis Zhou  */
2046fd42df30SDennis Zhou void bio_associate_blkg_from_css(struct bio *bio,
2047fd42df30SDennis Zhou 				 struct cgroup_subsys_state *css)
2048fd42df30SDennis Zhou {
2049fd42df30SDennis Zhou 	css_get(css);
2050fd42df30SDennis Zhou 	bio->bi_css = css;
2051fd42df30SDennis Zhou 	__bio_associate_blkg_from_css(bio, css);
2052fd42df30SDennis Zhou }
2053fd42df30SDennis Zhou EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
2054fd42df30SDennis Zhou 
20556a7f6d86SDennis Zhou #ifdef CONFIG_MEMCG
20566a7f6d86SDennis Zhou /**
20576a7f6d86SDennis Zhou  * bio_associate_blkg_from_page - associate a bio with the page's blkg
20586a7f6d86SDennis Zhou  * @bio: target bio
20596a7f6d86SDennis Zhou  * @page: the page to lookup the blkcg from
20606a7f6d86SDennis Zhou  *
20616a7f6d86SDennis Zhou  * Associate @bio with the blkg from @page's owning memcg and the respective
20626a7f6d86SDennis Zhou  * request_queue.  This works like every other associate function wrt
20636a7f6d86SDennis Zhou  * references.
20646a7f6d86SDennis Zhou  */
20656a7f6d86SDennis Zhou void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
20666a7f6d86SDennis Zhou {
20676a7f6d86SDennis Zhou 	struct cgroup_subsys_state *css;
20686a7f6d86SDennis Zhou 
20696a7f6d86SDennis Zhou 	if (unlikely(bio->bi_css))
20706a7f6d86SDennis Zhou 		return;
20716a7f6d86SDennis Zhou 	if (!page->mem_cgroup)
20726a7f6d86SDennis Zhou 		return;
20736a7f6d86SDennis Zhou 
20746a7f6d86SDennis Zhou 	css = cgroup_get_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys);
20756a7f6d86SDennis Zhou 	bio->bi_css = css;
20766a7f6d86SDennis Zhou 	__bio_associate_blkg_from_css(bio, css);
20776a7f6d86SDennis Zhou }
20786a7f6d86SDennis Zhou #endif /* CONFIG_MEMCG */
20796a7f6d86SDennis Zhou 
20802268c0feSDennis Zhou /**
20812268c0feSDennis Zhou  * bio_associate_blkg - associate a bio with a blkg
20822268c0feSDennis Zhou  * @bio: target bio
20832268c0feSDennis Zhou  *
20842268c0feSDennis Zhou  * Associate @bio with the blkg found from the bio's css and request_queue.
20852268c0feSDennis Zhou  * If one is not found, blkg_lookup_create() creates the blkg.  If a blkg is
20862268c0feSDennis Zhou  * already associated, the css is reused and association redone as the
20872268c0feSDennis Zhou  * request_queue may have changed.
20882268c0feSDennis Zhou  */
20892268c0feSDennis Zhou void bio_associate_blkg(struct bio *bio)
20902268c0feSDennis Zhou {
20912268c0feSDennis Zhou 	struct request_queue *q = bio->bi_disk->queue;
20922268c0feSDennis Zhou 	struct blkcg *blkcg;
20932268c0feSDennis Zhou 	struct blkcg_gq *blkg;
20942268c0feSDennis Zhou 
20952268c0feSDennis Zhou 	rcu_read_lock();
20962268c0feSDennis Zhou 
20972268c0feSDennis Zhou 	bio_associate_blkcg(bio, NULL);
20982268c0feSDennis Zhou 	blkcg = bio_blkcg(bio);
20992268c0feSDennis Zhou 
21002268c0feSDennis Zhou 	if (!blkcg->css.parent) {
21012268c0feSDennis Zhou 		__bio_associate_blkg(bio, q->root_blkg);
21022268c0feSDennis Zhou 	} else {
21032268c0feSDennis Zhou 		blkg = blkg_lookup_create(blkcg, q);
21042268c0feSDennis Zhou 
21052268c0feSDennis Zhou 		__bio_associate_blkg(bio, blkg);
21062268c0feSDennis Zhou 	}
21072268c0feSDennis Zhou 
21082268c0feSDennis Zhou 	rcu_read_unlock();
210908e18eabSJosef Bacik }
21105cdf2e3fSDennis Zhou EXPORT_SYMBOL_GPL(bio_associate_blkg);
211108e18eabSJosef Bacik 
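/*
 * Illustrative sketch, not part of the original file: associating a bio
 * with the submitter's blkg before issue.  bio->bi_disk must already be
 * set (e.g. via bio_set_dev()); the function name is hypothetical.
 */
static __maybe_unused void example_submit_with_blkg(struct bio *bio)
{
	bio_associate_blkg(bio);	/* uses bio->bi_disk->queue */
	submit_bio(bio);
}
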
2112f0fcb3ecSDennis Zhou (Facebook) /**
2113f9c78b2bSJens Axboe  * bio_disassociate_task - undo bio_associate_blkcg() and bio_associate_blkg()
2114f9c78b2bSJens Axboe  * @bio: target bio
2115f9c78b2bSJens Axboe  */
2116f9c78b2bSJens Axboe void bio_disassociate_task(struct bio *bio)
2117f9c78b2bSJens Axboe {
2118b5f2954dSDennis Zhou 	if (bio->bi_css) {
2119b5f2954dSDennis Zhou 		css_put(bio->bi_css);
2120b5f2954dSDennis Zhou 		bio->bi_css = NULL;
2121b5f2954dSDennis Zhou 	}
21222268c0feSDennis Zhou 	bio_disassociate_blkg(bio);
2123f9c78b2bSJens Axboe }
2124f9c78b2bSJens Axboe 
212520bd723eSPaolo Valente /**
2126b5f2954dSDennis Zhou  * bio_clone_blkcg_association - clone blkcg association from src to dst bio
212720bd723eSPaolo Valente  * @dst: destination bio
212820bd723eSPaolo Valente  * @src: source bio
212920bd723eSPaolo Valente  */
2130b5f2954dSDennis Zhou void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
213120bd723eSPaolo Valente {
2132b5f2954dSDennis Zhou 	if (src->bi_css)
2133b5f2954dSDennis Zhou 		WARN_ON(bio_associate_blkcg(dst, src->bi_css));
21342268c0feSDennis Zhou 
21352268c0feSDennis Zhou 	if (src->bi_blkg)
21362268c0feSDennis Zhou 		__bio_associate_blkg(dst, src->bi_blkg);
213720bd723eSPaolo Valente }
2138b5f2954dSDennis Zhou EXPORT_SYMBOL_GPL(bio_clone_blkcg_association);
2139f9c78b2bSJens Axboe #endif /* CONFIG_BLK_CGROUP */
2140f9c78b2bSJens Axboe 
2141f9c78b2bSJens Axboe static void __init biovec_init_slabs(void)
2142f9c78b2bSJens Axboe {
2143f9c78b2bSJens Axboe 	int i;
2144f9c78b2bSJens Axboe 
2145ed996a52SChristoph Hellwig 	for (i = 0; i < BVEC_POOL_NR; i++) {
2146f9c78b2bSJens Axboe 		int size;
2147f9c78b2bSJens Axboe 		struct biovec_slab *bvs = bvec_slabs + i;
2148f9c78b2bSJens Axboe 
2149f9c78b2bSJens Axboe 		if (bvs->nr_vecs <= BIO_INLINE_VECS) {
2150f9c78b2bSJens Axboe 			bvs->slab = NULL;
2151f9c78b2bSJens Axboe 			continue;
2152f9c78b2bSJens Axboe 		}
2153f9c78b2bSJens Axboe 
2154f9c78b2bSJens Axboe 		size = bvs->nr_vecs * sizeof(struct bio_vec);
2155f9c78b2bSJens Axboe 		bvs->slab = kmem_cache_create(bvs->name, size, 0,
2156f9c78b2bSJens Axboe                                 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
2157f9c78b2bSJens Axboe 	}
2158f9c78b2bSJens Axboe }
2159f9c78b2bSJens Axboe 
2160f9c78b2bSJens Axboe static int __init init_bio(void)
2161f9c78b2bSJens Axboe {
2162f9c78b2bSJens Axboe 	bio_slab_max = 2;
2163f9c78b2bSJens Axboe 	bio_slab_nr = 0;
21646396bb22SKees Cook 	bio_slabs = kcalloc(bio_slab_max, sizeof(struct bio_slab),
21656396bb22SKees Cook 			    GFP_KERNEL);
2166f9c78b2bSJens Axboe 	if (!bio_slabs)
2167f9c78b2bSJens Axboe 		panic("bio: can't allocate bios\n");
2168f9c78b2bSJens Axboe 
2169f9c78b2bSJens Axboe 	bio_integrity_init();
2170f9c78b2bSJens Axboe 	biovec_init_slabs();
2171f9c78b2bSJens Axboe 
2172f4f8154aSKent Overstreet 	if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS))
2173f9c78b2bSJens Axboe 		panic("bio: can't allocate bios\n");
2174f9c78b2bSJens Axboe 
2175f4f8154aSKent Overstreet 	if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE))
2176f9c78b2bSJens Axboe 		panic("bio: can't create integrity pool\n");
2177f9c78b2bSJens Axboe 
2178f9c78b2bSJens Axboe 	return 0;
2179f9c78b2bSJens Axboe }
2180f9c78b2bSJens Axboe subsys_initcall(init_bio);
2181