xref: /openbmc/linux/block/bio.c (revision a10584c3)
1f9c78b2bSJens Axboe /*
2f9c78b2bSJens Axboe  * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
3f9c78b2bSJens Axboe  *
4f9c78b2bSJens Axboe  * This program is free software; you can redistribute it and/or modify
5f9c78b2bSJens Axboe  * it under the terms of the GNU General Public License version 2 as
6f9c78b2bSJens Axboe  * published by the Free Software Foundation.
7f9c78b2bSJens Axboe  *
8f9c78b2bSJens Axboe  * This program is distributed in the hope that it will be useful,
9f9c78b2bSJens Axboe  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10f9c78b2bSJens Axboe  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11f9c78b2bSJens Axboe  * GNU General Public License for more details.
12f9c78b2bSJens Axboe  *
13f9c78b2bSJens Axboe  * You should have received a copy of the GNU General Public License
14f9c78b2bSJens Axboe  * along with this program; if not, write to the Free Software
15f9c78b2bSJens Axboe  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
16f9c78b2bSJens Axboe  *
17f9c78b2bSJens Axboe  */
18f9c78b2bSJens Axboe #include <linux/mm.h>
19f9c78b2bSJens Axboe #include <linux/swap.h>
20f9c78b2bSJens Axboe #include <linux/bio.h>
21f9c78b2bSJens Axboe #include <linux/blkdev.h>
22f9c78b2bSJens Axboe #include <linux/uio.h>
23f9c78b2bSJens Axboe #include <linux/iocontext.h>
24f9c78b2bSJens Axboe #include <linux/slab.h>
25f9c78b2bSJens Axboe #include <linux/init.h>
26f9c78b2bSJens Axboe #include <linux/kernel.h>
27f9c78b2bSJens Axboe #include <linux/export.h>
28f9c78b2bSJens Axboe #include <linux/mempool.h>
29f9c78b2bSJens Axboe #include <linux/workqueue.h>
30f9c78b2bSJens Axboe #include <linux/cgroup.h>
3108e18eabSJosef Bacik #include <linux/blk-cgroup.h>
32f9c78b2bSJens Axboe 
33f9c78b2bSJens Axboe #include <trace/events/block.h>
349e234eeaSShaohua Li #include "blk.h"
3567b42d0bSJosef Bacik #include "blk-rq-qos.h"
36f9c78b2bSJens Axboe 
37f9c78b2bSJens Axboe /*
38f9c78b2bSJens Axboe  * Test patch to inline a certain number of bi_io_vecs inside the bio
39f9c78b2bSJens Axboe  * itself, to shrink a bio data allocation from two mempool calls to one
40f9c78b2bSJens Axboe  */
41f9c78b2bSJens Axboe #define BIO_INLINE_VECS		4
42f9c78b2bSJens Axboe 
43f9c78b2bSJens Axboe /*
44f9c78b2bSJens Axboe  * If you change this list, also change bvec_alloc or things will
45f9c78b2bSJens Axboe  * break badly! The vec counts cannot be bigger than what you can fit
46f9c78b2bSJens Axboe  * into an unsigned short.
47f9c78b2bSJens Axboe  */
48bd5c4facSMikulas Patocka #define BV(x, n) { .nr_vecs = x, .name = "biovec-"#n }
49ed996a52SChristoph Hellwig static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
50bd5c4facSMikulas Patocka 	BV(1, 1), BV(4, 4), BV(16, 16), BV(64, 64), BV(128, 128), BV(BIO_MAX_PAGES, max),
51f9c78b2bSJens Axboe };
52f9c78b2bSJens Axboe #undef BV
53f9c78b2bSJens Axboe 
54f9c78b2bSJens Axboe /*
55f9c78b2bSJens Axboe  * fs_bio_set is the bio_set containing bio and iovec memory pools used by
56f9c78b2bSJens Axboe  * IO code that does not need private memory pools.
57f9c78b2bSJens Axboe  */
58f4f8154aSKent Overstreet struct bio_set fs_bio_set;
59f9c78b2bSJens Axboe EXPORT_SYMBOL(fs_bio_set);
60f9c78b2bSJens Axboe 
61f9c78b2bSJens Axboe /*
62f9c78b2bSJens Axboe  * Our slab pool management
63f9c78b2bSJens Axboe  */
64f9c78b2bSJens Axboe struct bio_slab {
65f9c78b2bSJens Axboe 	struct kmem_cache *slab;
66f9c78b2bSJens Axboe 	unsigned int slab_ref;
67f9c78b2bSJens Axboe 	unsigned int slab_size;
68f9c78b2bSJens Axboe 	char name[8];
69f9c78b2bSJens Axboe };
70f9c78b2bSJens Axboe static DEFINE_MUTEX(bio_slab_lock);
71f9c78b2bSJens Axboe static struct bio_slab *bio_slabs;
72f9c78b2bSJens Axboe static unsigned int bio_slab_nr, bio_slab_max;
73f9c78b2bSJens Axboe 
74f9c78b2bSJens Axboe static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
75f9c78b2bSJens Axboe {
76f9c78b2bSJens Axboe 	unsigned int sz = sizeof(struct bio) + extra_size;
77f9c78b2bSJens Axboe 	struct kmem_cache *slab = NULL;
78f9c78b2bSJens Axboe 	struct bio_slab *bslab, *new_bio_slabs;
79f9c78b2bSJens Axboe 	unsigned int new_bio_slab_max;
80f9c78b2bSJens Axboe 	unsigned int i, entry = -1;
81f9c78b2bSJens Axboe 
82f9c78b2bSJens Axboe 	mutex_lock(&bio_slab_lock);
83f9c78b2bSJens Axboe 
84f9c78b2bSJens Axboe 	i = 0;
85f9c78b2bSJens Axboe 	while (i < bio_slab_nr) {
86f9c78b2bSJens Axboe 		bslab = &bio_slabs[i];
87f9c78b2bSJens Axboe 
88f9c78b2bSJens Axboe 		if (!bslab->slab && entry == -1)
89f9c78b2bSJens Axboe 			entry = i;
90f9c78b2bSJens Axboe 		else if (bslab->slab_size == sz) {
91f9c78b2bSJens Axboe 			slab = bslab->slab;
92f9c78b2bSJens Axboe 			bslab->slab_ref++;
93f9c78b2bSJens Axboe 			break;
94f9c78b2bSJens Axboe 		}
95f9c78b2bSJens Axboe 		i++;
96f9c78b2bSJens Axboe 	}
97f9c78b2bSJens Axboe 
98f9c78b2bSJens Axboe 	if (slab)
99f9c78b2bSJens Axboe 		goto out_unlock;
100f9c78b2bSJens Axboe 
101f9c78b2bSJens Axboe 	if (bio_slab_nr == bio_slab_max && entry == -1) {
102f9c78b2bSJens Axboe 		new_bio_slab_max = bio_slab_max << 1;
103f9c78b2bSJens Axboe 		new_bio_slabs = krealloc(bio_slabs,
104f9c78b2bSJens Axboe 					 new_bio_slab_max * sizeof(struct bio_slab),
105f9c78b2bSJens Axboe 					 GFP_KERNEL);
106f9c78b2bSJens Axboe 		if (!new_bio_slabs)
107f9c78b2bSJens Axboe 			goto out_unlock;
108f9c78b2bSJens Axboe 		bio_slab_max = new_bio_slab_max;
109f9c78b2bSJens Axboe 		bio_slabs = new_bio_slabs;
110f9c78b2bSJens Axboe 	}
111f9c78b2bSJens Axboe 	if (entry == -1)
112f9c78b2bSJens Axboe 		entry = bio_slab_nr++;
113f9c78b2bSJens Axboe 
114f9c78b2bSJens Axboe 	bslab = &bio_slabs[entry];
115f9c78b2bSJens Axboe 
116f9c78b2bSJens Axboe 	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
1176a241483SMikulas Patocka 	slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
1186a241483SMikulas Patocka 				 SLAB_HWCACHE_ALIGN, NULL);
119f9c78b2bSJens Axboe 	if (!slab)
120f9c78b2bSJens Axboe 		goto out_unlock;
121f9c78b2bSJens Axboe 
122f9c78b2bSJens Axboe 	bslab->slab = slab;
123f9c78b2bSJens Axboe 	bslab->slab_ref = 1;
124f9c78b2bSJens Axboe 	bslab->slab_size = sz;
125f9c78b2bSJens Axboe out_unlock:
126f9c78b2bSJens Axboe 	mutex_unlock(&bio_slab_lock);
127f9c78b2bSJens Axboe 	return slab;
128f9c78b2bSJens Axboe }
129f9c78b2bSJens Axboe 
130f9c78b2bSJens Axboe static void bio_put_slab(struct bio_set *bs)
131f9c78b2bSJens Axboe {
132f9c78b2bSJens Axboe 	struct bio_slab *bslab = NULL;
133f9c78b2bSJens Axboe 	unsigned int i;
134f9c78b2bSJens Axboe 
135f9c78b2bSJens Axboe 	mutex_lock(&bio_slab_lock);
136f9c78b2bSJens Axboe 
137f9c78b2bSJens Axboe 	for (i = 0; i < bio_slab_nr; i++) {
138f9c78b2bSJens Axboe 		if (bs->bio_slab == bio_slabs[i].slab) {
139f9c78b2bSJens Axboe 			bslab = &bio_slabs[i];
140f9c78b2bSJens Axboe 			break;
141f9c78b2bSJens Axboe 		}
142f9c78b2bSJens Axboe 	}
143f9c78b2bSJens Axboe 
144f9c78b2bSJens Axboe 	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
145f9c78b2bSJens Axboe 		goto out;
146f9c78b2bSJens Axboe 
147f9c78b2bSJens Axboe 	WARN_ON(!bslab->slab_ref);
148f9c78b2bSJens Axboe 
149f9c78b2bSJens Axboe 	if (--bslab->slab_ref)
150f9c78b2bSJens Axboe 		goto out;
151f9c78b2bSJens Axboe 
152f9c78b2bSJens Axboe 	kmem_cache_destroy(bslab->slab);
153f9c78b2bSJens Axboe 	bslab->slab = NULL;
154f9c78b2bSJens Axboe 
155f9c78b2bSJens Axboe out:
156f9c78b2bSJens Axboe 	mutex_unlock(&bio_slab_lock);
157f9c78b2bSJens Axboe }
158f9c78b2bSJens Axboe 
159f9c78b2bSJens Axboe unsigned int bvec_nr_vecs(unsigned short idx)
160f9c78b2bSJens Axboe {
161d6c02a9bSGreg Edwards 	return bvec_slabs[--idx].nr_vecs;
162f9c78b2bSJens Axboe }
163f9c78b2bSJens Axboe 
164f9c78b2bSJens Axboe void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
165f9c78b2bSJens Axboe {
166ed996a52SChristoph Hellwig 	if (!idx)
167ed996a52SChristoph Hellwig 		return;
168ed996a52SChristoph Hellwig 	idx--;
169f9c78b2bSJens Axboe 
170ed996a52SChristoph Hellwig 	BIO_BUG_ON(idx >= BVEC_POOL_NR);
171ed996a52SChristoph Hellwig 
172ed996a52SChristoph Hellwig 	if (idx == BVEC_POOL_MAX) {
173f9c78b2bSJens Axboe 		mempool_free(bv, pool);
174ed996a52SChristoph Hellwig 	} else {
175f9c78b2bSJens Axboe 		struct biovec_slab *bvs = bvec_slabs + idx;
176f9c78b2bSJens Axboe 
177f9c78b2bSJens Axboe 		kmem_cache_free(bvs->slab, bv);
178f9c78b2bSJens Axboe 	}
179f9c78b2bSJens Axboe }
180f9c78b2bSJens Axboe 
181f9c78b2bSJens Axboe struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
182f9c78b2bSJens Axboe 			   mempool_t *pool)
183f9c78b2bSJens Axboe {
184f9c78b2bSJens Axboe 	struct bio_vec *bvl;
185f9c78b2bSJens Axboe 
186f9c78b2bSJens Axboe 	/*
187f9c78b2bSJens Axboe 	 * see the comment near the bvec_slabs definition!
188f9c78b2bSJens Axboe 	 */
189f9c78b2bSJens Axboe 	switch (nr) {
190f9c78b2bSJens Axboe 	case 1:
191f9c78b2bSJens Axboe 		*idx = 0;
192f9c78b2bSJens Axboe 		break;
193f9c78b2bSJens Axboe 	case 2 ... 4:
194f9c78b2bSJens Axboe 		*idx = 1;
195f9c78b2bSJens Axboe 		break;
196f9c78b2bSJens Axboe 	case 5 ... 16:
197f9c78b2bSJens Axboe 		*idx = 2;
198f9c78b2bSJens Axboe 		break;
199f9c78b2bSJens Axboe 	case 17 ... 64:
200f9c78b2bSJens Axboe 		*idx = 3;
201f9c78b2bSJens Axboe 		break;
202f9c78b2bSJens Axboe 	case 65 ... 128:
203f9c78b2bSJens Axboe 		*idx = 4;
204f9c78b2bSJens Axboe 		break;
205f9c78b2bSJens Axboe 	case 129 ... BIO_MAX_PAGES:
206f9c78b2bSJens Axboe 		*idx = 5;
207f9c78b2bSJens Axboe 		break;
208f9c78b2bSJens Axboe 	default:
209f9c78b2bSJens Axboe 		return NULL;
210f9c78b2bSJens Axboe 	}
211f9c78b2bSJens Axboe 
212f9c78b2bSJens Axboe 	/*
213f9c78b2bSJens Axboe 	 * idx now points to the pool we want to allocate from. Only the
214f9c78b2bSJens Axboe 	 * largest pool (BVEC_POOL_MAX) is mempool backed.
215f9c78b2bSJens Axboe 	 */
216ed996a52SChristoph Hellwig 	if (*idx == BVEC_POOL_MAX) {
217f9c78b2bSJens Axboe fallback:
218f9c78b2bSJens Axboe 		bvl = mempool_alloc(pool, gfp_mask);
219f9c78b2bSJens Axboe 	} else {
220f9c78b2bSJens Axboe 		struct biovec_slab *bvs = bvec_slabs + *idx;
221d0164adcSMel Gorman 		gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);
222f9c78b2bSJens Axboe 
223f9c78b2bSJens Axboe 		/*
224f9c78b2bSJens Axboe 		 * Make this allocation restricted and don't dump info on
225f9c78b2bSJens Axboe 		 * allocation failures, since we'll fall back to the mempool
226f9c78b2bSJens Axboe 		 * in case of failure.
227f9c78b2bSJens Axboe 		 */
228f9c78b2bSJens Axboe 		__gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
229f9c78b2bSJens Axboe 
230f9c78b2bSJens Axboe 		/*
231d0164adcSMel Gorman 		 * Try a slab allocation. If this fails and __GFP_DIRECT_RECLAIM
232f9c78b2bSJens Axboe 		 * is set, retry with the mempool-backed pool
233f9c78b2bSJens Axboe 		 */
234f9c78b2bSJens Axboe 		bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
235d0164adcSMel Gorman 		if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
236ed996a52SChristoph Hellwig 			*idx = BVEC_POOL_MAX;
237f9c78b2bSJens Axboe 			goto fallback;
238f9c78b2bSJens Axboe 		}
239f9c78b2bSJens Axboe 	}
240f9c78b2bSJens Axboe 
241ed996a52SChristoph Hellwig 	(*idx)++;
242f9c78b2bSJens Axboe 	return bvl;
243f9c78b2bSJens Axboe }
244f9c78b2bSJens Axboe 
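/*
 * Worked example for the mapping above (a sketch of the arithmetic, not
 * new behaviour): a request for nr = 10 vecs falls in the 5 ... 16 case,
 * so *idx becomes 2 and the allocation is served from the "biovec-16"
 * slab; (*idx)++ then returns 3, leaving 0 free to mean "no external
 * biovec", which is why bvec_free() starts by decrementing idx.
 */
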
2459ae3b3f5SJens Axboe void bio_uninit(struct bio *bio)
246f9c78b2bSJens Axboe {
2476f70fb66SDennis Zhou 	bio_disassociate_blkg(bio);
248f9c78b2bSJens Axboe }
2499ae3b3f5SJens Axboe EXPORT_SYMBOL(bio_uninit);
250f9c78b2bSJens Axboe 
251f9c78b2bSJens Axboe static void bio_free(struct bio *bio)
252f9c78b2bSJens Axboe {
253f9c78b2bSJens Axboe 	struct bio_set *bs = bio->bi_pool;
254f9c78b2bSJens Axboe 	void *p;
255f9c78b2bSJens Axboe 
2569ae3b3f5SJens Axboe 	bio_uninit(bio);
257f9c78b2bSJens Axboe 
258f9c78b2bSJens Axboe 	if (bs) {
2598aa6ba2fSKent Overstreet 		bvec_free(&bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));
260f9c78b2bSJens Axboe 
261f9c78b2bSJens Axboe 		/*
262f9c78b2bSJens Axboe 		 * If we have front padding, adjust the bio pointer before freeing
263f9c78b2bSJens Axboe 		 */
264f9c78b2bSJens Axboe 		p = bio;
265f9c78b2bSJens Axboe 		p -= bs->front_pad;
266f9c78b2bSJens Axboe 
2678aa6ba2fSKent Overstreet 		mempool_free(p, &bs->bio_pool);
268f9c78b2bSJens Axboe 	} else {
269f9c78b2bSJens Axboe 		/* Bio was allocated by bio_kmalloc() */
270f9c78b2bSJens Axboe 		kfree(bio);
271f9c78b2bSJens Axboe 	}
272f9c78b2bSJens Axboe }
273f9c78b2bSJens Axboe 
2749ae3b3f5SJens Axboe /*
2759ae3b3f5SJens Axboe  * Users of this function have their own bio allocation. Consequently,
2769ae3b3f5SJens Axboe  * they must remember to pair any call to bio_init() with bio_uninit()
2779ae3b3f5SJens Axboe  * when IO has completed, or when the bio is released.
2789ae3b3f5SJens Axboe  */
2793a83f467SMing Lei void bio_init(struct bio *bio, struct bio_vec *table,
2803a83f467SMing Lei 	      unsigned short max_vecs)
281f9c78b2bSJens Axboe {
282f9c78b2bSJens Axboe 	memset(bio, 0, sizeof(*bio));
283c4cf5261SJens Axboe 	atomic_set(&bio->__bi_remaining, 1);
284dac56212SJens Axboe 	atomic_set(&bio->__bi_cnt, 1);
2853a83f467SMing Lei 
2863a83f467SMing Lei 	bio->bi_io_vec = table;
2873a83f467SMing Lei 	bio->bi_max_vecs = max_vecs;
288f9c78b2bSJens Axboe }
289f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_init);
290f9c78b2bSJens Axboe 
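/*
 * Example: a minimal sketch (not from this file) of an on-stack bio with
 * an inline vec, paired with bio_uninit() as the comment above requires.
 * 'bdev' and 'page' are assumed to be set up by the caller:
 *
 *	struct bio bio;
 *	struct bio_vec bvec;
 *
 *	bio_init(&bio, &bvec, 1);
 *	bio_set_dev(&bio, bdev);
 *	bio.bi_iter.bi_sector = 0;
 *	bio.bi_opf = REQ_OP_READ;
 *	bio_add_page(&bio, page, PAGE_SIZE, 0);
 *	submit_bio_wait(&bio);
 *	bio_uninit(&bio);
 */
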
291f9c78b2bSJens Axboe /**
292f9c78b2bSJens Axboe  * bio_reset - reinitialize a bio
293f9c78b2bSJens Axboe  * @bio:	bio to reset
294f9c78b2bSJens Axboe  *
295f9c78b2bSJens Axboe  * Description:
296f9c78b2bSJens Axboe  *   After calling bio_reset(), @bio will be in the same state as a freshly
297f9c78b2bSJens Axboe  *   allocated bio returned by bio_alloc_bioset() - the only fields that are
298f9c78b2bSJens Axboe  *   preserved are the ones that are initialized by bio_alloc_bioset(). See
299f9c78b2bSJens Axboe  *   comment in struct bio.
300f9c78b2bSJens Axboe  */
301f9c78b2bSJens Axboe void bio_reset(struct bio *bio)
302f9c78b2bSJens Axboe {
303f9c78b2bSJens Axboe 	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
304f9c78b2bSJens Axboe 
3059ae3b3f5SJens Axboe 	bio_uninit(bio);
306f9c78b2bSJens Axboe 
307f9c78b2bSJens Axboe 	memset(bio, 0, BIO_RESET_BYTES);
3084246a0b6SChristoph Hellwig 	bio->bi_flags = flags;
309c4cf5261SJens Axboe 	atomic_set(&bio->__bi_remaining, 1);
310f9c78b2bSJens Axboe }
311f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_reset);
312f9c78b2bSJens Axboe 
31338f8baaeSChristoph Hellwig static struct bio *__bio_chain_endio(struct bio *bio)
314f9c78b2bSJens Axboe {
3154246a0b6SChristoph Hellwig 	struct bio *parent = bio->bi_private;
3164246a0b6SChristoph Hellwig 
3174e4cbee9SChristoph Hellwig 	if (!parent->bi_status)
3184e4cbee9SChristoph Hellwig 		parent->bi_status = bio->bi_status;
319f9c78b2bSJens Axboe 	bio_put(bio);
32038f8baaeSChristoph Hellwig 	return parent;
32138f8baaeSChristoph Hellwig }
32238f8baaeSChristoph Hellwig 
32338f8baaeSChristoph Hellwig static void bio_chain_endio(struct bio *bio)
32438f8baaeSChristoph Hellwig {
32538f8baaeSChristoph Hellwig 	bio_endio(__bio_chain_endio(bio));
326f9c78b2bSJens Axboe }
327f9c78b2bSJens Axboe 
328f9c78b2bSJens Axboe /**
329f9c78b2bSJens Axboe  * bio_chain - chain bio completions
330f9c78b2bSJens Axboe  * @bio: the target bio
331f9c78b2bSJens Axboe  * @parent: the @bio's parent bio
332f9c78b2bSJens Axboe  *
333f9c78b2bSJens Axboe  * The caller won't have a bi_end_io called when @bio completes - instead,
334f9c78b2bSJens Axboe  * @parent's bi_end_io won't be called until both @parent and @bio have
335f9c78b2bSJens Axboe  * completed; the chained bio will also be freed when it completes.
336f9c78b2bSJens Axboe  *
337f9c78b2bSJens Axboe  * The caller must not set bi_private or bi_end_io in @bio.
338f9c78b2bSJens Axboe  */
339f9c78b2bSJens Axboe void bio_chain(struct bio *bio, struct bio *parent)
340f9c78b2bSJens Axboe {
341f9c78b2bSJens Axboe 	BUG_ON(bio->bi_private || bio->bi_end_io);
342f9c78b2bSJens Axboe 
343f9c78b2bSJens Axboe 	bio->bi_private = parent;
344f9c78b2bSJens Axboe 	bio->bi_end_io	= bio_chain_endio;
345c4cf5261SJens Axboe 	bio_inc_remaining(parent);
346f9c78b2bSJens Axboe }
347f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_chain);
348f9c78b2bSJens Axboe 
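/*
 * Example: a sketch (not from this file) of the typical bio_chain()
 * pattern in a driver that splits a bio; 'sectors' is an assumed split
 * point. The parent's bi_end_io runs only once both halves complete:
 *
 *	struct bio *split = bio_split(bio, sectors, GFP_NOIO, &fs_bio_set);
 *
 *	bio_chain(split, bio);
 *	generic_make_request(split);
 *	... continue processing, then submit the parent 'bio' ...
 */
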
349f9c78b2bSJens Axboe static void bio_alloc_rescue(struct work_struct *work)
350f9c78b2bSJens Axboe {
351f9c78b2bSJens Axboe 	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
352f9c78b2bSJens Axboe 	struct bio *bio;
353f9c78b2bSJens Axboe 
354f9c78b2bSJens Axboe 	while (1) {
355f9c78b2bSJens Axboe 		spin_lock(&bs->rescue_lock);
356f9c78b2bSJens Axboe 		bio = bio_list_pop(&bs->rescue_list);
357f9c78b2bSJens Axboe 		spin_unlock(&bs->rescue_lock);
358f9c78b2bSJens Axboe 
359f9c78b2bSJens Axboe 		if (!bio)
360f9c78b2bSJens Axboe 			break;
361f9c78b2bSJens Axboe 
362f9c78b2bSJens Axboe 		generic_make_request(bio);
363f9c78b2bSJens Axboe 	}
364f9c78b2bSJens Axboe }
365f9c78b2bSJens Axboe 
366f9c78b2bSJens Axboe static void punt_bios_to_rescuer(struct bio_set *bs)
367f9c78b2bSJens Axboe {
368f9c78b2bSJens Axboe 	struct bio_list punt, nopunt;
369f9c78b2bSJens Axboe 	struct bio *bio;
370f9c78b2bSJens Axboe 
37147e0fb46SNeilBrown 	if (WARN_ON_ONCE(!bs->rescue_workqueue))
37247e0fb46SNeilBrown 		return;
373f9c78b2bSJens Axboe 	/*
374f9c78b2bSJens Axboe 	 * In order to guarantee forward progress we must punt only bios that
375f9c78b2bSJens Axboe 	 * were allocated from this bio_set; otherwise, if there was a bio on
376f9c78b2bSJens Axboe 	 * there for a stacking driver higher up in the stack, processing it
377f9c78b2bSJens Axboe 	 * could require allocating bios from this bio_set, and doing that from
378f9c78b2bSJens Axboe 	 * our own rescuer would be bad.
379f9c78b2bSJens Axboe 	 *
380f9c78b2bSJens Axboe 	 * Since bio lists are singly linked, pop them all instead of trying to
381f9c78b2bSJens Axboe 	 * remove from the middle of the list:
382f9c78b2bSJens Axboe 	 */
383f9c78b2bSJens Axboe 
384f9c78b2bSJens Axboe 	bio_list_init(&punt);
385f9c78b2bSJens Axboe 	bio_list_init(&nopunt);
386f9c78b2bSJens Axboe 
387f5fe1b51SNeilBrown 	while ((bio = bio_list_pop(&current->bio_list[0])))
388f9c78b2bSJens Axboe 		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
389f5fe1b51SNeilBrown 	current->bio_list[0] = nopunt;
390f9c78b2bSJens Axboe 
391f5fe1b51SNeilBrown 	bio_list_init(&nopunt);
392f5fe1b51SNeilBrown 	while ((bio = bio_list_pop(&current->bio_list[1])))
393f5fe1b51SNeilBrown 		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
394f5fe1b51SNeilBrown 	current->bio_list[1] = nopunt;
395f9c78b2bSJens Axboe 
396f9c78b2bSJens Axboe 	spin_lock(&bs->rescue_lock);
397f9c78b2bSJens Axboe 	bio_list_merge(&bs->rescue_list, &punt);
398f9c78b2bSJens Axboe 	spin_unlock(&bs->rescue_lock);
399f9c78b2bSJens Axboe 
400f9c78b2bSJens Axboe 	queue_work(bs->rescue_workqueue, &bs->rescue_work);
401f9c78b2bSJens Axboe }
402f9c78b2bSJens Axboe 
403f9c78b2bSJens Axboe /**
404f9c78b2bSJens Axboe  * bio_alloc_bioset - allocate a bio for I/O
405519c8e9fSRandy Dunlap  * @gfp_mask:   the GFP_* mask given to the slab allocator
406f9c78b2bSJens Axboe  * @nr_iovecs:	number of iovecs to pre-allocate
407f9c78b2bSJens Axboe  * @bs:		the bio_set to allocate from.
408f9c78b2bSJens Axboe  *
409f9c78b2bSJens Axboe  * Description:
410f9c78b2bSJens Axboe  *   If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
411f9c78b2bSJens Axboe  *   backed by the @bs's mempool.
412f9c78b2bSJens Axboe  *
413d0164adcSMel Gorman  *   When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc will
414d0164adcSMel Gorman  *   always be able to allocate a bio. This is due to the mempool guarantees.
415d0164adcSMel Gorman  *   To make this work, callers must never allocate more than 1 bio at a time
416d0164adcSMel Gorman  *   from this pool. Callers that need to allocate more than 1 bio must always
417d0164adcSMel Gorman  *   submit the previously allocated bio for IO before attempting to allocate
418d0164adcSMel Gorman  *   a new one. Failure to do so can cause deadlocks under memory pressure.
419f9c78b2bSJens Axboe  *
420f9c78b2bSJens Axboe  *   Note that when running under generic_make_request() (i.e. any block
421f9c78b2bSJens Axboe  *   driver), bios are not submitted until after you return - see the code in
422f9c78b2bSJens Axboe  *   generic_make_request() that converts recursion into iteration, to prevent
423f9c78b2bSJens Axboe  *   stack overflows.
424f9c78b2bSJens Axboe  *
425f9c78b2bSJens Axboe  *   This would normally mean allocating multiple bios under
426f9c78b2bSJens Axboe  *   generic_make_request() would be susceptible to deadlocks, but we have
427f9c78b2bSJens Axboe  *   deadlock avoidance code that resubmits any blocked bios from a rescuer
428f9c78b2bSJens Axboe  *   thread.
429f9c78b2bSJens Axboe  *
430f9c78b2bSJens Axboe  *   However, we do not guarantee forward progress for allocations from other
431f9c78b2bSJens Axboe  *   mempools. Doing multiple allocations from the same mempool under
432f9c78b2bSJens Axboe  *   generic_make_request() should be avoided - instead, use bio_set's front_pad
433f9c78b2bSJens Axboe  *   for per bio allocations.
434f9c78b2bSJens Axboe  *
435f9c78b2bSJens Axboe  *   RETURNS:
436f9c78b2bSJens Axboe  *   Pointer to new bio on success, NULL on failure.
437f9c78b2bSJens Axboe  */
4387a88fa19SDan Carpenter struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
4397a88fa19SDan Carpenter 			     struct bio_set *bs)
440f9c78b2bSJens Axboe {
441f9c78b2bSJens Axboe 	gfp_t saved_gfp = gfp_mask;
442f9c78b2bSJens Axboe 	unsigned front_pad;
443f9c78b2bSJens Axboe 	unsigned inline_vecs;
444f9c78b2bSJens Axboe 	struct bio_vec *bvl = NULL;
445f9c78b2bSJens Axboe 	struct bio *bio;
446f9c78b2bSJens Axboe 	void *p;
447f9c78b2bSJens Axboe 
448f9c78b2bSJens Axboe 	if (!bs) {
449f9c78b2bSJens Axboe 		if (nr_iovecs > UIO_MAXIOV)
450f9c78b2bSJens Axboe 			return NULL;
451f9c78b2bSJens Axboe 
452f9c78b2bSJens Axboe 		p = kmalloc(sizeof(struct bio) +
453f9c78b2bSJens Axboe 			    nr_iovecs * sizeof(struct bio_vec),
454f9c78b2bSJens Axboe 			    gfp_mask);
455f9c78b2bSJens Axboe 		front_pad = 0;
456f9c78b2bSJens Axboe 		inline_vecs = nr_iovecs;
457f9c78b2bSJens Axboe 	} else {
458d8f429e1SJunichi Nomura 		/* should not use nobvec bioset for nr_iovecs > 0 */
4598aa6ba2fSKent Overstreet 		if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) &&
4608aa6ba2fSKent Overstreet 				 nr_iovecs > 0))
461d8f429e1SJunichi Nomura 			return NULL;
462f9c78b2bSJens Axboe 		/*
463f9c78b2bSJens Axboe 		 * generic_make_request() converts recursion to iteration; this
464f9c78b2bSJens Axboe 		 * means if we're running beneath it, any bios we allocate and
465f9c78b2bSJens Axboe 		 * submit will not be submitted (and thus freed) until after we
466f9c78b2bSJens Axboe 		 * return.
467f9c78b2bSJens Axboe 		 *
468f9c78b2bSJens Axboe 		 * This exposes us to a potential deadlock if we allocate
469f9c78b2bSJens Axboe 		 * multiple bios from the same bio_set while running
470f9c78b2bSJens Axboe 		 * underneath generic_make_request(). If we were to allocate
471f9c78b2bSJens Axboe 		 * multiple bios (say a stacking block driver that was splitting
472f9c78b2bSJens Axboe 		 * bios), we would deadlock if we exhausted the mempool's
473f9c78b2bSJens Axboe 		 * reserve.
474f9c78b2bSJens Axboe 		 *
475f9c78b2bSJens Axboe 		 * We solve this, and guarantee forward progress, with a rescuer
476f9c78b2bSJens Axboe 		 * workqueue per bio_set. If we go to allocate and there are
477f9c78b2bSJens Axboe 		 * bios on current->bio_list, we first try the allocation
478d0164adcSMel Gorman 		 * without __GFP_DIRECT_RECLAIM; if that fails, we punt those
479d0164adcSMel Gorman 		 * bios we would be blocking to the rescuer workqueue before
480d0164adcSMel Gorman 		 * we retry with the original gfp_flags.
481f9c78b2bSJens Axboe 		 */
482f9c78b2bSJens Axboe 
483f5fe1b51SNeilBrown 		if (current->bio_list &&
484f5fe1b51SNeilBrown 		    (!bio_list_empty(&current->bio_list[0]) ||
48547e0fb46SNeilBrown 		     !bio_list_empty(&current->bio_list[1])) &&
48647e0fb46SNeilBrown 		    bs->rescue_workqueue)
487d0164adcSMel Gorman 			gfp_mask &= ~__GFP_DIRECT_RECLAIM;
488f9c78b2bSJens Axboe 
4898aa6ba2fSKent Overstreet 		p = mempool_alloc(&bs->bio_pool, gfp_mask);
490f9c78b2bSJens Axboe 		if (!p && gfp_mask != saved_gfp) {
491f9c78b2bSJens Axboe 			punt_bios_to_rescuer(bs);
492f9c78b2bSJens Axboe 			gfp_mask = saved_gfp;
4938aa6ba2fSKent Overstreet 			p = mempool_alloc(&bs->bio_pool, gfp_mask);
494f9c78b2bSJens Axboe 		}
495f9c78b2bSJens Axboe 
496f9c78b2bSJens Axboe 		front_pad = bs->front_pad;
497f9c78b2bSJens Axboe 		inline_vecs = BIO_INLINE_VECS;
498f9c78b2bSJens Axboe 	}
499f9c78b2bSJens Axboe 
500f9c78b2bSJens Axboe 	if (unlikely(!p))
501f9c78b2bSJens Axboe 		return NULL;
502f9c78b2bSJens Axboe 
503f9c78b2bSJens Axboe 	bio = p + front_pad;
5043a83f467SMing Lei 	bio_init(bio, NULL, 0);
505f9c78b2bSJens Axboe 
506f9c78b2bSJens Axboe 	if (nr_iovecs > inline_vecs) {
507ed996a52SChristoph Hellwig 		unsigned long idx = 0;
508ed996a52SChristoph Hellwig 
5098aa6ba2fSKent Overstreet 		bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
510f9c78b2bSJens Axboe 		if (!bvl && gfp_mask != saved_gfp) {
511f9c78b2bSJens Axboe 			punt_bios_to_rescuer(bs);
512f9c78b2bSJens Axboe 			gfp_mask = saved_gfp;
5138aa6ba2fSKent Overstreet 			bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
514f9c78b2bSJens Axboe 		}
515f9c78b2bSJens Axboe 
516f9c78b2bSJens Axboe 		if (unlikely(!bvl))
517f9c78b2bSJens Axboe 			goto err_free;
518f9c78b2bSJens Axboe 
519ed996a52SChristoph Hellwig 		bio->bi_flags |= idx << BVEC_POOL_OFFSET;
520f9c78b2bSJens Axboe 	} else if (nr_iovecs) {
521f9c78b2bSJens Axboe 		bvl = bio->bi_inline_vecs;
522f9c78b2bSJens Axboe 	}
523f9c78b2bSJens Axboe 
524f9c78b2bSJens Axboe 	bio->bi_pool = bs;
525f9c78b2bSJens Axboe 	bio->bi_max_vecs = nr_iovecs;
526f9c78b2bSJens Axboe 	bio->bi_io_vec = bvl;
527f9c78b2bSJens Axboe 	return bio;
528f9c78b2bSJens Axboe 
529f9c78b2bSJens Axboe err_free:
5308aa6ba2fSKent Overstreet 	mempool_free(p, &bs->bio_pool);
531f9c78b2bSJens Axboe 	return NULL;
532f9c78b2bSJens Axboe }
533f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_alloc_bioset);
534f9c78b2bSJens Axboe 
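/*
 * Example: a sketch (not from this file) of honouring the "one bio at a
 * time" mempool rule documented above; 'more_work', 'bs' and the
 * per-iteration setup are assumed. Each bio is submitted before the next
 * allocation so the mempool reserve can make forward progress:
 *
 *	while (more_work) {
 *		struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, bs);
 *
 *		... fill in bio for the next chunk of work ...
 *		submit_bio(bio);
 *	}
 */
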
53538a72dacSKent Overstreet void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
536f9c78b2bSJens Axboe {
537f9c78b2bSJens Axboe 	unsigned long flags;
538f9c78b2bSJens Axboe 	struct bio_vec bv;
539f9c78b2bSJens Axboe 	struct bvec_iter iter;
540f9c78b2bSJens Axboe 
54138a72dacSKent Overstreet 	__bio_for_each_segment(bv, bio, iter, start) {
542f9c78b2bSJens Axboe 		char *data = bvec_kmap_irq(&bv, &flags);
543f9c78b2bSJens Axboe 		memset(data, 0, bv.bv_len);
544f9c78b2bSJens Axboe 		flush_dcache_page(bv.bv_page);
545f9c78b2bSJens Axboe 		bvec_kunmap_irq(data, &flags);
546f9c78b2bSJens Axboe 	}
547f9c78b2bSJens Axboe }
54838a72dacSKent Overstreet EXPORT_SYMBOL(zero_fill_bio_iter);
549f9c78b2bSJens Axboe 
550f9c78b2bSJens Axboe /**
551f9c78b2bSJens Axboe  * bio_put - release a reference to a bio
552f9c78b2bSJens Axboe  * @bio:   bio to release reference to
553f9c78b2bSJens Axboe  *
554f9c78b2bSJens Axboe  * Description:
555f9c78b2bSJens Axboe  *   Put a reference to a &struct bio, either one you have gotten with
5569b10f6a9SNeilBrown  *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
557f9c78b2bSJens Axboe  **/
558f9c78b2bSJens Axboe void bio_put(struct bio *bio)
559f9c78b2bSJens Axboe {
560dac56212SJens Axboe 	if (!bio_flagged(bio, BIO_REFFED))
561dac56212SJens Axboe 		bio_free(bio);
562dac56212SJens Axboe 	else {
563dac56212SJens Axboe 		BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));
564f9c78b2bSJens Axboe 
565f9c78b2bSJens Axboe 		/*
566f9c78b2bSJens Axboe 		 * last put frees it
567f9c78b2bSJens Axboe 		 */
568dac56212SJens Axboe 		if (atomic_dec_and_test(&bio->__bi_cnt))
569f9c78b2bSJens Axboe 			bio_free(bio);
570f9c78b2bSJens Axboe 	}
571dac56212SJens Axboe }
572f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_put);
573f9c78b2bSJens Axboe 
5746c210aa5SChristoph Hellwig int bio_phys_segments(struct request_queue *q, struct bio *bio)
575f9c78b2bSJens Axboe {
576f9c78b2bSJens Axboe 	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
577f9c78b2bSJens Axboe 		blk_recount_segments(q, bio);
578f9c78b2bSJens Axboe 
579f9c78b2bSJens Axboe 	return bio->bi_phys_segments;
580f9c78b2bSJens Axboe }
581f9c78b2bSJens Axboe 
582f9c78b2bSJens Axboe /**
583f9c78b2bSJens Axboe  * 	__bio_clone_fast - clone a bio that shares the original bio's biovec
584f9c78b2bSJens Axboe  * 	@bio: destination bio
585f9c78b2bSJens Axboe  * 	@bio_src: bio to clone
586f9c78b2bSJens Axboe  *
587f9c78b2bSJens Axboe  *	Clone a &bio. Caller will own the returned bio, but not
588f9c78b2bSJens Axboe  *	the actual data it points to. Reference count of returned
589f9c78b2bSJens Axboe  * 	bio will be one.
590f9c78b2bSJens Axboe  *
591f9c78b2bSJens Axboe  * 	Caller must ensure that @bio_src is not freed before @bio.
592f9c78b2bSJens Axboe  */
593f9c78b2bSJens Axboe void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
594f9c78b2bSJens Axboe {
595ed996a52SChristoph Hellwig 	BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));
596f9c78b2bSJens Axboe 
597f9c78b2bSJens Axboe 	/*
59874d46992SChristoph Hellwig 	 * most users will be overriding ->bi_disk with a new target,
599f9c78b2bSJens Axboe 	 * so we don't set or calculate new physical/hw segment counts here
600f9c78b2bSJens Axboe 	 */
60174d46992SChristoph Hellwig 	bio->bi_disk = bio_src->bi_disk;
60262530ed8SMichael Lyle 	bio->bi_partno = bio_src->bi_partno;
603b7c44ed9SJens Axboe 	bio_set_flag(bio, BIO_CLONED);
604111be883SShaohua Li 	if (bio_flagged(bio_src, BIO_THROTTLED))
605111be883SShaohua Li 		bio_set_flag(bio, BIO_THROTTLED);
6061eff9d32SJens Axboe 	bio->bi_opf = bio_src->bi_opf;
607ca474b73SHannes Reinecke 	bio->bi_ioprio = bio_src->bi_ioprio;
608cb6934f8SJens Axboe 	bio->bi_write_hint = bio_src->bi_write_hint;
609f9c78b2bSJens Axboe 	bio->bi_iter = bio_src->bi_iter;
610f9c78b2bSJens Axboe 	bio->bi_io_vec = bio_src->bi_io_vec;
61120bd723eSPaolo Valente 
612db6638d7SDennis Zhou 	bio_clone_blkg_association(bio, bio_src);
613e439bedfSDennis Zhou 	blkcg_bio_issue_init(bio);
614f9c78b2bSJens Axboe }
615f9c78b2bSJens Axboe EXPORT_SYMBOL(__bio_clone_fast);
616f9c78b2bSJens Axboe 
617f9c78b2bSJens Axboe /**
618f9c78b2bSJens Axboe  *	bio_clone_fast - clone a bio that shares the original bio's biovec
619f9c78b2bSJens Axboe  *	@bio: bio to clone
620f9c78b2bSJens Axboe  *	@gfp_mask: allocation priority
621f9c78b2bSJens Axboe  *	@bs: bio_set to allocate from
622f9c78b2bSJens Axboe  *
623f9c78b2bSJens Axboe  * 	Like __bio_clone_fast, only also allocates the returned bio
624f9c78b2bSJens Axboe  */
625f9c78b2bSJens Axboe struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
626f9c78b2bSJens Axboe {
627f9c78b2bSJens Axboe 	struct bio *b;
628f9c78b2bSJens Axboe 
629f9c78b2bSJens Axboe 	b = bio_alloc_bioset(gfp_mask, 0, bs);
630f9c78b2bSJens Axboe 	if (!b)
631f9c78b2bSJens Axboe 		return NULL;
632f9c78b2bSJens Axboe 
633f9c78b2bSJens Axboe 	__bio_clone_fast(b, bio);
634f9c78b2bSJens Axboe 
635f9c78b2bSJens Axboe 	if (bio_integrity(bio)) {
636f9c78b2bSJens Axboe 		int ret;
637f9c78b2bSJens Axboe 
638f9c78b2bSJens Axboe 		ret = bio_integrity_clone(b, bio, gfp_mask);
639f9c78b2bSJens Axboe 
640f9c78b2bSJens Axboe 		if (ret < 0) {
641f9c78b2bSJens Axboe 			bio_put(b);
642f9c78b2bSJens Axboe 			return NULL;
643f9c78b2bSJens Axboe 		}
644f9c78b2bSJens Axboe 	}
645f9c78b2bSJens Axboe 
646f9c78b2bSJens Axboe 	return b;
647f9c78b2bSJens Axboe }
648f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_clone_fast);
649f9c78b2bSJens Axboe 
6505919482eSMing Lei static inline bool page_is_mergeable(const struct bio_vec *bv,
6515919482eSMing Lei 		struct page *page, unsigned int len, unsigned int off,
6525919482eSMing Lei 		bool same_page)
6535919482eSMing Lei {
6545919482eSMing Lei 	phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) +
6555919482eSMing Lei 		bv->bv_offset + bv->bv_len - 1;
6565919482eSMing Lei 	phys_addr_t page_addr = page_to_phys(page);
6575919482eSMing Lei 
6585919482eSMing Lei 	if (vec_end_addr + 1 != page_addr + off)
6595919482eSMing Lei 		return false;
6605919482eSMing Lei 	if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
6615919482eSMing Lei 		return false;
6625919482eSMing Lei 	if (same_page && (vec_end_addr & PAGE_MASK) != page_addr)
6635919482eSMing Lei 		return false;
6645919482eSMing Lei 
6655919482eSMing Lei 	return true;
6665919482eSMing Lei }
6675919482eSMing Lei 
668489fbbcbSMing Lei /*
669489fbbcbSMing Lei  * Check if the @page can be added to the current segment(@bv), and make
670489fbbcbSMing Lei  * sure to call it only if page_is_mergeable(@bv, @page) is true
671489fbbcbSMing Lei  */
672489fbbcbSMing Lei static bool can_add_page_to_seg(struct request_queue *q,
673489fbbcbSMing Lei 		struct bio_vec *bv, struct page *page, unsigned len,
674489fbbcbSMing Lei 		unsigned offset)
675489fbbcbSMing Lei {
676489fbbcbSMing Lei 	unsigned long mask = queue_segment_boundary(q);
677489fbbcbSMing Lei 	phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
678489fbbcbSMing Lei 	phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;
679489fbbcbSMing Lei 
680489fbbcbSMing Lei 	if ((addr1 | mask) != (addr2 | mask))
681489fbbcbSMing Lei 		return false;
682489fbbcbSMing Lei 
683489fbbcbSMing Lei 	if (bv->bv_len + len > queue_max_segment_size(q))
684489fbbcbSMing Lei 		return false;
685489fbbcbSMing Lei 
686489fbbcbSMing Lei 	return true;
687489fbbcbSMing Lei }
688489fbbcbSMing Lei 
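/*
 * Worked example for the boundary test above: with a 64K segment
 * boundary, queue_segment_boundary() returns mask = 0xffff. For
 * addr1 = 0x1fff0 and addr2 = 0x20010 we get addr1 | mask = 0x1ffff
 * but addr2 | mask = 0x2ffff, so the vec would straddle a 64K
 * boundary and the page must not be merged into this segment.
 */
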
689f4595875SShaohua Li /**
69019047087SMing Lei  *	__bio_add_pc_page	- attempt to add page to passthrough bio
691c66a14d0SKent Overstreet  *	@q: the target queue
692c66a14d0SKent Overstreet  *	@bio: destination bio
693c66a14d0SKent Overstreet  *	@page: page to add
694c66a14d0SKent Overstreet  *	@len: vec entry length
695c66a14d0SKent Overstreet  *	@offset: vec entry offset
69619047087SMing Lei  *	@put_same_page: put the page if it is the same as the last added page
697f9c78b2bSJens Axboe  *
698c66a14d0SKent Overstreet  *	Attempt to add a page to the bio_vec maplist. This can fail for a
699c66a14d0SKent Overstreet  *	number of reasons, such as the bio being full or target block device
700c66a14d0SKent Overstreet  *	limitations. The target block device must allow bios up to PAGE_SIZE,
701c66a14d0SKent Overstreet  *	so it is always possible to add a single page to an empty bio.
702c66a14d0SKent Overstreet  *
7035a8ce240SMing Lei  *	This should only be used by passthrough bios.
704f9c78b2bSJens Axboe  */
70519047087SMing Lei int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
70619047087SMing Lei 		struct page *page, unsigned int len, unsigned int offset,
70719047087SMing Lei 		bool put_same_page)
708f9c78b2bSJens Axboe {
709f9c78b2bSJens Axboe 	struct bio_vec *bvec;
710f9c78b2bSJens Axboe 
711f9c78b2bSJens Axboe 	/*
712f9c78b2bSJens Axboe 	 * cloned bio must not modify vec list
713f9c78b2bSJens Axboe 	 */
714f9c78b2bSJens Axboe 	if (unlikely(bio_flagged(bio, BIO_CLONED)))
715f9c78b2bSJens Axboe 		return 0;
716f9c78b2bSJens Axboe 
717c66a14d0SKent Overstreet 	if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
718f9c78b2bSJens Axboe 		return 0;
719f9c78b2bSJens Axboe 
720f9c78b2bSJens Axboe 	/*
721f9c78b2bSJens Axboe 	 * For filesystems with a blocksize smaller than the pagesize
722f9c78b2bSJens Axboe 	 * we will often be called with the same page as last time and
723f9c78b2bSJens Axboe 	 * a consecutive offset.  Optimize this special case.
724f9c78b2bSJens Axboe 	 */
725f9c78b2bSJens Axboe 	if (bio->bi_vcnt > 0) {
7265a8ce240SMing Lei 		bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
727f9c78b2bSJens Axboe 
7285a8ce240SMing Lei 		if (page == bvec->bv_page &&
7295a8ce240SMing Lei 		    offset == bvec->bv_offset + bvec->bv_len) {
73019047087SMing Lei 			if (put_same_page)
73119047087SMing Lei 				put_page(page);
732489fbbcbSMing Lei  bvec_merge:
7335a8ce240SMing Lei 			bvec->bv_len += len;
734fcbf6a08SMaurizio Lombardi 			bio->bi_iter.bi_size += len;
735f9c78b2bSJens Axboe 			goto done;
736f9c78b2bSJens Axboe 		}
73766cb45aaSJens Axboe 
73866cb45aaSJens Axboe 		/*
73966cb45aaSJens Axboe 		 * If the queue doesn't support SG gaps and adding this
74066cb45aaSJens Axboe 		 * offset would create a gap, disallow it.
74166cb45aaSJens Axboe 		 */
7425a8ce240SMing Lei 		if (bvec_gap_to_prev(q, bvec, offset))
74366cb45aaSJens Axboe 			return 0;
744489fbbcbSMing Lei 
745489fbbcbSMing Lei 		if (page_is_mergeable(bvec, page, len, offset, false) &&
746489fbbcbSMing Lei 				can_add_page_to_seg(q, bvec, page, len, offset))
747489fbbcbSMing Lei 			goto bvec_merge;
748f9c78b2bSJens Axboe 	}
749f9c78b2bSJens Axboe 
7500aa69fd3SChristoph Hellwig 	if (bio_full(bio))
751f9c78b2bSJens Axboe 		return 0;
752f9c78b2bSJens Axboe 
753489fbbcbSMing Lei 	if (bio->bi_phys_segments >= queue_max_segments(q))
754489fbbcbSMing Lei 		return 0;
755489fbbcbSMing Lei 
756f9c78b2bSJens Axboe 	/*
757f9c78b2bSJens Axboe 	 * set up the new entry; we might clear it again later if we
758f9c78b2bSJens Axboe 	 * cannot add the page
759f9c78b2bSJens Axboe 	 */
760f9c78b2bSJens Axboe 	bvec = &bio->bi_io_vec[bio->bi_vcnt];
761f9c78b2bSJens Axboe 	bvec->bv_page = page;
762f9c78b2bSJens Axboe 	bvec->bv_len = len;
763f9c78b2bSJens Axboe 	bvec->bv_offset = offset;
764fcbf6a08SMaurizio Lombardi 	bio->bi_vcnt++;
765fcbf6a08SMaurizio Lombardi 	bio->bi_iter.bi_size += len;
766fcbf6a08SMaurizio Lombardi 
767f9c78b2bSJens Axboe  done:
768489fbbcbSMing Lei 	bio->bi_phys_segments = bio->bi_vcnt;
769489fbbcbSMing Lei 	bio_set_flag(bio, BIO_SEG_VALID);
770f9c78b2bSJens Axboe 	return len;
771f9c78b2bSJens Axboe }
77219047087SMing Lei EXPORT_SYMBOL(__bio_add_pc_page);
77319047087SMing Lei 
77419047087SMing Lei int bio_add_pc_page(struct request_queue *q, struct bio *bio,
77519047087SMing Lei 		struct page *page, unsigned int len, unsigned int offset)
77619047087SMing Lei {
77719047087SMing Lei 	return __bio_add_pc_page(q, bio, page, len, offset, false);
77819047087SMing Lei }
779f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_add_pc_page);
780f9c78b2bSJens Axboe 
781f9c78b2bSJens Axboe /**
7820aa69fd3SChristoph Hellwig  * __bio_try_merge_page - try appending data to an existing bvec.
7830aa69fd3SChristoph Hellwig  * @bio: destination bio
7840aa69fd3SChristoph Hellwig  * @page: page to add
7850aa69fd3SChristoph Hellwig  * @len: length of the data to add
7860aa69fd3SChristoph Hellwig  * @off: offset of the data in @page
78707173c3eSMing Lei  * @same_page: if %true only merge if the new data is in the same physical
78807173c3eSMing Lei  *		page as the last segment of the bio.
7890aa69fd3SChristoph Hellwig  *
7900aa69fd3SChristoph Hellwig  * Try to add the data at @page + @off to the last bvec of @bio.  This is a
7910aa69fd3SChristoph Hellwig  * a useful optimisation for file systems with a block size smaller than the
7920aa69fd3SChristoph Hellwig  * page size.
7930aa69fd3SChristoph Hellwig  *
7940aa69fd3SChristoph Hellwig  * Return %true on success or %false on failure.
7950aa69fd3SChristoph Hellwig  */
7960aa69fd3SChristoph Hellwig bool __bio_try_merge_page(struct bio *bio, struct page *page,
79707173c3eSMing Lei 		unsigned int len, unsigned int off, bool same_page)
7980aa69fd3SChristoph Hellwig {
7990aa69fd3SChristoph Hellwig 	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
8000aa69fd3SChristoph Hellwig 		return false;
8010aa69fd3SChristoph Hellwig 
8020aa69fd3SChristoph Hellwig 	if (bio->bi_vcnt > 0) {
8030aa69fd3SChristoph Hellwig 		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
8040aa69fd3SChristoph Hellwig 
8055919482eSMing Lei 		if (page_is_mergeable(bv, page, len, off, same_page)) {
8060aa69fd3SChristoph Hellwig 			bv->bv_len += len;
8070aa69fd3SChristoph Hellwig 			bio->bi_iter.bi_size += len;
8080aa69fd3SChristoph Hellwig 			return true;
8090aa69fd3SChristoph Hellwig 		}
8105919482eSMing Lei 	}
8110aa69fd3SChristoph Hellwig 	return false;
8120aa69fd3SChristoph Hellwig }
8130aa69fd3SChristoph Hellwig EXPORT_SYMBOL_GPL(__bio_try_merge_page);
8140aa69fd3SChristoph Hellwig 
8150aa69fd3SChristoph Hellwig /**
8160aa69fd3SChristoph Hellwig  * __bio_add_page - add page to a bio in a new segment
8170aa69fd3SChristoph Hellwig  * @bio: destination bio
8180aa69fd3SChristoph Hellwig  * @page: page to add
8190aa69fd3SChristoph Hellwig  * @len: length of the data to add
8200aa69fd3SChristoph Hellwig  * @off: offset of the data in @page
8210aa69fd3SChristoph Hellwig  *
8220aa69fd3SChristoph Hellwig  * Add the data at @page + @off to @bio as a new bvec.  The caller must ensure
8230aa69fd3SChristoph Hellwig  * that @bio has space for another bvec.
8240aa69fd3SChristoph Hellwig  */
8250aa69fd3SChristoph Hellwig void __bio_add_page(struct bio *bio, struct page *page,
8260aa69fd3SChristoph Hellwig 		unsigned int len, unsigned int off)
8270aa69fd3SChristoph Hellwig {
8280aa69fd3SChristoph Hellwig 	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];
8290aa69fd3SChristoph Hellwig 
8300aa69fd3SChristoph Hellwig 	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
8310aa69fd3SChristoph Hellwig 	WARN_ON_ONCE(bio_full(bio));
8320aa69fd3SChristoph Hellwig 
8330aa69fd3SChristoph Hellwig 	bv->bv_page = page;
8340aa69fd3SChristoph Hellwig 	bv->bv_offset = off;
8350aa69fd3SChristoph Hellwig 	bv->bv_len = len;
8360aa69fd3SChristoph Hellwig 
8370aa69fd3SChristoph Hellwig 	bio->bi_iter.bi_size += len;
8380aa69fd3SChristoph Hellwig 	bio->bi_vcnt++;
8390aa69fd3SChristoph Hellwig }
8400aa69fd3SChristoph Hellwig EXPORT_SYMBOL_GPL(__bio_add_page);
8410aa69fd3SChristoph Hellwig 
8420aa69fd3SChristoph Hellwig /**
843f9c78b2bSJens Axboe  *	bio_add_page	-	attempt to add page to bio
844f9c78b2bSJens Axboe  *	@bio: destination bio
845f9c78b2bSJens Axboe  *	@page: page to add
846f9c78b2bSJens Axboe  *	@len: vec entry length
847f9c78b2bSJens Axboe  *	@offset: vec entry offset
848f9c78b2bSJens Axboe  *
849c66a14d0SKent Overstreet  *	Attempt to add a page to the bio_vec maplist. This will only fail
850c66a14d0SKent Overstreet  *	if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
851f9c78b2bSJens Axboe  */
852c66a14d0SKent Overstreet int bio_add_page(struct bio *bio, struct page *page,
853c66a14d0SKent Overstreet 		 unsigned int len, unsigned int offset)
854f9c78b2bSJens Axboe {
85507173c3eSMing Lei 	if (!__bio_try_merge_page(bio, page, len, offset, false)) {
8560aa69fd3SChristoph Hellwig 		if (bio_full(bio))
857c66a14d0SKent Overstreet 			return 0;
8580aa69fd3SChristoph Hellwig 		__bio_add_page(bio, page, len, offset);
859c66a14d0SKent Overstreet 	}
860c66a14d0SKent Overstreet 	return len;
861f9c78b2bSJens Axboe }
862f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_add_page);
863f9c78b2bSJens Axboe 
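/*
 * Example: a sketch (not from this file) of filling a bio page by page;
 * 'pages' and 'nr_pages' are assumed caller-provided. bio_add_page()
 * returns 0 once the bio is full (or the bio is a clone), at which point
 * the bio should be submitted and a new one started:
 *
 *	for (i = 0; i < nr_pages; i++)
 *		if (!bio_add_page(bio, pages[i], PAGE_SIZE, 0))
 *			break;
 */
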
8646d0c48aeSJens Axboe static int __bio_iov_bvec_add_pages(struct bio *bio, struct iov_iter *iter)
8656d0c48aeSJens Axboe {
8666d0c48aeSJens Axboe 	const struct bio_vec *bv = iter->bvec;
8676d0c48aeSJens Axboe 	unsigned int len;
8686d0c48aeSJens Axboe 	size_t size;
8696d0c48aeSJens Axboe 
8706d0c48aeSJens Axboe 	if (WARN_ON_ONCE(iter->iov_offset > bv->bv_len))
8716d0c48aeSJens Axboe 		return -EINVAL;
8726d0c48aeSJens Axboe 
8736d0c48aeSJens Axboe 	len = min_t(size_t, bv->bv_len - iter->iov_offset, iter->count);
8746d0c48aeSJens Axboe 	size = bio_add_page(bio, bv->bv_page, len,
8756d0c48aeSJens Axboe 				bv->bv_offset + iter->iov_offset);
876a10584c3SChristoph Hellwig 	if (unlikely(size != len))
877a10584c3SChristoph Hellwig 		return -EINVAL;
878a10584c3SChristoph Hellwig 
879399254aaSJens Axboe 	if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
8806d0c48aeSJens Axboe 		struct page *page;
8816d0c48aeSJens Axboe 		int i;
8826d0c48aeSJens Axboe 
8836d0c48aeSJens Axboe 		mp_bvec_for_each_page(page, bv, i)
8846d0c48aeSJens Axboe 			get_page(page);
885399254aaSJens Axboe 	}
886399254aaSJens Axboe 
8876d0c48aeSJens Axboe 	iov_iter_advance(iter, size);
8886d0c48aeSJens Axboe 	return 0;
8896d0c48aeSJens Axboe }
8906d0c48aeSJens Axboe 
891576ed913SChristoph Hellwig #define PAGE_PTRS_PER_BVEC     (sizeof(struct bio_vec) / sizeof(struct page *))
892576ed913SChristoph Hellwig 
8932cefe4dbSKent Overstreet /**
89417d51b10SMartin Wilck  * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
8952cefe4dbSKent Overstreet  * @bio: bio to add pages to
8962cefe4dbSKent Overstreet  * @iter: iov iterator describing the region to be mapped
8972cefe4dbSKent Overstreet  *
89817d51b10SMartin Wilck  * Pins pages from *iter and appends them to @bio's bvec array. The
8992cefe4dbSKent Overstreet  * pages will have to be released using put_page() when done.
90017d51b10SMartin Wilck  * For multi-segment *iter, this function only adds pages from the
90117d51b10SMartin Wilck  * next non-empty segment of the iov iterator.
9022cefe4dbSKent Overstreet  */
90317d51b10SMartin Wilck static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
9042cefe4dbSKent Overstreet {
905576ed913SChristoph Hellwig 	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
906576ed913SChristoph Hellwig 	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
9072cefe4dbSKent Overstreet 	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
9082cefe4dbSKent Overstreet 	struct page **pages = (struct page **)bv;
909576ed913SChristoph Hellwig 	ssize_t size, left;
910576ed913SChristoph Hellwig 	unsigned len, i;
911b403ea24SMartin Wilck 	size_t offset;
912576ed913SChristoph Hellwig 
913576ed913SChristoph Hellwig 	/*
914576ed913SChristoph Hellwig 	 * Move page array up in the allocated memory for the bio vecs as far as
915576ed913SChristoph Hellwig 	 * possible so that we can start filling biovecs from the beginning
916576ed913SChristoph Hellwig 	 * without overwriting the temporary page array.
917576ed913SChristoph Hellwig 	 */
918576ed913SChristoph Hellwig 	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
919576ed913SChristoph Hellwig 	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
9202cefe4dbSKent Overstreet 
9212cefe4dbSKent Overstreet 	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
9222cefe4dbSKent Overstreet 	if (unlikely(size <= 0))
9232cefe4dbSKent Overstreet 		return size ? size : -EFAULT;
9242cefe4dbSKent Overstreet 
925576ed913SChristoph Hellwig 	for (left = size, i = 0; left > 0; left -= len, i++) {
926576ed913SChristoph Hellwig 		struct page *page = pages[i];
9272cefe4dbSKent Overstreet 
928576ed913SChristoph Hellwig 		len = min_t(size_t, PAGE_SIZE - offset, left);
929576ed913SChristoph Hellwig 		if (WARN_ON_ONCE(bio_add_page(bio, page, len, offset) != len))
930576ed913SChristoph Hellwig 			return -EINVAL;
931576ed913SChristoph Hellwig 		offset = 0;
9322cefe4dbSKent Overstreet 	}
9332cefe4dbSKent Overstreet 
9342cefe4dbSKent Overstreet 	iov_iter_advance(iter, size);
9352cefe4dbSKent Overstreet 	return 0;
9362cefe4dbSKent Overstreet }
93717d51b10SMartin Wilck 
93817d51b10SMartin Wilck /**
9396d0c48aeSJens Axboe  * bio_iov_iter_get_pages - add user or kernel pages to a bio
94017d51b10SMartin Wilck  * @bio: bio to add pages to
9416d0c48aeSJens Axboe  * @iter: iov iterator describing the region to be added
94217d51b10SMartin Wilck  *
9436d0c48aeSJens Axboe  * This takes either an iterator pointing to user memory, or one pointing to
9446d0c48aeSJens Axboe  * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
9456d0c48aeSJens Axboe  * map them into the kernel. On IO completion, the caller should put those
946399254aaSJens Axboe  * pages. If we're adding kernel pages, and the caller told us it's safe to
947399254aaSJens Axboe  * do so, we just have to add the pages to the bio directly. We don't grab an
948399254aaSJens Axboe  * extra reference to those pages (the user should already have that), and we
949399254aaSJens Axboe  * don't put the page on IO completion. The caller needs to check if the bio is
950399254aaSJens Axboe  * flagged BIO_NO_PAGE_REF on IO completion. If it isn't, then pages should be
951399254aaSJens Axboe  * released.
9526d0c48aeSJens Axboe  *
95317d51b10SMartin Wilck  * The function tries, but does not guarantee, to pin as many pages as
9546d0c48aeSJens Axboe  * fit into the bio, or are requested in *iter, whichever is smaller. If
9556d0c48aeSJens Axboe  * MM encounters an error pinning the requested pages, it stops. Error
9566d0c48aeSJens Axboe  * is returned only if 0 pages could be pinned.
95717d51b10SMartin Wilck  */
95817d51b10SMartin Wilck int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
95917d51b10SMartin Wilck {
9606d0c48aeSJens Axboe 	const bool is_bvec = iov_iter_is_bvec(iter);
96117d51b10SMartin Wilck 	unsigned short orig_vcnt = bio->bi_vcnt;
96217d51b10SMartin Wilck 
963399254aaSJens Axboe 	/*
964399254aaSJens Axboe 	 * If this is a BVEC iter, then the pages are kernel pages. Don't
965399254aaSJens Axboe 	 * release them on IO completion if the caller asked us not to.
966399254aaSJens Axboe 	 */
967399254aaSJens Axboe 	if (is_bvec && iov_iter_bvec_no_ref(iter))
968399254aaSJens Axboe 		bio_set_flag(bio, BIO_NO_PAGE_REF);
969399254aaSJens Axboe 
97017d51b10SMartin Wilck 	do {
9716d0c48aeSJens Axboe 		int ret;
9726d0c48aeSJens Axboe 
9736d0c48aeSJens Axboe 		if (is_bvec)
9746d0c48aeSJens Axboe 			ret = __bio_iov_bvec_add_pages(bio, iter);
9756d0c48aeSJens Axboe 		else
9766d0c48aeSJens Axboe 			ret = __bio_iov_iter_get_pages(bio, iter);
97717d51b10SMartin Wilck 
97817d51b10SMartin Wilck 		if (unlikely(ret))
97917d51b10SMartin Wilck 			return bio->bi_vcnt > orig_vcnt ? 0 : ret;
98017d51b10SMartin Wilck 
98117d51b10SMartin Wilck 	} while (iov_iter_count(iter) && !bio_full(bio));
98217d51b10SMartin Wilck 
98317d51b10SMartin Wilck 	return 0;
98417d51b10SMartin Wilck }
9852cefe4dbSKent Overstreet 
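/*
 * Example: a sketch (not from this file) of the direct-IO style call
 * pattern; 'bio' and 'iter' are assumed set up by the caller. A non-zero
 * return means no pages at all could be added; on success any data left
 * in 'iter' simply goes into the next bio:
 *
 *	ret = bio_iov_iter_get_pages(bio, iter);
 *	if (ret)
 *		return ret;
 *	submit_bio(bio);
 */
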
9864246a0b6SChristoph Hellwig static void submit_bio_wait_endio(struct bio *bio)
987f9c78b2bSJens Axboe {
98865e53aabSChristoph Hellwig 	complete(bio->bi_private);
989f9c78b2bSJens Axboe }
990f9c78b2bSJens Axboe 
991f9c78b2bSJens Axboe /**
992f9c78b2bSJens Axboe  * submit_bio_wait - submit a bio, and wait until it completes
993f9c78b2bSJens Axboe  * @bio: The &struct bio which describes the I/O
994f9c78b2bSJens Axboe  *
995f9c78b2bSJens Axboe  * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
996f9c78b2bSJens Axboe  * bio_endio() on failure.
9973d289d68SJan Kara  *
9983d289d68SJan Kara  * WARNING: Unlike how submit_bio() is usually used, this function does not
9993d289d68SJan Kara  * result in the bio reference being consumed. The caller must drop the
10003d289d68SJan Kara  * reference on their own.
1001f9c78b2bSJens Axboe  */
10024e49ea4aSMike Christie int submit_bio_wait(struct bio *bio)
1003f9c78b2bSJens Axboe {
1004e319e1fbSByungchul Park 	DECLARE_COMPLETION_ONSTACK_MAP(done, bio->bi_disk->lockdep_map);
1005f9c78b2bSJens Axboe 
100665e53aabSChristoph Hellwig 	bio->bi_private = &done;
1007f9c78b2bSJens Axboe 	bio->bi_end_io = submit_bio_wait_endio;
10081eff9d32SJens Axboe 	bio->bi_opf |= REQ_SYNC;
10094e49ea4aSMike Christie 	submit_bio(bio);
101065e53aabSChristoph Hellwig 	wait_for_completion_io(&done);
1011f9c78b2bSJens Axboe 
101265e53aabSChristoph Hellwig 	return blk_status_to_errno(bio->bi_status);
1013f9c78b2bSJens Axboe }
1014f9c78b2bSJens Axboe EXPORT_SYMBOL(submit_bio_wait);
1015f9c78b2bSJens Axboe 
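/*
 * Example: a sketch (not from this file) of a synchronous one-page read;
 * 'bdev' and 'page' are assumed. Note the explicit bio_put(): per the
 * WARNING above, submit_bio_wait() does not consume the bio reference:
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = 0;
 *	bio->bi_opf = REQ_OP_READ;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	ret = submit_bio_wait(bio);
 *	bio_put(bio);
 */
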
1016f9c78b2bSJens Axboe /**
1017f9c78b2bSJens Axboe  * bio_advance - increment/complete a bio by some number of bytes
1018f9c78b2bSJens Axboe  * @bio:	bio to advance
1019f9c78b2bSJens Axboe  * @bytes:	number of bytes to complete
1020f9c78b2bSJens Axboe  *
1021f9c78b2bSJens Axboe  * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
1022f9c78b2bSJens Axboe  * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
1023f9c78b2bSJens Axboe  * be updated on the last bvec as well.
1024f9c78b2bSJens Axboe  *
1025f9c78b2bSJens Axboe  * @bio will then represent the remaining, uncompleted portion of the io.
1026f9c78b2bSJens Axboe  */
1027f9c78b2bSJens Axboe void bio_advance(struct bio *bio, unsigned bytes)
1028f9c78b2bSJens Axboe {
1029f9c78b2bSJens Axboe 	if (bio_integrity(bio))
1030f9c78b2bSJens Axboe 		bio_integrity_advance(bio, bytes);
1031f9c78b2bSJens Axboe 
1032f9c78b2bSJens Axboe 	bio_advance_iter(bio, &bio->bi_iter, bytes);
1033f9c78b2bSJens Axboe }
1034f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_advance);
1035f9c78b2bSJens Axboe 
103645db54d5SKent Overstreet void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
103745db54d5SKent Overstreet 			struct bio *src, struct bvec_iter *src_iter)
1038f9c78b2bSJens Axboe {
1039f9c78b2bSJens Axboe 	struct bio_vec src_bv, dst_bv;
1040f9c78b2bSJens Axboe 	void *src_p, *dst_p;
1041f9c78b2bSJens Axboe 	unsigned bytes;
1042f9c78b2bSJens Axboe 
104345db54d5SKent Overstreet 	while (src_iter->bi_size && dst_iter->bi_size) {
104445db54d5SKent Overstreet 		src_bv = bio_iter_iovec(src, *src_iter);
104545db54d5SKent Overstreet 		dst_bv = bio_iter_iovec(dst, *dst_iter);
104645db54d5SKent Overstreet 
104745db54d5SKent Overstreet 		bytes = min(src_bv.bv_len, dst_bv.bv_len);
104845db54d5SKent Overstreet 
104945db54d5SKent Overstreet 		src_p = kmap_atomic(src_bv.bv_page);
105045db54d5SKent Overstreet 		dst_p = kmap_atomic(dst_bv.bv_page);
105145db54d5SKent Overstreet 
105245db54d5SKent Overstreet 		memcpy(dst_p + dst_bv.bv_offset,
105345db54d5SKent Overstreet 		       src_p + src_bv.bv_offset,
105445db54d5SKent Overstreet 		       bytes);
105545db54d5SKent Overstreet 
105645db54d5SKent Overstreet 		kunmap_atomic(dst_p);
105745db54d5SKent Overstreet 		kunmap_atomic(src_p);
105845db54d5SKent Overstreet 
10596e6e811dSKent Overstreet 		flush_dcache_page(dst_bv.bv_page);
10606e6e811dSKent Overstreet 
106145db54d5SKent Overstreet 		bio_advance_iter(src, src_iter, bytes);
106245db54d5SKent Overstreet 		bio_advance_iter(dst, dst_iter, bytes);
106345db54d5SKent Overstreet 	}
106445db54d5SKent Overstreet }
106545db54d5SKent Overstreet EXPORT_SYMBOL(bio_copy_data_iter);
106645db54d5SKent Overstreet 
106745db54d5SKent Overstreet /**
106845db54d5SKent Overstreet  * bio_copy_data - copy contents of data buffers from one bio to another
106945db54d5SKent Overstreet  * @src: source bio
107045db54d5SKent Overstreet  * @dst: destination bio
107145db54d5SKent Overstreet  *
107245db54d5SKent Overstreet  * Stops when it reaches the end of either @src or @dst - that is, copies
107345db54d5SKent Overstreet  * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
107445db54d5SKent Overstreet  */
107545db54d5SKent Overstreet void bio_copy_data(struct bio *dst, struct bio *src)
107645db54d5SKent Overstreet {
107745db54d5SKent Overstreet 	struct bvec_iter src_iter = src->bi_iter;
107845db54d5SKent Overstreet 	struct bvec_iter dst_iter = dst->bi_iter;
107945db54d5SKent Overstreet 
108045db54d5SKent Overstreet 	bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
108145db54d5SKent Overstreet }
108245db54d5SKent Overstreet EXPORT_SYMBOL(bio_copy_data);
108345db54d5SKent Overstreet 
108445db54d5SKent Overstreet /**
108545db54d5SKent Overstreet  * bio_list_copy_data - copy contents of data buffers from one chain of bios to
108645db54d5SKent Overstreet  * another
108745db54d5SKent Overstreet  * @src: source bio list
108845db54d5SKent Overstreet  * @dst: destination bio list
108945db54d5SKent Overstreet  *
109045db54d5SKent Overstreet  * Stops when it reaches the end of either the @src list or @dst list - that is,
109145db54d5SKent Overstreet  * copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of
109245db54d5SKent Overstreet  * bios).
109345db54d5SKent Overstreet  */
109445db54d5SKent Overstreet void bio_list_copy_data(struct bio *dst, struct bio *src)
109545db54d5SKent Overstreet {
109645db54d5SKent Overstreet 	struct bvec_iter src_iter = src->bi_iter;
109745db54d5SKent Overstreet 	struct bvec_iter dst_iter = dst->bi_iter;
109845db54d5SKent Overstreet 
1099f9c78b2bSJens Axboe 	while (1) {
1100f9c78b2bSJens Axboe 		if (!src_iter.bi_size) {
1101f9c78b2bSJens Axboe 			src = src->bi_next;
1102f9c78b2bSJens Axboe 			if (!src)
1103f9c78b2bSJens Axboe 				break;
1104f9c78b2bSJens Axboe 
1105f9c78b2bSJens Axboe 			src_iter = src->bi_iter;
1106f9c78b2bSJens Axboe 		}
1107f9c78b2bSJens Axboe 
1108f9c78b2bSJens Axboe 		if (!dst_iter.bi_size) {
1109f9c78b2bSJens Axboe 			dst = dst->bi_next;
1110f9c78b2bSJens Axboe 			if (!dst)
1111f9c78b2bSJens Axboe 				break;
1112f9c78b2bSJens Axboe 
1113f9c78b2bSJens Axboe 			dst_iter = dst->bi_iter;
1114f9c78b2bSJens Axboe 		}
1115f9c78b2bSJens Axboe 
111645db54d5SKent Overstreet 		bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
1117f9c78b2bSJens Axboe 	}
1118f9c78b2bSJens Axboe }
111945db54d5SKent Overstreet EXPORT_SYMBOL(bio_list_copy_data);
1120f9c78b2bSJens Axboe 
1121f9c78b2bSJens Axboe struct bio_map_data {
1122f9c78b2bSJens Axboe 	int is_our_pages;
112326e49cfcSKent Overstreet 	struct iov_iter iter;
112426e49cfcSKent Overstreet 	struct iovec iov[];
1125f9c78b2bSJens Axboe };
1126f9c78b2bSJens Axboe 
11270e5b935dSAl Viro static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
1128f9c78b2bSJens Axboe 					       gfp_t gfp_mask)
1129f9c78b2bSJens Axboe {
11300e5b935dSAl Viro 	struct bio_map_data *bmd;
11310e5b935dSAl Viro 	if (data->nr_segs > UIO_MAXIOV)
1132f9c78b2bSJens Axboe 		return NULL;
1133f9c78b2bSJens Axboe 
11340e5b935dSAl Viro 	bmd = kmalloc(sizeof(struct bio_map_data) +
11350e5b935dSAl Viro 		       sizeof(struct iovec) * data->nr_segs, gfp_mask);
11360e5b935dSAl Viro 	if (!bmd)
11370e5b935dSAl Viro 		return NULL;
11380e5b935dSAl Viro 	memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
11390e5b935dSAl Viro 	bmd->iter = *data;
11400e5b935dSAl Viro 	bmd->iter.iov = bmd->iov;
11410e5b935dSAl Viro 	return bmd;
1142f9c78b2bSJens Axboe }
1143f9c78b2bSJens Axboe 
11449124d3feSDongsu Park /**
11459124d3feSDongsu Park  * bio_copy_from_iter - copy all pages from iov_iter to bio
11469124d3feSDongsu Park  * @bio: The &struct bio which describes the I/O as destination
11479124d3feSDongsu Park  * @iter: iov_iter as source
11489124d3feSDongsu Park  *
11499124d3feSDongsu Park  * Copy all pages from iov_iter to bio.
11509124d3feSDongsu Park  * Returns 0 on success, or error on failure.
11519124d3feSDongsu Park  */
115298a09d61SAl Viro static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
1153f9c78b2bSJens Axboe {
11549124d3feSDongsu Park 	int i;
1155f9c78b2bSJens Axboe 	struct bio_vec *bvec;
11566dc4f100SMing Lei 	struct bvec_iter_all iter_all;
1157f9c78b2bSJens Axboe 
11586dc4f100SMing Lei 	bio_for_each_segment_all(bvec, bio, i, iter_all) {
11599124d3feSDongsu Park 		ssize_t ret;
1160f9c78b2bSJens Axboe 
11619124d3feSDongsu Park 		ret = copy_page_from_iter(bvec->bv_page,
11629124d3feSDongsu Park 					  bvec->bv_offset,
11639124d3feSDongsu Park 					  bvec->bv_len,
116498a09d61SAl Viro 					  iter);
1165f9c78b2bSJens Axboe 
116698a09d61SAl Viro 		if (!iov_iter_count(iter))
11679124d3feSDongsu Park 			break;
1168f9c78b2bSJens Axboe 
11699124d3feSDongsu Park 		if (ret < bvec->bv_len)
11709124d3feSDongsu Park 			return -EFAULT;
1171f9c78b2bSJens Axboe 	}
1172f9c78b2bSJens Axboe 
11739124d3feSDongsu Park 	return 0;
1174f9c78b2bSJens Axboe }
1175f9c78b2bSJens Axboe 
11769124d3feSDongsu Park /**
11779124d3feSDongsu Park  * bio_copy_to_iter - copy all pages from bio to iov_iter
11789124d3feSDongsu Park  * @bio: The &struct bio which describes the I/O as source
11799124d3feSDongsu Park  * @iter: iov_iter as destination
11809124d3feSDongsu Park  *
11819124d3feSDongsu Park  * Copy all pages from bio to iov_iter.
11829124d3feSDongsu Park  * Returns 0 on success, or error on failure.
11839124d3feSDongsu Park  */
11849124d3feSDongsu Park static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
11859124d3feSDongsu Park {
11869124d3feSDongsu Park 	int i;
11879124d3feSDongsu Park 	struct bio_vec *bvec;
11886dc4f100SMing Lei 	struct bvec_iter_all iter_all;
11899124d3feSDongsu Park 
11906dc4f100SMing Lei 	bio_for_each_segment_all(bvec, bio, i, iter_all) {
11919124d3feSDongsu Park 		ssize_t ret;
11929124d3feSDongsu Park 
11939124d3feSDongsu Park 		ret = copy_page_to_iter(bvec->bv_page,
11949124d3feSDongsu Park 					bvec->bv_offset,
11959124d3feSDongsu Park 					bvec->bv_len,
11969124d3feSDongsu Park 					&iter);
11979124d3feSDongsu Park 
11989124d3feSDongsu Park 		if (!iov_iter_count(&iter))
11999124d3feSDongsu Park 			break;
12009124d3feSDongsu Park 
12019124d3feSDongsu Park 		if (ret < bvec->bv_len)
12029124d3feSDongsu Park 			return -EFAULT;
12039124d3feSDongsu Park 	}
12049124d3feSDongsu Park 
12059124d3feSDongsu Park 	return 0;
1206f9c78b2bSJens Axboe }
1207f9c78b2bSJens Axboe 
1208491221f8SGuoqing Jiang void bio_free_pages(struct bio *bio)
12091dfa0f68SChristoph Hellwig {
12101dfa0f68SChristoph Hellwig 	struct bio_vec *bvec;
12111dfa0f68SChristoph Hellwig 	int i;
12126dc4f100SMing Lei 	struct bvec_iter_all iter_all;
12131dfa0f68SChristoph Hellwig 
12146dc4f100SMing Lei 	bio_for_each_segment_all(bvec, bio, i, iter_all)
12151dfa0f68SChristoph Hellwig 		__free_page(bvec->bv_page);
12161dfa0f68SChristoph Hellwig }
1217491221f8SGuoqing Jiang EXPORT_SYMBOL(bio_free_pages);
12181dfa0f68SChristoph Hellwig 
1219f9c78b2bSJens Axboe /**
1220f9c78b2bSJens Axboe  *	bio_uncopy_user	-	finish previously mapped bio
1221f9c78b2bSJens Axboe  *	@bio: bio being terminated
1222f9c78b2bSJens Axboe  *
1223ddad8dd0SChristoph Hellwig  *	Free pages allocated from bio_copy_user_iov() and write back data
1224f9c78b2bSJens Axboe  *	to user space in case of a read.
1225f9c78b2bSJens Axboe  */
1226f9c78b2bSJens Axboe int bio_uncopy_user(struct bio *bio)
1227f9c78b2bSJens Axboe {
1228f9c78b2bSJens Axboe 	struct bio_map_data *bmd = bio->bi_private;
12291dfa0f68SChristoph Hellwig 	int ret = 0;
1230f9c78b2bSJens Axboe 
1231f9c78b2bSJens Axboe 	if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
1232f9c78b2bSJens Axboe 		/*
1233f9c78b2bSJens Axboe 		 * if we're in a workqueue, the request is orphaned, so
12342d99b55dSHannes Reinecke 		 * don't copy into a random user address space, just free
12352d99b55dSHannes Reinecke 		 * and return -EINTR so user space doesn't expect any data.
1236f9c78b2bSJens Axboe 		 */
12372d99b55dSHannes Reinecke 		if (!current->mm)
12382d99b55dSHannes Reinecke 			ret = -EINTR;
12392d99b55dSHannes Reinecke 		else if (bio_data_dir(bio) == READ)
12409124d3feSDongsu Park 			ret = bio_copy_to_iter(bio, bmd->iter);
12411dfa0f68SChristoph Hellwig 		if (bmd->is_our_pages)
12421dfa0f68SChristoph Hellwig 			bio_free_pages(bio);
1243f9c78b2bSJens Axboe 	}
1244f9c78b2bSJens Axboe 	kfree(bmd);
1245f9c78b2bSJens Axboe 	bio_put(bio);
1246f9c78b2bSJens Axboe 	return ret;
1247f9c78b2bSJens Axboe }
1248f9c78b2bSJens Axboe 
1249f9c78b2bSJens Axboe /**
1250f9c78b2bSJens Axboe  *	bio_copy_user_iov	-	copy user data to bio
1251f9c78b2bSJens Axboe  *	@q:		destination block queue
1252f9c78b2bSJens Axboe  *	@map_data:	pointer to the rq_map_data holding pages (if necessary)
125326e49cfcSKent Overstreet  *	@iter:		iovec iterator
1254f9c78b2bSJens Axboe  *	@gfp_mask:	memory allocation flags
1255f9c78b2bSJens Axboe  *
1256f9c78b2bSJens Axboe  *	Prepares and returns a bio for indirect user io, bouncing data
1257f9c78b2bSJens Axboe  *	to/from kernel pages as necessary. Must be paired with a call to
1258f9c78b2bSJens Axboe  *	bio_uncopy_user() on io completion.
1259f9c78b2bSJens Axboe  */
1260f9c78b2bSJens Axboe struct bio *bio_copy_user_iov(struct request_queue *q,
1261f9c78b2bSJens Axboe 			      struct rq_map_data *map_data,
1262e81cef5dSAl Viro 			      struct iov_iter *iter,
126326e49cfcSKent Overstreet 			      gfp_t gfp_mask)
1264f9c78b2bSJens Axboe {
1265f9c78b2bSJens Axboe 	struct bio_map_data *bmd;
1266f9c78b2bSJens Axboe 	struct page *page;
1267f9c78b2bSJens Axboe 	struct bio *bio;
1268d16d44ebSAl Viro 	int i = 0, ret;
1269d16d44ebSAl Viro 	int nr_pages;
127026e49cfcSKent Overstreet 	unsigned int len = iter->count;
1271bd5ceceaSGeliang Tang 	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;
1272f9c78b2bSJens Axboe 
12730e5b935dSAl Viro 	bmd = bio_alloc_map_data(iter, gfp_mask);
1274f9c78b2bSJens Axboe 	if (!bmd)
1275f9c78b2bSJens Axboe 		return ERR_PTR(-ENOMEM);
1276f9c78b2bSJens Axboe 
127726e49cfcSKent Overstreet 	/*
127826e49cfcSKent Overstreet 	 * We need to do a deep copy of the iov_iter including the iovecs.
127926e49cfcSKent Overstreet 	 * The caller provided iov might point to an on-stack or otherwise
128026e49cfcSKent Overstreet 	 * shortlived one.
128126e49cfcSKent Overstreet 	 */
128226e49cfcSKent Overstreet 	bmd->is_our_pages = map_data ? 0 : 1;
128326e49cfcSKent Overstreet 
1284d16d44ebSAl Viro 	nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
1285d16d44ebSAl Viro 	if (nr_pages > BIO_MAX_PAGES)
1286d16d44ebSAl Viro 		nr_pages = BIO_MAX_PAGES;
1287f9c78b2bSJens Axboe 
1288f9c78b2bSJens Axboe 	ret = -ENOMEM;
1289f9c78b2bSJens Axboe 	bio = bio_kmalloc(gfp_mask, nr_pages);
1290f9c78b2bSJens Axboe 	if (!bio)
1291f9c78b2bSJens Axboe 		goto out_bmd;
1292f9c78b2bSJens Axboe 
1293f9c78b2bSJens Axboe 	ret = 0;
1294f9c78b2bSJens Axboe 
1295f9c78b2bSJens Axboe 	if (map_data) {
1296f9c78b2bSJens Axboe 		nr_pages = 1 << map_data->page_order;
1297f9c78b2bSJens Axboe 		i = map_data->offset / PAGE_SIZE;
1298f9c78b2bSJens Axboe 	}
1299f9c78b2bSJens Axboe 	while (len) {
1300f9c78b2bSJens Axboe 		unsigned int bytes = PAGE_SIZE;
1301f9c78b2bSJens Axboe 
1302f9c78b2bSJens Axboe 		bytes -= offset;
1303f9c78b2bSJens Axboe 
1304f9c78b2bSJens Axboe 		if (bytes > len)
1305f9c78b2bSJens Axboe 			bytes = len;
1306f9c78b2bSJens Axboe 
1307f9c78b2bSJens Axboe 		if (map_data) {
1308f9c78b2bSJens Axboe 			if (i == map_data->nr_entries * nr_pages) {
1309f9c78b2bSJens Axboe 				ret = -ENOMEM;
1310f9c78b2bSJens Axboe 				break;
1311f9c78b2bSJens Axboe 			}
1312f9c78b2bSJens Axboe 
1313f9c78b2bSJens Axboe 			page = map_data->pages[i / nr_pages];
1314f9c78b2bSJens Axboe 			page += (i % nr_pages);
1315f9c78b2bSJens Axboe 
1316f9c78b2bSJens Axboe 			i++;
1317f9c78b2bSJens Axboe 		} else {
1318f9c78b2bSJens Axboe 			page = alloc_page(q->bounce_gfp | gfp_mask);
1319f9c78b2bSJens Axboe 			if (!page) {
1320f9c78b2bSJens Axboe 				ret = -ENOMEM;
1321f9c78b2bSJens Axboe 				break;
1322f9c78b2bSJens Axboe 			}
1323f9c78b2bSJens Axboe 		}
1324f9c78b2bSJens Axboe 
1325f9c78b2bSJens Axboe 		if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
1326f9c78b2bSJens Axboe 			break;
1327f9c78b2bSJens Axboe 
1328f9c78b2bSJens Axboe 		len -= bytes;
1329f9c78b2bSJens Axboe 		offset = 0;
1330f9c78b2bSJens Axboe 	}
1331f9c78b2bSJens Axboe 
1332f9c78b2bSJens Axboe 	if (ret)
1333f9c78b2bSJens Axboe 		goto cleanup;
1334f9c78b2bSJens Axboe 
13352884d0beSAl Viro 	if (map_data)
13362884d0beSAl Viro 		map_data->offset += bio->bi_iter.bi_size;
13372884d0beSAl Viro 
1338f9c78b2bSJens Axboe 	/*
1339f9c78b2bSJens Axboe 	 * success
1340f9c78b2bSJens Axboe 	 */
134100e23707SDavid Howells 	if ((iov_iter_rw(iter) == WRITE && (!map_data || !map_data->null_mapped)) ||
1342f9c78b2bSJens Axboe 	    (map_data && map_data->from_user)) {
134398a09d61SAl Viro 		ret = bio_copy_from_iter(bio, iter);
1344f9c78b2bSJens Axboe 		if (ret)
1345f9c78b2bSJens Axboe 			goto cleanup;
134698a09d61SAl Viro 	} else {
1347f55adad6SKeith Busch 		if (bmd->is_our_pages)
1348f3587d76SKeith Busch 			zero_fill_bio(bio);
1349e81cef5dSAl Viro 		iov_iter_advance(iter, bio->bi_iter.bi_size);
1350f9c78b2bSJens Axboe 	}
1351f9c78b2bSJens Axboe 
135226e49cfcSKent Overstreet 	bio->bi_private = bmd;
13532884d0beSAl Viro 	if (map_data && map_data->null_mapped)
13542884d0beSAl Viro 		bio_set_flag(bio, BIO_NULL_MAPPED);
1355f9c78b2bSJens Axboe 	return bio;
1356f9c78b2bSJens Axboe cleanup:
1357f9c78b2bSJens Axboe 	if (!map_data)
13581dfa0f68SChristoph Hellwig 		bio_free_pages(bio);
1359f9c78b2bSJens Axboe 	bio_put(bio);
1360f9c78b2bSJens Axboe out_bmd:
1361f9c78b2bSJens Axboe 	kfree(bmd);
1362f9c78b2bSJens Axboe 	return ERR_PTR(ret);
1363f9c78b2bSJens Axboe }
1364f9c78b2bSJens Axboe 
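/*
 * Example (sketch only; see blk_rq_map_user_iov() for the in-tree
 * caller of this path): bounce user data into kernel pages, perform
 * the io, then let bio_uncopy_user() copy back (for reads) and free:
 *
 *	bio = bio_copy_user_iov(q, NULL, iter, GFP_KERNEL);
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *	... submit the bio and wait for completion ...
 *	ret = bio_uncopy_user(bio);
 */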
136537f19e57SChristoph Hellwig /**
136637f19e57SChristoph Hellwig  *	bio_map_user_iov - map user iovec into bio
136737f19e57SChristoph Hellwig  *	@q:		the struct request_queue for the bio
136837f19e57SChristoph Hellwig  *	@iter:		iovec iterator
136937f19e57SChristoph Hellwig  *	@gfp_mask:	memory allocation flags
137037f19e57SChristoph Hellwig  *
137137f19e57SChristoph Hellwig  *	Map the user space address into a bio suitable for io to a block
137237f19e57SChristoph Hellwig  *	device. Returns an error pointer in case of error.
137337f19e57SChristoph Hellwig  */
137437f19e57SChristoph Hellwig struct bio *bio_map_user_iov(struct request_queue *q,
1375e81cef5dSAl Viro 			     struct iov_iter *iter,
137626e49cfcSKent Overstreet 			     gfp_t gfp_mask)
1377f9c78b2bSJens Axboe {
137826e49cfcSKent Overstreet 	int j;
1379f9c78b2bSJens Axboe 	struct bio *bio;
1380076098e5SAl Viro 	int ret;
13812b04e8f6SAl Viro 	struct bio_vec *bvec;
13826dc4f100SMing Lei 	struct bvec_iter_all iter_all;
1383f9c78b2bSJens Axboe 
1384b282cc76SAl Viro 	if (!iov_iter_count(iter))
1385f9c78b2bSJens Axboe 		return ERR_PTR(-EINVAL);
1386f9c78b2bSJens Axboe 
1387b282cc76SAl Viro 	bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES));
1388f9c78b2bSJens Axboe 	if (!bio)
1389f9c78b2bSJens Axboe 		return ERR_PTR(-ENOMEM);
1390f9c78b2bSJens Axboe 
13910a0f1513SAl Viro 	while (iov_iter_count(iter)) {
1392629e42bcSAl Viro 		struct page **pages;
1393076098e5SAl Viro 		ssize_t bytes;
1394076098e5SAl Viro 		size_t offs, added = 0;
1395076098e5SAl Viro 		int npages;
1396f9c78b2bSJens Axboe 
13970a0f1513SAl Viro 		bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
1398076098e5SAl Viro 		if (unlikely(bytes <= 0)) {
1399076098e5SAl Viro 			ret = bytes ? bytes : -EFAULT;
1400f9c78b2bSJens Axboe 			goto out_unmap;
1401f9c78b2bSJens Axboe 		}
1402f9c78b2bSJens Axboe 
1403076098e5SAl Viro 		npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);
1404076098e5SAl Viro 
140598f0bc99SAl Viro 		if (unlikely(offs & queue_dma_alignment(q))) {
140698f0bc99SAl Viro 			ret = -EINVAL;
140798f0bc99SAl Viro 			j = 0;
140898f0bc99SAl Viro 		} else {
1409629e42bcSAl Viro 			for (j = 0; j < npages; j++) {
141098f0bc99SAl Viro 				struct page *page = pages[j];
1411076098e5SAl Viro 				unsigned int n = PAGE_SIZE - offs;
1412f9c78b2bSJens Axboe 
1413076098e5SAl Viro 				if (n > bytes)
1414076098e5SAl Viro 					n = bytes;
1415f9c78b2bSJens Axboe 
141619047087SMing Lei 				if (!__bio_add_pc_page(q, bio, page, n, offs,
141719047087SMing Lei 							true))
1418f9c78b2bSJens Axboe 					break;
1419f9c78b2bSJens Axboe 
1420076098e5SAl Viro 				added += n;
1421076098e5SAl Viro 				bytes -= n;
1422076098e5SAl Viro 				offs = 0;
1423f9c78b2bSJens Axboe 			}
14240a0f1513SAl Viro 			iov_iter_advance(iter, added);
142598f0bc99SAl Viro 		}
1426f9c78b2bSJens Axboe 		/*
1427f9c78b2bSJens Axboe 		 * release the pages we didn't map into the bio, if any
1428f9c78b2bSJens Axboe 		 */
1429629e42bcSAl Viro 		while (j < npages)
143009cbfeafSKirill A. Shutemov 			put_page(pages[j++]);
1431629e42bcSAl Viro 		kvfree(pages);
1432e2e115d1SAl Viro 		/* couldn't stuff something into bio? */
1433e2e115d1SAl Viro 		if (bytes)
1434e2e115d1SAl Viro 			break;
1435f9c78b2bSJens Axboe 	}
1436f9c78b2bSJens Axboe 
1437b7c44ed9SJens Axboe 	bio_set_flag(bio, BIO_USER_MAPPED);
143837f19e57SChristoph Hellwig 
143937f19e57SChristoph Hellwig 	/*
14405fad1b64SBart Van Assche 	 * subtle -- if bio_map_user_iov() ended up bouncing a bio,
144137f19e57SChristoph Hellwig 	 * it would normally disappear when its bi_end_io is run.
144237f19e57SChristoph Hellwig 	 * however, we need it for the unmap, so grab an extra
144337f19e57SChristoph Hellwig 	 * reference to it
144437f19e57SChristoph Hellwig 	 */
144537f19e57SChristoph Hellwig 	bio_get(bio);
1446f9c78b2bSJens Axboe 	return bio;
1447f9c78b2bSJens Axboe 
1448f9c78b2bSJens Axboe  out_unmap:
14496dc4f100SMing Lei 	bio_for_each_segment_all(bvec, bio, j, iter_all) {
14502b04e8f6SAl Viro 		put_page(bvec->bv_page);
1451f9c78b2bSJens Axboe 	}
1452f9c78b2bSJens Axboe 	bio_put(bio);
1453f9c78b2bSJens Axboe 	return ERR_PTR(ret);
1454f9c78b2bSJens Axboe }
1455f9c78b2bSJens Axboe 
1456f9c78b2bSJens Axboe static void __bio_unmap_user(struct bio *bio)
1457f9c78b2bSJens Axboe {
1458f9c78b2bSJens Axboe 	struct bio_vec *bvec;
1459f9c78b2bSJens Axboe 	int i;
14606dc4f100SMing Lei 	struct bvec_iter_all iter_all;
1461f9c78b2bSJens Axboe 
1462f9c78b2bSJens Axboe 	/*
1463f9c78b2bSJens Axboe 	 * make sure we dirty pages we wrote to
1464f9c78b2bSJens Axboe 	 */
14656dc4f100SMing Lei 	bio_for_each_segment_all(bvec, bio, i, iter_all) {
1466f9c78b2bSJens Axboe 		if (bio_data_dir(bio) == READ)
1467f9c78b2bSJens Axboe 			set_page_dirty_lock(bvec->bv_page);
1468f9c78b2bSJens Axboe 
146909cbfeafSKirill A. Shutemov 		put_page(bvec->bv_page);
1470f9c78b2bSJens Axboe 	}
1471f9c78b2bSJens Axboe 
1472f9c78b2bSJens Axboe 	bio_put(bio);
1473f9c78b2bSJens Axboe }
1474f9c78b2bSJens Axboe 
1475f9c78b2bSJens Axboe /**
1476f9c78b2bSJens Axboe  *	bio_unmap_user	-	unmap a bio
1477f9c78b2bSJens Axboe  *	@bio:		the bio being unmapped
1478f9c78b2bSJens Axboe  *
14795fad1b64SBart Van Assche  *	Unmap a bio previously mapped by bio_map_user_iov(). Must be called from
14805fad1b64SBart Van Assche  *	process context.
1481f9c78b2bSJens Axboe  *
1482f9c78b2bSJens Axboe  *	bio_unmap_user() may sleep.
1483f9c78b2bSJens Axboe  */
1484f9c78b2bSJens Axboe void bio_unmap_user(struct bio *bio)
1485f9c78b2bSJens Axboe {
1486f9c78b2bSJens Axboe 	__bio_unmap_user(bio);
1487f9c78b2bSJens Axboe 	bio_put(bio);
1488f9c78b2bSJens Axboe }
1489f9c78b2bSJens Axboe 
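/*
 * Example (sketch): zero-copy passthrough of a user buffer; the user
 * pages stay pinned until bio_unmap_user() drops them:
 *
 *	bio = bio_map_user_iov(q, iter, GFP_KERNEL);
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *	... submit the bio and wait for completion ...
 *	bio_unmap_user(bio);
 */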
14904246a0b6SChristoph Hellwig static void bio_map_kern_endio(struct bio *bio)
1491f9c78b2bSJens Axboe {
1492f9c78b2bSJens Axboe 	bio_put(bio);
1493f9c78b2bSJens Axboe }
1494f9c78b2bSJens Axboe 
149575c72b83SChristoph Hellwig /**
149675c72b83SChristoph Hellwig  *	bio_map_kern	-	map kernel address into bio
149775c72b83SChristoph Hellwig  *	@q: the struct request_queue for the bio
149875c72b83SChristoph Hellwig  *	@data: pointer to buffer to map
149975c72b83SChristoph Hellwig  *	@len: length in bytes
150075c72b83SChristoph Hellwig  *	@gfp_mask: allocation flags for bio allocation
150175c72b83SChristoph Hellwig  *
150275c72b83SChristoph Hellwig  *	Map the kernel address into a bio suitable for io to a block
150375c72b83SChristoph Hellwig  *	device. Returns an error pointer in case of error.
150475c72b83SChristoph Hellwig  */
150575c72b83SChristoph Hellwig struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
150675c72b83SChristoph Hellwig 			 gfp_t gfp_mask)
1507f9c78b2bSJens Axboe {
1508f9c78b2bSJens Axboe 	unsigned long kaddr = (unsigned long)data;
1509f9c78b2bSJens Axboe 	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1510f9c78b2bSJens Axboe 	unsigned long start = kaddr >> PAGE_SHIFT;
1511f9c78b2bSJens Axboe 	const int nr_pages = end - start;
1512f9c78b2bSJens Axboe 	int offset, i;
1513f9c78b2bSJens Axboe 	struct bio *bio;
1514f9c78b2bSJens Axboe 
1515f9c78b2bSJens Axboe 	bio = bio_kmalloc(gfp_mask, nr_pages);
1516f9c78b2bSJens Axboe 	if (!bio)
1517f9c78b2bSJens Axboe 		return ERR_PTR(-ENOMEM);
1518f9c78b2bSJens Axboe 
1519f9c78b2bSJens Axboe 	offset = offset_in_page(kaddr);
1520f9c78b2bSJens Axboe 	for (i = 0; i < nr_pages; i++) {
1521f9c78b2bSJens Axboe 		unsigned int bytes = PAGE_SIZE - offset;
1522f9c78b2bSJens Axboe 
1523f9c78b2bSJens Axboe 		if (len <= 0)
1524f9c78b2bSJens Axboe 			break;
1525f9c78b2bSJens Axboe 
1526f9c78b2bSJens Axboe 		if (bytes > len)
1527f9c78b2bSJens Axboe 			bytes = len;
1528f9c78b2bSJens Axboe 
1529f9c78b2bSJens Axboe 		if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
153075c72b83SChristoph Hellwig 				    offset) < bytes) {
153175c72b83SChristoph Hellwig 			/* we don't support partial mappings */
153275c72b83SChristoph Hellwig 			bio_put(bio);
153375c72b83SChristoph Hellwig 			return ERR_PTR(-EINVAL);
153475c72b83SChristoph Hellwig 		}
1535f9c78b2bSJens Axboe 
1536f9c78b2bSJens Axboe 		data += bytes;
1537f9c78b2bSJens Axboe 		len -= bytes;
1538f9c78b2bSJens Axboe 		offset = 0;
1539f9c78b2bSJens Axboe 	}
1540f9c78b2bSJens Axboe 
1541f9c78b2bSJens Axboe 	bio->bi_end_io = bio_map_kern_endio;
1542f9c78b2bSJens Axboe 	return bio;
1543f9c78b2bSJens Axboe }
1544f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_map_kern);
1545f9c78b2bSJens Axboe 
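/*
 * Example (sketch): wrapping a kmalloc'ed buffer for a passthrough
 * command; "buf" and "len" are hypothetical.  The pages are referenced
 * via virt_to_page(), so the buffer must be directly mapped (kmalloc,
 * not vmalloc) and must stay allocated until the io completes:
 *
 *	bio = bio_map_kern(q, buf, len, GFP_KERNEL);
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 */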
15464246a0b6SChristoph Hellwig static void bio_copy_kern_endio(struct bio *bio)
1547f9c78b2bSJens Axboe {
15481dfa0f68SChristoph Hellwig 	bio_free_pages(bio);
15491dfa0f68SChristoph Hellwig 	bio_put(bio);
15501dfa0f68SChristoph Hellwig }
15511dfa0f68SChristoph Hellwig 
15524246a0b6SChristoph Hellwig static void bio_copy_kern_endio_read(struct bio *bio)
15531dfa0f68SChristoph Hellwig {
155442d2683aSChristoph Hellwig 	char *p = bio->bi_private;
15551dfa0f68SChristoph Hellwig 	struct bio_vec *bvec;
1556f9c78b2bSJens Axboe 	int i;
15576dc4f100SMing Lei 	struct bvec_iter_all iter_all;
1558f9c78b2bSJens Axboe 
15596dc4f100SMing Lei 	bio_for_each_segment_all(bvec, bio, i, iter_all) {
15601dfa0f68SChristoph Hellwig 		memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
1561f9c78b2bSJens Axboe 		p += bvec->bv_len;
1562f9c78b2bSJens Axboe 	}
1563f9c78b2bSJens Axboe 
15644246a0b6SChristoph Hellwig 	bio_copy_kern_endio(bio);
1565f9c78b2bSJens Axboe }
1566f9c78b2bSJens Axboe 
1567f9c78b2bSJens Axboe /**
1568f9c78b2bSJens Axboe  *	bio_copy_kern	-	copy kernel address into bio
1569f9c78b2bSJens Axboe  *	@q: the struct request_queue for the bio
1570f9c78b2bSJens Axboe  *	@data: pointer to buffer to copy
1571f9c78b2bSJens Axboe  *	@len: length in bytes
1572f9c78b2bSJens Axboe  *	@gfp_mask: allocation flags for bio and page allocation
1573f9c78b2bSJens Axboe  *	@reading: data direction is READ
1574f9c78b2bSJens Axboe  *
1575f9c78b2bSJens Axboe  *	copy the kernel address into a bio suitable for io to a block
1576f9c78b2bSJens Axboe  *	device. Returns an error pointer in case of error.
1577f9c78b2bSJens Axboe  */
1578f9c78b2bSJens Axboe struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
1579f9c78b2bSJens Axboe 			  gfp_t gfp_mask, int reading)
1580f9c78b2bSJens Axboe {
158142d2683aSChristoph Hellwig 	unsigned long kaddr = (unsigned long)data;
158242d2683aSChristoph Hellwig 	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
158342d2683aSChristoph Hellwig 	unsigned long start = kaddr >> PAGE_SHIFT;
158442d2683aSChristoph Hellwig 	struct bio *bio;
1585f9c78b2bSJens Axboe 	void *p = data;
15861dfa0f68SChristoph Hellwig 	int nr_pages = 0;
1587f9c78b2bSJens Axboe 
158842d2683aSChristoph Hellwig 	/*
158942d2683aSChristoph Hellwig 	 * Overflow, abort
159042d2683aSChristoph Hellwig 	 */
159142d2683aSChristoph Hellwig 	if (end < start)
159242d2683aSChristoph Hellwig 		return ERR_PTR(-EINVAL);
1593f9c78b2bSJens Axboe 
159442d2683aSChristoph Hellwig 	nr_pages = end - start;
159542d2683aSChristoph Hellwig 	bio = bio_kmalloc(gfp_mask, nr_pages);
159642d2683aSChristoph Hellwig 	if (!bio)
159742d2683aSChristoph Hellwig 		return ERR_PTR(-ENOMEM);
159842d2683aSChristoph Hellwig 
159942d2683aSChristoph Hellwig 	while (len) {
160042d2683aSChristoph Hellwig 		struct page *page;
160142d2683aSChristoph Hellwig 		unsigned int bytes = PAGE_SIZE;
160242d2683aSChristoph Hellwig 
160342d2683aSChristoph Hellwig 		if (bytes > len)
160442d2683aSChristoph Hellwig 			bytes = len;
160542d2683aSChristoph Hellwig 
160642d2683aSChristoph Hellwig 		page = alloc_page(q->bounce_gfp | gfp_mask);
160742d2683aSChristoph Hellwig 		if (!page)
160842d2683aSChristoph Hellwig 			goto cleanup;
160942d2683aSChristoph Hellwig 
161042d2683aSChristoph Hellwig 		if (!reading)
161142d2683aSChristoph Hellwig 			memcpy(page_address(page), p, bytes);
161242d2683aSChristoph Hellwig 
161342d2683aSChristoph Hellwig 		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
161442d2683aSChristoph Hellwig 			break;
161542d2683aSChristoph Hellwig 
161642d2683aSChristoph Hellwig 		len -= bytes;
161742d2683aSChristoph Hellwig 		p += bytes;
1618f9c78b2bSJens Axboe 	}
1619f9c78b2bSJens Axboe 
16201dfa0f68SChristoph Hellwig 	if (reading) {
16211dfa0f68SChristoph Hellwig 		bio->bi_end_io = bio_copy_kern_endio_read;
162242d2683aSChristoph Hellwig 		bio->bi_private = data;
16231dfa0f68SChristoph Hellwig 	} else {
1624f9c78b2bSJens Axboe 		bio->bi_end_io = bio_copy_kern_endio;
16251dfa0f68SChristoph Hellwig 	}
16261dfa0f68SChristoph Hellwig 
1627f9c78b2bSJens Axboe 	return bio;
162842d2683aSChristoph Hellwig 
162942d2683aSChristoph Hellwig cleanup:
16301dfa0f68SChristoph Hellwig 	bio_free_pages(bio);
163142d2683aSChristoph Hellwig 	bio_put(bio);
163242d2683aSChristoph Hellwig 	return ERR_PTR(-ENOMEM);
1633f9c78b2bSJens Axboe }
1634f9c78b2bSJens Axboe 
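/*
 * Example (sketch): unlike bio_map_kern(), the data is bounced through
 * freshly allocated pages, so a hypothetical "buf" may be memory that
 * virt_to_page() cannot handle.  With @reading set, the bounce pages
 * are copied back into "buf" from the endio handler, so the buffer
 * must outlive the io:
 *
 *	bio = bio_copy_kern(q, buf, len, GFP_KERNEL, 1);
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 */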
1635f9c78b2bSJens Axboe /*
1636f9c78b2bSJens Axboe  * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
1637f9c78b2bSJens Axboe  * for performing direct-IO in BIOs.
1638f9c78b2bSJens Axboe  *
1639f9c78b2bSJens Axboe  * The problem is that we cannot run set_page_dirty() from interrupt context
1640f9c78b2bSJens Axboe  * because the required locks are not interrupt-safe.  So what we can do is to
1641f9c78b2bSJens Axboe  * mark the pages dirty _before_ performing IO.  And in interrupt context,
1642f9c78b2bSJens Axboe  * check that the pages are still dirty.   If so, fine.  If not, redirty them
1643f9c78b2bSJens Axboe  * in process context.
1644f9c78b2bSJens Axboe  *
1645f9c78b2bSJens Axboe  * We special-case compound pages here: normally this means reads into hugetlb
1646f9c78b2bSJens Axboe  * pages.  The logic in here doesn't really work right for compound pages
1647f9c78b2bSJens Axboe  * because the VM does not uniformly chase down the head page in all cases.
1648f9c78b2bSJens Axboe  * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
1649f9c78b2bSJens Axboe  * handle them at all.  So we skip compound pages here at an early stage.
1650f9c78b2bSJens Axboe  *
1651f9c78b2bSJens Axboe  * Note that this code is very hard to test under normal circumstances because
1652f9c78b2bSJens Axboe  * direct-io pins the pages with get_user_pages().  This makes
1653f9c78b2bSJens Axboe  * is_page_cache_freeable return false, and the VM will not clean the pages.
1654f9c78b2bSJens Axboe  * But other code (e.g., flusher threads) could clean the pages if they are mapped
1655f9c78b2bSJens Axboe  * pagecache.
1656f9c78b2bSJens Axboe  *
1657f9c78b2bSJens Axboe  * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
1658f9c78b2bSJens Axboe  * deferred bio dirtying paths.
1659f9c78b2bSJens Axboe  */
1660f9c78b2bSJens Axboe 
1661f9c78b2bSJens Axboe /*
1662f9c78b2bSJens Axboe  * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1663f9c78b2bSJens Axboe  */
1664f9c78b2bSJens Axboe void bio_set_pages_dirty(struct bio *bio)
1665f9c78b2bSJens Axboe {
1666f9c78b2bSJens Axboe 	struct bio_vec *bvec;
1667f9c78b2bSJens Axboe 	int i;
16686dc4f100SMing Lei 	struct bvec_iter_all iter_all;
1669f9c78b2bSJens Axboe 
16706dc4f100SMing Lei 	bio_for_each_segment_all(bvec, bio, i, iter_all) {
16713bb50983SChristoph Hellwig 		if (!PageCompound(bvec->bv_page))
16723bb50983SChristoph Hellwig 			set_page_dirty_lock(bvec->bv_page);
1673f9c78b2bSJens Axboe 	}
1674f9c78b2bSJens Axboe }
1675f9c78b2bSJens Axboe 
1676f9c78b2bSJens Axboe static void bio_release_pages(struct bio *bio)
1677f9c78b2bSJens Axboe {
1678f9c78b2bSJens Axboe 	struct bio_vec *bvec;
1679f9c78b2bSJens Axboe 	int i;
16806dc4f100SMing Lei 	struct bvec_iter_all iter_all;
1681f9c78b2bSJens Axboe 
16826dc4f100SMing Lei 	bio_for_each_segment_all(bvec, bio, i, iter_all)
168324d5493fSChristoph Hellwig 		put_page(bvec->bv_page);
1684f9c78b2bSJens Axboe }
1685f9c78b2bSJens Axboe 
1686f9c78b2bSJens Axboe /*
1687f9c78b2bSJens Axboe  * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
1688f9c78b2bSJens Axboe  * If they are, then fine.  If, however, some pages are clean then they must
1689f9c78b2bSJens Axboe  * have been written out during the direct-IO read.  So we take another ref on
169024d5493fSChristoph Hellwig  * the BIO and re-dirty the pages in process context.
1691f9c78b2bSJens Axboe  *
1692f9c78b2bSJens Axboe  * It is expected that bio_check_pages_dirty() will wholly own the BIO from
1693ea1754a0SKirill A. Shutemov  * here on.  It will run one put_page() against each page and will run one
1694ea1754a0SKirill A. Shutemov  * bio_put() against the BIO.
1695f9c78b2bSJens Axboe  */
1696f9c78b2bSJens Axboe 
1697f9c78b2bSJens Axboe static void bio_dirty_fn(struct work_struct *work);
1698f9c78b2bSJens Axboe 
1699f9c78b2bSJens Axboe static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
1700f9c78b2bSJens Axboe static DEFINE_SPINLOCK(bio_dirty_lock);
1701f9c78b2bSJens Axboe static struct bio *bio_dirty_list;
1702f9c78b2bSJens Axboe 
1703f9c78b2bSJens Axboe /*
1704f9c78b2bSJens Axboe  * This runs in process context
1705f9c78b2bSJens Axboe  */
1706f9c78b2bSJens Axboe static void bio_dirty_fn(struct work_struct *work)
1707f9c78b2bSJens Axboe {
170824d5493fSChristoph Hellwig 	struct bio *bio, *next;
1709f9c78b2bSJens Axboe 
171024d5493fSChristoph Hellwig 	spin_lock_irq(&bio_dirty_lock);
171124d5493fSChristoph Hellwig 	next = bio_dirty_list;
1712f9c78b2bSJens Axboe 	bio_dirty_list = NULL;
171324d5493fSChristoph Hellwig 	spin_unlock_irq(&bio_dirty_lock);
1714f9c78b2bSJens Axboe 
171524d5493fSChristoph Hellwig 	while ((bio = next) != NULL) {
171624d5493fSChristoph Hellwig 		next = bio->bi_private;
1717f9c78b2bSJens Axboe 
1718f9c78b2bSJens Axboe 		bio_set_pages_dirty(bio);
1719399254aaSJens Axboe 		if (!bio_flagged(bio, BIO_NO_PAGE_REF))
1720f9c78b2bSJens Axboe 			bio_release_pages(bio);
1721f9c78b2bSJens Axboe 		bio_put(bio);
1722f9c78b2bSJens Axboe 	}
1723f9c78b2bSJens Axboe }
1724f9c78b2bSJens Axboe 
1725f9c78b2bSJens Axboe void bio_check_pages_dirty(struct bio *bio)
1726f9c78b2bSJens Axboe {
1727f9c78b2bSJens Axboe 	struct bio_vec *bvec;
172824d5493fSChristoph Hellwig 	unsigned long flags;
1729f9c78b2bSJens Axboe 	int i;
17306dc4f100SMing Lei 	struct bvec_iter_all iter_all;
1731f9c78b2bSJens Axboe 
17326dc4f100SMing Lei 	bio_for_each_segment_all(bvec, bio, i, iter_all) {
173324d5493fSChristoph Hellwig 		if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
173424d5493fSChristoph Hellwig 			goto defer;
1735f9c78b2bSJens Axboe 	}
1736f9c78b2bSJens Axboe 
1737399254aaSJens Axboe 	if (!bio_flagged(bio, BIO_NO_PAGE_REF))
173824d5493fSChristoph Hellwig 		bio_release_pages(bio);
173924d5493fSChristoph Hellwig 	bio_put(bio);
174024d5493fSChristoph Hellwig 	return;
174124d5493fSChristoph Hellwig defer:
1742f9c78b2bSJens Axboe 	spin_lock_irqsave(&bio_dirty_lock, flags);
1743f9c78b2bSJens Axboe 	bio->bi_private = bio_dirty_list;
1744f9c78b2bSJens Axboe 	bio_dirty_list = bio;
1745f9c78b2bSJens Axboe 	spin_unlock_irqrestore(&bio_dirty_lock, flags);
1746f9c78b2bSJens Axboe 	schedule_work(&bio_dirty_work);
1747f9c78b2bSJens Axboe }
1748f9c78b2bSJens Axboe 
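/*
 * Example (sketch of the direct-IO read pattern described above):
 *
 *	bio_set_pages_dirty(bio);	before submission
 *	submit_bio(bio);
 *
 * and then, from the bio's completion handler:
 *
 *	static void dio_bio_end_io(struct bio *bio)
 *	{
 *		bio_check_pages_dirty(bio);	releases pages and bio
 *	}
 *
 * "dio_bio_end_io" is a hypothetical name here; fs/direct-io.c
 * implements a variant of this.
 */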
17495b18b5a7SMikulas Patocka void update_io_ticks(struct hd_struct *part, unsigned long now)
17505b18b5a7SMikulas Patocka {
17515b18b5a7SMikulas Patocka 	unsigned long stamp;
17525b18b5a7SMikulas Patocka again:
17535b18b5a7SMikulas Patocka 	stamp = READ_ONCE(part->stamp);
17545b18b5a7SMikulas Patocka 	if (unlikely(stamp != now)) {
17555b18b5a7SMikulas Patocka 		if (likely(cmpxchg(&part->stamp, stamp, now) == stamp)) {
17565b18b5a7SMikulas Patocka 			__part_stat_add(part, io_ticks, 1);
17575b18b5a7SMikulas Patocka 		}
17585b18b5a7SMikulas Patocka 	}
17595b18b5a7SMikulas Patocka 	if (part->partno) {
17605b18b5a7SMikulas Patocka 		part = &part_to_disk(part)->part0;
17615b18b5a7SMikulas Patocka 		goto again;
17625b18b5a7SMikulas Patocka 	}
17635b18b5a7SMikulas Patocka }
1764f9c78b2bSJens Axboe 
1765ddcf35d3SMichael Callahan void generic_start_io_acct(struct request_queue *q, int op,
1766d62e26b3SJens Axboe 			   unsigned long sectors, struct hd_struct *part)
1767394ffa50SGu Zheng {
1768ddcf35d3SMichael Callahan 	const int sgrp = op_stat_group(op);
1769394ffa50SGu Zheng 
1770112f158fSMike Snitzer 	part_stat_lock();
1771112f158fSMike Snitzer 
17725b18b5a7SMikulas Patocka 	update_io_ticks(part, jiffies);
1773112f158fSMike Snitzer 	part_stat_inc(part, ios[sgrp]);
1774112f158fSMike Snitzer 	part_stat_add(part, sectors[sgrp], sectors);
1775ddcf35d3SMichael Callahan 	part_inc_in_flight(q, part, op_is_write(op));
1776394ffa50SGu Zheng 
1777394ffa50SGu Zheng 	part_stat_unlock();
1778394ffa50SGu Zheng }
1779394ffa50SGu Zheng EXPORT_SYMBOL(generic_start_io_acct);
1780394ffa50SGu Zheng 
1781ddcf35d3SMichael Callahan void generic_end_io_acct(struct request_queue *q, int req_op,
1782d62e26b3SJens Axboe 			 struct hd_struct *part, unsigned long start_time)
1783394ffa50SGu Zheng {
17845b18b5a7SMikulas Patocka 	unsigned long now = jiffies;
17855b18b5a7SMikulas Patocka 	unsigned long duration = now - start_time;
1786ddcf35d3SMichael Callahan 	const int sgrp = op_stat_group(req_op);
1787394ffa50SGu Zheng 
1788112f158fSMike Snitzer 	part_stat_lock();
1789112f158fSMike Snitzer 
17905b18b5a7SMikulas Patocka 	update_io_ticks(part, now);
1791112f158fSMike Snitzer 	part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
17925b18b5a7SMikulas Patocka 	part_stat_add(part, time_in_queue, duration);
1793ddcf35d3SMichael Callahan 	part_dec_in_flight(q, part, op_is_write(req_op));
1794394ffa50SGu Zheng 
1795394ffa50SGu Zheng 	part_stat_unlock();
1796394ffa50SGu Zheng }
1797394ffa50SGu Zheng EXPORT_SYMBOL(generic_end_io_acct);
1798394ffa50SGu Zheng 
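/*
 * Example (sketch): a bio-based driver pairing the two helpers around
 * each bio it handles; "disk" is hypothetical:
 *
 *	unsigned long start = jiffies;
 *
 *	generic_start_io_acct(q, bio_op(bio), bio_sectors(bio),
 *			      &disk->part0);
 *	... carry out the io ...
 *	generic_end_io_acct(q, bio_op(bio), &disk->part0, start);
 */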
1799f9c78b2bSJens Axboe #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
1800f9c78b2bSJens Axboe void bio_flush_dcache_pages(struct bio *bi)
1801f9c78b2bSJens Axboe {
1802f9c78b2bSJens Axboe 	struct bio_vec bvec;
1803f9c78b2bSJens Axboe 	struct bvec_iter iter;
1804f9c78b2bSJens Axboe 
1805f9c78b2bSJens Axboe 	bio_for_each_segment(bvec, bi, iter)
1806f9c78b2bSJens Axboe 		flush_dcache_page(bvec.bv_page);
1807f9c78b2bSJens Axboe }
1808f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_flush_dcache_pages);
1809f9c78b2bSJens Axboe #endif
1810f9c78b2bSJens Axboe 
1811c4cf5261SJens Axboe static inline bool bio_remaining_done(struct bio *bio)
1812c4cf5261SJens Axboe {
1813c4cf5261SJens Axboe 	/*
1814c4cf5261SJens Axboe 	 * If we're not chaining, then ->__bi_remaining is always 1 and
1815c4cf5261SJens Axboe 	 * we always end io on the first invocation.
1816c4cf5261SJens Axboe 	 */
1817c4cf5261SJens Axboe 	if (!bio_flagged(bio, BIO_CHAIN))
1818c4cf5261SJens Axboe 		return true;
1819c4cf5261SJens Axboe 
1820c4cf5261SJens Axboe 	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
1821c4cf5261SJens Axboe 
1822326e1dbbSMike Snitzer 	if (atomic_dec_and_test(&bio->__bi_remaining)) {
1823b7c44ed9SJens Axboe 		bio_clear_flag(bio, BIO_CHAIN);
1824c4cf5261SJens Axboe 		return true;
1825326e1dbbSMike Snitzer 	}
1826c4cf5261SJens Axboe 
1827c4cf5261SJens Axboe 	return false;
1828c4cf5261SJens Axboe }
1829c4cf5261SJens Axboe 
1830f9c78b2bSJens Axboe /**
1831f9c78b2bSJens Axboe  * bio_endio - end I/O on a bio
1832f9c78b2bSJens Axboe  * @bio:	bio
1833f9c78b2bSJens Axboe  *
1834f9c78b2bSJens Axboe  * Description:
18354246a0b6SChristoph Hellwig  *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
18364246a0b6SChristoph Hellwig  *   way to end I/O on a bio. No one should call bi_end_io() directly on a
18374246a0b6SChristoph Hellwig  *   bio unless they own it and thus know that it has an end_io function.
1838fbbaf700SNeilBrown  *
1839fbbaf700SNeilBrown  *   bio_endio() can be called several times on a bio that has been chained
1840fbbaf700SNeilBrown  *   using bio_chain().  The ->bi_end_io() function will only be called the
1841fbbaf700SNeilBrown  *   last time.  At this point the BLK_TA_COMPLETE tracing event will be
1842fbbaf700SNeilBrown  *   generated if BIO_TRACE_COMPLETION is set.
1843f9c78b2bSJens Axboe  **/
18444246a0b6SChristoph Hellwig void bio_endio(struct bio *bio)
1845f9c78b2bSJens Axboe {
1846ba8c6967SChristoph Hellwig again:
18472b885517SChristoph Hellwig 	if (!bio_remaining_done(bio))
1848ba8c6967SChristoph Hellwig 		return;
18497c20f116SChristoph Hellwig 	if (!bio_integrity_endio(bio))
18507c20f116SChristoph Hellwig 		return;
1851f9c78b2bSJens Axboe 
185267b42d0bSJosef Bacik 	if (bio->bi_disk)
185367b42d0bSJosef Bacik 		rq_qos_done_bio(bio->bi_disk->queue, bio);
185467b42d0bSJosef Bacik 
1855f9c78b2bSJens Axboe 	/*
1856ba8c6967SChristoph Hellwig 	 * Need to have a real endio function for chained bios, otherwise
1857ba8c6967SChristoph Hellwig 	 * various corner cases will break (like stacking block devices that
1858ba8c6967SChristoph Hellwig 	 * save/restore bi_end_io) - however, we want to avoid unbounded
1859ba8c6967SChristoph Hellwig 	 * recursion and blowing the stack. Tail call optimization would
1860ba8c6967SChristoph Hellwig 	 * handle this, but compiling with frame pointers also disables
1861ba8c6967SChristoph Hellwig 	 * gcc's sibling call optimization.
1862f9c78b2bSJens Axboe 	 */
1863f9c78b2bSJens Axboe 	if (bio->bi_end_io == bio_chain_endio) {
186438f8baaeSChristoph Hellwig 		bio = __bio_chain_endio(bio);
1865ba8c6967SChristoph Hellwig 		goto again;
1866ba8c6967SChristoph Hellwig 	}
1867ba8c6967SChristoph Hellwig 
186874d46992SChristoph Hellwig 	if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
186974d46992SChristoph Hellwig 		trace_block_bio_complete(bio->bi_disk->queue, bio,
1870a462b950SBart Van Assche 					 blk_status_to_errno(bio->bi_status));
1871fbbaf700SNeilBrown 		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
1872fbbaf700SNeilBrown 	}
1873fbbaf700SNeilBrown 
18749e234eeaSShaohua Li 	blk_throtl_bio_endio(bio);
1875b222dd2fSShaohua Li 	/* release cgroup info */
1876b222dd2fSShaohua Li 	bio_uninit(bio);
1877f9c78b2bSJens Axboe 	if (bio->bi_end_io)
18784246a0b6SChristoph Hellwig 		bio->bi_end_io(bio);
1879f9c78b2bSJens Axboe }
1880f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_endio);
1881f9c78b2bSJens Axboe 
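/*
 * Example (sketch): a caller-private completion routine; lower layers
 * invoke it only through bio_endio(), never directly.  "my_ctx" and
 * "my_end_io" are hypothetical names:
 *
 *	static void my_end_io(struct bio *bio)
 *	{
 *		struct my_ctx *ctx = bio->bi_private;
 *
 *		ctx->status = bio->bi_status;
 *		complete(&ctx->done);
 *		bio_put(bio);
 *	}
 */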
1882f9c78b2bSJens Axboe /**
1883f9c78b2bSJens Axboe  * bio_split - split a bio
1884f9c78b2bSJens Axboe  * @bio:	bio to split
1885f9c78b2bSJens Axboe  * @sectors:	number of sectors to split from the front of @bio
1886f9c78b2bSJens Axboe  * @gfp:	gfp mask
1887f9c78b2bSJens Axboe  * @bs:		bio set to allocate from
1888f9c78b2bSJens Axboe  *
1889f9c78b2bSJens Axboe  * Allocates and returns a new bio which represents @sectors from the start of
1890f9c78b2bSJens Axboe  * @bio, and updates @bio to represent the remaining sectors.
1891f9c78b2bSJens Axboe  *
1892f3f5da62SMartin K. Petersen  * Unless this is a discard request, the newly allocated bio will point
1893f3f5da62SMartin K. Petersen  * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
1894f3f5da62SMartin K. Petersen  * @bio is not freed before the split.
1895f9c78b2bSJens Axboe  */
1896f9c78b2bSJens Axboe struct bio *bio_split(struct bio *bio, int sectors,
1897f9c78b2bSJens Axboe 		      gfp_t gfp, struct bio_set *bs)
1898f9c78b2bSJens Axboe {
1899f341a4d3SMikulas Patocka 	struct bio *split;
1900f9c78b2bSJens Axboe 
1901f9c78b2bSJens Axboe 	BUG_ON(sectors <= 0);
1902f9c78b2bSJens Axboe 	BUG_ON(sectors >= bio_sectors(bio));
1903f9c78b2bSJens Axboe 
1904f9c78b2bSJens Axboe 	split = bio_clone_fast(bio, gfp, bs);
1905f9c78b2bSJens Axboe 	if (!split)
1906f9c78b2bSJens Axboe 		return NULL;
1907f9c78b2bSJens Axboe 
1908f9c78b2bSJens Axboe 	split->bi_iter.bi_size = sectors << 9;
1909f9c78b2bSJens Axboe 
1910f9c78b2bSJens Axboe 	if (bio_integrity(split))
1911fbd08e76SDmitry Monakhov 		bio_integrity_trim(split);
1912f9c78b2bSJens Axboe 
1913f9c78b2bSJens Axboe 	bio_advance(bio, split->bi_iter.bi_size);
1914f9c78b2bSJens Axboe 
1915fbbaf700SNeilBrown 	if (bio_flagged(bio, BIO_TRACE_COMPLETION))
191620d59023SGoldwyn Rodrigues 		bio_set_flag(split, BIO_TRACE_COMPLETION);
1917fbbaf700SNeilBrown 
1918f9c78b2bSJens Axboe 	return split;
1919f9c78b2bSJens Axboe }
1920f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_split);
1921f9c78b2bSJens Axboe 
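/*
 * Example (sketch modelled on blk_queue_split()-style callers): carve
 * the first "sectors" off an oversized bio and chain the remainder:
 *
 *	split = bio_split(bio, sectors, GFP_NOIO, &fs_bio_set);
 *	if (split) {
 *		bio_chain(split, bio);
 *		generic_make_request(bio);	requeue the remainder
 *		bio = split;
 *	}
 */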
1922f9c78b2bSJens Axboe /**
1923f9c78b2bSJens Axboe  * bio_trim - trim a bio
1924f9c78b2bSJens Axboe  * @bio:	bio to trim
1925f9c78b2bSJens Axboe  * @offset:	number of sectors to trim from the front of @bio
1926f9c78b2bSJens Axboe  * @size:	size we want to trim @bio to, in sectors
1927f9c78b2bSJens Axboe  */
1928f9c78b2bSJens Axboe void bio_trim(struct bio *bio, int offset, int size)
1929f9c78b2bSJens Axboe {
1930f9c78b2bSJens Axboe 	/* 'bio' is a cloned bio which we need to trim to match
1931f9c78b2bSJens Axboe 	 * the given offset and size.
1932f9c78b2bSJens Axboe 	 */
1933f9c78b2bSJens Axboe 
1934f9c78b2bSJens Axboe 	size <<= 9;
1935f9c78b2bSJens Axboe 	if (offset == 0 && size == bio->bi_iter.bi_size)
1936f9c78b2bSJens Axboe 		return;
1937f9c78b2bSJens Axboe 
1938b7c44ed9SJens Axboe 	bio_clear_flag(bio, BIO_SEG_VALID);
1939f9c78b2bSJens Axboe 
1940f9c78b2bSJens Axboe 	bio_advance(bio, offset << 9);
1941f9c78b2bSJens Axboe 
1942f9c78b2bSJens Axboe 	bio->bi_iter.bi_size = size;
1943376a78abSDmitry Monakhov 
1944376a78abSDmitry Monakhov 	if (bio_integrity(bio))
1945fbd08e76SDmitry Monakhov 		bio_integrity_trim(bio);
1946376a78abSDmitry Monakhov 
1947f9c78b2bSJens Axboe }
1948f9c78b2bSJens Axboe EXPORT_SYMBOL_GPL(bio_trim);
1949f9c78b2bSJens Axboe 
1950f9c78b2bSJens Axboe /*
1951f9c78b2bSJens Axboe  * create memory pools for biovec's in a bio_set.
1952f9c78b2bSJens Axboe  * use the global biovec slabs created for general use.
1953f9c78b2bSJens Axboe  */
19548aa6ba2fSKent Overstreet int biovec_init_pool(mempool_t *pool, int pool_entries)
1955f9c78b2bSJens Axboe {
1956ed996a52SChristoph Hellwig 	struct biovec_slab *bp = bvec_slabs + BVEC_POOL_MAX;
1957f9c78b2bSJens Axboe 
19588aa6ba2fSKent Overstreet 	return mempool_init_slab_pool(pool, pool_entries, bp->slab);
1959f9c78b2bSJens Axboe }
1960f9c78b2bSJens Axboe 
1961917a38c7SKent Overstreet /*
1962917a38c7SKent Overstreet  * bioset_exit - exit a bioset initialized with bioset_init()
1963917a38c7SKent Overstreet  *
1964917a38c7SKent Overstreet  * May be called on a zeroed but uninitialized bioset (i.e. allocated with
1965917a38c7SKent Overstreet  * kzalloc()).
1966917a38c7SKent Overstreet  */
1967917a38c7SKent Overstreet void bioset_exit(struct bio_set *bs)
1968f9c78b2bSJens Axboe {
1969f9c78b2bSJens Axboe 	if (bs->rescue_workqueue)
1970f9c78b2bSJens Axboe 		destroy_workqueue(bs->rescue_workqueue);
1971917a38c7SKent Overstreet 	bs->rescue_workqueue = NULL;
1972f9c78b2bSJens Axboe 
19738aa6ba2fSKent Overstreet 	mempool_exit(&bs->bio_pool);
19748aa6ba2fSKent Overstreet 	mempool_exit(&bs->bvec_pool);
1975f9c78b2bSJens Axboe 
1976f9c78b2bSJens Axboe 	bioset_integrity_free(bs);
1977917a38c7SKent Overstreet 	if (bs->bio_slab)
1978f9c78b2bSJens Axboe 		bio_put_slab(bs);
1979917a38c7SKent Overstreet 	bs->bio_slab = NULL;
1980917a38c7SKent Overstreet }
1981917a38c7SKent Overstreet EXPORT_SYMBOL(bioset_exit);
1982f9c78b2bSJens Axboe 
1983011067b0SNeilBrown /**
1984917a38c7SKent Overstreet  * bioset_init - Initialize a bio_set
1985dad08527SKent Overstreet  * @bs:		pool to initialize
1986917a38c7SKent Overstreet  * @pool_size:	Number of bio and bio_vecs to cache in the mempool
1987917a38c7SKent Overstreet  * @front_pad:	Number of bytes to allocate in front of the returned bio
1988917a38c7SKent Overstreet  * @flags:	Flags to modify behavior, currently %BIOSET_NEED_BVECS
1989917a38c7SKent Overstreet  *              and %BIOSET_NEED_RESCUER
1990917a38c7SKent Overstreet  *
1991dad08527SKent Overstreet  * Description:
1992dad08527SKent Overstreet  *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
1993dad08527SKent Overstreet  *    to ask for a number of bytes to be allocated in front of the bio.
1994dad08527SKent Overstreet  *    Front pad allocation is useful for embedding the bio inside
1995dad08527SKent Overstreet  *    another structure, to avoid allocating extra data to go with the bio.
1996dad08527SKent Overstreet  *    Note that the bio must be embedded at the END of that structure always,
1997dad08527SKent Overstreet  *    or things will break badly.
1998dad08527SKent Overstreet  *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
1999dad08527SKent Overstreet  *    for allocating iovecs.  This pool is not needed e.g. for bio_clone_fast().
2000dad08527SKent Overstreet  *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used to
2001dad08527SKent Overstreet  *    dispatch queued requests when the mempool runs out of space.
2002dad08527SKent Overstreet  *
2003917a38c7SKent Overstreet  */
2004917a38c7SKent Overstreet int bioset_init(struct bio_set *bs,
2005917a38c7SKent Overstreet 		unsigned int pool_size,
2006917a38c7SKent Overstreet 		unsigned int front_pad,
2007917a38c7SKent Overstreet 		int flags)
2008917a38c7SKent Overstreet {
2009917a38c7SKent Overstreet 	unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
2010917a38c7SKent Overstreet 
2011917a38c7SKent Overstreet 	bs->front_pad = front_pad;
2012917a38c7SKent Overstreet 
2013917a38c7SKent Overstreet 	spin_lock_init(&bs->rescue_lock);
2014917a38c7SKent Overstreet 	bio_list_init(&bs->rescue_list);
2015917a38c7SKent Overstreet 	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
2016917a38c7SKent Overstreet 
2017917a38c7SKent Overstreet 	bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
2018917a38c7SKent Overstreet 	if (!bs->bio_slab)
2019917a38c7SKent Overstreet 		return -ENOMEM;
2020917a38c7SKent Overstreet 
2021917a38c7SKent Overstreet 	if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
2022917a38c7SKent Overstreet 		goto bad;
2023917a38c7SKent Overstreet 
2024917a38c7SKent Overstreet 	if ((flags & BIOSET_NEED_BVECS) &&
2025917a38c7SKent Overstreet 	    biovec_init_pool(&bs->bvec_pool, pool_size))
2026917a38c7SKent Overstreet 		goto bad;
2027917a38c7SKent Overstreet 
2028917a38c7SKent Overstreet 	if (!(flags & BIOSET_NEED_RESCUER))
2029917a38c7SKent Overstreet 		return 0;
2030917a38c7SKent Overstreet 
2031917a38c7SKent Overstreet 	bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
2032917a38c7SKent Overstreet 	if (!bs->rescue_workqueue)
2033917a38c7SKent Overstreet 		goto bad;
2034917a38c7SKent Overstreet 
2035917a38c7SKent Overstreet 	return 0;
2036917a38c7SKent Overstreet bad:
2037917a38c7SKent Overstreet 	bioset_exit(bs);
2038917a38c7SKent Overstreet 	return -ENOMEM;
2039917a38c7SKent Overstreet }
2040917a38c7SKent Overstreet EXPORT_SYMBOL(bioset_init);
2041917a38c7SKent Overstreet 
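/*
 * Example (sketch): a driver embedding both a bio_set and a per-io
 * context, with the bio at the end of the structure as required;
 * "struct my_io" and "my_bio_set" are hypothetical:
 *
 *	struct my_io {
 *		void *private;
 *		struct bio bio;		must be last
 *	};
 *
 *	ret = bioset_init(&my_bio_set, BIO_POOL_SIZE,
 *			  offsetof(struct my_io, bio), BIOSET_NEED_BVECS);
 *
 * A bio allocated from this set can then be mapped back to its
 * container with container_of(bio, struct my_io, bio).
 */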
204228e89fd9SJens Axboe /*
204328e89fd9SJens Axboe  * Initialize and setup a new bio_set, based on the settings from
204428e89fd9SJens Axboe  * another bio_set.
204528e89fd9SJens Axboe  */
204628e89fd9SJens Axboe int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
204728e89fd9SJens Axboe {
204828e89fd9SJens Axboe 	int flags;
204928e89fd9SJens Axboe 
205028e89fd9SJens Axboe 	flags = 0;
205128e89fd9SJens Axboe 	if (src->bvec_pool.min_nr)
205228e89fd9SJens Axboe 		flags |= BIOSET_NEED_BVECS;
205328e89fd9SJens Axboe 	if (src->rescue_workqueue)
205428e89fd9SJens Axboe 		flags |= BIOSET_NEED_RESCUER;
205528e89fd9SJens Axboe 
205628e89fd9SJens Axboe 	return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags);
205728e89fd9SJens Axboe }
205828e89fd9SJens Axboe EXPORT_SYMBOL(bioset_init_from_src);
205928e89fd9SJens Axboe 
2060f9c78b2bSJens Axboe #ifdef CONFIG_BLK_CGROUP
20611d933cf0STejun Heo 
20621d933cf0STejun Heo /**
20632268c0feSDennis Zhou  * bio_disassociate_blkg - puts back the blkg reference if associated
2064b5f2954dSDennis Zhou  * @bio: target bio
2065b5f2954dSDennis Zhou  *
20662268c0feSDennis Zhou  * Helper to disassociate the blkg from @bio if a blkg is associated.
2067b5f2954dSDennis Zhou  */
20682268c0feSDennis Zhou void bio_disassociate_blkg(struct bio *bio)
2069b5f2954dSDennis Zhou {
207008e18eabSJosef Bacik 	if (bio->bi_blkg) {
207108e18eabSJosef Bacik 		blkg_put(bio->bi_blkg);
207208e18eabSJosef Bacik 		bio->bi_blkg = NULL;
207308e18eabSJosef Bacik 	}
2074f9c78b2bSJens Axboe }
2075892ad71fSDennis Zhou EXPORT_SYMBOL_GPL(bio_disassociate_blkg);
2076f9c78b2bSJens Axboe 
207720bd723eSPaolo Valente /**
20782268c0feSDennis Zhou  * __bio_associate_blkg - associate a bio with a blkg
2079f9c78b2bSJens Axboe  * @bio: target bio
2080f9c78b2bSJens Axboe  * @blkg: the blkg to associate
2081f9c78b2bSJens Axboe  *
2082beea9da0SDennis Zhou  * This tries to associate @bio with the specified @blkg.  Association failure
2083beea9da0SDennis Zhou  * is handled by walking up the blkg tree.  Therefore, the blkg associated can
2084beea9da0SDennis Zhou  * be anything between @blkg and the root_blkg.  This situation only happens
2085beea9da0SDennis Zhou  * when a cgroup is dying and then the remaining bios will spill to the closest
2086beea9da0SDennis Zhou  * alive blkg.
2087beea9da0SDennis Zhou  *
2088beea9da0SDennis Zhou  * A reference will be taken on the @blkg and will be released when @bio is
2089beea9da0SDennis Zhou  * freed.
2090f9c78b2bSJens Axboe  */
20912268c0feSDennis Zhou static void __bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
2092f9c78b2bSJens Axboe {
20932268c0feSDennis Zhou 	bio_disassociate_blkg(bio);
20942268c0feSDennis Zhou 
20957754f669SDennis Zhou 	bio->bi_blkg = blkg_tryget_closest(blkg);
20962268c0feSDennis Zhou }
20972268c0feSDennis Zhou 
2098fd42df30SDennis Zhou /**
2099fd42df30SDennis Zhou  * bio_associate_blkg_from_css - associate a bio with a specified css
2100fd42df30SDennis Zhou  * @bio: target bio
2101fd42df30SDennis Zhou  * @css: target css
2102fd42df30SDennis Zhou  *
2103fd42df30SDennis Zhou  * Associate @bio with the blkg found by combining the css's blkg and the
2104fc5a828bSDennis Zhou  * request_queue of the @bio.  This falls back to the queue's root_blkg if
2105fc5a828bSDennis Zhou  * the association fails with the css.
2106fd42df30SDennis Zhou  */
2107fd42df30SDennis Zhou void bio_associate_blkg_from_css(struct bio *bio,
2108fd42df30SDennis Zhou 				 struct cgroup_subsys_state *css)
2109fd42df30SDennis Zhou {
2110fc5a828bSDennis Zhou 	struct request_queue *q = bio->bi_disk->queue;
2111fc5a828bSDennis Zhou 	struct blkcg_gq *blkg;
2112fc5a828bSDennis Zhou 
2113fc5a828bSDennis Zhou 	rcu_read_lock();
2114fc5a828bSDennis Zhou 
2115fc5a828bSDennis Zhou 	if (!css || !css->parent)
2116fc5a828bSDennis Zhou 		blkg = q->root_blkg;
2117fc5a828bSDennis Zhou 	else
2118fc5a828bSDennis Zhou 		blkg = blkg_lookup_create(css_to_blkcg(css), q);
2119fc5a828bSDennis Zhou 
2120fc5a828bSDennis Zhou 	__bio_associate_blkg(bio, blkg);
2121fc5a828bSDennis Zhou 
2122fc5a828bSDennis Zhou 	rcu_read_unlock();
2123fd42df30SDennis Zhou }
2124fd42df30SDennis Zhou EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
2125fd42df30SDennis Zhou 
21266a7f6d86SDennis Zhou #ifdef CONFIG_MEMCG
21276a7f6d86SDennis Zhou /**
21286a7f6d86SDennis Zhou  * bio_associate_blkg_from_page - associate a bio with the page's blkg
21296a7f6d86SDennis Zhou  * @bio: target bio
21306a7f6d86SDennis Zhou  * @page: the page to lookup the blkcg from
21316a7f6d86SDennis Zhou  *
21326a7f6d86SDennis Zhou  * Associate @bio with the blkg from @page's owning memcg and the respective
2133fc5a828bSDennis Zhou  * request_queue.  If cgroup_e_css returns %NULL, fall back to the queue's
2134fc5a828bSDennis Zhou  * root_blkg.
21356a7f6d86SDennis Zhou  */
21366a7f6d86SDennis Zhou void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
21376a7f6d86SDennis Zhou {
21386a7f6d86SDennis Zhou 	struct cgroup_subsys_state *css;
21396a7f6d86SDennis Zhou 
21406a7f6d86SDennis Zhou 	if (!page->mem_cgroup)
21416a7f6d86SDennis Zhou 		return;
21426a7f6d86SDennis Zhou 
2143fc5a828bSDennis Zhou 	rcu_read_lock();
2144fc5a828bSDennis Zhou 
2145fc5a828bSDennis Zhou 	css = cgroup_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys);
2146fc5a828bSDennis Zhou 	bio_associate_blkg_from_css(bio, css);
2147fc5a828bSDennis Zhou 
2148fc5a828bSDennis Zhou 	rcu_read_unlock();
21496a7f6d86SDennis Zhou }
21506a7f6d86SDennis Zhou #endif /* CONFIG_MEMCG */
21516a7f6d86SDennis Zhou 
21522268c0feSDennis Zhou /**
21532268c0feSDennis Zhou  * bio_associate_blkg - associate a bio with a blkg
21542268c0feSDennis Zhou  * @bio: target bio
21552268c0feSDennis Zhou  *
21562268c0feSDennis Zhou  * Associate @bio with the blkg found from the bio's css and request_queue.
21572268c0feSDennis Zhou  * If one is not found, blkg_lookup_create() creates the blkg.  If a blkg is
21582268c0feSDennis Zhou  * already associated, the css is reused and association redone as the
21592268c0feSDennis Zhou  * request_queue may have changed.
21602268c0feSDennis Zhou  */
21612268c0feSDennis Zhou void bio_associate_blkg(struct bio *bio)
21622268c0feSDennis Zhou {
2163fc5a828bSDennis Zhou 	struct cgroup_subsys_state *css;
21642268c0feSDennis Zhou 
21652268c0feSDennis Zhou 	rcu_read_lock();
21662268c0feSDennis Zhou 
2167db6638d7SDennis Zhou 	if (bio->bi_blkg)
2168fc5a828bSDennis Zhou 		css = &bio_blkcg(bio)->css;
2169db6638d7SDennis Zhou 	else
2170fc5a828bSDennis Zhou 		css = blkcg_css();
21712268c0feSDennis Zhou 
2172fc5a828bSDennis Zhou 	bio_associate_blkg_from_css(bio, css);
21732268c0feSDennis Zhou 
21742268c0feSDennis Zhou 	rcu_read_unlock();
2175f9c78b2bSJens Axboe }
21765cdf2e3fSDennis Zhou EXPORT_SYMBOL_GPL(bio_associate_blkg);
2177f9c78b2bSJens Axboe 
217820bd723eSPaolo Valente /**
2179db6638d7SDennis Zhou  * bio_clone_blkg_association - clone blkg association from src to dst bio
218020bd723eSPaolo Valente  * @dst: destination bio
218120bd723eSPaolo Valente  * @src: source bio
218220bd723eSPaolo Valente  */
2183db6638d7SDennis Zhou void bio_clone_blkg_association(struct bio *dst, struct bio *src)
218420bd723eSPaolo Valente {
21856ab21879SDennis Zhou 	rcu_read_lock();
21866ab21879SDennis Zhou 
2187fc5a828bSDennis Zhou 	if (src->bi_blkg)
21882268c0feSDennis Zhou 		__bio_associate_blkg(dst, src->bi_blkg);
21896ab21879SDennis Zhou 
21906ab21879SDennis Zhou 	rcu_read_unlock();
219120bd723eSPaolo Valente }
2192db6638d7SDennis Zhou EXPORT_SYMBOL_GPL(bio_clone_blkg_association);
2193f9c78b2bSJens Axboe #endif /* CONFIG_BLK_CGROUP */
2194f9c78b2bSJens Axboe 
2195f9c78b2bSJens Axboe static void __init biovec_init_slabs(void)
2196f9c78b2bSJens Axboe {
2197f9c78b2bSJens Axboe 	int i;
2198f9c78b2bSJens Axboe 
2199ed996a52SChristoph Hellwig 	for (i = 0; i < BVEC_POOL_NR; i++) {
2200f9c78b2bSJens Axboe 		int size;
2201f9c78b2bSJens Axboe 		struct biovec_slab *bvs = bvec_slabs + i;
2202f9c78b2bSJens Axboe 
2203f9c78b2bSJens Axboe 		if (bvs->nr_vecs <= BIO_INLINE_VECS) {
2204f9c78b2bSJens Axboe 			bvs->slab = NULL;
2205f9c78b2bSJens Axboe 			continue;
2206f9c78b2bSJens Axboe 		}
2207f9c78b2bSJens Axboe 
2208f9c78b2bSJens Axboe 		size = bvs->nr_vecs * sizeof(struct bio_vec);
2209f9c78b2bSJens Axboe 		bvs->slab = kmem_cache_create(bvs->name, size, 0,
2210f9c78b2bSJens Axboe                                 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
2211f9c78b2bSJens Axboe 	}
2212f9c78b2bSJens Axboe }
2213f9c78b2bSJens Axboe 
2214f9c78b2bSJens Axboe static int __init init_bio(void)
2215f9c78b2bSJens Axboe {
2216f9c78b2bSJens Axboe 	bio_slab_max = 2;
2217f9c78b2bSJens Axboe 	bio_slab_nr = 0;
22186396bb22SKees Cook 	bio_slabs = kcalloc(bio_slab_max, sizeof(struct bio_slab),
22196396bb22SKees Cook 			    GFP_KERNEL);
22202b24e6f6SJohannes Thumshirn 
22212b24e6f6SJohannes Thumshirn 	BUILD_BUG_ON(BIO_FLAG_LAST > BVEC_POOL_OFFSET);
22222b24e6f6SJohannes Thumshirn 
2223f9c78b2bSJens Axboe 	if (!bio_slabs)
2224f9c78b2bSJens Axboe 		panic("bio: can't allocate bios\n");
2225f9c78b2bSJens Axboe 
2226f9c78b2bSJens Axboe 	bio_integrity_init();
2227f9c78b2bSJens Axboe 	biovec_init_slabs();
2228f9c78b2bSJens Axboe 
2229f4f8154aSKent Overstreet 	if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS))
2230f9c78b2bSJens Axboe 		panic("bio: can't allocate bios\n");
2231f9c78b2bSJens Axboe 
2232f4f8154aSKent Overstreet 	if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE))
2233f9c78b2bSJens Axboe 		panic("bio: can't create integrity pool\n");
2234f9c78b2bSJens Axboe 
2235f9c78b2bSJens Axboe 	return 0;
2236f9c78b2bSJens Axboe }
2237f9c78b2bSJens Axboe subsys_initcall(init_bio);
2238