/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 *
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>

#include <trace/events/block.h>
#include "blk.h"

/*
 * Test patch to inline a certain number of bi_io_vec's inside the bio
 * itself, to shrink a bio data allocation from two mempool calls to one
 */
#define BIO_INLINE_VECS		4

/*
 * if you change this list, also change bvec_alloc or things will
 * break badly! cannot be bigger than what you can fit into an
 * unsigned short
 */
#define BV(x, n) { .nr_vecs = x, .name = "biovec-"#n }
static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
        BV(1, 1), BV(4, 4), BV(16, 16), BV(64, 64), BV(128, 128), BV(BIO_MAX_PAGES, max),
};
#undef BV

/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
struct bio_set *fs_bio_set;
EXPORT_SYMBOL(fs_bio_set);

/*
 * Our slab pool management
 */
struct bio_slab {
        struct kmem_cache *slab;
        unsigned int slab_ref;
        unsigned int slab_size;
        char name[8];
};
static DEFINE_MUTEX(bio_slab_lock);
static struct bio_slab *bio_slabs;
static unsigned int bio_slab_nr, bio_slab_max;

static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
{
        unsigned int sz = sizeof(struct bio) + extra_size;
        struct kmem_cache *slab = NULL;
        struct bio_slab *bslab, *new_bio_slabs;
        unsigned int new_bio_slab_max;
        unsigned int i, entry = -1;

        mutex_lock(&bio_slab_lock);

        i = 0;
        while (i < bio_slab_nr) {
                bslab = &bio_slabs[i];

                if (!bslab->slab && entry == -1)
                        entry = i;
                else if (bslab->slab_size == sz) {
                        slab = bslab->slab;
                        bslab->slab_ref++;
                        break;
                }
                i++;
        }

        if (slab)
                goto out_unlock;

        if (bio_slab_nr == bio_slab_max && entry == -1) {
                new_bio_slab_max = bio_slab_max << 1;
                new_bio_slabs = krealloc(bio_slabs,
                                         new_bio_slab_max * sizeof(struct bio_slab),
                                         GFP_KERNEL);
                if (!new_bio_slabs)
                        goto out_unlock;
                bio_slab_max = new_bio_slab_max;
                bio_slabs = new_bio_slabs;
        }
        if (entry == -1)
                entry = bio_slab_nr++;

        bslab = &bio_slabs[entry];

        snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
        slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
                                 SLAB_HWCACHE_ALIGN, NULL);
        if (!slab)
                goto out_unlock;

        bslab->slab = slab;
        bslab->slab_ref = 1;
        bslab->slab_size = sz;
out_unlock:
        mutex_unlock(&bio_slab_lock);
        return slab;
}

static void bio_put_slab(struct bio_set *bs)
{
        struct bio_slab *bslab = NULL;
        unsigned int i;

        mutex_lock(&bio_slab_lock);

        for (i = 0; i < bio_slab_nr; i++) {
                if (bs->bio_slab == bio_slabs[i].slab) {
                        bslab = &bio_slabs[i];
                        break;
                }
        }

        if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
                goto out;

        WARN_ON(!bslab->slab_ref);

        if (--bslab->slab_ref)
                goto out;

        kmem_cache_destroy(bslab->slab);
        bslab->slab = NULL;

out:
        mutex_unlock(&bio_slab_lock);
}

unsigned int bvec_nr_vecs(unsigned short idx)
{
        return bvec_slabs[idx].nr_vecs;
}

void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
{
        if (!idx)
                return;
        idx--;

        BIO_BUG_ON(idx >= BVEC_POOL_NR);

        if (idx == BVEC_POOL_MAX) {
                mempool_free(bv, pool);
        } else {
                struct biovec_slab *bvs = bvec_slabs + idx;

                kmem_cache_free(bvs->slab, bv);
        }
}
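
/*
 * Illustrative usage sketch (not part of the original file): callers
 * pair bvec_alloc(), defined just below, with bvec_free() above. The
 * pool index returned through @idx encodes which slab (or the mempool)
 * backed the vector, so it must be handed back unchanged when freeing.
 * 'nr_vecs' and 'pool' are hypothetical caller state:
 *
 *	unsigned long idx = 0;
 *	struct bio_vec *bvl;
 *
 *	bvl = bvec_alloc(GFP_NOIO, nr_vecs, &idx, pool);
 *	if (bvl) {
 *		... use bvl[0] .. bvl[nr_vecs - 1] ...
 *		bvec_free(pool, bvl, idx);
 *	}
 */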

struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
                           mempool_t *pool)
{
        struct bio_vec *bvl;

        /*
         * see comment near the bvec_slabs define!
         */
        switch (nr) {
        case 1:
                *idx = 0;
                break;
        case 2 ... 4:
                *idx = 1;
                break;
        case 5 ... 16:
                *idx = 2;
                break;
        case 17 ... 64:
                *idx = 3;
                break;
        case 65 ... 128:
                *idx = 4;
                break;
        case 129 ... BIO_MAX_PAGES:
                *idx = 5;
                break;
        default:
                return NULL;
        }

        /*
         * idx now points to the pool we want to allocate from. only the
         * 1-vec entry pool is mempool backed.
         */
        if (*idx == BVEC_POOL_MAX) {
fallback:
                bvl = mempool_alloc(pool, gfp_mask);
        } else {
                struct biovec_slab *bvs = bvec_slabs + *idx;
                gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);

                /*
                 * Make this allocation restricted and don't dump info on
                 * allocation failures, since we'll fall back to the mempool
                 * in case of failure.
                 */
                __gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;

                /*
                 * Try a slab allocation. If this fails and __GFP_DIRECT_RECLAIM
                 * is set, retry with the 1-entry mempool
                 */
                bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
                if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
                        *idx = BVEC_POOL_MAX;
                        goto fallback;
                }
        }

        (*idx)++;
        return bvl;
}

void bio_uninit(struct bio *bio)
{
        bio_disassociate_task(bio);
}
EXPORT_SYMBOL(bio_uninit);

static void bio_free(struct bio *bio)
{
        struct bio_set *bs = bio->bi_pool;
        void *p;

        bio_uninit(bio);

        if (bs) {
                bvec_free(bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));

                /*
                 * If we have front padding, adjust the bio pointer before freeing
                 */
                p = bio;
                p -= bs->front_pad;

                mempool_free(p, bs->bio_pool);
        } else {
                /* Bio was allocated by bio_kmalloc() */
                kfree(bio);
        }
}

/*
 * Users of this function have their own bio allocation. Subsequently,
 * they must remember to pair any call to bio_init() with bio_uninit()
 * when IO has completed, or when the bio is released.
 */
void bio_init(struct bio *bio, struct bio_vec *table,
              unsigned short max_vecs)
{
        memset(bio, 0, sizeof(*bio));
        atomic_set(&bio->__bi_remaining, 1);
        atomic_set(&bio->__bi_cnt, 1);

        bio->bi_io_vec = table;
        bio->bi_max_vecs = max_vecs;
}
EXPORT_SYMBOL(bio_init);
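
/*
 * Illustrative usage sketch (not part of the original file): a
 * caller-owned bio set up with bio_init(), e.g. embedded in a driver
 * structure with a small inline vector table (the names here are
 * hypothetical). As noted above, it must be paired with bio_uninit():
 *
 *	struct foo_cmd {
 *		struct bio bio;
 *		struct bio_vec vecs[4];
 *	};
 *
 *	bio_init(&cmd->bio, cmd->vecs, ARRAY_SIZE(cmd->vecs));
 *	... submit and wait for completion ...
 *	bio_uninit(&cmd->bio);
 */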

/**
 * bio_reset - reinitialize a bio
 * @bio:	bio to reset
 *
 * Description:
 *   After calling bio_reset(), @bio will be in the same state as a freshly
 *   allocated bio returned by bio_alloc_bioset() - the only fields that are
 *   preserved are the ones that are initialized by bio_alloc_bioset(). See
 *   comment in struct bio.
 */
void bio_reset(struct bio *bio)
{
        unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);

        bio_uninit(bio);

        memset(bio, 0, BIO_RESET_BYTES);
        bio->bi_flags = flags;
        atomic_set(&bio->__bi_remaining, 1);
}
EXPORT_SYMBOL(bio_reset);

static struct bio *__bio_chain_endio(struct bio *bio)
{
        struct bio *parent = bio->bi_private;

        if (!parent->bi_status)
                parent->bi_status = bio->bi_status;
        bio_put(bio);
        return parent;
}

static void bio_chain_endio(struct bio *bio)
{
        bio_endio(__bio_chain_endio(bio));
}

/**
 * bio_chain - chain bio completions
 * @bio: the target bio
 * @parent: the @bio's parent bio
 *
 * The caller won't have a bi_end_io called when @bio completes - instead,
 * @parent's bi_end_io won't be called until both @parent and @bio have
 * completed; the chained bio will also be freed when it completes.
 *
 * The caller must not set bi_private or bi_end_io in @bio.
 */
void bio_chain(struct bio *bio, struct bio *parent)
{
        BUG_ON(bio->bi_private || bio->bi_end_io);

        bio->bi_private = parent;
        bio->bi_end_io = bio_chain_endio;
        bio_inc_remaining(parent);
}
EXPORT_SYMBOL(bio_chain);
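
/*
 * Illustrative usage sketch (not part of the original file): splitting
 * work off the front of a bio and chaining it, so the parent's
 * completion only runs once both pieces are done. 'split_bytes' and
 * 'bs' are hypothetical; the parent is submitted separately by the
 * caller:
 *
 *	struct bio *split = bio_clone_fast(bio, GFP_NOIO, bs);
 *
 *	split->bi_iter.bi_size = split_bytes;
 *	bio_advance(bio, split_bytes);
 *	bio_chain(split, bio);
 *	generic_make_request(split);
 */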

static void bio_alloc_rescue(struct work_struct *work)
{
        struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
        struct bio *bio;

        while (1) {
                spin_lock(&bs->rescue_lock);
                bio = bio_list_pop(&bs->rescue_list);
                spin_unlock(&bs->rescue_lock);

                if (!bio)
                        break;

                generic_make_request(bio);
        }
}

static void punt_bios_to_rescuer(struct bio_set *bs)
{
        struct bio_list punt, nopunt;
        struct bio *bio;

        if (WARN_ON_ONCE(!bs->rescue_workqueue))
                return;
        /*
         * In order to guarantee forward progress we must punt only bios that
         * were allocated from this bio_set; otherwise, if there was a bio on
         * there for a stacking driver higher up in the stack, processing it
         * could require allocating bios from this bio_set, and doing that from
         * our own rescuer would be bad.
         *
         * Since bio lists are singly linked, pop them all instead of trying to
         * remove from the middle of the list:
         */

        bio_list_init(&punt);
        bio_list_init(&nopunt);

        while ((bio = bio_list_pop(&current->bio_list[0])))
                bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
        current->bio_list[0] = nopunt;

        bio_list_init(&nopunt);
        while ((bio = bio_list_pop(&current->bio_list[1])))
                bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
        current->bio_list[1] = nopunt;

        spin_lock(&bs->rescue_lock);
        bio_list_merge(&bs->rescue_list, &punt);
        spin_unlock(&bs->rescue_lock);

        queue_work(bs->rescue_workqueue, &bs->rescue_work);
}

/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask:   the GFP_* mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 * @bs:		the bio_set to allocate from.
 *
 * Description:
 *   If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
 *   backed by the @bs's mempool.
 *
 *   When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc will
 *   always be able to allocate a bio. This is due to the mempool guarantees.
 *   To make this work, callers must never allocate more than 1 bio at a time
 *   from this pool. Callers that need to allocate more than 1 bio must always
 *   submit the previously allocated bio for IO before attempting to allocate
 *   a new one. Failure to do so can cause deadlocks under memory pressure.
 *
 *   Note that when running under generic_make_request() (i.e. any block
 *   driver), bios are not submitted until after you return - see the code in
 *   generic_make_request() that converts recursion into iteration, to prevent
 *   stack overflows.
 *
 *   This would normally mean allocating multiple bios under
 *   generic_make_request() would be susceptible to deadlocks, but we have
 *   deadlock avoidance code that resubmits any blocked bios from a rescuer
 *   thread.
 *
 *   However, we do not guarantee forward progress for allocations from other
 *   mempools. Doing multiple allocations from the same mempool under
 *   generic_make_request() should be avoided - instead, use bio_set's front_pad
 *   for per bio allocations.
 *
 *   RETURNS:
 *   Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
                             struct bio_set *bs)
{
        gfp_t saved_gfp = gfp_mask;
        unsigned front_pad;
        unsigned inline_vecs;
        struct bio_vec *bvl = NULL;
        struct bio *bio;
        void *p;

        if (!bs) {
                if (nr_iovecs > UIO_MAXIOV)
                        return NULL;

                p = kmalloc(sizeof(struct bio) +
                            nr_iovecs * sizeof(struct bio_vec),
                            gfp_mask);
                front_pad = 0;
                inline_vecs = nr_iovecs;
        } else {
                /* should not use nobvec bioset for nr_iovecs > 0 */
                if (WARN_ON_ONCE(!bs->bvec_pool && nr_iovecs > 0))
                        return NULL;
                /*
                 * generic_make_request() converts recursion to iteration; this
                 * means if we're running beneath it, any bios we allocate and
                 * submit will not be submitted (and thus freed) until after we
                 * return.
                 *
                 * This exposes us to a potential deadlock if we allocate
                 * multiple bios from the same bio_set() while running
                 * underneath generic_make_request(). If we were to allocate
                 * multiple bios (say a stacking block driver that was splitting
                 * bios), we would deadlock if we exhausted the mempool's
                 * reserve.
                 *
                 * We solve this, and guarantee forward progress, with a rescuer
                 * workqueue per bio_set. If we go to allocate and there are
                 * bios on current->bio_list, we first try the allocation
                 * without __GFP_DIRECT_RECLAIM; if that fails, we punt those
                 * bios we would be blocking to the rescuer workqueue before
                 * we retry with the original gfp_flags.
                 */

                if (current->bio_list &&
                    (!bio_list_empty(&current->bio_list[0]) ||
                     !bio_list_empty(&current->bio_list[1])) &&
                    bs->rescue_workqueue)
                        gfp_mask &= ~__GFP_DIRECT_RECLAIM;

                p = mempool_alloc(bs->bio_pool, gfp_mask);
                if (!p && gfp_mask != saved_gfp) {
                        punt_bios_to_rescuer(bs);
                        gfp_mask = saved_gfp;
                        p = mempool_alloc(bs->bio_pool, gfp_mask);
                }

                front_pad = bs->front_pad;
                inline_vecs = BIO_INLINE_VECS;
        }

        if (unlikely(!p))
                return NULL;

        bio = p + front_pad;
        bio_init(bio, NULL, 0);

        if (nr_iovecs > inline_vecs) {
                unsigned long idx = 0;

                bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
                if (!bvl && gfp_mask != saved_gfp) {
                        punt_bios_to_rescuer(bs);
                        gfp_mask = saved_gfp;
                        bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
                }

                if (unlikely(!bvl))
                        goto err_free;

                bio->bi_flags |= idx << BVEC_POOL_OFFSET;
        } else if (nr_iovecs) {
                bvl = bio->bi_inline_vecs;
        }

        bio->bi_pool = bs;
        bio->bi_max_vecs = nr_iovecs;
        bio->bi_io_vec = bvl;
        return bio;

err_free:
        mempool_free(p, bs->bio_pool);
        return NULL;
}
EXPORT_SYMBOL(bio_alloc_bioset);
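
/*
 * Illustrative usage sketch (not part of the original file): allocating
 * from a private bio_set while respecting the rule documented above -
 * only one bio at a time may be outstanding from the pool, so each bio
 * is submitted before the next is allocated ('more_work', 'nr_vecs' and
 * 'bs' are hypothetical):
 *
 *	while (more_work) {
 *		struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, bs);
 *
 *		... fill in bi_iter and add pages ...
 *		generic_make_request(bio);
 *		(only now allocate the next bio from bs)
 *	}
 */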

void zero_fill_bio(struct bio *bio)
{
        unsigned long flags;
        struct bio_vec bv;
        struct bvec_iter iter;

        bio_for_each_segment(bv, bio, iter) {
                char *data = bvec_kmap_irq(&bv, &flags);
                memset(data, 0, bv.bv_len);
                flush_dcache_page(bv.bv_page);
                bvec_kunmap_irq(data, &flags);
        }
}
EXPORT_SYMBOL(zero_fill_bio);

/**
 * bio_put - release a reference to a bio
 * @bio:   bio to release reference to
 *
 * Description:
 *   Put a reference to a &struct bio, either one you have gotten with
 *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
 **/
void bio_put(struct bio *bio)
{
        if (!bio_flagged(bio, BIO_REFFED))
                bio_free(bio);
        else {
                BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));

                /*
                 * last put frees it
                 */
                if (atomic_dec_and_test(&bio->__bi_cnt))
                        bio_free(bio);
        }
}
EXPORT_SYMBOL(bio_put);

inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
{
        if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
                blk_recount_segments(q, bio);

        return bio->bi_phys_segments;
}
EXPORT_SYMBOL(bio_phys_segments);
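
/*
 * Illustrative usage sketch (not part of the original file): a caller
 * that still needs the bio after submission takes its own reference
 * first, since a submitted bio is normally freed by its completion
 * path:
 *
 *	bio_get(bio);
 *	submit_bio(bio);
 *	... the bio structure may still be touched here, even if the
 *	    IO has already completed ...
 *	bio_put(bio);
 */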

/**
 * __bio_clone_fast - clone a bio that shares the original bio's biovec
 * @bio: destination bio
 * @bio_src: bio to clone
 *
 * Clone a &bio. Caller will own the returned bio, but not
 * the actual data it points to. Reference count of returned
 * bio will be one.
 *
 * Caller must ensure that @bio_src is not freed before @bio.
 */
void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
{
        BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));

        /*
         * most users will be overriding ->bi_disk with a new target,
         * so we don't set nor calculate new physical/hw segment counts here
         */
        bio->bi_disk = bio_src->bi_disk;
        bio->bi_partno = bio_src->bi_partno;
        bio_set_flag(bio, BIO_CLONED);
        if (bio_flagged(bio_src, BIO_THROTTLED))
                bio_set_flag(bio, BIO_THROTTLED);
        bio->bi_opf = bio_src->bi_opf;
        bio->bi_write_hint = bio_src->bi_write_hint;
        bio->bi_iter = bio_src->bi_iter;
        bio->bi_io_vec = bio_src->bi_io_vec;

        bio_clone_blkcg_association(bio, bio_src);
}
EXPORT_SYMBOL(__bio_clone_fast);

/**
 * bio_clone_fast - clone a bio that shares the original bio's biovec
 * @bio: bio to clone
 * @gfp_mask: allocation priority
 * @bs: bio_set to allocate from
 *
 * Like __bio_clone_fast, only also allocates the returned bio
 */
struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
{
        struct bio *b;

        b = bio_alloc_bioset(gfp_mask, 0, bs);
        if (!b)
                return NULL;

        __bio_clone_fast(b, bio);

        if (bio_integrity(bio)) {
                int ret;

                ret = bio_integrity_clone(b, bio, gfp_mask);

                if (ret < 0) {
                        bio_put(b);
                        return NULL;
                }
        }

        return b;
}
EXPORT_SYMBOL(bio_clone_fast);
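
/*
 * Illustrative usage sketch (not part of the original file): a stacking
 * driver cloning an incoming bio so it can redirect and complete it on
 * its own terms; the clone borrows @bio's biovec, so the original must
 * outlive it ('target_disk', 'my_endio' and 'bs' are hypothetical):
 *
 *	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, bs);
 *
 *	clone->bi_disk = target_disk;
 *	clone->bi_end_io = my_endio;
 *	clone->bi_private = bio;
 *	generic_make_request(clone);
 */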

/**
 * bio_clone_bioset - clone a bio
 * @bio_src: bio to clone
 * @gfp_mask: allocation priority
 * @bs: bio_set to allocate from
 *
 * Clone bio. Caller will own the returned bio, but not the actual data it
 * points to. Reference count of returned bio will be one.
 */
struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
                             struct bio_set *bs)
{
        struct bvec_iter iter;
        struct bio_vec bv;
        struct bio *bio;

        /*
         * Pre immutable biovecs, __bio_clone() used to just do a memcpy from
         * bio_src->bi_io_vec to bio->bi_io_vec.
         *
         * We can't do that anymore, because:
         *
         *  - The point of cloning the biovec is to produce a bio with a biovec
         *    the caller can modify: bi_idx and bi_bvec_done should be 0.
         *
         *  - The original bio could've had more than BIO_MAX_PAGES biovecs; if
         *    we tried to clone the whole thing bio_alloc_bioset() would fail.
         *    But the clone should succeed as long as the number of biovecs we
         *    actually need to allocate is fewer than BIO_MAX_PAGES.
         *
         *  - Lastly, bi_vcnt should not be looked at or relied upon by code
         *    that does not own the bio - reason being drivers don't use it for
         *    iterating over the biovec anymore, so expecting it to be kept up
         *    to date (i.e. for clones that share the parent biovec) is just
         *    asking for trouble and would force extra work on
         *    __bio_clone_fast() anyways.
         */

        bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
        if (!bio)
                return NULL;
        bio->bi_disk = bio_src->bi_disk;
        bio->bi_opf = bio_src->bi_opf;
        bio->bi_write_hint = bio_src->bi_write_hint;
        bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
        bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;

        switch (bio_op(bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
        case REQ_OP_WRITE_ZEROES:
                break;
        case REQ_OP_WRITE_SAME:
                bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
                break;
        default:
                bio_for_each_segment(bv, bio_src, iter)
                        bio->bi_io_vec[bio->bi_vcnt++] = bv;
                break;
        }

        if (bio_integrity(bio_src)) {
                int ret;

                ret = bio_integrity_clone(bio, bio_src, gfp_mask);
                if (ret < 0) {
                        bio_put(bio);
                        return NULL;
                }
        }

        bio_clone_blkcg_association(bio, bio_src);

        return bio;
}
EXPORT_SYMBOL(bio_clone_bioset);

/**
 * bio_add_pc_page - attempt to add page to bio
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist. This can fail for a
 * number of reasons, such as the bio being full or target block device
 * limitations. The target block device must allow bios up to PAGE_SIZE,
 * so it is always possible to add a single page to an empty bio.
 *
 * This should only be used by REQ_PC bios.
 */
int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
                    *page, unsigned int len, unsigned int offset)
{
        int retried_segments = 0;
        struct bio_vec *bvec;

        /*
         * cloned bio must not modify vec list
         */
        if (unlikely(bio_flagged(bio, BIO_CLONED)))
                return 0;

        if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
                return 0;

        /*
         * For filesystems with a blocksize smaller than the pagesize
         * we will often be called with the same page as last time and
         * a consecutive offset. Optimize this special case.
         */
        if (bio->bi_vcnt > 0) {
                struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];

                if (page == prev->bv_page &&
                    offset == prev->bv_offset + prev->bv_len) {
                        prev->bv_len += len;
                        bio->bi_iter.bi_size += len;
                        goto done;
                }

                /*
                 * If the queue doesn't support SG gaps and adding this
                 * offset would create a gap, disallow it.
                 */
                if (bvec_gap_to_prev(q, prev, offset))
                        return 0;
        }

        if (bio->bi_vcnt >= bio->bi_max_vecs)
                return 0;

        /*
         * setup the new entry, we might clear it again later if we
         * cannot add the page
         */
        bvec = &bio->bi_io_vec[bio->bi_vcnt];
        bvec->bv_page = page;
        bvec->bv_len = len;
        bvec->bv_offset = offset;
        bio->bi_vcnt++;
        bio->bi_phys_segments++;
        bio->bi_iter.bi_size += len;

        /*
         * Perform a recount if the number of segments is greater
         * than queue_max_segments(q).
         */

        while (bio->bi_phys_segments > queue_max_segments(q)) {

                if (retried_segments)
                        goto failed;

                retried_segments = 1;
                blk_recount_segments(q, bio);
        }

        /* If we may be able to merge these biovecs, force a recount */
        if (bio->bi_vcnt > 1 && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
                bio_clear_flag(bio, BIO_SEG_VALID);

done:
        return len;

failed:
        bvec->bv_page = NULL;
        bvec->bv_len = 0;
        bvec->bv_offset = 0;
        bio->bi_vcnt--;
        bio->bi_iter.bi_size -= len;
        blk_recount_segments(q, bio);
        return 0;
}
EXPORT_SYMBOL(bio_add_pc_page);

/**
 * bio_add_page - attempt to add page to bio
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist. This will only fail
 * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
 */
int bio_add_page(struct bio *bio, struct page *page,
                 unsigned int len, unsigned int offset)
{
        struct bio_vec *bv;

        /*
         * cloned bio must not modify vec list
         */
        if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
                return 0;

        /*
         * For filesystems with a blocksize smaller than the pagesize
         * we will often be called with the same page as last time and
         * a consecutive offset. Optimize this special case.
         */
        if (bio->bi_vcnt > 0) {
                bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

                if (page == bv->bv_page &&
                    offset == bv->bv_offset + bv->bv_len) {
                        bv->bv_len += len;
                        goto done;
                }
        }

        if (bio->bi_vcnt >= bio->bi_max_vecs)
                return 0;

        bv = &bio->bi_io_vec[bio->bi_vcnt];
        bv->bv_page = page;
        bv->bv_len = len;
        bv->bv_offset = offset;

        bio->bi_vcnt++;
done:
        bio->bi_iter.bi_size += len;
        return len;
}
EXPORT_SYMBOL(bio_add_page);
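
/*
 * Illustrative usage sketch (not part of the original file): filling a
 * bio page by page. For a freshly allocated (non-cloned) bio,
 * bio_add_page() fails only once the vector table is full, so the
 * return value tells the caller when to submit and start a new bio:
 *
 *	if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) {
 *		submit_bio(bio);
 *		... allocate a fresh bio and add the page there ...
 *	}
 */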

/**
 * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be mapped
 *
 * Pins as many pages from *iter and appends them to @bio's bvec array. The
 * pages will have to be released using put_page() when done.
 */
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
        unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
        struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
        struct page **pages = (struct page **)bv;
        size_t offset, diff;
        ssize_t size;

        size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
        if (unlikely(size <= 0))
                return size ? size : -EFAULT;
        nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;

        /*
         * Deep magic below: We need to walk the pinned pages backwards
         * because we are abusing the space allocated for the bio_vecs
         * for the page array. Because the bio_vecs are larger than the
         * page pointers by definition this will always work. But it also
         * means we can't use bio_add_page, so any changes to its semantics
         * need to be reflected here as well.
         */
        bio->bi_iter.bi_size += size;
        bio->bi_vcnt += nr_pages;

        diff = (nr_pages * PAGE_SIZE - offset) - size;
        while (nr_pages--) {
                bv[nr_pages].bv_page = pages[nr_pages];
                bv[nr_pages].bv_len = PAGE_SIZE;
                bv[nr_pages].bv_offset = 0;
        }

        bv[0].bv_offset += offset;
        bv[0].bv_len -= offset;
        if (diff)
                bv[bio->bi_vcnt - 1].bv_len -= diff;

        iov_iter_advance(iter, size);
        return 0;
}
EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);

static void submit_bio_wait_endio(struct bio *bio)
{
        complete(bio->bi_private);
}

/**
 * submit_bio_wait - submit a bio, and wait until it completes
 * @bio: The &struct bio which describes the I/O
 *
 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
 * bio_endio() on failure.
 *
 * WARNING: Unlike how submit_bio() is usually used, this function does not
 * result in the bio reference being consumed. The caller must drop the
 * reference on their own.
 */
int submit_bio_wait(struct bio *bio)
{
        DECLARE_COMPLETION_ONSTACK_MAP(done, bio->bi_disk->lockdep_map);

        bio->bi_private = &done;
        bio->bi_end_io = submit_bio_wait_endio;
        bio->bi_opf |= REQ_SYNC;
        submit_bio(bio);
        wait_for_completion_io(&done);

        return blk_status_to_errno(bio->bi_status);
}
EXPORT_SYMBOL(submit_bio_wait);
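
/*
 * Illustrative usage sketch (not part of the original file):
 * synchronous IO via submit_bio_wait(). Per the warning above, the
 * bio reference is not consumed, so the caller still puts the bio
 * afterwards ('bdev' is a hypothetical block device):
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_opf = REQ_OP_READ;
 *	ret = submit_bio_wait(bio);
 *	bio_put(bio);
 */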

/**
 * bio_advance - increment/complete a bio by some number of bytes
 * @bio:	bio to advance
 * @bytes:	number of bytes to complete
 *
 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
 * be updated on the last bvec as well.
 *
 * @bio will then represent the remaining, uncompleted portion of the io.
 */
void bio_advance(struct bio *bio, unsigned bytes)
{
        if (bio_integrity(bio))
                bio_integrity_advance(bio, bytes);

        bio_advance_iter(bio, &bio->bi_iter, bytes);
}
EXPORT_SYMBOL(bio_advance);

/**
 * bio_copy_data - copy contents of data buffers from one chain of bios to
 * another
 * @src: source bio list
 * @dst: destination bio list
 *
 * If @src and @dst are single bios, bi_next must be NULL - otherwise, treats
 * @src and @dst as linked lists of bios.
 *
 * Stops when it reaches the end of either @src or @dst - that is, copies
 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
 */
void bio_copy_data(struct bio *dst, struct bio *src)
{
        struct bvec_iter src_iter, dst_iter;
        struct bio_vec src_bv, dst_bv;
        void *src_p, *dst_p;
        unsigned bytes;

        src_iter = src->bi_iter;
        dst_iter = dst->bi_iter;

        while (1) {
                if (!src_iter.bi_size) {
                        src = src->bi_next;
                        if (!src)
                                break;

                        src_iter = src->bi_iter;
                }

                if (!dst_iter.bi_size) {
                        dst = dst->bi_next;
                        if (!dst)
                                break;

                        dst_iter = dst->bi_iter;
                }

                src_bv = bio_iter_iovec(src, src_iter);
                dst_bv = bio_iter_iovec(dst, dst_iter);

                bytes = min(src_bv.bv_len, dst_bv.bv_len);

                src_p = kmap_atomic(src_bv.bv_page);
                dst_p = kmap_atomic(dst_bv.bv_page);

                memcpy(dst_p + dst_bv.bv_offset,
                       src_p + src_bv.bv_offset,
                       bytes);

                kunmap_atomic(dst_p);
                kunmap_atomic(src_p);

                bio_advance_iter(src, &src_iter, bytes);
                bio_advance_iter(dst, &dst_iter, bytes);
        }
}
EXPORT_SYMBOL(bio_copy_data);
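
/*
 * Illustrative usage sketch (not part of the original file): a driver
 * that bounces an incoming write through its own buffer bio before
 * sending it downstream. 'buf_bio' is a hypothetical bio whose pages
 * the driver allocated and which covers at least as many bytes as
 * 'orig_bio'; neither bio's iterator is modified by the copy:
 *
 *	bio_copy_data(buf_bio, orig_bio);
 *	generic_make_request(buf_bio);
 */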

struct bio_map_data {
        int is_our_pages;
        struct iov_iter iter;
        struct iovec iov[];
};

static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
                                               gfp_t gfp_mask)
{
        struct bio_map_data *bmd;
        if (data->nr_segs > UIO_MAXIOV)
                return NULL;

        bmd = kmalloc(sizeof(struct bio_map_data) +
                      sizeof(struct iovec) * data->nr_segs, gfp_mask);
        if (!bmd)
                return NULL;
        memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
        bmd->iter = *data;
        bmd->iter.iov = bmd->iov;
        return bmd;
}

/**
 * bio_copy_from_iter - copy all pages from iov_iter to bio
 * @bio: The &struct bio which describes the I/O as destination
 * @iter: iov_iter as source
 *
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
{
        int i;
        struct bio_vec *bvec;

        bio_for_each_segment_all(bvec, bio, i) {
                ssize_t ret;

                ret = copy_page_from_iter(bvec->bv_page,
                                          bvec->bv_offset,
                                          bvec->bv_len,
                                          iter);

                if (!iov_iter_count(iter))
                        break;

                if (ret < bvec->bv_len)
                        return -EFAULT;
        }

        return 0;
}

/**
 * bio_copy_to_iter - copy all pages from bio to iov_iter
 * @bio: The &struct bio which describes the I/O as source
 * @iter: iov_iter as destination
 *
 * Copy all pages from bio to iov_iter.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
        int i;
        struct bio_vec *bvec;

        bio_for_each_segment_all(bvec, bio, i) {
                ssize_t ret;

                ret = copy_page_to_iter(bvec->bv_page,
                                        bvec->bv_offset,
                                        bvec->bv_len,
                                        &iter);

                if (!iov_iter_count(&iter))
                        break;

                if (ret < bvec->bv_len)
                        return -EFAULT;
        }

        return 0;
}

void bio_free_pages(struct bio *bio)
{
        struct bio_vec *bvec;
        int i;

        bio_for_each_segment_all(bvec, bio, i)
                __free_page(bvec->bv_page);
}
EXPORT_SYMBOL(bio_free_pages);

/**
 * bio_uncopy_user - finish previously mapped bio
 * @bio: bio being terminated
 *
 * Free pages allocated from bio_copy_user_iov() and write back data
 * to user space in case of a read.
 */
int bio_uncopy_user(struct bio *bio)
{
        struct bio_map_data *bmd = bio->bi_private;
        int ret = 0;

        if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
                /*
                 * if we're in a workqueue, the request is orphaned, so
                 * don't copy into a random user address space, just free
                 * and return -EINTR so user space doesn't expect any data.
                 */
                if (!current->mm)
                        ret = -EINTR;
                else if (bio_data_dir(bio) == READ)
                        ret = bio_copy_to_iter(bio, bmd->iter);
                if (bmd->is_our_pages)
                        bio_free_pages(bio);
        }
        kfree(bmd);
        bio_put(bio);
        return ret;
}

/**
 * bio_copy_user_iov - copy user data to bio
 * @q:		destination block queue
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Prepares and returns a bio for indirect user io, bouncing data
 * to/from kernel pages as necessary. Must be paired with a call to
 * bio_uncopy_user() on io completion.
 */
struct bio *bio_copy_user_iov(struct request_queue *q,
                              struct rq_map_data *map_data,
                              struct iov_iter *iter,
                              gfp_t gfp_mask)
{
        struct bio_map_data *bmd;
        struct page *page;
        struct bio *bio;
        int i = 0, ret;
        int nr_pages;
        unsigned int len = iter->count;
        unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

        bmd = bio_alloc_map_data(iter, gfp_mask);
        if (!bmd)
                return ERR_PTR(-ENOMEM);

        /*
         * We need to do a deep copy of the iov_iter including the iovecs.
         * The caller provided iov might point to an on-stack or otherwise
         * shortlived one.
         */
        bmd->is_our_pages = map_data ? 0 : 1;

        nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
        if (nr_pages > BIO_MAX_PAGES)
                nr_pages = BIO_MAX_PAGES;

        ret = -ENOMEM;
        bio = bio_kmalloc(gfp_mask, nr_pages);
        if (!bio)
                goto out_bmd;

        ret = 0;

        if (map_data) {
                nr_pages = 1 << map_data->page_order;
                i = map_data->offset / PAGE_SIZE;
        }
        while (len) {
                unsigned int bytes = PAGE_SIZE;

                bytes -= offset;

                if (bytes > len)
                        bytes = len;

                if (map_data) {
                        if (i == map_data->nr_entries * nr_pages) {
                                ret = -ENOMEM;
                                break;
                        }

                        page = map_data->pages[i / nr_pages];
                        page += (i % nr_pages);

                        i++;
                } else {
                        page = alloc_page(q->bounce_gfp | gfp_mask);
                        if (!page) {
                                ret = -ENOMEM;
                                break;
                        }
                }

                if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
                        break;

                len -= bytes;
                offset = 0;
        }

        if (ret)
                goto cleanup;

        if (map_data)
                map_data->offset += bio->bi_iter.bi_size;
125298a09d61SAl Viro ret = bio_copy_from_iter(bio, iter); 1253f9c78b2bSJens Axboe if (ret) 1254f9c78b2bSJens Axboe goto cleanup; 125598a09d61SAl Viro } else { 1256e81cef5dSAl Viro iov_iter_advance(iter, bio->bi_iter.bi_size); 1257f9c78b2bSJens Axboe } 1258f9c78b2bSJens Axboe 125926e49cfcSKent Overstreet bio->bi_private = bmd; 12602884d0beSAl Viro if (map_data && map_data->null_mapped) 12612884d0beSAl Viro bio_set_flag(bio, BIO_NULL_MAPPED); 1262f9c78b2bSJens Axboe return bio; 1263f9c78b2bSJens Axboe cleanup: 1264f9c78b2bSJens Axboe if (!map_data) 12651dfa0f68SChristoph Hellwig bio_free_pages(bio); 1266f9c78b2bSJens Axboe bio_put(bio); 1267f9c78b2bSJens Axboe out_bmd: 1268f9c78b2bSJens Axboe kfree(bmd); 1269f9c78b2bSJens Axboe return ERR_PTR(ret); 1270f9c78b2bSJens Axboe } 1271f9c78b2bSJens Axboe 127237f19e57SChristoph Hellwig /** 127337f19e57SChristoph Hellwig * bio_map_user_iov - map user iovec into bio 127437f19e57SChristoph Hellwig * @q: the struct request_queue for the bio 127537f19e57SChristoph Hellwig * @iter: iovec iterator 127637f19e57SChristoph Hellwig * @gfp_mask: memory allocation flags 127737f19e57SChristoph Hellwig * 127837f19e57SChristoph Hellwig * Map the user space address into a bio suitable for io to a block 127937f19e57SChristoph Hellwig * device. Returns an error pointer in case of error. 128037f19e57SChristoph Hellwig */ 128137f19e57SChristoph Hellwig struct bio *bio_map_user_iov(struct request_queue *q, 1282e81cef5dSAl Viro struct iov_iter *iter, 128326e49cfcSKent Overstreet gfp_t gfp_mask) 1284f9c78b2bSJens Axboe { 128526e49cfcSKent Overstreet int j; 1286f9c78b2bSJens Axboe struct bio *bio; 1287076098e5SAl Viro int ret; 12882b04e8f6SAl Viro struct bio_vec *bvec; 1289f9c78b2bSJens Axboe 1290b282cc76SAl Viro if (!iov_iter_count(iter)) 1291f9c78b2bSJens Axboe return ERR_PTR(-EINVAL); 1292f9c78b2bSJens Axboe 1293b282cc76SAl Viro bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES)); 1294f9c78b2bSJens Axboe if (!bio) 1295f9c78b2bSJens Axboe return ERR_PTR(-ENOMEM); 1296f9c78b2bSJens Axboe 12970a0f1513SAl Viro while (iov_iter_count(iter)) { 1298629e42bcSAl Viro struct page **pages; 1299076098e5SAl Viro ssize_t bytes; 1300076098e5SAl Viro size_t offs, added = 0; 1301076098e5SAl Viro int npages; 1302f9c78b2bSJens Axboe 13030a0f1513SAl Viro bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs); 1304076098e5SAl Viro if (unlikely(bytes <= 0)) { 1305076098e5SAl Viro ret = bytes ? 
bytes : -EFAULT; 1306f9c78b2bSJens Axboe goto out_unmap; 1307f9c78b2bSJens Axboe } 1308f9c78b2bSJens Axboe 1309076098e5SAl Viro npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE); 1310076098e5SAl Viro 131198f0bc99SAl Viro if (unlikely(offs & queue_dma_alignment(q))) { 131298f0bc99SAl Viro ret = -EINVAL; 131398f0bc99SAl Viro j = 0; 131498f0bc99SAl Viro } else { 1315629e42bcSAl Viro for (j = 0; j < npages; j++) { 131698f0bc99SAl Viro struct page *page = pages[j]; 1317076098e5SAl Viro unsigned int n = PAGE_SIZE - offs; 131895d78c28SVitaly Mayatskikh unsigned short prev_bi_vcnt = bio->bi_vcnt; 1319f9c78b2bSJens Axboe 1320076098e5SAl Viro if (n > bytes) 1321076098e5SAl Viro n = bytes; 1322f9c78b2bSJens Axboe 132398f0bc99SAl Viro if (!bio_add_pc_page(q, bio, page, n, offs)) 1324f9c78b2bSJens Axboe break; 1325f9c78b2bSJens Axboe 132695d78c28SVitaly Mayatskikh /* 132795d78c28SVitaly Mayatskikh * check if vector was merged with previous 132895d78c28SVitaly Mayatskikh * drop page reference if needed 132995d78c28SVitaly Mayatskikh */ 133095d78c28SVitaly Mayatskikh if (bio->bi_vcnt == prev_bi_vcnt) 133198f0bc99SAl Viro put_page(page); 133295d78c28SVitaly Mayatskikh 1333076098e5SAl Viro added += n; 1334076098e5SAl Viro bytes -= n; 1335076098e5SAl Viro offs = 0; 1336f9c78b2bSJens Axboe } 13370a0f1513SAl Viro iov_iter_advance(iter, added); 133898f0bc99SAl Viro } 1339f9c78b2bSJens Axboe /* 1340f9c78b2bSJens Axboe * release the pages we didn't map into the bio, if any 1341f9c78b2bSJens Axboe */ 1342629e42bcSAl Viro while (j < npages) 134309cbfeafSKirill A. Shutemov put_page(pages[j++]); 1344629e42bcSAl Viro kvfree(pages); 1345e2e115d1SAl Viro /* couldn't stuff something into bio? */ 1346e2e115d1SAl Viro if (bytes) 1347e2e115d1SAl Viro break; 1348f9c78b2bSJens Axboe } 1349f9c78b2bSJens Axboe 1350b7c44ed9SJens Axboe bio_set_flag(bio, BIO_USER_MAPPED); 135137f19e57SChristoph Hellwig 135237f19e57SChristoph Hellwig /* 13535fad1b64SBart Van Assche * subtle -- if bio_map_user_iov() ended up bouncing a bio, 135437f19e57SChristoph Hellwig * it would normally disappear when its bi_end_io is run. 135537f19e57SChristoph Hellwig * however, we need it for the unmap, so grab an extra 135637f19e57SChristoph Hellwig * reference to it 135737f19e57SChristoph Hellwig */ 135837f19e57SChristoph Hellwig bio_get(bio); 1359f9c78b2bSJens Axboe return bio; 1360f9c78b2bSJens Axboe 1361f9c78b2bSJens Axboe out_unmap: 13622b04e8f6SAl Viro bio_for_each_segment_all(bvec, bio, j) { 13632b04e8f6SAl Viro put_page(bvec->bv_page); 1364f9c78b2bSJens Axboe } 1365f9c78b2bSJens Axboe bio_put(bio); 1366f9c78b2bSJens Axboe return ERR_PTR(ret); 1367f9c78b2bSJens Axboe } 1368f9c78b2bSJens Axboe 1369f9c78b2bSJens Axboe static void __bio_unmap_user(struct bio *bio) 1370f9c78b2bSJens Axboe { 1371f9c78b2bSJens Axboe struct bio_vec *bvec; 1372f9c78b2bSJens Axboe int i; 1373f9c78b2bSJens Axboe 1374f9c78b2bSJens Axboe /* 1375f9c78b2bSJens Axboe * make sure we dirty pages we wrote to 1376f9c78b2bSJens Axboe */ 1377f9c78b2bSJens Axboe bio_for_each_segment_all(bvec, bio, i) { 1378f9c78b2bSJens Axboe if (bio_data_dir(bio) == READ) 1379f9c78b2bSJens Axboe set_page_dirty_lock(bvec->bv_page); 1380f9c78b2bSJens Axboe 138109cbfeafSKirill A. 
Shutemov put_page(bvec->bv_page); 1382f9c78b2bSJens Axboe } 1383f9c78b2bSJens Axboe 1384f9c78b2bSJens Axboe bio_put(bio); 1385f9c78b2bSJens Axboe } 1386f9c78b2bSJens Axboe 1387f9c78b2bSJens Axboe /** 1388f9c78b2bSJens Axboe * bio_unmap_user - unmap a bio 1389f9c78b2bSJens Axboe * @bio: the bio being unmapped 1390f9c78b2bSJens Axboe * 13915fad1b64SBart Van Assche * Unmap a bio previously mapped by bio_map_user_iov(). Must be called from 13925fad1b64SBart Van Assche * process context. 1393f9c78b2bSJens Axboe * 1394f9c78b2bSJens Axboe * bio_unmap_user() may sleep. 1395f9c78b2bSJens Axboe */ 1396f9c78b2bSJens Axboe void bio_unmap_user(struct bio *bio) 1397f9c78b2bSJens Axboe { 1398f9c78b2bSJens Axboe __bio_unmap_user(bio); 1399f9c78b2bSJens Axboe bio_put(bio); 1400f9c78b2bSJens Axboe } 1401f9c78b2bSJens Axboe 14024246a0b6SChristoph Hellwig static void bio_map_kern_endio(struct bio *bio) 1403f9c78b2bSJens Axboe { 1404f9c78b2bSJens Axboe bio_put(bio); 1405f9c78b2bSJens Axboe } 1406f9c78b2bSJens Axboe 140775c72b83SChristoph Hellwig /** 140875c72b83SChristoph Hellwig * bio_map_kern - map kernel address into bio 140975c72b83SChristoph Hellwig * @q: the struct request_queue for the bio 141075c72b83SChristoph Hellwig * @data: pointer to buffer to map 141175c72b83SChristoph Hellwig * @len: length in bytes 141275c72b83SChristoph Hellwig * @gfp_mask: allocation flags for bio allocation 141375c72b83SChristoph Hellwig * 141475c72b83SChristoph Hellwig * Map the kernel address into a bio suitable for io to a block 141575c72b83SChristoph Hellwig * device. Returns an error pointer in case of error. 141675c72b83SChristoph Hellwig */ 141775c72b83SChristoph Hellwig struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len, 141875c72b83SChristoph Hellwig gfp_t gfp_mask) 1419f9c78b2bSJens Axboe { 1420f9c78b2bSJens Axboe unsigned long kaddr = (unsigned long)data; 1421f9c78b2bSJens Axboe unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; 1422f9c78b2bSJens Axboe unsigned long start = kaddr >> PAGE_SHIFT; 1423f9c78b2bSJens Axboe const int nr_pages = end - start; 1424f9c78b2bSJens Axboe int offset, i; 1425f9c78b2bSJens Axboe struct bio *bio; 1426f9c78b2bSJens Axboe 1427f9c78b2bSJens Axboe bio = bio_kmalloc(gfp_mask, nr_pages); 1428f9c78b2bSJens Axboe if (!bio) 1429f9c78b2bSJens Axboe return ERR_PTR(-ENOMEM); 1430f9c78b2bSJens Axboe 1431f9c78b2bSJens Axboe offset = offset_in_page(kaddr); 1432f9c78b2bSJens Axboe for (i = 0; i < nr_pages; i++) { 1433f9c78b2bSJens Axboe unsigned int bytes = PAGE_SIZE - offset; 1434f9c78b2bSJens Axboe 1435f9c78b2bSJens Axboe if (len <= 0) 1436f9c78b2bSJens Axboe break; 1437f9c78b2bSJens Axboe 1438f9c78b2bSJens Axboe if (bytes > len) 1439f9c78b2bSJens Axboe bytes = len; 1440f9c78b2bSJens Axboe 1441f9c78b2bSJens Axboe if (bio_add_pc_page(q, bio, virt_to_page(data), bytes, 144275c72b83SChristoph Hellwig offset) < bytes) { 144375c72b83SChristoph Hellwig /* we don't support partial mappings */ 144475c72b83SChristoph Hellwig bio_put(bio); 144575c72b83SChristoph Hellwig return ERR_PTR(-EINVAL); 144675c72b83SChristoph Hellwig } 1447f9c78b2bSJens Axboe 1448f9c78b2bSJens Axboe data += bytes; 1449f9c78b2bSJens Axboe len -= bytes; 1450f9c78b2bSJens Axboe offset = 0; 1451f9c78b2bSJens Axboe } 1452f9c78b2bSJens Axboe 1453f9c78b2bSJens Axboe bio->bi_end_io = bio_map_kern_endio; 1454f9c78b2bSJens Axboe return bio; 1455f9c78b2bSJens Axboe } 1456f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_map_kern); 1457f9c78b2bSJens Axboe 14584246a0b6SChristoph Hellwig static void 
bio_copy_kern_endio(struct bio *bio) 1459f9c78b2bSJens Axboe { 14601dfa0f68SChristoph Hellwig bio_free_pages(bio); 14611dfa0f68SChristoph Hellwig bio_put(bio); 14621dfa0f68SChristoph Hellwig } 14631dfa0f68SChristoph Hellwig 14644246a0b6SChristoph Hellwig static void bio_copy_kern_endio_read(struct bio *bio) 14651dfa0f68SChristoph Hellwig { 146642d2683aSChristoph Hellwig char *p = bio->bi_private; 14671dfa0f68SChristoph Hellwig struct bio_vec *bvec; 1468f9c78b2bSJens Axboe int i; 1469f9c78b2bSJens Axboe 1470f9c78b2bSJens Axboe bio_for_each_segment_all(bvec, bio, i) { 14711dfa0f68SChristoph Hellwig memcpy(p, page_address(bvec->bv_page), bvec->bv_len); 1472f9c78b2bSJens Axboe p += bvec->bv_len; 1473f9c78b2bSJens Axboe } 1474f9c78b2bSJens Axboe 14754246a0b6SChristoph Hellwig bio_copy_kern_endio(bio); 1476f9c78b2bSJens Axboe } 1477f9c78b2bSJens Axboe 1478f9c78b2bSJens Axboe /** 1479f9c78b2bSJens Axboe * bio_copy_kern - copy kernel address into bio 1480f9c78b2bSJens Axboe * @q: the struct request_queue for the bio 1481f9c78b2bSJens Axboe * @data: pointer to buffer to copy 1482f9c78b2bSJens Axboe * @len: length in bytes 1483f9c78b2bSJens Axboe * @gfp_mask: allocation flags for bio and page allocation 1484f9c78b2bSJens Axboe * @reading: data direction is READ 1485f9c78b2bSJens Axboe * 1486f9c78b2bSJens Axboe * copy the kernel address into a bio suitable for io to a block 1487f9c78b2bSJens Axboe * device. Returns an error pointer in case of error. 1488f9c78b2bSJens Axboe */ 1489f9c78b2bSJens Axboe struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len, 1490f9c78b2bSJens Axboe gfp_t gfp_mask, int reading) 1491f9c78b2bSJens Axboe { 149242d2683aSChristoph Hellwig unsigned long kaddr = (unsigned long)data; 149342d2683aSChristoph Hellwig unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; 149442d2683aSChristoph Hellwig unsigned long start = kaddr >> PAGE_SHIFT; 149542d2683aSChristoph Hellwig struct bio *bio; 1496f9c78b2bSJens Axboe void *p = data; 14971dfa0f68SChristoph Hellwig int nr_pages = 0; 1498f9c78b2bSJens Axboe 149942d2683aSChristoph Hellwig /* 150042d2683aSChristoph Hellwig * Overflow, abort 150142d2683aSChristoph Hellwig */ 150242d2683aSChristoph Hellwig if (end < start) 150342d2683aSChristoph Hellwig return ERR_PTR(-EINVAL); 1504f9c78b2bSJens Axboe 150542d2683aSChristoph Hellwig nr_pages = end - start; 150642d2683aSChristoph Hellwig bio = bio_kmalloc(gfp_mask, nr_pages); 150742d2683aSChristoph Hellwig if (!bio) 150842d2683aSChristoph Hellwig return ERR_PTR(-ENOMEM); 150942d2683aSChristoph Hellwig 151042d2683aSChristoph Hellwig while (len) { 151142d2683aSChristoph Hellwig struct page *page; 151242d2683aSChristoph Hellwig unsigned int bytes = PAGE_SIZE; 151342d2683aSChristoph Hellwig 151442d2683aSChristoph Hellwig if (bytes > len) 151542d2683aSChristoph Hellwig bytes = len; 151642d2683aSChristoph Hellwig 151742d2683aSChristoph Hellwig page = alloc_page(q->bounce_gfp | gfp_mask); 151842d2683aSChristoph Hellwig if (!page) 151942d2683aSChristoph Hellwig goto cleanup; 152042d2683aSChristoph Hellwig 152142d2683aSChristoph Hellwig if (!reading) 152242d2683aSChristoph Hellwig memcpy(page_address(page), p, bytes); 152342d2683aSChristoph Hellwig 152442d2683aSChristoph Hellwig if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) 152542d2683aSChristoph Hellwig break; 152642d2683aSChristoph Hellwig 152742d2683aSChristoph Hellwig len -= bytes; 152842d2683aSChristoph Hellwig p += bytes; 1529f9c78b2bSJens Axboe } 1530f9c78b2bSJens Axboe 15311dfa0f68SChristoph Hellwig if 
(reading) {
15321dfa0f68SChristoph Hellwig bio->bi_end_io = bio_copy_kern_endio_read;
153342d2683aSChristoph Hellwig bio->bi_private = data;
15341dfa0f68SChristoph Hellwig } else {
1535f9c78b2bSJens Axboe bio->bi_end_io = bio_copy_kern_endio;
15361dfa0f68SChristoph Hellwig }
15371dfa0f68SChristoph Hellwig 
1538f9c78b2bSJens Axboe return bio;
153942d2683aSChristoph Hellwig 
154042d2683aSChristoph Hellwig cleanup:
15411dfa0f68SChristoph Hellwig bio_free_pages(bio);
154242d2683aSChristoph Hellwig bio_put(bio);
154342d2683aSChristoph Hellwig return ERR_PTR(-ENOMEM);
1544f9c78b2bSJens Axboe }
1545f9c78b2bSJens Axboe 
1546f9c78b2bSJens Axboe /*
1547f9c78b2bSJens Axboe * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
1548f9c78b2bSJens Axboe * for performing direct-IO in BIOs.
1549f9c78b2bSJens Axboe *
1550f9c78b2bSJens Axboe * The problem is that we cannot run set_page_dirty() from interrupt context
1551f9c78b2bSJens Axboe * because the required locks are not interrupt-safe. So what we can do is to
1552f9c78b2bSJens Axboe * mark the pages dirty _before_ performing IO. And in interrupt context,
1553f9c78b2bSJens Axboe * check that the pages are still dirty. If so, fine. If not, redirty them
1554f9c78b2bSJens Axboe * in process context.
1555f9c78b2bSJens Axboe *
1556f9c78b2bSJens Axboe * We special-case compound pages here: normally this means reads into hugetlb
1557f9c78b2bSJens Axboe * pages. The logic in here doesn't really work right for compound pages
1558f9c78b2bSJens Axboe * because the VM does not uniformly chase down the head page in all cases.
1559f9c78b2bSJens Axboe * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
1560f9c78b2bSJens Axboe * handle them at all. So we skip compound pages here at an early stage.
1561f9c78b2bSJens Axboe *
1562f9c78b2bSJens Axboe * Note that this code is very hard to test under normal circumstances because
1563f9c78b2bSJens Axboe * direct-io pins the pages with get_user_pages(). This makes
1564f9c78b2bSJens Axboe * is_page_cache_freeable return false, and the VM will not clean the pages.
1565f9c78b2bSJens Axboe * But other code (e.g., flusher threads) could clean the pages if they are mapped
1566f9c78b2bSJens Axboe * pagecache.
1567f9c78b2bSJens Axboe *
1568f9c78b2bSJens Axboe * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
1569f9c78b2bSJens Axboe * deferred bio dirtying paths.
1570f9c78b2bSJens Axboe */
1571f9c78b2bSJens Axboe 
1572f9c78b2bSJens Axboe /*
1573f9c78b2bSJens Axboe * bio_set_pages_dirty() will mark all the bio's pages as dirty.
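 *
 * The intended calling pattern for a direct-IO read, in sketch form
 * (submission details are the caller's; only the dirtying calls are
 * shown, and the completion handler may run in interrupt context):
 *
 *	bio_set_pages_dirty(bio);	(process context, before submission)
 *	submit_bio(bio);
 *	...
 *	bio_check_pages_dirty(bio);	(from completion; re-dirties any
 *					 cleaned pages via a workqueue)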
1574f9c78b2bSJens Axboe */ 1575f9c78b2bSJens Axboe void bio_set_pages_dirty(struct bio *bio) 1576f9c78b2bSJens Axboe { 1577f9c78b2bSJens Axboe struct bio_vec *bvec; 1578f9c78b2bSJens Axboe int i; 1579f9c78b2bSJens Axboe 1580f9c78b2bSJens Axboe bio_for_each_segment_all(bvec, bio, i) { 1581f9c78b2bSJens Axboe struct page *page = bvec->bv_page; 1582f9c78b2bSJens Axboe 1583f9c78b2bSJens Axboe if (page && !PageCompound(page)) 1584f9c78b2bSJens Axboe set_page_dirty_lock(page); 1585f9c78b2bSJens Axboe } 1586f9c78b2bSJens Axboe } 1587f9c78b2bSJens Axboe 1588f9c78b2bSJens Axboe static void bio_release_pages(struct bio *bio) 1589f9c78b2bSJens Axboe { 1590f9c78b2bSJens Axboe struct bio_vec *bvec; 1591f9c78b2bSJens Axboe int i; 1592f9c78b2bSJens Axboe 1593f9c78b2bSJens Axboe bio_for_each_segment_all(bvec, bio, i) { 1594f9c78b2bSJens Axboe struct page *page = bvec->bv_page; 1595f9c78b2bSJens Axboe 1596f9c78b2bSJens Axboe if (page) 1597f9c78b2bSJens Axboe put_page(page); 1598f9c78b2bSJens Axboe } 1599f9c78b2bSJens Axboe } 1600f9c78b2bSJens Axboe 1601f9c78b2bSJens Axboe /* 1602f9c78b2bSJens Axboe * bio_check_pages_dirty() will check that all the BIO's pages are still dirty. 1603f9c78b2bSJens Axboe * If they are, then fine. If, however, some pages are clean then they must 1604f9c78b2bSJens Axboe * have been written out during the direct-IO read. So we take another ref on 1605f9c78b2bSJens Axboe * the BIO and the offending pages and re-dirty the pages in process context. 1606f9c78b2bSJens Axboe * 1607f9c78b2bSJens Axboe * It is expected that bio_check_pages_dirty() will wholly own the BIO from 1608ea1754a0SKirill A. Shutemov * here on. It will run one put_page() against each page and will run one 1609ea1754a0SKirill A. Shutemov * bio_put() against the BIO. 1610f9c78b2bSJens Axboe */ 1611f9c78b2bSJens Axboe 1612f9c78b2bSJens Axboe static void bio_dirty_fn(struct work_struct *work); 1613f9c78b2bSJens Axboe 1614f9c78b2bSJens Axboe static DECLARE_WORK(bio_dirty_work, bio_dirty_fn); 1615f9c78b2bSJens Axboe static DEFINE_SPINLOCK(bio_dirty_lock); 1616f9c78b2bSJens Axboe static struct bio *bio_dirty_list; 1617f9c78b2bSJens Axboe 1618f9c78b2bSJens Axboe /* 1619f9c78b2bSJens Axboe * This runs in process context 1620f9c78b2bSJens Axboe */ 1621f9c78b2bSJens Axboe static void bio_dirty_fn(struct work_struct *work) 1622f9c78b2bSJens Axboe { 1623f9c78b2bSJens Axboe unsigned long flags; 1624f9c78b2bSJens Axboe struct bio *bio; 1625f9c78b2bSJens Axboe 1626f9c78b2bSJens Axboe spin_lock_irqsave(&bio_dirty_lock, flags); 1627f9c78b2bSJens Axboe bio = bio_dirty_list; 1628f9c78b2bSJens Axboe bio_dirty_list = NULL; 1629f9c78b2bSJens Axboe spin_unlock_irqrestore(&bio_dirty_lock, flags); 1630f9c78b2bSJens Axboe 1631f9c78b2bSJens Axboe while (bio) { 1632f9c78b2bSJens Axboe struct bio *next = bio->bi_private; 1633f9c78b2bSJens Axboe 1634f9c78b2bSJens Axboe bio_set_pages_dirty(bio); 1635f9c78b2bSJens Axboe bio_release_pages(bio); 1636f9c78b2bSJens Axboe bio_put(bio); 1637f9c78b2bSJens Axboe bio = next; 1638f9c78b2bSJens Axboe } 1639f9c78b2bSJens Axboe } 1640f9c78b2bSJens Axboe 1641f9c78b2bSJens Axboe void bio_check_pages_dirty(struct bio *bio) 1642f9c78b2bSJens Axboe { 1643f9c78b2bSJens Axboe struct bio_vec *bvec; 1644f9c78b2bSJens Axboe int nr_clean_pages = 0; 1645f9c78b2bSJens Axboe int i; 1646f9c78b2bSJens Axboe 1647f9c78b2bSJens Axboe bio_for_each_segment_all(bvec, bio, i) { 1648f9c78b2bSJens Axboe struct page *page = bvec->bv_page; 1649f9c78b2bSJens Axboe 1650f9c78b2bSJens Axboe if (PageDirty(page) || PageCompound(page)) { 
165109cbfeafSKirill A. Shutemov put_page(page); 1652f9c78b2bSJens Axboe bvec->bv_page = NULL; 1653f9c78b2bSJens Axboe } else { 1654f9c78b2bSJens Axboe nr_clean_pages++; 1655f9c78b2bSJens Axboe } 1656f9c78b2bSJens Axboe } 1657f9c78b2bSJens Axboe 1658f9c78b2bSJens Axboe if (nr_clean_pages) { 1659f9c78b2bSJens Axboe unsigned long flags; 1660f9c78b2bSJens Axboe 1661f9c78b2bSJens Axboe spin_lock_irqsave(&bio_dirty_lock, flags); 1662f9c78b2bSJens Axboe bio->bi_private = bio_dirty_list; 1663f9c78b2bSJens Axboe bio_dirty_list = bio; 1664f9c78b2bSJens Axboe spin_unlock_irqrestore(&bio_dirty_lock, flags); 1665f9c78b2bSJens Axboe schedule_work(&bio_dirty_work); 1666f9c78b2bSJens Axboe } else { 1667f9c78b2bSJens Axboe bio_put(bio); 1668f9c78b2bSJens Axboe } 1669f9c78b2bSJens Axboe } 1670f9c78b2bSJens Axboe 1671d62e26b3SJens Axboe void generic_start_io_acct(struct request_queue *q, int rw, 1672d62e26b3SJens Axboe unsigned long sectors, struct hd_struct *part) 1673394ffa50SGu Zheng { 1674394ffa50SGu Zheng int cpu = part_stat_lock(); 1675394ffa50SGu Zheng 1676d62e26b3SJens Axboe part_round_stats(q, cpu, part); 1677394ffa50SGu Zheng part_stat_inc(cpu, part, ios[rw]); 1678394ffa50SGu Zheng part_stat_add(cpu, part, sectors[rw], sectors); 1679d62e26b3SJens Axboe part_inc_in_flight(q, part, rw); 1680394ffa50SGu Zheng 1681394ffa50SGu Zheng part_stat_unlock(); 1682394ffa50SGu Zheng } 1683394ffa50SGu Zheng EXPORT_SYMBOL(generic_start_io_acct); 1684394ffa50SGu Zheng 1685d62e26b3SJens Axboe void generic_end_io_acct(struct request_queue *q, int rw, 1686d62e26b3SJens Axboe struct hd_struct *part, unsigned long start_time) 1687394ffa50SGu Zheng { 1688394ffa50SGu Zheng unsigned long duration = jiffies - start_time; 1689394ffa50SGu Zheng int cpu = part_stat_lock(); 1690394ffa50SGu Zheng 1691394ffa50SGu Zheng part_stat_add(cpu, part, ticks[rw], duration); 1692d62e26b3SJens Axboe part_round_stats(q, cpu, part); 1693d62e26b3SJens Axboe part_dec_in_flight(q, part, rw); 1694394ffa50SGu Zheng 1695394ffa50SGu Zheng part_stat_unlock(); 1696394ffa50SGu Zheng } 1697394ffa50SGu Zheng EXPORT_SYMBOL(generic_end_io_acct); 1698394ffa50SGu Zheng 1699f9c78b2bSJens Axboe #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1700f9c78b2bSJens Axboe void bio_flush_dcache_pages(struct bio *bi) 1701f9c78b2bSJens Axboe { 1702f9c78b2bSJens Axboe struct bio_vec bvec; 1703f9c78b2bSJens Axboe struct bvec_iter iter; 1704f9c78b2bSJens Axboe 1705f9c78b2bSJens Axboe bio_for_each_segment(bvec, bi, iter) 1706f9c78b2bSJens Axboe flush_dcache_page(bvec.bv_page); 1707f9c78b2bSJens Axboe } 1708f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_flush_dcache_pages); 1709f9c78b2bSJens Axboe #endif 1710f9c78b2bSJens Axboe 1711c4cf5261SJens Axboe static inline bool bio_remaining_done(struct bio *bio) 1712c4cf5261SJens Axboe { 1713c4cf5261SJens Axboe /* 1714c4cf5261SJens Axboe * If we're not chaining, then ->__bi_remaining is always 1 and 1715c4cf5261SJens Axboe * we always end io on the first invocation. 
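 *
 * BIO_CHAIN is set whenever someone takes an extra completion reference
 * against the bio, e.g. (sketch; the increment pairs with one extra
 * bio_endio() call):
 *
 *	bio_inc_remaining(bio);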
1716c4cf5261SJens Axboe */ 1717c4cf5261SJens Axboe if (!bio_flagged(bio, BIO_CHAIN)) 1718c4cf5261SJens Axboe return true; 1719c4cf5261SJens Axboe 1720c4cf5261SJens Axboe BUG_ON(atomic_read(&bio->__bi_remaining) <= 0); 1721c4cf5261SJens Axboe 1722326e1dbbSMike Snitzer if (atomic_dec_and_test(&bio->__bi_remaining)) { 1723b7c44ed9SJens Axboe bio_clear_flag(bio, BIO_CHAIN); 1724c4cf5261SJens Axboe return true; 1725326e1dbbSMike Snitzer } 1726c4cf5261SJens Axboe 1727c4cf5261SJens Axboe return false; 1728c4cf5261SJens Axboe } 1729c4cf5261SJens Axboe 1730f9c78b2bSJens Axboe /** 1731f9c78b2bSJens Axboe * bio_endio - end I/O on a bio 1732f9c78b2bSJens Axboe * @bio: bio 1733f9c78b2bSJens Axboe * 1734f9c78b2bSJens Axboe * Description: 17354246a0b6SChristoph Hellwig * bio_endio() will end I/O on the whole bio. bio_endio() is the preferred 17364246a0b6SChristoph Hellwig * way to end I/O on a bio. No one should call bi_end_io() directly on a 17374246a0b6SChristoph Hellwig * bio unless they own it and thus know that it has an end_io function. 1738fbbaf700SNeilBrown * 1739fbbaf700SNeilBrown * bio_endio() can be called several times on a bio that has been chained 1740fbbaf700SNeilBrown * using bio_chain(). The ->bi_end_io() function will only be called the 1741fbbaf700SNeilBrown * last time. At this point the BLK_TA_COMPLETE tracing event will be 1742fbbaf700SNeilBrown * generated if BIO_TRACE_COMPLETION is set. 1743f9c78b2bSJens Axboe **/ 17444246a0b6SChristoph Hellwig void bio_endio(struct bio *bio) 1745f9c78b2bSJens Axboe { 1746ba8c6967SChristoph Hellwig again: 17472b885517SChristoph Hellwig if (!bio_remaining_done(bio)) 1748ba8c6967SChristoph Hellwig return; 17497c20f116SChristoph Hellwig if (!bio_integrity_endio(bio)) 17507c20f116SChristoph Hellwig return; 1751f9c78b2bSJens Axboe 1752f9c78b2bSJens Axboe /* 1753ba8c6967SChristoph Hellwig * Need to have a real endio function for chained bios, otherwise 1754ba8c6967SChristoph Hellwig * various corner cases will break (like stacking block devices that 1755ba8c6967SChristoph Hellwig * save/restore bi_end_io) - however, we want to avoid unbounded 1756ba8c6967SChristoph Hellwig * recursion and blowing the stack. Tail call optimization would 1757ba8c6967SChristoph Hellwig * handle this, but compiling with frame pointers also disables 1758ba8c6967SChristoph Hellwig * gcc's sibling call optimization. 
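 *
 * A chain that exercises this path can be built with the usual
 * split-and-resubmit pattern (a sketch; "sectors" and "bs" are the
 * caller's, and error handling is elided):
 *
 *	struct bio *split = bio_split(bio, sectors, GFP_NOIO, bs);
 *
 *	bio_chain(split, bio);
 *	generic_make_request(split);
 *	generic_make_request(bio);
 *
 * When split completes, its bi_end_io is bio_chain_endio, so the
 * "again" loop below walks to the parent instead of recursing.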
1759f9c78b2bSJens Axboe */ 1760f9c78b2bSJens Axboe if (bio->bi_end_io == bio_chain_endio) { 176138f8baaeSChristoph Hellwig bio = __bio_chain_endio(bio); 1762ba8c6967SChristoph Hellwig goto again; 1763ba8c6967SChristoph Hellwig } 1764ba8c6967SChristoph Hellwig 176574d46992SChristoph Hellwig if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) { 176674d46992SChristoph Hellwig trace_block_bio_complete(bio->bi_disk->queue, bio, 1767a462b950SBart Van Assche blk_status_to_errno(bio->bi_status)); 1768fbbaf700SNeilBrown bio_clear_flag(bio, BIO_TRACE_COMPLETION); 1769fbbaf700SNeilBrown } 1770fbbaf700SNeilBrown 17719e234eeaSShaohua Li blk_throtl_bio_endio(bio); 1772b222dd2fSShaohua Li /* release cgroup info */ 1773b222dd2fSShaohua Li bio_uninit(bio); 1774f9c78b2bSJens Axboe if (bio->bi_end_io) 17754246a0b6SChristoph Hellwig bio->bi_end_io(bio); 1776f9c78b2bSJens Axboe } 1777f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_endio); 1778f9c78b2bSJens Axboe 1779f9c78b2bSJens Axboe /** 1780f9c78b2bSJens Axboe * bio_split - split a bio 1781f9c78b2bSJens Axboe * @bio: bio to split 1782f9c78b2bSJens Axboe * @sectors: number of sectors to split from the front of @bio 1783f9c78b2bSJens Axboe * @gfp: gfp mask 1784f9c78b2bSJens Axboe * @bs: bio set to allocate from 1785f9c78b2bSJens Axboe * 1786f9c78b2bSJens Axboe * Allocates and returns a new bio which represents @sectors from the start of 1787f9c78b2bSJens Axboe * @bio, and updates @bio to represent the remaining sectors. 1788f9c78b2bSJens Axboe * 1789f3f5da62SMartin K. Petersen * Unless this is a discard request the newly allocated bio will point 1790f3f5da62SMartin K. Petersen * to @bio's bi_io_vec; it is the caller's responsibility to ensure that 1791f3f5da62SMartin K. Petersen * @bio is not freed before the split. 1792f9c78b2bSJens Axboe */ 1793f9c78b2bSJens Axboe struct bio *bio_split(struct bio *bio, int sectors, 1794f9c78b2bSJens Axboe gfp_t gfp, struct bio_set *bs) 1795f9c78b2bSJens Axboe { 1796f341a4d3SMikulas Patocka struct bio *split; 1797f9c78b2bSJens Axboe 1798f9c78b2bSJens Axboe BUG_ON(sectors <= 0); 1799f9c78b2bSJens Axboe BUG_ON(sectors >= bio_sectors(bio)); 1800f9c78b2bSJens Axboe 1801f9c78b2bSJens Axboe split = bio_clone_fast(bio, gfp, bs); 1802f9c78b2bSJens Axboe if (!split) 1803f9c78b2bSJens Axboe return NULL; 1804f9c78b2bSJens Axboe 1805f9c78b2bSJens Axboe split->bi_iter.bi_size = sectors << 9; 1806f9c78b2bSJens Axboe 1807f9c78b2bSJens Axboe if (bio_integrity(split)) 1808fbd08e76SDmitry Monakhov bio_integrity_trim(split); 1809f9c78b2bSJens Axboe 1810f9c78b2bSJens Axboe bio_advance(bio, split->bi_iter.bi_size); 1811f9c78b2bSJens Axboe 1812fbbaf700SNeilBrown if (bio_flagged(bio, BIO_TRACE_COMPLETION)) 181320d59023SGoldwyn Rodrigues bio_set_flag(split, BIO_TRACE_COMPLETION); 1814fbbaf700SNeilBrown 1815f9c78b2bSJens Axboe return split; 1816f9c78b2bSJens Axboe } 1817f9c78b2bSJens Axboe EXPORT_SYMBOL(bio_split); 1818f9c78b2bSJens Axboe 1819f9c78b2bSJens Axboe /** 1820f9c78b2bSJens Axboe * bio_trim - trim a bio 1821f9c78b2bSJens Axboe * @bio: bio to trim 1822f9c78b2bSJens Axboe * @offset: number of sectors to trim from the front of @bio 1823f9c78b2bSJens Axboe * @size: size we want to trim @bio to, in sectors 1824f9c78b2bSJens Axboe */ 1825f9c78b2bSJens Axboe void bio_trim(struct bio *bio, int offset, int size) 1826f9c78b2bSJens Axboe { 1827f9c78b2bSJens Axboe /* 'bio' is a cloned bio which we need to trim to match 1828f9c78b2bSJens Axboe * the given offset and size. 
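 *
 * For instance, to reduce a freshly cloned 8-sector bio to its second
 * half (an illustrative sketch, not from a real caller):
 *
 *	bio_trim(bio, 4, 4);	(advance 4 sectors, keep 4 sectors)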
1829f9c78b2bSJens Axboe */ 1830f9c78b2bSJens Axboe 1831f9c78b2bSJens Axboe size <<= 9; 1832f9c78b2bSJens Axboe if (offset == 0 && size == bio->bi_iter.bi_size) 1833f9c78b2bSJens Axboe return; 1834f9c78b2bSJens Axboe 1835b7c44ed9SJens Axboe bio_clear_flag(bio, BIO_SEG_VALID); 1836f9c78b2bSJens Axboe 1837f9c78b2bSJens Axboe bio_advance(bio, offset << 9); 1838f9c78b2bSJens Axboe 1839f9c78b2bSJens Axboe bio->bi_iter.bi_size = size; 1840376a78abSDmitry Monakhov 1841376a78abSDmitry Monakhov if (bio_integrity(bio)) 1842fbd08e76SDmitry Monakhov bio_integrity_trim(bio); 1843376a78abSDmitry Monakhov 1844f9c78b2bSJens Axboe } 1845f9c78b2bSJens Axboe EXPORT_SYMBOL_GPL(bio_trim); 1846f9c78b2bSJens Axboe 1847f9c78b2bSJens Axboe /* 1848f9c78b2bSJens Axboe * create memory pools for biovec's in a bio_set. 1849f9c78b2bSJens Axboe * use the global biovec slabs created for general use. 1850f9c78b2bSJens Axboe */ 1851f9c78b2bSJens Axboe mempool_t *biovec_create_pool(int pool_entries) 1852f9c78b2bSJens Axboe { 1853ed996a52SChristoph Hellwig struct biovec_slab *bp = bvec_slabs + BVEC_POOL_MAX; 1854f9c78b2bSJens Axboe 1855f9c78b2bSJens Axboe return mempool_create_slab_pool(pool_entries, bp->slab); 1856f9c78b2bSJens Axboe } 1857f9c78b2bSJens Axboe 1858f9c78b2bSJens Axboe void bioset_free(struct bio_set *bs) 1859f9c78b2bSJens Axboe { 1860f9c78b2bSJens Axboe if (bs->rescue_workqueue) 1861f9c78b2bSJens Axboe destroy_workqueue(bs->rescue_workqueue); 1862f9c78b2bSJens Axboe 1863f9c78b2bSJens Axboe mempool_destroy(bs->bio_pool); 1864f9c78b2bSJens Axboe mempool_destroy(bs->bvec_pool); 1865f9c78b2bSJens Axboe 1866f9c78b2bSJens Axboe bioset_integrity_free(bs); 1867f9c78b2bSJens Axboe bio_put_slab(bs); 1868f9c78b2bSJens Axboe 1869f9c78b2bSJens Axboe kfree(bs); 1870f9c78b2bSJens Axboe } 1871f9c78b2bSJens Axboe EXPORT_SYMBOL(bioset_free); 1872f9c78b2bSJens Axboe 1873011067b0SNeilBrown /** 1874011067b0SNeilBrown * bioset_create - Create a bio_set 1875011067b0SNeilBrown * @pool_size: Number of bio and bio_vecs to cache in the mempool 1876011067b0SNeilBrown * @front_pad: Number of bytes to allocate in front of the returned bio 187747e0fb46SNeilBrown * @flags: Flags to modify behavior, currently %BIOSET_NEED_BVECS 187847e0fb46SNeilBrown * and %BIOSET_NEED_RESCUER 1879011067b0SNeilBrown * 1880011067b0SNeilBrown * Description: 1881011067b0SNeilBrown * Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller 1882011067b0SNeilBrown * to ask for a number of bytes to be allocated in front of the bio. 1883011067b0SNeilBrown * Front pad allocation is useful for embedding the bio inside 1884011067b0SNeilBrown * another structure, to avoid allocating extra data to go with the bio. 1885011067b0SNeilBrown * Note that the bio must be embedded at the END of that structure always, 1886011067b0SNeilBrown * or things will break badly. 1887011067b0SNeilBrown * If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated 1888011067b0SNeilBrown * for allocating iovecs. This pool is not needed e.g. for bio_clone_fast(). 188947e0fb46SNeilBrown * If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used to 189047e0fb46SNeilBrown * dispatch queued requests when the mempool runs out of space. 
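 *
 * A sketch of the front_pad idiom (struct my_io and its layout are
 * illustrative, not taken from an existing user): size @front_pad with
 * offsetof() so each allocated bio sits at the end of a private context:
 *
 *	struct my_io {
 *		void *private;
 *		struct bio bio;		(must come last)
 *	};
 *
 *	bs = bioset_create(BIO_POOL_SIZE, offsetof(struct my_io, bio),
 *			   BIOSET_NEED_BVECS);
 *
 * A bio allocated from that set can then be mapped back to its container
 * with container_of(bio, struct my_io, bio).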
1891011067b0SNeilBrown * 1892011067b0SNeilBrown */ 1893011067b0SNeilBrown struct bio_set *bioset_create(unsigned int pool_size, 1894d8f429e1SJunichi Nomura unsigned int front_pad, 1895011067b0SNeilBrown int flags) 1896f9c78b2bSJens Axboe { 1897f9c78b2bSJens Axboe unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec); 1898f9c78b2bSJens Axboe struct bio_set *bs; 1899f9c78b2bSJens Axboe 1900f9c78b2bSJens Axboe bs = kzalloc(sizeof(*bs), GFP_KERNEL); 1901f9c78b2bSJens Axboe if (!bs) 1902f9c78b2bSJens Axboe return NULL; 1903f9c78b2bSJens Axboe 1904f9c78b2bSJens Axboe bs->front_pad = front_pad; 1905f9c78b2bSJens Axboe 1906f9c78b2bSJens Axboe spin_lock_init(&bs->rescue_lock); 1907f9c78b2bSJens Axboe bio_list_init(&bs->rescue_list); 1908f9c78b2bSJens Axboe INIT_WORK(&bs->rescue_work, bio_alloc_rescue); 1909f9c78b2bSJens Axboe 1910f9c78b2bSJens Axboe bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad); 1911f9c78b2bSJens Axboe if (!bs->bio_slab) { 1912f9c78b2bSJens Axboe kfree(bs); 1913f9c78b2bSJens Axboe return NULL; 1914f9c78b2bSJens Axboe } 1915f9c78b2bSJens Axboe 1916f9c78b2bSJens Axboe bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab); 1917f9c78b2bSJens Axboe if (!bs->bio_pool) 1918f9c78b2bSJens Axboe goto bad; 1919f9c78b2bSJens Axboe 1920011067b0SNeilBrown if (flags & BIOSET_NEED_BVECS) { 1921f9c78b2bSJens Axboe bs->bvec_pool = biovec_create_pool(pool_size); 1922f9c78b2bSJens Axboe if (!bs->bvec_pool) 1923f9c78b2bSJens Axboe goto bad; 1924d8f429e1SJunichi Nomura } 1925f9c78b2bSJens Axboe 192647e0fb46SNeilBrown if (!(flags & BIOSET_NEED_RESCUER)) 192747e0fb46SNeilBrown return bs; 192847e0fb46SNeilBrown 1929f9c78b2bSJens Axboe bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0); 1930f9c78b2bSJens Axboe if (!bs->rescue_workqueue) 1931f9c78b2bSJens Axboe goto bad; 1932f9c78b2bSJens Axboe 1933f9c78b2bSJens Axboe return bs; 1934f9c78b2bSJens Axboe bad: 1935f9c78b2bSJens Axboe bioset_free(bs); 1936f9c78b2bSJens Axboe return NULL; 1937f9c78b2bSJens Axboe } 1938f9c78b2bSJens Axboe EXPORT_SYMBOL(bioset_create); 1939f9c78b2bSJens Axboe 1940f9c78b2bSJens Axboe #ifdef CONFIG_BLK_CGROUP 19411d933cf0STejun Heo 19421d933cf0STejun Heo /** 19431d933cf0STejun Heo * bio_associate_blkcg - associate a bio with the specified blkcg 19441d933cf0STejun Heo * @bio: target bio 19451d933cf0STejun Heo * @blkcg_css: css of the blkcg to associate 19461d933cf0STejun Heo * 19471d933cf0STejun Heo * Associate @bio with the blkcg specified by @blkcg_css. Block layer will 19481d933cf0STejun Heo * treat @bio as if it were issued by a task which belongs to the blkcg. 19491d933cf0STejun Heo * 19501d933cf0STejun Heo * This function takes an extra reference of @blkcg_css which will be put 19511d933cf0STejun Heo * when @bio is released. The caller must own @bio and is responsible for 19521d933cf0STejun Heo * synchronizing calls to this function. 
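 *
 * Sketch of a caller (illustrative; "blkcg_css" is assumed to be a css
 * the caller already holds a reference on, and "nr_vecs" is its choice):
 *
 *	bio = bio_alloc(GFP_NOIO, nr_vecs);
 *	if (bio_associate_blkcg(bio, blkcg_css))
 *		... bio already had a css attached, -EBUSY ...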
19531d933cf0STejun Heo */ 19541d933cf0STejun Heo int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css) 19551d933cf0STejun Heo { 19561d933cf0STejun Heo if (unlikely(bio->bi_css)) 19571d933cf0STejun Heo return -EBUSY; 19581d933cf0STejun Heo css_get(blkcg_css); 19591d933cf0STejun Heo bio->bi_css = blkcg_css; 19601d933cf0STejun Heo return 0; 19611d933cf0STejun Heo } 19625aa2a96bSTejun Heo EXPORT_SYMBOL_GPL(bio_associate_blkcg); 19631d933cf0STejun Heo 1964f9c78b2bSJens Axboe /** 1965f9c78b2bSJens Axboe * bio_disassociate_task - undo bio_associate_current() 1966f9c78b2bSJens Axboe * @bio: target bio 1967f9c78b2bSJens Axboe */ 1968f9c78b2bSJens Axboe void bio_disassociate_task(struct bio *bio) 1969f9c78b2bSJens Axboe { 1970f9c78b2bSJens Axboe if (bio->bi_ioc) { 1971f9c78b2bSJens Axboe put_io_context(bio->bi_ioc); 1972f9c78b2bSJens Axboe bio->bi_ioc = NULL; 1973f9c78b2bSJens Axboe } 1974f9c78b2bSJens Axboe if (bio->bi_css) { 1975f9c78b2bSJens Axboe css_put(bio->bi_css); 1976f9c78b2bSJens Axboe bio->bi_css = NULL; 1977f9c78b2bSJens Axboe } 1978f9c78b2bSJens Axboe } 1979f9c78b2bSJens Axboe 198020bd723eSPaolo Valente /** 198120bd723eSPaolo Valente * bio_clone_blkcg_association - clone blkcg association from src to dst bio 198220bd723eSPaolo Valente * @dst: destination bio 198320bd723eSPaolo Valente * @src: source bio 198420bd723eSPaolo Valente */ 198520bd723eSPaolo Valente void bio_clone_blkcg_association(struct bio *dst, struct bio *src) 198620bd723eSPaolo Valente { 198720bd723eSPaolo Valente if (src->bi_css) 198820bd723eSPaolo Valente WARN_ON(bio_associate_blkcg(dst, src->bi_css)); 198920bd723eSPaolo Valente } 19908a8e6f84SShaohua Li EXPORT_SYMBOL_GPL(bio_clone_blkcg_association); 1991f9c78b2bSJens Axboe #endif /* CONFIG_BLK_CGROUP */ 1992f9c78b2bSJens Axboe 1993f9c78b2bSJens Axboe static void __init biovec_init_slabs(void) 1994f9c78b2bSJens Axboe { 1995f9c78b2bSJens Axboe int i; 1996f9c78b2bSJens Axboe 1997ed996a52SChristoph Hellwig for (i = 0; i < BVEC_POOL_NR; i++) { 1998f9c78b2bSJens Axboe int size; 1999f9c78b2bSJens Axboe struct biovec_slab *bvs = bvec_slabs + i; 2000f9c78b2bSJens Axboe 2001f9c78b2bSJens Axboe if (bvs->nr_vecs <= BIO_INLINE_VECS) { 2002f9c78b2bSJens Axboe bvs->slab = NULL; 2003f9c78b2bSJens Axboe continue; 2004f9c78b2bSJens Axboe } 2005f9c78b2bSJens Axboe 2006f9c78b2bSJens Axboe size = bvs->nr_vecs * sizeof(struct bio_vec); 2007f9c78b2bSJens Axboe bvs->slab = kmem_cache_create(bvs->name, size, 0, 2008f9c78b2bSJens Axboe SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 2009f9c78b2bSJens Axboe } 2010f9c78b2bSJens Axboe } 2011f9c78b2bSJens Axboe 2012f9c78b2bSJens Axboe static int __init init_bio(void) 2013f9c78b2bSJens Axboe { 2014f9c78b2bSJens Axboe bio_slab_max = 2; 2015f9c78b2bSJens Axboe bio_slab_nr = 0; 2016f9c78b2bSJens Axboe bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL); 2017f9c78b2bSJens Axboe if (!bio_slabs) 2018f9c78b2bSJens Axboe panic("bio: can't allocate bios\n"); 2019f9c78b2bSJens Axboe 2020f9c78b2bSJens Axboe bio_integrity_init(); 2021f9c78b2bSJens Axboe biovec_init_slabs(); 2022f9c78b2bSJens Axboe 2023011067b0SNeilBrown fs_bio_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); 2024f9c78b2bSJens Axboe if (!fs_bio_set) 2025f9c78b2bSJens Axboe panic("bio: can't allocate bios\n"); 2026f9c78b2bSJens Axboe 2027f9c78b2bSJens Axboe if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE)) 2028f9c78b2bSJens Axboe panic("bio: can't create integrity pool\n"); 2029f9c78b2bSJens Axboe 2030f9c78b2bSJens Axboe return 0; 
2031f9c78b2bSJens Axboe }
2032f9c78b2bSJens Axboe subsys_initcall(init_bio);