// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

struct bio_map_data {
	bool is_our_pages : 1;
	bool is_null_mapped : 1;
	struct iov_iter iter;
	struct iovec iov[];
};

static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
					       gfp_t gfp_mask)
{
	struct bio_map_data *bmd;

	if (data->nr_segs > UIO_MAXIOV)
		return NULL;

	bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
	if (!bmd)
		return NULL;
	memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
	bmd->iter = *data;
	bmd->iter.iov = bmd->iov;
	return bmd;
}
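/*
 * Note: bio_alloc_map_data() deep-copies the caller's iovec array and
 * repoints bmd->iter.iov at the embedded copy.  The caller's iovecs may
 * be on-stack and long gone by the time the request completes, while
 * bio_uncopy_user() still needs them to copy READ data back out.
 */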
/**
 * bio_copy_from_iter - copy all pages from iov_iter to bio
 * @bio: The &struct bio which describes the I/O as destination
 * @iter: iov_iter as source
 *
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_from_iter(bvec->bv_page,
					  bvec->bv_offset,
					  bvec->bv_len,
					  iter);

		if (!iov_iter_count(iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 * bio_copy_to_iter - copy all pages from bio to iov_iter
 * @bio: The &struct bio which describes the I/O as source
 * @iter: iov_iter as destination
 *
 * Copy all pages from bio to iov_iter.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_to_iter(bvec->bv_page,
					bvec->bv_offset,
					bvec->bv_len,
					&iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}
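/*
 * Note the asymmetry in the two helpers above: bio_copy_to_iter() takes
 * its iov_iter by value, so copy_page_to_iter() only advances the local
 * copy and the bmd->iter passed in by bio_uncopy_user() stays untouched.
 */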
/**
 * bio_uncopy_user - finish previously mapped bio
 * @bio: bio being terminated
 *
 * Free pages allocated from bio_copy_user_iov() and write back data
 * to user space in case of a read.
 */
static int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	int ret = 0;

	if (!bmd->is_null_mapped) {
		/*
		 * if we're in a workqueue, the request is orphaned, so
		 * don't copy into a random user address space, just free
		 * and return -EINTR so user space doesn't expect any data.
		 */
		if (!current->mm)
			ret = -EINTR;
		else if (bio_data_dir(bio) == READ)
			ret = bio_copy_to_iter(bio, bmd->iter);
		if (bmd->is_our_pages)
			bio_free_pages(bio);
	}
	kfree(bmd);
	bio_put(bio);
	return ret;
}

/**
 * bio_copy_user_iov - copy user data to bio
 * @q: destination block queue
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @iter: iovec iterator
 * @gfp_mask: memory allocation flags
 *
 * Prepares and returns a bio for indirect user io, bouncing data
 * to/from kernel pages as necessary. Must be paired with a call to
 * bio_uncopy_user() on io completion.
 */
static struct bio *bio_copy_user_iov(struct request_queue *q,
		struct rq_map_data *map_data, struct iov_iter *iter,
		gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	struct page *page;
	struct bio *bio;
	int i = 0, ret;
	int nr_pages;
	unsigned int len = iter->count;
	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

	bmd = bio_alloc_map_data(iter, gfp_mask);
	if (!bmd)
		return ERR_PTR(-ENOMEM);

	/*
	 * We need to do a deep copy of the iov_iter including the iovecs.
	 * The caller provided iov might point to an on-stack or otherwise
	 * shortlived one.
	 */
	bmd->is_our_pages = !map_data;

	nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	if (nr_pages > BIO_MAX_PAGES)
		nr_pages = BIO_MAX_PAGES;

	ret = -ENOMEM;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		goto out_bmd;

	ret = 0;

	if (map_data) {
		nr_pages = 1 << map_data->page_order;
		i = map_data->offset / PAGE_SIZE;
	}
	while (len) {
		unsigned int bytes = PAGE_SIZE;

		bytes -= offset;

		if (bytes > len)
			bytes = len;

		if (map_data) {
			if (i == map_data->nr_entries * nr_pages) {
				ret = -ENOMEM;
				break;
			}

			page = map_data->pages[i / nr_pages];
			page += (i % nr_pages);

			i++;
		} else {
			page = alloc_page(q->bounce_gfp | gfp_mask);
			if (!page) {
				ret = -ENOMEM;
				break;
			}
		}

		if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
			if (!map_data)
				__free_page(page);
			break;
		}

		len -= bytes;
		offset = 0;
	}

	if (ret)
		goto cleanup;

	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	/*
	 * success
	 */
	if ((iov_iter_rw(iter) == WRITE &&
	     (!map_data || !map_data->null_mapped)) ||
	    (map_data && map_data->from_user)) {
		ret = bio_copy_from_iter(bio, iter);
		if (ret)
			goto cleanup;
	} else {
		if (bmd->is_our_pages)
			zero_fill_bio(bio);
		iov_iter_advance(iter, bio->bi_iter.bi_size);
	}
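	/*
	 * Stash the map data for completion time: bio_uncopy_user() frees
	 * these pages (when they are ours) and, for a READ into a non-null
	 * mapping, copies the result back to user space through bmd->iter.
	 */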
	bio->bi_private = bmd;
	if (map_data && map_data->null_mapped)
		bmd->is_null_mapped = true;
	return bio;
cleanup:
	if (!map_data)
		bio_free_pages(bio);
	bio_put(bio);
out_bmd:
	kfree(bmd);
	return ERR_PTR(ret);
}
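/*
 * The copy path above is the fallback; blk_rq_map_user_iov() below only
 * picks it when a bounce is unavoidable: the caller supplied pages in
 * rq_map_data, the iovec violates the queue's dma alignment, or gaps in
 * the iovec would cross the queue's virt boundary.
 */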
/**
 * bio_map_user_iov - map user iovec into bio
 * @q: the struct request_queue for the bio
 * @iter: iovec iterator
 * @gfp_mask: memory allocation flags
 *
 * Map the user space address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_map_user_iov(struct request_queue *q,
		struct iov_iter *iter, gfp_t gfp_mask)
{
	unsigned int max_sectors = queue_max_hw_sectors(q);
	int j;
	struct bio *bio;
	int ret;

	if (!iov_iter_count(iter))
		return ERR_PTR(-EINVAL);

	bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES));
	if (!bio)
		return ERR_PTR(-ENOMEM);

	while (iov_iter_count(iter)) {
		struct page **pages;
		ssize_t bytes;
		size_t offs, added = 0;
		int npages;

		bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
		if (unlikely(bytes <= 0)) {
			ret = bytes ? bytes : -EFAULT;
			goto out_unmap;
		}

		npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);

		if (unlikely(offs & queue_dma_alignment(q))) {
			ret = -EINVAL;
			j = 0;
		} else {
			for (j = 0; j < npages; j++) {
				struct page *page = pages[j];
				unsigned int n = PAGE_SIZE - offs;
				bool same_page = false;

				if (n > bytes)
					n = bytes;

				if (!bio_add_hw_page(q, bio, page, n, offs,
						     max_sectors, &same_page)) {
					if (same_page)
						put_page(page);
					break;
				}

				added += n;
				bytes -= n;
				offs = 0;
			}
			iov_iter_advance(iter, added);
		}
		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < npages)
			put_page(pages[j++]);
		kvfree(pages);
		/* couldn't stuff something into bio? */
		if (bytes)
			break;
	}

	bio_set_flag(bio, BIO_USER_MAPPED);

	/*
	 * subtle -- if bio_map_user_iov() ended up bouncing a bio,
	 * it would normally disappear when its bi_end_io is run.
	 * however, we need it for the unmap, so grab an extra
	 * reference to it
	 */
	bio_get(bio);
	return bio;

out_unmap:
	bio_release_pages(bio, false);
	bio_put(bio);
	return ERR_PTR(ret);
}
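/*
 * Note: the extra reference taken above is why bio_unmap_user() below
 * calls bio_put() twice: once to drop that reference and once to free
 * the bio itself.
 */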
/**
 * bio_unmap_user - unmap a bio
 * @bio: the bio being unmapped
 *
 * Unmap a bio previously mapped by bio_map_user_iov(). Must be called from
 * process context.
 *
 * bio_unmap_user() may sleep.
 */
static void bio_unmap_user(struct bio *bio)
{
	bio_release_pages(bio, bio_data_dir(bio) == READ);
	bio_put(bio);
	bio_put(bio);
}

static void bio_invalidate_vmalloc_pages(struct bio *bio)
{
#ifdef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
	if (bio->bi_private && !op_is_write(bio_op(bio))) {
		unsigned long i, len = 0;

		for (i = 0; i < bio->bi_vcnt; i++)
			len += bio->bi_io_vec[i].bv_len;
		invalidate_kernel_vmap_range(bio->bi_private, len);
	}
#endif
}

static void bio_map_kern_endio(struct bio *bio)
{
	bio_invalidate_vmalloc_pages(bio);
	bio_put(bio);
}
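/*
 * Cache maintenance for vmalloc buffers: bio_map_kern() flushes the
 * kernel mapping before the device sees the pages, and the endio above
 * invalidates it again after a READ so the CPU cannot serve stale
 * cachelines through the vmalloc alias.
 */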
/**
 * bio_map_kern - map kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to map
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio allocation
 *
 * Map the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_map_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	bool is_vmalloc = is_vmalloc_addr(data);
	struct page *page;
	int offset, i;
	struct bio *bio;

	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	if (is_vmalloc) {
		flush_kernel_vmap_range(data, len);
		bio->bi_private = data;
	}

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (!is_vmalloc)
			page = virt_to_page(data);
		else
			page = vmalloc_to_page(data);
		if (bio_add_pc_page(q, bio, page, bytes,
				    offset) < bytes) {
			/* we don't support partial mappings */
			bio_put(bio);
			return ERR_PTR(-EINVAL);
		}

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}

static void bio_copy_kern_endio(struct bio *bio)
{
	bio_free_pages(bio);
	bio_put(bio);
}

static void bio_copy_kern_endio_read(struct bio *bio)
{
	char *p = bio->bi_private;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
		p += bvec->bv_len;
	}

	bio_copy_kern_endio(bio);
}
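/*
 * For the bounced (copied) variant of a READ, bi_private holds the
 * caller's original buffer: bio_copy_kern_endio_read() walks the bounce
 * pages and memcpy()s their contents back before freeing them.
 */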
/**
 * bio_copy_kern - copy kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to copy
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio and page allocation
 * @reading: data direction is READ
 *
 * copy the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_copy_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask, int reading)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	struct bio *bio;
	void *p = data;
	int nr_pages = 0;

	/*
	 * Overflow, abort
	 */
	if (end < start)
		return ERR_PTR(-EINVAL);

	nr_pages = end - start;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	while (len) {
		struct page *page;
		unsigned int bytes = PAGE_SIZE;

		if (bytes > len)
			bytes = len;

		page = alloc_page(q->bounce_gfp | gfp_mask);
		if (!page)
			goto cleanup;

		if (!reading)
			memcpy(page_address(page), p, bytes);

		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
			break;

		len -= bytes;
		p += bytes;
	}

	if (reading) {
		bio->bi_end_io = bio_copy_kern_endio_read;
		bio->bi_private = data;
	} else {
		bio->bi_end_io = bio_copy_kern_endio;
	}

	return bio;

cleanup:
	bio_free_pages(bio);
	bio_put(bio);
	return ERR_PTR(-ENOMEM);
}
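/*
 * Note: unlike bio_map_kern(), which returns -EINVAL for a partial
 * mapping, bio_copy_kern() stops filling when bio_add_pc_page() cannot
 * take a full page, so the returned bio may cover less than @len.
 */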
/*
 * Append a bio to a passthrough request.  Only works if the bio can be
 * merged into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio **bio)
{
	struct bio *orig_bio = *bio;
	struct bvec_iter iter;
	struct bio_vec bv;
	unsigned int nr_segs = 0;

	blk_queue_bounce(rq->q, bio);

	bio_for_each_bvec(bv, *bio, iter)
		nr_segs++;

	if (!rq->bio) {
		blk_rq_bio_prep(rq, *bio, nr_segs);
	} else {
		if (!ll_back_merge_fn(rq, *bio, nr_segs)) {
			if (orig_bio != *bio) {
				bio_put(*bio);
				*bio = orig_bio;
			}
			return -EINVAL;
		}

		rq->biotail->bi_next = *bio;
		rq->biotail = *bio;
		rq->__data_len += (*bio)->bi_iter.bi_size;
		bio_crypt_free_ctx(*bio);
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
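/*
 * Usage sketch (illustrative; "my_prepared_bio" and the error label are
 * hypothetical): a driver building a passthrough request appends its own
 * bio roughly like this.  blk_rq_append_bio() takes a struct bio **
 * because blk_queue_bounce() may replace the bio with a bounce clone:
 *
 *	struct bio *bio = my_prepared_bio;
 *	int ret = blk_rq_append_bio(rq, &bio);
 *	if (ret)
 *		goto out_free_bio;
 */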
static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user_iov(struct request *rq,
		struct rq_map_data *map_data, struct iov_iter *iter,
		gfp_t gfp_mask, bool copy)
{
	struct request_queue *q = rq->q;
	struct bio *bio, *orig_bio;
	int ret;

	if (copy)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	orig_bio = bio;

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	ret = blk_rq_append_bio(rq, &bio);
	if (ret) {
		__blk_rq_unmap_user(orig_bio);
		return ret;
	}
	bio_get(bio);

	return 0;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q: request queue where request should be inserted
 * @rq: request to map data to
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @iter: iovec iterator
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret = -EINVAL;

	if (!iter_is_iovec(iter))
		goto fail;

	if (map_data)
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	return 0;

unmap_rq:
	blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
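/*
 * Note: the do/while above may split a large iovec across several bios
 * chained off rq->bio; each __blk_rq_map_user_iov() call consumes part
 * of the local iterator copy and appends one more bio to the request.
 */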
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio: start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
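/*
 * Usage sketch for the map/unmap pair (condensed and illustrative,
 * modeled on an SG_IO style caller; error handling omitted, local names
 * are hypothetical):
 *
 *	struct request *rq = blk_get_request(q, REQ_OP_SCSI_IN, 0);
 *	struct bio *bio;
 *
 *	blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *	bio = rq->bio;	(saved, as completion may change rq->bio)
 *	blk_execute_rq(q, disk, rq, 0);
 *	blk_rq_unmap_user(bio);
 *	blk_put_request(rq);
 */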
/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q: request queue where request should be inserted
 * @rq: request to fill
 * @kbuf: the kernel buffer
 * @len: length of kernel data
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	struct bio *bio, *orig_bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf))
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	orig_bio = bio;
	ret = blk_rq_append_bio(rq, &bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(orig_bio);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
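/*
 * Usage sketch (illustrative, modeled on a typical SCSI command helper;
 * error handling omitted):
 *
 *	struct request *rq = blk_get_request(q, REQ_OP_SCSI_IN, 0);
 *
 *	if (buflen)
 *		blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO);
 *	blk_execute_rq(q, NULL, rq, 0);
 *	blk_put_request(rq);
 *
 * Unaligned and on-stack buffers transparently take the bio_copy_kern()
 * bounce path above; a stack buffer cannot be mapped for DMA directly,
 * e.g. because the stack may be vmapped (CONFIG_VMAP_STACK).
 */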