// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

struct bio_map_data {
	bool is_our_pages : 1;
	bool is_null_mapped : 1;
	struct iov_iter iter;
	struct iovec iov[];
};

static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
					       gfp_t gfp_mask)
{
	struct bio_map_data *bmd;

	if (data->nr_segs > UIO_MAXIOV)
		return NULL;

	bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
	if (!bmd)
		return NULL;
	memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
	bmd->iter = *data;
	bmd->iter.iov = bmd->iov;
	return bmd;
}

/**
 * bio_copy_from_iter - copy all pages from iov_iter to bio
 * @bio: The &struct bio which describes the I/O as destination
 * @iter: iov_iter as source
 *
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_from_iter(bvec->bv_page,
					  bvec->bv_offset,
					  bvec->bv_len,
					  iter);

		if (!iov_iter_count(iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 * bio_copy_to_iter - copy all pages from bio to iov_iter
 * @bio: The &struct bio which describes the I/O as source
 * @iter: iov_iter as destination
 *
 * Copy all pages from bio to iov_iter.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_to_iter(bvec->bv_page,
					bvec->bv_offset,
					bvec->bv_len,
					&iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 * bio_uncopy_user - finish previously mapped bio
 * @bio: bio being terminated
 *
 * Free pages allocated from bio_copy_user_iov() and write back data
 * to user space in case of a read.
 */
static int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	int ret = 0;

	if (!bmd->is_null_mapped) {
		/*
		 * if we're in a workqueue, the request is orphaned, so
		 * don't copy into a random user address space, just free
		 * and return -EINTR so user space doesn't expect any data.
		 */
		if (!current->mm)
			ret = -EINTR;
		else if (bio_data_dir(bio) == READ)
			ret = bio_copy_to_iter(bio, bmd->iter);
		if (bmd->is_our_pages)
			bio_free_pages(bio);
	}
	kfree(bmd);
	return ret;
}

static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
		struct iov_iter *iter, gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	struct page *page;
	struct bio *bio;
	int i = 0, ret;
	int nr_pages;
	unsigned int len = iter->count;
	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

	bmd = bio_alloc_map_data(iter, gfp_mask);
	if (!bmd)
		return -ENOMEM;

	/*
	 * We need to do a deep copy of the iov_iter including the iovecs.
	 * The caller provided iov might point to an on-stack or otherwise
	 * shortlived one.
	 */
	bmd->is_our_pages = !map_data;
	bmd->is_null_mapped = (map_data && map_data->null_mapped);

	nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));

	ret = -ENOMEM;
	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		goto out_bmd;
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, req_op(rq));

	if (map_data) {
		nr_pages = 1U << map_data->page_order;
		i = map_data->offset / PAGE_SIZE;
	}
	while (len) {
		unsigned int bytes = PAGE_SIZE;

		bytes -= offset;

		if (bytes > len)
			bytes = len;

		if (map_data) {
			if (i == map_data->nr_entries * nr_pages) {
				ret = -ENOMEM;
				goto cleanup;
			}

			page = map_data->pages[i / nr_pages];
			page += (i % nr_pages);

			i++;
		} else {
			page = alloc_page(GFP_NOIO | gfp_mask);
			if (!page) {
				ret = -ENOMEM;
				goto cleanup;
			}
		}

		if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) {
			if (!map_data)
				__free_page(page);
			break;
		}

		len -= bytes;
		offset = 0;
	}

	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	/*
	 * success
	 */
	if ((iov_iter_rw(iter) == WRITE &&
	     (!map_data || !map_data->null_mapped)) ||
	    (map_data && map_data->from_user)) {
		ret = bio_copy_from_iter(bio, iter);
		if (ret)
			goto cleanup;
	} else {
		if (bmd->is_our_pages)
			zero_fill_bio(bio);
		iov_iter_advance(iter, bio->bi_iter.bi_size);
	}

	bio->bi_private = bmd;

	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		goto cleanup;
	return 0;
cleanup:
	if (!map_data)
		bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
out_bmd:
	kfree(bmd);
	return ret;
}

static void blk_mq_map_bio_put(struct bio *bio)
{
	if (bio->bi_opf & REQ_ALLOC_CACHE) {
		bio_put(bio);
	} else {
		bio_uninit(bio);
		kfree(bio);
	}
}

static struct bio *blk_rq_map_bio_alloc(struct request *rq,
		unsigned int nr_vecs, gfp_t gfp_mask)
{
	struct bio *bio;

	if (rq->cmd_flags & REQ_POLLED) {
		blk_opf_t opf = rq->cmd_flags | REQ_ALLOC_CACHE;

		bio = bio_alloc_bioset(NULL, nr_vecs, opf, gfp_mask,
				       &fs_bio_set);
		if (!bio)
			return NULL;
	} else {
		bio = bio_kmalloc(nr_vecs, gfp_mask);
		if (!bio)
			return NULL;
		bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));
	}
	return bio;
}

static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
		gfp_t gfp_mask)
{
	iov_iter_extraction_t extraction_flags = 0;
	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
	unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
	struct bio *bio;
	int ret;
	int j;

	if (!iov_iter_count(iter))
		return -EINVAL;

	bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);
	if (bio == NULL)
		return -ENOMEM;

	if (blk_queue_pci_p2pdma(rq->q))
		extraction_flags |= ITER_ALLOW_P2PDMA;

	while (iov_iter_count(iter)) {
		struct page **pages, *stack_pages[UIO_FASTIOV];
		ssize_t bytes;
		size_t offs;
		int npages;

		if (nr_vecs <= ARRAY_SIZE(stack_pages)) {
			pages = stack_pages;
			bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
						   nr_vecs, &offs, extraction_flags);
		} else {
			bytes = iov_iter_get_pages_alloc(iter, &pages,
						LONG_MAX, &offs, extraction_flags);
		}
		if (unlikely(bytes <= 0)) {
			ret = bytes ? bytes : -EFAULT;
			goto out_unmap;
		}

		npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);

		if (unlikely(offs & queue_dma_alignment(rq->q)))
			j = 0;
		else {
			for (j = 0; j < npages; j++) {
				struct page *page = pages[j];
				unsigned int n = PAGE_SIZE - offs;
				bool same_page = false;

				if (n > bytes)
					n = bytes;

				if (!bio_add_hw_page(rq->q, bio, page, n, offs,
						     max_sectors, &same_page)) {
					if (same_page)
						put_page(page);
					break;
				}

				bytes -= n;
				offs = 0;
			}
		}
		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < npages)
			put_page(pages[j++]);
		if (pages != stack_pages)
			kvfree(pages);
		/* couldn't stuff something into bio? */
		if (bytes) {
			iov_iter_revert(iter, bytes);
			break;
		}
	}

	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		goto out_unmap;
	return 0;

 out_unmap:
	bio_release_pages(bio, false);
	blk_mq_map_bio_put(bio);
	return ret;
}

static void bio_invalidate_vmalloc_pages(struct bio *bio)
{
#ifdef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
	if (bio->bi_private && !op_is_write(bio_op(bio))) {
		unsigned long i, len = 0;

		for (i = 0; i < bio->bi_vcnt; i++)
			len += bio->bi_io_vec[i].bv_len;
		invalidate_kernel_vmap_range(bio->bi_private, len);
	}
#endif
}

static void bio_map_kern_endio(struct bio *bio)
{
	bio_invalidate_vmalloc_pages(bio);
	bio_uninit(bio);
	kfree(bio);
}

/**
 * bio_map_kern - map kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to map
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio allocation
 *
 * Map the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_map_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	bool is_vmalloc = is_vmalloc_addr(data);
	struct page *page;
	int offset, i;
	struct bio *bio;

	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);

	if (is_vmalloc) {
		flush_kernel_vmap_range(data, len);
		bio->bi_private = data;
	}

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (!is_vmalloc)
			page = virt_to_page(data);
		else
			page = vmalloc_to_page(data);
		if (bio_add_pc_page(q, bio, page, bytes,
				    offset) < bytes) {
			/* we don't support partial mappings */
			bio_uninit(bio);
			kfree(bio);
			return ERR_PTR(-EINVAL);
		}

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}

static void bio_copy_kern_endio(struct bio *bio)
{
	bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
}

static void bio_copy_kern_endio_read(struct bio *bio)
{
	char *p = bio->bi_private;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		memcpy_from_bvec(p, bvec);
		p += bvec->bv_len;
	}

	bio_copy_kern_endio(bio);
}

/**
 * bio_copy_kern - copy kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to copy
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio and page allocation
 * @reading: data direction is READ
 *
 * copy the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_copy_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask, int reading)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	struct bio *bio;
	void *p = data;
	int nr_pages = 0;

	/*
	 * Overflow, abort
	 */
	if (end < start)
		return ERR_PTR(-EINVAL);

	nr_pages = end - start;
	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);

	while (len) {
		struct page *page;
		unsigned int bytes = PAGE_SIZE;

		if (bytes > len)
			bytes = len;

		page = alloc_page(GFP_NOIO | __GFP_ZERO | gfp_mask);
		if (!page)
			goto cleanup;

		if (!reading)
			memcpy(page_address(page), p, bytes);

		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
			break;

		len -= bytes;
		p += bytes;
	}

	if (reading) {
		bio->bi_end_io = bio_copy_kern_endio_read;
		bio->bi_private = data;
	} else {
		bio->bi_end_io = bio_copy_kern_endio;
	}

	return bio;

cleanup:
	bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
	return ERR_PTR(-ENOMEM);
}

/*
 * Append a bio to a passthrough request.  Only works if the bio can be merged
 * into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	unsigned int nr_segs = 0;

	bio_for_each_bvec(bv, bio, iter)
		nr_segs++;

	if (!rq->bio) {
		blk_rq_bio_prep(rq, bio, nr_segs);
	} else {
		if (!ll_back_merge_fn(rq, bio, nr_segs))
			return -EINVAL;
		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->__data_len += (bio)->bi_iter.bi_size;
		bio_crypt_free_ctx(bio);
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);

/* Prepare bio for passthrough IO given ITER_BVEC iter */
static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
{
	struct request_queue *q = rq->q;
	size_t nr_iter = iov_iter_count(iter);
	size_t nr_segs = iter->nr_segs;
	struct bio_vec *bvecs, *bvprvp = NULL;
	const struct queue_limits *lim = &q->limits;
	unsigned int nsegs = 0, bytes = 0;
	struct bio *bio;
	size_t i;

	if (!nr_iter || (nr_iter >> SECTOR_SHIFT) > queue_max_hw_sectors(q))
		return -EINVAL;
	if (nr_segs > queue_max_segments(q))
		return -EINVAL;

	/* no iovecs to alloc, as we already have a BVEC iterator */
	bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
	if (bio == NULL)
		return -ENOMEM;

	bio_iov_bvec_set(bio, (struct iov_iter *)iter);
	blk_rq_bio_prep(rq, bio, nr_segs);

	/* loop to perform a bunch of sanity checks */
	bvecs = (struct bio_vec *)iter->bvec;
	for (i = 0; i < nr_segs; i++) {
		struct bio_vec *bv = &bvecs[i];

		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, fallback to copy.
		 */
		if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) {
			blk_mq_map_bio_put(bio);
			return -EREMOTEIO;
		}
		/* check full condition */
		if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len)
			goto put_bio;
		if (bytes + bv->bv_len > nr_iter)
			goto put_bio;
		if (bv->bv_offset + bv->bv_len > PAGE_SIZE)
			goto put_bio;

		nsegs++;
		bytes += bv->bv_len;
		bvprvp = bv;
	}
	return 0;
put_bio:
	blk_mq_map_bio_put(bio);
	return -EINVAL;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false, map_bvec = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret = -EINVAL;

	if (map_data)
		copy = true;
	else if (blk_queue_may_bounce(q))
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (iov_iter_is_bvec(iter))
		map_bvec = true;
	else if (!iter_is_iovec(iter))
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	if (map_bvec) {
		ret = blk_rq_map_user_bvec(rq, iter);
		if (!ret)
			return 0;
		if (ret != -EREMOTEIO)
			goto fail;
		/* fall back to copying the data on limits mismatches */
		copy = true;
	}

	i = *iter;
	do {
		if (copy)
			ret = bio_copy_user_iov(rq, map_data, &i, gfp_mask);
		else
			ret = bio_map_user_iov(rq, &i, gfp_mask);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	return 0;

unmap_rq:
	blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

int blk_rq_map_user_io(struct request *req, struct rq_map_data *map_data,
		void __user *ubuf, unsigned long buf_len, gfp_t gfp_mask,
		bool vec, int iov_count, bool check_iter_count, int rw)
{
	int ret = 0;

	if (vec) {
		struct iovec fast_iov[UIO_FASTIOV];
		struct iovec *iov = fast_iov;
		struct iov_iter iter;

		ret = import_iovec(rw, ubuf, iov_count ? iov_count : buf_len,
				UIO_FASTIOV, &iov, &iter);
		if (ret < 0)
			return ret;

		if (iov_count) {
			/* SG_IO howto says that the shorter of the two wins */
			iov_iter_truncate(&iter, buf_len);
			if (check_iter_count && !iov_iter_count(&iter)) {
				kfree(iov);
				return -EINVAL;
			}
		}

		ret = blk_rq_map_user_iov(req->q, req, map_data, &iter,
				gfp_mask);
		kfree(iov);
	} else if (buf_len) {
		ret = blk_rq_map_user(req->q, req, map_data, ubuf, buf_len,
				gfp_mask);
	}
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_io);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	       start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *next_bio;
	int ret = 0, ret2;

	while (bio) {
		if (bio->bi_private) {
			ret2 = bio_uncopy_user(bio);
			if (ret2 && !ret)
				ret = ret2;
		} else {
			bio_release_pages(bio, bio_data_dir(bio) == READ);
		}

		next_bio = bio;
		bio = bio->bi_next;
		blk_mq_map_bio_put(next_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of user data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf) ||
	    blk_queue_may_bounce(q))
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	ret = blk_rq_append_bio(rq, bio);
	if (unlikely(ret)) {
		bio_uninit(bio);
		kfree(bio);
	}
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_kern);
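
/*
 * Illustrative usage sketch (kept under #if 0 so it is never built): one
 * plausible way a passthrough driver could feed a kernel buffer to a request
 * via blk_rq_map_kern().  The request allocation/execution helpers
 * (blk_mq_alloc_request(), blk_execute_rq(), blk_mq_free_request()) are
 * assumed to be available from <linux/blk-mq.h>; error handling is
 * abbreviated and the function name is hypothetical.
 */
#if 0
static int example_map_kern_and_execute(struct request_queue *q, void *buf,
					 unsigned int len)
{
	struct request *rq;
	blk_status_t status;
	int ret;

	rq = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* copies into a bounce buffer or maps @buf directly, as appropriate */
	ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (ret)
		goto out_free;

	status = blk_execute_rq(rq, false);
	ret = blk_status_to_errno(status);
out_free:
	blk_mq_free_request(rq);
	return ret;
}
#endif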