// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

/*
 * Append a bio to a passthrough request.  Only works if the bio can be
 * merged into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
	blk_queue_bounce(rq->q, &bio);

	if (!rq->bio) {
		blk_rq_bio_prep(rq->q, rq, bio);
	} else {
		if (!ll_back_merge_fn(rq->q, rq, bio))
			return -EINVAL;

		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->__data_len += bio->bi_iter.bi_size;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);

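/*
 * Usage sketch (illustrative, not in-tree code): a passthrough driver
 * that builds its own bios attaches them to the request one at a time.
 * The bio construction is elided and next_payload_bio() is a
 * hypothetical helper; the append and its error handling follow the
 * convention of in-tree callers.  On failure the bio did not merge
 * within the queue's limits and the caller still owns it.
 *
 *	struct bio *bio = next_payload_bio();
 *	int ret;
 *
 *	ret = blk_rq_append_bio(rq, bio);
 *	if (ret) {
 *		bio_put(bio);
 *		return ret;
 *	}
 */
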
static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user_iov(struct request *rq,
		struct rq_map_data *map_data, struct iov_iter *iter,
		gfp_t gfp_mask, bool copy)
{
	struct request_queue *q = rq->q;
	struct bio *bio, *orig_bio;
	int ret;

	if (copy)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	if (map_data && map_data->null_mapped)
		bio_set_flag(bio, BIO_NULL_MAPPED);

	iov_iter_advance(iter, bio->bi_iter.bi_size);
	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	orig_bio = bio;

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later, so we have to get a ref to prevent it from being freed.
	 */
	ret = blk_rq_append_bio(rq, bio);
	bio_get(bio);
	if (ret) {
		bio_endio(bio);
		__blk_rq_unmap_user(orig_bio);
		bio_put(bio);
		return ret;
	}

	return 0;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret;

	if (!iter_is_iovec(iter))
		goto fail;

	if (map_data)
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->rq_flags |= RQF_COPY_USER;
	return 0;

unmap_rq:
	__blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return -EINVAL;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);

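/*
 * Usage sketch (illustrative, modeled on sg_io() in block/scsi_ioctl.c):
 * mapping a user-space iovec into a passthrough request.  "hdr" stands
 * in for an SG_IO-style header supplied by the caller; the temporary
 * kernel iovec array is freed once the pages are mapped or copied.
 *
 *	struct iovec *iov = NULL;
 *	struct iov_iter i;
 *	int ret;
 *
 *	ret = import_iovec(rq_data_dir(rq), hdr->dxferp, hdr->iovec_count,
 *			   0, &iov, &i);
 *	if (ret < 0)
 *		return ret;
 *
 *	ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
 *	kfree(iov);
 */
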
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

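/*
 * Usage sketch (illustrative, modeled on the sg_io() pattern): map a
 * flat user buffer, execute the request, then unmap.  rq->bio is saved
 * before execution because I/O completion may change it, and the saved
 * pointer is what blk_rq_unmap_user() expects.  The out_put_request
 * label, bd_disk, and at_head come from the surrounding caller and are
 * assumptions here.
 *
 *	struct bio *bio;
 *	int ret;
 *
 *	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *	if (ret)
 *		goto out_put_request;
 *
 *	bio = rq->bio;
 *	blk_execute_rq(q, bd_disk, rq, at_head);
 *	ret = blk_rq_unmap_user(bio);
 */
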
/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	if (do_copy)
		rq->rq_flags |= RQF_COPY_USER;

	ret = blk_rq_append_bio(rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);

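/*
 * Usage sketch (illustrative, modeled on scsi_execute() in
 * drivers/scsi/scsi_lib.c): map a kernel buffer and run the request
 * synchronously.  Command and sense setup are elided; GFP_NOIO and the
 * out_put_request label are assumptions standing in for the caller's
 * allocation context and error path.
 *
 *	if (bufflen) {
 *		ret = blk_rq_map_kern(q, rq, buffer, bufflen, GFP_NOIO);
 *		if (ret)
 *			goto out_put_request;
 *	}
 *	blk_execute_rq(q, NULL, rq, 1);
 */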