// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

/*
 * Append a bio to a passthrough request.  Only works if the bio can be merged
 * into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio **bio)
{
	struct bio *orig_bio = *bio;

	blk_queue_bounce(rq->q, bio);

	if (!rq->bio) {
		blk_rq_bio_prep(rq->q, rq, *bio);
	} else {
		if (!ll_back_merge_fn(rq->q, rq, *bio)) {
			if (orig_bio != *bio) {
				bio_put(*bio);
				*bio = orig_bio;
			}
			return -EINVAL;
		}

		rq->biotail->bi_next = *bio;
		rq->biotail = *bio;
		rq->__data_len += (*bio)->bi_iter.bi_size;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}
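/*
 * Illustrative sketch, kept out of the build: how a passthrough driver
 * might hand its own bio to blk_rq_append_bio().  The helper name, the
 * page source and the calling context are assumptions made purely for
 * documentation; only blk_rq_append_bio() itself is provided by this file.
 */
#if 0
static int example_append_page(struct request *rq, struct page *page,
			       unsigned int len, unsigned int offset)
{
	struct bio *bio;
	int ret;

	bio = bio_alloc(GFP_KERNEL, 1);
	if (!bio)
		return -ENOMEM;

	if (bio_add_pc_page(rq->q, bio, page, len, offset) < len) {
		bio_put(bio);
		return -EINVAL;
	}

	/* match the request's operation, as the mapping helpers below do */
	bio->bi_opf = req_op(rq);

	ret = blk_rq_append_bio(rq, &bio);
	if (ret)
		bio_put(bio);	/* merge failed, the bio is still ours */
	return ret;
}
#endif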
static int __blk_rq_map_user_iov(struct request *rq,
		struct rq_map_data *map_data, struct iov_iter *iter,
		gfp_t gfp_mask, bool copy)
{
	struct request_queue *q = rq->q;
	struct bio *bio, *orig_bio;
	int ret;

	if (copy)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	orig_bio = bio;

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later, so we have to get a ref to prevent it from being freed.
	 */
	ret = blk_rq_append_bio(rq, &bio);
	if (ret) {
		__blk_rq_unmap_user(orig_bio);
		return ret;
	}
	bio_get(bio);

	return 0;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret;

	if (!iter_is_iovec(iter))
		goto fail;

	if (map_data)
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->rq_flags |= RQF_COPY_USER;
	return 0;

unmap_rq:
	__blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return -EINVAL;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
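/*
 * Illustrative sketch, kept out of the build: mapping a user-supplied
 * iovec the way an SG_IO-style ioctl might.  The helper name and calling
 * context are assumptions; kfree() would additionally need <linux/slab.h>,
 * which this file does not include, and @rq is assumed to be an already
 * allocated passthrough request.
 */
#if 0
static int example_map_user_iovec(struct request_queue *q, struct request *rq,
				  const struct iovec __user *uvec,
				  unsigned int nr_segs)
{
	struct iovec fast_iov[UIO_FASTIOV];
	struct iovec *iov = fast_iov;
	struct iov_iter iter;
	ssize_t bytes;
	int ret;

	bytes = import_iovec(rq_data_dir(rq), uvec, nr_segs,
			     ARRAY_SIZE(fast_iov), &iov, &iter);
	if (bytes < 0)
		return bytes;

	ret = blk_rq_map_user_iov(q, rq, NULL, &iter, GFP_KERNEL);
	kfree(iov);	/* NULL when the on-stack array was sufficient */
	return ret;
}
#endif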
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
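/*
 * Illustrative sketch, kept out of the build: the usual map/execute/unmap
 * lifecycle for a single user buffer.  The helper name and the use of
 * blk_execute_rq() are assumptions about the caller; the important detail
 * is that rq->bio is saved before the request runs, because completion may
 * change it, and that saved pointer is what blk_rq_unmap_user() expects.
 */
#if 0
static int example_map_single_buffer(struct request_queue *q,
				     struct request *rq,
				     void __user *ubuf, unsigned long len)
{
	struct bio *bio;
	int ret;

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		return ret;

	bio = rq->bio;			/* save before the request completes */
	blk_execute_rq(q, NULL, rq, 0);
	return blk_rq_unmap_user(bio);
}
#endif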
/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio, *orig_bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	if (do_copy)
		rq->rq_flags |= RQF_COPY_USER;

	orig_bio = bio;
	ret = blk_rq_append_bio(rq, &bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(orig_bio);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
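/*
 * Illustrative sketch, kept out of the build: mapping an internally
 * generated kernel buffer, as a SCSI-style helper might do.  The helper
 * name and the blk_execute_rq() call are assumptions about the caller.
 * Stack buffers are legal but force the copy path, since blk_rq_map_kern()
 * detects them with object_is_on_stack() and bounces the data.
 */
#if 0
static int example_map_kernel_buffer(struct request_queue *q,
				     struct request *rq,
				     void *buf, unsigned int len)
{
	int ret;

	ret = blk_rq_map_kern(q, rq, buf, len, GFP_NOIO);
	if (ret)
		return ret;

	blk_execute_rq(q, NULL, rq, 0);
	return 0;
}
#endif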