// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

/*
 * Append a bio to a passthrough request. Only works if the bio can be merged
 * into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio **bio)
{
	struct bio *orig_bio = *bio;
	struct bvec_iter iter;
	struct bio_vec bv;
	unsigned int nr_segs = 0;

	blk_queue_bounce(rq->q, bio);

	bio_for_each_bvec(bv, *bio, iter)
		nr_segs++;

	if (!rq->bio) {
		blk_rq_bio_prep(rq, *bio, nr_segs);
	} else {
		if (!ll_back_merge_fn(rq, *bio, nr_segs)) {
			if (orig_bio != *bio) {
				bio_put(*bio);
				*bio = orig_bio;
			}
			return -EINVAL;
		}

		rq->biotail->bi_next = *bio;
		rq->biotail = *bio;
		rq->__data_len += (*bio)->bi_iter.bi_size;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);

/* Undo a single mapped bio: unpin user pages or copy back a bounce buffer. */
static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}
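/*
 * Illustrative sketch, not part of the original file: how a caller might
 * feed a driver-built bio into a passthrough request.  The helper name
 * example_append_kern_bio() is hypothetical.  Note that blk_rq_append_bio()
 * may replace *bio with a bounce bio, so callers must keep using the
 * updated pointer afterwards.
 */
static int __maybe_unused example_append_kern_bio(struct request *rq,
						  void *buf, unsigned int len)
{
	struct bio *bio;
	int ret;

	bio = bio_map_kern(rq->q, buf, len, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* The bio must carry the same operation as the request it serves. */
	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	ret = blk_rq_append_bio(rq, &bio);
	if (ret)
		bio_put(bio);	/* append failed: drop the bio we built */
	return ret;
}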
static int __blk_rq_map_user_iov(struct request *rq,
		struct rq_map_data *map_data, struct iov_iter *iter,
		gfp_t gfp_mask, bool copy)
{
	struct request_queue *q = rq->q;
	struct bio *bio, *orig_bio;
	int ret;

	if (copy)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	orig_bio = bio;

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	ret = blk_rq_append_bio(rq, &bio);
	if (ret) {
		__blk_rq_unmap_user(orig_bio);
		return ret;
	}
	bio_get(bio);

	return 0;
}
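/*
 * Illustrative sketch, not part of the original file: the copy-vs-map
 * decision that blk_rq_map_user_iov() makes below, spelled out in a
 * hypothetical helper.  Zero-copy mapping is only safe when the iovec
 * honours the queue's DMA padding/alignment and virt boundary; anything
 * else must go through a bounce copy.
 */
static bool __maybe_unused example_needs_copy(struct request_queue *q,
					      struct rq_map_data *map_data,
					      const struct iov_iter *iter)
{
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);

	if (map_data)			/* caller supplied pages: always copy */
		return true;
	if (iov_iter_alignment(iter) & align)
		return true;		/* misaligned addresses or lengths */
	if (queue_virt_boundary(q))	/* gaps would violate the boundary */
		return queue_virt_boundary(q) & iov_iter_gap_alignment(iter);
	return false;
}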
/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q: request queue where request should be inserted
 * @rq: request to map data to
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @iter: iovec iterator
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret = -EINVAL;

	if (!iter_is_iovec(iter))
		goto fail;

	if (map_data)
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->rq_flags |= RQF_COPY_USER;
	return 0;

unmap_rq:
	__blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);
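/*
 * Illustrative sketch, not part of the original file: a typical passthrough
 * submission path built on blk_rq_map_user()/blk_rq_unmap_user().  The
 * helper name and the REQ_OP_DRV_IN opcode are assumptions for the example.
 */
static int __maybe_unused example_submit_ubuf(struct request_queue *q,
					      struct gendisk *disk,
					      void __user *ubuf,
					      unsigned long len)
{
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		goto out_put;

	/*
	 * Save the original bio: completion may change rq->bio, but
	 * blk_rq_unmap_user() must see the bio the map call installed.
	 */
	bio = rq->bio;
	blk_execute_rq(q, disk, rq, 0);
	ret = blk_rq_unmap_user(bio);
out_put:
	blk_put_request(rq);
	return ret;
}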
/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio: start of bio list
 *
 * Description:
 *    Unmap a request previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q: request queue where request should be inserted
 * @rq: request to fill
 * @kbuf: the kernel buffer
 * @len: length of kernel data
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio, *orig_bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	if (do_copy)
		rq->rq_flags |= RQF_COPY_USER;

	orig_bio = bio;
	ret = blk_rq_append_bio(rq, &bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(orig_bio);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
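/*
 * Illustrative sketch, not part of the original file: issuing a passthrough
 * request on a kmalloc()'d buffer.  blk_rq_map_kern() falls back to a
 * bounce copy for stack or misaligned buffers, so no manual copying is
 * needed here.  Helper name and opcode are hypothetical.
 */
static int __maybe_unused example_submit_kbuf(struct request_queue *q,
					      struct gendisk *disk,
					      void *kbuf, unsigned int len)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	ret = blk_rq_map_kern(q, rq, kbuf, len, GFP_KERNEL);
	if (!ret)
		blk_execute_rq(q, disk, rq, 0);

	blk_put_request(rq);
	return ret;
}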