/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->__data_len += bio->bi_iter.bi_size;
	}
	return 0;
}
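
/*
 * Usage sketch (illustrative only, not part of this file): a caller
 * that has built its own bio can attach it to a request with
 * blk_rq_append_bio().  The bio either becomes the first bio on the
 * request, is merged onto the tail, or is rejected with -EINVAL when
 * the queue's merge limits would be exceeded; on failure the bio still
 * belongs to the caller.  The helper name below is hypothetical.
 *
 *	static int example_attach_bio(struct request_queue *q,
 *				      struct request *rq, struct bio *bio)
 *	{
 *		int ret = blk_rq_append_bio(q, rq, bio);
 *
 *		if (ret)
 *			bio_put(bio);	// drop our reference on failure
 *		return ret;
 *	}
 */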

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	struct bio *bio;
	int unaligned = 0;
	struct iov_iter i;
	struct iovec iov;

	if (!iter || !iter->count)
		return -EINVAL;

	iov_for_each(iov, i, *iter) {
		unsigned long uaddr = (unsigned long) iov.iov_base;

		if (!iov.iov_len)
			return -EINVAL;

		/*
		 * Keep going so we check length of all segments
		 */
		if (uaddr & queue_dma_alignment(q))
			unaligned = 1;
	}

	if (unaligned || (q->dma_pad_mask & iter->count) || map_data)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio->bi_flags |= (1 << BIO_NULL_MAPPED);

	if (bio->bi_iter.bi_size != iter->count) {
		/*
		 * Grab an extra reference to this bio, as bio_unmap_user()
		 * expects to be able to drop it twice as it happens on the
		 * normal IO completion path
		 */
		bio_get(bio);
		bio_endio(bio, 0);
		__blk_rq_unmap_user(bio);
		return -EINVAL;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	blk_queue_bounce(q, &bio);
	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
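
/*
 * Usage sketch (illustrative only, not part of this file): mapping a
 * user-space iovec, e.g. from an SG_IO-style ioctl, onto a request.
 * import_iovec() validates the user segments and builds the iov_iter;
 * blk_rq_map_user_iov() then pins the pages or falls back to a bounce
 * copy.  Apart from the exported kernel functions, the names here are
 * hypothetical.
 *
 *	static int example_map_iov(struct request_queue *q,
 *				   struct request *rq,
 *				   const struct iovec __user *uvec,
 *				   unsigned int nr_segs)
 *	{
 *		struct iovec stack[UIO_FASTIOV], *iov = stack;
 *		struct iov_iter iter;
 *		ssize_t ret;
 *
 *		ret = import_iovec(rq_data_dir(rq), uvec, nr_segs,
 *				   UIO_FASTIOV, &iov, &iter);
 *		if (ret < 0)
 *			return ret;
 *
 *		ret = blk_rq_map_user_iov(q, rq, NULL, &iter, GFP_KERNEL);
 *		kfree(iov);	// import_iovec() may have allocated this
 *		return ret;
 *	}
 */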

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
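
/*
 * Usage sketch (illustrative only, not part of this file): the usual
 * map/execute/unmap round trip for a passthrough request, as done by
 * SG_IO-style callers.  rq->bio is saved before execution because
 * completion may advance it, and the saved pointer is what
 * blk_rq_unmap_user() must be given.  Apart from the exported kernel
 * functions, the names here are hypothetical.
 *
 *	static int example_passthrough(struct request_queue *q,
 *				       struct request *rq,
 *				       void __user *ubuf, unsigned long len)
 *	{
 *		struct bio *bio;
 *		int ret;
 *
 *		ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *		if (ret)
 *			return ret;
 *
 *		bio = rq->bio;			// save for unmapping
 *		blk_execute_rq(q, NULL, rq, 0);	// issue and wait
 *		return blk_rq_unmap_user(bio);	// copy back and unpin
 *	}
 */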

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (!reading)
		bio->bi_rw |= REQ_WRITE;

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	ret = blk_rq_append_bio(q, rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
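
/*
 * Usage sketch (illustrative only, not part of this file): mapping a
 * kmalloc()ed buffer for a driver-internal command.  A slab buffer is
 * neither on the stack nor misaligned, so it is normally mapped
 * directly rather than copied.  There is no unmap step; the caller
 * frees the buffer once the request has completed.  Apart from the
 * exported kernel functions, the names here are hypothetical.
 *
 *	static int example_kern_cmd(struct request_queue *q,
 *				    struct request *rq, unsigned int len)
 *	{
 *		void *buf = kmalloc(len, GFP_KERNEL);
 *		int ret;
 *
 *		if (!buf)
 *			return -ENOMEM;
 *
 *		ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
 *		if (!ret)
 *			ret = blk_execute_rq(q, NULL, rq, 0);
 *		kfree(buf);
 *		return ret;
 *	}
 */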