/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

#include "blk.h"

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->__data_len += bio->bi_iter.bi_size;
	}
	return 0;
}

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iov:	pointer to the iovec
 * @iov_count:	number of elements in the iovec
 * @len:	I/O byte count
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
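 *
 *    Example (illustrative sketch, not part of the original file): a typical
 *    SG_IO style caller maps the user iovec, executes the request and then
 *    unmaps the original bio. The queue, request, gendisk and iovec contents
 *    are assumed to be set up by the caller; rq->bio is saved before
 *    execution because completion may change it, as noted above.
 *
 *	struct bio *bio;
 *	int ret;
 *
 *	ret = blk_rq_map_user_iov(q, rq, NULL, iov, iov_count, len,
 *				  GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	bio = rq->bio;
 *	blk_execute_rq(q, disk, rq, 0);
 *	ret = blk_rq_unmap_user(bio);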
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data, const struct sg_iovec *iov,
			int iov_count, unsigned int len, gfp_t gfp_mask)
{
	struct bio *bio;
	int i, read = rq_data_dir(rq) == READ;
	int unaligned = 0;

	if (!iov || iov_count <= 0)
		return -EINVAL;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;

		if (!iov[i].iov_len)
			return -EINVAL;

		/*
		 * Keep going so we check length of all segments
		 */
		if (uaddr & queue_dma_alignment(q))
			unaligned = 1;
	}

	if (unaligned || (q->dma_pad_mask & len) || map_data)
		bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
					gfp_mask);
	else
		bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio->bi_iter.bi_size != len) {
		/*
		 * Grab an extra reference to this bio, as bio_unmap_user()
		 * expects to be able to drop it twice as it happens on the
		 * normal IO completion path
		 */
		bio_get(bio);
		bio_endio(bio, 0);
		__blk_rq_unmap_user(bio);
		return -EINVAL;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	blk_queue_bounce(q, &bio);
	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct sg_iovec iov;

	iov.iov_base = (void __user *)ubuf;
	iov.iov_len = len;

	return blk_rq_map_user_iov(q, rq, map_data, &iov, 1, len, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (!reading)
		bio->bi_rw |= REQ_WRITE;

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	ret = blk_rq_append_bio(q, rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
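
/*
 * Example (illustrative sketch, not part of the original file): a typical
 * REQ_TYPE_BLOCK_PC caller allocates a request, maps a kernel buffer into
 * it, executes it and puts the request. The names "disk", "buf" and
 * "buf_len" are assumptions standing in for caller-provided values.
 *
 *	struct request *rq;
 *	int ret;
 *
 *	rq = blk_get_request(q, WRITE, GFP_KERNEL);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	blk_rq_set_block_pc(rq);
 *
 *	ret = blk_rq_map_kern(q, rq, buf, buf_len, GFP_KERNEL);
 *	if (ret) {
 *		blk_put_request(rq);
 *		return ret;
 *	}
 *
 *	blk_execute_rq(q, disk, rq, 0);
 *	blk_put_request(rq);
 */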