/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->__data_len += bio->bi_iter.bi_size;
	}
	return 0;
}

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user_iov(struct request *rq,
		struct rq_map_data *map_data, struct iov_iter *iter,
		gfp_t gfp_mask, bool copy)
{
	struct request_queue *q = rq->q;
	struct bio *bio, *orig_bio;
	int ret;

	if (copy)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio_set_flag(bio, BIO_NULL_MAPPED);

	iov_iter_advance(iter, bio->bi_iter.bi_size);
	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (ret) {
		bio_endio(bio);
		__blk_rq_unmap_user(orig_bio);
		bio_put(bio);
		return ret;
	}

	return 0;
}
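/*
 * A note on the chunking contract of the helper above: one call consumes
 * only as much of @iter as fits into a single bio and advances the
 * iterator by bio->bi_iter.bi_size, so callers must loop until
 * iov_iter_count() reaches zero.  In sketch form (hypothetical caller
 * state 'i', 'rq', etc.):
 *
 *	i = *iter;
 *	do {
 *		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
 *	} while (!ret && iov_iter_count(&i));
 *
 * blk_rq_map_user_iov() below implements exactly this loop.
 */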
/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret;

	if (map_data)
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;
	return 0;

unmap_rq:
	__blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return -EINVAL;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
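/*
 * A minimal usage sketch (illustration only, not part of this file):
 * mapping a user-space iovec into a REQ_TYPE_BLOCK_PC request, roughly as
 * an SG_IO-style ioctl path would.  The function and its parameters are
 * hypothetical, and the pass-through command setup is omitted.
 */
static int example_submit_user_iov(struct request_queue *q,
				   struct gendisk *disk,
				   const struct iovec __user *uvec,
				   unsigned int nr_segs)
{
	struct iovec fast_iov[UIO_FASTIOV], *iov = fast_iov;
	struct iov_iter iter;
	struct request *rq;
	struct bio *bio;
	ssize_t count;
	int ret;

	/* Validate and import the user iovec into a kernel iov_iter */
	count = import_iovec(WRITE, uvec, nr_segs, UIO_FASTIOV, &iov, &iter);
	if (count < 0)
		return count;

	rq = blk_get_request(q, WRITE, GFP_KERNEL);
	if (IS_ERR(rq)) {
		ret = PTR_ERR(rq);
		goto out_free;
	}
	blk_rq_set_block_pc(rq);

	ret = blk_rq_map_user_iov(q, rq, NULL, &iter, GFP_KERNEL);
	if (ret)
		goto out_put;

	bio = rq->bio;	/* keep the original bio list head for unmapping */
	/* ... fill in rq->cmd / rq->cmd_len / rq->timeout here ... */
	blk_execute_rq(q, disk, rq, 0);
	ret = blk_rq_unmap_user(bio);	/* must pair with the map above */
out_put:
	blk_put_request(rq);
out_free:
	kfree(iov);	/* import_iovec() leaves this NULL if no allocation */
	return ret;
}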
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
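/*
 * Pairing sketch (illustration only, hypothetical names): blk_rq_map_user()
 * followed by blk_rq_unmap_user() on the *original* rq->bio, which must be
 * saved before the request executes, since completion may advance rq->bio.
 */
static int example_read_into_user_buf(struct request_queue *q,
				      struct gendisk *disk,
				      void __user *ubuf, unsigned long len)
{
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	blk_rq_set_block_pc(rq);

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		goto out;

	bio = rq->bio;	/* save before execution; completion may change it */
	/* ... fill in rq->cmd / rq->cmd_len / rq->timeout here ... */
	blk_execute_rq(q, disk, rq, 0);
	ret = blk_rq_unmap_user(bio);
out:
	blk_put_request(rq);
	return ret;
}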
/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (!reading)
		bio->bi_rw |= REQ_WRITE;

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	ret = blk_rq_append_bio(q, rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
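/*
 * Kernel-buffer sketch (illustration only, hypothetical names): unlike the
 * user-mapping paths, blk_rq_map_kern() needs no unmap pairing; the buffer
 * must simply outlive the request.  A stack buffer is legal but forces the
 * copy path, as the object_is_on_stack() check above shows.
 */
static int example_submit_kernel_buf(struct request_queue *q,
				     struct gendisk *disk,
				     void *kbuf, unsigned int len)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	blk_rq_set_block_pc(rq);

	ret = blk_rq_map_kern(q, rq, kbuf, len, GFP_KERNEL);
	if (ret) {
		blk_put_request(rq);
		return ret;
	}

	/* ... fill in rq->cmd / rq->cmd_len / rq->timeout here ... */
	ret = blk_execute_rq(q, disk, rq, 0);
	blk_put_request(rq);
	return ret;
}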