/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

static bool iovec_gap_to_prv(struct request_queue *q,
                             struct iovec *prv, struct iovec *cur)
{
        unsigned long prev_end;

        if (!queue_virt_boundary(q))
                return false;

        if (prv->iov_base == NULL && prv->iov_len == 0)
                /* prv is not set - don't check */
                return false;

        prev_end = (unsigned long)(prv->iov_base + prv->iov_len);

        return (((unsigned long)cur->iov_base & queue_virt_boundary(q)) ||
                prev_end & queue_virt_boundary(q));
}

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
                      struct bio *bio)
{
        if (!rq->bio)
                blk_rq_bio_prep(q, rq, bio);
        else if (!ll_back_merge_fn(q, rq, bio))
                return -EINVAL;
        else {
                rq->biotail->bi_next = bio;
                rq->biotail = bio;

                rq->__data_len += bio->bi_iter.bi_size;
        }
        return 0;
}

static int __blk_rq_unmap_user(struct bio *bio)
{
        int ret = 0;

        if (bio) {
                if (bio_flagged(bio, BIO_USER_MAPPED))
                        bio_unmap_user(bio);
                else
                        ret = bio_uncopy_user(bio);
        }

        return ret;
}

static int __blk_rq_map_user_iov(struct request *rq,
                struct rq_map_data *map_data, struct iov_iter *iter,
                gfp_t gfp_mask, bool copy)
{
        struct request_queue *q = rq->q;
        struct bio *bio, *orig_bio;
        int ret;

        if (copy)
                bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
        else
                bio = bio_map_user_iov(q, iter, gfp_mask);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (map_data && map_data->null_mapped)
                bio_set_flag(bio, BIO_NULL_MAPPED);

        iov_iter_advance(iter, bio->bi_iter.bi_size);
        if (map_data)
                map_data->offset += bio->bi_iter.bi_size;

        orig_bio = bio;
        blk_queue_bounce(q, &bio);

        /*
         * We link the bounce buffer in and could have to traverse it
         * later so we have to get a ref to prevent it from being freed
         */
        bio_get(bio);

        ret = blk_rq_append_bio(q, rq, bio);
        if (ret) {
                bio_endio(bio);
                __blk_rq_unmap_user(orig_bio);
                bio_put(bio);
                return ret;
        }

        return 0;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                        struct rq_map_data *map_data,
                        const struct iov_iter *iter, gfp_t gfp_mask)
{
        struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0};
        bool copy = (q->dma_pad_mask & iter->count) || map_data;
        struct bio *bio = NULL;
        struct iov_iter i;
        int ret;

        if (!iter || !iter->count)
                return -EINVAL;

        iov_for_each(iov, i, *iter) {
                unsigned long uaddr = (unsigned long) iov.iov_base;

                if (!iov.iov_len)
                        return -EINVAL;

                /*
                 * Keep going so we check length of all segments
                 */
                if ((uaddr & queue_dma_alignment(q)) ||
                    iovec_gap_to_prv(q, &prv, &iov))
                        copy = true;

                prv.iov_base = iov.iov_base;
                prv.iov_len = iov.iov_len;
        }

        i = *iter;
        do {
                ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
                if (ret)
                        goto unmap_rq;
                if (!bio)
                        bio = rq->bio;
        } while (iov_iter_count(&i));

        if (!bio_flagged(bio, BIO_USER_MAPPED))
                rq->cmd_flags |= REQ_COPY_USER;
        return 0;

unmap_rq:
        __blk_rq_unmap_user(bio);
        rq->bio = NULL;
        return -EINVAL;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
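
/*
 * Illustrative sketch, not part of the original file: roughly how an
 * SG_IO-style caller might hand a user iovec to blk_rq_map_user_iov().
 * The function name and all parameters are hypothetical; "uvec" is assumed
 * to be a kernel-resident iovec array (e.g. the result of import_iovec())
 * whose iov_base pointers refer to user memory.
 */
static int __maybe_unused example_map_user_iov(struct request_queue *q,
                struct request *rq, const struct iovec *uvec,
                unsigned long nr_segs, size_t total_len)
{
        struct iov_iter iter;

        /* Wrap the iovec array in an iterator matching the data direction */
        iov_iter_init(&iter, rq_data_dir(rq), uvec, nr_segs, total_len);

        /*
         * NULL rq_map_data: no caller-supplied pages, so the data is either
         * mapped zero-copy or bounced into freshly allocated pages.
         */
        return blk_rq_map_user_iov(q, rq, NULL, &iter, GFP_KERNEL);
}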

int blk_rq_map_user(struct request_queue *q, struct request *rq,
                    struct rq_map_data *map_data, void __user *ubuf,
                    unsigned long len, gfp_t gfp_mask)
{
        struct iovec iov;
        struct iov_iter i;
        int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

        if (unlikely(ret < 0))
                return ret;

        return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	       start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
        struct bio *mapped_bio;
        int ret = 0, ret2;

        while (bio) {
                mapped_bio = bio;
                if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
                        mapped_bio = bio->bi_private;

                ret2 = __blk_rq_unmap_user(mapped_bio);
                if (ret2 && !ret)
                        ret = ret2;

                mapped_bio = bio;
                bio = bio->bi_next;
                bio_put(mapped_bio);
        }

        return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
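
/*
 * Illustrative sketch, not part of the original file: the map/execute/unmap
 * lifecycle described above.  rq->bio is saved before the request runs
 * because completion may change it, and blk_rq_unmap_user() needs the value
 * from mapping time.  The function name and parameters are hypothetical;
 * the request is assumed to have been allocated and set up by the caller.
 */
static int __maybe_unused example_map_and_unmap(struct request_queue *q,
                struct request *rq, void __user *ubuf, unsigned long len)
{
        struct bio *bio;
        int ret, ret2;

        ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
        if (ret)
                return ret;

        /* Keep the original bio chain for the later unmap */
        bio = rq->bio;

        blk_execute_rq(q, NULL, rq, 0);

        /* Still in process context: copy back and/or unpin the user pages */
        ret2 = blk_rq_unmap_user(bio);
        return ret ? ret : ret2;
}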

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
                    unsigned int len, gfp_t gfp_mask)
{
        int reading = rq_data_dir(rq) == READ;
        unsigned long addr = (unsigned long) kbuf;
        int do_copy = 0;
        struct bio *bio;
        int ret;

        if (len > (queue_max_hw_sectors(q) << 9))
                return -EINVAL;
        if (!len || !kbuf)
                return -EINVAL;

        do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
        if (do_copy)
                bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
        else
                bio = bio_map_kern(q, kbuf, len, gfp_mask);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (!reading)
                bio->bi_rw |= REQ_WRITE;

        if (do_copy)
                rq->cmd_flags |= REQ_COPY_USER;

        ret = blk_rq_append_bio(q, rq, bio);
        if (unlikely(ret)) {
                /* request is too big */
                bio_put(bio);
                return ret;
        }

        blk_queue_bounce(q, &rq->bio);
        return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
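
/*
 * Illustrative sketch, not part of the original file: attaching an in-kernel
 * buffer to a REQ_TYPE_BLOCK_PC request, roughly the pattern used by
 * scsi_execute().  The function name and parameters are hypothetical, and
 * filling in the command bytes (rq->cmd/rq->cmd_len) and rq->timeout is
 * left to the caller.
 */
static int __maybe_unused example_map_kern(struct request_queue *q,
                void *buf, unsigned int buf_len, int rw)
{
        struct request *rq;
        int ret;

        rq = blk_get_request(q, rw, GFP_NOIO);
        if (IS_ERR(rq))
                return PTR_ERR(rq);
        blk_rq_set_block_pc(rq);

        /*
         * Depending on buffer alignment this either maps the buffer
         * directly (bio_map_kern) or copies it into bounce pages
         * (bio_copy_kern).
         */
        ret = blk_rq_map_kern(q, rq, buf, buf_len, GFP_NOIO);
        if (!ret)
                blk_execute_rq(q, NULL, rq, 0);

        blk_put_request(rq);
        return ret;
}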