/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <scsi/sg.h>            /* for struct sg_iovec */

#include "blk.h"

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
                      struct bio *bio)
{
        if (!rq->bio)
                blk_rq_bio_prep(q, rq, bio);
        else if (!ll_back_merge_fn(q, rq, bio))
                return -EINVAL;
        else {
                rq->biotail->bi_next = bio;
                rq->biotail = bio;

                rq->__data_len += bio->bi_size;
        }
        return 0;
}
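
/*
 * Editorial illustration (not part of the original source): the append
 * path above is what lets a single request be built from more than one
 * mapped buffer.  A hypothetical caller mapping two kernel buffers could
 * do, assuming both fit within the queue limits:
 *
 *      ret = blk_rq_map_kern(q, rq, buf_a, len_a, GFP_KERNEL);
 *      if (!ret)
 *              ret = blk_rq_map_kern(q, rq, buf_b, len_b, GFP_KERNEL);
 *
 * The second call ends up in blk_rq_append_bio(), which validates the
 * merge via ll_back_merge_fn() and chains the new bio onto rq->biotail,
 * or returns -EINVAL if the request would exceed the queue limits.
 */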

static int __blk_rq_unmap_user(struct bio *bio)
{
        int ret = 0;

        if (bio) {
                if (bio_flagged(bio, BIO_USER_MAPPED))
                        bio_unmap_user(bio);
                else
                        ret = bio_uncopy_user(bio);
        }

        return ret;
}

static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
                             struct rq_map_data *map_data, void __user *ubuf,
                             unsigned int len, gfp_t gfp_mask)
{
        unsigned long uaddr;
        struct bio *bio, *orig_bio;
        int reading, ret;

        reading = rq_data_dir(rq) == READ;

        /*
         * If the alignment requirement is satisfied, map in the user pages
         * for direct DMA; otherwise, set up kernel bounce buffers.
         */
        uaddr = (unsigned long) ubuf;
        if (blk_rq_aligned(q, uaddr, len) && !map_data)
                bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
        else
                bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (map_data && map_data->null_mapped)
                bio->bi_flags |= (1 << BIO_NULL_MAPPED);

        orig_bio = bio;
        blk_queue_bounce(q, &bio);

        /*
         * We link the bounce buffer in and could have to traverse it
         * later, so we have to get a ref to prevent it from being freed.
         */
        bio_get(bio);

        ret = blk_rq_append_bio(q, rq, bio);
        if (!ret)
                return bio->bi_size;

        /* if it was bounced we must call the end io function */
        bio_endio(bio, 0);
        __blk_rq_unmap_user(orig_bio);
        bio_put(bio);
        return ret;
}
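
/*
 * Editorial note (a sketch based on the usual definition of
 * blk_rq_aligned() in <linux/blkdev.h>; verify against your tree): the
 * zero-copy branch above is taken only when both the user address and
 * the length satisfy the queue's DMA alignment and padding mask, and no
 * preallocated pages were handed in, roughly:
 *
 *      unsigned int align = queue_dma_alignment(q) | q->dma_pad_mask;
 *      bool direct = !((uaddr | len) & align) && !map_data;
 *
 * Everything else falls back to bio_copy_user() and a kernel bounce
 * buffer.
 */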

/**
 * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:          request queue where request should be inserted
 * @rq:         request structure to fill
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @ubuf:       the user buffer
 * @len:        length of user data
 * @gfp_mask:   memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as the mapped pages may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
                    struct rq_map_data *map_data, void __user *ubuf,
                    unsigned long len, gfp_t gfp_mask)
{
        unsigned long bytes_read = 0;
        struct bio *bio = NULL;
        int ret;

        if (len > (queue_max_hw_sectors(q) << 9))
                return -EINVAL;
        if (!len)
                return -EINVAL;

        if (!ubuf && (!map_data || !map_data->null_mapped))
                return -EINVAL;

        while (bytes_read != len) {
                unsigned long map_len, end, start;

                map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
                end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
                                                                >> PAGE_SHIFT;
                start = (unsigned long)ubuf >> PAGE_SHIFT;

                /*
                 * A bad offset could cause us to require BIO_MAX_PAGES + 1
                 * pages. If this happens we just lower the requested
                 * mapping len by a page so that we can fit.
                 */
                if (end - start > BIO_MAX_PAGES)
                        map_len -= PAGE_SIZE;

                ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
                                        gfp_mask);
                if (ret < 0)
                        goto unmap_rq;
                if (!bio)
                        bio = rq->bio;
                bytes_read += ret;
                ubuf += ret;

                if (map_data)
                        map_data->offset += ret;
        }

        if (!bio_flagged(bio, BIO_USER_MAPPED))
                rq->cmd_flags |= REQ_COPY_USER;

        rq->buffer = NULL;
        return 0;
unmap_rq:
        blk_rq_unmap_user(bio);
        rq->bio = NULL;
        return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
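
/*
 * Editorial illustration (not part of the original source): a typical
 * REQ_TYPE_BLOCK_PC user of this interface, e.g. an SG_IO-style ioctl
 * handler, pairs the map with blk_rq_unmap_user() on the original bio.
 * Rough sketch, with error handling trimmed and ubuf/len/bd_disk/writing
 * as hypothetical caller-supplied values:
 *
 *      rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
 *      rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *      ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *      if (!ret) {
 *              bio = rq->bio;  // save it: completion may change rq->bio
 *              blk_execute_rq(q, bd_disk, rq, 0);
 *              ret = blk_rq_unmap_user(bio);
 *      }
 *      blk_put_request(rq);
 */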

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:          request queue where request should be inserted
 * @rq:         request to map data to
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @iov:        pointer to the iovec
 * @iov_count:  number of elements in the iovec
 * @len:        I/O byte count
 * @gfp_mask:   memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as the mapped pages may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                        struct rq_map_data *map_data, struct sg_iovec *iov,
                        int iov_count, unsigned int len, gfp_t gfp_mask)
{
        struct bio *bio;
        int i, read = rq_data_dir(rq) == READ;
        int unaligned = 0;

        if (!iov || iov_count <= 0)
                return -EINVAL;

        for (i = 0; i < iov_count; i++) {
                unsigned long uaddr = (unsigned long)iov[i].iov_base;

                if (!iov[i].iov_len)
                        return -EINVAL;

                /*
                 * Keep going so we check the length of all segments.
                 */
                if (uaddr & queue_dma_alignment(q))
                        unaligned = 1;
        }

        if (unaligned || (q->dma_pad_mask & len) || map_data)
                bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
                                        gfp_mask);
        else
                bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (bio->bi_size != len) {
                /*
                 * Grab an extra reference to this bio, as bio_unmap_user()
                 * expects to be able to drop it twice as it happens on the
                 * normal IO completion path.
                 */
                bio_get(bio);
                bio_endio(bio, 0);
                __blk_rq_unmap_user(bio);
                return -EINVAL;
        }

        if (!bio_flagged(bio, BIO_USER_MAPPED))
                rq->cmd_flags |= REQ_COPY_USER;

        blk_queue_bounce(q, &bio);
        bio_get(bio);
        blk_rq_bio_prep(q, rq, bio);
        rq->buffer = NULL;
        return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
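
/*
 * Editorial illustration (not part of the original source): scatter/gather
 * callers build a struct sg_iovec array and pass the total byte count in
 * @len; teardown uses the same blk_rq_unmap_user() call as the single
 * buffer case.  Sketch with hypothetical user pointers ubuf0/ubuf1 and
 * lengths len0/len1:
 *
 *      struct sg_iovec iov[2] = {
 *              { .iov_base = ubuf0, .iov_len = len0 },
 *              { .iov_base = ubuf1, .iov_len = len1 },
 *      };
 *
 *      ret = blk_rq_map_user_iov(q, rq, NULL, iov, 2, len0 + len1,
 *                                GFP_KERNEL);
 *      if (!ret) {
 *              bio = rq->bio;  // save before the request completes
 *              blk_execute_rq(q, bd_disk, rq, 0);
 *              ret = blk_rq_unmap_user(bio);
 *      }
 */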

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:        start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
        struct bio *mapped_bio;
        int ret = 0, ret2;

        while (bio) {
                mapped_bio = bio;
                if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
                        mapped_bio = bio->bi_private;

                ret2 = __blk_rq_unmap_user(mapped_bio);
                if (ret2 && !ret)
                        ret = ret2;

                mapped_bio = bio;
                bio = bio->bi_next;
                bio_put(mapped_bio);
        }

        return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:          request queue where request should be inserted
 * @rq:         request to fill
 * @kbuf:       the kernel buffer
 * @len:        length of kernel data
 * @gfp_mask:   memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
                    unsigned int len, gfp_t gfp_mask)
{
        int reading = rq_data_dir(rq) == READ;
        unsigned long addr = (unsigned long) kbuf;
        int do_copy = 0;
        struct bio *bio;
        int ret;

        if (len > (queue_max_hw_sectors(q) << 9))
                return -EINVAL;
        if (!len || !kbuf)
                return -EINVAL;

        do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
        if (do_copy)
                bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
        else
                bio = bio_map_kern(q, kbuf, len, gfp_mask);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (!reading)
                bio->bi_rw |= REQ_WRITE;

        if (do_copy)
                rq->cmd_flags |= REQ_COPY_USER;

        ret = blk_rq_append_bio(q, rq, bio);
        if (unlikely(ret)) {
                /* request is too big */
                bio_put(bio);
                return ret;
        }

        blk_queue_bounce(q, &rq->bio);
        rq->buffer = NULL;
        return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
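
/*
 * Editorial illustration (not part of the original source): kernel-internal
 * callers (SCSI command emulation, for instance) map a kernel buffer and do
 * not issue an explicit unmap; the bio is torn down when the request
 * completes.  Sketch with hypothetical buffer/bufflen/bd_disk:
 *
 *      rq = blk_get_request(q, READ, GFP_KERNEL);
 *      rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *      ret = blk_rq_map_kern(q, rq, buffer, bufflen, GFP_KERNEL);
 *      if (!ret)
 *              blk_execute_rq(q, bd_disk, rq, 0);
 *      blk_put_request(rq);
 */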