/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

#include "blk.h"

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->data_len += bio->bi_size;
	}
	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
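/*
 * Illustrative sketch, not part of the original file: one way a caller
 * might chain two kernel buffers onto a single request with
 * blk_rq_append_bio().  The function name and buffers are hypothetical;
 * write-direction setup (bi_rw) and error unwinding of the first bio are
 * omitted for brevity.
 */
static int example_append_two_buffers(struct request_queue *q,
				      struct request *rq,
				      void *buf1, unsigned int len1,
				      void *buf2, unsigned int len2)
{
	struct bio *bio;
	int ret;

	bio = bio_map_kern(q, buf1, len1, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* first bio: blk_rq_append_bio() preps the request via blk_rq_bio_prep() */
	ret = blk_rq_append_bio(q, rq, bio);
	if (ret)
		return ret;

	bio = bio_map_kern(q, buf2, len2, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* second bio: appended at rq->biotail if the queue's merge limits allow */
	return blk_rq_append_bio(q, rq, bio);
}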
static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
			     void __user *ubuf, unsigned int len)
{
	unsigned long uaddr;
	struct bio *bio, *orig_bio;
	int reading, ret;

	reading = rq_data_dir(rq) == READ;

	/*
	 * if alignment requirement is satisfied, map in user pages for
	 * direct dma. else, set up kernel bounce buffers
	 */
	uaddr = (unsigned long) ubuf;
	if (!(uaddr & queue_dma_alignment(q)) &&
	    !(len & queue_dma_alignment(q)))
		bio = bio_map_user(q, NULL, uaddr, len, reading);
	else
		bio = bio_copy_user(q, uaddr, len, reading);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (!ret)
		return bio->bi_size;

	/* if it was bounced we must call the end io function */
	bio_endio(bio, 0);
	__blk_rq_unmap_user(orig_bio);
	bio_put(bio);
	return ret;
}
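/*
 * Illustrative sketch, not part of the original file: the alignment test
 * used by __blk_rq_map_user() above, pulled out as a hypothetical helper.
 * queue_dma_alignment() returns a mask (alignment - 1, e.g. 511 for
 * 512-byte alignment), so a buffer qualifies for zero-copy mapping only
 * if neither its address nor its length has any bits set inside the mask.
 */
static inline int example_dma_aligned(struct request_queue *q,
				      void __user *ubuf, unsigned int len)
{
	unsigned long mask = queue_dma_alignment(q);

	return !((unsigned long)ubuf & mask) && !(len & mask);
}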
/**
 * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request structure to fill
 * @ubuf:	the user buffer
 * @len:	length of user data
 *
 * Description:
 *    Data will be mapped directly for zero copy io, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of io, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    void __user *ubuf, unsigned long len)
{
	unsigned long bytes_read = 0;
	struct bio *bio = NULL;
	int ret;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !ubuf)
		return -EINVAL;

	while (bytes_read != len) {
		unsigned long map_len, end, start;

		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
		start = (unsigned long)ubuf >> PAGE_SHIFT;

		/*
		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
		 * pages. If this happens we just lower the requested
		 * mapping len by a page so that we can fit
		 */
		if (end - start > BIO_MAX_PAGES)
			map_len -= PAGE_SIZE;

		ret = __blk_rq_map_user(q, rq, ubuf, map_len);
		if (ret < 0)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
		bytes_read += ret;
		ubuf += ret;
	}

	/*
	 * __blk_rq_map_user() copies the buffers if starting address
	 * or length isn't aligned.  As the copied buffer is always
	 * page aligned, we know that there's enough room for padding.
	 * Extend the last bio and update rq->data_len accordingly.
	 *
	 * On unmap, bio_uncopy_user() will use the unmodified
	 * bio_map_data pointed to by bio->bi_private.
	 */
	if (len & queue_dma_alignment(q)) {
		unsigned int pad_len = (queue_dma_alignment(q) & ~len) + 1;
		struct bio *bio = rq->biotail;

		bio->bi_io_vec[bio->bi_vcnt - 1].bv_len += pad_len;
		bio->bi_size += pad_len;
	}

	rq->buffer = rq->data = NULL;
	return 0;
unmap_rq:
	blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
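/*
 * Illustrative sketch, not part of the original file: how an ioctl-style
 * caller (similar to the SG_IO path) might map a user buffer into a PC
 * request.  Names are hypothetical and command setup is omitted.  The
 * original rq->bio is handed back to the caller because blk_rq_unmap_user()
 * will need it once the request has completed.
 */
static int example_map_user_buffer(struct request_queue *q,
				   void __user *ubuf, unsigned long len,
				   struct request **out_rq,
				   struct bio **out_bio)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	ret = blk_rq_map_user(q, rq, ubuf, len);
	if (ret) {
		blk_put_request(rq);
		return ret;
	}

	/* remember the original bio; completion may change rq->bio */
	*out_bio = rq->bio;
	*out_rq = rq;
	return 0;
}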
/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @iov:	pointer to the iovec
 * @iov_count:	number of elements in the iovec
 * @len:	I/O byte count
 *
 * Description:
 *    Data will be mapped directly for zero copy io, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of io, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct sg_iovec *iov, int iov_count, unsigned int len)
{
	struct bio *bio;

	if (!iov || iov_count <= 0)
		return -EINVAL;

	/* we don't allow misaligned data like bio_map_user() does.  If the
	 * user is using sg, they're expected to know the alignment constraints
	 * and respect them accordingly */
	bio = bio_map_user_iov(q, NULL, iov, iov_count,
			       rq_data_dir(rq) == READ);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio->bi_size != len) {
		bio_endio(bio, 0);
		bio_unmap_user(bio);
		return -EINVAL;
	}

	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
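/*
 * Illustrative sketch, not part of the original file: mapping an iovec-based
 * user scatter list into a PC request, roughly the pattern used by the
 * SG_IO ioctl.  The iovec is assumed to have been copied in from userspace
 * already, total_len to be the sum of its segment lengths, and the names
 * are hypothetical.
 */
static int example_map_user_iovec(struct request_queue *q, struct request *rq,
				  struct sg_iovec *iov, int iov_count,
				  unsigned int total_len)
{
	int ret;

	ret = blk_rq_map_user_iov(q, rq, iov, iov_count, total_len);
	if (ret)
		return ret;

	/*
	 * Misaligned segments are rejected rather than bounced into a copy,
	 * and a total_len that doesn't match the mapped size fails with
	 * -EINVAL.  On success rq->bio holds the directly mapped pages and
	 * must later be handed to blk_rq_unmap_user().
	 */
	return 0;
}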
/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the io completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
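/*
 * Illustrative sketch, not part of the original file: the execute-and-unmap
 * pattern expected by blk_rq_unmap_user().  The original rq->bio is saved
 * before the request is executed because completion may change rq->bio;
 * the saved pointer is what walks the mapped (or bounced) bio chain
 * afterwards.  The gendisk argument and names are assumed from the
 * caller's context.
 */
static int example_execute_and_unmap(struct request_queue *q,
				     struct gendisk *disk, struct request *rq)
{
	struct bio *bio = rq->bio;	/* must be saved before execution */
	int err, unmap_err;

	err = blk_execute_rq(q, disk, rq, 0);

	unmap_err = blk_rq_unmap_user(bio);
	blk_put_request(rq);

	return err ? err : unmap_err;
}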
/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	struct bio *bio;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	bio = bio_map_kern(q, kbuf, len, gfp_mask);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= (1 << BIO_RW);

	blk_rq_bio_prep(q, rq, bio);

	blk_queue_bounce(q, &rq->bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
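/*
 * Illustrative sketch, not part of the original file: issuing a command
 * that transfers a kernel buffer, roughly what scsi_execute()-style helpers
 * do.  Command setup is omitted and names are hypothetical.  Unlike the
 * user mapping paths there is no blk_rq_unmap_user() counterpart; the bio
 * mapped by blk_rq_map_kern() is released when the request completes.
 */
static int example_issue_kernel_buffer(struct request_queue *q,
				       struct gendisk *disk,
				       void *buf, unsigned int len, int write)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, write ? WRITE : READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	err = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (!err)
		err = blk_execute_rq(q, disk, rq, 0);

	blk_put_request(rq);
	return err;
}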