xref: /openbmc/linux/block/blk-map.c (revision aebf526b)
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

/*
 * Append a bio to a passthrough request.  Only works if the bio can be
 * merged into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
	if (!rq->bio) {
		blk_rq_bio_prep(rq->q, rq, bio);
	} else {
		if (!ll_back_merge_fn(rq->q, rq, bio))
			return -EINVAL;

		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->__data_len += bio->bi_iter.bi_size;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
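
/*
 * Illustrative sketch, not part of the original file: a minimal way a
 * driver might attach a pre-built bio to a passthrough request with
 * blk_rq_append_bio().  The helper name example_attach_bio() is
 * hypothetical and error handling is reduced to the essentials.
 */
static int __maybe_unused example_attach_bio(struct request *rq,
					     struct bio *bio)
{
	int ret;

	/* Fails with -EINVAL if the bio cannot be merged into @rq. */
	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		return ret;

	/* On success, @rq's data length now accounts for the new bio. */
	return 0;
}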

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user_iov(struct request *rq,
		struct rq_map_data *map_data, struct iov_iter *iter,
		gfp_t gfp_mask, bool copy)
{
	struct request_queue *q = rq->q;
	struct bio *bio, *orig_bio;
	int ret;

	if (copy)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	if (map_data && map_data->null_mapped)
		bio_set_flag(bio, BIO_NULL_MAPPED);

	iov_iter_advance(iter, bio->bi_iter.bi_size);
	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later, so we have to get a ref to prevent it from being freed.
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(rq, bio);
	if (ret) {
		bio_endio(bio);
		__blk_rq_unmap_user(orig_bio);
		bio_put(bio);
		return ret;
	}

	return 0;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret;

	if (!iter_is_iovec(iter))
		goto fail;

	if (map_data)
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->rq_flags |= RQF_COPY_USER;
	return 0;

unmap_rq:
	__blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return -EINVAL;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
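
/*
 * Illustrative sketch, not part of the original file: mapping a user
 * iovec array into a passthrough request, loosely mirroring how
 * SG_IO-style ioctls use this interface.  example_map_uiov(), @uvec
 * and @nr_segs are hypothetical; the UIO_FASTIOV cap only keeps the
 * sketch free of heap allocation and is not a general requirement.
 */
static int __maybe_unused example_map_uiov(struct request *rq,
					   const struct iovec __user *uvec,
					   unsigned int nr_segs)
{
	struct iovec fast_iov[UIO_FASTIOV];
	struct iovec *iov = fast_iov;
	struct iov_iter i;
	ssize_t bytes;

	if (nr_segs > UIO_FASTIOV)
		return -EINVAL;

	/* Copy in and validate the user iovec array, build an iterator. */
	bytes = import_iovec(rq_data_dir(rq), uvec, nr_segs, UIO_FASTIOV,
			     &iov, &i);
	if (bytes < 0)
		return bytes;

	/* Map (or copy) the described user memory into @rq. */
	return blk_rq_map_user_iov(rq->q, rq, NULL, &i, GFP_KERNEL);
}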

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	       start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio, saved when blk_rq_map_user() returned,
 *    since I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
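
/*
 * Illustrative sketch, not part of the original file: the usual
 * map/execute/unmap life cycle around blk_rq_map_user() and
 * blk_rq_unmap_user().  The original rq->bio is saved before the
 * request runs because completion may change rq->bio.
 * example_user_io() is hypothetical and assumes a queue whose driver
 * accepts driver-private (REQ_OP_DRV_*) requests.
 */
static int __maybe_unused example_user_io(struct request_queue *q,
					  void __user *ubuf,
					  unsigned long len, bool to_dev)
{
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_get_request(q, to_dev ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
			     GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		goto out_put;

	bio = rq->bio;			/* save the original bio */
	blk_execute_rq(q, NULL, rq, 0);	/* run and wait for completion */
	ret = blk_rq_unmap_user(bio);	/* pass the saved bio back */
out_put:
	blk_put_request(rq);
	return ret;
}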

/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	if (do_copy)
		rq->rq_flags |= RQF_COPY_USER;

	ret = blk_rq_append_bio(rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
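
/*
 * Illustrative sketch, not part of the original file: attaching a
 * kernel buffer to a passthrough request with blk_rq_map_kern() and
 * executing it.  example_kern_io() is hypothetical and, like the
 * sketch above, assumes a driver that accepts REQ_OP_DRV_* requests.
 */
static int __maybe_unused example_kern_io(struct request_queue *q,
					  void *kbuf, unsigned int len,
					  bool to_dev)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, to_dev ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
			     GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/*
	 * blk_rq_map_kern() bounces the data when @kbuf is unaligned or
	 * on the stack; otherwise it maps the buffer directly.
	 */
	ret = blk_rq_map_kern(q, rq, kbuf, len, GFP_KERNEL);
	if (!ret)
		blk_execute_rq(q, NULL, rq, 0);

	blk_put_request(rq);
	return ret;
}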