xref: /openbmc/linux/block/blk-map.c (revision 2884d0be)
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

/*
 * Append a bio to a passthrough request.  Only works if the bio can be
 * merged into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
	blk_queue_bounce(rq->q, &bio);

	if (!rq->bio) {
		blk_rq_bio_prep(rq->q, rq, bio);
	} else {
		if (!ll_back_merge_fn(rq->q, rq, bio))
			return -EINVAL;

		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->__data_len += bio->bi_iter.bi_size;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
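
/*
 * Example (illustrative sketch, not part of the upstream file): how a driver
 * that has already built its own bio might attach it to a passthrough
 * request with blk_rq_append_bio().  The helper name example_append_kbuf()
 * and its use of bio_map_kern() are assumptions made for illustration; real
 * callers normally go through blk_rq_map_user() or blk_rq_map_kern() in
 * this file.
 */
#if 0	/* usage sketch, not compiled */
static int example_append_kbuf(struct request *rq, void *kbuf,
			       unsigned int len, gfp_t gfp_mask)
{
	struct bio *bio;
	int ret;

	/* Build a bio over an existing kernel buffer. */
	bio = bio_map_kern(rq->q, kbuf, len, gfp_mask);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* Make the bio's operation match the request before appending. */
	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	/* Fails with -EINVAL if the bio cannot be merged into rq. */
	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		bio_put(bio);
	return ret;
}
#endif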

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user_iov(struct request *rq,
		struct rq_map_data *map_data, struct iov_iter *iter,
		gfp_t gfp_mask, bool copy)
{
	struct request_queue *q = rq->q;
	struct bio *bio, *orig_bio;
	int ret;

	if (copy)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	orig_bio = bio;

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	ret = blk_rq_append_bio(rq, bio);
	bio_get(bio);
	if (ret) {
		bio_endio(bio);
		__blk_rq_unmap_user(orig_bio);
		bio_put(bio);
		return ret;
	}

	return 0;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret;

	if (!iter_is_iovec(iter))
		goto fail;

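	/*
	 * Decide whether the data can be mapped zero-copy or must be bounced
	 * through a kernel copy: caller-supplied pages (map_data), iovecs
	 * that violate the queue's DMA alignment/padding, or segment gaps
	 * that cross the queue's virt boundary all force a copy.
	 */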
	if (map_data)
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->rq_flags |= RQF_COPY_USER;
	return 0;

unmap_rq:
	__blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return -EINVAL;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
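
/*
 * Example (illustrative sketch, not part of the upstream file): mapping a
 * user iovec array into an already-allocated passthrough request, roughly
 * as the SG_IO ioctl path does.  The helper name example_map_user_iovecs()
 * and its parameters are assumptions for illustration; the request's
 * lifetime and execution are left to the caller.
 */
#if 0	/* usage sketch, not compiled */
static int example_map_user_iovecs(struct request *rq,
				   const struct iovec __user *uvec,
				   int nr_segs, size_t max_len)
{
	struct iovec *iov = NULL;
	struct iov_iter i;
	int ret;

	ret = import_iovec(rq_data_dir(rq), uvec, nr_segs, 0, &iov, &i);
	if (ret < 0)
		return ret;

	/* Cap the transfer at the caller's limit. */
	iov_iter_truncate(&i, max_len);

	ret = blk_rq_map_user_iov(rq->q, rq, NULL, &i, GFP_KERNEL);
	kfree(iov);	/* import_iovec() may have allocated the iovec array */
	return ret;
}
#endif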

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);
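
/*
 * Example (illustrative sketch, not part of the upstream file): the common
 * single-buffer case.  A driver ioctl that already holds a passthrough
 * request can map the user's flat data buffer with one call; the iovec
 * plumbing is handled internally via import_single_range().  The helper
 * name is made up for the example.
 */
#if 0	/* usage sketch, not compiled */
static int example_map_flat_buffer(struct request *rq,
				   void __user *ubuf, unsigned long len)
{
	/*
	 * NULL map_data lets the block layer allocate bounce pages itself;
	 * the data direction is taken from the request, not passed here.
	 */
	return blk_rq_map_user(rq->q, rq, NULL, ubuf, len, GFP_KERNEL);
}
#endif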

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	       start of bio list
 *
 * Description:
 *    Unmap a request previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio, saved after blk_rq_map_user() returned,
 *    since the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
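		/*
		 * A bounced bio keeps a pointer to the original user-mapped
		 * bio in ->bi_private; that is the one that must be unmapped
		 * or uncopied, not the bounce clone itself.
		 */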
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
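
/*
 * Example (illustrative sketch, not part of the upstream file): the pairing
 * the kerneldoc above requires.  rq->bio must be saved *before* the request
 * is executed, because completion may advance or clear rq->bio; the saved
 * pointer is what gets handed back to blk_rq_unmap_user().  blk_execute_rq()
 * is used here on the assumption of a synchronous caller; the helper name
 * is illustrative.
 */
#if 0	/* usage sketch, not compiled */
static int example_execute_and_unmap(struct request_queue *q,
				     struct request *rq)
{
	struct bio *bio = rq->bio;	/* save before execution */

	blk_execute_rq(q, NULL, rq, 0);	/* wait for the passthrough command */

	/* copy back / release the user pages mapped earlier */
	return blk_rq_unmap_user(bio);
}
#endif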

/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

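	/*
	 * Buffers that don't meet the queue's DMA alignment, or that live on
	 * the kernel stack (which may be vmalloc'ed and unsuitable for DMA),
	 * are copied into a bounce bio instead of being mapped directly.
	 */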
	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	if (do_copy)
		rq->rq_flags |= RQF_COPY_USER;

	ret = blk_rq_append_bio(rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
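
/*
 * Example (illustrative sketch, not part of the upstream file): mapping a
 * kmalloc'ed buffer for a passthrough command.  Heap allocations keep
 * blk_rq_map_kern() on the zero-copy path, whereas stack buffers would take
 * the bounce copy above.  The helper name is an assumption for the example.
 */
#if 0	/* usage sketch, not compiled */
static int example_map_kernel_buffer(struct request *rq, unsigned int len)
{
	void *kbuf;
	int ret;

	kbuf = kmalloc(len, GFP_KERNEL);	/* heap, not stack */
	if (!kbuf)
		return -ENOMEM;

	ret = blk_rq_map_kern(rq->q, rq, kbuf, len, GFP_KERNEL);
	if (ret)
		kfree(kbuf);
	/* on success the buffer must stay allocated until the request completes */
	return ret;
}
#endif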