xref: /openbmc/linux/block/blk-map.c (revision 56d06fa2)
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

/*
 * Return true if appending @cur after @prv would leave a gap that the
 * queue's virtual boundary mask forbids inside a single segment.
 */
static bool iovec_gap_to_prv(struct request_queue *q,
			     struct iovec *prv, struct iovec *cur)
{
	unsigned long prev_end;

	if (!queue_virt_boundary(q))
		return false;

	if (prv->iov_base == NULL && prv->iov_len == 0)
		/* prv is not set yet - nothing to check against */
		return false;

	prev_end = (unsigned long)(prv->iov_base + prv->iov_len);

	return (((unsigned long)cur->iov_base & queue_virt_boundary(q)) ||
		prev_end & queue_virt_boundary(q));
}
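
/*
 * Worked example (illustrative, not part of the original file): with a
 * 4K virtual boundary, queue_virt_boundary(q) == 0xfff.  The pair
 *
 *	prv = { .iov_base = (void *)0x10000, .iov_len = 0x800 }
 *	cur = { .iov_base = (void *)0x20000, .iov_len = 0x800 }
 *
 * forces a copy: prv ends at 0x10800, and 0x10800 & 0xfff != 0, so the
 * device would see a hole in the middle of a segment.  Two iovecs that
 * tile boundary-aligned ranges back to back pass the check.
 */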

/*
 * Append a bio to a passthrough request, starting the request with it
 * if the request is still empty.
 */
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->__data_len += bio->bi_iter.bi_size;
	}
	return 0;
}
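
/*
 * Illustrative sketch (an assumption, not from the original file): a
 * caller that already owns a kernel buffer could build a bio with
 * bio_map_kern() and attach it like this; the queue's merge rules
 * decide whether a second bio may be appended.
 */
static int example_append_kernel_bio(struct request_queue *q,
				     struct request *rq,
				     void *data, unsigned int len)
{
	struct bio *bio = bio_map_kern(q, data, len, GFP_KERNEL);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	return blk_rq_append_bio(q, rq, bio);
}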

/*
 * Undo the mapping done by bio_map_user_iov()/bio_copy_user_iov() for
 * a single bio.
 */
static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user_iov(struct request *rq,
		struct rq_map_data *map_data, struct iov_iter *iter,
		gfp_t gfp_mask, bool copy)
{
	struct request_queue *q = rq->q;
	struct bio *bio, *orig_bio;
	int ret;

	if (copy)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio_set_flag(bio, BIO_NULL_MAPPED);

	iov_iter_advance(iter, bio->bi_iter.bi_size);
	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (ret) {
		bio_endio(bio);
		__blk_rq_unmap_user(orig_bio);
		bio_put(bio);
		return ret;
	}

	return 0;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0};
	bool copy;
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret;

	if (!iter || !iter->count)
		return -EINVAL;

	copy = (q->dma_pad_mask & iter->count) || map_data;

	iov_for_each(iov, i, *iter) {
		unsigned long uaddr = (unsigned long) iov.iov_base;

		if (!iov.iov_len)
			return -EINVAL;

		/*
		 * Keep going so we check length of all segments
		 */
		if ((uaddr & queue_dma_alignment(q)) ||
		    iovec_gap_to_prv(q, &prv, &iov))
			copy = true;

		prv.iov_base = iov.iov_base;
		prv.iov_len = iov.iov_len;
	}

	i = *iter;
	do {
		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;
	return 0;

unmap_rq:
	__blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
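
/*
 * Illustrative sketch (an assumption, not from the original file): an
 * SG_IO style caller might build the iov_iter with import_iovec() and
 * map it roughly like this.  Request setup and submission are omitted.
 */
static int example_map_user_iovecs(struct request_queue *q,
				   struct request *rq,
				   const struct iovec __user *uvec,
				   unsigned int nr_segs)
{
	struct iovec fast_iov[UIO_FASTIOV];
	struct iovec *iov = fast_iov;
	struct iov_iter iter;
	int ret;

	ret = import_iovec(rq_data_dir(rq), uvec, nr_segs,
			   UIO_FASTIOV, &iov, &iter);
	if (ret < 0)
		return ret;

	ret = blk_rq_map_user_iov(q, rq, NULL, &iter, GFP_KERNEL);
	kfree(iov);	/* NULL when the on-stack fast_iov array was used */
	return ret;
}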

/*
 * Simple wrapper around blk_rq_map_user_iov() for a single contiguous
 * range of user memory.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);
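
/*
 * Illustrative sketch (an assumption, not from the original file):
 * mapping one user buffer for a passthrough command.  rq->bio is saved
 * before submission because completion may change it, and
 * blk_rq_unmap_user() needs the original.
 */
static int example_map_single_buffer(struct request_queue *q,
				     struct request *rq,
				     void __user *ubuf, unsigned long len)
{
	struct bio *bio;
	int ret;

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		return ret;

	bio = rq->bio;

	/* ... submit rq and wait, e.g. via blk_execute_rq() ... */

	return blk_rq_unmap_user(bio);
}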

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	       start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio, saved after blk_rq_map_user() returned,
 *    since I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	/* copy if the buffer is misaligned for the queue or lives on the stack */
	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (!reading)
		bio->bi_rw |= REQ_WRITE;

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	ret = blk_rq_append_bio(q, rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
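
/*
 * Illustrative sketch (an assumption, not from the original file):
 * sending a heap buffer as passthrough data.  A stack buffer would
 * still work, but would silently take the bio_copy_kern() path.
 */
static int example_map_kernel_buffer(struct request_queue *q,
				     struct request *rq,
				     unsigned int len, gfp_t gfp)
{
	void *buf = kmalloc(len, gfp);	/* assumes <linux/slab.h> */
	int ret;

	if (!buf)
		return -ENOMEM;

	/* ... fill buf with the command payload ... */

	ret = blk_rq_map_kern(q, rq, buf, len, gfp);
	if (ret)
		kfree(buf);
	/* on success, free buf once the request has completed */
	return ret;
}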