xref: /openbmc/linux/block/blk-map.c (revision 9284bcf4)
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

#include "blk.h"

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->__data_len += bio->bi_size;
	}
	return 0;
}

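/*
 * Undo the mapping of a single bio: release the pinned user pages if it
 * was mapped directly, or copy the bounce buffer back to user space and
 * free it if the data was copied.
 */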
static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

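/*
 * Map one chunk of the user buffer into @rq, either by pinning the user
 * pages for direct DMA or by setting up a kernel bounce buffer.  Returns
 * the number of bytes added to the request on success, or a negative
 * errno on failure.
 */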
static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
			     struct rq_map_data *map_data, void __user *ubuf,
			     unsigned int len, gfp_t gfp_mask)
{
	unsigned long uaddr;
	struct bio *bio, *orig_bio;
	int reading, ret;

	reading = rq_data_dir(rq) == READ;

	/*
	 * if alignment requirement is satisfied, map in user pages for
	 * direct dma. else, set up kernel bounce buffers
	 */
	uaddr = (unsigned long) ubuf;
	if (blk_rq_aligned(q, uaddr, len) && !map_data)
		bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
	else
		bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio->bi_flags |= (1 << BIO_NULL_MAPPED);

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (!ret)
		return bio->bi_size;

	/* if it was bounced we must call the end io function */
	bio_endio(bio, 0);
	__blk_rq_unmap_user(orig_bio);
	bio_put(bio);
	return ret;
}

/**
 * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request structure to fill
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @ubuf:	the user buffer
 * @len:	length of user data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	unsigned long bytes_read = 0;
	struct bio *bio = NULL;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len)
		return -EINVAL;

	if (!ubuf && (!map_data || !map_data->null_mapped))
		return -EINVAL;

	while (bytes_read != len) {
		unsigned long map_len, end, start;

		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
		start = (unsigned long)ubuf >> PAGE_SHIFT;

		/*
		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
		 * pages. If this happens we just lower the requested
		 * mapping len by a page so that we can fit
		 */
		if (end - start > BIO_MAX_PAGES)
			map_len -= PAGE_SIZE;

		ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
					gfp_mask);
		if (ret < 0)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
		bytes_read += ret;
		ubuf += ret;

		if (map_data)
			map_data->offset += ret;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	rq->buffer = NULL;
	return 0;
unmap_rq:
	blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
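
/*
 * Example (illustrative sketch): a typical REQ_TYPE_BLOCK_PC caller maps a
 * user buffer into a freshly allocated request, executes it, and unmaps
 * using the bio recorded at map time, since completion may have advanced
 * rq->bio.  The helper name and the "disk" argument are illustrative, the
 * command setup is elided, and error handling is trimmed.
 */
static int __maybe_unused example_map_user_buffer(struct request_queue *q,
						  struct gendisk *disk,
						  void __user *ubuf,
						  unsigned long len)
{
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	/* the SCSI CDB would be copied into rq->cmd here */

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret) {
		blk_put_request(rq);
		return ret;
	}

	bio = rq->bio;			/* remember the original bio */
	blk_execute_rq(q, disk, rq, 0);	/* issue and wait for completion */

	ret = blk_rq_unmap_user(bio);	/* must still be in process context */
	blk_put_request(rq);
	return ret;
}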

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @iov:	pointer to the iovec
 * @iov_count:	number of elements in the iovec
 * @len:	I/O byte count
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data, struct sg_iovec *iov,
			int iov_count, unsigned int len, gfp_t gfp_mask)
{
	struct bio *bio;
	int i, read = rq_data_dir(rq) == READ;
	int unaligned = 0;

	if (!iov || iov_count <= 0)
		return -EINVAL;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;

		if (uaddr & queue_dma_alignment(q)) {
			unaligned = 1;
			break;
		}
		if (!iov[i].iov_len)
			return -EINVAL;
	}

	if (unaligned || (q->dma_pad_mask & len) || map_data)
		bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
					gfp_mask);
	else
		bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio->bi_size != len) {
		/*
		 * Grab an extra reference to this bio, as bio_unmap_user()
		 * expects to be able to drop it twice as it happens on the
		 * normal IO completion path
		 */
		bio_get(bio);
		bio_endio(bio, 0);
		__blk_rq_unmap_user(bio);
		return -EINVAL;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	blk_queue_bounce(q, &bio);
	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	rq->buffer = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
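
/*
 * Example (illustrative sketch): mapping a scatter/gather list supplied by
 * user space, as the SG_IO ioctl path does.  "kiov" is assumed to be a
 * kernel copy of the user's sg_iovec array (the iov_base members still
 * point at user memory); request setup, execution and unmapping follow the
 * same pattern as the blk_rq_map_user() example above.
 */
static int __maybe_unused example_map_user_iov(struct request_queue *q,
					       struct request *rq,
					       struct sg_iovec *kiov,
					       int count, unsigned int len)
{
	/* len must equal the sum of the iovec lengths or -EINVAL is returned */
	return blk_rq_map_user_iov(q, rq, NULL, kiov, count, len, GFP_KERNEL);
}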

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	       start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= REQ_WRITE;

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	ret = blk_rq_append_bio(q, rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	rq->buffer = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
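
/*
 * Example (illustrative sketch): issuing a passthrough command backed by a
 * kernel buffer.  "buf" is assumed to be caller-allocated heap memory
 * (a stack buffer would force the copy path via object_is_on_stack());
 * the "disk" argument and the elided CDB setup are illustrative.
 */
static int __maybe_unused example_map_kernel_buffer(struct request_queue *q,
						    struct gendisk *disk,
						    void *buf, unsigned int len)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	/* the SCSI CDB would be copied into rq->cmd here */

	ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (!ret)
		blk_execute_rq(q, disk, rq, 0);

	blk_put_request(rq);
	return ret;
}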