xref: /openbmc/linux/block/blk-map.c (revision 87904074)
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

#include "blk.h"

/*
 * Append a bio to a request: start a new bio chain if the request is
 * empty, otherwise merge at the tail if the queue's limits allow it.
 */
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->data_len += bio->bi_size;
	}
	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);

/*
 * Undo the work done for a single bio: unpin the pages of a user-mapped
 * bio, or copy the data back and free the pages of a copied bio.
 */
static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
			     struct rq_map_data *map_data, void __user *ubuf,
			     unsigned int len, gfp_t gfp_mask)
{
	unsigned long uaddr;
	struct bio *bio, *orig_bio;
	int reading, ret;

	reading = rq_data_dir(rq) == READ;

	/*
	 * If the alignment requirement is satisfied, map in the user pages
	 * for direct DMA. Otherwise, set up kernel bounce buffers.
	 */
	uaddr = (unsigned long) ubuf;
	if (blk_rq_aligned(q, ubuf, len) && !map_data)
		bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
	else
		bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later, so we have to get a ref to prevent it from being freed.
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (!ret)
		return bio->bi_size;

	/* if it was bounced we must call the end io function */
	bio_endio(bio, 0);
	__blk_rq_unmap_user(orig_bio);
	bio_put(bio);
	return ret;
}

/**
 * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request structure to fill
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @ubuf:	the user buffer
 * @len:	length of user data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	unsigned long bytes_read = 0;
	struct bio *bio = NULL;
	int ret;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !ubuf)
		return -EINVAL;

	while (bytes_read != len) {
		unsigned long map_len, end, start;

		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
		start = (unsigned long)ubuf >> PAGE_SHIFT;

		/*
		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
		 * pages. If this happens we just lower the requested
		 * mapping len by a page so that we can fit.
		 */
		if (end - start > BIO_MAX_PAGES)
			map_len -= PAGE_SIZE;

		ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
					gfp_mask);
		if (ret < 0)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
		bytes_read += ret;
		ubuf += ret;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	rq->buffer = rq->data = NULL;
	return 0;
unmap_rq:
	blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
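
/*
 * Example usage (an illustrative sketch, not taken from this file): a
 * typical REQ_TYPE_BLOCK_PC caller, such as an SG_IO style ioctl path,
 * maps the user buffer, executes the request and then unmaps it.  Note
 * that rq->bio is saved before execution, because completion may advance
 * rq->bio; the saved pointer is what blk_rq_unmap_user() needs.  The
 * identifiers q, ubuf, len and rw are assumed to come from the caller;
 * CDB, sense-buffer and timeout setup are omitted.
 *
 *	struct request *rq;
 *	struct bio *bio;
 *	int ret;
 *
 *	rq = blk_get_request(q, rw, GFP_KERNEL);
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *
 *	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *	if (ret) {
 *		blk_put_request(rq);
 *		return ret;
 *	}
 *
 *	bio = rq->bio;
 *	blk_execute_rq(q, NULL, rq, 0);
 *	ret = blk_rq_unmap_user(bio);
 *	blk_put_request(rq);
 */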

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @iov:	pointer to the iovec
 * @iov_count:	number of elements in the iovec
 * @len:	I/O byte count
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data, struct sg_iovec *iov,
			int iov_count, unsigned int len, gfp_t gfp_mask)
{
	struct bio *bio;
	int i, read = rq_data_dir(rq) == READ;
	int unaligned = 0;

	if (!iov || iov_count <= 0)
		return -EINVAL;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;

		if (uaddr & queue_dma_alignment(q)) {
			unaligned = 1;
			break;
		}
	}

	if (unaligned || (q->dma_pad_mask & len) || map_data)
		bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
					gfp_mask);
	else
		bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio->bi_size != len) {
		bio_endio(bio, 0);
		bio_unmap_user(bio);
		return -EINVAL;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	blk_queue_bounce(q, &bio);
	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
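
/*
 * Example usage (an illustrative sketch, not taken from this file):
 * mapping a scattered user buffer described by an sg_iovec array, as a
 * SCSI generic style caller might do.  The iovec array, element count
 * and total byte count are assumed to have been copied in and validated
 * by the caller; len must equal the sum of the iov_len fields or the
 * mapping above is rejected with -EINVAL.  Request setup, execution and
 * unmapping follow the same pattern as for blk_rq_map_user() above.
 *
 *	struct sg_iovec iov[2];
 *	...
 *	ret = blk_rq_map_user_iov(q, rq, NULL, iov, 2, len, GFP_KERNEL);
 *	if (ret)
 *		goto out_put_request;
 *	bio = rq->bio;
 *	blk_execute_rq(q, NULL, rq, 0);
 *	ret = blk_rq_unmap_user(bio);
 */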

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	       start of bio list
 *
 * Description:
 *    Unmap an rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel buffer data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	int do_copy = 0;
	struct bio *bio;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= (1 << BIO_RW);

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	blk_rq_bio_prep(q, rq, bio);
	blk_queue_bounce(q, &rq->bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
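
/*
 * Example usage (an illustrative sketch, not taken from this file):
 * issuing a command with a kernel buffer, roughly the pattern used by
 * the SCSI midlayer's scsi_execute().  The queue, buffer and length are
 * assumed to come from the caller; CDB, sense and timeout setup are
 * omitted.  Misaligned or on-stack buffers are handled transparently by
 * the bio_copy_kern() path above, and the mapped bio is cleaned up when
 * the request completes, so no separate unmap step is needed.
 *
 *	rq = blk_get_request(q, data_direction == DMA_TO_DEVICE ?
 *			     WRITE : READ, GFP_NOIO);
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *
 *	if (bufflen) {
 *		ret = blk_rq_map_kern(q, rq, buffer, bufflen, GFP_NOIO);
 *		if (ret)
 *			goto out_put_request;
 *	}
 *
 *	blk_execute_rq(q, NULL, rq, 0);
 *	blk_put_request(rq);
 */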
305