xref: /openbmc/linux/block/blk-map.c (revision 07359fc6)
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

#include "blk.h"

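/*
 * blk_rq_append_bio - append a bio to a request
 *
 * If @rq carries no data yet, prep it with @bio; otherwise try to
 * back-merge @bio behind rq->biotail, returning -EINVAL if the queue's
 * merge constraints (ll_back_merge_fn) reject it.
 */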
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->data_len += bio->bi_size;
	}
	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
			     void __user *ubuf, unsigned int len)
{
	unsigned long uaddr;
	unsigned int alignment;
	struct bio *bio, *orig_bio;
	int reading, ret;

	reading = rq_data_dir(rq) == READ;

	/*
	 * If the alignment requirement is satisfied, map in the user pages
	 * for direct DMA; otherwise, set up kernel bounce buffers.
	 */
	uaddr = (unsigned long) ubuf;
	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	if (!(uaddr & alignment) && !(len & alignment))
		bio = bio_map_user(q, NULL, uaddr, len, reading);
	else
		bio = bio_copy_user(q, uaddr, len, reading);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later, so we have to get a ref to prevent it from being freed.
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (!ret)
		return bio->bi_size;

	/* if it was bounced we must call the end_io function */
	bio_endio(bio, 0);
	__blk_rq_unmap_user(orig_bio);
	bio_put(bio);
	return ret;
}

/**
 * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request structure to fill
 * @ubuf:	the user buffer
 * @len:	length of user data
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    void __user *ubuf, unsigned long len)
{
	unsigned long bytes_read = 0;
	struct bio *bio = NULL;
	int ret;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !ubuf)
		return -EINVAL;

	while (bytes_read != len) {
		unsigned long map_len, end, start;

		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
		start = (unsigned long)ubuf >> PAGE_SHIFT;

		/*
		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
		 * pages. If this happens, we just lower the requested
		 * mapping len by a page so that we can fit.
		 */
		if (end - start > BIO_MAX_PAGES)
			map_len -= PAGE_SIZE;

		ret = __blk_rq_map_user(q, rq, ubuf, map_len);
		if (ret < 0)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
		bytes_read += ret;
		ubuf += ret;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	rq->buffer = rq->data = NULL;
	return 0;
unmap_rq:
	blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
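
/*
 * Illustrative sketch (not a caller in this file): the usual pattern around
 * a REQ_BLOCK_PC request, as in the SG_IO-style ioctl paths. The names
 * q/rq/bd_disk/ubuf/len are placeholders and error handling is abbreviated.
 * The original rq->bio is saved right after mapping because completion may
 * change rq->bio, and blk_rq_unmap_user() must be called in process context.
 *
 *	struct bio *bio = NULL;
 *
 *	ret = blk_rq_map_user(q, rq, ubuf, len);
 *	if (ret)
 *		return ret;
 *	bio = rq->bio;
 *	blk_execute_rq(q, bd_disk, rq, 0);
 *	ret = blk_rq_unmap_user(bio);
 */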

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @iov:	pointer to the iovec
 * @iov_count:	number of elements in the iovec
 * @len:	I/O byte count
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct sg_iovec *iov, int iov_count, unsigned int len)
{
	struct bio *bio;
	int i, read = rq_data_dir(rq) == READ;
	int unaligned = 0;

	if (!iov || iov_count <= 0)
		return -EINVAL;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;

		if (uaddr & queue_dma_alignment(q)) {
			unaligned = 1;
			break;
		}
	}

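	/*
	 * If any segment is misaligned, or the total length would need DMA
	 * padding, fall back to a kernel bounce buffer; otherwise map the
	 * user pages directly.
	 */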
	if (unaligned || (q->dma_pad_mask & len))
		bio = bio_copy_user_iov(q, iov, iov_count, read);
	else
		bio = bio_map_user_iov(q, NULL, iov, iov_count, read);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio->bi_size != len) {
		bio_endio(bio, 0);
		bio_unmap_user(bio);
		return -EINVAL;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	blk_queue_bounce(q, &bio);
	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
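
/*
 * Illustrative sketch (not a caller in this file): mapping a scattered
 * user buffer with blk_rq_map_user_iov(). The iovec entries below are
 * hypothetical placeholders; real callers, e.g. the SCSI generic ioctl
 * paths, build them from userspace-supplied descriptors.
 *
 *	struct sg_iovec iov[2];
 *	struct bio *bio;
 *
 *	iov[0].iov_base = ubuf0;
 *	iov[0].iov_len  = len0;
 *	iov[1].iov_base = ubuf1;
 *	iov[1].iov_len  = len1;
 *	ret = blk_rq_map_user_iov(q, rq, iov, 2, len0 + len1);
 *	if (ret)
 *		return ret;
 *	bio = rq->bio;
 *	blk_execute_rq(q, bd_disk, rq, 0);
 *	ret = blk_rq_unmap_user(bio);
 */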

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	       start of bio list
 *
 * Description:
 *    Unmap an rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		/*
		 * A bounced bio keeps the original bio in ->bi_private;
		 * that is the one to unmap, not the bounce clone.
		 */
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	unsigned long kaddr;
	unsigned int alignment;
	int reading = rq_data_dir(rq) == READ;
	int do_copy = 0;
	struct bio *bio;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	kaddr = (unsigned long)kbuf;
	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	do_copy = ((kaddr & alignment) || (len & alignment));

	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= (1 << BIO_RW);

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	blk_rq_bio_prep(q, rq, bio);
	blk_queue_bounce(q, &rq->bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
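
/*
 * Illustrative sketch (not part of this file): a typical blk_rq_map_kern()
 * user issues a SCSI-style command with a kernel buffer. Command setup is
 * elided and the names q/buf/buflen are placeholders; error handling is
 * abbreviated.
 *
 *	struct request *rq = blk_get_request(q, READ, GFP_KERNEL);
 *
 *	if (!rq)
 *		return -ENOMEM;
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	if (buflen) {
 *		err = blk_rq_map_kern(q, rq, buf, buflen, GFP_KERNEL);
 *		if (err)
 *			goto out_put;
 *	}
 *	blk_execute_rq(q, NULL, rq, 0);
 * out_put:
 *	blk_put_request(rq);
 */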