xref: /openbmc/linux/block/blk-map.c (revision afdc1a78)
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

#include "blk.h"

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->data_len += bio->bi_size;
	}
	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
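
/*
 * Illustrative usage sketch (not from this file): a caller building a
 * request out of more than one kernel buffer can append an extra bio
 * created by bio_map_kern().  The "extra_buf"/"extra_len" names are
 * assumptions made up for the example.
 *
 *	struct bio *bio = bio_map_kern(q, extra_buf, extra_len, GFP_KERNEL);
 *	int ret;
 *
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *	ret = blk_rq_append_bio(q, rq, bio);
 *	if (ret) {
 *		bio_put(bio);
 *		return ret;
 *	}
 */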

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
			     void __user *ubuf, unsigned int len)
{
	unsigned long uaddr;
	unsigned int alignment;
	struct bio *bio, *orig_bio;
	int reading, ret;

	reading = rq_data_dir(rq) == READ;

	/*
	 * if alignment requirement is satisfied, map in user pages for
	 * direct dma. else, set up kernel bounce buffers
	 */
	uaddr = (unsigned long) ubuf;
	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	if (!(uaddr & alignment) && !(len & alignment))
		bio = bio_map_user(q, NULL, uaddr, len, reading);
	else
		bio = bio_copy_user(q, uaddr, len, reading);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (!ret)
		return bio->bi_size;

	/* if it was bounced we must call the end_io function */
	bio_endio(bio, 0);
	__blk_rq_unmap_user(orig_bio);
	bio_put(bio);
	return ret;
}

/**
 * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request structure to fill
 * @ubuf:	the user buffer
 * @len:	length of user data
 *
 * Description:
 *    Data will be mapped directly for zero copy io, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of io, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    void __user *ubuf, unsigned long len)
{
	unsigned long bytes_read = 0;
	struct bio *bio = NULL;
	int ret;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !ubuf)
		return -EINVAL;

	while (bytes_read != len) {
		unsigned long map_len, end, start;

		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
		start = (unsigned long)ubuf >> PAGE_SHIFT;

		/*
		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
		 * pages. If this happens we just lower the requested
		 * mapping len by a page so that we can fit
		 */
		if (end - start > BIO_MAX_PAGES)
			map_len -= PAGE_SIZE;

		ret = __blk_rq_map_user(q, rq, ubuf, map_len);
		if (ret < 0)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
		bytes_read += ret;
		ubuf += ret;
	}

	/*
	 * __blk_rq_map_user() copies the buffers if starting address
	 * or length isn't aligned to dma_pad_mask.  As the copied
	 * buffer is always page aligned, we know that there's enough
	 * room for padding.  Extend the last bio and update
	 * rq->data_len accordingly.
	 *
	 * On unmap, bio_uncopy_user() will use unmodified
	 * bio_map_data pointed to by bio->bi_private.
	 */
	if (len & q->dma_pad_mask) {
		unsigned int pad_len = (q->dma_pad_mask & ~len) + 1;
		struct bio *tail = rq->biotail;

		tail->bi_io_vec[tail->bi_vcnt - 1].bv_len += pad_len;
		tail->bi_size += pad_len;

		rq->extra_len += pad_len;
	}

	rq->buffer = rq->data = NULL;
	return 0;
unmap_rq:
	blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
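
/*
 * Illustrative usage sketch (not from this file): a typical REQ_BLOCK_PC
 * caller maps the user buffer, executes the request, then unmaps using
 * the bio pointer saved before execution, since completion may have
 * advanced rq->bio.  The WRITE direction, the "disk" pointer and the
 * omitted command setup and error handling are assumptions made for the
 * example.
 *
 *	struct request *rq = blk_get_request(q, WRITE, GFP_KERNEL);
 *	struct bio *bio;
 *	int ret;
 *
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	ret = blk_rq_map_user(q, rq, ubuf, len);
 *	if (ret) {
 *		blk_put_request(rq);
 *		return ret;
 *	}
 *	bio = rq->bio;
 *	blk_execute_rq(q, disk, rq, 0);
 *	if (blk_rq_unmap_user(bio))
 *		ret = -EFAULT;
 *	blk_put_request(rq);
 *	return ret;
 */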

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @iov:	pointer to the iovec
 * @iov_count:	number of elements in the iovec
 * @len:	I/O byte count
 *
 * Description:
 *    Data will be mapped directly for zero copy io, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of io, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct sg_iovec *iov, int iov_count, unsigned int len)
{
	struct bio *bio;
	int i, read = rq_data_dir(rq) == READ;
	int unaligned = 0;

	if (!iov || iov_count <= 0)
		return -EINVAL;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;

		if (uaddr & queue_dma_alignment(q)) {
			unaligned = 1;
			break;
		}
	}

	if (unaligned || (q->dma_pad_mask & len))
		bio = bio_copy_user_iov(q, iov, iov_count, read);
	else
		bio = bio_map_user_iov(q, NULL, iov, iov_count, read);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio->bi_size != len) {
		bio_endio(bio, 0);
		bio_unmap_user(bio);
		return -EINVAL;
	}

	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
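
/*
 * Illustrative usage sketch (not from this file): an ioctl handler with a
 * scattered user buffer can hand it in as an sg_iovec array instead of
 * copying it into one contiguous buffer first.  The two-element iovec and
 * the "hdr_*"/"data_*" names are assumptions made for the example; the
 * request is then executed and unmapped exactly as in the
 * blk_rq_map_user() case above.
 *
 *	struct sg_iovec iov[2];
 *	int ret;
 *
 *	iov[0].iov_base = hdr_ubuf;
 *	iov[0].iov_len  = hdr_len;
 *	iov[1].iov_base = data_ubuf;
 *	iov[1].iov_len  = data_len;
 *
 *	ret = blk_rq_map_user_iov(q, rq, iov, 2, hdr_len + data_len);
 */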

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	       start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the io completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	struct bio *bio;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	bio = bio_map_kern(q, kbuf, len, gfp_mask);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= (1 << BIO_RW);

	blk_rq_bio_prep(q, rq, bio);
	blk_queue_bounce(q, &rq->bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
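
/*
 * Illustrative usage sketch (not from this file): a driver issuing an
 * internal SCSI INQUIRY into a kmalloc'ed buffer maps it with
 * blk_rq_map_kern() and executes the request synchronously.  The command
 * bytes, the 96-byte buffer size, the timeout and the "disk" pointer are
 * assumptions made for the example, and error handling is trimmed.
 *
 *	struct request *rq = blk_get_request(q, READ, GFP_KERNEL);
 *	void *buf = kzalloc(96, GFP_KERNEL);
 *	int ret;
 *
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	rq->cmd[0] = INQUIRY;
 *	rq->cmd[4] = 96;
 *	rq->cmd_len = 6;
 *	rq->timeout = 10 * HZ;
 *
 *	ret = blk_rq_map_kern(q, rq, buf, 96, GFP_KERNEL);
 *	if (!ret)
 *		ret = blk_execute_rq(q, disk, rq, 0);
 *	blk_put_request(rq);
 */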