/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

#include "blk.h"

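/*
 * Append a bio to a request: if the request is still empty the bio becomes
 * its first segment; otherwise the bio is merged onto the tail when the
 * queue's back-merge check allows it, and rq->data_len grows accordingly.
 */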
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->data_len += bio->bi_size;
	}
	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

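/*
 * Map (or, for unaligned buffers, copy) a single chunk of user memory into
 * the request.  Returns the number of bytes added on success or a negative
 * errno on failure.
 */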
static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
			     void __user *ubuf, unsigned int len)
{
	unsigned long uaddr;
	unsigned int alignment;
	struct bio *bio, *orig_bio;
	int reading, ret;

	reading = rq_data_dir(rq) == READ;

	/*
	 * If the alignment requirement is satisfied, map in the user pages
	 * for direct DMA; otherwise, set up kernel bounce buffers.
	 */
	uaddr = (unsigned long) ubuf;
	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	if (!(uaddr & alignment) && !(len & alignment))
		bio = bio_map_user(q, NULL, uaddr, len, reading);
	else
		bio = bio_copy_user(q, uaddr, len, reading);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later, so we have to get a ref to prevent it from being freed.
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (!ret)
		return bio->bi_size;

	/* if it was bounced we must call the end io function */
	bio_endio(bio, 0);
	__blk_rq_unmap_user(orig_bio);
	bio_put(bio);
	return ret;
}

/**
 * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request structure to fill
 * @ubuf:	the user buffer
 * @len:	length of user data
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    void __user *ubuf, unsigned long len)
{
	unsigned long bytes_read = 0;
	struct bio *bio = NULL;
	int ret;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !ubuf)
		return -EINVAL;

	while (bytes_read != len) {
		unsigned long map_len, end, start;

		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
		start = (unsigned long)ubuf >> PAGE_SHIFT;

		/*
		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
		 * pages. If this happens we just lower the requested
		 * mapping len by a page so that we can fit
		 */
		if (end - start > BIO_MAX_PAGES)
			map_len -= PAGE_SIZE;

		ret = __blk_rq_map_user(q, rq, ubuf, map_len);
		if (ret < 0)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
		bytes_read += ret;
		ubuf += ret;
	}

	/*
	 * __blk_rq_map_user() copies the buffers if the starting address
	 * or length isn't aligned to dma_pad_mask.  As the copied
	 * buffer is always page aligned, we know that there's enough
	 * room for padding.  Extend the last bio and update
	 * rq->extra_len accordingly.
	 *
	 * On unmap, bio_uncopy_user() will use the unmodified
	 * bio_map_data pointed to by bio->bi_private.
	 */
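	/*
	 * Illustrative numbers (not from the original source): with a
	 * dma_pad_mask of 3 and len == 1025, pad_len below works out to
	 * (3 & ~1025) + 1 == 3, so the last bvec and bi_size grow by three
	 * bytes and the bio spans 1028 bytes, a multiple of four.
	 */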
	if (len & q->dma_pad_mask) {
		unsigned int pad_len = (q->dma_pad_mask & ~len) + 1;
		struct bio *bio = rq->biotail;

		bio->bi_io_vec[bio->bi_vcnt - 1].bv_len += pad_len;
		bio->bi_size += pad_len;

		rq->extra_len += pad_len;
	}

	rq->buffer = rq->data = NULL;
	return 0;
unmap_rq:
	blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
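
/*
 * Illustrative usage sketch (not part of this file; the helper name and the
 * submission step are hypothetical): pairing blk_rq_map_user() with
 * blk_rq_unmap_user() around a passthrough request.
 *
 *	static int map_user_example(struct request_queue *q,
 *				    void __user *ubuf, unsigned long len)
 *	{
 *		struct request *rq;
 *		struct bio *bio;
 *		int ret;
 *
 *		rq = blk_get_request(q, READ, GFP_KERNEL);
 *		if (!rq)
 *			return -ENOMEM;
 *
 *		ret = blk_rq_map_user(q, rq, ubuf, len);
 *		if (ret)
 *			goto out;
 *
 *		bio = rq->bio;
 *
 *		... submit rq, e.g. with blk_execute_rq(), and wait; rq->bio
 *		... may have changed by then, so unmap with the saved bio:
 *
 *		ret = blk_rq_unmap_user(bio);
 *	out:
 *		blk_put_request(rq);
 *		return ret;
 *	}
 */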

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @iov:	pointer to the iovec
 * @iov_count:	number of elements in the iovec
 * @len:	I/O byte count
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct sg_iovec *iov, int iov_count, unsigned int len)
{
	struct bio *bio;

	if (!iov || iov_count <= 0)
		return -EINVAL;

	/* we don't allow misaligned data like bio_map_user() does.  If the
	 * user is using sg, they're expected to know the alignment constraints
	 * and respect them accordingly */
	bio = bio_map_user_iov(q, NULL, iov, iov_count,
				rq_data_dir(rq) == READ);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio->bi_size != len) {
		bio_endio(bio, 0);
		bio_unmap_user(bio);
		return -EINVAL;
	}

	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
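
/*
 * Illustrative sketch (hypothetical caller; buf0/buf1/len0/len1 are not from
 * this file): because this path never falls back to a bounce copy, each
 * iovec element must already satisfy the queue's DMA alignment, e.g.:
 *
 *	struct sg_iovec iov[2] = {
 *		{ .iov_base = buf0, .iov_len = len0 },
 *		{ .iov_base = buf1, .iov_len = len1 },
 *	};
 *
 *	ret = blk_rq_map_user_iov(q, rq, iov, 2, len0 + len1);
 *	if (ret)
 *		goto out;
 */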

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	       start of bio list
 *
 * Description:
 *    Unmap a request previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	struct bio *bio;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	bio = bio_map_kern(q, kbuf, len, gfp_mask);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= (1 << BIO_RW);

	blk_rq_bio_prep(q, rq, bio);
	blk_queue_bounce(q, &rq->bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
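
/*
 * Illustrative usage sketch (not part of this file; buf, buf_len and the
 * submission step are hypothetical): mapping a kernel buffer, e.g. for a
 * passthrough command that reads into a driver-allocated buffer.
 *
 *	rq = blk_get_request(q, READ, GFP_KERNEL);
 *	if (!rq)
 *		return -ENOMEM;
 *
 *	ret = blk_rq_map_kern(q, rq, buf, buf_len, GFP_KERNEL);
 *	if (ret) {
 *		blk_put_request(rq);
 *		return ret;
 *	}
 *
 *	... set up rq->cmd / rq->cmd_len, submit and wait, then ...
 *	blk_put_request(rq);
 */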