xref: /openbmc/linux/block/blk-map.c (revision 643d1f7f)
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

#include "blk.h"

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->data_len += bio->bi_size;
	}
	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
			     void __user *ubuf, unsigned int len)
{
	unsigned long uaddr;
	struct bio *bio, *orig_bio;
	int reading, ret;

	reading = rq_data_dir(rq) == READ;

	/*
	 * if alignment requirement is satisfied, map in user pages for
	 * direct dma. else, set up kernel bounce buffers
	 */
	uaddr = (unsigned long) ubuf;
	if (!(uaddr & queue_dma_alignment(q)) &&
	    !(len & queue_dma_alignment(q)))
		bio = bio_map_user(q, NULL, uaddr, len, reading);
	else
		bio = bio_copy_user(q, uaddr, len, reading);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (!ret)
		return bio->bi_size;

	/* if it was bounced we must call the end io function */
	bio_endio(bio, 0);
	__blk_rq_unmap_user(orig_bio);
	bio_put(bio);
	return ret;
}

/**
 * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request structure to fill
 * @ubuf:	the user buffer
 * @len:	length of user data
 *
 * Description:
 *    Data will be mapped directly for zero copy io, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of io, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    void __user *ubuf, unsigned long len)
{
	unsigned long bytes_read = 0;
	struct bio *bio = NULL;
	int ret;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !ubuf)
		return -EINVAL;

	while (bytes_read != len) {
		unsigned long map_len, end, start;

		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
		start = (unsigned long)ubuf >> PAGE_SHIFT;

		/*
		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
		 * pages. If this happens we just lower the requested
		 * mapping len by a page so that we can fit
		 */
		if (end - start > BIO_MAX_PAGES)
			map_len -= PAGE_SIZE;

		ret = __blk_rq_map_user(q, rq, ubuf, map_len);
		if (ret < 0)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
		bytes_read += ret;
		ubuf += ret;
	}

	rq->buffer = rq->data = NULL;
	return 0;
unmap_rq:
	blk_rq_unmap_user(bio);
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
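
/*
 * Illustrative sketch (not part of the original file, kept out of the
 * build): a typical SG_IO-style caller maps a user buffer into a freshly
 * allocated REQ_TYPE_BLOCK_PC request, executes it, and unmaps the original
 * bio chain afterwards.  The function name is hypothetical, and a real
 * caller would also fill in rq->cmd, rq->cmd_len and rq->timeout.
 */
#if 0
static int example_map_and_execute(struct request_queue *q,
				   struct gendisk *disk,
				   void __user *ubuf, unsigned long len)
{
	struct request *rq;
	struct bio *bio;
	int err;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	err = blk_rq_map_user(q, rq, ubuf, len);
	if (err)
		goto out;

	/* remember the original bio; io completion may change rq->bio */
	bio = rq->bio;
	err = blk_execute_rq(q, disk, rq, 0);

	/* must be done in process context, with the bio from map time */
	if (blk_rq_unmap_user(bio))
		err = -EFAULT;
out:
	blk_put_request(rq);
	return err;
}
#endif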

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @iov:	pointer to the iovec
 * @iov_count:	number of elements in the iovec
 * @len:	I/O byte count
 *
 * Description:
 *    Data will be mapped directly for zero copy io, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of io, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct sg_iovec *iov, int iov_count, unsigned int len)
{
	struct bio *bio;

	if (!iov || iov_count <= 0)
		return -EINVAL;

	/* we don't allow misaligned data like bio_map_user() does.  If the
	 * user is using sg, they're expected to know the alignment constraints
	 * and respect them accordingly */
	bio = bio_map_user_iov(q, NULL, iov, iov_count,
				rq_data_dir(rq) == READ);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio->bi_size != len) {
		bio_endio(bio, 0);
		bio_unmap_user(bio);
		return -EINVAL;
	}

	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
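
/*
 * Illustrative sketch (not part of the original file, kept out of the
 * build): mapping two user-space segments in one go with
 * blk_rq_map_user_iov().  struct sg_iovec comes from <scsi/sg.h>; both
 * segments are assumed to respect the queue's DMA alignment, since this
 * path does no bounce buffering.
 */
#if 0
static int example_map_iov(struct request_queue *q, struct request *rq,
			   void __user *buf0, size_t len0,
			   void __user *buf1, size_t len1)
{
	struct sg_iovec iov[2];

	iov[0].iov_base = buf0;
	iov[0].iov_len  = len0;
	iov[1].iov_base = buf1;
	iov[1].iov_len  = len1;

	/* the total length must equal the sum of the iovec lengths */
	return blk_rq_map_user_iov(q, rq, iov, 2, len0 + len1);
}
#endif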

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	       start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the io completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	struct bio *bio;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	bio = bio_map_kern(q, kbuf, len, gfp_mask);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= (1 << BIO_RW);

	blk_rq_bio_prep(q, rq, bio);
	blk_queue_bounce(q, &rq->bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
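
/*
 * Illustrative sketch (not part of the original file, kept out of the
 * build): mapping a kernel buffer into a REQ_TYPE_BLOCK_PC request with
 * blk_rq_map_kern(), roughly as a SCSI passthrough helper might.  The
 * function name is hypothetical; rq->cmd, rq->cmd_len and rq->timeout
 * would also need to be set up before blk_execute_rq().
 */
#if 0
static int example_map_kernel_buffer(struct request_queue *q,
				     struct gendisk *disk,
				     void *buf, unsigned int len)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	/* blk_rq_map_kern() already bounces the buffer if needed */
	err = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (!err)
		err = blk_execute_rq(q, disk, rq, 0);

	blk_put_request(rq);
	return err;
}
#endif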
263