/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"
/*
 * Append a bio to a passthrough request.  Only works if the bio can be
 * merged into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
	if (!rq->bio) {
		blk_rq_bio_prep(rq->q, rq, bio);
	} else {
		if (!ll_back_merge_fn(rq->q, rq, bio))
			return -EINVAL;

		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->__data_len += bio->bi_iter.bi_size;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
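
/*
 * Illustrative sketch (not built as part of this file): how a caller that
 * constructs its own bios might chain several of them onto one passthrough
 * request.  The first bio initializes the request via blk_rq_bio_prep();
 * every further bio is only accepted if ll_back_merge_fn() says it fits
 * the driver's limits (max segments, max sectors, etc.).  The "bios" and
 * "nr" names are hypothetical.
 */
#if 0
static int example_append_bios(struct request *rq, struct bio **bios, int nr)
{
	int i, ret;

	for (i = 0; i < nr; i++) {
		ret = blk_rq_append_bio(rq, bios[i]);
		if (ret)	/* -EINVAL: bio cannot be merged */
			return ret;
	}
	return 0;
}
#endif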

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user_iov(struct request *rq,
		struct rq_map_data *map_data, struct iov_iter *iter,
		gfp_t gfp_mask, bool copy)
{
	struct request_queue *q = rq->q;
	struct bio *bio, *orig_bio;
	int ret;

	if (copy)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	if (map_data && map_data->null_mapped)
		bio_set_flag(bio, BIO_NULL_MAPPED);

	iov_iter_advance(iter, bio->bi_iter.bi_size);
	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later, so we have to get a ref to prevent it from being freed.
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(rq, bio);
	if (ret) {
		bio_endio(bio);
		__blk_rq_unmap_user(orig_bio);
		bio_put(bio);
		return ret;
	}

	return 0;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret;

	if (!iter_is_iovec(iter))
		goto fail;

	if (map_data)
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->rq_flags |= RQF_COPY_USER;
	return 0;

unmap_rq:
	__blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return -EINVAL;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
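
/*
 * Illustrative sketch (not built as part of this file): mapping a
 * user-space iovec array into a passthrough request, roughly the way an
 * SG_IO-style ioctl handler would.  import_iovec() builds the iov_iter
 * that blk_rq_map_user_iov() consumes; whether the pages are pinned
 * directly or copied through a bounce buffer is decided internally from
 * the queue's alignment and virt-boundary constraints.  All "example_*"
 * names are hypothetical, and kfree() would need <linux/slab.h>.
 */
#if 0
static int example_map_user_iov(struct request *rq,
				const struct iovec __user *uvec,
				unsigned long nr_segs)
{
	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	struct iov_iter iter;
	ssize_t bytes;
	int ret;

	bytes = import_iovec(rq_data_dir(rq), uvec, nr_segs,
			     ARRAY_SIZE(iovstack), &iov, &iter);
	if (bytes < 0)
		return bytes;

	ret = blk_rq_map_user_iov(rq->q, rq, NULL, &iter, GFP_KERNEL);
	kfree(iov);	/* NULL if the stack array was used; kfree is safe */
	return ret;
}
#endif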

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);
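
/*
 * Illustrative sketch (not built as part of this file): mapping a single
 * flat user buffer.  The one subtlety worth showing is that the caller
 * must capture rq->bio immediately after a successful mapping, because
 * that original bio, not whatever rq->bio points at after completion, is
 * what blk_rq_unmap_user() needs later.  The function and parameter
 * names are hypothetical.
 */
#if 0
static struct bio *example_map_user_buf(struct request *rq,
					void __user *ubuf, unsigned long len)
{
	int ret;

	ret = blk_rq_map_user(rq->q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		return ERR_PTR(ret);

	/* Save the original bio now; completion may change rq->bio. */
	return rq->bio;
}
#endif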

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	       start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
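
/*
 * Illustrative sketch (not built as part of this file): the completion
 * side matching the mapping sketch above.  "saved_bio" is the rq->bio
 * pointer captured right after blk_rq_map_user*() returned; the
 * synchronous-issue and release calls (blk_execute_rq() with a void
 * return, blk_put_request()) are assumed to match this kernel
 * generation's API.
 */
#if 0
static int example_issue_and_unmap(struct request_queue *q,
				   struct request *rq, struct bio *saved_bio)
{
	int ret;

	/* Issue the request and wait for it to complete. */
	blk_execute_rq(q, NULL, rq, 0);

	/* Unpin or copy back the user pages, whichever mapping was used. */
	ret = blk_rq_unmap_user(saved_bio);

	blk_put_request(rq);
	return ret;
}
#endif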

/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	if (do_copy)
		rq->rq_flags |= RQF_COPY_USER;

	ret = blk_rq_append_bio(rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
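
/*
 * Illustrative sketch (not built as part of this file): feeding a kernel
 * buffer to a passthrough request.  A kmalloc'ed, properly aligned buffer
 * is mapped zero-copy via bio_map_kern(); a misaligned or on-stack buffer
 * silently takes the bio_copy_kern() path, which is why stack buffers are
 * legal here but cost a copy.  The "cmd_buf" name is hypothetical.
 */
#if 0
static int example_map_kern_buf(struct request *rq, void *cmd_buf,
				unsigned int len)
{
	/*
	 * No explicit alignment handling is needed: blk_rq_map_kern()
	 * falls back to a bounce copy when cmd_buf is unaligned or
	 * lives on the stack.
	 */
	return blk_rq_map_kern(rq->q, rq, cmd_buf, len, GFP_KERNEL);
}
#endif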