// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

/*
 * Append a bio to a passthrough request.  Only works if the bio can be merged
 * into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio **bio)
{
	struct bio *orig_bio = *bio;
	struct bvec_iter iter;
	struct bio_vec bv;
	unsigned int nr_segs = 0;

	blk_queue_bounce(rq->q, bio);

	bio_for_each_bvec(bv, *bio, iter)
		nr_segs++;

	if (!rq->bio) {
		blk_rq_bio_prep(rq, *bio, nr_segs);
	} else {
		if (!ll_back_merge_fn(rq, *bio, nr_segs)) {
			if (orig_bio != *bio) {
				bio_put(*bio);
				*bio = orig_bio;
			}
			return -EINVAL;
		}

		rq->biotail->bi_next = *bio;
		rq->biotail = *bio;
		rq->__data_len += (*bio)->bi_iter.bi_size;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
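
/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * driver that has already allocated a passthrough request and built a
 * bio could append it like this; "rq" and "bio" are assumed to exist.
 *
 *	struct bio *bio = ...;			// e.g. from bio_map_kern()
 *	int err = blk_rq_append_bio(rq, &bio);
 *	if (err)
 *		return err;	// bio does not fit the driver constraints
 *
 * Note that blk_rq_append_bio() may replace *bio with a bounce bio, so
 * callers must continue to use the pointer it hands back.
 */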

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user_iov(struct request *rq,
		struct rq_map_data *map_data, struct iov_iter *iter,
		gfp_t gfp_mask, bool copy)
{
	struct request_queue *q = rq->q;
	struct bio *bio, *orig_bio;
	int ret;

	if (copy)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	orig_bio = bio;

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later, so we have to get a ref to prevent it from being freed.
	 */
	ret = blk_rq_append_bio(rq, &bio);
	if (ret) {
		__blk_rq_unmap_user(orig_bio);
		return ret;
	}
	bio_get(bio);

	return 0;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as the mapped pages may be out of
 *    the driver's reach. It is the caller's responsibility to make sure this
 *    happens. The original bio must be passed back in to blk_rq_unmap_user()
 *    for proper unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret = -EINVAL;

	if (!iter_is_iovec(iter))
		goto fail;

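	/*
	 * Decide whether the iovec can be mapped directly or must be copied
	 * through a bounce buffer: caller-supplied pages (map_data), a base
	 * address that violates the queue's DMA alignment or padding mask,
	 * or inter-segment gaps that cross the queue's virt boundary all
	 * force a copy.
	 */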
	if (map_data)
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->rq_flags |= RQF_COPY_USER;
	return 0;

unmap_rq:
	__blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
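
/*
 * Example (sketch only, hypothetical caller): an ioctl-style path mapping
 * a user iovec into a passthrough request, in the manner of the SG_IO
 * handler. "uvec", "nr_segs" and "rq" are assumptions, not names from
 * this file.
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	int err;
 *
 *	err = import_iovec(rq_data_dir(rq), uvec, nr_segs,
 *			   ARRAY_SIZE(iovstack), &iov, &iter);
 *	if (err < 0)
 *		return err;
 *	err = blk_rq_map_user_iov(q, rq, NULL, &iter, GFP_KERNEL);
 *	kfree(iov);		// import_iovec() may have gone to the heap
 */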

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);
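
/*
 * Example (sketch only, hypothetical caller): the common single-buffer
 * case, paired with blk_rq_unmap_user() once the request has completed.
 * "ubuf", "len" and "rq" are assumptions, and blk_execute_rq() is used
 * with its signature from this kernel era.
 *
 *	err = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *	if (err)
 *		goto out;
 *	bio = rq->bio;		// save it: completion may change rq->bio
 *	blk_execute_rq(q, NULL, rq, 0);
 *	err = blk_rq_unmap_user(bio);
 */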

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	       start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio, saved when blk_rq_map_user() returned,
 *    since I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
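		/*
		 * If this bio was bounced, the bounce code stashed the bio
		 * that was actually mapped in bi_private; that one carries
		 * the user-page state which has to be torn down.
		 */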
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio, *orig_bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

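	/*
	 * Buffers that violate the queue's alignment constraints cannot be
	 * mapped directly, and stack memory must never be mapped for DMA
	 * (it may be vmapped with CONFIG_VMAP_STACK), so fall back to
	 * copying through a bounce buffer in those cases.
	 */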
	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	if (do_copy)
		rq->rq_flags |= RQF_COPY_USER;

	orig_bio = bio;
	ret = blk_rq_append_bio(rq, &bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(orig_bio);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
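
/*
 * Example (sketch only, hypothetical caller): issuing a small kmalloc'd
 * command buffer as passthrough I/O. "cmd_buf" and "rq" are assumptions.
 *
 *	void *cmd_buf = kmalloc(64, GFP_KERNEL);	// not on the stack
 *	if (!cmd_buf)
 *		return -ENOMEM;
 *	err = blk_rq_map_kern(q, rq, cmd_buf, 64, GFP_KERNEL);
 *	if (err)
 *		goto out_free;
 *	blk_execute_rq(q, NULL, rq, 0);
 *
 * No unmap call is needed: the bio allocated by blk_rq_map_kern() is
 * released when the request completes.
 */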