// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

/*
 * Append a bio to a passthrough request.  Only works if the bio can be merged
 * into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio **bio)
{
	struct bio *orig_bio = *bio;

	blk_queue_bounce(rq->q, bio);

	if (!rq->bio) {
		blk_rq_bio_prep(rq->q, rq, *bio);
	} else {
		if (!ll_back_merge_fn(rq->q, rq, *bio)) {
			if (orig_bio != *bio) {
				bio_put(*bio);
				*bio = orig_bio;
			}
			return -EINVAL;
		}

		rq->biotail->bi_next = *bio;
		rq->biotail = *bio;
		rq->__data_len += (*bio)->bi_iter.bi_size;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
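
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * appending a driver-built bio to a passthrough request. Because
 * blk_rq_append_bio() may swap in a bounce bio, the caller passes the
 * address of its bio pointer and must keep using the updated value.
 */
static int example_append_bio(struct request *rq, struct bio *bio)
{
	int ret;

	ret = blk_rq_append_bio(rq, &bio);
	if (ret)
		/* merge failed; @bio still points at the caller's bio */
		return ret;

	/* on success, @bio may now be a bounce bio owned by the request */
	return 0;
}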

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

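/*
 * Map one chunk of the user iovec into @rq, either by pinning the user
 * pages in place (zero copy) or, when @copy is set, by copying the data
 * through a kernel bounce buffer.
 */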
static int __blk_rq_map_user_iov(struct request *rq,
		struct rq_map_data *map_data, struct iov_iter *iter,
		gfp_t gfp_mask, bool copy)
{
	struct request_queue *q = rq->q;
	struct bio *bio, *orig_bio;
	int ret;

	if (copy)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	orig_bio = bio;

	/*
	 * We link the bounce buffer in and may have to traverse it
	 * later, so we have to get a ref to prevent it from being freed.
	 */
	ret = blk_rq_append_bio(rq, &bio);
	if (ret) {
		__blk_rq_unmap_user(orig_bio);
		return ret;
	}
	bio_get(bio);

	return 0;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as the mapped pages may be beyond
 *    the device's reach. It's the caller's responsibility to make sure this
 *    happens. The original bio must be passed back in to blk_rq_unmap_user()
 *    for proper unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret = -EINVAL;

	if (!iter_is_iovec(iter))
		goto fail;

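	/*
	 * A bounce copy is required when the caller supplied preallocated
	 * pages in @map_data, when the iovec violates the queue's DMA
	 * padding/alignment constraints, or when gaps between iovec
	 * segments would straddle the device's virtual boundary mask.
	 */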
	if (map_data)
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->rq_flags |= RQF_COPY_USER;
	return 0;

unmap_rq:
	__blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
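
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * mapping a user-space iovec onto a passthrough request, roughly as the
 * SG_IO ioctl path does. Assumes <linux/slab.h> for kfree(); error
 * handling is abbreviated.
 */
static int example_map_user_iovec(struct request_queue *q, struct request *rq,
				  const struct iovec __user *uvec,
				  unsigned int nr_segs)
{
	struct iovec *iov = NULL;
	struct iov_iter i;
	int ret;

	/* import_iovec() allocates @iov; the caller must kfree() it */
	ret = import_iovec(rq_data_dir(rq), uvec, nr_segs, 0, &iov, &i);
	if (ret < 0)
		return ret;

	ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
	kfree(iov);
	return ret;
}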

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	       start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
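
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * the typical map/execute/unmap pattern for a user buffer. rq->bio is
 * saved before the request runs because I/O completion may change it,
 * and that saved pointer is what blk_rq_unmap_user() expects. Assumes
 * the synchronous blk_execute_rq() variant.
 */
static int example_user_passthrough(struct request_queue *q,
				    struct request *rq,
				    void __user *ubuf, unsigned long len)
{
	struct bio *bio;
	int ret;

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		return ret;

	bio = rq->bio;			/* save before submission */
	blk_execute_rq(q, NULL, rq, 0);

	return blk_rq_unmap_user(bio);
}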

/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio, *orig_bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	if (do_copy)
		rq->rq_flags |= RQF_COPY_USER;

	orig_bio = bio;
	ret = blk_rq_append_bio(rq, &bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(orig_bio);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
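
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * issuing a passthrough read into a kernel buffer. @buf should be a
 * heap allocation, since blk_rq_map_kern() falls back to a bounce copy
 * for on-stack or misaligned buffers. Result inspection is omitted.
 */
static int example_kern_passthrough(struct request_queue *q, void *buf,
				    unsigned int len)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (!ret)
		blk_execute_rq(q, NULL, rq, 0);

	blk_put_request(rq);
	return ret;
}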