// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

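/*
 * State kept for a copied ("bounced") user mapping so that the data can
 * be written back and the pages freed at completion time.  The iov_iter
 * is deep-copied (including the iovec array stored in iov[]) because the
 * caller's iovecs may be short-lived.  is_our_pages distinguishes pages
 * allocated here from pages supplied via rq_map_data; is_null_mapped
 * marks SG_IO-style null mappings that transfer no data to or from user
 * space.
 */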
struct bio_map_data {
	bool is_our_pages : 1;
	bool is_null_mapped : 1;
	struct iov_iter iter;
	struct iovec iov[];
};

static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
					       gfp_t gfp_mask)
{
	struct bio_map_data *bmd;

	if (data->nr_segs > UIO_MAXIOV)
		return NULL;

	bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
	if (!bmd)
		return NULL;
	bmd->iter = *data;
	if (iter_is_iovec(data)) {
		memcpy(bmd->iov, iter_iov(data), sizeof(struct iovec) * data->nr_segs);
		bmd->iter.__iov = bmd->iov;
	}
	return bmd;
}

/**
 * bio_copy_from_iter - copy all pages from iov_iter to bio
 * @bio: The &struct bio which describes the I/O as destination
 * @iter: iov_iter as source
 *
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_from_iter(bvec->bv_page,
					  bvec->bv_offset,
					  bvec->bv_len,
					  iter);

		if (!iov_iter_count(iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 * bio_copy_to_iter - copy all pages from bio to iov_iter
 * @bio: The &struct bio which describes the I/O as source
 * @iter: iov_iter as destination
 *
 * Copy all pages from bio to iov_iter.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_to_iter(bvec->bv_page,
					bvec->bv_offset,
					bvec->bv_len,
					&iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}
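
/*
 * Note that bio_copy_to_iter() takes its iov_iter by value:
 * copy_page_to_iter() advances a local copy, so the iterator saved in
 * struct bio_map_data at map time is left untouched.
 */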

/**
 * bio_uncopy_user - finish previously mapped bio
 * @bio: bio being terminated
 *
 * Free pages allocated from bio_copy_user_iov() and write back data
 * to user space in case of a read.
 */
static int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	int ret = 0;

	if (!bmd->is_null_mapped) {
		/*
		 * if we're in a workqueue, the request is orphaned, so
		 * don't copy into a random user address space, just free
		 * and return -EINTR so user space doesn't expect any data.
		 */
		if (!current->mm)
			ret = -EINTR;
		else if (bio_data_dir(bio) == READ)
			ret = bio_copy_to_iter(bio, bmd->iter);
		if (bmd->is_our_pages)
			bio_free_pages(bio);
	}
	kfree(bmd);
	return ret;
}

static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
		struct iov_iter *iter, gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	struct page *page;
	struct bio *bio;
	int i = 0, ret;
	int nr_pages;
	unsigned int len = iter->count;
	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

	bmd = bio_alloc_map_data(iter, gfp_mask);
	if (!bmd)
		return -ENOMEM;

	/*
	 * We need to do a deep copy of the iov_iter including the iovecs.
	 * The caller provided iov might point to an on-stack or otherwise
	 * shortlived one.
	 */
	bmd->is_our_pages = !map_data;
	bmd->is_null_mapped = (map_data && map_data->null_mapped);

	nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));

	ret = -ENOMEM;
	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		goto out_bmd;
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, req_op(rq));

	if (map_data) {
		nr_pages = 1U << map_data->page_order;
		i = map_data->offset / PAGE_SIZE;
	}
	while (len) {
		unsigned int bytes = PAGE_SIZE;

		bytes -= offset;

		if (bytes > len)
			bytes = len;

		if (map_data) {
			if (i == map_data->nr_entries * nr_pages) {
				ret = -ENOMEM;
				goto cleanup;
			}

			page = map_data->pages[i / nr_pages];
			page += (i % nr_pages);

			i++;
		} else {
			page = alloc_page(GFP_NOIO | gfp_mask);
			if (!page) {
				ret = -ENOMEM;
				goto cleanup;
			}
		}

		if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) {
			if (!map_data)
				__free_page(page);
			break;
		}

		len -= bytes;
		offset = 0;
	}

	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	/*
	 * success
	 */
	if (iov_iter_rw(iter) == WRITE &&
	    (!map_data || !map_data->null_mapped)) {
		ret = bio_copy_from_iter(bio, iter);
		if (ret)
			goto cleanup;
	} else if (map_data && map_data->from_user) {
		struct iov_iter iter2 = *iter;

		/* This is the copy-in part of SG_DXFER_TO_FROM_DEV. */
		iter2.data_source = ITER_SOURCE;
		ret = bio_copy_from_iter(bio, &iter2);
		if (ret)
			goto cleanup;
	} else {
		if (bmd->is_our_pages)
			zero_fill_bio(bio);
		iov_iter_advance(iter, bio->bi_iter.bi_size);
	}

	bio->bi_private = bmd;

	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		goto cleanup;
	return 0;
cleanup:
	if (!map_data)
		bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
out_bmd:
	kfree(bmd);
	return ret;
}

static void blk_mq_map_bio_put(struct bio *bio)
{
	if (bio->bi_opf & REQ_ALLOC_CACHE) {
		bio_put(bio);
	} else {
		bio_uninit(bio);
		kfree(bio);
	}
}
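
/*
 * blk_rq_map_bio_alloc() below is the allocation-side counterpart of
 * blk_mq_map_bio_put(): the REQ_ALLOC_CACHE flag decides both where the
 * bio comes from and how it is freed.
 */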

static struct bio *blk_rq_map_bio_alloc(struct request *rq,
		unsigned int nr_vecs, gfp_t gfp_mask)
{
	struct bio *bio;

	if (rq->cmd_flags & REQ_ALLOC_CACHE && (nr_vecs <= BIO_INLINE_VECS)) {
		bio = bio_alloc_bioset(NULL, nr_vecs, rq->cmd_flags, gfp_mask,
					&fs_bio_set);
		if (!bio)
			return NULL;
	} else {
		bio = bio_kmalloc(nr_vecs, gfp_mask);
		if (!bio)
			return NULL;
		bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));
	}
	return bio;
}
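
/*
 * Zero-copy path for user mappings: pin the user pages with
 * iov_iter_extract_pages() and add them to the bio directly, honoring
 * the queue's segment and DMA alignment limits.  The pages are released
 * again by blk_rq_unmap_user() once the I/O has completed.
 */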

static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
		gfp_t gfp_mask)
{
	iov_iter_extraction_t extraction_flags = 0;
	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
	unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
	struct bio *bio;
	int ret;
	int j;

	if (!iov_iter_count(iter))
		return -EINVAL;

	bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);
	if (bio == NULL)
		return -ENOMEM;

	if (blk_queue_pci_p2pdma(rq->q))
		extraction_flags |= ITER_ALLOW_P2PDMA;
	if (iov_iter_extract_will_pin(iter))
		bio_set_flag(bio, BIO_PAGE_PINNED);

	while (iov_iter_count(iter)) {
		struct page *stack_pages[UIO_FASTIOV];
		struct page **pages = stack_pages;
		ssize_t bytes;
		size_t offs;
		int npages;

		if (nr_vecs > ARRAY_SIZE(stack_pages))
			pages = NULL;

		bytes = iov_iter_extract_pages(iter, &pages, LONG_MAX,
					       nr_vecs, extraction_flags, &offs);
		if (unlikely(bytes <= 0)) {
			ret = bytes ? bytes : -EFAULT;
			goto out_unmap;
		}

		npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);

		if (unlikely(offs & queue_dma_alignment(rq->q)))
			j = 0;
		else {
			for (j = 0; j < npages; j++) {
				struct page *page = pages[j];
				unsigned int n = PAGE_SIZE - offs;
				bool same_page = false;

				if (n > bytes)
					n = bytes;

				if (!bio_add_hw_page(rq->q, bio, page, n, offs,
						     max_sectors, &same_page))
					break;

				if (same_page)
					bio_release_page(bio, page);
				bytes -= n;
				offs = 0;
			}
		}
		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < npages)
			bio_release_page(bio, pages[j++]);
		if (pages != stack_pages)
			kvfree(pages);
		/* couldn't stuff something into bio? */
		if (bytes) {
			iov_iter_revert(iter, bytes);
			break;
		}
	}

	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		goto out_unmap;
	return 0;

out_unmap:
	bio_release_pages(bio, false);
	blk_mq_map_bio_put(bio);
	return ret;
}
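
/*
 * vmalloc buffers are accessed through a kernel alias mapping while the
 * device transfers to or from the underlying physical pages, so on
 * architectures with aliasing caches the mapping must be invalidated
 * after a read (and flushed before submission, see bio_map_kern()
 * below).
 */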

static void bio_invalidate_vmalloc_pages(struct bio *bio)
{
#ifdef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
	if (bio->bi_private && !op_is_write(bio_op(bio))) {
		unsigned long i, len = 0;

		for (i = 0; i < bio->bi_vcnt; i++)
			len += bio->bi_io_vec[i].bv_len;
		invalidate_kernel_vmap_range(bio->bi_private, len);
	}
#endif
}

static void bio_map_kern_endio(struct bio *bio)
{
	bio_invalidate_vmalloc_pages(bio);
	bio_uninit(bio);
	kfree(bio);
}

/**
 * bio_map_kern - map kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to map
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio allocation
 *
 * Map the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_map_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	bool is_vmalloc = is_vmalloc_addr(data);
	struct page *page;
	int offset, i;
	struct bio *bio;

	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);

	if (is_vmalloc) {
		flush_kernel_vmap_range(data, len);
		bio->bi_private = data;
	}

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (!is_vmalloc)
			page = virt_to_page(data);
		else
			page = vmalloc_to_page(data);
		if (bio_add_pc_page(q, bio, page, bytes,
				    offset) < bytes) {
			/* we don't support partial mappings */
			bio_uninit(bio);
			kfree(bio);
			return ERR_PTR(-EINVAL);
		}

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}

static void bio_copy_kern_endio(struct bio *bio)
{
	bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
}

static void bio_copy_kern_endio_read(struct bio *bio)
{
	char *p = bio->bi_private;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		memcpy_from_bvec(p, bvec);
		p += bvec->bv_len;
	}

	bio_copy_kern_endio(bio);
}

/**
 * bio_copy_kern - copy kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to copy
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio and page allocation
 * @reading: data direction is READ
 *
 * copy the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_copy_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask, int reading)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	struct bio *bio;
	void *p = data;
	int nr_pages = 0;

	/*
	 * Overflow, abort
	 */
	if (end < start)
		return ERR_PTR(-EINVAL);

	nr_pages = end - start;
	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);

	while (len) {
		struct page *page;
		unsigned int bytes = PAGE_SIZE;

		if (bytes > len)
			bytes = len;

		page = alloc_page(GFP_NOIO | __GFP_ZERO | gfp_mask);
		if (!page)
			goto cleanup;

		if (!reading)
			memcpy(page_address(page), p, bytes);

		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
			break;

		len -= bytes;
		p += bytes;
	}

	if (reading) {
		bio->bi_end_io = bio_copy_kern_endio_read;
		bio->bi_private = data;
	} else {
		bio->bi_end_io = bio_copy_kern_endio;
	}

	return bio;

cleanup:
	bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
	return ERR_PTR(-ENOMEM);
}

/*
 * Append a bio to a passthrough request.  Only works if the bio can be merged
 * into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	unsigned int nr_segs = 0;

	bio_for_each_bvec(bv, bio, iter)
		nr_segs++;

	if (!rq->bio) {
		blk_rq_bio_prep(rq, bio, nr_segs);
	} else {
		if (!ll_back_merge_fn(rq, bio, nr_segs))
			return -EINVAL;
		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->__data_len += (bio)->bi_iter.bi_size;
		bio_crypt_free_ctx(bio);
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
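
/*
 * Note for callers of blk_rq_append_bio(): on -EINVAL the bio has not
 * been linked into the request, so ownership stays with the caller, who
 * must free it (see the cleanup paths of the mapping helpers in this
 * file).
 */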

/* Prepare bio for passthrough IO given ITER_BVEC iter */
static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
{
	struct request_queue *q = rq->q;
	size_t nr_iter = iov_iter_count(iter);
	size_t nr_segs = iter->nr_segs;
	struct bio_vec *bvecs, *bvprvp = NULL;
	const struct queue_limits *lim = &q->limits;
	unsigned int nsegs = 0, bytes = 0;
	struct bio *bio;
	size_t i;

	if (!nr_iter || (nr_iter >> SECTOR_SHIFT) > queue_max_hw_sectors(q))
		return -EINVAL;
	if (nr_segs > queue_max_segments(q))
		return -EINVAL;

	/* no iovecs to alloc, as we already have a BVEC iterator */
	bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
	if (bio == NULL)
		return -ENOMEM;

	bio_iov_bvec_set(bio, (struct iov_iter *)iter);
	blk_rq_bio_prep(rq, bio, nr_segs);

	/* loop to perform a bunch of sanity checks */
	bvecs = (struct bio_vec *)iter->bvec;
	for (i = 0; i < nr_segs; i++) {
		struct bio_vec *bv = &bvecs[i];

		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, fallback to copy.
		 */
		if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) {
			blk_mq_map_bio_put(bio);
			return -EREMOTEIO;
		}
		/* check full condition */
		if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len)
			goto put_bio;
		if (bytes + bv->bv_len > nr_iter)
			break;

		nsegs++;
		bytes += bv->bv_len;
		bvprvp = bv;
	}
	return 0;
put_bio:
	blk_mq_map_bio_put(bio);
	return -EINVAL;
}
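
/*
 * The error convention above matters to the caller: -EREMOTEIO tells
 * blk_rq_map_user_iov() that the bvecs violate a queue limit and that
 * it should fall back to the copying path, while any other error fails
 * the mapping outright.
 */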

/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false, map_bvec = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret = -EINVAL;

	if (map_data)
		copy = true;
	else if (blk_queue_may_bounce(q))
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (iov_iter_is_bvec(iter))
		map_bvec = true;
	else if (!user_backed_iter(iter))
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	if (map_bvec) {
		ret = blk_rq_map_user_bvec(rq, iter);
		if (!ret)
			return 0;
		if (ret != -EREMOTEIO)
			goto fail;
		/* fall back to copying the data on limits mismatches */
		copy = true;
	}

	i = *iter;
	do {
		if (copy)
			ret = bio_copy_user_iov(rq, map_data, &i, gfp_mask);
		else
			ret = bio_map_user_iov(rq, &i, gfp_mask);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	return 0;

unmap_rq:
	blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iov_iter i;
	int ret = import_ubuf(rq_data_dir(rq), ubuf, len, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);
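
/*
 * A minimal usage sketch of the map/execute/unmap lifecycle, assuming a
 * caller that allocates and submits its own passthrough request (the
 * allocation and submission calls below are assumptions about the
 * caller, not something this file provides).  The bio is saved before
 * execution because, as documented at blk_rq_unmap_user(), completion
 * may change rq->bio:
 *
 *	struct request *rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *	struct bio *bio;
 *
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *	if (!ret) {
 *		bio = rq->bio;
 *		blk_execute_rq(rq, false);
 *		ret = blk_rq_unmap_user(bio);
 *	}
 *	blk_mq_free_request(rq);
 */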

int blk_rq_map_user_io(struct request *req, struct rq_map_data *map_data,
		void __user *ubuf, unsigned long buf_len, gfp_t gfp_mask,
		bool vec, int iov_count, bool check_iter_count, int rw)
{
	int ret = 0;

	if (vec) {
		struct iovec fast_iov[UIO_FASTIOV];
		struct iovec *iov = fast_iov;
		struct iov_iter iter;

		ret = import_iovec(rw, ubuf, iov_count ? iov_count : buf_len,
				UIO_FASTIOV, &iov, &iter);
		if (ret < 0)
			return ret;

		if (iov_count) {
			/* SG_IO howto says that the shorter of the two wins */
			iov_iter_truncate(&iter, buf_len);
			if (check_iter_count && !iov_iter_count(&iter)) {
				kfree(iov);
				return -EINVAL;
			}
		}

		ret = blk_rq_map_user_iov(req->q, req, map_data, &iter,
				gfp_mask);
		kfree(iov);
	} else if (buf_len) {
		ret = blk_rq_map_user(req->q, req, map_data, ubuf, buf_len,
				gfp_mask);
	}
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_io);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	       start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *next_bio;
	int ret = 0, ret2;

	while (bio) {
		if (bio->bi_private) {
			ret2 = bio_uncopy_user(bio);
			if (ret2 && !ret)
				ret = ret2;
		} else {
			bio_release_pages(bio, bio_data_dir(bio) == READ);
		}

		next_bio = bio;
		bio = bio->bi_next;
		blk_mq_map_bio_put(next_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of user data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf) ||
	    blk_queue_may_bounce(q))
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	ret = blk_rq_append_bio(rq, bio);
	if (unlikely(ret)) {
		bio_uninit(bio);
		kfree(bio);
	}
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_kern);
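
/*
 * A matching sketch for the kernel-buffer variant (again, the request
 * allocation and submission calls are assumptions about the caller).
 * No unmap step is needed here: the bio's end_io handler frees
 * everything, and the bounce copy-back for reads happens in
 * bio_copy_kern_endio_read():
 *
 *	struct request *rq = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
 *
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	ret = blk_rq_map_kern(q, rq, buf, buf_len, GFP_KERNEL);
 *	if (!ret)
 *		blk_execute_rq(rq, false);
 *	blk_mq_free_request(rq);
 */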