// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

struct bio_map_data {
	bool is_our_pages : 1;
	bool is_null_mapped : 1;
	struct iov_iter iter;
	struct iovec iov[];
};

static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
					       gfp_t gfp_mask)
{
	struct bio_map_data *bmd;

	if (data->nr_segs > UIO_MAXIOV)
		return NULL;

	bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
	if (!bmd)
		return NULL;
	memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
	bmd->iter = *data;
	bmd->iter.iov = bmd->iov;
	return bmd;
}

/**
 * bio_copy_from_iter - copy all pages from iov_iter to bio
 * @bio: The &struct bio which describes the I/O as destination
 * @iter: iov_iter as source
 *
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_from_iter(bvec->bv_page,
					  bvec->bv_offset,
					  bvec->bv_len,
					  iter);

		if (!iov_iter_count(iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 * bio_copy_to_iter - copy all pages from bio to iov_iter
 * @bio: The &struct bio which describes the I/O as source
 * @iter: iov_iter as destination
 *
 * Copy all pages from bio to iov_iter.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_to_iter(bvec->bv_page,
					bvec->bv_offset,
					bvec->bv_len,
					&iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 *	bio_uncopy_user	-	finish previously mapped bio
 *	@bio: bio being terminated
 *
 *	Free pages allocated from bio_copy_user_iov() and write back data
 *	to user space in case of a read.
 */
static int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	int ret = 0;

	if (!bmd->is_null_mapped) {
		/*
		 * if we're in a workqueue, the request is orphaned, so
		 * don't copy into a random user address space, just free
		 * and return -EINTR so user space doesn't expect any data.
		 */
		if (!current->mm)
			ret = -EINTR;
		else if (bio_data_dir(bio) == READ)
			ret = bio_copy_to_iter(bio, bmd->iter);
		if (bmd->is_our_pages)
			bio_free_pages(bio);
	}
	kfree(bmd);
	bio_put(bio);
	return ret;
}

static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
		struct iov_iter *iter, gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	struct page *page;
	struct bio *bio, *bounce_bio;
	int i = 0, ret;
	int nr_pages;
	unsigned int len = iter->count;
	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

	bmd = bio_alloc_map_data(iter, gfp_mask);
	if (!bmd)
		return -ENOMEM;

	/*
	 * We need to do a deep copy of the iov_iter including the iovecs.
	 * The caller-provided iov might point to an on-stack or otherwise
	 * short-lived one.
	 */
	bmd->is_our_pages = !map_data;
	bmd->is_null_mapped = (map_data && map_data->null_mapped);

	nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));

	ret = -ENOMEM;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		goto out_bmd;
	bio->bi_opf |= req_op(rq);

	if (map_data) {
		nr_pages = 1 << map_data->page_order;
		i = map_data->offset / PAGE_SIZE;
	}
	while (len) {
		unsigned int bytes = PAGE_SIZE;

		bytes -= offset;

		if (bytes > len)
			bytes = len;

		if (map_data) {
			if (i == map_data->nr_entries * nr_pages) {
				ret = -ENOMEM;
				goto cleanup;
			}

			page = map_data->pages[i / nr_pages];
			page += (i % nr_pages);

			i++;
		} else {
			page = alloc_page(rq->q->bounce_gfp | gfp_mask);
			if (!page) {
				ret = -ENOMEM;
				goto cleanup;
			}
		}

		if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) {
			if (!map_data)
				__free_page(page);
			break;
		}

		len -= bytes;
		offset = 0;
	}

	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	/*
	 * success
	 */
	if ((iov_iter_rw(iter) == WRITE &&
	     (!map_data || !map_data->null_mapped)) ||
	    (map_data && map_data->from_user)) {
		ret = bio_copy_from_iter(bio, iter);
		if (ret)
			goto cleanup;
	} else {
		if (bmd->is_our_pages)
			zero_fill_bio(bio);
		iov_iter_advance(iter, bio->bi_iter.bi_size);
	}

	bio->bi_private = bmd;

	bounce_bio = bio;
	ret = blk_rq_append_bio(rq, &bounce_bio);
	if (ret)
		goto cleanup;

	/*
	 * We link the bounce buffer in and could have to traverse it later, so
	 * we have to get a ref to prevent it from being freed
	 */
	bio_get(bounce_bio);
	return 0;
cleanup:
	if (!map_data)
		bio_free_pages(bio);
	bio_put(bio);
out_bmd:
	kfree(bmd);
	return ret;
}

static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
		gfp_t gfp_mask)
{
	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
	struct bio *bio, *bounce_bio;
	int ret;
	int j;

	if (!iov_iter_count(iter))
		return -EINVAL;

	bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES));
	if (!bio)
		return -ENOMEM;
	bio->bi_opf |= req_op(rq);

	while (iov_iter_count(iter)) {
		struct page **pages;
		ssize_t bytes;
		size_t offs, added = 0;
		int npages;

		bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
		if (unlikely(bytes <= 0)) {
			ret = bytes ? bytes : -EFAULT;
			goto out_unmap;
		}

		npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);

		if (unlikely(offs & queue_dma_alignment(rq->q))) {
			ret = -EINVAL;
			j = 0;
		} else {
			for (j = 0; j < npages; j++) {
				struct page *page = pages[j];
				unsigned int n = PAGE_SIZE - offs;
				bool same_page = false;

				if (n > bytes)
					n = bytes;

				if (!bio_add_hw_page(rq->q, bio, page, n, offs,
						     max_sectors, &same_page)) {
					if (same_page)
						put_page(page);
					break;
				}

				added += n;
				bytes -= n;
				offs = 0;
			}
			iov_iter_advance(iter, added);
		}
		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < npages)
			put_page(pages[j++]);
		kvfree(pages);
		/* couldn't stuff something into bio? */
		if (bytes)
			break;
	}

	/*
	 * Subtle: if we end up needing to bounce a bio, it would normally
	 * disappear when its bi_end_io is run.  However, we need the original
	 * bio for the unmap, so grab an extra reference to it
	 */
	bio_get(bio);

	bounce_bio = bio;
	ret = blk_rq_append_bio(rq, &bounce_bio);
	if (ret)
		goto out_put_orig;

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later, so we have to get a ref to prevent it from being freed
	 */
	bio_get(bounce_bio);
	return 0;

 out_put_orig:
	bio_put(bio);
 out_unmap:
	bio_release_pages(bio, false);
	bio_put(bio);
	return ret;
}

/**
 *	bio_unmap_user	-	unmap a bio
 *	@bio:		the bio being unmapped
 *
 *	Unmap a bio previously mapped by bio_map_user_iov(). Must be called from
 *	process context.
 *
 *	bio_unmap_user() may sleep.
 */
static void bio_unmap_user(struct bio *bio)
{
	/* mark the pinned pages dirty on a READ before releasing them */
	bio_release_pages(bio, bio_data_dir(bio) == READ);
	/*
	 * Drop both references: the original allocation and the extra one
	 * taken in bio_map_user_iov() so the bio survived until the unmap.
	 */
	bio_put(bio);
	bio_put(bio);
}

static void bio_invalidate_vmalloc_pages(struct bio *bio)
{
#ifdef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
	if (bio->bi_private && !op_is_write(bio_op(bio))) {
		unsigned long i, len = 0;

		for (i = 0; i < bio->bi_vcnt; i++)
			len += bio->bi_io_vec[i].bv_len;
		invalidate_kernel_vmap_range(bio->bi_private, len);
	}
#endif
}

static void bio_map_kern_endio(struct bio *bio)
{
	bio_invalidate_vmalloc_pages(bio);
	bio_put(bio);
}

/**
 *	bio_map_kern	-	map kernel address into bio
 *	@q: the struct request_queue for the bio
 *	@data: pointer to buffer to map
 *	@len: length in bytes
 *	@gfp_mask: allocation flags for bio allocation
 *
 *	Map the kernel address into a bio suitable for I/O to a block
 *	device. Returns an error pointer in case of error.
 */
static struct bio *bio_map_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	bool is_vmalloc = is_vmalloc_addr(data);
	struct page *page;
	int offset, i;
	struct bio *bio;

	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	if (is_vmalloc) {
		flush_kernel_vmap_range(data, len);
		bio->bi_private = data;
	}

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (!is_vmalloc)
			page = virt_to_page(data);
		else
			page = vmalloc_to_page(data);
		if (bio_add_pc_page(q, bio, page, bytes,
				    offset) < bytes) {
			/* we don't support partial mappings */
			bio_put(bio);
			return ERR_PTR(-EINVAL);
		}

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}

static void bio_copy_kern_endio(struct bio *bio)
{
	bio_free_pages(bio);
	bio_put(bio);
}

static void bio_copy_kern_endio_read(struct bio *bio)
{
	char *p = bio->bi_private;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
		p += bvec->bv_len;
	}

	bio_copy_kern_endio(bio);
}

/**
 *	bio_copy_kern	-	copy kernel address into bio
 *	@q: the struct request_queue for the bio
 *	@data: pointer to buffer to copy
 *	@len: length in bytes
 *	@gfp_mask: allocation flags for bio and page allocation
 *	@reading: data direction is READ
 *
 *	Copy the kernel address into a bio suitable for I/O to a block
 *	device. Returns an error pointer in case of error.
 */
static struct bio *bio_copy_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask, int reading)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	struct bio *bio;
	void *p = data;
	int nr_pages = 0;

	/*
	 * Overflow, abort
	 */
	if (end < start)
		return ERR_PTR(-EINVAL);

	nr_pages = end - start;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	while (len) {
		struct page *page;
		unsigned int bytes = PAGE_SIZE;

		if (bytes > len)
			bytes = len;

		page = alloc_page(q->bounce_gfp | gfp_mask);
		if (!page)
			goto cleanup;

		if (!reading)
			memcpy(page_address(page), p, bytes);

		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
			break;

		len -= bytes;
		p += bytes;
	}

	if (reading) {
		bio->bi_end_io = bio_copy_kern_endio_read;
		bio->bi_private = data;
	} else {
		bio->bi_end_io = bio_copy_kern_endio;
	}

	return bio;

cleanup:
	bio_free_pages(bio);
	bio_put(bio);
	return ERR_PTR(-ENOMEM);
}

/*
 * Append a bio to a passthrough request.  Only works if the bio can be merged
 * into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio **bio)
{
	struct bio *orig_bio = *bio;
	struct bvec_iter iter;
	struct bio_vec bv;
	unsigned int nr_segs = 0;

	blk_queue_bounce(rq->q, bio);

	bio_for_each_bvec(bv, *bio, iter)
		nr_segs++;

	if (!rq->bio) {
		blk_rq_bio_prep(rq, *bio, nr_segs);
	} else {
		if (!ll_back_merge_fn(rq, *bio, nr_segs)) {
			if (orig_bio != *bio) {
				bio_put(*bio);
				*bio = orig_bio;
			}
			return -EINVAL;
		}

		rq->biotail->bi_next = *bio;
		rq->biotail = *bio;
		rq->__data_len += (*bio)->bi_iter.bi_size;
		bio_crypt_free_ctx(*bio);
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);

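/*
 * Illustrative sketch (not part of this file): how a caller is expected to
 * use blk_rq_append_bio().  Because blk_queue_bounce() may substitute a
 * bounce bio, the function takes a struct bio ** and the caller must keep
 * working with the possibly-updated pointer, exactly as the two mapping
 * helpers above do.  "rq" is assumed to be a passthrough request and "bio"
 * a fully built bio owned by the caller:
 *
 *	struct bio *bounce_bio = bio;
 *	int ret;
 *
 *	ret = blk_rq_append_bio(rq, &bounce_bio);
 *	if (ret)
 *		return ret;
 *	bio_get(bounce_bio);
 *	return 0;
 *
 * The extra bio_get() mirrors bio_copy_user_iov() and bio_map_user_iov():
 * the bio is now linked into the request but may still need to be
 * traversed after completion, so it must not be freed by its end_io.
 */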
/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero-copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret = -EINVAL;

	if (!iter_is_iovec(iter))
		goto fail;

	if (map_data)
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		if (copy)
			ret = bio_copy_user_iov(rq, map_data, &i, gfp_mask);
		else
			ret = bio_map_user_iov(rq, &i, gfp_mask);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	return 0;

unmap_rq:
	blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);

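/*
 * Illustrative sketch (not part of this file): mapping a user iovec into a
 * passthrough request.  The request is assumed to come from
 * blk_get_request(); "uvec" and "nr_segs" are hypothetical values taken
 * from a caller's ioctl:
 *
 *	struct iovec *iov = NULL;
 *	struct iov_iter iter;
 *	ssize_t n;
 *	int ret;
 *
 *	n = import_iovec(rq_data_dir(rq), uvec, nr_segs, 0, &iov, &iter);
 *	if (n < 0)
 *		return n;
 *	ret = blk_rq_map_user_iov(rq->q, rq, NULL, &iter, GFP_KERNEL);
 *	kfree(iov);
 *	if (ret)
 *		return ret;
 *
 * The caller then submits the request and, once it has completed, calls
 * blk_rq_unmap_user() on the bio it saved from rq->bio before submission.
 */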
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	       start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		if (bio->bi_private) {
			ret2 = bio_uncopy_user(mapped_bio);
			if (ret2 && !ret)
				ret = ret2;
		} else {
			bio_unmap_user(mapped_bio);
		}

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

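/*
 * Illustrative sketch (not part of this file) tying blk_rq_map_user() and
 * blk_rq_unmap_user() together for a single user buffer.  "q", "ubuf" and
 * "len" are hypothetical caller-owned values, and error handling is
 * trimmed:
 *
 *	struct request *rq;
 *	struct bio *bio;
 *	int ret;
 *
 *	rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *	if (ret)
 *		goto out_put;
 *	bio = rq->bio;
 *
 *	(submit the request and wait for it, e.g. via blk_execute_rq())
 *
 *	ret = blk_rq_unmap_user(bio);
 * out_put:
 *	blk_put_request(rq);
 *	return ret;
 *
 * rq->bio is saved before submission because, as documented above,
 * completion may change rq->bio and the unmap needs the original.
 */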
/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	struct bio *bio, *orig_bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf))
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	orig_bio = bio;
	ret = blk_rq_append_bio(rq, &bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(orig_bio);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
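
/*
 * Illustrative sketch (not part of this file): attaching a kernel buffer to
 * a passthrough request, e.g. for a driver-internal command.  "cmd_buf" and
 * "cmd_len" are hypothetical, and error handling is trimmed:
 *
 *	struct request *rq;
 *	int ret;
 *
 *	rq = blk_get_request(q, REQ_OP_DRV_OUT, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	ret = blk_rq_map_kern(q, rq, cmd_buf, cmd_len, GFP_KERNEL);
 *	if (ret) {
 *		blk_put_request(rq);
 *		return ret;
 *	}
 *
 * After submission no explicit unmap call is needed: the bio installed by
 * bio_map_kern()/bio_copy_kern() cleans up (and copies back on a read)
 * from its bi_end_io handler.
 */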