Matching lines for "iov" / "supply" from block/blk-map.c (Linux block layer: mapping user and kernel data into passthrough requests)
// SPDX-License-Identifier: GPL-2.0

In struct bio_map_data (flexible array member):
	struct iovec iov[];
In bio_alloc_map_data():
	if (data->nr_segs > UIO_MAXIOV)
	bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
	bmd->iter = *data;
	memcpy(bmd->iov, iter_iov(data), sizeof(struct iovec) * data->nr_segs);
	bmd->iter.__iov = bmd->iov;
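The kmalloc(struct_size(bmd, iov, ...)) line above is the standard overflow-safe way to size a structure that ends in a flexible array of iovecs. A minimal stand-alone sketch of the same pattern; the struct and helper names here are illustrative, not taken from blk-map.c:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/uio.h>

/* Illustrative container: a fixed header plus a trailing array of iovecs. */
struct iov_holder {
	unsigned int nr_segs;
	struct iovec iov[];
};

static struct iov_holder *iov_holder_alloc(unsigned int nr_segs, gfp_t gfp)
{
	struct iov_holder *h;

	/* struct_size() computes sizeof(*h) + nr_segs * sizeof(h->iov[0])
	 * with integer-overflow checking. */
	h = kmalloc(struct_size(h, iov, nr_segs), gfp);
	if (!h)
		return NULL;
	h->nr_segs = nr_segs;
	return h;
}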
In bio_copy_from_iter() - copy all pages from iov_iter to bio:
	ret = copy_page_from_iter(bvec->bv_page,
				  bvec->bv_offset,
				  bvec->bv_len,
	if (ret < bvec->bv_len)
		return -EFAULT;
In bio_copy_to_iter() - copy all pages from bio to iov_iter:
	ret = copy_page_to_iter(bvec->bv_page,
				bvec->bv_offset,
				bvec->bv_len,
	if (ret < bvec->bv_len)
		return -EFAULT;
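Both copy helpers walk every segment of the bio and move data one page at a time with copy_page_{from,to}_iter(), treating a short copy as -EFAULT. A condensed sketch of the bio-to-user direction, assuming the usual bio_for_each_segment_all() iteration; the function name is illustrative and error handling beyond the short-copy check is omitted:

/* Copy each bio segment into a user-backed iov_iter. */
static int copy_bio_to_user_iter(struct bio *bio, struct iov_iter *iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		size_t ret;

		ret = copy_page_to_iter(bvec->bv_page, bvec->bv_offset,
					bvec->bv_len, iter);
		if (ret < bvec->bv_len)
			return -EFAULT;	/* user mapping went away */
	}
	return 0;
}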
In bio_uncopy_user() - finish previously mapped bio:
	struct bio_map_data *bmd = bio->bi_private;
	if (!bmd->is_null_mapped) {
	 * and return -EINTR so user space doesn't expect any data.
	if (!current->mm)
		ret = -EINTR;
	ret = bio_copy_to_iter(bio, bmd->iter);
	if (bmd->is_our_pages)
In bio_copy_user_iov():
	unsigned int len = iter->count;
	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;
	return -ENOMEM;
	 * The caller provided iov might point to an on-stack or otherwise
	bmd->is_our_pages = !map_data;
	bmd->is_null_mapped = (map_data && map_data->null_mapped);
	ret = -ENOMEM;
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, req_op(rq));
	nr_pages = 1U << map_data->page_order;
	i = map_data->offset / PAGE_SIZE;
	bytes -= offset;
	if (i == map_data->nr_entries * nr_pages) {
	ret = -ENOMEM;
	page = map_data->pages[i / nr_pages];
	ret = -ENOMEM;
	if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) {
	len -= bytes;
	map_data->offset += bio->bi_iter.bi_size;
	    (!map_data || !map_data->null_mapped)) {
	} else if (map_data && map_data->from_user) {
		/* This is the copy-in part of SG_DXFER_TO_FROM_DEV. */
	if (bmd->is_our_pages)
	iov_iter_advance(iter, bio->bi_iter.bi_size);
	bio->bi_private = bmd;
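The fragments above are the bounce path: bio_copy_user_iov() builds a bio out of pages it either takes from map_data or allocates itself, then copies user data in (for writes) or schedules a copy-out on completion (for reads). A stripped-down sketch of the allocate-and-add loop, assuming no map_data and leaving out the copy-in/copy-out and cleanup logic; the helper name is made up for illustration:

static int add_bounce_pages(struct request *rq, struct bio *bio,
			    unsigned int len, gfp_t gfp_mask)
{
	while (len) {
		unsigned int bytes = min_t(unsigned int, len, PAGE_SIZE);
		struct page *page = alloc_page(gfp_mask);

		if (!page)
			return -ENOMEM;

		/* bio_add_pc_page() honours the queue's segment limits;
		 * a short add means the bio cannot take more data. */
		if (bio_add_pc_page(rq->q, bio, page, bytes, 0) < bytes) {
			__free_page(page);
			break;
		}
		len -= bytes;
	}
	return 0;
}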
In blk_mq_map_bio_put():
	if (bio->bi_opf & REQ_ALLOC_CACHE) {
In blk_rq_map_bio_alloc():
	if (rq->cmd_flags & REQ_ALLOC_CACHE && (nr_vecs <= BIO_INLINE_VECS)) {
		bio = bio_alloc_bioset(NULL, nr_vecs, rq->cmd_flags, gfp_mask,
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));
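blk_rq_map_bio_alloc() has two allocation strategies: requests flagged REQ_ALLOC_CACHE that need no more than BIO_INLINE_VECS vectors can take a bio from a cached bio_set, everything else gets a bare allocation followed by bio_init(). A reduced sketch of that decision; the use of bio_kmalloc() and &fs_bio_set here is an assumption about the surrounding kernel version, not something the fragment itself shows:

static struct bio *map_bio_alloc(struct request *rq, unsigned int nr_vecs,
				 gfp_t gfp_mask)
{
	struct bio *bio;

	if (rq->cmd_flags & REQ_ALLOC_CACHE && nr_vecs <= BIO_INLINE_VECS) {
		/* Fast path: bio from a (per-cpu cached) bio_set. */
		bio = bio_alloc_bioset(NULL, nr_vecs, rq->cmd_flags, gfp_mask,
				       &fs_bio_set);
	} else {
		/* Plain allocation, initialised by hand. */
		bio = bio_kmalloc(nr_vecs, gfp_mask);
		if (bio)
			bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs,
				 req_op(rq));
	}
	return bio;
}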
In bio_map_user_iov():
	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
	return -EINVAL;
	return -ENOMEM;
	if (blk_queue_pci_p2pdma(rq->q))
	ret = bytes ? bytes : -EFAULT;
	if (unlikely(offs & queue_dma_alignment(rq->q)))
	unsigned int n = PAGE_SIZE - offs;
	if (!bio_add_hw_page(rq->q, bio, page, n, offs,
	bytes -= n;
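bio_map_user_iov() is the zero-copy path: it pins the user pages behind the iov_iter and adds them to the bio, enforcing queue_dma_alignment() and max_hw_sectors along the way. A simplified sketch of the same idea using the generic iov_iter_get_pages2()/bio_add_page() helpers rather than the internal bio_add_hw_page() shown above; releasing the page references on completion (bio_release_pages()) and the alignment/size checks are omitted, and the helper name is illustrative:

static int map_user_pages(struct bio *bio, struct iov_iter *iter)
{
	while (iov_iter_count(iter)) {
		struct page *pages[8];
		size_t offs, bytes;
		ssize_t got;
		int i;

		/* Pins up to 8 pages and advances the iterator. */
		got = iov_iter_get_pages2(iter, pages, SIZE_MAX,
					  ARRAY_SIZE(pages), &offs);
		if (got <= 0)
			return got ? got : -EFAULT;

		bytes = got;
		for (i = 0; bytes; i++) {
			unsigned int n = min_t(size_t, bytes, PAGE_SIZE - offs);

			if (bio_add_page(bio, pages[i], n, offs) != n)
				return -EINVAL;
			bytes -= n;
			offs = 0;
		}
	}
	return 0;
}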
In bio_invalidate_vmalloc_pages():
	if (bio->bi_private && !op_is_write(bio_op(bio))) {
	for (i = 0; i < bio->bi_vcnt; i++)
		len += bio->bi_io_vec[i].bv_len;
	invalidate_kernel_vmap_range(bio->bi_private, len);
In bio_map_kern() - map kernel address into bio:
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	const int nr_pages = end - start;
	return ERR_PTR(-ENOMEM);
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);
	bio->bi_private = data;
	unsigned int bytes = PAGE_SIZE - offset;
	return ERR_PTR(-EINVAL);
	len -= bytes;
	bio->bi_end_io = bio_map_kern_endio;
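bio_map_kern() attaches an existing kernel buffer to a bio without copying. The buffer may be vmalloc()ed, in which case each page has to be looked up individually and the vmap alias flushed or invalidated around the I/O (that is what bio_invalidate_vmalloc_pages() above is for). A sketch of the page-by-page attach, with the cache maintenance and endio wiring left out; the helper name is illustrative:

static int add_kernel_buffer(struct bio *bio, void *data, unsigned int len)
{
	bool is_vmalloc = is_vmalloc_addr(data);

	while (len) {
		unsigned int offset = offset_in_page(data);
		unsigned int bytes = min_t(unsigned int, len,
					   PAGE_SIZE - offset);
		struct page *page;

		/* vmalloc memory is not physically contiguous. */
		page = is_vmalloc ? vmalloc_to_page(data) : virt_to_page(data);
		if (bio_add_page(bio, page, bytes, offset) != bytes)
			return -EINVAL;

		data += bytes;
		len -= bytes;
	}
	return 0;
}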
In bio_copy_kern_endio_read():
	char *p = bio->bi_private;
	p += bvec->bv_len;
In bio_copy_kern() - copy kernel address into bio:
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	return ERR_PTR(-EINVAL);
	nr_pages = end - start;
	return ERR_PTR(-ENOMEM);
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);
	len -= bytes;
	bio->bi_end_io = bio_copy_kern_endio_read;
	bio->bi_private = data;
	bio->bi_end_io = bio_copy_kern_endio;
	return ERR_PTR(-ENOMEM);
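bio_copy_kern() is the bounce variant for kernel buffers: for reads it saves the original buffer pointer in bio->bi_private and installs bio_copy_kern_endio_read(), whose job (per the fragment further up) is to copy each bounce segment back on completion. An open-coded sketch of that completion-side copy; the real handler uses a bvec helper, this version spells it out with kmap_local_page():

static void copy_bounce_back(struct bio *bio)
{
	char *p = bio->bi_private;	/* original kernel buffer */
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		void *src = kmap_local_page(bvec->bv_page);

		memcpy(p, src + bvec->bv_offset, bvec->bv_len);
		kunmap_local(src);
		p += bvec->bv_len;
	}
}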
In blk_rq_append_bio():
	if (!rq->bio) {
	return -EINVAL;
	rq->biotail->bi_next = bio;
	rq->biotail = bio;
	rq->__data_len += (bio)->bi_iter.bi_size;
In blk_rq_map_user_bvec():
	struct request_queue *q = rq->q;
	size_t nr_segs = iter->nr_segs;
	const struct queue_limits *lim = &q->limits;
	return -EINVAL;
	return -EINVAL;
	return -ENOMEM;
	bvecs = (struct bio_vec *)iter->bvec;
	if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) {
		return -EREMOTEIO;
	if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len)
	if (bytes + bv->bv_len > nr_iter)
	bytes += bv->bv_len;
	return -EINVAL;
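blk_rq_map_user_bvec() reuses the caller's bio_vec array directly, so it has to validate it up front: total size against max_hw_sectors, segment count against the queue limits, no overflow of the running byte count, and no gaps that violate the virt boundary (the -EREMOTEIO case). A sketch of that pre-flight validation under the same assumptions; the gap check is left out and the helper name is made up:

static int validate_user_bvecs(struct request_queue *q,
			       const struct bio_vec *bvecs,
			       unsigned int nr_segs, size_t nr_iter)
{
	const struct queue_limits *lim = &q->limits;
	size_t bytes = 0;
	unsigned int i;

	if (nr_iter > (size_t)queue_max_hw_sectors(q) << SECTOR_SHIFT)
		return -EINVAL;
	if (nr_segs > lim->max_segments)
		return -EINVAL;

	for (i = 0; i < nr_segs; i++) {
		const struct bio_vec *bv = &bvecs[i];

		/* Guard the running total against wrap-around. */
		if (bytes > UINT_MAX - bv->bv_len)
			return -EINVAL;
		bytes += bv->bv_len;
	}
	if (bytes != nr_iter)
		return -EINVAL;
	return 0;
}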
In blk_rq_map_user_iov() - map user data to a request, for passthrough requests:
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	int ret = -EINVAL;
	if (ret != -EREMOTEIO)
	bio = rq->bio;
	rq->bio = NULL;
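The align line above is the heart of blk_rq_map_user_iov()'s policy decision: if the caller supplies bounce pages (map_data) or the user buffer does not satisfy the queue's DMA alignment/padding mask, the data goes through the copy path (bio_copy_user_iov()); otherwise it can be mapped directly (bio_map_user_iov()). A reduced sketch of just that decision; the real function also considers bvec iterators, P2P DMA and virt-boundary gaps, and the helper name is illustrative:

static bool need_bounce_copy(struct request_queue *q,
			     struct rq_map_data *map_data,
			     struct iov_iter *iter)
{
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);

	if (map_data)				/* caller-provided pages: always copy */
		return true;
	if (iov_iter_alignment(iter) & align)	/* misaligned user buffer */
		return true;
	return false;
}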
In blk_rq_map_user_io():
	struct iovec *iov = fast_iov;
			   UIO_FASTIOV, &iov, &iter);
	kfree(iov);
	return -EINVAL;
	ret = blk_rq_map_user_iov(req->q, req, map_data, &iter,
	kfree(iov);
	ret = blk_rq_map_user(req->q, req, map_data, ubuf, buf_len,
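blk_rq_map_user_io() accepts either a flat user buffer or a user iovec array; in the latter case it pulls the iovecs into the kernel with import_iovec(), using a small on-stack fast_iov array and falling back to a heap allocation for larger counts. The unconditional kfree(iov) works because import_iovec() leaves *iovp set to NULL when the fast array was sufficient. A sketch of that pattern; the function name and the is_write parameter are illustrative:

static int map_user_iovec(struct request *req, void __user *uvec,
			  unsigned int nr_segs, bool is_write, gfp_t gfp)
{
	struct iovec fast_iov[UIO_FASTIOV];
	struct iovec *iov = fast_iov;
	struct iov_iter iter;
	ssize_t ret;
	int err;

	ret = import_iovec(is_write ? ITER_SOURCE : ITER_DEST, uvec, nr_segs,
			   UIO_FASTIOV, &iov, &iter);
	if (ret < 0)
		return ret;

	err = blk_rq_map_user_iov(req->q, req, NULL, &iter, gfp);

	kfree(iov);	/* NULL, and therefore a no-op, if fast_iov sufficed */
	return err;
}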
In blk_rq_unmap_user() - unmap a request with user data:
	 * supply the original rq->bio from the blk_rq_map_user() return, since
	 * the I/O completion may have changed rq->bio.
	if (bio->bi_private) {
	bio = bio->bi_next;
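The comment above is the key contract of blk_rq_unmap_user(): the caller must hand back the rq->bio pointer it saved right after mapping, because completion may have advanced or replaced rq->bio. A sketch of the whole map/execute/unmap lifecycle for a passthrough request, assuming the usual blk_mq_alloc_request()/blk_execute_rq() companions; command setup, status handling and the opf value are driver-specific and omitted or illustrative here:

static int do_user_passthrough(struct request_queue *q, blk_opf_t opf,
			       void __user *ubuf, unsigned long len)
{
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_mq_alloc_request(q, opf, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		goto out_free;

	bio = rq->bio;			/* save: completion may change rq->bio */
	blk_execute_rq(rq, false);	/* status handling omitted */
	ret = blk_rq_unmap_user(bio);	/* copy back / unpin and free bios */

out_free:
	blk_mq_free_request(rq);
	return ret;
}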
In blk_rq_map_kern() - map kernel data to a request, for passthrough requests:
	return -EINVAL;
	return -EINVAL;
	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);
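The last two lines fix up the bio's operation bits: bio_map_kern()/bio_copy_kern() initialise the bio with op 0, so blk_rq_map_kern() clears REQ_OP_MASK and copies in req_op(rq) before appending the bio to the request. For completeness, a sketch of a kernel-buffer passthrough built on it; the (q, rq, buf, len, gfp) signature shown is the long-standing form and should be treated as an assumption, since the interface has differed between kernel versions:

static int do_kern_passthrough(struct request_queue *q, blk_opf_t opf,
			       void *buf, unsigned int len)
{
	struct request *rq;
	int ret;

	rq = blk_mq_alloc_request(q, opf, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* Maps (or bounce-copies) buf into rq->bio and fixes the op bits. */
	ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (!ret)
		blk_execute_rq(rq, false);	/* status handling omitted */

	blk_mq_free_request(rq);
	return ret;
}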