Lines matching refs: bio (block/blk-map.c)

48 static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)  in bio_copy_from_iter()  argument
53 bio_for_each_segment_all(bvec, bio, iter_all) { in bio_copy_from_iter()
79 static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter) in bio_copy_to_iter() argument
84 bio_for_each_segment_all(bvec, bio, iter_all) { in bio_copy_to_iter()
109 static int bio_uncopy_user(struct bio *bio) in bio_uncopy_user() argument
111 struct bio_map_data *bmd = bio->bi_private; in bio_uncopy_user()
122 else if (bio_data_dir(bio) == READ) in bio_uncopy_user()
123 ret = bio_copy_to_iter(bio, bmd->iter); in bio_uncopy_user()
125 bio_free_pages(bio); in bio_uncopy_user()
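
These three helpers implement the bounce-buffer copy path: bio_copy_from_iter() fills the freshly allocated bio pages from a user iov_iter before submission, bio_copy_to_iter() copies them back out after a READ (using copy_page_to_iter() instead), and bio_uncopy_user() drives that copy-back and frees the pages using the bio_map_data stashed in bi_private. The per-segment loop elided by the listing looks roughly like the sketch below; this follows the mainline code around the lines shown, but details may vary by kernel version.

static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	/* Fill every segment of the bounce bio from the user iterator. */
	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_from_iter(bvec->bv_page,
					  bvec->bv_offset,
					  bvec->bv_len,
					  iter);

		if (!iov_iter_count(iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}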
136 struct bio *bio; in bio_copy_user_iov() local
157 bio = bio_kmalloc(nr_pages, gfp_mask); in bio_copy_user_iov()
158 if (!bio) in bio_copy_user_iov()
160 bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, req_op(rq)); in bio_copy_user_iov()
192 if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) { in bio_copy_user_iov()
203 map_data->offset += bio->bi_iter.bi_size; in bio_copy_user_iov()
210 ret = bio_copy_from_iter(bio, iter); in bio_copy_user_iov()
218 ret = bio_copy_from_iter(bio, &iter2); in bio_copy_user_iov()
223 zero_fill_bio(bio); in bio_copy_user_iov()
224 iov_iter_advance(iter, bio->bi_iter.bi_size); in bio_copy_user_iov()
227 bio->bi_private = bmd; in bio_copy_user_iov()
229 ret = blk_rq_append_bio(rq, bio); in bio_copy_user_iov()
235 bio_free_pages(bio); in bio_copy_user_iov()
236 bio_uninit(bio); in bio_copy_user_iov()
237 kfree(bio); in bio_copy_user_iov()
243 static void blk_mq_map_bio_put(struct bio *bio) in blk_mq_map_bio_put() argument
245 if (bio->bi_opf & REQ_ALLOC_CACHE) { in blk_mq_map_bio_put()
246 bio_put(bio); in blk_mq_map_bio_put()
248 bio_uninit(bio); in blk_mq_map_bio_put()
249 kfree(bio); in blk_mq_map_bio_put()
253 static struct bio *blk_rq_map_bio_alloc(struct request *rq, in blk_rq_map_bio_alloc()
256 struct bio *bio; in blk_rq_map_bio_alloc() local
259 bio = bio_alloc_bioset(NULL, nr_vecs, rq->cmd_flags, gfp_mask, in blk_rq_map_bio_alloc()
261 if (!bio) in blk_rq_map_bio_alloc()
264 bio = bio_kmalloc(nr_vecs, gfp_mask); in blk_rq_map_bio_alloc()
265 if (!bio) in blk_rq_map_bio_alloc()
267 bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq)); in blk_rq_map_bio_alloc()
269 return bio; in blk_rq_map_bio_alloc()
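
blk_mq_map_bio_put() and blk_rq_map_bio_alloc() are a matched pair: if the request carries REQ_ALLOC_CACHE (and the vector count is small enough), the bio comes from the fs_bio_set per-cpu cache and is later dropped with bio_put(); otherwise it is bio_kmalloc()'ed, bio_init()'ed, and torn down with bio_uninit() plus kfree(). A sketch of the allocation side, assuming the mainline structure (the exact cache-eligibility test may differ between versions):

static struct bio *blk_rq_map_bio_alloc(struct request *rq,
		unsigned int nr_vecs, gfp_t gfp_mask)
{
	struct bio *bio;

	if (rq->cmd_flags & REQ_ALLOC_CACHE && (nr_vecs <= BIO_INLINE_VECS)) {
		/* Fast path: per-cpu bio cache, freed with bio_put(). */
		bio = bio_alloc_bioset(NULL, nr_vecs, rq->cmd_flags, gfp_mask,
					&fs_bio_set);
		if (!bio)
			return NULL;
	} else {
		/* Slow path: plain kmalloc, freed with bio_uninit() + kfree(). */
		bio = bio_kmalloc(nr_vecs, gfp_mask);
		if (!bio)
			return NULL;
		bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));
	}
	return bio;
}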
278 struct bio *bio; in bio_map_user_iov() local
285 bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask); in bio_map_user_iov()
286 if (bio == NULL) in bio_map_user_iov()
292 bio_set_flag(bio, BIO_PAGE_PINNED); in bio_map_user_iov()
324 if (!bio_add_hw_page(rq->q, bio, page, n, offs, in bio_map_user_iov()
329 bio_release_page(bio, page); in bio_map_user_iov()
338 bio_release_page(bio, pages[j++]); in bio_map_user_iov()
348 ret = blk_rq_append_bio(rq, bio); in bio_map_user_iov()
354 bio_release_pages(bio, false); in bio_map_user_iov()
355 blk_mq_map_bio_put(bio); in bio_map_user_iov()
359 static void bio_invalidate_vmalloc_pages(struct bio *bio) in bio_invalidate_vmalloc_pages() argument
362 if (bio->bi_private && !op_is_write(bio_op(bio))) { in bio_invalidate_vmalloc_pages()
365 for (i = 0; i < bio->bi_vcnt; i++) in bio_invalidate_vmalloc_pages()
366 len += bio->bi_io_vec[i].bv_len; in bio_invalidate_vmalloc_pages()
367 invalidate_kernel_vmap_range(bio->bi_private, len); in bio_invalidate_vmalloc_pages()
372 static void bio_map_kern_endio(struct bio *bio) in bio_map_kern_endio() argument
374 bio_invalidate_vmalloc_pages(bio); in bio_map_kern_endio()
375 bio_uninit(bio); in bio_map_kern_endio()
376 kfree(bio); in bio_map_kern_endio()
389 static struct bio *bio_map_kern(struct request_queue *q, void *data, in bio_map_kern()
399 struct bio *bio; in bio_map_kern() local
401 bio = bio_kmalloc(nr_pages, gfp_mask); in bio_map_kern()
402 if (!bio) in bio_map_kern()
404 bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0); in bio_map_kern()
408 bio->bi_private = data; in bio_map_kern()
425 if (bio_add_pc_page(q, bio, page, bytes, in bio_map_kern()
428 bio_uninit(bio); in bio_map_kern()
429 kfree(bio); in bio_map_kern()
438 bio->bi_end_io = bio_map_kern_endio; in bio_map_kern()
439 return bio; in bio_map_kern()
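
bio_map_kern() maps an existing kernel buffer with no copy. The loop elided by the listing walks the buffer one page at a time, translating addresses with virt_to_page() for linearly mapped memory and vmalloc_to_page() for vmalloc memory, and bails out with -EINVAL rather than build a partial mapping. For vmalloc buffers it also flushes the vmap alias up front and records the buffer in bi_private so bio_invalidate_vmalloc_pages() can invalidate it again when a read completes. A sketch of the whole helper, assuming the mainline layout:

static struct bio *bio_map_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	bool is_vmalloc = is_vmalloc_addr(data);
	int offset, i, nr_pages = end - start;
	struct page *page;
	struct bio *bio;

	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);

	if (is_vmalloc) {
		/* Flush the vmap alias so the device sees the caller's writes. */
		flush_kernel_vmap_range(data, len);
		bio->bi_private = data;
	}

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;
		if (bytes > len)
			bytes = len;

		page = is_vmalloc ? vmalloc_to_page(data) : virt_to_page(data);
		if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
			/* Partial mappings are not supported. */
			bio_uninit(bio);
			kfree(bio);
			return ERR_PTR(-EINVAL);
		}

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}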
442 static void bio_copy_kern_endio(struct bio *bio) in bio_copy_kern_endio() argument
444 bio_free_pages(bio); in bio_copy_kern_endio()
445 bio_uninit(bio); in bio_copy_kern_endio()
446 kfree(bio); in bio_copy_kern_endio()
449 static void bio_copy_kern_endio_read(struct bio *bio) in bio_copy_kern_endio_read() argument
451 char *p = bio->bi_private; in bio_copy_kern_endio_read()
455 bio_for_each_segment_all(bvec, bio, iter_all) { in bio_copy_kern_endio_read()
460 bio_copy_kern_endio(bio); in bio_copy_kern_endio_read()
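
bio_copy_kern_endio_read() is the completion side of the kernel bounce path for reads: it copies each bounce segment back into the caller's buffer (saved in bi_private) and then lets bio_copy_kern_endio() free the pages and the bio. Roughly, per the mainline code:

static void bio_copy_kern_endio_read(struct bio *bio)
{
	char *p = bio->bi_private;	/* destination kernel buffer */
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		memcpy_from_bvec(p, bvec);
		p += bvec->bv_len;
	}

	bio_copy_kern_endio(bio);	/* bio_free_pages() + bio_uninit() + kfree() */
}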
474 static struct bio *bio_copy_kern(struct request_queue *q, void *data, in bio_copy_kern()
480 struct bio *bio; in bio_copy_kern() local
491 bio = bio_kmalloc(nr_pages, gfp_mask); in bio_copy_kern()
492 if (!bio) in bio_copy_kern()
494 bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0); in bio_copy_kern()
510 if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) in bio_copy_kern()
518 bio->bi_end_io = bio_copy_kern_endio_read; in bio_copy_kern()
519 bio->bi_private = data; in bio_copy_kern()
521 bio->bi_end_io = bio_copy_kern_endio; in bio_copy_kern()
524 return bio; in bio_copy_kern()
527 bio_free_pages(bio); in bio_copy_kern()
528 bio_uninit(bio); in bio_copy_kern()
529 kfree(bio); in bio_copy_kern()
537 int blk_rq_append_bio(struct request *rq, struct bio *bio) in blk_rq_append_bio() argument
543 bio_for_each_bvec(bv, bio, iter) in blk_rq_append_bio()
546 if (!rq->bio) { in blk_rq_append_bio()
547 blk_rq_bio_prep(rq, bio, nr_segs); in blk_rq_append_bio()
549 if (!ll_back_merge_fn(rq, bio, nr_segs)) in blk_rq_append_bio()
551 rq->biotail->bi_next = bio; in blk_rq_append_bio()
552 rq->biotail = bio; in blk_rq_append_bio()
553 rq->__data_len += (bio)->bi_iter.bi_size; in blk_rq_append_bio()
554 bio_crypt_free_ctx(bio); in blk_rq_append_bio()
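
blk_rq_append_bio() is the one place where a mapped bio is attached to a passthrough request: the first bio becomes rq->bio via blk_rq_bio_prep(), later ones are merged onto rq->biotail only if ll_back_merge_fn() says the combined request still fits the queue limits. A hedged caller-side sketch follows; example_execute_bio() is a hypothetical helper, not a kernel API, and the blk_status_t return of blk_execute_rq() assumes a reasonably recent kernel.

#include <linux/blk-mq.h>
#include <linux/bio.h>

/*
 * Illustrative only: attach a bio the caller already built (for example with
 * bio_alloc_bioset() + bio_add_page()) to a passthrough request and run it
 * synchronously.  example_execute_bio() is hypothetical, not a kernel API.
 */
static int example_execute_bio(struct request_queue *q, struct bio *bio)
{
	struct request *rq;
	blk_status_t status;
	int ret;

	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* Keep the bio's op in sync with the request, as blk_rq_map_kern() does. */
	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	/* First bio becomes rq->bio; later ones merge onto rq->biotail. */
	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		goto out_free_rq;

	status = blk_execute_rq(rq, false);	/* wait for completion */
	ret = blk_status_to_errno(status);
out_free_rq:
	blk_mq_free_request(rq);
	return ret;
}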
570 struct bio *bio; in blk_rq_map_user_bvec() local
579 bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL); in blk_rq_map_user_bvec()
580 if (bio == NULL) in blk_rq_map_user_bvec()
583 bio_iov_bvec_set(bio, (struct iov_iter *)iter); in blk_rq_map_user_bvec()
584 blk_rq_bio_prep(rq, bio, nr_segs); in blk_rq_map_user_bvec()
596 blk_mq_map_bio_put(bio); in blk_rq_map_user_bvec()
613 blk_mq_map_bio_put(bio); in blk_rq_map_user_bvec()
638 struct bio *bio = NULL; in blk_rq_map_user_iov() local
673 if (!bio) in blk_rq_map_user_iov()
674 bio = rq->bio; in blk_rq_map_user_iov()
680 blk_rq_unmap_user(bio); in blk_rq_map_user_iov()
682 rq->bio = NULL; in blk_rq_map_user_iov()
746 int blk_rq_unmap_user(struct bio *bio) in blk_rq_unmap_user() argument
748 struct bio *next_bio; in blk_rq_unmap_user()
751 while (bio) { in blk_rq_unmap_user()
752 if (bio->bi_private) { in blk_rq_unmap_user()
753 ret2 = bio_uncopy_user(bio); in blk_rq_unmap_user()
757 bio_release_pages(bio, bio_data_dir(bio) == READ); in blk_rq_unmap_user()
760 next_bio = bio; in blk_rq_unmap_user()
761 bio = bio->bi_next; in blk_rq_unmap_user()
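
blk_rq_unmap_user() walks the bio chain hanging off the request: bios with bi_private set came from the copy path and go through bio_uncopy_user(), while pinned-page bios simply have their pages released (and dirtied for READs) before each bio is put. The usual pairing with blk_rq_map_user() / blk_rq_map_user_iov() looks roughly like the sketch below; example_user_io() is hypothetical, and older kernels have a void-returning blk_execute_rq().

#include <linux/blk-mq.h>

/*
 * Illustrative only: map a user buffer, run the request, and unmap.
 * example_user_io() is hypothetical, not a kernel API.
 */
static int example_user_io(struct request_queue *q, void __user *ubuf,
			   unsigned long len, bool reading)
{
	struct request *rq;
	struct bio *bio;
	blk_status_t status;
	int ret;

	rq = blk_mq_alloc_request(q, reading ? REQ_OP_DRV_IN : REQ_OP_DRV_OUT, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* Pins the user pages, or bounces them if alignment/flags demand it. */
	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		goto out_free_rq;

	/* Save the head: completion may advance rq->bio past it. */
	bio = rq->bio;

	status = blk_execute_rq(rq, false);
	ret = blk_status_to_errno(status);

	/* Copy back (bounce case) or unpin (zero-copy case) and free the bios. */
	if (blk_rq_unmap_user(bio) && !ret)
		ret = -EFAULT;
out_free_rq:
	blk_mq_free_request(rq);
	return ret;
}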
787 struct bio *bio; in blk_rq_map_kern() local
797 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading); in blk_rq_map_kern()
799 bio = bio_map_kern(q, kbuf, len, gfp_mask); in blk_rq_map_kern()
801 if (IS_ERR(bio)) in blk_rq_map_kern()
802 return PTR_ERR(bio); in blk_rq_map_kern()
804 bio->bi_opf &= ~REQ_OP_MASK; in blk_rq_map_kern()
805 bio->bi_opf |= req_op(rq); in blk_rq_map_kern()
807 ret = blk_rq_append_bio(rq, bio); in blk_rq_map_kern()
809 bio_uninit(bio); in blk_rq_map_kern()
810 kfree(bio); in blk_rq_map_kern()
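
blk_rq_map_kern() is the kernel-buffer analogue: it bounces via bio_copy_kern() when the buffer is misaligned for the queue (or lives on the stack), maps it directly via bio_map_kern() otherwise, rewrites the bio's op to match the request, and appends it. No explicit unmap step is needed afterwards because the bi_end_io handlers installed above free the bio on completion. A hedged usage sketch, with the same caveats as the previous example and the blk_rq_map_kern() signature as it appears in the kernel version this listing was taken from:

#include <linux/blk-mq.h>

/*
 * Illustrative only: issue I/O on a kernel buffer.  example_kernel_io() is
 * hypothetical, not a kernel API; the bio is freed by its completion handler.
 */
static int example_kernel_io(struct request_queue *q, void *kbuf,
			     unsigned int len, bool reading)
{
	struct request *rq;
	blk_status_t status;
	int ret;

	rq = blk_mq_alloc_request(q, reading ? REQ_OP_DRV_IN : REQ_OP_DRV_OUT, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* Maps kbuf directly, or bounces it if it is misaligned for the queue. */
	ret = blk_rq_map_kern(q, rq, kbuf, len, GFP_KERNEL);
	if (ret)
		goto out_free_rq;

	status = blk_execute_rq(rq, false);
	ret = blk_status_to_errno(status);
out_free_rq:
	blk_mq_free_request(rq);
	return ret;
}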