Lines matching "full:clone" in drivers/md/dm.c (device-mapper core)
92 static inline struct dm_target_io *clone_to_tio(struct bio *clone) in clone_to_tio() argument
94 return container_of(clone, struct dm_target_io, clone); in clone_to_tio()
118 return container_of(bio, struct dm_target_io, clone)->target_bio_nr; in dm_bio_get_target_bio_nr()
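Both clone_to_tio() and dm_bio_get_target_bio_nr() rely on struct dm_target_io embedding the clone bio, so container_of() can walk back from the bio pointer to its enclosing tio. A minimal user-space sketch of the same pattern; the struct layout is a simplified stand-in for the kernel's, and container_of is spelled out rather than taken from the kernel headers:

    #include <stddef.h>
    #include <stdio.h>

    struct bio { int dummy; };                      /* stand-in */

    struct dm_target_io {
            unsigned int target_bio_nr;
            struct bio clone;                       /* embedded, as in dm.c */
    };

    /* Recover the enclosing struct from a pointer to one member. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    static struct dm_target_io *clone_to_tio(struct bio *clone)
    {
            return container_of(clone, struct dm_target_io, clone);
    }

    int main(void)
    {
            struct dm_target_io tio = { .target_bio_nr = 3 };
            struct bio *clone = &tio.clone;

            printf("%u\n", clone_to_tio(clone)->target_bio_nr); /* prints 3 */
            return 0;
    }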
542 static void dm_start_io_acct(struct dm_io *io, struct bio *clone) in dm_start_io_acct() argument
551 if (!clone || likely(dm_tio_is_normal(clone_to_tio(clone)))) { in dm_start_io_acct()
577 struct bio *clone; in alloc_io() local
579 clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->mempools->io_bs); in alloc_io()
580 tio = clone_to_tio(clone); in alloc_io()
609 bio_put(&io->tio.clone); in free_io()
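alloc_io() draws the first clone from a bioset whose front pad reserves space for the whole struct dm_io ahead of the bio, so io, tio and clone live in a single allocation; that is why free_io() can release everything with one bio_put() on the embedded clone. A sketch of the offset arithmetic, assuming the embedding shown above (the real dm.c additionally rounds the pad up for per-target per-io data):

    /* Everything before the bio in the allocation is front pad. */
    size_t front_pad = offsetof(struct dm_io, tio) +
                       offsetof(struct dm_target_io, clone);

    /* After bio_alloc_clone() from that bioset, walk back out: */
    struct dm_target_io *tio = clone_to_tio(clone);
    struct dm_io *io = container_of(tio, struct dm_io, tio);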
617 struct bio *clone; in alloc_tio() local
622 /* alloc_io() already initialized embedded clone */ in alloc_tio()
623 clone = &tio->clone; in alloc_tio()
625 clone = bio_alloc_clone(NULL, ci->bio, gfp_mask, in alloc_tio()
627 if (!clone) in alloc_tio()
631 clone->bi_opf &= ~REQ_DM_POLL_LIST; in alloc_tio()
633 tio = clone_to_tio(clone); in alloc_tio()
645 clone->bi_bdev = md->disk->part0; in alloc_tio()
647 bio_set_dev(clone, md->disk->part0); in alloc_tio()
650 clone->bi_iter.bi_size = to_bytes(*len); in alloc_tio()
651 if (bio_integrity(clone)) in alloc_tio()
652 bio_integrity_trim(clone); in alloc_tio()
655 return clone; in alloc_tio()
658 static void free_tio(struct bio *clone) in free_tio() argument
660 if (dm_tio_flagged(clone_to_tio(clone), DM_TIO_INSIDE_DM_IO)) in free_tio()
662 bio_put(clone); in free_tio()
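alloc_tio() therefore has two paths: the first tio per dm_io reuses the clone that alloc_io() already initialized (flagged DM_TIO_INSIDE_DM_IO), and only additional tios are cloned from the per-device bioset. free_tio() mirrors that split by putting only the separately allocated bios. A condensed sketch of the pairing; embedded_tio_is_free() is a hypothetical stand-in for the real availability check:

    if (embedded_tio_is_free(ci->io)) {     /* hypothetical check */
            tio = &ci->io->tio;             /* reuse embedded clone */
            clone = &tio->clone;
    } else {
            clone = bio_alloc_clone(NULL, ci->bio, gfp_mask,
                                    &ci->io->md->mempools->bs);
            tio = clone_to_tio(clone);
    }

    /* Teardown: only separately allocated clones are put. */
    if (!dm_tio_flagged(clone_to_tio(clone), DM_TIO_INSIDE_DM_IO))
            bio_put(clone);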
1017 * we may run into long bio clone chain during suspend and OOM could in dm_io_complete()
1349 * @clone: clone bio that DM core passed to target's .map function
1350 * @tgt_clone: clone of @clone bio that target needs submitted
1357 void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone) in dm_submit_bio_remap() argument
1359 struct dm_target_io *tio = clone_to_tio(clone); in dm_submit_bio_remap()
1364 tgt_clone = clone; in dm_submit_bio_remap()
1370 dm_start_io_acct(io, clone); in dm_submit_bio_remap()
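dm_submit_bio_remap() exists so a target can return DM_MAPIO_SUBMITTED from .map, finish building its own clone asynchronously (on a workqueue, say), and still let DM core handle accounting and submission. A hedged usage sketch from a hypothetical target: the example_* names are illustrative, and the target is assumed to have opted in to deferred accounting in its constructor:

    /* In .map: take ownership, submit from a worker later. */
    static int example_map(struct dm_target *ti, struct bio *bio)
    {
            struct example_io *io = example_io_setup(ti, bio); /* hypothetical */

            queue_work(example_wq, &io->work);
            return DM_MAPIO_SUBMITTED;
    }

    static void example_worker(struct work_struct *work)
    {
            struct example_io *io = container_of(work, struct example_io, work);

            /* base_bio: the clone DM passed to .map; tgt_clone: ours. */
            dm_submit_bio_remap(io->base_bio, io->tgt_clone);
    }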
1394 static void __map_bio(struct bio *clone) in __map_bio() argument
1396 struct dm_target_io *tio = clone_to_tio(clone); in __map_bio()
1402 clone->bi_end_io = clone_endio; in __map_bio()
1405 * Map the clone. in __map_bio()
1407 tio->old_sector = clone->bi_iter.bi_sector; in __map_bio()
1410 unlikely(swap_bios_limit(ti, clone))) { in __map_bio()
1427 r = ti->type->map(ti, clone); in __map_bio()
1429 r = ti->type->map(ti, clone); in __map_bio()
1435 dm_start_io_acct(io, clone); in __map_bio()
1438 dm_submit_bio_remap(clone, NULL); in __map_bio()
1443 unlikely(swap_bios_limit(ti, clone))) in __map_bio()
1445 free_tio(clone); in __map_bio()
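__map_bio() dispatches on the target's return code: DM_MAPIO_SUBMITTED means the target has taken ownership of the clone, DM_MAPIO_REMAPPED means DM core should account for and submit it (the dm_submit_bio_remap(clone, NULL) call above), and DM_MAPIO_KILL/DM_MAPIO_REQUEUE free the tio and end the io. The simplest conforming .map is a dm-linear-style remap; struct linear_c here is a stand-in for the target's private data:

    static int linear_map(struct dm_target *ti, struct bio *bio)
    {
            /* lc: { struct dm_dev *dev; sector_t start; }, stand-in */
            struct linear_c *lc = ti->private;

            bio_set_dev(bio, lc->dev->bdev);
            bio->bi_iter.bi_sector =
                    lc->start + dm_target_offset(ti, bio->bi_iter.bi_sector);
            /* Let __map_bio() account and submit the clone. */
            return DM_MAPIO_REMAPPED;
    }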
1506 struct bio *clone; in __send_duplicate_bios() local
1515 clone = alloc_tio(ci, ti, 0, len, GFP_NOIO); in __send_duplicate_bios()
1516 __map_bio(clone); in __send_duplicate_bios()
1524 while ((clone = bio_list_pop(&blist))) { in __send_duplicate_bios()
1525 dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO); in __send_duplicate_bios()
1526 __map_bio(clone); in __send_duplicate_bios()
1543 * the basis for the clone(s). in __send_empty_flush()
1550 ci->io->tio.clone.bi_iter.bi_size = 0; in __send_empty_flush()
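An empty flush carries no data, so __send_empty_flush() zeroes the embedded clone's size and relies on __send_duplicate_bios() to fan one zero-length REQ_PREFLUSH clone out to every target, each extra copy marked DM_TIO_IS_DUPLICATE_BIO as above. A condensed sketch of the fan-out, assuming the helpers already shown:

    ci->io->tio.clone.bi_iter.bi_size = 0;  /* flush only, no payload */

    for (unsigned int i = 0; i < t->num_targets; i++) {
            struct dm_target *ti = dm_table_get_target(t, i);

            __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
    }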
1702 struct bio *clone; in __split_and_process_bio() local
1725 clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO); in __split_and_process_bio()
1726 __map_bio(clone); in __split_and_process_bio()
1850 bio_poll(&io->tio.clone, iob, flags); in dm_poll_dm_io()
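dm_poll_dm_io() polls a dm_io by polling its first clone, which is always the one embedded in the dm_io. A sketch close to the dm.c logic, assuming the flag helpers and reference counting shown elsewhere in the file:

    static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob,
                              unsigned int flags)
    {
            WARN_ON_ONCE(!dm_tio_flagged(&io->tio, DM_TIO_INSIDE_DM_IO));

            /* Only poll while the mapped io is still in flight. */
            if (atomic_read(&io->io_count) > 1)
                    bio_poll(&io->tio.clone, iob, flags);

            /* Done when only the last reference remains. */
            return atomic_read(&io->io_count) == 1;
    }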