Lines matching "suspend-to-disk" (query tokens: suspend, to, disk) in drivers/md/dm.c

1 // SPDX-License-Identifier: GPL-2.0-only
4 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
9 #include "dm-core.h"
10 #include "dm-rq.h"
11 #include "dm-uevent.h"
12 #include "dm-ima.h"
32 #include <linux/blk-crypto.h>
33 #include <linux/blk-crypto-profile.h>
46 * dm_io into one list, and reuse bio->bi_private as the list head. Before
47 * ending this fs bio, we will recover its ->bi_private.
80 * One of these is allocated (on-stack) per original bio.
100 return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size; in dm_per_bio_data()
101 return (char *)bio - DM_IO_BIO_OFFSET - data_size; in dm_per_bio_data()
109 if (io->magic == DM_IO_MAGIC) in dm_bio_from_per_bio_data()
111 BUG_ON(io->magic != DM_TIO_MAGIC); in dm_bio_from_per_bio_data()
118 return container_of(bio, struct dm_target_io, clone)->target_bio_nr; in dm_bio_get_target_bio_nr()
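A short aside on the three helpers above: together they form device-mapper's per-bio-data API. A target reserves data_size bytes behind every clone bio by setting ti->per_io_data_size in its .ctr, then recovers that area with dm_per_bio_data(); dm_bio_from_per_bio_data() maps back the other way. A minimal sketch of a consumer, with hypothetical example_* names around the real API:

#include <linux/device-mapper.h>
#include <linux/jiffies.h>

struct example_per_bio_data {
	unsigned long start_jiffies;
};

static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	/* ask DM core to reserve space behind each clone bio */
	ti->per_io_data_size = sizeof(struct example_per_bio_data);
	return 0;
}

static int example_map(struct dm_target *ti, struct bio *bio)
{
	struct example_per_bio_data *pb = dm_per_bio_data(bio, sizeof(*pb));

	pb->start_jiffies = jiffies;	/* stash per-IO state for .end_io */
	/* ... remap the bio to the underlying device ... */
	return DM_MAPIO_REMAPPED;
}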
122 #define MINOR_ALLOCED ((void *)-1)
145 * Bio-based DM's mempools' reserved IOs set by the user.
199 DM_NUMA_NODE, num_online_nodes() - 1); in dm_get_numa_node()
212 r = -ENOMEM; in local_init()
286 while (i--) in dm_init()
296 while (i--) in dm_exit()
310 return test_bit(DMF_DELETING, &md->flags); in dm_deleting_md()
313 static int dm_blk_open(struct gendisk *disk, blk_mode_t mode) in dm_blk_open() argument
319 md = disk->private_data; in dm_blk_open()
323 if (test_bit(DMF_FREEING, &md->flags) || in dm_blk_open()
330 atomic_inc(&md->open_count); in dm_blk_open()
334 return md ? 0 : -ENXIO; in dm_blk_open()
337 static void dm_blk_close(struct gendisk *disk) in dm_blk_close() argument
343 md = disk->private_data; in dm_blk_close()
347 if (atomic_dec_and_test(&md->open_count) && in dm_blk_close()
348 (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) in dm_blk_close()
358 return atomic_read(&md->open_count); in dm_open_count()
371 r = -EBUSY; in dm_lock_for_deletion()
373 set_bit(DMF_DEFERRED_REMOVE, &md->flags); in dm_lock_for_deletion()
374 } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags)) in dm_lock_for_deletion()
375 r = -EEXIST; in dm_lock_for_deletion()
377 set_bit(DMF_DELETING, &md->flags); in dm_lock_for_deletion()
390 if (test_bit(DMF_DELETING, &md->flags)) in dm_cancel_deferred_remove()
391 r = -EBUSY; in dm_cancel_deferred_remove()
393 clear_bit(DMF_DEFERRED_REMOVE, &md->flags); in dm_cancel_deferred_remove()
407 struct mapped_device *md = bdev->bd_disk->private_data; in dm_blk_getgeo()
420 r = -ENOTTY; in dm_prepare_ioctl()
426 if (map->num_targets != 1) in dm_prepare_ioctl()
430 if (!ti->type->prepare_ioctl) in dm_prepare_ioctl()
434 return -EAGAIN; in dm_prepare_ioctl()
436 r = ti->type->prepare_ioctl(ti, bdev); in dm_prepare_ioctl()
437 if (r == -ENOTCONN && !fatal_signal_pending(current)) { in dm_prepare_ioctl()
454 struct mapped_device *md = bdev->bd_disk->private_data; in dm_blk_ioctl()
468 "%s: sending ioctl %x to DM device without required privilege.", in dm_blk_ioctl()
469 current->comm, cmd); in dm_blk_ioctl()
470 r = -ENOIOCTLCMD; in dm_blk_ioctl()
475 if (!bdev->bd_disk->fops->ioctl) in dm_blk_ioctl()
476 r = -ENOTTY; in dm_blk_ioctl()
478 r = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg); in dm_blk_ioctl()
486 return jiffies_to_nsecs(clone_to_tio(bio)->io->start_time); in dm_start_time_ns_from_clone()
492 return ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size); in bio_is_flush_with_data()
504 return io->sectors; in dm_io_sectors()
510 struct bio *bio = io->orig_bio; in dm_io_acct()
514 bdev_start_io_acct(bio->bi_bdev, bio_op(bio), in dm_io_acct()
515 io->start_time); in dm_io_acct()
517 bdev_end_io_acct(bio->bi_bdev, bio_op(bio), in dm_io_acct()
519 io->start_time); in dm_io_acct()
523 unlikely(dm_stats_used(&io->md->stats))) { in dm_io_acct()
527 sector = bio_end_sector(bio) - io->sector_offset; in dm_io_acct()
529 sector = bio->bi_iter.bi_sector; in dm_io_acct()
531 dm_stats_account_io(&io->md->stats, bio_data_dir(bio), in dm_io_acct()
533 end, io->start_time, &io->stats_aux); in dm_io_acct()
556 spin_lock_irqsave(&io->lock, flags); in dm_start_io_acct()
558 spin_unlock_irqrestore(&io->lock, flags); in dm_start_io_acct()
562 spin_unlock_irqrestore(&io->lock, flags); in dm_start_io_acct()
579 clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->mempools->io_bs); in alloc_io()
581 tio->flags = 0; in alloc_io()
583 tio->io = NULL; in alloc_io()
586 io->magic = DM_IO_MAGIC; in alloc_io()
587 io->status = BLK_STS_OK; in alloc_io()
590 atomic_set(&io->io_count, 2); in alloc_io()
591 this_cpu_inc(*md->pending_io); in alloc_io()
592 io->orig_bio = bio; in alloc_io()
593 io->md = md; in alloc_io()
594 spin_lock_init(&io->lock); in alloc_io()
595 io->start_time = jiffies; in alloc_io()
596 io->flags = 0; in alloc_io()
597 if (blk_queue_io_stat(md->queue)) in alloc_io()
601 unlikely(dm_stats_used(&md->stats))) in alloc_io()
602 dm_stats_record_start(&md->stats, &io->stats_aux); in alloc_io()
609 bio_put(&io->tio.clone); in free_io()
615 struct mapped_device *md = ci->io->md; in alloc_tio()
619 if (!ci->io->tio.io) { in alloc_tio()
620 /* the dm_target_io embedded in ci->io is available */ in alloc_tio()
621 tio = &ci->io->tio; in alloc_tio()
623 clone = &tio->clone; in alloc_tio()
625 clone = bio_alloc_clone(NULL, ci->bio, gfp_mask, in alloc_tio()
626 &md->mempools->bs); in alloc_tio()
631 clone->bi_opf &= ~REQ_DM_POLL_LIST; in alloc_tio()
634 tio->flags = 0; /* also clears DM_TIO_INSIDE_DM_IO */ in alloc_tio()
637 tio->magic = DM_TIO_MAGIC; in alloc_tio()
638 tio->io = ci->io; in alloc_tio()
639 tio->ti = ti; in alloc_tio()
640 tio->target_bio_nr = target_bio_nr; in alloc_tio()
641 tio->len_ptr = len; in alloc_tio()
642 tio->old_sector = 0; in alloc_tio()
645 clone->bi_bdev = md->disk->part0; in alloc_tio()
646 if (unlikely(ti->needs_bio_set_dev)) in alloc_tio()
647 bio_set_dev(clone, md->disk->part0); in alloc_tio()
650 clone->bi_iter.bi_size = to_bytes(*len); in alloc_tio()
666 * Add the bio to the list of deferred io.
672 spin_lock_irqsave(&md->deferred_lock, flags); in queue_io()
673 bio_list_add(&md->deferred, bio); in queue_io()
674 spin_unlock_irqrestore(&md->deferred_lock, flags); in queue_io()
675 queue_work(md->wq, &md->work); in queue_io()
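queue_io() above is the producer half of a common kernel pattern: a spinlock-protected bio_list plus a workqueue kick, drained later by dm_wq_work() (line 2582 below). A generic, self-contained sketch of the same pattern, with hypothetical example_* names:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct example_dev {
	spinlock_t deferred_lock;
	struct bio_list deferred;
	struct workqueue_struct *wq;
	struct work_struct work;
};

static void example_defer_bio(struct example_dev *d, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&d->deferred_lock, flags);
	bio_list_add(&d->deferred, bio);
	spin_unlock_irqrestore(&d->deferred_lock, flags);
	queue_work(d->wq, &d->work);
}

static void example_work_fn(struct work_struct *work)
{
	struct example_dev *d = container_of(work, struct example_dev, work);
	struct bio *bio;

	while (true) {
		spin_lock_irq(&d->deferred_lock);
		bio = bio_list_pop(&d->deferred);
		spin_unlock_irq(&d->deferred_lock);
		if (!bio)
			break;
		submit_bio_noacct(bio);	/* resubmit now that progress is possible */
	}
}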
680 * function to access the md->map field, and make sure they call
684 int *srcu_idx) __acquires(md->io_barrier) in dm_get_live_table()
686 *srcu_idx = srcu_read_lock(&md->io_barrier); in dm_get_live_table()
688 return srcu_dereference(md->map, &md->io_barrier); in dm_get_live_table()
692 int srcu_idx) __releases(md->io_barrier) in dm_put_live_table()
694 srcu_read_unlock(&md->io_barrier, srcu_idx); in dm_put_live_table()
699 synchronize_srcu(&md->io_barrier); in dm_sync_table()
704 * A fast alternative to dm_get_live_table/dm_put_live_table.
710 return rcu_dereference(md->map); in dm_get_live_table_fast()
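The canonical caller pattern for the SRCU-protected live table above looks like this; a minimal sketch assuming dm-core.h visibility (the function itself is hypothetical):

static unsigned int example_num_targets(struct mapped_device *md)
{
	struct dm_table *map;
	unsigned int n = 0;
	int srcu_idx;

	map = dm_get_live_table(md, &srcu_idx);
	if (map)	/* NULL until the first table is bound */
		n = map->num_targets;
	dm_put_live_table(md, srcu_idx);	/* always pairs with the acquire above */
	return n;
}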
718 static char *_dm_claim_ptr = "I belong to device-mapper";
731 td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id); in open_table_device()
733 return ERR_PTR(-ENOMEM); in open_table_device()
734 refcount_set(&td->count, 1); in open_table_device()
743 * We can be called before the dm disk is added. In that case we can't in open_table_device()
747 if (md->disk->slave_dir) { in open_table_device()
748 r = bd_link_disk_holder(bdev, md->disk); in open_table_device()
753 td->dm_dev.mode = mode; in open_table_device()
754 td->dm_dev.bdev = bdev; in open_table_device()
755 td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off, NULL, NULL); in open_table_device()
756 format_dev_t(td->dm_dev.name, dev); in open_table_device()
757 list_add(&td->list, &md->table_devices); in open_table_device()
772 if (md->disk->slave_dir) in close_table_device()
773 bd_unlink_disk_holder(td->dm_dev.bdev, md->disk); in close_table_device()
774 blkdev_put(td->dm_dev.bdev, _dm_claim_ptr); in close_table_device()
775 put_dax(td->dm_dev.dax_dev); in close_table_device()
776 list_del(&td->list); in close_table_device()
786 if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode) in find_table_device()
797 mutex_lock(&md->table_devices_lock); in dm_get_table_device()
798 td = find_table_device(&md->table_devices, dev, mode); in dm_get_table_device()
802 mutex_unlock(&md->table_devices_lock); in dm_get_table_device()
806 refcount_inc(&td->count); in dm_get_table_device()
808 mutex_unlock(&md->table_devices_lock); in dm_get_table_device()
810 *result = &td->dm_dev; in dm_get_table_device()
818 mutex_lock(&md->table_devices_lock); in dm_put_table_device()
819 if (refcount_dec_and_test(&td->count)) in dm_put_table_device()
821 mutex_unlock(&md->table_devices_lock); in dm_put_table_device()
829 *geo = md->geometry; in dm_get_geometry()
839 sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors; in dm_set_geometry()
841 if (geo->start > sz) { in dm_set_geometry()
843 return -EINVAL; in dm_set_geometry()
846 md->geometry = *geo; in dm_set_geometry()
853 return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); in __noflush_suspending()
858 struct mapped_device *md = io->md; in dm_requeue_add_io()
861 struct dm_io *next = md->requeue_list; in dm_requeue_add_io()
863 md->requeue_list = io; in dm_requeue_add_io()
864 io->next = next; in dm_requeue_add_io()
866 bio_list_add_head(&md->deferred, io->orig_bio); in dm_requeue_add_io()
873 queue_work(md->wq, &md->requeue_work); in dm_kick_requeue()
875 queue_work(md->wq, &md->work); in dm_kick_requeue()
880 * io->status is updated with error if requeue disallowed.
884 struct bio *bio = io->orig_bio; in dm_handle_requeue()
885 bool handle_requeue = (io->status == BLK_STS_DM_REQUEUE); in dm_handle_requeue()
886 bool handle_polled_eagain = ((io->status == BLK_STS_AGAIN) && in dm_handle_requeue()
887 (bio->bi_opf & REQ_POLLED)); in dm_handle_requeue()
888 struct mapped_device *md = io->md; in dm_handle_requeue()
894 if (bio->bi_opf & REQ_POLLED) { in dm_handle_requeue()
897 * (io->orig_bio may only reflect a subset of the in dm_handle_requeue()
898 * pre-split original) so clear REQ_POLLED. in dm_handle_requeue()
907 spin_lock_irqsave(&md->deferred_lock, flags); in dm_handle_requeue()
915 * noflush suspend was interrupted or this is in dm_handle_requeue()
916 * a write to a zoned target. in dm_handle_requeue()
918 io->status = BLK_STS_IOERR; in dm_handle_requeue()
920 spin_unlock_irqrestore(&md->deferred_lock, flags); in dm_handle_requeue()
931 struct bio *bio = io->orig_bio; in __dm_io_complete()
932 struct mapped_device *md = io->md; in __dm_io_complete()
940 io_error = io->status; in __dm_io_complete()
945 * Must handle target that DM_MAPIO_SUBMITTED only to in __dm_io_complete()
953 this_cpu_dec(*md->pending_io); in __dm_io_complete()
955 /* nudge anyone waiting on suspend queue */ in __dm_io_complete()
956 if (unlikely(wq_has_sleeper(&md->wait))) in __dm_io_complete()
957 wake_up(&md->wait); in __dm_io_complete()
968 bio->bi_opf &= ~REQ_PREFLUSH; in __dm_io_complete()
973 bio->bi_status = io_error; in __dm_io_complete()
985 /* reuse deferred lock to simplify dm_handle_requeue */ in dm_wq_requeue_work()
986 spin_lock_irqsave(&md->deferred_lock, flags); in dm_wq_requeue_work()
987 io = md->requeue_list; in dm_wq_requeue_work()
988 md->requeue_list = NULL; in dm_wq_requeue_work()
989 spin_unlock_irqrestore(&md->deferred_lock, flags); in dm_wq_requeue_work()
992 struct dm_io *next = io->next; in dm_wq_requeue_work()
994 dm_io_rewind(io, &md->disk->bio_split); in dm_wq_requeue_work()
996 io->next = NULL; in dm_wq_requeue_work()
1006 * 1) io->orig_bio points to the real original bio, and the part mapped to
1009 * 2) io->orig_bio points to new cloned bio which matches the requeued dm_io.
1017 * we may run into long bio clone chain during suspend and OOM could in dm_io_complete()
1037 if (atomic_dec_and_test(&io->io_count)) in __dm_io_dec_pending()
1045 /* Push-back supersedes any I/O errors */ in dm_io_set_error()
1046 spin_lock_irqsave(&io->lock, flags); in dm_io_set_error()
1047 if (!(io->status == BLK_STS_DM_REQUEUE && in dm_io_set_error()
1048 __noflush_suspending(io->md))) { in dm_io_set_error()
1049 io->status = error; in dm_io_set_error()
1051 spin_unlock_irqrestore(&io->lock, flags); in dm_io_set_error()
1064 * count on 'md'. But _not_ imposing verification to avoid atomic_read(),
1068 return &md->queue->limits; in dm_get_queue_limits()
1076 limits->max_discard_sectors = 0; in disable_discard()
1084 limits->max_write_zeroes_sectors = 0; in disable_write_zeroes()
1089 return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios); in swap_bios_limit()
1094 blk_status_t error = bio->bi_status; in clone_endio()
1096 struct dm_target *ti = tio->ti; in clone_endio()
1097 dm_endio_fn endio = ti->type->end_io; in clone_endio()
1098 struct dm_io *io = tio->io; in clone_endio()
1099 struct mapped_device *md = io->md; in clone_endio()
1103 !bdev_max_discard_sectors(bio->bi_bdev)) in clone_endio()
1106 !bdev_write_zeroes_sectors(bio->bi_bdev)) in clone_endio()
1111 unlikely(bdev_is_zoned(bio->bi_bdev))) in clone_endio()
1121 * Requeuing writes to a sequential zone of a zoned in clone_endio()
1145 up(&md->swap_bios_semaphore); in clone_endio()
1152 * Return maximum size of I/O possible at the supplied sector up to the current
1158 return ti->len - target_offset; in max_io_len_target_boundary()
1169 * Does the target need to split IO even further? in __max_io_len()
1170 * - varied (per target) IO splitting is a tenet of DM; this in __max_io_len()
1177 min(max_sectors ? : queue_max_sectors(ti->table->md->queue), in __max_io_len()
1183 return __max_io_len(ti, sector, ti->max_io_len, 0); in max_io_len()
1191 ti->error = "Maximum size of target IO is too large"; in dm_set_target_max_io_len()
1192 return -EINVAL; in dm_set_target_max_io_len()
1195 ti->max_io_len = (uint32_t) len; in dm_set_target_max_io_len()
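A target typically calls dm_set_target_max_io_len() from its .ctr so DM core splits incoming bios at that granularity. A sketch, with a hypothetical chunk size:

static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	sector_t chunk_sectors = 8;	/* assumed example value (4KiB chunks) */
	int r = dm_set_target_max_io_len(ti, chunk_sectors);

	if (r)
		return r;	/* ti->error was set by the helper above */
	return 0;
}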
1203 __acquires(md->io_barrier) in dm_dax_get_live_target()
1226 long len, ret = -EIO; in dm_dax_direct_access()
1233 if (!ti->type->direct_access) in dm_dax_direct_access()
1239 ret = ti->type->direct_access(ti, pgoff, nr_pages, mode, kaddr, pfn); in dm_dax_direct_access()
1253 int ret = -EIO; in dm_dax_zero_page_range()
1260 if (WARN_ON(!ti->type->dax_zero_page_range)) { in dm_dax_zero_page_range()
1262 * ->zero_page_range() is a mandatory dax operation. If we are in dm_dax_zero_page_range()
1267 ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages); in dm_dax_zero_page_range()
1284 if (!ti || !ti->type->dax_recovery_write) in dm_dax_recovery_write()
1287 ret = ti->type->dax_recovery_write(ti, pgoff, addr, bytes, i); in dm_dax_recovery_write()
1299 * dm_accept_partial_bio informs the dm that the target only wants to process
1304 * +--------------------+---------------+-------+
1306 * +--------------------+---------------+-------+
1308 * <-------------- *tio->len_ptr --------------->
1309 * <----- bio_sectors ----->
1310 * <-- n_sectors -->
1314 * Region 2 is the remaining bio size that the target wants to process.
1315 * (it may be empty if region 1 is non-empty, although there is no reason
1316 * to make it empty)
1317 * The target requires that region 3 is to be sent in the next bio.
1319 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
1326 struct dm_io *io = tio->io; in dm_accept_partial_bio()
1332 BUG_ON(bio_sectors > *tio->len_ptr); in dm_accept_partial_bio()
1335 *tio->len_ptr -= bio_sectors - n_sectors; in dm_accept_partial_bio()
1336 bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT; in dm_accept_partial_bio()
1343 io->sectors = n_sectors; in dm_accept_partial_bio()
1344 io->sector_offset = bio_sectors(io->orig_bio); in dm_accept_partial_bio()
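Putting the diagram above to use: a .map method that must not cross a boundary accepts only the sectors up to it and lets DM core resubmit the remainder (region 3) as a fresh bio. A sketch with a hypothetical 64KiB boundary helper; only dm_accept_partial_bio() and the return code are from the real API, and this applies to regular read/write bios only:

/* distance (in sectors) to the next hypothetical 64KiB boundary */
static unsigned int example_sectors_to_boundary(sector_t sector)
{
	return 128 - ((unsigned int)sector & 127);
}

static int example_map(struct dm_target *ti, struct bio *bio)
{
	unsigned int max = example_sectors_to_boundary(bio->bi_iter.bi_sector);

	if (bio_sectors(bio) > max)
		dm_accept_partial_bio(bio, max);	/* keep regions 1+2, push back 3 */
	/* ... remap the (possibly shrunk) bio ... */
	return DM_MAPIO_REMAPPED;
}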
1349 * @clone: clone bio that DM core passed to target's .map function
1352 * Targets should use this interface to submit bios they take
1355 * Target should also enable ti->accounts_remapped_io
1360 struct dm_io *io = tio->io; in dm_submit_bio_remap()
1367 * Account io->orig_bio to DM dev on behalf of target in dm_submit_bio_remap()
1372 trace_block_bio_remap(tgt_clone, disk_devt(io->md->disk), in dm_submit_bio_remap()
1373 tio->old_sector); in dm_submit_bio_remap()
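Usage sketch for dm_submit_bio_remap(): the target returns DM_MAPIO_SUBMITTED from .map, hands the clone to its own worker, and submits later. The queueing helper is hypothetical; per the note above, the .ctr must also set ti->accounts_remapped_io = true so accounting happens here rather than at map time:

static void example_queue_for_worker(struct dm_target *ti, struct bio *bio);	/* hypothetical */

static int example_map(struct dm_target *ti, struct bio *bio)
{
	example_queue_for_worker(ti, bio);	/* defer instead of remapping inline */
	return DM_MAPIO_SUBMITTED;
}

/* later, from the worker: */
static void example_worker_submit(struct bio *bio)
{
	/* NULL tgt_clone tells DM core to submit the clone itself */
	dm_submit_bio_remap(bio, NULL);
}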
1380 mutex_lock(&md->swap_bios_lock); in __set_swap_bios_limit()
1381 while (latch < md->swap_bios) { in __set_swap_bios_limit()
1383 down(&md->swap_bios_semaphore); in __set_swap_bios_limit()
1384 md->swap_bios--; in __set_swap_bios_limit()
1386 while (latch > md->swap_bios) { in __set_swap_bios_limit()
1388 up(&md->swap_bios_semaphore); in __set_swap_bios_limit()
1389 md->swap_bios++; in __set_swap_bios_limit()
1391 mutex_unlock(&md->swap_bios_lock); in __set_swap_bios_limit()
1397 struct dm_target *ti = tio->ti; in __map_bio()
1398 struct dm_io *io = tio->io; in __map_bio()
1399 struct mapped_device *md = io->md; in __map_bio()
1402 clone->bi_end_io = clone_endio; in __map_bio()
1407 tio->old_sector = clone->bi_iter.bi_sector; in __map_bio()
1413 if (unlikely(latch != md->swap_bios)) in __map_bio()
1415 down(&md->swap_bios_semaphore); in __map_bio()
1420 * Check if the IO needs a special mapping due to zone append in __map_bio()
1427 r = ti->type->map(ti, clone); in __map_bio()
1429 r = ti->type->map(ti, clone); in __map_bio()
1434 if (!ti->accounts_remapped_io) in __map_bio()
1444 up(&md->swap_bios_semaphore); in __map_bio()
1459 struct dm_io *io = ci->io; in setup_split_accounting()
1461 if (ci->sector_count > len) { in setup_split_accounting()
1467 io->sectors = len; in setup_split_accounting()
1468 io->sector_offset = bio_sectors(ci->bio); in setup_split_accounting()
1483 mutex_lock(&ci->io->md->table_devices_lock); in alloc_multiple_bios()
1493 mutex_unlock(&ci->io->md->table_devices_lock); in alloc_multiple_bios()
1522 /* dm_accept_partial_bio() is not supported with shared tio->len_ptr */ in __send_duplicate_bios()
1537 struct dm_table *t = ci->map; in __send_empty_flush()
1541 * Use an on-stack bio for this, it's safe since we don't in __send_empty_flush()
1542 * need to reference it after submit. It's just used as in __send_empty_flush()
1545 bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0, in __send_empty_flush()
1548 ci->bio = &flush_bio; in __send_empty_flush()
1549 ci->sector_count = 0; in __send_empty_flush()
1550 ci->io->tio.clone.bi_iter.bi_size = 0; in __send_empty_flush()
1552 for (unsigned int i = 0; i < t->num_targets; i++) { in __send_empty_flush()
1556 atomic_add(ti->num_flush_bios, &ci->io->io_count); in __send_empty_flush()
1557 bios = __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); in __send_empty_flush()
1558 atomic_sub(ti->num_flush_bios - bios, &ci->io->io_count); in __send_empty_flush()
1565 atomic_sub(1, &ci->io->io_count); in __send_empty_flush()
1567 bio_uninit(ci->bio); in __send_empty_flush()
1577 len = min_t(sector_t, ci->sector_count, in __send_changing_extent_only()
1578 __max_io_len(ti, ci->sector, max_granularity, max_sectors)); in __send_changing_extent_only()
1580 atomic_add(num_bios, &ci->io->io_count); in __send_changing_extent_only()
1586 atomic_sub(num_bios - bios + 1, &ci->io->io_count); in __send_changing_extent_only()
1588 ci->sector += len; in __send_changing_extent_only()
1589 ci->sector_count -= len; in __send_changing_extent_only()
1616 struct queue_limits *limits = dm_get_queue_limits(ti->table->md); in __process_abnormal_io()
1618 switch (bio_op(ci->bio)) { in __process_abnormal_io()
1620 num_bios = ti->num_discard_bios; in __process_abnormal_io()
1621 max_sectors = limits->max_discard_sectors; in __process_abnormal_io()
1622 if (ti->max_discard_granularity) in __process_abnormal_io()
1626 num_bios = ti->num_secure_erase_bios; in __process_abnormal_io()
1627 max_sectors = limits->max_secure_erase_sectors; in __process_abnormal_io()
1628 if (ti->max_secure_erase_granularity) in __process_abnormal_io()
1632 num_bios = ti->num_write_zeroes_bios; in __process_abnormal_io()
1633 max_sectors = limits->max_write_zeroes_sectors; in __process_abnormal_io()
1634 if (ti->max_write_zeroes_granularity) in __process_abnormal_io()
1656 * Reuse ->bi_private as dm_io list head for storing all dm_io instances
1657 * associated with this bio, and this bio's bi_private needs to be
1658 * stored in dm_io->data before the reuse.
1660 * bio->bi_private is owned by fs or upper layer, so block layer won't
1666 return (struct dm_io **)&bio->bi_private; in dm_poll_list_head()
1673 if (!(bio->bi_opf & REQ_DM_POLL_LIST)) { in dm_queue_poll_io()
1674 bio->bi_opf |= REQ_DM_POLL_LIST; in dm_queue_poll_io()
1679 io->data = bio->bi_private; in dm_queue_poll_io()
1681 /* tell block layer to poll for completion */ in dm_queue_poll_io()
1682 bio->bi_cookie = ~BLK_QC_T_NONE; in dm_queue_poll_io()
1684 io->next = NULL; in dm_queue_poll_io()
1687 * bio recursed due to split, reuse original poll list, in dm_queue_poll_io()
1688 * and save bio->bi_private too. in dm_queue_poll_io()
1690 io->data = (*head)->data; in dm_queue_poll_io()
1691 io->next = *head; in dm_queue_poll_io()
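The trick in dm_queue_poll_io() generalizes: borrow an object's pointer field as an intrusive list head, carry the owner's original value in the head node, and restore it before handing the object back. A standalone sketch of that pattern (all names hypothetical; dm.c applies it to bio->bi_private, with REQ_DM_POLL_LIST playing the role of list_active):

struct example_node {
	struct example_node *next;
	void *saved;		/* owner's original pointer, carried by the head */
};

struct example_owner {
	void *private;		/* borrowed as list head while list_active */
	bool list_active;
};

static void example_push(struct example_owner *o, struct example_node *n)
{
	if (!o->list_active) {
		n->saved = o->private;	/* first node saves the real pointer */
		n->next = NULL;
		o->list_active = true;
	} else {
		struct example_node *head = o->private;

		n->saved = head->saved;
		n->next = head;
	}
	o->private = n;		/* new node becomes the list head */
}

static struct example_node *example_detach(struct example_owner *o)
{
	struct example_node *head = o->private;

	o->private = head->saved;	/* give the owner its pointer back */
	o->list_active = false;
	return head;			/* caller walks the ->next chain */
}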
1698 * Select the correct strategy for processing a non-flush bio.
1706 ti = dm_table_find_target(ci->map, ci->sector); in __split_and_process_bio()
1710 if (unlikely((ci->bio->bi_opf & REQ_NOWAIT) != 0) && in __split_and_process_bio()
1711 unlikely(!dm_target_supports_nowait(ti->type))) in __split_and_process_bio()
1714 if (unlikely(ci->is_abnormal_io)) in __split_and_process_bio()
1721 ci->submit_as_polled = !!(ci->bio->bi_opf & REQ_POLLED); in __split_and_process_bio()
1723 len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count); in __split_and_process_bio()
1728 ci->sector += len; in __split_and_process_bio()
1729 ci->sector_count -= len; in __split_and_process_bio()
1737 ci->map = map; in init_clone_info()
1738 ci->io = alloc_io(md, bio); in init_clone_info()
1739 ci->bio = bio; in init_clone_info()
1740 ci->is_abnormal_io = is_abnormal; in init_clone_info()
1741 ci->submit_as_polled = false; in init_clone_info()
1742 ci->sector = bio->bi_iter.bi_sector; in init_clone_info()
1743 ci->sector_count = bio_sectors(bio); in init_clone_info()
1745 /* Shouldn't happen but sector_count was being set to 0 so... */ in init_clone_info()
1747 WARN_ON_ONCE(op_is_zone_mgmt(bio_op(bio)) && ci->sector_count)) in init_clone_info()
1748 ci->sector_count = 0; in init_clone_info()
1752 * Entry point to split a bio into clones and submit them to the targets.
1776 if (bio->bi_opf & REQ_PREFLUSH) { in dm_split_and_process_bio()
1786 * Remainder must be passed to submit_bio_noacct() so it gets handled in dm_split_and_process_bio()
1789 bio_trim(bio, io->sectors, ci.sector_count); in dm_split_and_process_bio()
1790 trace_block_split(bio, bio->bi_iter.bi_sector); in dm_split_and_process_bio()
1795 * Drop the extra reference count for non-POLLED bio, and hold one in dm_split_and_process_bio()
1799 * in bio->bi_private, so that dm_poll_bio can poll them all. in dm_split_and_process_bio()
1807 atomic_dec(&io->io_count); in dm_split_and_process_bio()
1815 struct mapped_device *md = bio->bi_bdev->bd_disk->private_data; in dm_submit_bio()
1828 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { in dm_submit_bio()
1829 if (bio->bi_opf & REQ_NOWAIT) in dm_submit_bio()
1831 else if (bio->bi_opf & REQ_RAHEAD) in dm_submit_bio()
1846 WARN_ON_ONCE(!dm_tio_is_normal(&io->tio)); in dm_poll_dm_io()
1849 if (atomic_read(&io->io_count) > 1) in dm_poll_dm_io()
1850 bio_poll(&io->tio.clone, iob, flags); in dm_poll_dm_io()
1853 return atomic_read(&io->io_count) == 1; in dm_poll_dm_io()
1865 if (!(bio->bi_opf & REQ_DM_POLL_LIST)) in dm_poll_bio()
1874 * submitted via submit_bio_noacct()'s depth-first submission. in dm_poll_bio()
1878 bio->bi_opf &= ~REQ_DM_POLL_LIST; in dm_poll_bio()
1879 bio->bi_private = list->data; in dm_poll_bio()
1881 for (curr = list, next = curr->next; curr; curr = next, next = in dm_poll_bio()
1882 curr ? curr->next : NULL) { in dm_poll_bio()
1890 curr->next = tmp; in dm_poll_bio()
1897 bio->bi_opf |= REQ_DM_POLL_LIST; in dm_poll_bio()
1898 /* Reset bio->bi_private to dm_io list head */ in dm_poll_bio()
1906 *---------------------------------------------------------------
1907 * An IDR is used to keep track of allocated minor numbers.
1908 *---------------------------------------------------------------
1925 return -EINVAL; in specific_minor()
1935 return r == -ENOSPC ? -EBUSY : r; in specific_minor()
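The allocation path behind the fragments above follows the standard idr_preload()/idr_alloc() recipe: preallocate with GFP_KERNEL outside the spinlock, then allocate atomically inside it. A simplified sketch (hypothetical example_* names; the real code stores a MINOR_ALLOCED placeholder rather than md):

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_minor_lock);
static DEFINE_IDR(example_minor_idr);

static int example_specific_minor(void *md, int minor)
{
	int r;

	idr_preload(GFP_KERNEL);	/* may sleep; done before taking the lock */
	spin_lock(&example_minor_lock);
	r = idr_alloc(&example_minor_idr, md, minor, minor + 1, GFP_NOWAIT);
	spin_unlock(&example_minor_lock);
	idr_preload_end();

	if (r < 0)
		return r == -ENOSPC ? -EBUSY : r;	/* minor already taken */
	return 0;
}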
1965 dm_destroy_crypto_profile(q->crypto_profile); in dm_queue_destroy_crypto_profile()
1977 if (md->wq) in cleanup_mapped_device()
1978 destroy_workqueue(md->wq); in cleanup_mapped_device()
1979 dm_free_md_mempools(md->mempools); in cleanup_mapped_device()
1981 if (md->dax_dev) { in cleanup_mapped_device()
1982 dax_remove_host(md->disk); in cleanup_mapped_device()
1983 kill_dax(md->dax_dev); in cleanup_mapped_device()
1984 put_dax(md->dax_dev); in cleanup_mapped_device()
1985 md->dax_dev = NULL; in cleanup_mapped_device()
1989 if (md->disk) { in cleanup_mapped_device()
1991 md->disk->private_data = NULL; in cleanup_mapped_device()
1997 list_for_each_entry(td, &md->table_devices, list) { in cleanup_mapped_device()
1998 bd_unlink_disk_holder(td->dm_dev.bdev, in cleanup_mapped_device()
1999 md->disk); in cleanup_mapped_device()
2003 * Hold lock to make sure del_gendisk() won't run concurrently in cleanup_mapped_device()
2006 mutex_lock(&md->table_devices_lock); in cleanup_mapped_device()
2007 del_gendisk(md->disk); in cleanup_mapped_device()
2008 mutex_unlock(&md->table_devices_lock); in cleanup_mapped_device()
2010 dm_queue_destroy_crypto_profile(md->queue); in cleanup_mapped_device()
2011 put_disk(md->disk); in cleanup_mapped_device()
2014 if (md->pending_io) { in cleanup_mapped_device()
2015 free_percpu(md->pending_io); in cleanup_mapped_device()
2016 md->pending_io = NULL; in cleanup_mapped_device()
2019 cleanup_srcu_struct(&md->io_barrier); in cleanup_mapped_device()
2021 mutex_destroy(&md->suspend_lock); in cleanup_mapped_device()
2022 mutex_destroy(&md->type_lock); in cleanup_mapped_device()
2023 mutex_destroy(&md->table_devices_lock); in cleanup_mapped_device()
2024 mutex_destroy(&md->swap_bios_lock); in cleanup_mapped_device()
2040 DMERR("unable to allocate device, out of memory."); in alloc_dev()
2055 r = init_srcu_struct(&md->io_barrier); in alloc_dev()
2059 md->numa_node_id = numa_node_id; in alloc_dev()
2060 md->init_tio_pdu = false; in alloc_dev()
2061 md->type = DM_TYPE_NONE; in alloc_dev()
2062 mutex_init(&md->suspend_lock); in alloc_dev()
2063 mutex_init(&md->type_lock); in alloc_dev()
2064 mutex_init(&md->table_devices_lock); in alloc_dev()
2065 spin_lock_init(&md->deferred_lock); in alloc_dev()
2066 atomic_set(&md->holders, 1); in alloc_dev()
2067 atomic_set(&md->open_count, 0); in alloc_dev()
2068 atomic_set(&md->event_nr, 0); in alloc_dev()
2069 atomic_set(&md->uevent_seq, 0); in alloc_dev()
2070 INIT_LIST_HEAD(&md->uevent_list); in alloc_dev()
2071 INIT_LIST_HEAD(&md->table_devices); in alloc_dev()
2072 spin_lock_init(&md->uevent_lock); in alloc_dev()
2075 * default to bio-based until DM table is loaded and md->type in alloc_dev()
2076 * established. If request-based table is loaded: blk-mq will in alloc_dev()
2079 md->disk = blk_alloc_disk(md->numa_node_id); in alloc_dev()
2080 if (!md->disk) in alloc_dev()
2082 md->queue = md->disk->queue; in alloc_dev()
2084 init_waitqueue_head(&md->wait); in alloc_dev()
2085 INIT_WORK(&md->work, dm_wq_work); in alloc_dev()
2086 INIT_WORK(&md->requeue_work, dm_wq_requeue_work); in alloc_dev()
2087 init_waitqueue_head(&md->eventq); in alloc_dev()
2088 init_completion(&md->kobj_holder.completion); in alloc_dev()
2090 md->requeue_list = NULL; in alloc_dev()
2091 md->swap_bios = get_swap_bios(); in alloc_dev()
2092 sema_init(&md->swap_bios_semaphore, md->swap_bios); in alloc_dev()
2093 mutex_init(&md->swap_bios_lock); in alloc_dev()
2095 md->disk->major = _major; in alloc_dev()
2096 md->disk->first_minor = minor; in alloc_dev()
2097 md->disk->minors = 1; in alloc_dev()
2098 md->disk->flags |= GENHD_FL_NO_PART; in alloc_dev()
2099 md->disk->fops = &dm_blk_dops; in alloc_dev()
2100 md->disk->private_data = md; in alloc_dev()
2101 sprintf(md->disk->disk_name, "dm-%d", minor); in alloc_dev()
2104 md->dax_dev = alloc_dax(md, &dm_dax_ops); in alloc_dev()
2105 if (IS_ERR(md->dax_dev)) { in alloc_dev()
2106 md->dax_dev = NULL; in alloc_dev()
2109 set_dax_nocache(md->dax_dev); in alloc_dev()
2110 set_dax_nomc(md->dax_dev); in alloc_dev()
2111 if (dax_add_host(md->dax_dev, md->disk)) in alloc_dev()
2115 format_dev_t(md->name, MKDEV(_major, minor)); in alloc_dev()
2117 md->wq = alloc_workqueue("kdmflush/%s", WQ_MEM_RECLAIM, 0, md->name); in alloc_dev()
2118 if (!md->wq) in alloc_dev()
2121 md->pending_io = alloc_percpu(unsigned long); in alloc_dev()
2122 if (!md->pending_io) in alloc_dev()
2125 r = dm_stats_init(&md->stats); in alloc_dev()
2153 int minor = MINOR(disk_devt(md->disk)); in free_dev()
2159 WARN_ON_ONCE(!list_empty(&md->table_devices)); in free_dev()
2160 dm_stats_cleanup(&md->stats); in free_dev()
2168 * Bind a table to the device.
2176 spin_lock_irqsave(&md->uevent_lock, flags); in event_callback()
2177 list_splice_init(&md->uevent_list, &uevents); in event_callback()
2178 spin_unlock_irqrestore(&md->uevent_lock, flags); in event_callback()
2180 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); in event_callback()
2182 atomic_inc(&md->event_nr); in event_callback()
2183 wake_up(&md->eventq); in event_callback()
2197 lockdep_assert_held(&md->suspend_lock); in __bind()
2205 memset(&md->geometry, 0, sizeof(md->geometry)); in __bind()
2207 set_capacity(md->disk, size); in __bind()
2213 * Leverage the fact that request-based DM targets are in __bind()
2214 * immutable singletons - used to optimize dm_mq_queue_rq. in __bind()
2216 md->immutable_target = dm_table_get_immutable_target(t); in __bind()
2219 * There is no need to reload with request-based dm because the in __bind()
2222 * Note for future: If you are to reload bioset, prepped in __bind()
2223 * requests in the queue may refer to bio from the old bioset, in __bind()
2224 * so you must walk through the queue to unprep. in __bind()
2226 if (!md->mempools) { in __bind()
2227 md->mempools = t->mempools; in __bind()
2228 t->mempools = NULL; in __bind()
2236 dm_free_md_mempools(md->mempools); in __bind()
2237 md->mempools = t->mempools; in __bind()
2238 t->mempools = NULL; in __bind()
2241 ret = dm_table_set_restrictions(t, md->queue, limits); in __bind()
2247 old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); in __bind()
2248 rcu_assign_pointer(md->map, (void *)t); in __bind()
2249 md->immutable_target_type = dm_table_get_immutable_target_type(t); in __bind()
2258 * Returns unbound table for the caller to free.
2262 struct dm_table *map = rcu_dereference_protected(md->map, 1); in __unbind()
2268 RCU_INIT_POINTER(md->map, NULL); in __unbind()
2283 return -ENXIO; in dm_create()
2292 * Functions to manage md->type.
2293 * All are required to hold md->type_lock.
2297 mutex_lock(&md->type_lock); in dm_lock_md_type()
2302 mutex_unlock(&md->type_lock); in dm_unlock_md_type()
2307 BUG_ON(!mutex_is_locked(&md->type_lock)); in dm_set_md_type()
2308 md->type = type; in dm_set_md_type()
2313 return md->type; in dm_get_md_type()
2318 return md->immutable_target_type; in dm_get_immutable_target_type()
2333 md->disk->fops = &dm_rq_blk_dops; in dm_setup_md_queue()
2336 DMERR("Cannot initialize queue for request-based dm mapped device"); in dm_setup_md_queue()
2342 blk_queue_flag_set(QUEUE_FLAG_IO_STAT, md->queue); in dm_setup_md_queue()
2354 r = dm_table_set_restrictions(t, md->queue, &limits); in dm_setup_md_queue()
2359 * Hold lock to make sure add_disk() and del_gendisk() won't run concurrently in dm_setup_md_queue()
2362 mutex_lock(&md->table_devices_lock); in dm_setup_md_queue()
2363 r = add_disk(md->disk); in dm_setup_md_queue()
2364 mutex_unlock(&md->table_devices_lock); in dm_setup_md_queue()
2369 * Register the holder relationship for devices added before the disk in dm_setup_md_queue()
2372 list_for_each_entry(td, &md->table_devices, list) { in dm_setup_md_queue()
2373 r = bd_link_disk_holder(td->dm_dev.bdev, md->disk); in dm_setup_md_queue()
2382 md->type = type; in dm_setup_md_queue()
2386 list_for_each_entry_continue_reverse(td, &md->table_devices, list) in dm_setup_md_queue()
2387 bd_unlink_disk_holder(td->dm_dev.bdev, md->disk); in dm_setup_md_queue()
2388 mutex_lock(&md->table_devices_lock); in dm_setup_md_queue()
2389 del_gendisk(md->disk); in dm_setup_md_queue()
2390 mutex_unlock(&md->table_devices_lock); in dm_setup_md_queue()
2406 test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { in dm_get_md()
2420 return md->interface_ptr; in dm_get_mdptr()
2425 md->interface_ptr = ptr; in dm_set_mdptr()
2430 atomic_inc(&md->holders); in dm_get()
2431 BUG_ON(test_bit(DMF_FREEING, &md->flags)); in dm_get()
2437 if (test_bit(DMF_FREEING, &md->flags)) { in dm_hold()
2439 return -EBUSY; in dm_hold()
2449 return md->name; in dm_device_name()
2462 set_bit(DMF_FREEING, &md->flags); in __dm_destroy()
2465 blk_mark_disk_dead(md->disk); in __dm_destroy()
2469 * do not race with internal suspend. in __dm_destroy()
2471 mutex_lock(&md->suspend_lock); in __dm_destroy()
2475 set_bit(DMF_SUSPENDED, &md->flags); in __dm_destroy()
2476 set_bit(DMF_POST_SUSPENDING, &md->flags); in __dm_destroy()
2481 mutex_unlock(&md->suspend_lock); in __dm_destroy()
2484 * Rare, but there may be I/O requests still going to complete, in __dm_destroy()
2485 * for example. Wait for all references to disappear. in __dm_destroy()
2490 while (atomic_read(&md->holders)) in __dm_destroy()
2492 else if (atomic_read(&md->holders)) in __dm_destroy()
2494 dm_device_name(md), atomic_read(&md->holders)); in __dm_destroy()
2512 atomic_dec(&md->holders); in dm_put()
2522 sum += *per_cpu_ptr(md->pending_io, cpu); in dm_in_flight_bios()
2533 prepare_to_wait(&md->wait, &wait, task_state); in dm_wait_for_bios_completion()
2539 r = -ERESTARTSYS; in dm_wait_for_bios_completion()
2545 finish_wait(&md->wait, &wait); in dm_wait_for_bios_completion()
2556 if (!queue_is_mq(md->queue)) in dm_wait_for_completion()
2560 if (!blk_mq_queue_inflight(md->queue)) in dm_wait_for_completion()
2564 r = -ERESTARTSYS; in dm_wait_for_completion()
2582 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { in dm_wq_work()
2583 spin_lock_irq(&md->deferred_lock); in dm_wq_work()
2584 bio = bio_list_pop(&md->deferred); in dm_wq_work()
2585 spin_unlock_irq(&md->deferred_lock); in dm_wq_work()
2597 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); in dm_queue_flush()
2599 queue_work(md->wq, &md->work); in dm_queue_flush()
2603 * Swap in a new table, returning the old one for the caller to destroy.
2607 struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL); in dm_swap_table()
2611 mutex_lock(&md->suspend_lock); in dm_swap_table()
2626 limits = md->queue->limits; in dm_swap_table()
2642 mutex_unlock(&md->suspend_lock); in dm_swap_table()
2647 * Functions to lock and unlock any filesystem running on the
2654 WARN_ON(test_bit(DMF_FROZEN, &md->flags)); in lock_fs()
2656 r = freeze_bdev(md->disk->part0); in lock_fs()
2658 set_bit(DMF_FROZEN, &md->flags); in lock_fs()
2664 if (!test_bit(DMF_FROZEN, &md->flags)) in unlock_fs()
2666 thaw_bdev(md->disk->part0); in unlock_fs()
2667 clear_bit(DMF_FROZEN, &md->flags); in unlock_fs()
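lock_fs()/unlock_fs() reduce to a freeze/thaw bracket around the table swap. A self-contained sketch of that bracket (the wrapper is hypothetical; freeze_bdev()/thaw_bdev() match the kernel version this file is from, where both take just the block_device):

#include <linux/blkdev.h>

static int example_with_fs_frozen(struct block_device *bdev,
				  void (*fn)(void *), void *data)
{
	int r = freeze_bdev(bdev);	/* flush and block fs writes */

	if (r)
		return r;
	fn(data);			/* e.g. swap in a new table */
	return thaw_bdev(bdev);
}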
2676 * now. There is no request-processing activity. All new requests
2677 * are being added to md->deferred list.
2687 lockdep_assert_held(&md->suspend_lock); in __dm_suspend()
2694 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); in __dm_suspend()
2705 * Flush I/O to the device. in __dm_suspend()
2708 * (lock_fs() flushes I/Os and waits for them to complete.) in __dm_suspend()
2720 * to target drivers i.e. no one may be executing in __dm_suspend()
2723 * To get all processes out of dm_split_and_process_bio in dm_submit_bio, in __dm_suspend()
2724 * we take the write lock. To prevent any process from reentering in __dm_suspend()
2727 * flush_workqueue(md->wq). in __dm_suspend()
2729 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); in __dm_suspend()
2731 synchronize_srcu(&md->io_barrier); in __dm_suspend()
2734 * Stop md->queue before flushing md->wq in case request-based in __dm_suspend()
2735 * dm defers requests to md->wq from md->queue. in __dm_suspend()
2738 dm_stop_queue(md->queue); in __dm_suspend()
2740 flush_workqueue(md->wq); in __dm_suspend()
2744 * We call dm_wait_for_completion to wait for all existing requests in __dm_suspend()
2745 * to finish. in __dm_suspend()
2749 set_bit(dmf_suspended_flag, &md->flags); in __dm_suspend()
2752 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); in __dm_suspend()
2754 synchronize_srcu(&md->io_barrier); in __dm_suspend()
2761 dm_start_queue(md->queue); in __dm_suspend()
2772 * We need to be able to change a mapping table under a mounted
2773 * filesystem. For example we might want to move some data in
2775 * dm_bind_table, dm_suspend must be called to flush any in
2779 * Suspend mechanism in request-based dm.
2783 * 3. Wait for all in-flight I/Os to be completed or requeued.
2785 * To abort suspend, start the request_queue.
2793 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); in dm_suspend()
2796 r = -EINVAL; in dm_suspend()
2802 mutex_unlock(&md->suspend_lock); in dm_suspend()
2803 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); in dm_suspend()
2809 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); in dm_suspend()
2819 set_bit(DMF_POST_SUSPENDING, &md->flags); in dm_suspend()
2821 clear_bit(DMF_POST_SUSPENDING, &md->flags); in dm_suspend()
2824 mutex_unlock(&md->suspend_lock); in dm_suspend()
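For context, dm_suspend()/dm_resume() are driven from the in-tree ioctl layer around dm_swap_table(). A simplified sketch of that sequence (assumes the private drivers/md/dm.h declarations; error paths trimmed relative to drivers/md/dm-ioctl.c):

static int example_reload(struct mapped_device *md, struct dm_table *new_map)
{
	struct dm_table *old_map;
	int r;

	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);	/* freeze fs, quiesce I/O */
	if (r)
		return r;

	old_map = dm_swap_table(md, new_map);		/* returns the old table */
	if (IS_ERR(old_map)) {
		dm_resume(md);
		return PTR_ERR(old_map);
	}

	r = dm_resume(md);
	if (old_map)
		dm_table_destroy(old_map);
	return r;
}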
2842 * Request-based dm is queueing the deferred I/Os in its request_queue. in __dm_resume()
2845 dm_start_queue(md->queue); in __dm_resume()
2858 r = -EINVAL; in dm_resume()
2859 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); in dm_resume()
2866 mutex_unlock(&md->suspend_lock); in dm_resume()
2867 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); in dm_resume()
2873 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); in dm_resume()
2881 clear_bit(DMF_SUSPENDED, &md->flags); in dm_resume()
2883 mutex_unlock(&md->suspend_lock); in dm_resume()
2889 * Internal suspend/resume works like userspace-driven suspend. It waits
2890 * until all bios finish and prevents issuing new bios to the target drivers.
2898 lockdep_assert_held(&md->suspend_lock); in __dm_internal_suspend()
2900 if (md->internal_suspend_count++) in __dm_internal_suspend()
2901 return; /* nested internal suspend */ in __dm_internal_suspend()
2904 set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); in __dm_internal_suspend()
2905 return; /* nest suspend */ in __dm_internal_suspend()
2908 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); in __dm_internal_suspend()
2911 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is in __dm_internal_suspend()
2912 * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend in __dm_internal_suspend()
2913 * would require changing .presuspend to return an error -- avoid this in __dm_internal_suspend()
2914 * until there is a need for more elaborate variants of internal suspend. in __dm_internal_suspend()
2919 set_bit(DMF_POST_SUSPENDING, &md->flags); in __dm_internal_suspend()
2921 clear_bit(DMF_POST_SUSPENDING, &md->flags); in __dm_internal_suspend()
2929 BUG_ON(!md->internal_suspend_count); in __dm_internal_resume()
2931 if (--md->internal_suspend_count) in __dm_internal_resume()
2932 return; /* resume from nested internal suspend */ in __dm_internal_resume()
2935 goto done; /* resume from nested suspend */ in __dm_internal_resume()
2937 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); in __dm_internal_resume()
2942 * tricky situation. We can't return an error to the caller. We in __dm_internal_resume()
2948 * So, we fake normal suspend here, to make sure that the in __dm_internal_resume()
2952 set_bit(DMF_SUSPENDED, &md->flags); in __dm_internal_resume()
2955 clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); in __dm_internal_resume()
2957 wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY); in __dm_internal_resume()
2962 mutex_lock(&md->suspend_lock); in dm_internal_suspend_noflush()
2964 mutex_unlock(&md->suspend_lock); in dm_internal_suspend_noflush()
2970 mutex_lock(&md->suspend_lock); in dm_internal_resume()
2972 mutex_unlock(&md->suspend_lock); in dm_internal_resume()
2977 * Fast variants of internal suspend/resume hold md->suspend_lock,
2978 * which prevents interaction with userspace-driven suspend.
2983 mutex_lock(&md->suspend_lock); in dm_internal_suspend_fast()
2987 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); in dm_internal_suspend_fast()
2988 synchronize_srcu(&md->io_barrier); in dm_internal_suspend_fast()
2989 flush_workqueue(md->wq); in dm_internal_suspend_fast()
3002 mutex_unlock(&md->suspend_lock); in dm_internal_resume_fast()
3007 *---------------------------------------------------------------
3009 *---------------------------------------------------------------
3030 r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp); in dm_kobject_uevent()
3039 return atomic_add_return(1, &md->uevent_seq); in dm_next_uevent_seq()
3044 return atomic_read(&md->event_nr); in dm_get_event_nr()
3049 return wait_event_interruptible(md->eventq, in dm_wait_event()
3050 (event_nr != atomic_read(&md->event_nr))); in dm_wait_event()
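dm_wait_event() pairs with dm_get_event_nr(): snapshot the counter, report state tagged with that value, then block until the counter moves. A sketch (hypothetical wrapper; this mirrors how the DM_DEV_WAIT ioctl path uses the pair):

static int example_wait_for_next_event(struct mapped_device *md)
{
	int seen = dm_get_event_nr(md);	/* snapshot before reporting status */

	/* ... hand current status plus 'seen' to the caller ... */

	return dm_wait_event(md, seen);	/* interruptible sleep until it bumps */
}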
3057 spin_lock_irqsave(&md->uevent_lock, flags); in dm_uevent_add()
3058 list_add(elist, &md->uevent_list); in dm_uevent_add()
3059 spin_unlock_irqrestore(&md->uevent_lock, flags); in dm_uevent_add()
3068 return md->disk; in dm_disk()
3074 return &md->kobj_holder.kobj; in dm_kobject()
3084 if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { in dm_get_from_kobject()
3097 return test_bit(DMF_SUSPENDED, &md->flags); in dm_suspended_md()
3102 return test_bit(DMF_POST_SUSPENDING, &md->flags); in dm_post_suspending_md()
3107 return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); in dm_suspended_internally_md()
3112 return test_bit(DMF_DEFERRED_REMOVE, &md->flags); in dm_test_deferred_remove_flag()
3117 return dm_suspended_md(ti->table->md); in dm_suspended()
3123 return dm_post_suspending_md(ti->table->md); in dm_post_suspending()
3129 return __noflush_suspending(ti->table->md); in dm_noflush_suspending()
3138 bioset_exit(&pools->bs); in dm_free_md_mempools()
3139 bioset_exit(&pools->io_bs); in dm_free_md_mempools()
3159 struct mapped_device *md = bdev->bd_disk->private_data; in dm_call_pr()
3162 int ret = -ENOTTY, srcu_idx; in dm_call_pr()
3169 if (table->num_targets != 1) in dm_call_pr()
3174 ret = -EAGAIN; in dm_call_pr()
3178 ret = -EINVAL; in dm_call_pr()
3179 if (!ti->type->iterate_devices) in dm_call_pr()
3182 ti->type->iterate_devices(ti, fn, pr); in dm_call_pr()
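The iterate_devices call above fans a callback out across every underlying device of the (single) target; a nonzero return stops the walk. A sketch of a callout and its driver, with hypothetical example_* names around the real iterate_devices_callout_fn signature:

static int example_count_path(struct dm_target *ti, struct dm_dev *dev,
			      sector_t start, sector_t len, void *data)
{
	unsigned int *count = data;

	(*count)++;	/* one hit per underlying path */
	return 0;	/* nonzero would abort the iteration */
}

static unsigned int example_count_paths(struct dm_target *ti)
{
	unsigned int paths = 0;

	if (ti->type->iterate_devices)
		ti->type->iterate_devices(ti, example_count_path, &paths);
	return paths;
}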
3190 * For register / unregister we need to manually call out to every path.
3196 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; in __dm_pr_register()
3199 if (!ops || !ops->pr_register) { in __dm_pr_register()
3200 pr->ret = -EOPNOTSUPP; in __dm_pr_register()
3201 return -1; in __dm_pr_register()
3204 ret = ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags); in __dm_pr_register()
3208 if (!pr->ret) in __dm_pr_register()
3209 pr->ret = ret; in __dm_pr_register()
3211 if (pr->fail_early) in __dm_pr_register()
3212 return -1; in __dm_pr_register()
3231 /* Didn't even get to register a path */ in dm_pr_register()
3242 /* unregister all paths if we failed to register any path */ in dm_pr_register()
3256 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; in __dm_pr_reserve()
3258 if (!ops || !ops->pr_reserve) { in __dm_pr_reserve()
3259 pr->ret = -EOPNOTSUPP; in __dm_pr_reserve()
3260 return -1; in __dm_pr_reserve()
3263 pr->ret = ops->pr_reserve(dev->bdev, pr->old_key, pr->type, pr->flags); in __dm_pr_reserve()
3264 if (!pr->ret) in __dm_pr_reserve()
3265 return -1; in __dm_pr_reserve()
3290 * If there is a non-All Registrants type of reservation, the release must be
3293 * try each path to make sure we got the correct path.
3299 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; in __dm_pr_release()
3301 if (!ops || !ops->pr_release) { in __dm_pr_release()
3302 pr->ret = -EOPNOTSUPP; in __dm_pr_release()
3303 return -1; in __dm_pr_release()
3306 pr->ret = ops->pr_release(dev->bdev, pr->old_key, pr->type); in __dm_pr_release()
3307 if (pr->ret) in __dm_pr_release()
3308 return -1; in __dm_pr_release()
3333 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; in __dm_pr_preempt()
3335 if (!ops || !ops->pr_preempt) { in __dm_pr_preempt()
3336 pr->ret = -EOPNOTSUPP; in __dm_pr_preempt()
3337 return -1; in __dm_pr_preempt()
3340 pr->ret = ops->pr_preempt(dev->bdev, pr->old_key, pr->new_key, pr->type, in __dm_pr_preempt()
3341 pr->abort); in __dm_pr_preempt()
3342 if (!pr->ret) in __dm_pr_preempt()
3343 return -1; in __dm_pr_preempt()
3368 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_clear()
3376 ops = bdev->bd_disk->fops->pr_ops; in dm_pr_clear()
3377 if (ops && ops->pr_clear) in dm_pr_clear()
3378 r = ops->pr_clear(bdev, key); in dm_pr_clear()
3380 r = -EOPNOTSUPP; in dm_pr_clear()
3390 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; in __dm_pr_read_keys()
3392 if (!ops || !ops->pr_read_keys) { in __dm_pr_read_keys()
3393 pr->ret = -EOPNOTSUPP; in __dm_pr_read_keys()
3394 return -1; in __dm_pr_read_keys()
3397 pr->ret = ops->pr_read_keys(dev->bdev, pr->read_keys); in __dm_pr_read_keys()
3398 if (!pr->ret) in __dm_pr_read_keys()
3399 return -1; in __dm_pr_read_keys()
3422 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; in __dm_pr_read_reservation()
3424 if (!ops || !ops->pr_read_reservation) { in __dm_pr_read_reservation()
3425 pr->ret = -EOPNOTSUPP; in __dm_pr_read_reservation()
3426 return -1; in __dm_pr_read_reservation()
3429 pr->ret = ops->pr_read_reservation(dev->bdev, pr->rsv); in __dm_pr_read_reservation()
3430 if (!pr->ret) in __dm_pr_read_reservation()
3431 return -1; in __dm_pr_read_reservation()
3498 MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
3507 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");