dm.c (6cbce280fc741c2057d574366318eafbeabbcfda) dm.c (563a225c9fd207326c2a2af9d59b4097cb31ce70)
1/*
2 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4 *
5 * This file is released under the GPL.
6 */
7
8#include "dm-core.h"

--- 647 unchanged lines hidden (view full) ---

656 queue_work(md->wq, &md->work);
657}
658
659/*
660 * Everyone (including functions in this file), should use this
661 * function to access the md->map field, and make sure they call
662 * dm_put_live_table() when finished.
663 */
1/*
2 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4 *
5 * This file is released under the GPL.
6 */
7
8#include "dm-core.h"

--- 647 unchanged lines hidden (view full) ---

656 queue_work(md->wq, &md->work);
657}
658
659/*
660 * Everyone (including functions in this file), should use this
661 * function to access the md->map field, and make sure they call
662 * dm_put_live_table() when finished.
663 */
664struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
664struct dm_table *dm_get_live_table(struct mapped_device *md,
665 int *srcu_idx) __acquires(md->io_barrier)
665{
666 *srcu_idx = srcu_read_lock(&md->io_barrier);
667
668 return srcu_dereference(md->map, &md->io_barrier);
669}
670
666{
667 *srcu_idx = srcu_read_lock(&md->io_barrier);
668
669 return srcu_dereference(md->map, &md->io_barrier);
670}
671
671void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
672void dm_put_live_table(struct mapped_device *md,
673 int srcu_idx) __releases(md->io_barrier)
672{
673 srcu_read_unlock(&md->io_barrier, srcu_idx);
674}
675
676void dm_sync_table(struct mapped_device *md)
677{
678 synchronize_srcu(&md->io_barrier);
679 synchronize_rcu_expedited();

--- 9 unchanged lines hidden (view full) ---

689 return rcu_dereference(md->map);
690}
691
692static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
693{
694 rcu_read_unlock();
695}
696
674{
675 srcu_read_unlock(&md->io_barrier, srcu_idx);
676}
677
678void dm_sync_table(struct mapped_device *md)
679{
680 synchronize_srcu(&md->io_barrier);
681 synchronize_rcu_expedited();

--- 9 unchanged lines hidden (view full) ---

691 return rcu_dereference(md->map);
692}
693
694static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
695{
696 rcu_read_unlock();
697}
698
699static inline struct dm_table *dm_get_live_table_bio(struct mapped_device *md,
700 int *srcu_idx, struct bio *bio)
701{
702 if (bio->bi_opf & REQ_NOWAIT)
703 return dm_get_live_table_fast(md);
704 else
705 return dm_get_live_table(md, srcu_idx);
706}
707
708static inline void dm_put_live_table_bio(struct mapped_device *md, int srcu_idx,
709 struct bio *bio)
710{
711 if (bio->bi_opf & REQ_NOWAIT)
712 dm_put_live_table_fast(md);
713 else
714 dm_put_live_table(md, srcu_idx);
715}
716
/*
 * NOTE(review): opaque marker string; looks like it serves as an
 * ownership token when device-mapper claims underlying block devices —
 * confirm against open_table_device() / the blkdev claim API.
 */
697static char *_dm_claim_ptr = "I belong to device-mapper";
698
699/*
700 * Open a table device so we can use it as a map destination.
701 */
702static int open_table_device(struct table_device *td, dev_t dev,
703 struct mapped_device *md)
704{

--- 902 unchanged lines hidden (view full) ---

1607}
1608
1609static void dm_submit_bio(struct bio *bio)
1610{
1611 struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
1612 int srcu_idx;
1613 struct dm_table *map;
1614
717static char *_dm_claim_ptr = "I belong to device-mapper";
718
719/*
720 * Open a table device so we can use it as a map destination.
721 */
722static int open_table_device(struct table_device *td, dev_t dev,
723 struct mapped_device *md)
724{

--- 902 unchanged lines hidden (view full) ---

1627}
1628
1629static void dm_submit_bio(struct bio *bio)
1630{
1631 struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
1632 int srcu_idx;
1633 struct dm_table *map;
1634
1615 map = dm_get_live_table(md, &srcu_idx);
1635 map = dm_get_live_table_bio(md, &srcu_idx, bio);
1616
1617 /* If suspended, or map not yet available, queue this IO for later */
1618 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) ||
1619 unlikely(!map)) {
1620 if (bio->bi_opf & REQ_NOWAIT)
1621 bio_wouldblock_error(bio);
1622 else if (bio->bi_opf & REQ_RAHEAD)
1623 bio_io_error(bio);

--- 6 unchanged lines hidden (view full) ---

1630 * Use blk_queue_split() for abnormal IO (e.g. discard, writesame, etc)
1631 * otherwise associated queue_limits won't be imposed.
1632 */
1633 if (unlikely(is_abnormal_io(bio)))
1634 blk_queue_split(&bio);
1635
1636 dm_split_and_process_bio(md, map, bio);
1637out:
1636
1637 /* If suspended, or map not yet available, queue this IO for later */
1638 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) ||
1639 unlikely(!map)) {
1640 if (bio->bi_opf & REQ_NOWAIT)
1641 bio_wouldblock_error(bio);
1642 else if (bio->bi_opf & REQ_RAHEAD)
1643 bio_io_error(bio);

--- 6 unchanged lines hidden (view full) ---

1650 * Use blk_queue_split() for abnormal IO (e.g. discard, writesame, etc)
1651 * otherwise associated queue_limits won't be imposed.
1652 */
1653 if (unlikely(is_abnormal_io(bio)))
1654 blk_queue_split(&bio);
1655
1656 dm_split_and_process_bio(md, map, bio);
1657out:
1638 dm_put_live_table(md, srcu_idx);
1658 dm_put_live_table_bio(md, srcu_idx, bio);
1639}
1640
1641static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob,
1642 unsigned int flags)
1643{
1644 WARN_ON_ONCE(!dm_tio_is_normal(&io->tio));
1645
1646 /* don't poll if the mapped io is done */

--- 1525 unchanged lines hidden ---
1659}
1660
1661static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob,
1662 unsigned int flags)
1663{
1664 WARN_ON_ONCE(!dm_tio_is_normal(&io->tio));
1665
1666 /* don't poll if the mapped io is done */

--- 1525 unchanged lines hidden ---