--- dm-snap.c (8c57a5e7b2820f349c95b8c8393fec1e0f4070d2)
+++ dm-snap.c (70246286e94c335b5bea0cbc68a17a96dd620281)
 /*
  * dm-snapshot.c
  *
  * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
  *
  * This file is released under the GPL.
  */

--- 1666 unchanged lines hidden (view full) ---
         struct dm_exception *e;
         struct dm_snapshot *s = ti->private;
         int r = DM_MAPIO_REMAPPED;
         chunk_t chunk;
         struct dm_snap_pending_exception *pe = NULL;

         init_tracked_chunk(bio);

-        if (bio->bi_rw & REQ_FLUSH) {
+        if (bio->bi_rw & REQ_PREFLUSH) {
                 bio->bi_bdev = s->cow->bdev;
                 return DM_MAPIO_REMAPPED;
         }

         chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);

         /* Full snapshots are not usable */
         /* To get here the table must be live so s->active is always set. */
         if (!s->valid)
                 return -EIO;

         /* FIXME: should only take write lock if we need
          * to copy an exception */
         down_write(&s->lock);

-        if (!s->valid || (unlikely(s->snapshot_overflowed) && bio_rw(bio) == WRITE)) {
+        if (!s->valid || (unlikely(s->snapshot_overflowed) &&
+            bio_data_dir(bio) == WRITE)) {
                 r = -EIO;
                 goto out_unlock;
         }

         /* If the block is already remapped - use that, else remap it */
         e = dm_lookup_exception(&s->complete, chunk);
         if (e) {
                 remap_exception(s, e, bio, chunk);
                 goto out_unlock;
         }

         /*
          * Write to snapshot - higher level takes care of RW/RO
          * flags so we should only get this if we are
          * writeable.
          */
-        if (bio_rw(bio) == WRITE) {
+        if (bio_data_dir(bio) == WRITE) {
                 pe = __lookup_pending_exception(s, chunk);
                 if (!pe) {
                         up_write(&s->lock);
                         pe = alloc_pending_exception(s);
                         down_write(&s->lock);

                         if (!s->valid || s->snapshot_overflowed) {
                                 free_pending_exception(pe);
--- 69 unchanged lines hidden (view full) ---
 {
         struct dm_exception *e;
         struct dm_snapshot *s = ti->private;
         int r = DM_MAPIO_REMAPPED;
         chunk_t chunk;

         init_tracked_chunk(bio);

-        if (bio->bi_rw & REQ_FLUSH) {
+        if (bio->bi_rw & REQ_PREFLUSH) {
                 if (!dm_bio_get_target_bio_nr(bio))
                         bio->bi_bdev = s->origin->bdev;
                 else
                         bio->bi_bdev = s->cow->bdev;
                 return DM_MAPIO_REMAPPED;
         }

         chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);

         down_write(&s->lock);

         /* Full merging snapshots are redirected to the origin */
         if (!s->valid)
                 goto redirect_to_origin;

         /* If the block is already remapped - use that */
         e = dm_lookup_exception(&s->complete, chunk);
         if (e) {
                 /* Queue writes overlapping with chunks being merged */
-                if (bio_rw(bio) == WRITE &&
+                if (bio_data_dir(bio) == WRITE &&
                     chunk >= s->first_merging_chunk &&
                     chunk < (s->first_merging_chunk +
                              s->num_merging_chunks)) {
                         bio->bi_bdev = s->origin->bdev;
                         bio_list_add(&s->bios_queued_during_merge, bio);
                         r = DM_MAPIO_SUBMITTED;
                         goto out_unlock;
                 }

                 remap_exception(s, e, bio, chunk);

-                if (bio_rw(bio) == WRITE)
+                if (bio_data_dir(bio) == WRITE)
                         track_chunk(s, bio, chunk);
                 goto out_unlock;
         }

 redirect_to_origin:
         bio->bi_bdev = s->origin->bdev;

-        if (bio_rw(bio) == WRITE) {
+        if (bio_data_dir(bio) == WRITE) {
                 up_write(&s->lock);
                 return do_origin(s->origin, bio);
         }

 out_unlock:
         up_write(&s->lock);

         return r;
--- 429 unchanged lines hidden (view full) ---

 static int origin_map(struct dm_target *ti, struct bio *bio)
 {
         struct dm_origin *o = ti->private;
         unsigned available_sectors;

         bio->bi_bdev = o->dev->bdev;

-        if (unlikely(bio->bi_rw & REQ_FLUSH))
+        if (unlikely(bio->bi_rw & REQ_PREFLUSH))
                 return DM_MAPIO_REMAPPED;

-        if (bio_rw(bio) != WRITE)
+        if (bio_data_dir(bio) != WRITE)
                 return DM_MAPIO_REMAPPED;

         available_sectors = o->split_boundary -
             ((unsigned)bio->bi_iter.bi_sector & (o->split_boundary - 1));

         if (bio_sectors(bio) > available_sectors)
                 dm_accept_partial_bio(bio, available_sectors);

--- 182 unchanged lines hidden ---
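Taken together, the hunks above make two mechanical substitutions in the dm-snap.c map callbacks (snapshot_map, snapshot_merge_map, origin_map): the empty-flush test changes from bio->bi_rw & REQ_FLUSH to bio->bi_rw & REQ_PREFLUSH, and the read/write direction tests change from bio_rw(bio) to bio_data_dir(bio). The sketch below is a minimal, hypothetical device-mapper map callback showing the post-conversion idioms in isolation; example_map, struct example_ctx, and its dev member are invented names for illustration and are not part of dm-snap.c, while the flag and helper names (REQ_PREFLUSH, bio_data_dir(), bio->bi_rw) are the ones used by the right-hand revision.

```c
/*
 * Illustrative sketch only -- not part of dm-snap.c.  example_map and
 * struct example_ctx are hypothetical; the block-layer names used here
 * (bio->bi_rw, REQ_PREFLUSH, bio_data_dir(), DM_MAPIO_REMAPPED) match
 * the newer tree shown in this diff.
 */
#include <linux/device-mapper.h>
#include <linux/bio.h>
#include <linux/blk_types.h>

struct example_ctx {
	struct dm_dev *dev;	/* backing device this target remaps to */
};

static int example_map(struct dm_target *ti, struct bio *bio)
{
	struct example_ctx *ec = ti->private;

	/* Remap every bio to the backing device. */
	bio->bi_bdev = ec->dev->bdev;

	/* An empty preflush carries no data; just pass it down. */
	if (unlikely(bio->bi_rw & REQ_PREFLUSH))
		return DM_MAPIO_REMAPPED;

	/*
	 * bio_data_dir() reports READ or WRITE for the bio as a whole,
	 * which is all the converted call sites above need; bio_rw()
	 * could also report READA and was removed along with it.
	 */
	if (bio_data_dir(bio) != WRITE)
		return DM_MAPIO_REMAPPED;

	/* Write-specific handling (copy-on-write, tracking, ...) goes here. */
	return DM_MAPIO_REMAPPED;
}
```

The same two checks appear, in context, in each of the three map functions above; the overflow test in the first hunk is also re-wrapped across two lines, but its logic is unchanged.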