/* dm.c — diff between revisions 745dc570b2c379730d2a78acdeb65b5239e833c6 (old) and 64f52b0e31489b46465cff2e61ab2e1f60a3b4eb (new) */
1/*
2 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4 *
5 * This file is released under the GPL.
6 */
7
8#include "dm-core.h"

--- 46 unchanged lines hidden (view full) ---

55
56void dm_issue_global_event(void)
57{
58 atomic_inc(&dm_global_event_nr);
59 wake_up(&dm_global_eventq);
60}
61
62/*
1/*
2 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4 *
5 * This file is released under the GPL.
6 */
7
8#include "dm-core.h"

--- 46 unchanged lines hidden (view full) ---

55
56void dm_issue_global_event(void)
57{
58 atomic_inc(&dm_global_event_nr);
59 wake_up(&dm_global_eventq);
60}
61
62/*
63 * One of these is allocated (on-stack) per original bio.
64 */
65struct clone_info {
66 struct mapped_device *md;
67 struct dm_table *map;
68 struct bio *bio;
69 struct dm_io *io;
70 sector_t sector;
71 unsigned sector_count;
72};
73
74/*
75 * One of these is allocated per clone bio.
76 */
77#define DM_TIO_MAGIC 7282014
78struct dm_target_io {
79 unsigned magic;
80 struct dm_io *io;
81 struct dm_target *ti;
82 unsigned target_bio_nr;
83 unsigned *len_ptr;
84 bool inside_dm_io;
85 struct bio clone;
86};
87
88/*
63 * One of these is allocated per original bio.
89 * One of these is allocated per original bio.
90 * It contains the first clone used for that original.
64 */
91 */
92#define DM_IO_MAGIC 5191977
65struct dm_io {
93struct dm_io {
94 unsigned magic;
66 struct mapped_device *md;
67 blk_status_t status;
68 atomic_t io_count;
69 struct bio *orig_bio;
70 unsigned long start_time;
71 spinlock_t endio_lock;
72 struct dm_stats_aux stats_aux;
95 struct mapped_device *md;
96 blk_status_t status;
97 atomic_t io_count;
98 struct bio *orig_bio;
99 unsigned long start_time;
100 spinlock_t endio_lock;
101 struct dm_stats_aux stats_aux;
102 /* last member of dm_target_io is 'struct bio' */
103 struct dm_target_io tio;
73};
74
104};
105
106void *dm_per_bio_data(struct bio *bio, size_t data_size)
107{
108 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
109 if (!tio->inside_dm_io)
110 return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
111 return (char *)bio - offsetof(struct dm_target_io, clone) - offsetof(struct dm_io, tio) - data_size;
112}
113EXPORT_SYMBOL_GPL(dm_per_bio_data);
114
115struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
116{
117 struct dm_io *io = (struct dm_io *)((char *)data + data_size);
118 if (io->magic == DM_IO_MAGIC)
119 return (struct bio *)((char *)io + offsetof(struct dm_io, tio) + offsetof(struct dm_target_io, clone));
120 BUG_ON(io->magic != DM_TIO_MAGIC);
121 return (struct bio *)((char *)io + offsetof(struct dm_target_io, clone));
122}
123EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);
124
125unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
126{
127 return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
128}
129EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
130
75#define MINOR_ALLOCED ((void *)-1)
76
77/*
78 * Bits for the md->flags field.
79 */
80#define DMF_BLOCK_IO_FOR_SUSPEND 0
81#define DMF_SUSPENDED 1
82#define DMF_FROZEN 2

--- 7 unchanged lines hidden (view full) ---

90static int dm_numa_node = DM_NUMA_NODE;
91
92/*
93 * For mempools pre-allocation at the table loading time.
94 */
95struct dm_md_mempools {
96 mempool_t *io_pool;
97 struct bio_set *bs;
131#define MINOR_ALLOCED ((void *)-1)
132
133/*
134 * Bits for the md->flags field.
135 */
136#define DMF_BLOCK_IO_FOR_SUSPEND 0
137#define DMF_SUSPENDED 1
138#define DMF_FROZEN 2

--- 7 unchanged lines hidden (view full) ---

146static int dm_numa_node = DM_NUMA_NODE;
147
148/*
149 * For mempools pre-allocation at the table loading time.
150 */
151struct dm_md_mempools {
152 mempool_t *io_pool;
153 struct bio_set *bs;
154 struct bio_set *io_bs;
98};
99
100struct table_device {
101 struct list_head list;
102 refcount_t count;
103 struct dm_dev dm_dev;
104};
105

--- 377 unchanged lines hidden (view full) ---

483 r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
484out:
485 bdput(bdev);
486 return r;
487}
488
489static struct dm_io *alloc_io(struct mapped_device *md)
490{
155};
156
157struct table_device {
158 struct list_head list;
159 refcount_t count;
160 struct dm_dev dm_dev;
161};
162

--- 377 unchanged lines hidden (view full) ---

540 r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
541out:
542 bdput(bdev);
543 return r;
544}
545
546static struct dm_io *alloc_io(struct mapped_device *md)
547{
491 return mempool_alloc(md->io_pool, GFP_NOIO);
548 struct dm_io *io;
549 struct dm_target_io *tio;
550 struct bio *clone;
551
552 clone = bio_alloc_bioset(GFP_NOIO, 0, md->io_bs);
553 if (!clone)
554 return NULL;
555
556 tio = container_of(clone, struct dm_target_io, clone);
557 tio->inside_dm_io = true;
558 tio->io = NULL;
559
560 io = container_of(tio, struct dm_io, tio);
561 io->magic = DM_IO_MAGIC;
562
563 return io;
492}
493
494static void free_io(struct mapped_device *md, struct dm_io *io)
495{
564}
565
566static void free_io(struct mapped_device *md, struct dm_io *io)
567{
496 mempool_free(io, md->io_pool);
568 bio_put(&io->tio.clone);
497}
498
569}
570
571static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti,
572 unsigned target_bio_nr, gfp_t gfp_mask)
573{
574 struct dm_target_io *tio;
575
576 if (!ci->io->tio.io) {
577 /* the dm_target_io embedded in ci->io is available */
578 tio = &ci->io->tio;
579 } else {
580 struct bio *clone = bio_alloc_bioset(gfp_mask, 0, ci->md->bs);
581 if (!clone)
582 return NULL;
583
584 tio = container_of(clone, struct dm_target_io, clone);
585 tio->inside_dm_io = false;
586 }
587
588 tio->magic = DM_TIO_MAGIC;
589 tio->io = ci->io;
590 tio->ti = ti;
591 tio->target_bio_nr = target_bio_nr;
592
593 return tio;
594}
595
499static void free_tio(struct dm_target_io *tio)
500{
596static void free_tio(struct dm_target_io *tio)
597{
598 if (tio->inside_dm_io)
599 return;
501 bio_put(&tio->clone);
502}
503
504int md_in_flight(struct mapped_device *md)
505{
506 return atomic_read(&md->pending[READ]) +
507 atomic_read(&md->pending[WRITE]);
508}

--- 596 unchanged lines hidden (view full) ---

1105}
1106EXPORT_SYMBOL_GPL(dm_remap_zone_report);
1107
1108static void __map_bio(struct dm_target_io *tio)
1109{
1110 int r;
1111 sector_t sector;
1112 struct bio *clone = &tio->clone;
600 bio_put(&tio->clone);
601}
602
603int md_in_flight(struct mapped_device *md)
604{
605 return atomic_read(&md->pending[READ]) +
606 atomic_read(&md->pending[WRITE]);
607}

--- 596 unchanged lines hidden (view full) ---

1204}
1205EXPORT_SYMBOL_GPL(dm_remap_zone_report);
1206
1207static void __map_bio(struct dm_target_io *tio)
1208{
1209 int r;
1210 sector_t sector;
1211 struct bio *clone = &tio->clone;
1212 struct dm_io *io = tio->io;
1113 struct dm_target *ti = tio->ti;
1114
1115 clone->bi_end_io = clone_endio;
1116
1117 /*
1118 * Map the clone. If r == 0 we don't need to do
1119 * anything, the target has assumed ownership of
1120 * this io.
1121 */
1213 struct dm_target *ti = tio->ti;
1214
1215 clone->bi_end_io = clone_endio;
1216
1217 /*
1218 * Map the clone. If r == 0 we don't need to do
1219 * anything, the target has assumed ownership of
1220 * this io.
1221 */
1122 atomic_inc(&tio->io->io_count);
1222 atomic_inc(&io->io_count);
1123 sector = clone->bi_iter.bi_sector;
1124
1125 r = ti->type->map(ti, clone);
1126 switch (r) {
1127 case DM_MAPIO_SUBMITTED:
1128 break;
1129 case DM_MAPIO_REMAPPED:
1130 /* the bio has been remapped so dispatch it */
1131 trace_block_bio_remap(clone->bi_disk->queue, clone,
1223 sector = clone->bi_iter.bi_sector;
1224
1225 r = ti->type->map(ti, clone);
1226 switch (r) {
1227 case DM_MAPIO_SUBMITTED:
1228 break;
1229 case DM_MAPIO_REMAPPED:
1230 /* the bio has been remapped so dispatch it */
1231 trace_block_bio_remap(clone->bi_disk->queue, clone,
1132 bio_dev(tio->io->orig_bio), sector);
1232 bio_dev(io->orig_bio), sector);
1133 generic_make_request(clone);
1134 break;
1135 case DM_MAPIO_KILL:
1233 generic_make_request(clone);
1234 break;
1235 case DM_MAPIO_KILL:
1136 dec_pending(tio->io, BLK_STS_IOERR);
1137 free_tio(tio);
1236 free_tio(tio);
1237 dec_pending(io, BLK_STS_IOERR);
1138 break;
1139 case DM_MAPIO_REQUEUE:
1238 break;
1239 case DM_MAPIO_REQUEUE:
1140 dec_pending(tio->io, BLK_STS_DM_REQUEUE);
1141 free_tio(tio);
1240 free_tio(tio);
1241 dec_pending(io, BLK_STS_DM_REQUEUE);
1142 break;
1143 default:
1144 DMWARN("unimplemented target map return value: %d", r);
1145 BUG();
1146 }
1147}
1148
1242 break;
1243 default:
1244 DMWARN("unimplemented target map return value: %d", r);
1245 BUG();
1246 }
1247}
1248
1149struct clone_info {
1150 struct mapped_device *md;
1151 struct dm_table *map;
1152 struct bio *bio;
1153 struct dm_io *io;
1154 sector_t sector;
1155 unsigned sector_count;
1156};
1157
1158static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
1159{
1160 bio->bi_iter.bi_sector = sector;
1161 bio->bi_iter.bi_size = to_bytes(len);
1162}
1163
1164/*
1165 * Creates a bio that consists of range of complete bvecs.

--- 26 unchanged lines hidden (view full) ---

1192 clone->bi_iter.bi_size = to_bytes(len);
1193
1194 if (unlikely(bio_integrity(bio) != NULL))
1195 bio_integrity_trim(clone);
1196
1197 return 0;
1198}
1199
1249static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
1250{
1251 bio->bi_iter.bi_sector = sector;
1252 bio->bi_iter.bi_size = to_bytes(len);
1253}
1254
1255/*
1256 * Creates a bio that consists of range of complete bvecs.

--- 26 unchanged lines hidden (view full) ---

1283 clone->bi_iter.bi_size = to_bytes(len);
1284
1285 if (unlikely(bio_integrity(bio) != NULL))
1286 bio_integrity_trim(clone);
1287
1288 return 0;
1289}
1290
1200static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti,
1201 unsigned target_bio_nr, gfp_t gfp_mask)
1202{
1203 struct dm_target_io *tio;
1204 struct bio *clone;
1205
1206 clone = bio_alloc_bioset(gfp_mask, 0, ci->md->bs);
1207 if (!clone)
1208 return NULL;
1209
1210 tio = container_of(clone, struct dm_target_io, clone);
1211 tio->io = ci->io;
1212 tio->ti = ti;
1213 tio->target_bio_nr = target_bio_nr;
1214
1215 return tio;
1216}
1217
1218static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
1219 struct dm_target *ti, unsigned num_bios)
1220{
1221 struct dm_target_io *tio;
1222 int try;
1223
1224 if (!num_bios)
1225 return;

--- 397 unchanged lines hidden (view full) ---

1623{
1624 if (md->wq)
1625 destroy_workqueue(md->wq);
1626 if (md->kworker_task)
1627 kthread_stop(md->kworker_task);
1628 mempool_destroy(md->io_pool);
1629 if (md->bs)
1630 bioset_free(md->bs);
1291static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
1292 struct dm_target *ti, unsigned num_bios)
1293{
1294 struct dm_target_io *tio;
1295 int try;
1296
1297 if (!num_bios)
1298 return;

--- 397 unchanged lines hidden (view full) ---

1696{
1697 if (md->wq)
1698 destroy_workqueue(md->wq);
1699 if (md->kworker_task)
1700 kthread_stop(md->kworker_task);
1701 mempool_destroy(md->io_pool);
1702 if (md->bs)
1703 bioset_free(md->bs);
1704 if (md->io_bs)
1705 bioset_free(md->io_bs);
1631
1632 if (md->dax_dev) {
1633 kill_dax(md->dax_dev);
1634 put_dax(md->dax_dev);
1635 md->dax_dev = NULL;
1636 }
1637
1638 if (md->disk) {

--- 149 unchanged lines hidden (view full) ---

1788 kvfree(md);
1789}
1790
1791static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
1792{
1793 struct dm_md_mempools *p = dm_table_get_md_mempools(t);
1794
1795 if (dm_table_bio_based(t)) {
1706
1707 if (md->dax_dev) {
1708 kill_dax(md->dax_dev);
1709 put_dax(md->dax_dev);
1710 md->dax_dev = NULL;
1711 }
1712
1713 if (md->disk) {

--- 149 unchanged lines hidden (view full) ---

1863 kvfree(md);
1864}
1865
1866static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
1867{
1868 struct dm_md_mempools *p = dm_table_get_md_mempools(t);
1869
1870 if (dm_table_bio_based(t)) {
1796 /* The md may already have mempools that need changing. */
1871 /*
1872 * The md may already have mempools that need changing.
1873 * If so, reload bioset because front_pad may have changed
1874 * because a different table was loaded.
1875 */
1797 if (md->bs) {
1876 if (md->bs) {
1798 /*
1799 * Reload bioset because front_pad may have changed
1800 * because a different table was loaded.
1801 */
1802 bioset_free(md->bs);
1803 md->bs = NULL;
1804 }
1877 bioset_free(md->bs);
1878 md->bs = NULL;
1879 }
1880 if (md->io_bs) {
1881 bioset_free(md->io_bs);
1882 md->io_bs = NULL;
1883 }
1805 if (md->io_pool) {
1806 /*
1807 * Reload io_pool because pool_size may have changed
1808 * because a different table was loaded.
1809 */
1810 mempool_destroy(md->io_pool);
1811 md->io_pool = NULL;
1812 }

--- 5 unchanged lines hidden (view full) ---

1818 * Note for future: If you are to reload bioset,
1819 * prep-ed requests in the queue may refer
1820 * to bio from the old bioset, so you must walk
1821 * through the queue to unprep.
1822 */
1823 goto out;
1824 }
1825
1884 if (md->io_pool) {
1885 /*
1886 * Reload io_pool because pool_size may have changed
1887 * because a different table was loaded.
1888 */
1889 mempool_destroy(md->io_pool);
1890 md->io_pool = NULL;
1891 }

--- 5 unchanged lines hidden (view full) ---

1897 * Note for future: If you are to reload bioset,
1898 * prep-ed requests in the queue may refer
1899 * to bio from the old bioset, so you must walk
1900 * through the queue to unprep.
1901 */
1902 goto out;
1903 }
1904
1826 BUG_ON(!p || md->io_pool || md->bs);
1905 BUG_ON(!p || md->io_pool || md->bs || md->io_bs);
1827
1828 md->io_pool = p->io_pool;
1829 p->io_pool = NULL;
1830 md->bs = p->bs;
1831 p->bs = NULL;
1906
1907 md->io_pool = p->io_pool;
1908 p->io_pool = NULL;
1909 md->bs = p->bs;
1910 p->bs = NULL;
1911 md->io_bs = p->io_bs;
1912 p->io_bs = NULL;
1832out:
1833 /* mempool bind completed, no longer need any mempools in the table */
1834 dm_table_free_md_mempools(t);
1835}
1836
1837/*
1838 * Bind a table to the device.
1839 */

--- 874 unchanged lines hidden (view full) ---

2714EXPORT_SYMBOL_GPL(dm_noflush_suspending);
2715
2716struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
2717 unsigned integrity, unsigned per_io_data_size,
2718 unsigned min_pool_size)
2719{
2720 struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
2721 unsigned int pool_size = 0;
1913out:
1914 /* mempool bind completed, no longer need any mempools in the table */
1915 dm_table_free_md_mempools(t);
1916}
1917
1918/*
1919 * Bind a table to the device.
1920 */

--- 874 unchanged lines hidden (view full) ---

2795EXPORT_SYMBOL_GPL(dm_noflush_suspending);
2796
2797struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
2798 unsigned integrity, unsigned per_io_data_size,
2799 unsigned min_pool_size)
2800{
2801 struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
2802 unsigned int pool_size = 0;
2722 unsigned int front_pad;
2803 unsigned int front_pad, io_front_pad;
2723
2724 if (!pools)
2725 return NULL;
2726
2727 switch (type) {
2728 case DM_TYPE_BIO_BASED:
2729 case DM_TYPE_DAX_BIO_BASED:
2730 pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
2731 front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
2804
2805 if (!pools)
2806 return NULL;
2807
2808 switch (type) {
2809 case DM_TYPE_BIO_BASED:
2810 case DM_TYPE_DAX_BIO_BASED:
2811 pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
2812 front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
2813 io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) + offsetof(struct dm_io, tio);
2814 pools->io_bs = bioset_create(pool_size, io_front_pad, 0);
2815 if (!pools->io_bs)
2816 goto out;
2817 if (integrity && bioset_integrity_create(pools->io_bs, pool_size))
2818 goto out;
2732 pools->io_pool = mempool_create_slab_pool(pool_size, _io_cache);
2733 if (!pools->io_pool)
2734 goto out;
2735 break;
2736 case DM_TYPE_REQUEST_BASED:
2737 case DM_TYPE_MQ_REQUEST_BASED:
2738 pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
2739 front_pad = offsetof(struct dm_rq_clone_bio_info, clone);

--- 22 unchanged lines hidden (view full) ---

2762{
2763 if (!pools)
2764 return;
2765
2766 mempool_destroy(pools->io_pool);
2767
2768 if (pools->bs)
2769 bioset_free(pools->bs);
2819 pools->io_pool = mempool_create_slab_pool(pool_size, _io_cache);
2820 if (!pools->io_pool)
2821 goto out;
2822 break;
2823 case DM_TYPE_REQUEST_BASED:
2824 case DM_TYPE_MQ_REQUEST_BASED:
2825 pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
2826 front_pad = offsetof(struct dm_rq_clone_bio_info, clone);

--- 22 unchanged lines hidden (view full) ---

2849{
2850 if (!pools)
2851 return;
2852
2853 mempool_destroy(pools->io_pool);
2854
2855 if (pools->bs)
2856 bioset_free(pools->bs);
2857 if (pools->io_bs)
2858 bioset_free(pools->io_bs);
2770
2771 kfree(pools);
2772}
2773
2774struct dm_pr {
2775 u64 old_key;
2776 u64 new_key;
2777 u32 flags;

--- 194 unchanged lines hidden ---
2859
2860 kfree(pools);
2861}
2862
2863struct dm_pr {
2864 u64 old_key;
2865 u64 new_key;
2866 u32 flags;

--- 194 unchanged lines hidden ---