--- dm.c (aa6ce87a768226802f9a231b3909fe81c503852c)
+++ dm.c (a666e5c05e7c4aaabb2c5d58117b0946803d03d2)
 /*
  * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
  * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
  *
  * This file is released under the GPL.
  */

 #include "dm-core.h"
--- 139 unchanged lines hidden ---
 #define DMF_NOFLUSH_SUSPENDING 5
 #define DMF_DEFERRED_REMOVE 6
 #define DMF_SUSPENDED_INTERNALLY 7
 #define DMF_POST_SUSPENDING 8

 #define DM_NUMA_NODE NUMA_NO_NODE
 static int dm_numa_node = DM_NUMA_NODE;

+#define DEFAULT_SWAP_BIOS (8 * 1048576 / PAGE_SIZE)
+static int swap_bios = DEFAULT_SWAP_BIOS;
+static int get_swap_bios(void)
+{
+	int latch = READ_ONCE(swap_bios);
+	if (unlikely(latch <= 0))
+		latch = DEFAULT_SWAP_BIOS;
+	return latch;
+}
+
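For reference: with the common 4 KiB page size, DEFAULT_SWAP_BIOS evaluates to 8 * 1048576 / 4096 = 2048, i.e. at most 2048 swap bios in flight per mapped device by default. get_swap_bios() re-reads the swap_bios module parameter on every call and falls back to the default whenever the parameter has been set to zero or a negative value.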
 /*
  * For mempools pre-allocation at the table loading time.
  */
 struct dm_md_mempools {
 	struct bio_set bs;
 	struct bio_set io_bs;
 };

--- 805 unchanged lines hidden ---
 void disable_write_zeroes(struct mapped_device *md)
 {
 	struct queue_limits *limits = dm_get_queue_limits(md);

 	/* device doesn't really support WRITE ZEROES, disable it */
 	limits->max_write_zeroes_sectors = 0;
 }

+static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
+{
+	return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
+}
+
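swap_bios_limit() throttles only bios that are both marked REQ_SWAP and routed through a target that has opted in via the new ti->limit_swap_bios flag. The opt-in happens in a target's constructor, outside this file (dm-crypt, for instance, sets it to avoid deadlocking while swapping to an encrypted device). A minimal sketch of such a constructor; the function name and its body are hypothetical, only the limit_swap_bios field comes from this change:

static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	/* ... target-specific setup would go here (illustrative only) ... */

	/* Ask DM core to bound the number of in-flight REQ_SWAP bios
	 * passing through this target. */
	ti->limit_swap_bios = true;
	return 0;
}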
 static void clone_endio(struct bio *bio)
 {
 	blk_status_t error = bio->bi_status;
 	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
 	struct dm_io *io = tio->io;
 	struct mapped_device *md = tio->io->md;
 	dm_endio_fn endio = tio->ti->type->end_io;
 	struct bio *orig_bio = io->orig_bio;
--- 35 unchanged lines hidden ---
 			/* The target will handle the io */
 			return;
 		default:
 			DMWARN("unimplemented target endio return value: %d", r);
 			BUG();
 		}
 	}

+	if (unlikely(swap_bios_limit(tio->ti, bio))) {
+		struct mapped_device *md = io->md;
+		up(&md->swap_bios_semaphore);
+	}
+
 	free_tio(tio);
 	dec_pending(io, error);
 }

 /*
  * Return maximum size of I/O possible at the supplied sector up to the current
  * target boundary.
  */
--- 217 unchanged lines hidden ---
 	BUG_ON(bio->bi_opf & REQ_PREFLUSH);
 	BUG_ON(bi_size > *tio->len_ptr);
 	BUG_ON(n_sectors > bi_size);
 	*tio->len_ptr -= bi_size - n_sectors;
 	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
 }
 EXPORT_SYMBOL_GPL(dm_accept_partial_bio);

+static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
+{
+	mutex_lock(&md->swap_bios_lock);
+	while (latch < md->swap_bios) {
+		cond_resched();
+		down(&md->swap_bios_semaphore);
+		md->swap_bios--;
+	}
+	while (latch > md->swap_bios) {
+		cond_resched();
+		up(&md->swap_bios_semaphore);
+		md->swap_bios++;
+	}
+	mutex_unlock(&md->swap_bios_lock);
+}
+
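__set_swap_bios_limit() reconciles the per-device semaphore with the current value of the module parameter: shrinking the limit consumes slots with down(), growing it releases slots with up(), one slot at a time under swap_bios_lock and with cond_resched() so a large adjustment does not monopolize the CPU. The resize is triggered lazily from __map_bio() (below), just before a throttled bio takes a slot with down(&md->swap_bios_semaphore). The slot is returned with up() either in clone_endio() when the bio completes, or immediately in __map_bio() when the target returns DM_MAPIO_KILL or DM_MAPIO_REQUEUE, since those clones are freed without being submitted and never reach clone_endio().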
 static blk_qc_t __map_bio(struct dm_target_io *tio)
 {
 	int r;
 	sector_t sector;
 	struct bio *clone = &tio->clone;
 	struct dm_io *io = tio->io;
 	struct dm_target *ti = tio->ti;
 	blk_qc_t ret = BLK_QC_T_NONE;

 	clone->bi_end_io = clone_endio;

 	/*
 	 * Map the clone. If r == 0 we don't need to do
 	 * anything, the target has assumed ownership of
 	 * this io.
 	 */
 	atomic_inc(&io->io_count);
 	sector = clone->bi_iter.bi_sector;

+	if (unlikely(swap_bios_limit(ti, clone))) {
+		struct mapped_device *md = io->md;
+		int latch = get_swap_bios();
+		if (unlikely(latch != md->swap_bios))
+			__set_swap_bios_limit(md, latch);
+		down(&md->swap_bios_semaphore);
+	}
+
 	r = ti->type->map(ti, clone);
 	switch (r) {
 	case DM_MAPIO_SUBMITTED:
 		break;
 	case DM_MAPIO_REMAPPED:
 		/* the bio has been remapped so dispatch it */
 		trace_block_bio_remap(clone, bio_dev(io->orig_bio), sector);
 		ret = submit_bio_noacct(clone);
 		break;
 	case DM_MAPIO_KILL:
+		if (unlikely(swap_bios_limit(ti, clone))) {
+			struct mapped_device *md = io->md;
+			up(&md->swap_bios_semaphore);
+		}
 		free_tio(tio);
 		dec_pending(io, BLK_STS_IOERR);
 		break;
 	case DM_MAPIO_REQUEUE:
+		if (unlikely(swap_bios_limit(ti, clone))) {
+			struct mapped_device *md = io->md;
+			up(&md->swap_bios_semaphore);
+		}
 		free_tio(tio);
 		dec_pending(io, BLK_STS_DM_REQUEUE);
 		break;
 	default:
 		DMWARN("unimplemented target map return value: %d", r);
 		BUG();
 	}

--- 460 unchanged lines hidden ---
 		blk_cleanup_queue(md->queue);
 	}

 	cleanup_srcu_struct(&md->io_barrier);

 	mutex_destroy(&md->suspend_lock);
 	mutex_destroy(&md->type_lock);
 	mutex_destroy(&md->table_devices_lock);
+	mutex_destroy(&md->swap_bios_lock);

 	dm_mq_cleanup_mapped_device(md);
 }

 /*
  * Allocate and initialise a blank device with a given minor.
  */
 static struct mapped_device *alloc_dev(int minor)
--- 51 unchanged lines hidden ---
 	if (!md->disk)
 		goto bad;

 	init_waitqueue_head(&md->wait);
 	INIT_WORK(&md->work, dm_wq_work);
 	init_waitqueue_head(&md->eventq);
 	init_completion(&md->kobj_holder.completion);

+	md->swap_bios = get_swap_bios();
+	sema_init(&md->swap_bios_semaphore, md->swap_bios);
+	mutex_init(&md->swap_bios_lock);
+
 	md->disk->major = _major;
 	md->disk->first_minor = minor;
 	md->disk->fops = &dm_blk_dops;
 	md->disk->queue = md->queue;
 	md->disk->private_data = md;
 	sprintf(md->disk->disk_name, "dm-%d", minor);

 	if (IS_ENABLED(CONFIG_DAX_DRIVER)) {
--- 1267 unchanged lines hidden ---
 MODULE_PARM_DESC(major, "The major number of the device mapper");

 module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");

 module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");

+module_param(swap_bios, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs");
+
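Because swap_bios is registered with mode S_IRUGO | S_IWUSR, it is readable by everyone and writable only by root, and it can be changed at runtime; assuming dm.c is built as the usual dm_mod module, the knob would appear as /sys/module/dm_mod/parameters/swap_bios. The new value takes effect lazily: the next throttled bio on each device calls __set_swap_bios_limit() to resize that device's semaphore.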
 MODULE_DESCRIPTION(DM_NAME " driver");
 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
 MODULE_LICENSE("GPL");