dm.c: diff 60dd57c7479418e2bc902143eb46a2fdcfeecbbb -> e7089f65dd51afeda5eb760506b5950d95f9ec29
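
Two independent changes run through this diff: the bio submission path (__map_bio, __clone_and_map_simple_bio, __split_and_process_bio, dm_submit_bio) stops returning a blk_qc_t polling cookie and becomes void, and add_disk() failures are now checked and propagated. The void conversion matches the kernel-wide removal of cookie-based polling from the ->submit_bio path; that attribution is an inference from the hunks, not stated in the diff itself.
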
/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"

--- 1169 unchanged lines hidden ---

	while (latch > md->swap_bios) {
		cond_resched();
		up(&md->swap_bios_semaphore);
		md->swap_bios++;
	}
	mutex_unlock(&md->swap_bios_lock);
}

-static blk_qc_t __map_bio(struct dm_target_io *tio)
+static void __map_bio(struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct bio *clone = &tio->clone;
	struct dm_io *io = tio->io;
	struct dm_target *ti = tio->ti;
-	blk_qc_t ret = BLK_QC_T_NONE;

	clone->bi_end_io = clone_endio;

	/*
	 * Map the clone. If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */

--- 19 unchanged lines hidden ---

	r = ti->type->map(ti, clone);

	switch (r) {
	case DM_MAPIO_SUBMITTED:
		break;
	case DM_MAPIO_REMAPPED:
		/* the bio has been remapped so dispatch it */
		trace_block_bio_remap(clone, bio_dev(io->orig_bio), sector);
-		ret = submit_bio_noacct(clone);
+		submit_bio_noacct(clone);
		break;
	case DM_MAPIO_KILL:
		if (unlikely(swap_bios_limit(ti, clone))) {
			struct mapped_device *md = io->md;
			up(&md->swap_bios_semaphore);
		}
		free_tio(tio);
		dm_io_dec_pending(io, BLK_STS_IOERR);

--- 5 unchanged lines hidden ---

		}
		free_tio(tio);
		dm_io_dec_pending(io, BLK_STS_DM_REQUEUE);
		break;
	default:
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
-
-	return ret;
}
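
__map_bio() dispatches on the DM_MAPIO_* code returned by the target's ->map hook, as the switch above shows. For orientation, a minimal map hook in the style of dm-linear might look like the sketch below; example_ctx, its fields, and the function name are hypothetical, the called APIs are the real device-mapper ones.

	#include <linux/device-mapper.h>

	struct example_ctx {			/* hypothetical per-target state */
		struct dm_dev *dev;		/* underlying device */
		sector_t start;			/* offset into it */
	};

	static int example_map(struct dm_target *ti, struct bio *bio)
	{
		struct example_ctx *ec = ti->private;

		/* Redirect the bio at the underlying device... */
		bio_set_dev(bio, ec->dev->bdev);
		bio->bi_iter.bi_sector = ec->start +
			dm_target_offset(ti, bio->bi_iter.bi_sector);

		/* ...and let __map_bio() submit_bio_noacct() it for us. */
		return DM_MAPIO_REMAPPED;
	}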

static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
{
	bio->bi_iter.bi_sector = sector;
	bio->bi_iter.bi_size = to_bytes(len);
}

--- 70 unchanged lines hidden ---

		while ((bio = bio_list_pop(blist))) {
			tio = container_of(bio, struct dm_target_io, clone);
			free_tio(tio);
		}
	}
}

-static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci,
+static void __clone_and_map_simple_bio(struct clone_info *ci,
				       struct dm_target_io *tio, unsigned *len)
{
	struct bio *clone = &tio->clone;

	tio->len_ptr = len;

	__bio_clone_fast(clone, ci->bio);
	if (len)
		bio_setup_sector(clone, ci->sector, *len);
-
-	return __map_bio(tio);
+	__map_bio(tio);
}

static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
				  unsigned num_bios, unsigned *len)
{
	struct bio_list blist = BIO_EMPTY_LIST;
	struct bio *bio;
	struct dm_target_io *tio;

	alloc_multiple_bios(&blist, ci, ti, num_bios);

	while ((bio = bio_list_pop(&blist))) {
		tio = container_of(bio, struct dm_target_io, clone);
-		(void) __clone_and_map_simple_bio(ci, tio, len);
+		__clone_and_map_simple_bio(ci, tio, len);
	}
}

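__send_duplicate_bios() stages num_bios clones on a bio_list and then maps each one; with __clone_and_map_simple_bio() now returning void, the (void) cast that documented a discarded cookie is gone. The staging idiom itself is the stock bio_list API, roughly as in this self-contained sketch (drain_example is a made-up name):

	#include <linux/bio.h>
	#include <linux/blkdev.h>

	static void drain_example(struct bio_list *blist)
	{
		struct bio *bio;

		/* bio_list_pop() returns NULL once the list is empty. */
		while ((bio = bio_list_pop(blist)))
			submit_bio_noacct(bio);	/* or map/free each clone, as above */
	}
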
static int __send_empty_flush(struct clone_info *ci)
{
	unsigned target_nr = 0;
	struct dm_target *ti;
	struct bio flush_bio;

--- 27 unchanged lines hidden ---

	tio = alloc_tio(ci, ti, 0, GFP_NOIO);
	tio->len_ptr = len;
	r = clone_bio(tio, bio, sector, *len);
	if (r < 0) {
		free_tio(tio);
		return r;
	}
-	(void) __map_bio(tio);
+	__map_bio(tio);

	return 0;
}

static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
				       unsigned num_bios)
{
	unsigned len;

--- 98 unchanged lines hidden ---

}

#define __dm_part_stat_sub(part, field, subnd) \
	(part_stat_get(part, field) -= (subnd))

/*
 * Entry point to split a bio into clones and submit them to the targets.
 */
-static blk_qc_t __split_and_process_bio(struct mapped_device *md,
+static void __split_and_process_bio(struct mapped_device *md,
				    struct dm_table *map, struct bio *bio)
{
	struct clone_info ci;
-	blk_qc_t ret = BLK_QC_T_NONE;
	int error = 0;

	init_clone_info(&ci, md, map, bio);

	if (bio->bi_opf & REQ_PREFLUSH) {
		error = __send_empty_flush(&ci);
		/* dm_io_dec_pending submits any data associated with flush */
	} else if (op_is_zone_mgmt(bio_op(bio))) {

--- 26 unchanged lines hidden ---

			 */
			part_stat_lock();
			__dm_part_stat_sub(dm_disk(md)->part0,
					   sectors[op_stat_group(bio_op(bio))], ci.sector_count);
			part_stat_unlock();

			bio_chain(b, bio);
			trace_block_split(b, bio->bi_iter.bi_sector);
-			ret = submit_bio_noacct(bio);
+			submit_bio_noacct(bio);
		}
	}

	/* drop the extra reference count */
	dm_io_dec_pending(ci.io, errno_to_blk_status(error));
-	return ret;
}

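The tail of __split_and_process_bio() above shows the split path: the unprocessed remainder is chained to the portion being handled and resubmitted, and with cookie-based polling gone the submit_bio_noacct() return value no longer needs to be captured. The underlying chaining idiom, independent of dm (max_sectors and the use of the generic fs_bio_set are illustrative; bio_split/bio_chain are the real block-layer APIs):

	#include <linux/bio.h>
	#include <linux/blkdev.h>

	static void split_resubmit_example(struct bio *bio, unsigned max_sectors)
	{
		struct bio *split = bio_split(bio, max_sectors, GFP_NOIO, &fs_bio_set);

		bio_chain(split, bio);		/* 'bio' completes only after 'split' */
		submit_bio_noacct(bio);		/* requeue the unprocessed remainder */
		/* ... go on to process 'split', as dm does with its clone ... */
	}
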
1571}
1572
-static blk_qc_t dm_submit_bio(struct bio *bio)
+static void dm_submit_bio(struct bio *bio)
{
	struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
-	blk_qc_t ret = BLK_QC_T_NONE;
	int srcu_idx;
	struct dm_table *map;

	map = dm_get_live_table(md, &srcu_idx);
	if (unlikely(!map)) {
		DMERR_LIMIT("%s: mapping table unavailable, erroring io",
			    dm_device_name(md));
		bio_io_error(bio);

--- 13 unchanged lines hidden ---

	/*
	 * Use blk_queue_split() for abnormal IO (e.g. discard, writesame, etc)
	 * otherwise associated queue_limits won't be imposed.
	 */
	if (is_abnormal_io(bio))
		blk_queue_split(&bio);

-	ret = __split_and_process_bio(md, map, bio);
+	__split_and_process_bio(md, map, bio);
out:
	dm_put_live_table(md, srcu_idx);
-	return ret;
}
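
dm_submit_bio() is dm's ->submit_bio hook, so its new void return has to match the block_device_operations signature of the same kernel series. A sketch of the hookup: dm_blk_dops, dm_blk_open, dm_blk_close and dm_blk_ioctl do exist in dm.c, but the member list shown here is trimmed and illustrative rather than exhaustive.

	static const struct block_device_operations dm_blk_dops = {
		.submit_bio	= dm_submit_bio,	/* now void (*)(struct bio *) */
		.open		= dm_blk_open,
		.release	= dm_blk_close,
		.ioctl		= dm_blk_ioctl,
		.owner		= THIS_MODULE,
	};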

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static void free_minor(int minor)
{
	spin_lock(&_minor_lock);

--- 457 unchanged lines hidden ---

	if (r) {
		DMERR("Cannot calculate initial queue limits");
		return r;
	}
	r = dm_table_set_restrictions(t, md->queue, &limits);
	if (r)
		return r;

-	add_disk(md->disk);
+	r = add_disk(md->disk);
+	if (r)
+		return r;

	r = dm_sysfs_init(md);
	if (r) {
		del_gendisk(md->disk);
		return r;
	}
	md->type = type;
	return 0;

--- 988 unchanged lines hidden ---
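
Since add_disk() can now fail (it is annotated __must_check in the same series), callers must propagate the error and unwind any later failure with del_gendisk(), exactly as the hunk above does. The general shape, with hypothetical driver names:

	#include <linux/blkdev.h>

	struct example_dev {				/* hypothetical driver state */
		struct gendisk *disk;
	};

	static int example_extra_setup(struct example_dev *dev);	/* hypothetical */

	static int example_register(struct example_dev *dev)
	{
		int r;

		r = add_disk(dev->disk);	/* may fail; return value must be checked */
		if (r)
			return r;		/* disk never went live; nothing to undo */

		r = example_extra_setup(dev);
		if (r) {
			del_gendisk(dev->disk);	/* undo a successful add_disk() */
			return r;
		}
		return 0;
	}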