dm.c: diff between commits 8c57a5e7b2820f349c95b8c8393fec1e0f4070d2 (old) and 70246286e94c335b5bea0cbc68a17a96dd620281 (new).
Removed lines are prefixed with "-", added lines with "+"; unchanged context appears once, with its old-side line number.
1/*
2 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4 *
5 * This file is released under the GPL.
6 */
7
8#include "dm.h"

--- 660 unchanged lines hidden ---

669 return mempool_alloc(md->io_pool, GFP_NOIO);
670}
671
672static void free_io(struct mapped_device *md, struct dm_io *io)
673{
674 mempool_free(io, md->io_pool);
675}
676
-677static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
+677static void free_tio(struct dm_target_io *tio)
678{
679 bio_put(&tio->clone);
680}
681
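The mapped_device argument drops out of free_tio() because a dm_target_io is not a separate mempool object: the struct embeds its clone bio as the final member and is allocated as the front pad of that bio, so releasing the last reference on the clone frees the tio with it. A minimal sketch of the pattern, with hypothetical names rather than dm's exact layout:

struct tio_example {
	unsigned target_bio_nr;			/* per-target bookkeeping */
	struct bio clone;			/* must be the last member */
};

static struct tio_example *alloc_tio_example(struct bio_set *bs)
{
	/* bs must have front_pad = offsetof(struct tio_example, clone) */
	struct bio *clone = bio_alloc_bioset(GFP_NOIO, 0, bs);

	return clone ? container_of(clone, struct tio_example, clone) : NULL;
}

static void free_tio_example(struct tio_example *tio)
{
	bio_put(&tio->clone);	/* one allocation: freeing the bio frees the tio */
}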
682static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md,
683 gfp_t gfp_mask)
684{
685 return mempool_alloc(md->io_pool, gfp_mask);

--- 32 unchanged lines hidden ---

718
719 cpu = part_stat_lock();
720 part_round_stats(cpu, &dm_disk(md)->part0);
721 part_stat_unlock();
722 atomic_set(&dm_disk(md)->part0.in_flight[rw],
723 atomic_inc_return(&md->pending[rw]));
724
725 if (unlikely(dm_stats_used(&md->stats)))
-726 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
-727 bio_sectors(bio), false, 0, &io->stats_aux);
+726 dm_stats_account_io(&md->stats, bio_data_dir(bio),
+727 bio->bi_iter.bi_sector, bio_sectors(bio),
+728 false, 0, &io->stats_aux);
728}
729
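dm_stats_account_io() only needs the data direction, not the whole flag word. Passing bio->bi_rw (and, below, orig->cmd_flags) happened to work while WRITE was bit 0 of the flags, but after the 4.8 operation/flag split bi_rw also carries a REQ_OP_* value, so the stats callers now reduce it explicitly with bio_data_dir() and rq_data_dir(). A sketch of what bio_data_dir() boils down to under the 4.8 definitions (assumed, simplified):

static inline int bio_dir_example(struct bio *bio)
{
	/* op_is_write() is true for WRITE, DISCARD, WRITE_SAME, ... */
	return op_is_write(bio_op(bio)) ? WRITE : READ;
}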
730static void end_io_acct(struct dm_io *io)
731{
732 struct mapped_device *md = io->md;
733 struct bio *bio = io->bio;
734 unsigned long duration = jiffies - io->start_time;
735 int pending;
736 int rw = bio_data_dir(bio);
737
738 generic_end_io_acct(rw, &dm_disk(md)->part0, io->start_time);
739
740 if (unlikely(dm_stats_used(&md->stats)))
-741 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
-742 bio_sectors(bio), true, duration, &io->stats_aux);
+742 dm_stats_account_io(&md->stats, bio_data_dir(bio),
+743 bio->bi_iter.bi_sector, bio_sectors(bio),
+744 true, duration, &io->stats_aux);
743
744 /*
745 * After this is decremented the bio must not be touched if it is
746 * a flush.
747 */
748 pending = atomic_dec_return(&md->pending[rw]);
749 atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
750 pending += atomic_read(&md->pending[rw^0x1]);

--- 245 unchanged lines hidden ---

996 io_error = io->error;
997 bio = io->bio;
998 end_io_acct(io);
999 free_io(md, io);
1000
1001 if (io_error == DM_ENDIO_REQUEUE)
1002 return;
1003
-1004 if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
+1006 if ((bio->bi_rw & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
1005 /*
1006 * Preflush done for flush with data, reissue
1007 /*
1008 * Preflush done for flush with data, reissue
-1007 * without REQ_FLUSH.
+1009 * without REQ_PREFLUSH.
1008 */
-1009 bio->bi_rw &= ~REQ_FLUSH;
+1011 bio->bi_rw &= ~REQ_PREFLUSH;
1010 queue_io(md, bio);
1011 } else {
1012 /* done with normal IO or empty flush */
1013 trace_block_bio_complete(md->queue, bio, io_error);
1014 bio->bi_error = io_error;
1015 bio_endio(bio);
1016 }
1017 }
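REQ_FLUSH becomes REQ_PREFLUSH in 4.8 to make explicit that the flag asks for a cache flush before the bio's payload is written; durability of the payload itself remains REQ_FUA's job. The logic above is untouched by the rename: a flush bio that carries data first completes as an empty preflush, then is requeued with the preflush bit cleared so only the data phase remains. Under the new names, a submitter wanting "flush the cache, then write this durably" would build the bio roughly like this (a sketch using the 4.8 helpers):

static void submit_flush_fua_write_example(struct bio *bio)
{
	/* REQ_PREFLUSH: flush the device cache before the payload;
	 * REQ_FUA: the payload itself must reach stable media. */
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA);
	generic_make_request(bio);
}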

--- 28 unchanged lines hidden ---

1046 /* The target will handle the io */
1047 return;
1048 else if (r) {
1049 DMWARN("unimplemented target endio return value: %d", r);
1050 BUG();
1051 }
1052 }
1053
-1054 if (unlikely(r == -EREMOTEIO && (bio->bi_rw & REQ_WRITE_SAME) &&
+1056 if (unlikely(r == -EREMOTEIO && (bio_op(bio) == REQ_OP_WRITE_SAME) &&
1055 !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors))
1056 disable_write_same(md);
1057
-1058 free_tio(md, tio);
+1060 free_tio(tio);
1059 dec_pending(io, error);
1060}
1061
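The write-same test above is the mechanical half of the 4.8 split: the operation is no longer a flag bit checked with "&" but a single REQ_OP_* value read back with bio_op() (req_op() for requests), so the check becomes an equality. The same rewrite recurs below for REQ_OP_DISCARD and REQ_OP_FLUSH. One consequence, sketched here, is that op dispatch can be an exhaustive switch instead of an ordered chain of flag tests:

static const char *op_name_example(struct bio *bio)
{
	switch (bio_op(bio)) {
	case REQ_OP_READ:	return "read";
	case REQ_OP_WRITE:	return "write";
	case REQ_OP_DISCARD:	return "discard";
	case REQ_OP_WRITE_SAME:	return "write-same";
	case REQ_OP_FLUSH:	return "flush";
	default:		return "unknown";
	}
}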
1062/*
1063 * Partial completion handling for request-based dm
1064 */
1065static void end_clone_bio(struct bio *clone)
1066{

--- 49 unchanged lines hidden ---

1116 return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special);
1117}
1118
1119static void rq_end_stats(struct mapped_device *md, struct request *orig)
1120{
1121 if (unlikely(dm_stats_used(&md->stats))) {
1122 struct dm_rq_target_io *tio = tio_from_request(orig);
1123 tio->duration_jiffies = jiffies - tio->duration_jiffies;
-1124 dm_stats_account_io(&md->stats, orig->cmd_flags, blk_rq_pos(orig),
-1125 tio->n_sectors, true, tio->duration_jiffies,
-1126 &tio->stats_aux);
+1126 dm_stats_account_io(&md->stats, rq_data_dir(orig),
+1127 blk_rq_pos(orig), tio->n_sectors, true,
+1128 tio->duration_jiffies, &tio->stats_aux);
1127 }
1128}
1129
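rq_end_stats() relies on a small field-reuse idiom: tio->duration_jiffies is stamped with the start jiffies when the request is issued (the dm_stats_used() block in the start-of-request path further down sets it alongside n_sectors) and is overwritten with the elapsed delta here, right before being fed to dm_stats_account_io() with rq_data_dir(orig) as the direction. The idiom in isolation:

struct io_clock_example {
	unsigned long duration_jiffies;		/* start stamp, then delta */
};

static void io_clock_start(struct io_clock_example *c)
{
	c->duration_jiffies = jiffies;
}

static void io_clock_stop(struct io_clock_example *c)
{
	c->duration_jiffies = jiffies - c->duration_jiffies;
}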
1130/*
1131 * Don't touch any member of the md after calling this function because
1132 * the md may be freed in dm_put() at the end of this function.
1133 * Or do dm_get() before calling this function and dm_put() later.
1134 */

--- 180 unchanged lines hidden ---

1315
1316 if (tio->ti) {
1317 rq_end_io = tio->ti->type->rq_end_io;
1318
1319 if (mapped && rq_end_io)
1320 r = rq_end_io(tio->ti, clone, error, &tio->info);
1321 }
1322
-1323 if (unlikely(r == -EREMOTEIO && (clone->cmd_flags & REQ_WRITE_SAME) &&
+1325 if (unlikely(r == -EREMOTEIO && (req_op(clone) == REQ_OP_WRITE_SAME) &&
1324 !clone->q->limits.max_write_same_sectors))
1325 disable_write_same(tio->md);
1326
1327 if (r <= 0)
1328 /* The target wants to complete the I/O */
1329 dm_end_request(clone, r);
1330 else if (r == DM_ENDIO_INCOMPLETE)
1331 /* The target will handle the I/O */

--- 138 unchanged lines hidden ---

1470 ti->max_io_len = (uint32_t) len;
1471
1472 return 0;
1473}
1474EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
1475
1476/*
1477 * A target may call dm_accept_partial_bio only from the map routine. It is
-1478 * allowed for all bio types except REQ_FLUSH.
+1480 * allowed for all bio types except REQ_PREFLUSH.
1479 *
1480 * dm_accept_partial_bio informs the dm that the target only wants to process
1481 * additional n_sectors sectors of the bio and the rest of the data should be
1482 * sent in a next bio.
1483 *
1484 * A diagram that explains the arithmetics:
1485 * +--------------------+---------------+-------+
1486 * | 1 | 2 | 3 |

--- 13 unchanged lines hidden ---

1500 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
1501 * the partially processed part (the sum of regions 1+2) must be the same for all
1502 * copies of the bio.
1503 */
1504void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
1505{
1506 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
1507 unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
-1508 BUG_ON(bio->bi_rw & REQ_FLUSH);
+1510 BUG_ON(bio->bi_rw & REQ_PREFLUSH);
1509 BUG_ON(bi_size > *tio->len_ptr);
1510 BUG_ON(n_sectors > bi_size);
1511 *tio->len_ptr -= bi_size - n_sectors;
1512 bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
1513}
1514EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
1515
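A worked example of the arithmetic above: a hypothetical target that only handles I/O up to a fixed chunk boundary can trim the bio in its map routine and let dm resubmit the remainder. Everything here other than dm_accept_partial_bio() and DM_MAPIO_REMAPPED is illustrative, not dm API:

#define EXAMPLE_CHUNK_SECTORS 128	/* assumed power-of-two chunking */

static int example_map(struct dm_target *ti, struct bio *bio)
{
	sector_t start = bio->bi_iter.bi_sector;
	sector_t boundary = round_up(start + 1, EXAMPLE_CHUNK_SECTORS);
	unsigned n_sectors = min_t(sector_t, bio_sectors(bio),
				   boundary - start);

	/* Keep regions 1+2 of the diagram; region 3 is resubmitted
	 * later as a fresh bio. */
	if (n_sectors < bio_sectors(bio))
		dm_accept_partial_bio(bio, n_sectors);

	/* ... remap bi_bdev/bi_sector for this chunk here ... */
	return DM_MAPIO_REMAPPED;
}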
1516static void __map_bio(struct dm_target_io *tio)
1517{
1518 int r;
1519 sector_t sector;
-1520 struct mapped_device *md;
1521 struct bio *clone = &tio->clone;
1522 struct dm_target *ti = tio->ti;
1523
1524 clone->bi_end_io = clone_endio;
1525
1526 /*
1527 * Map the clone. If r == 0 we don't need to do
1528 * anything, the target has assumed ownership of

--- 6 unchanged lines hidden ---

1535 /* the bio has been remapped so dispatch it */
1536
1537 trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
1538 tio->io->bio->bi_bdev->bd_dev, sector);
1539
1540 generic_make_request(clone);
1541 } else if (r < 0 || r == DM_MAPIO_REQUEUE) {
1542 /* error the io and bail out, or requeue it if needed */
-1543 md = tio->io->md;
1544 dec_pending(tio->io, r);
-1545 free_tio(md, tio);
+1545 free_tio(tio);
1546 } else if (r != DM_MAPIO_SUBMITTED) {
1547 DMWARN("unimplemented target map return value: %d", r);
1548 BUG();
1549 }
1550}
1551
1552struct clone_info {
1553 struct mapped_device *md;

--- 104 unchanged lines hidden ---

1658 if (bio_data_dir(bio) == WRITE && ti->num_write_bios)
1659 num_target_bios = ti->num_write_bios(ti, bio);
1660
1661 for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
1662 tio = alloc_tio(ci, ti, target_bio_nr);
1663 tio->len_ptr = len;
1664 r = clone_bio(tio, bio, sector, *len);
1665 if (r < 0) {
-1666 free_tio(ci->md, tio);
+1666 free_tio(tio);
1667 break;
1668 }
1669 __map_bio(tio);
1670 }
1671
1672 return r;
1673}
1674

--- 68 unchanged lines hidden ---

1743 */
1744static int __split_and_process_non_flush(struct clone_info *ci)
1745{
1746 struct bio *bio = ci->bio;
1747 struct dm_target *ti;
1748 unsigned len;
1749 int r;
1750
-1751 if (unlikely(bio->bi_rw & REQ_DISCARD))
+1751 if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
1752 return __send_discard(ci);
-1753 else if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
+1753 else if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
1754 return __send_write_same(ci);
1755
1756 ti = dm_table_find_target(ci->map, ci->sector);
1757 if (!dm_target_is_valid(ti))
1758 return -EIO;
1759
1760 len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
1761

--- 28 unchanged lines hidden ---

1790 atomic_set(&ci.io->io_count, 1);
1791 ci.io->bio = bio;
1792 ci.io->md = md;
1793 spin_lock_init(&ci.io->endio_lock);
1794 ci.sector = bio->bi_iter.bi_sector;
1795
1796 start_io_acct(ci.io);
1797
-1798 if (bio->bi_rw & REQ_FLUSH) {
+1798 if (bio->bi_rw & REQ_PREFLUSH) {
1799 ci.bio = &ci.md->flush_bio;
1800 ci.sector_count = 0;
1801 error = __send_empty_flush(&ci);
1802 /* dec_pending submits any data associated with flush */
1803 } else {
1804 ci.bio = bio;
1805 ci.sector_count = bio_sectors(bio);
1806 while (ci.sector_count && !error)

--- 21 unchanged lines hidden ---

1828 map = dm_get_live_table(md, &srcu_idx);
1829
1830 generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0);
1831
1832 /* if we're suspended, we have to queue this io for later */
1833 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
1834 dm_put_live_table(md, srcu_idx);
1835
-1836 if (bio_rw(bio) != READA)
+1836 if (!(bio->bi_rw & REQ_RAHEAD))
1837 queue_io(md, bio);
1838 else
1839 bio_io_error(bio);
1840 return BLK_QC_T_NONE;
1841 }
1842
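bio_rw() and the READA constant are gone in 4.8; read-ahead is now an ordinary read carrying the advisory REQ_RAHEAD hint in bi_rw, tested directly. The behaviour is preserved: read-ahead is best-effort and its submitter must tolerate -EIO, so a suspended device may fail it immediately instead of queueing it. The test in isolation (assuming the 4.8 flag layout):

static bool may_fail_readahead_example(struct bio *bio)
{
	return bio->bi_rw & REQ_RAHEAD;		/* was: bio_rw(bio) == READA */
}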
1843 __split_and_process_bio(md, map, bio);
1844 dm_put_live_table(md, srcu_idx);

--- 234 unchanged lines hidden ---

2079 md->last_rq_rw = rq_data_dir(orig);
2080 md->last_rq_start_time = ktime_get();
2081 }
2082
2083 if (unlikely(dm_stats_used(&md->stats))) {
2084 struct dm_rq_target_io *tio = tio_from_request(orig);
2085 tio->duration_jiffies = jiffies;
2086 tio->n_sectors = blk_rq_sectors(orig);
-2087 dm_stats_account_io(&md->stats, orig->cmd_flags, blk_rq_pos(orig),
-2088 tio->n_sectors, false, 0, &tio->stats_aux);
+2087 dm_stats_account_io(&md->stats, rq_data_dir(orig),
+2088 blk_rq_pos(orig), tio->n_sectors, false, 0,
+2089 &tio->stats_aux);
2089 }
2090
2091 /*
2092 * Hold the md reference here for the in-flight I/O.
2093 * We can't rely on the reference count by device opener,
2094 * because the device may be closed during the request completion
2095 * when all bios are completed.
2096 * See the comment in rq_completed() too.

--- 68 unchanged lines hidden ---

2165 */
2166 while (!blk_queue_stopped(q)) {
2167 rq = blk_peek_request(q);
2168 if (!rq)
2169 return;
2170
2171 /* always use block 0 to find the target for flushes for now */
2172 pos = 0;
-2173 if (!(rq->cmd_flags & REQ_FLUSH))
+2174 if (req_op(rq) != REQ_OP_FLUSH)
2174 pos = blk_rq_pos(rq);
2175
2176 if ((dm_request_peeked_before_merge_deadline(md) &&
2177 md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
2178 md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
2179 (ti->type->busy && ti->type->busy(ti))) {
2180 blk_delay_queue(q, HZ / 100);
2181 return;

--- 227 unchanged lines hidden ---

2409 goto bad;
2410
2411 md->bdev = bdget_disk(md->disk, 0);
2412 if (!md->bdev)
2413 goto bad;
2414
2415 bio_init(&md->flush_bio);
2416 md->flush_bio.bi_bdev = md->bdev;
-2417 md->flush_bio.bi_rw = WRITE_FLUSH;
+2418 bio_set_op_attrs(&md->flush_bio, REQ_OP_WRITE, WRITE_FLUSH);
2418
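md->flush_bio is the template dm submits when it needs an empty flush of the underlying device; under the split API its operation and flags are installed with bio_set_op_attrs() instead of a plain bi_rw assignment (WRITE_FLUSH still supplies the flag bits, which now include REQ_PREFLUSH). Roughly what the helper does at this point in the tree, as an assumed simplification of the real macro in blk_types.h:

static inline void set_op_attrs_example(struct bio *bio, unsigned int op,
					unsigned int op_flags)
{
	/* the op lands in the high bits of bi_rw, the flags in the low bits */
	bio->bi_rw |= op << BIO_OP_SHIFT;
	bio->bi_rw |= op_flags;
}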
2419 dm_stats_init(&md->stats);
2420
2421 /* Populate the mapping, nobody knows we exist yet */
2422 spin_lock(&_minor_lock);
2423 old_md = idr_replace(&_minor_idr, md, minor);
2424 spin_unlock(&_minor_lock);
2425

--- 1335 unchanged lines hidden ---