--- dm.c (8bd8ea195f6d135a8d85201116314eb5237ad7e7)
+++ dm.c (3c94d83cb352627f221d971b05f163c17527de74)
 /*
  * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
  * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
  *
  * This file is released under the GPL.
  */
 
 #include "dm-core.h"

--- 632 unchanged lines hidden ---

 
 static void free_tio(struct dm_target_io *tio)
 {
         if (tio->inside_dm_io)
                 return;
         bio_put(&tio->clone);
 }
 
-int md_in_flight(struct mapped_device *md)
+static bool md_in_flight_bios(struct mapped_device *md)
 {
-        return atomic_read(&md->pending[READ]) +
-               atomic_read(&md->pending[WRITE]);
+        int cpu;
+        struct hd_struct *part = &dm_disk(md)->part0;
+        long sum = 0;
+
+        for_each_possible_cpu(cpu) {
+                sum += part_stat_local_read_cpu(part, in_flight[0], cpu);
+                sum += part_stat_local_read_cpu(part, in_flight[1], cpu);
+        }
+
+        return sum != 0;
+}
+
+static bool md_in_flight(struct mapped_device *md)
+{
+        if (queue_is_mq(md->queue))
+                return blk_mq_queue_inflight(md->queue);
+        else
+                return md_in_flight_bios(md);
 }
 
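
Note: the new md_in_flight_bios() replaces dm's two device-wide atomic pending counters with the block layer's per-CPU in_flight counters on part0. Writers only touch their own CPU's slot; the (rare) "is anything in flight?" check pays the cost of summing every slot. md_in_flight() then dispatches: blk_mq_queue_inflight() for request-based queues, the per-CPU sum for bio-based ones. The following is a minimal userspace sketch of that sharded-counter technique; the names (inflight_inc, anything_in_flight, NSHARDS) are illustrative stand-ins, not kernel API:

/* Sharded in-flight counting in the style of md_in_flight_bios():
 * writers touch only their own slot, the reader sums all slots. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NSHARDS 64                      /* stands in for nr_cpu_ids */

static atomic_long inflight[NSHARDS];   /* one slot per "CPU" */

static void inflight_inc(int shard) { atomic_fetch_add(&inflight[shard], 1); }
static void inflight_dec(int shard) { atomic_fetch_sub(&inflight[shard], 1); }

/* Mirrors md_in_flight_bios(): sum every slot; nonzero means busy.
 * Individual slots may go negative when an IO completes on a different
 * shard than it was submitted on -- only the total is meaningful. */
static bool anything_in_flight(void)
{
        long sum = 0;
        for (int i = 0; i < NSHARDS; i++)
                sum += atomic_load(&inflight[i]);
        return sum != 0;
}

int main(void)
{
        inflight_inc(3);                /* IO submitted on "CPU" 3 */
        printf("busy: %d\n", anything_in_flight());  /* busy: 1 */
        inflight_dec(7);                /* same IO completes on "CPU" 7 */
        printf("busy: %d\n", anything_in_flight());  /* busy: 0 */
        return 0;
}
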
 static void start_io_acct(struct dm_io *io)
 {
         struct mapped_device *md = io->md;
         struct bio *bio = io->orig_bio;
-        int rw = bio_data_dir(bio);
 
         io->start_time = jiffies;
 
         generic_start_io_acct(md->queue, bio_op(bio), bio_sectors(bio),
                               &dm_disk(md)->part0);
 
-        atomic_set(&dm_disk(md)->part0.in_flight[rw],
-                   atomic_inc_return(&md->pending[rw]));
-
         if (unlikely(dm_stats_used(&md->stats)))
                 dm_stats_account_io(&md->stats, bio_data_dir(bio),
                                     bio->bi_iter.bi_sector, bio_sectors(bio),
                                     false, 0, &io->stats_aux);
 }
 
 static void end_io_acct(struct dm_io *io)
 {
         struct mapped_device *md = io->md;
         struct bio *bio = io->orig_bio;
         unsigned long duration = jiffies - io->start_time;
-        int pending;
-        int rw = bio_data_dir(bio);
 
         generic_end_io_acct(md->queue, bio_op(bio), &dm_disk(md)->part0,
                             io->start_time);
 
         if (unlikely(dm_stats_used(&md->stats)))
                 dm_stats_account_io(&md->stats, bio_data_dir(bio),
                                     bio->bi_iter.bi_sector, bio_sectors(bio),
                                     true, duration, &io->stats_aux);
 
-        /*
-         * After this is decremented the bio must not be touched if it is
-         * a flush.
-         */
-        pending = atomic_dec_return(&md->pending[rw]);
-        atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
-        pending += atomic_read(&md->pending[rw^0x1]);
-
         /* nudge anyone waiting on suspend queue */
-        if (!pending)
+        if (unlikely(waitqueue_active(&md->wait)))
                 wake_up(&md->wait);
 }
 
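
Note: end_io_acct() no longer maintains its own pending count. The old code computed the combined READ+WRITE pending total and woke md->wait only when it hit zero; the new code nudges md->wait on any completion where a waiter exists (waitqueue_active() skips the wake_up() call in the common, waiter-free case), and the waiter re-checks md_in_flight() itself. A minimal pthread analog of that wait-until-idle handshake follows; io_start, io_done, and wait_for_idle are hypothetical names, and the condvar stands in for the kernel waitqueue (the kernel fast path additionally depends on memory-ordering guarantees that the mutex provides for free here):

/* Wait-until-idle handshake in the style of dm suspend: completions
 * nudge a waiter, which re-checks the in-flight count itself. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  idle = PTHREAD_COND_INITIALIZER;
static long in_flight;                  /* stands in for md_in_flight() */

static void io_start(void)
{
        pthread_mutex_lock(&lock);
        in_flight++;
        pthread_mutex_unlock(&lock);
}

static void io_done(void)               /* end_io_acct() analog */
{
        pthread_mutex_lock(&lock);
        in_flight--;
        pthread_cond_broadcast(&idle);  /* nudge anyone waiting */
        pthread_mutex_unlock(&lock);
}

static void wait_for_idle(void)         /* suspend-path analog */
{
        pthread_mutex_lock(&lock);
        while (in_flight)               /* waiter re-checks the predicate */
                pthread_cond_wait(&idle, &lock);
        pthread_mutex_unlock(&lock);
}

static void *completion_thread(void *arg)
{
        (void)arg;
        io_done();
        io_done();
        return NULL;
}

int main(void)
{
        pthread_t t;
        io_start();
        io_start();
        pthread_create(&t, NULL, completion_thread, NULL);
        wait_for_idle();                /* returns once both IOs completed */
        pthread_join(&t, NULL);
        puts("idle");
        return 0;
}
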
 /*
  * Add the bio to the list of deferred io.
  */
 static void queue_io(struct mapped_device *md, struct bio *bio)
 {

--- 703 unchanged lines hidden ---

         }
 }
 
 static int __send_empty_flush(struct clone_info *ci)
 {
         unsigned target_nr = 0;
         struct dm_target *ti;
 
+        /*
+         * Empty flush uses a statically initialized bio, &md->flush_bio, as
+         * the base for cloning. However, blkg association requires that a
+         * bdev is associated with a gendisk, which doesn't happen until the
+         * bdev is opened. So, blkg association is done at issue time of the
+         * flush rather than when the device is created in alloc_dev().
+         */
+        bio_set_dev(ci->bio, ci->io->md->bdev);
+
         BUG_ON(bio_has_data(ci->bio));
         while ((ti = dm_table_get_target(ci->map, target_nr++)))
                 __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
 
+        bio_disassociate_blkg(ci->bio);
+
         return 0;
 }
 
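
Note: __send_empty_flush() broadcasts the device-wide flush to every target, ti->num_flush_bios clones each, and the new code brackets that loop with bio_set_dev()/bio_disassociate_blkg() so the per-issue blkg association does not linger on the long-lived, statically initialized md->flush_bio. A trivial stub sketch of the broadcast shape; target, num_flush_bios values, and issue_clone here are illustrative stand-ins, not the dm API:

/* Broadcast shape of __send_empty_flush(): every target receives
 * num_flush_bios copies of the data-less flush. */
#include <stdio.h>

struct target { const char *name; unsigned num_flush_bios; };

static void issue_clone(const struct target *ti, unsigned n)
{
        printf("flush clone %u -> %s\n", n, ti->name);
}

static void send_empty_flush(const struct target *tgts, unsigned ntargets)
{
        /* bind the base flush to the device here (bio_set_dev analog) */
        for (unsigned t = 0; t < ntargets; t++)
                for (unsigned n = 0; n < tgts[t].num_flush_bios; n++)
                        issue_clone(&tgts[t], n);
        /* drop the per-issue association again (bio_disassociate_blkg) */
}

int main(void)
{
        struct target tgts[] = { { "linear0", 1 }, { "mirror1", 2 } };
        send_empty_flush(tgts, 2);
        return 0;
}
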
 static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
                                     sector_t sector, unsigned *len)
 {
         struct bio *bio = ci->bio;
         struct dm_target_io *tio;

--- 156 unchanged lines hidden ---

         blk_qc_t ret = BLK_QC_T_NONE;
         int error = 0;
 
         if (unlikely(!map)) {
                 bio_io_error(bio);
                 return ret;
         }
 
-        blk_queue_split(md->queue, &bio);
-
         init_clone_info(&ci, md, map, bio);
 
         if (bio->bi_opf & REQ_PREFLUSH) {
                 ci.bio = &ci.io->md->flush_bio;
                 ci.sector_count = 0;
                 error = __send_empty_flush(&ci);
                 /* dec_pending submits any data associated with flush */
         } else if (bio_op(bio) == REQ_OP_ZONE_RESET) {

--- 287 unchanged lines hidden ---

         atomic_set(&md->holders, 1);
         atomic_set(&md->open_count, 0);
         atomic_set(&md->event_nr, 0);
         atomic_set(&md->uevent_seq, 0);
         INIT_LIST_HEAD(&md->uevent_list);
         INIT_LIST_HEAD(&md->table_devices);
         spin_lock_init(&md->uevent_lock);
 
-        md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id, NULL);
+        md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id);
         if (!md->queue)
                 goto bad;
         md->queue->queuedata = md;
         md->queue->backing_dev_info->congested_data = md;
 
         md->disk = alloc_disk_node(1, md->numa_node_id);
         if (!md->disk)
                 goto bad;
 
-        atomic_set(&md->pending[0], 0);
-        atomic_set(&md->pending[1], 0);
         init_waitqueue_head(&md->wait);
         INIT_WORK(&md->work, dm_wq_work);
         init_waitqueue_head(&md->eventq);
         init_completion(&md->kobj_holder.completion);
 
         md->disk->major = _major;
         md->disk->first_minor = minor;
         md->disk->fops = &dm_blk_dops;

--- 15 unchanged lines hidden ---

         if (!md->wq)
                 goto bad;
 
         md->bdev = bdget_disk(md->disk, 0);
         if (!md->bdev)
                 goto bad;
 
         bio_init(&md->flush_bio, NULL, 0);
-        bio_set_dev(&md->flush_bio, md->bdev);
         md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
 
         dm_stats_init(&md->stats);
 
         /* Populate the mapping, nobody knows we exist yet */
         spin_lock(&_minor_lock);
         old_md = idr_replace(&_minor_idr, md, minor);
         spin_unlock(&_minor_lock);

--- 1225 unchanged lines hidden ---
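
Note: the idr_replace() under _minor_lock is the publication step the "nobody knows we exist yet" comment refers to: the minor slot is reserved earlier with a sentinel while the mapped_device is still being built, so lookups never observe a half-initialized device, and the finished object becomes visible in one swap under the lock. A userspace sketch of that reserve-then-publish pattern, with a mutex-guarded table standing in for the IDR (reserve_minor, publish_minor, SENTINEL are hypothetical analogs of idr_alloc()/idr_replace() and dm's allocated-but-not-ready marker):

/* Reserve-then-publish in the style of dm's minor IDR. */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

#define SENTINEL ((void *)-1)   /* "allocated but not ready" marker */
#define NMINORS  8

static pthread_mutex_t minor_lock = PTHREAD_MUTEX_INITIALIZER;
static void *minors[NMINORS];

static int reserve_minor(void)          /* idr_alloc() analog */
{
        pthread_mutex_lock(&minor_lock);
        for (int i = 0; i < NMINORS; i++) {
                if (!minors[i]) {
                        minors[i] = SENTINEL;
                        pthread_mutex_unlock(&minor_lock);
                        return i;
                }
        }
        pthread_mutex_unlock(&minor_lock);
        return -1;
}

static void publish_minor(int minor, void *md)  /* idr_replace() analog */
{
        pthread_mutex_lock(&minor_lock);
        minors[minor] = md;     /* lookups now see the finished object */
        pthread_mutex_unlock(&minor_lock);
}

static void *lookup_minor(int minor)
{
        pthread_mutex_lock(&minor_lock);
        void *md = minors[minor];
        pthread_mutex_unlock(&minor_lock);
        return md == SENTINEL ? NULL : md;  /* half-built: treat as absent */
}

int main(void)
{
        int minor = reserve_minor();
        printf("during setup: %p\n", lookup_minor(minor));  /* NULL */
        int device;                     /* stands in for mapped_device */
        publish_minor(minor, &device);
        printf("after publish: %p\n", lookup_minor(minor));
        return 0;
}
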