--- dm.c	(ae9da83f6d800fe1f3b23bfbc8f7222ad1c5bb74)
+++ dm.c	(9e4e5f87ebcadb7ad9aca640bbe1038e1545e9f8)
 /*
  * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
  * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
  *
  * This file is released under the GPL.
  */

 #include "dm.h"

--- 737 unchanged lines hidden ---

 		ci->idx++;
 	}
 }

 /*
  * Split the bio into several clones.
  */
-static void __split_bio(struct mapped_device *md, struct bio *bio)
+static int __split_bio(struct mapped_device *md, struct bio *bio)
 {
 	struct clone_info ci;

 	ci.map = dm_get_table(md);
-	if (!ci.map) {
-		bio_io_error(bio);
-		return;
-	}
+	if (unlikely(!ci.map))
+		return -EIO;

 	ci.md = md;
 	ci.bio = bio;
 	ci.io = alloc_io(md);
 	ci.io->error = 0;
 	atomic_set(&ci.io->io_count, 1);
 	ci.io->bio = bio;
 	ci.io->md = md;
 	ci.sector = bio->bi_sector;
 	ci.sector_count = bio_sectors(bio);
 	ci.idx = bio->bi_idx;

 	start_io_acct(ci.io);
 	while (ci.sector_count)
 		__clone_and_map(&ci);

 	/* drop the extra reference count */
 	dec_pending(ci.io, 0);
 	dm_table_put(ci.map);
+
+	return 0;
 }
 /*-----------------------------------------------------------------
  * CRUD END
  *---------------------------------------------------------------*/
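The hunk above changes __split_bio()'s error handling: instead of completing a table-less bio itself with bio_io_error(), it now returns -EIO and leaves the completion decision to its callers. A minimal userspace sketch of that pattern follows; every name in it (fake_bio, fake_table, get_table, fail_bio, split) is a hypothetical stand-in, not the kernel API.

/*
 * Sketch only: a worker that reports failure with an error code
 * instead of completing the request itself, so the caller owns the
 * single point of error completion. Not kernel code.
 */
#include <stdio.h>

#define EIO 5

struct fake_bio { const char *name; };		/* hypothetical */
struct fake_table { int unused; };		/* hypothetical */

/* Pretend no table is loaded, the case the hunk handles. */
static struct fake_table *get_table(void) { return NULL; }

static void fail_bio(struct fake_bio *bio)
{
	printf("completing bio '%s' with -EIO\n", bio->name);
}

/* New style: report the problem, leave the bio untouched. */
static int split(struct fake_bio *bio)
{
	struct fake_table *t = get_table();

	if (!t)
		return -EIO;	/* caller decides how to fail the bio */

	/* ... cloning and mapping would happen here ... */
	return 0;
}

int main(void)
{
	struct fake_bio bio = { "example" };

	if (split(&bio) < 0)
		fail_bio(&bio);	/* the one error-completion site */

	return 0;
}

Funnelling the failure back to the caller is what lets dm_request, in the next hunk, complete every error through a single out_req path.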

 /*
  * The request function that just remaps the bio built up by
  * dm_merge_bvec.
  */
 static int dm_request(struct request_queue *q, struct bio *bio)
 {
-	int r;
+	int r = -EIO;
 	int rw = bio_data_dir(bio);
 	struct mapped_device *md = q->queuedata;

 	/*
 	 * There is no use in forwarding any barrier request since we can't
 	 * guarantee it is (or can be) handled by the targets correctly.
 	 */
 	if (unlikely(bio_barrier(bio))) {

--- 8 unchanged lines hidden ---

 	/*
 	 * If we're suspended we have to queue
 	 * this io for later.
 	 */
 	while (test_bit(DMF_BLOCK_IO, &md->flags)) {
 		up_read(&md->io_lock);

-		if (bio_rw(bio) == READA) {
-			bio_io_error(bio);
-			return 0;
-		}
+		if (bio_rw(bio) != READA)
+			r = queue_io(md, bio);

-		r = queue_io(md, bio);
-		if (r < 0) {
-			bio_io_error(bio);
-			return 0;
-
-		} else if (r == 0)
-			return 0;	/* deferred successfully */
+		if (r <= 0)
+			goto out_req;

 		/*
 		 * We're in a while loop, because someone could suspend
 		 * before we get to the following read lock.
 		 */
 		down_read(&md->io_lock);
 	}

-	__split_bio(md, bio);
+	r = __split_bio(md, bio);
 	up_read(&md->io_lock);
+
+out_req:
+	if (r < 0)
+		bio_io_error(bio);
+
 	return 0;
 }
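The rewritten deferral path above hinges on queue_io()'s return convention as it appears in the diff: a negative value is an error (now funnelled through out_req), zero means the bio was deferred successfully, and a positive value means the device stopped blocking I/O before the bio could be queued, so the loop re-takes the read lock and tests DMF_BLOCK_IO again. Below is a userspace sketch of that loop with pthreads standing in for the kernel's rw-semaphore; block_io, try_defer() and submit() are hypothetical, and the error case is omitted for brevity.

/*
 * Illustrative sketch of dm_request's deferral loop. Not kernel code.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t io_lock = PTHREAD_RWLOCK_INITIALIZER;
static int block_io;			/* stands in for DMF_BLOCK_IO */

/* 0 = deferred successfully, 1 = raced with a resume, try again */
static int try_defer(int id)
{
	if (!block_io)
		return 1;
	printf("bio %d deferred\n", id);
	return 0;
}

static void submit(int id)
{
	pthread_rwlock_rdlock(&io_lock);

	/*
	 * A loop, not an if: a suspend can slip in again between the
	 * unlock below and the rdlock at the bottom.
	 */
	while (block_io) {
		pthread_rwlock_unlock(&io_lock);

		if (try_defer(id) == 0)
			return;		/* deferred successfully */

		pthread_rwlock_rdlock(&io_lock);
	}

	printf("bio %d mapped\n", id);
	pthread_rwlock_unlock(&io_lock);
}

int main(void)
{
	submit(1);		/* device live: mapped immediately */
	block_io = 1;
	submit(2);		/* device suspended: deferred */
	return 0;
}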

 static void dm_unplug_all(struct request_queue *q)
 {
 	struct mapped_device *md = q->queuedata;
 	struct dm_table *map = dm_get_table(md);

--- 382 unchanged lines hidden ---

  */
 static void __flush_deferred_io(struct mapped_device *md, struct bio *c)
 {
 	struct bio *n;

 	while (c) {
 		n = c->bi_next;
 		c->bi_next = NULL;
-		__split_bio(md, c);
+		if (__split_bio(md, c))
+			bio_io_error(c);
 		c = n;
 	}
 }
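__flush_deferred_io() walks the chain of deferred bios linked through bi_next, detaching each bio before resubmitting it, and with the int-returning __split_bio() it now completes a failed resubmission with bio_io_error(). A self-contained sketch of the same walk-and-detach loop follows; fake_bio and resubmit() are hypothetical stand-ins, with resubmit() hard-wired to fail for one bio to show the error path.

/*
 * Sketch only: flush a singly linked chain of deferred requests,
 * detaching each node before handing it on so the handler sees one
 * request, not the remainder of the chain. Not kernel code.
 */
#include <stdio.h>
#include <stddef.h>

struct fake_bio {			/* hypothetical stand-in */
	int id;
	struct fake_bio *bi_next;
};

/* Pretend bio 2 fails with -EIO (5) when resubmitted. */
static int resubmit(struct fake_bio *c)
{
	printf("resubmitting bio %d\n", c->id);
	return c->id == 2 ? -5 : 0;
}

static void flush_deferred(struct fake_bio *c)
{
	struct fake_bio *n;

	while (c) {
		n = c->bi_next;
		c->bi_next = NULL;	/* detach before resubmitting */
		if (resubmit(c))
			printf("bio %d failed\n", c->id);
		c = n;
	}
}

int main(void)
{
	struct fake_bio b3 = { 3, NULL };
	struct fake_bio b2 = { 2, &b3 };
	struct fake_bio b1 = { 1, &b2 };

	flush_deferred(&b1);	/* bios 1 and 3 succeed, bio 2 fails */
	return 0;
}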

 /*
  * Swap in a new table (destroying old one).
  */
 int dm_swap_table(struct mapped_device *md, struct dm_table *table)

--- 306 unchanged lines hidden ---