--- dm.c (08cd84c81f27d5bd22ba958b7cae6d566c509280)
+++ dm.c (cf222b3769c3759488579441ab724ed33a2da5f4)
 /*
  * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
  * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
  *
  * This file is released under the GPL.
  */

 #include "dm.h"

--- 596 unchanged lines hidden ---

 static int dm_flush_all(request_queue_t *q, struct gendisk *disk,
                         sector_t *error_sector)
 {
         struct mapped_device *md = q->queuedata;
         struct dm_table *map = dm_get_table(md);
         int ret = -ENXIO;

         if (map) {
-                ret = dm_table_flush_all(md->map);
+                ret = dm_table_flush_all(map);
                 dm_table_put(map);
         }

         return ret;
 }

 static void dm_unplug_all(request_queue_t *q)
 {

--- 227 unchanged lines hidden ---

         if (size == 0)
                 return 0;

         write_lock(&md->map_lock);
         md->map = t;
         write_unlock(&md->map_lock);

         dm_table_get(t);
-        dm_table_event_callback(md->map, event_callback, md);
+        dm_table_event_callback(t, event_callback, md);
         dm_table_set_restrictions(t, q);
         return 0;
 }

 static void __unbind(struct mapped_device *md)
 {
         struct dm_table *map = md->map;

--- 64 unchanged lines hidden ---

         atomic_inc(&md->holders);
 }

 void dm_put(struct mapped_device *md)
 {
         struct dm_table *map = dm_get_table(md);

         if (atomic_dec_and_test(&md->holders)) {
-                if (!test_bit(DMF_SUSPENDED, &md->flags) && map) {
+                if (!dm_suspended(md)) {
                         dm_table_presuspend_targets(map);
                         dm_table_postsuspend_targets(map);
                 }
                 __unbind(md);
                 free_dev(md);
         }

         dm_table_put(map);

--- 19 unchanged lines hidden ---

  */
 int dm_swap_table(struct mapped_device *md, struct dm_table *table)
 {
         int r = -EINVAL;

         down_write(&md->lock);

         /* device must be suspended */
-        if (!test_bit(DMF_SUSPENDED, &md->flags))
+        if (!dm_suspended(md))
                 goto out;

         __unbind(md);
         r = __bind(md, table);

 out:
         up_write(&md->lock);
         return r;
 }

 /*
  * Functions to lock and unlock any filesystem running on the
  * device.
  */
 static int __lock_fs(struct mapped_device *md)
 {
-        int error = -ENOMEM;
+        int r = -ENOMEM;

         if (test_and_set_bit(DMF_FS_LOCKED, &md->flags))
                 return 0;

         md->frozen_bdev = bdget_disk(md->disk, 0);
         if (!md->frozen_bdev) {
                 DMWARN("bdget failed in __lock_fs");
                 goto out;
         }

         WARN_ON(md->frozen_sb);

         md->frozen_sb = freeze_bdev(md->frozen_bdev);
         if (IS_ERR(md->frozen_sb)) {
-                error = PTR_ERR(md->frozen_sb);
+                r = PTR_ERR(md->frozen_sb);
                 goto out_bdput;
         }

         /* don't bdput right now, we don't want the bdev
          * to go away while it is locked. We'll bdput
          * in __unlock_fs
          */
         return 0;

 out_bdput:
         bdput(md->frozen_bdev);
         md->frozen_sb = NULL;
         md->frozen_bdev = NULL;
 out:
         clear_bit(DMF_FS_LOCKED, &md->flags);
-        return error;
+        return r;
 }

 static void __unlock_fs(struct mapped_device *md)
 {
         if (!test_and_clear_bit(DMF_FS_LOCKED, &md->flags))
                 return;

         thaw_bdev(md->frozen_bdev, md->frozen_sb);

--- 9 unchanged lines hidden ---

  * the background. Before the table can be swapped with
  * dm_bind_table, dm_suspend must be called to flush any in
  * flight bios and ensure that any further io gets deferred.
  */
 int dm_suspend(struct mapped_device *md)
 {
         struct dm_table *map;
         DECLARE_WAITQUEUE(wait, current);
-        int error = -EINVAL;
+        int r = -EINVAL;

-        /* Flush I/O to the device. */
         down_read(&md->lock);
         if (test_bit(DMF_BLOCK_IO, &md->flags))
                 goto out_read_unlock;

         map = dm_get_table(md);
-        if (map)
-                /* This does not get reverted if there's an error later. */
-                dm_table_presuspend_targets(map);

-        error = __lock_fs(md);
-        if (error) {
+        /* This does not get reverted if there's an error later. */
+        dm_table_presuspend_targets(map);
+
+        /* Flush I/O to the device. */
+        r = __lock_fs(md);
+        if (r) {
                 dm_table_put(map);
                 goto out_read_unlock;
         }

         up_read(&md->lock);

         /*
          * First we set the BLOCK_IO flag so no more ios will be mapped.
          *
          * If the flag is already set we know another thread is trying to
          * suspend as well, so we leave the fs locked for this thread.
          */
-        error = -EINVAL;
+        r = -EINVAL;
         down_write(&md->lock);
         if (test_and_set_bit(DMF_BLOCK_IO, &md->flags)) {
                 if (map)
                         dm_table_put(map);
                 goto out_write_unlock;
         }

         add_wait_queue(&md->wait, &wait);

--- 18 unchanged lines hidden ---

                 io_schedule();
         }
         set_current_state(TASK_RUNNING);

         down_write(&md->lock);
         remove_wait_queue(&md->wait, &wait);

         /* were we interrupted ? */
-        error = -EINTR;
+        r = -EINTR;
         if (atomic_read(&md->pending))
                 goto out_unfreeze;

         set_bit(DMF_SUSPENDED, &md->flags);

         map = dm_get_table(md);
-        if (map)
-                dm_table_postsuspend_targets(map);
+        dm_table_postsuspend_targets(map);
         dm_table_put(map);
         up_write(&md->lock);

         return 0;

 out_unfreeze:
         __unlock_fs(md);
         clear_bit(DMF_BLOCK_IO, &md->flags);
 out_write_unlock:
         up_write(&md->lock);
-        return error;
+        return r;

 out_read_unlock:
         up_read(&md->lock);
-        return error;
+        return r;
 }

 int dm_resume(struct mapped_device *md)
 {
+        int r = -EINVAL;
         struct bio *def;
-        struct dm_table *map = dm_get_table(md);
+        struct dm_table *map = NULL;

         down_write(&md->lock);
-        if (!map ||
-            !test_bit(DMF_SUSPENDED, &md->flags) ||
-            !dm_table_get_size(map)) {
+        if (!dm_suspended(md)) {
                 up_write(&md->lock);
-                dm_table_put(map);
-                return -EINVAL;
+                goto out;
+        }
+
+        map = dm_get_table(md);
+        if (!map || !dm_table_get_size(map)) {
+                up_write(&md->lock);
+                goto out;
         }

         dm_table_resume_targets(map);
         clear_bit(DMF_SUSPENDED, &md->flags);
         clear_bit(DMF_BLOCK_IO, &md->flags);

         def = bio_list_get(&md->deferred);
         __flush_deferred_io(md, def);
         up_write(&md->lock);
         __unlock_fs(md);
         dm_table_unplug_all(map);
-        dm_table_put(map);

-        return 0;
+        r = 0;
+out:
+        dm_table_put(map);
+        return r;
 }

 /*-----------------------------------------------------------------
  * Event notification.
  *---------------------------------------------------------------*/
 uint32_t dm_get_event_nr(struct mapped_device *md)
 {
         return atomic_read(&md->event_nr);

--- 41 unchanged lines hidden ---