dm.c: diff between commits e0d6609a5fe34463ae2fd48d846931f70de8b37b (old) and 1dd40c3ecd9b8a4ab91dbf2e6ce10b82a3b5ae63 (new)
/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"

--- 1096 unchanged lines hidden ---

	}

	ti->max_io_len = (uint32_t) len;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
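For reference, dm_set_target_max_io_len (whose tail is visible in the context above) is how a target caps the size of the bios dm will hand it: anything larger is split before the map routine runs. A minimal, hypothetical constructor using it follows; "my_target" and its chunk_sectors field are invented for illustration, only dm_set_target_max_io_len itself comes from dm.

struct my_target {			/* illustrative per-target state */
	sector_t chunk_sectors;
};

static int my_target_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	struct my_target *mt = kzalloc(sizeof(*mt), GFP_KERNEL);
	int r;

	if (!mt)
		return -ENOMEM;
	mt->chunk_sectors = 128;	/* example value: 64KiB chunks */

	/* Ask dm never to send this target a bio spanning more than one chunk. */
	r = dm_set_target_max_io_len(ti, mt->chunk_sectors);
	if (r) {
		kfree(mt);
		return r;
	}

	ti->private = mt;
	return 0;
}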
+/*
+ * A target may call dm_accept_partial_bio only from the map routine. It is
+ * allowed for all bio types except REQ_FLUSH.
+ *
+ * dm_accept_partial_bio informs the dm that the target only wants to process
+ * additional n_sectors sectors of the bio and the rest of the data should be
+ * sent in a next bio.
+ *
+ * A diagram that explains the arithmetics:
+ * +--------------------+---------------+-------+
+ * |         1          |       2       |   3   |
+ * +--------------------+---------------+-------+
+ *
+ * <-------------- *tio->len_ptr --------------->
+ *                      <------- bi_size ------->
+ *                      <-- n_sectors -->
+ *
+ * Region 1 was already iterated over with bio_advance or similar function.
+ *	(it may be empty if the target doesn't use bio_advance)
+ * Region 2 is the remaining bio size that the target wants to process.
+ *	(it may be empty if region 1 is non-empty, although there is no reason
+ *	 to make it empty)
+ * The target requires that region 3 is to be sent in the next bio.
+ *
+ * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
+ * the partially processed part (the sum of regions 1+2) must be the same for all
+ * copies of the bio.
+ */
+void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
+{
+	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
+	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
+	BUG_ON(bio->bi_rw & REQ_FLUSH);
+	BUG_ON(bi_size > *tio->len_ptr);
+	BUG_ON(n_sectors > bi_size);
+	*tio->len_ptr -= bi_size - n_sectors;
+	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
+}
+EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
+
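dm_accept_partial_bio is meant to be called from a target's map routine when the target can only handle a prefix of the bio it was given. A hypothetical sketch of such a target follows; everything prefixed my_ is invented for illustration, while dm_target_offset, bio_sectors, dm_accept_partial_bio and DM_MAPIO_REMAPPED are existing kernel interfaces of this era.

struct my_target {
	struct dm_dev *dev;		/* underlying device */
};

static int my_target_map(struct dm_target *ti, struct bio *bio)
{
	struct my_target *mt = ti->private;
	const sector_t region = 128;	/* illustrative power-of-2 boundary, in sectors */
	sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);
	unsigned remaining = region - (offset & (region - 1));

	/*
	 * If the bio crosses the boundary, accept only the leading part
	 * (region 2 in the diagram above); dm observes the reduced length
	 * through *tio->len_ptr and resubmits the cut-off tail (region 3)
	 * as a new bio.
	 */
	if (bio_sectors(bio) > remaining)
		dm_accept_partial_bio(bio, remaining);

	bio->bi_bdev = mt->dev->bdev;
	return DM_MAPIO_REMAPPED;
}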
static void __map_bio(struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;
	struct bio *clone = &tio->clone;
	struct dm_target *ti = tio->ti;

--- 74 unchanged lines hidden ---

	tio->ti = ti;
	tio->target_bio_nr = target_bio_nr;

	return tio;
}

static void __clone_and_map_simple_bio(struct clone_info *ci,
				       struct dm_target *ti,
-				       unsigned target_bio_nr, unsigned len)
+				       unsigned target_bio_nr, unsigned *len)
{
	struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs, target_bio_nr);
	struct bio *clone = &tio->clone;

+	tio->len_ptr = len;
+
	/*
	 * Discard requests require the bio's inline iovecs be initialized.
	 * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
	 * and discard, so no need for concern about wasted bvec allocations.
	 */
	__bio_clone_fast(clone, ci->bio);
	if (len)
-		bio_setup_sector(clone, ci->sector, len);
+		bio_setup_sector(clone, ci->sector, *len);

	__map_bio(tio);
}

static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
-				  unsigned num_bios, unsigned len)
+				  unsigned num_bios, unsigned *len)
{
	unsigned target_bio_nr;

	for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++)
		__clone_and_map_simple_bio(ci, ti, target_bio_nr, len);
}

static int __send_empty_flush(struct clone_info *ci)
{
	unsigned target_nr = 0;
	struct dm_target *ti;

	BUG_ON(bio_has_data(ci->bio));
	while ((ti = dm_table_get_target(ci->map, target_nr++)))
-		__send_duplicate_bios(ci, ti, ti->num_flush_bios, 0);
+		__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);

	return 0;
}

static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
-				     sector_t sector, unsigned len)
+				     sector_t sector, unsigned *len)
{
	struct bio *bio = ci->bio;
	struct dm_target_io *tio;
	unsigned target_bio_nr;
	unsigned num_target_bios = 1;

	/*
	 * Does the target want to receive duplicate copies of the bio?
	 */
	if (bio_data_dir(bio) == WRITE && ti->num_write_bios)
		num_target_bios = ti->num_write_bios(ti, bio);

	for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
		tio = alloc_tio(ci, ti, 0, target_bio_nr);
-		clone_bio(tio, bio, sector, len);
+		tio->len_ptr = len;
+		clone_bio(tio, bio, sector, *len);
		__map_bio(tio);
	}
}

typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);

static unsigned get_num_discard_bios(struct dm_target *ti)
{

--- 35 unchanged lines hidden ---

		if (!num_bios)
			return -EOPNOTSUPP;

		if (is_split_required && !is_split_required(ti))
			len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
		else
			len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti));

-		__send_duplicate_bios(ci, ti, num_bios, len);
+		__send_duplicate_bios(ci, ti, num_bios, &len);

		ci->sector += len;
	} while (ci->sector_count -= len);

	return 0;
}

static int __send_discard(struct clone_info *ci)

--- 22 unchanged lines hidden ---

		return __send_write_same(ci);

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

	len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);

-	__clone_and_map_data_bio(ci, ti, ci->sector, len);
+	__clone_and_map_data_bio(ci, ti, ci->sector, &len);

	ci->sector += len;
	ci->sector_count -= len;

	return 0;
}

/*

--- 1533 unchanged lines hidden ---
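The thread running through all of the hunks above: len is now passed by address, each tio records that address in len_ptr, and dm_accept_partial_bio shrinks *tio->len_ptr. The ci->sector += len / ci->sector_count -= len bookkeeping in the splitting loops therefore advances only by what the target actually accepted, and the remainder goes out in the next bio. A standalone userspace simulation of that arithmetic (all names illustrative, no kernel dependencies):

#include <assert.h>
#include <stdio.h>

/* Stand-in for dm_accept_partial_bio: len_ptr plays the role of
 * tio->len_ptr, bi_size is the clone's size in sectors, n_sectors is
 * what the "target" accepts.  Trims region 3 of the diagram off *len_ptr. */
static void accept_partial(unsigned *len_ptr, unsigned bi_size, unsigned n_sectors)
{
	assert(bi_size <= *len_ptr);
	assert(n_sectors <= bi_size);
	*len_ptr -= bi_size - n_sectors;
}

int main(void)
{
	unsigned sector = 0, sector_count = 250;	/* like ci->sector, ci->sector_count */
	unsigned len = 100;				/* this iteration's max_io_len() result */

	accept_partial(&len, 100, 60);			/* target takes 60 of 100 sectors */

	sector += len;					/* advances by 60, not 100 */
	sector_count -= len;				/* 190 sectors left for later bios */
	printf("accepted=%u next_sector=%u remaining=%u\n", len, sector, sector_count);
	return 0;
}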