--- dm.c (018b05ebbff4f3ed611e950fe5f8760d2348b814)
+++ dm.c (e6fc9f62ce6e412acb1699a5373174aa42ca2bd3)
 /*
  * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
  * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
  *
  * This file is released under the GPL.
  */

 #include "dm-core.h"

--- 572 unchanged lines hidden ---

 		tio = clone_to_tio(clone);
 		tio->inside_dm_io = false;
 	}

 	tio->magic = DM_TIO_MAGIC;
 	tio->io = ci->io;
 	tio->ti = ti;
 	tio->target_bio_nr = target_bio_nr;
+	tio->is_duplicate_bio = false;
 	tio->len_ptr = len;
 	tio->old_sector = 0;

 	if (len) {
 		clone->bi_iter.bi_size = to_bytes(*len);
 		if (bio_integrity(clone))
 			bio_integrity_trim(clone);
 	}
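The is_duplicate_bio flag initialized above is a new member of struct dm_target_io, which lives in dm-core.h and is not part of this diff. A minimal sketch of that structure, built only from the members referenced in this file (exact order, exact types and any members not referenced here are assumptions):

/* Sketch only -- the authoritative definition is in drivers/md/dm-core.h. */
struct dm_target_io {
	unsigned magic;			/* set to DM_TIO_MAGIC in alloc_tio() */
	struct dm_io *io;		/* parent per-bio dm_io */
	struct dm_target *ti;		/* target this clone is mapped to */
	unsigned target_bio_nr;
	unsigned *len_ptr;		/* adjusted by dm_accept_partial_bio() */
	bool inside_dm_io;		/* embedded in struct dm_io vs. separately allocated */
	bool is_duplicate_bio;		/* new: clone was created by __send_duplicate_bios() */
	sector_t old_sector;
	struct bio clone;		/* clone_to_tio() converts from this embedded bio */
};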
--- 485 unchanged lines hidden ---

 	dm_put_live_table(md, srcu_idx);

 	return ret;
 }

 /*
  * A target may call dm_accept_partial_bio only from the map routine. It is
  * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
- * operations and REQ_OP_ZONE_APPEND (zone append writes).
+ * operations, REQ_OP_ZONE_APPEND (zone append writes) and any bio serviced by
+ * __send_duplicate_bios().
  *
  * dm_accept_partial_bio informs the dm that the target only wants to process
  * additional n_sectors sectors of the bio and the rest of the data should be
  * sent in a next bio.
  *
  * A diagram that explains the arithmetics:
  * +--------------------+---------------+-------+
  * | 1                  | 2             | 3     |

--- 14 unchanged lines hidden ---

  * the partially processed part (the sum of regions 1+2) must be the same for all
  * copies of the bio.
  */
 void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
 {
 	struct dm_target_io *tio = clone_to_tio(bio);
 	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;

-	BUG_ON(bio->bi_opf & REQ_PREFLUSH);
+	BUG_ON(tio->is_duplicate_bio);
 	BUG_ON(op_is_zone_mgmt(bio_op(bio)));
 	BUG_ON(bio_op(bio) == REQ_OP_ZONE_APPEND);
 	BUG_ON(bi_size > *tio->len_ptr);
 	BUG_ON(n_sectors > bi_size);

 	*tio->len_ptr -= bi_size - n_sectors;
 	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
 }
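To make the documented restrictions concrete, here is a hedged usage sketch of dm_accept_partial_bio() from a hypothetical target's map routine: it accepts only the sectors up to an internal chunk boundary and lets dm core resubmit the remainder. Everything prefixed my_ (the target type and its chunk_sectors, dev and start fields) is invented for the example; only dm_accept_partial_bio(), dm_target_offset(), bio_sectors(), bio_set_dev() and DM_MAPIO_REMAPPED come from the real interfaces.

#include <linux/device-mapper.h>

/* Hypothetical target private data, for illustration only. */
struct my_target {
	struct dm_dev *dev;	/* underlying device */
	sector_t start;		/* offset into the underlying device */
	sector_t chunk_sectors;	/* power-of-two boundary we refuse to cross */
};

static int my_target_map(struct dm_target *ti, struct bio *bio)
{
	struct my_target *mt = ti->private;
	sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);
	sector_t boundary = mt->chunk_sectors - (offset & (mt->chunk_sectors - 1));

	/*
	 * Process only up to the chunk boundary; dm core sends the rest in a
	 * follow-up bio.  Per the restrictions above, this must not be done
	 * for REQ_PREFLUSH, zone management ops, zone append, or any clone
	 * produced by __send_duplicate_bios().
	 */
	if (bio_sectors(bio) > boundary)
		dm_accept_partial_bio(bio, boundary);

	bio_set_dev(bio, mt->dev->bdev);
	bio->bi_iter.bi_sector = mt->start + offset;
	return DM_MAPIO_REMAPPED;
}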
--- 111 unchanged lines hidden ---

 	struct bio_list blist = BIO_EMPTY_LIST;
 	struct bio *clone;

 	switch (num_bios) {
 	case 0:
 		break;
 	case 1:
 		clone = alloc_tio(ci, ti, 0, len, GFP_NOIO);
+		clone_to_tio(clone)->is_duplicate_bio = true;
 		__map_bio(clone);
 		break;
 	default:
 		alloc_multiple_bios(&blist, ci, ti, num_bios, len);
-		while ((clone = bio_list_pop(&blist)))
+		while ((clone = bio_list_pop(&blist))) {
+			clone_to_tio(clone)->is_duplicate_bio = true;
 			__map_bio(clone);
+		}
 		break;
 	}
 }

 static int __send_empty_flush(struct clone_info *ci)
 {
 	unsigned target_nr = 0;
 	struct dm_target *ti;

--- 12 unchanged lines hidden ---

 	while ((ti = dm_table_get_target(ci->map, target_nr++)))
 		__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);

 	bio_uninit(ci->bio);
 	return 0;
 }
-static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
-				       unsigned num_bios)
+static void __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
+					unsigned num_bios)
 {
 	unsigned len;

-	/*
-	 * Even though the device advertised support for this type of
-	 * request, that does not mean every target supports it, and
-	 * reconfiguration might also have changed that since the
-	 * check was performed.
-	 */
-	if (!num_bios)
-		return -EOPNOTSUPP;
-
 	len = min_t(sector_t, ci->sector_count,
 		    max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector)));

-	__send_duplicate_bios(ci, ti, num_bios, &len);
-
+	/*
+	 * dm_accept_partial_bio cannot be used with duplicate bios,
+	 * so update clone_info cursor before __send_duplicate_bios().
+	 */
 	ci->sector += len;
 	ci->sector_count -= len;

-	return 0;
+	__send_duplicate_bios(ci, ti, num_bios, &len);
 }

 static bool is_abnormal_io(struct bio *bio)
 {
 	bool r = false;

 	switch (bio_op(bio)) {
 	case REQ_OP_DISCARD:

--- 5 unchanged lines hidden ---

 	}

 	return r;
 }

 static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
 				  int *result)
 {
-	struct bio *bio = ci->bio;
 	unsigned num_bios = 0;

-	switch (bio_op(bio)) {
+	switch (bio_op(ci->bio)) {
 	case REQ_OP_DISCARD:
 		num_bios = ti->num_discard_bios;
 		break;
 	case REQ_OP_SECURE_ERASE:
 		num_bios = ti->num_secure_erase_bios;
 		break;
 	case REQ_OP_WRITE_SAME:
 		num_bios = ti->num_write_same_bios;
 		break;
 	case REQ_OP_WRITE_ZEROES:
 		num_bios = ti->num_write_zeroes_bios;
 		break;
 	default:
 		return false;
 	}

-	*result = __send_changing_extent_only(ci, ti, num_bios);
+	/*
+	 * Even though the device advertised support for this type of
+	 * request, that does not mean every target supports it, and
+	 * reconfiguration might also have changed that since the
+	 * check was performed.
+	 */
+	if (!num_bios)
+		*result = -EOPNOTSUPP;
+	else {
+		__send_changing_extent_only(ci, ti, num_bios);
+		*result = 0;
+	}
 	return true;
 }

 /*
  * Select the correct strategy for processing a non-flush bio.
  */
 static int __split_and_process_bio(struct clone_info *ci)
 {

--- 1587 unchanged lines hidden ---
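The body of __split_and_process_bio(), the only caller of __process_abnormal_io(), is among the hidden lines above. Purely as a sketch of the calling contract after this change (not the actual hidden code): the bool return says whether the bio was one of the abnormal ops, and *result now carries either 0 or -EOPNOTSUPP for the caller to propagate, i.e. the value __send_changing_extent_only() used to return itself.

/* Illustrative caller pattern only; the real caller is not shown in this diff. */
static int my_handle_bio(struct clone_info *ci, struct dm_target *ti)
{
	int r;

	/*
	 * Abnormal ops (discard, secure erase, write same/zeroes) are fully
	 * handled here; r is 0 or -EOPNOTSUPP and is simply passed upward.
	 */
	if (__process_abnormal_io(ci, ti, &r))
		return r;

	/* ... normal read/write cloning and splitting would continue here ... */
	return 0;
}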