/*
 * Copyright (C) 2003 Sistina Software (UK) Limited.
 * Copyright (C) 2004, 2010-2011 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/slab.h>

#define DM_MSG_PREFIX "flakey"

#define all_corrupt_bio_flags_match(bio, fc)	\
	(((bio)->bi_rw & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)

/*
 * Flakey: Used for testing only, simulates intermittent,
 * catastrophic device failure.
 */
struct flakey_c {
	struct dm_dev *dev;
	unsigned long start_time;
	sector_t start;
	unsigned up_interval;
	unsigned down_interval;
	unsigned long flags;
	unsigned corrupt_bio_byte;
	unsigned corrupt_bio_rw;
	unsigned corrupt_bio_value;
	unsigned corrupt_bio_flags;
};

enum feature_flag_bits {
	DROP_WRITES
};

static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
			  struct dm_target *ti)
{
	int r;
	unsigned argc;
	const char *arg_name;

	static struct dm_arg _args[] = {
		{0, 6, "Invalid number of feature args"},
		{1, UINT_MAX, "Invalid corrupt bio byte"},
		{0, 255, "Invalid corrupt value to write into bio byte (0-255)"},
		{0, UINT_MAX, "Invalid corrupt bio flags mask"},
	};

	/* No feature arguments supplied. */
	if (!as->argc)
		return 0;

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return r;

	while (argc) {
		arg_name = dm_shift_arg(as);
		argc--;

		/*
		 * drop_writes
		 */
		if (!strcasecmp(arg_name, "drop_writes")) {
			if (test_and_set_bit(DROP_WRITES, &fc->flags)) {
				ti->error = "Feature drop_writes duplicated";
				return -EINVAL;
			}

			continue;
		}

		/*
		 * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>
		 */
		if (!strcasecmp(arg_name, "corrupt_bio_byte")) {
			if (!argc) {
				ti->error = "Feature corrupt_bio_byte requires parameters";
				return -EINVAL;
			}

			r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error);
			if (r)
				return r;
			argc--;

			/*
			 * Direction r or w?
			 */
			arg_name = dm_shift_arg(as);
			if (!strcasecmp(arg_name, "w"))
				fc->corrupt_bio_rw = WRITE;
			else if (!strcasecmp(arg_name, "r"))
				fc->corrupt_bio_rw = READ;
			else {
				ti->error = "Invalid corrupt bio direction (r or w)";
				return -EINVAL;
			}
			argc--;

			/*
			 * Value of byte (0-255) to write in place of correct one.
			 */
			r = dm_read_arg(_args + 2, as, &fc->corrupt_bio_value, &ti->error);
			if (r)
				return r;
			argc--;

			/*
			 * Only corrupt bios with these flags set.
			 */
			r = dm_read_arg(_args + 3, as, &fc->corrupt_bio_flags, &ti->error);
			if (r)
				return r;
			argc--;

			continue;
		}

		ti->error = "Unrecognised flakey feature requested";
		return -EINVAL;
	}

	if (test_bit(DROP_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
		ti->error = "drop_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
		return -EINVAL;
	}

	return 0;
}

/*
 * Construct a flakey mapping:
 * <dev_path> <offset> <up interval> <down interval> [<#feature args> [<arg>]*]
 *
 * Feature args:
 *   [drop_writes]
 *   [corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>]
 *
 * Nth_byte starts from 1 for the first byte.
 * Direction is r for READ or w for WRITE.
 * bio_flags is ignored if 0.
 */
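/*
 * Illustrative dmsetup usage (device name and sector counts below are
 * hypothetical, shown only to demonstrate the table-line syntax):
 *
 *   # Pass I/O through for 30s, then misbehave for 5s, dropping writes:
 *   dmsetup create flakey-test --table \
 *     "0 409600 flakey /dev/sdb1 0 30 5 1 drop_writes"
 *
 *   # While down, corrupt the 32nd byte of READ bios with the value 1:
 *   dmsetup create flakey-test --table \
 *     "0 409600 flakey /dev/sdb1 0 30 5 5 corrupt_bio_byte 32 r 1 0"
 */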
static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	static struct dm_arg _args[] = {
		{0, UINT_MAX, "Invalid up interval"},
		{0, UINT_MAX, "Invalid down interval"},
	};

	int r;
	struct flakey_c *fc;
	unsigned long long tmpll;
	struct dm_arg_set as;
	const char *devname;
	char dummy;

	as.argc = argc;
	as.argv = argv;

	if (argc < 4) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	fc = kzalloc(sizeof(*fc), GFP_KERNEL);
	if (!fc) {
		ti->error = "Cannot allocate flakey context";
		return -ENOMEM;
	}
	fc->start_time = jiffies;

	devname = dm_shift_arg(&as);

	if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	fc->start = tmpll;

	r = dm_read_arg(_args, &as, &fc->up_interval, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, &as, &fc->down_interval, &ti->error);
	if (r)
		goto bad;

	if (!(fc->up_interval + fc->down_interval)) {
		ti->error = "Total (up + down) interval is zero";
		goto bad;
	}

	if (fc->up_interval + fc->down_interval < fc->up_interval) {
		ti->error = "Interval overflow";
		goto bad;
	}

	r = parse_features(&as, fc, ti);
	if (r)
		goto bad;

	if (dm_get_device(ti, devname, dm_table_get_mode(ti->table), &fc->dev)) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	ti->num_flush_requests = 1;
	ti->num_discard_requests = 1;
	ti->private = fc;
	return 0;

bad:
	kfree(fc);
	return -EINVAL;
}

static void flakey_dtr(struct dm_target *ti)
{
	struct flakey_c *fc = ti->private;

	dm_put_device(ti, fc->dev);
	kfree(fc);
}

static sector_t flakey_map_sector(struct dm_target *ti, sector_t bi_sector)
{
	struct flakey_c *fc = ti->private;

	return fc->start + dm_target_offset(ti, bi_sector);
}

static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
{
	struct flakey_c *fc = ti->private;

	bio->bi_bdev = fc->dev->bdev;
	if (bio_sectors(bio))
		bio->bi_sector = flakey_map_sector(ti, bio->bi_sector);
}

static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
{
	unsigned bio_bytes = bio_cur_bytes(bio);
	char *data = bio_data(bio);

	/*
	 * Overwrite the Nth byte of the data returned.
	 */
	if (data && bio_bytes >= fc->corrupt_bio_byte) {
		data[fc->corrupt_bio_byte - 1] = fc->corrupt_bio_value;

		DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
			"(rw=%c bi_rw=%lu bi_sector=%llu cur_bytes=%u)\n",
			bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
			(bio_data_dir(bio) == WRITE) ? 'w' : 'r',
			bio->bi_rw, (unsigned long long)bio->bi_sector, bio_bytes);
	}
}

static int flakey_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	struct flakey_c *fc = ti->private;
	unsigned elapsed;

	/* Are we alive ? */
	elapsed = (jiffies - fc->start_time) / HZ;
	if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) {
		/*
		 * Flag this bio as submitted while down.
		 */
		map_context->ll = 1;

		/*
		 * Map reads as normal.
		 */
		if (bio_data_dir(bio) == READ)
			goto map_bio;

		/*
		 * Drop writes?
290 */ 291 if (test_bit(DROP_WRITES, &fc->flags)) { 292 bio_endio(bio, 0); 293 return DM_MAPIO_SUBMITTED; 294 } 295 296 /* 297 * Corrupt matching writes. 298 */ 299 if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == WRITE)) { 300 if (all_corrupt_bio_flags_match(bio, fc)) 301 corrupt_bio_data(bio, fc); 302 goto map_bio; 303 } 304 305 /* 306 * By default, error all I/O. 307 */ 308 return -EIO; 309 } 310 311 map_bio: 312 flakey_map_bio(ti, bio); 313 314 return DM_MAPIO_REMAPPED; 315 } 316 317 static int flakey_end_io(struct dm_target *ti, struct bio *bio, 318 int error, union map_info *map_context) 319 { 320 struct flakey_c *fc = ti->private; 321 unsigned bio_submitted_while_down = map_context->ll; 322 323 /* 324 * Corrupt successful READs while in down state. 325 * If flags were specified, only corrupt those that match. 326 */ 327 if (fc->corrupt_bio_byte && !error && bio_submitted_while_down && 328 (bio_data_dir(bio) == READ) && (fc->corrupt_bio_rw == READ) && 329 all_corrupt_bio_flags_match(bio, fc)) 330 corrupt_bio_data(bio, fc); 331 332 return error; 333 } 334 335 static int flakey_status(struct dm_target *ti, status_type_t type, 336 char *result, unsigned int maxlen) 337 { 338 unsigned sz = 0; 339 struct flakey_c *fc = ti->private; 340 unsigned drop_writes; 341 342 switch (type) { 343 case STATUSTYPE_INFO: 344 result[0] = '\0'; 345 break; 346 347 case STATUSTYPE_TABLE: 348 DMEMIT("%s %llu %u %u ", fc->dev->name, 349 (unsigned long long)fc->start, fc->up_interval, 350 fc->down_interval); 351 352 drop_writes = test_bit(DROP_WRITES, &fc->flags); 353 DMEMIT("%u ", drop_writes + (fc->corrupt_bio_byte > 0) * 5); 354 355 if (drop_writes) 356 DMEMIT("drop_writes "); 357 358 if (fc->corrupt_bio_byte) 359 DMEMIT("corrupt_bio_byte %u %c %u %u ", 360 fc->corrupt_bio_byte, 361 (fc->corrupt_bio_rw == WRITE) ? 'w' : 'r', 362 fc->corrupt_bio_value, fc->corrupt_bio_flags); 363 364 break; 365 } 366 return 0; 367 } 368 369 static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg) 370 { 371 struct flakey_c *fc = ti->private; 372 struct dm_dev *dev = fc->dev; 373 int r = 0; 374 375 /* 376 * Only pass ioctls through if the device sizes match exactly. 377 */ 378 if (fc->start || 379 ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT) 380 r = scsi_verify_blk_ioctl(NULL, cmd); 381 382 return r ? 
	return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
}

static int flakey_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
			struct bio_vec *biovec, int max_size)
{
	struct flakey_c *fc = ti->private;
	struct request_queue *q = bdev_get_queue(fc->dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = fc->dev->bdev;
	bvm->bi_sector = flakey_map_sector(ti, bvm->bi_sector);

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
{
	struct flakey_c *fc = ti->private;

	return fn(ti, fc->dev, fc->start, ti->len, data);
}

static struct target_type flakey_target = {
	.name = "flakey",
	.version = {1, 2, 0},
	.module = THIS_MODULE,
	.ctr = flakey_ctr,
	.dtr = flakey_dtr,
	.map = flakey_map,
	.end_io = flakey_end_io,
	.status = flakey_status,
	.ioctl = flakey_ioctl,
	.merge = flakey_merge,
	.iterate_devices = flakey_iterate_devices,
};

static int __init dm_flakey_init(void)
{
	int r = dm_register_target(&flakey_target);

	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

static void __exit dm_flakey_exit(void)
{
	dm_unregister_target(&flakey_target);
}

/* Module hooks */
module_init(dm_flakey_init);
module_exit(dm_flakey_exit);

MODULE_DESCRIPTION(DM_NAME " flakey target");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");