/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-kcopyd.h>
#include <linux/dm-region-hash.h>

#define DM_MSG_PREFIX "raid1"

#define MAX_RECOVERY 1	/* Maximum number of regions recovered in parallel. */

#define DM_RAID1_HANDLE_ERRORS	0x01
#define DM_RAID1_KEEP_LOG	0x02
#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)
#define keep_log(p)		((p)->features & DM_RAID1_KEEP_LOG)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);

/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
enum dm_raid1_error {
	DM_RAID1_WRITE_ERROR,
	DM_RAID1_FLUSH_ERROR,
	DM_RAID1_SYNC_ERROR,
	DM_RAID1_READ_ERROR
};

struct mirror {
	struct mirror_set *ms;
	atomic_t error_count;
	unsigned long error_type;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;

	uint64_t features;

	spinlock_t lock;	/* protects the lists */
	struct bio_list reads;
	struct bio_list writes;
	struct bio_list failures;
	struct bio_list holds;	/* bios are waiting until suspend */

	struct dm_region_hash *rh;
	struct dm_kcopyd_client *kcopyd_client;
	struct dm_io_client *io_client;

	/* recovery */
	region_t nr_regions;
	int in_sync;
	int log_failure;
	int leg_failure;
	atomic_t suspend;

	atomic_t default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;
	struct timer_list timer;
	unsigned long timer_pending;

	struct work_struct trigger_event;

	unsigned nr_mirrors;
	struct mirror mirror[0];
};

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(raid1_resync_throttle,
		"A percentage of time allocated for raid resynchronization");
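/*
 * Note (illustrative, not part of the original source): the declaration
 * above exposes the kcopyd resync throttle as a module parameter, so the
 * percentage can be tuned at runtime. Assuming this file is built into the
 * dm_mirror module, something like the following would lower it to 50%:
 *
 *	echo 50 > /sys/module/dm_mirror/parameters/raid1_resync_throttle
 */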
static void wakeup_mirrord(void *context)
{
	struct mirror_set *ms = context;

	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

static void delayed_wake_fn(unsigned long data)
{
	struct mirror_set *ms = (struct mirror_set *) data;

	clear_bit(0, &ms->timer_pending);
	wakeup_mirrord(ms);
}

static void delayed_wake(struct mirror_set *ms)
{
	if (test_and_set_bit(0, &ms->timer_pending))
		return;

	ms->timer.expires = jiffies + HZ / 5;
	ms->timer.data = (unsigned long) ms;
	ms->timer.function = delayed_wake_fn;
	add_timer(&ms->timer);
}

static void wakeup_all_recovery_waiters(void *context)
{
	wake_up_all(&_kmirrord_recovery_stopped);
}

static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	unsigned long flags;
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock_irqsave(&ms->lock, flags);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock_irqrestore(&ms->lock, flags);

	if (should_wake)
		wakeup_mirrord(ms);
}

static void dispatch_bios(void *context, struct bio_list *bio_list)
{
	struct mirror_set *ms = context;
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list)))
		queue_bio(ms, bio, WRITE);
}

struct dm_raid1_bio_record {
	struct mirror *m;
	/* if details->bi_bdev == NULL, details were not saved */
	struct dm_bio_details details;
	region_t write_region;
};

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky. We squirrel the mirror struct away inside
 * bi_next for read/write buffers. This is safe since the bh
 * doesn't get submitted to the lower levels of the block layer.
 */
static struct mirror *bio_get_m(struct bio *bio)
{
	return (struct mirror *) bio->bi_next;
}

static void bio_set_m(struct bio *bio, struct mirror *m)
{
	bio->bi_next = (struct bio *) m;
}

static struct mirror *get_default_mirror(struct mirror_set *ms)
{
	return &ms->mirror[atomic_read(&ms->default_mirror)];
}

static void set_default_mirror(struct mirror *m)
{
	struct mirror_set *ms = m->ms;
	struct mirror *m0 = &(ms->mirror[0]);

	atomic_set(&ms->default_mirror, m - m0);
}

static struct mirror *get_valid_mirror(struct mirror_set *ms)
{
	struct mirror *m;

	for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
		if (!atomic_read(&m->error_count))
			return m;

	return NULL;
}

/* fail_mirror
 * @m: mirror device to fail
 * @error_type: one of the enum's, DM_RAID1_*_ERROR
 *
 * If errors are being handled, record the type of
 * error encountered for this device. If this type
 * of error has already been recorded, we can return;
 * otherwise, we must signal userspace by triggering
 * an event. Additionally, if the device is the
 * primary device, we must choose a new primary, but
 * only if the mirror is in-sync.
 *
 * This function must not block.
 */
static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
{
	struct mirror_set *ms = m->ms;
	struct mirror *new;

	ms->leg_failure = 1;

	/*
	 * error_count is used for nothing more than a
	 * simple way to tell if a device has encountered
	 * errors.
	 */
	atomic_inc(&m->error_count);

	if (test_and_set_bit(error_type, &m->error_type))
		return;

	if (!errors_handled(ms))
		return;

	if (m != get_default_mirror(ms))
		goto out;

	if (!ms->in_sync && !keep_log(ms)) {
		/*
		 * Better to issue requests to same failing device
		 * than to risk returning corrupt data.
		 */
		DMERR("Primary mirror (%s) failed while out-of-sync: "
		      "Reads may fail.", m->dev->name);
		goto out;
	}

	new = get_valid_mirror(ms);
	if (new)
		set_default_mirror(new);
	else
		DMWARN("All sides of mirror have failed.");

out:
	schedule_work(&ms->trigger_event);
}

static int mirror_flush(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	unsigned long error_bits;

	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors];
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE_FLUSH,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = ms->io_client,
	};

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) {
		io[i].bdev = m->dev->bdev;
		io[i].sector = 0;
		io[i].count = 0;
	}

	error_bits = -1;
	dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
	if (unlikely(error_bits != 0)) {
		for (i = 0; i < ms->nr_mirrors; i++)
			if (test_bit(i, &error_bits))
				fail_mirror(ms->mirror + i,
					    DM_RAID1_FLUSH_ERROR);
		return -EIO;
	}

	return 0;
}

/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state. We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned long write_err,
			      void *context)
{
	struct dm_region *reg = context;
	struct mirror_set *ms = dm_rh_region_context(reg);
	int m, bit = 0;

	if (read_err) {
		/* Read error means the failure of default mirror. */
		DMERR_LIMIT("Unable to read primary mirror during recovery");
		fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
	}

	if (write_err) {
		DMERR_LIMIT("Write error during recovery (error = 0x%lx)",
			    write_err);
		/*
		 * Bits correspond to devices (excluding default mirror).
		 * The default mirror cannot change during recovery.
		 */
		for (m = 0; m < ms->nr_mirrors; m++) {
			if (&ms->mirror[m] == get_default_mirror(ms))
				continue;
			if (test_bit(bit, &write_err))
				fail_mirror(ms->mirror + m,
					    DM_RAID1_SYNC_ERROR);
			bit++;
		}
	}

	dm_rh_recovery_end(reg, !(read_err || write_err));
}

static int recover(struct mirror_set *ms, struct dm_region *reg)
{
	int r;
	unsigned i;
	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;
	region_t key = dm_rh_get_region_key(reg);
	sector_t region_size = dm_rh_get_region_size(ms->rh);

	/* fill in the source */
	m = get_default_mirror(ms);
	from.bdev = m->dev->bdev;
	from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
	if (key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (region_size - 1);
		if (!from.count)
			from.count = region_size;
	} else
		from.count = region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == get_default_mirror(ms))
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	if (!errors_handled(ms))
		set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);

	r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
			   flags, recovery_complete, reg);

	return r;
}

static void reset_ms_flags(struct mirror_set *ms)
{
	unsigned int m;

	ms->leg_failure = 0;
	for (m = 0; m < ms->nr_mirrors; m++) {
		atomic_set(&(ms->mirror[m].error_count), 0);
		ms->mirror[m].error_type = 0;
	}
}

static void do_recovery(struct mirror_set *ms)
{
	struct dm_region *reg;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	int r;

	/*
	 * Start quiescing some regions.
	 */
	dm_rh_recovery_prepare(ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = dm_rh_recovery_start(ms->rh))) {
		r = recover(ms, reg);
		if (r)
			dm_rh_recovery_end(reg, 0);
	}

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
		reset_ms_flags(ms);
	}
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	struct mirror *m = get_default_mirror(ms);

	do {
		if (likely(!atomic_read(&m->error_count)))
			return m;

		if (m-- == ms->mirror)
			m += ms->nr_mirrors;
	} while (m != get_default_mirror(ms));

	return NULL;
}

static int default_ok(struct mirror *m)
{
	struct mirror *default_mirror = get_default_mirror(m->ms);

	return !atomic_read(&default_mirror->error_count);
}

static int mirror_available(struct mirror_set *ms, struct bio *bio)
{
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region = dm_rh_bio_to_region(ms->rh, bio);

	if (log->type->in_sync(log, region, 0))
		return choose_mirror(ms, bio->bi_iter.bi_sector) ? 1 : 0;

	return 0;
}

/*
 * remap a buffer to a particular mirror.
 */
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
	if (unlikely(!bio->bi_iter.bi_size))
		return 0;
	return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector);
}

static void map_bio(struct mirror *m, struct bio *bio)
{
	bio->bi_bdev = m->dev->bdev;
	bio->bi_iter.bi_sector = map_sector(m, bio);
}

static void map_region(struct dm_io_region *io, struct mirror *m,
		       struct bio *bio)
{
	io->bdev = m->dev->bdev;
	io->sector = map_sector(m, bio);
	io->count = bio_sectors(bio);
}

static void hold_bio(struct mirror_set *ms, struct bio *bio)
{
	/*
	 * Lock is required to avoid race condition during suspend
	 * process.
	 */
	spin_lock_irq(&ms->lock);

	if (atomic_read(&ms->suspend)) {
		spin_unlock_irq(&ms->lock);

		/*
		 * If device is suspended, complete the bio.
		 */
		if (dm_noflush_suspending(ms->ti))
			bio_endio(bio, DM_ENDIO_REQUEUE);
		else
			bio_endio(bio, -EIO);
		return;
	}

	/*
	 * Hold bio until the suspend is complete.
	 */
	bio_list_add(&ms->holds, bio);
	spin_unlock_irq(&ms->lock);
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static void read_callback(unsigned long error, void *context)
{
	struct bio *bio = context;
	struct mirror *m;

	m = bio_get_m(bio);
	bio_set_m(bio, NULL);

	if (likely(!error)) {
		bio_endio(bio, 0);
		return;
	}

	fail_mirror(m, DM_RAID1_READ_ERROR);

	if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
		DMWARN_LIMIT("Read failure on mirror device %s. "
			     "Trying alternative device.",
			     m->dev->name);
		queue_bio(m->ms, bio, bio_rw(bio));
		return;
	}

	DMERR_LIMIT("Read failure on mirror device %s. Failing I/O.",
		    m->dev->name);
	bio_endio(bio, -EIO);
}

/* Asynchronous read. */
static void read_async_bio(struct mirror *m, struct bio *bio)
{
	struct dm_io_region io;
	struct dm_io_request io_req = {
		.bi_rw = READ,
		.mem.type = DM_IO_BIO,
		.mem.ptr.bio = bio,
		.notify.fn = read_callback,
		.notify.context = bio,
		.client = m->ms->io_client,
	};

	map_region(&io, m, bio);
	bio_set_m(bio, m);
	BUG_ON(dm_io(&io_req, 1, &io, NULL));
}

static inline int region_in_sync(struct mirror_set *ms, region_t region,
				 int may_block)
{
	int state = dm_rh_get_state(ms->rh, region, may_block);
	return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = dm_rh_bio_to_region(ms->rh, bio);
		m = get_default_mirror(ms);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (likely(region_in_sync(ms, region, 1)))
			m = choose_mirror(ms, bio->bi_iter.bi_sector);
		else if (m && atomic_read(&m->error_count))
			m = NULL;

		if (likely(m))
			read_async_bio(m, bio);
		else
			bio_endio(bio, -EIO);
	}
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:	increment pending, use kcopyd to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/

static void write_callback(unsigned long error, void *context)
{
	unsigned i, ret = 0;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;
	int should_wake = 0;
	unsigned long flags;

	ms = bio_get_m(bio)->ms;
	bio_set_m(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the target's endio function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */
	if (likely(!error)) {
		bio_endio(bio, ret);
		return;
	}

	/*
	 * If the bio is discard, return an error, but do not
	 * degrade the array.
	 */
	if (bio->bi_rw & REQ_DISCARD) {
		bio_endio(bio, -EOPNOTSUPP);
		return;
	}

	for (i = 0; i < ms->nr_mirrors; i++)
		if (test_bit(i, &error))
			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);

	/*
	 * Need to raise event. Since raising
	 * events can block, we need to do it in
	 * the main thread.
	 */
	spin_lock_irqsave(&ms->lock, flags);
	if (!ms->failures.head)
		should_wake = 1;
	bio_list_add(&ms->failures, bio);
	spin_unlock_irqrestore(&ms->lock, flags);
	if (should_wake)
		wakeup_mirrord(ms);
}

static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors], *dest = io;
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA),
		.mem.type = DM_IO_BIO,
		.mem.ptr.bio = bio,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	if (bio->bi_rw & REQ_DISCARD) {
		io_req.bi_rw |= REQ_DISCARD;
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = NULL;
	}

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
		map_region(dest++, m, bio);

	/*
	 * Use default mirror because we only need it to retrieve the reference
	 * to the mirror set in write_callback().
	 */
	bio_set_m(bio, get_default_mirror(ms));

	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
}

static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;
	struct bio_list requeue;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);
	bio_list_init(&requeue);

	while ((bio = bio_list_pop(writes))) {
		if ((bio->bi_rw & REQ_FLUSH) ||
		    (bio->bi_rw & REQ_DISCARD)) {
			bio_list_add(&sync, bio);
			continue;
		}

		region = dm_rh_bio_to_region(ms->rh, bio);

		if (log->type->is_remote_recovering &&
		    log->type->is_remote_recovering(log, region)) {
			bio_list_add(&requeue, bio);
			continue;
		}

		state = dm_rh_get_state(ms->rh, region, 1);
		switch (state) {
		case DM_RH_CLEAN:
		case DM_RH_DIRTY:
			this_list = &sync;
			break;

		case DM_RH_NOSYNC:
			this_list = &nosync;
			break;

		case DM_RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Add bios that are delayed due to remote recovery
	 * back on to the write queue
	 */
	if (unlikely(requeue.head)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->writes, &requeue);
		spin_unlock_irq(&ms->lock);
		delayed_wake(ms);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	dm_rh_inc_pending(ms->rh, &sync);
	dm_rh_inc_pending(ms->rh, &nosync);

	/*
	 * If the flush fails on a previous call and succeeds here,
	 * we must not reset the log_failure variable. We need
	 * userspace interaction to do that.
	 */
	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;

	/*
	 * Dispatch io.
	 */
	if (unlikely(ms->log_failure) && errors_handled(ms)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->failures, &sync);
		spin_unlock_irq(&ms->lock);
		wakeup_mirrord(ms);
	} else
		while ((bio = bio_list_pop(&sync)))
			do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		dm_rh_delay(ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		if (unlikely(ms->leg_failure) && errors_handled(ms) && !keep_log(ms)) {
			spin_lock_irq(&ms->lock);
			bio_list_add(&ms->failures, bio);
			spin_unlock_irq(&ms->lock);
			wakeup_mirrord(ms);
		} else {
			map_bio(get_default_mirror(ms), bio);
			generic_make_request(bio);
		}
	}
}

static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
	struct bio *bio;

	if (likely(!failures->head))
		return;

	/*
	 * If the log has failed, unattempted writes are being
	 * put on the holds list. We can't issue those writes
	 * until a log has been marked, so we must store them.
	 *
	 * If a 'noflush' suspend is in progress, we can requeue
	 * the I/O's to the core. This gives userspace a chance
	 * to reconfigure the mirror, at which point the core
	 * will reissue the writes. If the 'noflush' flag is
	 * not set, we have no choice but to return errors.
	 *
	 * Some writes on the failures list may have been
	 * submitted before the log failure and represent a
	 * failure to write to one of the devices. It is ok
	 * for us to treat them the same and requeue them
	 * as well.
	 */
	while ((bio = bio_list_pop(failures))) {
		if (!ms->log_failure) {
			ms->in_sync = 0;
			dm_rh_mark_nosync(ms->rh, bio);
		}

		/*
		 * If all the legs are dead, fail the I/O.
		 * If the device has failed and keep_log is enabled,
		 * fail the I/O.
		 *
		 * If we have been told to handle errors, and keep_log
		 * isn't enabled, hold the bio and wait for userspace to
		 * deal with the problem.
		 *
		 * Otherwise pretend that the I/O succeeded. (This would
		 * be wrong if the failed leg returned after reboot and
		 * got replicated back to the good legs.)
		 */

		if (unlikely(!get_valid_mirror(ms) || (keep_log(ms) && ms->log_failure)))
			bio_endio(bio, -EIO);
		else if (errors_handled(ms) && !keep_log(ms))
			hold_bio(ms, bio);
		else
			bio_endio(bio, 0);
	}
}

static void trigger_event(struct work_struct *work)
{
	struct mirror_set *ms =
		container_of(work, struct mirror_set, trigger_event);

	dm_table_event(ms->ti->table);
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
static void do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes, failures;
	unsigned long flags;

	spin_lock_irqsave(&ms->lock, flags);
	reads = ms->reads;
	writes = ms->writes;
	failures = ms->failures;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	spin_unlock_irqrestore(&ms->lock, flags);

	dm_rh_update_states(ms->rh, errors_handled(ms));
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
	do_failures(ms, &failures);
}

/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dm_dirty_log *dl)
{
	size_t len;
	struct mirror_set *ms = NULL;

	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kzalloc(len, GFP_KERNEL);
	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	spin_lock_init(&ms->lock);
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	bio_list_init(&ms->holds);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->log_failure = 0;
	ms->leg_failure = 0;
	atomic_set(&ms->suspend, 0);
	atomic_set(&ms->default_mirror, DEFAULT_MIRROR);

	ms->io_client = dm_io_client_create();
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		kfree(ms);
		return NULL;
	}

	ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
				       wakeup_all_recovery_waiters,
				       ms->ti->begin, MAX_RECOVERY,
				       dl, region_size, ms->nr_regions);
	if (IS_ERR(ms->rh)) {
		ti->error = "Error creating dirty region hash";
		dm_io_client_destroy(ms->io_client);
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	dm_region_hash_destroy(ms->rh);
	kfree(ms);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;
	char dummy;

	if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
			  &ms->mirror[mirror].dev)) {
		ti->error = "Device lookup failure";
		return -ENXIO;
	}

	ms->mirror[mirror].ms = ms;
	atomic_set(&(ms->mirror[mirror].error_count), 0);
	ms->mirror[mirror].error_type = 0;
	ms->mirror[mirror].offset = offset;

	return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
					     unsigned argc, char **argv,
					     unsigned *args_used)
{
	unsigned param_count;
	struct dm_dirty_log *dl;
	char dummy;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u%c", &param_count, &dummy) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count,
				 argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	return dl;
}

static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
			  unsigned *args_used)
{
	unsigned num_features;
	struct dm_target *ti = ms->ti;
	char dummy;
	int i;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u%c", &num_features, &dummy) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	for (i = 0; i < num_features; i++) {
		if (!strcmp("handle_errors", argv[0]))
			ms->features |= DM_RAID1_HANDLE_ERRORS;
		else if (!strcmp("keep_log", argv[0]))
			ms->features |= DM_RAID1_KEEP_LOG;
		else {
			ti->error = "Unrecognised feature requested";
			return -EINVAL;
		}

		argc--;
		argv++;
		(*args_used)++;
	}
	if (!errors_handled(ms) && keep_log(ms)) {
		ti->error = "keep_log feature requires the handle_errors feature";
		return -EINVAL;
	}

	return 0;
}

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, supported features are "handle_errors" and "keep_log".
 */
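/*
 * Illustrative example (not part of the original source): a table line for
 * a two-leg mirror of 2097152 sectors, using a core log with a 1024-sector
 * region size and the handle_errors feature, might look like the following
 * (device names and sizes are hypothetical, and the mapping is one line):
 *
 *	0 2097152 mirror core 2 1024 nosync 2 /dev/sda1 0 /dev/sdb1 0 1 handle_errors
 */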
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dm_dirty_log *dl;
	char dummy;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u%c", &nr_mirrors, &dummy) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) {
		ti->error = "Invalid number of mirrors";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_dirty_log_destroy(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;

	r = dm_set_target_max_io_len(ti, dm_rh_get_region_size(ms->rh));
	if (r)
		goto err_free_context;

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->per_bio_data_size = sizeof(struct dm_raid1_bio_record);
	ti->discard_zeroes_data_unsupported = true;

	ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_MEM_RECLAIM, 0);
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		r = -ENOMEM;
		goto err_free_context;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);
	init_timer(&ms->timer);
	ms->timer_pending = 0;
	INIT_WORK(&ms->trigger_event, trigger_event);

	r = parse_features(ms, argc, argv, &args_used);
	if (r)
		goto err_destroy_wq;

	argv += args_used;
	argc -= args_used;

	/*
	 * Any read-balancing addition depends on the
	 * DM_RAID1_HANDLE_ERRORS flag being present.
	 * This is because the decision to balance depends
	 * on the sync state of a region. If the above
	 * flag is not present, we ignore errors; and
	 * the sync state may be inaccurate.
	 */

	if (argc) {
		ti->error = "Too many mirror arguments";
		r = -EINVAL;
		goto err_destroy_wq;
	}

	ms->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(ms->kcopyd_client)) {
		r = PTR_ERR(ms->kcopyd_client);
		goto err_destroy_wq;
	}

	wakeup_mirrord(ms);
	return 0;

err_destroy_wq:
	destroy_workqueue(ms->kmirrord_wq);
err_free_context:
	free_context(ms, ti, ms->nr_mirrors);
	return r;
}

static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	del_timer_sync(&ms->timer);
	flush_workqueue(ms->kmirrord_wq);
	flush_work(&ms->trigger_event);
	dm_kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}

/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio)
{
	int r, rw = bio_rw(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	struct dm_raid1_bio_record *bio_record =
		dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));

	bio_record->details.bi_bdev = NULL;

	if (rw == WRITE) {
		/* Save region for mirror_end_io() handler */
		bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return r;

	/*
	 * If region is not in-sync queue the bio.
	 */
	if (!r || (r == -EWOULDBLOCK)) {
		if (rw == READA)
			return -EWOULDBLOCK;

		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * The region is in-sync and we can perform reads directly.
	 * Store enough information so we can retry if it fails.
	 */
	m = choose_mirror(ms, bio->bi_iter.bi_sector);
	if (unlikely(!m))
		return -EIO;

	dm_bio_record(&bio_record->details, bio);
	bio_record->m = m;

	map_bio(m, bio);

	return DM_MAPIO_REMAPPED;
}

static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
{
	int rw = bio_rw(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct mirror *m = NULL;
	struct dm_bio_details *bd = NULL;
	struct dm_raid1_bio_record *bio_record =
		dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE) {
		if (!(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD)))
			dm_rh_dec(ms->rh, bio_record->write_region);
		return error;
	}

	if (error == -EOPNOTSUPP)
		goto out;

	if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
		goto out;

	if (unlikely(error)) {
		if (!bio_record->details.bi_bdev) {
			/*
			 * There wasn't enough memory to record necessary
			 * information for a retry or there was no other
			 * mirror in-sync.
			 */
			DMERR_LIMIT("Mirror read failed.");
			return -EIO;
		}

		m = bio_record->m;

		DMERR("Mirror read failed from %s. Trying alternative device.",
		      m->dev->name);

		fail_mirror(m, DM_RAID1_READ_ERROR);

		/*
		 * A failed read is requeued for another attempt using an intact
		 * mirror.
		 */
		if (default_ok(m) || mirror_available(ms, bio)) {
			bd = &bio_record->details;

			dm_bio_restore(bd, bio);
			bio_record->details.bi_bdev = NULL;

			queue_bio(ms, bio, rw);
			return DM_ENDIO_INCOMPLETE;
		}
		DMERR("All replicated volumes dead, failing I/O");
	}

out:
	bio_record->details.bi_bdev = NULL;

	return error;
}

static void mirror_presuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	struct bio_list holds;
	struct bio *bio;

	atomic_set(&ms->suspend, 1);

	/*
	 * Process bios in the hold list to start recovery waiting
	 * for bios in the hold list. After that, no bio has a
	 * chance to be added to the hold list because ms->suspend
	 * is set.
	 */
	spin_lock_irq(&ms->lock);
	holds = ms->holds;
	bio_list_init(&ms->holds);
	spin_unlock_irq(&ms->lock);

	while ((bio = bio_list_pop(&holds)))
		hold_bio(ms, bio);

	/*
	 * We must finish up all the work that we've
	 * generated (i.e. recovery work).
	 */
	dm_rh_stop_recovery(ms->rh);

	wait_event(_kmirrord_recovery_stopped,
		   !dm_rh_recovery_in_flight(ms->rh));

	if (log->type->presuspend && log->type->presuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log presuspend failed");

	/*
	 * Now that recovery is complete/stopped and the
	 * delayed bios are queued, we need to wait for
	 * the worker thread to complete. This way,
	 * we know that all of our I/O has been pushed.
	 */
	flush_workqueue(ms->kmirrord_wq);
}

static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (log->type->postsuspend && log->type->postsuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log postsuspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	atomic_set(&ms->suspend, 0);
	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	dm_rh_start_recovery(ms->rh);
}

/*
 * device_status_char
 * @m: mirror device/leg we want the status of
 *
 * We return one character representing the most severe error
 * we have encountered.
 *	A => Alive - No failures
 *	F => Flush - A flush failure occurred
 *	D => Dead - A write failure occurred leaving mirror out-of-sync
 *	S => Sync - A synchronization failure occurred, mirror out-of-sync
 *	R => Read - A read failure occurred, mirror data unaffected
 *
 * Returns: <char>
 */
static char device_status_char(struct mirror *m)
{
	if (!atomic_read(&(m->error_count)))
		return 'A';

	return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
		(test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
		(test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' :
		(test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U';
}
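/*
 * Illustrative note (not part of the original source): in the
 * STATUSTYPE_INFO output produced below, the per-leg characters from
 * device_status_char() appear as one string after the sync count, so a
 * healthy two-leg mirror with a disk log might report something like
 * (device numbers and counts are hypothetical):
 *
 *	2 253:4 253:5 125/125 1 AA 3 disk 253:3 A
 */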
static void mirror_status(struct dm_target *ti, status_type_t type,
			  unsigned status_flags, char *result, unsigned maxlen)
{
	unsigned int m, sz = 0;
	int num_feature_args = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	char buffer[ms->nr_mirrors + 1];

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++) {
			DMEMIT("%s ", ms->mirror[m].dev->name);
			buffer[m] = device_status_char(&(ms->mirror[m]));
		}
		buffer[m] = '\0';

		DMEMIT("%llu/%llu 1 %s ",
		       (unsigned long long)log->type->get_sync_count(log),
		       (unsigned long long)ms->nr_regions, buffer);

		sz += log->type->status(log, type, result+sz, maxlen-sz);

		break;

	case STATUSTYPE_TABLE:
		sz = log->type->status(log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		num_feature_args += !!errors_handled(ms);
		num_feature_args += !!keep_log(ms);
		if (num_feature_args) {
			DMEMIT(" %d", num_feature_args);
			if (errors_handled(ms))
				DMEMIT(" handle_errors");
			if (keep_log(ms))
				DMEMIT(" keep_log");
		}

		break;
	}
}

static int mirror_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct mirror_set *ms = ti->private;
	int ret = 0;
	unsigned i;

	for (i = 0; !ret && i < ms->nr_mirrors; i++)
		ret = fn(ti, ms->mirror[i].dev,
			 ms->mirror[i].offset, ti->len, data);

	return ret;
}

static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 14, 0},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.presuspend = mirror_presuspend,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
	.iterate_devices = mirror_iterate_devices,
};

static int __init dm_mirror_init(void)
{
	int r;

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("Failed to register mirror target");
		goto bad_target;
	}

	return 0;

bad_target:
	return r;
}

static void __exit dm_mirror_exit(void)
{
	dm_unregister_target(&mirror_target);
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");