/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-kcopyd.h>
#include <linux/dm-region-hash.h>

#define DM_MSG_PREFIX "raid1"

#define MAX_RECOVERY 1	/* Maximum number of regions recovered in parallel. */
#define DM_IO_PAGES 64
#define DM_KCOPYD_PAGES 64

#define DM_RAID1_HANDLE_ERRORS 0x01
#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);

/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
enum dm_raid1_error {
	DM_RAID1_WRITE_ERROR,
	DM_RAID1_SYNC_ERROR,
	DM_RAID1_READ_ERROR
};

struct mirror {
	struct mirror_set *ms;
	atomic_t error_count;
	unsigned long error_type;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;

	uint64_t features;

	spinlock_t lock;	/* protects the lists */
	struct bio_list reads;
	struct bio_list writes;
	struct bio_list failures;

	struct dm_region_hash *rh;
	struct dm_kcopyd_client *kcopyd_client;
	struct dm_io_client *io_client;
	mempool_t *read_record_pool;

	/* recovery */
	region_t nr_regions;
	int in_sync;
	int log_failure;
	atomic_t suspend;

	atomic_t default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;
	struct timer_list timer;
	unsigned long timer_pending;

	struct work_struct trigger_event;

	unsigned nr_mirrors;
	struct mirror mirror[0];
};

static void wakeup_mirrord(void *context)
{
	struct mirror_set *ms = context;

	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

static void delayed_wake_fn(unsigned long data)
{
	struct mirror_set *ms = (struct mirror_set *) data;

	clear_bit(0, &ms->timer_pending);
	wakeup_mirrord(ms);
}

static void delayed_wake(struct mirror_set *ms)
{
	if (test_and_set_bit(0, &ms->timer_pending))
		return;

	ms->timer.expires = jiffies + HZ / 5;
	ms->timer.data = (unsigned long) ms;
	ms->timer.function = delayed_wake_fn;
	add_timer(&ms->timer);
}

static void wakeup_all_recovery_waiters(void *context)
{
	wake_up_all(&_kmirrord_recovery_stopped);
}
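
/*
 * Summary of the queueing model: incoming bios are parked on the
 * reads/writes/failures lists in struct mirror_set and drained by the
 * kmirrord workqueue (do_mirror() below).  queue_bio() only wakes the
 * worker when a list goes from empty to non-empty, so a burst of bios
 * costs a single wakeup.
 */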

static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	unsigned long flags;
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock_irqsave(&ms->lock, flags);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock_irqrestore(&ms->lock, flags);

	if (should_wake)
		wakeup_mirrord(ms);
}

static void dispatch_bios(void *context, struct bio_list *bio_list)
{
	struct mirror_set *ms = context;
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list)))
		queue_bio(ms, bio, WRITE);
}

#define MIN_READ_RECORDS 20
struct dm_raid1_read_record {
	struct mirror *m;
	struct dm_bio_details details;
};

static struct kmem_cache *_dm_raid1_read_record_cache;

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky.  We squirrel the mirror struct away inside
 * bi_next for read/write buffers.  This is safe since the bio
 * doesn't get submitted to the lower levels of the block layer.
 */
static struct mirror *bio_get_m(struct bio *bio)
{
	return (struct mirror *) bio->bi_next;
}

static void bio_set_m(struct bio *bio, struct mirror *m)
{
	bio->bi_next = (struct bio *) m;
}

static struct mirror *get_default_mirror(struct mirror_set *ms)
{
	return &ms->mirror[atomic_read(&ms->default_mirror)];
}

static void set_default_mirror(struct mirror *m)
{
	struct mirror_set *ms = m->ms;
	struct mirror *m0 = &(ms->mirror[0]);

	atomic_set(&ms->default_mirror, m - m0);
}

/* fail_mirror
 * @m: mirror device to fail
 * @error_type: one of the DM_RAID1_*_ERROR types
 *
 * If errors are being handled, record the type of
 * error encountered for this device.  If this type
 * of error has already been recorded, we can return;
 * otherwise, we must signal userspace by triggering
 * an event.  Additionally, if the device is the
 * primary device, we must choose a new primary, but
 * only if the mirror is in-sync.
 *
 * This function must not block.
 */
static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
{
	struct mirror_set *ms = m->ms;
	struct mirror *new;

	/*
	 * error_count is used for nothing more than a
	 * simple way to tell if a device has encountered
	 * errors.
	 */
	atomic_inc(&m->error_count);

	if (test_and_set_bit(error_type, &m->error_type))
		return;

	if (!errors_handled(ms))
		return;

	if (m != get_default_mirror(ms))
		goto out;

	if (!ms->in_sync) {
		/*
		 * Better to issue requests to the same failing device
		 * than to risk returning corrupt data.
		 */
		DMERR("Primary mirror (%s) failed while out-of-sync: "
		      "Reads may fail.", m->dev->name);
		goto out;
	}

	for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
		if (!atomic_read(&new->error_count)) {
			set_default_mirror(new);
			break;
		}

	if (unlikely(new == ms->mirror + ms->nr_mirrors))
		DMWARN("All sides of mirror have failed.");

out:
	schedule_work(&ms->trigger_event);
}
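
/*
 * Note on the "handle_errors" feature: without DM_RAID1_HANDLE_ERRORS,
 * fail_mirror() above only bumps the error counters and returns -- no
 * event is raised and the default mirror is never reassigned, so the
 * target keeps issuing I/O to the original primary.
 */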

/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned long write_err,
			      void *context)
{
	struct dm_region *reg = context;
	struct mirror_set *ms = dm_rh_region_context(reg);
	int m, bit = 0;

	if (read_err) {
		/* A read error means the default mirror has failed. */
		DMERR_LIMIT("Unable to read primary mirror during recovery");
		fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
	}

	if (write_err) {
		DMERR_LIMIT("Write error during recovery (error = 0x%lx)",
			    write_err);
		/*
		 * Bits correspond to devices (excluding default mirror).
		 * The default mirror cannot change during recovery.
		 */
		for (m = 0; m < ms->nr_mirrors; m++) {
			if (&ms->mirror[m] == get_default_mirror(ms))
				continue;
			if (test_bit(bit, &write_err))
				fail_mirror(ms->mirror + m,
					    DM_RAID1_SYNC_ERROR);
			bit++;
		}
	}

	dm_rh_recovery_end(reg, !(read_err || write_err));
}

static int recover(struct mirror_set *ms, struct dm_region *reg)
{
	int r;
	unsigned i;
	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;
	region_t key = dm_rh_get_region_key(reg);
	sector_t region_size = dm_rh_get_region_size(ms->rh);

	/* fill in the source */
	m = get_default_mirror(ms);
	from.bdev = m->dev->bdev;
	from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
	if (key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (region_size - 1);
		if (!from.count)
			from.count = region_size;
	} else
		from.count = region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == get_default_mirror(ms))
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	if (!errors_handled(ms))
		set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);

	r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
			   flags, recovery_complete, reg);

	return r;
}
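
/*
 * Note on recover(): region_size is expected to be a power of two (the
 * core and disk dirty-log constructors reject anything else), so
 * "ti->len & (region_size - 1)" is simply ti->len % region_size.  For
 * example, with ti->len = 1000 sectors and region_size = 64, the final
 * region copies only 40 sectors.
 */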

static void do_recovery(struct mirror_set *ms)
{
	struct dm_region *reg;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	int r;

	/*
	 * Start quiescing some regions.
	 */
	dm_rh_recovery_prepare(ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = dm_rh_recovery_start(ms->rh))) {
		r = recover(ms, reg);
		if (r)
			dm_rh_recovery_end(reg, 0);
	}

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
	}
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	struct mirror *m = get_default_mirror(ms);

	do {
		if (likely(!atomic_read(&m->error_count)))
			return m;

		if (m-- == ms->mirror)
			m += ms->nr_mirrors;
	} while (m != get_default_mirror(ms));

	return NULL;
}

static int default_ok(struct mirror *m)
{
	struct mirror *default_mirror = get_default_mirror(m->ms);

	return !atomic_read(&default_mirror->error_count);
}

static int mirror_available(struct mirror_set *ms, struct bio *bio)
{
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region = dm_rh_bio_to_region(ms->rh, bio);

	if (log->type->in_sync(log, region, 0))
		return choose_mirror(ms, bio->bi_sector) ? 1 : 0;

	return 0;
}

/*
 * remap a buffer to a particular mirror.
 */
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
	return m->offset + (bio->bi_sector - m->ms->ti->begin);
}

static void map_bio(struct mirror *m, struct bio *bio)
{
	bio->bi_bdev = m->dev->bdev;
	bio->bi_sector = map_sector(m, bio);
}

static void map_region(struct dm_io_region *io, struct mirror *m,
		       struct bio *bio)
{
	io->bdev = m->dev->bdev;
	io->sector = map_sector(m, bio);
	io->count = bio->bi_size >> 9;
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static void read_callback(unsigned long error, void *context)
{
	struct bio *bio = context;
	struct mirror *m;

	m = bio_get_m(bio);
	bio_set_m(bio, NULL);

	if (likely(!error)) {
		bio_endio(bio, 0);
		return;
	}

	fail_mirror(m, DM_RAID1_READ_ERROR);

	if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
		DMWARN_LIMIT("Read failure on mirror device %s. "
			     "Trying alternative device.",
			     m->dev->name);
		queue_bio(m->ms, bio, bio_rw(bio));
		return;
	}

	DMERR_LIMIT("Read failure on mirror device %s. Failing I/O.",
		    m->dev->name);
	bio_endio(bio, -EIO);
}
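
/*
 * read_async_bio() below hands the read straight to dm-io: the request
 * points at the bio's biovec (starting at bi_idx), so the data lands
 * directly in the pages of the original bio and read_callback() runs on
 * completion.  No bounce buffering is involved.
 */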

/* Asynchronous read. */
static void read_async_bio(struct mirror *m, struct bio *bio)
{
	struct dm_io_region io;
	struct dm_io_request io_req = {
		.bi_rw = READ,
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = read_callback,
		.notify.context = bio,
		.client = m->ms->io_client,
	};

	map_region(&io, m, bio);
	bio_set_m(bio, m);
	BUG_ON(dm_io(&io_req, 1, &io, NULL));
}

static inline int region_in_sync(struct mirror_set *ms, region_t region,
				 int may_block)
{
	int state = dm_rh_get_state(ms->rh, region, may_block);
	return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = dm_rh_bio_to_region(ms->rh, bio);
		m = get_default_mirror(ms);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (likely(region_in_sync(ms, region, 1)))
			m = choose_mirror(ms, bio->bi_sector);
		else if (m && atomic_read(&m->error_count))
			m = NULL;

		if (likely(m))
			read_async_bio(m, bio);
		else
			bio_endio(bio, -EIO);
	}
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:	increment pending, write to *all* mirrors via dm-io
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/
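
/*
 * write_callback() below is invoked once by dm-io when the writes to
 * all mirrors have completed.  Each set bit in 'error' identifies one
 * of the regions passed to dm_io() in do_write(), i.e. one mirror in
 * array order, that failed.
 */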

static void write_callback(unsigned long error, void *context)
{
	unsigned i, ret = 0;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;
	int uptodate = 0;
	int should_wake = 0;
	unsigned long flags;

	ms = bio_get_m(bio)->ms;
	bio_set_m(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the target's end_io function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */
	if (likely(!error))
		goto out;

	for (i = 0; i < ms->nr_mirrors; i++)
		if (test_bit(i, &error))
			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
		else
			uptodate = 1;

	if (unlikely(!uptodate)) {
		DMERR("All replicated volumes dead, failing I/O");
		/* None of the writes succeeded, fail the I/O. */
		ret = -EIO;
	} else if (errors_handled(ms)) {
		/*
		 * Need to raise an event.  Since raising
		 * events can block, we need to do it in
		 * the main thread.
		 */
		spin_lock_irqsave(&ms->lock, flags);
		if (!ms->failures.head)
			should_wake = 1;
		bio_list_add(&ms->failures, bio);
		spin_unlock_irqrestore(&ms->lock, flags);
		if (should_wake)
			wakeup_mirrord(ms);
		return;
	}
out:
	bio_endio(bio, ret);
}

static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors], *dest = io;
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE,
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
		map_region(dest++, m, bio);

	/*
	 * Use default mirror because we only need it to retrieve the reference
	 * to the mirror set in write_callback().
	 */
	bio_set_m(bio, get_default_mirror(ms));

	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
}

static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;
	struct bio_list requeue;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);
	bio_list_init(&requeue);

	while ((bio = bio_list_pop(writes))) {
		region = dm_rh_bio_to_region(ms->rh, bio);

		if (log->type->is_remote_recovering &&
		    log->type->is_remote_recovering(log, region)) {
			bio_list_add(&requeue, bio);
			continue;
		}

		state = dm_rh_get_state(ms->rh, region, 1);
		switch (state) {
		case DM_RH_CLEAN:
		case DM_RH_DIRTY:
			this_list = &sync;
			break;

		case DM_RH_NOSYNC:
			this_list = &nosync;
			break;

		case DM_RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Add bios that are delayed due to remote recovery
	 * back on to the write queue.
	 */
	if (unlikely(requeue.head)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->writes, &requeue);
		spin_unlock_irq(&ms->lock);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	dm_rh_inc_pending(ms->rh, &sync);
	dm_rh_inc_pending(ms->rh, &nosync);
	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : 0;

	/*
	 * Dispatch io.
	 */
	if (unlikely(ms->log_failure)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->failures, &sync);
		spin_unlock_irq(&ms->lock);
		wakeup_mirrord(ms);
	} else
		while ((bio = bio_list_pop(&sync)))
			do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		dm_rh_delay(ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		map_bio(get_default_mirror(ms), bio);
		generic_make_request(bio);
	}
}

static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
	struct bio *bio;

	if (!failures->head)
		return;

	if (!ms->log_failure) {
		while ((bio = bio_list_pop(failures))) {
			ms->in_sync = 0;
			dm_rh_mark_nosync(ms->rh, bio, bio->bi_size, 0);
		}
		return;
	}

	/*
	 * If the log has failed, unattempted writes are being
	 * put on the failures list.  We can't issue those writes
	 * until a log has been marked, so we must store them.
	 *
	 * If a 'noflush' suspend is in progress, we can requeue
	 * the I/Os to the core.  This gives userspace a chance
	 * to reconfigure the mirror, at which point the core
	 * will reissue the writes.  If the 'noflush' flag is
	 * not set, we have no choice but to return errors.
	 *
	 * Some writes on the failures list may have been
	 * submitted before the log failure and represent a
	 * failure to write to one of the devices.  It is ok
	 * for us to treat them the same and requeue them
	 * as well.
	 */
	if (dm_noflush_suspending(ms->ti)) {
		while ((bio = bio_list_pop(failures)))
			bio_endio(bio, DM_ENDIO_REQUEUE);
		return;
	}

	if (atomic_read(&ms->suspend)) {
		while ((bio = bio_list_pop(failures)))
			bio_endio(bio, -EIO);
		return;
	}

	spin_lock_irq(&ms->lock);
	bio_list_merge(&ms->failures, failures);
	spin_unlock_irq(&ms->lock);

	delayed_wake(ms);
}

static void trigger_event(struct work_struct *work)
{
	struct mirror_set *ms =
		container_of(work, struct mirror_set, trigger_event);

	dm_table_event(ms->ti->table);
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
static void do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes, failures;
	unsigned long flags;

	spin_lock_irqsave(&ms->lock, flags);
	reads = ms->reads;
	writes = ms->writes;
	failures = ms->failures;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	spin_unlock_irqrestore(&ms->lock, flags);

	dm_rh_update_states(ms->rh, errors_handled(ms));
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
	do_failures(ms, &failures);

	dm_table_unplug_all(ms->ti->table);
}

/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dm_dirty_log *dl)
{
	size_t len;
	struct mirror_set *ms = NULL;

	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kzalloc(len, GFP_KERNEL);
	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	spin_lock_init(&ms->lock);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->log_failure = 0;
	atomic_set(&ms->suspend, 0);
	atomic_set(&ms->default_mirror, DEFAULT_MIRROR);

	ms->read_record_pool = mempool_create_slab_pool(MIN_READ_RECORDS,
						_dm_raid1_read_record_cache);

	if (!ms->read_record_pool) {
		ti->error = "Error creating mirror read_record_pool";
		kfree(ms);
		return NULL;
	}

	ms->io_client = dm_io_client_create(DM_IO_PAGES);
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		mempool_destroy(ms->read_record_pool);
		kfree(ms);
		return NULL;
	}

	ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
				       wakeup_all_recovery_waiters,
				       ms->ti->begin, MAX_RECOVERY,
				       dl, region_size, ms->nr_regions);
	if (IS_ERR(ms->rh)) {
		ti->error = "Error creating dirty region hash";
		dm_io_client_destroy(ms->io_client);
		mempool_destroy(ms->read_record_pool);
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	dm_region_hash_destroy(ms->rh);
	mempool_destroy(ms->read_record_pool);
	kfree(ms);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;

	if (sscanf(argv[1], "%llu", &offset) != 1) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	if (dm_get_device(ti, argv[0], offset, ti->len,
			  dm_table_get_mode(ti->table),
			  &ms->mirror[mirror].dev)) {
		ti->error = "Device lookup failure";
		return -ENXIO;
	}

	ms->mirror[mirror].ms = ms;
	atomic_set(&(ms->mirror[mirror].error_count), 0);
	ms->mirror[mirror].error_type = 0;
	ms->mirror[mirror].offset = offset;

	return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
					     unsigned argc, char **argv,
					     unsigned *args_used)
{
	unsigned param_count;
	struct dm_dirty_log *dl;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u", &param_count) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_dirty_log_create(argv[0], ti, param_count, argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	return dl;
}

static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
			  unsigned *args_used)
{
	unsigned num_features;
	struct dm_target *ti = ms->ti;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u", &num_features) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	if (!strcmp("handle_errors", argv[0]))
		ms->features |= DM_RAID1_HANDLE_ERRORS;
	else {
		ti->error = "Unrecognised feature requested";
		return -EINVAL;
	}

	(*args_used)++;

	return 0;
}

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, features must be "handle_errors".
 */
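
/*
 * Illustrative table line (paths and sizes are made up): a two-way
 * mirror over the whole target with a core log and 1024-sector regions
 * could be loaded as
 *
 *   0 2097152 mirror core 2 1024 nosync 2 /dev/sdb 0 /dev/sdc 0
 *
 * An optional trailing "1 handle_errors" enables the feature parsed by
 * parse_features() above.
 */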
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dm_dirty_log *dl;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) {
		ti->error = "Invalid number of mirrors";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_dirty_log_destroy(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;
	ti->split_io = dm_rh_get_region_size(ms->rh);

	ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		r = -ENOMEM;
		goto err_free_context;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);
	init_timer(&ms->timer);
	ms->timer_pending = 0;
	INIT_WORK(&ms->trigger_event, trigger_event);

	r = parse_features(ms, argc, argv, &args_used);
	if (r)
		goto err_destroy_wq;

	argv += args_used;
	argc -= args_used;

	/*
	 * Any read-balancing addition depends on the
	 * DM_RAID1_HANDLE_ERRORS flag being present.
	 * This is because the decision to balance depends
	 * on the sync state of a region.  If the above
	 * flag is not present, we ignore errors; and
	 * the sync state may be inaccurate.
	 */

	if (argc) {
		ti->error = "Too many mirror arguments";
		r = -EINVAL;
		goto err_destroy_wq;
	}

	r = dm_kcopyd_client_create(DM_KCOPYD_PAGES, &ms->kcopyd_client);
	if (r)
		goto err_destroy_wq;

	wakeup_mirrord(ms);
	return 0;

err_destroy_wq:
	destroy_workqueue(ms->kmirrord_wq);
err_free_context:
	free_context(ms, ti, ms->nr_mirrors);
	return r;
}

static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	del_timer_sync(&ms->timer);
	flush_workqueue(ms->kmirrord_wq);
	flush_scheduled_work();
	dm_kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}
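
/*
 * Read retries: for reads issued directly from mirror_map() below, a
 * dm_raid1_read_record (allocated from read_record_pool) remembers the
 * chosen mirror and the original bio fields so that mirror_end_io() can
 * restore the bio and requeue it to an alternative device if the read
 * fails.
 */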

/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	int r, rw = bio_rw(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;
	struct dm_raid1_read_record *read_record = NULL;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (rw == WRITE) {
		/* Save region for mirror_end_io() handler */
		map_context->ll = dm_rh_bio_to_region(ms->rh, bio);
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return r;

	/*
	 * If the region is not in-sync, queue the bio.
	 */
	if (!r || (r == -EWOULDBLOCK)) {
		if (rw == READA)
			return -EWOULDBLOCK;

		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * The region is in-sync and we can perform reads directly.
	 * Store enough information so we can retry if it fails.
	 */
	m = choose_mirror(ms, bio->bi_sector);
	if (unlikely(!m))
		return -EIO;

	read_record = mempool_alloc(ms->read_record_pool, GFP_NOIO);
	if (likely(read_record)) {
		dm_bio_record(&read_record->details, bio);
		map_context->ptr = read_record;
		read_record->m = m;
	}

	map_bio(m, bio);

	return DM_MAPIO_REMAPPED;
}

static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 int error, union map_info *map_context)
{
	int rw = bio_rw(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct mirror *m = NULL;
	struct dm_bio_details *bd = NULL;
	struct dm_raid1_read_record *read_record = map_context->ptr;

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE) {
		dm_rh_dec(ms->rh, map_context->ll);
		return error;
	}

	if (error == -EOPNOTSUPP)
		goto out;

	if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio))
		goto out;

	if (unlikely(error)) {
		if (!read_record) {
			/*
			 * There wasn't enough memory to record necessary
			 * information for a retry or there was no other
			 * mirror in-sync.
			 */
			DMERR_LIMIT("Mirror read failed.");
			return -EIO;
		}

		m = read_record->m;

		DMERR("Mirror read failed from %s. Trying alternative device.",
		      m->dev->name);

		fail_mirror(m, DM_RAID1_READ_ERROR);

		/*
		 * A failed read is requeued for another attempt using an
		 * intact mirror.
		 */
		if (default_ok(m) || mirror_available(ms, bio)) {
			bd = &read_record->details;

			dm_bio_restore(bd, bio);
			mempool_free(read_record, ms->read_record_pool);
			map_context->ptr = NULL;
			queue_bio(ms, bio, rw);
			return 1;
		}
		DMERR("All replicated volumes dead, failing I/O");
	}

out:
	if (read_record) {
		mempool_free(read_record, ms->read_record_pool);
		map_context->ptr = NULL;
	}

	return error;
}
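
/*
 * Suspend/resume: mirror_presuspend() stops recovery, waits for any
 * in-flight recovery to drain and flushes kmirrord so no bios are left
 * in the worker when the dirty log is suspended; mirror_resume() clears
 * the suspend flag, resumes the log and restarts recovery.
 */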

static void mirror_presuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	atomic_set(&ms->suspend, 1);

	/*
	 * We must finish up all the work that we've
	 * generated (i.e. recovery work).
	 */
	dm_rh_stop_recovery(ms->rh);

	wait_event(_kmirrord_recovery_stopped,
		   !dm_rh_recovery_in_flight(ms->rh));

	if (log->type->presuspend && log->type->presuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log presuspend failed");

	/*
	 * Now that recovery is complete/stopped and the
	 * delayed bios are queued, we need to wait for
	 * the worker thread to complete.  This way,
	 * we know that all of our I/O has been pushed.
	 */
	flush_workqueue(ms->kmirrord_wq);
}

static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (log->type->postsuspend && log->type->postsuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log postsuspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	atomic_set(&ms->suspend, 0);
	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	dm_rh_start_recovery(ms->rh);
}

/*
 * device_status_char
 * @m: mirror device/leg we want the status of
 *
 * We return one character representing the most severe error
 * we have encountered.
 *	A => Alive - No failures
 *	D => Dead - A write failure occurred leaving mirror out-of-sync
 *	S => Sync - A synchronization failure occurred, mirror out-of-sync
 *	R => Read - A read failure occurred, mirror data unaffected
 *
 * Returns: <char>
 */
static char device_status_char(struct mirror *m)
{
	if (!atomic_read(&(m->error_count)))
		return 'A';

	return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
		(test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' :
		(test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U';
}

static int mirror_status(struct dm_target *ti, status_type_t type,
			 char *result, unsigned int maxlen)
{
	unsigned int m, sz = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	char buffer[ms->nr_mirrors + 1];

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++) {
			DMEMIT("%s ", ms->mirror[m].dev->name);
			buffer[m] = device_status_char(&(ms->mirror[m]));
		}
		buffer[m] = '\0';

		DMEMIT("%llu/%llu 1 %s ",
		       (unsigned long long)log->type->get_sync_count(log),
		       (unsigned long long)ms->nr_regions, buffer);

		sz += log->type->status(log, type, result+sz, maxlen-sz);

		break;

	case STATUSTYPE_TABLE:
		sz = log->type->status(log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		if (ms->features & DM_RAID1_HANDLE_ERRORS)
			DMEMIT(" 1 handle_errors");
	}

	return 0;
}
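
/*
 * Illustrative STATUSTYPE_INFO output (device numbers are made up) for
 * a healthy two-way mirror, before the dirty log status is appended:
 *
 *   2 253:4 253:5 421/1024 1 AA
 *
 * i.e. the mirror count, each leg's device, regions in-sync/total,
 * followed by a literal 1 and one health character per leg from
 * device_status_char().
 */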

static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 0, 20},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.presuspend = mirror_presuspend,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
};

static int __init dm_mirror_init(void)
{
	int r;

	_dm_raid1_read_record_cache = KMEM_CACHE(dm_raid1_read_record, 0);
	if (!_dm_raid1_read_record_cache) {
		DMERR("Can't allocate dm_raid1_read_record cache");
		r = -ENOMEM;
		goto bad_cache;
	}

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("Failed to register mirror target");
		goto bad_target;
	}

	return 0;

bad_target:
	kmem_cache_destroy(_dm_raid1_read_record_cache);
bad_cache:
	return r;
}

static void __exit dm_mirror_exit(void)
{
	dm_unregister_target(&mirror_target);
	kmem_cache_destroy(_dm_raid1_read_record_cache);
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");