// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-kcopyd.h>
#include <linux/dm-region-hash.h>

#define DM_MSG_PREFIX "raid1"

#define MAX_RECOVERY 1	/* Maximum number of regions recovered in parallel. */

#define MAX_NR_MIRRORS	(DM_KCOPYD_MAX_REGIONS + 1)

#define DM_RAID1_HANDLE_ERRORS	0x01
#define DM_RAID1_KEEP_LOG	0x02
#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)
#define keep_log(p)		((p)->features & DM_RAID1_KEEP_LOG)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);

/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
enum dm_raid1_error {
	DM_RAID1_WRITE_ERROR,
	DM_RAID1_FLUSH_ERROR,
	DM_RAID1_SYNC_ERROR,
	DM_RAID1_READ_ERROR
};

struct mirror {
	struct mirror_set *ms;
	atomic_t error_count;
	unsigned long error_type;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;

	uint64_t features;

	spinlock_t lock;	/* protects the lists */
	struct bio_list reads;
	struct bio_list writes;
	struct bio_list failures;
	struct bio_list holds;	/* bios are waiting until suspend */

	struct dm_region_hash *rh;
	struct dm_kcopyd_client *kcopyd_client;
	struct dm_io_client *io_client;

	/* recovery */
	region_t nr_regions;
	int in_sync;
	int log_failure;
	int leg_failure;
	atomic_t suspend;

	atomic_t default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;
	struct timer_list timer;
	unsigned long timer_pending;

	struct work_struct trigger_event;

	unsigned int nr_mirrors;
	struct mirror mirror[];
};

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(raid1_resync_throttle,
		"A percentage of time allocated for raid resynchronization");

static void wakeup_mirrord(void *context)
{
	struct mirror_set *ms = context;

	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

static void delayed_wake_fn(struct timer_list *t)
{
	struct mirror_set *ms = from_timer(ms, t, timer);

	clear_bit(0, &ms->timer_pending);
	wakeup_mirrord(ms);
}

static void delayed_wake(struct mirror_set *ms)
{
	if (test_and_set_bit(0, &ms->timer_pending))
		return;

	ms->timer.expires = jiffies + HZ / 5;
	add_timer(&ms->timer);
}

static void wakeup_all_recovery_waiters(void *context)
{
	wake_up_all(&_kmirrord_recovery_stopped);
}
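/*
 * Queue a bio on the read or write list and wake kmirrord if the list
 * was previously empty; a non-empty list means a wakeup is already
 * pending.
 */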
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	unsigned long flags;
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock_irqsave(&ms->lock, flags);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock_irqrestore(&ms->lock, flags);

	if (should_wake)
		wakeup_mirrord(ms);
}

static void dispatch_bios(void *context, struct bio_list *bio_list)
{
	struct mirror_set *ms = context;
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list)))
		queue_bio(ms, bio, WRITE);
}

struct dm_raid1_bio_record {
	struct mirror *m;
	/* if details->bi_bdev == NULL, details were not saved */
	struct dm_bio_details details;
	region_t write_region;
};

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky.  We squirrel the mirror struct away inside
 * bi_next for read/write buffers.  This is safe since the bh
 * doesn't get submitted to the lower levels of block layer.
 */
static struct mirror *bio_get_m(struct bio *bio)
{
	return (struct mirror *) bio->bi_next;
}

static void bio_set_m(struct bio *bio, struct mirror *m)
{
	bio->bi_next = (struct bio *) m;
}

static struct mirror *get_default_mirror(struct mirror_set *ms)
{
	return &ms->mirror[atomic_read(&ms->default_mirror)];
}

static void set_default_mirror(struct mirror *m)
{
	struct mirror_set *ms = m->ms;
	struct mirror *m0 = &(ms->mirror[0]);

	atomic_set(&ms->default_mirror, m - m0);
}

static struct mirror *get_valid_mirror(struct mirror_set *ms)
{
	struct mirror *m;

	for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
		if (!atomic_read(&m->error_count))
			return m;

	return NULL;
}
/* fail_mirror
 * @m: mirror device to fail
 * @error_type: one of the enum's, DM_RAID1_*_ERROR
 *
 * If errors are being handled, record the type of
 * error encountered for this device.  If this type
 * of error has already been recorded, we can return;
 * otherwise, we must signal userspace by triggering
 * an event.  Additionally, if the device is the
 * primary device, we must choose a new primary, but
 * only if the mirror is in-sync.
 *
 * This function must not block.
 */
static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
{
	struct mirror_set *ms = m->ms;
	struct mirror *new;

	ms->leg_failure = 1;

	/*
	 * error_count is used for nothing more than a
	 * simple way to tell if a device has encountered
	 * errors.
	 */
	atomic_inc(&m->error_count);

	if (test_and_set_bit(error_type, &m->error_type))
		return;

	if (!errors_handled(ms))
		return;

	if (m != get_default_mirror(ms))
		goto out;

	if (!ms->in_sync && !keep_log(ms)) {
		/*
		 * Better to issue requests to same failing device
		 * than to risk returning corrupt data.
		 */
		DMERR("Primary mirror (%s) failed while out-of-sync: Reads may fail.",
		      m->dev->name);
		goto out;
	}

	new = get_valid_mirror(ms);
	if (new)
		set_default_mirror(new);
	else
		DMWARN("All sides of mirror have failed.");

out:
	schedule_work(&ms->trigger_event);
}

static int mirror_flush(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	unsigned long error_bits;

	unsigned int i;
	struct dm_io_region io[MAX_NR_MIRRORS];
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = ms->io_client,
	};

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) {
		io[i].bdev = m->dev->bdev;
		io[i].sector = 0;
		io[i].count = 0;
	}

	error_bits = -1;
	dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
	if (unlikely(error_bits != 0)) {
		for (i = 0; i < ms->nr_mirrors; i++)
			if (test_bit(i, &error_bits))
				fail_mirror(ms->mirror + i,
					    DM_RAID1_FLUSH_ERROR);
		return -EIO;
	}

	return 0;
}

/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned long write_err,
			      void *context)
{
	struct dm_region *reg = context;
	struct mirror_set *ms = dm_rh_region_context(reg);
	int m, bit = 0;

	if (read_err) {
		/* Read error means the failure of default mirror. */
		DMERR_LIMIT("Unable to read primary mirror during recovery");
		fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
	}

	if (write_err) {
		DMERR_LIMIT("Write error during recovery (error = 0x%lx)",
			    write_err);
		/*
		 * Bits correspond to devices (excluding default mirror).
		 * The default mirror cannot change during recovery.
		 */
		for (m = 0; m < ms->nr_mirrors; m++) {
			if (&ms->mirror[m] == get_default_mirror(ms))
				continue;
			if (test_bit(bit, &write_err))
				fail_mirror(ms->mirror + m,
					    DM_RAID1_SYNC_ERROR);
			bit++;
		}
	}

	dm_rh_recovery_end(reg, !(read_err || write_err));
}
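/*
 * Kick off a kcopyd copy of one region from the default mirror to
 * every other leg; recovery_complete() above is the completion
 * callback.
 */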
static void recover(struct mirror_set *ms, struct dm_region *reg)
{
	unsigned int i;
	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;
	region_t key = dm_rh_get_region_key(reg);
	sector_t region_size = dm_rh_get_region_size(ms->rh);

	/* fill in the source */
	m = get_default_mirror(ms);
	from.bdev = m->dev->bdev;
	from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
	if (key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (region_size - 1);
		if (!from.count)
			from.count = region_size;
	} else
		from.count = region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == get_default_mirror(ms))
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	if (!errors_handled(ms))
		flags |= BIT(DM_KCOPYD_IGNORE_ERROR);

	dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
		       flags, recovery_complete, reg);
}

static void reset_ms_flags(struct mirror_set *ms)
{
	unsigned int m;

	ms->leg_failure = 0;
	for (m = 0; m < ms->nr_mirrors; m++) {
		atomic_set(&(ms->mirror[m].error_count), 0);
		ms->mirror[m].error_type = 0;
	}
}

static void do_recovery(struct mirror_set *ms)
{
	struct dm_region *reg;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	/*
	 * Start quiescing some regions.
	 */
	dm_rh_recovery_prepare(ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = dm_rh_recovery_start(ms->rh)))
		recover(ms, reg);

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
		reset_ms_flags(ms);
	}
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	struct mirror *m = get_default_mirror(ms);

	do {
		if (likely(!atomic_read(&m->error_count)))
			return m;

		if (m-- == ms->mirror)
			m += ms->nr_mirrors;
	} while (m != get_default_mirror(ms));

	return NULL;
}

static int default_ok(struct mirror *m)
{
	struct mirror *default_mirror = get_default_mirror(m->ms);

	return !atomic_read(&default_mirror->error_count);
}

static int mirror_available(struct mirror_set *ms, struct bio *bio)
{
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region = dm_rh_bio_to_region(ms->rh, bio);

	if (log->type->in_sync(log, region, 0))
		return choose_mirror(ms, bio->bi_iter.bi_sector) ? 1 : 0;

	return 0;
}

/*
 * remap a buffer to a particular mirror.
 */
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
	if (unlikely(!bio->bi_iter.bi_size))
		return 0;
	return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector);
}

static void map_bio(struct mirror *m, struct bio *bio)
{
	bio_set_dev(bio, m->dev->bdev);
	bio->bi_iter.bi_sector = map_sector(m, bio);
}

static void map_region(struct dm_io_region *io, struct mirror *m,
		       struct bio *bio)
{
	io->bdev = m->dev->bdev;
	io->sector = map_sector(m, bio);
	io->count = bio_sectors(bio);
}
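/*
 * Park a bio on ms->holds until the suspend completes.  If the set is
 * already suspended, complete the bio immediately instead: requeue it
 * when a noflush suspend is in progress, otherwise fail it.
 */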
static void hold_bio(struct mirror_set *ms, struct bio *bio)
{
	/*
	 * Lock is required to avoid race condition during suspend
	 * process.
	 */
	spin_lock_irq(&ms->lock);

	if (atomic_read(&ms->suspend)) {
		spin_unlock_irq(&ms->lock);

		/*
		 * If device is suspended, complete the bio.
		 */
		if (dm_noflush_suspending(ms->ti))
			bio->bi_status = BLK_STS_DM_REQUEUE;
		else
			bio->bi_status = BLK_STS_IOERR;

		bio_endio(bio);
		return;
	}

	/*
	 * Hold bio until the suspend is complete.
	 */
	bio_list_add(&ms->holds, bio);
	spin_unlock_irq(&ms->lock);
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static void read_callback(unsigned long error, void *context)
{
	struct bio *bio = context;
	struct mirror *m;

	m = bio_get_m(bio);
	bio_set_m(bio, NULL);

	if (likely(!error)) {
		bio_endio(bio);
		return;
	}

	fail_mirror(m, DM_RAID1_READ_ERROR);

	if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
		DMWARN_LIMIT("Read failure on mirror device %s. Trying alternative device.",
			     m->dev->name);
		queue_bio(m->ms, bio, bio_data_dir(bio));
		return;
	}

	DMERR_LIMIT("Read failure on mirror device %s. Failing I/O.",
		    m->dev->name);
	bio_io_error(bio);
}

/* Asynchronous read. */
static void read_async_bio(struct mirror *m, struct bio *bio)
{
	struct dm_io_region io;
	struct dm_io_request io_req = {
		.bi_opf = REQ_OP_READ,
		.mem.type = DM_IO_BIO,
		.mem.ptr.bio = bio,
		.notify.fn = read_callback,
		.notify.context = bio,
		.client = m->ms->io_client,
	};

	map_region(&io, m, bio);
	bio_set_m(bio, m);
	BUG_ON(dm_io(&io_req, 1, &io, NULL));
}

static inline int region_in_sync(struct mirror_set *ms, region_t region,
				 int may_block)
{
	int state = dm_rh_get_state(ms->rh, region, may_block);
	return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = dm_rh_bio_to_region(ms->rh, bio);
		m = get_default_mirror(ms);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (likely(region_in_sync(ms, region, 1)))
			m = choose_mirror(ms, bio->bi_iter.bi_sector);
		else if (m && atomic_read(&m->error_count))
			m = NULL;

		if (likely(m))
			read_async_bio(m, bio);
		else
			bio_io_error(bio);
	}
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:	increment pending, use kcopyd to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/
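/*
 * Completion callback for writes issued through dm-io.  Each set bit
 * in 'error' identifies, by index, a mirror leg whose write failed.
 */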
static void write_callback(unsigned long error, void *context)
{
	unsigned int i;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;
	int should_wake = 0;
	unsigned long flags;

	ms = bio_get_m(bio)->ms;
	bio_set_m(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the targets endio function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */
	if (likely(!error)) {
		bio_endio(bio);
		return;
	}

	/*
	 * If the bio is discard, return an error, but do not
	 * degrade the array.
	 */
	if (bio_op(bio) == REQ_OP_DISCARD) {
		bio->bi_status = BLK_STS_NOTSUPP;
		bio_endio(bio);
		return;
	}

	for (i = 0; i < ms->nr_mirrors; i++)
		if (test_bit(i, &error))
			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);

	/*
	 * Need to raise event.  Since raising
	 * events can block, we need to do it in
	 * the main thread.
	 */
	spin_lock_irqsave(&ms->lock, flags);
	if (!ms->failures.head)
		should_wake = 1;
	bio_list_add(&ms->failures, bio);
	spin_unlock_irqrestore(&ms->lock, flags);
	if (should_wake)
		wakeup_mirrord(ms);
}

static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct dm_io_region io[MAX_NR_MIRRORS], *dest = io;
	struct mirror *m;
	blk_opf_t op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH);
	struct dm_io_request io_req = {
		.bi_opf = REQ_OP_WRITE | op_flags,
		.mem.type = DM_IO_BIO,
		.mem.ptr.bio = bio,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	if (bio_op(bio) == REQ_OP_DISCARD) {
		io_req.bi_opf = REQ_OP_DISCARD | op_flags;
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = NULL;
	}

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
		map_region(dest++, m, bio);

	/*
	 * Use default mirror because we only need it to retrieve the reference
	 * to the mirror set in write_callback().
	 */
	bio_set_m(bio, get_default_mirror(ms));

	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
}
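/*
 * Classify and dispatch the queued writes.  Flushes and discards are
 * treated as in-sync writes so they reach every leg; writes to regions
 * undergoing remote recovery are requeued after a short delay.
 */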
static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;
	struct bio_list requeue;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);
	bio_list_init(&requeue);

	while ((bio = bio_list_pop(writes))) {
		if ((bio->bi_opf & REQ_PREFLUSH) ||
		    (bio_op(bio) == REQ_OP_DISCARD)) {
			bio_list_add(&sync, bio);
			continue;
		}

		region = dm_rh_bio_to_region(ms->rh, bio);

		if (log->type->is_remote_recovering &&
		    log->type->is_remote_recovering(log, region)) {
			bio_list_add(&requeue, bio);
			continue;
		}

		state = dm_rh_get_state(ms->rh, region, 1);
		switch (state) {
		case DM_RH_CLEAN:
		case DM_RH_DIRTY:
			this_list = &sync;
			break;

		case DM_RH_NOSYNC:
			this_list = &nosync;
			break;

		case DM_RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Add bios that are delayed due to remote recovery
	 * back on to the write queue
	 */
	if (unlikely(requeue.head)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->writes, &requeue);
		spin_unlock_irq(&ms->lock);
		delayed_wake(ms);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	dm_rh_inc_pending(ms->rh, &sync);
	dm_rh_inc_pending(ms->rh, &nosync);

	/*
	 * If the flush fails on a previous call and succeeds here,
	 * we must not reset the log_failure variable.  We need
	 * userspace interaction to do that.
	 */
	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;

	/*
	 * Dispatch io.
	 */
	if (unlikely(ms->log_failure) && errors_handled(ms)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->failures, &sync);
		spin_unlock_irq(&ms->lock);
		wakeup_mirrord(ms);
	} else
		while ((bio = bio_list_pop(&sync)))
			do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		dm_rh_delay(ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		if (unlikely(ms->leg_failure) && errors_handled(ms) && !keep_log(ms)) {
			spin_lock_irq(&ms->lock);
			bio_list_add(&ms->failures, bio);
			spin_unlock_irq(&ms->lock);
			wakeup_mirrord(ms);
		} else {
			map_bio(get_default_mirror(ms), bio);
			submit_bio_noacct(bio);
		}
	}
}
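/*
 * Handle bios on the failures list: while the log is healthy, mark
 * their regions not-in-sync, then fail, hold, or complete each bio
 * depending on the handle_errors/keep_log features.
 */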
static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
	struct bio *bio;

	if (likely(!failures->head))
		return;

	/*
	 * If the log has failed, unattempted writes are being
	 * put on the holds list.  We can't issue those writes
	 * until a log has been marked, so we must store them.
	 *
	 * If a 'noflush' suspend is in progress, we can requeue
	 * the I/O's to the core.  This gives userspace a chance
	 * to reconfigure the mirror, at which point the core
	 * will reissue the writes.  If the 'noflush' flag is
	 * not set, we have no choice but to return errors.
	 *
	 * Some writes on the failures list may have been
	 * submitted before the log failure and represent a
	 * failure to write to one of the devices.  It is ok
	 * for us to treat them the same and requeue them
	 * as well.
	 */
	while ((bio = bio_list_pop(failures))) {
		if (!ms->log_failure) {
			ms->in_sync = 0;
			dm_rh_mark_nosync(ms->rh, bio);
		}

		/*
		 * If all the legs are dead, fail the I/O.
		 * If the device has failed and keep_log is enabled,
		 * fail the I/O.
		 *
		 * If we have been told to handle errors, and keep_log
		 * isn't enabled, hold the bio and wait for userspace to
		 * deal with the problem.
		 *
		 * Otherwise pretend that the I/O succeeded. (This would
		 * be wrong if the failed leg returned after reboot and
		 * got replicated back to the good legs.)
		 */
		if (unlikely(!get_valid_mirror(ms) || (keep_log(ms) && ms->log_failure)))
			bio_io_error(bio);
		else if (errors_handled(ms) && !keep_log(ms))
			hold_bio(ms, bio);
		else
			bio_endio(bio);
	}
}

static void trigger_event(struct work_struct *work)
{
	struct mirror_set *ms =
		container_of(work, struct mirror_set, trigger_event);

	dm_table_event(ms->ti->table);
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
static void do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes, failures;
	unsigned long flags;

	spin_lock_irqsave(&ms->lock, flags);
	reads = ms->reads;
	writes = ms->writes;
	failures = ms->failures;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	spin_unlock_irqrestore(&ms->lock, flags);

	dm_rh_update_states(ms->rh, errors_handled(ms));
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
	do_failures(ms, &failures);
}

/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dm_dirty_log *dl)
{
	struct mirror_set *ms =
		kzalloc(struct_size(ms, mirror, nr_mirrors), GFP_KERNEL);

	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	spin_lock_init(&ms->lock);
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	bio_list_init(&ms->holds);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->log_failure = 0;
	ms->leg_failure = 0;
	atomic_set(&ms->suspend, 0);
	atomic_set(&ms->default_mirror, DEFAULT_MIRROR);

	ms->io_client = dm_io_client_create();
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		kfree(ms);
		return NULL;
	}

	ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
				       wakeup_all_recovery_waiters,
				       ms->ti->begin, MAX_RECOVERY,
				       dl, region_size, ms->nr_regions);
	if (IS_ERR(ms->rh)) {
		ti->error = "Error creating dirty region hash";
		dm_io_client_destroy(ms->io_client);
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	dm_region_hash_destroy(ms->rh);
	kfree(ms);
}
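/*
 * Parse one "mirror_path offset" pair from the table line and open the
 * underlying device.
 */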
static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;
	char dummy;
	int ret;

	if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1 ||
	    offset != (sector_t)offset) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
			    &ms->mirror[mirror].dev);
	if (ret) {
		ti->error = "Device lookup failure";
		return ret;
	}

	ms->mirror[mirror].ms = ms;
	atomic_set(&(ms->mirror[mirror].error_count), 0);
	ms->mirror[mirror].error_type = 0;
	ms->mirror[mirror].offset = offset;

	return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
					     unsigned int argc, char **argv,
					     unsigned int *args_used)
{
	unsigned int param_count;
	struct dm_dirty_log *dl;
	char dummy;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u%c", &param_count, &dummy) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count,
				 argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	return dl;
}

static int parse_features(struct mirror_set *ms, unsigned int argc, char **argv,
			  unsigned int *args_used)
{
	unsigned int num_features;
	struct dm_target *ti = ms->ti;
	char dummy;
	int i;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u%c", &num_features, &dummy) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	for (i = 0; i < num_features; i++) {
		if (!strcmp("handle_errors", argv[0]))
			ms->features |= DM_RAID1_HANDLE_ERRORS;
		else if (!strcmp("keep_log", argv[0]))
			ms->features |= DM_RAID1_KEEP_LOG;
		else {
			ti->error = "Unrecognised feature requested";
			return -EINVAL;
		}

		argc--;
		argv++;
		(*args_used)++;
	}
	if (!errors_handled(ms) && keep_log(ms)) {
		ti->error = "keep_log feature requires the handle_errors feature";
		return -EINVAL;
	}

	return 0;
}

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, supported features are "handle_errors" and "keep_log".
 */
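/*
 * An illustrative (hypothetical) table line, mirroring 1 GiB across
 * two legs with a core log, a 1024-sector region size and error
 * handling enabled:
 *
 *   0 2097152 mirror core 1 1024 2 /dev/sda1 0 /dev/sdb1 0 1 handle_errors
 */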
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dm_dirty_log *dl;
	char dummy;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u%c", &nr_mirrors, &dummy) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > MAX_NR_MIRRORS) {
		ti->error = "Invalid number of mirrors";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_dirty_log_destroy(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;

	r = dm_set_target_max_io_len(ti, dm_rh_get_region_size(ms->rh));
	if (r)
		goto err_free_context;

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->per_io_data_size = sizeof(struct dm_raid1_bio_record);

	ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_MEM_RECLAIM, 0);
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		r = -ENOMEM;
		goto err_free_context;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);
	timer_setup(&ms->timer, delayed_wake_fn, 0);
	ms->timer_pending = 0;
	INIT_WORK(&ms->trigger_event, trigger_event);

	r = parse_features(ms, argc, argv, &args_used);
	if (r)
		goto err_destroy_wq;

	argv += args_used;
	argc -= args_used;

	/*
	 * Any read-balancing addition depends on the
	 * DM_RAID1_HANDLE_ERRORS flag being present.
	 * This is because the decision to balance depends
	 * on the sync state of a region.  If the above
	 * flag is not present, we ignore errors; and
	 * the sync state may be inaccurate.
	 */

	if (argc) {
		ti->error = "Too many mirror arguments";
		r = -EINVAL;
		goto err_destroy_wq;
	}

	ms->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(ms->kcopyd_client)) {
		r = PTR_ERR(ms->kcopyd_client);
		goto err_destroy_wq;
	}

	wakeup_mirrord(ms);
	return 0;

err_destroy_wq:
	destroy_workqueue(ms->kmirrord_wq);
err_free_context:
	free_context(ms, ti, ms->nr_mirrors);
	return r;
}
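/*
 * Tear down in roughly the reverse order of construction: quiesce the
 * timer and work items before destroying the workqueue and freeing the
 * context.
 */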
static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	del_timer_sync(&ms->timer);
	flush_workqueue(ms->kmirrord_wq);
	flush_work(&ms->trigger_event);
	dm_kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}

/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio)
{
	int r, rw = bio_data_dir(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	struct dm_raid1_bio_record *bio_record =
		dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));

	bio_record->details.bi_bdev = NULL;

	if (rw == WRITE) {
		/* Save region for mirror_end_io() handler */
		bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return DM_MAPIO_KILL;

	/*
	 * If region is not in-sync queue the bio.
	 */
	if (!r || (r == -EWOULDBLOCK)) {
		if (bio->bi_opf & REQ_RAHEAD)
			return DM_MAPIO_KILL;

		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * The region is in-sync and we can perform reads directly.
	 * Store enough information so we can retry if it fails.
	 */
	m = choose_mirror(ms, bio->bi_iter.bi_sector);
	if (unlikely(!m))
		return DM_MAPIO_KILL;

	dm_bio_record(&bio_record->details, bio);
	bio_record->m = m;

	map_bio(m, bio);

	return DM_MAPIO_REMAPPED;
}
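/*
 * End-io handler.  Writes drop the region's pending count; failed
 * reads are restored from the recorded bio details and requeued so an
 * alternative leg can be tried.
 */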
static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 blk_status_t *error)
{
	int rw = bio_data_dir(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct mirror *m = NULL;
	struct dm_bio_details *bd = NULL;
	struct dm_raid1_bio_record *bio_record =
		dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE) {
		if (!(bio->bi_opf & REQ_PREFLUSH) &&
		    bio_op(bio) != REQ_OP_DISCARD)
			dm_rh_dec(ms->rh, bio_record->write_region);
		return DM_ENDIO_DONE;
	}

	if (*error == BLK_STS_NOTSUPP)
		goto out;

	if (bio->bi_opf & REQ_RAHEAD)
		goto out;

	if (unlikely(*error)) {
		if (!bio_record->details.bi_bdev) {
			/*
			 * There wasn't enough memory to record necessary
			 * information for a retry or there was no other
			 * mirror in-sync.
			 */
			DMERR_LIMIT("Mirror read failed.");
			return DM_ENDIO_DONE;
		}

		m = bio_record->m;

		DMERR("Mirror read failed from %s. Trying alternative device.",
		      m->dev->name);

		fail_mirror(m, DM_RAID1_READ_ERROR);

		/*
		 * A failed read is requeued for another attempt using an intact
		 * mirror.
		 */
		if (default_ok(m) || mirror_available(ms, bio)) {
			bd = &bio_record->details;

			dm_bio_restore(bd, bio);
			bio_record->details.bi_bdev = NULL;
			bio->bi_status = 0;

			queue_bio(ms, bio, rw);
			return DM_ENDIO_INCOMPLETE;
		}
		DMERR("All replicated volumes dead, failing I/O");
	}

out:
	bio_record->details.bi_bdev = NULL;

	return DM_ENDIO_DONE;
}

static void mirror_presuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	struct bio_list holds;
	struct bio *bio;

	atomic_set(&ms->suspend, 1);

	/*
	 * Process bios in the hold list so that recovery does not
	 * wait on them.  No bio can be added to the hold list after
	 * this point because ms->suspend is set.
	 */
	spin_lock_irq(&ms->lock);
	holds = ms->holds;
	bio_list_init(&ms->holds);
	spin_unlock_irq(&ms->lock);

	while ((bio = bio_list_pop(&holds)))
		hold_bio(ms, bio);

	/*
	 * We must finish up all the work that we've
	 * generated (i.e. recovery work).
	 */
	dm_rh_stop_recovery(ms->rh);

	wait_event(_kmirrord_recovery_stopped,
		   !dm_rh_recovery_in_flight(ms->rh));

	if (log->type->presuspend && log->type->presuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log presuspend failed");

	/*
	 * Now that recovery is complete/stopped and the
	 * delayed bios are queued, we need to wait for
	 * the worker thread to complete.  This way,
	 * we know that all of our I/O has been pushed.
	 */
	flush_workqueue(ms->kmirrord_wq);
}

static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (log->type->postsuspend && log->type->postsuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log postsuspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	atomic_set(&ms->suspend, 0);
	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	dm_rh_start_recovery(ms->rh);
}
/*
 * device_status_char
 * @m: mirror device/leg we want the status of
 *
 * We return one character representing the most severe error
 * we have encountered.
 *	A => Alive - No failures
 *	F => Flush - A flush failure occurred
 *	D => Dead - A write failure occurred leaving mirror out-of-sync
 *	S => Sync - A synchronization failure occurred, mirror out-of-sync
 *	R => Read - A read failure occurred, mirror data unaffected
 *
 * Returns: <char>
 */
static char device_status_char(struct mirror *m)
{
	if (!atomic_read(&(m->error_count)))
		return 'A';

	return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
		(test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
		(test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' :
		(test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U';
}

static void mirror_status(struct dm_target *ti, status_type_t type,
			  unsigned int status_flags, char *result, unsigned int maxlen)
{
	unsigned int m, sz = 0;
	int num_feature_args = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	char buffer[MAX_NR_MIRRORS + 1];

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++) {
			DMEMIT("%s ", ms->mirror[m].dev->name);
			buffer[m] = device_status_char(&(ms->mirror[m]));
		}
		buffer[m] = '\0';

		DMEMIT("%llu/%llu 1 %s ",
		       (unsigned long long)log->type->get_sync_count(log),
		       (unsigned long long)ms->nr_regions, buffer);

		sz += log->type->status(log, type, result+sz, maxlen-sz);

		break;

	case STATUSTYPE_TABLE:
		sz = log->type->status(log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		num_feature_args += !!errors_handled(ms);
		num_feature_args += !!keep_log(ms);
		if (num_feature_args) {
			DMEMIT(" %d", num_feature_args);
			if (errors_handled(ms))
				DMEMIT(" handle_errors");
			if (keep_log(ms))
				DMEMIT(" keep_log");
		}

		break;

	case STATUSTYPE_IMA:
		DMEMIT_TARGET_NAME_VERSION(ti->type);
		DMEMIT(",nr_mirrors=%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++) {
			DMEMIT(",mirror_device_%d=%s", m, ms->mirror[m].dev->name);
			DMEMIT(",mirror_device_%d_status=%c",
			       m, device_status_char(&(ms->mirror[m])));
		}

		DMEMIT(",handle_errors=%c", errors_handled(ms) ? 'y' : 'n');
		DMEMIT(",keep_log=%c", keep_log(ms) ? 'y' : 'n');

		DMEMIT(",log_type_status=");
		sz += log->type->status(log, type, result+sz, maxlen-sz);
		DMEMIT(";");
		break;
	}
}

static int mirror_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct mirror_set *ms = ti->private;
	int ret = 0;
	unsigned int i;

	for (i = 0; !ret && i < ms->nr_mirrors; i++)
		ret = fn(ti, ms->mirror[i].dev,
			 ms->mirror[i].offset, ti->len, data);

	return ret;
}

static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 14, 0},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.presuspend = mirror_presuspend,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
	.iterate_devices = mirror_iterate_devices,
};

static int __init dm_mirror_init(void)
{
	int r;

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("Failed to register mirror target");
		goto bad_target;
	}

	return 0;

bad_target:
	return r;
}

static void __exit dm_mirror_exit(void)
{
	dm_unregister_target(&mirror_target);
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");