1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * raid1.c : Multiple Devices driver for Linux 4 * 5 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat 6 * 7 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman 8 * 9 * RAID-1 management functions. 10 * 11 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000 12 * 13 * Fixes to reconstruction by Jakob Østergaard" <jakob@ostenfeld.dk> 14 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au> 15 * 16 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support 17 * bitmapped intelligence in resync: 18 * 19 * - bitmap marked during normal i/o 20 * - bitmap used to skip nondirty blocks during sync 21 * 22 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology: 23 * - persistent bitmap code 24 */ 25 26 #include <linux/slab.h> 27 #include <linux/delay.h> 28 #include <linux/blkdev.h> 29 #include <linux/module.h> 30 #include <linux/seq_file.h> 31 #include <linux/ratelimit.h> 32 #include <linux/interval_tree_generic.h> 33 34 #include <trace/events/block.h> 35 36 #include "md.h" 37 #include "raid1.h" 38 #include "md-bitmap.h" 39 40 #define UNSUPPORTED_MDDEV_FLAGS \ 41 ((1L << MD_HAS_JOURNAL) | \ 42 (1L << MD_JOURNAL_CLEAN) | \ 43 (1L << MD_HAS_PPL) | \ 44 (1L << MD_HAS_MULTIPLE_PPLS)) 45 46 static void allow_barrier(struct r1conf *conf, sector_t sector_nr); 47 static void lower_barrier(struct r1conf *conf, sector_t sector_nr); 48 49 #define raid1_log(md, fmt, args...) \ 50 do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0) 51 52 #include "raid1-10.c" 53 54 #define START(node) ((node)->start) 55 #define LAST(node) ((node)->last) 56 INTERVAL_TREE_DEFINE(struct serial_info, node, sector_t, _subtree_last, 57 START, LAST, static inline, raid1_rb); 58 59 static int check_and_add_serial(struct md_rdev *rdev, sector_t lo, sector_t hi) 60 { 61 struct serial_info *si; 62 unsigned long flags; 63 int ret = 0; 64 struct mddev *mddev = rdev->mddev; 65 int idx = sector_to_idx(lo); 66 struct serial_in_rdev *serial = &rdev->serial[idx]; 67 68 si = mempool_alloc(mddev->serial_info_pool, GFP_NOIO); 69 70 spin_lock_irqsave(&serial->serial_lock, flags); 71 /* collision happened */ 72 if (raid1_rb_iter_first(&serial->serial_rb, lo, hi)) 73 ret = -EBUSY; 74 if (!ret) { 75 si->start = lo; 76 si->last = hi; 77 raid1_rb_insert(si, &serial->serial_rb); 78 } else 79 mempool_free(si, mddev->serial_info_pool); 80 spin_unlock_irqrestore(&serial->serial_lock, flags); 81 82 return ret; 83 } 84 85 static void remove_serial(struct md_rdev *rdev, sector_t lo, sector_t hi) 86 { 87 struct serial_info *si; 88 unsigned long flags; 89 int found = 0; 90 struct mddev *mddev = rdev->mddev; 91 int idx = sector_to_idx(lo); 92 struct serial_in_rdev *serial = &rdev->serial[idx]; 93 94 spin_lock_irqsave(&serial->serial_lock, flags); 95 for (si = raid1_rb_iter_first(&serial->serial_rb, lo, hi); 96 si; si = raid1_rb_iter_next(si, lo, hi)) { 97 if (si->start == lo && si->last == hi) { 98 raid1_rb_remove(si, &serial->serial_rb); 99 mempool_free(si, mddev->serial_info_pool); 100 found = 1; 101 break; 102 } 103 } 104 if (!found) 105 WARN(1, "The write IO is not recorded for serialization\n"); 106 spin_unlock_irqrestore(&serial->serial_lock, flags); 107 wake_up(&serial->serial_io_wait); 108 } 109 110 /* 111 * for resync bio, r1bio pointer can be retrieved from the per-bio 112 * 'struct resync_pages'. 
113 */ 114 static inline struct r1bio *get_resync_r1bio(struct bio *bio) 115 { 116 return get_resync_pages(bio)->raid_bio; 117 } 118 119 static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) 120 { 121 struct pool_info *pi = data; 122 int size = offsetof(struct r1bio, bios[pi->raid_disks]); 123 124 /* allocate a r1bio with room for raid_disks entries in the bios array */ 125 return kzalloc(size, gfp_flags); 126 } 127 128 #define RESYNC_DEPTH 32 129 #define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9) 130 #define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH) 131 #define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9) 132 #define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW) 133 #define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9) 134 135 static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) 136 { 137 struct pool_info *pi = data; 138 struct r1bio *r1_bio; 139 struct bio *bio; 140 int need_pages; 141 int j; 142 struct resync_pages *rps; 143 144 r1_bio = r1bio_pool_alloc(gfp_flags, pi); 145 if (!r1_bio) 146 return NULL; 147 148 rps = kmalloc_array(pi->raid_disks, sizeof(struct resync_pages), 149 gfp_flags); 150 if (!rps) 151 goto out_free_r1bio; 152 153 /* 154 * Allocate bios : 1 for reading, n-1 for writing 155 */ 156 for (j = pi->raid_disks ; j-- ; ) { 157 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES); 158 if (!bio) 159 goto out_free_bio; 160 r1_bio->bios[j] = bio; 161 } 162 /* 163 * Allocate RESYNC_PAGES data pages and attach them to 164 * the first bio. 165 * If this is a user-requested check/repair, allocate 166 * RESYNC_PAGES for each bio. 167 */ 168 if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) 169 need_pages = pi->raid_disks; 170 else 171 need_pages = 1; 172 for (j = 0; j < pi->raid_disks; j++) { 173 struct resync_pages *rp = &rps[j]; 174 175 bio = r1_bio->bios[j]; 176 177 if (j < need_pages) { 178 if (resync_alloc_pages(rp, gfp_flags)) 179 goto out_free_pages; 180 } else { 181 memcpy(rp, &rps[0], sizeof(*rp)); 182 resync_get_all_pages(rp); 183 } 184 185 rp->raid_bio = r1_bio; 186 bio->bi_private = rp; 187 } 188 189 r1_bio->master_bio = NULL; 190 191 return r1_bio; 192 193 out_free_pages: 194 while (--j >= 0) 195 resync_free_pages(&rps[j]); 196 197 out_free_bio: 198 while (++j < pi->raid_disks) 199 bio_put(r1_bio->bios[j]); 200 kfree(rps); 201 202 out_free_r1bio: 203 rbio_pool_free(r1_bio, data); 204 return NULL; 205 } 206 207 static void r1buf_pool_free(void *__r1_bio, void *data) 208 { 209 struct pool_info *pi = data; 210 int i; 211 struct r1bio *r1bio = __r1_bio; 212 struct resync_pages *rp = NULL; 213 214 for (i = pi->raid_disks; i--; ) { 215 rp = get_resync_pages(r1bio->bios[i]); 216 resync_free_pages(rp); 217 bio_put(r1bio->bios[i]); 218 } 219 220 /* resync pages array stored in the 1st bio's .bi_private */ 221 kfree(rp); 222 223 rbio_pool_free(r1bio, data); 224 } 225 226 static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio) 227 { 228 int i; 229 230 for (i = 0; i < conf->raid_disks * 2; i++) { 231 struct bio **bio = r1_bio->bios + i; 232 if (!BIO_SPECIAL(*bio)) 233 bio_put(*bio); 234 *bio = NULL; 235 } 236 } 237 238 static void free_r1bio(struct r1bio *r1_bio) 239 { 240 struct r1conf *conf = r1_bio->mddev->private; 241 242 put_all_bios(conf, r1_bio); 243 mempool_free(r1_bio, &conf->r1bio_pool); 244 } 245 246 static void put_buf(struct r1bio *r1_bio) 247 { 248 struct r1conf *conf = r1_bio->mddev->private; 249 sector_t sect = r1_bio->sector; 250 int i; 251 252 for (i = 0; i < conf->raid_disks * 2; i++) { 253 struct bio *bio = r1_bio->bios[i]; 
254 if (bio->bi_end_io) 255 rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev); 256 } 257 258 mempool_free(r1_bio, &conf->r1buf_pool); 259 260 lower_barrier(conf, sect); 261 } 262 263 static void reschedule_retry(struct r1bio *r1_bio) 264 { 265 unsigned long flags; 266 struct mddev *mddev = r1_bio->mddev; 267 struct r1conf *conf = mddev->private; 268 int idx; 269 270 idx = sector_to_idx(r1_bio->sector); 271 spin_lock_irqsave(&conf->device_lock, flags); 272 list_add(&r1_bio->retry_list, &conf->retry_list); 273 atomic_inc(&conf->nr_queued[idx]); 274 spin_unlock_irqrestore(&conf->device_lock, flags); 275 276 wake_up(&conf->wait_barrier); 277 md_wakeup_thread(mddev->thread); 278 } 279 280 /* 281 * raid_end_bio_io() is called when we have finished servicing a mirrored 282 * operation and are ready to return a success/failure code to the buffer 283 * cache layer. 284 */ 285 static void call_bio_endio(struct r1bio *r1_bio) 286 { 287 struct bio *bio = r1_bio->master_bio; 288 struct r1conf *conf = r1_bio->mddev->private; 289 290 if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) 291 bio->bi_status = BLK_STS_IOERR; 292 293 bio_endio(bio); 294 /* 295 * Wake up any possible resync thread that waits for the device 296 * to go idle. 297 */ 298 allow_barrier(conf, r1_bio->sector); 299 } 300 301 static void raid_end_bio_io(struct r1bio *r1_bio) 302 { 303 struct bio *bio = r1_bio->master_bio; 304 305 /* if nobody has done the final endio yet, do it now */ 306 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { 307 pr_debug("raid1: sync end %s on sectors %llu-%llu\n", 308 (bio_data_dir(bio) == WRITE) ? "write" : "read", 309 (unsigned long long) bio->bi_iter.bi_sector, 310 (unsigned long long) bio_end_sector(bio) - 1); 311 312 call_bio_endio(r1_bio); 313 } 314 free_r1bio(r1_bio); 315 } 316 317 /* 318 * Update disk head position estimator based on IRQ completion info. 319 */ 320 static inline void update_head_pos(int disk, struct r1bio *r1_bio) 321 { 322 struct r1conf *conf = r1_bio->mddev->private; 323 324 conf->mirrors[disk].head_position = 325 r1_bio->sector + (r1_bio->sectors); 326 } 327 328 /* 329 * Find the disk number which triggered given bio 330 */ 331 static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio) 332 { 333 int mirror; 334 struct r1conf *conf = r1_bio->mddev->private; 335 int raid_disks = conf->raid_disks; 336 337 for (mirror = 0; mirror < raid_disks * 2; mirror++) 338 if (r1_bio->bios[mirror] == bio) 339 break; 340 341 BUG_ON(mirror == raid_disks * 2); 342 update_head_pos(mirror, r1_bio); 343 344 return mirror; 345 } 346 347 static void raid1_end_read_request(struct bio *bio) 348 { 349 int uptodate = !bio->bi_status; 350 struct r1bio *r1_bio = bio->bi_private; 351 struct r1conf *conf = r1_bio->mddev->private; 352 struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev; 353 354 /* 355 * this branch is our 'one mirror IO has finished' event handler: 356 */ 357 update_head_pos(r1_bio->read_disk, r1_bio); 358 359 if (uptodate) 360 set_bit(R1BIO_Uptodate, &r1_bio->state); 361 else if (test_bit(FailFast, &rdev->flags) && 362 test_bit(R1BIO_FailFast, &r1_bio->state)) 363 /* This was a fail-fast read so we definitely 364 * want to retry */ 365 ; 366 else { 367 /* If all other devices have failed, we want to return 368 * the error upwards rather than fail the last device. 
369 * Here we redefine "uptodate" to mean "Don't want to retry" 370 */ 371 unsigned long flags; 372 spin_lock_irqsave(&conf->device_lock, flags); 373 if (r1_bio->mddev->degraded == conf->raid_disks || 374 (r1_bio->mddev->degraded == conf->raid_disks-1 && 375 test_bit(In_sync, &rdev->flags))) 376 uptodate = 1; 377 spin_unlock_irqrestore(&conf->device_lock, flags); 378 } 379 380 if (uptodate) { 381 raid_end_bio_io(r1_bio); 382 rdev_dec_pending(rdev, conf->mddev); 383 } else { 384 /* 385 * oops, read error: 386 */ 387 char b[BDEVNAME_SIZE]; 388 pr_err_ratelimited("md/raid1:%s: %s: rescheduling sector %llu\n", 389 mdname(conf->mddev), 390 bdevname(rdev->bdev, b), 391 (unsigned long long)r1_bio->sector); 392 set_bit(R1BIO_ReadError, &r1_bio->state); 393 reschedule_retry(r1_bio); 394 /* don't drop the reference on read_disk yet */ 395 } 396 } 397 398 static void close_write(struct r1bio *r1_bio) 399 { 400 /* it really is the end of this request */ 401 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { 402 bio_free_pages(r1_bio->behind_master_bio); 403 bio_put(r1_bio->behind_master_bio); 404 r1_bio->behind_master_bio = NULL; 405 } 406 /* clear the bitmap if all writes complete successfully */ 407 md_bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector, 408 r1_bio->sectors, 409 !test_bit(R1BIO_Degraded, &r1_bio->state), 410 test_bit(R1BIO_BehindIO, &r1_bio->state)); 411 md_write_end(r1_bio->mddev); 412 } 413 414 static void r1_bio_write_done(struct r1bio *r1_bio) 415 { 416 if (!atomic_dec_and_test(&r1_bio->remaining)) 417 return; 418 419 if (test_bit(R1BIO_WriteError, &r1_bio->state)) 420 reschedule_retry(r1_bio); 421 else { 422 close_write(r1_bio); 423 if (test_bit(R1BIO_MadeGood, &r1_bio->state)) 424 reschedule_retry(r1_bio); 425 else 426 raid_end_bio_io(r1_bio); 427 } 428 } 429 430 static void raid1_end_write_request(struct bio *bio) 431 { 432 struct r1bio *r1_bio = bio->bi_private; 433 int behind = test_bit(R1BIO_BehindIO, &r1_bio->state); 434 struct r1conf *conf = r1_bio->mddev->private; 435 struct bio *to_put = NULL; 436 int mirror = find_bio_disk(r1_bio, bio); 437 struct md_rdev *rdev = conf->mirrors[mirror].rdev; 438 bool discard_error; 439 sector_t lo = r1_bio->sector; 440 sector_t hi = r1_bio->sector + r1_bio->sectors; 441 442 discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD; 443 444 /* 445 * 'one mirror IO has finished' event handler: 446 */ 447 if (bio->bi_status && !discard_error) { 448 set_bit(WriteErrorSeen, &rdev->flags); 449 if (!test_and_set_bit(WantReplacement, &rdev->flags)) 450 set_bit(MD_RECOVERY_NEEDED, & 451 conf->mddev->recovery); 452 453 if (test_bit(FailFast, &rdev->flags) && 454 (bio->bi_opf & MD_FAILFAST) && 455 /* We never try FailFast to WriteMostly devices */ 456 !test_bit(WriteMostly, &rdev->flags)) { 457 md_error(r1_bio->mddev, rdev); 458 } 459 460 /* 461 * When the device is faulty, it is not necessary to 462 * handle write error. 463 * For failfast, this is the only remaining device, 464 * We need to retry the write without FailFast. 465 */ 466 if (!test_bit(Faulty, &rdev->flags)) 467 set_bit(R1BIO_WriteError, &r1_bio->state); 468 else { 469 /* Finished with this branch */ 470 r1_bio->bios[mirror] = NULL; 471 to_put = bio; 472 } 473 } else { 474 /* 475 * Set R1BIO_Uptodate in our master bio, so that we 476 * will return a good error code for to the higher 477 * levels even if IO on some other mirrored buffer 478 * fails. 479 * 480 * The 'master' represents the composite IO operation 481 * to user-side. 
So if something waits for IO, then it 482 * will wait for the 'master' bio. 483 */ 484 sector_t first_bad; 485 int bad_sectors; 486 487 r1_bio->bios[mirror] = NULL; 488 to_put = bio; 489 /* 490 * Do not set R1BIO_Uptodate if the current device is 491 * rebuilding or Faulty. This is because we cannot use 492 * such device for properly reading the data back (we could 493 * potentially use it, if the current write would have felt 494 * before rdev->recovery_offset, but for simplicity we don't 495 * check this here. 496 */ 497 if (test_bit(In_sync, &rdev->flags) && 498 !test_bit(Faulty, &rdev->flags)) 499 set_bit(R1BIO_Uptodate, &r1_bio->state); 500 501 /* Maybe we can clear some bad blocks. */ 502 if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors, 503 &first_bad, &bad_sectors) && !discard_error) { 504 r1_bio->bios[mirror] = IO_MADE_GOOD; 505 set_bit(R1BIO_MadeGood, &r1_bio->state); 506 } 507 } 508 509 if (behind) { 510 if (test_bit(CollisionCheck, &rdev->flags)) 511 remove_serial(rdev, lo, hi); 512 if (test_bit(WriteMostly, &rdev->flags)) 513 atomic_dec(&r1_bio->behind_remaining); 514 515 /* 516 * In behind mode, we ACK the master bio once the I/O 517 * has safely reached all non-writemostly 518 * disks. Setting the Returned bit ensures that this 519 * gets done only once -- we don't ever want to return 520 * -EIO here, instead we'll wait 521 */ 522 if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) && 523 test_bit(R1BIO_Uptodate, &r1_bio->state)) { 524 /* Maybe we can return now */ 525 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { 526 struct bio *mbio = r1_bio->master_bio; 527 pr_debug("raid1: behind end write sectors" 528 " %llu-%llu\n", 529 (unsigned long long) mbio->bi_iter.bi_sector, 530 (unsigned long long) bio_end_sector(mbio) - 1); 531 call_bio_endio(r1_bio); 532 } 533 } 534 } else if (rdev->mddev->serialize_policy) 535 remove_serial(rdev, lo, hi); 536 if (r1_bio->bios[mirror] == NULL) 537 rdev_dec_pending(rdev, conf->mddev); 538 539 /* 540 * Let's see if all mirrored write operations have finished 541 * already. 542 */ 543 r1_bio_write_done(r1_bio); 544 545 if (to_put) 546 bio_put(to_put); 547 } 548 549 static sector_t align_to_barrier_unit_end(sector_t start_sector, 550 sector_t sectors) 551 { 552 sector_t len; 553 554 WARN_ON(sectors == 0); 555 /* 556 * len is the number of sectors from start_sector to end of the 557 * barrier unit which start_sector belongs to. 558 */ 559 len = round_up(start_sector + 1, BARRIER_UNIT_SECTOR_SIZE) - 560 start_sector; 561 562 if (len > sectors) 563 len = sectors; 564 565 return len; 566 } 567 568 /* 569 * This routine returns the disk from which the requested read should 570 * be done. There is a per-array 'next expected sequential IO' sector 571 * number - if this matches on the next IO then we use the last disk. 572 * There is also a per-disk 'last know head position' sector that is 573 * maintained from IRQ contexts, both the normal and the resync IO 574 * completion handlers update this position correctly. If there is no 575 * perfect sequential match then we pick the disk whose head is closest. 576 * 577 * If there are 2 mirrors in the same 2 devices, performance degrades 578 * because position is mirror, not device based. 579 * 580 * The rdev for the device selected will have nr_pending incremented. 
581 */ 582 static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors) 583 { 584 const sector_t this_sector = r1_bio->sector; 585 int sectors; 586 int best_good_sectors; 587 int best_disk, best_dist_disk, best_pending_disk; 588 int has_nonrot_disk; 589 int disk; 590 sector_t best_dist; 591 unsigned int min_pending; 592 struct md_rdev *rdev; 593 int choose_first; 594 int choose_next_idle; 595 596 rcu_read_lock(); 597 /* 598 * Check if we can balance. We can balance on the whole 599 * device if no resync is going on, or below the resync window. 600 * We take the first readable disk when above the resync window. 601 */ 602 retry: 603 sectors = r1_bio->sectors; 604 best_disk = -1; 605 best_dist_disk = -1; 606 best_dist = MaxSector; 607 best_pending_disk = -1; 608 min_pending = UINT_MAX; 609 best_good_sectors = 0; 610 has_nonrot_disk = 0; 611 choose_next_idle = 0; 612 clear_bit(R1BIO_FailFast, &r1_bio->state); 613 614 if ((conf->mddev->recovery_cp < this_sector + sectors) || 615 (mddev_is_clustered(conf->mddev) && 616 md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector, 617 this_sector + sectors))) 618 choose_first = 1; 619 else 620 choose_first = 0; 621 622 for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) { 623 sector_t dist; 624 sector_t first_bad; 625 int bad_sectors; 626 unsigned int pending; 627 bool nonrot; 628 629 rdev = rcu_dereference(conf->mirrors[disk].rdev); 630 if (r1_bio->bios[disk] == IO_BLOCKED 631 || rdev == NULL 632 || test_bit(Faulty, &rdev->flags)) 633 continue; 634 if (!test_bit(In_sync, &rdev->flags) && 635 rdev->recovery_offset < this_sector + sectors) 636 continue; 637 if (test_bit(WriteMostly, &rdev->flags)) { 638 /* Don't balance among write-mostly, just 639 * use the first as a last resort */ 640 if (best_dist_disk < 0) { 641 if (is_badblock(rdev, this_sector, sectors, 642 &first_bad, &bad_sectors)) { 643 if (first_bad <= this_sector) 644 /* Cannot use this */ 645 continue; 646 best_good_sectors = first_bad - this_sector; 647 } else 648 best_good_sectors = sectors; 649 best_dist_disk = disk; 650 best_pending_disk = disk; 651 } 652 continue; 653 } 654 /* This is a reasonable device to use. It might 655 * even be best. 656 */ 657 if (is_badblock(rdev, this_sector, sectors, 658 &first_bad, &bad_sectors)) { 659 if (best_dist < MaxSector) 660 /* already have a better device */ 661 continue; 662 if (first_bad <= this_sector) { 663 /* cannot read here. If this is the 'primary' 664 * device, then we must not read beyond 665 * bad_sectors from another device.. 
666 */ 667 bad_sectors -= (this_sector - first_bad); 668 if (choose_first && sectors > bad_sectors) 669 sectors = bad_sectors; 670 if (best_good_sectors > sectors) 671 best_good_sectors = sectors; 672 673 } else { 674 sector_t good_sectors = first_bad - this_sector; 675 if (good_sectors > best_good_sectors) { 676 best_good_sectors = good_sectors; 677 best_disk = disk; 678 } 679 if (choose_first) 680 break; 681 } 682 continue; 683 } else { 684 if ((sectors > best_good_sectors) && (best_disk >= 0)) 685 best_disk = -1; 686 best_good_sectors = sectors; 687 } 688 689 if (best_disk >= 0) 690 /* At least two disks to choose from so failfast is OK */ 691 set_bit(R1BIO_FailFast, &r1_bio->state); 692 693 nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev)); 694 has_nonrot_disk |= nonrot; 695 pending = atomic_read(&rdev->nr_pending); 696 dist = abs(this_sector - conf->mirrors[disk].head_position); 697 if (choose_first) { 698 best_disk = disk; 699 break; 700 } 701 /* Don't change to another disk for sequential reads */ 702 if (conf->mirrors[disk].next_seq_sect == this_sector 703 || dist == 0) { 704 int opt_iosize = bdev_io_opt(rdev->bdev) >> 9; 705 struct raid1_info *mirror = &conf->mirrors[disk]; 706 707 best_disk = disk; 708 /* 709 * If buffered sequential IO size exceeds optimal 710 * iosize, check if there is idle disk. If yes, choose 711 * the idle disk. read_balance could already choose an 712 * idle disk before noticing it's a sequential IO in 713 * this disk. This doesn't matter because this disk 714 * will idle, next time it will be utilized after the 715 * first disk has IO size exceeds optimal iosize. In 716 * this way, iosize of the first disk will be optimal 717 * iosize at least. iosize of the second disk might be 718 * small, but not a big deal since when the second disk 719 * starts IO, the first disk is likely still busy. 720 */ 721 if (nonrot && opt_iosize > 0 && 722 mirror->seq_start != MaxSector && 723 mirror->next_seq_sect > opt_iosize && 724 mirror->next_seq_sect - opt_iosize >= 725 mirror->seq_start) { 726 choose_next_idle = 1; 727 continue; 728 } 729 break; 730 } 731 732 if (choose_next_idle) 733 continue; 734 735 if (min_pending > pending) { 736 min_pending = pending; 737 best_pending_disk = disk; 738 } 739 740 if (dist < best_dist) { 741 best_dist = dist; 742 best_dist_disk = disk; 743 } 744 } 745 746 /* 747 * If all disks are rotational, choose the closest disk. If any disk is 748 * non-rotational, choose the disk with less pending request even the 749 * disk is rotational, which might/might not be optimal for raids with 750 * mixed ratation/non-rotational disks depending on workload. 
751 */ 752 if (best_disk == -1) { 753 if (has_nonrot_disk || min_pending == 0) 754 best_disk = best_pending_disk; 755 else 756 best_disk = best_dist_disk; 757 } 758 759 if (best_disk >= 0) { 760 rdev = rcu_dereference(conf->mirrors[best_disk].rdev); 761 if (!rdev) 762 goto retry; 763 atomic_inc(&rdev->nr_pending); 764 sectors = best_good_sectors; 765 766 if (conf->mirrors[best_disk].next_seq_sect != this_sector) 767 conf->mirrors[best_disk].seq_start = this_sector; 768 769 conf->mirrors[best_disk].next_seq_sect = this_sector + sectors; 770 } 771 rcu_read_unlock(); 772 *max_sectors = sectors; 773 774 return best_disk; 775 } 776 777 static int raid1_congested(struct mddev *mddev, int bits) 778 { 779 struct r1conf *conf = mddev->private; 780 int i, ret = 0; 781 782 if ((bits & (1 << WB_async_congested)) && 783 conf->pending_count >= max_queued_requests) 784 return 1; 785 786 rcu_read_lock(); 787 for (i = 0; i < conf->raid_disks * 2; i++) { 788 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); 789 if (rdev && !test_bit(Faulty, &rdev->flags)) { 790 struct request_queue *q = bdev_get_queue(rdev->bdev); 791 792 BUG_ON(!q); 793 794 /* Note the '|| 1' - when read_balance prefers 795 * non-congested targets, it can be removed 796 */ 797 if ((bits & (1 << WB_async_congested)) || 1) 798 ret |= bdi_congested(q->backing_dev_info, bits); 799 else 800 ret &= bdi_congested(q->backing_dev_info, bits); 801 } 802 } 803 rcu_read_unlock(); 804 return ret; 805 } 806 807 static void flush_bio_list(struct r1conf *conf, struct bio *bio) 808 { 809 /* flush any pending bitmap writes to disk before proceeding w/ I/O */ 810 md_bitmap_unplug(conf->mddev->bitmap); 811 wake_up(&conf->wait_barrier); 812 813 while (bio) { /* submit pending writes */ 814 struct bio *next = bio->bi_next; 815 struct md_rdev *rdev = (void *)bio->bi_disk; 816 bio->bi_next = NULL; 817 bio_set_dev(bio, rdev->bdev); 818 if (test_bit(Faulty, &rdev->flags)) { 819 bio_io_error(bio); 820 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && 821 !blk_queue_discard(bio->bi_disk->queue))) 822 /* Just ignore it */ 823 bio_endio(bio); 824 else 825 generic_make_request(bio); 826 bio = next; 827 cond_resched(); 828 } 829 } 830 831 static void flush_pending_writes(struct r1conf *conf) 832 { 833 /* Any writes that have been queued but are awaiting 834 * bitmap updates get flushed here. 835 */ 836 spin_lock_irq(&conf->device_lock); 837 838 if (conf->pending_bio_list.head) { 839 struct blk_plug plug; 840 struct bio *bio; 841 842 bio = bio_list_get(&conf->pending_bio_list); 843 conf->pending_count = 0; 844 spin_unlock_irq(&conf->device_lock); 845 846 /* 847 * As this is called in a wait_event() loop (see freeze_array), 848 * current->state might be TASK_UNINTERRUPTIBLE which will 849 * cause a warning when we prepare to wait again. As it is 850 * rare that this path is taken, it is perfectly safe to force 851 * us to go around the wait_event() loop again, so the warning 852 * is a false-positive. Silence the warning by resetting 853 * thread state 854 */ 855 __set_current_state(TASK_RUNNING); 856 blk_start_plug(&plug); 857 flush_bio_list(conf, bio); 858 blk_finish_plug(&plug); 859 } else 860 spin_unlock_irq(&conf->device_lock); 861 } 862 863 /* Barriers.... 864 * Sometimes we need to suspend IO while we do something else, 865 * either some resync/recovery, or reconfigure the array. 866 * To do this we raise a 'barrier'. 
867 * The 'barrier' is a counter that can be raised multiple times 868 * to count how many activities are happening which preclude 869 * normal IO. 870 * We can only raise the barrier if there is no pending IO. 871 * i.e. if nr_pending == 0. 872 * We choose only to raise the barrier if no-one is waiting for the 873 * barrier to go down. This means that as soon as an IO request 874 * is ready, no other operations which require a barrier will start 875 * until the IO request has had a chance. 876 * 877 * So: regular IO calls 'wait_barrier'. When that returns there 878 * is no backgroup IO happening, It must arrange to call 879 * allow_barrier when it has finished its IO. 880 * backgroup IO calls must call raise_barrier. Once that returns 881 * there is no normal IO happeing. It must arrange to call 882 * lower_barrier when the particular background IO completes. 883 * 884 * If resync/recovery is interrupted, returns -EINTR; 885 * Otherwise, returns 0. 886 */ 887 static int raise_barrier(struct r1conf *conf, sector_t sector_nr) 888 { 889 int idx = sector_to_idx(sector_nr); 890 891 spin_lock_irq(&conf->resync_lock); 892 893 /* Wait until no block IO is waiting */ 894 wait_event_lock_irq(conf->wait_barrier, 895 !atomic_read(&conf->nr_waiting[idx]), 896 conf->resync_lock); 897 898 /* block any new IO from starting */ 899 atomic_inc(&conf->barrier[idx]); 900 /* 901 * In raise_barrier() we firstly increase conf->barrier[idx] then 902 * check conf->nr_pending[idx]. In _wait_barrier() we firstly 903 * increase conf->nr_pending[idx] then check conf->barrier[idx]. 904 * A memory barrier here to make sure conf->nr_pending[idx] won't 905 * be fetched before conf->barrier[idx] is increased. Otherwise 906 * there will be a race between raise_barrier() and _wait_barrier(). 907 */ 908 smp_mb__after_atomic(); 909 910 /* For these conditions we must wait: 911 * A: while the array is in frozen state 912 * B: while conf->nr_pending[idx] is not 0, meaning regular I/O 913 * existing in corresponding I/O barrier bucket. 914 * C: while conf->barrier[idx] >= RESYNC_DEPTH, meaning reaches 915 * max resync count which allowed on current I/O barrier bucket. 916 */ 917 wait_event_lock_irq(conf->wait_barrier, 918 (!conf->array_frozen && 919 !atomic_read(&conf->nr_pending[idx]) && 920 atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) || 921 test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery), 922 conf->resync_lock); 923 924 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { 925 atomic_dec(&conf->barrier[idx]); 926 spin_unlock_irq(&conf->resync_lock); 927 wake_up(&conf->wait_barrier); 928 return -EINTR; 929 } 930 931 atomic_inc(&conf->nr_sync_pending); 932 spin_unlock_irq(&conf->resync_lock); 933 934 return 0; 935 } 936 937 static void lower_barrier(struct r1conf *conf, sector_t sector_nr) 938 { 939 int idx = sector_to_idx(sector_nr); 940 941 BUG_ON(atomic_read(&conf->barrier[idx]) <= 0); 942 943 atomic_dec(&conf->barrier[idx]); 944 atomic_dec(&conf->nr_sync_pending); 945 wake_up(&conf->wait_barrier); 946 } 947 948 static void _wait_barrier(struct r1conf *conf, int idx) 949 { 950 /* 951 * We need to increase conf->nr_pending[idx] very early here, 952 * then raise_barrier() can be blocked when it waits for 953 * conf->nr_pending[idx] to be 0. Then we can avoid holding 954 * conf->resync_lock when there is no barrier raised in same 955 * barrier unit bucket. Also if the array is frozen, I/O 956 * should be blocked until array is unfrozen. 
957 */ 958 atomic_inc(&conf->nr_pending[idx]); 959 /* 960 * In _wait_barrier() we firstly increase conf->nr_pending[idx], then 961 * check conf->barrier[idx]. In raise_barrier() we firstly increase 962 * conf->barrier[idx], then check conf->nr_pending[idx]. A memory 963 * barrier is necessary here to make sure conf->barrier[idx] won't be 964 * fetched before conf->nr_pending[idx] is increased. Otherwise there 965 * will be a race between _wait_barrier() and raise_barrier(). 966 */ 967 smp_mb__after_atomic(); 968 969 /* 970 * Don't worry about checking two atomic_t variables at same time 971 * here. If during we check conf->barrier[idx], the array is 972 * frozen (conf->array_frozen is 1), and chonf->barrier[idx] is 973 * 0, it is safe to return and make the I/O continue. Because the 974 * array is frozen, all I/O returned here will eventually complete 975 * or be queued, no race will happen. See code comment in 976 * frozen_array(). 977 */ 978 if (!READ_ONCE(conf->array_frozen) && 979 !atomic_read(&conf->barrier[idx])) 980 return; 981 982 /* 983 * After holding conf->resync_lock, conf->nr_pending[idx] 984 * should be decreased before waiting for barrier to drop. 985 * Otherwise, we may encounter a race condition because 986 * raise_barrer() might be waiting for conf->nr_pending[idx] 987 * to be 0 at same time. 988 */ 989 spin_lock_irq(&conf->resync_lock); 990 atomic_inc(&conf->nr_waiting[idx]); 991 atomic_dec(&conf->nr_pending[idx]); 992 /* 993 * In case freeze_array() is waiting for 994 * get_unqueued_pending() == extra 995 */ 996 wake_up(&conf->wait_barrier); 997 /* Wait for the barrier in same barrier unit bucket to drop. */ 998 wait_event_lock_irq(conf->wait_barrier, 999 !conf->array_frozen && 1000 !atomic_read(&conf->barrier[idx]), 1001 conf->resync_lock); 1002 atomic_inc(&conf->nr_pending[idx]); 1003 atomic_dec(&conf->nr_waiting[idx]); 1004 spin_unlock_irq(&conf->resync_lock); 1005 } 1006 1007 static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr) 1008 { 1009 int idx = sector_to_idx(sector_nr); 1010 1011 /* 1012 * Very similar to _wait_barrier(). The difference is, for read 1013 * I/O we don't need wait for sync I/O, but if the whole array 1014 * is frozen, the read I/O still has to wait until the array is 1015 * unfrozen. Since there is no ordering requirement with 1016 * conf->barrier[idx] here, memory barrier is unnecessary as well. 
1017 */ 1018 atomic_inc(&conf->nr_pending[idx]); 1019 1020 if (!READ_ONCE(conf->array_frozen)) 1021 return; 1022 1023 spin_lock_irq(&conf->resync_lock); 1024 atomic_inc(&conf->nr_waiting[idx]); 1025 atomic_dec(&conf->nr_pending[idx]); 1026 /* 1027 * In case freeze_array() is waiting for 1028 * get_unqueued_pending() == extra 1029 */ 1030 wake_up(&conf->wait_barrier); 1031 /* Wait for array to be unfrozen */ 1032 wait_event_lock_irq(conf->wait_barrier, 1033 !conf->array_frozen, 1034 conf->resync_lock); 1035 atomic_inc(&conf->nr_pending[idx]); 1036 atomic_dec(&conf->nr_waiting[idx]); 1037 spin_unlock_irq(&conf->resync_lock); 1038 } 1039 1040 static void wait_barrier(struct r1conf *conf, sector_t sector_nr) 1041 { 1042 int idx = sector_to_idx(sector_nr); 1043 1044 _wait_barrier(conf, idx); 1045 } 1046 1047 static void _allow_barrier(struct r1conf *conf, int idx) 1048 { 1049 atomic_dec(&conf->nr_pending[idx]); 1050 wake_up(&conf->wait_barrier); 1051 } 1052 1053 static void allow_barrier(struct r1conf *conf, sector_t sector_nr) 1054 { 1055 int idx = sector_to_idx(sector_nr); 1056 1057 _allow_barrier(conf, idx); 1058 } 1059 1060 /* conf->resync_lock should be held */ 1061 static int get_unqueued_pending(struct r1conf *conf) 1062 { 1063 int idx, ret; 1064 1065 ret = atomic_read(&conf->nr_sync_pending); 1066 for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) 1067 ret += atomic_read(&conf->nr_pending[idx]) - 1068 atomic_read(&conf->nr_queued[idx]); 1069 1070 return ret; 1071 } 1072 1073 static void freeze_array(struct r1conf *conf, int extra) 1074 { 1075 /* Stop sync I/O and normal I/O and wait for everything to 1076 * go quiet. 1077 * This is called in two situations: 1078 * 1) management command handlers (reshape, remove disk, quiesce). 1079 * 2) one normal I/O request failed. 1080 1081 * After array_frozen is set to 1, new sync IO will be blocked at 1082 * raise_barrier(), and new normal I/O will blocked at _wait_barrier() 1083 * or wait_read_barrier(). The flying I/Os will either complete or be 1084 * queued. When everything goes quite, there are only queued I/Os left. 1085 1086 * Every flying I/O contributes to a conf->nr_pending[idx], idx is the 1087 * barrier bucket index which this I/O request hits. When all sync and 1088 * normal I/O are queued, sum of all conf->nr_pending[] will match sum 1089 * of all conf->nr_queued[]. But normal I/O failure is an exception, 1090 * in handle_read_error(), we may call freeze_array() before trying to 1091 * fix the read error. In this case, the error read I/O is not queued, 1092 * so get_unqueued_pending() == 1. 1093 * 1094 * Therefore before this function returns, we need to wait until 1095 * get_unqueued_pendings(conf) gets equal to extra. For 1096 * normal I/O context, extra is 1, in rested situations extra is 0. 
1097 */ 1098 spin_lock_irq(&conf->resync_lock); 1099 conf->array_frozen = 1; 1100 raid1_log(conf->mddev, "wait freeze"); 1101 wait_event_lock_irq_cmd( 1102 conf->wait_barrier, 1103 get_unqueued_pending(conf) == extra, 1104 conf->resync_lock, 1105 flush_pending_writes(conf)); 1106 spin_unlock_irq(&conf->resync_lock); 1107 } 1108 static void unfreeze_array(struct r1conf *conf) 1109 { 1110 /* reverse the effect of the freeze */ 1111 spin_lock_irq(&conf->resync_lock); 1112 conf->array_frozen = 0; 1113 spin_unlock_irq(&conf->resync_lock); 1114 wake_up(&conf->wait_barrier); 1115 } 1116 1117 static void alloc_behind_master_bio(struct r1bio *r1_bio, 1118 struct bio *bio) 1119 { 1120 int size = bio->bi_iter.bi_size; 1121 unsigned vcnt = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 1122 int i = 0; 1123 struct bio *behind_bio = NULL; 1124 1125 behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev); 1126 if (!behind_bio) 1127 return; 1128 1129 /* discard op, we don't support writezero/writesame yet */ 1130 if (!bio_has_data(bio)) { 1131 behind_bio->bi_iter.bi_size = size; 1132 goto skip_copy; 1133 } 1134 1135 behind_bio->bi_write_hint = bio->bi_write_hint; 1136 1137 while (i < vcnt && size) { 1138 struct page *page; 1139 int len = min_t(int, PAGE_SIZE, size); 1140 1141 page = alloc_page(GFP_NOIO); 1142 if (unlikely(!page)) 1143 goto free_pages; 1144 1145 bio_add_page(behind_bio, page, len, 0); 1146 1147 size -= len; 1148 i++; 1149 } 1150 1151 bio_copy_data(behind_bio, bio); 1152 skip_copy: 1153 r1_bio->behind_master_bio = behind_bio; 1154 set_bit(R1BIO_BehindIO, &r1_bio->state); 1155 1156 return; 1157 1158 free_pages: 1159 pr_debug("%dB behind alloc failed, doing sync I/O\n", 1160 bio->bi_iter.bi_size); 1161 bio_free_pages(behind_bio); 1162 bio_put(behind_bio); 1163 } 1164 1165 struct raid1_plug_cb { 1166 struct blk_plug_cb cb; 1167 struct bio_list pending; 1168 int pending_cnt; 1169 }; 1170 1171 static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule) 1172 { 1173 struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb, 1174 cb); 1175 struct mddev *mddev = plug->cb.data; 1176 struct r1conf *conf = mddev->private; 1177 struct bio *bio; 1178 1179 if (from_schedule || current->bio_list) { 1180 spin_lock_irq(&conf->device_lock); 1181 bio_list_merge(&conf->pending_bio_list, &plug->pending); 1182 conf->pending_count += plug->pending_cnt; 1183 spin_unlock_irq(&conf->device_lock); 1184 wake_up(&conf->wait_barrier); 1185 md_wakeup_thread(mddev->thread); 1186 kfree(plug); 1187 return; 1188 } 1189 1190 /* we aren't scheduling, so we can do the write-out directly. 
*/ 1191 bio = bio_list_get(&plug->pending); 1192 flush_bio_list(conf, bio); 1193 kfree(plug); 1194 } 1195 1196 static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio) 1197 { 1198 r1_bio->master_bio = bio; 1199 r1_bio->sectors = bio_sectors(bio); 1200 r1_bio->state = 0; 1201 r1_bio->mddev = mddev; 1202 r1_bio->sector = bio->bi_iter.bi_sector; 1203 } 1204 1205 static inline struct r1bio * 1206 alloc_r1bio(struct mddev *mddev, struct bio *bio) 1207 { 1208 struct r1conf *conf = mddev->private; 1209 struct r1bio *r1_bio; 1210 1211 r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO); 1212 /* Ensure no bio records IO_BLOCKED */ 1213 memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0])); 1214 init_r1bio(r1_bio, mddev, bio); 1215 return r1_bio; 1216 } 1217 1218 static void raid1_read_request(struct mddev *mddev, struct bio *bio, 1219 int max_read_sectors, struct r1bio *r1_bio) 1220 { 1221 struct r1conf *conf = mddev->private; 1222 struct raid1_info *mirror; 1223 struct bio *read_bio; 1224 struct bitmap *bitmap = mddev->bitmap; 1225 const int op = bio_op(bio); 1226 const unsigned long do_sync = (bio->bi_opf & REQ_SYNC); 1227 int max_sectors; 1228 int rdisk; 1229 bool print_msg = !!r1_bio; 1230 char b[BDEVNAME_SIZE]; 1231 1232 /* 1233 * If r1_bio is set, we are blocking the raid1d thread 1234 * so there is a tiny risk of deadlock. So ask for 1235 * emergency memory if needed. 1236 */ 1237 gfp_t gfp = r1_bio ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO; 1238 1239 if (print_msg) { 1240 /* Need to get the block device name carefully */ 1241 struct md_rdev *rdev; 1242 rcu_read_lock(); 1243 rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev); 1244 if (rdev) 1245 bdevname(rdev->bdev, b); 1246 else 1247 strcpy(b, "???"); 1248 rcu_read_unlock(); 1249 } 1250 1251 /* 1252 * Still need barrier for READ in case that whole 1253 * array is frozen. 1254 */ 1255 wait_read_barrier(conf, bio->bi_iter.bi_sector); 1256 1257 if (!r1_bio) 1258 r1_bio = alloc_r1bio(mddev, bio); 1259 else 1260 init_r1bio(r1_bio, mddev, bio); 1261 r1_bio->sectors = max_read_sectors; 1262 1263 /* 1264 * make_request() can abort the operation when read-ahead is being 1265 * used and no empty request is available. 
1266 */ 1267 rdisk = read_balance(conf, r1_bio, &max_sectors); 1268 1269 if (rdisk < 0) { 1270 /* couldn't find anywhere to read from */ 1271 if (print_msg) { 1272 pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n", 1273 mdname(mddev), 1274 b, 1275 (unsigned long long)r1_bio->sector); 1276 } 1277 raid_end_bio_io(r1_bio); 1278 return; 1279 } 1280 mirror = conf->mirrors + rdisk; 1281 1282 if (print_msg) 1283 pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n", 1284 mdname(mddev), 1285 (unsigned long long)r1_bio->sector, 1286 bdevname(mirror->rdev->bdev, b)); 1287 1288 if (test_bit(WriteMostly, &mirror->rdev->flags) && 1289 bitmap) { 1290 /* 1291 * Reading from a write-mostly device must take care not to 1292 * over-take any writes that are 'behind' 1293 */ 1294 raid1_log(mddev, "wait behind writes"); 1295 wait_event(bitmap->behind_wait, 1296 atomic_read(&bitmap->behind_writes) == 0); 1297 } 1298 1299 if (max_sectors < bio_sectors(bio)) { 1300 struct bio *split = bio_split(bio, max_sectors, 1301 gfp, &conf->bio_split); 1302 bio_chain(split, bio); 1303 generic_make_request(bio); 1304 bio = split; 1305 r1_bio->master_bio = bio; 1306 r1_bio->sectors = max_sectors; 1307 } 1308 1309 r1_bio->read_disk = rdisk; 1310 1311 read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set); 1312 1313 r1_bio->bios[rdisk] = read_bio; 1314 1315 read_bio->bi_iter.bi_sector = r1_bio->sector + 1316 mirror->rdev->data_offset; 1317 bio_set_dev(read_bio, mirror->rdev->bdev); 1318 read_bio->bi_end_io = raid1_end_read_request; 1319 bio_set_op_attrs(read_bio, op, do_sync); 1320 if (test_bit(FailFast, &mirror->rdev->flags) && 1321 test_bit(R1BIO_FailFast, &r1_bio->state)) 1322 read_bio->bi_opf |= MD_FAILFAST; 1323 read_bio->bi_private = r1_bio; 1324 1325 if (mddev->gendisk) 1326 trace_block_bio_remap(read_bio->bi_disk->queue, read_bio, 1327 disk_devt(mddev->gendisk), r1_bio->sector); 1328 1329 generic_make_request(read_bio); 1330 } 1331 1332 static void raid1_write_request(struct mddev *mddev, struct bio *bio, 1333 int max_write_sectors) 1334 { 1335 struct r1conf *conf = mddev->private; 1336 struct r1bio *r1_bio; 1337 int i, disks; 1338 struct bitmap *bitmap = mddev->bitmap; 1339 unsigned long flags; 1340 struct md_rdev *blocked_rdev; 1341 struct blk_plug_cb *cb; 1342 struct raid1_plug_cb *plug = NULL; 1343 int first_clone; 1344 int max_sectors; 1345 sector_t lo, hi; 1346 1347 if (mddev_is_clustered(mddev) && 1348 md_cluster_ops->area_resyncing(mddev, WRITE, 1349 bio->bi_iter.bi_sector, bio_end_sector(bio))) { 1350 1351 DEFINE_WAIT(w); 1352 for (;;) { 1353 prepare_to_wait(&conf->wait_barrier, 1354 &w, TASK_IDLE); 1355 if (!md_cluster_ops->area_resyncing(mddev, WRITE, 1356 bio->bi_iter.bi_sector, 1357 bio_end_sector(bio))) 1358 break; 1359 schedule(); 1360 } 1361 finish_wait(&conf->wait_barrier, &w); 1362 } 1363 1364 /* 1365 * Register the new request and wait if the reconstruction 1366 * thread has put up a bar for new requests. 1367 * Continue immediately if no resync is active currently. 
1368 */ 1369 wait_barrier(conf, bio->bi_iter.bi_sector); 1370 1371 r1_bio = alloc_r1bio(mddev, bio); 1372 r1_bio->sectors = max_write_sectors; 1373 lo = r1_bio->sector; 1374 hi = r1_bio->sector + r1_bio->sectors; 1375 1376 if (conf->pending_count >= max_queued_requests) { 1377 md_wakeup_thread(mddev->thread); 1378 raid1_log(mddev, "wait queued"); 1379 wait_event(conf->wait_barrier, 1380 conf->pending_count < max_queued_requests); 1381 } 1382 /* first select target devices under rcu_lock and 1383 * inc refcount on their rdev. Record them by setting 1384 * bios[x] to bio 1385 * If there are known/acknowledged bad blocks on any device on 1386 * which we have seen a write error, we want to avoid writing those 1387 * blocks. 1388 * This potentially requires several writes to write around 1389 * the bad blocks. Each set of writes gets it's own r1bio 1390 * with a set of bios attached. 1391 */ 1392 1393 disks = conf->raid_disks * 2; 1394 retry_write: 1395 blocked_rdev = NULL; 1396 rcu_read_lock(); 1397 max_sectors = r1_bio->sectors; 1398 for (i = 0; i < disks; i++) { 1399 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); 1400 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { 1401 atomic_inc(&rdev->nr_pending); 1402 blocked_rdev = rdev; 1403 break; 1404 } 1405 r1_bio->bios[i] = NULL; 1406 if (!rdev || test_bit(Faulty, &rdev->flags)) { 1407 if (i < conf->raid_disks) 1408 set_bit(R1BIO_Degraded, &r1_bio->state); 1409 continue; 1410 } 1411 1412 atomic_inc(&rdev->nr_pending); 1413 if (test_bit(WriteErrorSeen, &rdev->flags)) { 1414 sector_t first_bad; 1415 int bad_sectors; 1416 int is_bad; 1417 1418 is_bad = is_badblock(rdev, r1_bio->sector, max_sectors, 1419 &first_bad, &bad_sectors); 1420 if (is_bad < 0) { 1421 /* mustn't write here until the bad block is 1422 * acknowledged*/ 1423 set_bit(BlockedBadBlocks, &rdev->flags); 1424 blocked_rdev = rdev; 1425 break; 1426 } 1427 if (is_bad && first_bad <= r1_bio->sector) { 1428 /* Cannot write here at all */ 1429 bad_sectors -= (r1_bio->sector - first_bad); 1430 if (bad_sectors < max_sectors) 1431 /* mustn't write more than bad_sectors 1432 * to other devices yet 1433 */ 1434 max_sectors = bad_sectors; 1435 rdev_dec_pending(rdev, mddev); 1436 /* We don't set R1BIO_Degraded as that 1437 * only applies if the disk is 1438 * missing, so it might be re-added, 1439 * and we want to know to recover this 1440 * chunk. 
1441 * In this case the device is here, 1442 * and the fact that this chunk is not 1443 * in-sync is recorded in the bad 1444 * block log 1445 */ 1446 continue; 1447 } 1448 if (is_bad) { 1449 int good_sectors = first_bad - r1_bio->sector; 1450 if (good_sectors < max_sectors) 1451 max_sectors = good_sectors; 1452 } 1453 } 1454 r1_bio->bios[i] = bio; 1455 } 1456 rcu_read_unlock(); 1457 1458 if (unlikely(blocked_rdev)) { 1459 /* Wait for this device to become unblocked */ 1460 int j; 1461 1462 for (j = 0; j < i; j++) 1463 if (r1_bio->bios[j]) 1464 rdev_dec_pending(conf->mirrors[j].rdev, mddev); 1465 r1_bio->state = 0; 1466 allow_barrier(conf, bio->bi_iter.bi_sector); 1467 raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk); 1468 md_wait_for_blocked_rdev(blocked_rdev, mddev); 1469 wait_barrier(conf, bio->bi_iter.bi_sector); 1470 goto retry_write; 1471 } 1472 1473 if (max_sectors < bio_sectors(bio)) { 1474 struct bio *split = bio_split(bio, max_sectors, 1475 GFP_NOIO, &conf->bio_split); 1476 bio_chain(split, bio); 1477 generic_make_request(bio); 1478 bio = split; 1479 r1_bio->master_bio = bio; 1480 r1_bio->sectors = max_sectors; 1481 } 1482 1483 atomic_set(&r1_bio->remaining, 1); 1484 atomic_set(&r1_bio->behind_remaining, 0); 1485 1486 first_clone = 1; 1487 1488 for (i = 0; i < disks; i++) { 1489 struct bio *mbio = NULL; 1490 struct md_rdev *rdev = conf->mirrors[i].rdev; 1491 int idx = sector_to_idx(lo); 1492 struct serial_in_rdev *serial = &rdev->serial[idx]; 1493 if (!r1_bio->bios[i]) 1494 continue; 1495 1496 if (first_clone) { 1497 /* do behind I/O ? 1498 * Not if there are too many, or cannot 1499 * allocate memory, or a reader on WriteMostly 1500 * is waiting for behind writes to flush */ 1501 if (bitmap && 1502 (atomic_read(&bitmap->behind_writes) 1503 < mddev->bitmap_info.max_write_behind) && 1504 !waitqueue_active(&bitmap->behind_wait)) { 1505 alloc_behind_master_bio(r1_bio, bio); 1506 } 1507 1508 md_bitmap_startwrite(bitmap, r1_bio->sector, r1_bio->sectors, 1509 test_bit(R1BIO_BehindIO, &r1_bio->state)); 1510 first_clone = 0; 1511 } 1512 1513 if (r1_bio->behind_master_bio) 1514 mbio = bio_clone_fast(r1_bio->behind_master_bio, 1515 GFP_NOIO, &mddev->bio_set); 1516 else 1517 mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set); 1518 1519 if (r1_bio->behind_master_bio) { 1520 if (test_bit(CollisionCheck, &rdev->flags)) 1521 wait_event(serial->serial_io_wait, 1522 check_and_add_serial(rdev, lo, hi) 1523 == 0); 1524 if (test_bit(WriteMostly, &rdev->flags)) 1525 atomic_inc(&r1_bio->behind_remaining); 1526 } else if (mddev->serialize_policy) 1527 wait_event(serial->serial_io_wait, 1528 check_and_add_serial(rdev, lo, hi) == 0); 1529 1530 r1_bio->bios[i] = mbio; 1531 1532 mbio->bi_iter.bi_sector = (r1_bio->sector + 1533 conf->mirrors[i].rdev->data_offset); 1534 bio_set_dev(mbio, conf->mirrors[i].rdev->bdev); 1535 mbio->bi_end_io = raid1_end_write_request; 1536 mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA)); 1537 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) && 1538 !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) && 1539 conf->raid_disks - mddev->degraded > 1) 1540 mbio->bi_opf |= MD_FAILFAST; 1541 mbio->bi_private = r1_bio; 1542 1543 atomic_inc(&r1_bio->remaining); 1544 1545 if (mddev->gendisk) 1546 trace_block_bio_remap(mbio->bi_disk->queue, 1547 mbio, disk_devt(mddev->gendisk), 1548 r1_bio->sector); 1549 /* flush_pending_writes() needs access to the rdev so...*/ 1550 mbio->bi_disk = (void *)conf->mirrors[i].rdev; 1551 1552 cb = 
blk_check_plugged(raid1_unplug, mddev, sizeof(*plug)); 1553 if (cb) 1554 plug = container_of(cb, struct raid1_plug_cb, cb); 1555 else 1556 plug = NULL; 1557 if (plug) { 1558 bio_list_add(&plug->pending, mbio); 1559 plug->pending_cnt++; 1560 } else { 1561 spin_lock_irqsave(&conf->device_lock, flags); 1562 bio_list_add(&conf->pending_bio_list, mbio); 1563 conf->pending_count++; 1564 spin_unlock_irqrestore(&conf->device_lock, flags); 1565 md_wakeup_thread(mddev->thread); 1566 } 1567 } 1568 1569 r1_bio_write_done(r1_bio); 1570 1571 /* In case raid1d snuck in to freeze_array */ 1572 wake_up(&conf->wait_barrier); 1573 } 1574 1575 static bool raid1_make_request(struct mddev *mddev, struct bio *bio) 1576 { 1577 sector_t sectors; 1578 1579 if (unlikely(bio->bi_opf & REQ_PREFLUSH) 1580 && md_flush_request(mddev, bio)) 1581 return true; 1582 1583 /* 1584 * There is a limit to the maximum size, but 1585 * the read/write handler might find a lower limit 1586 * due to bad blocks. To avoid multiple splits, 1587 * we pass the maximum number of sectors down 1588 * and let the lower level perform the split. 1589 */ 1590 sectors = align_to_barrier_unit_end( 1591 bio->bi_iter.bi_sector, bio_sectors(bio)); 1592 1593 if (bio_data_dir(bio) == READ) 1594 raid1_read_request(mddev, bio, sectors, NULL); 1595 else { 1596 if (!md_write_start(mddev,bio)) 1597 return false; 1598 raid1_write_request(mddev, bio, sectors); 1599 } 1600 return true; 1601 } 1602 1603 static void raid1_status(struct seq_file *seq, struct mddev *mddev) 1604 { 1605 struct r1conf *conf = mddev->private; 1606 int i; 1607 1608 seq_printf(seq, " [%d/%d] [", conf->raid_disks, 1609 conf->raid_disks - mddev->degraded); 1610 rcu_read_lock(); 1611 for (i = 0; i < conf->raid_disks; i++) { 1612 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); 1613 seq_printf(seq, "%s", 1614 rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_"); 1615 } 1616 rcu_read_unlock(); 1617 seq_printf(seq, "]"); 1618 } 1619 1620 static void raid1_error(struct mddev *mddev, struct md_rdev *rdev) 1621 { 1622 char b[BDEVNAME_SIZE]; 1623 struct r1conf *conf = mddev->private; 1624 unsigned long flags; 1625 1626 /* 1627 * If it is not operational, then we have already marked it as dead 1628 * else if it is the last working disks with "fail_last_dev == false", 1629 * ignore the error, let the next level up know. 1630 * else mark the drive as failed 1631 */ 1632 spin_lock_irqsave(&conf->device_lock, flags); 1633 if (test_bit(In_sync, &rdev->flags) && !mddev->fail_last_dev 1634 && (conf->raid_disks - mddev->degraded) == 1) { 1635 /* 1636 * Don't fail the drive, act as though we were just a 1637 * normal single drive. 1638 * However don't try a recovery from this drive as 1639 * it is very likely to fail. 1640 */ 1641 conf->recovery_disabled = mddev->recovery_disabled; 1642 spin_unlock_irqrestore(&conf->device_lock, flags); 1643 return; 1644 } 1645 set_bit(Blocked, &rdev->flags); 1646 if (test_and_clear_bit(In_sync, &rdev->flags)) 1647 mddev->degraded++; 1648 set_bit(Faulty, &rdev->flags); 1649 spin_unlock_irqrestore(&conf->device_lock, flags); 1650 /* 1651 * if recovery is running, make sure it aborts. 
1652 */ 1653 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 1654 set_mask_bits(&mddev->sb_flags, 0, 1655 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING)); 1656 pr_crit("md/raid1:%s: Disk failure on %s, disabling device.\n" 1657 "md/raid1:%s: Operation continuing on %d devices.\n", 1658 mdname(mddev), bdevname(rdev->bdev, b), 1659 mdname(mddev), conf->raid_disks - mddev->degraded); 1660 } 1661 1662 static void print_conf(struct r1conf *conf) 1663 { 1664 int i; 1665 1666 pr_debug("RAID1 conf printout:\n"); 1667 if (!conf) { 1668 pr_debug("(!conf)\n"); 1669 return; 1670 } 1671 pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded, 1672 conf->raid_disks); 1673 1674 rcu_read_lock(); 1675 for (i = 0; i < conf->raid_disks; i++) { 1676 char b[BDEVNAME_SIZE]; 1677 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); 1678 if (rdev) 1679 pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n", 1680 i, !test_bit(In_sync, &rdev->flags), 1681 !test_bit(Faulty, &rdev->flags), 1682 bdevname(rdev->bdev,b)); 1683 } 1684 rcu_read_unlock(); 1685 } 1686 1687 static void close_sync(struct r1conf *conf) 1688 { 1689 int idx; 1690 1691 for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) { 1692 _wait_barrier(conf, idx); 1693 _allow_barrier(conf, idx); 1694 } 1695 1696 mempool_exit(&conf->r1buf_pool); 1697 } 1698 1699 static int raid1_spare_active(struct mddev *mddev) 1700 { 1701 int i; 1702 struct r1conf *conf = mddev->private; 1703 int count = 0; 1704 unsigned long flags; 1705 1706 /* 1707 * Find all failed disks within the RAID1 configuration 1708 * and mark them readable. 1709 * Called under mddev lock, so rcu protection not needed. 1710 * device_lock used to avoid races with raid1_end_read_request 1711 * which expects 'In_sync' flags and ->degraded to be consistent. 1712 */ 1713 spin_lock_irqsave(&conf->device_lock, flags); 1714 for (i = 0; i < conf->raid_disks; i++) { 1715 struct md_rdev *rdev = conf->mirrors[i].rdev; 1716 struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev; 1717 if (repl 1718 && !test_bit(Candidate, &repl->flags) 1719 && repl->recovery_offset == MaxSector 1720 && !test_bit(Faulty, &repl->flags) 1721 && !test_and_set_bit(In_sync, &repl->flags)) { 1722 /* replacement has just become active */ 1723 if (!rdev || 1724 !test_and_clear_bit(In_sync, &rdev->flags)) 1725 count++; 1726 if (rdev) { 1727 /* Replaced device not technically 1728 * faulty, but we need to be sure 1729 * it gets removed and never re-added 1730 */ 1731 set_bit(Faulty, &rdev->flags); 1732 sysfs_notify_dirent_safe( 1733 rdev->sysfs_state); 1734 } 1735 } 1736 if (rdev 1737 && rdev->recovery_offset == MaxSector 1738 && !test_bit(Faulty, &rdev->flags) 1739 && !test_and_set_bit(In_sync, &rdev->flags)) { 1740 count++; 1741 sysfs_notify_dirent_safe(rdev->sysfs_state); 1742 } 1743 } 1744 mddev->degraded -= count; 1745 spin_unlock_irqrestore(&conf->device_lock, flags); 1746 1747 print_conf(conf); 1748 return count; 1749 } 1750 1751 static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev) 1752 { 1753 struct r1conf *conf = mddev->private; 1754 int err = -EEXIST; 1755 int mirror = 0; 1756 struct raid1_info *p; 1757 int first = 0; 1758 int last = conf->raid_disks - 1; 1759 1760 if (mddev->recovery_disabled == conf->recovery_disabled) 1761 return -EBUSY; 1762 1763 if (md_integrity_add_rdev(rdev, mddev)) 1764 return -ENXIO; 1765 1766 if (rdev->raid_disk >= 0) 1767 first = last = rdev->raid_disk; 1768 1769 /* 1770 * find the disk ... but prefer rdev->saved_raid_disk 1771 * if possible. 
1772 */ 1773 if (rdev->saved_raid_disk >= 0 && 1774 rdev->saved_raid_disk >= first && 1775 rdev->saved_raid_disk < conf->raid_disks && 1776 conf->mirrors[rdev->saved_raid_disk].rdev == NULL) 1777 first = last = rdev->saved_raid_disk; 1778 1779 for (mirror = first; mirror <= last; mirror++) { 1780 p = conf->mirrors + mirror; 1781 if (!p->rdev) { 1782 if (mddev->gendisk) 1783 disk_stack_limits(mddev->gendisk, rdev->bdev, 1784 rdev->data_offset << 9); 1785 1786 p->head_position = 0; 1787 rdev->raid_disk = mirror; 1788 err = 0; 1789 /* As all devices are equivalent, we don't need a full recovery 1790 * if this was recently any drive of the array 1791 */ 1792 if (rdev->saved_raid_disk < 0) 1793 conf->fullsync = 1; 1794 rcu_assign_pointer(p->rdev, rdev); 1795 break; 1796 } 1797 if (test_bit(WantReplacement, &p->rdev->flags) && 1798 p[conf->raid_disks].rdev == NULL) { 1799 /* Add this device as a replacement */ 1800 clear_bit(In_sync, &rdev->flags); 1801 set_bit(Replacement, &rdev->flags); 1802 rdev->raid_disk = mirror; 1803 err = 0; 1804 conf->fullsync = 1; 1805 rcu_assign_pointer(p[conf->raid_disks].rdev, rdev); 1806 break; 1807 } 1808 } 1809 if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev))) 1810 blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue); 1811 print_conf(conf); 1812 return err; 1813 } 1814 1815 static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev) 1816 { 1817 struct r1conf *conf = mddev->private; 1818 int err = 0; 1819 int number = rdev->raid_disk; 1820 struct raid1_info *p = conf->mirrors + number; 1821 1822 if (rdev != p->rdev) 1823 p = conf->mirrors + conf->raid_disks + number; 1824 1825 print_conf(conf); 1826 if (rdev == p->rdev) { 1827 if (test_bit(In_sync, &rdev->flags) || 1828 atomic_read(&rdev->nr_pending)) { 1829 err = -EBUSY; 1830 goto abort; 1831 } 1832 /* Only remove non-faulty devices if recovery 1833 * is not possible. 1834 */ 1835 if (!test_bit(Faulty, &rdev->flags) && 1836 mddev->recovery_disabled != conf->recovery_disabled && 1837 mddev->degraded < conf->raid_disks) { 1838 err = -EBUSY; 1839 goto abort; 1840 } 1841 p->rdev = NULL; 1842 if (!test_bit(RemoveSynchronized, &rdev->flags)) { 1843 synchronize_rcu(); 1844 if (atomic_read(&rdev->nr_pending)) { 1845 /* lost the race, try later */ 1846 err = -EBUSY; 1847 p->rdev = rdev; 1848 goto abort; 1849 } 1850 } 1851 if (conf->mirrors[conf->raid_disks + number].rdev) { 1852 /* We just removed a device that is being replaced. 1853 * Move down the replacement. We drain all IO before 1854 * doing this to avoid confusion. 1855 */ 1856 struct md_rdev *repl = 1857 conf->mirrors[conf->raid_disks + number].rdev; 1858 freeze_array(conf, 0); 1859 if (atomic_read(&repl->nr_pending)) { 1860 /* It means that some queued IO of retry_list 1861 * hold repl. Thus, we cannot set replacement 1862 * as NULL, avoiding rdev NULL pointer 1863 * dereference in sync_request_write and 1864 * handle_write_finished. 
1865 */ 1866 err = -EBUSY; 1867 unfreeze_array(conf); 1868 goto abort; 1869 } 1870 clear_bit(Replacement, &repl->flags); 1871 p->rdev = repl; 1872 conf->mirrors[conf->raid_disks + number].rdev = NULL; 1873 unfreeze_array(conf); 1874 } 1875 1876 clear_bit(WantReplacement, &rdev->flags); 1877 err = md_integrity_register(mddev); 1878 } 1879 abort: 1880 1881 print_conf(conf); 1882 return err; 1883 } 1884 1885 static void end_sync_read(struct bio *bio) 1886 { 1887 struct r1bio *r1_bio = get_resync_r1bio(bio); 1888 1889 update_head_pos(r1_bio->read_disk, r1_bio); 1890 1891 /* 1892 * we have read a block, now it needs to be re-written, 1893 * or re-read if the read failed. 1894 * We don't do much here, just schedule handling by raid1d 1895 */ 1896 if (!bio->bi_status) 1897 set_bit(R1BIO_Uptodate, &r1_bio->state); 1898 1899 if (atomic_dec_and_test(&r1_bio->remaining)) 1900 reschedule_retry(r1_bio); 1901 } 1902 1903 static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio) 1904 { 1905 sector_t sync_blocks = 0; 1906 sector_t s = r1_bio->sector; 1907 long sectors_to_go = r1_bio->sectors; 1908 1909 /* make sure these bits don't get cleared. */ 1910 do { 1911 md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1); 1912 s += sync_blocks; 1913 sectors_to_go -= sync_blocks; 1914 } while (sectors_to_go > 0); 1915 } 1916 1917 static void put_sync_write_buf(struct r1bio *r1_bio, int uptodate) 1918 { 1919 if (atomic_dec_and_test(&r1_bio->remaining)) { 1920 struct mddev *mddev = r1_bio->mddev; 1921 int s = r1_bio->sectors; 1922 1923 if (test_bit(R1BIO_MadeGood, &r1_bio->state) || 1924 test_bit(R1BIO_WriteError, &r1_bio->state)) 1925 reschedule_retry(r1_bio); 1926 else { 1927 put_buf(r1_bio); 1928 md_done_sync(mddev, s, uptodate); 1929 } 1930 } 1931 } 1932 1933 static void end_sync_write(struct bio *bio) 1934 { 1935 int uptodate = !bio->bi_status; 1936 struct r1bio *r1_bio = get_resync_r1bio(bio); 1937 struct mddev *mddev = r1_bio->mddev; 1938 struct r1conf *conf = mddev->private; 1939 sector_t first_bad; 1940 int bad_sectors; 1941 struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev; 1942 1943 if (!uptodate) { 1944 abort_sync_write(mddev, r1_bio); 1945 set_bit(WriteErrorSeen, &rdev->flags); 1946 if (!test_and_set_bit(WantReplacement, &rdev->flags)) 1947 set_bit(MD_RECOVERY_NEEDED, & 1948 mddev->recovery); 1949 set_bit(R1BIO_WriteError, &r1_bio->state); 1950 } else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors, 1951 &first_bad, &bad_sectors) && 1952 !is_badblock(conf->mirrors[r1_bio->read_disk].rdev, 1953 r1_bio->sector, 1954 r1_bio->sectors, 1955 &first_bad, &bad_sectors) 1956 ) 1957 set_bit(R1BIO_MadeGood, &r1_bio->state); 1958 1959 put_sync_write_buf(r1_bio, uptodate); 1960 } 1961 1962 static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector, 1963 int sectors, struct page *page, int rw) 1964 { 1965 if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false)) 1966 /* success */ 1967 return 1; 1968 if (rw == WRITE) { 1969 set_bit(WriteErrorSeen, &rdev->flags); 1970 if (!test_and_set_bit(WantReplacement, 1971 &rdev->flags)) 1972 set_bit(MD_RECOVERY_NEEDED, & 1973 rdev->mddev->recovery); 1974 } 1975 /* need to record an error - either for the block or the device */ 1976 if (!rdev_set_badblocks(rdev, sector, sectors, 0)) 1977 md_error(rdev->mddev, rdev); 1978 return 0; 1979 } 1980 1981 static int fix_sync_read_error(struct r1bio *r1_bio) 1982 { 1983 /* Try some synchronous reads of other devices to get 1984 * good data, much like with normal read errors. 
Only
1985 * read into the pages we already have so we don't
1986 * need to re-issue the read request.
1987 * We don't need to freeze the array, because being in an
1988 * active sync request, there is no normal IO, and
1989 * no overlapping syncs.
1990 * We don't need to check is_badblock() again as we
1991 * made sure that anything with a bad block in range
1992 * will have bi_end_io clear.
1993 */
1994 struct mddev *mddev = r1_bio->mddev;
1995 struct r1conf *conf = mddev->private;
1996 struct bio *bio = r1_bio->bios[r1_bio->read_disk];
1997 struct page **pages = get_resync_pages(bio)->pages;
1998 sector_t sect = r1_bio->sector;
1999 int sectors = r1_bio->sectors;
2000 int idx = 0;
2001 struct md_rdev *rdev;
2002
2003 rdev = conf->mirrors[r1_bio->read_disk].rdev;
2004 if (test_bit(FailFast, &rdev->flags)) {
2005 /* Don't try recovering from here - just fail it
2006 * ... unless it is the last working device of course */
2007 md_error(mddev, rdev);
2008 if (test_bit(Faulty, &rdev->flags))
2009 /* Don't try to read from here, but make sure
2010 * put_buf does its thing
2011 */
2012 bio->bi_end_io = end_sync_write;
2013 }
2014
2015 while (sectors) {
2016 int s = sectors;
2017 int d = r1_bio->read_disk;
2018 int success = 0;
2019 int start;
2020
2021 if (s > (PAGE_SIZE>>9))
2022 s = PAGE_SIZE >> 9;
2023 do {
2024 if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
2025 /* No rcu protection needed here; devices
2026 * can only be removed when no resync is
2027 * active, and resync is currently active
2028 */
2029 rdev = conf->mirrors[d].rdev;
2030 if (sync_page_io(rdev, sect, s<<9,
2031 pages[idx],
2032 REQ_OP_READ, 0, false)) {
2033 success = 1;
2034 break;
2035 }
2036 }
2037 d++;
2038 if (d == conf->raid_disks * 2)
2039 d = 0;
2040 } while (!success && d != r1_bio->read_disk);
2041
2042 if (!success) {
2043 char b[BDEVNAME_SIZE];
2044 int abort = 0;
2045 /* Cannot read from anywhere, this block is lost.
2046 * Record a bad block on each device. If that doesn't
2047 * work just disable and interrupt the recovery.
2048 * Don't fail devices as that won't really help.
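 * Recording a bad block on every device keeps the rest of the resync
 * going; only if even that fails do we set MD_RECOVERY_INTR below and
 * give up on this recovery attempt.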
2049 */ 2050 pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n", 2051 mdname(mddev), bio_devname(bio, b), 2052 (unsigned long long)r1_bio->sector); 2053 for (d = 0; d < conf->raid_disks * 2; d++) { 2054 rdev = conf->mirrors[d].rdev; 2055 if (!rdev || test_bit(Faulty, &rdev->flags)) 2056 continue; 2057 if (!rdev_set_badblocks(rdev, sect, s, 0)) 2058 abort = 1; 2059 } 2060 if (abort) { 2061 conf->recovery_disabled = 2062 mddev->recovery_disabled; 2063 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 2064 md_done_sync(mddev, r1_bio->sectors, 0); 2065 put_buf(r1_bio); 2066 return 0; 2067 } 2068 /* Try next page */ 2069 sectors -= s; 2070 sect += s; 2071 idx++; 2072 continue; 2073 } 2074 2075 start = d; 2076 /* write it back and re-read */ 2077 while (d != r1_bio->read_disk) { 2078 if (d == 0) 2079 d = conf->raid_disks * 2; 2080 d--; 2081 if (r1_bio->bios[d]->bi_end_io != end_sync_read) 2082 continue; 2083 rdev = conf->mirrors[d].rdev; 2084 if (r1_sync_page_io(rdev, sect, s, 2085 pages[idx], 2086 WRITE) == 0) { 2087 r1_bio->bios[d]->bi_end_io = NULL; 2088 rdev_dec_pending(rdev, mddev); 2089 } 2090 } 2091 d = start; 2092 while (d != r1_bio->read_disk) { 2093 if (d == 0) 2094 d = conf->raid_disks * 2; 2095 d--; 2096 if (r1_bio->bios[d]->bi_end_io != end_sync_read) 2097 continue; 2098 rdev = conf->mirrors[d].rdev; 2099 if (r1_sync_page_io(rdev, sect, s, 2100 pages[idx], 2101 READ) != 0) 2102 atomic_add(s, &rdev->corrected_errors); 2103 } 2104 sectors -= s; 2105 sect += s; 2106 idx ++; 2107 } 2108 set_bit(R1BIO_Uptodate, &r1_bio->state); 2109 bio->bi_status = 0; 2110 return 1; 2111 } 2112 2113 static void process_checks(struct r1bio *r1_bio) 2114 { 2115 /* We have read all readable devices. If we haven't 2116 * got the block, then there is no hope left. 2117 * If we have, then we want to do a comparison 2118 * and skip the write if everything is the same. 
2119 * If any blocks failed to read, then we need to 2120 * attempt an over-write 2121 */ 2122 struct mddev *mddev = r1_bio->mddev; 2123 struct r1conf *conf = mddev->private; 2124 int primary; 2125 int i; 2126 int vcnt; 2127 2128 /* Fix variable parts of all bios */ 2129 vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9); 2130 for (i = 0; i < conf->raid_disks * 2; i++) { 2131 blk_status_t status; 2132 struct bio *b = r1_bio->bios[i]; 2133 struct resync_pages *rp = get_resync_pages(b); 2134 if (b->bi_end_io != end_sync_read) 2135 continue; 2136 /* fixup the bio for reuse, but preserve errno */ 2137 status = b->bi_status; 2138 bio_reset(b); 2139 b->bi_status = status; 2140 b->bi_iter.bi_sector = r1_bio->sector + 2141 conf->mirrors[i].rdev->data_offset; 2142 bio_set_dev(b, conf->mirrors[i].rdev->bdev); 2143 b->bi_end_io = end_sync_read; 2144 rp->raid_bio = r1_bio; 2145 b->bi_private = rp; 2146 2147 /* initialize bvec table again */ 2148 md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9); 2149 } 2150 for (primary = 0; primary < conf->raid_disks * 2; primary++) 2151 if (r1_bio->bios[primary]->bi_end_io == end_sync_read && 2152 !r1_bio->bios[primary]->bi_status) { 2153 r1_bio->bios[primary]->bi_end_io = NULL; 2154 rdev_dec_pending(conf->mirrors[primary].rdev, mddev); 2155 break; 2156 } 2157 r1_bio->read_disk = primary; 2158 for (i = 0; i < conf->raid_disks * 2; i++) { 2159 int j = 0; 2160 struct bio *pbio = r1_bio->bios[primary]; 2161 struct bio *sbio = r1_bio->bios[i]; 2162 blk_status_t status = sbio->bi_status; 2163 struct page **ppages = get_resync_pages(pbio)->pages; 2164 struct page **spages = get_resync_pages(sbio)->pages; 2165 struct bio_vec *bi; 2166 int page_len[RESYNC_PAGES] = { 0 }; 2167 struct bvec_iter_all iter_all; 2168 2169 if (sbio->bi_end_io != end_sync_read) 2170 continue; 2171 /* Now we can 'fixup' the error value */ 2172 sbio->bi_status = 0; 2173 2174 bio_for_each_segment_all(bi, sbio, iter_all) 2175 page_len[j++] = bi->bv_len; 2176 2177 if (!status) { 2178 for (j = vcnt; j-- ; ) { 2179 if (memcmp(page_address(ppages[j]), 2180 page_address(spages[j]), 2181 page_len[j])) 2182 break; 2183 } 2184 } else 2185 j = 0; 2186 if (j >= 0) 2187 atomic64_add(r1_bio->sectors, &mddev->resync_mismatches); 2188 if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) 2189 && !status)) { 2190 /* No need to write to this device. */ 2191 sbio->bi_end_io = NULL; 2192 rdev_dec_pending(conf->mirrors[i].rdev, mddev); 2193 continue; 2194 } 2195 2196 bio_copy_data(sbio, pbio); 2197 } 2198 } 2199 2200 static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio) 2201 { 2202 struct r1conf *conf = mddev->private; 2203 int i; 2204 int disks = conf->raid_disks * 2; 2205 struct bio *wbio; 2206 2207 if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) 2208 /* ouch - failed to read all of that. 
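 * fix_sync_read_error() tries to rebuild the data from the other
 * mirrors; if it cannot, it has already reported the failure via
 * md_done_sync() and released the buffer, so just return.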
*/
2209 if (!fix_sync_read_error(r1_bio))
2210 return;
2211
2212 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2213 process_checks(r1_bio);
2214
2215 /*
2216 * schedule writes
2217 */
2218 atomic_set(&r1_bio->remaining, 1);
2219 for (i = 0; i < disks ; i++) {
2220 wbio = r1_bio->bios[i];
2221 if (wbio->bi_end_io == NULL ||
2222 (wbio->bi_end_io == end_sync_read &&
2223 (i == r1_bio->read_disk ||
2224 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
2225 continue;
2226 if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
2227 abort_sync_write(mddev, r1_bio);
2228 continue;
2229 }
2230
2231 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
2232 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
2233 wbio->bi_opf |= MD_FAILFAST;
2234
2235 wbio->bi_end_io = end_sync_write;
2236 atomic_inc(&r1_bio->remaining);
2237 md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
2238
2239 generic_make_request(wbio);
2240 }
2241
2242 put_sync_write_buf(r1_bio, 1);
2243 }
2244
2245 /*
2246 * This is a kernel thread which:
2247 *
2248 * 1. Retries failed read operations on working mirrors.
2249 * 2. Updates the raid superblock when problems are encountered.
2250 * 3. Performs writes following reads for array synchronising.
2251 */
2252
2253 static void fix_read_error(struct r1conf *conf, int read_disk,
2254 sector_t sect, int sectors)
2255 {
2256 struct mddev *mddev = conf->mddev;
2257 while (sectors) {
2258 int s = sectors;
2259 int d = read_disk;
2260 int success = 0;
2261 int start;
2262 struct md_rdev *rdev;
2263
2264 if (s > (PAGE_SIZE>>9))
2265 s = PAGE_SIZE >> 9;
2266
2267 do {
2268 sector_t first_bad;
2269 int bad_sectors;
2270
2271 rcu_read_lock();
2272 rdev = rcu_dereference(conf->mirrors[d].rdev);
2273 if (rdev &&
2274 (test_bit(In_sync, &rdev->flags) ||
2275 (!test_bit(Faulty, &rdev->flags) &&
2276 rdev->recovery_offset >= sect + s)) &&
2277 is_badblock(rdev, sect, s,
2278 &first_bad, &bad_sectors) == 0) {
2279 atomic_inc(&rdev->nr_pending);
2280 rcu_read_unlock();
2281 if (sync_page_io(rdev, sect, s<<9,
2282 conf->tmppage, REQ_OP_READ, 0, false))
2283 success = 1;
2284 rdev_dec_pending(rdev, mddev);
2285 if (success)
2286 break;
2287 } else
2288 rcu_read_unlock();
2289 d++;
2290 if (d == conf->raid_disks * 2)
2291 d = 0;
2292 } while (!success && d != read_disk);
2293
2294 if (!success) {
2295 /* Cannot read from anywhere - mark it bad */
2296 struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
2297 if (!rdev_set_badblocks(rdev, sect, s, 0))
2298 md_error(mddev, rdev);
2299 break;
2300 }
2301 /* write it back and re-read */
2302 start = d;
2303 while (d != read_disk) {
2304 if (d == 0)
2305 d = conf->raid_disks * 2;
2306 d--;
2307 rcu_read_lock();
2308 rdev = rcu_dereference(conf->mirrors[d].rdev);
2309 if (rdev &&
2310 !test_bit(Faulty, &rdev->flags)) {
2311 atomic_inc(&rdev->nr_pending);
2312 rcu_read_unlock();
2313 r1_sync_page_io(rdev, sect, s,
2314 conf->tmppage, WRITE);
2315 rdev_dec_pending(rdev, mddev);
2316 } else
2317 rcu_read_unlock();
2318 }
2319 d = start;
2320 while (d != read_disk) {
2321 char b[BDEVNAME_SIZE];
2322 if (d == 0)
2323 d = conf->raid_disks * 2;
2324 d--;
2325 rcu_read_lock();
2326 rdev = rcu_dereference(conf->mirrors[d].rdev);
2327 if (rdev &&
2328 !test_bit(Faulty, &rdev->flags)) {
2329 atomic_inc(&rdev->nr_pending);
2330 rcu_read_unlock();
2331 if (r1_sync_page_io(rdev, sect, s,
2332 conf->tmppage, READ)) {
2333 atomic_add(s, &rdev->corrected_errors);
2334 pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %s)\n",
2335 mdname(mddev), s, 2336
(unsigned long long)(sect + 2337 rdev->data_offset), 2338 bdevname(rdev->bdev, b)); 2339 } 2340 rdev_dec_pending(rdev, mddev); 2341 } else 2342 rcu_read_unlock(); 2343 } 2344 sectors -= s; 2345 sect += s; 2346 } 2347 } 2348 2349 static int narrow_write_error(struct r1bio *r1_bio, int i) 2350 { 2351 struct mddev *mddev = r1_bio->mddev; 2352 struct r1conf *conf = mddev->private; 2353 struct md_rdev *rdev = conf->mirrors[i].rdev; 2354 2355 /* bio has the data to be written to device 'i' where 2356 * we just recently had a write error. 2357 * We repeatedly clone the bio and trim down to one block, 2358 * then try the write. Where the write fails we record 2359 * a bad block. 2360 * It is conceivable that the bio doesn't exactly align with 2361 * blocks. We must handle this somehow. 2362 * 2363 * We currently own a reference on the rdev. 2364 */ 2365 2366 int block_sectors; 2367 sector_t sector; 2368 int sectors; 2369 int sect_to_write = r1_bio->sectors; 2370 int ok = 1; 2371 2372 if (rdev->badblocks.shift < 0) 2373 return 0; 2374 2375 block_sectors = roundup(1 << rdev->badblocks.shift, 2376 bdev_logical_block_size(rdev->bdev) >> 9); 2377 sector = r1_bio->sector; 2378 sectors = ((sector + block_sectors) 2379 & ~(sector_t)(block_sectors - 1)) 2380 - sector; 2381 2382 while (sect_to_write) { 2383 struct bio *wbio; 2384 if (sectors > sect_to_write) 2385 sectors = sect_to_write; 2386 /* Write at 'sector' for 'sectors'*/ 2387 2388 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { 2389 wbio = bio_clone_fast(r1_bio->behind_master_bio, 2390 GFP_NOIO, 2391 &mddev->bio_set); 2392 } else { 2393 wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO, 2394 &mddev->bio_set); 2395 } 2396 2397 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); 2398 wbio->bi_iter.bi_sector = r1_bio->sector; 2399 wbio->bi_iter.bi_size = r1_bio->sectors << 9; 2400 2401 bio_trim(wbio, sector - r1_bio->sector, sectors); 2402 wbio->bi_iter.bi_sector += rdev->data_offset; 2403 bio_set_dev(wbio, rdev->bdev); 2404 2405 if (submit_bio_wait(wbio) < 0) 2406 /* failure! */ 2407 ok = rdev_set_badblocks(rdev, sector, 2408 sectors, 0) 2409 && ok; 2410 2411 bio_put(wbio); 2412 sect_to_write -= sectors; 2413 sector += sectors; 2414 sectors = block_sectors; 2415 } 2416 return ok; 2417 } 2418 2419 static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio) 2420 { 2421 int m; 2422 int s = r1_bio->sectors; 2423 for (m = 0; m < conf->raid_disks * 2 ; m++) { 2424 struct md_rdev *rdev = conf->mirrors[m].rdev; 2425 struct bio *bio = r1_bio->bios[m]; 2426 if (bio->bi_end_io == NULL) 2427 continue; 2428 if (!bio->bi_status && 2429 test_bit(R1BIO_MadeGood, &r1_bio->state)) { 2430 rdev_clear_badblocks(rdev, r1_bio->sector, s, 0); 2431 } 2432 if (bio->bi_status && 2433 test_bit(R1BIO_WriteError, &r1_bio->state)) { 2434 if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0)) 2435 md_error(conf->mddev, rdev); 2436 } 2437 } 2438 put_buf(r1_bio); 2439 md_done_sync(conf->mddev, s, 1); 2440 } 2441 2442 static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio) 2443 { 2444 int m, idx; 2445 bool fail = false; 2446 2447 for (m = 0; m < conf->raid_disks * 2 ; m++) 2448 if (r1_bio->bios[m] == IO_MADE_GOOD) { 2449 struct md_rdev *rdev = conf->mirrors[m].rdev; 2450 rdev_clear_badblocks(rdev, 2451 r1_bio->sector, 2452 r1_bio->sectors, 0); 2453 rdev_dec_pending(rdev, conf->mddev); 2454 } else if (r1_bio->bios[m] != NULL) { 2455 /* This drive got a write error. We need to 2456 * narrow down and record precise write 2457 * errors. 
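 * narrow_write_error() retries the failed range one badblock-sized
 * chunk at a time so that only the sectors that really cannot be
 * written are recorded as bad; if even that fails, the whole device
 * is failed via md_error() below.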
2458 */ 2459 fail = true; 2460 if (!narrow_write_error(r1_bio, m)) { 2461 md_error(conf->mddev, 2462 conf->mirrors[m].rdev); 2463 /* an I/O failed, we can't clear the bitmap */ 2464 set_bit(R1BIO_Degraded, &r1_bio->state); 2465 } 2466 rdev_dec_pending(conf->mirrors[m].rdev, 2467 conf->mddev); 2468 } 2469 if (fail) { 2470 spin_lock_irq(&conf->device_lock); 2471 list_add(&r1_bio->retry_list, &conf->bio_end_io_list); 2472 idx = sector_to_idx(r1_bio->sector); 2473 atomic_inc(&conf->nr_queued[idx]); 2474 spin_unlock_irq(&conf->device_lock); 2475 /* 2476 * In case freeze_array() is waiting for condition 2477 * get_unqueued_pending() == extra to be true. 2478 */ 2479 wake_up(&conf->wait_barrier); 2480 md_wakeup_thread(conf->mddev->thread); 2481 } else { 2482 if (test_bit(R1BIO_WriteError, &r1_bio->state)) 2483 close_write(r1_bio); 2484 raid_end_bio_io(r1_bio); 2485 } 2486 } 2487 2488 static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio) 2489 { 2490 struct mddev *mddev = conf->mddev; 2491 struct bio *bio; 2492 struct md_rdev *rdev; 2493 2494 clear_bit(R1BIO_ReadError, &r1_bio->state); 2495 /* we got a read error. Maybe the drive is bad. Maybe just 2496 * the block and we can fix it. 2497 * We freeze all other IO, and try reading the block from 2498 * other devices. When we find one, we re-write 2499 * and check it that fixes the read error. 2500 * This is all done synchronously while the array is 2501 * frozen 2502 */ 2503 2504 bio = r1_bio->bios[r1_bio->read_disk]; 2505 bio_put(bio); 2506 r1_bio->bios[r1_bio->read_disk] = NULL; 2507 2508 rdev = conf->mirrors[r1_bio->read_disk].rdev; 2509 if (mddev->ro == 0 2510 && !test_bit(FailFast, &rdev->flags)) { 2511 freeze_array(conf, 1); 2512 fix_read_error(conf, r1_bio->read_disk, 2513 r1_bio->sector, r1_bio->sectors); 2514 unfreeze_array(conf); 2515 } else if (mddev->ro == 0 && test_bit(FailFast, &rdev->flags)) { 2516 md_error(mddev, rdev); 2517 } else { 2518 r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED; 2519 } 2520 2521 rdev_dec_pending(rdev, conf->mddev); 2522 allow_barrier(conf, r1_bio->sector); 2523 bio = r1_bio->master_bio; 2524 2525 /* Reuse the old r1_bio so that the IO_BLOCKED settings are preserved */ 2526 r1_bio->state = 0; 2527 raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio); 2528 } 2529 2530 static void raid1d(struct md_thread *thread) 2531 { 2532 struct mddev *mddev = thread->mddev; 2533 struct r1bio *r1_bio; 2534 unsigned long flags; 2535 struct r1conf *conf = mddev->private; 2536 struct list_head *head = &conf->retry_list; 2537 struct blk_plug plug; 2538 int idx; 2539 2540 md_check_recovery(mddev); 2541 2542 if (!list_empty_careful(&conf->bio_end_io_list) && 2543 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { 2544 LIST_HEAD(tmp); 2545 spin_lock_irqsave(&conf->device_lock, flags); 2546 if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) 2547 list_splice_init(&conf->bio_end_io_list, &tmp); 2548 spin_unlock_irqrestore(&conf->device_lock, flags); 2549 while (!list_empty(&tmp)) { 2550 r1_bio = list_first_entry(&tmp, struct r1bio, 2551 retry_list); 2552 list_del(&r1_bio->retry_list); 2553 idx = sector_to_idx(r1_bio->sector); 2554 atomic_dec(&conf->nr_queued[idx]); 2555 if (mddev->degraded) 2556 set_bit(R1BIO_Degraded, &r1_bio->state); 2557 if (test_bit(R1BIO_WriteError, &r1_bio->state)) 2558 close_write(r1_bio); 2559 raid_end_bio_io(r1_bio); 2560 } 2561 } 2562 2563 blk_start_plug(&plug); 2564 for (;;) { 2565 2566 flush_pending_writes(conf); 2567 2568 spin_lock_irqsave(&conf->device_lock, flags); 2569 if 
(list_empty(head)) { 2570 spin_unlock_irqrestore(&conf->device_lock, flags); 2571 break; 2572 } 2573 r1_bio = list_entry(head->prev, struct r1bio, retry_list); 2574 list_del(head->prev); 2575 idx = sector_to_idx(r1_bio->sector); 2576 atomic_dec(&conf->nr_queued[idx]); 2577 spin_unlock_irqrestore(&conf->device_lock, flags); 2578 2579 mddev = r1_bio->mddev; 2580 conf = mddev->private; 2581 if (test_bit(R1BIO_IsSync, &r1_bio->state)) { 2582 if (test_bit(R1BIO_MadeGood, &r1_bio->state) || 2583 test_bit(R1BIO_WriteError, &r1_bio->state)) 2584 handle_sync_write_finished(conf, r1_bio); 2585 else 2586 sync_request_write(mddev, r1_bio); 2587 } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) || 2588 test_bit(R1BIO_WriteError, &r1_bio->state)) 2589 handle_write_finished(conf, r1_bio); 2590 else if (test_bit(R1BIO_ReadError, &r1_bio->state)) 2591 handle_read_error(conf, r1_bio); 2592 else 2593 WARN_ON_ONCE(1); 2594 2595 cond_resched(); 2596 if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING)) 2597 md_check_recovery(mddev); 2598 } 2599 blk_finish_plug(&plug); 2600 } 2601 2602 static int init_resync(struct r1conf *conf) 2603 { 2604 int buffs; 2605 2606 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE; 2607 BUG_ON(mempool_initialized(&conf->r1buf_pool)); 2608 2609 return mempool_init(&conf->r1buf_pool, buffs, r1buf_pool_alloc, 2610 r1buf_pool_free, conf->poolinfo); 2611 } 2612 2613 static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf) 2614 { 2615 struct r1bio *r1bio = mempool_alloc(&conf->r1buf_pool, GFP_NOIO); 2616 struct resync_pages *rps; 2617 struct bio *bio; 2618 int i; 2619 2620 for (i = conf->poolinfo->raid_disks; i--; ) { 2621 bio = r1bio->bios[i]; 2622 rps = bio->bi_private; 2623 bio_reset(bio); 2624 bio->bi_private = rps; 2625 } 2626 r1bio->master_bio = NULL; 2627 return r1bio; 2628 } 2629 2630 /* 2631 * perform a "sync" on one "block" 2632 * 2633 * We need to make sure that no normal I/O request - particularly write 2634 * requests - conflict with active sync requests. 2635 * 2636 * This is achieved by tracking pending requests and a 'barrier' concept 2637 * that can be installed to exclude normal IO requests. 2638 */ 2639 2640 static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, 2641 int *skipped) 2642 { 2643 struct r1conf *conf = mddev->private; 2644 struct r1bio *r1_bio; 2645 struct bio *bio; 2646 sector_t max_sector, nr_sectors; 2647 int disk = -1; 2648 int i; 2649 int wonly = -1; 2650 int write_targets = 0, read_targets = 0; 2651 sector_t sync_blocks; 2652 int still_degraded = 0; 2653 int good_sectors = RESYNC_SECTORS; 2654 int min_bad = 0; /* number of sectors that are bad in all devices */ 2655 int idx = sector_to_idx(sector_nr); 2656 int page_idx = 0; 2657 2658 if (!mempool_initialized(&conf->r1buf_pool)) 2659 if (init_resync(conf)) 2660 return 0; 2661 2662 max_sector = mddev->dev_sectors; 2663 if (sector_nr >= max_sector) { 2664 /* If we aborted, we need to abort the 2665 * sync on the 'current' bitmap chunk (there will 2666 * only be one in raid1 resync. 
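 * An aborted sync ends the bitmap chunk with the 'aborted' flag (the
 * final argument to md_bitmap_end_sync()) so the range is retried
 * later; only a completed sync clears conf->fullsync.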
* We can find the current address in mddev->curr_resync
2668 */
2669 if (mddev->curr_resync < max_sector) /* aborted */
2670 md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2671 &sync_blocks, 1);
2672 else /* completed sync */
2673 conf->fullsync = 0;
2674
2675 md_bitmap_close_sync(mddev->bitmap);
2676 close_sync(conf);
2677
2678 if (mddev_is_clustered(mddev)) {
2679 conf->cluster_sync_low = 0;
2680 conf->cluster_sync_high = 0;
2681 }
2682 return 0;
2683 }
2684
2685 if (mddev->bitmap == NULL &&
2686 mddev->recovery_cp == MaxSector &&
2687 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
2688 conf->fullsync == 0) {
2689 *skipped = 1;
2690 return max_sector - sector_nr;
2691 }
2692 /* before building a request, check if we can skip these blocks.
2693 * This call to md_bitmap_start_sync doesn't actually record anything
2694 */
2695 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
2696 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2697 /* We can skip this block, and probably several more */
2698 *skipped = 1;
2699 return sync_blocks;
2700 }
2701
2702 /*
2703 * If there is non-resync activity waiting for a turn, then let it
2704 * through before starting on this new sync request.
2705 */
2706 if (atomic_read(&conf->nr_waiting[idx]))
2707 schedule_timeout_uninterruptible(1);
2708
2709 /* we are incrementing sector_nr below. To be safe, we check against
2710 * sector_nr + two times RESYNC_SECTORS
2711 */
2712
2713 md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
2714 mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
2715
2716
2717 if (raise_barrier(conf, sector_nr))
2718 return 0;
2719
2720 r1_bio = raid1_alloc_init_r1buf(conf);
2721
2722 rcu_read_lock();
2723 /*
2724 * If we get a correctable read error during resync or recovery,
2725 * we might want to read from a different device. So we
2726 * flag all drives that could conceivably be read from for READ,
2727 * and any others (which will be non-In_sync devices) for WRITE.
2728 * If a read fails, we try reading from something else for which READ
2729 * is OK.
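 * In the loop below, In_sync devices are normally set up for READ with
 * end_sync_read, devices still being recovered get end_sync_write, and
 * missing or faulty slots just mark the array as still degraded.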
2730 */ 2731 2732 r1_bio->mddev = mddev; 2733 r1_bio->sector = sector_nr; 2734 r1_bio->state = 0; 2735 set_bit(R1BIO_IsSync, &r1_bio->state); 2736 /* make sure good_sectors won't go across barrier unit boundary */ 2737 good_sectors = align_to_barrier_unit_end(sector_nr, good_sectors); 2738 2739 for (i = 0; i < conf->raid_disks * 2; i++) { 2740 struct md_rdev *rdev; 2741 bio = r1_bio->bios[i]; 2742 2743 rdev = rcu_dereference(conf->mirrors[i].rdev); 2744 if (rdev == NULL || 2745 test_bit(Faulty, &rdev->flags)) { 2746 if (i < conf->raid_disks) 2747 still_degraded = 1; 2748 } else if (!test_bit(In_sync, &rdev->flags)) { 2749 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 2750 bio->bi_end_io = end_sync_write; 2751 write_targets ++; 2752 } else { 2753 /* may need to read from here */ 2754 sector_t first_bad = MaxSector; 2755 int bad_sectors; 2756 2757 if (is_badblock(rdev, sector_nr, good_sectors, 2758 &first_bad, &bad_sectors)) { 2759 if (first_bad > sector_nr) 2760 good_sectors = first_bad - sector_nr; 2761 else { 2762 bad_sectors -= (sector_nr - first_bad); 2763 if (min_bad == 0 || 2764 min_bad > bad_sectors) 2765 min_bad = bad_sectors; 2766 } 2767 } 2768 if (sector_nr < first_bad) { 2769 if (test_bit(WriteMostly, &rdev->flags)) { 2770 if (wonly < 0) 2771 wonly = i; 2772 } else { 2773 if (disk < 0) 2774 disk = i; 2775 } 2776 bio_set_op_attrs(bio, REQ_OP_READ, 0); 2777 bio->bi_end_io = end_sync_read; 2778 read_targets++; 2779 } else if (!test_bit(WriteErrorSeen, &rdev->flags) && 2780 test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && 2781 !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { 2782 /* 2783 * The device is suitable for reading (InSync), 2784 * but has bad block(s) here. Let's try to correct them, 2785 * if we are doing resync or repair. Otherwise, leave 2786 * this device alone for this sync request. 2787 */ 2788 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 2789 bio->bi_end_io = end_sync_write; 2790 write_targets++; 2791 } 2792 } 2793 if (rdev && bio->bi_end_io) { 2794 atomic_inc(&rdev->nr_pending); 2795 bio->bi_iter.bi_sector = sector_nr + rdev->data_offset; 2796 bio_set_dev(bio, rdev->bdev); 2797 if (test_bit(FailFast, &rdev->flags)) 2798 bio->bi_opf |= MD_FAILFAST; 2799 } 2800 } 2801 rcu_read_unlock(); 2802 if (disk < 0) 2803 disk = wonly; 2804 r1_bio->read_disk = disk; 2805 2806 if (read_targets == 0 && min_bad > 0) { 2807 /* These sectors are bad on all InSync devices, so we 2808 * need to mark them bad on all write targets 2809 */ 2810 int ok = 1; 2811 for (i = 0 ; i < conf->raid_disks * 2 ; i++) 2812 if (r1_bio->bios[i]->bi_end_io == end_sync_write) { 2813 struct md_rdev *rdev = conf->mirrors[i].rdev; 2814 ok = rdev_set_badblocks(rdev, sector_nr, 2815 min_bad, 0 2816 ) && ok; 2817 } 2818 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 2819 *skipped = 1; 2820 put_buf(r1_bio); 2821 2822 if (!ok) { 2823 /* Cannot record the badblocks, so need to 2824 * abort the resync. 2825 * If there are multiple read targets, could just 2826 * fail the really bad ones ??? 
2827 */ 2828 conf->recovery_disabled = mddev->recovery_disabled; 2829 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 2830 return 0; 2831 } else 2832 return min_bad; 2833 2834 } 2835 if (min_bad > 0 && min_bad < good_sectors) { 2836 /* only resync enough to reach the next bad->good 2837 * transition */ 2838 good_sectors = min_bad; 2839 } 2840 2841 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0) 2842 /* extra read targets are also write targets */ 2843 write_targets += read_targets-1; 2844 2845 if (write_targets == 0 || read_targets == 0) { 2846 /* There is nowhere to write, so all non-sync 2847 * drives must be failed - so we are finished 2848 */ 2849 sector_t rv; 2850 if (min_bad > 0) 2851 max_sector = sector_nr + min_bad; 2852 rv = max_sector - sector_nr; 2853 *skipped = 1; 2854 put_buf(r1_bio); 2855 return rv; 2856 } 2857 2858 if (max_sector > mddev->resync_max) 2859 max_sector = mddev->resync_max; /* Don't do IO beyond here */ 2860 if (max_sector > sector_nr + good_sectors) 2861 max_sector = sector_nr + good_sectors; 2862 nr_sectors = 0; 2863 sync_blocks = 0; 2864 do { 2865 struct page *page; 2866 int len = PAGE_SIZE; 2867 if (sector_nr + (len>>9) > max_sector) 2868 len = (max_sector - sector_nr) << 9; 2869 if (len == 0) 2870 break; 2871 if (sync_blocks == 0) { 2872 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, 2873 &sync_blocks, still_degraded) && 2874 !conf->fullsync && 2875 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 2876 break; 2877 if ((len >> 9) > sync_blocks) 2878 len = sync_blocks<<9; 2879 } 2880 2881 for (i = 0 ; i < conf->raid_disks * 2; i++) { 2882 struct resync_pages *rp; 2883 2884 bio = r1_bio->bios[i]; 2885 rp = get_resync_pages(bio); 2886 if (bio->bi_end_io) { 2887 page = resync_fetch_page(rp, page_idx); 2888 2889 /* 2890 * won't fail because the vec table is big 2891 * enough to hold all these pages 2892 */ 2893 bio_add_page(bio, page, len, 0); 2894 } 2895 } 2896 nr_sectors += len>>9; 2897 sector_nr += len>>9; 2898 sync_blocks -= (len>>9); 2899 } while (++page_idx < RESYNC_PAGES); 2900 2901 r1_bio->sectors = nr_sectors; 2902 2903 if (mddev_is_clustered(mddev) && 2904 conf->cluster_sync_high < sector_nr + nr_sectors) { 2905 conf->cluster_sync_low = mddev->curr_resync_completed; 2906 conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS; 2907 /* Send resync message */ 2908 md_cluster_ops->resync_info_update(mddev, 2909 conf->cluster_sync_low, 2910 conf->cluster_sync_high); 2911 } 2912 2913 /* For a user-requested sync, we read all readable devices and do a 2914 * compare 2915 */ 2916 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 2917 atomic_set(&r1_bio->remaining, read_targets); 2918 for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) { 2919 bio = r1_bio->bios[i]; 2920 if (bio->bi_end_io == end_sync_read) { 2921 read_targets--; 2922 md_sync_acct_bio(bio, nr_sectors); 2923 if (read_targets == 1) 2924 bio->bi_opf &= ~MD_FAILFAST; 2925 generic_make_request(bio); 2926 } 2927 } 2928 } else { 2929 atomic_set(&r1_bio->remaining, 1); 2930 bio = r1_bio->bios[r1_bio->read_disk]; 2931 md_sync_acct_bio(bio, nr_sectors); 2932 if (read_targets == 1) 2933 bio->bi_opf &= ~MD_FAILFAST; 2934 generic_make_request(bio); 2935 } 2936 return nr_sectors; 2937 } 2938 2939 static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks) 2940 { 2941 if (sectors) 2942 return sectors; 2943 2944 return mddev->dev_sectors; 2945 } 2946 2947 static struct r1conf *setup_conf(struct mddev *mddev) 2948 { 2949 
struct r1conf *conf; 2950 int i; 2951 struct raid1_info *disk; 2952 struct md_rdev *rdev; 2953 int err = -ENOMEM; 2954 2955 conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL); 2956 if (!conf) 2957 goto abort; 2958 2959 conf->nr_pending = kcalloc(BARRIER_BUCKETS_NR, 2960 sizeof(atomic_t), GFP_KERNEL); 2961 if (!conf->nr_pending) 2962 goto abort; 2963 2964 conf->nr_waiting = kcalloc(BARRIER_BUCKETS_NR, 2965 sizeof(atomic_t), GFP_KERNEL); 2966 if (!conf->nr_waiting) 2967 goto abort; 2968 2969 conf->nr_queued = kcalloc(BARRIER_BUCKETS_NR, 2970 sizeof(atomic_t), GFP_KERNEL); 2971 if (!conf->nr_queued) 2972 goto abort; 2973 2974 conf->barrier = kcalloc(BARRIER_BUCKETS_NR, 2975 sizeof(atomic_t), GFP_KERNEL); 2976 if (!conf->barrier) 2977 goto abort; 2978 2979 conf->mirrors = kzalloc(array3_size(sizeof(struct raid1_info), 2980 mddev->raid_disks, 2), 2981 GFP_KERNEL); 2982 if (!conf->mirrors) 2983 goto abort; 2984 2985 conf->tmppage = alloc_page(GFP_KERNEL); 2986 if (!conf->tmppage) 2987 goto abort; 2988 2989 conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL); 2990 if (!conf->poolinfo) 2991 goto abort; 2992 conf->poolinfo->raid_disks = mddev->raid_disks * 2; 2993 err = mempool_init(&conf->r1bio_pool, NR_RAID_BIOS, r1bio_pool_alloc, 2994 rbio_pool_free, conf->poolinfo); 2995 if (err) 2996 goto abort; 2997 2998 err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0); 2999 if (err) 3000 goto abort; 3001 3002 conf->poolinfo->mddev = mddev; 3003 3004 err = -EINVAL; 3005 spin_lock_init(&conf->device_lock); 3006 rdev_for_each(rdev, mddev) { 3007 int disk_idx = rdev->raid_disk; 3008 if (disk_idx >= mddev->raid_disks 3009 || disk_idx < 0) 3010 continue; 3011 if (test_bit(Replacement, &rdev->flags)) 3012 disk = conf->mirrors + mddev->raid_disks + disk_idx; 3013 else 3014 disk = conf->mirrors + disk_idx; 3015 3016 if (disk->rdev) 3017 goto abort; 3018 disk->rdev = rdev; 3019 disk->head_position = 0; 3020 disk->seq_start = MaxSector; 3021 } 3022 conf->raid_disks = mddev->raid_disks; 3023 conf->mddev = mddev; 3024 INIT_LIST_HEAD(&conf->retry_list); 3025 INIT_LIST_HEAD(&conf->bio_end_io_list); 3026 3027 spin_lock_init(&conf->resync_lock); 3028 init_waitqueue_head(&conf->wait_barrier); 3029 3030 bio_list_init(&conf->pending_bio_list); 3031 conf->pending_count = 0; 3032 conf->recovery_disabled = mddev->recovery_disabled - 1; 3033 3034 err = -EIO; 3035 for (i = 0; i < conf->raid_disks * 2; i++) { 3036 3037 disk = conf->mirrors + i; 3038 3039 if (i < conf->raid_disks && 3040 disk[conf->raid_disks].rdev) { 3041 /* This slot has a replacement. 
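 * If the original device is missing, promote the replacement into the
 * primary slot and treat it as a recovering spare; if the original is
 * present it must be In_sync, otherwise the configuration is invalid.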
*/ 3042 if (!disk->rdev) { 3043 /* No original, just make the replacement 3044 * a recovering spare 3045 */ 3046 disk->rdev = 3047 disk[conf->raid_disks].rdev; 3048 disk[conf->raid_disks].rdev = NULL; 3049 } else if (!test_bit(In_sync, &disk->rdev->flags)) 3050 /* Original is not in_sync - bad */ 3051 goto abort; 3052 } 3053 3054 if (!disk->rdev || 3055 !test_bit(In_sync, &disk->rdev->flags)) { 3056 disk->head_position = 0; 3057 if (disk->rdev && 3058 (disk->rdev->saved_raid_disk < 0)) 3059 conf->fullsync = 1; 3060 } 3061 } 3062 3063 err = -ENOMEM; 3064 conf->thread = md_register_thread(raid1d, mddev, "raid1"); 3065 if (!conf->thread) 3066 goto abort; 3067 3068 return conf; 3069 3070 abort: 3071 if (conf) { 3072 mempool_exit(&conf->r1bio_pool); 3073 kfree(conf->mirrors); 3074 safe_put_page(conf->tmppage); 3075 kfree(conf->poolinfo); 3076 kfree(conf->nr_pending); 3077 kfree(conf->nr_waiting); 3078 kfree(conf->nr_queued); 3079 kfree(conf->barrier); 3080 bioset_exit(&conf->bio_split); 3081 kfree(conf); 3082 } 3083 return ERR_PTR(err); 3084 } 3085 3086 static void raid1_free(struct mddev *mddev, void *priv); 3087 static int raid1_run(struct mddev *mddev) 3088 { 3089 struct r1conf *conf; 3090 int i; 3091 struct md_rdev *rdev; 3092 int ret; 3093 bool discard_supported = false; 3094 3095 if (mddev->level != 1) { 3096 pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n", 3097 mdname(mddev), mddev->level); 3098 return -EIO; 3099 } 3100 if (mddev->reshape_position != MaxSector) { 3101 pr_warn("md/raid1:%s: reshape_position set but not supported\n", 3102 mdname(mddev)); 3103 return -EIO; 3104 } 3105 if (mddev_init_writes_pending(mddev) < 0) 3106 return -ENOMEM; 3107 /* 3108 * copy the already verified devices into our private RAID1 3109 * bookkeeping area. 
[whatever we allocate in run(), 3110 * should be freed in raid1_free()] 3111 */ 3112 if (mddev->private == NULL) 3113 conf = setup_conf(mddev); 3114 else 3115 conf = mddev->private; 3116 3117 if (IS_ERR(conf)) 3118 return PTR_ERR(conf); 3119 3120 if (mddev->queue) { 3121 blk_queue_max_write_same_sectors(mddev->queue, 0); 3122 blk_queue_max_write_zeroes_sectors(mddev->queue, 0); 3123 } 3124 3125 rdev_for_each(rdev, mddev) { 3126 if (!mddev->gendisk) 3127 continue; 3128 disk_stack_limits(mddev->gendisk, rdev->bdev, 3129 rdev->data_offset << 9); 3130 if (blk_queue_discard(bdev_get_queue(rdev->bdev))) 3131 discard_supported = true; 3132 } 3133 3134 mddev->degraded = 0; 3135 for (i = 0; i < conf->raid_disks; i++) 3136 if (conf->mirrors[i].rdev == NULL || 3137 !test_bit(In_sync, &conf->mirrors[i].rdev->flags) || 3138 test_bit(Faulty, &conf->mirrors[i].rdev->flags)) 3139 mddev->degraded++; 3140 /* 3141 * RAID1 needs at least one disk in active 3142 */ 3143 if (conf->raid_disks - mddev->degraded < 1) { 3144 ret = -EINVAL; 3145 goto abort; 3146 } 3147 3148 if (conf->raid_disks - mddev->degraded == 1) 3149 mddev->recovery_cp = MaxSector; 3150 3151 if (mddev->recovery_cp != MaxSector) 3152 pr_info("md/raid1:%s: not clean -- starting background reconstruction\n", 3153 mdname(mddev)); 3154 pr_info("md/raid1:%s: active with %d out of %d mirrors\n", 3155 mdname(mddev), mddev->raid_disks - mddev->degraded, 3156 mddev->raid_disks); 3157 3158 /* 3159 * Ok, everything is just fine now 3160 */ 3161 mddev->thread = conf->thread; 3162 conf->thread = NULL; 3163 mddev->private = conf; 3164 set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags); 3165 3166 md_set_array_sectors(mddev, raid1_size(mddev, 0, 0)); 3167 3168 if (mddev->queue) { 3169 if (discard_supported) 3170 blk_queue_flag_set(QUEUE_FLAG_DISCARD, 3171 mddev->queue); 3172 else 3173 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, 3174 mddev->queue); 3175 } 3176 3177 ret = md_integrity_register(mddev); 3178 if (ret) { 3179 md_unregister_thread(&mddev->thread); 3180 goto abort; 3181 } 3182 return 0; 3183 3184 abort: 3185 raid1_free(mddev, conf); 3186 return ret; 3187 } 3188 3189 static void raid1_free(struct mddev *mddev, void *priv) 3190 { 3191 struct r1conf *conf = priv; 3192 3193 mempool_exit(&conf->r1bio_pool); 3194 kfree(conf->mirrors); 3195 safe_put_page(conf->tmppage); 3196 kfree(conf->poolinfo); 3197 kfree(conf->nr_pending); 3198 kfree(conf->nr_waiting); 3199 kfree(conf->nr_queued); 3200 kfree(conf->barrier); 3201 bioset_exit(&conf->bio_split); 3202 kfree(conf); 3203 } 3204 3205 static int raid1_resize(struct mddev *mddev, sector_t sectors) 3206 { 3207 /* no resync is happening, and there is enough space 3208 * on all devices, so we can resize. 3209 * We need to make sure resync covers any new space. 3210 * If the array is shrinking we should possibly wait until 3211 * any io in the removed space completes, but it hardly seems 3212 * worth it. 
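 * Growing the array also pulls recovery_cp back to the old end of the
 * devices and sets MD_RECOVERY_NEEDED, so the newly exposed space gets
 * resynced; the bitmap, if any, is resized first.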
3213 */ 3214 sector_t newsize = raid1_size(mddev, sectors, 0); 3215 if (mddev->external_size && 3216 mddev->array_sectors > newsize) 3217 return -EINVAL; 3218 if (mddev->bitmap) { 3219 int ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0); 3220 if (ret) 3221 return ret; 3222 } 3223 md_set_array_sectors(mddev, newsize); 3224 if (sectors > mddev->dev_sectors && 3225 mddev->recovery_cp > mddev->dev_sectors) { 3226 mddev->recovery_cp = mddev->dev_sectors; 3227 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3228 } 3229 mddev->dev_sectors = sectors; 3230 mddev->resync_max_sectors = sectors; 3231 return 0; 3232 } 3233 3234 static int raid1_reshape(struct mddev *mddev) 3235 { 3236 /* We need to: 3237 * 1/ resize the r1bio_pool 3238 * 2/ resize conf->mirrors 3239 * 3240 * We allocate a new r1bio_pool if we can. 3241 * Then raise a device barrier and wait until all IO stops. 3242 * Then resize conf->mirrors and swap in the new r1bio pool. 3243 * 3244 * At the same time, we "pack" the devices so that all the missing 3245 * devices have the higher raid_disk numbers. 3246 */ 3247 mempool_t newpool, oldpool; 3248 struct pool_info *newpoolinfo; 3249 struct raid1_info *newmirrors; 3250 struct r1conf *conf = mddev->private; 3251 int cnt, raid_disks; 3252 unsigned long flags; 3253 int d, d2; 3254 int ret; 3255 3256 memset(&newpool, 0, sizeof(newpool)); 3257 memset(&oldpool, 0, sizeof(oldpool)); 3258 3259 /* Cannot change chunk_size, layout, or level */ 3260 if (mddev->chunk_sectors != mddev->new_chunk_sectors || 3261 mddev->layout != mddev->new_layout || 3262 mddev->level != mddev->new_level) { 3263 mddev->new_chunk_sectors = mddev->chunk_sectors; 3264 mddev->new_layout = mddev->layout; 3265 mddev->new_level = mddev->level; 3266 return -EINVAL; 3267 } 3268 3269 if (!mddev_is_clustered(mddev)) 3270 md_allow_write(mddev); 3271 3272 raid_disks = mddev->raid_disks + mddev->delta_disks; 3273 3274 if (raid_disks < conf->raid_disks) { 3275 cnt=0; 3276 for (d= 0; d < conf->raid_disks; d++) 3277 if (conf->mirrors[d].rdev) 3278 cnt++; 3279 if (cnt > raid_disks) 3280 return -EBUSY; 3281 } 3282 3283 newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL); 3284 if (!newpoolinfo) 3285 return -ENOMEM; 3286 newpoolinfo->mddev = mddev; 3287 newpoolinfo->raid_disks = raid_disks * 2; 3288 3289 ret = mempool_init(&newpool, NR_RAID_BIOS, r1bio_pool_alloc, 3290 rbio_pool_free, newpoolinfo); 3291 if (ret) { 3292 kfree(newpoolinfo); 3293 return ret; 3294 } 3295 newmirrors = kzalloc(array3_size(sizeof(struct raid1_info), 3296 raid_disks, 2), 3297 GFP_KERNEL); 3298 if (!newmirrors) { 3299 kfree(newpoolinfo); 3300 mempool_exit(&newpool); 3301 return -ENOMEM; 3302 } 3303 3304 freeze_array(conf, 0); 3305 3306 /* ok, everything is stopped */ 3307 oldpool = conf->r1bio_pool; 3308 conf->r1bio_pool = newpool; 3309 3310 for (d = d2 = 0; d < conf->raid_disks; d++) { 3311 struct md_rdev *rdev = conf->mirrors[d].rdev; 3312 if (rdev && rdev->raid_disk != d2) { 3313 sysfs_unlink_rdev(mddev, rdev); 3314 rdev->raid_disk = d2; 3315 sysfs_unlink_rdev(mddev, rdev); 3316 if (sysfs_link_rdev(mddev, rdev)) 3317 pr_warn("md/raid1:%s: cannot register rd%d\n", 3318 mdname(mddev), rdev->raid_disk); 3319 } 3320 if (rdev) 3321 newmirrors[d2++].rdev = rdev; 3322 } 3323 kfree(conf->mirrors); 3324 conf->mirrors = newmirrors; 3325 kfree(conf->poolinfo); 3326 conf->poolinfo = newpoolinfo; 3327 3328 spin_lock_irqsave(&conf->device_lock, flags); 3329 mddev->degraded += (raid_disks - conf->raid_disks); 3330 spin_unlock_irqrestore(&conf->device_lock, flags); 3331 
conf->raid_disks = mddev->raid_disks = raid_disks; 3332 mddev->delta_disks = 0; 3333 3334 unfreeze_array(conf); 3335 3336 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 3337 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3338 md_wakeup_thread(mddev->thread); 3339 3340 mempool_exit(&oldpool); 3341 return 0; 3342 } 3343 3344 static void raid1_quiesce(struct mddev *mddev, int quiesce) 3345 { 3346 struct r1conf *conf = mddev->private; 3347 3348 if (quiesce) 3349 freeze_array(conf, 0); 3350 else 3351 unfreeze_array(conf); 3352 } 3353 3354 static void *raid1_takeover(struct mddev *mddev) 3355 { 3356 /* raid1 can take over: 3357 * raid5 with 2 devices, any layout or chunk size 3358 */ 3359 if (mddev->level == 5 && mddev->raid_disks == 2) { 3360 struct r1conf *conf; 3361 mddev->new_level = 1; 3362 mddev->new_layout = 0; 3363 mddev->new_chunk_sectors = 0; 3364 conf = setup_conf(mddev); 3365 if (!IS_ERR(conf)) { 3366 /* Array must appear to be quiesced */ 3367 conf->array_frozen = 1; 3368 mddev_clear_unsupported_flags(mddev, 3369 UNSUPPORTED_MDDEV_FLAGS); 3370 } 3371 return conf; 3372 } 3373 return ERR_PTR(-EINVAL); 3374 } 3375 3376 static struct md_personality raid1_personality = 3377 { 3378 .name = "raid1", 3379 .level = 1, 3380 .owner = THIS_MODULE, 3381 .make_request = raid1_make_request, 3382 .run = raid1_run, 3383 .free = raid1_free, 3384 .status = raid1_status, 3385 .error_handler = raid1_error, 3386 .hot_add_disk = raid1_add_disk, 3387 .hot_remove_disk= raid1_remove_disk, 3388 .spare_active = raid1_spare_active, 3389 .sync_request = raid1_sync_request, 3390 .resize = raid1_resize, 3391 .size = raid1_size, 3392 .check_reshape = raid1_reshape, 3393 .quiesce = raid1_quiesce, 3394 .takeover = raid1_takeover, 3395 .congested = raid1_congested, 3396 }; 3397 3398 static int __init raid_init(void) 3399 { 3400 return register_md_personality(&raid1_personality); 3401 } 3402 3403 static void raid_exit(void) 3404 { 3405 unregister_md_personality(&raid1_personality); 3406 } 3407 3408 module_init(raid_init); 3409 module_exit(raid_exit); 3410 MODULE_LICENSE("GPL"); 3411 MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD"); 3412 MODULE_ALIAS("md-personality-3"); /* RAID1 */ 3413 MODULE_ALIAS("md-raid1"); 3414 MODULE_ALIAS("md-level-1"); 3415 3416 module_param(max_queued_requests, int, S_IRUGO|S_IWUSR); 3417