/*
 * raid1.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
 *
 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *
 * RAID-1 management functions.
 *
 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
 *
 * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
 * bitmapped intelligence in resync:
 *
 * - bitmap marked during normal i/o
 * - bitmap used to skip nondirty blocks during sync
 *
 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
 * - persistent bitmap code
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/sched/signal.h>

#include <trace/events/block.h>

#include "md.h"
#include "raid1.h"
#include "bitmap.h"

#define UNSUPPORTED_MDDEV_FLAGS		\
	((1L << MD_HAS_JOURNAL) |	\
	 (1L << MD_JOURNAL_CLEAN))

/*
 * Number of guaranteed r1bios in case of extreme VM load:
 */
#define NR_RAID1_BIOS 256

/* When we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error. To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
 */
#define IO_BLOCKED ((struct bio *)1)
/* When we successfully write to a known bad-block, we need to remove the
 * bad-block marking which must be done from process context. So we record
 * the success by setting devs[n].bio to IO_MADE_GOOD
 */
#define IO_MADE_GOOD ((struct bio *)2)

#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)

/* When there are this many requests queued to be written by
 * the raid1 thread, we become 'congested' to provide back-pressure
 * for writeback.
 */
static int max_queued_requests = 1024;

static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
static void lower_barrier(struct r1conf *conf, sector_t sector_nr);

#define raid1_log(md, fmt, args...) \
	do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)
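/*
 * Illustrative note: struct r1bio ends in a flexible 'bios[]' array, so
 * r1bio_pool_alloc() below sizes the allocation with offsetof() rather
 * than sizeof(struct r1bio). For example, with raid_disks == 2 each pool
 * element gets room for bios[0] and bios[1] in a single kzalloc().
 */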
static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	int size = offsetof(struct r1bio, bios[pi->raid_disks]);

	/* allocate a r1bio with room for raid_disks entries in the bios array */
	return kzalloc(size, gfp_flags);
}

static void r1bio_pool_free(void *r1_bio, void *data)
{
	kfree(r1_bio);
}

#define RESYNC_BLOCK_SIZE (64*1024)
#define RESYNC_DEPTH 32
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
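/*
 * Worked example of the arithmetic above (illustrative, assuming 4KiB
 * pages): RESYNC_BLOCK_SIZE = 64KiB, so RESYNC_SECTORS = 128 512-byte
 * sectors and RESYNC_PAGES = 16; RESYNC_WINDOW = 64KiB * 32 = 2MiB
 * (4096 sectors) and CLUSTER_RESYNC_WINDOW = 32MiB.
 */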
static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	struct r1bio *r1_bio;
	struct bio *bio;
	int need_pages;
	int i, j;

	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
	if (!r1_bio)
		return NULL;

	/*
	 * Allocate bios : 1 for reading, n-1 for writing
	 */
	for (j = pi->raid_disks; j--; ) {
		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r1_bio->bios[j] = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them to
	 * the first bio.
	 * If this is a user-requested check/repair, allocate
	 * RESYNC_PAGES for each bio.
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
		need_pages = pi->raid_disks;
	else
		need_pages = 1;
	for (j = 0; j < need_pages; j++) {
		bio = r1_bio->bios[j];
		bio->bi_vcnt = RESYNC_PAGES;

		if (bio_alloc_pages(bio, gfp_flags))
			goto out_free_pages;
	}
	/* If not user-requested, copy the page pointers to all bios */
	if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
		for (i = 0; i < RESYNC_PAGES; i++)
			for (j = 1; j < pi->raid_disks; j++)
				r1_bio->bios[j]->bi_io_vec[i].bv_page =
					r1_bio->bios[0]->bi_io_vec[i].bv_page;
	}

	r1_bio->master_bio = NULL;

	return r1_bio;

out_free_pages:
	while (--j >= 0)
		bio_free_pages(r1_bio->bios[j]);

out_free_bio:
	while (++j < pi->raid_disks)
		bio_put(r1_bio->bios[j]);
	r1bio_pool_free(r1_bio, data);
	return NULL;
}

static void r1buf_pool_free(void *__r1_bio, void *data)
{
	struct pool_info *pi = data;
	int i, j;
	struct r1bio *r1bio = __r1_bio;

	for (i = 0; i < RESYNC_PAGES; i++)
		for (j = pi->raid_disks; j--; ) {
			if (j == 0 ||
			    r1bio->bios[j]->bi_io_vec[i].bv_page !=
			    r1bio->bios[0]->bi_io_vec[i].bv_page)
				safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
		}
	for (i = 0; i < pi->raid_disks; i++)
		bio_put(r1bio->bios[i]);

	r1bio_pool_free(r1bio, data);
}

static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
{
	int i;

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio **bio = r1_bio->bios + i;
		if (!BIO_SPECIAL(*bio))
			bio_put(*bio);
		*bio = NULL;
	}
}

static void free_r1bio(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	put_all_bios(conf, r1_bio);
	mempool_free(r1_bio, conf->r1bio_pool);
}

static void put_buf(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;
	sector_t sect = r1_bio->sector;
	int i;

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio *bio = r1_bio->bios[i];
		if (bio->bi_end_io)
			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
	}

	mempool_free(r1_bio, conf->r1buf_pool);

	lower_barrier(conf, sect);
}

static void reschedule_retry(struct r1bio *r1_bio)
{
	unsigned long flags;
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	int idx;

	idx = sector_to_idx(r1_bio->sector);
	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r1_bio->retry_list, &conf->retry_list);
	atomic_inc(&conf->nr_queued[idx]);
	spin_unlock_irqrestore(&conf->device_lock, flags);

	wake_up(&conf->wait_barrier);
	md_wakeup_thread(mddev->thread);
}
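/*
 * Illustrative: nr_queued[] is kept per barrier bucket, so a retry queued
 * for sector S bumps nr_queued[sector_to_idx(S)]. freeze_array() later
 * compares the per-bucket nr_pending[] sums against these counts to know
 * when every in-flight I/O has either completed or been parked on
 * conf->retry_list.
 */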
/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void call_bio_endio(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;
	int done;
	struct r1conf *conf = r1_bio->mddev->private;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	if (bio->bi_phys_segments) {
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		bio->bi_phys_segments--;
		done = (bio->bi_phys_segments == 0);
		spin_unlock_irqrestore(&conf->device_lock, flags);
		/*
		 * make_request() might be waiting for
		 * bi_phys_segments to decrease
		 */
		wake_up(&conf->wait_barrier);
	} else
		done = 1;

	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
		bio->bi_error = -EIO;

	if (done) {
		bio_endio(bio);
		/*
		 * Wake up any possible resync thread that waits for the device
		 * to go idle.
		 */
		allow_barrier(conf, bi_sector);
	}
}

static void raid_end_bio_io(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;

	/* if nobody has done the final endio yet, do it now */
	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
		pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
			 (bio_data_dir(bio) == WRITE) ? "write" : "read",
			 (unsigned long long) bio->bi_iter.bi_sector,
			 (unsigned long long) bio_end_sector(bio) - 1);

		call_bio_endio(r1_bio);
	}
	free_r1bio(r1_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int disk, struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	conf->mirrors[disk].head_position =
		r1_bio->sector + (r1_bio->sectors);
}

/*
 * Find the disk number which triggered the given bio
 */
static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
{
	int mirror;
	struct r1conf *conf = r1_bio->mddev->private;
	int raid_disks = conf->raid_disks;

	for (mirror = 0; mirror < raid_disks * 2; mirror++)
		if (r1_bio->bios[mirror] == bio)
			break;

	BUG_ON(mirror == raid_disks * 2);
	update_head_pos(mirror, r1_bio);

	return mirror;
}

static void raid1_end_read_request(struct bio *bio)
{
	int uptodate = !bio->bi_error;
	struct r1bio *r1_bio = bio->bi_private;
	struct r1conf *conf = r1_bio->mddev->private;
	struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;

	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(r1_bio->read_disk, r1_bio);

	if (uptodate)
		set_bit(R1BIO_Uptodate, &r1_bio->state);
	else if (test_bit(FailFast, &rdev->flags) &&
		 test_bit(R1BIO_FailFast, &r1_bio->state))
		/* This was a fail-fast read so we definitely
		 * want to retry */
		;
	else {
		/* If all other devices have failed, we want to return
		 * the error upwards rather than fail the last device.
		 * Here we redefine "uptodate" to mean "Don't want to retry"
		 */
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		if (r1_bio->mddev->degraded == conf->raid_disks ||
		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
		     test_bit(In_sync, &rdev->flags)))
			uptodate = 1;
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}

	if (uptodate) {
		raid_end_bio_io(r1_bio);
		rdev_dec_pending(rdev, conf->mddev);
	} else {
		/*
		 * oops, read error:
		 */
		char b[BDEVNAME_SIZE];
		pr_err_ratelimited("md/raid1:%s: %s: rescheduling sector %llu\n",
				   mdname(conf->mddev),
				   bdevname(rdev->bdev, b),
				   (unsigned long long)r1_bio->sector);
		set_bit(R1BIO_ReadError, &r1_bio->state);
		reschedule_retry(r1_bio);
		/* don't drop the reference on read_disk yet */
	}
}

static void close_write(struct r1bio *r1_bio)
{
	/* it really is the end of this request */
	if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
		/* free extra copy of the data pages */
		int i = r1_bio->behind_page_count;
		while (i--)
			safe_put_page(r1_bio->behind_bvecs[i].bv_page);
		kfree(r1_bio->behind_bvecs);
		r1_bio->behind_bvecs = NULL;
	}
	/* clear the bitmap if all writes complete successfully */
	bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
			r1_bio->sectors,
			!test_bit(R1BIO_Degraded, &r1_bio->state),
			test_bit(R1BIO_BehindIO, &r1_bio->state));
	md_write_end(r1_bio->mddev);
}

static void r1_bio_write_done(struct r1bio *r1_bio)
{
	if (!atomic_dec_and_test(&r1_bio->remaining))
		return;

	if (test_bit(R1BIO_WriteError, &r1_bio->state))
		reschedule_retry(r1_bio);
	else {
		close_write(r1_bio);
		if (test_bit(R1BIO_MadeGood, &r1_bio->state))
			reschedule_retry(r1_bio);
		else
			raid_end_bio_io(r1_bio);
	}
}
static void raid1_end_write_request(struct bio *bio)
{
	struct r1bio *r1_bio = bio->bi_private;
	int behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
	struct r1conf *conf = r1_bio->mddev->private;
	struct bio *to_put = NULL;
	int mirror = find_bio_disk(r1_bio, bio);
	struct md_rdev *rdev = conf->mirrors[mirror].rdev;
	bool discard_error;

	discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;

	/*
	 * 'one mirror IO has finished' event handler:
	 */
	if (bio->bi_error && !discard_error) {
		set_bit(WriteErrorSeen, &rdev->flags);
		if (!test_and_set_bit(WantReplacement, &rdev->flags))
			set_bit(MD_RECOVERY_NEEDED, &
				conf->mddev->recovery);

		if (test_bit(FailFast, &rdev->flags) &&
		    (bio->bi_opf & MD_FAILFAST) &&
		    /* We never try FailFast to WriteMostly devices */
		    !test_bit(WriteMostly, &rdev->flags)) {
			md_error(r1_bio->mddev, rdev);
			if (!test_bit(Faulty, &rdev->flags))
				/* This is the only remaining device,
				 * We need to retry the write without
				 * FailFast
				 */
				set_bit(R1BIO_WriteError, &r1_bio->state);
			else {
				/* Finished with this branch */
				r1_bio->bios[mirror] = NULL;
				to_put = bio;
			}
		} else
			set_bit(R1BIO_WriteError, &r1_bio->state);
	} else {
		/*
		 * Set R1BIO_Uptodate in our master bio, so that we
		 * will return a good error code to the higher levels
		 * even if IO on some other mirrored buffer fails.
		 *
		 * The 'master' represents the composite IO operation
		 * to user-side. So if something waits for IO, then it
		 * will wait for the 'master' bio.
		 */
		sector_t first_bad;
		int bad_sectors;

		r1_bio->bios[mirror] = NULL;
		to_put = bio;
		/*
		 * Do not set R1BIO_Uptodate if the current device is
		 * rebuilding or Faulty. This is because we cannot use
		 * such device for properly reading the data back (we could
		 * potentially use it, if the current write would have fallen
		 * before rdev->recovery_offset, but for simplicity we don't
		 * check this here.)
		 */
		if (test_bit(In_sync, &rdev->flags) &&
		    !test_bit(Faulty, &rdev->flags))
			set_bit(R1BIO_Uptodate, &r1_bio->state);

		/* Maybe we can clear some bad blocks. */
		if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
				&first_bad, &bad_sectors) && !discard_error) {
			r1_bio->bios[mirror] = IO_MADE_GOOD;
			set_bit(R1BIO_MadeGood, &r1_bio->state);
		}
	}

	if (behind) {
		if (test_bit(WriteMostly, &rdev->flags))
			atomic_dec(&r1_bio->behind_remaining);

		/*
		 * In behind mode, we ACK the master bio once the I/O
		 * has safely reached all non-writemostly
		 * disks. Setting the Returned bit ensures that this
		 * gets done only once -- we don't ever want to return
		 * -EIO here, instead we'll wait
		 */
		if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
		    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
			/* Maybe we can return now */
			if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
				struct bio *mbio = r1_bio->master_bio;
				pr_debug("raid1: behind end write sectors"
					 " %llu-%llu\n",
					 (unsigned long long) mbio->bi_iter.bi_sector,
					 (unsigned long long) bio_end_sector(mbio) - 1);
				call_bio_endio(r1_bio);
			}
		}
	}
	if (r1_bio->bios[mirror] == NULL)
		rdev_dec_pending(rdev, conf->mddev);

	/*
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	r1_bio_write_done(r1_bio);

	if (to_put)
		bio_put(to_put);
}
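/*
 * Worked example for the helper below (illustrative): if start_sector
 * lies 64 sectors below a multiple of BARRIER_UNIT_SECTOR_SIZE and
 * sectors == 256, the barrier unit ends 64 sectors away, so 64 is
 * returned and the caller splits there; a request that fits entirely
 * inside one unit is simply capped at 'sectors'.
 */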
static sector_t align_to_barrier_unit_end(sector_t start_sector,
					  sector_t sectors)
{
	sector_t len;

	WARN_ON(sectors == 0);
	/*
	 * len is the number of sectors from start_sector to end of the
	 * barrier unit which start_sector belongs to.
	 */
	len = round_up(start_sector + 1, BARRIER_UNIT_SECTOR_SIZE) -
	      start_sector;

	if (len > sectors)
		len = sectors;

	return len;
}

/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */
static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
{
	const sector_t this_sector = r1_bio->sector;
	int sectors;
	int best_good_sectors;
	int best_disk, best_dist_disk, best_pending_disk;
	int has_nonrot_disk;
	int disk;
	sector_t best_dist;
	unsigned int min_pending;
	struct md_rdev *rdev;
	int choose_first;
	int choose_next_idle;

	rcu_read_lock();
	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on, or below the resync window.
	 * We take the first readable disk when above the resync window.
	 */
retry:
	sectors = r1_bio->sectors;
	best_disk = -1;
	best_dist_disk = -1;
	best_dist = MaxSector;
	best_pending_disk = -1;
	min_pending = UINT_MAX;
	best_good_sectors = 0;
	has_nonrot_disk = 0;
	choose_next_idle = 0;
	clear_bit(R1BIO_FailFast, &r1_bio->state);

	if ((conf->mddev->recovery_cp < this_sector + sectors) ||
	    (mddev_is_clustered(conf->mddev) &&
	     md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
					    this_sector + sectors)))
		choose_first = 1;
	else
		choose_first = 0;

	for (disk = 0; disk < conf->raid_disks * 2; disk++) {
		sector_t dist;
		sector_t first_bad;
		int bad_sectors;
		unsigned int pending;
		bool nonrot;

		rdev = rcu_dereference(conf->mirrors[disk].rdev);
		if (r1_bio->bios[disk] == IO_BLOCKED
		    || rdev == NULL
		    || test_bit(Faulty, &rdev->flags))
			continue;
		if (!test_bit(In_sync, &rdev->flags) &&
		    rdev->recovery_offset < this_sector + sectors)
			continue;
		if (test_bit(WriteMostly, &rdev->flags)) {
			/* Don't balance among write-mostly, just
			 * use the first as a last resort */
			if (best_dist_disk < 0) {
				if (is_badblock(rdev, this_sector, sectors,
						&first_bad, &bad_sectors)) {
					if (first_bad <= this_sector)
						/* Cannot use this */
						continue;
					best_good_sectors = first_bad - this_sector;
				} else
					best_good_sectors = sectors;
				best_dist_disk = disk;
				best_pending_disk = disk;
			}
			continue;
		}
		/* This is a reasonable device to use. It might
		 * even be best.
		 */
		if (is_badblock(rdev, this_sector, sectors,
				&first_bad, &bad_sectors)) {
			if (best_dist < MaxSector)
				/* already have a better device */
				continue;
			if (first_bad <= this_sector) {
				/* cannot read here. If this is the 'primary'
				 * device, then we must not read beyond
				 * bad_sectors from another device..
				 */
				bad_sectors -= (this_sector - first_bad);
				if (choose_first && sectors > bad_sectors)
					sectors = bad_sectors;
				if (best_good_sectors > sectors)
					best_good_sectors = sectors;

			} else {
				sector_t good_sectors = first_bad - this_sector;
				if (good_sectors > best_good_sectors) {
					best_good_sectors = good_sectors;
					best_disk = disk;
				}
				if (choose_first)
					break;
			}
			continue;
		} else
			best_good_sectors = sectors;

		if (best_disk >= 0)
			/* At least two disks to choose from so failfast is OK */
			set_bit(R1BIO_FailFast, &r1_bio->state);

		nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
		has_nonrot_disk |= nonrot;
		pending = atomic_read(&rdev->nr_pending);
		dist = abs(this_sector - conf->mirrors[disk].head_position);
		if (choose_first) {
			best_disk = disk;
			break;
		}
		/* Don't change to another disk for sequential reads */
		if (conf->mirrors[disk].next_seq_sect == this_sector
		    || dist == 0) {
			int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
			struct raid1_info *mirror = &conf->mirrors[disk];

			best_disk = disk;
			/*
			 * If the buffered sequential I/O size exceeds the
			 * optimal iosize, check for an idle disk and prefer
			 * it. read_balance could already have chosen an idle
			 * disk before noticing this is sequential I/O on the
			 * current disk. That doesn't matter: the current
			 * disk will go idle and be picked up again once the
			 * first disk's I/O size exceeds the optimal iosize.
			 * This way the first disk gets at least the optimal
			 * iosize per request; the second disk's iosize may
			 * be small, but that is no big deal since by the
			 * time the second disk starts I/O, the first disk
			 * is likely still busy.
			 */
			if (nonrot && opt_iosize > 0 &&
			    mirror->seq_start != MaxSector &&
			    mirror->next_seq_sect > opt_iosize &&
			    mirror->next_seq_sect - opt_iosize >=
			    mirror->seq_start) {
				choose_next_idle = 1;
				continue;
			}
			break;
		}

		if (choose_next_idle)
			continue;

		if (min_pending > pending) {
			min_pending = pending;
			best_pending_disk = disk;
		}

		if (dist < best_dist) {
			best_dist = dist;
			best_dist_disk = disk;
		}
	}

	/*
	 * If all disks are rotational, choose the closest disk. If any
	 * disk is non-rotational, choose the disk with the fewest pending
	 * requests even if that disk is rotational, which may or may not
	 * be optimal for arrays with mixed rotational/non-rotational
	 * disks, depending on the workload.
	 */
	if (best_disk == -1) {
		if (has_nonrot_disk || min_pending == 0)
			best_disk = best_pending_disk;
		else
			best_disk = best_dist_disk;
	}

	if (best_disk >= 0) {
		rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
		if (!rdev)
			goto retry;
		atomic_inc(&rdev->nr_pending);
		sectors = best_good_sectors;

		if (conf->mirrors[best_disk].next_seq_sect != this_sector)
			conf->mirrors[best_disk].seq_start = this_sector;

		conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
	}
	rcu_read_unlock();
	*max_sectors = sectors;

	return best_disk;
}
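/*
 * Illustrative call pattern (see raid1_read_request() further down):
 * the caller passes an r1_bio describing the read; read_balance() may
 * shrink *max_sectors when only part of the range is readable on the
 * chosen disk, in which case the caller issues the first part and loops
 * to pick a disk (possibly a different one) for the remainder.
 */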
static int raid1_congested(struct mddev *mddev, int bits)
{
	struct r1conf *conf = mddev->private;
	int i, ret = 0;

	if ((bits & (1 << WB_async_congested)) &&
	    conf->pending_count >= max_queued_requests)
		return 1;

	rcu_read_lock();
	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);

			BUG_ON(!q);

			/* Note the '|| 1' - when read_balance prefers
			 * non-congested targets, it can be removed
			 */
			if ((bits & (1 << WB_async_congested)) || 1)
				ret |= bdi_congested(q->backing_dev_info, bits);
			else
				ret &= bdi_congested(q->backing_dev_info, bits);
		}
	}
	rcu_read_unlock();
	return ret;
}

static void flush_pending_writes(struct r1conf *conf)
{
	/* Any writes that have been queued but are awaiting
	 * bitmap updates get flushed here.
	 */
	spin_lock_irq(&conf->device_lock);

	if (conf->pending_bio_list.head) {
		struct bio *bio;
		bio = bio_list_get(&conf->pending_bio_list);
		conf->pending_count = 0;
		spin_unlock_irq(&conf->device_lock);
		/* flush any pending bitmap writes to
		 * disk before proceeding w/ I/O */
		bitmap_unplug(conf->mddev->bitmap);
		wake_up(&conf->wait_barrier);

		while (bio) { /* submit pending writes */
			struct bio *next = bio->bi_next;
			struct md_rdev *rdev = (void*)bio->bi_bdev;
			bio->bi_next = NULL;
			bio->bi_bdev = rdev->bdev;
			if (test_bit(Faulty, &rdev->flags)) {
				bio->bi_error = -EIO;
				bio_endio(bio);
			} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
					    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
				/* Just ignore it */
				bio_endio(bio);
			else
				generic_make_request(bio);
			bio = next;
		}
	} else
		spin_unlock_irq(&conf->device_lock);
}

/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down. This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'. When that returns there
 * is no background IO happening. It must arrange to call
 * allow_barrier when it has finished its IO.
 * background IO calls must call raise_barrier. Once that returns
 * there is no normal IO happening. It must arrange to call
 * lower_barrier when the particular background IO completes.
 */
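/*
 * Illustrative: all of the barrier, pending and waiting counters below
 * are indexed by idx = sector_to_idx(sector), so a resync raising the
 * barrier on one barrier unit only blocks regular I/O whose sectors map
 * to the same bucket; I/O on other buckets proceeds concurrently.
 */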
static void raise_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting */
	wait_event_lock_irq(conf->wait_barrier,
			    !atomic_read(&conf->nr_waiting[idx]),
			    conf->resync_lock);

	/* block any new IO from starting */
	atomic_inc(&conf->barrier[idx]);
	/*
	 * In raise_barrier() we firstly increase conf->barrier[idx] then
	 * check conf->nr_pending[idx]. In _wait_barrier() we firstly
	 * increase conf->nr_pending[idx] then check conf->barrier[idx].
	 * A memory barrier here to make sure conf->nr_pending[idx] won't
	 * be fetched before conf->barrier[idx] is increased. Otherwise
	 * there will be a race between raise_barrier() and _wait_barrier().
	 */
	smp_mb__after_atomic();

	/* For these conditions we must wait:
	 * A: while the array is in frozen state
	 * B: while conf->nr_pending[idx] is not 0, meaning regular I/O
	 *    exists in the corresponding I/O barrier bucket.
	 * C: while conf->barrier[idx] >= RESYNC_DEPTH, meaning we have
	 *    reached the max resync count allowed on the current I/O
	 *    barrier bucket.
	 */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->array_frozen &&
			    !atomic_read(&conf->nr_pending[idx]) &&
			    atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH,
			    conf->resync_lock);

	atomic_inc(&conf->nr_pending[idx]);
	spin_unlock_irq(&conf->resync_lock);
}

static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	BUG_ON(atomic_read(&conf->barrier[idx]) <= 0);

	atomic_dec(&conf->barrier[idx]);
	atomic_dec(&conf->nr_pending[idx]);
	wake_up(&conf->wait_barrier);
}
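/*
 * Illustrative: raise_barrier() and _wait_barrier() form a Dekker-style
 * pair. Each side first increments its own counter, issues a full memory
 * barrier, then reads the other side's counter, so at least one of the
 * two is guaranteed to observe the other and take the slow path.
 */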
static void _wait_barrier(struct r1conf *conf, int idx)
{
	/*
	 * We need to increase conf->nr_pending[idx] very early here,
	 * then raise_barrier() can be blocked when it waits for
	 * conf->nr_pending[idx] to be 0. Then we can avoid holding
	 * conf->resync_lock when there is no barrier raised in same
	 * barrier unit bucket. Also if the array is frozen, I/O
	 * should be blocked until array is unfrozen.
	 */
	atomic_inc(&conf->nr_pending[idx]);
	/*
	 * In _wait_barrier() we firstly increase conf->nr_pending[idx], then
	 * check conf->barrier[idx]. In raise_barrier() we firstly increase
	 * conf->barrier[idx], then check conf->nr_pending[idx]. A memory
	 * barrier is necessary here to make sure conf->barrier[idx] won't be
	 * fetched before conf->nr_pending[idx] is increased. Otherwise there
	 * will be a race between _wait_barrier() and raise_barrier().
	 */
	smp_mb__after_atomic();

	/*
	 * Don't worry about checking two atomic_t variables at same time
	 * here. If the array is frozen (conf->array_frozen is 1) while we
	 * check conf->barrier[idx], and conf->barrier[idx] is 0, it is
	 * safe to return and make the I/O continue. Because the array is
	 * frozen, all I/O returned here will eventually complete or be
	 * queued, no race will happen. See code comment in freeze_array().
	 */
	if (!READ_ONCE(conf->array_frozen) &&
	    !atomic_read(&conf->barrier[idx]))
		return;

	/*
	 * After holding conf->resync_lock, conf->nr_pending[idx]
	 * should be decreased before waiting for barrier to drop.
	 * Otherwise, we may encounter a race condition because
	 * raise_barrier() might be waiting for conf->nr_pending[idx]
	 * to be 0 at same time.
	 */
	spin_lock_irq(&conf->resync_lock);
	atomic_inc(&conf->nr_waiting[idx]);
	atomic_dec(&conf->nr_pending[idx]);
	/*
	 * In case freeze_array() is waiting for
	 * get_unqueued_pending() == extra
	 */
	wake_up(&conf->wait_barrier);
	/* Wait for the barrier in same barrier unit bucket to drop. */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->array_frozen &&
			    !atomic_read(&conf->barrier[idx]),
			    conf->resync_lock);
	atomic_inc(&conf->nr_pending[idx]);
	atomic_dec(&conf->nr_waiting[idx]);
	spin_unlock_irq(&conf->resync_lock);
}

static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	/*
	 * Very similar to _wait_barrier(). The difference is, for read
	 * I/O we don't need to wait for sync I/O, but if the whole array
	 * is frozen, the read I/O still has to wait until the array is
	 * unfrozen. Since there is no ordering requirement with
	 * conf->barrier[idx] here, memory barrier is unnecessary as well.
	 */
	atomic_inc(&conf->nr_pending[idx]);

	if (!READ_ONCE(conf->array_frozen))
		return;

	spin_lock_irq(&conf->resync_lock);
	atomic_inc(&conf->nr_waiting[idx]);
	atomic_dec(&conf->nr_pending[idx]);
	/*
	 * In case freeze_array() is waiting for
	 * get_unqueued_pending() == extra
	 */
	wake_up(&conf->wait_barrier);
	/* Wait for array to be unfrozen */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->array_frozen,
			    conf->resync_lock);
	atomic_inc(&conf->nr_pending[idx]);
	atomic_dec(&conf->nr_waiting[idx]);
	spin_unlock_irq(&conf->resync_lock);
}

static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	_wait_barrier(conf, idx);
}

static void wait_all_barriers(struct r1conf *conf)
{
	int idx;

	for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
		_wait_barrier(conf, idx);
}

static void _allow_barrier(struct r1conf *conf, int idx)
{
	atomic_dec(&conf->nr_pending[idx]);
	wake_up(&conf->wait_barrier);
}

static void allow_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	_allow_barrier(conf, idx);
}

static void allow_all_barriers(struct r1conf *conf)
{
	int idx;

	for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
		_allow_barrier(conf, idx);
}

/* conf->resync_lock should be held */
static int get_unqueued_pending(struct r1conf *conf)
{
	int idx, ret;

	for (ret = 0, idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
		ret += atomic_read(&conf->nr_pending[idx]) -
			atomic_read(&conf->nr_queued[idx]);

	return ret;
}
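/*
 * Illustrative: in handle_read_error() the failed request still counts
 * in nr_pending[] but not in nr_queued[], so freeze_array(conf, 1) in
 * that path waits for get_unqueued_pending() == 1 rather than 0; see
 * the comment in freeze_array() below.
 */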
static void freeze_array(struct r1conf *conf, int extra)
{
	/* Stop sync I/O and normal I/O and wait for everything to
	 * go quiet.
	 * This is called in two situations:
	 * 1) management command handlers (reshape, remove disk, quiesce).
	 * 2) one normal I/O request failed.
	 *
	 * After array_frozen is set to 1, new sync IO will be blocked at
	 * raise_barrier(), and new normal I/O will be blocked at
	 * _wait_barrier() or wait_read_barrier(). The in-flight I/Os will
	 * either complete or be queued. When everything goes quiet, there
	 * are only queued I/Os left.
	 *
	 * Every in-flight I/O contributes to a conf->nr_pending[idx], idx
	 * is the barrier bucket index which this I/O request hits. When
	 * all sync and normal I/O are queued, sum of all conf->nr_pending[]
	 * will match sum of all conf->nr_queued[]. But normal I/O failure
	 * is an exception: in handle_read_error() we may call freeze_array()
	 * before trying to fix the read error. In this case, the error read
	 * I/O is not queued, so get_unqueued_pending() == 1.
	 *
	 * Therefore before this function returns, we need to wait until
	 * get_unqueued_pending(conf) gets equal to extra. For
	 * normal I/O context, extra is 1; in all other situations extra is 0.
	 */
	spin_lock_irq(&conf->resync_lock);
	conf->array_frozen = 1;
	raid1_log(conf->mddev, "wait freeze");
	wait_event_lock_irq_cmd(
		conf->wait_barrier,
		get_unqueued_pending(conf) == extra,
		conf->resync_lock,
		flush_pending_writes(conf));
	spin_unlock_irq(&conf->resync_lock);
}
static void unfreeze_array(struct r1conf *conf)
{
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	conf->array_frozen = 0;
	spin_unlock_irq(&conf->resync_lock);
	wake_up(&conf->wait_barrier);
}

/* duplicate the data pages for behind I/O
 */
static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio)
{
	int i;
	struct bio_vec *bvec;
	struct bio_vec *bvecs = kzalloc(bio->bi_vcnt * sizeof(struct bio_vec),
					GFP_NOIO);
	if (unlikely(!bvecs))
		return;

	bio_for_each_segment_all(bvec, bio, i) {
		bvecs[i] = *bvec;
		bvecs[i].bv_page = alloc_page(GFP_NOIO);
		if (unlikely(!bvecs[i].bv_page))
			goto do_sync_io;
		memcpy(kmap(bvecs[i].bv_page) + bvec->bv_offset,
		       kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
		kunmap(bvecs[i].bv_page);
		kunmap(bvec->bv_page);
	}
	r1_bio->behind_bvecs = bvecs;
	r1_bio->behind_page_count = bio->bi_vcnt;
	set_bit(R1BIO_BehindIO, &r1_bio->state);
	return;

do_sync_io:
	for (i = 0; i < bio->bi_vcnt; i++)
		if (bvecs[i].bv_page)
			put_page(bvecs[i].bv_page);
	kfree(bvecs);
	pr_debug("%dB behind alloc failed, doing sync I/O\n",
		 bio->bi_iter.bi_size);
}
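/*
 * Illustrative: with the private copy above in place, the master bio can
 * be completed early (call_bio_endio()) while slow write-mostly devices
 * are still writing behind; close_write() frees the copied pages once
 * the request fully completes on all devices.
 */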
struct raid1_plug_cb {
	struct blk_plug_cb	cb;
	struct bio_list		pending;
	int			pending_cnt;
};

static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
						  cb);
	struct mddev *mddev = plug->cb.data;
	struct r1conf *conf = mddev->private;
	struct bio *bio;

	if (from_schedule || current->bio_list) {
		spin_lock_irq(&conf->device_lock);
		bio_list_merge(&conf->pending_bio_list, &plug->pending);
		conf->pending_count += plug->pending_cnt;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_barrier);
		md_wakeup_thread(mddev->thread);
		kfree(plug);
		return;
	}

	/* we aren't scheduling, so we can do the write-out directly */
	bio = bio_list_get(&plug->pending);
	bitmap_unplug(mddev->bitmap);
	wake_up(&conf->wait_barrier);

	while (bio) { /* submit pending writes */
		struct bio *next = bio->bi_next;
		struct md_rdev *rdev = (void*)bio->bi_bdev;
		bio->bi_next = NULL;
		bio->bi_bdev = rdev->bdev;
		if (test_bit(Faulty, &rdev->flags)) {
			bio->bi_error = -EIO;
			bio_endio(bio);
		} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
				    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
			/* Just ignore it */
			bio_endio(bio);
		else
			generic_make_request(bio);
		bio = next;
	}
	kfree(plug);
}

static inline struct r1bio *
alloc_r1bio(struct mddev *mddev, struct bio *bio, sector_t sectors_handled)
{
	struct r1conf *conf = mddev->private;
	struct r1bio *r1_bio;

	r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

	r1_bio->master_bio = bio;
	r1_bio->sectors = bio_sectors(bio) - sectors_handled;
	r1_bio->state = 0;
	r1_bio->mddev = mddev;
	r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;

	return r1_bio;
}
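/*
 * Note (illustrative): mempool_alloc() with GFP_NOIO sleeps until an
 * element is returned to the pool rather than failing, which is why no
 * NULL check is needed above; forward progress is backed by the
 * NR_RAID1_BIOS elements reserved when the pool was created.
 */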
static void raid1_read_request(struct mddev *mddev, struct bio *bio)
{
	struct r1conf *conf = mddev->private;
	struct raid1_info *mirror;
	struct r1bio *r1_bio;
	struct bio *read_bio;
	struct bitmap *bitmap = mddev->bitmap;
	const int op = bio_op(bio);
	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
	int sectors_handled;
	int max_sectors;
	int rdisk;

	/*
	 * Still need barrier for READ in case the whole
	 * array is frozen.
	 */
	wait_read_barrier(conf, bio->bi_iter.bi_sector);

	r1_bio = alloc_r1bio(mddev, bio, 0);

	/*
	 * We might need to issue multiple reads to different
	 * devices if there are bad blocks around, so we keep
	 * track of the number of reads in bio->bi_phys_segments.
	 * If this is 0, there is only one r1_bio and no locking
	 * will be needed when requests complete. If it is
	 * non-zero, then it is the number of not-completed requests.
	 */
	bio->bi_phys_segments = 0;
	bio_clear_flag(bio, BIO_SEG_VALID);

	/*
	 * make_request() can abort the operation when read-ahead is being
	 * used and no empty request is available.
	 */
read_again:
	rdisk = read_balance(conf, r1_bio, &max_sectors);

	if (rdisk < 0) {
		/* couldn't find anywhere to read from */
		raid_end_bio_io(r1_bio);
		return;
	}
	mirror = conf->mirrors + rdisk;

	if (test_bit(WriteMostly, &mirror->rdev->flags) &&
	    bitmap) {
		/*
		 * Reading from a write-mostly device must take care not to
		 * over-take any writes that are 'behind'
		 */
		raid1_log(mddev, "wait behind writes");
		wait_event(bitmap->behind_wait,
			   atomic_read(&bitmap->behind_writes) == 0);
	}
	r1_bio->read_disk = rdisk;

	read_bio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
	bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
		 max_sectors);

	r1_bio->bios[rdisk] = read_bio;

	read_bio->bi_iter.bi_sector = r1_bio->sector +
		mirror->rdev->data_offset;
	read_bio->bi_bdev = mirror->rdev->bdev;
	read_bio->bi_end_io = raid1_end_read_request;
	bio_set_op_attrs(read_bio, op, do_sync);
	if (test_bit(FailFast, &mirror->rdev->flags) &&
	    test_bit(R1BIO_FailFast, &r1_bio->state))
		read_bio->bi_opf |= MD_FAILFAST;
	read_bio->bi_private = r1_bio;

	if (mddev->gendisk)
		trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
				      read_bio, disk_devt(mddev->gendisk),
				      r1_bio->sector);

	if (max_sectors < r1_bio->sectors) {
		/*
		 * could not read all from this device, so we will need another
		 * r1_bio.
		 */
		sectors_handled = (r1_bio->sector + max_sectors
				   - bio->bi_iter.bi_sector);
		r1_bio->sectors = max_sectors;
		spin_lock_irq(&conf->device_lock);
		if (bio->bi_phys_segments == 0)
			bio->bi_phys_segments = 2;
		else
			bio->bi_phys_segments++;
		spin_unlock_irq(&conf->device_lock);

		/*
		 * Cannot call generic_make_request directly as that will be
		 * queued in __make_request and subsequent mempool_alloc might
		 * block waiting for it. So hand bio over to raid1d.
		 */
		reschedule_retry(r1_bio);

		r1_bio = alloc_r1bio(mddev, bio, sectors_handled);
		goto read_again;
	} else
		generic_make_request(read_bio);
}
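/*
 * Illustrative: the read path above and the write path below both reuse
 * bio->bi_phys_segments as a count of outstanding sub-requests when a
 * bio has to be split around bad blocks; call_bio_endio() decrements it
 * and only completes the master bio once the count drops to zero.
 */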
static void raid1_write_request(struct mddev *mddev, struct bio *bio)
{
	struct r1conf *conf = mddev->private;
	struct r1bio *r1_bio;
	int i, disks;
	struct bitmap *bitmap = mddev->bitmap;
	unsigned long flags;
	struct md_rdev *blocked_rdev;
	struct blk_plug_cb *cb;
	struct raid1_plug_cb *plug = NULL;
	int first_clone;
	int sectors_handled;
	int max_sectors;

	/*
	 * Register the new request and wait if the reconstruction
	 * thread has put up a barrier for new requests.
	 * Continue immediately if no resync is active currently.
	 */

	md_write_start(mddev, bio); /* wait on superblock update early */

	if ((bio_end_sector(bio) > mddev->suspend_lo &&
	    bio->bi_iter.bi_sector < mddev->suspend_hi) ||
	    (mddev_is_clustered(mddev) &&
	     md_cluster_ops->area_resyncing(mddev, WRITE,
		     bio->bi_iter.bi_sector, bio_end_sector(bio)))) {

		/*
		 * As the suspend_* range is controlled by userspace, we want
		 * an interruptible wait.
		 */
		DEFINE_WAIT(w);
		for (;;) {
			flush_signals(current);
			prepare_to_wait(&conf->wait_barrier,
					&w, TASK_INTERRUPTIBLE);
			if (bio_end_sector(bio) <= mddev->suspend_lo ||
			    bio->bi_iter.bi_sector >= mddev->suspend_hi ||
			    (mddev_is_clustered(mddev) &&
			     !md_cluster_ops->area_resyncing(mddev, WRITE,
				     bio->bi_iter.bi_sector,
				     bio_end_sector(bio))))
				break;
			schedule();
		}
		finish_wait(&conf->wait_barrier, &w);
	}
	wait_barrier(conf, bio->bi_iter.bi_sector);

	r1_bio = alloc_r1bio(mddev, bio, 0);

	/* We might need to issue multiple writes to different
	 * devices if there are bad blocks around, so we keep
	 * track of the number of writes in bio->bi_phys_segments.
	 * If this is 0, there is only one r1_bio and no locking
	 * will be needed when requests complete. If it is
	 * non-zero, then it is the number of not-completed requests.
	 */
	bio->bi_phys_segments = 0;
	bio_clear_flag(bio, BIO_SEG_VALID);

	if (conf->pending_count >= max_queued_requests) {
		md_wakeup_thread(mddev->thread);
		raid1_log(mddev, "wait queued");
		wait_event(conf->wait_barrier,
			   conf->pending_count < max_queued_requests);
	}
	/* first select target devices under rcu_lock and
	 * inc refcount on their rdev. Record them by setting
	 * bios[x] to bio
	 * If there are known/acknowledged bad blocks on any device on
	 * which we have seen a write error, we want to avoid writing those
	 * blocks.
	 * This potentially requires several writes to write around
	 * the bad blocks. Each set of writes gets its own r1bio
	 * with a set of bios attached.
	 */

	disks = conf->raid_disks * 2;
retry_write:
	blocked_rdev = NULL;
	rcu_read_lock();
	max_sectors = r1_bio->sectors;
	for (i = 0; i < disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
			atomic_inc(&rdev->nr_pending);
			blocked_rdev = rdev;
			break;
		}
		r1_bio->bios[i] = NULL;
		if (!rdev || test_bit(Faulty, &rdev->flags)) {
			if (i < conf->raid_disks)
				set_bit(R1BIO_Degraded, &r1_bio->state);
			continue;
		}

		atomic_inc(&rdev->nr_pending);
		if (test_bit(WriteErrorSeen, &rdev->flags)) {
			sector_t first_bad;
			int bad_sectors;
			int is_bad;

			is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
					     &first_bad, &bad_sectors);
			if (is_bad < 0) {
				/* mustn't write here until the bad block is
				 * acknowledged */
				set_bit(BlockedBadBlocks, &rdev->flags);
				blocked_rdev = rdev;
				break;
			}
			if (is_bad && first_bad <= r1_bio->sector) {
				/* Cannot write here at all */
				bad_sectors -= (r1_bio->sector - first_bad);
				if (bad_sectors < max_sectors)
					/* mustn't write more than bad_sectors
					 * to other devices yet
					 */
					max_sectors = bad_sectors;
				rdev_dec_pending(rdev, mddev);
				/* We don't set R1BIO_Degraded as that
				 * only applies if the disk is
				 * missing, so it might be re-added,
				 * and we want to know to recover this
				 * chunk.
				 * In this case the device is here,
				 * and the fact that this chunk is not
				 * in-sync is recorded in the bad
				 * block log
				 */
				continue;
			}
			if (is_bad) {
				int good_sectors = first_bad - r1_bio->sector;
				if (good_sectors < max_sectors)
					max_sectors = good_sectors;
			}
		}
		r1_bio->bios[i] = bio;
	}
	rcu_read_unlock();

	if (unlikely(blocked_rdev)) {
		/* Wait for this device to become unblocked */
		int j;

		for (j = 0; j < i; j++)
			if (r1_bio->bios[j])
				rdev_dec_pending(conf->mirrors[j].rdev, mddev);
		r1_bio->state = 0;
		allow_barrier(conf, bio->bi_iter.bi_sector);
		raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
		md_wait_for_blocked_rdev(blocked_rdev, mddev);
		wait_barrier(conf, bio->bi_iter.bi_sector);
		goto retry_write;
	}

	if (max_sectors < r1_bio->sectors) {
		/* We are splitting this write into multiple parts, so
		 * we need to prepare for allocating another r1_bio.
		 */
		r1_bio->sectors = max_sectors;
		spin_lock_irq(&conf->device_lock);
		if (bio->bi_phys_segments == 0)
			bio->bi_phys_segments = 2;
		else
			bio->bi_phys_segments++;
		spin_unlock_irq(&conf->device_lock);
	}
	sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector;

	atomic_set(&r1_bio->remaining, 1);
	atomic_set(&r1_bio->behind_remaining, 0);

	first_clone = 1;
	for (i = 0; i < disks; i++) {
		struct bio *mbio = NULL;
		sector_t offset;
		if (!r1_bio->bios[i])
			continue;

		offset = r1_bio->sector - bio->bi_iter.bi_sector;

		if (first_clone) {
			/* do behind I/O ?
			 * Not if there are too many, or cannot
			 * allocate memory, or a reader on WriteMostly
			 * is waiting for behind writes to flush */
			if (bitmap &&
			    (atomic_read(&bitmap->behind_writes)
			     < mddev->bitmap_info.max_write_behind) &&
			    !waitqueue_active(&bitmap->behind_wait)) {
				mbio = bio_clone_bioset_partial(bio, GFP_NOIO,
								mddev->bio_set,
								offset << 9,
								max_sectors << 9);
				alloc_behind_pages(mbio, r1_bio);
			}

			bitmap_startwrite(bitmap, r1_bio->sector,
					  r1_bio->sectors,
					  test_bit(R1BIO_BehindIO,
						   &r1_bio->state));
			first_clone = 0;
		}

		if (!mbio) {
			if (r1_bio->behind_bvecs)
				mbio = bio_clone_bioset_partial(bio, GFP_NOIO,
								mddev->bio_set,
								offset << 9,
								max_sectors << 9);
			else {
				mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
				bio_trim(mbio, offset, max_sectors);
			}
		}

		if (r1_bio->behind_bvecs) {
			struct bio_vec *bvec;
			int j;

			/*
			 * We trimmed the bio, so _all is legit
			 */
			bio_for_each_segment_all(bvec, mbio, j)
				bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;
			if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
				atomic_inc(&r1_bio->behind_remaining);
		}

		r1_bio->bios[i] = mbio;

		mbio->bi_iter.bi_sector = (r1_bio->sector +
					   conf->mirrors[i].rdev->data_offset);
		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
		mbio->bi_end_io = raid1_end_write_request;
		mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
		if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
		    !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
		    conf->raid_disks - mddev->degraded > 1)
			mbio->bi_opf |= MD_FAILFAST;
		mbio->bi_private = r1_bio;

		atomic_inc(&r1_bio->remaining);

		if (mddev->gendisk)
			trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev),
					      mbio, disk_devt(mddev->gendisk),
					      r1_bio->sector);
		/* flush_pending_writes() needs access to the rdev so...*/
		mbio->bi_bdev = (void*)conf->mirrors[i].rdev;

		cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
		if (cb)
			plug = container_of(cb, struct raid1_plug_cb, cb);
		else
			plug = NULL;
		spin_lock_irqsave(&conf->device_lock, flags);
		if (plug) {
			bio_list_add(&plug->pending, mbio);
			plug->pending_cnt++;
		} else {
			bio_list_add(&conf->pending_bio_list, mbio);
			conf->pending_count++;
		}
		spin_unlock_irqrestore(&conf->device_lock, flags);
		if (!plug)
			md_wakeup_thread(mddev->thread);
	}
	/* Mustn't call r1_bio_write_done before this next test,
	 * as it could result in the bio being freed.
	 */
	if (sectors_handled < bio_sectors(bio)) {
		r1_bio_write_done(r1_bio);
		/* We need another r1_bio. It has already been counted
		 * in bio->bi_phys_segments
		 */
		r1_bio = alloc_r1bio(mddev, bio, sectors_handled);
		goto retry_write;
	}

	r1_bio_write_done(r1_bio);

	/* In case raid1d snuck in to freeze_array */
	wake_up(&conf->wait_barrier);
}

static void raid1_make_request(struct mddev *mddev, struct bio *bio)
{
	struct bio *split;
	sector_t sectors;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		md_flush_request(mddev, bio);
		return;
	}

	/* if bio exceeds barrier unit boundary, split it */
	do {
		sectors = align_to_barrier_unit_end(
				bio->bi_iter.bi_sector, bio_sectors(bio));
		if (sectors < bio_sectors(bio)) {
			split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
			bio_chain(split, bio);
		} else {
			split = bio;
		}

		if (bio_data_dir(split) == READ)
			raid1_read_request(mddev, split);
		else
			raid1_write_request(mddev, split);
	} while (split != bio);
}
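/*
 * Worked example for the loop above (illustrative): a 256-sector write
 * beginning 64 sectors before a barrier-unit boundary is first split
 * into a 64-sector bio (chained to the original) and then the remaining
 * 192 sectors, so no r1_bio ever spans two barrier buckets and each part
 * waits only on its own bucket.
 */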
static void raid1_status(struct seq_file *seq, struct mddev *mddev)
{
	struct r1conf *conf = mddev->private;
	int i;

	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
		   conf->raid_disks - mddev->degraded);
	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		seq_printf(seq, "%s",
			   rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
	}
	rcu_read_unlock();
	seq_printf(seq, "]");
}

static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
{
	char b[BDEVNAME_SIZE];
	struct r1conf *conf = mddev->private;
	unsigned long flags;

	/*
	 * If it is not operational, then we have already marked it as dead;
	 * else if it is the last working disk, ignore the error and let the
	 * next level up know;
	 * else mark the drive as failed.
	 */
	spin_lock_irqsave(&conf->device_lock, flags);
	if (test_bit(In_sync, &rdev->flags)
	    && (conf->raid_disks - mddev->degraded) == 1) {
		/*
		 * Don't fail the drive, act as though we were just a
		 * normal single drive.
		 * However don't try a recovery from this drive as
		 * it is very likely to fail.
		 */
		conf->recovery_disabled = mddev->recovery_disabled;
		spin_unlock_irqrestore(&conf->device_lock, flags);
		return;
	}
	set_bit(Blocked, &rdev->flags);
	if (test_and_clear_bit(In_sync, &rdev->flags)) {
		mddev->degraded++;
		set_bit(Faulty, &rdev->flags);
	} else
		set_bit(Faulty, &rdev->flags);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	/*
	 * if recovery is running, make sure it aborts.
	 */
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	set_mask_bits(&mddev->sb_flags, 0,
		      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
	pr_crit("md/raid1:%s: Disk failure on %s, disabling device.\n"
		"md/raid1:%s: Operation continuing on %d devices.\n",
		mdname(mddev), bdevname(rdev->bdev, b),
		mdname(mddev), conf->raid_disks - mddev->degraded);
}

static void print_conf(struct r1conf *conf)
{
	int i;

	pr_debug("RAID1 conf printout:\n");
	if (!conf) {
		pr_debug("(!conf)\n");
		return;
	}
	pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
		 conf->raid_disks);

	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev)
			pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
				 i, !test_bit(In_sync, &rdev->flags),
				 !test_bit(Faulty, &rdev->flags),
				 bdevname(rdev->bdev, b));
	}
	rcu_read_unlock();
}

static void close_sync(struct r1conf *conf)
{
	wait_all_barriers(conf);
	allow_all_barriers(conf);

	mempool_destroy(conf->r1buf_pool);
	conf->r1buf_pool = NULL;
}
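/*
 * Illustrative: the wait_all_barriers()/allow_all_barriers() pair above
 * takes and immediately drops an nr_pending reference in every bucket,
 * which blocks until each bucket's barrier count has dropped to zero;
 * only then is it safe to destroy the resync buffer pool.
 */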
static int raid1_spare_active(struct mddev *mddev)
{
	int i;
	struct r1conf *conf = mddev->private;
	int count = 0;
	unsigned long flags;

	/*
	 * Find all failed disks within the RAID1 configuration
	 * and mark them readable.
	 * Called under mddev lock, so rcu protection not needed.
	 * device_lock used to avoid races with raid1_end_read_request
	 * which expects 'In_sync' flags and ->degraded to be consistent.
	 */
	spin_lock_irqsave(&conf->device_lock, flags);
	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = conf->mirrors[i].rdev;
		struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
		if (repl
		    && !test_bit(Candidate, &repl->flags)
		    && repl->recovery_offset == MaxSector
		    && !test_bit(Faulty, &repl->flags)
		    && !test_and_set_bit(In_sync, &repl->flags)) {
			/* replacement has just become active */
			if (!rdev ||
			    !test_and_clear_bit(In_sync, &rdev->flags))
				count++;
			if (rdev) {
				/* Replaced device not technically
				 * faulty, but we need to be sure
				 * it gets removed and never re-added
				 */
				set_bit(Faulty, &rdev->flags);
				sysfs_notify_dirent_safe(
					rdev->sysfs_state);
			}
		}
		if (rdev
		    && rdev->recovery_offset == MaxSector
		    && !test_bit(Faulty, &rdev->flags)
		    && !test_and_set_bit(In_sync, &rdev->flags)) {
			count++;
			sysfs_notify_dirent_safe(rdev->sysfs_state);
		}
	}
	mddev->degraded -= count;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	print_conf(conf);
	return count;
}

static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct r1conf *conf = mddev->private;
	int err = -EEXIST;
	int mirror = 0;
	struct raid1_info *p;
	int first = 0;
	int last = conf->raid_disks - 1;

	if (mddev->recovery_disabled == conf->recovery_disabled)
		return -EBUSY;

	if (md_integrity_add_rdev(rdev, mddev))
		return -ENXIO;

	if (rdev->raid_disk >= 0)
		first = last = rdev->raid_disk;

	/*
	 * find the disk ... but prefer rdev->saved_raid_disk
	 * if possible.
	 */
	if (rdev->saved_raid_disk >= 0 &&
	    rdev->saved_raid_disk >= first &&
	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
		first = last = rdev->saved_raid_disk;

	for (mirror = first; mirror <= last; mirror++) {
		p = conf->mirrors + mirror;
		if (!p->rdev) {

			if (mddev->gendisk)
				disk_stack_limits(mddev->gendisk, rdev->bdev,
						  rdev->data_offset << 9);

			p->head_position = 0;
			rdev->raid_disk = mirror;
			err = 0;
			/* As all devices are equivalent, we don't need a
			 * full recovery if this drive was recently part
			 * of the array
			 */
			if (rdev->saved_raid_disk < 0)
				conf->fullsync = 1;
			rcu_assign_pointer(p->rdev, rdev);
			break;
		}
		if (test_bit(WantReplacement, &p->rdev->flags) &&
		    p[conf->raid_disks].rdev == NULL) {
			/* Add this device as a replacement */
			clear_bit(In_sync, &rdev->flags);
			set_bit(Replacement, &rdev->flags);
			rdev->raid_disk = mirror;
			err = 0;
			conf->fullsync = 1;
			rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
			break;
		}
	}
	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
	print_conf(conf);
	return err;
}
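/*
 * Illustrative: mirrors[] holds raid_disks primary slots followed by
 * raid_disks replacement slots, so the replacement for disk i lives at
 * mirrors[conf->raid_disks + i]; raid1_add_disk() above and
 * raid1_remove_disk() below both rely on this layout.
 */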
non-faulty devices if recovery
1826 * is not possible.
1827 */
1828 if (!test_bit(Faulty, &rdev->flags) &&
1829 mddev->recovery_disabled != conf->recovery_disabled &&
1830 mddev->degraded < conf->raid_disks) {
1831 err = -EBUSY;
1832 goto abort;
1833 }
1834 p->rdev = NULL;
1835 if (!test_bit(RemoveSynchronized, &rdev->flags)) {
1836 synchronize_rcu();
1837 if (atomic_read(&rdev->nr_pending)) {
1838 /* lost the race, try later */
1839 err = -EBUSY;
1840 p->rdev = rdev;
1841 goto abort;
1842 }
1843 }
1844 if (conf->mirrors[conf->raid_disks + number].rdev) {
1845 /* We just removed a device that is being replaced.
1846 * Move down the replacement. We drain all IO before
1847 * doing this to avoid confusion.
1848 */
1849 struct md_rdev *repl =
1850 conf->mirrors[conf->raid_disks + number].rdev;
1851 freeze_array(conf, 0);
1852 clear_bit(Replacement, &repl->flags);
1853 p->rdev = repl;
1854 conf->mirrors[conf->raid_disks + number].rdev = NULL;
1855 unfreeze_array(conf);
1856 clear_bit(WantReplacement, &rdev->flags);
1857 } else
1858 clear_bit(WantReplacement, &rdev->flags);
1859 err = md_integrity_register(mddev);
1860 }
1861 abort:
1862
1863 print_conf(conf);
1864 return err;
1865 }
1866
1867 static void end_sync_read(struct bio *bio)
1868 {
1869 struct r1bio *r1_bio = bio->bi_private;
1870
1871 update_head_pos(r1_bio->read_disk, r1_bio);
1872
1873 /*
1874 * we have read a block, now it needs to be re-written,
1875 * or re-read if the read failed.
1876 * We don't do much here, just schedule handling by raid1d
1877 */
1878 if (!bio->bi_error)
1879 set_bit(R1BIO_Uptodate, &r1_bio->state);
1880
1881 if (atomic_dec_and_test(&r1_bio->remaining))
1882 reschedule_retry(r1_bio);
1883 }
1884
1885 static void end_sync_write(struct bio *bio)
1886 {
1887 int uptodate = !bio->bi_error;
1888 struct r1bio *r1_bio = bio->bi_private;
1889 struct mddev *mddev = r1_bio->mddev;
1890 struct r1conf *conf = mddev->private;
1891 sector_t first_bad;
1892 int bad_sectors;
1893 struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
1894
1895 if (!uptodate) {
1896 sector_t sync_blocks = 0;
1897 sector_t s = r1_bio->sector;
1898 long sectors_to_go = r1_bio->sectors;
1899 /* make sure these bits don't get cleared.
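 * A failed sync write must leave the bitmap bits for this range
 * dirty so the blocks are retried later; walk the range and tell
 * the bitmap this sync attempt was aborted (final argument 1).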
*/ 1900 do { 1901 bitmap_end_sync(mddev->bitmap, s, 1902 &sync_blocks, 1); 1903 s += sync_blocks; 1904 sectors_to_go -= sync_blocks; 1905 } while (sectors_to_go > 0); 1906 set_bit(WriteErrorSeen, &rdev->flags); 1907 if (!test_and_set_bit(WantReplacement, &rdev->flags)) 1908 set_bit(MD_RECOVERY_NEEDED, & 1909 mddev->recovery); 1910 set_bit(R1BIO_WriteError, &r1_bio->state); 1911 } else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors, 1912 &first_bad, &bad_sectors) && 1913 !is_badblock(conf->mirrors[r1_bio->read_disk].rdev, 1914 r1_bio->sector, 1915 r1_bio->sectors, 1916 &first_bad, &bad_sectors) 1917 ) 1918 set_bit(R1BIO_MadeGood, &r1_bio->state); 1919 1920 if (atomic_dec_and_test(&r1_bio->remaining)) { 1921 int s = r1_bio->sectors; 1922 if (test_bit(R1BIO_MadeGood, &r1_bio->state) || 1923 test_bit(R1BIO_WriteError, &r1_bio->state)) 1924 reschedule_retry(r1_bio); 1925 else { 1926 put_buf(r1_bio); 1927 md_done_sync(mddev, s, uptodate); 1928 } 1929 } 1930 } 1931 1932 static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector, 1933 int sectors, struct page *page, int rw) 1934 { 1935 if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false)) 1936 /* success */ 1937 return 1; 1938 if (rw == WRITE) { 1939 set_bit(WriteErrorSeen, &rdev->flags); 1940 if (!test_and_set_bit(WantReplacement, 1941 &rdev->flags)) 1942 set_bit(MD_RECOVERY_NEEDED, & 1943 rdev->mddev->recovery); 1944 } 1945 /* need to record an error - either for the block or the device */ 1946 if (!rdev_set_badblocks(rdev, sector, sectors, 0)) 1947 md_error(rdev->mddev, rdev); 1948 return 0; 1949 } 1950 1951 static int fix_sync_read_error(struct r1bio *r1_bio) 1952 { 1953 /* Try some synchronous reads of other devices to get 1954 * good data, much like with normal read errors. Only 1955 * read into the pages we already have so we don't 1956 * need to re-issue the read request. 1957 * We don't need to freeze the array, because being in an 1958 * active sync request, there is no normal IO, and 1959 * no overlapping syncs. 1960 * We don't need to check is_badblock() again as we 1961 * made sure that anything with a bad block in range 1962 * will have bi_end_io clear. 1963 */ 1964 struct mddev *mddev = r1_bio->mddev; 1965 struct r1conf *conf = mddev->private; 1966 struct bio *bio = r1_bio->bios[r1_bio->read_disk]; 1967 sector_t sect = r1_bio->sector; 1968 int sectors = r1_bio->sectors; 1969 int idx = 0; 1970 struct md_rdev *rdev; 1971 1972 rdev = conf->mirrors[r1_bio->read_disk].rdev; 1973 if (test_bit(FailFast, &rdev->flags)) { 1974 /* Don't try recovering from here - just fail it 1975 * ... 
unless it is the last working device of course */
1976 md_error(mddev, rdev);
1977 if (test_bit(Faulty, &rdev->flags))
1978 /* Don't try to read from here, but make sure
1979 * put_buf does its thing
1980 */
1981 bio->bi_end_io = end_sync_write;
1982 }
1983
1984 while (sectors) {
1985 int s = sectors;
1986 int d = r1_bio->read_disk;
1987 int success = 0;
1988 int start;
1989
1990 if (s > (PAGE_SIZE>>9))
1991 s = PAGE_SIZE >> 9;
1992 do {
1993 if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
1994 /* No rcu protection needed here; devices
1995 * can only be removed when no resync is
1996 * active, and resync is currently active
1997 */
1998 rdev = conf->mirrors[d].rdev;
1999 if (sync_page_io(rdev, sect, s<<9,
2000 bio->bi_io_vec[idx].bv_page,
2001 REQ_OP_READ, 0, false)) {
2002 success = 1;
2003 break;
2004 }
2005 }
2006 d++;
2007 if (d == conf->raid_disks * 2)
2008 d = 0;
2009 } while (!success && d != r1_bio->read_disk);
2010
2011 if (!success) {
2012 char b[BDEVNAME_SIZE];
2013 int abort = 0;
2014 /* Cannot read from anywhere, this block is lost.
2015 * Record a bad block on each device. If that doesn't
2016 * work just disable and interrupt the recovery.
2017 * Don't fail devices as that won't really help.
2018 */
2019 pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
2020 mdname(mddev),
2021 bdevname(bio->bi_bdev, b),
2022 (unsigned long long)r1_bio->sector);
2023 for (d = 0; d < conf->raid_disks * 2; d++) {
2024 rdev = conf->mirrors[d].rdev;
2025 if (!rdev || test_bit(Faulty, &rdev->flags))
2026 continue;
2027 if (!rdev_set_badblocks(rdev, sect, s, 0))
2028 abort = 1;
2029 }
2030 if (abort) {
2031 conf->recovery_disabled =
2032 mddev->recovery_disabled;
2033 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2034 md_done_sync(mddev, r1_bio->sectors, 0);
2035 put_buf(r1_bio);
2036 return 0;
2037 }
2038 /* Try next page */
2039 sectors -= s;
2040 sect += s;
2041 idx++;
2042 continue;
2043 }
2044
2045 start = d;
2046 /* write it back and re-read */
2047 while (d != r1_bio->read_disk) {
2048 if (d == 0)
2049 d = conf->raid_disks * 2;
2050 d--;
2051 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
2052 continue;
2053 rdev = conf->mirrors[d].rdev;
2054 if (r1_sync_page_io(rdev, sect, s,
2055 bio->bi_io_vec[idx].bv_page,
2056 WRITE) == 0) {
2057 r1_bio->bios[d]->bi_end_io = NULL;
2058 rdev_dec_pending(rdev, mddev);
2059 }
2060 }
2061 d = start;
2062 while (d != r1_bio->read_disk) {
2063 if (d == 0)
2064 d = conf->raid_disks * 2;
2065 d--;
2066 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
2067 continue;
2068 rdev = conf->mirrors[d].rdev;
2069 if (r1_sync_page_io(rdev, sect, s,
2070 bio->bi_io_vec[idx].bv_page,
2071 READ) != 0)
2072 atomic_add(s, &rdev->corrected_errors);
2073 }
2074 sectors -= s;
2075 sect += s;
2076 idx++;
2077 }
2078 set_bit(R1BIO_Uptodate, &r1_bio->state);
2079 bio->bi_error = 0;
2080 return 1;
2081 }
2082
2083 static void process_checks(struct r1bio *r1_bio)
2084 {
2085 /* We have read all readable devices. If we haven't
2086 * got the block, then there is no hope left.
2087 * If we have, then we want to do a comparison
2088 * and skip the write if everything is the same.
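 * (For a user-requested 'check' pass we only count mismatches;
 * for 'repair' the differing copies are rewritten from the
 * primary chosen below.)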
2089 * If any blocks failed to read, then we need to 2090 * attempt an over-write 2091 */ 2092 struct mddev *mddev = r1_bio->mddev; 2093 struct r1conf *conf = mddev->private; 2094 int primary; 2095 int i; 2096 int vcnt; 2097 2098 /* Fix variable parts of all bios */ 2099 vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9); 2100 for (i = 0; i < conf->raid_disks * 2; i++) { 2101 int j; 2102 int size; 2103 int error; 2104 struct bio *b = r1_bio->bios[i]; 2105 if (b->bi_end_io != end_sync_read) 2106 continue; 2107 /* fixup the bio for reuse, but preserve errno */ 2108 error = b->bi_error; 2109 bio_reset(b); 2110 b->bi_error = error; 2111 b->bi_vcnt = vcnt; 2112 b->bi_iter.bi_size = r1_bio->sectors << 9; 2113 b->bi_iter.bi_sector = r1_bio->sector + 2114 conf->mirrors[i].rdev->data_offset; 2115 b->bi_bdev = conf->mirrors[i].rdev->bdev; 2116 b->bi_end_io = end_sync_read; 2117 b->bi_private = r1_bio; 2118 2119 size = b->bi_iter.bi_size; 2120 for (j = 0; j < vcnt ; j++) { 2121 struct bio_vec *bi; 2122 bi = &b->bi_io_vec[j]; 2123 bi->bv_offset = 0; 2124 if (size > PAGE_SIZE) 2125 bi->bv_len = PAGE_SIZE; 2126 else 2127 bi->bv_len = size; 2128 size -= PAGE_SIZE; 2129 } 2130 } 2131 for (primary = 0; primary < conf->raid_disks * 2; primary++) 2132 if (r1_bio->bios[primary]->bi_end_io == end_sync_read && 2133 !r1_bio->bios[primary]->bi_error) { 2134 r1_bio->bios[primary]->bi_end_io = NULL; 2135 rdev_dec_pending(conf->mirrors[primary].rdev, mddev); 2136 break; 2137 } 2138 r1_bio->read_disk = primary; 2139 for (i = 0; i < conf->raid_disks * 2; i++) { 2140 int j; 2141 struct bio *pbio = r1_bio->bios[primary]; 2142 struct bio *sbio = r1_bio->bios[i]; 2143 int error = sbio->bi_error; 2144 2145 if (sbio->bi_end_io != end_sync_read) 2146 continue; 2147 /* Now we can 'fixup' the error value */ 2148 sbio->bi_error = 0; 2149 2150 if (!error) { 2151 for (j = vcnt; j-- ; ) { 2152 struct page *p, *s; 2153 p = pbio->bi_io_vec[j].bv_page; 2154 s = sbio->bi_io_vec[j].bv_page; 2155 if (memcmp(page_address(p), 2156 page_address(s), 2157 sbio->bi_io_vec[j].bv_len)) 2158 break; 2159 } 2160 } else 2161 j = 0; 2162 if (j >= 0) 2163 atomic64_add(r1_bio->sectors, &mddev->resync_mismatches); 2164 if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) 2165 && !error)) { 2166 /* No need to write to this device. */ 2167 sbio->bi_end_io = NULL; 2168 rdev_dec_pending(conf->mirrors[i].rdev, mddev); 2169 continue; 2170 } 2171 2172 bio_copy_data(sbio, pbio); 2173 } 2174 } 2175 2176 static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio) 2177 { 2178 struct r1conf *conf = mddev->private; 2179 int i; 2180 int disks = conf->raid_disks * 2; 2181 struct bio *bio, *wbio; 2182 2183 bio = r1_bio->bios[r1_bio->read_disk]; 2184 2185 if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) 2186 /* ouch - failed to read all of that. 
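 * Try to fetch the data synchronously from another mirror;
 * if fix_sync_read_error() below cannot, it aborts the resync.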
*/
2187 if (!fix_sync_read_error(r1_bio))
2188 return;
2189
2190 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2191 process_checks(r1_bio);
2192
2193 /*
2194 * schedule writes
2195 */
2196 atomic_set(&r1_bio->remaining, 1);
2197 for (i = 0; i < disks; i++) {
2198 wbio = r1_bio->bios[i];
2199 if (wbio->bi_end_io == NULL ||
2200 (wbio->bi_end_io == end_sync_read &&
2201 (i == r1_bio->read_disk ||
2202 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
2203 continue;
2204
2205 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
2206 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
2207 wbio->bi_opf |= MD_FAILFAST;
2208
2209 wbio->bi_end_io = end_sync_write;
2210 atomic_inc(&r1_bio->remaining);
2211 md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
2212
2213 generic_make_request(wbio);
2214 }
2215
2216 if (atomic_dec_and_test(&r1_bio->remaining)) {
2217 /* if we're here, all write(s) have completed, so clean up */
2218 int s = r1_bio->sectors;
2219 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2220 test_bit(R1BIO_WriteError, &r1_bio->state))
2221 reschedule_retry(r1_bio);
2222 else {
2223 put_buf(r1_bio);
2224 md_done_sync(mddev, s, 1);
2225 }
2226 }
2227 }
2228
2229 /*
2230 * This is a kernel thread which:
2231 *
2232 * 1. Retries failed read operations on working mirrors.
2233 * 2. Updates the raid superblock when problems are encountered.
2234 * 3. Performs writes following reads for array synchronising.
2235 */
2236
2237 static void fix_read_error(struct r1conf *conf, int read_disk,
2238 sector_t sect, int sectors)
2239 {
2240 struct mddev *mddev = conf->mddev;
2241 while (sectors) {
2242 int s = sectors;
2243 int d = read_disk;
2244 int success = 0;
2245 int start;
2246 struct md_rdev *rdev;
2247
2248 if (s > (PAGE_SIZE>>9))
2249 s = PAGE_SIZE >> 9;
2250
2251 do {
2252 sector_t first_bad;
2253 int bad_sectors;
2254
2255 rcu_read_lock();
2256 rdev = rcu_dereference(conf->mirrors[d].rdev);
2257 if (rdev &&
2258 (test_bit(In_sync, &rdev->flags) ||
2259 (!test_bit(Faulty, &rdev->flags) &&
2260 rdev->recovery_offset >= sect + s)) &&
2261 is_badblock(rdev, sect, s,
2262 &first_bad, &bad_sectors) == 0) {
2263 atomic_inc(&rdev->nr_pending);
2264 rcu_read_unlock();
2265 if (sync_page_io(rdev, sect, s<<9,
2266 conf->tmppage, REQ_OP_READ, 0, false))
2267 success = 1;
2268 rdev_dec_pending(rdev, mddev);
2269 if (success)
2270 break;
2271 } else
2272 rcu_read_unlock();
2273 d++;
2274 if (d == conf->raid_disks * 2)
2275 d = 0;
2276 } while (!success && d != read_disk);
2277
2278 if (!success) {
2279 /* Cannot read from anywhere - mark it bad */
2280 struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
2281 if (!rdev_set_badblocks(rdev, sect, s, 0))
2282 md_error(mddev, rdev);
2283 break;
2284 }
2285 /* write it back and re-read */
2286 start = d;
2287 while (d != read_disk) {
2288 if (d == 0)
2289 d = conf->raid_disks * 2;
2290 d--;
2291 rcu_read_lock();
2292 rdev = rcu_dereference(conf->mirrors[d].rdev);
2293 if (rdev &&
2294 !test_bit(Faulty, &rdev->flags)) {
2295 atomic_inc(&rdev->nr_pending);
2296 rcu_read_unlock();
2297 r1_sync_page_io(rdev, sect, s,
2298 conf->tmppage, WRITE);
2299 rdev_dec_pending(rdev, mddev);
2300 } else
2301 rcu_read_unlock();
2302 }
2303 d = start;
2304 while (d != read_disk) {
2305 char b[BDEVNAME_SIZE];
2306 if (d == 0)
2307 d = conf->raid_disks * 2;
2308 d--;
2309 rcu_read_lock();
2310 rdev = rcu_dereference(conf->mirrors[d].rdev);
2311 if (rdev &&
2312 !test_bit(Faulty, &rdev->flags)) {
2313 atomic_inc(&rdev->nr_pending);
2314 rcu_read_unlock();
2315 if
(r1_sync_page_io(rdev, sect, s,
2316 conf->tmppage, READ)) {
2317 atomic_add(s, &rdev->corrected_errors);
2318 pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %s)\n",
2319 mdname(mddev), s,
2320 (unsigned long long)(sect +
2321 rdev->data_offset),
2322 bdevname(rdev->bdev, b));
2323 }
2324 rdev_dec_pending(rdev, mddev);
2325 } else
2326 rcu_read_unlock();
2327 }
2328 sectors -= s;
2329 sect += s;
2330 }
2331 }
2332
2333 static int narrow_write_error(struct r1bio *r1_bio, int i)
2334 {
2335 struct mddev *mddev = r1_bio->mddev;
2336 struct r1conf *conf = mddev->private;
2337 struct md_rdev *rdev = conf->mirrors[i].rdev;
2338
2339 /* bio has the data to be written to device 'i' where
2340 * we just recently had a write error.
2341 * We repeatedly clone the bio and trim down to one block,
2342 * then try the write. Where the write fails we record
2343 * a bad block.
2344 * The bio may not align exactly with the bad-block granularity, so the first write is trimmed to end on a block boundary
2345 * and whole blocks are used after that (e.g. with 512-sector blocks, a write starting at sector 1000 first covers 24 sectors, up to sector 1024).
2346 *
2347 * We currently own a reference on the rdev.
2348 */
2349
2350 int block_sectors;
2351 sector_t sector;
2352 int sectors;
2353 int sect_to_write = r1_bio->sectors;
2354 int ok = 1;
2355
2356 if (rdev->badblocks.shift < 0)
2357 return 0;
2358
2359 block_sectors = roundup(1 << rdev->badblocks.shift,
2360 bdev_logical_block_size(rdev->bdev) >> 9);
2361 sector = r1_bio->sector;
2362 sectors = ((sector + block_sectors)
2363 & ~(sector_t)(block_sectors - 1))
2364 - sector;
2365
2366 while (sect_to_write) {
2367 struct bio *wbio;
2368 if (sectors > sect_to_write)
2369 sectors = sect_to_write;
2370 /* Write at 'sector' for 'sectors' */
2371
2372 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
2373 unsigned vcnt = r1_bio->behind_page_count;
2374 struct bio_vec *vec = r1_bio->behind_bvecs;
2375
2376 while (!vec->bv_page) {
2377 vec++;
2378 vcnt--;
2379 }
2380
2381 wbio = bio_alloc_mddev(GFP_NOIO, vcnt, mddev);
2382 memcpy(wbio->bi_io_vec, vec, vcnt * sizeof(struct bio_vec));
2383
2384 wbio->bi_vcnt = vcnt;
2385 } else {
2386 wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
2387 mddev->bio_set);
2388 }
2389
2390 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
2391 wbio->bi_iter.bi_sector = r1_bio->sector;
2392 wbio->bi_iter.bi_size = r1_bio->sectors << 9;
2393
2394 bio_trim(wbio, sector - r1_bio->sector, sectors);
2395 wbio->bi_iter.bi_sector += rdev->data_offset;
2396 wbio->bi_bdev = rdev->bdev;
2397
2398 if (submit_bio_wait(wbio) < 0)
2399 /* failure!
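 * even the one-block write failed, so record a bad block on
 * this device rather than failing the whole device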
*/
2400 ok = rdev_set_badblocks(rdev, sector,
2401 sectors, 0)
2402 && ok;
2403
2404 bio_put(wbio);
2405 sect_to_write -= sectors;
2406 sector += sectors;
2407 sectors = block_sectors;
2408 }
2409 return ok;
2410 }
2411
2412 static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2413 {
2414 int m;
2415 int s = r1_bio->sectors;
2416 for (m = 0; m < conf->raid_disks * 2; m++) {
2417 struct md_rdev *rdev = conf->mirrors[m].rdev;
2418 struct bio *bio = r1_bio->bios[m];
2419 if (bio->bi_end_io == NULL)
2420 continue;
2421 if (!bio->bi_error &&
2422 test_bit(R1BIO_MadeGood, &r1_bio->state)) {
2423 rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
2424 }
2425 if (bio->bi_error &&
2426 test_bit(R1BIO_WriteError, &r1_bio->state)) {
2427 if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
2428 md_error(conf->mddev, rdev);
2429 }
2430 }
2431 put_buf(r1_bio);
2432 md_done_sync(conf->mddev, s, 1);
2433 }
2434
2435 static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2436 {
2437 int m, idx;
2438 bool fail = false;
2439
2440 for (m = 0; m < conf->raid_disks * 2; m++)
2441 if (r1_bio->bios[m] == IO_MADE_GOOD) {
2442 struct md_rdev *rdev = conf->mirrors[m].rdev;
2443 rdev_clear_badblocks(rdev,
2444 r1_bio->sector,
2445 r1_bio->sectors, 0);
2446 rdev_dec_pending(rdev, conf->mddev);
2447 } else if (r1_bio->bios[m] != NULL) {
2448 /* This drive got a write error. We need to
2449 * narrow down and record precise write
2450 * errors.
2451 */
2452 fail = true;
2453 if (!narrow_write_error(r1_bio, m)) {
2454 md_error(conf->mddev,
2455 conf->mirrors[m].rdev);
2456 /* an I/O failed, we can't clear the bitmap */
2457 set_bit(R1BIO_Degraded, &r1_bio->state);
2458 }
2459 rdev_dec_pending(conf->mirrors[m].rdev,
2460 conf->mddev);
2461 }
2462 if (fail) {
2463 spin_lock_irq(&conf->device_lock);
2464 list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
2465 idx = sector_to_idx(r1_bio->sector);
2466 atomic_inc(&conf->nr_queued[idx]);
2467 spin_unlock_irq(&conf->device_lock);
2468 /*
2469 * In case freeze_array() is waiting for condition
2470 * get_unqueued_pending() == extra to be true.
2471 */
2472 wake_up(&conf->wait_barrier);
2473 md_wakeup_thread(conf->mddev->thread);
2474 } else {
2475 if (test_bit(R1BIO_WriteError, &r1_bio->state))
2476 close_write(r1_bio);
2477 raid_end_bio_io(r1_bio);
2478 }
2479 }
2480
2481 static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
2482 {
2483 int disk;
2484 int max_sectors;
2485 struct mddev *mddev = conf->mddev;
2486 struct bio *bio;
2487 char b[BDEVNAME_SIZE];
2488 struct md_rdev *rdev;
2489 dev_t bio_dev;
2490 sector_t bio_sector;
2491
2492 clear_bit(R1BIO_ReadError, &r1_bio->state);
2493 /* we got a read error. Maybe the drive is bad. Maybe just
2494 * the block and we can fix it.
2495 * We freeze all other IO, and try reading the block from
2496 * other devices. When we find one, we re-write
2497 * and check whether that fixes the read error.
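 * (Rewriting good data over the failing sector also gives the
 * drive a chance to remap it internally.)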
2498 * This is all done synchronously while the array is 2499 * frozen 2500 */ 2501 2502 bio = r1_bio->bios[r1_bio->read_disk]; 2503 bdevname(bio->bi_bdev, b); 2504 bio_dev = bio->bi_bdev->bd_dev; 2505 bio_sector = conf->mirrors[r1_bio->read_disk].rdev->data_offset + r1_bio->sector; 2506 bio_put(bio); 2507 r1_bio->bios[r1_bio->read_disk] = NULL; 2508 2509 rdev = conf->mirrors[r1_bio->read_disk].rdev; 2510 if (mddev->ro == 0 2511 && !test_bit(FailFast, &rdev->flags)) { 2512 freeze_array(conf, 1); 2513 fix_read_error(conf, r1_bio->read_disk, 2514 r1_bio->sector, r1_bio->sectors); 2515 unfreeze_array(conf); 2516 } else { 2517 r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED; 2518 } 2519 2520 rdev_dec_pending(rdev, conf->mddev); 2521 2522 read_more: 2523 disk = read_balance(conf, r1_bio, &max_sectors); 2524 if (disk == -1) { 2525 pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n", 2526 mdname(mddev), b, (unsigned long long)r1_bio->sector); 2527 raid_end_bio_io(r1_bio); 2528 } else { 2529 const unsigned long do_sync 2530 = r1_bio->master_bio->bi_opf & REQ_SYNC; 2531 r1_bio->read_disk = disk; 2532 bio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO, 2533 mddev->bio_set); 2534 bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector, 2535 max_sectors); 2536 r1_bio->bios[r1_bio->read_disk] = bio; 2537 rdev = conf->mirrors[disk].rdev; 2538 pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n", 2539 mdname(mddev), 2540 (unsigned long long)r1_bio->sector, 2541 bdevname(rdev->bdev, b)); 2542 bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset; 2543 bio->bi_bdev = rdev->bdev; 2544 bio->bi_end_io = raid1_end_read_request; 2545 bio_set_op_attrs(bio, REQ_OP_READ, do_sync); 2546 if (test_bit(FailFast, &rdev->flags) && 2547 test_bit(R1BIO_FailFast, &r1_bio->state)) 2548 bio->bi_opf |= MD_FAILFAST; 2549 bio->bi_private = r1_bio; 2550 if (max_sectors < r1_bio->sectors) { 2551 /* Drat - have to split this up more */ 2552 struct bio *mbio = r1_bio->master_bio; 2553 int sectors_handled = (r1_bio->sector + max_sectors 2554 - mbio->bi_iter.bi_sector); 2555 r1_bio->sectors = max_sectors; 2556 spin_lock_irq(&conf->device_lock); 2557 if (mbio->bi_phys_segments == 0) 2558 mbio->bi_phys_segments = 2; 2559 else 2560 mbio->bi_phys_segments++; 2561 spin_unlock_irq(&conf->device_lock); 2562 trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), 2563 bio, bio_dev, bio_sector); 2564 generic_make_request(bio); 2565 bio = NULL; 2566 2567 r1_bio = alloc_r1bio(mddev, mbio, sectors_handled); 2568 set_bit(R1BIO_ReadError, &r1_bio->state); 2569 2570 goto read_more; 2571 } else { 2572 trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), 2573 bio, bio_dev, bio_sector); 2574 generic_make_request(bio); 2575 } 2576 } 2577 } 2578 2579 static void raid1d(struct md_thread *thread) 2580 { 2581 struct mddev *mddev = thread->mddev; 2582 struct r1bio *r1_bio; 2583 unsigned long flags; 2584 struct r1conf *conf = mddev->private; 2585 struct list_head *head = &conf->retry_list; 2586 struct blk_plug plug; 2587 int idx; 2588 2589 md_check_recovery(mddev); 2590 2591 if (!list_empty_careful(&conf->bio_end_io_list) && 2592 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { 2593 LIST_HEAD(tmp); 2594 spin_lock_irqsave(&conf->device_lock, flags); 2595 if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) 2596 list_splice_init(&conf->bio_end_io_list, &tmp); 2597 spin_unlock_irqrestore(&conf->device_lock, flags); 2598 while (!list_empty(&tmp)) { 2599 r1_bio = list_first_entry(&tmp, struct r1bio, 
2600 retry_list);
2601 list_del(&r1_bio->retry_list);
2602 idx = sector_to_idx(r1_bio->sector);
2603 atomic_dec(&conf->nr_queued[idx]);
2604 if (mddev->degraded)
2605 set_bit(R1BIO_Degraded, &r1_bio->state);
2606 if (test_bit(R1BIO_WriteError, &r1_bio->state))
2607 close_write(r1_bio);
2608 raid_end_bio_io(r1_bio);
2609 }
2610 }
2611
2612 blk_start_plug(&plug);
2613 for (;;) {
2614
2615 flush_pending_writes(conf);
2616
2617 spin_lock_irqsave(&conf->device_lock, flags);
2618 if (list_empty(head)) {
2619 spin_unlock_irqrestore(&conf->device_lock, flags);
2620 break;
2621 }
2622 r1_bio = list_entry(head->prev, struct r1bio, retry_list);
2623 list_del(head->prev);
2624 idx = sector_to_idx(r1_bio->sector);
2625 atomic_dec(&conf->nr_queued[idx]);
2626 spin_unlock_irqrestore(&conf->device_lock, flags);
2627
2628 mddev = r1_bio->mddev;
2629 conf = mddev->private;
2630 if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
2631 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2632 test_bit(R1BIO_WriteError, &r1_bio->state))
2633 handle_sync_write_finished(conf, r1_bio);
2634 else
2635 sync_request_write(mddev, r1_bio);
2636 } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2637 test_bit(R1BIO_WriteError, &r1_bio->state))
2638 handle_write_finished(conf, r1_bio);
2639 else if (test_bit(R1BIO_ReadError, &r1_bio->state))
2640 handle_read_error(conf, r1_bio);
2641 else
2642 /* just a partial read to be scheduled from separate
2643 * context
2644 */
2645 generic_make_request(r1_bio->bios[r1_bio->read_disk]);
2646
2647 cond_resched();
2648 if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
2649 md_check_recovery(mddev);
2650 }
2651 blk_finish_plug(&plug);
2652 }
2653
2654 static int init_resync(struct r1conf *conf)
2655 {
2656 int buffs;
2657
2658 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
2659 BUG_ON(conf->r1buf_pool);
2660 conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
2661 conf->poolinfo);
2662 if (!conf->r1buf_pool)
2663 return -ENOMEM;
2664 return 0;
2665 }
2666
2667 /*
2668 * perform a "sync" on one "block"
2669 *
2670 * We need to make sure that no normal I/O request - particularly write
2671 * requests - conflict with active sync requests.
2672 *
2673 * This is achieved by tracking pending requests and a 'barrier' concept
2674 * that can be installed to exclude normal IO requests.
2675 */
2676
2677 static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
2678 int *skipped)
2679 {
2680 struct r1conf *conf = mddev->private;
2681 struct r1bio *r1_bio;
2682 struct bio *bio;
2683 sector_t max_sector, nr_sectors;
2684 int disk = -1;
2685 int i;
2686 int wonly = -1;
2687 int write_targets = 0, read_targets = 0;
2688 sector_t sync_blocks;
2689 int still_degraded = 0;
2690 int good_sectors = RESYNC_SECTORS;
2691 int min_bad = 0; /* number of sectors that are bad in all devices */
2692 int idx = sector_to_idx(sector_nr);
2693
2694 if (!conf->r1buf_pool)
2695 if (init_resync(conf))
2696 return 0;
2697
2698 max_sector = mddev->dev_sectors;
2699 if (sector_nr >= max_sector) {
2700 /* If we aborted, we need to abort the
2701 * sync on the 'current' bitmap chunk (there will
2702 * only be one in raid1 resync).
2703 * We can find the current address in mddev->curr_resync
2704 */
2705 if (mddev->curr_resync < max_sector) /* aborted */
2706 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2707 &sync_blocks, 1);
2708 else /* completed sync */
2709 conf->fullsync = 0;
2710
2711 bitmap_close_sync(mddev->bitmap);
2712 close_sync(conf);
2713
2714 if (mddev_is_clustered(mddev)) {
2715 conf->cluster_sync_low = 0;
2716 conf->cluster_sync_high = 0;
2717 }
2718 return 0;
2719 }
2720
2721 if (mddev->bitmap == NULL &&
2722 mddev->recovery_cp == MaxSector &&
2723 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
2724 conf->fullsync == 0) {
2725 *skipped = 1;
2726 return max_sector - sector_nr;
2727 }
2728 /* before building a request, check if we can skip these blocks...
2729 * This call to bitmap_start_sync doesn't actually record anything
2730 */
2731 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
2732 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2733 /* We can skip this block, and probably several more */
2734 *skipped = 1;
2735 return sync_blocks;
2736 }
2737
2738 /*
2739 * If there is non-resync activity waiting for a turn, then let it
2740 * through before starting on this new sync request.
2741 */
2742 if (atomic_read(&conf->nr_waiting[idx]))
2743 schedule_timeout_uninterruptible(1);
2744
2745 /* we are incrementing sector_nr below. To be safe, we check against
2746 * sector_nr + two times RESYNC_SECTORS
2747 */
2748
2749 bitmap_cond_end_sync(mddev->bitmap, sector_nr,
2750 mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
2751 r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
2752
2753 raise_barrier(conf, sector_nr);
2754
2755 rcu_read_lock();
2756 /*
2757 * If we get a correctable read error during resync or recovery,
2758 * we might want to read from a different device. So we
2759 * flag all drives that could conceivably be read from for READ,
2760 * and any others (which will be non-In_sync devices) for WRITE.
2761 * If a read fails, we try reading from something else for which READ
2762 * is OK.
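 * (During recovery the non-In_sync devices become the write
 * targets instead and are rebuilt from whichever In_sync mirror
 * is chosen as read_disk below.)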
2763 */ 2764 2765 r1_bio->mddev = mddev; 2766 r1_bio->sector = sector_nr; 2767 r1_bio->state = 0; 2768 set_bit(R1BIO_IsSync, &r1_bio->state); 2769 /* make sure good_sectors won't go across barrier unit boundary */ 2770 good_sectors = align_to_barrier_unit_end(sector_nr, good_sectors); 2771 2772 for (i = 0; i < conf->raid_disks * 2; i++) { 2773 struct md_rdev *rdev; 2774 bio = r1_bio->bios[i]; 2775 bio_reset(bio); 2776 2777 rdev = rcu_dereference(conf->mirrors[i].rdev); 2778 if (rdev == NULL || 2779 test_bit(Faulty, &rdev->flags)) { 2780 if (i < conf->raid_disks) 2781 still_degraded = 1; 2782 } else if (!test_bit(In_sync, &rdev->flags)) { 2783 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 2784 bio->bi_end_io = end_sync_write; 2785 write_targets ++; 2786 } else { 2787 /* may need to read from here */ 2788 sector_t first_bad = MaxSector; 2789 int bad_sectors; 2790 2791 if (is_badblock(rdev, sector_nr, good_sectors, 2792 &first_bad, &bad_sectors)) { 2793 if (first_bad > sector_nr) 2794 good_sectors = first_bad - sector_nr; 2795 else { 2796 bad_sectors -= (sector_nr - first_bad); 2797 if (min_bad == 0 || 2798 min_bad > bad_sectors) 2799 min_bad = bad_sectors; 2800 } 2801 } 2802 if (sector_nr < first_bad) { 2803 if (test_bit(WriteMostly, &rdev->flags)) { 2804 if (wonly < 0) 2805 wonly = i; 2806 } else { 2807 if (disk < 0) 2808 disk = i; 2809 } 2810 bio_set_op_attrs(bio, REQ_OP_READ, 0); 2811 bio->bi_end_io = end_sync_read; 2812 read_targets++; 2813 } else if (!test_bit(WriteErrorSeen, &rdev->flags) && 2814 test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && 2815 !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { 2816 /* 2817 * The device is suitable for reading (InSync), 2818 * but has bad block(s) here. Let's try to correct them, 2819 * if we are doing resync or repair. Otherwise, leave 2820 * this device alone for this sync request. 2821 */ 2822 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 2823 bio->bi_end_io = end_sync_write; 2824 write_targets++; 2825 } 2826 } 2827 if (bio->bi_end_io) { 2828 atomic_inc(&rdev->nr_pending); 2829 bio->bi_iter.bi_sector = sector_nr + rdev->data_offset; 2830 bio->bi_bdev = rdev->bdev; 2831 bio->bi_private = r1_bio; 2832 if (test_bit(FailFast, &rdev->flags)) 2833 bio->bi_opf |= MD_FAILFAST; 2834 } 2835 } 2836 rcu_read_unlock(); 2837 if (disk < 0) 2838 disk = wonly; 2839 r1_bio->read_disk = disk; 2840 2841 if (read_targets == 0 && min_bad > 0) { 2842 /* These sectors are bad on all InSync devices, so we 2843 * need to mark them bad on all write targets 2844 */ 2845 int ok = 1; 2846 for (i = 0 ; i < conf->raid_disks * 2 ; i++) 2847 if (r1_bio->bios[i]->bi_end_io == end_sync_write) { 2848 struct md_rdev *rdev = conf->mirrors[i].rdev; 2849 ok = rdev_set_badblocks(rdev, sector_nr, 2850 min_bad, 0 2851 ) && ok; 2852 } 2853 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 2854 *skipped = 1; 2855 put_buf(r1_bio); 2856 2857 if (!ok) { 2858 /* Cannot record the badblocks, so need to 2859 * abort the resync. 2860 * If there are multiple read targets, could just 2861 * fail the really bad ones ??? 
2862 */ 2863 conf->recovery_disabled = mddev->recovery_disabled; 2864 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 2865 return 0; 2866 } else 2867 return min_bad; 2868 2869 } 2870 if (min_bad > 0 && min_bad < good_sectors) { 2871 /* only resync enough to reach the next bad->good 2872 * transition */ 2873 good_sectors = min_bad; 2874 } 2875 2876 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0) 2877 /* extra read targets are also write targets */ 2878 write_targets += read_targets-1; 2879 2880 if (write_targets == 0 || read_targets == 0) { 2881 /* There is nowhere to write, so all non-sync 2882 * drives must be failed - so we are finished 2883 */ 2884 sector_t rv; 2885 if (min_bad > 0) 2886 max_sector = sector_nr + min_bad; 2887 rv = max_sector - sector_nr; 2888 *skipped = 1; 2889 put_buf(r1_bio); 2890 return rv; 2891 } 2892 2893 if (max_sector > mddev->resync_max) 2894 max_sector = mddev->resync_max; /* Don't do IO beyond here */ 2895 if (max_sector > sector_nr + good_sectors) 2896 max_sector = sector_nr + good_sectors; 2897 nr_sectors = 0; 2898 sync_blocks = 0; 2899 do { 2900 struct page *page; 2901 int len = PAGE_SIZE; 2902 if (sector_nr + (len>>9) > max_sector) 2903 len = (max_sector - sector_nr) << 9; 2904 if (len == 0) 2905 break; 2906 if (sync_blocks == 0) { 2907 if (!bitmap_start_sync(mddev->bitmap, sector_nr, 2908 &sync_blocks, still_degraded) && 2909 !conf->fullsync && 2910 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 2911 break; 2912 if ((len >> 9) > sync_blocks) 2913 len = sync_blocks<<9; 2914 } 2915 2916 for (i = 0 ; i < conf->raid_disks * 2; i++) { 2917 bio = r1_bio->bios[i]; 2918 if (bio->bi_end_io) { 2919 page = bio->bi_io_vec[bio->bi_vcnt].bv_page; 2920 if (bio_add_page(bio, page, len, 0) == 0) { 2921 /* stop here */ 2922 bio->bi_io_vec[bio->bi_vcnt].bv_page = page; 2923 while (i > 0) { 2924 i--; 2925 bio = r1_bio->bios[i]; 2926 if (bio->bi_end_io==NULL) 2927 continue; 2928 /* remove last page from this bio */ 2929 bio->bi_vcnt--; 2930 bio->bi_iter.bi_size -= len; 2931 bio_clear_flag(bio, BIO_SEG_VALID); 2932 } 2933 goto bio_full; 2934 } 2935 } 2936 } 2937 nr_sectors += len>>9; 2938 sector_nr += len>>9; 2939 sync_blocks -= (len>>9); 2940 } while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES); 2941 bio_full: 2942 r1_bio->sectors = nr_sectors; 2943 2944 if (mddev_is_clustered(mddev) && 2945 conf->cluster_sync_high < sector_nr + nr_sectors) { 2946 conf->cluster_sync_low = mddev->curr_resync_completed; 2947 conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS; 2948 /* Send resync message */ 2949 md_cluster_ops->resync_info_update(mddev, 2950 conf->cluster_sync_low, 2951 conf->cluster_sync_high); 2952 } 2953 2954 /* For a user-requested sync, we read all readable devices and do a 2955 * compare 2956 */ 2957 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 2958 atomic_set(&r1_bio->remaining, read_targets); 2959 for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) { 2960 bio = r1_bio->bios[i]; 2961 if (bio->bi_end_io == end_sync_read) { 2962 read_targets--; 2963 md_sync_acct(bio->bi_bdev, nr_sectors); 2964 if (read_targets == 1) 2965 bio->bi_opf &= ~MD_FAILFAST; 2966 generic_make_request(bio); 2967 } 2968 } 2969 } else { 2970 atomic_set(&r1_bio->remaining, 1); 2971 bio = r1_bio->bios[r1_bio->read_disk]; 2972 md_sync_acct(bio->bi_bdev, nr_sectors); 2973 if (read_targets == 1) 2974 bio->bi_opf &= ~MD_FAILFAST; 2975 generic_make_request(bio); 2976 2977 } 2978 return nr_sectors; 2979 } 2980 2981 static sector_t 
raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks) 2982 { 2983 if (sectors) 2984 return sectors; 2985 2986 return mddev->dev_sectors; 2987 } 2988 2989 static struct r1conf *setup_conf(struct mddev *mddev) 2990 { 2991 struct r1conf *conf; 2992 int i; 2993 struct raid1_info *disk; 2994 struct md_rdev *rdev; 2995 int err = -ENOMEM; 2996 2997 conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL); 2998 if (!conf) 2999 goto abort; 3000 3001 conf->nr_pending = kcalloc(BARRIER_BUCKETS_NR, 3002 sizeof(atomic_t), GFP_KERNEL); 3003 if (!conf->nr_pending) 3004 goto abort; 3005 3006 conf->nr_waiting = kcalloc(BARRIER_BUCKETS_NR, 3007 sizeof(atomic_t), GFP_KERNEL); 3008 if (!conf->nr_waiting) 3009 goto abort; 3010 3011 conf->nr_queued = kcalloc(BARRIER_BUCKETS_NR, 3012 sizeof(atomic_t), GFP_KERNEL); 3013 if (!conf->nr_queued) 3014 goto abort; 3015 3016 conf->barrier = kcalloc(BARRIER_BUCKETS_NR, 3017 sizeof(atomic_t), GFP_KERNEL); 3018 if (!conf->barrier) 3019 goto abort; 3020 3021 conf->mirrors = kzalloc(sizeof(struct raid1_info) 3022 * mddev->raid_disks * 2, 3023 GFP_KERNEL); 3024 if (!conf->mirrors) 3025 goto abort; 3026 3027 conf->tmppage = alloc_page(GFP_KERNEL); 3028 if (!conf->tmppage) 3029 goto abort; 3030 3031 conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL); 3032 if (!conf->poolinfo) 3033 goto abort; 3034 conf->poolinfo->raid_disks = mddev->raid_disks * 2; 3035 conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc, 3036 r1bio_pool_free, 3037 conf->poolinfo); 3038 if (!conf->r1bio_pool) 3039 goto abort; 3040 3041 conf->poolinfo->mddev = mddev; 3042 3043 err = -EINVAL; 3044 spin_lock_init(&conf->device_lock); 3045 rdev_for_each(rdev, mddev) { 3046 struct request_queue *q; 3047 int disk_idx = rdev->raid_disk; 3048 if (disk_idx >= mddev->raid_disks 3049 || disk_idx < 0) 3050 continue; 3051 if (test_bit(Replacement, &rdev->flags)) 3052 disk = conf->mirrors + mddev->raid_disks + disk_idx; 3053 else 3054 disk = conf->mirrors + disk_idx; 3055 3056 if (disk->rdev) 3057 goto abort; 3058 disk->rdev = rdev; 3059 q = bdev_get_queue(rdev->bdev); 3060 3061 disk->head_position = 0; 3062 disk->seq_start = MaxSector; 3063 } 3064 conf->raid_disks = mddev->raid_disks; 3065 conf->mddev = mddev; 3066 INIT_LIST_HEAD(&conf->retry_list); 3067 INIT_LIST_HEAD(&conf->bio_end_io_list); 3068 3069 spin_lock_init(&conf->resync_lock); 3070 init_waitqueue_head(&conf->wait_barrier); 3071 3072 bio_list_init(&conf->pending_bio_list); 3073 conf->pending_count = 0; 3074 conf->recovery_disabled = mddev->recovery_disabled - 1; 3075 3076 err = -EIO; 3077 for (i = 0; i < conf->raid_disks * 2; i++) { 3078 3079 disk = conf->mirrors + i; 3080 3081 if (i < conf->raid_disks && 3082 disk[conf->raid_disks].rdev) { 3083 /* This slot has a replacement. 
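 * An earlier run was interrupted mid-replacement, so decide
 * whether the original or the replacement rdev should supply
 * the data.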
*/ 3084 if (!disk->rdev) { 3085 /* No original, just make the replacement 3086 * a recovering spare 3087 */ 3088 disk->rdev = 3089 disk[conf->raid_disks].rdev; 3090 disk[conf->raid_disks].rdev = NULL; 3091 } else if (!test_bit(In_sync, &disk->rdev->flags)) 3092 /* Original is not in_sync - bad */ 3093 goto abort; 3094 } 3095 3096 if (!disk->rdev || 3097 !test_bit(In_sync, &disk->rdev->flags)) { 3098 disk->head_position = 0; 3099 if (disk->rdev && 3100 (disk->rdev->saved_raid_disk < 0)) 3101 conf->fullsync = 1; 3102 } 3103 } 3104 3105 err = -ENOMEM; 3106 conf->thread = md_register_thread(raid1d, mddev, "raid1"); 3107 if (!conf->thread) 3108 goto abort; 3109 3110 return conf; 3111 3112 abort: 3113 if (conf) { 3114 mempool_destroy(conf->r1bio_pool); 3115 kfree(conf->mirrors); 3116 safe_put_page(conf->tmppage); 3117 kfree(conf->poolinfo); 3118 kfree(conf->nr_pending); 3119 kfree(conf->nr_waiting); 3120 kfree(conf->nr_queued); 3121 kfree(conf->barrier); 3122 kfree(conf); 3123 } 3124 return ERR_PTR(err); 3125 } 3126 3127 static void raid1_free(struct mddev *mddev, void *priv); 3128 static int raid1_run(struct mddev *mddev) 3129 { 3130 struct r1conf *conf; 3131 int i; 3132 struct md_rdev *rdev; 3133 int ret; 3134 bool discard_supported = false; 3135 3136 if (mddev->level != 1) { 3137 pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n", 3138 mdname(mddev), mddev->level); 3139 return -EIO; 3140 } 3141 if (mddev->reshape_position != MaxSector) { 3142 pr_warn("md/raid1:%s: reshape_position set but not supported\n", 3143 mdname(mddev)); 3144 return -EIO; 3145 } 3146 /* 3147 * copy the already verified devices into our private RAID1 3148 * bookkeeping area. [whatever we allocate in run(), 3149 * should be freed in raid1_free()] 3150 */ 3151 if (mddev->private == NULL) 3152 conf = setup_conf(mddev); 3153 else 3154 conf = mddev->private; 3155 3156 if (IS_ERR(conf)) 3157 return PTR_ERR(conf); 3158 3159 if (mddev->queue) 3160 blk_queue_max_write_same_sectors(mddev->queue, 0); 3161 3162 rdev_for_each(rdev, mddev) { 3163 if (!mddev->gendisk) 3164 continue; 3165 disk_stack_limits(mddev->gendisk, rdev->bdev, 3166 rdev->data_offset << 9); 3167 if (blk_queue_discard(bdev_get_queue(rdev->bdev))) 3168 discard_supported = true; 3169 } 3170 3171 mddev->degraded = 0; 3172 for (i=0; i < conf->raid_disks; i++) 3173 if (conf->mirrors[i].rdev == NULL || 3174 !test_bit(In_sync, &conf->mirrors[i].rdev->flags) || 3175 test_bit(Faulty, &conf->mirrors[i].rdev->flags)) 3176 mddev->degraded++; 3177 3178 if (conf->raid_disks - mddev->degraded == 1) 3179 mddev->recovery_cp = MaxSector; 3180 3181 if (mddev->recovery_cp != MaxSector) 3182 pr_info("md/raid1:%s: not clean -- starting background reconstruction\n", 3183 mdname(mddev)); 3184 pr_info("md/raid1:%s: active with %d out of %d mirrors\n", 3185 mdname(mddev), mddev->raid_disks - mddev->degraded, 3186 mddev->raid_disks); 3187 3188 /* 3189 * Ok, everything is just fine now 3190 */ 3191 mddev->thread = conf->thread; 3192 conf->thread = NULL; 3193 mddev->private = conf; 3194 set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags); 3195 3196 md_set_array_sectors(mddev, raid1_size(mddev, 0, 0)); 3197 3198 if (mddev->queue) { 3199 if (discard_supported) 3200 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, 3201 mddev->queue); 3202 else 3203 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, 3204 mddev->queue); 3205 } 3206 3207 ret = md_integrity_register(mddev); 3208 if (ret) { 3209 md_unregister_thread(&mddev->thread); 3210 raid1_free(mddev, conf); 3211 } 3212 return ret; 3213 } 3214 3215 
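/*
 * Release everything that setup_conf() allocated. The md core calls
 * this through the personality's ->free() hook when the array is
 * stopped; raid1_run() also calls it directly on a failed start.
 */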
static void raid1_free(struct mddev *mddev, void *priv) 3216 { 3217 struct r1conf *conf = priv; 3218 3219 mempool_destroy(conf->r1bio_pool); 3220 kfree(conf->mirrors); 3221 safe_put_page(conf->tmppage); 3222 kfree(conf->poolinfo); 3223 kfree(conf->nr_pending); 3224 kfree(conf->nr_waiting); 3225 kfree(conf->nr_queued); 3226 kfree(conf->barrier); 3227 kfree(conf); 3228 } 3229 3230 static int raid1_resize(struct mddev *mddev, sector_t sectors) 3231 { 3232 /* no resync is happening, and there is enough space 3233 * on all devices, so we can resize. 3234 * We need to make sure resync covers any new space. 3235 * If the array is shrinking we should possibly wait until 3236 * any io in the removed space completes, but it hardly seems 3237 * worth it. 3238 */ 3239 sector_t newsize = raid1_size(mddev, sectors, 0); 3240 if (mddev->external_size && 3241 mddev->array_sectors > newsize) 3242 return -EINVAL; 3243 if (mddev->bitmap) { 3244 int ret = bitmap_resize(mddev->bitmap, newsize, 0, 0); 3245 if (ret) 3246 return ret; 3247 } 3248 md_set_array_sectors(mddev, newsize); 3249 set_capacity(mddev->gendisk, mddev->array_sectors); 3250 revalidate_disk(mddev->gendisk); 3251 if (sectors > mddev->dev_sectors && 3252 mddev->recovery_cp > mddev->dev_sectors) { 3253 mddev->recovery_cp = mddev->dev_sectors; 3254 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3255 } 3256 mddev->dev_sectors = sectors; 3257 mddev->resync_max_sectors = sectors; 3258 return 0; 3259 } 3260 3261 static int raid1_reshape(struct mddev *mddev) 3262 { 3263 /* We need to: 3264 * 1/ resize the r1bio_pool 3265 * 2/ resize conf->mirrors 3266 * 3267 * We allocate a new r1bio_pool if we can. 3268 * Then raise a device barrier and wait until all IO stops. 3269 * Then resize conf->mirrors and swap in the new r1bio pool. 3270 * 3271 * At the same time, we "pack" the devices so that all the missing 3272 * devices have the higher raid_disk numbers. 
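 * For example, growing a two-disk mirror to three disks only grows
 * the bookkeeping here; the new slot is populated later through
 * raid1_add_disk() and rebuilt by the normal recovery machinery.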
3273 */ 3274 mempool_t *newpool, *oldpool; 3275 struct pool_info *newpoolinfo; 3276 struct raid1_info *newmirrors; 3277 struct r1conf *conf = mddev->private; 3278 int cnt, raid_disks; 3279 unsigned long flags; 3280 int d, d2, err; 3281 3282 /* Cannot change chunk_size, layout, or level */ 3283 if (mddev->chunk_sectors != mddev->new_chunk_sectors || 3284 mddev->layout != mddev->new_layout || 3285 mddev->level != mddev->new_level) { 3286 mddev->new_chunk_sectors = mddev->chunk_sectors; 3287 mddev->new_layout = mddev->layout; 3288 mddev->new_level = mddev->level; 3289 return -EINVAL; 3290 } 3291 3292 if (!mddev_is_clustered(mddev)) { 3293 err = md_allow_write(mddev); 3294 if (err) 3295 return err; 3296 } 3297 3298 raid_disks = mddev->raid_disks + mddev->delta_disks; 3299 3300 if (raid_disks < conf->raid_disks) { 3301 cnt=0; 3302 for (d= 0; d < conf->raid_disks; d++) 3303 if (conf->mirrors[d].rdev) 3304 cnt++; 3305 if (cnt > raid_disks) 3306 return -EBUSY; 3307 } 3308 3309 newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL); 3310 if (!newpoolinfo) 3311 return -ENOMEM; 3312 newpoolinfo->mddev = mddev; 3313 newpoolinfo->raid_disks = raid_disks * 2; 3314 3315 newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc, 3316 r1bio_pool_free, newpoolinfo); 3317 if (!newpool) { 3318 kfree(newpoolinfo); 3319 return -ENOMEM; 3320 } 3321 newmirrors = kzalloc(sizeof(struct raid1_info) * raid_disks * 2, 3322 GFP_KERNEL); 3323 if (!newmirrors) { 3324 kfree(newpoolinfo); 3325 mempool_destroy(newpool); 3326 return -ENOMEM; 3327 } 3328 3329 freeze_array(conf, 0); 3330 3331 /* ok, everything is stopped */ 3332 oldpool = conf->r1bio_pool; 3333 conf->r1bio_pool = newpool; 3334 3335 for (d = d2 = 0; d < conf->raid_disks; d++) { 3336 struct md_rdev *rdev = conf->mirrors[d].rdev; 3337 if (rdev && rdev->raid_disk != d2) { 3338 sysfs_unlink_rdev(mddev, rdev); 3339 rdev->raid_disk = d2; 3340 sysfs_unlink_rdev(mddev, rdev); 3341 if (sysfs_link_rdev(mddev, rdev)) 3342 pr_warn("md/raid1:%s: cannot register rd%d\n", 3343 mdname(mddev), rdev->raid_disk); 3344 } 3345 if (rdev) 3346 newmirrors[d2++].rdev = rdev; 3347 } 3348 kfree(conf->mirrors); 3349 conf->mirrors = newmirrors; 3350 kfree(conf->poolinfo); 3351 conf->poolinfo = newpoolinfo; 3352 3353 spin_lock_irqsave(&conf->device_lock, flags); 3354 mddev->degraded += (raid_disks - conf->raid_disks); 3355 spin_unlock_irqrestore(&conf->device_lock, flags); 3356 conf->raid_disks = mddev->raid_disks = raid_disks; 3357 mddev->delta_disks = 0; 3358 3359 unfreeze_array(conf); 3360 3361 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 3362 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3363 md_wakeup_thread(mddev->thread); 3364 3365 mempool_destroy(oldpool); 3366 return 0; 3367 } 3368 3369 static void raid1_quiesce(struct mddev *mddev, int state) 3370 { 3371 struct r1conf *conf = mddev->private; 3372 3373 switch(state) { 3374 case 2: /* wake for suspend */ 3375 wake_up(&conf->wait_barrier); 3376 break; 3377 case 1: 3378 freeze_array(conf, 0); 3379 break; 3380 case 0: 3381 unfreeze_array(conf); 3382 break; 3383 } 3384 } 3385 3386 static void *raid1_takeover(struct mddev *mddev) 3387 { 3388 /* raid1 can take over: 3389 * raid5 with 2 devices, any layout or chunk size 3390 */ 3391 if (mddev->level == 5 && mddev->raid_disks == 2) { 3392 struct r1conf *conf; 3393 mddev->new_level = 1; 3394 mddev->new_layout = 0; 3395 mddev->new_chunk_sectors = 0; 3396 conf = setup_conf(mddev); 3397 if (!IS_ERR(conf)) { 3398 /* Array must appear to be quiesced */ 3399 conf->array_frozen = 1; 3400 
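/* strip array flags, e.g. journalling, that raid1 cannot honour */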
mddev_clear_unsupported_flags(mddev, 3401 UNSUPPORTED_MDDEV_FLAGS); 3402 } 3403 return conf; 3404 } 3405 return ERR_PTR(-EINVAL); 3406 } 3407 3408 static struct md_personality raid1_personality = 3409 { 3410 .name = "raid1", 3411 .level = 1, 3412 .owner = THIS_MODULE, 3413 .make_request = raid1_make_request, 3414 .run = raid1_run, 3415 .free = raid1_free, 3416 .status = raid1_status, 3417 .error_handler = raid1_error, 3418 .hot_add_disk = raid1_add_disk, 3419 .hot_remove_disk= raid1_remove_disk, 3420 .spare_active = raid1_spare_active, 3421 .sync_request = raid1_sync_request, 3422 .resize = raid1_resize, 3423 .size = raid1_size, 3424 .check_reshape = raid1_reshape, 3425 .quiesce = raid1_quiesce, 3426 .takeover = raid1_takeover, 3427 .congested = raid1_congested, 3428 }; 3429 3430 static int __init raid_init(void) 3431 { 3432 return register_md_personality(&raid1_personality); 3433 } 3434 3435 static void raid_exit(void) 3436 { 3437 unregister_md_personality(&raid1_personality); 3438 } 3439 3440 module_init(raid_init); 3441 module_exit(raid_exit); 3442 MODULE_LICENSE("GPL"); 3443 MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD"); 3444 MODULE_ALIAS("md-personality-3"); /* RAID1 */ 3445 MODULE_ALIAS("md-raid1"); 3446 MODULE_ALIAS("md-level-1"); 3447 3448 module_param(max_queued_requests, int, S_IRUGO|S_IWUSR); 3449
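/*
 * Describe the back-pressure knob declared above, using the standard
 * MODULE_PARM_DESC() macro (annotation added here; the wording
 * paraphrases the comment at the variable's definition).
 */
MODULE_PARM_DESC(max_queued_requests,
		 "number of queued raid1 write requests at which the array reports itself congested (default 1024)");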