1 /* 2 * raid10.c : Multiple Devices driver for Linux 3 * 4 * Copyright (C) 2000-2004 Neil Brown 5 * 6 * RAID-10 support for md. 7 * 8 * Base on code in raid1.c. See raid1.c for further copyright information. 9 * 10 * 11 * This program is free software; you can redistribute it and/or modify 12 * it under the terms of the GNU General Public License as published by 13 * the Free Software Foundation; either version 2, or (at your option) 14 * any later version. 15 * 16 * You should have received a copy of the GNU General Public License 17 * (for example /usr/src/linux/COPYING); if not, write to the Free 18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19 */ 20 21 #include <linux/slab.h> 22 #include <linux/delay.h> 23 #include <linux/blkdev.h> 24 #include <linux/module.h> 25 #include <linux/seq_file.h> 26 #include <linux/ratelimit.h> 27 #include <linux/kthread.h> 28 #include <trace/events/block.h> 29 #include "md.h" 30 #include "raid10.h" 31 #include "raid0.h" 32 #include "bitmap.h" 33 34 /* 35 * RAID10 provides a combination of RAID0 and RAID1 functionality. 36 * The layout of data is defined by 37 * chunk_size 38 * raid_disks 39 * near_copies (stored in low byte of layout) 40 * far_copies (stored in second byte of layout) 41 * far_offset (stored in bit 16 of layout ) 42 * use_far_sets (stored in bit 17 of layout ) 43 * use_far_sets_bugfixed (stored in bit 18 of layout ) 44 * 45 * The data to be stored is divided into chunks using chunksize. Each device 46 * is divided into far_copies sections. In each section, chunks are laid out 47 * in a style similar to raid0, but near_copies copies of each chunk is stored 48 * (each on a different drive). The starting device for each section is offset 49 * near_copies from the starting device of the previous section. Thus there 50 * are (near_copies * far_copies) of each chunk, and each is on a different 51 * drive. near_copies and far_copies must be at least one, and their product 52 * is at most raid_disks. 53 * 54 * If far_offset is true, then the far_copies are handled a bit differently. 55 * The copies are still in different stripes, but instead of being very far 56 * apart on disk, there are adjacent stripes. 57 * 58 * The far and offset algorithms are handled slightly differently if 59 * 'use_far_sets' is true. In this case, the array's devices are grouped into 60 * sets that are (near_copies * far_copies) in size. The far copied stripes 61 * are still shifted by 'near_copies' devices, but this shifting stays confined 62 * to the set rather than the entire array. This is done to improve the number 63 * of device combinations that can fail without causing the array to fail. 64 * Example 'far' algorithm w/o 'use_far_sets' (each letter represents a chunk 65 * on a device): 66 * A B C D A B C D E 67 * ... ... 68 * D A B C E A B C D 69 * Example 'far' algorithm w/ 'use_far_sets' enabled (sets illustrated w/ []'s): 70 * [A B] [C D] [A B] [C D E] 71 * |...| |...| |...| | ... | 72 * [B A] [D C] [B A] [E C D] 73 */ 74 75 /* 76 * Number of guaranteed r10bios in case of extreme VM load: 77 */ 78 #define NR_RAID10_BIOS 256 79 80 /* when we get a read error on a read-only array, we redirect to another 81 * device without failing the first device, or trying to over-write to 82 * correct the read error. 
To keep track of bad blocks on a per-bio 83 * level, we store IO_BLOCKED in the appropriate 'bios' pointer 84 */ 85 #define IO_BLOCKED ((struct bio *)1) 86 /* When we successfully write to a known bad-block, we need to remove the 87 * bad-block marking which must be done from process context. So we record 88 * the success by setting devs[n].bio to IO_MADE_GOOD 89 */ 90 #define IO_MADE_GOOD ((struct bio *)2) 91 92 #define BIO_SPECIAL(bio) ((unsigned long)bio <= 2) 93 94 /* When there are this many requests queued to be written by 95 * the raid10 thread, we become 'congested' to provide back-pressure 96 * for writeback. 97 */ 98 static int max_queued_requests = 1024; 99 100 static void allow_barrier(struct r10conf *conf); 101 static void lower_barrier(struct r10conf *conf); 102 static int _enough(struct r10conf *conf, int previous, int ignore); 103 static int enough(struct r10conf *conf, int ignore); 104 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, 105 int *skipped); 106 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio); 107 static void end_reshape_write(struct bio *bio); 108 static void end_reshape(struct r10conf *conf); 109 110 #define raid10_log(md, fmt, args...) \ 111 do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0) 112 113 /* 114 * 'strct resync_pages' stores actual pages used for doing the resync 115 * IO, and it is per-bio, so make .bi_private points to it. 116 */ 117 static inline struct resync_pages *get_resync_pages(struct bio *bio) 118 { 119 return bio->bi_private; 120 } 121 122 /* 123 * for resync bio, r10bio pointer can be retrieved from the per-bio 124 * 'struct resync_pages'. 125 */ 126 static inline struct r10bio *get_resync_r10bio(struct bio *bio) 127 { 128 return get_resync_pages(bio)->raid_bio; 129 } 130 131 static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data) 132 { 133 struct r10conf *conf = data; 134 int size = offsetof(struct r10bio, devs[conf->copies]); 135 136 /* allocate a r10bio with room for raid_disks entries in the 137 * bios array */ 138 return kzalloc(size, gfp_flags); 139 } 140 141 static void r10bio_pool_free(void *r10_bio, void *data) 142 { 143 kfree(r10_bio); 144 } 145 146 /* amount of memory to reserve for resync requests */ 147 #define RESYNC_WINDOW (1024*1024) 148 /* maximum number of concurrent requests, memory permitting */ 149 #define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE) 150 151 /* 152 * When performing a resync, we need to read and compare, so 153 * we need as many pages are there are copies. 154 * When performing a recovery, we need 2 bios, one for read, 155 * one for write (we recover only one drive per r10buf) 156 * 157 */ 158 static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data) 159 { 160 struct r10conf *conf = data; 161 struct r10bio *r10_bio; 162 struct bio *bio; 163 int j; 164 int nalloc, nalloc_rp; 165 struct resync_pages *rps; 166 167 r10_bio = r10bio_pool_alloc(gfp_flags, conf); 168 if (!r10_bio) 169 return NULL; 170 171 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) || 172 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery)) 173 nalloc = conf->copies; /* resync */ 174 else 175 nalloc = 2; /* recovery */ 176 177 /* allocate once for all bios */ 178 if (!conf->have_replacement) 179 nalloc_rp = nalloc; 180 else 181 nalloc_rp = nalloc * 2; 182 rps = kmalloc(sizeof(struct resync_pages) * nalloc_rp, gfp_flags); 183 if (!rps) 184 goto out_free_r10bio; 185 186 /* 187 * Allocate bios. 
188 */ 189 for (j = nalloc ; j-- ; ) { 190 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES); 191 if (!bio) 192 goto out_free_bio; 193 r10_bio->devs[j].bio = bio; 194 if (!conf->have_replacement) 195 continue; 196 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES); 197 if (!bio) 198 goto out_free_bio; 199 r10_bio->devs[j].repl_bio = bio; 200 } 201 /* 202 * Allocate RESYNC_PAGES data pages and attach them 203 * where needed. 204 */ 205 for (j = 0; j < nalloc; j++) { 206 struct bio *rbio = r10_bio->devs[j].repl_bio; 207 struct resync_pages *rp, *rp_repl; 208 209 rp = &rps[j]; 210 if (rbio) 211 rp_repl = &rps[nalloc + j]; 212 213 bio = r10_bio->devs[j].bio; 214 215 if (!j || test_bit(MD_RECOVERY_SYNC, 216 &conf->mddev->recovery)) { 217 if (resync_alloc_pages(rp, gfp_flags)) 218 goto out_free_pages; 219 } else { 220 memcpy(rp, &rps[0], sizeof(*rp)); 221 resync_get_all_pages(rp); 222 } 223 224 rp->idx = 0; 225 rp->raid_bio = r10_bio; 226 bio->bi_private = rp; 227 if (rbio) { 228 memcpy(rp_repl, rp, sizeof(*rp)); 229 rbio->bi_private = rp_repl; 230 } 231 } 232 233 return r10_bio; 234 235 out_free_pages: 236 while (--j >= 0) 237 resync_free_pages(&rps[j * 2]); 238 239 j = 0; 240 out_free_bio: 241 for ( ; j < nalloc; j++) { 242 if (r10_bio->devs[j].bio) 243 bio_put(r10_bio->devs[j].bio); 244 if (r10_bio->devs[j].repl_bio) 245 bio_put(r10_bio->devs[j].repl_bio); 246 } 247 kfree(rps); 248 out_free_r10bio: 249 r10bio_pool_free(r10_bio, conf); 250 return NULL; 251 } 252 253 static void r10buf_pool_free(void *__r10_bio, void *data) 254 { 255 struct r10conf *conf = data; 256 struct r10bio *r10bio = __r10_bio; 257 int j; 258 struct resync_pages *rp = NULL; 259 260 for (j = conf->copies; j--; ) { 261 struct bio *bio = r10bio->devs[j].bio; 262 263 rp = get_resync_pages(bio); 264 resync_free_pages(rp); 265 bio_put(bio); 266 267 bio = r10bio->devs[j].repl_bio; 268 if (bio) 269 bio_put(bio); 270 } 271 272 /* resync pages array stored in the 1st bio's .bi_private */ 273 kfree(rp); 274 275 r10bio_pool_free(r10bio, conf); 276 } 277 278 static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio) 279 { 280 int i; 281 282 for (i = 0; i < conf->copies; i++) { 283 struct bio **bio = & r10_bio->devs[i].bio; 284 if (!BIO_SPECIAL(*bio)) 285 bio_put(*bio); 286 *bio = NULL; 287 bio = &r10_bio->devs[i].repl_bio; 288 if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio)) 289 bio_put(*bio); 290 *bio = NULL; 291 } 292 } 293 294 static void free_r10bio(struct r10bio *r10_bio) 295 { 296 struct r10conf *conf = r10_bio->mddev->private; 297 298 put_all_bios(conf, r10_bio); 299 mempool_free(r10_bio, conf->r10bio_pool); 300 } 301 302 static void put_buf(struct r10bio *r10_bio) 303 { 304 struct r10conf *conf = r10_bio->mddev->private; 305 306 mempool_free(r10_bio, conf->r10buf_pool); 307 308 lower_barrier(conf); 309 } 310 311 static void reschedule_retry(struct r10bio *r10_bio) 312 { 313 unsigned long flags; 314 struct mddev *mddev = r10_bio->mddev; 315 struct r10conf *conf = mddev->private; 316 317 spin_lock_irqsave(&conf->device_lock, flags); 318 list_add(&r10_bio->retry_list, &conf->retry_list); 319 conf->nr_queued ++; 320 spin_unlock_irqrestore(&conf->device_lock, flags); 321 322 /* wake up frozen array... */ 323 wake_up(&conf->wait_barrier); 324 325 md_wakeup_thread(mddev->thread); 326 } 327 328 /* 329 * raid_end_bio_io() is called when we have finished servicing a mirrored 330 * operation and are ready to return a success/failure code to the buffer 331 * cache layer. 
332 */ 333 static void raid_end_bio_io(struct r10bio *r10_bio) 334 { 335 struct bio *bio = r10_bio->master_bio; 336 struct r10conf *conf = r10_bio->mddev->private; 337 338 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) 339 bio->bi_error = -EIO; 340 341 bio_endio(bio); 342 /* 343 * Wake up any possible resync thread that waits for the device 344 * to go idle. 345 */ 346 allow_barrier(conf); 347 348 free_r10bio(r10_bio); 349 } 350 351 /* 352 * Update disk head position estimator based on IRQ completion info. 353 */ 354 static inline void update_head_pos(int slot, struct r10bio *r10_bio) 355 { 356 struct r10conf *conf = r10_bio->mddev->private; 357 358 conf->mirrors[r10_bio->devs[slot].devnum].head_position = 359 r10_bio->devs[slot].addr + (r10_bio->sectors); 360 } 361 362 /* 363 * Find the disk number which triggered given bio 364 */ 365 static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio, 366 struct bio *bio, int *slotp, int *replp) 367 { 368 int slot; 369 int repl = 0; 370 371 for (slot = 0; slot < conf->copies; slot++) { 372 if (r10_bio->devs[slot].bio == bio) 373 break; 374 if (r10_bio->devs[slot].repl_bio == bio) { 375 repl = 1; 376 break; 377 } 378 } 379 380 BUG_ON(slot == conf->copies); 381 update_head_pos(slot, r10_bio); 382 383 if (slotp) 384 *slotp = slot; 385 if (replp) 386 *replp = repl; 387 return r10_bio->devs[slot].devnum; 388 } 389 390 static void raid10_end_read_request(struct bio *bio) 391 { 392 int uptodate = !bio->bi_error; 393 struct r10bio *r10_bio = bio->bi_private; 394 int slot, dev; 395 struct md_rdev *rdev; 396 struct r10conf *conf = r10_bio->mddev->private; 397 398 slot = r10_bio->read_slot; 399 dev = r10_bio->devs[slot].devnum; 400 rdev = r10_bio->devs[slot].rdev; 401 /* 402 * this branch is our 'one mirror IO has finished' event handler: 403 */ 404 update_head_pos(slot, r10_bio); 405 406 if (uptodate) { 407 /* 408 * Set R10BIO_Uptodate in our master bio, so that 409 * we will return a good error code to the higher 410 * levels even if IO on some other mirrored buffer fails. 411 * 412 * The 'master' represents the composite IO operation to 413 * user-side. So if something waits for IO, then it will 414 * wait for the 'master' bio. 415 */ 416 set_bit(R10BIO_Uptodate, &r10_bio->state); 417 } else { 418 /* If all other devices that store this block have 419 * failed, we want to return the error upwards rather 420 * than fail the last device. 
Here we redefine 421 * "uptodate" to mean "Don't want to retry" 422 */ 423 if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state), 424 rdev->raid_disk)) 425 uptodate = 1; 426 } 427 if (uptodate) { 428 raid_end_bio_io(r10_bio); 429 rdev_dec_pending(rdev, conf->mddev); 430 } else { 431 /* 432 * oops, read error - keep the refcount on the rdev 433 */ 434 char b[BDEVNAME_SIZE]; 435 pr_err_ratelimited("md/raid10:%s: %s: rescheduling sector %llu\n", 436 mdname(conf->mddev), 437 bdevname(rdev->bdev, b), 438 (unsigned long long)r10_bio->sector); 439 set_bit(R10BIO_ReadError, &r10_bio->state); 440 reschedule_retry(r10_bio); 441 } 442 } 443 444 static void close_write(struct r10bio *r10_bio) 445 { 446 /* clear the bitmap if all writes complete successfully */ 447 bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector, 448 r10_bio->sectors, 449 !test_bit(R10BIO_Degraded, &r10_bio->state), 450 0); 451 md_write_end(r10_bio->mddev); 452 } 453 454 static void one_write_done(struct r10bio *r10_bio) 455 { 456 if (atomic_dec_and_test(&r10_bio->remaining)) { 457 if (test_bit(R10BIO_WriteError, &r10_bio->state)) 458 reschedule_retry(r10_bio); 459 else { 460 close_write(r10_bio); 461 if (test_bit(R10BIO_MadeGood, &r10_bio->state)) 462 reschedule_retry(r10_bio); 463 else 464 raid_end_bio_io(r10_bio); 465 } 466 } 467 } 468 469 static void raid10_end_write_request(struct bio *bio) 470 { 471 struct r10bio *r10_bio = bio->bi_private; 472 int dev; 473 int dec_rdev = 1; 474 struct r10conf *conf = r10_bio->mddev->private; 475 int slot, repl; 476 struct md_rdev *rdev = NULL; 477 struct bio *to_put = NULL; 478 bool discard_error; 479 480 discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD; 481 482 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl); 483 484 if (repl) 485 rdev = conf->mirrors[dev].replacement; 486 if (!rdev) { 487 smp_rmb(); 488 repl = 0; 489 rdev = conf->mirrors[dev].rdev; 490 } 491 /* 492 * this branch is our 'one mirror IO has finished' event handler: 493 */ 494 if (bio->bi_error && !discard_error) { 495 if (repl) 496 /* Never record new bad blocks to replacement, 497 * just fail it. 498 */ 499 md_error(rdev->mddev, rdev); 500 else { 501 set_bit(WriteErrorSeen, &rdev->flags); 502 if (!test_and_set_bit(WantReplacement, &rdev->flags)) 503 set_bit(MD_RECOVERY_NEEDED, 504 &rdev->mddev->recovery); 505 506 dec_rdev = 0; 507 if (test_bit(FailFast, &rdev->flags) && 508 (bio->bi_opf & MD_FAILFAST)) { 509 md_error(rdev->mddev, rdev); 510 if (!test_bit(Faulty, &rdev->flags)) 511 /* This is the only remaining device, 512 * We need to retry the write without 513 * FailFast 514 */ 515 set_bit(R10BIO_WriteError, &r10_bio->state); 516 else { 517 r10_bio->devs[slot].bio = NULL; 518 to_put = bio; 519 dec_rdev = 1; 520 } 521 } else 522 set_bit(R10BIO_WriteError, &r10_bio->state); 523 } 524 } else { 525 /* 526 * Set R10BIO_Uptodate in our master bio, so that 527 * we will return a good error code for to the higher 528 * levels even if IO on some other mirrored buffer fails. 529 * 530 * The 'master' represents the composite IO operation to 531 * user-side. So if something waits for IO, then it will 532 * wait for the 'master' bio. 533 */ 534 sector_t first_bad; 535 int bad_sectors; 536 537 /* 538 * Do not set R10BIO_Uptodate if the current device is 539 * rebuilding or Faulty. 
This is because we cannot use 540 * such device for properly reading the data back (we could 541 * potentially use it, if the current write would have felt 542 * before rdev->recovery_offset, but for simplicity we don't 543 * check this here. 544 */ 545 if (test_bit(In_sync, &rdev->flags) && 546 !test_bit(Faulty, &rdev->flags)) 547 set_bit(R10BIO_Uptodate, &r10_bio->state); 548 549 /* Maybe we can clear some bad blocks. */ 550 if (is_badblock(rdev, 551 r10_bio->devs[slot].addr, 552 r10_bio->sectors, 553 &first_bad, &bad_sectors) && !discard_error) { 554 bio_put(bio); 555 if (repl) 556 r10_bio->devs[slot].repl_bio = IO_MADE_GOOD; 557 else 558 r10_bio->devs[slot].bio = IO_MADE_GOOD; 559 dec_rdev = 0; 560 set_bit(R10BIO_MadeGood, &r10_bio->state); 561 } 562 } 563 564 /* 565 * 566 * Let's see if all mirrored write operations have finished 567 * already. 568 */ 569 one_write_done(r10_bio); 570 if (dec_rdev) 571 rdev_dec_pending(rdev, conf->mddev); 572 if (to_put) 573 bio_put(to_put); 574 } 575 576 /* 577 * RAID10 layout manager 578 * As well as the chunksize and raid_disks count, there are two 579 * parameters: near_copies and far_copies. 580 * near_copies * far_copies must be <= raid_disks. 581 * Normally one of these will be 1. 582 * If both are 1, we get raid0. 583 * If near_copies == raid_disks, we get raid1. 584 * 585 * Chunks are laid out in raid0 style with near_copies copies of the 586 * first chunk, followed by near_copies copies of the next chunk and 587 * so on. 588 * If far_copies > 1, then after 1/far_copies of the array has been assigned 589 * as described above, we start again with a device offset of near_copies. 590 * So we effectively have another copy of the whole array further down all 591 * the drives, but with blocks on different drives. 592 * With this layout, and block is never stored twice on the one device. 593 * 594 * raid10_find_phys finds the sector offset of a given virtual sector 595 * on each device that it is on. 
596 * 597 * raid10_find_virt does the reverse mapping, from a device and a 598 * sector offset to a virtual address 599 */ 600 601 static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio) 602 { 603 int n,f; 604 sector_t sector; 605 sector_t chunk; 606 sector_t stripe; 607 int dev; 608 int slot = 0; 609 int last_far_set_start, last_far_set_size; 610 611 last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1; 612 last_far_set_start *= geo->far_set_size; 613 614 last_far_set_size = geo->far_set_size; 615 last_far_set_size += (geo->raid_disks % geo->far_set_size); 616 617 /* now calculate first sector/dev */ 618 chunk = r10bio->sector >> geo->chunk_shift; 619 sector = r10bio->sector & geo->chunk_mask; 620 621 chunk *= geo->near_copies; 622 stripe = chunk; 623 dev = sector_div(stripe, geo->raid_disks); 624 if (geo->far_offset) 625 stripe *= geo->far_copies; 626 627 sector += stripe << geo->chunk_shift; 628 629 /* and calculate all the others */ 630 for (n = 0; n < geo->near_copies; n++) { 631 int d = dev; 632 int set; 633 sector_t s = sector; 634 r10bio->devs[slot].devnum = d; 635 r10bio->devs[slot].addr = s; 636 slot++; 637 638 for (f = 1; f < geo->far_copies; f++) { 639 set = d / geo->far_set_size; 640 d += geo->near_copies; 641 642 if ((geo->raid_disks % geo->far_set_size) && 643 (d > last_far_set_start)) { 644 d -= last_far_set_start; 645 d %= last_far_set_size; 646 d += last_far_set_start; 647 } else { 648 d %= geo->far_set_size; 649 d += geo->far_set_size * set; 650 } 651 s += geo->stride; 652 r10bio->devs[slot].devnum = d; 653 r10bio->devs[slot].addr = s; 654 slot++; 655 } 656 dev++; 657 if (dev >= geo->raid_disks) { 658 dev = 0; 659 sector += (geo->chunk_mask + 1); 660 } 661 } 662 } 663 664 static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio) 665 { 666 struct geom *geo = &conf->geo; 667 668 if (conf->reshape_progress != MaxSector && 669 ((r10bio->sector >= conf->reshape_progress) != 670 conf->mddev->reshape_backwards)) { 671 set_bit(R10BIO_Previous, &r10bio->state); 672 geo = &conf->prev; 673 } else 674 clear_bit(R10BIO_Previous, &r10bio->state); 675 676 __raid10_find_phys(geo, r10bio); 677 } 678 679 static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev) 680 { 681 sector_t offset, chunk, vchunk; 682 /* Never use conf->prev as this is only called during resync 683 * or recovery, so reshape isn't happening 684 */ 685 struct geom *geo = &conf->geo; 686 int far_set_start = (dev / geo->far_set_size) * geo->far_set_size; 687 int far_set_size = geo->far_set_size; 688 int last_far_set_start; 689 690 if (geo->raid_disks % geo->far_set_size) { 691 last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1; 692 last_far_set_start *= geo->far_set_size; 693 694 if (dev >= last_far_set_start) { 695 far_set_size = geo->far_set_size; 696 far_set_size += (geo->raid_disks % geo->far_set_size); 697 far_set_start = last_far_set_start; 698 } 699 } 700 701 offset = sector & geo->chunk_mask; 702 if (geo->far_offset) { 703 int fc; 704 chunk = sector >> geo->chunk_shift; 705 fc = sector_div(chunk, geo->far_copies); 706 dev -= fc * geo->near_copies; 707 if (dev < far_set_start) 708 dev += far_set_size; 709 } else { 710 while (sector >= geo->stride) { 711 sector -= geo->stride; 712 if (dev < (geo->near_copies + far_set_start)) 713 dev += far_set_size - geo->near_copies; 714 else 715 dev -= geo->near_copies; 716 } 717 chunk = sector >> geo->chunk_shift; 718 } 719 vchunk = chunk * geo->raid_disks + dev; 720 sector_div(vchunk, 
geo->near_copies); 721 return (vchunk << geo->chunk_shift) + offset; 722 } 723 724 /* 725 * This routine returns the disk from which the requested read should 726 * be done. There is a per-array 'next expected sequential IO' sector 727 * number - if this matches on the next IO then we use the last disk. 728 * There is also a per-disk 'last know head position' sector that is 729 * maintained from IRQ contexts, both the normal and the resync IO 730 * completion handlers update this position correctly. If there is no 731 * perfect sequential match then we pick the disk whose head is closest. 732 * 733 * If there are 2 mirrors in the same 2 devices, performance degrades 734 * because position is mirror, not device based. 735 * 736 * The rdev for the device selected will have nr_pending incremented. 737 */ 738 739 /* 740 * FIXME: possibly should rethink readbalancing and do it differently 741 * depending on near_copies / far_copies geometry. 742 */ 743 static struct md_rdev *read_balance(struct r10conf *conf, 744 struct r10bio *r10_bio, 745 int *max_sectors) 746 { 747 const sector_t this_sector = r10_bio->sector; 748 int disk, slot; 749 int sectors = r10_bio->sectors; 750 int best_good_sectors; 751 sector_t new_distance, best_dist; 752 struct md_rdev *best_rdev, *rdev = NULL; 753 int do_balance; 754 int best_slot; 755 struct geom *geo = &conf->geo; 756 757 raid10_find_phys(conf, r10_bio); 758 rcu_read_lock(); 759 sectors = r10_bio->sectors; 760 best_slot = -1; 761 best_rdev = NULL; 762 best_dist = MaxSector; 763 best_good_sectors = 0; 764 do_balance = 1; 765 clear_bit(R10BIO_FailFast, &r10_bio->state); 766 /* 767 * Check if we can balance. We can balance on the whole 768 * device if no resync is going on (recovery is ok), or below 769 * the resync window. We take the first readable disk when 770 * above the resync window. 771 */ 772 if (conf->mddev->recovery_cp < MaxSector 773 && (this_sector + sectors >= conf->next_resync)) 774 do_balance = 0; 775 776 for (slot = 0; slot < conf->copies ; slot++) { 777 sector_t first_bad; 778 int bad_sectors; 779 sector_t dev_sector; 780 781 if (r10_bio->devs[slot].bio == IO_BLOCKED) 782 continue; 783 disk = r10_bio->devs[slot].devnum; 784 rdev = rcu_dereference(conf->mirrors[disk].replacement); 785 if (rdev == NULL || test_bit(Faulty, &rdev->flags) || 786 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) 787 rdev = rcu_dereference(conf->mirrors[disk].rdev); 788 if (rdev == NULL || 789 test_bit(Faulty, &rdev->flags)) 790 continue; 791 if (!test_bit(In_sync, &rdev->flags) && 792 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) 793 continue; 794 795 dev_sector = r10_bio->devs[slot].addr; 796 if (is_badblock(rdev, dev_sector, sectors, 797 &first_bad, &bad_sectors)) { 798 if (best_dist < MaxSector) 799 /* Already have a better slot */ 800 continue; 801 if (first_bad <= dev_sector) { 802 /* Cannot read here. If this is the 803 * 'primary' device, then we must not read 804 * beyond 'bad_sectors' from another device. 
805 */ 806 bad_sectors -= (dev_sector - first_bad); 807 if (!do_balance && sectors > bad_sectors) 808 sectors = bad_sectors; 809 if (best_good_sectors > sectors) 810 best_good_sectors = sectors; 811 } else { 812 sector_t good_sectors = 813 first_bad - dev_sector; 814 if (good_sectors > best_good_sectors) { 815 best_good_sectors = good_sectors; 816 best_slot = slot; 817 best_rdev = rdev; 818 } 819 if (!do_balance) 820 /* Must read from here */ 821 break; 822 } 823 continue; 824 } else 825 best_good_sectors = sectors; 826 827 if (!do_balance) 828 break; 829 830 if (best_slot >= 0) 831 /* At least 2 disks to choose from so failfast is OK */ 832 set_bit(R10BIO_FailFast, &r10_bio->state); 833 /* This optimisation is debatable, and completely destroys 834 * sequential read speed for 'far copies' arrays. So only 835 * keep it for 'near' arrays, and review those later. 836 */ 837 if (geo->near_copies > 1 && !atomic_read(&rdev->nr_pending)) 838 new_distance = 0; 839 840 /* for far > 1 always use the lowest address */ 841 else if (geo->far_copies > 1) 842 new_distance = r10_bio->devs[slot].addr; 843 else 844 new_distance = abs(r10_bio->devs[slot].addr - 845 conf->mirrors[disk].head_position); 846 if (new_distance < best_dist) { 847 best_dist = new_distance; 848 best_slot = slot; 849 best_rdev = rdev; 850 } 851 } 852 if (slot >= conf->copies) { 853 slot = best_slot; 854 rdev = best_rdev; 855 } 856 857 if (slot >= 0) { 858 atomic_inc(&rdev->nr_pending); 859 r10_bio->read_slot = slot; 860 } else 861 rdev = NULL; 862 rcu_read_unlock(); 863 *max_sectors = best_good_sectors; 864 865 return rdev; 866 } 867 868 static int raid10_congested(struct mddev *mddev, int bits) 869 { 870 struct r10conf *conf = mddev->private; 871 int i, ret = 0; 872 873 if ((bits & (1 << WB_async_congested)) && 874 conf->pending_count >= max_queued_requests) 875 return 1; 876 877 rcu_read_lock(); 878 for (i = 0; 879 (i < conf->geo.raid_disks || i < conf->prev.raid_disks) 880 && ret == 0; 881 i++) { 882 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); 883 if (rdev && !test_bit(Faulty, &rdev->flags)) { 884 struct request_queue *q = bdev_get_queue(rdev->bdev); 885 886 ret |= bdi_congested(q->backing_dev_info, bits); 887 } 888 } 889 rcu_read_unlock(); 890 return ret; 891 } 892 893 static void flush_pending_writes(struct r10conf *conf) 894 { 895 /* Any writes that have been queued but are awaiting 896 * bitmap updates get flushed here. 897 */ 898 spin_lock_irq(&conf->device_lock); 899 900 if (conf->pending_bio_list.head) { 901 struct bio *bio; 902 bio = bio_list_get(&conf->pending_bio_list); 903 conf->pending_count = 0; 904 spin_unlock_irq(&conf->device_lock); 905 /* flush any pending bitmap writes to disk 906 * before proceeding w/ I/O */ 907 bitmap_unplug(conf->mddev->bitmap); 908 wake_up(&conf->wait_barrier); 909 910 while (bio) { /* submit pending writes */ 911 struct bio *next = bio->bi_next; 912 struct md_rdev *rdev = (void*)bio->bi_bdev; 913 bio->bi_next = NULL; 914 bio->bi_bdev = rdev->bdev; 915 if (test_bit(Faulty, &rdev->flags)) { 916 bio->bi_error = -EIO; 917 bio_endio(bio); 918 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && 919 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) 920 /* Just ignore it */ 921 bio_endio(bio); 922 else 923 generic_make_request(bio); 924 bio = next; 925 } 926 } else 927 spin_unlock_irq(&conf->device_lock); 928 } 929 930 /* Barriers.... 931 * Sometimes we need to suspend IO while we do something else, 932 * either some resync/recovery, or reconfigure the array. 
933 * To do this we raise a 'barrier'. 934 * The 'barrier' is a counter that can be raised multiple times 935 * to count how many activities are happening which preclude 936 * normal IO. 937 * We can only raise the barrier if there is no pending IO. 938 * i.e. if nr_pending == 0. 939 * We choose only to raise the barrier if no-one is waiting for the 940 * barrier to go down. This means that as soon as an IO request 941 * is ready, no other operations which require a barrier will start 942 * until the IO request has had a chance. 943 * 944 * So: regular IO calls 'wait_barrier'. When that returns there 945 * is no backgroup IO happening, It must arrange to call 946 * allow_barrier when it has finished its IO. 947 * backgroup IO calls must call raise_barrier. Once that returns 948 * there is no normal IO happeing. It must arrange to call 949 * lower_barrier when the particular background IO completes. 950 */ 951 952 static void raise_barrier(struct r10conf *conf, int force) 953 { 954 BUG_ON(force && !conf->barrier); 955 spin_lock_irq(&conf->resync_lock); 956 957 /* Wait until no block IO is waiting (unless 'force') */ 958 wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting, 959 conf->resync_lock); 960 961 /* block any new IO from starting */ 962 conf->barrier++; 963 964 /* Now wait for all pending IO to complete */ 965 wait_event_lock_irq(conf->wait_barrier, 966 !atomic_read(&conf->nr_pending) && conf->barrier < RESYNC_DEPTH, 967 conf->resync_lock); 968 969 spin_unlock_irq(&conf->resync_lock); 970 } 971 972 static void lower_barrier(struct r10conf *conf) 973 { 974 unsigned long flags; 975 spin_lock_irqsave(&conf->resync_lock, flags); 976 conf->barrier--; 977 spin_unlock_irqrestore(&conf->resync_lock, flags); 978 wake_up(&conf->wait_barrier); 979 } 980 981 static void wait_barrier(struct r10conf *conf) 982 { 983 spin_lock_irq(&conf->resync_lock); 984 if (conf->barrier) { 985 conf->nr_waiting++; 986 /* Wait for the barrier to drop. 987 * However if there are already pending 988 * requests (preventing the barrier from 989 * rising completely), and the 990 * pre-process bio queue isn't empty, 991 * then don't wait, as we need to empty 992 * that queue to get the nr_pending 993 * count down. 994 */ 995 raid10_log(conf->mddev, "wait barrier"); 996 wait_event_lock_irq(conf->wait_barrier, 997 !conf->barrier || 998 (atomic_read(&conf->nr_pending) && 999 current->bio_list && 1000 (!bio_list_empty(¤t->bio_list[0]) || 1001 !bio_list_empty(¤t->bio_list[1]))), 1002 conf->resync_lock); 1003 conf->nr_waiting--; 1004 if (!conf->nr_waiting) 1005 wake_up(&conf->wait_barrier); 1006 } 1007 atomic_inc(&conf->nr_pending); 1008 spin_unlock_irq(&conf->resync_lock); 1009 } 1010 1011 static void allow_barrier(struct r10conf *conf) 1012 { 1013 if ((atomic_dec_and_test(&conf->nr_pending)) || 1014 (conf->array_freeze_pending)) 1015 wake_up(&conf->wait_barrier); 1016 } 1017 1018 static void freeze_array(struct r10conf *conf, int extra) 1019 { 1020 /* stop syncio and normal IO and wait for everything to 1021 * go quiet. 1022 * We increment barrier and nr_waiting, and then 1023 * wait until nr_pending match nr_queued+extra 1024 * This is called in the context of one normal IO request 1025 * that has failed. Thus any sync request that might be pending 1026 * will be blocked by nr_pending, and we need to wait for 1027 * pending IO requests to complete or be queued for re-try. 
1028 * Thus the number queued (nr_queued) plus this request (extra) 1029 * must match the number of pending IOs (nr_pending) before 1030 * we continue. 1031 */ 1032 spin_lock_irq(&conf->resync_lock); 1033 conf->array_freeze_pending++; 1034 conf->barrier++; 1035 conf->nr_waiting++; 1036 wait_event_lock_irq_cmd(conf->wait_barrier, 1037 atomic_read(&conf->nr_pending) == conf->nr_queued+extra, 1038 conf->resync_lock, 1039 flush_pending_writes(conf)); 1040 1041 conf->array_freeze_pending--; 1042 spin_unlock_irq(&conf->resync_lock); 1043 } 1044 1045 static void unfreeze_array(struct r10conf *conf) 1046 { 1047 /* reverse the effect of the freeze */ 1048 spin_lock_irq(&conf->resync_lock); 1049 conf->barrier--; 1050 conf->nr_waiting--; 1051 wake_up(&conf->wait_barrier); 1052 spin_unlock_irq(&conf->resync_lock); 1053 } 1054 1055 static sector_t choose_data_offset(struct r10bio *r10_bio, 1056 struct md_rdev *rdev) 1057 { 1058 if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) || 1059 test_bit(R10BIO_Previous, &r10_bio->state)) 1060 return rdev->data_offset; 1061 else 1062 return rdev->new_data_offset; 1063 } 1064 1065 struct raid10_plug_cb { 1066 struct blk_plug_cb cb; 1067 struct bio_list pending; 1068 int pending_cnt; 1069 }; 1070 1071 static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) 1072 { 1073 struct raid10_plug_cb *plug = container_of(cb, struct raid10_plug_cb, 1074 cb); 1075 struct mddev *mddev = plug->cb.data; 1076 struct r10conf *conf = mddev->private; 1077 struct bio *bio; 1078 1079 if (from_schedule || current->bio_list) { 1080 spin_lock_irq(&conf->device_lock); 1081 bio_list_merge(&conf->pending_bio_list, &plug->pending); 1082 conf->pending_count += plug->pending_cnt; 1083 spin_unlock_irq(&conf->device_lock); 1084 wake_up(&conf->wait_barrier); 1085 md_wakeup_thread(mddev->thread); 1086 kfree(plug); 1087 return; 1088 } 1089 1090 /* we aren't scheduling, so we can do the write-out directly. */ 1091 bio = bio_list_get(&plug->pending); 1092 bitmap_unplug(mddev->bitmap); 1093 wake_up(&conf->wait_barrier); 1094 1095 while (bio) { /* submit pending writes */ 1096 struct bio *next = bio->bi_next; 1097 struct md_rdev *rdev = (void*)bio->bi_bdev; 1098 bio->bi_next = NULL; 1099 bio->bi_bdev = rdev->bdev; 1100 if (test_bit(Faulty, &rdev->flags)) { 1101 bio->bi_error = -EIO; 1102 bio_endio(bio); 1103 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && 1104 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) 1105 /* Just ignore it */ 1106 bio_endio(bio); 1107 else 1108 generic_make_request(bio); 1109 bio = next; 1110 } 1111 kfree(plug); 1112 } 1113 1114 static void raid10_read_request(struct mddev *mddev, struct bio *bio, 1115 struct r10bio *r10_bio) 1116 { 1117 struct r10conf *conf = mddev->private; 1118 struct bio *read_bio; 1119 const int op = bio_op(bio); 1120 const unsigned long do_sync = (bio->bi_opf & REQ_SYNC); 1121 int max_sectors; 1122 sector_t sectors; 1123 struct md_rdev *rdev; 1124 char b[BDEVNAME_SIZE]; 1125 int slot = r10_bio->read_slot; 1126 struct md_rdev *err_rdev = NULL; 1127 gfp_t gfp = GFP_NOIO; 1128 1129 if (r10_bio->devs[slot].rdev) { 1130 /* 1131 * This is an error retry, but we cannot 1132 * safely dereference the rdev in the r10_bio, 1133 * we must use the one in conf. 1134 * If it has already been disconnected (unlikely) 1135 * we lose the device name in error messages. 1136 */ 1137 int disk; 1138 /* 1139 * As we are blocking raid10, it is a little safer to 1140 * use __GFP_HIGH. 
1141 */ 1142 gfp = GFP_NOIO | __GFP_HIGH; 1143 1144 rcu_read_lock(); 1145 disk = r10_bio->devs[slot].devnum; 1146 err_rdev = rcu_dereference(conf->mirrors[disk].rdev); 1147 if (err_rdev) 1148 bdevname(err_rdev->bdev, b); 1149 else { 1150 strcpy(b, "???"); 1151 /* This never gets dereferenced */ 1152 err_rdev = r10_bio->devs[slot].rdev; 1153 } 1154 rcu_read_unlock(); 1155 } 1156 /* 1157 * Register the new request and wait if the reconstruction 1158 * thread has put up a bar for new requests. 1159 * Continue immediately if no resync is active currently. 1160 */ 1161 wait_barrier(conf); 1162 1163 sectors = r10_bio->sectors; 1164 while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 1165 bio->bi_iter.bi_sector < conf->reshape_progress && 1166 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) { 1167 /* 1168 * IO spans the reshape position. Need to wait for reshape to 1169 * pass 1170 */ 1171 raid10_log(conf->mddev, "wait reshape"); 1172 allow_barrier(conf); 1173 wait_event(conf->wait_barrier, 1174 conf->reshape_progress <= bio->bi_iter.bi_sector || 1175 conf->reshape_progress >= bio->bi_iter.bi_sector + 1176 sectors); 1177 wait_barrier(conf); 1178 } 1179 1180 rdev = read_balance(conf, r10_bio, &max_sectors); 1181 if (!rdev) { 1182 if (err_rdev) { 1183 pr_crit_ratelimited("md/raid10:%s: %s: unrecoverable I/O read error for block %llu\n", 1184 mdname(mddev), b, 1185 (unsigned long long)r10_bio->sector); 1186 } 1187 raid_end_bio_io(r10_bio); 1188 return; 1189 } 1190 if (err_rdev) 1191 pr_err_ratelimited("md/raid10:%s: %s: redirecting sector %llu to another mirror\n", 1192 mdname(mddev), 1193 bdevname(rdev->bdev, b), 1194 (unsigned long long)r10_bio->sector); 1195 if (max_sectors < bio_sectors(bio)) { 1196 struct bio *split = bio_split(bio, max_sectors, 1197 gfp, conf->bio_split); 1198 bio_chain(split, bio); 1199 generic_make_request(bio); 1200 bio = split; 1201 r10_bio->master_bio = bio; 1202 r10_bio->sectors = max_sectors; 1203 } 1204 slot = r10_bio->read_slot; 1205 1206 read_bio = bio_clone_fast(bio, gfp, mddev->bio_set); 1207 1208 r10_bio->devs[slot].bio = read_bio; 1209 r10_bio->devs[slot].rdev = rdev; 1210 1211 read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr + 1212 choose_data_offset(r10_bio, rdev); 1213 read_bio->bi_bdev = rdev->bdev; 1214 read_bio->bi_end_io = raid10_end_read_request; 1215 bio_set_op_attrs(read_bio, op, do_sync); 1216 if (test_bit(FailFast, &rdev->flags) && 1217 test_bit(R10BIO_FailFast, &r10_bio->state)) 1218 read_bio->bi_opf |= MD_FAILFAST; 1219 read_bio->bi_private = r10_bio; 1220 1221 if (mddev->gendisk) 1222 trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev), 1223 read_bio, disk_devt(mddev->gendisk), 1224 r10_bio->sector); 1225 generic_make_request(read_bio); 1226 return; 1227 } 1228 1229 static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio, 1230 struct bio *bio, bool replacement, 1231 int n_copy) 1232 { 1233 const int op = bio_op(bio); 1234 const unsigned long do_sync = (bio->bi_opf & REQ_SYNC); 1235 const unsigned long do_fua = (bio->bi_opf & REQ_FUA); 1236 unsigned long flags; 1237 struct blk_plug_cb *cb; 1238 struct raid10_plug_cb *plug = NULL; 1239 struct r10conf *conf = mddev->private; 1240 struct md_rdev *rdev; 1241 int devnum = r10_bio->devs[n_copy].devnum; 1242 struct bio *mbio; 1243 1244 if (replacement) { 1245 rdev = conf->mirrors[devnum].replacement; 1246 if (rdev == NULL) { 1247 /* Replacement just got moved to main 'rdev' */ 1248 smp_mb(); 1249 rdev = conf->mirrors[devnum].rdev; 1250 } 1251 } else 1252 
rdev = conf->mirrors[devnum].rdev; 1253 1254 mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set); 1255 if (replacement) 1256 r10_bio->devs[n_copy].repl_bio = mbio; 1257 else 1258 r10_bio->devs[n_copy].bio = mbio; 1259 1260 mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr + 1261 choose_data_offset(r10_bio, rdev)); 1262 mbio->bi_bdev = rdev->bdev; 1263 mbio->bi_end_io = raid10_end_write_request; 1264 bio_set_op_attrs(mbio, op, do_sync | do_fua); 1265 if (!replacement && test_bit(FailFast, 1266 &conf->mirrors[devnum].rdev->flags) 1267 && enough(conf, devnum)) 1268 mbio->bi_opf |= MD_FAILFAST; 1269 mbio->bi_private = r10_bio; 1270 1271 if (conf->mddev->gendisk) 1272 trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev), 1273 mbio, disk_devt(conf->mddev->gendisk), 1274 r10_bio->sector); 1275 /* flush_pending_writes() needs access to the rdev so...*/ 1276 mbio->bi_bdev = (void *)rdev; 1277 1278 atomic_inc(&r10_bio->remaining); 1279 1280 cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug)); 1281 if (cb) 1282 plug = container_of(cb, struct raid10_plug_cb, cb); 1283 else 1284 plug = NULL; 1285 spin_lock_irqsave(&conf->device_lock, flags); 1286 if (plug) { 1287 bio_list_add(&plug->pending, mbio); 1288 plug->pending_cnt++; 1289 } else { 1290 bio_list_add(&conf->pending_bio_list, mbio); 1291 conf->pending_count++; 1292 } 1293 spin_unlock_irqrestore(&conf->device_lock, flags); 1294 if (!plug) 1295 md_wakeup_thread(mddev->thread); 1296 } 1297 1298 static void raid10_write_request(struct mddev *mddev, struct bio *bio, 1299 struct r10bio *r10_bio) 1300 { 1301 struct r10conf *conf = mddev->private; 1302 int i; 1303 struct md_rdev *blocked_rdev; 1304 sector_t sectors; 1305 int max_sectors; 1306 1307 md_write_start(mddev, bio); 1308 1309 /* 1310 * Register the new request and wait if the reconstruction 1311 * thread has put up a bar for new requests. 1312 * Continue immediately if no resync is active currently. 1313 */ 1314 wait_barrier(conf); 1315 1316 sectors = r10_bio->sectors; 1317 while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 1318 bio->bi_iter.bi_sector < conf->reshape_progress && 1319 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) { 1320 /* 1321 * IO spans the reshape position. Need to wait for reshape to 1322 * pass 1323 */ 1324 raid10_log(conf->mddev, "wait reshape"); 1325 allow_barrier(conf); 1326 wait_event(conf->wait_barrier, 1327 conf->reshape_progress <= bio->bi_iter.bi_sector || 1328 conf->reshape_progress >= bio->bi_iter.bi_sector + 1329 sectors); 1330 wait_barrier(conf); 1331 } 1332 1333 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 1334 (mddev->reshape_backwards 1335 ? 
(bio->bi_iter.bi_sector < conf->reshape_safe && 1336 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) 1337 : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe && 1338 bio->bi_iter.bi_sector < conf->reshape_progress))) { 1339 /* Need to update reshape_position in metadata */ 1340 mddev->reshape_position = conf->reshape_progress; 1341 set_mask_bits(&mddev->sb_flags, 0, 1342 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING)); 1343 md_wakeup_thread(mddev->thread); 1344 raid10_log(conf->mddev, "wait reshape metadata"); 1345 wait_event(mddev->sb_wait, 1346 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); 1347 1348 conf->reshape_safe = mddev->reshape_position; 1349 } 1350 1351 if (conf->pending_count >= max_queued_requests) { 1352 md_wakeup_thread(mddev->thread); 1353 raid10_log(mddev, "wait queued"); 1354 wait_event(conf->wait_barrier, 1355 conf->pending_count < max_queued_requests); 1356 } 1357 /* first select target devices under rcu_lock and 1358 * inc refcount on their rdev. Record them by setting 1359 * bios[x] to bio 1360 * If there are known/acknowledged bad blocks on any device 1361 * on which we have seen a write error, we want to avoid 1362 * writing to those blocks. This potentially requires several 1363 * writes to write around the bad blocks. Each set of writes 1364 * gets its own r10_bio with a set of bios attached. 1365 */ 1366 1367 r10_bio->read_slot = -1; /* make sure repl_bio gets freed */ 1368 raid10_find_phys(conf, r10_bio); 1369 retry_write: 1370 blocked_rdev = NULL; 1371 rcu_read_lock(); 1372 max_sectors = r10_bio->sectors; 1373 1374 for (i = 0; i < conf->copies; i++) { 1375 int d = r10_bio->devs[i].devnum; 1376 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev); 1377 struct md_rdev *rrdev = rcu_dereference( 1378 conf->mirrors[d].replacement); 1379 if (rdev == rrdev) 1380 rrdev = NULL; 1381 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { 1382 atomic_inc(&rdev->nr_pending); 1383 blocked_rdev = rdev; 1384 break; 1385 } 1386 if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) { 1387 atomic_inc(&rrdev->nr_pending); 1388 blocked_rdev = rrdev; 1389 break; 1390 } 1391 if (rdev && (test_bit(Faulty, &rdev->flags))) 1392 rdev = NULL; 1393 if (rrdev && (test_bit(Faulty, &rrdev->flags))) 1394 rrdev = NULL; 1395 1396 r10_bio->devs[i].bio = NULL; 1397 r10_bio->devs[i].repl_bio = NULL; 1398 1399 if (!rdev && !rrdev) { 1400 set_bit(R10BIO_Degraded, &r10_bio->state); 1401 continue; 1402 } 1403 if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) { 1404 sector_t first_bad; 1405 sector_t dev_sector = r10_bio->devs[i].addr; 1406 int bad_sectors; 1407 int is_bad; 1408 1409 is_bad = is_badblock(rdev, dev_sector, max_sectors, 1410 &first_bad, &bad_sectors); 1411 if (is_bad < 0) { 1412 /* Mustn't write here until the bad block 1413 * is acknowledged 1414 */ 1415 atomic_inc(&rdev->nr_pending); 1416 set_bit(BlockedBadBlocks, &rdev->flags); 1417 blocked_rdev = rdev; 1418 break; 1419 } 1420 if (is_bad && first_bad <= dev_sector) { 1421 /* Cannot write here at all */ 1422 bad_sectors -= (dev_sector - first_bad); 1423 if (bad_sectors < max_sectors) 1424 /* Mustn't write more than bad_sectors 1425 * to other devices yet 1426 */ 1427 max_sectors = bad_sectors; 1428 /* We don't set R10BIO_Degraded as that 1429 * only applies if the disk is missing, 1430 * so it might be re-added, and we want to 1431 * know to recover this chunk. 1432 * In this case the device is here, and the 1433 * fact that this chunk is not in-sync is 1434 * recorded in the bad block log. 
1435 */ 1436 continue; 1437 } 1438 if (is_bad) { 1439 int good_sectors = first_bad - dev_sector; 1440 if (good_sectors < max_sectors) 1441 max_sectors = good_sectors; 1442 } 1443 } 1444 if (rdev) { 1445 r10_bio->devs[i].bio = bio; 1446 atomic_inc(&rdev->nr_pending); 1447 } 1448 if (rrdev) { 1449 r10_bio->devs[i].repl_bio = bio; 1450 atomic_inc(&rrdev->nr_pending); 1451 } 1452 } 1453 rcu_read_unlock(); 1454 1455 if (unlikely(blocked_rdev)) { 1456 /* Have to wait for this device to get unblocked, then retry */ 1457 int j; 1458 int d; 1459 1460 for (j = 0; j < i; j++) { 1461 if (r10_bio->devs[j].bio) { 1462 d = r10_bio->devs[j].devnum; 1463 rdev_dec_pending(conf->mirrors[d].rdev, mddev); 1464 } 1465 if (r10_bio->devs[j].repl_bio) { 1466 struct md_rdev *rdev; 1467 d = r10_bio->devs[j].devnum; 1468 rdev = conf->mirrors[d].replacement; 1469 if (!rdev) { 1470 /* Race with remove_disk */ 1471 smp_mb(); 1472 rdev = conf->mirrors[d].rdev; 1473 } 1474 rdev_dec_pending(rdev, mddev); 1475 } 1476 } 1477 allow_barrier(conf); 1478 raid10_log(conf->mddev, "wait rdev %d blocked", blocked_rdev->raid_disk); 1479 md_wait_for_blocked_rdev(blocked_rdev, mddev); 1480 wait_barrier(conf); 1481 goto retry_write; 1482 } 1483 1484 if (max_sectors < r10_bio->sectors) 1485 r10_bio->sectors = max_sectors; 1486 1487 if (r10_bio->sectors < bio_sectors(bio)) { 1488 struct bio *split = bio_split(bio, r10_bio->sectors, 1489 GFP_NOIO, conf->bio_split); 1490 bio_chain(split, bio); 1491 generic_make_request(bio); 1492 bio = split; 1493 r10_bio->master_bio = bio; 1494 } 1495 1496 atomic_set(&r10_bio->remaining, 1); 1497 bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0); 1498 1499 for (i = 0; i < conf->copies; i++) { 1500 if (r10_bio->devs[i].bio) 1501 raid10_write_one_disk(mddev, r10_bio, bio, false, i); 1502 if (r10_bio->devs[i].repl_bio) 1503 raid10_write_one_disk(mddev, r10_bio, bio, true, i); 1504 } 1505 one_write_done(r10_bio); 1506 } 1507 1508 static void __make_request(struct mddev *mddev, struct bio *bio, int sectors) 1509 { 1510 struct r10conf *conf = mddev->private; 1511 struct r10bio *r10_bio; 1512 1513 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO); 1514 1515 r10_bio->master_bio = bio; 1516 r10_bio->sectors = sectors; 1517 1518 r10_bio->mddev = mddev; 1519 r10_bio->sector = bio->bi_iter.bi_sector; 1520 r10_bio->state = 0; 1521 memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * conf->copies); 1522 1523 if (bio_data_dir(bio) == READ) 1524 raid10_read_request(mddev, bio, r10_bio); 1525 else 1526 raid10_write_request(mddev, bio, r10_bio); 1527 } 1528 1529 static void raid10_make_request(struct mddev *mddev, struct bio *bio) 1530 { 1531 struct r10conf *conf = mddev->private; 1532 sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask); 1533 int chunk_sects = chunk_mask + 1; 1534 int sectors = bio_sectors(bio); 1535 1536 if (unlikely(bio->bi_opf & REQ_PREFLUSH)) { 1537 md_flush_request(mddev, bio); 1538 return; 1539 } 1540 1541 /* 1542 * If this request crosses a chunk boundary, we need to split 1543 * it. 
1544 */ 1545 if (unlikely((bio->bi_iter.bi_sector & chunk_mask) + 1546 sectors > chunk_sects 1547 && (conf->geo.near_copies < conf->geo.raid_disks 1548 || conf->prev.near_copies < 1549 conf->prev.raid_disks))) 1550 sectors = chunk_sects - 1551 (bio->bi_iter.bi_sector & 1552 (chunk_sects - 1)); 1553 __make_request(mddev, bio, sectors); 1554 1555 /* In case raid10d snuck in to freeze_array */ 1556 wake_up(&conf->wait_barrier); 1557 } 1558 1559 static void raid10_status(struct seq_file *seq, struct mddev *mddev) 1560 { 1561 struct r10conf *conf = mddev->private; 1562 int i; 1563 1564 if (conf->geo.near_copies < conf->geo.raid_disks) 1565 seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2); 1566 if (conf->geo.near_copies > 1) 1567 seq_printf(seq, " %d near-copies", conf->geo.near_copies); 1568 if (conf->geo.far_copies > 1) { 1569 if (conf->geo.far_offset) 1570 seq_printf(seq, " %d offset-copies", conf->geo.far_copies); 1571 else 1572 seq_printf(seq, " %d far-copies", conf->geo.far_copies); 1573 if (conf->geo.far_set_size != conf->geo.raid_disks) 1574 seq_printf(seq, " %d devices per set", conf->geo.far_set_size); 1575 } 1576 seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks, 1577 conf->geo.raid_disks - mddev->degraded); 1578 rcu_read_lock(); 1579 for (i = 0; i < conf->geo.raid_disks; i++) { 1580 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); 1581 seq_printf(seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_"); 1582 } 1583 rcu_read_unlock(); 1584 seq_printf(seq, "]"); 1585 } 1586 1587 /* check if there are enough drives for 1588 * every block to appear on atleast one. 1589 * Don't consider the device numbered 'ignore' 1590 * as we might be about to remove it. 1591 */ 1592 static int _enough(struct r10conf *conf, int previous, int ignore) 1593 { 1594 int first = 0; 1595 int has_enough = 0; 1596 int disks, ncopies; 1597 if (previous) { 1598 disks = conf->prev.raid_disks; 1599 ncopies = conf->prev.near_copies; 1600 } else { 1601 disks = conf->geo.raid_disks; 1602 ncopies = conf->geo.near_copies; 1603 } 1604 1605 rcu_read_lock(); 1606 do { 1607 int n = conf->copies; 1608 int cnt = 0; 1609 int this = first; 1610 while (n--) { 1611 struct md_rdev *rdev; 1612 if (this != ignore && 1613 (rdev = rcu_dereference(conf->mirrors[this].rdev)) && 1614 test_bit(In_sync, &rdev->flags)) 1615 cnt++; 1616 this = (this+1) % disks; 1617 } 1618 if (cnt == 0) 1619 goto out; 1620 first = (first + ncopies) % disks; 1621 } while (first != 0); 1622 has_enough = 1; 1623 out: 1624 rcu_read_unlock(); 1625 return has_enough; 1626 } 1627 1628 static int enough(struct r10conf *conf, int ignore) 1629 { 1630 /* when calling 'enough', both 'prev' and 'geo' must 1631 * be stable. 1632 * This is ensured if ->reconfig_mutex or ->device_lock 1633 * is held. 1634 */ 1635 return _enough(conf, 0, ignore) && 1636 _enough(conf, 1, ignore); 1637 } 1638 1639 static void raid10_error(struct mddev *mddev, struct md_rdev *rdev) 1640 { 1641 char b[BDEVNAME_SIZE]; 1642 struct r10conf *conf = mddev->private; 1643 unsigned long flags; 1644 1645 /* 1646 * If it is not operational, then we have already marked it as dead 1647 * else if it is the last working disks, ignore the error, let the 1648 * next level up know. 1649 * else mark the drive as failed 1650 */ 1651 spin_lock_irqsave(&conf->device_lock, flags); 1652 if (test_bit(In_sync, &rdev->flags) 1653 && !enough(conf, rdev->raid_disk)) { 1654 /* 1655 * Don't fail the drive, just return an IO error. 
1656 */ 1657 spin_unlock_irqrestore(&conf->device_lock, flags); 1658 return; 1659 } 1660 if (test_and_clear_bit(In_sync, &rdev->flags)) 1661 mddev->degraded++; 1662 /* 1663 * If recovery is running, make sure it aborts. 1664 */ 1665 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 1666 set_bit(Blocked, &rdev->flags); 1667 set_bit(Faulty, &rdev->flags); 1668 set_mask_bits(&mddev->sb_flags, 0, 1669 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING)); 1670 spin_unlock_irqrestore(&conf->device_lock, flags); 1671 pr_crit("md/raid10:%s: Disk failure on %s, disabling device.\n" 1672 "md/raid10:%s: Operation continuing on %d devices.\n", 1673 mdname(mddev), bdevname(rdev->bdev, b), 1674 mdname(mddev), conf->geo.raid_disks - mddev->degraded); 1675 } 1676 1677 static void print_conf(struct r10conf *conf) 1678 { 1679 int i; 1680 struct md_rdev *rdev; 1681 1682 pr_debug("RAID10 conf printout:\n"); 1683 if (!conf) { 1684 pr_debug("(!conf)\n"); 1685 return; 1686 } 1687 pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded, 1688 conf->geo.raid_disks); 1689 1690 /* This is only called with ->reconfix_mutex held, so 1691 * rcu protection of rdev is not needed */ 1692 for (i = 0; i < conf->geo.raid_disks; i++) { 1693 char b[BDEVNAME_SIZE]; 1694 rdev = conf->mirrors[i].rdev; 1695 if (rdev) 1696 pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n", 1697 i, !test_bit(In_sync, &rdev->flags), 1698 !test_bit(Faulty, &rdev->flags), 1699 bdevname(rdev->bdev,b)); 1700 } 1701 } 1702 1703 static void close_sync(struct r10conf *conf) 1704 { 1705 wait_barrier(conf); 1706 allow_barrier(conf); 1707 1708 mempool_destroy(conf->r10buf_pool); 1709 conf->r10buf_pool = NULL; 1710 } 1711 1712 static int raid10_spare_active(struct mddev *mddev) 1713 { 1714 int i; 1715 struct r10conf *conf = mddev->private; 1716 struct raid10_info *tmp; 1717 int count = 0; 1718 unsigned long flags; 1719 1720 /* 1721 * Find all non-in_sync disks within the RAID10 configuration 1722 * and mark them in_sync 1723 */ 1724 for (i = 0; i < conf->geo.raid_disks; i++) { 1725 tmp = conf->mirrors + i; 1726 if (tmp->replacement 1727 && tmp->replacement->recovery_offset == MaxSector 1728 && !test_bit(Faulty, &tmp->replacement->flags) 1729 && !test_and_set_bit(In_sync, &tmp->replacement->flags)) { 1730 /* Replacement has just become active */ 1731 if (!tmp->rdev 1732 || !test_and_clear_bit(In_sync, &tmp->rdev->flags)) 1733 count++; 1734 if (tmp->rdev) { 1735 /* Replaced device not technically faulty, 1736 * but we need to be sure it gets removed 1737 * and never re-added. 
1738 */ 1739 set_bit(Faulty, &tmp->rdev->flags); 1740 sysfs_notify_dirent_safe( 1741 tmp->rdev->sysfs_state); 1742 } 1743 sysfs_notify_dirent_safe(tmp->replacement->sysfs_state); 1744 } else if (tmp->rdev 1745 && tmp->rdev->recovery_offset == MaxSector 1746 && !test_bit(Faulty, &tmp->rdev->flags) 1747 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { 1748 count++; 1749 sysfs_notify_dirent_safe(tmp->rdev->sysfs_state); 1750 } 1751 } 1752 spin_lock_irqsave(&conf->device_lock, flags); 1753 mddev->degraded -= count; 1754 spin_unlock_irqrestore(&conf->device_lock, flags); 1755 1756 print_conf(conf); 1757 return count; 1758 } 1759 1760 static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) 1761 { 1762 struct r10conf *conf = mddev->private; 1763 int err = -EEXIST; 1764 int mirror; 1765 int first = 0; 1766 int last = conf->geo.raid_disks - 1; 1767 1768 if (mddev->recovery_cp < MaxSector) 1769 /* only hot-add to in-sync arrays, as recovery is 1770 * very different from resync 1771 */ 1772 return -EBUSY; 1773 if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1)) 1774 return -EINVAL; 1775 1776 if (md_integrity_add_rdev(rdev, mddev)) 1777 return -ENXIO; 1778 1779 if (rdev->raid_disk >= 0) 1780 first = last = rdev->raid_disk; 1781 1782 if (rdev->saved_raid_disk >= first && 1783 conf->mirrors[rdev->saved_raid_disk].rdev == NULL) 1784 mirror = rdev->saved_raid_disk; 1785 else 1786 mirror = first; 1787 for ( ; mirror <= last ; mirror++) { 1788 struct raid10_info *p = &conf->mirrors[mirror]; 1789 if (p->recovery_disabled == mddev->recovery_disabled) 1790 continue; 1791 if (p->rdev) { 1792 if (!test_bit(WantReplacement, &p->rdev->flags) || 1793 p->replacement != NULL) 1794 continue; 1795 clear_bit(In_sync, &rdev->flags); 1796 set_bit(Replacement, &rdev->flags); 1797 rdev->raid_disk = mirror; 1798 err = 0; 1799 if (mddev->gendisk) 1800 disk_stack_limits(mddev->gendisk, rdev->bdev, 1801 rdev->data_offset << 9); 1802 conf->fullsync = 1; 1803 rcu_assign_pointer(p->replacement, rdev); 1804 break; 1805 } 1806 1807 if (mddev->gendisk) 1808 disk_stack_limits(mddev->gendisk, rdev->bdev, 1809 rdev->data_offset << 9); 1810 1811 p->head_position = 0; 1812 p->recovery_disabled = mddev->recovery_disabled - 1; 1813 rdev->raid_disk = mirror; 1814 err = 0; 1815 if (rdev->saved_raid_disk != mirror) 1816 conf->fullsync = 1; 1817 rcu_assign_pointer(p->rdev, rdev); 1818 break; 1819 } 1820 if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev))) 1821 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); 1822 1823 print_conf(conf); 1824 return err; 1825 } 1826 1827 static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev) 1828 { 1829 struct r10conf *conf = mddev->private; 1830 int err = 0; 1831 int number = rdev->raid_disk; 1832 struct md_rdev **rdevp; 1833 struct raid10_info *p = conf->mirrors + number; 1834 1835 print_conf(conf); 1836 if (rdev == p->rdev) 1837 rdevp = &p->rdev; 1838 else if (rdev == p->replacement) 1839 rdevp = &p->replacement; 1840 else 1841 return 0; 1842 1843 if (test_bit(In_sync, &rdev->flags) || 1844 atomic_read(&rdev->nr_pending)) { 1845 err = -EBUSY; 1846 goto abort; 1847 } 1848 /* Only remove non-faulty devices if recovery 1849 * is not possible. 
1850 */ 1851 if (!test_bit(Faulty, &rdev->flags) && 1852 mddev->recovery_disabled != p->recovery_disabled && 1853 (!p->replacement || p->replacement == rdev) && 1854 number < conf->geo.raid_disks && 1855 enough(conf, -1)) { 1856 err = -EBUSY; 1857 goto abort; 1858 } 1859 *rdevp = NULL; 1860 if (!test_bit(RemoveSynchronized, &rdev->flags)) { 1861 synchronize_rcu(); 1862 if (atomic_read(&rdev->nr_pending)) { 1863 /* lost the race, try later */ 1864 err = -EBUSY; 1865 *rdevp = rdev; 1866 goto abort; 1867 } 1868 } 1869 if (p->replacement) { 1870 /* We must have just cleared 'rdev' */ 1871 p->rdev = p->replacement; 1872 clear_bit(Replacement, &p->replacement->flags); 1873 smp_mb(); /* Make sure other CPUs may see both as identical 1874 * but will never see neither -- if they are careful. 1875 */ 1876 p->replacement = NULL; 1877 } 1878 1879 clear_bit(WantReplacement, &rdev->flags); 1880 err = md_integrity_register(mddev); 1881 1882 abort: 1883 1884 print_conf(conf); 1885 return err; 1886 } 1887 1888 static void __end_sync_read(struct r10bio *r10_bio, struct bio *bio, int d) 1889 { 1890 struct r10conf *conf = r10_bio->mddev->private; 1891 1892 if (!bio->bi_error) 1893 set_bit(R10BIO_Uptodate, &r10_bio->state); 1894 else 1895 /* The write handler will notice the lack of 1896 * R10BIO_Uptodate and record any errors etc 1897 */ 1898 atomic_add(r10_bio->sectors, 1899 &conf->mirrors[d].rdev->corrected_errors); 1900 1901 /* for reconstruct, we always reschedule after a read. 1902 * for resync, only after all reads 1903 */ 1904 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev); 1905 if (test_bit(R10BIO_IsRecover, &r10_bio->state) || 1906 atomic_dec_and_test(&r10_bio->remaining)) { 1907 /* we have read all the blocks, 1908 * do the comparison in process context in raid10d 1909 */ 1910 reschedule_retry(r10_bio); 1911 } 1912 } 1913 1914 static void end_sync_read(struct bio *bio) 1915 { 1916 struct r10bio *r10_bio = get_resync_r10bio(bio); 1917 struct r10conf *conf = r10_bio->mddev->private; 1918 int d = find_bio_disk(conf, r10_bio, bio, NULL, NULL); 1919 1920 __end_sync_read(r10_bio, bio, d); 1921 } 1922 1923 static void end_reshape_read(struct bio *bio) 1924 { 1925 /* reshape read bio isn't allocated from r10buf_pool */ 1926 struct r10bio *r10_bio = bio->bi_private; 1927 1928 __end_sync_read(r10_bio, bio, r10_bio->read_slot); 1929 } 1930 1931 static void end_sync_request(struct r10bio *r10_bio) 1932 { 1933 struct mddev *mddev = r10_bio->mddev; 1934 1935 while (atomic_dec_and_test(&r10_bio->remaining)) { 1936 if (r10_bio->master_bio == NULL) { 1937 /* the primary of several recovery bios */ 1938 sector_t s = r10_bio->sectors; 1939 if (test_bit(R10BIO_MadeGood, &r10_bio->state) || 1940 test_bit(R10BIO_WriteError, &r10_bio->state)) 1941 reschedule_retry(r10_bio); 1942 else 1943 put_buf(r10_bio); 1944 md_done_sync(mddev, s, 1); 1945 break; 1946 } else { 1947 struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio; 1948 if (test_bit(R10BIO_MadeGood, &r10_bio->state) || 1949 test_bit(R10BIO_WriteError, &r10_bio->state)) 1950 reschedule_retry(r10_bio); 1951 else 1952 put_buf(r10_bio); 1953 r10_bio = r10_bio2; 1954 } 1955 } 1956 } 1957 1958 static void end_sync_write(struct bio *bio) 1959 { 1960 struct r10bio *r10_bio = get_resync_r10bio(bio); 1961 struct mddev *mddev = r10_bio->mddev; 1962 struct r10conf *conf = mddev->private; 1963 int d; 1964 sector_t first_bad; 1965 int bad_sectors; 1966 int slot; 1967 int repl; 1968 struct md_rdev *rdev = NULL; 1969 1970 d = find_bio_disk(conf, r10_bio, bio, &slot, 
&repl); 1971 if (repl) 1972 rdev = conf->mirrors[d].replacement; 1973 else 1974 rdev = conf->mirrors[d].rdev; 1975 1976 if (bio->bi_error) { 1977 if (repl) 1978 md_error(mddev, rdev); 1979 else { 1980 set_bit(WriteErrorSeen, &rdev->flags); 1981 if (!test_and_set_bit(WantReplacement, &rdev->flags)) 1982 set_bit(MD_RECOVERY_NEEDED, 1983 &rdev->mddev->recovery); 1984 set_bit(R10BIO_WriteError, &r10_bio->state); 1985 } 1986 } else if (is_badblock(rdev, 1987 r10_bio->devs[slot].addr, 1988 r10_bio->sectors, 1989 &first_bad, &bad_sectors)) 1990 set_bit(R10BIO_MadeGood, &r10_bio->state); 1991 1992 rdev_dec_pending(rdev, mddev); 1993 1994 end_sync_request(r10_bio); 1995 } 1996 1997 /* 1998 * Note: sync and recover are handled very differently for raid10 1999 * This code is for resync. 2000 * For resync, we read through virtual addresses and read all blocks. 2001 * If there is any error, we schedule a write. The lowest numbered 2002 * drive is authoritative. 2003 * However requests come for physical address, so we need to map. 2004 * For every physical address there are raid_disks/copies virtual addresses, 2005 * which is always at least one, but is not necessarily an integer. 2006 * This means that a physical address can span multiple chunks, so we may 2007 * have to submit multiple io requests for a single sync request. 2008 */ 2009 /* 2010 * We check if all blocks are in-sync and only write to blocks that 2011 * aren't in sync 2012 */ 2013 static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) 2014 { 2015 struct r10conf *conf = mddev->private; 2016 int i, first; 2017 struct bio *tbio, *fbio; 2018 int vcnt; 2019 struct page **tpages, **fpages; 2020 2021 atomic_set(&r10_bio->remaining, 1); 2022 2023 /* find the first device with a block */ 2024 for (i=0; i<conf->copies; i++) 2025 if (!r10_bio->devs[i].bio->bi_error) 2026 break; 2027 2028 if (i == conf->copies) 2029 goto done; 2030 2031 first = i; 2032 fbio = r10_bio->devs[i].bio; 2033 fbio->bi_iter.bi_size = r10_bio->sectors << 9; 2034 fbio->bi_iter.bi_idx = 0; 2035 fpages = get_resync_pages(fbio)->pages; 2036 2037 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9); 2038 /* now find blocks with errors */ 2039 for (i=0 ; i < conf->copies ; i++) { 2040 int j, d; 2041 struct md_rdev *rdev; 2042 struct resync_pages *rp; 2043 2044 tbio = r10_bio->devs[i].bio; 2045 2046 if (tbio->bi_end_io != end_sync_read) 2047 continue; 2048 if (i == first) 2049 continue; 2050 2051 tpages = get_resync_pages(tbio)->pages; 2052 d = r10_bio->devs[i].devnum; 2053 rdev = conf->mirrors[d].rdev; 2054 if (!r10_bio->devs[i].bio->bi_error) { 2055 /* We know that the bi_io_vec layout is the same for 2056 * both 'first' and 'i', so we just compare them. 2057 * All vec entries are PAGE_SIZE; 2058 */ 2059 int sectors = r10_bio->sectors; 2060 for (j = 0; j < vcnt; j++) { 2061 int len = PAGE_SIZE; 2062 if (sectors < (len / 512)) 2063 len = sectors * 512; 2064 if (memcmp(page_address(fpages[j]), 2065 page_address(tpages[j]), 2066 len)) 2067 break; 2068 sectors -= len/512; 2069 } 2070 if (j == vcnt) 2071 continue; 2072 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches); 2073 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) 2074 /* Don't fix anything. */ 2075 continue; 2076 } else if (test_bit(FailFast, &rdev->flags)) { 2077 /* Just give up on this device */ 2078 md_error(rdev->mddev, rdev); 2079 continue; 2080 } 2081 /* Ok, we need to write this bio, either to correct an 2082 * inconsistency or to correct an unreadable block.
2083 * First we need to fixup bv_offset, bv_len and 2084 * bi_vecs, as the read request might have corrupted these 2085 */ 2086 rp = get_resync_pages(tbio); 2087 bio_reset(tbio); 2088 2089 tbio->bi_vcnt = vcnt; 2090 tbio->bi_iter.bi_size = fbio->bi_iter.bi_size; 2091 rp->raid_bio = r10_bio; 2092 tbio->bi_private = rp; 2093 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; 2094 tbio->bi_end_io = end_sync_write; 2095 bio_set_op_attrs(tbio, REQ_OP_WRITE, 0); 2096 2097 bio_copy_data(tbio, fbio); 2098 2099 atomic_inc(&conf->mirrors[d].rdev->nr_pending); 2100 atomic_inc(&r10_bio->remaining); 2101 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio)); 2102 2103 if (test_bit(FailFast, &conf->mirrors[d].rdev->flags)) 2104 tbio->bi_opf |= MD_FAILFAST; 2105 tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset; 2106 tbio->bi_bdev = conf->mirrors[d].rdev->bdev; 2107 generic_make_request(tbio); 2108 } 2109 2110 /* Now write out to any replacement devices 2111 * that are active 2112 */ 2113 for (i = 0; i < conf->copies; i++) { 2114 int d; 2115 2116 tbio = r10_bio->devs[i].repl_bio; 2117 if (!tbio || !tbio->bi_end_io) 2118 continue; 2119 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write 2120 && r10_bio->devs[i].bio != fbio) 2121 bio_copy_data(tbio, fbio); 2122 d = r10_bio->devs[i].devnum; 2123 atomic_inc(&r10_bio->remaining); 2124 md_sync_acct(conf->mirrors[d].replacement->bdev, 2125 bio_sectors(tbio)); 2126 generic_make_request(tbio); 2127 } 2128 2129 done: 2130 if (atomic_dec_and_test(&r10_bio->remaining)) { 2131 md_done_sync(mddev, r10_bio->sectors, 1); 2132 put_buf(r10_bio); 2133 } 2134 } 2135 2136 /* 2137 * Now for the recovery code. 2138 * Recovery happens across physical sectors. 2139 * We recover all non-in_sync drives by finding the virtual address of 2140 * each, and then choose a working drive that also has that virt address. 2141 * There is a separate r10_bio for each non-in_sync drive. 2142 * Only the first two slots are in use. The first for reading, 2143 * the second for writing. 2144 * 2145 */ 2146 static void fix_recovery_read_error(struct r10bio *r10_bio) 2147 { 2148 /* We got a read error during recovery. 2149 * We repeat the read in smaller page-sized sections. 2150 * If a read succeeds, write it to the new device or record 2151 * a bad block if we cannot. 2152 * If a read fails, record a bad block on both old and 2153 * new devices.
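 *
 * A rough worked example (assuming 4KiB pages): the loop below clamps
 * each attempt to PAGE_SIZE >> 9 = 8 sectors, so an r10_bio covering
 * 64 sectors is retried as eight reads into pages[0]..pages[7], each
 * independently re-written to devs[1] or recorded as a bad block.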
2154 */ 2155 struct mddev *mddev = r10_bio->mddev; 2156 struct r10conf *conf = mddev->private; 2157 struct bio *bio = r10_bio->devs[0].bio; 2158 sector_t sect = 0; 2159 int sectors = r10_bio->sectors; 2160 int idx = 0; 2161 int dr = r10_bio->devs[0].devnum; 2162 int dw = r10_bio->devs[1].devnum; 2163 struct page **pages = get_resync_pages(bio)->pages; 2164 2165 while (sectors) { 2166 int s = sectors; 2167 struct md_rdev *rdev; 2168 sector_t addr; 2169 int ok; 2170 2171 if (s > (PAGE_SIZE>>9)) 2172 s = PAGE_SIZE >> 9; 2173 2174 rdev = conf->mirrors[dr].rdev; 2175 addr = r10_bio->devs[0].addr + sect, 2176 ok = sync_page_io(rdev, 2177 addr, 2178 s << 9, 2179 pages[idx], 2180 REQ_OP_READ, 0, false); 2181 if (ok) { 2182 rdev = conf->mirrors[dw].rdev; 2183 addr = r10_bio->devs[1].addr + sect; 2184 ok = sync_page_io(rdev, 2185 addr, 2186 s << 9, 2187 pages[idx], 2188 REQ_OP_WRITE, 0, false); 2189 if (!ok) { 2190 set_bit(WriteErrorSeen, &rdev->flags); 2191 if (!test_and_set_bit(WantReplacement, 2192 &rdev->flags)) 2193 set_bit(MD_RECOVERY_NEEDED, 2194 &rdev->mddev->recovery); 2195 } 2196 } 2197 if (!ok) { 2198 /* We don't worry if we cannot set a bad block - 2199 * it really is bad so there is no loss in not 2200 * recording it yet 2201 */ 2202 rdev_set_badblocks(rdev, addr, s, 0); 2203 2204 if (rdev != conf->mirrors[dw].rdev) { 2205 /* need bad block on destination too */ 2206 struct md_rdev *rdev2 = conf->mirrors[dw].rdev; 2207 addr = r10_bio->devs[1].addr + sect; 2208 ok = rdev_set_badblocks(rdev2, addr, s, 0); 2209 if (!ok) { 2210 /* just abort the recovery */ 2211 pr_notice("md/raid10:%s: recovery aborted due to read error\n", 2212 mdname(mddev)); 2213 2214 conf->mirrors[dw].recovery_disabled 2215 = mddev->recovery_disabled; 2216 set_bit(MD_RECOVERY_INTR, 2217 &mddev->recovery); 2218 break; 2219 } 2220 } 2221 } 2222 2223 sectors -= s; 2224 sect += s; 2225 idx++; 2226 } 2227 } 2228 2229 static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio) 2230 { 2231 struct r10conf *conf = mddev->private; 2232 int d; 2233 struct bio *wbio, *wbio2; 2234 2235 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) { 2236 fix_recovery_read_error(r10_bio); 2237 end_sync_request(r10_bio); 2238 return; 2239 } 2240 2241 /* 2242 * share the pages with the first bio 2243 * and submit the write request 2244 */ 2245 d = r10_bio->devs[1].devnum; 2246 wbio = r10_bio->devs[1].bio; 2247 wbio2 = r10_bio->devs[1].repl_bio; 2248 /* Need to test wbio2->bi_end_io before we call 2249 * generic_make_request as if the former is NULL, 2250 * the latter is free to free wbio2. 2251 */ 2252 if (wbio2 && !wbio2->bi_end_io) 2253 wbio2 = NULL; 2254 if (wbio->bi_end_io) { 2255 atomic_inc(&conf->mirrors[d].rdev->nr_pending); 2256 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio)); 2257 generic_make_request(wbio); 2258 } 2259 if (wbio2) { 2260 atomic_inc(&conf->mirrors[d].replacement->nr_pending); 2261 md_sync_acct(conf->mirrors[d].replacement->bdev, 2262 bio_sectors(wbio2)); 2263 generic_make_request(wbio2); 2264 } 2265 } 2266 2267 /* 2268 * Used by fix_read_error() to decay the per rdev read_errors. 2269 * We halve the read error count for every hour that has elapsed 2270 * since the last recorded read error. 
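 * For example (a sketch of the arithmetic below): with read_errors at
 * 40 and roughly three hours since the previous error, the counter is
 * decayed to 40 >> 3 = 5; once more hours have passed than there are
 * bits in the counter, it is simply reset to 0 to avoid an oversized
 * shift.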
2271 * 2272 */ 2273 static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev) 2274 { 2275 long cur_time_mon; 2276 unsigned long hours_since_last; 2277 unsigned int read_errors = atomic_read(&rdev->read_errors); 2278 2279 cur_time_mon = ktime_get_seconds(); 2280 2281 if (rdev->last_read_error == 0) { 2282 /* first time we've seen a read error */ 2283 rdev->last_read_error = cur_time_mon; 2284 return; 2285 } 2286 2287 hours_since_last = (long)(cur_time_mon - 2288 rdev->last_read_error) / 3600; 2289 2290 rdev->last_read_error = cur_time_mon; 2291 2292 /* 2293 * if hours_since_last is > the number of bits in read_errors 2294 * just set read errors to 0. We do this to avoid 2295 * overflowing the shift of read_errors by hours_since_last. 2296 */ 2297 if (hours_since_last >= 8 * sizeof(read_errors)) 2298 atomic_set(&rdev->read_errors, 0); 2299 else 2300 atomic_set(&rdev->read_errors, read_errors >> hours_since_last); 2301 } 2302 2303 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector, 2304 int sectors, struct page *page, int rw) 2305 { 2306 sector_t first_bad; 2307 int bad_sectors; 2308 2309 if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors) 2310 && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags))) 2311 return -1; 2312 if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false)) 2313 /* success */ 2314 return 1; 2315 if (rw == WRITE) { 2316 set_bit(WriteErrorSeen, &rdev->flags); 2317 if (!test_and_set_bit(WantReplacement, &rdev->flags)) 2318 set_bit(MD_RECOVERY_NEEDED, 2319 &rdev->mddev->recovery); 2320 } 2321 /* need to record an error - either for the block or the device */ 2322 if (!rdev_set_badblocks(rdev, sector, sectors, 0)) 2323 md_error(rdev->mddev, rdev); 2324 return 0; 2325 } 2326 2327 /* 2328 * This is a kernel thread which: 2329 * 2330 * 1. Retries failed read operations on working mirrors. 2331 * 2. Updates the raid superblock when problems are encountered. 2332 * 3. Performs writes following reads for array synchronising. 2333 */ 2334 2335 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio) 2336 { 2337 int sect = 0; /* Offset from r10_bio->sector */ 2338 int sectors = r10_bio->sectors; 2339 struct md_rdev *rdev; 2340 int max_read_errors = atomic_read(&mddev->max_corr_read_errors); 2341 int d = r10_bio->devs[r10_bio->read_slot].devnum; 2342 2343 /* still own a reference to this rdev, so it cannot 2344 * have been cleared recently.
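 * As an illustration of the threshold check below (the limit shown is
 * made up, the real one comes from mddev->max_corr_read_errors): after
 * decaying the counter, this error is counted; if the limit were 20
 * and this push took the rdev to 21, the whole device would be failed
 * via md_error() instead of attempting any per-sector repair.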
2345 */ 2346 rdev = conf->mirrors[d].rdev; 2347 2348 if (test_bit(Faulty, &rdev->flags)) 2349 /* drive has already been failed, just ignore any 2350 more fix_read_error() attempts */ 2351 return; 2352 2353 check_decay_read_errors(mddev, rdev); 2354 atomic_inc(&rdev->read_errors); 2355 if (atomic_read(&rdev->read_errors) > max_read_errors) { 2356 char b[BDEVNAME_SIZE]; 2357 bdevname(rdev->bdev, b); 2358 2359 pr_notice("md/raid10:%s: %s: Raid device exceeded read_error threshold [cur %d:max %d]\n", 2360 mdname(mddev), b, 2361 atomic_read(&rdev->read_errors), max_read_errors); 2362 pr_notice("md/raid10:%s: %s: Failing raid device\n", 2363 mdname(mddev), b); 2364 md_error(mddev, rdev); 2365 r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED; 2366 return; 2367 } 2368 2369 while(sectors) { 2370 int s = sectors; 2371 int sl = r10_bio->read_slot; 2372 int success = 0; 2373 int start; 2374 2375 if (s > (PAGE_SIZE>>9)) 2376 s = PAGE_SIZE >> 9; 2377 2378 rcu_read_lock(); 2379 do { 2380 sector_t first_bad; 2381 int bad_sectors; 2382 2383 d = r10_bio->devs[sl].devnum; 2384 rdev = rcu_dereference(conf->mirrors[d].rdev); 2385 if (rdev && 2386 test_bit(In_sync, &rdev->flags) && 2387 !test_bit(Faulty, &rdev->flags) && 2388 is_badblock(rdev, r10_bio->devs[sl].addr + sect, s, 2389 &first_bad, &bad_sectors) == 0) { 2390 atomic_inc(&rdev->nr_pending); 2391 rcu_read_unlock(); 2392 success = sync_page_io(rdev, 2393 r10_bio->devs[sl].addr + 2394 sect, 2395 s<<9, 2396 conf->tmppage, 2397 REQ_OP_READ, 0, false); 2398 rdev_dec_pending(rdev, mddev); 2399 rcu_read_lock(); 2400 if (success) 2401 break; 2402 } 2403 sl++; 2404 if (sl == conf->copies) 2405 sl = 0; 2406 } while (!success && sl != r10_bio->read_slot); 2407 rcu_read_unlock(); 2408 2409 if (!success) { 2410 /* Cannot read from anywhere, just mark the block 2411 * as bad on the first device to discourage future 2412 * reads. 
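 * (Note that the retry loop above starts at read_slot itself, so with
 * conf->copies == 2 it re-attempts the failing device and then the one
 * other copy before falling through to this give-up path.)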
2413 */ 2414 int dn = r10_bio->devs[r10_bio->read_slot].devnum; 2415 rdev = conf->mirrors[dn].rdev; 2416 2417 if (!rdev_set_badblocks( 2418 rdev, 2419 r10_bio->devs[r10_bio->read_slot].addr 2420 + sect, 2421 s, 0)) { 2422 md_error(mddev, rdev); 2423 r10_bio->devs[r10_bio->read_slot].bio 2424 = IO_BLOCKED; 2425 } 2426 break; 2427 } 2428 2429 start = sl; 2430 /* write it back and re-read */ 2431 rcu_read_lock(); 2432 while (sl != r10_bio->read_slot) { 2433 char b[BDEVNAME_SIZE]; 2434 2435 if (sl==0) 2436 sl = conf->copies; 2437 sl--; 2438 d = r10_bio->devs[sl].devnum; 2439 rdev = rcu_dereference(conf->mirrors[d].rdev); 2440 if (!rdev || 2441 test_bit(Faulty, &rdev->flags) || 2442 !test_bit(In_sync, &rdev->flags)) 2443 continue; 2444 2445 atomic_inc(&rdev->nr_pending); 2446 rcu_read_unlock(); 2447 if (r10_sync_page_io(rdev, 2448 r10_bio->devs[sl].addr + 2449 sect, 2450 s, conf->tmppage, WRITE) 2451 == 0) { 2452 /* Well, this device is dead */ 2453 pr_notice("md/raid10:%s: read correction write failed (%d sectors at %llu on %s)\n", 2454 mdname(mddev), s, 2455 (unsigned long long)( 2456 sect + 2457 choose_data_offset(r10_bio, 2458 rdev)), 2459 bdevname(rdev->bdev, b)); 2460 pr_notice("md/raid10:%s: %s: failing drive\n", 2461 mdname(mddev), 2462 bdevname(rdev->bdev, b)); 2463 } 2464 rdev_dec_pending(rdev, mddev); 2465 rcu_read_lock(); 2466 } 2467 sl = start; 2468 while (sl != r10_bio->read_slot) { 2469 char b[BDEVNAME_SIZE]; 2470 2471 if (sl==0) 2472 sl = conf->copies; 2473 sl--; 2474 d = r10_bio->devs[sl].devnum; 2475 rdev = rcu_dereference(conf->mirrors[d].rdev); 2476 if (!rdev || 2477 test_bit(Faulty, &rdev->flags) || 2478 !test_bit(In_sync, &rdev->flags)) 2479 continue; 2480 2481 atomic_inc(&rdev->nr_pending); 2482 rcu_read_unlock(); 2483 switch (r10_sync_page_io(rdev, 2484 r10_bio->devs[sl].addr + 2485 sect, 2486 s, conf->tmppage, 2487 READ)) { 2488 case 0: 2489 /* Well, this device is dead */ 2490 pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sectors at %llu on %s)\n", 2491 mdname(mddev), s, 2492 (unsigned long long)( 2493 sect + 2494 choose_data_offset(r10_bio, rdev)), 2495 bdevname(rdev->bdev, b)); 2496 pr_notice("md/raid10:%s: %s: failing drive\n", 2497 mdname(mddev), 2498 bdevname(rdev->bdev, b)); 2499 break; 2500 case 1: 2501 pr_info("md/raid10:%s: read error corrected (%d sectors at %llu on %s)\n", 2502 mdname(mddev), s, 2503 (unsigned long long)( 2504 sect + 2505 choose_data_offset(r10_bio, rdev)), 2506 bdevname(rdev->bdev, b)); 2507 atomic_add(s, &rdev->corrected_errors); 2508 } 2509 2510 rdev_dec_pending(rdev, mddev); 2511 rcu_read_lock(); 2512 } 2513 rcu_read_unlock(); 2514 2515 sectors -= s; 2516 sect += s; 2517 } 2518 } 2519 2520 static int narrow_write_error(struct r10bio *r10_bio, int i) 2521 { 2522 struct bio *bio = r10_bio->master_bio; 2523 struct mddev *mddev = r10_bio->mddev; 2524 struct r10conf *conf = mddev->private; 2525 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev; 2526 /* bio has the data to be written to slot 'i' where 2527 * we just recently had a write error. 2528 * We repeatedly clone the bio and trim down to one block, 2529 * then try the write. Where the write fails we record 2530 * a bad block. 2531 * It is conceivable that the bio doesn't exactly align with 2532 * blocks. We must handle this. 2533 * 2534 * We currently own a reference to the rdev. 
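 *
 * Illustrative numbers only: with badblocks.shift == 3 (8-sector
 * blocks) and r10_bio->sector == 21, the first clone is trimmed to
 * 3 sectors so that the next write starts at the aligned sector 24;
 * after that, full 8-sector blocks are attempted until sect_to_write
 * is exhausted.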
2535 */ 2536 2537 int block_sectors; 2538 sector_t sector; 2539 int sectors; 2540 int sect_to_write = r10_bio->sectors; 2541 int ok = 1; 2542 2543 if (rdev->badblocks.shift < 0) 2544 return 0; 2545 2546 block_sectors = roundup(1 << rdev->badblocks.shift, 2547 bdev_logical_block_size(rdev->bdev) >> 9); 2548 sector = r10_bio->sector; 2549 sectors = ((r10_bio->sector + block_sectors) 2550 & ~(sector_t)(block_sectors - 1)) 2551 - sector; 2552 2553 while (sect_to_write) { 2554 struct bio *wbio; 2555 sector_t wsector; 2556 if (sectors > sect_to_write) 2557 sectors = sect_to_write; 2558 /* Write at 'sector' for 'sectors' */ 2559 wbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set); 2560 bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors); 2561 wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector); 2562 wbio->bi_iter.bi_sector = wsector + 2563 choose_data_offset(r10_bio, rdev); 2564 wbio->bi_bdev = rdev->bdev; 2565 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); 2566 2567 if (submit_bio_wait(wbio) < 0) 2568 /* Failure! */ 2569 ok = rdev_set_badblocks(rdev, wsector, 2570 sectors, 0) 2571 && ok; 2572 2573 bio_put(wbio); 2574 sect_to_write -= sectors; 2575 sector += sectors; 2576 sectors = block_sectors; 2577 } 2578 return ok; 2579 } 2580 2581 static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio) 2582 { 2583 int slot = r10_bio->read_slot; 2584 struct bio *bio; 2585 struct r10conf *conf = mddev->private; 2586 struct md_rdev *rdev = r10_bio->devs[slot].rdev; 2587 dev_t bio_dev; 2588 sector_t bio_last_sector; 2589 2590 /* we got a read error. Maybe the drive is bad. Maybe just 2591 * the block and we can fix it. 2592 * We freeze all other IO, and try reading the block from 2593 * other devices. When we find one, we re-write 2594 * and check it that fixes the read error. 2595 * This is all done synchronously while the array is 2596 * frozen. 2597 */ 2598 bio = r10_bio->devs[slot].bio; 2599 bio_dev = bio->bi_bdev->bd_dev; 2600 bio_last_sector = r10_bio->devs[slot].addr + rdev->data_offset + r10_bio->sectors; 2601 bio_put(bio); 2602 r10_bio->devs[slot].bio = NULL; 2603 2604 if (mddev->ro) 2605 r10_bio->devs[slot].bio = IO_BLOCKED; 2606 else if (!test_bit(FailFast, &rdev->flags)) { 2607 freeze_array(conf, 1); 2608 fix_read_error(conf, mddev, r10_bio); 2609 unfreeze_array(conf); 2610 } else 2611 md_error(mddev, rdev); 2612 2613 rdev_dec_pending(rdev, mddev); 2614 allow_barrier(conf); 2615 r10_bio->state = 0; 2616 raid10_read_request(mddev, r10_bio->master_bio, r10_bio); 2617 } 2618 2619 static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) 2620 { 2621 /* Some sort of write request has finished and it 2622 * succeeded in writing where we thought there was a 2623 * bad block. So forget the bad block. 2624 * Or possibly if failed and we need to record 2625 * a bad block. 
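 * For the normal-write case below: devs[m].bio == IO_MADE_GOOD means a
 * write to a previously recorded bad block succeeded, so that record
 * is cleared; a bio that completed with bi_error set is instead
 * retried in block-sized pieces by narrow_write_error(), and the
 * device is only failed outright when even that cannot record the
 * bad blocks.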
2626 */ 2627 int m; 2628 struct md_rdev *rdev; 2629 2630 if (test_bit(R10BIO_IsSync, &r10_bio->state) || 2631 test_bit(R10BIO_IsRecover, &r10_bio->state)) { 2632 for (m = 0; m < conf->copies; m++) { 2633 int dev = r10_bio->devs[m].devnum; 2634 rdev = conf->mirrors[dev].rdev; 2635 if (r10_bio->devs[m].bio == NULL) 2636 continue; 2637 if (!r10_bio->devs[m].bio->bi_error) { 2638 rdev_clear_badblocks( 2639 rdev, 2640 r10_bio->devs[m].addr, 2641 r10_bio->sectors, 0); 2642 } else { 2643 if (!rdev_set_badblocks( 2644 rdev, 2645 r10_bio->devs[m].addr, 2646 r10_bio->sectors, 0)) 2647 md_error(conf->mddev, rdev); 2648 } 2649 rdev = conf->mirrors[dev].replacement; 2650 if (r10_bio->devs[m].repl_bio == NULL) 2651 continue; 2652 2653 if (!r10_bio->devs[m].repl_bio->bi_error) { 2654 rdev_clear_badblocks( 2655 rdev, 2656 r10_bio->devs[m].addr, 2657 r10_bio->sectors, 0); 2658 } else { 2659 if (!rdev_set_badblocks( 2660 rdev, 2661 r10_bio->devs[m].addr, 2662 r10_bio->sectors, 0)) 2663 md_error(conf->mddev, rdev); 2664 } 2665 } 2666 put_buf(r10_bio); 2667 } else { 2668 bool fail = false; 2669 for (m = 0; m < conf->copies; m++) { 2670 int dev = r10_bio->devs[m].devnum; 2671 struct bio *bio = r10_bio->devs[m].bio; 2672 rdev = conf->mirrors[dev].rdev; 2673 if (bio == IO_MADE_GOOD) { 2674 rdev_clear_badblocks( 2675 rdev, 2676 r10_bio->devs[m].addr, 2677 r10_bio->sectors, 0); 2678 rdev_dec_pending(rdev, conf->mddev); 2679 } else if (bio != NULL && bio->bi_error) { 2680 fail = true; 2681 if (!narrow_write_error(r10_bio, m)) { 2682 md_error(conf->mddev, rdev); 2683 set_bit(R10BIO_Degraded, 2684 &r10_bio->state); 2685 } 2686 rdev_dec_pending(rdev, conf->mddev); 2687 } 2688 bio = r10_bio->devs[m].repl_bio; 2689 rdev = conf->mirrors[dev].replacement; 2690 if (rdev && bio == IO_MADE_GOOD) { 2691 rdev_clear_badblocks( 2692 rdev, 2693 r10_bio->devs[m].addr, 2694 r10_bio->sectors, 0); 2695 rdev_dec_pending(rdev, conf->mddev); 2696 } 2697 } 2698 if (fail) { 2699 spin_lock_irq(&conf->device_lock); 2700 list_add(&r10_bio->retry_list, &conf->bio_end_io_list); 2701 conf->nr_queued++; 2702 spin_unlock_irq(&conf->device_lock); 2703 /* 2704 * In case freeze_array() is waiting for condition 2705 * nr_pending == nr_queued + extra to be true. 
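 * (The r10_bio queued above still counts in nr_pending but now also in
 * nr_queued, so waking the barrier below lets a concurrent
 * freeze_array() re-evaluate its condition and make progress.)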
2706 */ 2707 wake_up(&conf->wait_barrier); 2708 md_wakeup_thread(conf->mddev->thread); 2709 } else { 2710 if (test_bit(R10BIO_WriteError, 2711 &r10_bio->state)) 2712 close_write(r10_bio); 2713 raid_end_bio_io(r10_bio); 2714 } 2715 } 2716 } 2717 2718 static void raid10d(struct md_thread *thread) 2719 { 2720 struct mddev *mddev = thread->mddev; 2721 struct r10bio *r10_bio; 2722 unsigned long flags; 2723 struct r10conf *conf = mddev->private; 2724 struct list_head *head = &conf->retry_list; 2725 struct blk_plug plug; 2726 2727 md_check_recovery(mddev); 2728 2729 if (!list_empty_careful(&conf->bio_end_io_list) && 2730 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { 2731 LIST_HEAD(tmp); 2732 spin_lock_irqsave(&conf->device_lock, flags); 2733 if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { 2734 while (!list_empty(&conf->bio_end_io_list)) { 2735 list_move(conf->bio_end_io_list.prev, &tmp); 2736 conf->nr_queued--; 2737 } 2738 } 2739 spin_unlock_irqrestore(&conf->device_lock, flags); 2740 while (!list_empty(&tmp)) { 2741 r10_bio = list_first_entry(&tmp, struct r10bio, 2742 retry_list); 2743 list_del(&r10_bio->retry_list); 2744 if (mddev->degraded) 2745 set_bit(R10BIO_Degraded, &r10_bio->state); 2746 2747 if (test_bit(R10BIO_WriteError, 2748 &r10_bio->state)) 2749 close_write(r10_bio); 2750 raid_end_bio_io(r10_bio); 2751 } 2752 } 2753 2754 blk_start_plug(&plug); 2755 for (;;) { 2756 2757 flush_pending_writes(conf); 2758 2759 spin_lock_irqsave(&conf->device_lock, flags); 2760 if (list_empty(head)) { 2761 spin_unlock_irqrestore(&conf->device_lock, flags); 2762 break; 2763 } 2764 r10_bio = list_entry(head->prev, struct r10bio, retry_list); 2765 list_del(head->prev); 2766 conf->nr_queued--; 2767 spin_unlock_irqrestore(&conf->device_lock, flags); 2768 2769 mddev = r10_bio->mddev; 2770 conf = mddev->private; 2771 if (test_bit(R10BIO_MadeGood, &r10_bio->state) || 2772 test_bit(R10BIO_WriteError, &r10_bio->state)) 2773 handle_write_completed(conf, r10_bio); 2774 else if (test_bit(R10BIO_IsReshape, &r10_bio->state)) 2775 reshape_request_write(mddev, r10_bio); 2776 else if (test_bit(R10BIO_IsSync, &r10_bio->state)) 2777 sync_request_write(mddev, r10_bio); 2778 else if (test_bit(R10BIO_IsRecover, &r10_bio->state)) 2779 recovery_request_write(mddev, r10_bio); 2780 else if (test_bit(R10BIO_ReadError, &r10_bio->state)) 2781 handle_read_error(mddev, r10_bio); 2782 else 2783 WARN_ON_ONCE(1); 2784 2785 cond_resched(); 2786 if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING)) 2787 md_check_recovery(mddev); 2788 } 2789 blk_finish_plug(&plug); 2790 } 2791 2792 static int init_resync(struct r10conf *conf) 2793 { 2794 int buffs; 2795 int i; 2796 2797 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE; 2798 BUG_ON(conf->r10buf_pool); 2799 conf->have_replacement = 0; 2800 for (i = 0; i < conf->geo.raid_disks; i++) 2801 if (conf->mirrors[i].replacement) 2802 conf->have_replacement = 1; 2803 conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf); 2804 if (!conf->r10buf_pool) 2805 return -ENOMEM; 2806 conf->next_resync = 0; 2807 return 0; 2808 } 2809 2810 /* 2811 * perform a "sync" on one "block" 2812 * 2813 * We need to make sure that no normal I/O request - particularly write 2814 * requests - conflict with active sync requests. 2815 * 2816 * This is achieved by tracking pending requests and a 'barrier' concept 2817 * that can be installed to exclude normal IO requests. 2818 * 2819 * Resync and recovery are handled very differently. 
2820 * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery. 2821 * 2822 * For resync, we iterate over virtual addresses, read all copies, 2823 * and update if there are differences. If only one copy is live, 2824 * skip it. 2825 * For recovery, we iterate over physical addresses, read a good 2826 * value for each non-in_sync drive, and over-write. 2827 * 2828 * So, for recovery we may have several outstanding complex requests for a 2829 * given address, one for each out-of-sync device. We model this by allocating 2830 * a number of r10_bio structures, one for each out-of-sync device. 2831 * As we setup these structures, we collect all bio's together into a list 2832 * which we then process collectively to add pages, and then process again 2833 * to pass to generic_make_request. 2834 * 2835 * The r10_bio structures are linked using a borrowed master_bio pointer. 2836 * This link is counted in ->remaining. When the r10_bio that points to NULL 2837 * has its remaining count decremented to 0, the whole complex operation 2838 * is complete. 2839 * 2840 */ 2841 2842 static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, 2843 int *skipped) 2844 { 2845 struct r10conf *conf = mddev->private; 2846 struct r10bio *r10_bio; 2847 struct bio *biolist = NULL, *bio; 2848 sector_t max_sector, nr_sectors; 2849 int i; 2850 int max_sync; 2851 sector_t sync_blocks; 2852 sector_t sectors_skipped = 0; 2853 int chunks_skipped = 0; 2854 sector_t chunk_mask = conf->geo.chunk_mask; 2855 2856 if (!conf->r10buf_pool) 2857 if (init_resync(conf)) 2858 return 0; 2859 2860 /* 2861 * Allow skipping a full rebuild for incremental assembly 2862 * of a clean array, like RAID1 does. 2863 */ 2864 if (mddev->bitmap == NULL && 2865 mddev->recovery_cp == MaxSector && 2866 mddev->reshape_position == MaxSector && 2867 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && 2868 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && 2869 !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 2870 conf->fullsync == 0) { 2871 *skipped = 1; 2872 return mddev->dev_sectors - sector_nr; 2873 } 2874 2875 skipped: 2876 max_sector = mddev->dev_sectors; 2877 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 2878 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 2879 max_sector = mddev->resync_max_sectors; 2880 if (sector_nr >= max_sector) { 2881 /* If we aborted, we need to abort the 2882 * sync on the 'current' bitmap chunks (there can 2883 * be several when recovering multiple devices), 2884 * as we may have started syncing it but not finished. 2885 * We can find the current address in 2886 * mddev->curr_resync, but for recovery, 2887 * we need to convert that to several 2888 * virtual addresses. 2889 */ 2890 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { 2891 end_reshape(conf); 2892 close_sync(conf); 2893 return 0; 2894 } 2895 2896 if (mddev->curr_resync < max_sector) { /* aborted */ 2897 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 2898 bitmap_end_sync(mddev->bitmap, mddev->curr_resync, 2899 &sync_blocks, 1); 2900 else for (i = 0; i < conf->geo.raid_disks; i++) { 2901 sector_t sect = 2902 raid10_find_virt(conf, mddev->curr_resync, i); 2903 bitmap_end_sync(mddev->bitmap, sect, 2904 &sync_blocks, 1); 2905 } 2906 } else { 2907 /* completed sync */ 2908 if ((!mddev->bitmap || conf->fullsync) 2909 && conf->have_replacement 2910 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 2911 /* Completed a full sync so the replacements 2912 * are now fully recovered.
2913 */ 2914 rcu_read_lock(); 2915 for (i = 0; i < conf->geo.raid_disks; i++) { 2916 struct md_rdev *rdev = 2917 rcu_dereference(conf->mirrors[i].replacement); 2918 if (rdev) 2919 rdev->recovery_offset = MaxSector; 2920 } 2921 rcu_read_unlock(); 2922 } 2923 conf->fullsync = 0; 2924 } 2925 bitmap_close_sync(mddev->bitmap); 2926 close_sync(conf); 2927 *skipped = 1; 2928 return sectors_skipped; 2929 } 2930 2931 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 2932 return reshape_request(mddev, sector_nr, skipped); 2933 2934 if (chunks_skipped >= conf->geo.raid_disks) { 2935 /* if there has been nothing to do on any drive, 2936 * then there is nothing to do at all.. 2937 */ 2938 *skipped = 1; 2939 return (max_sector - sector_nr) + sectors_skipped; 2940 } 2941 2942 if (max_sector > mddev->resync_max) 2943 max_sector = mddev->resync_max; /* Don't do IO beyond here */ 2944 2945 /* make sure whole request will fit in a chunk - if chunks 2946 * are meaningful 2947 */ 2948 if (conf->geo.near_copies < conf->geo.raid_disks && 2949 max_sector > (sector_nr | chunk_mask)) 2950 max_sector = (sector_nr | chunk_mask) + 1; 2951 2952 /* 2953 * If there is non-resync activity waiting for a turn, then let it 2954 * though before starting on this new sync request. 2955 */ 2956 if (conf->nr_waiting) 2957 schedule_timeout_uninterruptible(1); 2958 2959 /* Again, very different code for resync and recovery. 2960 * Both must result in an r10bio with a list of bios that 2961 * have bi_end_io, bi_sector, bi_bdev set, 2962 * and bi_private set to the r10bio. 2963 * For recovery, we may actually create several r10bios 2964 * with 2 bios in each, that correspond to the bios in the main one. 2965 * In this case, the subordinate r10bios link back through a 2966 * borrowed master_bio pointer, and the counter in the master 2967 * includes a ref from each subordinate. 2968 */ 2969 /* First, we decide what to do and set ->bi_end_io 2970 * To end_sync_read if we want to read, and 2971 * end_sync_write if we will want to write. 2972 */ 2973 2974 max_sync = RESYNC_PAGES << (PAGE_SHIFT-9); 2975 if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 2976 /* recovery... the complicated one */ 2977 int j; 2978 r10_bio = NULL; 2979 2980 for (i = 0 ; i < conf->geo.raid_disks; i++) { 2981 int still_degraded; 2982 struct r10bio *rb2; 2983 sector_t sect; 2984 int must_sync; 2985 int any_working; 2986 struct raid10_info *mirror = &conf->mirrors[i]; 2987 struct md_rdev *mrdev, *mreplace; 2988 2989 rcu_read_lock(); 2990 mrdev = rcu_dereference(mirror->rdev); 2991 mreplace = rcu_dereference(mirror->replacement); 2992 2993 if ((mrdev == NULL || 2994 test_bit(Faulty, &mrdev->flags) || 2995 test_bit(In_sync, &mrdev->flags)) && 2996 (mreplace == NULL || 2997 test_bit(Faulty, &mreplace->flags))) { 2998 rcu_read_unlock(); 2999 continue; 3000 } 3001 3002 still_degraded = 0; 3003 /* want to reconstruct this device */ 3004 rb2 = r10_bio; 3005 sect = raid10_find_virt(conf, sector_nr, i); 3006 if (sect >= mddev->resync_max_sectors) { 3007 /* last stripe is not complete - don't 3008 * try to recover this sector. 
3009 */ 3010 rcu_read_unlock(); 3011 continue; 3012 } 3013 if (mreplace && test_bit(Faulty, &mreplace->flags)) 3014 mreplace = NULL; 3015 /* Unless we are doing a full sync, or a replacement 3016 * we only need to recover the block if it is set in 3017 * the bitmap 3018 */ 3019 must_sync = bitmap_start_sync(mddev->bitmap, sect, 3020 &sync_blocks, 1); 3021 if (sync_blocks < max_sync) 3022 max_sync = sync_blocks; 3023 if (!must_sync && 3024 mreplace == NULL && 3025 !conf->fullsync) { 3026 /* yep, skip the sync_blocks here, but don't assume 3027 * that there will never be anything to do here 3028 */ 3029 chunks_skipped = -1; 3030 rcu_read_unlock(); 3031 continue; 3032 } 3033 atomic_inc(&mrdev->nr_pending); 3034 if (mreplace) 3035 atomic_inc(&mreplace->nr_pending); 3036 rcu_read_unlock(); 3037 3038 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); 3039 r10_bio->state = 0; 3040 raise_barrier(conf, rb2 != NULL); 3041 atomic_set(&r10_bio->remaining, 0); 3042 3043 r10_bio->master_bio = (struct bio*)rb2; 3044 if (rb2) 3045 atomic_inc(&rb2->remaining); 3046 r10_bio->mddev = mddev; 3047 set_bit(R10BIO_IsRecover, &r10_bio->state); 3048 r10_bio->sector = sect; 3049 3050 raid10_find_phys(conf, r10_bio); 3051 3052 /* Need to check if the array will still be 3053 * degraded 3054 */ 3055 rcu_read_lock(); 3056 for (j = 0; j < conf->geo.raid_disks; j++) { 3057 struct md_rdev *rdev = rcu_dereference( 3058 conf->mirrors[j].rdev); 3059 if (rdev == NULL || test_bit(Faulty, &rdev->flags)) { 3060 still_degraded = 1; 3061 break; 3062 } 3063 } 3064 3065 must_sync = bitmap_start_sync(mddev->bitmap, sect, 3066 &sync_blocks, still_degraded); 3067 3068 any_working = 0; 3069 for (j=0; j<conf->copies;j++) { 3070 int k; 3071 int d = r10_bio->devs[j].devnum; 3072 sector_t from_addr, to_addr; 3073 struct md_rdev *rdev = 3074 rcu_dereference(conf->mirrors[d].rdev); 3075 sector_t sector, first_bad; 3076 int bad_sectors; 3077 if (!rdev || 3078 !test_bit(In_sync, &rdev->flags)) 3079 continue; 3080 /* This is where we read from */ 3081 any_working = 1; 3082 sector = r10_bio->devs[j].addr; 3083 3084 if (is_badblock(rdev, sector, max_sync, 3085 &first_bad, &bad_sectors)) { 3086 if (first_bad > sector) 3087 max_sync = first_bad - sector; 3088 else { 3089 bad_sectors -= (sector 3090 - first_bad); 3091 if (max_sync > bad_sectors) 3092 max_sync = bad_sectors; 3093 continue; 3094 } 3095 } 3096 bio = r10_bio->devs[0].bio; 3097 bio->bi_next = biolist; 3098 biolist = bio; 3099 bio->bi_end_io = end_sync_read; 3100 bio_set_op_attrs(bio, REQ_OP_READ, 0); 3101 if (test_bit(FailFast, &rdev->flags)) 3102 bio->bi_opf |= MD_FAILFAST; 3103 from_addr = r10_bio->devs[j].addr; 3104 bio->bi_iter.bi_sector = from_addr + 3105 rdev->data_offset; 3106 bio->bi_bdev = rdev->bdev; 3107 atomic_inc(&rdev->nr_pending); 3108 /* and we write to 'i' (if not in_sync) */ 3109 3110 for (k=0; k<conf->copies; k++) 3111 if (r10_bio->devs[k].devnum == i) 3112 break; 3113 BUG_ON(k == conf->copies); 3114 to_addr = r10_bio->devs[k].addr; 3115 r10_bio->devs[0].devnum = d; 3116 r10_bio->devs[0].addr = from_addr; 3117 r10_bio->devs[1].devnum = i; 3118 r10_bio->devs[1].addr = to_addr; 3119 3120 if (!test_bit(In_sync, &mrdev->flags)) { 3121 bio = r10_bio->devs[1].bio; 3122 bio->bi_next = biolist; 3123 biolist = bio; 3124 bio->bi_end_io = end_sync_write; 3125 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 3126 bio->bi_iter.bi_sector = to_addr 3127 + mrdev->data_offset; 3128 bio->bi_bdev = mrdev->bdev; 3129 atomic_inc(&r10_bio->remaining); 3130 } else 3131 
r10_bio->devs[1].bio->bi_end_io = NULL; 3132 3133 /* and maybe write to replacement */ 3134 bio = r10_bio->devs[1].repl_bio; 3135 if (bio) 3136 bio->bi_end_io = NULL; 3137 /* Note: if mreplace != NULL, then bio 3138 * cannot be NULL as r10buf_pool_alloc will 3139 * have allocated it. 3140 * So the second test here is pointless. 3141 * But it keeps semantic-checkers happy, and 3142 * this comment keeps human reviewers 3143 * happy. 3144 */ 3145 if (mreplace == NULL || bio == NULL || 3146 test_bit(Faulty, &mreplace->flags)) 3147 break; 3148 bio->bi_next = biolist; 3149 biolist = bio; 3150 bio->bi_end_io = end_sync_write; 3151 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 3152 bio->bi_iter.bi_sector = to_addr + 3153 mreplace->data_offset; 3154 bio->bi_bdev = mreplace->bdev; 3155 atomic_inc(&r10_bio->remaining); 3156 break; 3157 } 3158 rcu_read_unlock(); 3159 if (j == conf->copies) { 3160 /* Cannot recover, so abort the recovery or 3161 * record a bad block */ 3162 if (any_working) { 3163 /* problem is that there are bad blocks 3164 * on other device(s) 3165 */ 3166 int k; 3167 for (k = 0; k < conf->copies; k++) 3168 if (r10_bio->devs[k].devnum == i) 3169 break; 3170 if (!test_bit(In_sync, 3171 &mrdev->flags) 3172 && !rdev_set_badblocks( 3173 mrdev, 3174 r10_bio->devs[k].addr, 3175 max_sync, 0)) 3176 any_working = 0; 3177 if (mreplace && 3178 !rdev_set_badblocks( 3179 mreplace, 3180 r10_bio->devs[k].addr, 3181 max_sync, 0)) 3182 any_working = 0; 3183 } 3184 if (!any_working) { 3185 if (!test_and_set_bit(MD_RECOVERY_INTR, 3186 &mddev->recovery)) 3187 pr_warn("md/raid10:%s: insufficient working devices for recovery.\n", 3188 mdname(mddev)); 3189 mirror->recovery_disabled 3190 = mddev->recovery_disabled; 3191 } 3192 put_buf(r10_bio); 3193 if (rb2) 3194 atomic_dec(&rb2->remaining); 3195 r10_bio = rb2; 3196 rdev_dec_pending(mrdev, mddev); 3197 if (mreplace) 3198 rdev_dec_pending(mreplace, mddev); 3199 break; 3200 } 3201 rdev_dec_pending(mrdev, mddev); 3202 if (mreplace) 3203 rdev_dec_pending(mreplace, mddev); 3204 if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) { 3205 /* Only want this if there is elsewhere to 3206 * read from. 'j' is currently the first 3207 * readable copy. 3208 */ 3209 int targets = 1; 3210 for (; j < conf->copies; j++) { 3211 int d = r10_bio->devs[j].devnum; 3212 if (conf->mirrors[d].rdev && 3213 test_bit(In_sync, 3214 &conf->mirrors[d].rdev->flags)) 3215 targets++; 3216 } 3217 if (targets == 1) 3218 r10_bio->devs[0].bio->bi_opf 3219 &= ~MD_FAILFAST; 3220 } 3221 } 3222 if (biolist == NULL) { 3223 while (r10_bio) { 3224 struct r10bio *rb2 = r10_bio; 3225 r10_bio = (struct r10bio*) rb2->master_bio; 3226 rb2->master_bio = NULL; 3227 put_buf(rb2); 3228 } 3229 goto giveup; 3230 } 3231 } else { 3232 /* resync. 
Schedule a read for every block at this virt offset */ 3233 int count = 0; 3234 3235 bitmap_cond_end_sync(mddev->bitmap, sector_nr, 0); 3236 3237 if (!bitmap_start_sync(mddev->bitmap, sector_nr, 3238 &sync_blocks, mddev->degraded) && 3239 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, 3240 &mddev->recovery)) { 3241 /* We can skip this block */ 3242 *skipped = 1; 3243 return sync_blocks + sectors_skipped; 3244 } 3245 if (sync_blocks < max_sync) 3246 max_sync = sync_blocks; 3247 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); 3248 r10_bio->state = 0; 3249 3250 r10_bio->mddev = mddev; 3251 atomic_set(&r10_bio->remaining, 0); 3252 raise_barrier(conf, 0); 3253 conf->next_resync = sector_nr; 3254 3255 r10_bio->master_bio = NULL; 3256 r10_bio->sector = sector_nr; 3257 set_bit(R10BIO_IsSync, &r10_bio->state); 3258 raid10_find_phys(conf, r10_bio); 3259 r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1; 3260 3261 for (i = 0; i < conf->copies; i++) { 3262 int d = r10_bio->devs[i].devnum; 3263 sector_t first_bad, sector; 3264 int bad_sectors; 3265 struct md_rdev *rdev; 3266 3267 if (r10_bio->devs[i].repl_bio) 3268 r10_bio->devs[i].repl_bio->bi_end_io = NULL; 3269 3270 bio = r10_bio->devs[i].bio; 3271 bio->bi_error = -EIO; 3272 rcu_read_lock(); 3273 rdev = rcu_dereference(conf->mirrors[d].rdev); 3274 if (rdev == NULL || test_bit(Faulty, &rdev->flags)) { 3275 rcu_read_unlock(); 3276 continue; 3277 } 3278 sector = r10_bio->devs[i].addr; 3279 if (is_badblock(rdev, sector, max_sync, 3280 &first_bad, &bad_sectors)) { 3281 if (first_bad > sector) 3282 max_sync = first_bad - sector; 3283 else { 3284 bad_sectors -= (sector - first_bad); 3285 if (max_sync > bad_sectors) 3286 max_sync = bad_sectors; 3287 rcu_read_unlock(); 3288 continue; 3289 } 3290 } 3291 atomic_inc(&rdev->nr_pending); 3292 atomic_inc(&r10_bio->remaining); 3293 bio->bi_next = biolist; 3294 biolist = bio; 3295 bio->bi_end_io = end_sync_read; 3296 bio_set_op_attrs(bio, REQ_OP_READ, 0); 3297 if (test_bit(FailFast, &conf->mirrors[d].rdev->flags)) 3298 bio->bi_opf |= MD_FAILFAST; 3299 bio->bi_iter.bi_sector = sector + rdev->data_offset; 3300 bio->bi_bdev = rdev->bdev; 3301 count++; 3302 3303 rdev = rcu_dereference(conf->mirrors[d].replacement); 3304 if (rdev == NULL || test_bit(Faulty, &rdev->flags)) { 3305 rcu_read_unlock(); 3306 continue; 3307 } 3308 atomic_inc(&rdev->nr_pending); 3309 rcu_read_unlock(); 3310 3311 /* Need to set up for writing to the replacement */ 3312 bio = r10_bio->devs[i].repl_bio; 3313 bio->bi_error = -EIO; 3314 3315 sector = r10_bio->devs[i].addr; 3316 bio->bi_next = biolist; 3317 biolist = bio; 3318 bio->bi_end_io = end_sync_write; 3319 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 3320 if (test_bit(FailFast, &conf->mirrors[d].rdev->flags)) 3321 bio->bi_opf |= MD_FAILFAST; 3322 bio->bi_iter.bi_sector = sector + rdev->data_offset; 3323 bio->bi_bdev = rdev->bdev; 3324 count++; 3325 } 3326 3327 if (count < 2) { 3328 for (i=0; i<conf->copies; i++) { 3329 int d = r10_bio->devs[i].devnum; 3330 if (r10_bio->devs[i].bio->bi_end_io) 3331 rdev_dec_pending(conf->mirrors[d].rdev, 3332 mddev); 3333 if (r10_bio->devs[i].repl_bio && 3334 r10_bio->devs[i].repl_bio->bi_end_io) 3335 rdev_dec_pending( 3336 conf->mirrors[d].replacement, 3337 mddev); 3338 } 3339 put_buf(r10_bio); 3340 biolist = NULL; 3341 goto giveup; 3342 } 3343 } 3344 3345 nr_sectors = 0; 3346 if (sector_nr + max_sync < max_sector) 3347 max_sector = sector_nr + max_sync; 3348 do { 3349 struct page *page; 3350 int len = PAGE_SIZE; 3351 if (sector_nr + (len>>9) > 
max_sector) 3352 len = (max_sector - sector_nr) << 9; 3353 if (len == 0) 3354 break; 3355 for (bio= biolist ; bio ; bio=bio->bi_next) { 3356 struct resync_pages *rp = get_resync_pages(bio); 3357 page = resync_fetch_page(rp, rp->idx++); 3358 /* 3359 * won't fail because the vec table is big enough 3360 * to hold all these pages 3361 */ 3362 bio_add_page(bio, page, len, 0); 3363 } 3364 nr_sectors += len>>9; 3365 sector_nr += len>>9; 3366 } while (get_resync_pages(biolist)->idx < RESYNC_PAGES); 3367 r10_bio->sectors = nr_sectors; 3368 3369 while (biolist) { 3370 bio = biolist; 3371 biolist = biolist->bi_next; 3372 3373 bio->bi_next = NULL; 3374 r10_bio = get_resync_r10bio(bio); 3375 r10_bio->sectors = nr_sectors; 3376 3377 if (bio->bi_end_io == end_sync_read) { 3378 md_sync_acct(bio->bi_bdev, nr_sectors); 3379 bio->bi_error = 0; 3380 generic_make_request(bio); 3381 } 3382 } 3383 3384 if (sectors_skipped) 3385 /* pretend they weren't skipped, it makes 3386 * no important difference in this case 3387 */ 3388 md_done_sync(mddev, sectors_skipped, 1); 3389 3390 return sectors_skipped + nr_sectors; 3391 giveup: 3392 /* There is nowhere to write, so all non-sync 3393 * drives must be failed or in resync, all drives 3394 * have a bad block, so try the next chunk... 3395 */ 3396 if (sector_nr + max_sync < max_sector) 3397 max_sector = sector_nr + max_sync; 3398 3399 sectors_skipped += (max_sector - sector_nr); 3400 chunks_skipped ++; 3401 sector_nr = max_sector; 3402 goto skipped; 3403 } 3404 3405 static sector_t 3406 raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks) 3407 { 3408 sector_t size; 3409 struct r10conf *conf = mddev->private; 3410 3411 if (!raid_disks) 3412 raid_disks = min(conf->geo.raid_disks, 3413 conf->prev.raid_disks); 3414 if (!sectors) 3415 sectors = conf->dev_sectors; 3416 3417 size = sectors >> conf->geo.chunk_shift; 3418 sector_div(size, conf->geo.far_copies); 3419 size = size * raid_disks; 3420 sector_div(size, conf->geo.near_copies); 3421 3422 return size << conf->geo.chunk_shift; 3423 } 3424 3425 static void calc_sectors(struct r10conf *conf, sector_t size) 3426 { 3427 /* Calculate the number of sectors-per-device that will 3428 * actually be used, and set conf->dev_sectors and 3429 * conf->stride 3430 */ 3431 3432 size = size >> conf->geo.chunk_shift; 3433 sector_div(size, conf->geo.far_copies); 3434 size = size * conf->geo.raid_disks; 3435 sector_div(size, conf->geo.near_copies); 3436 /* 'size' is now the number of chunks in the array */ 3437 /* calculate "used chunks per device" */ 3438 size = size * conf->copies; 3439 3440 /* We need to round up when dividing by raid_disks to 3441 * get the stride size. 
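 * A worked example with made-up numbers: 4 disks, near_copies == 2,
 * far_copies == 1 (so copies == 2) and 1000 chunks per device give
 * 1000 * 4 / 2 = 2000 chunks in the array; 2000 * 2 copies spread
 * over 4 disks is 1000 used chunks per device, which is converted
 * back to sectors via chunk_shift.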
3442 */ 3443 size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks); 3444 3445 conf->dev_sectors = size << conf->geo.chunk_shift; 3446 3447 if (conf->geo.far_offset) 3448 conf->geo.stride = 1 << conf->geo.chunk_shift; 3449 else { 3450 sector_div(size, conf->geo.far_copies); 3451 conf->geo.stride = size << conf->geo.chunk_shift; 3452 } 3453 } 3454 3455 enum geo_type {geo_new, geo_old, geo_start}; 3456 static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new) 3457 { 3458 int nc, fc, fo; 3459 int layout, chunk, disks; 3460 switch (new) { 3461 case geo_old: 3462 layout = mddev->layout; 3463 chunk = mddev->chunk_sectors; 3464 disks = mddev->raid_disks - mddev->delta_disks; 3465 break; 3466 case geo_new: 3467 layout = mddev->new_layout; 3468 chunk = mddev->new_chunk_sectors; 3469 disks = mddev->raid_disks; 3470 break; 3471 default: /* avoid 'may be unused' warnings */ 3472 case geo_start: /* new when starting reshape - raid_disks not 3473 * updated yet. */ 3474 layout = mddev->new_layout; 3475 chunk = mddev->new_chunk_sectors; 3476 disks = mddev->raid_disks + mddev->delta_disks; 3477 break; 3478 } 3479 if (layout >> 19) 3480 return -1; 3481 if (chunk < (PAGE_SIZE >> 9) || 3482 !is_power_of_2(chunk)) 3483 return -2; 3484 nc = layout & 255; 3485 fc = (layout >> 8) & 255; 3486 fo = layout & (1<<16); 3487 geo->raid_disks = disks; 3488 geo->near_copies = nc; 3489 geo->far_copies = fc; 3490 geo->far_offset = fo; 3491 switch (layout >> 17) { 3492 case 0: /* original layout. simple but not always optimal */ 3493 geo->far_set_size = disks; 3494 break; 3495 case 1: /* "improved" layout which was buggy. Hopefully no-one is 3496 * actually using this, but leave code here just in case.*/ 3497 geo->far_set_size = disks/fc; 3498 WARN(geo->far_set_size < fc, 3499 "This RAID10 layout does not provide data safety - please backup and create new array\n"); 3500 break; 3501 case 2: /* "improved" layout fixed to match documentation */ 3502 geo->far_set_size = fc * nc; 3503 break; 3504 default: /* Not a valid layout */ 3505 return -1; 3506 } 3507 geo->chunk_mask = chunk - 1; 3508 geo->chunk_shift = ffz(~chunk); 3509 return nc*fc; 3510 } 3511 3512 static struct r10conf *setup_conf(struct mddev *mddev) 3513 { 3514 struct r10conf *conf = NULL; 3515 int err = -EINVAL; 3516 struct geom geo; 3517 int copies; 3518 3519 copies = setup_geo(&geo, mddev, geo_new); 3520 3521 if (copies == -2) { 3522 pr_warn("md/raid10:%s: chunk size must be at least PAGE_SIZE(%ld) and be a power of 2.\n", 3523 mdname(mddev), PAGE_SIZE); 3524 goto out; 3525 } 3526 3527 if (copies < 2 || copies > mddev->raid_disks) { 3528 pr_warn("md/raid10:%s: unsupported raid10 layout: 0x%8x\n", 3529 mdname(mddev), mddev->new_layout); 3530 goto out; 3531 } 3532 3533 err = -ENOMEM; 3534 conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL); 3535 if (!conf) 3536 goto out; 3537 3538 /* FIXME calc properly */ 3539 conf->mirrors = kzalloc(sizeof(struct raid10_info)*(mddev->raid_disks + 3540 max(0,-mddev->delta_disks)), 3541 GFP_KERNEL); 3542 if (!conf->mirrors) 3543 goto out; 3544 3545 conf->tmppage = alloc_page(GFP_KERNEL); 3546 if (!conf->tmppage) 3547 goto out; 3548 3549 conf->geo = geo; 3550 conf->copies = copies; 3551 conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc, 3552 r10bio_pool_free, conf); 3553 if (!conf->r10bio_pool) 3554 goto out; 3555 3556 conf->bio_split = bioset_create(BIO_POOL_SIZE, 0); 3557 if (!conf->bio_split) 3558 goto out; 3559 3560 calc_sectors(conf, mddev->dev_sectors); 3561 if (mddev->reshape_position 
== MaxSector) { 3562 conf->prev = conf->geo; 3563 conf->reshape_progress = MaxSector; 3564 } else { 3565 if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) { 3566 err = -EINVAL; 3567 goto out; 3568 } 3569 conf->reshape_progress = mddev->reshape_position; 3570 if (conf->prev.far_offset) 3571 conf->prev.stride = 1 << conf->prev.chunk_shift; 3572 else 3573 /* far_copies must be 1 */ 3574 conf->prev.stride = conf->dev_sectors; 3575 } 3576 conf->reshape_safe = conf->reshape_progress; 3577 spin_lock_init(&conf->device_lock); 3578 INIT_LIST_HEAD(&conf->retry_list); 3579 INIT_LIST_HEAD(&conf->bio_end_io_list); 3580 3581 spin_lock_init(&conf->resync_lock); 3582 init_waitqueue_head(&conf->wait_barrier); 3583 atomic_set(&conf->nr_pending, 0); 3584 3585 conf->thread = md_register_thread(raid10d, mddev, "raid10"); 3586 if (!conf->thread) 3587 goto out; 3588 3589 conf->mddev = mddev; 3590 return conf; 3591 3592 out: 3593 if (conf) { 3594 mempool_destroy(conf->r10bio_pool); 3595 kfree(conf->mirrors); 3596 safe_put_page(conf->tmppage); 3597 if (conf->bio_split) 3598 bioset_free(conf->bio_split); 3599 kfree(conf); 3600 } 3601 return ERR_PTR(err); 3602 } 3603 3604 static int raid10_run(struct mddev *mddev) 3605 { 3606 struct r10conf *conf; 3607 int i, disk_idx, chunk_size; 3608 struct raid10_info *disk; 3609 struct md_rdev *rdev; 3610 sector_t size; 3611 sector_t min_offset_diff = 0; 3612 int first = 1; 3613 bool discard_supported = false; 3614 3615 if (mddev->private == NULL) { 3616 conf = setup_conf(mddev); 3617 if (IS_ERR(conf)) 3618 return PTR_ERR(conf); 3619 mddev->private = conf; 3620 } 3621 conf = mddev->private; 3622 if (!conf) 3623 goto out; 3624 3625 mddev->thread = conf->thread; 3626 conf->thread = NULL; 3627 3628 chunk_size = mddev->chunk_sectors << 9; 3629 if (mddev->queue) { 3630 blk_queue_max_discard_sectors(mddev->queue, 3631 mddev->chunk_sectors); 3632 blk_queue_max_write_same_sectors(mddev->queue, 0); 3633 blk_queue_max_write_zeroes_sectors(mddev->queue, 0); 3634 blk_queue_io_min(mddev->queue, chunk_size); 3635 if (conf->geo.raid_disks % conf->geo.near_copies) 3636 blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); 3637 else 3638 blk_queue_io_opt(mddev->queue, chunk_size * 3639 (conf->geo.raid_disks / conf->geo.near_copies)); 3640 } 3641 3642 rdev_for_each(rdev, mddev) { 3643 long long diff; 3644 3645 disk_idx = rdev->raid_disk; 3646 if (disk_idx < 0) 3647 continue; 3648 if (disk_idx >= conf->geo.raid_disks && 3649 disk_idx >= conf->prev.raid_disks) 3650 continue; 3651 disk = conf->mirrors + disk_idx; 3652 3653 if (test_bit(Replacement, &rdev->flags)) { 3654 if (disk->replacement) 3655 goto out_free_conf; 3656 disk->replacement = rdev; 3657 } else { 3658 if (disk->rdev) 3659 goto out_free_conf; 3660 disk->rdev = rdev; 3661 } 3662 diff = (rdev->new_data_offset - rdev->data_offset); 3663 if (!mddev->reshape_backwards) 3664 diff = -diff; 3665 if (diff < 0) 3666 diff = 0; 3667 if (first || diff < min_offset_diff) 3668 min_offset_diff = diff; 3669 3670 if (mddev->gendisk) 3671 disk_stack_limits(mddev->gendisk, rdev->bdev, 3672 rdev->data_offset << 9); 3673 3674 disk->head_position = 0; 3675 3676 if (blk_queue_discard(bdev_get_queue(rdev->bdev))) 3677 discard_supported = true; 3678 first = 0; 3679 } 3680 3681 if (mddev->queue) { 3682 if (discard_supported) 3683 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, 3684 mddev->queue); 3685 else 3686 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, 3687 mddev->queue); 3688 } 3689 /* need to check that every block has at least one 
working mirror */ 3690 if (!enough(conf, -1)) { 3691 pr_err("md/raid10:%s: not enough operational mirrors.\n", 3692 mdname(mddev)); 3693 goto out_free_conf; 3694 } 3695 3696 if (conf->reshape_progress != MaxSector) { 3697 /* must ensure that shape change is supported */ 3698 if (conf->geo.far_copies != 1 && 3699 conf->geo.far_offset == 0) 3700 goto out_free_conf; 3701 if (conf->prev.far_copies != 1 && 3702 conf->prev.far_offset == 0) 3703 goto out_free_conf; 3704 } 3705 3706 mddev->degraded = 0; 3707 for (i = 0; 3708 i < conf->geo.raid_disks 3709 || i < conf->prev.raid_disks; 3710 i++) { 3711 3712 disk = conf->mirrors + i; 3713 3714 if (!disk->rdev && disk->replacement) { 3715 /* The replacement is all we have - use it */ 3716 disk->rdev = disk->replacement; 3717 disk->replacement = NULL; 3718 clear_bit(Replacement, &disk->rdev->flags); 3719 } 3720 3721 if (!disk->rdev || 3722 !test_bit(In_sync, &disk->rdev->flags)) { 3723 disk->head_position = 0; 3724 mddev->degraded++; 3725 if (disk->rdev && 3726 disk->rdev->saved_raid_disk < 0) 3727 conf->fullsync = 1; 3728 } 3729 disk->recovery_disabled = mddev->recovery_disabled - 1; 3730 } 3731 3732 if (mddev->recovery_cp != MaxSector) 3733 pr_notice("md/raid10:%s: not clean -- starting background reconstruction\n", 3734 mdname(mddev)); 3735 pr_info("md/raid10:%s: active with %d out of %d devices\n", 3736 mdname(mddev), conf->geo.raid_disks - mddev->degraded, 3737 conf->geo.raid_disks); 3738 /* 3739 * Ok, everything is just fine now 3740 */ 3741 mddev->dev_sectors = conf->dev_sectors; 3742 size = raid10_size(mddev, 0, 0); 3743 md_set_array_sectors(mddev, size); 3744 mddev->resync_max_sectors = size; 3745 set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags); 3746 3747 if (mddev->queue) { 3748 int stripe = conf->geo.raid_disks * 3749 ((mddev->chunk_sectors << 9) / PAGE_SIZE); 3750 3751 /* Calculate max read-ahead size. 3752 * We need to readahead at least twice a whole stripe.... 3753 * maybe... 
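 * For instance (illustrative geometry, 4KiB pages): 4 disks with a
 * 512KiB chunk give stripe = 4 * 128 = 512 pages; with near_copies == 2
 * that is halved to 256, so ra_pages is raised to at least 512 pages
 * (2MiB) if it is currently smaller.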
3754 */ 3755 stripe /= conf->geo.near_copies; 3756 if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe) 3757 mddev->queue->backing_dev_info->ra_pages = 2 * stripe; 3758 } 3759 3760 if (md_integrity_register(mddev)) 3761 goto out_free_conf; 3762 3763 if (conf->reshape_progress != MaxSector) { 3764 unsigned long before_length, after_length; 3765 3766 before_length = ((1 << conf->prev.chunk_shift) * 3767 conf->prev.far_copies); 3768 after_length = ((1 << conf->geo.chunk_shift) * 3769 conf->geo.far_copies); 3770 3771 if (max(before_length, after_length) > min_offset_diff) { 3772 /* This cannot work */ 3773 pr_warn("md/raid10: offset difference not enough to continue reshape\n"); 3774 goto out_free_conf; 3775 } 3776 conf->offset_diff = min_offset_diff; 3777 3778 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 3779 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 3780 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 3781 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 3782 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 3783 "reshape"); 3784 } 3785 3786 return 0; 3787 3788 out_free_conf: 3789 md_unregister_thread(&mddev->thread); 3790 mempool_destroy(conf->r10bio_pool); 3791 safe_put_page(conf->tmppage); 3792 kfree(conf->mirrors); 3793 kfree(conf); 3794 mddev->private = NULL; 3795 out: 3796 return -EIO; 3797 } 3798 3799 static void raid10_free(struct mddev *mddev, void *priv) 3800 { 3801 struct r10conf *conf = priv; 3802 3803 mempool_destroy(conf->r10bio_pool); 3804 safe_put_page(conf->tmppage); 3805 kfree(conf->mirrors); 3806 kfree(conf->mirrors_old); 3807 kfree(conf->mirrors_new); 3808 if (conf->bio_split) 3809 bioset_free(conf->bio_split); 3810 kfree(conf); 3811 } 3812 3813 static void raid10_quiesce(struct mddev *mddev, int state) 3814 { 3815 struct r10conf *conf = mddev->private; 3816 3817 switch(state) { 3818 case 1: 3819 raise_barrier(conf, 0); 3820 break; 3821 case 0: 3822 lower_barrier(conf); 3823 break; 3824 } 3825 } 3826 3827 static int raid10_resize(struct mddev *mddev, sector_t sectors) 3828 { 3829 /* Resize of 'far' arrays is not supported. 3830 * For 'near' and 'offset' arrays we can set the 3831 * number of sectors used to be an appropriate multiple 3832 * of the chunk size. 3833 * For 'offset', this is far_copies*chunksize. 3834 * For 'near' the multiplier is the LCM of 3835 * near_copies and raid_disks. 3836 * So if far_copies > 1 && !far_offset, fail. 3837 * Else find LCM(raid_disks, near_copy)*far_copies and 3838 * multiply by chunk_size. Then round to this number. 
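 *
 * (Illustrative example of the rule above, added by the editor and
 * not in the original source: a 'near' array with raid_disks = 5,
 * near_copies = 2, far_copies = 1 and 64KiB chunks rounds the usable
 * size to a multiple of LCM(5, 2) * 1 * 64KiB = 640KiB, i.e. 1280
 * sectors.)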
3839 * This is mostly done by raid10_size() 3840 */ 3841 struct r10conf *conf = mddev->private; 3842 sector_t oldsize, size; 3843 3844 if (mddev->reshape_position != MaxSector) 3845 return -EBUSY; 3846 3847 if (conf->geo.far_copies > 1 && !conf->geo.far_offset) 3848 return -EINVAL; 3849 3850 oldsize = raid10_size(mddev, 0, 0); 3851 size = raid10_size(mddev, sectors, 0); 3852 if (mddev->external_size && 3853 mddev->array_sectors > size) 3854 return -EINVAL; 3855 if (mddev->bitmap) { 3856 int ret = bitmap_resize(mddev->bitmap, size, 0, 0); 3857 if (ret) 3858 return ret; 3859 } 3860 md_set_array_sectors(mddev, size); 3861 if (sectors > mddev->dev_sectors && 3862 mddev->recovery_cp > oldsize) { 3863 mddev->recovery_cp = oldsize; 3864 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3865 } 3866 calc_sectors(conf, sectors); 3867 mddev->dev_sectors = conf->dev_sectors; 3868 mddev->resync_max_sectors = size; 3869 return 0; 3870 } 3871 3872 static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs) 3873 { 3874 struct md_rdev *rdev; 3875 struct r10conf *conf; 3876 3877 if (mddev->degraded > 0) { 3878 pr_warn("md/raid10:%s: Error: degraded raid0!\n", 3879 mdname(mddev)); 3880 return ERR_PTR(-EINVAL); 3881 } 3882 sector_div(size, devs); 3883 3884 /* Set new parameters */ 3885 mddev->new_level = 10; 3886 /* new layout: far_copies = 1, near_copies = 2 */ 3887 mddev->new_layout = (1<<8) + 2; 3888 mddev->new_chunk_sectors = mddev->chunk_sectors; 3889 mddev->delta_disks = mddev->raid_disks; 3890 mddev->raid_disks *= 2; 3891 /* make sure it will be not marked as dirty */ 3892 mddev->recovery_cp = MaxSector; 3893 mddev->dev_sectors = size; 3894 3895 conf = setup_conf(mddev); 3896 if (!IS_ERR(conf)) { 3897 rdev_for_each(rdev, mddev) 3898 if (rdev->raid_disk >= 0) { 3899 rdev->new_raid_disk = rdev->raid_disk * 2; 3900 rdev->sectors = size; 3901 } 3902 conf->barrier = 1; 3903 } 3904 3905 return conf; 3906 } 3907 3908 static void *raid10_takeover(struct mddev *mddev) 3909 { 3910 struct r0conf *raid0_conf; 3911 3912 /* raid10 can take over: 3913 * raid0 - providing it has only two drives 3914 */ 3915 if (mddev->level == 0) { 3916 /* for raid0 takeover only one zone is supported */ 3917 raid0_conf = mddev->private; 3918 if (raid0_conf->nr_strip_zones > 1) { 3919 pr_warn("md/raid10:%s: cannot takeover raid 0 with more than one zone.\n", 3920 mdname(mddev)); 3921 return ERR_PTR(-EINVAL); 3922 } 3923 return raid10_takeover_raid0(mddev, 3924 raid0_conf->strip_zone->zone_end, 3925 raid0_conf->strip_zone->nb_dev); 3926 } 3927 return ERR_PTR(-EINVAL); 3928 } 3929 3930 static int raid10_check_reshape(struct mddev *mddev) 3931 { 3932 /* Called when there is a request to change 3933 * - layout (to ->new_layout) 3934 * - chunk size (to ->new_chunk_sectors) 3935 * - raid_disks (by delta_disks) 3936 * or when trying to restart a reshape that was ongoing. 3937 * 3938 * We need to validate the request and possibly allocate 3939 * space if that might be an issue later. 3940 * 3941 * Currently we reject any reshape of a 'far' mode array, 3942 * allow chunk size to change if new is generally acceptable, 3943 * allow raid_disks to increase, and allow 3944 * a switch between 'near' mode and 'offset' mode. 
3945 */ 3946 struct r10conf *conf = mddev->private; 3947 struct geom geo; 3948 3949 if (conf->geo.far_copies != 1 && !conf->geo.far_offset) 3950 return -EINVAL; 3951 3952 if (setup_geo(&geo, mddev, geo_start) != conf->copies) 3953 /* mustn't change number of copies */ 3954 return -EINVAL; 3955 if (geo.far_copies > 1 && !geo.far_offset) 3956 /* Cannot switch to 'far' mode */ 3957 return -EINVAL; 3958 3959 if (mddev->array_sectors & geo.chunk_mask) 3960 /* not factor of array size */ 3961 return -EINVAL; 3962 3963 if (!enough(conf, -1)) 3964 return -EINVAL; 3965 3966 kfree(conf->mirrors_new); 3967 conf->mirrors_new = NULL; 3968 if (mddev->delta_disks > 0) { 3969 /* allocate new 'mirrors' list */ 3970 conf->mirrors_new = kzalloc( 3971 sizeof(struct raid10_info) 3972 *(mddev->raid_disks + 3973 mddev->delta_disks), 3974 GFP_KERNEL); 3975 if (!conf->mirrors_new) 3976 return -ENOMEM; 3977 } 3978 return 0; 3979 } 3980 3981 /* 3982 * Need to check if array has failed when deciding whether to: 3983 * - start an array 3984 * - remove non-faulty devices 3985 * - add a spare 3986 * - allow a reshape 3987 * This determination is simple when no reshape is happening. 3988 * However if there is a reshape, we need to carefully check 3989 * both the before and after sections. 3990 * This is because some failed devices may only affect one 3991 * of the two sections, and some non-in_sync devices may 3992 * be insync in the section most affected by failed devices. 3993 */ 3994 static int calc_degraded(struct r10conf *conf) 3995 { 3996 int degraded, degraded2; 3997 int i; 3998 3999 rcu_read_lock(); 4000 degraded = 0; 4001 /* 'prev' section first */ 4002 for (i = 0; i < conf->prev.raid_disks; i++) { 4003 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); 4004 if (!rdev || test_bit(Faulty, &rdev->flags)) 4005 degraded++; 4006 else if (!test_bit(In_sync, &rdev->flags)) 4007 /* When we can reduce the number of devices in 4008 * an array, this might not contribute to 4009 * 'degraded'. It does now. 4010 */ 4011 degraded++; 4012 } 4013 rcu_read_unlock(); 4014 if (conf->geo.raid_disks == conf->prev.raid_disks) 4015 return degraded; 4016 rcu_read_lock(); 4017 degraded2 = 0; 4018 for (i = 0; i < conf->geo.raid_disks; i++) { 4019 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); 4020 if (!rdev || test_bit(Faulty, &rdev->flags)) 4021 degraded2++; 4022 else if (!test_bit(In_sync, &rdev->flags)) { 4023 /* If reshape is increasing the number of devices, 4024 * this section has already been recovered, so 4025 * it doesn't contribute to degraded. 4026 * else it does. 4027 */ 4028 if (conf->geo.raid_disks <= conf->prev.raid_disks) 4029 degraded2++; 4030 } 4031 } 4032 rcu_read_unlock(); 4033 if (degraded2 > degraded) 4034 return degraded2; 4035 return degraded; 4036 } 4037 4038 static int raid10_start_reshape(struct mddev *mddev) 4039 { 4040 /* A 'reshape' has been requested. This commits 4041 * the various 'new' fields and sets MD_RECOVER_RESHAPE 4042 * This also checks if there are enough spares and adds them 4043 * to the array. 4044 * We currently require enough spares to make the final 4045 * array non-degraded. We also require that the difference 4046 * between old and new data_offset - on each device - is 4047 * enough that we never risk over-writing. 
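 *
 * (Illustrative example added by the editor, not in the original
 * source: with an old geometry of 512KiB chunks (1024 sectors,
 * chunk_shift = 10) and far_copies = 2, before_length below is
 * 2048 sectors; a new geometry of 512KiB chunks and far_copies = 1
 * gives after_length = 1024 sectors.  Every member must then have
 * at least max(2048, 1024) = 2048 sectors between its old and new
 * data_offset, or the reshape request fails with -EINVAL.)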
4048 */
4049
4050 unsigned long before_length, after_length;
4051 sector_t min_offset_diff = 0;
4052 int first = 1;
4053 struct geom new;
4054 struct r10conf *conf = mddev->private;
4055 struct md_rdev *rdev;
4056 int spares = 0;
4057 int ret;
4058
4059 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4060 return -EBUSY;
4061
4062 if (setup_geo(&new, mddev, geo_start) != conf->copies)
4063 return -EINVAL;
4064
4065 before_length = ((1 << conf->prev.chunk_shift) *
4066 conf->prev.far_copies);
4067 after_length = ((1 << conf->geo.chunk_shift) *
4068 conf->geo.far_copies);
4069
4070 rdev_for_each(rdev, mddev) {
4071 if (!test_bit(In_sync, &rdev->flags)
4072 && !test_bit(Faulty, &rdev->flags))
4073 spares++;
4074 if (rdev->raid_disk >= 0) {
4075 long long diff = (rdev->new_data_offset
4076 - rdev->data_offset);
4077 if (!mddev->reshape_backwards)
4078 diff = -diff;
4079 if (diff < 0)
4080 diff = 0;
4081 if (first || diff < min_offset_diff)
4082 min_offset_diff = diff;
4083 first = 0;
4084 }
4085 }
4086
4087 if (max(before_length, after_length) > min_offset_diff)
4088 return -EINVAL;
4089
4090 if (spares < mddev->delta_disks)
4091 return -EINVAL;
4092
4093 conf->offset_diff = min_offset_diff;
4094 spin_lock_irq(&conf->device_lock);
4095 if (conf->mirrors_new) {
4096 memcpy(conf->mirrors_new, conf->mirrors,
4097 sizeof(struct raid10_info)*conf->prev.raid_disks);
4098 smp_mb();
4099 kfree(conf->mirrors_old);
4100 conf->mirrors_old = conf->mirrors;
4101 conf->mirrors = conf->mirrors_new;
4102 conf->mirrors_new = NULL;
4103 }
4104 setup_geo(&conf->geo, mddev, geo_start);
4105 smp_mb();
4106 if (mddev->reshape_backwards) {
4107 sector_t size = raid10_size(mddev, 0, 0);
4108 if (size < mddev->array_sectors) {
4109 spin_unlock_irq(&conf->device_lock);
4110 pr_warn("md/raid10:%s: array size must be reduced before number of disks\n",
4111 mdname(mddev));
4112 return -EINVAL;
4113 }
4114 mddev->resync_max_sectors = size;
4115 conf->reshape_progress = size;
4116 } else
4117 conf->reshape_progress = 0;
4118 conf->reshape_safe = conf->reshape_progress;
4119 spin_unlock_irq(&conf->device_lock);
4120
4121 if (mddev->delta_disks && mddev->bitmap) {
4122 ret = bitmap_resize(mddev->bitmap,
4123 raid10_size(mddev, 0,
4124 conf->geo.raid_disks),
4125 0, 0);
4126 if (ret)
4127 goto abort;
4128 }
4129 if (mddev->delta_disks > 0) {
4130 rdev_for_each(rdev, mddev)
4131 if (rdev->raid_disk < 0 &&
4132 !test_bit(Faulty, &rdev->flags)) {
4133 if (raid10_add_disk(mddev, rdev) == 0) {
4134 if (rdev->raid_disk >=
4135 conf->prev.raid_disks)
4136 set_bit(In_sync, &rdev->flags);
4137 else
4138 rdev->recovery_offset = 0;
4139
4140 if (sysfs_link_rdev(mddev, rdev))
4141 /* Failure here is OK */;
4142 }
4143 } else if (rdev->raid_disk >= conf->prev.raid_disks
4144 && !test_bit(Faulty, &rdev->flags)) {
4145 /* This is a spare that was manually added */
4146 set_bit(In_sync, &rdev->flags);
4147 }
4148 }
4149 /* When a reshape changes the number of devices,
4150 * ->degraded is measured against the larger of the
4151 * pre and post numbers.
4152 */ 4153 spin_lock_irq(&conf->device_lock); 4154 mddev->degraded = calc_degraded(conf); 4155 spin_unlock_irq(&conf->device_lock); 4156 mddev->raid_disks = conf->geo.raid_disks; 4157 mddev->reshape_position = conf->reshape_progress; 4158 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 4159 4160 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4161 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 4162 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); 4163 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 4164 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 4165 4166 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 4167 "reshape"); 4168 if (!mddev->sync_thread) { 4169 ret = -EAGAIN; 4170 goto abort; 4171 } 4172 conf->reshape_checkpoint = jiffies; 4173 md_wakeup_thread(mddev->sync_thread); 4174 md_new_event(mddev); 4175 return 0; 4176 4177 abort: 4178 mddev->recovery = 0; 4179 spin_lock_irq(&conf->device_lock); 4180 conf->geo = conf->prev; 4181 mddev->raid_disks = conf->geo.raid_disks; 4182 rdev_for_each(rdev, mddev) 4183 rdev->new_data_offset = rdev->data_offset; 4184 smp_wmb(); 4185 conf->reshape_progress = MaxSector; 4186 conf->reshape_safe = MaxSector; 4187 mddev->reshape_position = MaxSector; 4188 spin_unlock_irq(&conf->device_lock); 4189 return ret; 4190 } 4191 4192 /* Calculate the last device-address that could contain 4193 * any block from the chunk that includes the array-address 's' 4194 * and report the next address. 4195 * i.e. the address returned will be chunk-aligned and after 4196 * any data that is in the chunk containing 's'. 4197 */ 4198 static sector_t last_dev_address(sector_t s, struct geom *geo) 4199 { 4200 s = (s | geo->chunk_mask) + 1; 4201 s >>= geo->chunk_shift; 4202 s *= geo->near_copies; 4203 s = DIV_ROUND_UP_SECTOR_T(s, geo->raid_disks); 4204 s *= geo->far_copies; 4205 s <<= geo->chunk_shift; 4206 return s; 4207 } 4208 4209 /* Calculate the first device-address that could contain 4210 * any block from the chunk that includes the array-address 's'. 4211 * This too will be the start of a chunk 4212 */ 4213 static sector_t first_dev_address(sector_t s, struct geom *geo) 4214 { 4215 s >>= geo->chunk_shift; 4216 s *= geo->near_copies; 4217 sector_div(s, geo->raid_disks); 4218 s *= geo->far_copies; 4219 s <<= geo->chunk_shift; 4220 return s; 4221 } 4222 4223 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, 4224 int *skipped) 4225 { 4226 /* We simply copy at most one chunk (smallest of old and new) 4227 * at a time, possibly less if that exceeds RESYNC_PAGES, 4228 * or we hit a bad block or something. 4229 * This might mean we pause for normal IO in the middle of 4230 * a chunk, but that is not a problem as mddev->reshape_position 4231 * can record any location. 4232 * 4233 * If we will want to write to a location that isn't 4234 * yet recorded as 'safe' (i.e. in metadata on disk) then 4235 * we need to flush all reshape requests and update the metadata. 4236 * 4237 * When reshaping forwards (e.g. to more devices), we interpret 4238 * 'safe' as the earliest block which might not have been copied 4239 * down yet. We divide this by previous stripe size and multiply 4240 * by previous stripe length to get lowest device offset that we 4241 * cannot write to yet. 4242 * We interpret 'sector_nr' as an address that we want to write to. 4243 * From this we use last_device_address() to find where we might 4244 * write to, and first_device_address on the 'safe' position. 
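 *
 * (Worked example added by the editor, not in the original source,
 * using the last_dev_address()/first_dev_address() helpers above:
 * with 64KiB chunks (chunk_mask = 127, chunk_shift = 7),
 * near_copies = 2, far_copies = 1 and raid_disks = 4,
 * last_dev_address(1000) computes (1000 | 127) + 1 = 1024,
 * >> 7 = 8 chunks, * 2 = 16, DIV_ROUND_UP(16, 4) = 4, * 1 = 4,
 * << 7 = 512: the chunk containing array sector 1000 occupies no
 * device sectors at or beyond 512.)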
4245 * If this 'next' write position is after the 'safe' position,
4246 * we must update the metadata to increase the 'safe' position.
4247 *
4248 * When reshaping backwards, we round in the opposite direction
4249 * and perform the reverse test: next write position must not be
4250 * less than current safe position.
4251 *
4252 * In all this, the minimum difference in data offsets
4253 * (conf->offset_diff - always positive) allows a bit of slack,
4254 * so next can be after 'safe', but not by more than offset_diff.
4255 *
4256 * We need to prepare all the bios here before we start any IO
4257 * to ensure the size we choose is acceptable to all devices.
4258 * That means one for each copy for write-out and an extra one for
4259 * read-in.
4260 * We store the read-in bio in ->master_bio and the others in
4261 * ->devs[x].bio and ->devs[x].repl_bio.
4262 */
4263 struct r10conf *conf = mddev->private;
4264 struct r10bio *r10_bio;
4265 sector_t next, safe, last;
4266 int max_sectors;
4267 int nr_sectors;
4268 int s;
4269 struct md_rdev *rdev;
4270 int need_flush = 0;
4271 struct bio *blist;
4272 struct bio *bio, *read_bio;
4273 int sectors_done = 0;
4274 struct page **pages;
4275
4276 if (sector_nr == 0) {
4277 /* If restarting in the middle, skip the initial sectors */
4278 if (mddev->reshape_backwards &&
4279 conf->reshape_progress < raid10_size(mddev, 0, 0)) {
4280 sector_nr = (raid10_size(mddev, 0, 0)
4281 - conf->reshape_progress);
4282 } else if (!mddev->reshape_backwards &&
4283 conf->reshape_progress > 0)
4284 sector_nr = conf->reshape_progress;
4285 if (sector_nr) {
4286 mddev->curr_resync_completed = sector_nr;
4287 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4288 *skipped = 1;
4289 return sector_nr;
4290 }
4291 }
4292
4293 /* We don't use sector_nr to track where we are up to
4294 * as that doesn't work well for ->reshape_backwards.
4295 * So just use ->reshape_progress.
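 *
 * (Editorial note, not in the original source: when reshaping
 * backwards, conf->reshape_progress starts at raid10_size(mddev, 0, 0)
 * and counts down towards 0, so the restart logic above translates it
 * as raid10_size(...) - reshape_progress; when reshaping forwards it
 * is used as the resume position directly.)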
4296 */ 4297 if (mddev->reshape_backwards) { 4298 /* 'next' is the earliest device address that we might 4299 * write to for this chunk in the new layout 4300 */ 4301 next = first_dev_address(conf->reshape_progress - 1, 4302 &conf->geo); 4303 4304 /* 'safe' is the last device address that we might read from 4305 * in the old layout after a restart 4306 */ 4307 safe = last_dev_address(conf->reshape_safe - 1, 4308 &conf->prev); 4309 4310 if (next + conf->offset_diff < safe) 4311 need_flush = 1; 4312 4313 last = conf->reshape_progress - 1; 4314 sector_nr = last & ~(sector_t)(conf->geo.chunk_mask 4315 & conf->prev.chunk_mask); 4316 if (sector_nr + RESYNC_BLOCK_SIZE/512 < last) 4317 sector_nr = last + 1 - RESYNC_BLOCK_SIZE/512; 4318 } else { 4319 /* 'next' is after the last device address that we 4320 * might write to for this chunk in the new layout 4321 */ 4322 next = last_dev_address(conf->reshape_progress, &conf->geo); 4323 4324 /* 'safe' is the earliest device address that we might 4325 * read from in the old layout after a restart 4326 */ 4327 safe = first_dev_address(conf->reshape_safe, &conf->prev); 4328 4329 /* Need to update metadata if 'next' might be beyond 'safe' 4330 * as that would possibly corrupt data 4331 */ 4332 if (next > safe + conf->offset_diff) 4333 need_flush = 1; 4334 4335 sector_nr = conf->reshape_progress; 4336 last = sector_nr | (conf->geo.chunk_mask 4337 & conf->prev.chunk_mask); 4338 4339 if (sector_nr + RESYNC_BLOCK_SIZE/512 <= last) 4340 last = sector_nr + RESYNC_BLOCK_SIZE/512 - 1; 4341 } 4342 4343 if (need_flush || 4344 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { 4345 /* Need to update reshape_position in metadata */ 4346 wait_barrier(conf); 4347 mddev->reshape_position = conf->reshape_progress; 4348 if (mddev->reshape_backwards) 4349 mddev->curr_resync_completed = raid10_size(mddev, 0, 0) 4350 - conf->reshape_progress; 4351 else 4352 mddev->curr_resync_completed = conf->reshape_progress; 4353 conf->reshape_checkpoint = jiffies; 4354 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 4355 md_wakeup_thread(mddev->thread); 4356 wait_event(mddev->sb_wait, mddev->sb_flags == 0 || 4357 test_bit(MD_RECOVERY_INTR, &mddev->recovery)); 4358 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 4359 allow_barrier(conf); 4360 return sectors_done; 4361 } 4362 conf->reshape_safe = mddev->reshape_position; 4363 allow_barrier(conf); 4364 } 4365 4366 read_more: 4367 /* Now schedule reads for blocks from sector_nr to last */ 4368 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); 4369 r10_bio->state = 0; 4370 raise_barrier(conf, sectors_done != 0); 4371 atomic_set(&r10_bio->remaining, 0); 4372 r10_bio->mddev = mddev; 4373 r10_bio->sector = sector_nr; 4374 set_bit(R10BIO_IsReshape, &r10_bio->state); 4375 r10_bio->sectors = last - sector_nr + 1; 4376 rdev = read_balance(conf, r10_bio, &max_sectors); 4377 BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state)); 4378 4379 if (!rdev) { 4380 /* Cannot read from here, so need to record bad blocks 4381 * on all the target devices. 
4382 */ 4383 // FIXME 4384 mempool_free(r10_bio, conf->r10buf_pool); 4385 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4386 return sectors_done; 4387 } 4388 4389 read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev); 4390 4391 read_bio->bi_bdev = rdev->bdev; 4392 read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr 4393 + rdev->data_offset); 4394 read_bio->bi_private = r10_bio; 4395 read_bio->bi_end_io = end_reshape_read; 4396 bio_set_op_attrs(read_bio, REQ_OP_READ, 0); 4397 read_bio->bi_flags &= (~0UL << BIO_RESET_BITS); 4398 read_bio->bi_error = 0; 4399 read_bio->bi_vcnt = 0; 4400 read_bio->bi_iter.bi_size = 0; 4401 r10_bio->master_bio = read_bio; 4402 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum; 4403 4404 /* Now find the locations in the new layout */ 4405 __raid10_find_phys(&conf->geo, r10_bio); 4406 4407 blist = read_bio; 4408 read_bio->bi_next = NULL; 4409 4410 rcu_read_lock(); 4411 for (s = 0; s < conf->copies*2; s++) { 4412 struct bio *b; 4413 int d = r10_bio->devs[s/2].devnum; 4414 struct md_rdev *rdev2; 4415 if (s&1) { 4416 rdev2 = rcu_dereference(conf->mirrors[d].replacement); 4417 b = r10_bio->devs[s/2].repl_bio; 4418 } else { 4419 rdev2 = rcu_dereference(conf->mirrors[d].rdev); 4420 b = r10_bio->devs[s/2].bio; 4421 } 4422 if (!rdev2 || test_bit(Faulty, &rdev2->flags)) 4423 continue; 4424 4425 b->bi_bdev = rdev2->bdev; 4426 b->bi_iter.bi_sector = r10_bio->devs[s/2].addr + 4427 rdev2->new_data_offset; 4428 b->bi_end_io = end_reshape_write; 4429 bio_set_op_attrs(b, REQ_OP_WRITE, 0); 4430 b->bi_next = blist; 4431 blist = b; 4432 } 4433 4434 /* Now add as many pages as possible to all of these bios. */ 4435 4436 nr_sectors = 0; 4437 pages = get_resync_pages(r10_bio->devs[0].bio)->pages; 4438 for (s = 0 ; s < max_sectors; s += PAGE_SIZE >> 9) { 4439 struct page *page = pages[s / (PAGE_SIZE >> 9)]; 4440 int len = (max_sectors - s) << 9; 4441 if (len > PAGE_SIZE) 4442 len = PAGE_SIZE; 4443 for (bio = blist; bio ; bio = bio->bi_next) { 4444 /* 4445 * won't fail because the vec table is big enough 4446 * to hold all these pages 4447 */ 4448 bio_add_page(bio, page, len, 0); 4449 } 4450 sector_nr += len >> 9; 4451 nr_sectors += len >> 9; 4452 } 4453 rcu_read_unlock(); 4454 r10_bio->sectors = nr_sectors; 4455 4456 /* Now submit the read */ 4457 md_sync_acct(read_bio->bi_bdev, r10_bio->sectors); 4458 atomic_inc(&r10_bio->remaining); 4459 read_bio->bi_next = NULL; 4460 generic_make_request(read_bio); 4461 sector_nr += nr_sectors; 4462 sectors_done += nr_sectors; 4463 if (sector_nr <= last) 4464 goto read_more; 4465 4466 /* Now that we have done the whole section we can 4467 * update reshape_progress 4468 */ 4469 if (mddev->reshape_backwards) 4470 conf->reshape_progress -= sectors_done; 4471 else 4472 conf->reshape_progress += sectors_done; 4473 4474 return sectors_done; 4475 } 4476 4477 static void end_reshape_request(struct r10bio *r10_bio); 4478 static int handle_reshape_read_error(struct mddev *mddev, 4479 struct r10bio *r10_bio); 4480 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio) 4481 { 4482 /* Reshape read completed. Hopefully we have a block 4483 * to write out. 4484 * If we got a read error then we do sync 1-page reads from 4485 * elsewhere until we find the data - or give up. 
4486 */ 4487 struct r10conf *conf = mddev->private; 4488 int s; 4489 4490 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) 4491 if (handle_reshape_read_error(mddev, r10_bio) < 0) { 4492 /* Reshape has been aborted */ 4493 md_done_sync(mddev, r10_bio->sectors, 0); 4494 return; 4495 } 4496 4497 /* We definitely have the data in the pages, schedule the 4498 * writes. 4499 */ 4500 atomic_set(&r10_bio->remaining, 1); 4501 for (s = 0; s < conf->copies*2; s++) { 4502 struct bio *b; 4503 int d = r10_bio->devs[s/2].devnum; 4504 struct md_rdev *rdev; 4505 rcu_read_lock(); 4506 if (s&1) { 4507 rdev = rcu_dereference(conf->mirrors[d].replacement); 4508 b = r10_bio->devs[s/2].repl_bio; 4509 } else { 4510 rdev = rcu_dereference(conf->mirrors[d].rdev); 4511 b = r10_bio->devs[s/2].bio; 4512 } 4513 if (!rdev || test_bit(Faulty, &rdev->flags)) { 4514 rcu_read_unlock(); 4515 continue; 4516 } 4517 atomic_inc(&rdev->nr_pending); 4518 rcu_read_unlock(); 4519 md_sync_acct(b->bi_bdev, r10_bio->sectors); 4520 atomic_inc(&r10_bio->remaining); 4521 b->bi_next = NULL; 4522 generic_make_request(b); 4523 } 4524 end_reshape_request(r10_bio); 4525 } 4526 4527 static void end_reshape(struct r10conf *conf) 4528 { 4529 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) 4530 return; 4531 4532 spin_lock_irq(&conf->device_lock); 4533 conf->prev = conf->geo; 4534 md_finish_reshape(conf->mddev); 4535 smp_wmb(); 4536 conf->reshape_progress = MaxSector; 4537 conf->reshape_safe = MaxSector; 4538 spin_unlock_irq(&conf->device_lock); 4539 4540 /* read-ahead size must cover two whole stripes, which is 4541 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices 4542 */ 4543 if (conf->mddev->queue) { 4544 int stripe = conf->geo.raid_disks * 4545 ((conf->mddev->chunk_sectors << 9) / PAGE_SIZE); 4546 stripe /= conf->geo.near_copies; 4547 if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe) 4548 conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe; 4549 } 4550 conf->fullsync = 0; 4551 } 4552 4553 static int handle_reshape_read_error(struct mddev *mddev, 4554 struct r10bio *r10_bio) 4555 { 4556 /* Use sync reads to get the blocks from somewhere else */ 4557 int sectors = r10_bio->sectors; 4558 struct r10conf *conf = mddev->private; 4559 struct { 4560 struct r10bio r10_bio; 4561 struct r10dev devs[conf->copies]; 4562 } on_stack; 4563 struct r10bio *r10b = &on_stack.r10_bio; 4564 int slot = 0; 4565 int idx = 0; 4566 struct page **pages; 4567 4568 /* reshape IOs share pages from .devs[0].bio */ 4569 pages = get_resync_pages(r10_bio->devs[0].bio)->pages; 4570 4571 r10b->sector = r10_bio->sector; 4572 __raid10_find_phys(&conf->prev, r10b); 4573 4574 while (sectors) { 4575 int s = sectors; 4576 int success = 0; 4577 int first_slot = slot; 4578 4579 if (s > (PAGE_SIZE >> 9)) 4580 s = PAGE_SIZE >> 9; 4581 4582 rcu_read_lock(); 4583 while (!success) { 4584 int d = r10b->devs[slot].devnum; 4585 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev); 4586 sector_t addr; 4587 if (rdev == NULL || 4588 test_bit(Faulty, &rdev->flags) || 4589 !test_bit(In_sync, &rdev->flags)) 4590 goto failed; 4591 4592 addr = r10b->devs[slot].addr + idx * PAGE_SIZE; 4593 atomic_inc(&rdev->nr_pending); 4594 rcu_read_unlock(); 4595 success = sync_page_io(rdev, 4596 addr, 4597 s << 9, 4598 pages[idx], 4599 REQ_OP_READ, 0, false); 4600 rdev_dec_pending(rdev, mddev); 4601 rcu_read_lock(); 4602 if (success) 4603 break; 4604 failed: 4605 slot++; 4606 if (slot >= conf->copies) 4607 slot = 0; 4608 if (slot == first_slot) 4609 break; 
4610 } 4611 rcu_read_unlock(); 4612 if (!success) { 4613 /* couldn't read this block, must give up */ 4614 set_bit(MD_RECOVERY_INTR, 4615 &mddev->recovery); 4616 return -EIO; 4617 } 4618 sectors -= s; 4619 idx++; 4620 } 4621 return 0; 4622 } 4623 4624 static void end_reshape_write(struct bio *bio) 4625 { 4626 struct r10bio *r10_bio = get_resync_r10bio(bio); 4627 struct mddev *mddev = r10_bio->mddev; 4628 struct r10conf *conf = mddev->private; 4629 int d; 4630 int slot; 4631 int repl; 4632 struct md_rdev *rdev = NULL; 4633 4634 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); 4635 if (repl) 4636 rdev = conf->mirrors[d].replacement; 4637 if (!rdev) { 4638 smp_mb(); 4639 rdev = conf->mirrors[d].rdev; 4640 } 4641 4642 if (bio->bi_error) { 4643 /* FIXME should record badblock */ 4644 md_error(mddev, rdev); 4645 } 4646 4647 rdev_dec_pending(rdev, mddev); 4648 end_reshape_request(r10_bio); 4649 } 4650 4651 static void end_reshape_request(struct r10bio *r10_bio) 4652 { 4653 if (!atomic_dec_and_test(&r10_bio->remaining)) 4654 return; 4655 md_done_sync(r10_bio->mddev, r10_bio->sectors, 1); 4656 bio_put(r10_bio->master_bio); 4657 put_buf(r10_bio); 4658 } 4659 4660 static void raid10_finish_reshape(struct mddev *mddev) 4661 { 4662 struct r10conf *conf = mddev->private; 4663 4664 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 4665 return; 4666 4667 if (mddev->delta_disks > 0) { 4668 sector_t size = raid10_size(mddev, 0, 0); 4669 md_set_array_sectors(mddev, size); 4670 if (mddev->recovery_cp > mddev->resync_max_sectors) { 4671 mddev->recovery_cp = mddev->resync_max_sectors; 4672 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4673 } 4674 mddev->resync_max_sectors = size; 4675 if (mddev->queue) { 4676 set_capacity(mddev->gendisk, mddev->array_sectors); 4677 revalidate_disk(mddev->gendisk); 4678 } 4679 } else { 4680 int d; 4681 rcu_read_lock(); 4682 for (d = conf->geo.raid_disks ; 4683 d < conf->geo.raid_disks - mddev->delta_disks; 4684 d++) { 4685 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev); 4686 if (rdev) 4687 clear_bit(In_sync, &rdev->flags); 4688 rdev = rcu_dereference(conf->mirrors[d].replacement); 4689 if (rdev) 4690 clear_bit(In_sync, &rdev->flags); 4691 } 4692 rcu_read_unlock(); 4693 } 4694 mddev->layout = mddev->new_layout; 4695 mddev->chunk_sectors = 1 << conf->geo.chunk_shift; 4696 mddev->reshape_position = MaxSector; 4697 mddev->delta_disks = 0; 4698 mddev->reshape_backwards = 0; 4699 } 4700 4701 static struct md_personality raid10_personality = 4702 { 4703 .name = "raid10", 4704 .level = 10, 4705 .owner = THIS_MODULE, 4706 .make_request = raid10_make_request, 4707 .run = raid10_run, 4708 .free = raid10_free, 4709 .status = raid10_status, 4710 .error_handler = raid10_error, 4711 .hot_add_disk = raid10_add_disk, 4712 .hot_remove_disk= raid10_remove_disk, 4713 .spare_active = raid10_spare_active, 4714 .sync_request = raid10_sync_request, 4715 .quiesce = raid10_quiesce, 4716 .size = raid10_size, 4717 .resize = raid10_resize, 4718 .takeover = raid10_takeover, 4719 .check_reshape = raid10_check_reshape, 4720 .start_reshape = raid10_start_reshape, 4721 .finish_reshape = raid10_finish_reshape, 4722 .congested = raid10_congested, 4723 }; 4724 4725 static int __init raid_init(void) 4726 { 4727 return register_md_personality(&raid10_personality); 4728 } 4729 4730 static void raid_exit(void) 4731 { 4732 unregister_md_personality(&raid10_personality); 4733 } 4734 4735 module_init(raid_init); 4736 module_exit(raid_exit); 4737 MODULE_LICENSE("GPL"); 4738 MODULE_DESCRIPTION("RAID10 
(striped mirror) personality for MD"); 4739 MODULE_ALIAS("md-personality-9"); /* RAID10 */ 4740 MODULE_ALIAS("md-raid10"); 4741 MODULE_ALIAS("md-level-10"); 4742 4743 module_param(max_queued_requests, int, S_IRUGO|S_IWUSR); 4744
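
/*
 * Usage note (editorial addition, not part of the original source):
 * because max_queued_requests is exported with module_param() and mode
 * S_IRUGO|S_IWUSR, it can be set at load time when raid10 is built as
 * a module, e.g. "modprobe raid10 max_queued_requests=2048", or
 * adjusted at runtime by root via
 * /sys/module/raid10/parameters/max_queued_requests.
 */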