/*
 * raid10.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 2000-2004 Neil Brown
 *
 * RAID-10 support for md.
 *
 * Based on code in raid1.c.  See raid1.c for further copyright information.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include "md.h"
#include "raid10.h"
#include "raid0.h"
#include "bitmap.h"

/*
 * RAID10 provides a combination of RAID0 and RAID1 functionality.
 * The layout of data is defined by
 *    chunk_size
 *    raid_disks
 *    near_copies (stored in low byte of layout)
 *    far_copies (stored in second byte of layout)
 *    far_offset (stored in bit 16 of layout)
 *    use_far_sets (stored in bit 17 of layout)
 *    use_far_sets_bugfixed (stored in bit 18 of layout)
 *
 * The data to be stored is divided into chunks using chunksize.  Each device
 * is divided into far_copies sections.  In each section, chunks are laid out
 * in a style similar to raid0, but near_copies copies of each chunk are stored
 * (each on a different drive).  The starting device for each section is offset
 * near_copies from the starting device of the previous section.  Thus there
 * are (near_copies * far_copies) of each chunk, and each is on a different
 * drive.  near_copies and far_copies must be at least one, and their product
 * is at most raid_disks.
 *
 * If far_offset is true, then the far_copies are handled a bit differently.
 * The copies are still in different stripes, but instead of being very far
 * apart on disk, they are adjacent stripes.
 *
 * The far and offset algorithms are handled slightly differently if
 * 'use_far_sets' is true.  In this case, the array's devices are grouped into
 * sets that are (near_copies * far_copies) in size.  The far copied stripes
 * are still shifted by 'near_copies' devices, but this shifting stays confined
 * to the set rather than the entire array.  This is done to improve the number
 * of device combinations that can fail without causing the array to fail.
 * Example 'far' algorithm w/o 'use_far_sets' (each letter represents a chunk
 * on a device):
 *    A B C D    A B C D E
 *      ...         ...
 *    D A B C    E A B C D
 * Example 'far' algorithm w/ 'use_far_sets' enabled (sets illustrated w/ []'s):
 *    [A B] [C D]    [A B] [C D E]
 *    |...| |...|    |...| | ... |
 *    [B A] [D C]    [B A] [E C D]
 */
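
/*
 * Illustrative example (not part of the original comment): a layout value of
 * 0x0102 encodes near_copies = 2 (low byte) and far_copies = 1 (second byte),
 * with far_offset, use_far_sets and use_far_sets_bugfixed all clear, i.e. the
 * common "near=2" RAID10 layout.
 */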

/*
 * Number of guaranteed r10bios in case of extreme VM load:
 */
#define NR_RAID10_BIOS 256

/* when we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error.  To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
 */
#define IO_BLOCKED ((struct bio *)1)
/* When we successfully write to a known bad-block, we need to remove the
 * bad-block marking which must be done from process context.  So we record
 * the success by setting devs[n].bio to IO_MADE_GOOD
 */
#define IO_MADE_GOOD ((struct bio *)2)

#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)

/* When there are this many requests queued to be written by
 * the raid10 thread, we become 'congested' to provide back-pressure
 * for writeback.
 */
static int max_queued_requests = 1024;

static void allow_barrier(struct r10conf *conf);
static void lower_barrier(struct r10conf *conf);
static int _enough(struct r10conf *conf, int previous, int ignore);
static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
				int *skipped);
static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
static void end_reshape_write(struct bio *bio);
static void end_reshape(struct r10conf *conf);

static void *r10bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct r10conf *conf = data;
	int size = offsetof(struct r10bio, devs[conf->copies]);

	/* allocate a r10bio with room for raid_disks entries in the
	 * bios array */
	return kzalloc(size, gfp_flags);
}

static void r10bio_pool_free(void *r10_bio, void *data)
{
	kfree(r10_bio);
}

/* Maximum size of each resync request */
#define RESYNC_BLOCK_SIZE (64*1024)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
/* amount of memory to reserve for resync requests */
#define RESYNC_WINDOW (1024*1024)
/* maximum number of concurrent requests, memory permitting */
#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)

/*
 * When performing a resync, we need to read and compare, so
 * we need as many pages as there are copies.
 * When performing a recovery, we need 2 bios, one for read,
 * one for write (we recover only one drive per r10buf)
 *
 */
static void *r10buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct r10conf *conf = data;
	struct page *page;
	struct r10bio *r10_bio;
	struct bio *bio;
	int i, j;
	int nalloc;

	r10_bio = r10bio_pool_alloc(gfp_flags, conf);
	if (!r10_bio)
		return NULL;

	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
	    test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
		nalloc = conf->copies; /* resync */
	else
		nalloc = 2; /* recovery */

	/*
	 * Allocate bios.
	 */
	for (j = nalloc ; j-- ; ) {
		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r10_bio->devs[j].bio = bio;
		if (!conf->have_replacement)
			continue;
		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r10_bio->devs[j].repl_bio = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them
	 * where needed.
	 */
	for (j = 0; j < nalloc; j++) {
		struct bio *rbio = r10_bio->devs[j].repl_bio;
		bio = r10_bio->devs[j].bio;
		for (i = 0; i < RESYNC_PAGES; i++) {
			if (j > 0 && !test_bit(MD_RECOVERY_SYNC,
					       &conf->mddev->recovery)) {
				/* we can share bv_page's during recovery
				 * and reshape */
				struct bio *rbio = r10_bio->devs[0].bio;
				page = rbio->bi_io_vec[i].bv_page;
				get_page(page);
			} else
				page = alloc_page(gfp_flags);
			if (unlikely(!page))
				goto out_free_pages;

			bio->bi_io_vec[i].bv_page = page;
			if (rbio)
				rbio->bi_io_vec[i].bv_page = page;
		}
	}

	return r10_bio;

out_free_pages:
	for ( ; i > 0 ; i--)
		safe_put_page(bio->bi_io_vec[i-1].bv_page);
	while (j--)
		for (i = 0; i < RESYNC_PAGES ; i++)
			safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
	j = 0;
out_free_bio:
	for ( ; j < nalloc; j++) {
		if (r10_bio->devs[j].bio)
			bio_put(r10_bio->devs[j].bio);
		if (r10_bio->devs[j].repl_bio)
			bio_put(r10_bio->devs[j].repl_bio);
	}
	r10bio_pool_free(r10_bio, conf);
	return NULL;
}

static void r10buf_pool_free(void *__r10_bio, void *data)
{
	int i;
	struct r10conf *conf = data;
	struct r10bio *r10bio = __r10_bio;
	int j;

	for (j = 0; j < conf->copies; j++) {
		struct bio *bio = r10bio->devs[j].bio;
		if (bio) {
			for (i = 0; i < RESYNC_PAGES; i++) {
				safe_put_page(bio->bi_io_vec[i].bv_page);
				bio->bi_io_vec[i].bv_page = NULL;
			}
			bio_put(bio);
		}
		bio = r10bio->devs[j].repl_bio;
		if (bio)
			bio_put(bio);
	}
	r10bio_pool_free(r10bio, conf);
}

static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
{
	int i;

	for (i = 0; i < conf->copies; i++) {
		struct bio **bio = &r10_bio->devs[i].bio;
		if (!BIO_SPECIAL(*bio))
			bio_put(*bio);
		*bio = NULL;
		bio = &r10_bio->devs[i].repl_bio;
		if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
			bio_put(*bio);
		*bio = NULL;
	}
}

static void free_r10bio(struct r10bio *r10_bio)
{
	struct r10conf *conf = r10_bio->mddev->private;

	put_all_bios(conf, r10_bio);
	mempool_free(r10_bio, conf->r10bio_pool);
}

static void put_buf(struct r10bio *r10_bio)
{
	struct r10conf *conf = r10_bio->mddev->private;

	mempool_free(r10_bio, conf->r10buf_pool);

	lower_barrier(conf);
}

static void reschedule_retry(struct r10bio *r10_bio)
{
	unsigned long flags;
	struct mddev *mddev = r10_bio->mddev;
	struct r10conf *conf = mddev->private;

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r10_bio->retry_list, &conf->retry_list);
	conf->nr_queued++;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	/* wake up frozen array... */
	wake_up(&conf->wait_barrier);

	md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void raid_end_bio_io(struct r10bio *r10_bio)
{
	struct bio *bio = r10_bio->master_bio;
	int done;
	struct r10conf *conf = r10_bio->mddev->private;

	if (bio->bi_phys_segments) {
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		bio->bi_phys_segments--;
		done = (bio->bi_phys_segments == 0);
		spin_unlock_irqrestore(&conf->device_lock, flags);
	} else
		done = 1;
	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
		bio->bi_error = -EIO;
	if (done) {
		bio_endio(bio);
		/*
		 * Wake up any possible resync thread that waits for the device
		 * to go idle.
		 */
		allow_barrier(conf);
	}
	free_r10bio(r10_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int slot, struct r10bio *r10_bio)
{
	struct r10conf *conf = r10_bio->mddev->private;

	conf->mirrors[r10_bio->devs[slot].devnum].head_position =
		r10_bio->devs[slot].addr + (r10_bio->sectors);
}

/*
 * Find the disk number which triggered given bio
 */
static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
			 struct bio *bio, int *slotp, int *replp)
{
	int slot;
	int repl = 0;

	for (slot = 0; slot < conf->copies; slot++) {
		if (r10_bio->devs[slot].bio == bio)
			break;
		if (r10_bio->devs[slot].repl_bio == bio) {
			repl = 1;
			break;
		}
	}

	BUG_ON(slot == conf->copies);
	update_head_pos(slot, r10_bio);

	if (slotp)
		*slotp = slot;
	if (replp)
		*replp = repl;
	return r10_bio->devs[slot].devnum;
}

static void raid10_end_read_request(struct bio *bio)
{
	int uptodate = !bio->bi_error;
	struct r10bio *r10_bio = bio->bi_private;
	int slot, dev;
	struct md_rdev *rdev;
	struct r10conf *conf = r10_bio->mddev->private;

	slot = r10_bio->read_slot;
	dev = r10_bio->devs[slot].devnum;
	rdev = r10_bio->devs[slot].rdev;
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(slot, r10_bio);

	if (uptodate) {
		/*
		 * Set R10BIO_Uptodate in our master bio, so that
		 * we will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer fails.
		 *
		 * The 'master' represents the composite IO operation to
		 * user-side. So if something waits for IO, then it will
		 * wait for the 'master' bio.
		 */
		set_bit(R10BIO_Uptodate, &r10_bio->state);
	} else {
		/* If all other devices that store this block have
		 * failed, we want to return the error upwards rather
		 * than fail the last device.
		 * Here we redefine
		 * "uptodate" to mean "Don't want to retry"
		 */
		if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state),
			     rdev->raid_disk))
			uptodate = 1;
	}
	if (uptodate) {
		raid_end_bio_io(r10_bio);
		rdev_dec_pending(rdev, conf->mddev);
	} else {
		/*
		 * oops, read error - keep the refcount on the rdev
		 */
		char b[BDEVNAME_SIZE];
		printk_ratelimited(KERN_ERR
				   "md/raid10:%s: %s: rescheduling sector %llu\n",
				   mdname(conf->mddev),
				   bdevname(rdev->bdev, b),
				   (unsigned long long)r10_bio->sector);
		set_bit(R10BIO_ReadError, &r10_bio->state);
		reschedule_retry(r10_bio);
	}
}

static void close_write(struct r10bio *r10_bio)
{
	/* clear the bitmap if all writes complete successfully */
	bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
			r10_bio->sectors,
			!test_bit(R10BIO_Degraded, &r10_bio->state),
			0);
	md_write_end(r10_bio->mddev);
}

static void one_write_done(struct r10bio *r10_bio)
{
	if (atomic_dec_and_test(&r10_bio->remaining)) {
		if (test_bit(R10BIO_WriteError, &r10_bio->state))
			reschedule_retry(r10_bio);
		else {
			close_write(r10_bio);
			if (test_bit(R10BIO_MadeGood, &r10_bio->state))
				reschedule_retry(r10_bio);
			else
				raid_end_bio_io(r10_bio);
		}
	}
}

static void raid10_end_write_request(struct bio *bio)
{
	struct r10bio *r10_bio = bio->bi_private;
	int dev;
	int dec_rdev = 1;
	struct r10conf *conf = r10_bio->mddev->private;
	int slot, repl;
	struct md_rdev *rdev = NULL;

	dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);

	if (repl)
		rdev = conf->mirrors[dev].replacement;
	if (!rdev) {
		smp_rmb();
		repl = 0;
		rdev = conf->mirrors[dev].rdev;
	}
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	if (bio->bi_error) {
		if (repl)
			/* Never record new bad blocks to replacement,
			 * just fail it.
			 */
			md_error(rdev->mddev, rdev);
		else {
			set_bit(WriteErrorSeen, &rdev->flags);
			if (!test_and_set_bit(WantReplacement, &rdev->flags))
				set_bit(MD_RECOVERY_NEEDED,
					&rdev->mddev->recovery);
			set_bit(R10BIO_WriteError, &r10_bio->state);
			dec_rdev = 0;
		}
	} else {
		/*
		 * Set R10BIO_Uptodate in our master bio, so that
		 * we will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer fails.
		 *
		 * The 'master' represents the composite IO operation to
		 * user-side. So if something waits for IO, then it will
		 * wait for the 'master' bio.
		 */
		sector_t first_bad;
		int bad_sectors;

		/*
		 * Do not set R10BIO_Uptodate if the current device is
		 * rebuilding or Faulty. This is because we cannot use
		 * such device for properly reading the data back (we could
		 * potentially use it, if the current write would have fallen
		 * before rdev->recovery_offset, but for simplicity we don't
		 * check this here.)
		 */
		if (test_bit(In_sync, &rdev->flags) &&
		    !test_bit(Faulty, &rdev->flags))
			set_bit(R10BIO_Uptodate, &r10_bio->state);

		/* Maybe we can clear some bad blocks. */
		if (is_badblock(rdev,
				r10_bio->devs[slot].addr,
				r10_bio->sectors,
				&first_bad, &bad_sectors)) {
			bio_put(bio);
			if (repl)
				r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
			else
				r10_bio->devs[slot].bio = IO_MADE_GOOD;
			dec_rdev = 0;
			set_bit(R10BIO_MadeGood, &r10_bio->state);
		}
	}

	/*
	 *
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	one_write_done(r10_bio);
	if (dec_rdev)
		rdev_dec_pending(rdev, conf->mddev);
}

/*
 * RAID10 layout manager
 * As well as the chunksize and raid_disks count, there are two
 * parameters: near_copies and far_copies.
 * near_copies * far_copies must be <= raid_disks.
 * Normally one of these will be 1.
 * If both are 1, we get raid0.
 * If near_copies == raid_disks, we get raid1.
 *
 * Chunks are laid out in raid0 style with near_copies copies of the
 * first chunk, followed by near_copies copies of the next chunk and
 * so on.
 * If far_copies > 1, then after 1/far_copies of the array has been assigned
 * as described above, we start again with a device offset of near_copies.
 * So we effectively have another copy of the whole array further down all
 * the drives, but with blocks on different drives.
 * With this layout, a block is never stored twice on the one device.
 *
 * raid10_find_phys finds the sector offset of a given virtual sector
 * on each device that it is on.
 *
 * raid10_find_virt does the reverse mapping, from a device and a
 * sector offset to a virtual address
 */

static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
{
	int n, f;
	sector_t sector;
	sector_t chunk;
	sector_t stripe;
	int dev;
	int slot = 0;
	int last_far_set_start, last_far_set_size;

	last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
	last_far_set_start *= geo->far_set_size;

	last_far_set_size = geo->far_set_size;
	last_far_set_size += (geo->raid_disks % geo->far_set_size);

	/* now calculate first sector/dev */
	chunk = r10bio->sector >> geo->chunk_shift;
	sector = r10bio->sector & geo->chunk_mask;

	chunk *= geo->near_copies;
	stripe = chunk;
	dev = sector_div(stripe, geo->raid_disks);
	if (geo->far_offset)
		stripe *= geo->far_copies;

	sector += stripe << geo->chunk_shift;

	/* and calculate all the others */
	for (n = 0; n < geo->near_copies; n++) {
		int d = dev;
		int set;
		sector_t s = sector;
		r10bio->devs[slot].devnum = d;
		r10bio->devs[slot].addr = s;
		slot++;

		for (f = 1; f < geo->far_copies; f++) {
			set = d / geo->far_set_size;
			d += geo->near_copies;

			if ((geo->raid_disks % geo->far_set_size) &&
			    (d > last_far_set_start)) {
				d -= last_far_set_start;
				d %= last_far_set_size;
				d += last_far_set_start;
			} else {
				d %= geo->far_set_size;
				d += geo->far_set_size * set;
			}
			s += geo->stride;
			r10bio->devs[slot].devnum = d;
			r10bio->devs[slot].addr = s;
			slot++;
		}
		dev++;
		if (dev >= geo->raid_disks) {
			dev = 0;
			sector += (geo->chunk_mask + 1);
		}
	}
}
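
/*
 * Worked example (illustrative only, not from the original source): with
 * raid_disks = 4, near_copies = 2, far_copies = 1 and no far_offset,
 * __raid10_find_phys() places virtual chunk 0 on devices 0 and 1, virtual
 * chunk 1 on devices 2 and 3, and virtual chunk 2 back on devices 0 and 1 in
 * the next stripe.  raid10_find_virt() below performs the inverse mapping and
 * is used during resync/recovery.
 */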
static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
{
	struct geom *geo = &conf->geo;

	if (conf->reshape_progress != MaxSector &&
	    ((r10bio->sector >= conf->reshape_progress) !=
	     conf->mddev->reshape_backwards)) {
		set_bit(R10BIO_Previous, &r10bio->state);
		geo = &conf->prev;
	} else
		clear_bit(R10BIO_Previous, &r10bio->state);

	__raid10_find_phys(geo, r10bio);
}

static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
{
	sector_t offset, chunk, vchunk;
	/* Never use conf->prev as this is only called during resync
	 * or recovery, so reshape isn't happening
	 */
	struct geom *geo = &conf->geo;
	int far_set_start = (dev / geo->far_set_size) * geo->far_set_size;
	int far_set_size = geo->far_set_size;
	int last_far_set_start;

	if (geo->raid_disks % geo->far_set_size) {
		last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
		last_far_set_start *= geo->far_set_size;

		if (dev >= last_far_set_start) {
			far_set_size = geo->far_set_size;
			far_set_size += (geo->raid_disks % geo->far_set_size);
			far_set_start = last_far_set_start;
		}
	}

	offset = sector & geo->chunk_mask;
	if (geo->far_offset) {
		int fc;
		chunk = sector >> geo->chunk_shift;
		fc = sector_div(chunk, geo->far_copies);
		dev -= fc * geo->near_copies;
		if (dev < far_set_start)
			dev += far_set_size;
	} else {
		while (sector >= geo->stride) {
			sector -= geo->stride;
			if (dev < (geo->near_copies + far_set_start))
				dev += far_set_size - geo->near_copies;
			else
				dev -= geo->near_copies;
		}
		chunk = sector >> geo->chunk_shift;
	}
	vchunk = chunk * geo->raid_disks + dev;
	sector_div(vchunk, geo->near_copies);
	return (vchunk << geo->chunk_shift) + offset;
}

/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */

/*
 * FIXME: possibly should rethink readbalancing and do it differently
 * depending on near_copies / far_copies geometry.
 */
static struct md_rdev *read_balance(struct r10conf *conf,
				    struct r10bio *r10_bio,
				    int *max_sectors)
{
	const sector_t this_sector = r10_bio->sector;
	int disk, slot;
	int sectors = r10_bio->sectors;
	int best_good_sectors;
	sector_t new_distance, best_dist;
	struct md_rdev *best_rdev, *rdev = NULL;
	int do_balance;
	int best_slot;
	struct geom *geo = &conf->geo;

	raid10_find_phys(conf, r10_bio);
	rcu_read_lock();
retry:
	sectors = r10_bio->sectors;
	best_slot = -1;
	best_rdev = NULL;
	best_dist = MaxSector;
	best_good_sectors = 0;
	do_balance = 1;
	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on (recovery is ok), or below
	 * the resync window. We take the first readable disk when
	 * above the resync window.
	 */
	if (conf->mddev->recovery_cp < MaxSector
	    && (this_sector + sectors >= conf->next_resync))
		do_balance = 0;

	for (slot = 0; slot < conf->copies ; slot++) {
		sector_t first_bad;
		int bad_sectors;
		sector_t dev_sector;

		if (r10_bio->devs[slot].bio == IO_BLOCKED)
			continue;
		disk = r10_bio->devs[slot].devnum;
		rdev = rcu_dereference(conf->mirrors[disk].replacement);
		if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
			rdev = rcu_dereference(conf->mirrors[disk].rdev);
		if (rdev == NULL ||
		    test_bit(Faulty, &rdev->flags))
			continue;
		if (!test_bit(In_sync, &rdev->flags) &&
		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
			continue;

		dev_sector = r10_bio->devs[slot].addr;
		if (is_badblock(rdev, dev_sector, sectors,
				&first_bad, &bad_sectors)) {
			if (best_dist < MaxSector)
				/* Already have a better slot */
				continue;
			if (first_bad <= dev_sector) {
				/* Cannot read here.  If this is the
				 * 'primary' device, then we must not read
				 * beyond 'bad_sectors' from another device.
				 */
				bad_sectors -= (dev_sector - first_bad);
				if (!do_balance && sectors > bad_sectors)
					sectors = bad_sectors;
				if (best_good_sectors > sectors)
					best_good_sectors = sectors;
			} else {
				sector_t good_sectors =
					first_bad - dev_sector;
				if (good_sectors > best_good_sectors) {
					best_good_sectors = good_sectors;
					best_slot = slot;
					best_rdev = rdev;
				}
				if (!do_balance)
					/* Must read from here */
					break;
			}
			continue;
		} else
			best_good_sectors = sectors;

		if (!do_balance)
			break;

		/* This optimisation is debatable, and completely destroys
		 * sequential read speed for 'far copies' arrays.  So only
		 * keep it for 'near' arrays, and review those later.
		 */
		if (geo->near_copies > 1 && !atomic_read(&rdev->nr_pending))
			break;

		/* for far > 1 always use the lowest address */
		if (geo->far_copies > 1)
			new_distance = r10_bio->devs[slot].addr;
		else
			new_distance = abs(r10_bio->devs[slot].addr -
					   conf->mirrors[disk].head_position);
		if (new_distance < best_dist) {
			best_dist = new_distance;
			best_slot = slot;
			best_rdev = rdev;
		}
	}
	if (slot >= conf->copies) {
		slot = best_slot;
		rdev = best_rdev;
	}

	if (slot >= 0) {
		atomic_inc(&rdev->nr_pending);
		if (test_bit(Faulty, &rdev->flags)) {
			/* Cannot risk returning a device that failed
			 * before we inc'ed nr_pending
			 */
			rdev_dec_pending(rdev, conf->mddev);
			goto retry;
		}
		r10_bio->read_slot = slot;
	} else
		rdev = NULL;
	rcu_read_unlock();
	*max_sectors = best_good_sectors;

	return rdev;
}

static int raid10_congested(struct mddev *mddev, int bits)
{
	struct r10conf *conf = mddev->private;
	int i, ret = 0;

	if ((bits & (1 << WB_async_congested)) &&
	    conf->pending_count >= max_queued_requests)
		return 1;

	rcu_read_lock();
	for (i = 0;
	     (i < conf->geo.raid_disks || i < conf->prev.raid_disks)
		     && ret == 0;
	     i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);

			ret |= bdi_congested(&q->backing_dev_info, bits);
		}
	}
	rcu_read_unlock();
	return ret;
}

static void flush_pending_writes(struct r10conf *conf)
{
	/* Any writes that have been queued but are awaiting
	 * bitmap updates get flushed here.
	 */
	spin_lock_irq(&conf->device_lock);

	if (conf->pending_bio_list.head) {
		struct bio *bio;
		bio = bio_list_get(&conf->pending_bio_list);
		conf->pending_count = 0;
		spin_unlock_irq(&conf->device_lock);
		/* flush any pending bitmap writes to disk
		 * before proceeding w/ I/O */
		bitmap_unplug(conf->mddev->bitmap);
		wake_up(&conf->wait_barrier);

		while (bio) { /* submit pending writes */
			struct bio *next = bio->bi_next;
			bio->bi_next = NULL;
			if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
			    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
				/* Just ignore it */
				bio_endio(bio);
			else
				generic_make_request(bio);
			bio = next;
		}
	} else
		spin_unlock_irq(&conf->device_lock);
}

/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down.  This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'.  When that returns there
 * is no background IO happening.  It must arrange to call
 * allow_barrier when it has finished its IO.
 * Background IO calls must call raise_barrier.  Once that returns
 * there is no normal IO happening.
 * It must arrange to call
 * lower_barrier when the particular background IO completes.
 */

static void raise_barrier(struct r10conf *conf, int force)
{
	BUG_ON(force && !conf->barrier);
	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting (unless 'force') */
	wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
			    conf->resync_lock);

	/* block any new IO from starting */
	conf->barrier++;

	/* Now wait for all pending IO to complete */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
			    conf->resync_lock);

	spin_unlock_irq(&conf->resync_lock);
}

static void lower_barrier(struct r10conf *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->barrier--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void wait_barrier(struct r10conf *conf)
{
	spin_lock_irq(&conf->resync_lock);
	if (conf->barrier) {
		conf->nr_waiting++;
		/* Wait for the barrier to drop.
		 * However if there are already pending
		 * requests (preventing the barrier from
		 * rising completely), and the
		 * pre-process bio queue isn't empty,
		 * then don't wait, as we need to empty
		 * that queue to get the nr_pending
		 * count down.
		 */
		wait_event_lock_irq(conf->wait_barrier,
				    !conf->barrier ||
				    (conf->nr_pending &&
				     current->bio_list &&
				     !bio_list_empty(current->bio_list)),
				    conf->resync_lock);
		conf->nr_waiting--;
	}
	conf->nr_pending++;
	spin_unlock_irq(&conf->resync_lock);
}

static void allow_barrier(struct r10conf *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->nr_pending--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void freeze_array(struct r10conf *conf, int extra)
{
	/* stop syncio and normal IO and wait for everything to
	 * go quiet.
	 * We increment barrier and nr_waiting, and then
	 * wait until nr_pending matches nr_queued+extra.
	 * This is called in the context of one normal IO request
	 * that has failed. Thus any sync request that might be pending
	 * will be blocked by nr_pending, and we need to wait for
	 * pending IO requests to complete or be queued for re-try.
	 * Thus the number queued (nr_queued) plus this request (extra)
	 * must match the number of pending IOs (nr_pending) before
	 * we continue.
	 */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier++;
	conf->nr_waiting++;
	wait_event_lock_irq_cmd(conf->wait_barrier,
				conf->nr_pending == conf->nr_queued+extra,
				conf->resync_lock,
				flush_pending_writes(conf));

	spin_unlock_irq(&conf->resync_lock);
}

static void unfreeze_array(struct r10conf *conf)
{
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier--;
	conf->nr_waiting--;
	wake_up(&conf->wait_barrier);
	spin_unlock_irq(&conf->resync_lock);
}

static sector_t choose_data_offset(struct r10bio *r10_bio,
				   struct md_rdev *rdev)
{
	if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) ||
	    test_bit(R10BIO_Previous, &r10_bio->state))
		return rdev->data_offset;
	else
		return rdev->new_data_offset;
}

struct raid10_plug_cb {
	struct blk_plug_cb	cb;
	struct bio_list		pending;
	int			pending_cnt;
};

static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct raid10_plug_cb *plug = container_of(cb, struct raid10_plug_cb,
						   cb);
	struct mddev *mddev = plug->cb.data;
	struct r10conf *conf = mddev->private;
	struct bio *bio;

	if (from_schedule || current->bio_list) {
		spin_lock_irq(&conf->device_lock);
		bio_list_merge(&conf->pending_bio_list, &plug->pending);
		conf->pending_count += plug->pending_cnt;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_barrier);
		md_wakeup_thread(mddev->thread);
		kfree(plug);
		return;
	}

	/* we aren't scheduling, so we can do the write-out directly. */
	bio = bio_list_get(&plug->pending);
	bitmap_unplug(mddev->bitmap);
	wake_up(&conf->wait_barrier);

	while (bio) { /* submit pending writes */
		struct bio *next = bio->bi_next;
		bio->bi_next = NULL;
		if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
		    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
			/* Just ignore it */
			bio_endio(bio);
		else
			generic_make_request(bio);
		bio = next;
	}
	kfree(plug);
}

static void __make_request(struct mddev *mddev, struct bio *bio)
{
	struct r10conf *conf = mddev->private;
	struct r10bio *r10_bio;
	struct bio *read_bio;
	int i;
	const int op = bio_op(bio);
	const int rw = bio_data_dir(bio);
	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
	const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
	unsigned long flags;
	struct md_rdev *blocked_rdev;
	struct blk_plug_cb *cb;
	struct raid10_plug_cb *plug = NULL;
	int sectors_handled;
	int max_sectors;
	int sectors;

	/*
	 * Register the new request and wait if the reconstruction
	 * thread has put up a bar for new requests.
	 * Continue immediately if no resync is active currently.
	 */
	wait_barrier(conf);

	sectors = bio_sectors(bio);
	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
	    bio->bi_iter.bi_sector < conf->reshape_progress &&
	    bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
		/* IO spans the reshape position.
		 * Need to wait for reshape to pass.
		 */
		allow_barrier(conf);
		wait_event(conf->wait_barrier,
			   conf->reshape_progress <= bio->bi_iter.bi_sector ||
			   conf->reshape_progress >= bio->bi_iter.bi_sector +
			   sectors);
		wait_barrier(conf);
	}
	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
	    bio_data_dir(bio) == WRITE &&
	    (mddev->reshape_backwards
	     ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
		bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
	     : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
		bio->bi_iter.bi_sector < conf->reshape_progress))) {
		/* Need to update reshape_position in metadata */
		mddev->reshape_position = conf->reshape_progress;
		set_mask_bits(&mddev->flags, 0,
			      BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
		md_wakeup_thread(mddev->thread);
		wait_event(mddev->sb_wait,
			   !test_bit(MD_CHANGE_PENDING, &mddev->flags));

		conf->reshape_safe = mddev->reshape_position;
	}

	r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);

	r10_bio->master_bio = bio;
	r10_bio->sectors = sectors;

	r10_bio->mddev = mddev;
	r10_bio->sector = bio->bi_iter.bi_sector;
	r10_bio->state = 0;

	/* We might need to issue multiple reads to different
	 * devices if there are bad blocks around, so we keep
	 * track of the number of reads in bio->bi_phys_segments.
	 * If this is 0, there is only one r10_bio and no locking
	 * will be needed when the request completes.  If it is
	 * non-zero, then it is the number of not-completed requests.
	 */
	bio->bi_phys_segments = 0;
	bio_clear_flag(bio, BIO_SEG_VALID);

	if (rw == READ) {
		/*
		 * read balancing logic:
		 */
		struct md_rdev *rdev;
		int slot;

read_again:
		rdev = read_balance(conf, r10_bio, &max_sectors);
		if (!rdev) {
			raid_end_bio_io(r10_bio);
			return;
		}
		slot = r10_bio->read_slot;

		read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
		bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector,
			 max_sectors);

		r10_bio->devs[slot].bio = read_bio;
		r10_bio->devs[slot].rdev = rdev;

		read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
			choose_data_offset(r10_bio, rdev);
		read_bio->bi_bdev = rdev->bdev;
		read_bio->bi_end_io = raid10_end_read_request;
		bio_set_op_attrs(read_bio, op, do_sync);
		read_bio->bi_private = r10_bio;

		if (max_sectors < r10_bio->sectors) {
			/* Could not read all from this device, so we will
			 * need another r10_bio.
			 */
			sectors_handled = (r10_bio->sector + max_sectors
					   - bio->bi_iter.bi_sector);
			r10_bio->sectors = max_sectors;
			spin_lock_irq(&conf->device_lock);
			if (bio->bi_phys_segments == 0)
				bio->bi_phys_segments = 2;
			else
				bio->bi_phys_segments++;
			spin_unlock_irq(&conf->device_lock);
			/* Cannot call generic_make_request directly
			 * as that will be queued in __generic_make_request
			 * and subsequent mempool_alloc might block
			 * waiting for it.  So hand bio over to raid10d.
			 */
			reschedule_retry(r10_bio);

			r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);

			r10_bio->master_bio = bio;
			r10_bio->sectors = bio_sectors(bio) - sectors_handled;
			r10_bio->state = 0;
			r10_bio->mddev = mddev;
			r10_bio->sector = bio->bi_iter.bi_sector +
				sectors_handled;
			goto read_again;
		} else
			generic_make_request(read_bio);
		return;
	}

	/*
	 * WRITE:
	 */
	if (conf->pending_count >= max_queued_requests) {
		md_wakeup_thread(mddev->thread);
		wait_event(conf->wait_barrier,
			   conf->pending_count < max_queued_requests);
	}
	/* first select target devices under rcu_lock and
	 * inc refcount on their rdev.  Record them by setting
	 * bios[x] to bio
	 * If there are known/acknowledged bad blocks on any device
	 * on which we have seen a write error, we want to avoid
	 * writing to those blocks.  This potentially requires several
	 * writes to write around the bad blocks.  Each set of writes
	 * gets its own r10_bio with a set of bios attached.  The number
	 * of r10_bios is recorded in bio->bi_phys_segments just as with
	 * the read case.
	 */

	r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
	raid10_find_phys(conf, r10_bio);
retry_write:
	blocked_rdev = NULL;
	rcu_read_lock();
	max_sectors = r10_bio->sectors;

	for (i = 0; i < conf->copies; i++) {
		int d = r10_bio->devs[i].devnum;
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
		struct md_rdev *rrdev = rcu_dereference(
			conf->mirrors[d].replacement);
		if (rdev == rrdev)
			rrdev = NULL;
		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
			atomic_inc(&rdev->nr_pending);
			blocked_rdev = rdev;
			break;
		}
		if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) {
			atomic_inc(&rrdev->nr_pending);
			blocked_rdev = rrdev;
			break;
		}
		if (rdev && (test_bit(Faulty, &rdev->flags)))
			rdev = NULL;
		if (rrdev && (test_bit(Faulty, &rrdev->flags)))
			rrdev = NULL;

		r10_bio->devs[i].bio = NULL;
		r10_bio->devs[i].repl_bio = NULL;

		if (!rdev && !rrdev) {
			set_bit(R10BIO_Degraded, &r10_bio->state);
			continue;
		}
		if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
			sector_t first_bad;
			sector_t dev_sector = r10_bio->devs[i].addr;
			int bad_sectors;
			int is_bad;

			is_bad = is_badblock(rdev, dev_sector,
					     max_sectors,
					     &first_bad, &bad_sectors);
			if (is_bad < 0) {
				/* Mustn't write here until the bad block
				 * is acknowledged
				 */
				atomic_inc(&rdev->nr_pending);
				set_bit(BlockedBadBlocks, &rdev->flags);
				blocked_rdev = rdev;
				break;
			}
			if (is_bad && first_bad <= dev_sector) {
				/* Cannot write here at all */
				bad_sectors -= (dev_sector - first_bad);
				if (bad_sectors < max_sectors)
					/* Mustn't write more than bad_sectors
					 * to other devices yet
					 */
					max_sectors = bad_sectors;
				/* We don't set R10BIO_Degraded as that
				 * only applies if the disk is missing,
				 * so it might be re-added, and we want to
				 * know to recover this chunk.
				 * In this case the device is here, and the
				 * fact that this chunk is not in-sync is
				 * recorded in the bad block log.
				 */
				continue;
			}
			if (is_bad) {
				int good_sectors = first_bad - dev_sector;
				if (good_sectors < max_sectors)
					max_sectors = good_sectors;
			}
		}
		if (rdev) {
			r10_bio->devs[i].bio = bio;
			atomic_inc(&rdev->nr_pending);
		}
		if (rrdev) {
			r10_bio->devs[i].repl_bio = bio;
			atomic_inc(&rrdev->nr_pending);
		}
	}
	rcu_read_unlock();

	if (unlikely(blocked_rdev)) {
		/* Have to wait for this device to get unblocked, then retry */
		int j;
		int d;

		for (j = 0; j < i; j++) {
			if (r10_bio->devs[j].bio) {
				d = r10_bio->devs[j].devnum;
				rdev_dec_pending(conf->mirrors[d].rdev, mddev);
			}
			if (r10_bio->devs[j].repl_bio) {
				struct md_rdev *rdev;
				d = r10_bio->devs[j].devnum;
				rdev = conf->mirrors[d].replacement;
				if (!rdev) {
					/* Race with remove_disk */
					smp_mb();
					rdev = conf->mirrors[d].rdev;
				}
				rdev_dec_pending(rdev, mddev);
			}
		}
		allow_barrier(conf);
		md_wait_for_blocked_rdev(blocked_rdev, mddev);
		wait_barrier(conf);
		goto retry_write;
	}

	if (max_sectors < r10_bio->sectors) {
		/* We are splitting this into multiple parts, so
		 * we need to prepare for allocating another r10_bio.
		 */
		r10_bio->sectors = max_sectors;
		spin_lock_irq(&conf->device_lock);
		if (bio->bi_phys_segments == 0)
			bio->bi_phys_segments = 2;
		else
			bio->bi_phys_segments++;
		spin_unlock_irq(&conf->device_lock);
	}
	sectors_handled = r10_bio->sector + max_sectors -
		bio->bi_iter.bi_sector;

	atomic_set(&r10_bio->remaining, 1);
	bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);

	for (i = 0; i < conf->copies; i++) {
		struct bio *mbio;
		int d = r10_bio->devs[i].devnum;
		if (r10_bio->devs[i].bio) {
			struct md_rdev *rdev = conf->mirrors[d].rdev;
			mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
			bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
				 max_sectors);
			r10_bio->devs[i].bio = mbio;

			mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr +
						   choose_data_offset(r10_bio,
								      rdev));
			mbio->bi_bdev = rdev->bdev;
			mbio->bi_end_io = raid10_end_write_request;
			bio_set_op_attrs(mbio, op, do_sync | do_fua);
			mbio->bi_private = r10_bio;

			atomic_inc(&r10_bio->remaining);

			cb = blk_check_plugged(raid10_unplug, mddev,
					       sizeof(*plug));
			if (cb)
				plug = container_of(cb, struct raid10_plug_cb,
						    cb);
			else
				plug = NULL;
			spin_lock_irqsave(&conf->device_lock, flags);
			if (plug) {
				bio_list_add(&plug->pending, mbio);
				plug->pending_cnt++;
			} else {
				bio_list_add(&conf->pending_bio_list, mbio);
				conf->pending_count++;
			}
			spin_unlock_irqrestore(&conf->device_lock, flags);
			if (!plug)
				md_wakeup_thread(mddev->thread);
		}

		if (r10_bio->devs[i].repl_bio) {
			struct md_rdev *rdev = conf->mirrors[d].replacement;
			if (rdev == NULL) {
				/* Replacement just got moved to main 'rdev' */
				smp_mb();
				rdev = conf->mirrors[d].rdev;
			}
			mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
			bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
				 max_sectors);
			r10_bio->devs[i].repl_bio = mbio;

			mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr +
						   choose_data_offset(
							   r10_bio, rdev));
			mbio->bi_bdev = rdev->bdev;
			mbio->bi_end_io = raid10_end_write_request;
			bio_set_op_attrs(mbio, op, do_sync | do_fua);
			mbio->bi_private = r10_bio;

			atomic_inc(&r10_bio->remaining);
			spin_lock_irqsave(&conf->device_lock, flags);
			bio_list_add(&conf->pending_bio_list, mbio);
			conf->pending_count++;
			spin_unlock_irqrestore(&conf->device_lock, flags);
			if (!mddev_check_plugged(mddev))
				md_wakeup_thread(mddev->thread);
		}
	}

	/* Don't remove the bias on 'remaining' (one_write_done) until
	 * after checking if we need to go around again.
	 */

	if (sectors_handled < bio_sectors(bio)) {
		one_write_done(r10_bio);
		/* We need another r10_bio.  It has already been counted
		 * in bio->bi_phys_segments.
		 */
		r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);

		r10_bio->master_bio = bio;
		r10_bio->sectors = bio_sectors(bio) - sectors_handled;

		r10_bio->mddev = mddev;
		r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
		r10_bio->state = 0;
		goto retry_write;
	}
	one_write_done(r10_bio);
}

static void raid10_make_request(struct mddev *mddev, struct bio *bio)
{
	struct r10conf *conf = mddev->private;
	sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
	int chunk_sects = chunk_mask + 1;

	struct bio *split;

	if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
		md_flush_request(mddev, bio);
		return;
	}

	md_write_start(mddev, bio);

	do {

		/*
		 * If this request crosses a chunk boundary, we need to split
		 * it.
		 */
		if (unlikely((bio->bi_iter.bi_sector & chunk_mask) +
			     bio_sectors(bio) > chunk_sects
			     && (conf->geo.near_copies < conf->geo.raid_disks
				 || conf->prev.near_copies <
				 conf->prev.raid_disks))) {
			split = bio_split(bio, chunk_sects -
					  (bio->bi_iter.bi_sector &
					   (chunk_sects - 1)),
					  GFP_NOIO, fs_bio_set);
			bio_chain(split, bio);
		} else {
			split = bio;
		}

		__make_request(mddev, split);
	} while (split != bio);

	/* In case raid10d snuck in to freeze_array */
	wake_up(&conf->wait_barrier);
}

static void raid10_status(struct seq_file *seq, struct mddev *mddev)
{
	struct r10conf *conf = mddev->private;
	int i;

	if (conf->geo.near_copies < conf->geo.raid_disks)
		seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
	if (conf->geo.near_copies > 1)
		seq_printf(seq, " %d near-copies", conf->geo.near_copies);
	if (conf->geo.far_copies > 1) {
		if (conf->geo.far_offset)
			seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
		else
			seq_printf(seq, " %d far-copies", conf->geo.far_copies);
		if (conf->geo.far_set_size != conf->geo.raid_disks)
			seq_printf(seq, " %d devices per set", conf->geo.far_set_size);
	}
	seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
		   conf->geo.raid_disks - mddev->degraded);
	for (i = 0; i < conf->geo.raid_disks; i++)
		seq_printf(seq, "%s",
			   conf->mirrors[i].rdev &&
			   test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
	seq_printf(seq, "]");
}

/* check if there are enough drives for
 * every block to appear on at least one.
 * Don't consider the device numbered 'ignore'
 * as we might be about to remove it.
 */
static int _enough(struct r10conf *conf, int previous, int ignore)
{
	int first = 0;
	int has_enough = 0;
	int disks, ncopies;
	if (previous) {
		disks = conf->prev.raid_disks;
		ncopies = conf->prev.near_copies;
	} else {
		disks = conf->geo.raid_disks;
		ncopies = conf->geo.near_copies;
	}

	rcu_read_lock();
	do {
		int n = conf->copies;
		int cnt = 0;
		int this = first;
		while (n--) {
			struct md_rdev *rdev;
			if (this != ignore &&
			    (rdev = rcu_dereference(conf->mirrors[this].rdev)) &&
			    test_bit(In_sync, &rdev->flags))
				cnt++;
			this = (this+1) % disks;
		}
		if (cnt == 0)
			goto out;
		first = (first + ncopies) % disks;
	} while (first != 0);
	has_enough = 1;
out:
	rcu_read_unlock();
	return has_enough;
}

static int enough(struct r10conf *conf, int ignore)
{
	/* when calling 'enough', both 'prev' and 'geo' must
	 * be stable.
	 * This is ensured if ->reconfig_mutex or ->device_lock
	 * is held.
	 */
	return _enough(conf, 0, ignore) &&
		_enough(conf, 1, ignore);
}

static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
{
	char b[BDEVNAME_SIZE];
	struct r10conf *conf = mddev->private;
	unsigned long flags;

	/*
	 * If it is not operational, then we have already marked it as dead
	 * else if it is the last working disk, ignore the error, let the
	 * next level up know.
	 * else mark the drive as failed
	 */
	spin_lock_irqsave(&conf->device_lock, flags);
	if (test_bit(In_sync, &rdev->flags)
	    && !enough(conf, rdev->raid_disk)) {
		/*
		 * Don't fail the drive, just return an IO error.
		 */
		spin_unlock_irqrestore(&conf->device_lock, flags);
		return;
	}
	if (test_and_clear_bit(In_sync, &rdev->flags))
		mddev->degraded++;
	/*
	 * If recovery is running, make sure it aborts.
	 */
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	set_bit(Blocked, &rdev->flags);
	set_bit(Faulty, &rdev->flags);
	set_mask_bits(&mddev->flags, 0,
		      BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
	spin_unlock_irqrestore(&conf->device_lock, flags);
	printk(KERN_ALERT
	       "md/raid10:%s: Disk failure on %s, disabling device.\n"
	       "md/raid10:%s: Operation continuing on %d devices.\n",
	       mdname(mddev), bdevname(rdev->bdev, b),
	       mdname(mddev), conf->geo.raid_disks - mddev->degraded);
}

static void print_conf(struct r10conf *conf)
{
	int i;
	struct raid10_info *tmp;

	printk(KERN_DEBUG "RAID10 conf printout:\n");
	if (!conf) {
		printk(KERN_DEBUG "(!conf)\n");
		return;
	}
	printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
		conf->geo.raid_disks);

	for (i = 0; i < conf->geo.raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		tmp = conf->mirrors + i;
		if (tmp->rdev)
			printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
				i, !test_bit(In_sync, &tmp->rdev->flags),
				!test_bit(Faulty, &tmp->rdev->flags),
				bdevname(tmp->rdev->bdev, b));
	}
}

static void close_sync(struct r10conf *conf)
{
	wait_barrier(conf);
	allow_barrier(conf);

	mempool_destroy(conf->r10buf_pool);
	conf->r10buf_pool = NULL;
}

static int raid10_spare_active(struct mddev *mddev)
{
	int i;
	struct r10conf *conf = mddev->private;
	struct raid10_info *tmp;
	int count = 0;
	unsigned long flags;

	/*
	 * Find all non-in_sync disks within the RAID10 configuration
	 * and mark them in_sync
	 */
	for (i = 0; i < conf->geo.raid_disks; i++) {
		tmp = conf->mirrors + i;
		if (tmp->replacement
		    && tmp->replacement->recovery_offset == MaxSector
		    && !test_bit(Faulty, &tmp->replacement->flags)
		    && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
			/* Replacement has just become active */
			if (!tmp->rdev
			    || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
				count++;
			if (tmp->rdev) {
				/* Replaced device not technically faulty,
				 * but we need to be sure it gets removed
				 * and never re-added.
				 */
				set_bit(Faulty, &tmp->rdev->flags);
				sysfs_notify_dirent_safe(
					tmp->rdev->sysfs_state);
			}
			sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
		} else if (tmp->rdev
			   && tmp->rdev->recovery_offset == MaxSector
			   && !test_bit(Faulty, &tmp->rdev->flags)
			   && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
			count++;
			sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
		}
	}
	spin_lock_irqsave(&conf->device_lock, flags);
	mddev->degraded -= count;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	print_conf(conf);
	return count;
}

static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct r10conf *conf = mddev->private;
	int err = -EEXIST;
	int mirror;
	int first = 0;
	int last = conf->geo.raid_disks - 1;

	if (mddev->recovery_cp < MaxSector)
		/* only hot-add to in-sync arrays, as recovery is
		 * very different from resync
		 */
		return -EBUSY;
	if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1))
		return -EINVAL;

	if (md_integrity_add_rdev(rdev, mddev))
		return -ENXIO;

	if (rdev->raid_disk >= 0)
		first = last = rdev->raid_disk;

	if (rdev->saved_raid_disk >= first &&
	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
		mirror = rdev->saved_raid_disk;
	else
		mirror = first;
	for ( ; mirror <= last ; mirror++) {
		struct raid10_info *p = &conf->mirrors[mirror];
		if (p->recovery_disabled == mddev->recovery_disabled)
			continue;
		if (p->rdev) {
			if (!test_bit(WantReplacement, &p->rdev->flags) ||
			    p->replacement != NULL)
				continue;
			clear_bit(In_sync, &rdev->flags);
			set_bit(Replacement, &rdev->flags);
			rdev->raid_disk = mirror;
			err = 0;
			if (mddev->gendisk)
				disk_stack_limits(mddev->gendisk, rdev->bdev,
						  rdev->data_offset << 9);
			conf->fullsync = 1;
			rcu_assign_pointer(p->replacement, rdev);
			break;
		}

		if (mddev->gendisk)
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);

		p->head_position = 0;
		p->recovery_disabled = mddev->recovery_disabled - 1;
		rdev->raid_disk = mirror;
		err = 0;
		if (rdev->saved_raid_disk != mirror)
			conf->fullsync = 1;
		rcu_assign_pointer(p->rdev, rdev);
		break;
	}
	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);

	print_conf(conf);
	return err;
}

static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct r10conf *conf = mddev->private;
	int err = 0;
	int number = rdev->raid_disk;
	struct md_rdev **rdevp;
	struct raid10_info *p = conf->mirrors + number;

	print_conf(conf);
	if (rdev == p->rdev)
		rdevp = &p->rdev;
	else if (rdev == p->replacement)
		rdevp = &p->replacement;
	else
		return 0;

	if (test_bit(In_sync, &rdev->flags) ||
	    atomic_read(&rdev->nr_pending)) {
		err = -EBUSY;
		goto abort;
	}
	/* Only remove faulty devices if recovery
	 * is not possible.
	 */
	if (!test_bit(Faulty, &rdev->flags) &&
	    mddev->recovery_disabled != p->recovery_disabled &&
	    (!p->replacement || p->replacement == rdev) &&
	    number < conf->geo.raid_disks &&
	    enough(conf, -1)) {
		err = -EBUSY;
		goto abort;
	}
	*rdevp = NULL;
	synchronize_rcu();
	if (atomic_read(&rdev->nr_pending)) {
		/* lost the race, try later */
		err = -EBUSY;
		*rdevp = rdev;
		goto abort;
	} else if (p->replacement) {
		/* We must have just cleared 'rdev' */
		p->rdev = p->replacement;
		clear_bit(Replacement, &p->replacement->flags);
		smp_mb(); /* Make sure other CPUs may see both as identical
			   * but will never see neither -- if they are careful.
			   */
		p->replacement = NULL;
		clear_bit(WantReplacement, &rdev->flags);
	} else
		/* We might have just removed the Replacement as faulty;
		 * clear the flag just in case.
		 */
		clear_bit(WantReplacement, &rdev->flags);

	err = md_integrity_register(mddev);

abort:

	print_conf(conf);
	return err;
}

static void end_sync_read(struct bio *bio)
{
	struct r10bio *r10_bio = bio->bi_private;
	struct r10conf *conf = r10_bio->mddev->private;
	int d;

	if (bio == r10_bio->master_bio) {
		/* this is a reshape read */
		d = r10_bio->read_slot; /* really the read dev */
	} else
		d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);

	if (!bio->bi_error)
		set_bit(R10BIO_Uptodate, &r10_bio->state);
	else
		/* The write handler will notice the lack of
		 * R10BIO_Uptodate and record any errors etc
		 */
		atomic_add(r10_bio->sectors,
			   &conf->mirrors[d].rdev->corrected_errors);

	/* for reconstruct, we always reschedule after a read.
	 * for resync, only after all reads
	 */
	rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
	if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
	    atomic_dec_and_test(&r10_bio->remaining)) {
		/* we have read all the blocks,
		 * do the comparison in process context in raid10d
		 */
		reschedule_retry(r10_bio);
	}
}

static void end_sync_request(struct r10bio *r10_bio)
{
	struct mddev *mddev = r10_bio->mddev;

	while (atomic_dec_and_test(&r10_bio->remaining)) {
		if (r10_bio->master_bio == NULL) {
			/* the primary of several recovery bios */
			sector_t s = r10_bio->sectors;
			if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
			    test_bit(R10BIO_WriteError, &r10_bio->state))
				reschedule_retry(r10_bio);
			else
				put_buf(r10_bio);
			md_done_sync(mddev, s, 1);
			break;
		} else {
			struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio;
			if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
			    test_bit(R10BIO_WriteError, &r10_bio->state))
				reschedule_retry(r10_bio);
			else
				put_buf(r10_bio);
			r10_bio = r10_bio2;
		}
	}
}

static void end_sync_write(struct bio *bio)
{
	struct r10bio *r10_bio = bio->bi_private;
	struct mddev *mddev = r10_bio->mddev;
	struct r10conf *conf = mddev->private;
	int d;
	sector_t first_bad;
	int bad_sectors;
	int slot;
	int repl;
	struct md_rdev *rdev = NULL;

	d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
	if (repl)
		rdev = conf->mirrors[d].replacement;
	else
		rdev = conf->mirrors[d].rdev;

	if (bio->bi_error) {
		if (repl)
			md_error(mddev, rdev);
		else {
			set_bit(WriteErrorSeen, &rdev->flags);
			if (!test_and_set_bit(WantReplacement, &rdev->flags))
				set_bit(MD_RECOVERY_NEEDED,
					&rdev->mddev->recovery);
			set_bit(R10BIO_WriteError, &r10_bio->state);
		}
	} else if (is_badblock(rdev,
			       r10_bio->devs[slot].addr,
			       r10_bio->sectors,
			       &first_bad, &bad_sectors))
		set_bit(R10BIO_MadeGood, &r10_bio->state);

	rdev_dec_pending(rdev, mddev);

	end_sync_request(r10_bio);
}

/*
 * Note: sync and recovery are handled very differently for raid10.
 * This code is for resync.
 * For resync, we read through virtual addresses and read all blocks.
 * If there is any error, we schedule a write.  The lowest numbered
 * drive is authoritative.
 * However requests come for physical addresses, so we need to map.
 * For every physical address there are raid_disks/copies virtual addresses,
 * which is always at least one, but is not necessarily an integer.
 * This means that a physical address can span multiple chunks, so we may
 * have to submit multiple io requests for a single sync request.
1921 */ 1922 /* 1923 * We check if all blocks are in-sync and only write to blocks that 1924 * aren't in sync 1925 */ 1926 static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) 1927 { 1928 struct r10conf *conf = mddev->private; 1929 int i, first; 1930 struct bio *tbio, *fbio; 1931 int vcnt; 1932 1933 atomic_set(&r10_bio->remaining, 1); 1934 1935 /* find the first device with a block */ 1936 for (i=0; i<conf->copies; i++) 1937 if (!r10_bio->devs[i].bio->bi_error) 1938 break; 1939 1940 if (i == conf->copies) 1941 goto done; 1942 1943 first = i; 1944 fbio = r10_bio->devs[i].bio; 1945 fbio->bi_iter.bi_size = r10_bio->sectors << 9; 1946 fbio->bi_iter.bi_idx = 0; 1947 1948 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9); 1949 /* now find blocks with errors */ 1950 for (i=0 ; i < conf->copies ; i++) { 1951 int j, d; 1952 1953 tbio = r10_bio->devs[i].bio; 1954 1955 if (tbio->bi_end_io != end_sync_read) 1956 continue; 1957 if (i == first) 1958 continue; 1959 if (!r10_bio->devs[i].bio->bi_error) { 1960 /* We know that the bi_io_vec layout is the same for 1961 * both 'first' and 'i', so we just compare them. 1962 * All vec entries are PAGE_SIZE; 1963 */ 1964 int sectors = r10_bio->sectors; 1965 for (j = 0; j < vcnt; j++) { 1966 int len = PAGE_SIZE; 1967 if (sectors < (len / 512)) 1968 len = sectors * 512; 1969 if (memcmp(page_address(fbio->bi_io_vec[j].bv_page), 1970 page_address(tbio->bi_io_vec[j].bv_page), 1971 len)) 1972 break; 1973 sectors -= len/512; 1974 } 1975 if (j == vcnt) 1976 continue; 1977 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches); 1978 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) 1979 /* Don't fix anything. */ 1980 continue; 1981 } 1982 /* Ok, we need to write this bio, either to correct an 1983 * inconsistency or to correct an unreadable block. 1984 * First we need to fixup bv_offset, bv_len and 1985 * bi_vecs, as the read request might have corrupted these 1986 */ 1987 bio_reset(tbio); 1988 1989 tbio->bi_vcnt = vcnt; 1990 tbio->bi_iter.bi_size = fbio->bi_iter.bi_size; 1991 tbio->bi_private = r10_bio; 1992 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; 1993 tbio->bi_end_io = end_sync_write; 1994 bio_set_op_attrs(tbio, REQ_OP_WRITE, 0); 1995 1996 bio_copy_data(tbio, fbio); 1997 1998 d = r10_bio->devs[i].devnum; 1999 atomic_inc(&conf->mirrors[d].rdev->nr_pending); 2000 atomic_inc(&r10_bio->remaining); 2001 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio)); 2002 2003 tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset; 2004 tbio->bi_bdev = conf->mirrors[d].rdev->bdev; 2005 generic_make_request(tbio); 2006 } 2007 2008 /* Now write out to any replacement devices 2009 * that are active 2010 */ 2011 for (i = 0; i < conf->copies; i++) { 2012 int d; 2013 2014 tbio = r10_bio->devs[i].repl_bio; 2015 if (!tbio || !tbio->bi_end_io) 2016 continue; 2017 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write 2018 && r10_bio->devs[i].bio != fbio) 2019 bio_copy_data(tbio, fbio); 2020 d = r10_bio->devs[i].devnum; 2021 atomic_inc(&r10_bio->remaining); 2022 md_sync_acct(conf->mirrors[d].replacement->bdev, 2023 bio_sectors(tbio)); 2024 generic_make_request(tbio); 2025 } 2026 2027 done: 2028 if (atomic_dec_and_test(&r10_bio->remaining)) { 2029 md_done_sync(mddev, r10_bio->sectors, 1); 2030 put_buf(r10_bio); 2031 } 2032 } 2033 2034 /* 2035 * Now for the recovery code. 2036 * Recovery happens across physical sectors. 
2037 * We recover all non-in_sync drives by finding the virtual address of 2038 * each, and then choose a working drive that also has that virt address. 2039 * There is a separate r10_bio for each non-in_sync drive. 2040 * Only the first two slots are in use: the first for reading, 2041 * the second for writing. 2042 * 2043 */ 2044 static void fix_recovery_read_error(struct r10bio *r10_bio) 2045 { 2046 /* We got a read error during recovery. 2047 * We repeat the read in smaller page-sized sections. 2048 * If a read succeeds, write it to the new device or record 2049 * a bad block if we cannot. 2050 * If a read fails, record a bad block on both old and 2051 * new devices. 2052 */ 2053 struct mddev *mddev = r10_bio->mddev; 2054 struct r10conf *conf = mddev->private; 2055 struct bio *bio = r10_bio->devs[0].bio; 2056 sector_t sect = 0; 2057 int sectors = r10_bio->sectors; 2058 int idx = 0; 2059 int dr = r10_bio->devs[0].devnum; 2060 int dw = r10_bio->devs[1].devnum; 2061 2062 while (sectors) { 2063 int s = sectors; 2064 struct md_rdev *rdev; 2065 sector_t addr; 2066 int ok; 2067 2068 if (s > (PAGE_SIZE>>9)) 2069 s = PAGE_SIZE >> 9; 2070 2071 rdev = conf->mirrors[dr].rdev; 2072 addr = r10_bio->devs[0].addr + sect; 2073 ok = sync_page_io(rdev, 2074 addr, 2075 s << 9, 2076 bio->bi_io_vec[idx].bv_page, 2077 REQ_OP_READ, 0, false); 2078 if (ok) { 2079 rdev = conf->mirrors[dw].rdev; 2080 addr = r10_bio->devs[1].addr + sect; 2081 ok = sync_page_io(rdev, 2082 addr, 2083 s << 9, 2084 bio->bi_io_vec[idx].bv_page, 2085 REQ_OP_WRITE, 0, false); 2086 if (!ok) { 2087 set_bit(WriteErrorSeen, &rdev->flags); 2088 if (!test_and_set_bit(WantReplacement, 2089 &rdev->flags)) 2090 set_bit(MD_RECOVERY_NEEDED, 2091 &rdev->mddev->recovery); 2092 } 2093 } 2094 if (!ok) { 2095 /* We don't worry if we cannot set a bad block - 2096 * it really is bad so there is no loss in not 2097 * recording it yet 2098 */ 2099 rdev_set_badblocks(rdev, addr, s, 0); 2100 2101 if (rdev != conf->mirrors[dw].rdev) { 2102 /* need bad block on destination too */ 2103 struct md_rdev *rdev2 = conf->mirrors[dw].rdev; 2104 addr = r10_bio->devs[1].addr + sect; 2105 ok = rdev_set_badblocks(rdev2, addr, s, 0); 2106 if (!ok) { 2107 /* just abort the recovery */ 2108 printk(KERN_NOTICE 2109 "md/raid10:%s: recovery aborted" 2110 " due to read error\n", 2111 mdname(mddev)); 2112 2113 conf->mirrors[dw].recovery_disabled 2114 = mddev->recovery_disabled; 2115 set_bit(MD_RECOVERY_INTR, 2116 &mddev->recovery); 2117 break; 2118 } 2119 } 2120 } 2121 2122 sectors -= s; 2123 sect += s; 2124 idx++; 2125 } 2126 } 2127 2128 static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio) 2129 { 2130 struct r10conf *conf = mddev->private; 2131 int d; 2132 struct bio *wbio, *wbio2; 2133 2134 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) { 2135 fix_recovery_read_error(r10_bio); 2136 end_sync_request(r10_bio); 2137 return; 2138 } 2139 2140 /* 2141 * share the pages with the first bio 2142 * and submit the write request 2143 */ 2144 d = r10_bio->devs[1].devnum; 2145 wbio = r10_bio->devs[1].bio; 2146 wbio2 = r10_bio->devs[1].repl_bio; 2147 /* Need to test wbio2->bi_end_io before we call 2148 * generic_make_request as if the former is NULL, 2149 * the latter is free to free wbio2.
2150 */ 2151 if (wbio2 && !wbio2->bi_end_io) 2152 wbio2 = NULL; 2153 if (wbio->bi_end_io) { 2154 atomic_inc(&conf->mirrors[d].rdev->nr_pending); 2155 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio)); 2156 generic_make_request(wbio); 2157 } 2158 if (wbio2) { 2159 atomic_inc(&conf->mirrors[d].replacement->nr_pending); 2160 md_sync_acct(conf->mirrors[d].replacement->bdev, 2161 bio_sectors(wbio2)); 2162 generic_make_request(wbio2); 2163 } 2164 } 2165 2166 /* 2167 * Used by fix_read_error() to decay the per-rdev read_errors. 2168 * We halve the read error count for every hour that has elapsed 2169 * since the last recorded read error. 2170 * 2171 */ 2172 static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev) 2173 { 2174 struct timespec cur_time_mon; 2175 unsigned long hours_since_last; 2176 unsigned int read_errors = atomic_read(&rdev->read_errors); 2177 2178 ktime_get_ts(&cur_time_mon); 2179 2180 if (rdev->last_read_error.tv_sec == 0 && 2181 rdev->last_read_error.tv_nsec == 0) { 2182 /* first time we've seen a read error */ 2183 rdev->last_read_error = cur_time_mon; 2184 return; 2185 } 2186 2187 hours_since_last = (cur_time_mon.tv_sec - 2188 rdev->last_read_error.tv_sec) / 3600; 2189 2190 rdev->last_read_error = cur_time_mon; 2191 2192 /* 2193 * if hours_since_last is > the number of bits in read_errors 2194 * just set read errors to 0. We do this to avoid 2195 * overflowing the shift of read_errors by hours_since_last. 2196 */ 2197 if (hours_since_last >= 8 * sizeof(read_errors)) 2198 atomic_set(&rdev->read_errors, 0); 2199 else 2200 atomic_set(&rdev->read_errors, read_errors >> hours_since_last); 2201 } 2202 2203 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector, 2204 int sectors, struct page *page, int rw) 2205 { 2206 sector_t first_bad; 2207 int bad_sectors; 2208 2209 if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors) 2210 && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags))) 2211 return -1; 2212 if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false)) 2213 /* success */ 2214 return 1; 2215 if (rw == WRITE) { 2216 set_bit(WriteErrorSeen, &rdev->flags); 2217 if (!test_and_set_bit(WantReplacement, &rdev->flags)) 2218 set_bit(MD_RECOVERY_NEEDED, 2219 &rdev->mddev->recovery); 2220 } 2221 /* need to record an error - either for the block or the device */ 2222 if (!rdev_set_badblocks(rdev, sector, sectors, 0)) 2223 md_error(rdev->mddev, rdev); 2224 return 0; 2225 } 2226 2227 /* 2228 * This is a kernel thread which: 2229 * 2230 * 1. Retries failed read operations on working mirrors. 2231 * 2. Updates the raid superblock when problems are encountered. 2232 * 3. Performs writes following reads for array synchronising. 2233 */ 2234 2235 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio) 2236 { 2237 int sect = 0; /* Offset from r10_bio->sector */ 2238 int sectors = r10_bio->sectors; 2239 struct md_rdev *rdev; 2240 int max_read_errors = atomic_read(&mddev->max_corr_read_errors); 2241 int d = r10_bio->devs[r10_bio->read_slot].devnum; 2242 2243 /* still own a reference to this rdev, so it cannot 2244 * have been cleared recently.
2245 */ 2246 rdev = conf->mirrors[d].rdev; 2247 2248 if (test_bit(Faulty, &rdev->flags)) 2249 /* drive has already been failed, just ignore any 2250 more fix_read_error() attempts */ 2251 return; 2252 2253 check_decay_read_errors(mddev, rdev); 2254 atomic_inc(&rdev->read_errors); 2255 if (atomic_read(&rdev->read_errors) > max_read_errors) { 2256 char b[BDEVNAME_SIZE]; 2257 bdevname(rdev->bdev, b); 2258 2259 printk(KERN_NOTICE 2260 "md/raid10:%s: %s: Raid device exceeded " 2261 "read_error threshold [cur %d:max %d]\n", 2262 mdname(mddev), b, 2263 atomic_read(&rdev->read_errors), max_read_errors); 2264 printk(KERN_NOTICE 2265 "md/raid10:%s: %s: Failing raid device\n", 2266 mdname(mddev), b); 2267 md_error(mddev, conf->mirrors[d].rdev); 2268 r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED; 2269 return; 2270 } 2271 2272 while(sectors) { 2273 int s = sectors; 2274 int sl = r10_bio->read_slot; 2275 int success = 0; 2276 int start; 2277 2278 if (s > (PAGE_SIZE>>9)) 2279 s = PAGE_SIZE >> 9; 2280 2281 rcu_read_lock(); 2282 do { 2283 sector_t first_bad; 2284 int bad_sectors; 2285 2286 d = r10_bio->devs[sl].devnum; 2287 rdev = rcu_dereference(conf->mirrors[d].rdev); 2288 if (rdev && 2289 test_bit(In_sync, &rdev->flags) && 2290 is_badblock(rdev, r10_bio->devs[sl].addr + sect, s, 2291 &first_bad, &bad_sectors) == 0) { 2292 atomic_inc(&rdev->nr_pending); 2293 rcu_read_unlock(); 2294 success = sync_page_io(rdev, 2295 r10_bio->devs[sl].addr + 2296 sect, 2297 s<<9, 2298 conf->tmppage, 2299 REQ_OP_READ, 0, false); 2300 rdev_dec_pending(rdev, mddev); 2301 rcu_read_lock(); 2302 if (success) 2303 break; 2304 } 2305 sl++; 2306 if (sl == conf->copies) 2307 sl = 0; 2308 } while (!success && sl != r10_bio->read_slot); 2309 rcu_read_unlock(); 2310 2311 if (!success) { 2312 /* Cannot read from anywhere, just mark the block 2313 * as bad on the first device to discourage future 2314 * reads. 
2315 */ 2316 int dn = r10_bio->devs[r10_bio->read_slot].devnum; 2317 rdev = conf->mirrors[dn].rdev; 2318 2319 if (!rdev_set_badblocks( 2320 rdev, 2321 r10_bio->devs[r10_bio->read_slot].addr 2322 + sect, 2323 s, 0)) { 2324 md_error(mddev, rdev); 2325 r10_bio->devs[r10_bio->read_slot].bio 2326 = IO_BLOCKED; 2327 } 2328 break; 2329 } 2330 2331 start = sl; 2332 /* write it back and re-read */ 2333 rcu_read_lock(); 2334 while (sl != r10_bio->read_slot) { 2335 char b[BDEVNAME_SIZE]; 2336 2337 if (sl==0) 2338 sl = conf->copies; 2339 sl--; 2340 d = r10_bio->devs[sl].devnum; 2341 rdev = rcu_dereference(conf->mirrors[d].rdev); 2342 if (!rdev || 2343 !test_bit(In_sync, &rdev->flags)) 2344 continue; 2345 2346 atomic_inc(&rdev->nr_pending); 2347 rcu_read_unlock(); 2348 if (r10_sync_page_io(rdev, 2349 r10_bio->devs[sl].addr + 2350 sect, 2351 s, conf->tmppage, WRITE) 2352 == 0) { 2353 /* Well, this device is dead */ 2354 printk(KERN_NOTICE 2355 "md/raid10:%s: read correction " 2356 "write failed" 2357 " (%d sectors at %llu on %s)\n", 2358 mdname(mddev), s, 2359 (unsigned long long)( 2360 sect + 2361 choose_data_offset(r10_bio, 2362 rdev)), 2363 bdevname(rdev->bdev, b)); 2364 printk(KERN_NOTICE "md/raid10:%s: %s: failing " 2365 "drive\n", 2366 mdname(mddev), 2367 bdevname(rdev->bdev, b)); 2368 } 2369 rdev_dec_pending(rdev, mddev); 2370 rcu_read_lock(); 2371 } 2372 sl = start; 2373 while (sl != r10_bio->read_slot) { 2374 char b[BDEVNAME_SIZE]; 2375 2376 if (sl==0) 2377 sl = conf->copies; 2378 sl--; 2379 d = r10_bio->devs[sl].devnum; 2380 rdev = rcu_dereference(conf->mirrors[d].rdev); 2381 if (!rdev || 2382 !test_bit(In_sync, &rdev->flags)) 2383 continue; 2384 2385 atomic_inc(&rdev->nr_pending); 2386 rcu_read_unlock(); 2387 switch (r10_sync_page_io(rdev, 2388 r10_bio->devs[sl].addr + 2389 sect, 2390 s, conf->tmppage, 2391 READ)) { 2392 case 0: 2393 /* Well, this device is dead */ 2394 printk(KERN_NOTICE 2395 "md/raid10:%s: unable to read back " 2396 "corrected sectors" 2397 " (%d sectors at %llu on %s)\n", 2398 mdname(mddev), s, 2399 (unsigned long long)( 2400 sect + 2401 choose_data_offset(r10_bio, rdev)), 2402 bdevname(rdev->bdev, b)); 2403 printk(KERN_NOTICE "md/raid10:%s: %s: failing " 2404 "drive\n", 2405 mdname(mddev), 2406 bdevname(rdev->bdev, b)); 2407 break; 2408 case 1: 2409 printk(KERN_INFO 2410 "md/raid10:%s: read error corrected" 2411 " (%d sectors at %llu on %s)\n", 2412 mdname(mddev), s, 2413 (unsigned long long)( 2414 sect + 2415 choose_data_offset(r10_bio, rdev)), 2416 bdevname(rdev->bdev, b)); 2417 atomic_add(s, &rdev->corrected_errors); 2418 } 2419 2420 rdev_dec_pending(rdev, mddev); 2421 rcu_read_lock(); 2422 } 2423 rcu_read_unlock(); 2424 2425 sectors -= s; 2426 sect += s; 2427 } 2428 } 2429 2430 static int narrow_write_error(struct r10bio *r10_bio, int i) 2431 { 2432 struct bio *bio = r10_bio->master_bio; 2433 struct mddev *mddev = r10_bio->mddev; 2434 struct r10conf *conf = mddev->private; 2435 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev; 2436 /* bio has the data to be written to slot 'i' where 2437 * we just recently had a write error. 2438 * We repeatedly clone the bio and trim down to one block, 2439 * then try the write. Where the write fails we record 2440 * a bad block. 2441 * It is conceivable that the bio doesn't exactly align with 2442 * blocks. We must handle this. 2443 * 2444 * We currently own a reference to the rdev. 
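 * A purely illustrative run of the alignment arithmetic below: with
 * badblocks.shift = 3 (8-sector bad-block units) on a 512-byte logical
 * block device, block_sectors = roundup(8, 1) = 8.  If r10_bio->sector
 * is 1005, the first clone is trimmed to ((1005 + 8) & ~7) - 1005 = 3
 * sectors so that it ends on the boundary at 1008, and every later
 * clone covers a full 8-sector block.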
2445 */ 2446 2447 int block_sectors; 2448 sector_t sector; 2449 int sectors; 2450 int sect_to_write = r10_bio->sectors; 2451 int ok = 1; 2452 2453 if (rdev->badblocks.shift < 0) 2454 return 0; 2455 2456 block_sectors = roundup(1 << rdev->badblocks.shift, 2457 bdev_logical_block_size(rdev->bdev) >> 9); 2458 sector = r10_bio->sector; 2459 sectors = ((r10_bio->sector + block_sectors) 2460 & ~(sector_t)(block_sectors - 1)) 2461 - sector; 2462 2463 while (sect_to_write) { 2464 struct bio *wbio; 2465 if (sectors > sect_to_write) 2466 sectors = sect_to_write; 2467 /* Write at 'sector' for 'sectors' */ 2468 wbio = bio_clone_mddev(bio, GFP_NOIO, mddev); 2469 bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors); 2470 wbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+ 2471 choose_data_offset(r10_bio, rdev) + 2472 (sector - r10_bio->sector)); 2473 wbio->bi_bdev = rdev->bdev; 2474 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); 2475 2476 if (submit_bio_wait(wbio) < 0) 2477 /* Failure! */ 2478 ok = rdev_set_badblocks(rdev, sector, 2479 sectors, 0) 2480 && ok; 2481 2482 bio_put(wbio); 2483 sect_to_write -= sectors; 2484 sector += sectors; 2485 sectors = block_sectors; 2486 } 2487 return ok; 2488 } 2489 2490 static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio) 2491 { 2492 int slot = r10_bio->read_slot; 2493 struct bio *bio; 2494 struct r10conf *conf = mddev->private; 2495 struct md_rdev *rdev = r10_bio->devs[slot].rdev; 2496 char b[BDEVNAME_SIZE]; 2497 unsigned long do_sync; 2498 int max_sectors; 2499 2500 /* we got a read error. Maybe the drive is bad. Maybe just 2501 * the block and we can fix it. 2502 * We freeze all other IO, and try reading the block from 2503 * other devices. When we find one, we re-write 2504 * it and check that this fixes the read error. 2505 * This is all done synchronously while the array is 2506 * frozen.
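 * In short, the code below drops the failed bio, runs fix_read_error()
 * with the array frozen (when the array is writable), then lets
 * read_balance() pick another mirror and re-issues the read, splitting
 * the request when the chosen device cannot cover all remaining sectors.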
2507 */ 2508 bio = r10_bio->devs[slot].bio; 2509 bdevname(bio->bi_bdev, b); 2510 bio_put(bio); 2511 r10_bio->devs[slot].bio = NULL; 2512 2513 if (mddev->ro == 0) { 2514 freeze_array(conf, 1); 2515 fix_read_error(conf, mddev, r10_bio); 2516 unfreeze_array(conf); 2517 } else 2518 r10_bio->devs[slot].bio = IO_BLOCKED; 2519 2520 rdev_dec_pending(rdev, mddev); 2521 2522 read_more: 2523 rdev = read_balance(conf, r10_bio, &max_sectors); 2524 if (rdev == NULL) { 2525 printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O" 2526 " read error for block %llu\n", 2527 mdname(mddev), b, 2528 (unsigned long long)r10_bio->sector); 2529 raid_end_bio_io(r10_bio); 2530 return; 2531 } 2532 2533 do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC); 2534 slot = r10_bio->read_slot; 2535 printk_ratelimited( 2536 KERN_ERR 2537 "md/raid10:%s: %s: redirecting " 2538 "sector %llu to another mirror\n", 2539 mdname(mddev), 2540 bdevname(rdev->bdev, b), 2541 (unsigned long long)r10_bio->sector); 2542 bio = bio_clone_mddev(r10_bio->master_bio, 2543 GFP_NOIO, mddev); 2544 bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors); 2545 r10_bio->devs[slot].bio = bio; 2546 r10_bio->devs[slot].rdev = rdev; 2547 bio->bi_iter.bi_sector = r10_bio->devs[slot].addr 2548 + choose_data_offset(r10_bio, rdev); 2549 bio->bi_bdev = rdev->bdev; 2550 bio_set_op_attrs(bio, REQ_OP_READ, do_sync); 2551 bio->bi_private = r10_bio; 2552 bio->bi_end_io = raid10_end_read_request; 2553 if (max_sectors < r10_bio->sectors) { 2554 /* Drat - have to split this up more */ 2555 struct bio *mbio = r10_bio->master_bio; 2556 int sectors_handled = 2557 r10_bio->sector + max_sectors 2558 - mbio->bi_iter.bi_sector; 2559 r10_bio->sectors = max_sectors; 2560 spin_lock_irq(&conf->device_lock); 2561 if (mbio->bi_phys_segments == 0) 2562 mbio->bi_phys_segments = 2; 2563 else 2564 mbio->bi_phys_segments++; 2565 spin_unlock_irq(&conf->device_lock); 2566 generic_make_request(bio); 2567 2568 r10_bio = mempool_alloc(conf->r10bio_pool, 2569 GFP_NOIO); 2570 r10_bio->master_bio = mbio; 2571 r10_bio->sectors = bio_sectors(mbio) - sectors_handled; 2572 r10_bio->state = 0; 2573 set_bit(R10BIO_ReadError, 2574 &r10_bio->state); 2575 r10_bio->mddev = mddev; 2576 r10_bio->sector = mbio->bi_iter.bi_sector 2577 + sectors_handled; 2578 2579 goto read_more; 2580 } else 2581 generic_make_request(bio); 2582 } 2583 2584 static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) 2585 { 2586 /* Some sort of write request has finished and it 2587 * succeeded in writing where we thought there was a 2588 * bad block. So forget the bad block. 2589 * Or possibly if failed and we need to record 2590 * a bad block. 
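 * Below, sync/recovery r10_bios are handled first (clearing or recording
 * bad blocks for every copy), then normal writes: IO_MADE_GOOD clears the
 * old bad-block record, while a failed write is retried in smaller pieces
 * via narrow_write_error(), falling back to md_error() on the device.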
2591 */ 2592 int m; 2593 struct md_rdev *rdev; 2594 2595 if (test_bit(R10BIO_IsSync, &r10_bio->state) || 2596 test_bit(R10BIO_IsRecover, &r10_bio->state)) { 2597 for (m = 0; m < conf->copies; m++) { 2598 int dev = r10_bio->devs[m].devnum; 2599 rdev = conf->mirrors[dev].rdev; 2600 if (r10_bio->devs[m].bio == NULL) 2601 continue; 2602 if (!r10_bio->devs[m].bio->bi_error) { 2603 rdev_clear_badblocks( 2604 rdev, 2605 r10_bio->devs[m].addr, 2606 r10_bio->sectors, 0); 2607 } else { 2608 if (!rdev_set_badblocks( 2609 rdev, 2610 r10_bio->devs[m].addr, 2611 r10_bio->sectors, 0)) 2612 md_error(conf->mddev, rdev); 2613 } 2614 rdev = conf->mirrors[dev].replacement; 2615 if (r10_bio->devs[m].repl_bio == NULL) 2616 continue; 2617 2618 if (!r10_bio->devs[m].repl_bio->bi_error) { 2619 rdev_clear_badblocks( 2620 rdev, 2621 r10_bio->devs[m].addr, 2622 r10_bio->sectors, 0); 2623 } else { 2624 if (!rdev_set_badblocks( 2625 rdev, 2626 r10_bio->devs[m].addr, 2627 r10_bio->sectors, 0)) 2628 md_error(conf->mddev, rdev); 2629 } 2630 } 2631 put_buf(r10_bio); 2632 } else { 2633 bool fail = false; 2634 for (m = 0; m < conf->copies; m++) { 2635 int dev = r10_bio->devs[m].devnum; 2636 struct bio *bio = r10_bio->devs[m].bio; 2637 rdev = conf->mirrors[dev].rdev; 2638 if (bio == IO_MADE_GOOD) { 2639 rdev_clear_badblocks( 2640 rdev, 2641 r10_bio->devs[m].addr, 2642 r10_bio->sectors, 0); 2643 rdev_dec_pending(rdev, conf->mddev); 2644 } else if (bio != NULL && bio->bi_error) { 2645 fail = true; 2646 if (!narrow_write_error(r10_bio, m)) { 2647 md_error(conf->mddev, rdev); 2648 set_bit(R10BIO_Degraded, 2649 &r10_bio->state); 2650 } 2651 rdev_dec_pending(rdev, conf->mddev); 2652 } 2653 bio = r10_bio->devs[m].repl_bio; 2654 rdev = conf->mirrors[dev].replacement; 2655 if (rdev && bio == IO_MADE_GOOD) { 2656 rdev_clear_badblocks( 2657 rdev, 2658 r10_bio->devs[m].addr, 2659 r10_bio->sectors, 0); 2660 rdev_dec_pending(rdev, conf->mddev); 2661 } 2662 } 2663 if (fail) { 2664 spin_lock_irq(&conf->device_lock); 2665 list_add(&r10_bio->retry_list, &conf->bio_end_io_list); 2666 conf->nr_queued++; 2667 spin_unlock_irq(&conf->device_lock); 2668 md_wakeup_thread(conf->mddev->thread); 2669 } else { 2670 if (test_bit(R10BIO_WriteError, 2671 &r10_bio->state)) 2672 close_write(r10_bio); 2673 raid_end_bio_io(r10_bio); 2674 } 2675 } 2676 } 2677 2678 static void raid10d(struct md_thread *thread) 2679 { 2680 struct mddev *mddev = thread->mddev; 2681 struct r10bio *r10_bio; 2682 unsigned long flags; 2683 struct r10conf *conf = mddev->private; 2684 struct list_head *head = &conf->retry_list; 2685 struct blk_plug plug; 2686 2687 md_check_recovery(mddev); 2688 2689 if (!list_empty_careful(&conf->bio_end_io_list) && 2690 !test_bit(MD_CHANGE_PENDING, &mddev->flags)) { 2691 LIST_HEAD(tmp); 2692 spin_lock_irqsave(&conf->device_lock, flags); 2693 if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) { 2694 while (!list_empty(&conf->bio_end_io_list)) { 2695 list_move(conf->bio_end_io_list.prev, &tmp); 2696 conf->nr_queued--; 2697 } 2698 } 2699 spin_unlock_irqrestore(&conf->device_lock, flags); 2700 while (!list_empty(&tmp)) { 2701 r10_bio = list_first_entry(&tmp, struct r10bio, 2702 retry_list); 2703 list_del(&r10_bio->retry_list); 2704 if (mddev->degraded) 2705 set_bit(R10BIO_Degraded, &r10_bio->state); 2706 2707 if (test_bit(R10BIO_WriteError, 2708 &r10_bio->state)) 2709 close_write(r10_bio); 2710 raid_end_bio_io(r10_bio); 2711 } 2712 } 2713 2714 blk_start_plug(&plug); 2715 for (;;) { 2716 2717 flush_pending_writes(conf); 2718 2719 
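		/* Pop the next r10_bio off conf->retry_list under
		 * device_lock and dispatch it below according to its
		 * state bits (write completion, reshape, resync,
		 * recovery or read error).
		 */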
spin_lock_irqsave(&conf->device_lock, flags); 2720 if (list_empty(head)) { 2721 spin_unlock_irqrestore(&conf->device_lock, flags); 2722 break; 2723 } 2724 r10_bio = list_entry(head->prev, struct r10bio, retry_list); 2725 list_del(head->prev); 2726 conf->nr_queued--; 2727 spin_unlock_irqrestore(&conf->device_lock, flags); 2728 2729 mddev = r10_bio->mddev; 2730 conf = mddev->private; 2731 if (test_bit(R10BIO_MadeGood, &r10_bio->state) || 2732 test_bit(R10BIO_WriteError, &r10_bio->state)) 2733 handle_write_completed(conf, r10_bio); 2734 else if (test_bit(R10BIO_IsReshape, &r10_bio->state)) 2735 reshape_request_write(mddev, r10_bio); 2736 else if (test_bit(R10BIO_IsSync, &r10_bio->state)) 2737 sync_request_write(mddev, r10_bio); 2738 else if (test_bit(R10BIO_IsRecover, &r10_bio->state)) 2739 recovery_request_write(mddev, r10_bio); 2740 else if (test_bit(R10BIO_ReadError, &r10_bio->state)) 2741 handle_read_error(mddev, r10_bio); 2742 else { 2743 /* just a partial read to be scheduled from a 2744 * separate context 2745 */ 2746 int slot = r10_bio->read_slot; 2747 generic_make_request(r10_bio->devs[slot].bio); 2748 } 2749 2750 cond_resched(); 2751 if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) 2752 md_check_recovery(mddev); 2753 } 2754 blk_finish_plug(&plug); 2755 } 2756 2757 static int init_resync(struct r10conf *conf) 2758 { 2759 int buffs; 2760 int i; 2761 2762 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE; 2763 BUG_ON(conf->r10buf_pool); 2764 conf->have_replacement = 0; 2765 for (i = 0; i < conf->geo.raid_disks; i++) 2766 if (conf->mirrors[i].replacement) 2767 conf->have_replacement = 1; 2768 conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf); 2769 if (!conf->r10buf_pool) 2770 return -ENOMEM; 2771 conf->next_resync = 0; 2772 return 0; 2773 } 2774 2775 /* 2776 * perform a "sync" on one "block" 2777 * 2778 * We need to make sure that no normal I/O request - particularly write 2779 * requests - conflict with active sync requests. 2780 * 2781 * This is achieved by tracking pending requests and a 'barrier' concept 2782 * that can be installed to exclude normal IO requests. 2783 * 2784 * Resync and recovery are handled very differently. 2785 * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery. 2786 * 2787 * For resync, we iterate over virtual addresses, read all copies, 2788 * and update if there are differences. If only one copy is live, 2789 * skip it. 2790 * For recovery, we iterate over physical addresses, read a good 2791 * value for each non-in_sync drive, and over-write. 2792 * 2793 * So, for recovery we may have several outstanding complex requests for a 2794 * given address, one for each out-of-sync device. We model this by allocating 2795 * a number of r10_bio structures, one for each out-of-sync device. 2796 * As we setup these structures, we collect all bio's together into a list 2797 * which we then process collectively to add pages, and then process again 2798 * to pass to generic_make_request. 2799 * 2800 * The r10_bio structures are linked using a borrowed master_bio pointer. 2801 * This link is counted in ->remaining. When the r10_bio that points to NULL 2802 * has its remaining count decremented to 0, the whole complex operation 2803 * is complete. 
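 * A small illustration of the chain (structure only, not real code):
 * if two drives need recovery at one virtual address, the first r10_bio
 * A has master_bio == NULL and the second r10_bio B has B->master_bio =
 * (struct bio *)A with A->remaining incremented; when B finishes it
 * drops A->remaining, and md_done_sync() is only called once
 * A->remaining reaches zero.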
2804 * 2805 */ 2806 2807 static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, 2808 int *skipped) 2809 { 2810 struct r10conf *conf = mddev->private; 2811 struct r10bio *r10_bio; 2812 struct bio *biolist = NULL, *bio; 2813 sector_t max_sector, nr_sectors; 2814 int i; 2815 int max_sync; 2816 sector_t sync_blocks; 2817 sector_t sectors_skipped = 0; 2818 int chunks_skipped = 0; 2819 sector_t chunk_mask = conf->geo.chunk_mask; 2820 2821 if (!conf->r10buf_pool) 2822 if (init_resync(conf)) 2823 return 0; 2824 2825 /* 2826 * Allow skipping a full rebuild for incremental assembly 2827 * of a clean array, like RAID1 does. 2828 */ 2829 if (mddev->bitmap == NULL && 2830 mddev->recovery_cp == MaxSector && 2831 mddev->reshape_position == MaxSector && 2832 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && 2833 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && 2834 !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 2835 conf->fullsync == 0) { 2836 *skipped = 1; 2837 return mddev->dev_sectors - sector_nr; 2838 } 2839 2840 skipped: 2841 max_sector = mddev->dev_sectors; 2842 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 2843 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 2844 max_sector = mddev->resync_max_sectors; 2845 if (sector_nr >= max_sector) { 2846 /* If we aborted, we need to abort the 2847 * sync on the 'current' bitmap chunks (there can 2848 * be several when recovering multiple devices), 2849 * as we may have started syncing it but not finished. 2850 * We can find the current address in 2851 * mddev->curr_resync, but for recovery, 2852 * we need to convert that to several 2853 * virtual addresses. 2854 */ 2855 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { 2856 end_reshape(conf); 2857 close_sync(conf); 2858 return 0; 2859 } 2860 2861 if (mddev->curr_resync < max_sector) { /* aborted */ 2862 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 2863 bitmap_end_sync(mddev->bitmap, mddev->curr_resync, 2864 &sync_blocks, 1); 2865 else for (i = 0; i < conf->geo.raid_disks; i++) { 2866 sector_t sect = 2867 raid10_find_virt(conf, mddev->curr_resync, i); 2868 bitmap_end_sync(mddev->bitmap, sect, 2869 &sync_blocks, 1); 2870 } 2871 } else { 2872 /* completed sync */ 2873 if ((!mddev->bitmap || conf->fullsync) 2874 && conf->have_replacement 2875 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 2876 /* Completed a full sync so the replacements 2877 * are now fully recovered. 2878 */ 2879 for (i = 0; i < conf->geo.raid_disks; i++) 2880 if (conf->mirrors[i].replacement) 2881 conf->mirrors[i].replacement 2882 ->recovery_offset 2883 = MaxSector; 2884 } 2885 conf->fullsync = 0; 2886 } 2887 bitmap_close_sync(mddev->bitmap); 2888 close_sync(conf); 2889 *skipped = 1; 2890 return sectors_skipped; 2891 } 2892 2893 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 2894 return reshape_request(mddev, sector_nr, skipped); 2895 2896 if (chunks_skipped >= conf->geo.raid_disks) { 2897 /* if there has been nothing to do on any drive, 2898 * then there is nothing to do at all.. 2899 */ 2900 *skipped = 1; 2901 return (max_sector - sector_nr) + sectors_skipped; 2902 } 2903 2904 if (max_sector > mddev->resync_max) 2905 max_sector = mddev->resync_max; /* Don't do IO beyond here */ 2906 2907 /* make sure whole request will fit in a chunk - if chunks 2908 * are meaningful 2909 */ 2910 if (conf->geo.near_copies < conf->geo.raid_disks && 2911 max_sector > (sector_nr | chunk_mask)) 2912 max_sector = (sector_nr | chunk_mask) + 1; 2913 2914 /* Again, very different code for resync and recovery.
2915 * Both must result in an r10bio with a list of bios that 2916 * have bi_end_io, bi_sector, bi_bdev set, 2917 * and bi_private set to the r10bio. 2918 * For recovery, we may actually create several r10bios 2919 * with 2 bios in each, that correspond to the bios in the main one. 2920 * In this case, the subordinate r10bios link back through a 2921 * borrowed master_bio pointer, and the counter in the master 2922 * includes a ref from each subordinate. 2923 */ 2924 /* First, we decide what to do and set ->bi_end_io 2925 * To end_sync_read if we want to read, and 2926 * end_sync_write if we will want to write. 2927 */ 2928 2929 max_sync = RESYNC_PAGES << (PAGE_SHIFT-9); 2930 if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 2931 /* recovery... the complicated one */ 2932 int j; 2933 r10_bio = NULL; 2934 2935 for (i = 0 ; i < conf->geo.raid_disks; i++) { 2936 int still_degraded; 2937 struct r10bio *rb2; 2938 sector_t sect; 2939 int must_sync; 2940 int any_working; 2941 struct raid10_info *mirror = &conf->mirrors[i]; 2942 2943 if ((mirror->rdev == NULL || 2944 test_bit(In_sync, &mirror->rdev->flags)) 2945 && 2946 (mirror->replacement == NULL || 2947 test_bit(Faulty, 2948 &mirror->replacement->flags))) 2949 continue; 2950 2951 still_degraded = 0; 2952 /* want to reconstruct this device */ 2953 rb2 = r10_bio; 2954 sect = raid10_find_virt(conf, sector_nr, i); 2955 if (sect >= mddev->resync_max_sectors) { 2956 /* last stripe is not complete - don't 2957 * try to recover this sector. 2958 */ 2959 continue; 2960 } 2961 /* Unless we are doing a full sync, or a replacement 2962 * we only need to recover the block if it is set in 2963 * the bitmap 2964 */ 2965 must_sync = bitmap_start_sync(mddev->bitmap, sect, 2966 &sync_blocks, 1); 2967 if (sync_blocks < max_sync) 2968 max_sync = sync_blocks; 2969 if (!must_sync && 2970 mirror->replacement == NULL && 2971 !conf->fullsync) { 2972 /* yep, skip the sync_blocks here, but don't assume 2973 * that there will never be anything to do here 2974 */ 2975 chunks_skipped = -1; 2976 continue; 2977 } 2978 2979 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); 2980 r10_bio->state = 0; 2981 raise_barrier(conf, rb2 != NULL); 2982 atomic_set(&r10_bio->remaining, 0); 2983 2984 r10_bio->master_bio = (struct bio*)rb2; 2985 if (rb2) 2986 atomic_inc(&rb2->remaining); 2987 r10_bio->mddev = mddev; 2988 set_bit(R10BIO_IsRecover, &r10_bio->state); 2989 r10_bio->sector = sect; 2990 2991 raid10_find_phys(conf, r10_bio); 2992 2993 /* Need to check if the array will still be 2994 * degraded 2995 */ 2996 for (j = 0; j < conf->geo.raid_disks; j++) 2997 if (conf->mirrors[j].rdev == NULL || 2998 test_bit(Faulty, &conf->mirrors[j].rdev->flags)) { 2999 still_degraded = 1; 3000 break; 3001 } 3002 3003 must_sync = bitmap_start_sync(mddev->bitmap, sect, 3004 &sync_blocks, still_degraded); 3005 3006 any_working = 0; 3007 for (j=0; j<conf->copies;j++) { 3008 int k; 3009 int d = r10_bio->devs[j].devnum; 3010 sector_t from_addr, to_addr; 3011 struct md_rdev *rdev; 3012 sector_t sector, first_bad; 3013 int bad_sectors; 3014 if (!conf->mirrors[d].rdev || 3015 !test_bit(In_sync, &conf->mirrors[d].rdev->flags)) 3016 continue; 3017 /* This is where we read from */ 3018 any_working = 1; 3019 rdev = conf->mirrors[d].rdev; 3020 sector = r10_bio->devs[j].addr; 3021 3022 if (is_badblock(rdev, sector, max_sync, 3023 &first_bad, &bad_sectors)) { 3024 if (first_bad > sector) 3025 max_sync = first_bad - sector; 3026 else { 3027 bad_sectors -= (sector 3028 - first_bad); 3029 if (max_sync > 
bad_sectors) 3030 max_sync = bad_sectors; 3031 continue; 3032 } 3033 } 3034 bio = r10_bio->devs[0].bio; 3035 bio_reset(bio); 3036 bio->bi_next = biolist; 3037 biolist = bio; 3038 bio->bi_private = r10_bio; 3039 bio->bi_end_io = end_sync_read; 3040 bio_set_op_attrs(bio, REQ_OP_READ, 0); 3041 from_addr = r10_bio->devs[j].addr; 3042 bio->bi_iter.bi_sector = from_addr + 3043 rdev->data_offset; 3044 bio->bi_bdev = rdev->bdev; 3045 atomic_inc(&rdev->nr_pending); 3046 /* and we write to 'i' (if not in_sync) */ 3047 3048 for (k=0; k<conf->copies; k++) 3049 if (r10_bio->devs[k].devnum == i) 3050 break; 3051 BUG_ON(k == conf->copies); 3052 to_addr = r10_bio->devs[k].addr; 3053 r10_bio->devs[0].devnum = d; 3054 r10_bio->devs[0].addr = from_addr; 3055 r10_bio->devs[1].devnum = i; 3056 r10_bio->devs[1].addr = to_addr; 3057 3058 rdev = mirror->rdev; 3059 if (!test_bit(In_sync, &rdev->flags)) { 3060 bio = r10_bio->devs[1].bio; 3061 bio_reset(bio); 3062 bio->bi_next = biolist; 3063 biolist = bio; 3064 bio->bi_private = r10_bio; 3065 bio->bi_end_io = end_sync_write; 3066 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 3067 bio->bi_iter.bi_sector = to_addr 3068 + rdev->data_offset; 3069 bio->bi_bdev = rdev->bdev; 3070 atomic_inc(&r10_bio->remaining); 3071 } else 3072 r10_bio->devs[1].bio->bi_end_io = NULL; 3073 3074 /* and maybe write to replacement */ 3075 bio = r10_bio->devs[1].repl_bio; 3076 if (bio) 3077 bio->bi_end_io = NULL; 3078 rdev = mirror->replacement; 3079 /* Note: if rdev != NULL, then bio 3080 * cannot be NULL as r10buf_pool_alloc will 3081 * have allocated it. 3082 * So the second test here is pointless. 3083 * But it keeps semantic-checkers happy, and 3084 * this comment keeps human reviewers 3085 * happy. 3086 */ 3087 if (rdev == NULL || bio == NULL || 3088 test_bit(Faulty, &rdev->flags)) 3089 break; 3090 bio_reset(bio); 3091 bio->bi_next = biolist; 3092 biolist = bio; 3093 bio->bi_private = r10_bio; 3094 bio->bi_end_io = end_sync_write; 3095 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 3096 bio->bi_iter.bi_sector = to_addr + 3097 rdev->data_offset; 3098 bio->bi_bdev = rdev->bdev; 3099 atomic_inc(&r10_bio->remaining); 3100 break; 3101 } 3102 if (j == conf->copies) { 3103 /* Cannot recover, so abort the recovery or 3104 * record a bad block */ 3105 if (any_working) { 3106 /* problem is that there are bad blocks 3107 * on other device(s) 3108 */ 3109 int k; 3110 for (k = 0; k < conf->copies; k++) 3111 if (r10_bio->devs[k].devnum == i) 3112 break; 3113 if (!test_bit(In_sync, 3114 &mirror->rdev->flags) 3115 && !rdev_set_badblocks( 3116 mirror->rdev, 3117 r10_bio->devs[k].addr, 3118 max_sync, 0)) 3119 any_working = 0; 3120 if (mirror->replacement && 3121 !rdev_set_badblocks( 3122 mirror->replacement, 3123 r10_bio->devs[k].addr, 3124 max_sync, 0)) 3125 any_working = 0; 3126 } 3127 if (!any_working) { 3128 if (!test_and_set_bit(MD_RECOVERY_INTR, 3129 &mddev->recovery)) 3130 printk(KERN_INFO "md/raid10:%s: insufficient " 3131 "working devices for recovery.\n", 3132 mdname(mddev)); 3133 mirror->recovery_disabled 3134 = mddev->recovery_disabled; 3135 } 3136 put_buf(r10_bio); 3137 if (rb2) 3138 atomic_dec(&rb2->remaining); 3139 r10_bio = rb2; 3140 break; 3141 } 3142 } 3143 if (biolist == NULL) { 3144 while (r10_bio) { 3145 struct r10bio *rb2 = r10_bio; 3146 r10_bio = (struct r10bio*) rb2->master_bio; 3147 rb2->master_bio = NULL; 3148 put_buf(rb2); 3149 } 3150 goto giveup; 3151 } 3152 } else { 3153 /* resync. 
Schedule a read for every block at this virt offset */ 3154 int count = 0; 3155 3156 bitmap_cond_end_sync(mddev->bitmap, sector_nr, 0); 3157 3158 if (!bitmap_start_sync(mddev->bitmap, sector_nr, 3159 &sync_blocks, mddev->degraded) && 3160 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, 3161 &mddev->recovery)) { 3162 /* We can skip this block */ 3163 *skipped = 1; 3164 return sync_blocks + sectors_skipped; 3165 } 3166 if (sync_blocks < max_sync) 3167 max_sync = sync_blocks; 3168 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); 3169 r10_bio->state = 0; 3170 3171 r10_bio->mddev = mddev; 3172 atomic_set(&r10_bio->remaining, 0); 3173 raise_barrier(conf, 0); 3174 conf->next_resync = sector_nr; 3175 3176 r10_bio->master_bio = NULL; 3177 r10_bio->sector = sector_nr; 3178 set_bit(R10BIO_IsSync, &r10_bio->state); 3179 raid10_find_phys(conf, r10_bio); 3180 r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1; 3181 3182 for (i = 0; i < conf->copies; i++) { 3183 int d = r10_bio->devs[i].devnum; 3184 sector_t first_bad, sector; 3185 int bad_sectors; 3186 3187 if (r10_bio->devs[i].repl_bio) 3188 r10_bio->devs[i].repl_bio->bi_end_io = NULL; 3189 3190 bio = r10_bio->devs[i].bio; 3191 bio_reset(bio); 3192 bio->bi_error = -EIO; 3193 if (conf->mirrors[d].rdev == NULL || 3194 test_bit(Faulty, &conf->mirrors[d].rdev->flags)) 3195 continue; 3196 sector = r10_bio->devs[i].addr; 3197 if (is_badblock(conf->mirrors[d].rdev, 3198 sector, max_sync, 3199 &first_bad, &bad_sectors)) { 3200 if (first_bad > sector) 3201 max_sync = first_bad - sector; 3202 else { 3203 bad_sectors -= (sector - first_bad); 3204 if (max_sync > bad_sectors) 3205 max_sync = bad_sectors; 3206 continue; 3207 } 3208 } 3209 atomic_inc(&conf->mirrors[d].rdev->nr_pending); 3210 atomic_inc(&r10_bio->remaining); 3211 bio->bi_next = biolist; 3212 biolist = bio; 3213 bio->bi_private = r10_bio; 3214 bio->bi_end_io = end_sync_read; 3215 bio_set_op_attrs(bio, REQ_OP_READ, 0); 3216 bio->bi_iter.bi_sector = sector + 3217 conf->mirrors[d].rdev->data_offset; 3218 bio->bi_bdev = conf->mirrors[d].rdev->bdev; 3219 count++; 3220 3221 if (conf->mirrors[d].replacement == NULL || 3222 test_bit(Faulty, 3223 &conf->mirrors[d].replacement->flags)) 3224 continue; 3225 3226 /* Need to set up for writing to the replacement */ 3227 bio = r10_bio->devs[i].repl_bio; 3228 bio_reset(bio); 3229 bio->bi_error = -EIO; 3230 3231 sector = r10_bio->devs[i].addr; 3232 atomic_inc(&conf->mirrors[d].rdev->nr_pending); 3233 bio->bi_next = biolist; 3234 biolist = bio; 3235 bio->bi_private = r10_bio; 3236 bio->bi_end_io = end_sync_write; 3237 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 3238 bio->bi_iter.bi_sector = sector + 3239 conf->mirrors[d].replacement->data_offset; 3240 bio->bi_bdev = conf->mirrors[d].replacement->bdev; 3241 count++; 3242 } 3243 3244 if (count < 2) { 3245 for (i=0; i<conf->copies; i++) { 3246 int d = r10_bio->devs[i].devnum; 3247 if (r10_bio->devs[i].bio->bi_end_io) 3248 rdev_dec_pending(conf->mirrors[d].rdev, 3249 mddev); 3250 if (r10_bio->devs[i].repl_bio && 3251 r10_bio->devs[i].repl_bio->bi_end_io) 3252 rdev_dec_pending( 3253 conf->mirrors[d].replacement, 3254 mddev); 3255 } 3256 put_buf(r10_bio); 3257 biolist = NULL; 3258 goto giveup; 3259 } 3260 } 3261 3262 nr_sectors = 0; 3263 if (sector_nr + max_sync < max_sector) 3264 max_sector = sector_nr + max_sync; 3265 do { 3266 struct page *page; 3267 int len = PAGE_SIZE; 3268 if (sector_nr + (len>>9) > max_sector) 3269 len = (max_sector - sector_nr) << 9; 3270 if (len == 0) 3271 break; 3272 for (bio= biolist 
; bio ; bio=bio->bi_next) { 3273 struct bio *bio2; 3274 page = bio->bi_io_vec[bio->bi_vcnt].bv_page; 3275 if (bio_add_page(bio, page, len, 0)) 3276 continue; 3277 3278 /* stop here */ 3279 bio->bi_io_vec[bio->bi_vcnt].bv_page = page; 3280 for (bio2 = biolist; 3281 bio2 && bio2 != bio; 3282 bio2 = bio2->bi_next) { 3283 /* remove last page from this bio */ 3284 bio2->bi_vcnt--; 3285 bio2->bi_iter.bi_size -= len; 3286 bio_clear_flag(bio2, BIO_SEG_VALID); 3287 } 3288 goto bio_full; 3289 } 3290 nr_sectors += len>>9; 3291 sector_nr += len>>9; 3292 } while (biolist->bi_vcnt < RESYNC_PAGES); 3293 bio_full: 3294 r10_bio->sectors = nr_sectors; 3295 3296 while (biolist) { 3297 bio = biolist; 3298 biolist = biolist->bi_next; 3299 3300 bio->bi_next = NULL; 3301 r10_bio = bio->bi_private; 3302 r10_bio->sectors = nr_sectors; 3303 3304 if (bio->bi_end_io == end_sync_read) { 3305 md_sync_acct(bio->bi_bdev, nr_sectors); 3306 bio->bi_error = 0; 3307 generic_make_request(bio); 3308 } 3309 } 3310 3311 if (sectors_skipped) 3312 /* pretend they weren't skipped, it makes 3313 * no important difference in this case 3314 */ 3315 md_done_sync(mddev, sectors_skipped, 1); 3316 3317 return sectors_skipped + nr_sectors; 3318 giveup: 3319 /* There is nowhere to write, so all non-sync 3320 * drives must be failed or in resync, all drives 3321 * have a bad block, so try the next chunk... 3322 */ 3323 if (sector_nr + max_sync < max_sector) 3324 max_sector = sector_nr + max_sync; 3325 3326 sectors_skipped += (max_sector - sector_nr); 3327 chunks_skipped ++; 3328 sector_nr = max_sector; 3329 goto skipped; 3330 } 3331 3332 static sector_t 3333 raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks) 3334 { 3335 sector_t size; 3336 struct r10conf *conf = mddev->private; 3337 3338 if (!raid_disks) 3339 raid_disks = min(conf->geo.raid_disks, 3340 conf->prev.raid_disks); 3341 if (!sectors) 3342 sectors = conf->dev_sectors; 3343 3344 size = sectors >> conf->geo.chunk_shift; 3345 sector_div(size, conf->geo.far_copies); 3346 size = size * raid_disks; 3347 sector_div(size, conf->geo.near_copies); 3348 3349 return size << conf->geo.chunk_shift; 3350 } 3351 3352 static void calc_sectors(struct r10conf *conf, sector_t size) 3353 { 3354 /* Calculate the number of sectors-per-device that will 3355 * actually be used, and set conf->dev_sectors and 3356 * conf->stride 3357 */ 3358 3359 size = size >> conf->geo.chunk_shift; 3360 sector_div(size, conf->geo.far_copies); 3361 size = size * conf->geo.raid_disks; 3362 sector_div(size, conf->geo.near_copies); 3363 /* 'size' is now the number of chunks in the array */ 3364 /* calculate "used chunks per device" */ 3365 size = size * conf->copies; 3366 3367 /* We need to round up when dividing by raid_disks to 3368 * get the stride size. 
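 * Worked example with purely illustrative numbers: 4 devices each with
 * room for 1000 chunks, near_copies = 1, far_copies = 2, far_offset = 0.
 * Then 1000/2 * 4 / 1 = 2000 data chunks fit in the array, 2000 * 2
 * copies = 4000 chunks are used in total, DIV_ROUND_UP(4000, 4) = 1000
 * chunks are used per device (dev_sectors), and the stride - the length
 * of each 'far' section - is 1000/2 = 500 chunks.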
3369 */ 3370 size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks); 3371 3372 conf->dev_sectors = size << conf->geo.chunk_shift; 3373 3374 if (conf->geo.far_offset) 3375 conf->geo.stride = 1 << conf->geo.chunk_shift; 3376 else { 3377 sector_div(size, conf->geo.far_copies); 3378 conf->geo.stride = size << conf->geo.chunk_shift; 3379 } 3380 } 3381 3382 enum geo_type {geo_new, geo_old, geo_start}; 3383 static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new) 3384 { 3385 int nc, fc, fo; 3386 int layout, chunk, disks; 3387 switch (new) { 3388 case geo_old: 3389 layout = mddev->layout; 3390 chunk = mddev->chunk_sectors; 3391 disks = mddev->raid_disks - mddev->delta_disks; 3392 break; 3393 case geo_new: 3394 layout = mddev->new_layout; 3395 chunk = mddev->new_chunk_sectors; 3396 disks = mddev->raid_disks; 3397 break; 3398 default: /* avoid 'may be unused' warnings */ 3399 case geo_start: /* new when starting reshape - raid_disks not 3400 * updated yet. */ 3401 layout = mddev->new_layout; 3402 chunk = mddev->new_chunk_sectors; 3403 disks = mddev->raid_disks + mddev->delta_disks; 3404 break; 3405 } 3406 if (layout >> 19) 3407 return -1; 3408 if (chunk < (PAGE_SIZE >> 9) || 3409 !is_power_of_2(chunk)) 3410 return -2; 3411 nc = layout & 255; 3412 fc = (layout >> 8) & 255; 3413 fo = layout & (1<<16); 3414 geo->raid_disks = disks; 3415 geo->near_copies = nc; 3416 geo->far_copies = fc; 3417 geo->far_offset = fo; 3418 switch (layout >> 17) { 3419 case 0: /* original layout. simple but not always optimal */ 3420 geo->far_set_size = disks; 3421 break; 3422 case 1: /* "improved" layout which was buggy. Hopefully no-one is 3423 * actually using this, but leave code here just in case.*/ 3424 geo->far_set_size = disks/fc; 3425 WARN(geo->far_set_size < fc, 3426 "This RAID10 layout does not provide data safety - please backup and create new array\n"); 3427 break; 3428 case 2: /* "improved" layout fixed to match documentation */ 3429 geo->far_set_size = fc * nc; 3430 break; 3431 default: /* Not a valid layout */ 3432 return -1; 3433 } 3434 geo->chunk_mask = chunk - 1; 3435 geo->chunk_shift = ffz(~chunk); 3436 return nc*fc; 3437 } 3438 3439 static struct r10conf *setup_conf(struct mddev *mddev) 3440 { 3441 struct r10conf *conf = NULL; 3442 int err = -EINVAL; 3443 struct geom geo; 3444 int copies; 3445 3446 copies = setup_geo(&geo, mddev, geo_new); 3447 3448 if (copies == -2) { 3449 printk(KERN_ERR "md/raid10:%s: chunk size must be " 3450 "at least PAGE_SIZE(%ld) and be a power of 2.\n", 3451 mdname(mddev), PAGE_SIZE); 3452 goto out; 3453 } 3454 3455 if (copies < 2 || copies > mddev->raid_disks) { 3456 printk(KERN_ERR "md/raid10:%s: unsupported raid10 layout: 0x%8x\n", 3457 mdname(mddev), mddev->new_layout); 3458 goto out; 3459 } 3460 3461 err = -ENOMEM; 3462 conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL); 3463 if (!conf) 3464 goto out; 3465 3466 /* FIXME calc properly */ 3467 conf->mirrors = kzalloc(sizeof(struct raid10_info)*(mddev->raid_disks + 3468 max(0,-mddev->delta_disks)), 3469 GFP_KERNEL); 3470 if (!conf->mirrors) 3471 goto out; 3472 3473 conf->tmppage = alloc_page(GFP_KERNEL); 3474 if (!conf->tmppage) 3475 goto out; 3476 3477 conf->geo = geo; 3478 conf->copies = copies; 3479 conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc, 3480 r10bio_pool_free, conf); 3481 if (!conf->r10bio_pool) 3482 goto out; 3483 3484 calc_sectors(conf, mddev->dev_sectors); 3485 if (mddev->reshape_position == MaxSector) { 3486 conf->prev = conf->geo; 3487 conf->reshape_progress = 
MaxSector; 3488 } else { 3489 if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) { 3490 err = -EINVAL; 3491 goto out; 3492 } 3493 conf->reshape_progress = mddev->reshape_position; 3494 if (conf->prev.far_offset) 3495 conf->prev.stride = 1 << conf->prev.chunk_shift; 3496 else 3497 /* far_copies must be 1 */ 3498 conf->prev.stride = conf->dev_sectors; 3499 } 3500 conf->reshape_safe = conf->reshape_progress; 3501 spin_lock_init(&conf->device_lock); 3502 INIT_LIST_HEAD(&conf->retry_list); 3503 INIT_LIST_HEAD(&conf->bio_end_io_list); 3504 3505 spin_lock_init(&conf->resync_lock); 3506 init_waitqueue_head(&conf->wait_barrier); 3507 3508 conf->thread = md_register_thread(raid10d, mddev, "raid10"); 3509 if (!conf->thread) 3510 goto out; 3511 3512 conf->mddev = mddev; 3513 return conf; 3514 3515 out: 3516 if (err == -ENOMEM) 3517 printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n", 3518 mdname(mddev)); 3519 if (conf) { 3520 mempool_destroy(conf->r10bio_pool); 3521 kfree(conf->mirrors); 3522 safe_put_page(conf->tmppage); 3523 kfree(conf); 3524 } 3525 return ERR_PTR(err); 3526 } 3527 3528 static int raid10_run(struct mddev *mddev) 3529 { 3530 struct r10conf *conf; 3531 int i, disk_idx, chunk_size; 3532 struct raid10_info *disk; 3533 struct md_rdev *rdev; 3534 sector_t size; 3535 sector_t min_offset_diff = 0; 3536 int first = 1; 3537 bool discard_supported = false; 3538 3539 if (mddev->private == NULL) { 3540 conf = setup_conf(mddev); 3541 if (IS_ERR(conf)) 3542 return PTR_ERR(conf); 3543 mddev->private = conf; 3544 } 3545 conf = mddev->private; 3546 if (!conf) 3547 goto out; 3548 3549 mddev->thread = conf->thread; 3550 conf->thread = NULL; 3551 3552 chunk_size = mddev->chunk_sectors << 9; 3553 if (mddev->queue) { 3554 blk_queue_max_discard_sectors(mddev->queue, 3555 mddev->chunk_sectors); 3556 blk_queue_max_write_same_sectors(mddev->queue, 0); 3557 blk_queue_io_min(mddev->queue, chunk_size); 3558 if (conf->geo.raid_disks % conf->geo.near_copies) 3559 blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); 3560 else 3561 blk_queue_io_opt(mddev->queue, chunk_size * 3562 (conf->geo.raid_disks / conf->geo.near_copies)); 3563 } 3564 3565 rdev_for_each(rdev, mddev) { 3566 long long diff; 3567 struct request_queue *q; 3568 3569 disk_idx = rdev->raid_disk; 3570 if (disk_idx < 0) 3571 continue; 3572 if (disk_idx >= conf->geo.raid_disks && 3573 disk_idx >= conf->prev.raid_disks) 3574 continue; 3575 disk = conf->mirrors + disk_idx; 3576 3577 if (test_bit(Replacement, &rdev->flags)) { 3578 if (disk->replacement) 3579 goto out_free_conf; 3580 disk->replacement = rdev; 3581 } else { 3582 if (disk->rdev) 3583 goto out_free_conf; 3584 disk->rdev = rdev; 3585 } 3586 q = bdev_get_queue(rdev->bdev); 3587 diff = (rdev->new_data_offset - rdev->data_offset); 3588 if (!mddev->reshape_backwards) 3589 diff = -diff; 3590 if (diff < 0) 3591 diff = 0; 3592 if (first || diff < min_offset_diff) 3593 min_offset_diff = diff; 3594 3595 if (mddev->gendisk) 3596 disk_stack_limits(mddev->gendisk, rdev->bdev, 3597 rdev->data_offset << 9); 3598 3599 disk->head_position = 0; 3600 3601 if (blk_queue_discard(bdev_get_queue(rdev->bdev))) 3602 discard_supported = true; 3603 } 3604 3605 if (mddev->queue) { 3606 if (discard_supported) 3607 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, 3608 mddev->queue); 3609 else 3610 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, 3611 mddev->queue); 3612 } 3613 /* need to check that every block has at least one working mirror */ 3614 if (!enough(conf, -1)) { 3615 printk(KERN_ERR 
"md/raid10:%s: not enough operational mirrors.\n", 3616 mdname(mddev)); 3617 goto out_free_conf; 3618 } 3619 3620 if (conf->reshape_progress != MaxSector) { 3621 /* must ensure that shape change is supported */ 3622 if (conf->geo.far_copies != 1 && 3623 conf->geo.far_offset == 0) 3624 goto out_free_conf; 3625 if (conf->prev.far_copies != 1 && 3626 conf->prev.far_offset == 0) 3627 goto out_free_conf; 3628 } 3629 3630 mddev->degraded = 0; 3631 for (i = 0; 3632 i < conf->geo.raid_disks 3633 || i < conf->prev.raid_disks; 3634 i++) { 3635 3636 disk = conf->mirrors + i; 3637 3638 if (!disk->rdev && disk->replacement) { 3639 /* The replacement is all we have - use it */ 3640 disk->rdev = disk->replacement; 3641 disk->replacement = NULL; 3642 clear_bit(Replacement, &disk->rdev->flags); 3643 } 3644 3645 if (!disk->rdev || 3646 !test_bit(In_sync, &disk->rdev->flags)) { 3647 disk->head_position = 0; 3648 mddev->degraded++; 3649 if (disk->rdev && 3650 disk->rdev->saved_raid_disk < 0) 3651 conf->fullsync = 1; 3652 } 3653 disk->recovery_disabled = mddev->recovery_disabled - 1; 3654 } 3655 3656 if (mddev->recovery_cp != MaxSector) 3657 printk(KERN_NOTICE "md/raid10:%s: not clean" 3658 " -- starting background reconstruction\n", 3659 mdname(mddev)); 3660 printk(KERN_INFO 3661 "md/raid10:%s: active with %d out of %d devices\n", 3662 mdname(mddev), conf->geo.raid_disks - mddev->degraded, 3663 conf->geo.raid_disks); 3664 /* 3665 * Ok, everything is just fine now 3666 */ 3667 mddev->dev_sectors = conf->dev_sectors; 3668 size = raid10_size(mddev, 0, 0); 3669 md_set_array_sectors(mddev, size); 3670 mddev->resync_max_sectors = size; 3671 3672 if (mddev->queue) { 3673 int stripe = conf->geo.raid_disks * 3674 ((mddev->chunk_sectors << 9) / PAGE_SIZE); 3675 3676 /* Calculate max read-ahead size. 3677 * We need to readahead at least twice a whole stripe.... 3678 * maybe... 
3679 */ 3680 stripe /= conf->geo.near_copies; 3681 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 3682 mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 3683 } 3684 3685 if (md_integrity_register(mddev)) 3686 goto out_free_conf; 3687 3688 if (conf->reshape_progress != MaxSector) { 3689 unsigned long before_length, after_length; 3690 3691 before_length = ((1 << conf->prev.chunk_shift) * 3692 conf->prev.far_copies); 3693 after_length = ((1 << conf->geo.chunk_shift) * 3694 conf->geo.far_copies); 3695 3696 if (max(before_length, after_length) > min_offset_diff) { 3697 /* This cannot work */ 3698 printk("md/raid10: offset difference not enough to continue reshape\n"); 3699 goto out_free_conf; 3700 } 3701 conf->offset_diff = min_offset_diff; 3702 3703 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 3704 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 3705 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 3706 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 3707 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 3708 "reshape"); 3709 } 3710 3711 return 0; 3712 3713 out_free_conf: 3714 md_unregister_thread(&mddev->thread); 3715 mempool_destroy(conf->r10bio_pool); 3716 safe_put_page(conf->tmppage); 3717 kfree(conf->mirrors); 3718 kfree(conf); 3719 mddev->private = NULL; 3720 out: 3721 return -EIO; 3722 } 3723 3724 static void raid10_free(struct mddev *mddev, void *priv) 3725 { 3726 struct r10conf *conf = priv; 3727 3728 mempool_destroy(conf->r10bio_pool); 3729 safe_put_page(conf->tmppage); 3730 kfree(conf->mirrors); 3731 kfree(conf->mirrors_old); 3732 kfree(conf->mirrors_new); 3733 kfree(conf); 3734 } 3735 3736 static void raid10_quiesce(struct mddev *mddev, int state) 3737 { 3738 struct r10conf *conf = mddev->private; 3739 3740 switch(state) { 3741 case 1: 3742 raise_barrier(conf, 0); 3743 break; 3744 case 0: 3745 lower_barrier(conf); 3746 break; 3747 } 3748 } 3749 3750 static int raid10_resize(struct mddev *mddev, sector_t sectors) 3751 { 3752 /* Resize of 'far' arrays is not supported. 3753 * For 'near' and 'offset' arrays we can set the 3754 * number of sectors used to be an appropriate multiple 3755 * of the chunk size. 3756 * For 'offset', this is far_copies*chunksize. 3757 * For 'near' the multiplier is the LCM of 3758 * near_copies and raid_disks. 3759 * So if far_copies > 1 && !far_offset, fail. 3760 * Else find LCM(raid_disks, near_copies)*far_copies and 3761 * multiply by chunk_size. Then round to this number.
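 * (Illustrative only: a 'near' array with raid_disks = 5, near_copies = 2,
 * far_copies = 1 and 512KiB chunks rounds to LCM(5,2) = 10 chunks, i.e.
 * 5MiB; an 'offset' array with far_copies = 2 rounds to 2 chunks, 1MiB.)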
3762 * This is mostly done by raid10_size() 3763 */ 3764 struct r10conf *conf = mddev->private; 3765 sector_t oldsize, size; 3766 3767 if (mddev->reshape_position != MaxSector) 3768 return -EBUSY; 3769 3770 if (conf->geo.far_copies > 1 && !conf->geo.far_offset) 3771 return -EINVAL; 3772 3773 oldsize = raid10_size(mddev, 0, 0); 3774 size = raid10_size(mddev, sectors, 0); 3775 if (mddev->external_size && 3776 mddev->array_sectors > size) 3777 return -EINVAL; 3778 if (mddev->bitmap) { 3779 int ret = bitmap_resize(mddev->bitmap, size, 0, 0); 3780 if (ret) 3781 return ret; 3782 } 3783 md_set_array_sectors(mddev, size); 3784 if (mddev->queue) { 3785 set_capacity(mddev->gendisk, mddev->array_sectors); 3786 revalidate_disk(mddev->gendisk); 3787 } 3788 if (sectors > mddev->dev_sectors && 3789 mddev->recovery_cp > oldsize) { 3790 mddev->recovery_cp = oldsize; 3791 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3792 } 3793 calc_sectors(conf, sectors); 3794 mddev->dev_sectors = conf->dev_sectors; 3795 mddev->resync_max_sectors = size; 3796 return 0; 3797 } 3798 3799 static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs) 3800 { 3801 struct md_rdev *rdev; 3802 struct r10conf *conf; 3803 3804 if (mddev->degraded > 0) { 3805 printk(KERN_ERR "md/raid10:%s: Error: degraded raid0!\n", 3806 mdname(mddev)); 3807 return ERR_PTR(-EINVAL); 3808 } 3809 sector_div(size, devs); 3810 3811 /* Set new parameters */ 3812 mddev->new_level = 10; 3813 /* new layout: far_copies = 1, near_copies = 2 */ 3814 mddev->new_layout = (1<<8) + 2; 3815 mddev->new_chunk_sectors = mddev->chunk_sectors; 3816 mddev->delta_disks = mddev->raid_disks; 3817 mddev->raid_disks *= 2; 3818 /* make sure it will be not marked as dirty */ 3819 mddev->recovery_cp = MaxSector; 3820 mddev->dev_sectors = size; 3821 3822 conf = setup_conf(mddev); 3823 if (!IS_ERR(conf)) { 3824 rdev_for_each(rdev, mddev) 3825 if (rdev->raid_disk >= 0) { 3826 rdev->new_raid_disk = rdev->raid_disk * 2; 3827 rdev->sectors = size; 3828 } 3829 conf->barrier = 1; 3830 } 3831 3832 return conf; 3833 } 3834 3835 static void *raid10_takeover(struct mddev *mddev) 3836 { 3837 struct r0conf *raid0_conf; 3838 3839 /* raid10 can take over: 3840 * raid0 - providing it has only two drives 3841 */ 3842 if (mddev->level == 0) { 3843 /* for raid0 takeover only one zone is supported */ 3844 raid0_conf = mddev->private; 3845 if (raid0_conf->nr_strip_zones > 1) { 3846 printk(KERN_ERR "md/raid10:%s: cannot takeover raid 0" 3847 " with more than one zone.\n", 3848 mdname(mddev)); 3849 return ERR_PTR(-EINVAL); 3850 } 3851 return raid10_takeover_raid0(mddev, 3852 raid0_conf->strip_zone->zone_end, 3853 raid0_conf->strip_zone->nb_dev); 3854 } 3855 return ERR_PTR(-EINVAL); 3856 } 3857 3858 static int raid10_check_reshape(struct mddev *mddev) 3859 { 3860 /* Called when there is a request to change 3861 * - layout (to ->new_layout) 3862 * - chunk size (to ->new_chunk_sectors) 3863 * - raid_disks (by delta_disks) 3864 * or when trying to restart a reshape that was ongoing. 3865 * 3866 * We need to validate the request and possibly allocate 3867 * space if that might be an issue later. 3868 * 3869 * Currently we reject any reshape of a 'far' mode array, 3870 * allow chunk size to change if new is generally acceptable, 3871 * allow raid_disks to increase, and allow 3872 * a switch between 'near' mode and 'offset' mode. 
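 * For instance (illustrative): a 4-disk near=2 array may grow to 6 disks,
 * change its chunk size, or switch to an offset layout with two copies,
 * but a request that changes the total number of copies, or that targets
 * a 'far' (non-offset) layout, is rejected.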
3873 */
3874 struct r10conf *conf = mddev->private;
3875 struct geom geo;
3876
3877 if (conf->geo.far_copies != 1 && !conf->geo.far_offset)
3878 return -EINVAL;
3879
3880 if (setup_geo(&geo, mddev, geo_start) != conf->copies)
3881 /* mustn't change number of copies */
3882 return -EINVAL;
3883 if (geo.far_copies > 1 && !geo.far_offset)
3884 /* Cannot switch to 'far' mode */
3885 return -EINVAL;
3886
3887 if (mddev->array_sectors & geo.chunk_mask)
3888 /* not factor of array size */
3889 return -EINVAL;
3890
3891 if (!enough(conf, -1))
3892 return -EINVAL;
3893
3894 kfree(conf->mirrors_new);
3895 conf->mirrors_new = NULL;
3896 if (mddev->delta_disks > 0) {
3897 /* allocate new 'mirrors' list */
3898 conf->mirrors_new = kzalloc(
3899 sizeof(struct raid10_info)
3900 *(mddev->raid_disks +
3901 mddev->delta_disks),
3902 GFP_KERNEL);
3903 if (!conf->mirrors_new)
3904 return -ENOMEM;
3905 }
3906 return 0;
3907 }
3908
3909 /*
3910 * Need to check if array has failed when deciding whether to:
3911 * - start an array
3912 * - remove non-faulty devices
3913 * - add a spare
3914 * - allow a reshape
3915 * This determination is simple when no reshape is happening.
3916 * However if there is a reshape, we need to carefully check
3917 * both the before and after sections.
3918 * This is because some failed devices may only affect one
3919 * of the two sections, and some non-in_sync devices may
3920 * be in sync in the section most affected by failed devices.
3921 */
3922 static int calc_degraded(struct r10conf *conf)
3923 {
3924 int degraded, degraded2;
3925 int i;
3926
3927 rcu_read_lock();
3928 degraded = 0;
3929 /* 'prev' section first */
3930 for (i = 0; i < conf->prev.raid_disks; i++) {
3931 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
3932 if (!rdev || test_bit(Faulty, &rdev->flags))
3933 degraded++;
3934 else if (!test_bit(In_sync, &rdev->flags))
3935 /* When we can reduce the number of devices in
3936 * an array, this might not contribute to
3937 * 'degraded'. It does now.
3938 */
3939 degraded++;
3940 }
3941 rcu_read_unlock();
3942 if (conf->geo.raid_disks == conf->prev.raid_disks)
3943 return degraded;
3944 rcu_read_lock();
3945 degraded2 = 0;
3946 for (i = 0; i < conf->geo.raid_disks; i++) {
3947 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
3948 if (!rdev || test_bit(Faulty, &rdev->flags))
3949 degraded2++;
3950 else if (!test_bit(In_sync, &rdev->flags)) {
3951 /* If reshape is increasing the number of devices,
3952 * this section has already been recovered, so
3953 * it doesn't contribute to degraded.
3954 * else it does.
3955 */
3956 if (conf->geo.raid_disks <= conf->prev.raid_disks)
3957 degraded2++;
3958 }
3959 }
3960 rcu_read_unlock();
3961 if (degraded2 > degraded)
3962 return degraded2;
3963 return degraded;
3964 }
3965
3966 static int raid10_start_reshape(struct mddev *mddev)
3967 {
3968 /* A 'reshape' has been requested. This commits
3969 * the various 'new' fields and sets MD_RECOVERY_RESHAPE.
3970 * This also checks if there are enough spares and adds them
3971 * to the array.
3972 * We currently require enough spares to make the final
3973 * array non-degraded. We also require that the difference
3974 * between old and new data_offset - on each device - is
3975 * enough that we never risk over-writing.
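 * For example, with 512K chunks and far_copies=1 both before and
 * after, data_offset and new_data_offset must differ by at least
 * one chunk (512K) on every member, in the direction of the
 * reshape; otherwise the max(before_length, after_length) test
 * below fails the request with -EINVAL.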
3976 */
3977
3978 unsigned long before_length, after_length;
3979 sector_t min_offset_diff = 0;
3980 int first = 1;
3981 struct geom new;
3982 struct r10conf *conf = mddev->private;
3983 struct md_rdev *rdev;
3984 int spares = 0;
3985 int ret;
3986
3987 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3988 return -EBUSY;
3989
3990 if (setup_geo(&new, mddev, geo_start) != conf->copies)
3991 return -EINVAL;
3992
3993 before_length = ((1 << conf->prev.chunk_shift) *
3994 conf->prev.far_copies);
3995 after_length = ((1 << conf->geo.chunk_shift) *
3996 conf->geo.far_copies);
3997
3998 rdev_for_each(rdev, mddev) {
3999 if (!test_bit(In_sync, &rdev->flags)
4000 && !test_bit(Faulty, &rdev->flags))
4001 spares++;
4002 if (rdev->raid_disk >= 0) {
4003 long long diff = (rdev->new_data_offset
4004 - rdev->data_offset);
4005 if (!mddev->reshape_backwards)
4006 diff = -diff;
4007 if (diff < 0)
4008 diff = 0;
4009 if (first || diff < min_offset_diff)
4010 min_offset_diff = diff;
4011 }
4012 }
4013
4014 if (max(before_length, after_length) > min_offset_diff)
4015 return -EINVAL;
4016
4017 if (spares < mddev->delta_disks)
4018 return -EINVAL;
4019
4020 conf->offset_diff = min_offset_diff;
4021 spin_lock_irq(&conf->device_lock);
4022 if (conf->mirrors_new) {
4023 memcpy(conf->mirrors_new, conf->mirrors,
4024 sizeof(struct raid10_info)*conf->prev.raid_disks);
4025 smp_mb();
4026 kfree(conf->mirrors_old);
4027 conf->mirrors_old = conf->mirrors;
4028 conf->mirrors = conf->mirrors_new;
4029 conf->mirrors_new = NULL;
4030 }
4031 setup_geo(&conf->geo, mddev, geo_start);
4032 smp_mb();
4033 if (mddev->reshape_backwards) {
4034 sector_t size = raid10_size(mddev, 0, 0);
4035 if (size < mddev->array_sectors) {
4036 spin_unlock_irq(&conf->device_lock);
4037 printk(KERN_ERR "md/raid10:%s: array size must be reduced before reducing the number of disks\n",
4038 mdname(mddev));
4039 return -EINVAL;
4040 }
4041 mddev->resync_max_sectors = size;
4042 conf->reshape_progress = size;
4043 } else
4044 conf->reshape_progress = 0;
4045 conf->reshape_safe = conf->reshape_progress;
4046 spin_unlock_irq(&conf->device_lock);
4047
4048 if (mddev->delta_disks && mddev->bitmap) {
4049 ret = bitmap_resize(mddev->bitmap,
4050 raid10_size(mddev, 0,
4051 conf->geo.raid_disks),
4052 0, 0);
4053 if (ret)
4054 goto abort;
4055 }
4056 if (mddev->delta_disks > 0) {
4057 rdev_for_each(rdev, mddev)
4058 if (rdev->raid_disk < 0 &&
4059 !test_bit(Faulty, &rdev->flags)) {
4060 if (raid10_add_disk(mddev, rdev) == 0) {
4061 if (rdev->raid_disk >=
4062 conf->prev.raid_disks)
4063 set_bit(In_sync, &rdev->flags);
4064 else
4065 rdev->recovery_offset = 0;
4066
4067 if (sysfs_link_rdev(mddev, rdev))
4068 /* Failure here is OK */;
4069 }
4070 } else if (rdev->raid_disk >= conf->prev.raid_disks
4071 && !test_bit(Faulty, &rdev->flags)) {
4072 /* This is a spare that was manually added */
4073 set_bit(In_sync, &rdev->flags);
4074 }
4075 }
4076 /* When a reshape changes the number of devices,
4077 * ->degraded is measured against the larger of the
4078 * pre and post numbers.
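 * (A failed device in a slot that exists in only one of the two
 *  geometries counts against that geometry alone, which is why
 *  calc_degraded() computes both counts and the larger one is used.)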
4079 */
4080 spin_lock_irq(&conf->device_lock);
4081 mddev->degraded = calc_degraded(conf);
4082 spin_unlock_irq(&conf->device_lock);
4083 mddev->raid_disks = conf->geo.raid_disks;
4084 mddev->reshape_position = conf->reshape_progress;
4085 set_bit(MD_CHANGE_DEVS, &mddev->flags);
4086
4087 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4088 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4089 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
4090 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4091 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4092
4093 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
4094 "reshape");
4095 if (!mddev->sync_thread) {
4096 ret = -EAGAIN;
4097 goto abort;
4098 }
4099 conf->reshape_checkpoint = jiffies;
4100 md_wakeup_thread(mddev->sync_thread);
4101 md_new_event(mddev);
4102 return 0;
4103
4104 abort:
4105 mddev->recovery = 0;
4106 spin_lock_irq(&conf->device_lock);
4107 conf->geo = conf->prev;
4108 mddev->raid_disks = conf->geo.raid_disks;
4109 rdev_for_each(rdev, mddev)
4110 rdev->new_data_offset = rdev->data_offset;
4111 smp_wmb();
4112 conf->reshape_progress = MaxSector;
4113 conf->reshape_safe = MaxSector;
4114 mddev->reshape_position = MaxSector;
4115 spin_unlock_irq(&conf->device_lock);
4116 return ret;
4117 }
4118
4119 /* Calculate the last device-address that could contain
4120 * any block from the chunk that includes the array-address 's'
4121 * and report the next address.
4122 * i.e. the address returned will be chunk-aligned and after
4123 * any data that is in the chunk containing 's'.
4124 */
4125 static sector_t last_dev_address(sector_t s, struct geom *geo)
4126 {
4127 s = (s | geo->chunk_mask) + 1;
4128 s >>= geo->chunk_shift;
4129 s *= geo->near_copies;
4130 s = DIV_ROUND_UP_SECTOR_T(s, geo->raid_disks);
4131 s *= geo->far_copies;
4132 s <<= geo->chunk_shift;
4133 return s;
4134 }
4135
4136 /* Calculate the first device-address that could contain
4137 * any block from the chunk that includes the array-address 's'.
4138 * This too will be the start of a chunk.
4139 */
4140 static sector_t first_dev_address(sector_t s, struct geom *geo)
4141 {
4142 s >>= geo->chunk_shift;
4143 s *= geo->near_copies;
4144 sector_div(s, geo->raid_disks);
4145 s *= geo->far_copies;
4146 s <<= geo->chunk_shift;
4147 return s;
4148 }
4149
4150 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
4151 int *skipped)
4152 {
4153 /* We simply copy at most one chunk (smallest of old and new)
4154 * at a time, possibly less if that exceeds RESYNC_PAGES,
4155 * or we hit a bad block or something.
4156 * This might mean we pause for normal IO in the middle of
4157 * a chunk, but that is not a problem as mddev->reshape_position
4158 * can record any location.
4159 *
4160 * If we will want to write to a location that isn't
4161 * yet recorded as 'safe' (i.e. in metadata on disk) then
4162 * we need to flush all reshape requests and update the metadata.
4163 *
4164 * When reshaping forwards (e.g. to more devices), we interpret
4165 * 'safe' as the earliest block which might not have been copied
4166 * down yet. We divide this by previous stripe size and multiply
4167 * by previous stripe length to get lowest device offset that we
4168 * cannot write to yet.
4169 * We interpret 'sector_nr' as an address that we want to write to.
4170 * From this we use last_dev_address() to find where we might
4171 * write to, and first_dev_address() on the 'safe' position.
4172 * If this 'next' write position is after the 'safe' position,
4173 * we must update the metadata to increase the 'safe' position.
4174 *
4175 * When reshaping backwards, we round in the opposite direction
4176 * and perform the reverse test: next write position must not be
4177 * less than current safe position.
4178 *
4179 * In all this the minimum difference in data offsets
4180 * (conf->offset_diff - always positive) allows a bit of slack,
4181 * so next can be after 'safe', but not by more than offset_diff.
4182 *
4183 * We need to prepare all the bios here before we start any IO
4184 * to ensure the size we choose is acceptable to all devices.
4185 * That means one for each copy for write-out and an extra one for
4186 * read-in.
4187 * We store the read-in bio in ->master_bio and the others in
4188 * ->devs[x].bio and ->devs[x].repl_bio.
4189 */
4190 struct r10conf *conf = mddev->private;
4191 struct r10bio *r10_bio;
4192 sector_t next, safe, last;
4193 int max_sectors;
4194 int nr_sectors;
4195 int s;
4196 struct md_rdev *rdev;
4197 int need_flush = 0;
4198 struct bio *blist;
4199 struct bio *bio, *read_bio;
4200 int sectors_done = 0;
4201
4202 if (sector_nr == 0) {
4203 /* If restarting in the middle, skip the initial sectors */
4204 if (mddev->reshape_backwards &&
4205 conf->reshape_progress < raid10_size(mddev, 0, 0)) {
4206 sector_nr = (raid10_size(mddev, 0, 0)
4207 - conf->reshape_progress);
4208 } else if (!mddev->reshape_backwards &&
4209 conf->reshape_progress > 0)
4210 sector_nr = conf->reshape_progress;
4211 if (sector_nr) {
4212 mddev->curr_resync_completed = sector_nr;
4213 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4214 *skipped = 1;
4215 return sector_nr;
4216 }
4217 }
4218
4219 /* We don't use sector_nr to track where we are up to
4220 * as that doesn't work well for ->reshape_backwards.
4221 * So just use ->reshape_progress.
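 * (The sector_nr we are handed only ever counts upwards, which is
 *  why the sector_nr == 0 restart case above translates it from
 *  ->reshape_progress; for a backwards reshape the device addresses
 *  being copied actually decrease as the reshape proceeds.)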
4222 */ 4223 if (mddev->reshape_backwards) { 4224 /* 'next' is the earliest device address that we might 4225 * write to for this chunk in the new layout 4226 */ 4227 next = first_dev_address(conf->reshape_progress - 1, 4228 &conf->geo); 4229 4230 /* 'safe' is the last device address that we might read from 4231 * in the old layout after a restart 4232 */ 4233 safe = last_dev_address(conf->reshape_safe - 1, 4234 &conf->prev); 4235 4236 if (next + conf->offset_diff < safe) 4237 need_flush = 1; 4238 4239 last = conf->reshape_progress - 1; 4240 sector_nr = last & ~(sector_t)(conf->geo.chunk_mask 4241 & conf->prev.chunk_mask); 4242 if (sector_nr + RESYNC_BLOCK_SIZE/512 < last) 4243 sector_nr = last + 1 - RESYNC_BLOCK_SIZE/512; 4244 } else { 4245 /* 'next' is after the last device address that we 4246 * might write to for this chunk in the new layout 4247 */ 4248 next = last_dev_address(conf->reshape_progress, &conf->geo); 4249 4250 /* 'safe' is the earliest device address that we might 4251 * read from in the old layout after a restart 4252 */ 4253 safe = first_dev_address(conf->reshape_safe, &conf->prev); 4254 4255 /* Need to update metadata if 'next' might be beyond 'safe' 4256 * as that would possibly corrupt data 4257 */ 4258 if (next > safe + conf->offset_diff) 4259 need_flush = 1; 4260 4261 sector_nr = conf->reshape_progress; 4262 last = sector_nr | (conf->geo.chunk_mask 4263 & conf->prev.chunk_mask); 4264 4265 if (sector_nr + RESYNC_BLOCK_SIZE/512 <= last) 4266 last = sector_nr + RESYNC_BLOCK_SIZE/512 - 1; 4267 } 4268 4269 if (need_flush || 4270 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { 4271 /* Need to update reshape_position in metadata */ 4272 wait_barrier(conf); 4273 mddev->reshape_position = conf->reshape_progress; 4274 if (mddev->reshape_backwards) 4275 mddev->curr_resync_completed = raid10_size(mddev, 0, 0) 4276 - conf->reshape_progress; 4277 else 4278 mddev->curr_resync_completed = conf->reshape_progress; 4279 conf->reshape_checkpoint = jiffies; 4280 set_bit(MD_CHANGE_DEVS, &mddev->flags); 4281 md_wakeup_thread(mddev->thread); 4282 wait_event(mddev->sb_wait, mddev->flags == 0 || 4283 test_bit(MD_RECOVERY_INTR, &mddev->recovery)); 4284 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 4285 allow_barrier(conf); 4286 return sectors_done; 4287 } 4288 conf->reshape_safe = mddev->reshape_position; 4289 allow_barrier(conf); 4290 } 4291 4292 read_more: 4293 /* Now schedule reads for blocks from sector_nr to last */ 4294 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); 4295 r10_bio->state = 0; 4296 raise_barrier(conf, sectors_done != 0); 4297 atomic_set(&r10_bio->remaining, 0); 4298 r10_bio->mddev = mddev; 4299 r10_bio->sector = sector_nr; 4300 set_bit(R10BIO_IsReshape, &r10_bio->state); 4301 r10_bio->sectors = last - sector_nr + 1; 4302 rdev = read_balance(conf, r10_bio, &max_sectors); 4303 BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state)); 4304 4305 if (!rdev) { 4306 /* Cannot read from here, so need to record bad blocks 4307 * on all the target devices. 
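 * (Recording those bad blocks is not implemented yet - see the
 *  FIXME below - so for now the reshape is simply aborted by
 *  setting MD_RECOVERY_INTR and returning.)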
4308 */ 4309 // FIXME 4310 mempool_free(r10_bio, conf->r10buf_pool); 4311 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4312 return sectors_done; 4313 } 4314 4315 read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev); 4316 4317 read_bio->bi_bdev = rdev->bdev; 4318 read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr 4319 + rdev->data_offset); 4320 read_bio->bi_private = r10_bio; 4321 read_bio->bi_end_io = end_sync_read; 4322 bio_set_op_attrs(read_bio, REQ_OP_READ, 0); 4323 read_bio->bi_flags &= (~0UL << BIO_RESET_BITS); 4324 read_bio->bi_error = 0; 4325 read_bio->bi_vcnt = 0; 4326 read_bio->bi_iter.bi_size = 0; 4327 r10_bio->master_bio = read_bio; 4328 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum; 4329 4330 /* Now find the locations in the new layout */ 4331 __raid10_find_phys(&conf->geo, r10_bio); 4332 4333 blist = read_bio; 4334 read_bio->bi_next = NULL; 4335 4336 for (s = 0; s < conf->copies*2; s++) { 4337 struct bio *b; 4338 int d = r10_bio->devs[s/2].devnum; 4339 struct md_rdev *rdev2; 4340 if (s&1) { 4341 rdev2 = conf->mirrors[d].replacement; 4342 b = r10_bio->devs[s/2].repl_bio; 4343 } else { 4344 rdev2 = conf->mirrors[d].rdev; 4345 b = r10_bio->devs[s/2].bio; 4346 } 4347 if (!rdev2 || test_bit(Faulty, &rdev2->flags)) 4348 continue; 4349 4350 bio_reset(b); 4351 b->bi_bdev = rdev2->bdev; 4352 b->bi_iter.bi_sector = r10_bio->devs[s/2].addr + 4353 rdev2->new_data_offset; 4354 b->bi_private = r10_bio; 4355 b->bi_end_io = end_reshape_write; 4356 bio_set_op_attrs(b, REQ_OP_WRITE, 0); 4357 b->bi_next = blist; 4358 blist = b; 4359 } 4360 4361 /* Now add as many pages as possible to all of these bios. */ 4362 4363 nr_sectors = 0; 4364 for (s = 0 ; s < max_sectors; s += PAGE_SIZE >> 9) { 4365 struct page *page = r10_bio->devs[0].bio->bi_io_vec[s/(PAGE_SIZE>>9)].bv_page; 4366 int len = (max_sectors - s) << 9; 4367 if (len > PAGE_SIZE) 4368 len = PAGE_SIZE; 4369 for (bio = blist; bio ; bio = bio->bi_next) { 4370 struct bio *bio2; 4371 if (bio_add_page(bio, page, len, 0)) 4372 continue; 4373 4374 /* Didn't fit, must stop */ 4375 for (bio2 = blist; 4376 bio2 && bio2 != bio; 4377 bio2 = bio2->bi_next) { 4378 /* Remove last page from this bio */ 4379 bio2->bi_vcnt--; 4380 bio2->bi_iter.bi_size -= len; 4381 bio_clear_flag(bio2, BIO_SEG_VALID); 4382 } 4383 goto bio_full; 4384 } 4385 sector_nr += len >> 9; 4386 nr_sectors += len >> 9; 4387 } 4388 bio_full: 4389 r10_bio->sectors = nr_sectors; 4390 4391 /* Now submit the read */ 4392 md_sync_acct(read_bio->bi_bdev, r10_bio->sectors); 4393 atomic_inc(&r10_bio->remaining); 4394 read_bio->bi_next = NULL; 4395 generic_make_request(read_bio); 4396 sector_nr += nr_sectors; 4397 sectors_done += nr_sectors; 4398 if (sector_nr <= last) 4399 goto read_more; 4400 4401 /* Now that we have done the whole section we can 4402 * update reshape_progress 4403 */ 4404 if (mddev->reshape_backwards) 4405 conf->reshape_progress -= sectors_done; 4406 else 4407 conf->reshape_progress += sectors_done; 4408 4409 return sectors_done; 4410 } 4411 4412 static void end_reshape_request(struct r10bio *r10_bio); 4413 static int handle_reshape_read_error(struct mddev *mddev, 4414 struct r10bio *r10_bio); 4415 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio) 4416 { 4417 /* Reshape read completed. Hopefully we have a block 4418 * to write out. 4419 * If we got a read error then we do sync 1-page reads from 4420 * elsewhere until we find the data - or give up. 
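 * Once the data is known to be good, one write is submitted per
 * copy in the new layout (and one per active replacement device),
 * using the bios that reshape_request() prepared in ->devs[].bio
 * and ->devs[].repl_bio.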
4421 */ 4422 struct r10conf *conf = mddev->private; 4423 int s; 4424 4425 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) 4426 if (handle_reshape_read_error(mddev, r10_bio) < 0) { 4427 /* Reshape has been aborted */ 4428 md_done_sync(mddev, r10_bio->sectors, 0); 4429 return; 4430 } 4431 4432 /* We definitely have the data in the pages, schedule the 4433 * writes. 4434 */ 4435 atomic_set(&r10_bio->remaining, 1); 4436 for (s = 0; s < conf->copies*2; s++) { 4437 struct bio *b; 4438 int d = r10_bio->devs[s/2].devnum; 4439 struct md_rdev *rdev; 4440 if (s&1) { 4441 rdev = conf->mirrors[d].replacement; 4442 b = r10_bio->devs[s/2].repl_bio; 4443 } else { 4444 rdev = conf->mirrors[d].rdev; 4445 b = r10_bio->devs[s/2].bio; 4446 } 4447 if (!rdev || test_bit(Faulty, &rdev->flags)) 4448 continue; 4449 atomic_inc(&rdev->nr_pending); 4450 md_sync_acct(b->bi_bdev, r10_bio->sectors); 4451 atomic_inc(&r10_bio->remaining); 4452 b->bi_next = NULL; 4453 generic_make_request(b); 4454 } 4455 end_reshape_request(r10_bio); 4456 } 4457 4458 static void end_reshape(struct r10conf *conf) 4459 { 4460 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) 4461 return; 4462 4463 spin_lock_irq(&conf->device_lock); 4464 conf->prev = conf->geo; 4465 md_finish_reshape(conf->mddev); 4466 smp_wmb(); 4467 conf->reshape_progress = MaxSector; 4468 conf->reshape_safe = MaxSector; 4469 spin_unlock_irq(&conf->device_lock); 4470 4471 /* read-ahead size must cover two whole stripes, which is 4472 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices 4473 */ 4474 if (conf->mddev->queue) { 4475 int stripe = conf->geo.raid_disks * 4476 ((conf->mddev->chunk_sectors << 9) / PAGE_SIZE); 4477 stripe /= conf->geo.near_copies; 4478 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 4479 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 4480 } 4481 conf->fullsync = 0; 4482 } 4483 4484 static int handle_reshape_read_error(struct mddev *mddev, 4485 struct r10bio *r10_bio) 4486 { 4487 /* Use sync reads to get the blocks from somewhere else */ 4488 int sectors = r10_bio->sectors; 4489 struct r10conf *conf = mddev->private; 4490 struct { 4491 struct r10bio r10_bio; 4492 struct r10dev devs[conf->copies]; 4493 } on_stack; 4494 struct r10bio *r10b = &on_stack.r10_bio; 4495 int slot = 0; 4496 int idx = 0; 4497 struct bio_vec *bvec = r10_bio->master_bio->bi_io_vec; 4498 4499 r10b->sector = r10_bio->sector; 4500 __raid10_find_phys(&conf->prev, r10b); 4501 4502 while (sectors) { 4503 int s = sectors; 4504 int success = 0; 4505 int first_slot = slot; 4506 4507 if (s > (PAGE_SIZE >> 9)) 4508 s = PAGE_SIZE >> 9; 4509 4510 while (!success) { 4511 int d = r10b->devs[slot].devnum; 4512 struct md_rdev *rdev = conf->mirrors[d].rdev; 4513 sector_t addr; 4514 if (rdev == NULL || 4515 test_bit(Faulty, &rdev->flags) || 4516 !test_bit(In_sync, &rdev->flags)) 4517 goto failed; 4518 4519 addr = r10b->devs[slot].addr + idx * PAGE_SIZE; 4520 success = sync_page_io(rdev, 4521 addr, 4522 s << 9, 4523 bvec[idx].bv_page, 4524 REQ_OP_READ, 0, false); 4525 if (success) 4526 break; 4527 failed: 4528 slot++; 4529 if (slot >= conf->copies) 4530 slot = 0; 4531 if (slot == first_slot) 4532 break; 4533 } 4534 if (!success) { 4535 /* couldn't read this block, must give up */ 4536 set_bit(MD_RECOVERY_INTR, 4537 &mddev->recovery); 4538 return -EIO; 4539 } 4540 sectors -= s; 4541 idx++; 4542 } 4543 return 0; 4544 } 4545 4546 static void end_reshape_write(struct bio *bio) 4547 { 4548 struct r10bio *r10_bio = bio->bi_private; 4549 struct mddev 
*mddev = r10_bio->mddev; 4550 struct r10conf *conf = mddev->private; 4551 int d; 4552 int slot; 4553 int repl; 4554 struct md_rdev *rdev = NULL; 4555 4556 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); 4557 if (repl) 4558 rdev = conf->mirrors[d].replacement; 4559 if (!rdev) { 4560 smp_mb(); 4561 rdev = conf->mirrors[d].rdev; 4562 } 4563 4564 if (bio->bi_error) { 4565 /* FIXME should record badblock */ 4566 md_error(mddev, rdev); 4567 } 4568 4569 rdev_dec_pending(rdev, mddev); 4570 end_reshape_request(r10_bio); 4571 } 4572 4573 static void end_reshape_request(struct r10bio *r10_bio) 4574 { 4575 if (!atomic_dec_and_test(&r10_bio->remaining)) 4576 return; 4577 md_done_sync(r10_bio->mddev, r10_bio->sectors, 1); 4578 bio_put(r10_bio->master_bio); 4579 put_buf(r10_bio); 4580 } 4581 4582 static void raid10_finish_reshape(struct mddev *mddev) 4583 { 4584 struct r10conf *conf = mddev->private; 4585 4586 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 4587 return; 4588 4589 if (mddev->delta_disks > 0) { 4590 sector_t size = raid10_size(mddev, 0, 0); 4591 md_set_array_sectors(mddev, size); 4592 if (mddev->recovery_cp > mddev->resync_max_sectors) { 4593 mddev->recovery_cp = mddev->resync_max_sectors; 4594 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4595 } 4596 mddev->resync_max_sectors = size; 4597 if (mddev->queue) { 4598 set_capacity(mddev->gendisk, mddev->array_sectors); 4599 revalidate_disk(mddev->gendisk); 4600 } 4601 } else { 4602 int d; 4603 for (d = conf->geo.raid_disks ; 4604 d < conf->geo.raid_disks - mddev->delta_disks; 4605 d++) { 4606 struct md_rdev *rdev = conf->mirrors[d].rdev; 4607 if (rdev) 4608 clear_bit(In_sync, &rdev->flags); 4609 rdev = conf->mirrors[d].replacement; 4610 if (rdev) 4611 clear_bit(In_sync, &rdev->flags); 4612 } 4613 } 4614 mddev->layout = mddev->new_layout; 4615 mddev->chunk_sectors = 1 << conf->geo.chunk_shift; 4616 mddev->reshape_position = MaxSector; 4617 mddev->delta_disks = 0; 4618 mddev->reshape_backwards = 0; 4619 } 4620 4621 static struct md_personality raid10_personality = 4622 { 4623 .name = "raid10", 4624 .level = 10, 4625 .owner = THIS_MODULE, 4626 .make_request = raid10_make_request, 4627 .run = raid10_run, 4628 .free = raid10_free, 4629 .status = raid10_status, 4630 .error_handler = raid10_error, 4631 .hot_add_disk = raid10_add_disk, 4632 .hot_remove_disk= raid10_remove_disk, 4633 .spare_active = raid10_spare_active, 4634 .sync_request = raid10_sync_request, 4635 .quiesce = raid10_quiesce, 4636 .size = raid10_size, 4637 .resize = raid10_resize, 4638 .takeover = raid10_takeover, 4639 .check_reshape = raid10_check_reshape, 4640 .start_reshape = raid10_start_reshape, 4641 .finish_reshape = raid10_finish_reshape, 4642 .congested = raid10_congested, 4643 }; 4644 4645 static int __init raid_init(void) 4646 { 4647 return register_md_personality(&raid10_personality); 4648 } 4649 4650 static void raid_exit(void) 4651 { 4652 unregister_md_personality(&raid10_personality); 4653 } 4654 4655 module_init(raid_init); 4656 module_exit(raid_exit); 4657 MODULE_LICENSE("GPL"); 4658 MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD"); 4659 MODULE_ALIAS("md-personality-9"); /* RAID10 */ 4660 MODULE_ALIAS("md-raid10"); 4661 MODULE_ALIAS("md-level-10"); 4662 4663 module_param(max_queued_requests, int, S_IRUGO|S_IWUSR); 4664
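/*
 * Note: since max_queued_requests is declared with S_IRUGO|S_IWUSR
 * above, it is normally exposed read/write at runtime as
 * /sys/module/raid10/parameters/max_queued_requests, and can also
 * be set at load time with the max_queued_requests= module option.
 */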