/*
 * raid10.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 2000-2004 Neil Brown
 *
 * RAID-10 support for md.
 *
 * Based on code in raid1.c.  See raid1.c for further copyright information.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include "md.h"
#include "raid10.h"
#include "raid0.h"
#include "bitmap.h"

/*
 * RAID10 provides a combination of RAID0 and RAID1 functionality.
 * The layout of data is defined by
 *    chunk_size
 *    raid_disks
 *    near_copies (stored in low byte of layout)
 *    far_copies (stored in second byte of layout)
 *    far_offset (stored in bit 16 of layout)
 *    use_far_sets (stored in bit 17 of layout)
 *
 * The data to be stored is divided into chunks using chunksize.  Each device
 * is divided into far_copies sections.  In each section, chunks are laid out
 * in a style similar to raid0, but near_copies copies of each chunk are stored
 * (each on a different drive).  The starting device for each section is offset
 * near_copies from the starting device of the previous section.  Thus there
 * are (near_copies * far_copies) copies of each chunk, and each is on a
 * different drive.  near_copies and far_copies must be at least one, and
 * their product is at most raid_disks.
 *
 * If far_offset is true, then the far_copies are handled a bit differently.
 * The copies are still in different stripes, but instead of being very far
 * apart on disk, they are adjacent stripes.
 *
 * The far and offset algorithms are handled slightly differently if
 * 'use_far_sets' is true.  In this case, the array's devices are grouped into
 * sets that are (near_copies * far_copies) in size.  The far copied stripes
 * are still shifted by 'near_copies' devices, but this shifting stays confined
 * to the set rather than the entire array.  This is done to improve the number
 * of device combinations that can fail without causing the array to fail.
 * Example 'far' algorithm w/o 'use_far_sets' (each letter represents a chunk
 * on a device):
 *    A B C D    A B C D E
 *      ...         ...
 *    D A B C    E A B C D
 * Example 'far' algorithm w/ 'use_far_sets' enabled (sets illustrated w/ []'s):
 *    [A B] [C D]    [A B] [C D E]
 *    |...| |...|    |...| | ... |
 *    [B A] [D C]    [B A] [E C D]
 */

/*
 * Number of guaranteed r10bios in case of extreme VM load:
 */
#define NR_RAID10_BIOS 256

/* When we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error.  To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer.
 */
#define IO_BLOCKED ((struct bio *)1)
/* When we successfully write to a known bad-block, we need to remove the
 * bad-block marking, which must be done from process context.  So we record
 * the success by setting devs[n].bio to IO_MADE_GOOD.
 */
#define IO_MADE_GOOD ((struct bio *)2)

#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
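
/* Added note: a devs[n].bio pointer can hold one of four things: NULL (no IO
 * issued or needed), a real bio pointer, or the special markers IO_BLOCKED
 * and IO_MADE_GOOD above.  All three non-bio values are numerically below any
 * valid kernel pointer, so BIO_SPECIAL() lets completion paths (for example
 * put_all_bios() below) skip bio_put() for slots that do not currently hold
 * a real bio.
 */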

/* When there are this many requests queued to be written by
 * the raid10 thread, we become 'congested' to provide back-pressure
 * for writeback.
 */
static int max_queued_requests = 1024;

static void allow_barrier(struct r10conf *conf);
static void lower_barrier(struct r10conf *conf);
static int _enough(struct r10conf *conf, int previous, int ignore);
static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
				int *skipped);
static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
static void end_reshape_write(struct bio *bio, int error);
static void end_reshape(struct r10conf *conf);

static void *r10bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct r10conf *conf = data;
	int size = offsetof(struct r10bio, devs[conf->copies]);

	/* allocate a r10bio with room for raid_disks entries in the
	 * bios array */
	return kzalloc(size, gfp_flags);
}

static void r10bio_pool_free(void *r10_bio, void *data)
{
	kfree(r10_bio);
}

/* Maximum size of each resync request */
#define RESYNC_BLOCK_SIZE (64*1024)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
/* amount of memory to reserve for resync requests */
#define RESYNC_WINDOW (1024*1024)
/* maximum number of concurrent requests, memory permitting */
#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)

/*
 * When performing a resync, we need to read and compare, so
 * we need as many pages as there are copies.
 * When performing a recovery, we need 2 bios, one for read,
 * one for write (we recover only one drive per r10buf).
 */
static void *r10buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct r10conf *conf = data;
	struct page *page;
	struct r10bio *r10_bio;
	struct bio *bio;
	int i, j;
	int nalloc;

	r10_bio = r10bio_pool_alloc(gfp_flags, conf);
	if (!r10_bio)
		return NULL;

	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
	    test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
		nalloc = conf->copies; /* resync */
	else
		nalloc = 2; /* recovery */

	/*
	 * Allocate bios.
	 */
	for (j = nalloc ; j-- ; ) {
		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r10_bio->devs[j].bio = bio;
		if (!conf->have_replacement)
			continue;
		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r10_bio->devs[j].repl_bio = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them
	 * where needed.
	 */
	for (j = 0; j < nalloc; j++) {
		struct bio *rbio = r10_bio->devs[j].repl_bio;
		bio = r10_bio->devs[j].bio;
		for (i = 0; i < RESYNC_PAGES; i++) {
			if (j > 0 && !test_bit(MD_RECOVERY_SYNC,
					       &conf->mddev->recovery)) {
				/* we can share bv_page's during recovery
				 * and reshape */
				struct bio *rbio = r10_bio->devs[0].bio;
				page = rbio->bi_io_vec[i].bv_page;
				get_page(page);
			} else
				page = alloc_page(gfp_flags);
			if (unlikely(!page))
				goto out_free_pages;

			bio->bi_io_vec[i].bv_page = page;
			if (rbio)
				rbio->bi_io_vec[i].bv_page = page;
		}
	}

	return r10_bio;

out_free_pages:
	for ( ; i > 0 ; i--)
		safe_put_page(bio->bi_io_vec[i-1].bv_page);
	while (j--)
		for (i = 0; i < RESYNC_PAGES ; i++)
			safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
	j = 0;
out_free_bio:
	for ( ; j < nalloc; j++) {
		if (r10_bio->devs[j].bio)
			bio_put(r10_bio->devs[j].bio);
		if (r10_bio->devs[j].repl_bio)
			bio_put(r10_bio->devs[j].repl_bio);
	}
	r10bio_pool_free(r10_bio, conf);
	return NULL;
}

static void r10buf_pool_free(void *__r10_bio, void *data)
{
	int i;
	struct r10conf *conf = data;
	struct r10bio *r10bio = __r10_bio;
	int j;

	for (j = 0; j < conf->copies; j++) {
		struct bio *bio = r10bio->devs[j].bio;
		if (bio) {
			for (i = 0; i < RESYNC_PAGES; i++) {
				safe_put_page(bio->bi_io_vec[i].bv_page);
				bio->bi_io_vec[i].bv_page = NULL;
			}
			bio_put(bio);
		}
		bio = r10bio->devs[j].repl_bio;
		if (bio)
			bio_put(bio);
	}
	r10bio_pool_free(r10bio, conf);
}

static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
{
	int i;

	for (i = 0; i < conf->copies; i++) {
		struct bio **bio = &r10_bio->devs[i].bio;
		if (!BIO_SPECIAL(*bio))
			bio_put(*bio);
		*bio = NULL;
		bio = &r10_bio->devs[i].repl_bio;
		if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
			bio_put(*bio);
		*bio = NULL;
	}
}

static void free_r10bio(struct r10bio *r10_bio)
{
	struct r10conf *conf = r10_bio->mddev->private;

	put_all_bios(conf, r10_bio);
	mempool_free(r10_bio, conf->r10bio_pool);
}

static void put_buf(struct r10bio *r10_bio)
{
	struct r10conf *conf = r10_bio->mddev->private;

	mempool_free(r10_bio, conf->r10buf_pool);

	lower_barrier(conf);
}

static void reschedule_retry(struct r10bio *r10_bio)
{
	unsigned long flags;
	struct mddev *mddev = r10_bio->mddev;
	struct r10conf *conf = mddev->private;

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r10_bio->retry_list, &conf->retry_list);
	conf->nr_queued++;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	/* wake up frozen array... */
	wake_up(&conf->wait_barrier);

	md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void raid_end_bio_io(struct r10bio *r10_bio)
{
	struct bio *bio = r10_bio->master_bio;
	int done;
	struct r10conf *conf = r10_bio->mddev->private;

	if (bio->bi_phys_segments) {
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		bio->bi_phys_segments--;
		done = (bio->bi_phys_segments == 0);
		spin_unlock_irqrestore(&conf->device_lock, flags);
	} else
		done = 1;
	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	if (done) {
		bio_endio(bio, 0);
		/*
		 * Wake up any possible resync thread that waits for the device
		 * to go idle.
		 */
		allow_barrier(conf);
	}
	free_r10bio(r10_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int slot, struct r10bio *r10_bio)
{
	struct r10conf *conf = r10_bio->mddev->private;

	conf->mirrors[r10_bio->devs[slot].devnum].head_position =
		r10_bio->devs[slot].addr + (r10_bio->sectors);
}

/*
 * Find the disk number which triggered the given bio.
 */
static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
			 struct bio *bio, int *slotp, int *replp)
{
	int slot;
	int repl = 0;

	for (slot = 0; slot < conf->copies; slot++) {
		if (r10_bio->devs[slot].bio == bio)
			break;
		if (r10_bio->devs[slot].repl_bio == bio) {
			repl = 1;
			break;
		}
	}

	BUG_ON(slot == conf->copies);
	update_head_pos(slot, r10_bio);

	if (slotp)
		*slotp = slot;
	if (replp)
		*replp = repl;
	return r10_bio->devs[slot].devnum;
}

static void raid10_end_read_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct r10bio *r10_bio = bio->bi_private;
	int slot, dev;
	struct md_rdev *rdev;
	struct r10conf *conf = r10_bio->mddev->private;

	slot = r10_bio->read_slot;
	dev = r10_bio->devs[slot].devnum;
	rdev = r10_bio->devs[slot].rdev;
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(slot, r10_bio);

	if (uptodate) {
		/*
		 * Set R10BIO_Uptodate in our master bio, so that
		 * we will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer fails.
		 *
		 * The 'master' represents the composite IO operation to
		 * user-side. So if something waits for IO, then it will
		 * wait for the 'master' bio.
		 */
		set_bit(R10BIO_Uptodate, &r10_bio->state);
	} else {
		/* If all other devices that store this block have
		 * failed, we want to return the error upwards rather
		 * than fail the last device.
		 * Here we redefine "uptodate" to mean "Don't want to retry".
		 */
		if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state),
			     rdev->raid_disk))
			uptodate = 1;
	}
	if (uptodate) {
		raid_end_bio_io(r10_bio);
		rdev_dec_pending(rdev, conf->mddev);
	} else {
		/*
		 * oops, read error - keep the refcount on the rdev
		 */
		char b[BDEVNAME_SIZE];
		printk_ratelimited(KERN_ERR
				   "md/raid10:%s: %s: rescheduling sector %llu\n",
				   mdname(conf->mddev),
				   bdevname(rdev->bdev, b),
				   (unsigned long long)r10_bio->sector);
		set_bit(R10BIO_ReadError, &r10_bio->state);
		reschedule_retry(r10_bio);
	}
}

static void close_write(struct r10bio *r10_bio)
{
	/* clear the bitmap if all writes complete successfully */
	bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
			r10_bio->sectors,
			!test_bit(R10BIO_Degraded, &r10_bio->state),
			0);
	md_write_end(r10_bio->mddev);
}

static void one_write_done(struct r10bio *r10_bio)
{
	if (atomic_dec_and_test(&r10_bio->remaining)) {
		if (test_bit(R10BIO_WriteError, &r10_bio->state))
			reschedule_retry(r10_bio);
		else {
			close_write(r10_bio);
			if (test_bit(R10BIO_MadeGood, &r10_bio->state))
				reschedule_retry(r10_bio);
			else
				raid_end_bio_io(r10_bio);
		}
	}
}

static void raid10_end_write_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct r10bio *r10_bio = bio->bi_private;
	int dev;
	int dec_rdev = 1;
	struct r10conf *conf = r10_bio->mddev->private;
	int slot, repl;
	struct md_rdev *rdev = NULL;

	dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);

	if (repl)
		rdev = conf->mirrors[dev].replacement;
	if (!rdev) {
		smp_rmb();
		repl = 0;
		rdev = conf->mirrors[dev].rdev;
	}
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	if (!uptodate) {
		if (repl)
			/* Never record new bad blocks to replacement,
			 * just fail it.
			 */
			md_error(rdev->mddev, rdev);
		else {
			set_bit(WriteErrorSeen, &rdev->flags);
			if (!test_and_set_bit(WantReplacement, &rdev->flags))
				set_bit(MD_RECOVERY_NEEDED,
					&rdev->mddev->recovery);
			set_bit(R10BIO_WriteError, &r10_bio->state);
			dec_rdev = 0;
		}
	} else {
		/*
		 * Set R10BIO_Uptodate in our master bio, so that
		 * we will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer fails.
		 *
		 * The 'master' represents the composite IO operation to
		 * user-side. So if something waits for IO, then it will
		 * wait for the 'master' bio.
		 */
		sector_t first_bad;
		int bad_sectors;

		/*
		 * Do not set R10BIO_Uptodate if the current device is
		 * rebuilding or Faulty. This is because we cannot use
		 * such device for properly reading the data back (we could
		 * potentially use it, if the current write would have fallen
		 * before rdev->recovery_offset, but for simplicity we don't
		 * check this here.)
		 */
		if (test_bit(In_sync, &rdev->flags) &&
		    !test_bit(Faulty, &rdev->flags))
			set_bit(R10BIO_Uptodate, &r10_bio->state);

		/* Maybe we can clear some bad blocks. */
		if (is_badblock(rdev,
				r10_bio->devs[slot].addr,
				r10_bio->sectors,
				&first_bad, &bad_sectors)) {
			bio_put(bio);
			if (repl)
				r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
			else
				r10_bio->devs[slot].bio = IO_MADE_GOOD;
			dec_rdev = 0;
			set_bit(R10BIO_MadeGood, &r10_bio->state);
		}
	}

	/*
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	one_write_done(r10_bio);
	if (dec_rdev)
		rdev_dec_pending(rdev, conf->mddev);
}

/*
 * RAID10 layout manager
 * As well as the chunksize and raid_disks count, there are two
 * parameters: near_copies and far_copies.
 * near_copies * far_copies must be <= raid_disks.
 * Normally one of these will be 1.
 * If both are 1, we get raid0.
 * If near_copies == raid_disks, we get raid1.
 *
 * Chunks are laid out in raid0 style with near_copies copies of the
 * first chunk, followed by near_copies copies of the next chunk and
 * so on.
 * If far_copies > 1, then after 1/far_copies of the array has been assigned
 * as described above, we start again with a device offset of near_copies.
 * So we effectively have another copy of the whole array further down all
 * the drives, but with blocks on different drives.
 * With this layout, a block is never stored twice on the same device.
 *
 * raid10_find_phys finds the sector offset of a given virtual sector
 * on each device that it is on.
 *
 * raid10_find_virt does the reverse mapping, from a device and a
 * sector offset to a virtual address.
 */

static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
{
	int n, f;
	sector_t sector;
	sector_t chunk;
	sector_t stripe;
	int dev;
	int slot = 0;
	int last_far_set_start, last_far_set_size;

	last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
	last_far_set_start *= geo->far_set_size;

	last_far_set_size = geo->far_set_size;
	last_far_set_size += (geo->raid_disks % geo->far_set_size);

	/* now calculate first sector/dev */
	chunk = r10bio->sector >> geo->chunk_shift;
	sector = r10bio->sector & geo->chunk_mask;

	chunk *= geo->near_copies;
	stripe = chunk;
	dev = sector_div(stripe, geo->raid_disks);
	if (geo->far_offset)
		stripe *= geo->far_copies;

	sector += stripe << geo->chunk_shift;

	/* and calculate all the others */
	for (n = 0; n < geo->near_copies; n++) {
		int d = dev;
		int set;
		sector_t s = sector;
		r10bio->devs[slot].devnum = d;
		r10bio->devs[slot].addr = s;
		slot++;

		for (f = 1; f < geo->far_copies; f++) {
			set = d / geo->far_set_size;
			d += geo->near_copies;

			if ((geo->raid_disks % geo->far_set_size) &&
			    (d > last_far_set_start)) {
				d -= last_far_set_start;
				d %= last_far_set_size;
				d += last_far_set_start;
			} else {
				d %= geo->far_set_size;
				d += geo->far_set_size * set;
			}
			s += geo->stride;
			r10bio->devs[slot].devnum = d;
			r10bio->devs[slot].addr = s;
			slot++;
		}
		dev++;
		if (dev >= geo->raid_disks) {
			dev = 0;
			sector += (geo->chunk_mask + 1);
		}
	}
}
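
/* Worked example of the mapping above (illustrative only): for a geometry of
 * raid_disks = 4, near_copies = 2, far_copies = 1 and far_offset = 0,
 * __raid10_find_phys() places the copies of successive virtual chunks as
 *
 *	virtual chunk 0 -> dev 0 chunk 0, dev 1 chunk 0
 *	virtual chunk 1 -> dev 2 chunk 0, dev 3 chunk 0
 *	virtual chunk 2 -> dev 0 chunk 1, dev 1 chunk 1
 *
 * i.e. raid0-style striping where each stripe unit is mirrored on the next
 * device (the classic 'n2' layout).
 */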
static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
{
	struct geom *geo = &conf->geo;

	if (conf->reshape_progress != MaxSector &&
	    ((r10bio->sector >= conf->reshape_progress) !=
	     conf->mddev->reshape_backwards)) {
		set_bit(R10BIO_Previous, &r10bio->state);
		geo = &conf->prev;
	} else
		clear_bit(R10BIO_Previous, &r10bio->state);

	__raid10_find_phys(geo, r10bio);
}

static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
{
	sector_t offset, chunk, vchunk;
	/* Never use conf->prev as this is only called during resync
	 * or recovery, so reshape isn't happening
	 */
	struct geom *geo = &conf->geo;
	int far_set_start = (dev / geo->far_set_size) * geo->far_set_size;
	int far_set_size = geo->far_set_size;
	int last_far_set_start;

	if (geo->raid_disks % geo->far_set_size) {
		last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
		last_far_set_start *= geo->far_set_size;

		if (dev >= last_far_set_start) {
			far_set_size = geo->far_set_size;
			far_set_size += (geo->raid_disks % geo->far_set_size);
			far_set_start = last_far_set_start;
		}
	}

	offset = sector & geo->chunk_mask;
	if (geo->far_offset) {
		int fc;
		chunk = sector >> geo->chunk_shift;
		fc = sector_div(chunk, geo->far_copies);
		dev -= fc * geo->near_copies;
		if (dev < far_set_start)
			dev += far_set_size;
	} else {
		while (sector >= geo->stride) {
			sector -= geo->stride;
			if (dev < (geo->near_copies + far_set_start))
				dev += far_set_size - geo->near_copies;
			else
				dev -= geo->near_copies;
		}
		chunk = sector >> geo->chunk_shift;
	}
	vchunk = chunk * geo->raid_disks + dev;
	sector_div(vchunk, geo->near_copies);
	return (vchunk << geo->chunk_shift) + offset;
}
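
/* Continuing the illustrative 'n2' example above: raid10_find_virt() is the
 * inverse of that mapping.  For raid_disks = 4, near_copies = 2,
 * far_copies = 1 it reduces to vchunk = (chunk * raid_disks + dev) / near_copies,
 * so e.g. (dev 2, chunk 0) -> virtual chunk 1 and (dev 3, chunk 1) ->
 * virtual chunk 3, which round-trips with __raid10_find_phys() above.
 */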
/**
 * raid10_mergeable_bvec -- tell bio layer if two requests can be merged
 * @q: request queue
 * @bvm: properties of new bio
 * @biovec: the request that could be merged to it.
 *
 * Return amount of bytes we can accept at this offset
 * This requires checking for end-of-chunk if near_copies != raid_disks,
 * and for subordinate merge_bvec_fns if merge_check_needed.
 */
static int raid10_mergeable_bvec(struct request_queue *q,
				 struct bvec_merge_data *bvm,
				 struct bio_vec *biovec)
{
	struct mddev *mddev = q->queuedata;
	struct r10conf *conf = mddev->private;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max;
	unsigned int chunk_sectors;
	unsigned int bio_sectors = bvm->bi_size >> 9;
	struct geom *geo = &conf->geo;

	chunk_sectors = (conf->geo.chunk_mask & conf->prev.chunk_mask) + 1;
	if (conf->reshape_progress != MaxSector &&
	    ((sector >= conf->reshape_progress) !=
	     conf->mddev->reshape_backwards))
		geo = &conf->prev;

	if (geo->near_copies < geo->raid_disks) {
		max = (chunk_sectors - ((sector & (chunk_sectors - 1))
					+ bio_sectors)) << 9;
		if (max < 0)
			/* bio_add cannot handle a negative return */
			max = 0;
		if (max <= biovec->bv_len && bio_sectors == 0)
			return biovec->bv_len;
	} else
		max = biovec->bv_len;

	if (mddev->merge_check_needed) {
		struct {
			struct r10bio r10_bio;
			struct r10dev devs[conf->copies];
		} on_stack;
		struct r10bio *r10_bio = &on_stack.r10_bio;
		int s;
		if (conf->reshape_progress != MaxSector) {
			/* Cannot give any guidance during reshape */
			if (max <= biovec->bv_len && bio_sectors == 0)
				return biovec->bv_len;
			return 0;
		}
		r10_bio->sector = sector;
		raid10_find_phys(conf, r10_bio);
		rcu_read_lock();
		for (s = 0; s < conf->copies; s++) {
			int disk = r10_bio->devs[s].devnum;
			struct md_rdev *rdev = rcu_dereference(
				conf->mirrors[disk].rdev);
			if (rdev && !test_bit(Faulty, &rdev->flags)) {
				struct request_queue *q =
					bdev_get_queue(rdev->bdev);
				if (q->merge_bvec_fn) {
					bvm->bi_sector = r10_bio->devs[s].addr
						+ rdev->data_offset;
					bvm->bi_bdev = rdev->bdev;
					max = min(max, q->merge_bvec_fn(
							  q, bvm, biovec));
				}
			}
			rdev = rcu_dereference(conf->mirrors[disk].replacement);
			if (rdev && !test_bit(Faulty, &rdev->flags)) {
				struct request_queue *q =
					bdev_get_queue(rdev->bdev);
				if (q->merge_bvec_fn) {
					bvm->bi_sector = r10_bio->devs[s].addr
						+ rdev->data_offset;
					bvm->bi_bdev = rdev->bdev;
					max = min(max, q->merge_bvec_fn(
							  q, bvm, biovec));
				}
			}
		}
		rcu_read_unlock();
	}
	return max;
}

/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */

/*
 * FIXME: possibly should rethink readbalancing and do it differently
 * depending on near_copies / far_copies geometry.
 */
static struct md_rdev *read_balance(struct r10conf *conf,
				    struct r10bio *r10_bio,
				    int *max_sectors)
{
	const sector_t this_sector = r10_bio->sector;
	int disk, slot;
	int sectors = r10_bio->sectors;
	int best_good_sectors;
	sector_t new_distance, best_dist;
	struct md_rdev *best_rdev, *rdev = NULL;
	int do_balance;
	int best_slot;
	struct geom *geo = &conf->geo;

	raid10_find_phys(conf, r10_bio);
	rcu_read_lock();
retry:
	sectors = r10_bio->sectors;
	best_slot = -1;
	best_rdev = NULL;
	best_dist = MaxSector;
	best_good_sectors = 0;
	do_balance = 1;
	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on (recovery is ok), or below
	 * the resync window. We take the first readable disk when
	 * above the resync window.
	 */
	if (conf->mddev->recovery_cp < MaxSector
	    && (this_sector + sectors >= conf->next_resync))
		do_balance = 0;

	for (slot = 0; slot < conf->copies ; slot++) {
		sector_t first_bad;
		int bad_sectors;
		sector_t dev_sector;

		if (r10_bio->devs[slot].bio == IO_BLOCKED)
			continue;
		disk = r10_bio->devs[slot].devnum;
		rdev = rcu_dereference(conf->mirrors[disk].replacement);
		if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
		    test_bit(Unmerged, &rdev->flags) ||
		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
			rdev = rcu_dereference(conf->mirrors[disk].rdev);
		if (rdev == NULL ||
		    test_bit(Faulty, &rdev->flags) ||
		    test_bit(Unmerged, &rdev->flags))
			continue;
		if (!test_bit(In_sync, &rdev->flags) &&
		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
			continue;

		dev_sector = r10_bio->devs[slot].addr;
		if (is_badblock(rdev, dev_sector, sectors,
				&first_bad, &bad_sectors)) {
			if (best_dist < MaxSector)
				/* Already have a better slot */
				continue;
			if (first_bad <= dev_sector) {
				/* Cannot read here.  If this is the
				 * 'primary' device, then we must not read
				 * beyond 'bad_sectors' from another device.
				 */
				bad_sectors -= (dev_sector - first_bad);
				if (!do_balance && sectors > bad_sectors)
					sectors = bad_sectors;
				if (best_good_sectors > sectors)
					best_good_sectors = sectors;
			} else {
				sector_t good_sectors =
					first_bad - dev_sector;
				if (good_sectors > best_good_sectors) {
					best_good_sectors = good_sectors;
					best_slot = slot;
					best_rdev = rdev;
				}
				if (!do_balance)
					/* Must read from here */
					break;
			}
			continue;
		} else
			best_good_sectors = sectors;

		if (!do_balance)
			break;

		/* This optimisation is debatable, and completely destroys
		 * sequential read speed for 'far copies' arrays.  So only
		 * keep it for 'near' arrays, and review those later.
		 */
		if (geo->near_copies > 1 && !atomic_read(&rdev->nr_pending))
			break;

		/* for far > 1 always use the lowest address */
		if (geo->far_copies > 1)
			new_distance = r10_bio->devs[slot].addr;
		else
			new_distance = abs(r10_bio->devs[slot].addr -
					   conf->mirrors[disk].head_position);
		if (new_distance < best_dist) {
			best_dist = new_distance;
			best_slot = slot;
			best_rdev = rdev;
		}
	}
	if (slot >= conf->copies) {
		slot = best_slot;
		rdev = best_rdev;
	}

	if (slot >= 0) {
		atomic_inc(&rdev->nr_pending);
		if (test_bit(Faulty, &rdev->flags)) {
			/* Cannot risk returning a device that failed
			 * before we inc'ed nr_pending
			 */
			rdev_dec_pending(rdev, conf->mddev);
			goto retry;
		}
		r10_bio->read_slot = slot;
	} else
		rdev = NULL;
	rcu_read_unlock();
	*max_sectors = best_good_sectors;

	return rdev;
}

int md_raid10_congested(struct mddev *mddev, int bits)
{
	struct r10conf *conf = mddev->private;
	int i, ret = 0;

	if ((bits & (1 << BDI_async_congested)) &&
	    conf->pending_count >= max_queued_requests)
		return 1;

	rcu_read_lock();
	for (i = 0;
	     (i < conf->geo.raid_disks || i < conf->prev.raid_disks)
		     && ret == 0;
	     i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);

			ret |= bdi_congested(&q->backing_dev_info, bits);
		}
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(md_raid10_congested);

static int raid10_congested(void *data, int bits)
{
	struct mddev *mddev = data;

	return mddev_congested(mddev, bits) ||
		md_raid10_congested(mddev, bits);
}

static void flush_pending_writes(struct r10conf *conf)
{
	/* Any writes that have been queued but are awaiting
	 * bitmap updates get flushed here.
	 */
	spin_lock_irq(&conf->device_lock);

	if (conf->pending_bio_list.head) {
		struct bio *bio;
		bio = bio_list_get(&conf->pending_bio_list);
		conf->pending_count = 0;
		spin_unlock_irq(&conf->device_lock);
		/* flush any pending bitmap writes to disk
		 * before proceeding w/ I/O */
		bitmap_unplug(conf->mddev->bitmap);
		wake_up(&conf->wait_barrier);

		while (bio) { /* submit pending writes */
			struct bio *next = bio->bi_next;
			bio->bi_next = NULL;
			if (unlikely((bio->bi_rw & REQ_DISCARD) &&
			    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
				/* Just ignore it */
				bio_endio(bio, 0);
			else
				generic_make_request(bio);
			bio = next;
		}
	} else
		spin_unlock_irq(&conf->device_lock);
}

/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down.  This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'.  When that returns there
 * is no background IO happening.  It must arrange to call
 * allow_barrier when it has finished its IO.
 * Background IO calls must call raise_barrier.  Once that returns
 * there is no normal IO happening.  It must arrange to call
 * lower_barrier when the particular background IO completes.
 */
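
/* Sketch of the pairing described above (illustrative only, not a real
 * caller in this file):
 *
 *	wait_barrier(conf);	regular IO: blocks while a barrier is raised
 *	...submit the IO; its completion path ends in allow_barrier(conf),
 *	   which drops nr_pending and wakes any waiting resync...
 *
 *	raise_barrier(conf, 0);	background IO: blocks new regular IO
 *	...do one unit of resync/recovery...
 *	lower_barrier(conf);	re-enables regular IO
 */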

static void raise_barrier(struct r10conf *conf, int force)
{
	BUG_ON(force && !conf->barrier);
	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting (unless 'force') */
	wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
			    conf->resync_lock);

	/* block any new IO from starting */
	conf->barrier++;

	/* Now wait for all pending IO to complete */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
			    conf->resync_lock);

	spin_unlock_irq(&conf->resync_lock);
}

static void lower_barrier(struct r10conf *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->barrier--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void wait_barrier(struct r10conf *conf)
{
	spin_lock_irq(&conf->resync_lock);
	if (conf->barrier) {
		conf->nr_waiting++;
		/* Wait for the barrier to drop.
		 * However if there are already pending
		 * requests (preventing the barrier from
		 * rising completely), and the
		 * pre-process bio queue isn't empty,
		 * then don't wait, as we need to empty
		 * that queue to get the nr_pending
		 * count down.
		 */
		wait_event_lock_irq(conf->wait_barrier,
				    !conf->barrier ||
				    (conf->nr_pending &&
				     current->bio_list &&
				     !bio_list_empty(current->bio_list)),
				    conf->resync_lock);
		conf->nr_waiting--;
	}
	conf->nr_pending++;
	spin_unlock_irq(&conf->resync_lock);
}

static void allow_barrier(struct r10conf *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->nr_pending--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void freeze_array(struct r10conf *conf, int extra)
{
	/* stop syncio and normal IO and wait for everything to
	 * go quiet.
	 * We increment barrier and nr_waiting, and then
	 * wait until nr_pending matches nr_queued+extra.
	 * This is called in the context of one normal IO request
	 * that has failed. Thus any sync request that might be pending
	 * will be blocked by nr_pending, and we need to wait for
	 * pending IO requests to complete or be queued for re-try.
	 * Thus the number queued (nr_queued) plus this request (extra)
	 * must match the number of pending IOs (nr_pending) before
	 * we continue.
	 */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier++;
	conf->nr_waiting++;
	wait_event_lock_irq_cmd(conf->wait_barrier,
				conf->nr_pending == conf->nr_queued+extra,
				conf->resync_lock,
				flush_pending_writes(conf));

	spin_unlock_irq(&conf->resync_lock);
}

static void unfreeze_array(struct r10conf *conf)
{
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier--;
	conf->nr_waiting--;
	wake_up(&conf->wait_barrier);
	spin_unlock_irq(&conf->resync_lock);
}

static sector_t choose_data_offset(struct r10bio *r10_bio,
				   struct md_rdev *rdev)
{
	if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) ||
	    test_bit(R10BIO_Previous, &r10_bio->state))
		return rdev->data_offset;
	else
		return rdev->new_data_offset;
}

struct raid10_plug_cb {
	struct blk_plug_cb	cb;
	struct bio_list		pending;
	int			pending_cnt;
};

static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct raid10_plug_cb *plug = container_of(cb, struct raid10_plug_cb,
						   cb);
	struct mddev *mddev = plug->cb.data;
	struct r10conf *conf = mddev->private;
	struct bio *bio;

	if (from_schedule || current->bio_list) {
		spin_lock_irq(&conf->device_lock);
		bio_list_merge(&conf->pending_bio_list, &plug->pending);
		conf->pending_count += plug->pending_cnt;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_barrier);
		md_wakeup_thread(mddev->thread);
		kfree(plug);
		return;
	}

	/* we aren't scheduling, so we can do the write-out directly. */
	bio = bio_list_get(&plug->pending);
	bitmap_unplug(mddev->bitmap);
	wake_up(&conf->wait_barrier);

	while (bio) { /* submit pending writes */
		struct bio *next = bio->bi_next;
		bio->bi_next = NULL;
		if (unlikely((bio->bi_rw & REQ_DISCARD) &&
		    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
			/* Just ignore it */
			bio_endio(bio, 0);
		else
			generic_make_request(bio);
		bio = next;
	}
	kfree(plug);
}

static void __make_request(struct mddev *mddev, struct bio *bio)
{
	struct r10conf *conf = mddev->private;
	struct r10bio *r10_bio;
	struct bio *read_bio;
	int i;
	const int rw = bio_data_dir(bio);
	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
	const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
	const unsigned long do_discard = (bio->bi_rw
					  & (REQ_DISCARD | REQ_SECURE));
	const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
	unsigned long flags;
	struct md_rdev *blocked_rdev;
	struct blk_plug_cb *cb;
	struct raid10_plug_cb *plug = NULL;
	int sectors_handled;
	int max_sectors;
	int sectors;

	/*
	 * Register the new request and wait if the reconstruction
	 * thread has put up a bar for new requests.
	 * Continue immediately if no resync is active currently.
	 */
	wait_barrier(conf);

	sectors = bio_sectors(bio);
	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
	    bio->bi_iter.bi_sector < conf->reshape_progress &&
	    bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
		/* IO spans the reshape position.
		 * Need to wait for reshape to pass.
		 */
		allow_barrier(conf);
		wait_event(conf->wait_barrier,
			   conf->reshape_progress <= bio->bi_iter.bi_sector ||
			   conf->reshape_progress >= bio->bi_iter.bi_sector +
			   sectors);
		wait_barrier(conf);
	}
	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
	    bio_data_dir(bio) == WRITE &&
	    (mddev->reshape_backwards
	     ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
		bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
	     : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
		bio->bi_iter.bi_sector < conf->reshape_progress))) {
		/* Need to update reshape_position in metadata */
		mddev->reshape_position = conf->reshape_progress;
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		set_bit(MD_CHANGE_PENDING, &mddev->flags);
		md_wakeup_thread(mddev->thread);
		wait_event(mddev->sb_wait,
			   !test_bit(MD_CHANGE_PENDING, &mddev->flags));

		conf->reshape_safe = mddev->reshape_position;
	}

	r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);

	r10_bio->master_bio = bio;
	r10_bio->sectors = sectors;

	r10_bio->mddev = mddev;
	r10_bio->sector = bio->bi_iter.bi_sector;
	r10_bio->state = 0;

	/* We might need to issue multiple reads to different
	 * devices if there are bad blocks around, so we keep
	 * track of the number of reads in bio->bi_phys_segments.
	 * If this is 0, there is only one r10_bio and no locking
	 * will be needed when the request completes.  If it is
	 * non-zero, then it is the number of not-completed requests.
	 */
	bio->bi_phys_segments = 0;
	clear_bit(BIO_SEG_VALID, &bio->bi_flags);

	if (rw == READ) {
		/*
		 * read balancing logic:
		 */
		struct md_rdev *rdev;
		int slot;

read_again:
		rdev = read_balance(conf, r10_bio, &max_sectors);
		if (!rdev) {
			raid_end_bio_io(r10_bio);
			return;
		}
		slot = r10_bio->read_slot;

		read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
		bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector,
			 max_sectors);

		r10_bio->devs[slot].bio = read_bio;
		r10_bio->devs[slot].rdev = rdev;

		read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
			choose_data_offset(r10_bio, rdev);
		read_bio->bi_bdev = rdev->bdev;
		read_bio->bi_end_io = raid10_end_read_request;
		read_bio->bi_rw = READ | do_sync;
		read_bio->bi_private = r10_bio;

		if (max_sectors < r10_bio->sectors) {
			/* Could not read all from this device, so we will
			 * need another r10_bio.
			 */
			sectors_handled = (r10_bio->sector + max_sectors
					   - bio->bi_iter.bi_sector);
			r10_bio->sectors = max_sectors;
			spin_lock_irq(&conf->device_lock);
			if (bio->bi_phys_segments == 0)
				bio->bi_phys_segments = 2;
			else
				bio->bi_phys_segments++;
			spin_unlock_irq(&conf->device_lock);
			/* Cannot call generic_make_request directly
			 * as that will be queued in __generic_make_request
			 * and subsequent mempool_alloc might block
			 * waiting for it.  So hand bio over to raid10d.
			 */
			reschedule_retry(r10_bio);

			r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);

			r10_bio->master_bio = bio;
			r10_bio->sectors = bio_sectors(bio) - sectors_handled;
			r10_bio->state = 0;
			r10_bio->mddev = mddev;
			r10_bio->sector = bio->bi_iter.bi_sector +
				sectors_handled;
			goto read_again;
		} else
			generic_make_request(read_bio);
		return;
	}

	/*
	 * WRITE:
	 */
	if (conf->pending_count >= max_queued_requests) {
		md_wakeup_thread(mddev->thread);
		wait_event(conf->wait_barrier,
			   conf->pending_count < max_queued_requests);
	}
	/* first select target devices under rcu_lock and
	 * inc refcount on their rdev.  Record them by setting
	 * bios[x] to bio
	 * If there are known/acknowledged bad blocks on any device
	 * on which we have seen a write error, we want to avoid
	 * writing to those blocks.  This potentially requires several
	 * writes to write around the bad blocks.  Each set of writes
	 * gets its own r10_bio with a set of bios attached.  The number
	 * of r10_bios is recorded in bio->bi_phys_segments just as with
	 * the read case.
	 */

	r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
	raid10_find_phys(conf, r10_bio);
retry_write:
	blocked_rdev = NULL;
	rcu_read_lock();
	max_sectors = r10_bio->sectors;

	for (i = 0;  i < conf->copies; i++) {
		int d = r10_bio->devs[i].devnum;
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
		struct md_rdev *rrdev = rcu_dereference(
			conf->mirrors[d].replacement);
		if (rdev == rrdev)
			rrdev = NULL;
		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
			atomic_inc(&rdev->nr_pending);
			blocked_rdev = rdev;
			break;
		}
		if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) {
			atomic_inc(&rrdev->nr_pending);
			blocked_rdev = rrdev;
			break;
		}
		if (rdev && (test_bit(Faulty, &rdev->flags)
			     || test_bit(Unmerged, &rdev->flags)))
			rdev = NULL;
		if (rrdev && (test_bit(Faulty, &rrdev->flags)
			      || test_bit(Unmerged, &rrdev->flags)))
			rrdev = NULL;

		r10_bio->devs[i].bio = NULL;
		r10_bio->devs[i].repl_bio = NULL;

		if (!rdev && !rrdev) {
			set_bit(R10BIO_Degraded, &r10_bio->state);
			continue;
		}
		if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
			sector_t first_bad;
			sector_t dev_sector = r10_bio->devs[i].addr;
			int bad_sectors;
			int is_bad;

			is_bad = is_badblock(rdev, dev_sector,
					     max_sectors,
					     &first_bad, &bad_sectors);
			if (is_bad < 0) {
				/* Mustn't write here until the bad block
				 * is acknowledged
				 */
				atomic_inc(&rdev->nr_pending);
				set_bit(BlockedBadBlocks, &rdev->flags);
				blocked_rdev = rdev;
				break;
			}
			if (is_bad && first_bad <= dev_sector) {
				/* Cannot write here at all */
				bad_sectors -= (dev_sector - first_bad);
				if (bad_sectors < max_sectors)
					/* Mustn't write more than bad_sectors
					 * to other devices yet
					 */
					max_sectors = bad_sectors;
				/* We don't set R10BIO_Degraded as that
				 * only applies if the disk is missing,
				 * so it might be re-added, and we want to
				 * know to recover this chunk.
				 * In this case the device is here, and the
				 * fact that this chunk is not in-sync is
				 * recorded in the bad block log.
				 */
				continue;
			}
			if (is_bad) {
				int good_sectors = first_bad - dev_sector;
				if (good_sectors < max_sectors)
					max_sectors = good_sectors;
			}
		}
		if (rdev) {
			r10_bio->devs[i].bio = bio;
			atomic_inc(&rdev->nr_pending);
		}
		if (rrdev) {
			r10_bio->devs[i].repl_bio = bio;
			atomic_inc(&rrdev->nr_pending);
		}
	}
	rcu_read_unlock();

	if (unlikely(blocked_rdev)) {
		/* Have to wait for this device to get unblocked, then retry */
		int j;
		int d;

		for (j = 0; j < i; j++) {
			if (r10_bio->devs[j].bio) {
				d = r10_bio->devs[j].devnum;
				rdev_dec_pending(conf->mirrors[d].rdev, mddev);
			}
			if (r10_bio->devs[j].repl_bio) {
				struct md_rdev *rdev;
				d = r10_bio->devs[j].devnum;
				rdev = conf->mirrors[d].replacement;
				if (!rdev) {
					/* Race with remove_disk */
					smp_mb();
					rdev = conf->mirrors[d].rdev;
				}
				rdev_dec_pending(rdev, mddev);
			}
		}
		allow_barrier(conf);
		md_wait_for_blocked_rdev(blocked_rdev, mddev);
		wait_barrier(conf);
		goto retry_write;
	}

	if (max_sectors < r10_bio->sectors) {
		/* We are splitting this into multiple parts, so
		 * we need to prepare for allocating another r10_bio.
		 */
		r10_bio->sectors = max_sectors;
		spin_lock_irq(&conf->device_lock);
		if (bio->bi_phys_segments == 0)
			bio->bi_phys_segments = 2;
		else
			bio->bi_phys_segments++;
		spin_unlock_irq(&conf->device_lock);
	}
	sectors_handled = r10_bio->sector + max_sectors -
		bio->bi_iter.bi_sector;

	atomic_set(&r10_bio->remaining, 1);
	bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);

	for (i = 0; i < conf->copies; i++) {
		struct bio *mbio;
		int d = r10_bio->devs[i].devnum;
		if (r10_bio->devs[i].bio) {
			struct md_rdev *rdev = conf->mirrors[d].rdev;
			mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
			bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
				 max_sectors);
			r10_bio->devs[i].bio = mbio;

			mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr +
						   choose_data_offset(r10_bio,
								      rdev));
			mbio->bi_bdev = rdev->bdev;
			mbio->bi_end_io = raid10_end_write_request;
			mbio->bi_rw =
				WRITE | do_sync | do_fua | do_discard | do_same;
			mbio->bi_private = r10_bio;

			atomic_inc(&r10_bio->remaining);

			cb = blk_check_plugged(raid10_unplug, mddev,
					       sizeof(*plug));
			if (cb)
				plug = container_of(cb, struct raid10_plug_cb,
						    cb);
			else
				plug = NULL;
			spin_lock_irqsave(&conf->device_lock, flags);
			if (plug) {
				bio_list_add(&plug->pending, mbio);
				plug->pending_cnt++;
			} else {
				bio_list_add(&conf->pending_bio_list, mbio);
				conf->pending_count++;
			}
			spin_unlock_irqrestore(&conf->device_lock, flags);
			if (!plug)
				md_wakeup_thread(mddev->thread);
		}

		if (r10_bio->devs[i].repl_bio) {
			struct md_rdev *rdev = conf->mirrors[d].replacement;
			if (rdev == NULL) {
				/* Replacement just got moved to main 'rdev' */
				smp_mb();
				rdev = conf->mirrors[d].rdev;
			}
			mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
			bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
				 max_sectors);
			r10_bio->devs[i].repl_bio = mbio;

			mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr +
						   choose_data_offset(
							   r10_bio, rdev));
			mbio->bi_bdev = rdev->bdev;
			mbio->bi_end_io = raid10_end_write_request;
			mbio->bi_rw =
				WRITE | do_sync | do_fua | do_discard | do_same;
			mbio->bi_private = r10_bio;

			atomic_inc(&r10_bio->remaining);
			spin_lock_irqsave(&conf->device_lock, flags);
			bio_list_add(&conf->pending_bio_list, mbio);
			conf->pending_count++;
			spin_unlock_irqrestore(&conf->device_lock, flags);
			if (!mddev_check_plugged(mddev))
				md_wakeup_thread(mddev->thread);
		}
	}

	/* Don't remove the bias on 'remaining' (one_write_done) until
	 * after checking if we need to go around again.
	 */

	if (sectors_handled < bio_sectors(bio)) {
		one_write_done(r10_bio);
		/* We need another r10_bio.  It has already been counted
		 * in bio->bi_phys_segments.
		 */
		r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);

		r10_bio->master_bio = bio;
		r10_bio->sectors = bio_sectors(bio) - sectors_handled;

		r10_bio->mddev = mddev;
		r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
		r10_bio->state = 0;
		goto retry_write;
	}
	one_write_done(r10_bio);
}

static void make_request(struct mddev *mddev, struct bio *bio)
{
	struct r10conf *conf = mddev->private;
	sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
	int chunk_sects = chunk_mask + 1;

	struct bio *split;

	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
		md_flush_request(mddev, bio);
		return;
	}

	md_write_start(mddev, bio);

	do {

		/*
		 * If this request crosses a chunk boundary, we need to split
		 * it.
		 */
		if (unlikely((bio->bi_iter.bi_sector & chunk_mask) +
			     bio_sectors(bio) > chunk_sects
			     && (conf->geo.near_copies < conf->geo.raid_disks
				 || conf->prev.near_copies <
				 conf->prev.raid_disks))) {
			split = bio_split(bio, chunk_sects -
					  (bio->bi_iter.bi_sector &
					   (chunk_sects - 1)),
					  GFP_NOIO, fs_bio_set);
			bio_chain(split, bio);
		} else {
			split = bio;
		}

		__make_request(mddev, split);
	} while (split != bio);

	/* In case raid10d snuck in to freeze_array */
	wake_up(&conf->wait_barrier);
}

static void status(struct seq_file *seq, struct mddev *mddev)
{
	struct r10conf *conf = mddev->private;
	int i;

	if (conf->geo.near_copies < conf->geo.raid_disks)
		seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
	if (conf->geo.near_copies > 1)
		seq_printf(seq, " %d near-copies", conf->geo.near_copies);
	if (conf->geo.far_copies > 1) {
		if (conf->geo.far_offset)
			seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
		else
			seq_printf(seq, " %d far-copies", conf->geo.far_copies);
	}
	seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
		   conf->geo.raid_disks - mddev->degraded);
	for (i = 0; i < conf->geo.raid_disks; i++)
		seq_printf(seq, "%s",
			   conf->mirrors[i].rdev &&
			   test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
	seq_printf(seq, "]");
}
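
/* For example (illustrative), a healthy 4-device array with 512K chunks and
 * near_copies = 2 would be reported by the code above as something like:
 *
 *	512K chunks 2 near-copies [4/4] [UUUU]
 */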

/* check if there are enough drives for
 * every block to appear on at least one.
 * Don't consider the device numbered 'ignore'
 * as we might be about to remove it.
 */
static int _enough(struct r10conf *conf, int previous, int ignore)
{
	int first = 0;
	int has_enough = 0;
	int disks, ncopies;
	if (previous) {
		disks = conf->prev.raid_disks;
		ncopies = conf->prev.near_copies;
	} else {
		disks = conf->geo.raid_disks;
		ncopies = conf->geo.near_copies;
	}

	rcu_read_lock();
	do {
		int n = conf->copies;
		int cnt = 0;
		int this = first;
		while (n--) {
			struct md_rdev *rdev;
			if (this != ignore &&
			    (rdev = rcu_dereference(conf->mirrors[this].rdev)) &&
			    test_bit(In_sync, &rdev->flags))
				cnt++;
			this = (this+1) % disks;
		}
		if (cnt == 0)
			goto out;
		first = (first + ncopies) % disks;
	} while (first != 0);
	has_enough = 1;
out:
	rcu_read_unlock();
	return has_enough;
}

static int enough(struct r10conf *conf, int ignore)
{
	/* when calling 'enough', both 'prev' and 'geo' must
	 * be stable.
	 * This is ensured if ->reconfig_mutex or ->device_lock
	 * is held.
	 */
	return _enough(conf, 0, ignore) &&
		_enough(conf, 1, ignore);
}

static void error(struct mddev *mddev, struct md_rdev *rdev)
{
	char b[BDEVNAME_SIZE];
	struct r10conf *conf = mddev->private;
	unsigned long flags;

	/*
	 * If it is not operational, then we have already marked it as dead
	 * else if it is the last working disk, ignore the error, let the
	 * next level up know.
	 * else mark the drive as failed
	 */
	spin_lock_irqsave(&conf->device_lock, flags);
	if (test_bit(In_sync, &rdev->flags)
	    && !enough(conf, rdev->raid_disk)) {
		/*
		 * Don't fail the drive, just return an IO error.
		 */
		spin_unlock_irqrestore(&conf->device_lock, flags);
		return;
	}
	if (test_and_clear_bit(In_sync, &rdev->flags))
		mddev->degraded++;
	/*
	 * If recovery is running, make sure it aborts.
	 */
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	set_bit(Blocked, &rdev->flags);
	set_bit(Faulty, &rdev->flags);
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	printk(KERN_ALERT
	       "md/raid10:%s: Disk failure on %s, disabling device.\n"
	       "md/raid10:%s: Operation continuing on %d devices.\n",
	       mdname(mddev), bdevname(rdev->bdev, b),
	       mdname(mddev), conf->geo.raid_disks - mddev->degraded);
}

static void print_conf(struct r10conf *conf)
{
	int i;
	struct raid10_info *tmp;

	printk(KERN_DEBUG "RAID10 conf printout:\n");
	if (!conf) {
		printk(KERN_DEBUG "(!conf)\n");
		return;
	}
	printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
	       conf->geo.raid_disks);

	for (i = 0; i < conf->geo.raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		tmp = conf->mirrors + i;
		if (tmp->rdev)
			printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
			       i, !test_bit(In_sync, &tmp->rdev->flags),
			       !test_bit(Faulty, &tmp->rdev->flags),
			       bdevname(tmp->rdev->bdev, b));
	}
}

static void close_sync(struct r10conf *conf)
{
	wait_barrier(conf);
	allow_barrier(conf);

	mempool_destroy(conf->r10buf_pool);
	conf->r10buf_pool = NULL;
}

static int raid10_spare_active(struct mddev *mddev)
{
	int i;
	struct r10conf *conf = mddev->private;
	struct raid10_info *tmp;
	int count = 0;
	unsigned long flags;

	/*
	 * Find all non-in_sync disks within the RAID10 configuration
	 * and mark them in_sync
	 */
	for (i = 0; i < conf->geo.raid_disks; i++) {
		tmp = conf->mirrors + i;
		if (tmp->replacement
		    && tmp->replacement->recovery_offset == MaxSector
		    && !test_bit(Faulty, &tmp->replacement->flags)
		    && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
			/* Replacement has just become active */
			if (!tmp->rdev
			    || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
				count++;
			if (tmp->rdev) {
				/* Replaced device not technically faulty,
				 * but we need to be sure it gets removed
				 * and never re-added.
				 */
				set_bit(Faulty, &tmp->rdev->flags);
				sysfs_notify_dirent_safe(
					tmp->rdev->sysfs_state);
			}
			sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
		} else if (tmp->rdev
			   && tmp->rdev->recovery_offset == MaxSector
			   && !test_bit(Faulty, &tmp->rdev->flags)
			   && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
			count++;
			sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
		}
	}
	spin_lock_irqsave(&conf->device_lock, flags);
	mddev->degraded -= count;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	print_conf(conf);
	return count;
}

static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct r10conf *conf = mddev->private;
	int err = -EEXIST;
	int mirror;
	int first = 0;
	int last = conf->geo.raid_disks - 1;
	struct request_queue *q = bdev_get_queue(rdev->bdev);

	if (mddev->recovery_cp < MaxSector)
		/* only hot-add to in-sync arrays, as recovery is
		 * very different from resync
		 */
		return -EBUSY;
	if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1))
		return -EINVAL;

	if (rdev->raid_disk >= 0)
		first = last = rdev->raid_disk;

	if (q->merge_bvec_fn) {
		set_bit(Unmerged, &rdev->flags);
		mddev->merge_check_needed = 1;
	}

	if (rdev->saved_raid_disk >= first &&
	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
		mirror = rdev->saved_raid_disk;
	else
		mirror = first;
	for ( ; mirror <= last ; mirror++) {
		struct raid10_info *p = &conf->mirrors[mirror];
		if (p->recovery_disabled == mddev->recovery_disabled)
			continue;
		if (p->rdev) {
			if (!test_bit(WantReplacement, &p->rdev->flags) ||
			    p->replacement != NULL)
				continue;
			clear_bit(In_sync, &rdev->flags);
			set_bit(Replacement, &rdev->flags);
			rdev->raid_disk = mirror;
			err = 0;
			if (mddev->gendisk)
				disk_stack_limits(mddev->gendisk, rdev->bdev,
						  rdev->data_offset << 9);
			conf->fullsync = 1;
			rcu_assign_pointer(p->replacement, rdev);
			break;
		}

		if (mddev->gendisk)
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);

		p->head_position = 0;
		p->recovery_disabled = mddev->recovery_disabled - 1;
		rdev->raid_disk = mirror;
		err = 0;
		if (rdev->saved_raid_disk != mirror)
			conf->fullsync = 1;
		rcu_assign_pointer(p->rdev, rdev);
		break;
	}
	if (err == 0 && test_bit(Unmerged, &rdev->flags)) {
		/* Some requests might not have seen this new
		 * merge_bvec_fn.  We must wait for them to complete
		 * before merging the device fully.
		 * First we make sure any code which has tested
		 * our function has submitted the request, then
		 * we wait for all outstanding requests to complete.
1853 */ 1854 synchronize_sched(); 1855 freeze_array(conf, 0); 1856 unfreeze_array(conf); 1857 clear_bit(Unmerged, &rdev->flags); 1858 } 1859 md_integrity_add_rdev(rdev, mddev); 1860 if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev))) 1861 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); 1862 1863 print_conf(conf); 1864 return err; 1865 } 1866 1867 static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev) 1868 { 1869 struct r10conf *conf = mddev->private; 1870 int err = 0; 1871 int number = rdev->raid_disk; 1872 struct md_rdev **rdevp; 1873 struct raid10_info *p = conf->mirrors + number; 1874 1875 print_conf(conf); 1876 if (rdev == p->rdev) 1877 rdevp = &p->rdev; 1878 else if (rdev == p->replacement) 1879 rdevp = &p->replacement; 1880 else 1881 return 0; 1882 1883 if (test_bit(In_sync, &rdev->flags) || 1884 atomic_read(&rdev->nr_pending)) { 1885 err = -EBUSY; 1886 goto abort; 1887 } 1888 /* Only remove faulty devices if recovery 1889 * is not possible. 1890 */ 1891 if (!test_bit(Faulty, &rdev->flags) && 1892 mddev->recovery_disabled != p->recovery_disabled && 1893 (!p->replacement || p->replacement == rdev) && 1894 number < conf->geo.raid_disks && 1895 enough(conf, -1)) { 1896 err = -EBUSY; 1897 goto abort; 1898 } 1899 *rdevp = NULL; 1900 synchronize_rcu(); 1901 if (atomic_read(&rdev->nr_pending)) { 1902 /* lost the race, try later */ 1903 err = -EBUSY; 1904 *rdevp = rdev; 1905 goto abort; 1906 } else if (p->replacement) { 1907 /* We must have just cleared 'rdev' */ 1908 p->rdev = p->replacement; 1909 clear_bit(Replacement, &p->replacement->flags); 1910 smp_mb(); /* Make sure other CPUs may see both as identical 1911 * but will never see neither of them -- if they are careful. 1912 */ 1913 p->replacement = NULL; 1914 clear_bit(WantReplacement, &rdev->flags); 1915 } else 1916 /* We might have just removed the Replacement as faulty. 1917 * Clear the flag just in case. 1918 */ 1919 clear_bit(WantReplacement, &rdev->flags); 1920 1921 err = md_integrity_register(mddev); 1922 1923 abort: 1924 1925 print_conf(conf); 1926 return err; 1927 } 1928 1929 static void end_sync_read(struct bio *bio, int error) 1930 { 1931 struct r10bio *r10_bio = bio->bi_private; 1932 struct r10conf *conf = r10_bio->mddev->private; 1933 int d; 1934 1935 if (bio == r10_bio->master_bio) { 1936 /* this is a reshape read */ 1937 d = r10_bio->read_slot; /* really the read dev */ 1938 } else 1939 d = find_bio_disk(conf, r10_bio, bio, NULL, NULL); 1940 1941 if (test_bit(BIO_UPTODATE, &bio->bi_flags)) 1942 set_bit(R10BIO_Uptodate, &r10_bio->state); 1943 else 1944 /* The write handler will notice the lack of 1945 * R10BIO_Uptodate and record any errors etc 1946 */ 1947 atomic_add(r10_bio->sectors, 1948 &conf->mirrors[d].rdev->corrected_errors); 1949 1950 /* for reconstruct, we always reschedule after a read.
1951 * for resync, only after all reads 1952 */ 1953 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev); 1954 if (test_bit(R10BIO_IsRecover, &r10_bio->state) || 1955 atomic_dec_and_test(&r10_bio->remaining)) { 1956 /* we have read all the blocks, 1957 * do the comparison in process context in raid10d 1958 */ 1959 reschedule_retry(r10_bio); 1960 } 1961 } 1962 1963 static void end_sync_request(struct r10bio *r10_bio) 1964 { 1965 struct mddev *mddev = r10_bio->mddev; 1966 1967 while (atomic_dec_and_test(&r10_bio->remaining)) { 1968 if (r10_bio->master_bio == NULL) { 1969 /* the primary of several recovery bios */ 1970 sector_t s = r10_bio->sectors; 1971 if (test_bit(R10BIO_MadeGood, &r10_bio->state) || 1972 test_bit(R10BIO_WriteError, &r10_bio->state)) 1973 reschedule_retry(r10_bio); 1974 else 1975 put_buf(r10_bio); 1976 md_done_sync(mddev, s, 1); 1977 break; 1978 } else { 1979 struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio; 1980 if (test_bit(R10BIO_MadeGood, &r10_bio->state) || 1981 test_bit(R10BIO_WriteError, &r10_bio->state)) 1982 reschedule_retry(r10_bio); 1983 else 1984 put_buf(r10_bio); 1985 r10_bio = r10_bio2; 1986 } 1987 } 1988 } 1989 1990 static void end_sync_write(struct bio *bio, int error) 1991 { 1992 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 1993 struct r10bio *r10_bio = bio->bi_private; 1994 struct mddev *mddev = r10_bio->mddev; 1995 struct r10conf *conf = mddev->private; 1996 int d; 1997 sector_t first_bad; 1998 int bad_sectors; 1999 int slot; 2000 int repl; 2001 struct md_rdev *rdev = NULL; 2002 2003 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); 2004 if (repl) 2005 rdev = conf->mirrors[d].replacement; 2006 else 2007 rdev = conf->mirrors[d].rdev; 2008 2009 if (!uptodate) { 2010 if (repl) 2011 md_error(mddev, rdev); 2012 else { 2013 set_bit(WriteErrorSeen, &rdev->flags); 2014 if (!test_and_set_bit(WantReplacement, &rdev->flags)) 2015 set_bit(MD_RECOVERY_NEEDED, 2016 &rdev->mddev->recovery); 2017 set_bit(R10BIO_WriteError, &r10_bio->state); 2018 } 2019 } else if (is_badblock(rdev, 2020 r10_bio->devs[slot].addr, 2021 r10_bio->sectors, 2022 &first_bad, &bad_sectors)) 2023 set_bit(R10BIO_MadeGood, &r10_bio->state); 2024 2025 rdev_dec_pending(rdev, mddev); 2026 2027 end_sync_request(r10_bio); 2028 } 2029 2030 /* 2031 * Note: sync and recover are handled very differently for raid10 2032 * This code is for resync. 2033 * For resync, we read through virtual addresses and read all blocks. 2034 * If there is any error, we schedule a write. The lowest numbered 2035 * drive is authoritative. 2036 * However, requests come for a physical address, so we need to map. 2037 * For every physical address there are raid_disks/copies virtual addresses, 2038 * which is always at least one, but is not necessarily an integer. 2039 * This means that a physical address can span multiple chunks, so we may 2040 * have to submit multiple io requests for a single sync request.
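 * Illustrative example (numbers assumed, not taken from the code): with raid_disks = 5 and copies = 2, each physical address maps to 5/2 = 2.5 virtual addresses, so a physical range can straddle a virtual chunk boundary and has to be split into more than one request.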
2041 */ 2042 /* 2043 * We check if all blocks are in-sync and only write to blocks that 2044 * aren't in sync 2045 */ 2046 static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) 2047 { 2048 struct r10conf *conf = mddev->private; 2049 int i, first; 2050 struct bio *tbio, *fbio; 2051 int vcnt; 2052 2053 atomic_set(&r10_bio->remaining, 1); 2054 2055 /* find the first device with a block */ 2056 for (i=0; i<conf->copies; i++) 2057 if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) 2058 break; 2059 2060 if (i == conf->copies) 2061 goto done; 2062 2063 first = i; 2064 fbio = r10_bio->devs[i].bio; 2065 2066 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9); 2067 /* now find blocks with errors */ 2068 for (i=0 ; i < conf->copies ; i++) { 2069 int j, d; 2070 2071 tbio = r10_bio->devs[i].bio; 2072 2073 if (tbio->bi_end_io != end_sync_read) 2074 continue; 2075 if (i == first) 2076 continue; 2077 if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) { 2078 /* We know that the bi_io_vec layout is the same for 2079 * both 'first' and 'i', so we just compare them. 2080 * All vec entries are PAGE_SIZE; 2081 */ 2082 int sectors = r10_bio->sectors; 2083 for (j = 0; j < vcnt; j++) { 2084 int len = PAGE_SIZE; 2085 if (sectors < (len / 512)) 2086 len = sectors * 512; 2087 if (memcmp(page_address(fbio->bi_io_vec[j].bv_page), 2088 page_address(tbio->bi_io_vec[j].bv_page), 2089 len)) 2090 break; 2091 sectors -= len/512; 2092 } 2093 if (j == vcnt) 2094 continue; 2095 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches); 2096 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) 2097 /* Don't fix anything. */ 2098 continue; 2099 } 2100 /* Ok, we need to write this bio, either to correct an 2101 * inconsistency or to correct an unreadable block. 
2102 * First we need to fixup bv_offset, bv_len and 2103 * bi_vecs, as the read request might have corrupted these 2104 */ 2105 bio_reset(tbio); 2106 2107 tbio->bi_vcnt = vcnt; 2108 tbio->bi_iter.bi_size = r10_bio->sectors << 9; 2109 tbio->bi_rw = WRITE; 2110 tbio->bi_private = r10_bio; 2111 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; 2112 2113 for (j=0; j < vcnt ; j++) { 2114 tbio->bi_io_vec[j].bv_offset = 0; 2115 tbio->bi_io_vec[j].bv_len = PAGE_SIZE; 2116 2117 memcpy(page_address(tbio->bi_io_vec[j].bv_page), 2118 page_address(fbio->bi_io_vec[j].bv_page), 2119 PAGE_SIZE); 2120 } 2121 tbio->bi_end_io = end_sync_write; 2122 2123 d = r10_bio->devs[i].devnum; 2124 atomic_inc(&conf->mirrors[d].rdev->nr_pending); 2125 atomic_inc(&r10_bio->remaining); 2126 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio)); 2127 2128 tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset; 2129 tbio->bi_bdev = conf->mirrors[d].rdev->bdev; 2130 generic_make_request(tbio); 2131 } 2132 2133 /* Now write out to any replacement devices 2134 * that are active 2135 */ 2136 for (i = 0; i < conf->copies; i++) { 2137 int j, d; 2138 2139 tbio = r10_bio->devs[i].repl_bio; 2140 if (!tbio || !tbio->bi_end_io) 2141 continue; 2142 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write 2143 && r10_bio->devs[i].bio != fbio) 2144 for (j = 0; j < vcnt; j++) 2145 memcpy(page_address(tbio->bi_io_vec[j].bv_page), 2146 page_address(fbio->bi_io_vec[j].bv_page), 2147 PAGE_SIZE); 2148 d = r10_bio->devs[i].devnum; 2149 atomic_inc(&r10_bio->remaining); 2150 md_sync_acct(conf->mirrors[d].replacement->bdev, 2151 bio_sectors(tbio)); 2152 generic_make_request(tbio); 2153 } 2154 2155 done: 2156 if (atomic_dec_and_test(&r10_bio->remaining)) { 2157 md_done_sync(mddev, r10_bio->sectors, 1); 2158 put_buf(r10_bio); 2159 } 2160 } 2161 2162 /* 2163 * Now for the recovery code. 2164 * Recovery happens across physical sectors. 2165 * We recover all non-in_sync drives by finding the virtual address of 2166 * each, and then choose a working drive that also has that virt address. 2167 * There is a separate r10_bio for each non-in_sync drive. 2168 * Only the first two slots are in use: the first for reading, 2169 * the second for writing. 2170 * 2171 */ 2172 static void fix_recovery_read_error(struct r10bio *r10_bio) 2173 { 2174 /* We got a read error during recovery. 2175 * We repeat the read in smaller page-sized sections. 2176 * If a read succeeds, write it to the new device or record 2177 * a bad block if we cannot. 2178 * If a read fails, record a bad block on both old and 2179 * new devices.
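 * The loop below walks the range one page at a time (at most PAGE_SIZE >> 9 sectors per step), reusing the pages already attached to devs[0].bio, so no new memory is allocated here.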
2180 */ 2181 struct mddev *mddev = r10_bio->mddev; 2182 struct r10conf *conf = mddev->private; 2183 struct bio *bio = r10_bio->devs[0].bio; 2184 sector_t sect = 0; 2185 int sectors = r10_bio->sectors; 2186 int idx = 0; 2187 int dr = r10_bio->devs[0].devnum; 2188 int dw = r10_bio->devs[1].devnum; 2189 2190 while (sectors) { 2191 int s = sectors; 2192 struct md_rdev *rdev; 2193 sector_t addr; 2194 int ok; 2195 2196 if (s > (PAGE_SIZE>>9)) 2197 s = PAGE_SIZE >> 9; 2198 2199 rdev = conf->mirrors[dr].rdev; 2200 addr = r10_bio->devs[0].addr + sect, 2201 ok = sync_page_io(rdev, 2202 addr, 2203 s << 9, 2204 bio->bi_io_vec[idx].bv_page, 2205 READ, false); 2206 if (ok) { 2207 rdev = conf->mirrors[dw].rdev; 2208 addr = r10_bio->devs[1].addr + sect; 2209 ok = sync_page_io(rdev, 2210 addr, 2211 s << 9, 2212 bio->bi_io_vec[idx].bv_page, 2213 WRITE, false); 2214 if (!ok) { 2215 set_bit(WriteErrorSeen, &rdev->flags); 2216 if (!test_and_set_bit(WantReplacement, 2217 &rdev->flags)) 2218 set_bit(MD_RECOVERY_NEEDED, 2219 &rdev->mddev->recovery); 2220 } 2221 } 2222 if (!ok) { 2223 /* We don't worry if we cannot set a bad block - 2224 * it really is bad so there is no loss in not 2225 * recording it yet 2226 */ 2227 rdev_set_badblocks(rdev, addr, s, 0); 2228 2229 if (rdev != conf->mirrors[dw].rdev) { 2230 /* need bad block on destination too */ 2231 struct md_rdev *rdev2 = conf->mirrors[dw].rdev; 2232 addr = r10_bio->devs[1].addr + sect; 2233 ok = rdev_set_badblocks(rdev2, addr, s, 0); 2234 if (!ok) { 2235 /* just abort the recovery */ 2236 printk(KERN_NOTICE 2237 "md/raid10:%s: recovery aborted" 2238 " due to read error\n", 2239 mdname(mddev)); 2240 2241 conf->mirrors[dw].recovery_disabled 2242 = mddev->recovery_disabled; 2243 set_bit(MD_RECOVERY_INTR, 2244 &mddev->recovery); 2245 break; 2246 } 2247 } 2248 } 2249 2250 sectors -= s; 2251 sect += s; 2252 idx++; 2253 } 2254 } 2255 2256 static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio) 2257 { 2258 struct r10conf *conf = mddev->private; 2259 int d; 2260 struct bio *wbio, *wbio2; 2261 2262 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) { 2263 fix_recovery_read_error(r10_bio); 2264 end_sync_request(r10_bio); 2265 return; 2266 } 2267 2268 /* 2269 * share the pages with the first bio 2270 * and submit the write request 2271 */ 2272 d = r10_bio->devs[1].devnum; 2273 wbio = r10_bio->devs[1].bio; 2274 wbio2 = r10_bio->devs[1].repl_bio; 2275 /* Need to test wbio2->bi_end_io before we call 2276 * generic_make_request as if the former is NULL, 2277 * the latter is free to free wbio2. 2278 */ 2279 if (wbio2 && !wbio2->bi_end_io) 2280 wbio2 = NULL; 2281 if (wbio->bi_end_io) { 2282 atomic_inc(&conf->mirrors[d].rdev->nr_pending); 2283 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio)); 2284 generic_make_request(wbio); 2285 } 2286 if (wbio2) { 2287 atomic_inc(&conf->mirrors[d].replacement->nr_pending); 2288 md_sync_acct(conf->mirrors[d].replacement->bdev, 2289 bio_sectors(wbio2)); 2290 generic_make_request(wbio2); 2291 } 2292 } 2293 2294 /* 2295 * Used by fix_read_error() to decay the per rdev read_errors. 2296 * We halve the read error count for every hour that has elapsed 2297 * since the last recorded read error. 
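 * Worked example (illustrative numbers): a stored count of 40 seen again three hours after the previous error decays to 40 >> 3 = 5 before the new error is counted.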
2298 * 2299 */ 2300 static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev) 2301 { 2302 struct timespec cur_time_mon; 2303 unsigned long hours_since_last; 2304 unsigned int read_errors = atomic_read(&rdev->read_errors); 2305 2306 ktime_get_ts(&cur_time_mon); 2307 2308 if (rdev->last_read_error.tv_sec == 0 && 2309 rdev->last_read_error.tv_nsec == 0) { 2310 /* first time we've seen a read error */ 2311 rdev->last_read_error = cur_time_mon; 2312 return; 2313 } 2314 2315 hours_since_last = (cur_time_mon.tv_sec - 2316 rdev->last_read_error.tv_sec) / 3600; 2317 2318 rdev->last_read_error = cur_time_mon; 2319 2320 /* 2321 * if hours_since_last is > the number of bits in read_errors 2322 * just set read errors to 0. We do this to avoid 2323 * overflowing the shift of read_errors by hours_since_last. 2324 */ 2325 if (hours_since_last >= 8 * sizeof(read_errors)) 2326 atomic_set(&rdev->read_errors, 0); 2327 else 2328 atomic_set(&rdev->read_errors, read_errors >> hours_since_last); 2329 } 2330 2331 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector, 2332 int sectors, struct page *page, int rw) 2333 { 2334 sector_t first_bad; 2335 int bad_sectors; 2336 2337 if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors) 2338 && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags))) 2339 return -1; 2340 if (sync_page_io(rdev, sector, sectors << 9, page, rw, false)) 2341 /* success */ 2342 return 1; 2343 if (rw == WRITE) { 2344 set_bit(WriteErrorSeen, &rdev->flags); 2345 if (!test_and_set_bit(WantReplacement, &rdev->flags)) 2346 set_bit(MD_RECOVERY_NEEDED, 2347 &rdev->mddev->recovery); 2348 } 2349 /* need to record an error - either for the block or the device */ 2350 if (!rdev_set_badblocks(rdev, sector, sectors, 0)) 2351 md_error(rdev->mddev, rdev); 2352 return 0; 2353 } 2354 2355 /* 2356 * This is a kernel thread which: 2357 * 2358 * 1. Retries failed read operations on working mirrors. 2359 * 2. Updates the raid superblock when problems are encountered. 2360 * 3. Performs writes following reads for array synchronising. 2361 */ 2362 2363 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio) 2364 { 2365 int sect = 0; /* Offset from r10_bio->sector */ 2366 int sectors = r10_bio->sectors; 2367 struct md_rdev *rdev; 2368 int max_read_errors = atomic_read(&mddev->max_corr_read_errors); 2369 int d = r10_bio->devs[r10_bio->read_slot].devnum; 2370 2371 /* still own a reference to this rdev, so it cannot 2372 * have been cleared recently.
2373 */ 2374 rdev = conf->mirrors[d].rdev; 2375 2376 if (test_bit(Faulty, &rdev->flags)) 2377 /* drive has already been failed, just ignore any 2378 more fix_read_error() attempts */ 2379 return; 2380 2381 check_decay_read_errors(mddev, rdev); 2382 atomic_inc(&rdev->read_errors); 2383 if (atomic_read(&rdev->read_errors) > max_read_errors) { 2384 char b[BDEVNAME_SIZE]; 2385 bdevname(rdev->bdev, b); 2386 2387 printk(KERN_NOTICE 2388 "md/raid10:%s: %s: Raid device exceeded " 2389 "read_error threshold [cur %d:max %d]\n", 2390 mdname(mddev), b, 2391 atomic_read(&rdev->read_errors), max_read_errors); 2392 printk(KERN_NOTICE 2393 "md/raid10:%s: %s: Failing raid device\n", 2394 mdname(mddev), b); 2395 md_error(mddev, conf->mirrors[d].rdev); 2396 r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED; 2397 return; 2398 } 2399 2400 while(sectors) { 2401 int s = sectors; 2402 int sl = r10_bio->read_slot; 2403 int success = 0; 2404 int start; 2405 2406 if (s > (PAGE_SIZE>>9)) 2407 s = PAGE_SIZE >> 9; 2408 2409 rcu_read_lock(); 2410 do { 2411 sector_t first_bad; 2412 int bad_sectors; 2413 2414 d = r10_bio->devs[sl].devnum; 2415 rdev = rcu_dereference(conf->mirrors[d].rdev); 2416 if (rdev && 2417 !test_bit(Unmerged, &rdev->flags) && 2418 test_bit(In_sync, &rdev->flags) && 2419 is_badblock(rdev, r10_bio->devs[sl].addr + sect, s, 2420 &first_bad, &bad_sectors) == 0) { 2421 atomic_inc(&rdev->nr_pending); 2422 rcu_read_unlock(); 2423 success = sync_page_io(rdev, 2424 r10_bio->devs[sl].addr + 2425 sect, 2426 s<<9, 2427 conf->tmppage, READ, false); 2428 rdev_dec_pending(rdev, mddev); 2429 rcu_read_lock(); 2430 if (success) 2431 break; 2432 } 2433 sl++; 2434 if (sl == conf->copies) 2435 sl = 0; 2436 } while (!success && sl != r10_bio->read_slot); 2437 rcu_read_unlock(); 2438 2439 if (!success) { 2440 /* Cannot read from anywhere, just mark the block 2441 * as bad on the first device to discourage future 2442 * reads. 
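 * If even that bad-block record cannot be made, md_error() below fails the whole device.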
2443 */ 2444 int dn = r10_bio->devs[r10_bio->read_slot].devnum; 2445 rdev = conf->mirrors[dn].rdev; 2446 2447 if (!rdev_set_badblocks( 2448 rdev, 2449 r10_bio->devs[r10_bio->read_slot].addr 2450 + sect, 2451 s, 0)) { 2452 md_error(mddev, rdev); 2453 r10_bio->devs[r10_bio->read_slot].bio 2454 = IO_BLOCKED; 2455 } 2456 break; 2457 } 2458 2459 start = sl; 2460 /* write it back and re-read */ 2461 rcu_read_lock(); 2462 while (sl != r10_bio->read_slot) { 2463 char b[BDEVNAME_SIZE]; 2464 2465 if (sl==0) 2466 sl = conf->copies; 2467 sl--; 2468 d = r10_bio->devs[sl].devnum; 2469 rdev = rcu_dereference(conf->mirrors[d].rdev); 2470 if (!rdev || 2471 test_bit(Unmerged, &rdev->flags) || 2472 !test_bit(In_sync, &rdev->flags)) 2473 continue; 2474 2475 atomic_inc(&rdev->nr_pending); 2476 rcu_read_unlock(); 2477 if (r10_sync_page_io(rdev, 2478 r10_bio->devs[sl].addr + 2479 sect, 2480 s, conf->tmppage, WRITE) 2481 == 0) { 2482 /* Well, this device is dead */ 2483 printk(KERN_NOTICE 2484 "md/raid10:%s: read correction " 2485 "write failed" 2486 " (%d sectors at %llu on %s)\n", 2487 mdname(mddev), s, 2488 (unsigned long long)( 2489 sect + 2490 choose_data_offset(r10_bio, 2491 rdev)), 2492 bdevname(rdev->bdev, b)); 2493 printk(KERN_NOTICE "md/raid10:%s: %s: failing " 2494 "drive\n", 2495 mdname(mddev), 2496 bdevname(rdev->bdev, b)); 2497 } 2498 rdev_dec_pending(rdev, mddev); 2499 rcu_read_lock(); 2500 } 2501 sl = start; 2502 while (sl != r10_bio->read_slot) { 2503 char b[BDEVNAME_SIZE]; 2504 2505 if (sl==0) 2506 sl = conf->copies; 2507 sl--; 2508 d = r10_bio->devs[sl].devnum; 2509 rdev = rcu_dereference(conf->mirrors[d].rdev); 2510 if (!rdev || 2511 !test_bit(In_sync, &rdev->flags)) 2512 continue; 2513 2514 atomic_inc(&rdev->nr_pending); 2515 rcu_read_unlock(); 2516 switch (r10_sync_page_io(rdev, 2517 r10_bio->devs[sl].addr + 2518 sect, 2519 s, conf->tmppage, 2520 READ)) { 2521 case 0: 2522 /* Well, this device is dead */ 2523 printk(KERN_NOTICE 2524 "md/raid10:%s: unable to read back " 2525 "corrected sectors" 2526 " (%d sectors at %llu on %s)\n", 2527 mdname(mddev), s, 2528 (unsigned long long)( 2529 sect + 2530 choose_data_offset(r10_bio, rdev)), 2531 bdevname(rdev->bdev, b)); 2532 printk(KERN_NOTICE "md/raid10:%s: %s: failing " 2533 "drive\n", 2534 mdname(mddev), 2535 bdevname(rdev->bdev, b)); 2536 break; 2537 case 1: 2538 printk(KERN_INFO 2539 "md/raid10:%s: read error corrected" 2540 " (%d sectors at %llu on %s)\n", 2541 mdname(mddev), s, 2542 (unsigned long long)( 2543 sect + 2544 choose_data_offset(r10_bio, rdev)), 2545 bdevname(rdev->bdev, b)); 2546 atomic_add(s, &rdev->corrected_errors); 2547 } 2548 2549 rdev_dec_pending(rdev, mddev); 2550 rcu_read_lock(); 2551 } 2552 rcu_read_unlock(); 2553 2554 sectors -= s; 2555 sect += s; 2556 } 2557 } 2558 2559 static int narrow_write_error(struct r10bio *r10_bio, int i) 2560 { 2561 struct bio *bio = r10_bio->master_bio; 2562 struct mddev *mddev = r10_bio->mddev; 2563 struct r10conf *conf = mddev->private; 2564 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev; 2565 /* bio has the data to be written to slot 'i' where 2566 * we just recently had a write error. 2567 * We repeatedly clone the bio and trim down to one block, 2568 * then try the write. Where the write fails we record 2569 * a bad block. 2570 * It is conceivable that the bio doesn't exactly align with 2571 * blocks. We must handle this. 2572 * 2573 * We currently own a reference to the rdev. 
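 * Alignment sketch (illustrative numbers, not taken from the code): with badblocks.shift == 3 the bad-block unit is 8 sectors, so a write starting at sector 13 is first trimmed to ((13 + 8) & ~7) - 13 = 3 sectors; every later clone then starts on an 8-sector boundary and covers block_sectors at a time.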
2574 */ 2575 2576 int block_sectors; 2577 sector_t sector; 2578 int sectors; 2579 int sect_to_write = r10_bio->sectors; 2580 int ok = 1; 2581 2582 if (rdev->badblocks.shift < 0) 2583 return 0; 2584 2585 block_sectors = 1 << rdev->badblocks.shift; 2586 sector = r10_bio->sector; 2587 sectors = ((r10_bio->sector + block_sectors) 2588 & ~(sector_t)(block_sectors - 1)) 2589 - sector; 2590 2591 while (sect_to_write) { 2592 struct bio *wbio; 2593 if (sectors > sect_to_write) 2594 sectors = sect_to_write; 2595 /* Write at 'sector' for 'sectors' */ 2596 wbio = bio_clone_mddev(bio, GFP_NOIO, mddev); 2597 bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors); 2598 wbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+ 2599 choose_data_offset(r10_bio, rdev) + 2600 (sector - r10_bio->sector)); 2601 wbio->bi_bdev = rdev->bdev; 2602 if (submit_bio_wait(WRITE, wbio) == 0) 2603 /* Failure! */ 2604 ok = rdev_set_badblocks(rdev, sector, 2605 sectors, 0) 2606 && ok; 2607 2608 bio_put(wbio); 2609 sect_to_write -= sectors; 2610 sector += sectors; 2611 sectors = block_sectors; 2612 } 2613 return ok; 2614 } 2615 2616 static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio) 2617 { 2618 int slot = r10_bio->read_slot; 2619 struct bio *bio; 2620 struct r10conf *conf = mddev->private; 2621 struct md_rdev *rdev = r10_bio->devs[slot].rdev; 2622 char b[BDEVNAME_SIZE]; 2623 unsigned long do_sync; 2624 int max_sectors; 2625 2626 /* we got a read error. Maybe the drive is bad. Maybe just 2627 * the block and we can fix it. 2628 * We freeze all other IO, and try reading the block from 2629 * other devices. When we find one, we re-write 2630 * and check it that fixes the read error. 2631 * This is all done synchronously while the array is 2632 * frozen. 2633 */ 2634 bio = r10_bio->devs[slot].bio; 2635 bdevname(bio->bi_bdev, b); 2636 bio_put(bio); 2637 r10_bio->devs[slot].bio = NULL; 2638 2639 if (mddev->ro == 0) { 2640 freeze_array(conf, 1); 2641 fix_read_error(conf, mddev, r10_bio); 2642 unfreeze_array(conf); 2643 } else 2644 r10_bio->devs[slot].bio = IO_BLOCKED; 2645 2646 rdev_dec_pending(rdev, mddev); 2647 2648 read_more: 2649 rdev = read_balance(conf, r10_bio, &max_sectors); 2650 if (rdev == NULL) { 2651 printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O" 2652 " read error for block %llu\n", 2653 mdname(mddev), b, 2654 (unsigned long long)r10_bio->sector); 2655 raid_end_bio_io(r10_bio); 2656 return; 2657 } 2658 2659 do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC); 2660 slot = r10_bio->read_slot; 2661 printk_ratelimited( 2662 KERN_ERR 2663 "md/raid10:%s: %s: redirecting " 2664 "sector %llu to another mirror\n", 2665 mdname(mddev), 2666 bdevname(rdev->bdev, b), 2667 (unsigned long long)r10_bio->sector); 2668 bio = bio_clone_mddev(r10_bio->master_bio, 2669 GFP_NOIO, mddev); 2670 bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors); 2671 r10_bio->devs[slot].bio = bio; 2672 r10_bio->devs[slot].rdev = rdev; 2673 bio->bi_iter.bi_sector = r10_bio->devs[slot].addr 2674 + choose_data_offset(r10_bio, rdev); 2675 bio->bi_bdev = rdev->bdev; 2676 bio->bi_rw = READ | do_sync; 2677 bio->bi_private = r10_bio; 2678 bio->bi_end_io = raid10_end_read_request; 2679 if (max_sectors < r10_bio->sectors) { 2680 /* Drat - have to split this up more */ 2681 struct bio *mbio = r10_bio->master_bio; 2682 int sectors_handled = 2683 r10_bio->sector + max_sectors 2684 - mbio->bi_iter.bi_sector; 2685 r10_bio->sectors = max_sectors; 2686 spin_lock_irq(&conf->device_lock); 2687 if (mbio->bi_phys_segments == 0) 2688 
mbio->bi_phys_segments = 2; 2689 else 2690 mbio->bi_phys_segments++; 2691 spin_unlock_irq(&conf->device_lock); 2692 generic_make_request(bio); 2693 2694 r10_bio = mempool_alloc(conf->r10bio_pool, 2695 GFP_NOIO); 2696 r10_bio->master_bio = mbio; 2697 r10_bio->sectors = bio_sectors(mbio) - sectors_handled; 2698 r10_bio->state = 0; 2699 set_bit(R10BIO_ReadError, 2700 &r10_bio->state); 2701 r10_bio->mddev = mddev; 2702 r10_bio->sector = mbio->bi_iter.bi_sector 2703 + sectors_handled; 2704 2705 goto read_more; 2706 } else 2707 generic_make_request(bio); 2708 } 2709 2710 static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) 2711 { 2712 /* Some sort of write request has finished and it 2713 * succeeded in writing where we thought there was a 2714 * bad block. So forget the bad block. 2715 * Or possibly if failed and we need to record 2716 * a bad block. 2717 */ 2718 int m; 2719 struct md_rdev *rdev; 2720 2721 if (test_bit(R10BIO_IsSync, &r10_bio->state) || 2722 test_bit(R10BIO_IsRecover, &r10_bio->state)) { 2723 for (m = 0; m < conf->copies; m++) { 2724 int dev = r10_bio->devs[m].devnum; 2725 rdev = conf->mirrors[dev].rdev; 2726 if (r10_bio->devs[m].bio == NULL) 2727 continue; 2728 if (test_bit(BIO_UPTODATE, 2729 &r10_bio->devs[m].bio->bi_flags)) { 2730 rdev_clear_badblocks( 2731 rdev, 2732 r10_bio->devs[m].addr, 2733 r10_bio->sectors, 0); 2734 } else { 2735 if (!rdev_set_badblocks( 2736 rdev, 2737 r10_bio->devs[m].addr, 2738 r10_bio->sectors, 0)) 2739 md_error(conf->mddev, rdev); 2740 } 2741 rdev = conf->mirrors[dev].replacement; 2742 if (r10_bio->devs[m].repl_bio == NULL) 2743 continue; 2744 if (test_bit(BIO_UPTODATE, 2745 &r10_bio->devs[m].repl_bio->bi_flags)) { 2746 rdev_clear_badblocks( 2747 rdev, 2748 r10_bio->devs[m].addr, 2749 r10_bio->sectors, 0); 2750 } else { 2751 if (!rdev_set_badblocks( 2752 rdev, 2753 r10_bio->devs[m].addr, 2754 r10_bio->sectors, 0)) 2755 md_error(conf->mddev, rdev); 2756 } 2757 } 2758 put_buf(r10_bio); 2759 } else { 2760 for (m = 0; m < conf->copies; m++) { 2761 int dev = r10_bio->devs[m].devnum; 2762 struct bio *bio = r10_bio->devs[m].bio; 2763 rdev = conf->mirrors[dev].rdev; 2764 if (bio == IO_MADE_GOOD) { 2765 rdev_clear_badblocks( 2766 rdev, 2767 r10_bio->devs[m].addr, 2768 r10_bio->sectors, 0); 2769 rdev_dec_pending(rdev, conf->mddev); 2770 } else if (bio != NULL && 2771 !test_bit(BIO_UPTODATE, &bio->bi_flags)) { 2772 if (!narrow_write_error(r10_bio, m)) { 2773 md_error(conf->mddev, rdev); 2774 set_bit(R10BIO_Degraded, 2775 &r10_bio->state); 2776 } 2777 rdev_dec_pending(rdev, conf->mddev); 2778 } 2779 bio = r10_bio->devs[m].repl_bio; 2780 rdev = conf->mirrors[dev].replacement; 2781 if (rdev && bio == IO_MADE_GOOD) { 2782 rdev_clear_badblocks( 2783 rdev, 2784 r10_bio->devs[m].addr, 2785 r10_bio->sectors, 0); 2786 rdev_dec_pending(rdev, conf->mddev); 2787 } 2788 } 2789 if (test_bit(R10BIO_WriteError, 2790 &r10_bio->state)) 2791 close_write(r10_bio); 2792 raid_end_bio_io(r10_bio); 2793 } 2794 } 2795 2796 static void raid10d(struct md_thread *thread) 2797 { 2798 struct mddev *mddev = thread->mddev; 2799 struct r10bio *r10_bio; 2800 unsigned long flags; 2801 struct r10conf *conf = mddev->private; 2802 struct list_head *head = &conf->retry_list; 2803 struct blk_plug plug; 2804 2805 md_check_recovery(mddev); 2806 2807 blk_start_plug(&plug); 2808 for (;;) { 2809 2810 flush_pending_writes(conf); 2811 2812 spin_lock_irqsave(&conf->device_lock, flags); 2813 if (list_empty(head)) { 2814 spin_unlock_irqrestore(&conf->device_lock, flags); 2815 
break; 2816 } 2817 r10_bio = list_entry(head->prev, struct r10bio, retry_list); 2818 list_del(head->prev); 2819 conf->nr_queued--; 2820 spin_unlock_irqrestore(&conf->device_lock, flags); 2821 2822 mddev = r10_bio->mddev; 2823 conf = mddev->private; 2824 if (test_bit(R10BIO_MadeGood, &r10_bio->state) || 2825 test_bit(R10BIO_WriteError, &r10_bio->state)) 2826 handle_write_completed(conf, r10_bio); 2827 else if (test_bit(R10BIO_IsReshape, &r10_bio->state)) 2828 reshape_request_write(mddev, r10_bio); 2829 else if (test_bit(R10BIO_IsSync, &r10_bio->state)) 2830 sync_request_write(mddev, r10_bio); 2831 else if (test_bit(R10BIO_IsRecover, &r10_bio->state)) 2832 recovery_request_write(mddev, r10_bio); 2833 else if (test_bit(R10BIO_ReadError, &r10_bio->state)) 2834 handle_read_error(mddev, r10_bio); 2835 else { 2836 /* just a partial read to be scheduled from a 2837 * separate context 2838 */ 2839 int slot = r10_bio->read_slot; 2840 generic_make_request(r10_bio->devs[slot].bio); 2841 } 2842 2843 cond_resched(); 2844 if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) 2845 md_check_recovery(mddev); 2846 } 2847 blk_finish_plug(&plug); 2848 } 2849 2850 static int init_resync(struct r10conf *conf) 2851 { 2852 int buffs; 2853 int i; 2854 2855 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE; 2856 BUG_ON(conf->r10buf_pool); 2857 conf->have_replacement = 0; 2858 for (i = 0; i < conf->geo.raid_disks; i++) 2859 if (conf->mirrors[i].replacement) 2860 conf->have_replacement = 1; 2861 conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf); 2862 if (!conf->r10buf_pool) 2863 return -ENOMEM; 2864 conf->next_resync = 0; 2865 return 0; 2866 } 2867 2868 /* 2869 * perform a "sync" on one "block" 2870 * 2871 * We need to make sure that no normal I/O request - particularly write 2872 * requests - conflict with active sync requests. 2873 * 2874 * This is achieved by tracking pending requests and a 'barrier' concept 2875 * that can be installed to exclude normal IO requests. 2876 * 2877 * Resync and recovery are handled very differently. 2878 * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery. 2879 * 2880 * For resync, we iterate over virtual addresses, read all copies, 2881 * and update if there are differences. If only one copy is live, 2882 * skip it. 2883 * For recovery, we iterate over physical addresses, read a good 2884 * value for each non-in_sync drive, and over-write. 2885 * 2886 * So, for recovery we may have several outstanding complex requests for a 2887 * given address, one for each out-of-sync device. We model this by allocating 2888 * a number of r10_bio structures, one for each out-of-sync device. 2889 * As we setup these structures, we collect all bio's together into a list 2890 * which we then process collectively to add pages, and then process again 2891 * to pass to generic_make_request. 2892 * 2893 * The r10_bio structures are linked using a borrowed master_bio pointer. 2894 * This link is counted in ->remaining. When the r10_bio that points to NULL 2895 * has its remaining count decremented to 0, the whole complex operation 2896 * is complete. 
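 * For example (hypothetical recovery of two drives): the r10_bio built second has master_bio pointing at the one built first, whose own master_bio is NULL; each such link bumps ->remaining on the r10_bio it points to, so teardown happens only once every member of the chain has finished.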
2897 * 2898 */ 2899 2900 static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, 2901 int *skipped, int go_faster) 2902 { 2903 struct r10conf *conf = mddev->private; 2904 struct r10bio *r10_bio; 2905 struct bio *biolist = NULL, *bio; 2906 sector_t max_sector, nr_sectors; 2907 int i; 2908 int max_sync; 2909 sector_t sync_blocks; 2910 sector_t sectors_skipped = 0; 2911 int chunks_skipped = 0; 2912 sector_t chunk_mask = conf->geo.chunk_mask; 2913 2914 if (!conf->r10buf_pool) 2915 if (init_resync(conf)) 2916 return 0; 2917 2918 /* 2919 * Allow skipping a full rebuild for incremental assembly 2920 * of a clean array, like RAID1 does. 2921 */ 2922 if (mddev->bitmap == NULL && 2923 mddev->recovery_cp == MaxSector && 2924 mddev->reshape_position == MaxSector && 2925 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && 2926 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && 2927 !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 2928 conf->fullsync == 0) { 2929 *skipped = 1; 2930 return mddev->dev_sectors - sector_nr; 2931 } 2932 2933 skipped: 2934 max_sector = mddev->dev_sectors; 2935 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 2936 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 2937 max_sector = mddev->resync_max_sectors; 2938 if (sector_nr >= max_sector) { 2939 /* If we aborted, we need to abort the 2940 * sync on the 'current' bitmap chunks (there can 2941 * be several when recovering multiple devices), 2942 * as we may have started syncing it but not finished. 2943 * We can find the current address in 2944 * mddev->curr_resync, but for recovery, 2945 * we need to convert that to several 2946 * virtual addresses. 2947 */ 2948 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { 2949 end_reshape(conf); 2950 close_sync(conf); 2951 return 0; 2952 } 2953 2954 if (mddev->curr_resync < max_sector) { /* aborted */ 2955 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 2956 bitmap_end_sync(mddev->bitmap, mddev->curr_resync, 2957 &sync_blocks, 1); 2958 else for (i = 0; i < conf->geo.raid_disks; i++) { 2959 sector_t sect = 2960 raid10_find_virt(conf, mddev->curr_resync, i); 2961 bitmap_end_sync(mddev->bitmap, sect, 2962 &sync_blocks, 1); 2963 } 2964 } else { 2965 /* completed sync */ 2966 if ((!mddev->bitmap || conf->fullsync) 2967 && conf->have_replacement 2968 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 2969 /* Completed a full sync so the replacements 2970 * are now fully recovered. 2971 */ 2972 for (i = 0; i < conf->geo.raid_disks; i++) 2973 if (conf->mirrors[i].replacement) 2974 conf->mirrors[i].replacement 2975 ->recovery_offset 2976 = MaxSector; 2977 } 2978 conf->fullsync = 0; 2979 } 2980 bitmap_close_sync(mddev->bitmap); 2981 close_sync(conf); 2982 *skipped = 1; 2983 return sectors_skipped; 2984 } 2985 2986 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 2987 return reshape_request(mddev, sector_nr, skipped); 2988 2989 if (chunks_skipped >= conf->geo.raid_disks) { 2990 /* if there has been nothing to do on any drive, 2991 * then there is nothing to do at all.
2992 */ 2993 *skipped = 1; 2994 return (max_sector - sector_nr) + sectors_skipped; 2995 } 2996 2997 if (max_sector > mddev->resync_max) 2998 max_sector = mddev->resync_max; /* Don't do IO beyond here */ 2999 3000 /* make sure whole request will fit in a chunk - if chunks 3001 * are meaningful 3002 */ 3003 if (conf->geo.near_copies < conf->geo.raid_disks && 3004 max_sector > (sector_nr | chunk_mask)) 3005 max_sector = (sector_nr | chunk_mask) + 1; 3006 /* 3007 * If there is non-resync activity waiting for us then 3008 * put in a delay to throttle resync. 3009 */ 3010 if (!go_faster && conf->nr_waiting) 3011 msleep_interruptible(1000); 3012 3013 /* Again, very different code for resync and recovery. 3014 * Both must result in an r10bio with a list of bios that 3015 * have bi_end_io, bi_sector, bi_bdev set, 3016 * and bi_private set to the r10bio. 3017 * For recovery, we may actually create several r10bios 3018 * with 2 bios in each, that correspond to the bios in the main one. 3019 * In this case, the subordinate r10bios link back through a 3020 * borrowed master_bio pointer, and the counter in the master 3021 * includes a ref from each subordinate. 3022 */ 3023 /* First, we decide what to do and set ->bi_end_io 3024 * To end_sync_read if we want to read, and 3025 * end_sync_write if we will want to write. 3026 */ 3027 3028 max_sync = RESYNC_PAGES << (PAGE_SHIFT-9); 3029 if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 3030 /* recovery... the complicated one */ 3031 int j; 3032 r10_bio = NULL; 3033 3034 for (i = 0 ; i < conf->geo.raid_disks; i++) { 3035 int still_degraded; 3036 struct r10bio *rb2; 3037 sector_t sect; 3038 int must_sync; 3039 int any_working; 3040 struct raid10_info *mirror = &conf->mirrors[i]; 3041 3042 if ((mirror->rdev == NULL || 3043 test_bit(In_sync, &mirror->rdev->flags)) 3044 && 3045 (mirror->replacement == NULL || 3046 test_bit(Faulty, 3047 &mirror->replacement->flags))) 3048 continue; 3049 3050 still_degraded = 0; 3051 /* want to reconstruct this device */ 3052 rb2 = r10_bio; 3053 sect = raid10_find_virt(conf, sector_nr, i); 3054 if (sect >= mddev->resync_max_sectors) { 3055 /* last stripe is not complete - don't 3056 * try to recover this sector. 
3057 */ 3058 continue; 3059 } 3060 /* Unless we are doing a full sync, or a replacement 3061 * we only need to recover the block if it is set in 3062 * the bitmap 3063 */ 3064 must_sync = bitmap_start_sync(mddev->bitmap, sect, 3065 &sync_blocks, 1); 3066 if (sync_blocks < max_sync) 3067 max_sync = sync_blocks; 3068 if (!must_sync && 3069 mirror->replacement == NULL && 3070 !conf->fullsync) { 3071 /* yep, skip the sync_blocks here, but don't assume 3072 * that there will never be anything to do here 3073 */ 3074 chunks_skipped = -1; 3075 continue; 3076 } 3077 3078 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); 3079 r10_bio->state = 0; 3080 raise_barrier(conf, rb2 != NULL); 3081 atomic_set(&r10_bio->remaining, 0); 3082 3083 r10_bio->master_bio = (struct bio*)rb2; 3084 if (rb2) 3085 atomic_inc(&rb2->remaining); 3086 r10_bio->mddev = mddev; 3087 set_bit(R10BIO_IsRecover, &r10_bio->state); 3088 r10_bio->sector = sect; 3089 3090 raid10_find_phys(conf, r10_bio); 3091 3092 /* Need to check if the array will still be 3093 * degraded 3094 */ 3095 for (j = 0; j < conf->geo.raid_disks; j++) 3096 if (conf->mirrors[j].rdev == NULL || 3097 test_bit(Faulty, &conf->mirrors[j].rdev->flags)) { 3098 still_degraded = 1; 3099 break; 3100 } 3101 3102 must_sync = bitmap_start_sync(mddev->bitmap, sect, 3103 &sync_blocks, still_degraded); 3104 3105 any_working = 0; 3106 for (j=0; j<conf->copies;j++) { 3107 int k; 3108 int d = r10_bio->devs[j].devnum; 3109 sector_t from_addr, to_addr; 3110 struct md_rdev *rdev; 3111 sector_t sector, first_bad; 3112 int bad_sectors; 3113 if (!conf->mirrors[d].rdev || 3114 !test_bit(In_sync, &conf->mirrors[d].rdev->flags)) 3115 continue; 3116 /* This is where we read from */ 3117 any_working = 1; 3118 rdev = conf->mirrors[d].rdev; 3119 sector = r10_bio->devs[j].addr; 3120 3121 if (is_badblock(rdev, sector, max_sync, 3122 &first_bad, &bad_sectors)) { 3123 if (first_bad > sector) 3124 max_sync = first_bad - sector; 3125 else { 3126 bad_sectors -= (sector 3127 - first_bad); 3128 if (max_sync > bad_sectors) 3129 max_sync = bad_sectors; 3130 continue; 3131 } 3132 } 3133 bio = r10_bio->devs[0].bio; 3134 bio_reset(bio); 3135 bio->bi_next = biolist; 3136 biolist = bio; 3137 bio->bi_private = r10_bio; 3138 bio->bi_end_io = end_sync_read; 3139 bio->bi_rw = READ; 3140 from_addr = r10_bio->devs[j].addr; 3141 bio->bi_iter.bi_sector = from_addr + 3142 rdev->data_offset; 3143 bio->bi_bdev = rdev->bdev; 3144 atomic_inc(&rdev->nr_pending); 3145 /* and we write to 'i' (if not in_sync) */ 3146 3147 for (k=0; k<conf->copies; k++) 3148 if (r10_bio->devs[k].devnum == i) 3149 break; 3150 BUG_ON(k == conf->copies); 3151 to_addr = r10_bio->devs[k].addr; 3152 r10_bio->devs[0].devnum = d; 3153 r10_bio->devs[0].addr = from_addr; 3154 r10_bio->devs[1].devnum = i; 3155 r10_bio->devs[1].addr = to_addr; 3156 3157 rdev = mirror->rdev; 3158 if (!test_bit(In_sync, &rdev->flags)) { 3159 bio = r10_bio->devs[1].bio; 3160 bio_reset(bio); 3161 bio->bi_next = biolist; 3162 biolist = bio; 3163 bio->bi_private = r10_bio; 3164 bio->bi_end_io = end_sync_write; 3165 bio->bi_rw = WRITE; 3166 bio->bi_iter.bi_sector = to_addr 3167 + rdev->data_offset; 3168 bio->bi_bdev = rdev->bdev; 3169 atomic_inc(&r10_bio->remaining); 3170 } else 3171 r10_bio->devs[1].bio->bi_end_io = NULL; 3172 3173 /* and maybe write to replacement */ 3174 bio = r10_bio->devs[1].repl_bio; 3175 if (bio) 3176 bio->bi_end_io = NULL; 3177 rdev = mirror->replacement; 3178 /* Note: if rdev != NULL, then bio 3179 * cannot be NULL as r10buf_pool_alloc will 3180 * 
have allocated it. 3181 * So the second test here is pointless. 3182 * But it keeps semantic-checkers happy, and 3183 * this comment keeps human reviewers 3184 * happy. 3185 */ 3186 if (rdev == NULL || bio == NULL || 3187 test_bit(Faulty, &rdev->flags)) 3188 break; 3189 bio_reset(bio); 3190 bio->bi_next = biolist; 3191 biolist = bio; 3192 bio->bi_private = r10_bio; 3193 bio->bi_end_io = end_sync_write; 3194 bio->bi_rw = WRITE; 3195 bio->bi_iter.bi_sector = to_addr + 3196 rdev->data_offset; 3197 bio->bi_bdev = rdev->bdev; 3198 atomic_inc(&r10_bio->remaining); 3199 break; 3200 } 3201 if (j == conf->copies) { 3202 /* Cannot recover, so abort the recovery or 3203 * record a bad block */ 3204 if (any_working) { 3205 /* problem is that there are bad blocks 3206 * on other device(s) 3207 */ 3208 int k; 3209 for (k = 0; k < conf->copies; k++) 3210 if (r10_bio->devs[k].devnum == i) 3211 break; 3212 if (!test_bit(In_sync, 3213 &mirror->rdev->flags) 3214 && !rdev_set_badblocks( 3215 mirror->rdev, 3216 r10_bio->devs[k].addr, 3217 max_sync, 0)) 3218 any_working = 0; 3219 if (mirror->replacement && 3220 !rdev_set_badblocks( 3221 mirror->replacement, 3222 r10_bio->devs[k].addr, 3223 max_sync, 0)) 3224 any_working = 0; 3225 } 3226 if (!any_working) { 3227 if (!test_and_set_bit(MD_RECOVERY_INTR, 3228 &mddev->recovery)) 3229 printk(KERN_INFO "md/raid10:%s: insufficient " 3230 "working devices for recovery.\n", 3231 mdname(mddev)); 3232 mirror->recovery_disabled 3233 = mddev->recovery_disabled; 3234 } 3235 put_buf(r10_bio); 3236 if (rb2) 3237 atomic_dec(&rb2->remaining); 3238 r10_bio = rb2; 3239 break; 3240 } 3241 } 3242 if (biolist == NULL) { 3243 while (r10_bio) { 3244 struct r10bio *rb2 = r10_bio; 3245 r10_bio = (struct r10bio*) rb2->master_bio; 3246 rb2->master_bio = NULL; 3247 put_buf(rb2); 3248 } 3249 goto giveup; 3250 } 3251 } else { 3252 /* resync. 
Schedule a read for every block at this virt offset */ 3253 int count = 0; 3254 3255 bitmap_cond_end_sync(mddev->bitmap, sector_nr); 3256 3257 if (!bitmap_start_sync(mddev->bitmap, sector_nr, 3258 &sync_blocks, mddev->degraded) && 3259 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, 3260 &mddev->recovery)) { 3261 /* We can skip this block */ 3262 *skipped = 1; 3263 return sync_blocks + sectors_skipped; 3264 } 3265 if (sync_blocks < max_sync) 3266 max_sync = sync_blocks; 3267 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); 3268 r10_bio->state = 0; 3269 3270 r10_bio->mddev = mddev; 3271 atomic_set(&r10_bio->remaining, 0); 3272 raise_barrier(conf, 0); 3273 conf->next_resync = sector_nr; 3274 3275 r10_bio->master_bio = NULL; 3276 r10_bio->sector = sector_nr; 3277 set_bit(R10BIO_IsSync, &r10_bio->state); 3278 raid10_find_phys(conf, r10_bio); 3279 r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1; 3280 3281 for (i = 0; i < conf->copies; i++) { 3282 int d = r10_bio->devs[i].devnum; 3283 sector_t first_bad, sector; 3284 int bad_sectors; 3285 3286 if (r10_bio->devs[i].repl_bio) 3287 r10_bio->devs[i].repl_bio->bi_end_io = NULL; 3288 3289 bio = r10_bio->devs[i].bio; 3290 bio_reset(bio); 3291 clear_bit(BIO_UPTODATE, &bio->bi_flags); 3292 if (conf->mirrors[d].rdev == NULL || 3293 test_bit(Faulty, &conf->mirrors[d].rdev->flags)) 3294 continue; 3295 sector = r10_bio->devs[i].addr; 3296 if (is_badblock(conf->mirrors[d].rdev, 3297 sector, max_sync, 3298 &first_bad, &bad_sectors)) { 3299 if (first_bad > sector) 3300 max_sync = first_bad - sector; 3301 else { 3302 bad_sectors -= (sector - first_bad); 3303 if (max_sync > bad_sectors) 3304 max_sync = bad_sectors; 3305 continue; 3306 } 3307 } 3308 atomic_inc(&conf->mirrors[d].rdev->nr_pending); 3309 atomic_inc(&r10_bio->remaining); 3310 bio->bi_next = biolist; 3311 biolist = bio; 3312 bio->bi_private = r10_bio; 3313 bio->bi_end_io = end_sync_read; 3314 bio->bi_rw = READ; 3315 bio->bi_iter.bi_sector = sector + 3316 conf->mirrors[d].rdev->data_offset; 3317 bio->bi_bdev = conf->mirrors[d].rdev->bdev; 3318 count++; 3319 3320 if (conf->mirrors[d].replacement == NULL || 3321 test_bit(Faulty, 3322 &conf->mirrors[d].replacement->flags)) 3323 continue; 3324 3325 /* Need to set up for writing to the replacement */ 3326 bio = r10_bio->devs[i].repl_bio; 3327 bio_reset(bio); 3328 clear_bit(BIO_UPTODATE, &bio->bi_flags); 3329 3330 sector = r10_bio->devs[i].addr; 3331 atomic_inc(&conf->mirrors[d].rdev->nr_pending); 3332 bio->bi_next = biolist; 3333 biolist = bio; 3334 bio->bi_private = r10_bio; 3335 bio->bi_end_io = end_sync_write; 3336 bio->bi_rw = WRITE; 3337 bio->bi_iter.bi_sector = sector + 3338 conf->mirrors[d].replacement->data_offset; 3339 bio->bi_bdev = conf->mirrors[d].replacement->bdev; 3340 count++; 3341 } 3342 3343 if (count < 2) { 3344 for (i=0; i<conf->copies; i++) { 3345 int d = r10_bio->devs[i].devnum; 3346 if (r10_bio->devs[i].bio->bi_end_io) 3347 rdev_dec_pending(conf->mirrors[d].rdev, 3348 mddev); 3349 if (r10_bio->devs[i].repl_bio && 3350 r10_bio->devs[i].repl_bio->bi_end_io) 3351 rdev_dec_pending( 3352 conf->mirrors[d].replacement, 3353 mddev); 3354 } 3355 put_buf(r10_bio); 3356 biolist = NULL; 3357 goto giveup; 3358 } 3359 } 3360 3361 nr_sectors = 0; 3362 if (sector_nr + max_sync < max_sector) 3363 max_sector = sector_nr + max_sync; 3364 do { 3365 struct page *page; 3366 int len = PAGE_SIZE; 3367 if (sector_nr + (len>>9) > max_sector) 3368 len = (max_sector - sector_nr) << 9; 3369 if (len == 0) 3370 break; 3371 for (bio= biolist ; bio 
; bio=bio->bi_next) { 3372 struct bio *bio2; 3373 page = bio->bi_io_vec[bio->bi_vcnt].bv_page; 3374 if (bio_add_page(bio, page, len, 0)) 3375 continue; 3376 3377 /* stop here */ 3378 bio->bi_io_vec[bio->bi_vcnt].bv_page = page; 3379 for (bio2 = biolist; 3380 bio2 && bio2 != bio; 3381 bio2 = bio2->bi_next) { 3382 /* remove last page from this bio */ 3383 bio2->bi_vcnt--; 3384 bio2->bi_iter.bi_size -= len; 3385 __clear_bit(BIO_SEG_VALID, &bio2->bi_flags); 3386 } 3387 goto bio_full; 3388 } 3389 nr_sectors += len>>9; 3390 sector_nr += len>>9; 3391 } while (biolist->bi_vcnt < RESYNC_PAGES); 3392 bio_full: 3393 r10_bio->sectors = nr_sectors; 3394 3395 while (biolist) { 3396 bio = biolist; 3397 biolist = biolist->bi_next; 3398 3399 bio->bi_next = NULL; 3400 r10_bio = bio->bi_private; 3401 r10_bio->sectors = nr_sectors; 3402 3403 if (bio->bi_end_io == end_sync_read) { 3404 md_sync_acct(bio->bi_bdev, nr_sectors); 3405 set_bit(BIO_UPTODATE, &bio->bi_flags); 3406 generic_make_request(bio); 3407 } 3408 } 3409 3410 if (sectors_skipped) 3411 /* pretend they weren't skipped, it makes 3412 * no important difference in this case 3413 */ 3414 md_done_sync(mddev, sectors_skipped, 1); 3415 3416 return sectors_skipped + nr_sectors; 3417 giveup: 3418 /* There is nowhere to write, so all non-sync 3419 * drives must be failed or in resync, all drives 3420 * have a bad block, so try the next chunk... 3421 */ 3422 if (sector_nr + max_sync < max_sector) 3423 max_sector = sector_nr + max_sync; 3424 3425 sectors_skipped += (max_sector - sector_nr); 3426 chunks_skipped ++; 3427 sector_nr = max_sector; 3428 goto skipped; 3429 } 3430 3431 static sector_t 3432 raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks) 3433 { 3434 sector_t size; 3435 struct r10conf *conf = mddev->private; 3436 3437 if (!raid_disks) 3438 raid_disks = min(conf->geo.raid_disks, 3439 conf->prev.raid_disks); 3440 if (!sectors) 3441 sectors = conf->dev_sectors; 3442 3443 size = sectors >> conf->geo.chunk_shift; 3444 sector_div(size, conf->geo.far_copies); 3445 size = size * raid_disks; 3446 sector_div(size, conf->geo.near_copies); 3447 3448 return size << conf->geo.chunk_shift; 3449 } 3450 3451 static void calc_sectors(struct r10conf *conf, sector_t size) 3452 { 3453 /* Calculate the number of sectors-per-device that will 3454 * actually be used, and set conf->dev_sectors and 3455 * conf->stride 3456 */ 3457 3458 size = size >> conf->geo.chunk_shift; 3459 sector_div(size, conf->geo.far_copies); 3460 size = size * conf->geo.raid_disks; 3461 sector_div(size, conf->geo.near_copies); 3462 /* 'size' is now the number of chunks in the array */ 3463 /* calculate "used chunks per device" */ 3464 size = size * conf->copies; 3465 3466 /* We need to round up when dividing by raid_disks to 3467 * get the stride size. 
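 * Worked example (illustrative numbers): an array of 103 chunks with copies = 2 and raid_disks = 5 gives 206 chunk copies; rounding 206 / 5 up yields 42 chunks per device, so dev_sectors becomes 42 << chunk_shift.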
3468 */ 3469 size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks); 3470 3471 conf->dev_sectors = size << conf->geo.chunk_shift; 3472 3473 if (conf->geo.far_offset) 3474 conf->geo.stride = 1 << conf->geo.chunk_shift; 3475 else { 3476 sector_div(size, conf->geo.far_copies); 3477 conf->geo.stride = size << conf->geo.chunk_shift; 3478 } 3479 } 3480 3481 enum geo_type {geo_new, geo_old, geo_start}; 3482 static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new) 3483 { 3484 int nc, fc, fo; 3485 int layout, chunk, disks; 3486 switch (new) { 3487 case geo_old: 3488 layout = mddev->layout; 3489 chunk = mddev->chunk_sectors; 3490 disks = mddev->raid_disks - mddev->delta_disks; 3491 break; 3492 case geo_new: 3493 layout = mddev->new_layout; 3494 chunk = mddev->new_chunk_sectors; 3495 disks = mddev->raid_disks; 3496 break; 3497 default: /* avoid 'may be unused' warnings */ 3498 case geo_start: /* new when starting reshape - raid_disks not 3499 * updated yet. */ 3500 layout = mddev->new_layout; 3501 chunk = mddev->new_chunk_sectors; 3502 disks = mddev->raid_disks + mddev->delta_disks; 3503 break; 3504 } 3505 if (layout >> 18) 3506 return -1; 3507 if (chunk < (PAGE_SIZE >> 9) || 3508 !is_power_of_2(chunk)) 3509 return -2; 3510 nc = layout & 255; 3511 fc = (layout >> 8) & 255; 3512 fo = layout & (1<<16); 3513 geo->raid_disks = disks; 3514 geo->near_copies = nc; 3515 geo->far_copies = fc; 3516 geo->far_offset = fo; 3517 geo->far_set_size = (layout & (1<<17)) ? disks / fc : disks; 3518 geo->chunk_mask = chunk - 1; 3519 geo->chunk_shift = ffz(~chunk); 3520 return nc*fc; 3521 } 3522 3523 static struct r10conf *setup_conf(struct mddev *mddev) 3524 { 3525 struct r10conf *conf = NULL; 3526 int err = -EINVAL; 3527 struct geom geo; 3528 int copies; 3529 3530 copies = setup_geo(&geo, mddev, geo_new); 3531 3532 if (copies == -2) { 3533 printk(KERN_ERR "md/raid10:%s: chunk size must be " 3534 "at least PAGE_SIZE(%ld) and be a power of 2.\n", 3535 mdname(mddev), PAGE_SIZE); 3536 goto out; 3537 } 3538 3539 if (copies < 2 || copies > mddev->raid_disks) { 3540 printk(KERN_ERR "md/raid10:%s: unsupported raid10 layout: 0x%8x\n", 3541 mdname(mddev), mddev->new_layout); 3542 goto out; 3543 } 3544 3545 err = -ENOMEM; 3546 conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL); 3547 if (!conf) 3548 goto out; 3549 3550 /* FIXME calc properly */ 3551 conf->mirrors = kzalloc(sizeof(struct raid10_info)*(mddev->raid_disks + 3552 max(0,-mddev->delta_disks)), 3553 GFP_KERNEL); 3554 if (!conf->mirrors) 3555 goto out; 3556 3557 conf->tmppage = alloc_page(GFP_KERNEL); 3558 if (!conf->tmppage) 3559 goto out; 3560 3561 conf->geo = geo; 3562 conf->copies = copies; 3563 conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc, 3564 r10bio_pool_free, conf); 3565 if (!conf->r10bio_pool) 3566 goto out; 3567 3568 calc_sectors(conf, mddev->dev_sectors); 3569 if (mddev->reshape_position == MaxSector) { 3570 conf->prev = conf->geo; 3571 conf->reshape_progress = MaxSector; 3572 } else { 3573 if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) { 3574 err = -EINVAL; 3575 goto out; 3576 } 3577 conf->reshape_progress = mddev->reshape_position; 3578 if (conf->prev.far_offset) 3579 conf->prev.stride = 1 << conf->prev.chunk_shift; 3580 else 3581 /* far_copies must be 1 */ 3582 conf->prev.stride = conf->dev_sectors; 3583 } 3584 spin_lock_init(&conf->device_lock); 3585 INIT_LIST_HEAD(&conf->retry_list); 3586 3587 spin_lock_init(&conf->resync_lock); 3588 init_waitqueue_head(&conf->wait_barrier); 3589 3590 conf->thread 
= md_register_thread(raid10d, mddev, "raid10"); 3591 if (!conf->thread) 3592 goto out; 3593 3594 conf->mddev = mddev; 3595 return conf; 3596 3597 out: 3598 if (err == -ENOMEM) 3599 printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n", 3600 mdname(mddev)); 3601 if (conf) { 3602 if (conf->r10bio_pool) 3603 mempool_destroy(conf->r10bio_pool); 3604 kfree(conf->mirrors); 3605 safe_put_page(conf->tmppage); 3606 kfree(conf); 3607 } 3608 return ERR_PTR(err); 3609 } 3610 3611 static int run(struct mddev *mddev) 3612 { 3613 struct r10conf *conf; 3614 int i, disk_idx, chunk_size; 3615 struct raid10_info *disk; 3616 struct md_rdev *rdev; 3617 sector_t size; 3618 sector_t min_offset_diff = 0; 3619 int first = 1; 3620 bool discard_supported = false; 3621 3622 if (mddev->private == NULL) { 3623 conf = setup_conf(mddev); 3624 if (IS_ERR(conf)) 3625 return PTR_ERR(conf); 3626 mddev->private = conf; 3627 } 3628 conf = mddev->private; 3629 if (!conf) 3630 goto out; 3631 3632 mddev->thread = conf->thread; 3633 conf->thread = NULL; 3634 3635 chunk_size = mddev->chunk_sectors << 9; 3636 if (mddev->queue) { 3637 blk_queue_max_discard_sectors(mddev->queue, 3638 mddev->chunk_sectors); 3639 blk_queue_max_write_same_sectors(mddev->queue, 0); 3640 blk_queue_io_min(mddev->queue, chunk_size); 3641 if (conf->geo.raid_disks % conf->geo.near_copies) 3642 blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); 3643 else 3644 blk_queue_io_opt(mddev->queue, chunk_size * 3645 (conf->geo.raid_disks / conf->geo.near_copies)); 3646 } 3647 3648 rdev_for_each(rdev, mddev) { 3649 long long diff; 3650 struct request_queue *q; 3651 3652 disk_idx = rdev->raid_disk; 3653 if (disk_idx < 0) 3654 continue; 3655 if (disk_idx >= conf->geo.raid_disks && 3656 disk_idx >= conf->prev.raid_disks) 3657 continue; 3658 disk = conf->mirrors + disk_idx; 3659 3660 if (test_bit(Replacement, &rdev->flags)) { 3661 if (disk->replacement) 3662 goto out_free_conf; 3663 disk->replacement = rdev; 3664 } else { 3665 if (disk->rdev) 3666 goto out_free_conf; 3667 disk->rdev = rdev; 3668 } 3669 q = bdev_get_queue(rdev->bdev); 3670 if (q->merge_bvec_fn) 3671 mddev->merge_check_needed = 1; 3672 diff = (rdev->new_data_offset - rdev->data_offset); 3673 if (!mddev->reshape_backwards) 3674 diff = -diff; 3675 if (diff < 0) 3676 diff = 0; 3677 if (first || diff < min_offset_diff) 3678 min_offset_diff = diff; 3679 3680 if (mddev->gendisk) 3681 disk_stack_limits(mddev->gendisk, rdev->bdev, 3682 rdev->data_offset << 9); 3683 3684 disk->head_position = 0; 3685 3686 if (blk_queue_discard(bdev_get_queue(rdev->bdev))) 3687 discard_supported = true; 3688 } 3689 3690 if (mddev->queue) { 3691 if (discard_supported) 3692 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, 3693 mddev->queue); 3694 else 3695 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, 3696 mddev->queue); 3697 } 3698 /* need to check that every block has at least one working mirror */ 3699 if (!enough(conf, -1)) { 3700 printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n", 3701 mdname(mddev)); 3702 goto out_free_conf; 3703 } 3704 3705 if (conf->reshape_progress != MaxSector) { 3706 /* must ensure that shape change is supported */ 3707 if (conf->geo.far_copies != 1 && 3708 conf->geo.far_offset == 0) 3709 goto out_free_conf; 3710 if (conf->prev.far_copies != 1 && 3711 conf->prev.far_offset == 0) 3712 goto out_free_conf; 3713 } 3714 3715 mddev->degraded = 0; 3716 for (i = 0; 3717 i < conf->geo.raid_disks 3718 || i < conf->prev.raid_disks; 3719 i++) { 3720 3721 disk = conf->mirrors + i; 3722 3723 
if (!disk->rdev && disk->replacement) { 3724 /* The replacement is all we have - use it */ 3725 disk->rdev = disk->replacement; 3726 disk->replacement = NULL; 3727 clear_bit(Replacement, &disk->rdev->flags); 3728 } 3729 3730 if (!disk->rdev || 3731 !test_bit(In_sync, &disk->rdev->flags)) { 3732 disk->head_position = 0; 3733 mddev->degraded++; 3734 if (disk->rdev && 3735 disk->rdev->saved_raid_disk < 0) 3736 conf->fullsync = 1; 3737 } 3738 disk->recovery_disabled = mddev->recovery_disabled - 1; 3739 } 3740 3741 if (mddev->recovery_cp != MaxSector) 3742 printk(KERN_NOTICE "md/raid10:%s: not clean" 3743 " -- starting background reconstruction\n", 3744 mdname(mddev)); 3745 printk(KERN_INFO 3746 "md/raid10:%s: active with %d out of %d devices\n", 3747 mdname(mddev), conf->geo.raid_disks - mddev->degraded, 3748 conf->geo.raid_disks); 3749 /* 3750 * Ok, everything is just fine now 3751 */ 3752 mddev->dev_sectors = conf->dev_sectors; 3753 size = raid10_size(mddev, 0, 0); 3754 md_set_array_sectors(mddev, size); 3755 mddev->resync_max_sectors = size; 3756 3757 if (mddev->queue) { 3758 int stripe = conf->geo.raid_disks * 3759 ((mddev->chunk_sectors << 9) / PAGE_SIZE); 3760 mddev->queue->backing_dev_info.congested_fn = raid10_congested; 3761 mddev->queue->backing_dev_info.congested_data = mddev; 3762 3763 /* Calculate max read-ahead size. 3764 * We need to readahead at least twice a whole stripe.... 3765 * maybe... 3766 */ 3767 stripe /= conf->geo.near_copies; 3768 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 3769 mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 3770 blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec); 3771 } 3772 3773 if (md_integrity_register(mddev)) 3774 goto out_free_conf; 3775 3776 if (conf->reshape_progress != MaxSector) { 3777 unsigned long before_length, after_length; 3778 3779 before_length = ((1 << conf->prev.chunk_shift) * 3780 conf->prev.far_copies); 3781 after_length = ((1 << conf->geo.chunk_shift) * 3782 conf->geo.far_copies); 3783 3784 if (max(before_length, after_length) > min_offset_diff) { 3785 /* This cannot work */ 3786 printk("md/raid10: offset difference not enough to continue reshape\n"); 3787 goto out_free_conf; 3788 } 3789 conf->offset_diff = min_offset_diff; 3790 3791 conf->reshape_safe = conf->reshape_progress; 3792 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 3793 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 3794 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 3795 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 3796 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 3797 "reshape"); 3798 } 3799 3800 return 0; 3801 3802 out_free_conf: 3803 md_unregister_thread(&mddev->thread); 3804 if (conf->r10bio_pool) 3805 mempool_destroy(conf->r10bio_pool); 3806 safe_put_page(conf->tmppage); 3807 kfree(conf->mirrors); 3808 kfree(conf); 3809 mddev->private = NULL; 3810 out: 3811 return -EIO; 3812 } 3813 3814 static int stop(struct mddev *mddev) 3815 { 3816 struct r10conf *conf = mddev->private; 3817 3818 raise_barrier(conf, 0); 3819 lower_barrier(conf); 3820 3821 md_unregister_thread(&mddev->thread); 3822 if (mddev->queue) 3823 /* the unplug fn references 'conf'*/ 3824 blk_sync_queue(mddev->queue); 3825 3826 if (conf->r10bio_pool) 3827 mempool_destroy(conf->r10bio_pool); 3828 safe_put_page(conf->tmppage); 3829 kfree(conf->mirrors); 3830 kfree(conf->mirrors_old); 3831 kfree(conf->mirrors_new); 3832 kfree(conf); 3833 mddev->private = NULL; 3834 return 0; 3835 } 3836 3837 static void raid10_quiesce(struct mddev *mddev, int state) 3838 { 
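 /* state 1 suspends normal IO by raising the resync barrier; state 0 lowers it again so IO can resume. */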
3839 struct r10conf *conf = mddev->private; 3840 3841 switch(state) { 3842 case 1: 3843 raise_barrier(conf, 0); 3844 break; 3845 case 0: 3846 lower_barrier(conf); 3847 break; 3848 } 3849 } 3850 3851 static int raid10_resize(struct mddev *mddev, sector_t sectors) 3852 { 3853 /* Resize of 'far' arrays is not supported. 3854 * For 'near' and 'offset' arrays we can set the 3855 * number of sectors used to be an appropriate multiple 3856 * of the chunk size. 3857 * For 'offset', this is far_copies*chunksize. 3858 * For 'near' the multiplier is the LCM of 3859 * near_copies and raid_disks. 3860 * So if far_copies > 1 && !far_offset, fail. 3861 * Else find LCM(raid_disks, near_copy)*far_copies and 3862 * multiply by chunk_size. Then round to this number. 3863 * This is mostly done by raid10_size() 3864 */ 3865 struct r10conf *conf = mddev->private; 3866 sector_t oldsize, size; 3867 3868 if (mddev->reshape_position != MaxSector) 3869 return -EBUSY; 3870 3871 if (conf->geo.far_copies > 1 && !conf->geo.far_offset) 3872 return -EINVAL; 3873 3874 oldsize = raid10_size(mddev, 0, 0); 3875 size = raid10_size(mddev, sectors, 0); 3876 if (mddev->external_size && 3877 mddev->array_sectors > size) 3878 return -EINVAL; 3879 if (mddev->bitmap) { 3880 int ret = bitmap_resize(mddev->bitmap, size, 0, 0); 3881 if (ret) 3882 return ret; 3883 } 3884 md_set_array_sectors(mddev, size); 3885 set_capacity(mddev->gendisk, mddev->array_sectors); 3886 revalidate_disk(mddev->gendisk); 3887 if (sectors > mddev->dev_sectors && 3888 mddev->recovery_cp > oldsize) { 3889 mddev->recovery_cp = oldsize; 3890 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3891 } 3892 calc_sectors(conf, sectors); 3893 mddev->dev_sectors = conf->dev_sectors; 3894 mddev->resync_max_sectors = size; 3895 return 0; 3896 } 3897 3898 static void *raid10_takeover_raid0(struct mddev *mddev) 3899 { 3900 struct md_rdev *rdev; 3901 struct r10conf *conf; 3902 3903 if (mddev->degraded > 0) { 3904 printk(KERN_ERR "md/raid10:%s: Error: degraded raid0!\n", 3905 mdname(mddev)); 3906 return ERR_PTR(-EINVAL); 3907 } 3908 3909 /* Set new parameters */ 3910 mddev->new_level = 10; 3911 /* new layout: far_copies = 1, near_copies = 2 */ 3912 mddev->new_layout = (1<<8) + 2; 3913 mddev->new_chunk_sectors = mddev->chunk_sectors; 3914 mddev->delta_disks = mddev->raid_disks; 3915 mddev->raid_disks *= 2; 3916 /* make sure it will be not marked as dirty */ 3917 mddev->recovery_cp = MaxSector; 3918 3919 conf = setup_conf(mddev); 3920 if (!IS_ERR(conf)) { 3921 rdev_for_each(rdev, mddev) 3922 if (rdev->raid_disk >= 0) 3923 rdev->new_raid_disk = rdev->raid_disk * 2; 3924 conf->barrier = 1; 3925 } 3926 3927 return conf; 3928 } 3929 3930 static void *raid10_takeover(struct mddev *mddev) 3931 { 3932 struct r0conf *raid0_conf; 3933 3934 /* raid10 can take over: 3935 * raid0 - providing it has only two drives 3936 */ 3937 if (mddev->level == 0) { 3938 /* for raid0 takeover only one zone is supported */ 3939 raid0_conf = mddev->private; 3940 if (raid0_conf->nr_strip_zones > 1) { 3941 printk(KERN_ERR "md/raid10:%s: cannot takeover raid 0" 3942 " with more than one zone.\n", 3943 mdname(mddev)); 3944 return ERR_PTR(-EINVAL); 3945 } 3946 return raid10_takeover_raid0(mddev); 3947 } 3948 return ERR_PTR(-EINVAL); 3949 } 3950 3951 static int raid10_check_reshape(struct mddev *mddev) 3952 { 3953 /* Called when there is a request to change 3954 * - layout (to ->new_layout) 3955 * - chunk size (to ->new_chunk_sectors) 3956 * - raid_disks (by delta_disks) 3957 * or when trying to restart a reshape that 
was ongoing. 3958 * 3959 * We need to validate the request and possibly allocate 3960 * space if that might be an issue later. 3961 * 3962 * Currently we reject any reshape of a 'far' mode array, 3963 * allow chunk size to change if new is generally acceptable, 3964 * allow raid_disks to increase, and allow 3965 * a switch between 'near' mode and 'offset' mode. 3966 */ 3967 struct r10conf *conf = mddev->private; 3968 struct geom geo; 3969 3970 if (conf->geo.far_copies != 1 && !conf->geo.far_offset) 3971 return -EINVAL; 3972 3973 if (setup_geo(&geo, mddev, geo_start) != conf->copies) 3974 /* mustn't change number of copies */ 3975 return -EINVAL; 3976 if (geo.far_copies > 1 && !geo.far_offset) 3977 /* Cannot switch to 'far' mode */ 3978 return -EINVAL; 3979 3980 if (mddev->array_sectors & geo.chunk_mask) 3981 /* not factor of array size */ 3982 return -EINVAL; 3983 3984 if (!enough(conf, -1)) 3985 return -EINVAL; 3986 3987 kfree(conf->mirrors_new); 3988 conf->mirrors_new = NULL; 3989 if (mddev->delta_disks > 0) { 3990 /* allocate new 'mirrors' list */ 3991 conf->mirrors_new = kzalloc( 3992 sizeof(struct raid10_info) 3993 *(mddev->raid_disks + 3994 mddev->delta_disks), 3995 GFP_KERNEL); 3996 if (!conf->mirrors_new) 3997 return -ENOMEM; 3998 } 3999 return 0; 4000 } 4001 4002 /* 4003 * Need to check if array has failed when deciding whether to: 4004 * - start an array 4005 * - remove non-faulty devices 4006 * - add a spare 4007 * - allow a reshape 4008 * This determination is simple when no reshape is happening. 4009 * However if there is a reshape, we need to carefully check 4010 * both the before and after sections. 4011 * This is because some failed devices may only affect one 4012 * of the two sections, and some non-in_sync devices may 4013 * be insync in the section most affected by failed devices. 4014 */ 4015 static int calc_degraded(struct r10conf *conf) 4016 { 4017 int degraded, degraded2; 4018 int i; 4019 4020 rcu_read_lock(); 4021 degraded = 0; 4022 /* 'prev' section first */ 4023 for (i = 0; i < conf->prev.raid_disks; i++) { 4024 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); 4025 if (!rdev || test_bit(Faulty, &rdev->flags)) 4026 degraded++; 4027 else if (!test_bit(In_sync, &rdev->flags)) 4028 /* When we can reduce the number of devices in 4029 * an array, this might not contribute to 4030 * 'degraded'. It does now. 4031 */ 4032 degraded++; 4033 } 4034 rcu_read_unlock(); 4035 if (conf->geo.raid_disks == conf->prev.raid_disks) 4036 return degraded; 4037 rcu_read_lock(); 4038 degraded2 = 0; 4039 for (i = 0; i < conf->geo.raid_disks; i++) { 4040 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); 4041 if (!rdev || test_bit(Faulty, &rdev->flags)) 4042 degraded2++; 4043 else if (!test_bit(In_sync, &rdev->flags)) { 4044 /* If reshape is increasing the number of devices, 4045 * this section has already been recovered, so 4046 * it doesn't contribute to degraded. 4047 * else it does. 4048 */ 4049 if (conf->geo.raid_disks <= conf->prev.raid_disks) 4050 degraded2++; 4051 } 4052 } 4053 rcu_read_unlock(); 4054 if (degraded2 > degraded) 4055 return degraded2; 4056 return degraded; 4057 } 4058 4059 static int raid10_start_reshape(struct mddev *mddev) 4060 { 4061 /* A 'reshape' has been requested. This commits 4062 * the various 'new' fields and sets MD_RECOVER_RESHAPE 4063 * This also checks if there are enough spares and adds them 4064 * to the array. 4065 * We currently require enough spares to make the final 4066 * array non-degraded. 
We also require that the difference 4067 * between old and new data_offset - on each device - is 4068 * enough that we never risk over-writing. 4069 */ 4070 4071 unsigned long before_length, after_length; 4072 sector_t min_offset_diff = 0; 4073 int first = 1; 4074 struct geom new; 4075 struct r10conf *conf = mddev->private; 4076 struct md_rdev *rdev; 4077 int spares = 0; 4078 int ret; 4079 4080 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4081 return -EBUSY; 4082 4083 if (setup_geo(&new, mddev, geo_start) != conf->copies) 4084 return -EINVAL; 4085 4086 before_length = ((1 << conf->prev.chunk_shift) * 4087 conf->prev.far_copies); 4088 after_length = ((1 << conf->geo.chunk_shift) * 4089 conf->geo.far_copies); 4090 4091 rdev_for_each(rdev, mddev) { 4092 if (!test_bit(In_sync, &rdev->flags) 4093 && !test_bit(Faulty, &rdev->flags)) 4094 spares++; 4095 if (rdev->raid_disk >= 0) { 4096 long long diff = (rdev->new_data_offset 4097 - rdev->data_offset); 4098 if (!mddev->reshape_backwards) 4099 diff = -diff; 4100 if (diff < 0) 4101 diff = 0; 4102 if (first || diff < min_offset_diff) 4103 min_offset_diff = diff; 4104 } 4105 } 4106 4107 if (max(before_length, after_length) > min_offset_diff) 4108 return -EINVAL; 4109 4110 if (spares < mddev->delta_disks) 4111 return -EINVAL; 4112 4113 conf->offset_diff = min_offset_diff; 4114 spin_lock_irq(&conf->device_lock); 4115 if (conf->mirrors_new) { 4116 memcpy(conf->mirrors_new, conf->mirrors, 4117 sizeof(struct raid10_info)*conf->prev.raid_disks); 4118 smp_mb(); 4119 kfree(conf->mirrors_old); 4120 conf->mirrors_old = conf->mirrors; 4121 conf->mirrors = conf->mirrors_new; 4122 conf->mirrors_new = NULL; 4123 } 4124 setup_geo(&conf->geo, mddev, geo_start); 4125 smp_mb(); 4126 if (mddev->reshape_backwards) { 4127 sector_t size = raid10_size(mddev, 0, 0); 4128 if (size < mddev->array_sectors) { 4129 spin_unlock_irq(&conf->device_lock); 4130 printk(KERN_ERR "md/raid10:%s: array size must be reduce before number of disks\n", 4131 mdname(mddev)); 4132 return -EINVAL; 4133 } 4134 mddev->resync_max_sectors = size; 4135 conf->reshape_progress = size; 4136 } else 4137 conf->reshape_progress = 0; 4138 spin_unlock_irq(&conf->device_lock); 4139 4140 if (mddev->delta_disks && mddev->bitmap) { 4141 ret = bitmap_resize(mddev->bitmap, 4142 raid10_size(mddev, 0, 4143 conf->geo.raid_disks), 4144 0, 0); 4145 if (ret) 4146 goto abort; 4147 } 4148 if (mddev->delta_disks > 0) { 4149 rdev_for_each(rdev, mddev) 4150 if (rdev->raid_disk < 0 && 4151 !test_bit(Faulty, &rdev->flags)) { 4152 if (raid10_add_disk(mddev, rdev) == 0) { 4153 if (rdev->raid_disk >= 4154 conf->prev.raid_disks) 4155 set_bit(In_sync, &rdev->flags); 4156 else 4157 rdev->recovery_offset = 0; 4158 4159 if (sysfs_link_rdev(mddev, rdev)) 4160 /* Failure here is OK */; 4161 } 4162 } else if (rdev->raid_disk >= conf->prev.raid_disks 4163 && !test_bit(Faulty, &rdev->flags)) { 4164 /* This is a spare that was manually added */ 4165 set_bit(In_sync, &rdev->flags); 4166 } 4167 } 4168 /* When a reshape changes the number of devices, 4169 * ->degraded is measured against the larger of the 4170 * pre and post numbers. 
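 * (calc_degraded() above does exactly that: it walks both conf->prev and conf->geo and returns the larger of the two degraded counts.)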
4171 */ 4172 spin_lock_irq(&conf->device_lock); 4173 mddev->degraded = calc_degraded(conf); 4174 spin_unlock_irq(&conf->device_lock); 4175 mddev->raid_disks = conf->geo.raid_disks; 4176 mddev->reshape_position = conf->reshape_progress; 4177 set_bit(MD_CHANGE_DEVS, &mddev->flags); 4178 4179 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4180 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 4181 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 4182 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 4183 4184 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 4185 "reshape"); 4186 if (!mddev->sync_thread) { 4187 ret = -EAGAIN; 4188 goto abort; 4189 } 4190 conf->reshape_checkpoint = jiffies; 4191 md_wakeup_thread(mddev->sync_thread); 4192 md_new_event(mddev); 4193 return 0; 4194 4195 abort: 4196 mddev->recovery = 0; 4197 spin_lock_irq(&conf->device_lock); 4198 conf->geo = conf->prev; 4199 mddev->raid_disks = conf->geo.raid_disks; 4200 rdev_for_each(rdev, mddev) 4201 rdev->new_data_offset = rdev->data_offset; 4202 smp_wmb(); 4203 conf->reshape_progress = MaxSector; 4204 mddev->reshape_position = MaxSector; 4205 spin_unlock_irq(&conf->device_lock); 4206 return ret; 4207 } 4208 4209 /* Calculate the last device-address that could contain 4210 * any block from the chunk that includes the array-address 's' 4211 * and report the next address. 4212 * i.e. the address returned will be chunk-aligned and after 4213 * any data that is in the chunk containing 's'. 4214 */ 4215 static sector_t last_dev_address(sector_t s, struct geom *geo) 4216 { 4217 s = (s | geo->chunk_mask) + 1; 4218 s >>= geo->chunk_shift; 4219 s *= geo->near_copies; 4220 s = DIV_ROUND_UP_SECTOR_T(s, geo->raid_disks); 4221 s *= geo->far_copies; 4222 s <<= geo->chunk_shift; 4223 return s; 4224 } 4225 4226 /* Calculate the first device-address that could contain 4227 * any block from the chunk that includes the array-address 's'. 4228 * This too will be the start of a chunk. 4229 */ 4230 static sector_t first_dev_address(sector_t s, struct geom *geo) 4231 { 4232 s >>= geo->chunk_shift; 4233 s *= geo->near_copies; 4234 sector_div(s, geo->raid_disks); 4235 s *= geo->far_copies; 4236 s <<= geo->chunk_shift; 4237 return s; 4238 } 4239 4240 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, 4241 int *skipped) 4242 { 4243 /* We simply copy at most one chunk (smallest of old and new) 4244 * at a time, possibly less if that exceeds RESYNC_PAGES, 4245 * or we hit a bad block or something. 4246 * This might mean we pause for normal IO in the middle of 4247 * a chunk, but that is not a problem as mddev->reshape_position 4248 * can record any location. 4249 * 4250 * If we will want to write to a location that isn't 4251 * yet recorded as 'safe' (i.e. in metadata on disk) then 4252 * we need to flush all reshape requests and update the metadata. 4253 * 4254 * When reshaping forwards (e.g. to more devices), we interpret 4255 * 'safe' as the earliest block which might not have been copied 4256 * down yet. We divide this by previous stripe size and multiply 4257 * by previous stripe length to get the lowest device offset that we 4258 * cannot write to yet. 4259 * We interpret 'sector_nr' as an address that we want to write to. 4260 * From this we use last_dev_address() to find where we might 4261 * write to, and first_dev_address() on the 'safe' position. 4262 * If this 'next' write position is after the 'safe' position, 4263 * we must update the metadata to increase the 'safe' position.
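 * For example (illustrative numbers only): if, for the chunk we are about to copy, last_dev_address() in the new layout comes to device sector 1000 ('next') while first_dev_address() of the recorded 'safe' position in the old layout is 900, and conf->offset_diff is 64, then next (1000) exceeds safe + offset_diff (964), so the metadata must be flushed and updated before the writes are issued.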
4264 * 4265 * When reshaping backwards, we round in the opposite direction 4266 * and perform the reverse test: next write position must not be 4267 * less than current safe position. 4268 * 4269 * In all this the minimum difference in data offsets 4270 * (conf->offset_diff - always positive) allows a bit of slack, 4271 * so next can be after 'safe', but not by more than offset_diff. 4272 * 4273 * We need to prepare all the bios here before we start any IO 4274 * to ensure the size we choose is acceptable to all devices. 4275 * That means one bio for each copy for write-out and an extra one for 4276 * read-in. 4277 * We store the read-in bio in ->master_bio and the others in 4278 * ->devs[x].bio and ->devs[x].repl_bio. 4279 */ 4280 struct r10conf *conf = mddev->private; 4281 struct r10bio *r10_bio; 4282 sector_t next, safe, last; 4283 int max_sectors; 4284 int nr_sectors; 4285 int s; 4286 struct md_rdev *rdev; 4287 int need_flush = 0; 4288 struct bio *blist; 4289 struct bio *bio, *read_bio; 4290 int sectors_done = 0; 4291 4292 if (sector_nr == 0) { 4293 /* If restarting in the middle, skip the initial sectors */ 4294 if (mddev->reshape_backwards && 4295 conf->reshape_progress < raid10_size(mddev, 0, 0)) { 4296 sector_nr = (raid10_size(mddev, 0, 0) 4297 - conf->reshape_progress); 4298 } else if (!mddev->reshape_backwards && 4299 conf->reshape_progress > 0) 4300 sector_nr = conf->reshape_progress; 4301 if (sector_nr) { 4302 mddev->curr_resync_completed = sector_nr; 4303 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 4304 *skipped = 1; 4305 return sector_nr; 4306 } 4307 } 4308 4309 /* We don't use sector_nr to track where we are up to 4310 * as that doesn't work well for ->reshape_backwards. 4311 * So just use ->reshape_progress. 4312 */ 4313 if (mddev->reshape_backwards) { 4314 /* 'next' is the earliest device address that we might 4315 * write to for this chunk in the new layout 4316 */ 4317 next = first_dev_address(conf->reshape_progress - 1, 4318 &conf->geo); 4319 4320 /* 'safe' is the last device address that we might read from 4321 * in the old layout after a restart 4322 */ 4323 safe = last_dev_address(conf->reshape_safe - 1, 4324 &conf->prev); 4325 4326 if (next + conf->offset_diff < safe) 4327 need_flush = 1; 4328 4329 last = conf->reshape_progress - 1; 4330 sector_nr = last & ~(sector_t)(conf->geo.chunk_mask 4331 & conf->prev.chunk_mask); 4332 if (sector_nr + RESYNC_BLOCK_SIZE/512 < last) 4333 sector_nr = last + 1 - RESYNC_BLOCK_SIZE/512; 4334 } else { 4335 /* 'next' is after the last device address that we 4336 * might write to for this chunk in the new layout 4337 */ 4338 next = last_dev_address(conf->reshape_progress, &conf->geo); 4339 4340 /* 'safe' is the earliest device address that we might 4341 * read from in the old layout after a restart 4342 */ 4343 safe = first_dev_address(conf->reshape_safe, &conf->prev); 4344 4345 /* Need to update metadata if 'next' might be beyond 'safe' 4346 * as that would possibly corrupt data 4347 */ 4348 if (next > safe + conf->offset_diff) 4349 need_flush = 1; 4350 4351 sector_nr = conf->reshape_progress; 4352 last = sector_nr | (conf->geo.chunk_mask 4353 & conf->prev.chunk_mask); 4354 4355 if (sector_nr + RESYNC_BLOCK_SIZE/512 <= last) 4356 last = sector_nr + RESYNC_BLOCK_SIZE/512 - 1; 4357 } 4358 4359 if (need_flush || 4360 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { 4361 /* Need to update reshape_position in metadata */ 4362 wait_barrier(conf); 4363 mddev->reshape_position = conf->reshape_progress; 4364 if
(mddev->reshape_backwards) 4365 mddev->curr_resync_completed = raid10_size(mddev, 0, 0) 4366 - conf->reshape_progress; 4367 else 4368 mddev->curr_resync_completed = conf->reshape_progress; 4369 conf->reshape_checkpoint = jiffies; 4370 set_bit(MD_CHANGE_DEVS, &mddev->flags); 4371 md_wakeup_thread(mddev->thread); 4372 wait_event(mddev->sb_wait, mddev->flags == 0 || 4373 test_bit(MD_RECOVERY_INTR, &mddev->recovery)); 4374 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 4375 allow_barrier(conf); 4376 return sectors_done; 4377 } 4378 conf->reshape_safe = mddev->reshape_position; 4379 allow_barrier(conf); 4380 } 4381 4382 read_more: 4383 /* Now schedule reads for blocks from sector_nr to last */ 4384 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); 4385 r10_bio->state = 0; 4386 raise_barrier(conf, sectors_done != 0); 4387 atomic_set(&r10_bio->remaining, 0); 4388 r10_bio->mddev = mddev; 4389 r10_bio->sector = sector_nr; 4390 set_bit(R10BIO_IsReshape, &r10_bio->state); 4391 r10_bio->sectors = last - sector_nr + 1; 4392 rdev = read_balance(conf, r10_bio, &max_sectors); 4393 BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state)); 4394 4395 if (!rdev) { 4396 /* Cannot read from here, so need to record bad blocks 4397 * on all the target devices. 4398 */ 4399 // FIXME 4400 mempool_free(r10_bio, conf->r10buf_pool); 4401 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4402 return sectors_done; 4403 } 4404 4405 read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev); 4406 4407 read_bio->bi_bdev = rdev->bdev; 4408 read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr 4409 + rdev->data_offset); 4410 read_bio->bi_private = r10_bio; 4411 read_bio->bi_end_io = end_sync_read; 4412 read_bio->bi_rw = READ; 4413 read_bio->bi_flags &= (~0UL << BIO_RESET_BITS); 4414 __set_bit(BIO_UPTODATE, &read_bio->bi_flags); 4415 read_bio->bi_vcnt = 0; 4416 read_bio->bi_iter.bi_size = 0; 4417 r10_bio->master_bio = read_bio; 4418 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum; 4419 4420 /* Now find the locations in the new layout */ 4421 __raid10_find_phys(&conf->geo, r10_bio); 4422 4423 blist = read_bio; 4424 read_bio->bi_next = NULL; 4425 4426 for (s = 0; s < conf->copies*2; s++) { 4427 struct bio *b; 4428 int d = r10_bio->devs[s/2].devnum; 4429 struct md_rdev *rdev2; 4430 if (s&1) { 4431 rdev2 = conf->mirrors[d].replacement; 4432 b = r10_bio->devs[s/2].repl_bio; 4433 } else { 4434 rdev2 = conf->mirrors[d].rdev; 4435 b = r10_bio->devs[s/2].bio; 4436 } 4437 if (!rdev2 || test_bit(Faulty, &rdev2->flags)) 4438 continue; 4439 4440 bio_reset(b); 4441 b->bi_bdev = rdev2->bdev; 4442 b->bi_iter.bi_sector = r10_bio->devs[s/2].addr + 4443 rdev2->new_data_offset; 4444 b->bi_private = r10_bio; 4445 b->bi_end_io = end_reshape_write; 4446 b->bi_rw = WRITE; 4447 b->bi_next = blist; 4448 blist = b; 4449 } 4450 4451 /* Now add as many pages as possible to all of these bios. 
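 * The same resync pages (taken from devs[0].bio's bi_io_vec) are added to the read bio and to every write bio, so the data read in lands directly in the pages that will later be written out; if a page does not fit in one of the bios we back it out of the bios already extended and stop at 'bio_full'.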
*/ 4452 4453 nr_sectors = 0; 4454 for (s = 0 ; s < max_sectors; s += PAGE_SIZE >> 9) { 4455 struct page *page = r10_bio->devs[0].bio->bi_io_vec[s/(PAGE_SIZE>>9)].bv_page; 4456 int len = (max_sectors - s) << 9; 4457 if (len > PAGE_SIZE) 4458 len = PAGE_SIZE; 4459 for (bio = blist; bio ; bio = bio->bi_next) { 4460 struct bio *bio2; 4461 if (bio_add_page(bio, page, len, 0)) 4462 continue; 4463 4464 /* Didn't fit, must stop */ 4465 for (bio2 = blist; 4466 bio2 && bio2 != bio; 4467 bio2 = bio2->bi_next) { 4468 /* Remove last page from this bio */ 4469 bio2->bi_vcnt--; 4470 bio2->bi_iter.bi_size -= len; 4471 __clear_bit(BIO_SEG_VALID, &bio2->bi_flags); 4472 } 4473 goto bio_full; 4474 } 4475 sector_nr += len >> 9; 4476 nr_sectors += len >> 9; 4477 } 4478 bio_full: 4479 r10_bio->sectors = nr_sectors; 4480 4481 /* Now submit the read */ 4482 md_sync_acct(read_bio->bi_bdev, r10_bio->sectors); 4483 atomic_inc(&r10_bio->remaining); 4484 read_bio->bi_next = NULL; 4485 generic_make_request(read_bio); 4486 sector_nr += nr_sectors; 4487 sectors_done += nr_sectors; 4488 if (sector_nr <= last) 4489 goto read_more; 4490 4491 /* Now that we have done the whole section we can 4492 * update reshape_progress 4493 */ 4494 if (mddev->reshape_backwards) 4495 conf->reshape_progress -= sectors_done; 4496 else 4497 conf->reshape_progress += sectors_done; 4498 4499 return sectors_done; 4500 } 4501 4502 static void end_reshape_request(struct r10bio *r10_bio); 4503 static int handle_reshape_read_error(struct mddev *mddev, 4504 struct r10bio *r10_bio); 4505 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio) 4506 { 4507 /* Reshape read completed. Hopefully we have a block 4508 * to write out. 4509 * If we got a read error then we do sync 1-page reads from 4510 * elsewhere until we find the data - or give up. 4511 */ 4512 struct r10conf *conf = mddev->private; 4513 int s; 4514 4515 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) 4516 if (handle_reshape_read_error(mddev, r10_bio) < 0) { 4517 /* Reshape has been aborted */ 4518 md_done_sync(mddev, r10_bio->sectors, 0); 4519 return; 4520 } 4521 4522 /* We definitely have the data in the pages, schedule the 4523 * writes. 
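 * One write bio is issued per copy in the new layout (plus one per replacement device, if present); ->remaining counts the submitted writes so end_reshape_request() completes the r10_bio only after the last of them has finished.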
4524 */ 4525 atomic_set(&r10_bio->remaining, 1); 4526 for (s = 0; s < conf->copies*2; s++) { 4527 struct bio *b; 4528 int d = r10_bio->devs[s/2].devnum; 4529 struct md_rdev *rdev; 4530 if (s&1) { 4531 rdev = conf->mirrors[d].replacement; 4532 b = r10_bio->devs[s/2].repl_bio; 4533 } else { 4534 rdev = conf->mirrors[d].rdev; 4535 b = r10_bio->devs[s/2].bio; 4536 } 4537 if (!rdev || test_bit(Faulty, &rdev->flags)) 4538 continue; 4539 atomic_inc(&rdev->nr_pending); 4540 md_sync_acct(b->bi_bdev, r10_bio->sectors); 4541 atomic_inc(&r10_bio->remaining); 4542 b->bi_next = NULL; 4543 generic_make_request(b); 4544 } 4545 end_reshape_request(r10_bio); 4546 } 4547 4548 static void end_reshape(struct r10conf *conf) 4549 { 4550 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) 4551 return; 4552 4553 spin_lock_irq(&conf->device_lock); 4554 conf->prev = conf->geo; 4555 md_finish_reshape(conf->mddev); 4556 smp_wmb(); 4557 conf->reshape_progress = MaxSector; 4558 spin_unlock_irq(&conf->device_lock); 4559 4560 /* read-ahead size must cover two whole stripes, which is 4561 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices 4562 */ 4563 if (conf->mddev->queue) { 4564 int stripe = conf->geo.raid_disks * 4565 ((conf->mddev->chunk_sectors << 9) / PAGE_SIZE); 4566 stripe /= conf->geo.near_copies; 4567 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 4568 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 4569 } 4570 conf->fullsync = 0; 4571 } 4572 4573 static int handle_reshape_read_error(struct mddev *mddev, 4574 struct r10bio *r10_bio) 4575 { 4576 /* Use sync reads to get the blocks from somewhere else */ 4577 int sectors = r10_bio->sectors; 4578 struct r10conf *conf = mddev->private; 4579 struct { 4580 struct r10bio r10_bio; 4581 struct r10dev devs[conf->copies]; 4582 } on_stack; 4583 struct r10bio *r10b = &on_stack.r10_bio; 4584 int slot = 0; 4585 int idx = 0; 4586 struct bio_vec *bvec = r10_bio->master_bio->bi_io_vec; 4587 4588 r10b->sector = r10_bio->sector; 4589 __raid10_find_phys(&conf->prev, r10b); 4590 4591 while (sectors) { 4592 int s = sectors; 4593 int success = 0; 4594 int first_slot = slot; 4595 4596 if (s > (PAGE_SIZE >> 9)) 4597 s = PAGE_SIZE >> 9; 4598 4599 while (!success) { 4600 int d = r10b->devs[slot].devnum; 4601 struct md_rdev *rdev = conf->mirrors[d].rdev; 4602 sector_t addr; 4603 if (rdev == NULL || 4604 test_bit(Faulty, &rdev->flags) || 4605 !test_bit(In_sync, &rdev->flags)) 4606 goto failed; 4607 4608 addr = r10b->devs[slot].addr + idx * PAGE_SIZE; 4609 success = sync_page_io(rdev, 4610 addr, 4611 s << 9, 4612 bvec[idx].bv_page, 4613 READ, false); 4614 if (success) 4615 break; 4616 failed: 4617 slot++; 4618 if (slot >= conf->copies) 4619 slot = 0; 4620 if (slot == first_slot) 4621 break; 4622 } 4623 if (!success) { 4624 /* couldn't read this block, must give up */ 4625 set_bit(MD_RECOVERY_INTR, 4626 &mddev->recovery); 4627 return -EIO; 4628 } 4629 sectors -= s; 4630 idx++; 4631 } 4632 return 0; 4633 } 4634 4635 static void end_reshape_write(struct bio *bio, int error) 4636 { 4637 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 4638 struct r10bio *r10_bio = bio->bi_private; 4639 struct mddev *mddev = r10_bio->mddev; 4640 struct r10conf *conf = mddev->private; 4641 int d; 4642 int slot; 4643 int repl; 4644 struct md_rdev *rdev = NULL; 4645 4646 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); 4647 if (repl) 4648 rdev = conf->mirrors[d].replacement; 4649 if (!rdev) { 4650 smp_mb(); 4651 rdev = conf->mirrors[d].rdev; 4652 } 4653 
4654 if (!uptodate) { 4655 /* FIXME should record badblock */ 4656 md_error(mddev, rdev); 4657 } 4658 4659 rdev_dec_pending(rdev, mddev); 4660 end_reshape_request(r10_bio); 4661 } 4662 4663 static void end_reshape_request(struct r10bio *r10_bio) 4664 { 4665 if (!atomic_dec_and_test(&r10_bio->remaining)) 4666 return; 4667 md_done_sync(r10_bio->mddev, r10_bio->sectors, 1); 4668 bio_put(r10_bio->master_bio); 4669 put_buf(r10_bio); 4670 } 4671 4672 static void raid10_finish_reshape(struct mddev *mddev) 4673 { 4674 struct r10conf *conf = mddev->private; 4675 4676 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 4677 return; 4678 4679 if (mddev->delta_disks > 0) { 4680 sector_t size = raid10_size(mddev, 0, 0); 4681 md_set_array_sectors(mddev, size); 4682 if (mddev->recovery_cp > mddev->resync_max_sectors) { 4683 mddev->recovery_cp = mddev->resync_max_sectors; 4684 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4685 } 4686 mddev->resync_max_sectors = size; 4687 set_capacity(mddev->gendisk, mddev->array_sectors); 4688 revalidate_disk(mddev->gendisk); 4689 } else { 4690 int d; 4691 for (d = conf->geo.raid_disks ; 4692 d < conf->geo.raid_disks - mddev->delta_disks; 4693 d++) { 4694 struct md_rdev *rdev = conf->mirrors[d].rdev; 4695 if (rdev) 4696 clear_bit(In_sync, &rdev->flags); 4697 rdev = conf->mirrors[d].replacement; 4698 if (rdev) 4699 clear_bit(In_sync, &rdev->flags); 4700 } 4701 } 4702 mddev->layout = mddev->new_layout; 4703 mddev->chunk_sectors = 1 << conf->geo.chunk_shift; 4704 mddev->reshape_position = MaxSector; 4705 mddev->delta_disks = 0; 4706 mddev->reshape_backwards = 0; 4707 } 4708 4709 static struct md_personality raid10_personality = 4710 { 4711 .name = "raid10", 4712 .level = 10, 4713 .owner = THIS_MODULE, 4714 .make_request = make_request, 4715 .run = run, 4716 .stop = stop, 4717 .status = status, 4718 .error_handler = error, 4719 .hot_add_disk = raid10_add_disk, 4720 .hot_remove_disk= raid10_remove_disk, 4721 .spare_active = raid10_spare_active, 4722 .sync_request = sync_request, 4723 .quiesce = raid10_quiesce, 4724 .size = raid10_size, 4725 .resize = raid10_resize, 4726 .takeover = raid10_takeover, 4727 .check_reshape = raid10_check_reshape, 4728 .start_reshape = raid10_start_reshape, 4729 .finish_reshape = raid10_finish_reshape, 4730 }; 4731 4732 static int __init raid_init(void) 4733 { 4734 return register_md_personality(&raid10_personality); 4735 } 4736 4737 static void raid_exit(void) 4738 { 4739 unregister_md_personality(&raid10_personality); 4740 } 4741 4742 module_init(raid_init); 4743 module_exit(raid_exit); 4744 MODULE_LICENSE("GPL"); 4745 MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD"); 4746 MODULE_ALIAS("md-personality-9"); /* RAID10 */ 4747 MODULE_ALIAS("md-raid10"); 4748 MODULE_ALIAS("md-level-10"); 4749 4750 module_param(max_queued_requests, int, S_IRUGO|S_IWUSR); 4751
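/*
 * Illustrative sketch (not part of the driver, hence guarded by "#if 0"):
 * a userspace re-implementation of the device-address rounding done by
 * first_dev_address()/last_dev_address() above, using plain integers in
 * place of sector_t, sector_div() and DIV_ROUND_UP_SECTOR_T().  The names
 * (geo_example, *_ex) and the geometry numbers in main() are invented
 * purely for demonstration.
 */
#if 0
#include <stdio.h>

struct geo_example {
	unsigned long long chunk_mask;	/* chunk size in sectors, minus 1 */
	int chunk_shift;		/* log2(chunk size in sectors) */
	int near_copies;
	int far_copies;
	int raid_disks;
};

/* First device address that could hold data from the chunk containing 's' */
static unsigned long long first_dev_address_ex(unsigned long long s,
					       const struct geo_example *geo)
{
	s >>= geo->chunk_shift;
	s *= geo->near_copies;
	s /= geo->raid_disks;			/* sector_div() in the driver */
	s *= geo->far_copies;
	s <<= geo->chunk_shift;
	return s;
}

/* One past the last device address that could hold data from that chunk */
static unsigned long long last_dev_address_ex(unsigned long long s,
					      const struct geo_example *geo)
{
	s = (s | geo->chunk_mask) + 1;
	s >>= geo->chunk_shift;
	s *= geo->near_copies;
	s = (s + geo->raid_disks - 1) / geo->raid_disks; /* DIV_ROUND_UP */
	s *= geo->far_copies;
	s <<= geo->chunk_shift;
	return s;
}

int main(void)
{
	/* 512K chunks (1024 sectors), near=2, far=1, 4 disks: example only */
	struct geo_example geo = {
		.chunk_mask = 1023, .chunk_shift = 10,
		.near_copies = 2, .far_copies = 1, .raid_disks = 4,
	};
	unsigned long long array_sector = 5000;

	printf("chunk of array sector %llu spans device sectors [%llu, %llu)\n",
	       array_sector,
	       first_dev_address_ex(array_sector, &geo),
	       last_dev_address_ex(array_sector, &geo));
	return 0;
}
#endif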