/*
 * raid10.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 2000-2004 Neil Brown
 *
 * RAID-10 support for md.
 *
 * Based on code in raid1.c. See raid1.c for further copyright information.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <trace/events/block.h>
#include "md.h"
#include "raid10.h"
#include "raid0.h"
#include "bitmap.h"

/*
 * RAID10 provides a combination of RAID0 and RAID1 functionality.
 * The layout of data is defined by
 *    chunk_size
 *    raid_disks
 *    near_copies (stored in low byte of layout)
 *    far_copies (stored in second byte of layout)
 *    far_offset (stored in bit 16 of layout)
 *    use_far_sets (stored in bit 17 of layout)
 *    use_far_sets_bugfixed (stored in bit 18 of layout)
 *
 * The data to be stored is divided into chunks using chunksize. Each device
 * is divided into far_copies sections. In each section, chunks are laid out
 * in a style similar to raid0, but near_copies copies of each chunk are stored
 * (each on a different drive). The starting device for each section is offset
 * near_copies from the starting device of the previous section. Thus there
 * are (near_copies * far_copies) of each chunk, and each is on a different
 * drive. near_copies and far_copies must be at least one, and their product
 * is at most raid_disks.
 *
 * If far_offset is true, then the far_copies are handled a bit differently.
 * The copies are still in different stripes, but instead of being very far
 * apart on disk, they are adjacent stripes.
 *
 * The far and offset algorithms are handled slightly differently if
 * 'use_far_sets' is true. In this case, the array's devices are grouped into
 * sets that are (near_copies * far_copies) in size. The far copied stripes
 * are still shifted by 'near_copies' devices, but this shifting stays confined
 * to the set rather than the entire array. This is done to improve the number
 * of device combinations that can fail without causing the array to fail.
 * Example 'far' algorithm w/o 'use_far_sets' (each letter represents a chunk
 * on a device):
 *    A B C D    A B C D E
 *      ...         ...
 *    D A B C    E A B C D
 * Example 'far' algorithm w/ 'use_far_sets' enabled (sets illustrated w/ []'s):
 *    [A B] [C D]    [A B] [C D E]
 *    |...| |...|    |...| | ... |
 *    [B A] [D C]    [B A] [E C D]
 */

/*
 * Number of guaranteed r10bios in case of extreme VM load:
 */
#define NR_RAID10_BIOS 256

/* when we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error.
To keep track of bad blocks on a per-bio 83 * level, we store IO_BLOCKED in the appropriate 'bios' pointer 84 */ 85 #define IO_BLOCKED ((struct bio *)1) 86 /* When we successfully write to a known bad-block, we need to remove the 87 * bad-block marking which must be done from process context. So we record 88 * the success by setting devs[n].bio to IO_MADE_GOOD 89 */ 90 #define IO_MADE_GOOD ((struct bio *)2) 91 92 #define BIO_SPECIAL(bio) ((unsigned long)bio <= 2) 93 94 /* When there are this many requests queued to be written by 95 * the raid10 thread, we become 'congested' to provide back-pressure 96 * for writeback. 97 */ 98 static int max_queued_requests = 1024; 99 100 static void allow_barrier(struct r10conf *conf); 101 static void lower_barrier(struct r10conf *conf); 102 static int _enough(struct r10conf *conf, int previous, int ignore); 103 static int enough(struct r10conf *conf, int ignore); 104 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, 105 int *skipped); 106 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio); 107 static void end_reshape_write(struct bio *bio); 108 static void end_reshape(struct r10conf *conf); 109 110 #define raid10_log(md, fmt, args...) \ 111 do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0) 112 113 static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data) 114 { 115 struct r10conf *conf = data; 116 int size = offsetof(struct r10bio, devs[conf->copies]); 117 118 /* allocate a r10bio with room for raid_disks entries in the 119 * bios array */ 120 return kzalloc(size, gfp_flags); 121 } 122 123 static void r10bio_pool_free(void *r10_bio, void *data) 124 { 125 kfree(r10_bio); 126 } 127 128 /* Maximum size of each resync request */ 129 #define RESYNC_BLOCK_SIZE (64*1024) 130 #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE) 131 /* amount of memory to reserve for resync requests */ 132 #define RESYNC_WINDOW (1024*1024) 133 /* maximum number of concurrent requests, memory permitting */ 134 #define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE) 135 136 /* 137 * When performing a resync, we need to read and compare, so 138 * we need as many pages are there are copies. 139 * When performing a recovery, we need 2 bios, one for read, 140 * one for write (we recover only one drive per r10buf) 141 * 142 */ 143 static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data) 144 { 145 struct r10conf *conf = data; 146 struct page *page; 147 struct r10bio *r10_bio; 148 struct bio *bio; 149 int i, j; 150 int nalloc; 151 152 r10_bio = r10bio_pool_alloc(gfp_flags, conf); 153 if (!r10_bio) 154 return NULL; 155 156 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) || 157 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery)) 158 nalloc = conf->copies; /* resync */ 159 else 160 nalloc = 2; /* recovery */ 161 162 /* 163 * Allocate bios. 164 */ 165 for (j = nalloc ; j-- ; ) { 166 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES); 167 if (!bio) 168 goto out_free_bio; 169 r10_bio->devs[j].bio = bio; 170 if (!conf->have_replacement) 171 continue; 172 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES); 173 if (!bio) 174 goto out_free_bio; 175 r10_bio->devs[j].repl_bio = bio; 176 } 177 /* 178 * Allocate RESYNC_PAGES data pages and attach them 179 * where needed. 
180 */ 181 for (j = 0 ; j < nalloc; j++) { 182 struct bio *rbio = r10_bio->devs[j].repl_bio; 183 bio = r10_bio->devs[j].bio; 184 for (i = 0; i < RESYNC_PAGES; i++) { 185 if (j > 0 && !test_bit(MD_RECOVERY_SYNC, 186 &conf->mddev->recovery)) { 187 /* we can share bv_page's during recovery 188 * and reshape */ 189 struct bio *rbio = r10_bio->devs[0].bio; 190 page = rbio->bi_io_vec[i].bv_page; 191 get_page(page); 192 } else 193 page = alloc_page(gfp_flags); 194 if (unlikely(!page)) 195 goto out_free_pages; 196 197 bio->bi_io_vec[i].bv_page = page; 198 if (rbio) 199 rbio->bi_io_vec[i].bv_page = page; 200 } 201 } 202 203 return r10_bio; 204 205 out_free_pages: 206 for ( ; i > 0 ; i--) 207 safe_put_page(bio->bi_io_vec[i-1].bv_page); 208 while (j--) 209 for (i = 0; i < RESYNC_PAGES ; i++) 210 safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page); 211 j = 0; 212 out_free_bio: 213 for ( ; j < nalloc; j++) { 214 if (r10_bio->devs[j].bio) 215 bio_put(r10_bio->devs[j].bio); 216 if (r10_bio->devs[j].repl_bio) 217 bio_put(r10_bio->devs[j].repl_bio); 218 } 219 r10bio_pool_free(r10_bio, conf); 220 return NULL; 221 } 222 223 static void r10buf_pool_free(void *__r10_bio, void *data) 224 { 225 int i; 226 struct r10conf *conf = data; 227 struct r10bio *r10bio = __r10_bio; 228 int j; 229 230 for (j=0; j < conf->copies; j++) { 231 struct bio *bio = r10bio->devs[j].bio; 232 if (bio) { 233 for (i = 0; i < RESYNC_PAGES; i++) { 234 safe_put_page(bio->bi_io_vec[i].bv_page); 235 bio->bi_io_vec[i].bv_page = NULL; 236 } 237 bio_put(bio); 238 } 239 bio = r10bio->devs[j].repl_bio; 240 if (bio) 241 bio_put(bio); 242 } 243 r10bio_pool_free(r10bio, conf); 244 } 245 246 static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio) 247 { 248 int i; 249 250 for (i = 0; i < conf->copies; i++) { 251 struct bio **bio = & r10_bio->devs[i].bio; 252 if (!BIO_SPECIAL(*bio)) 253 bio_put(*bio); 254 *bio = NULL; 255 bio = &r10_bio->devs[i].repl_bio; 256 if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio)) 257 bio_put(*bio); 258 *bio = NULL; 259 } 260 } 261 262 static void free_r10bio(struct r10bio *r10_bio) 263 { 264 struct r10conf *conf = r10_bio->mddev->private; 265 266 put_all_bios(conf, r10_bio); 267 mempool_free(r10_bio, conf->r10bio_pool); 268 } 269 270 static void put_buf(struct r10bio *r10_bio) 271 { 272 struct r10conf *conf = r10_bio->mddev->private; 273 274 mempool_free(r10_bio, conf->r10buf_pool); 275 276 lower_barrier(conf); 277 } 278 279 static void reschedule_retry(struct r10bio *r10_bio) 280 { 281 unsigned long flags; 282 struct mddev *mddev = r10_bio->mddev; 283 struct r10conf *conf = mddev->private; 284 285 spin_lock_irqsave(&conf->device_lock, flags); 286 list_add(&r10_bio->retry_list, &conf->retry_list); 287 conf->nr_queued ++; 288 spin_unlock_irqrestore(&conf->device_lock, flags); 289 290 /* wake up frozen array... */ 291 wake_up(&conf->wait_barrier); 292 293 md_wakeup_thread(mddev->thread); 294 } 295 296 /* 297 * raid_end_bio_io() is called when we have finished servicing a mirrored 298 * operation and are ready to return a success/failure code to the buffer 299 * cache layer. 
300 */ 301 static void raid_end_bio_io(struct r10bio *r10_bio) 302 { 303 struct bio *bio = r10_bio->master_bio; 304 int done; 305 struct r10conf *conf = r10_bio->mddev->private; 306 307 if (bio->bi_phys_segments) { 308 unsigned long flags; 309 spin_lock_irqsave(&conf->device_lock, flags); 310 bio->bi_phys_segments--; 311 done = (bio->bi_phys_segments == 0); 312 spin_unlock_irqrestore(&conf->device_lock, flags); 313 } else 314 done = 1; 315 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) 316 bio->bi_error = -EIO; 317 if (done) { 318 bio_endio(bio); 319 /* 320 * Wake up any possible resync thread that waits for the device 321 * to go idle. 322 */ 323 allow_barrier(conf); 324 } 325 free_r10bio(r10_bio); 326 } 327 328 /* 329 * Update disk head position estimator based on IRQ completion info. 330 */ 331 static inline void update_head_pos(int slot, struct r10bio *r10_bio) 332 { 333 struct r10conf *conf = r10_bio->mddev->private; 334 335 conf->mirrors[r10_bio->devs[slot].devnum].head_position = 336 r10_bio->devs[slot].addr + (r10_bio->sectors); 337 } 338 339 /* 340 * Find the disk number which triggered given bio 341 */ 342 static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio, 343 struct bio *bio, int *slotp, int *replp) 344 { 345 int slot; 346 int repl = 0; 347 348 for (slot = 0; slot < conf->copies; slot++) { 349 if (r10_bio->devs[slot].bio == bio) 350 break; 351 if (r10_bio->devs[slot].repl_bio == bio) { 352 repl = 1; 353 break; 354 } 355 } 356 357 BUG_ON(slot == conf->copies); 358 update_head_pos(slot, r10_bio); 359 360 if (slotp) 361 *slotp = slot; 362 if (replp) 363 *replp = repl; 364 return r10_bio->devs[slot].devnum; 365 } 366 367 static void raid10_end_read_request(struct bio *bio) 368 { 369 int uptodate = !bio->bi_error; 370 struct r10bio *r10_bio = bio->bi_private; 371 int slot, dev; 372 struct md_rdev *rdev; 373 struct r10conf *conf = r10_bio->mddev->private; 374 375 slot = r10_bio->read_slot; 376 dev = r10_bio->devs[slot].devnum; 377 rdev = r10_bio->devs[slot].rdev; 378 /* 379 * this branch is our 'one mirror IO has finished' event handler: 380 */ 381 update_head_pos(slot, r10_bio); 382 383 if (uptodate) { 384 /* 385 * Set R10BIO_Uptodate in our master bio, so that 386 * we will return a good error code to the higher 387 * levels even if IO on some other mirrored buffer fails. 388 * 389 * The 'master' represents the composite IO operation to 390 * user-side. So if something waits for IO, then it will 391 * wait for the 'master' bio. 392 */ 393 set_bit(R10BIO_Uptodate, &r10_bio->state); 394 } else { 395 /* If all other devices that store this block have 396 * failed, we want to return the error upwards rather 397 * than fail the last device. 
Here we redefine 398 * "uptodate" to mean "Don't want to retry" 399 */ 400 if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state), 401 rdev->raid_disk)) 402 uptodate = 1; 403 } 404 if (uptodate) { 405 raid_end_bio_io(r10_bio); 406 rdev_dec_pending(rdev, conf->mddev); 407 } else { 408 /* 409 * oops, read error - keep the refcount on the rdev 410 */ 411 char b[BDEVNAME_SIZE]; 412 pr_err_ratelimited("md/raid10:%s: %s: rescheduling sector %llu\n", 413 mdname(conf->mddev), 414 bdevname(rdev->bdev, b), 415 (unsigned long long)r10_bio->sector); 416 set_bit(R10BIO_ReadError, &r10_bio->state); 417 reschedule_retry(r10_bio); 418 } 419 } 420 421 static void close_write(struct r10bio *r10_bio) 422 { 423 /* clear the bitmap if all writes complete successfully */ 424 bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector, 425 r10_bio->sectors, 426 !test_bit(R10BIO_Degraded, &r10_bio->state), 427 0); 428 md_write_end(r10_bio->mddev); 429 } 430 431 static void one_write_done(struct r10bio *r10_bio) 432 { 433 if (atomic_dec_and_test(&r10_bio->remaining)) { 434 if (test_bit(R10BIO_WriteError, &r10_bio->state)) 435 reschedule_retry(r10_bio); 436 else { 437 close_write(r10_bio); 438 if (test_bit(R10BIO_MadeGood, &r10_bio->state)) 439 reschedule_retry(r10_bio); 440 else 441 raid_end_bio_io(r10_bio); 442 } 443 } 444 } 445 446 static void raid10_end_write_request(struct bio *bio) 447 { 448 struct r10bio *r10_bio = bio->bi_private; 449 int dev; 450 int dec_rdev = 1; 451 struct r10conf *conf = r10_bio->mddev->private; 452 int slot, repl; 453 struct md_rdev *rdev = NULL; 454 struct bio *to_put = NULL; 455 bool discard_error; 456 457 discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD; 458 459 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl); 460 461 if (repl) 462 rdev = conf->mirrors[dev].replacement; 463 if (!rdev) { 464 smp_rmb(); 465 repl = 0; 466 rdev = conf->mirrors[dev].rdev; 467 } 468 /* 469 * this branch is our 'one mirror IO has finished' event handler: 470 */ 471 if (bio->bi_error && !discard_error) { 472 if (repl) 473 /* Never record new bad blocks to replacement, 474 * just fail it. 475 */ 476 md_error(rdev->mddev, rdev); 477 else { 478 set_bit(WriteErrorSeen, &rdev->flags); 479 if (!test_and_set_bit(WantReplacement, &rdev->flags)) 480 set_bit(MD_RECOVERY_NEEDED, 481 &rdev->mddev->recovery); 482 483 dec_rdev = 0; 484 if (test_bit(FailFast, &rdev->flags) && 485 (bio->bi_opf & MD_FAILFAST)) { 486 md_error(rdev->mddev, rdev); 487 if (!test_bit(Faulty, &rdev->flags)) 488 /* This is the only remaining device, 489 * We need to retry the write without 490 * FailFast 491 */ 492 set_bit(R10BIO_WriteError, &r10_bio->state); 493 else { 494 r10_bio->devs[slot].bio = NULL; 495 to_put = bio; 496 dec_rdev = 1; 497 } 498 } else 499 set_bit(R10BIO_WriteError, &r10_bio->state); 500 } 501 } else { 502 /* 503 * Set R10BIO_Uptodate in our master bio, so that 504 * we will return a good error code for to the higher 505 * levels even if IO on some other mirrored buffer fails. 506 * 507 * The 'master' represents the composite IO operation to 508 * user-side. So if something waits for IO, then it will 509 * wait for the 'master' bio. 510 */ 511 sector_t first_bad; 512 int bad_sectors; 513 514 /* 515 * Do not set R10BIO_Uptodate if the current device is 516 * rebuilding or Faulty. 
This is because we cannot use
		 * such a device for properly reading the data back (we could
		 * potentially use it, if the current write would have fallen
		 * before rdev->recovery_offset, but for simplicity we don't
		 * check this here.)
		 */
		if (test_bit(In_sync, &rdev->flags) &&
		    !test_bit(Faulty, &rdev->flags))
			set_bit(R10BIO_Uptodate, &r10_bio->state);

		/* Maybe we can clear some bad blocks. */
		if (is_badblock(rdev,
				r10_bio->devs[slot].addr,
				r10_bio->sectors,
				&first_bad, &bad_sectors) && !discard_error) {
			bio_put(bio);
			if (repl)
				r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
			else
				r10_bio->devs[slot].bio = IO_MADE_GOOD;
			dec_rdev = 0;
			set_bit(R10BIO_MadeGood, &r10_bio->state);
		}
	}

	/*
	 *
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	one_write_done(r10_bio);
	if (dec_rdev)
		rdev_dec_pending(rdev, conf->mddev);
	if (to_put)
		bio_put(to_put);
}

/*
 * RAID10 layout manager
 * As well as the chunksize and raid_disks count, there are two
 * parameters: near_copies and far_copies.
 * near_copies * far_copies must be <= raid_disks.
 * Normally one of these will be 1.
 * If both are 1, we get raid0.
 * If near_copies == raid_disks, we get raid1.
 *
 * Chunks are laid out in raid0 style with near_copies copies of the
 * first chunk, followed by near_copies copies of the next chunk and
 * so on.
 * If far_copies > 1, then after 1/far_copies of the array has been assigned
 * as described above, we start again with a device offset of near_copies.
 * So we effectively have another copy of the whole array further down all
 * the drives, but with blocks on different drives.
 * With this layout, a block is never stored twice on the same device.
 *
 * raid10_find_phys finds the sector offset of a given virtual sector
 * on each device that it is on.
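 *
 * (Illustrative worked example, not part of the original comment: with
 * raid_disks = 4, near_copies = 2, far_copies = 1 and the default layout,
 * virtual chunk 0 maps to devices 0 and 1, virtual chunk 1 to devices 2
 * and 3, virtual chunk 2 wraps back to devices 0 and 1 on the next stripe,
 * and so on; raid10_find_phys fills r10bio->devs[] with one (devnum, addr)
 * pair per copy.)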
573 * 574 * raid10_find_virt does the reverse mapping, from a device and a 575 * sector offset to a virtual address 576 */ 577 578 static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio) 579 { 580 int n,f; 581 sector_t sector; 582 sector_t chunk; 583 sector_t stripe; 584 int dev; 585 int slot = 0; 586 int last_far_set_start, last_far_set_size; 587 588 last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1; 589 last_far_set_start *= geo->far_set_size; 590 591 last_far_set_size = geo->far_set_size; 592 last_far_set_size += (geo->raid_disks % geo->far_set_size); 593 594 /* now calculate first sector/dev */ 595 chunk = r10bio->sector >> geo->chunk_shift; 596 sector = r10bio->sector & geo->chunk_mask; 597 598 chunk *= geo->near_copies; 599 stripe = chunk; 600 dev = sector_div(stripe, geo->raid_disks); 601 if (geo->far_offset) 602 stripe *= geo->far_copies; 603 604 sector += stripe << geo->chunk_shift; 605 606 /* and calculate all the others */ 607 for (n = 0; n < geo->near_copies; n++) { 608 int d = dev; 609 int set; 610 sector_t s = sector; 611 r10bio->devs[slot].devnum = d; 612 r10bio->devs[slot].addr = s; 613 slot++; 614 615 for (f = 1; f < geo->far_copies; f++) { 616 set = d / geo->far_set_size; 617 d += geo->near_copies; 618 619 if ((geo->raid_disks % geo->far_set_size) && 620 (d > last_far_set_start)) { 621 d -= last_far_set_start; 622 d %= last_far_set_size; 623 d += last_far_set_start; 624 } else { 625 d %= geo->far_set_size; 626 d += geo->far_set_size * set; 627 } 628 s += geo->stride; 629 r10bio->devs[slot].devnum = d; 630 r10bio->devs[slot].addr = s; 631 slot++; 632 } 633 dev++; 634 if (dev >= geo->raid_disks) { 635 dev = 0; 636 sector += (geo->chunk_mask + 1); 637 } 638 } 639 } 640 641 static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio) 642 { 643 struct geom *geo = &conf->geo; 644 645 if (conf->reshape_progress != MaxSector && 646 ((r10bio->sector >= conf->reshape_progress) != 647 conf->mddev->reshape_backwards)) { 648 set_bit(R10BIO_Previous, &r10bio->state); 649 geo = &conf->prev; 650 } else 651 clear_bit(R10BIO_Previous, &r10bio->state); 652 653 __raid10_find_phys(geo, r10bio); 654 } 655 656 static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev) 657 { 658 sector_t offset, chunk, vchunk; 659 /* Never use conf->prev as this is only called during resync 660 * or recovery, so reshape isn't happening 661 */ 662 struct geom *geo = &conf->geo; 663 int far_set_start = (dev / geo->far_set_size) * geo->far_set_size; 664 int far_set_size = geo->far_set_size; 665 int last_far_set_start; 666 667 if (geo->raid_disks % geo->far_set_size) { 668 last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1; 669 last_far_set_start *= geo->far_set_size; 670 671 if (dev >= last_far_set_start) { 672 far_set_size = geo->far_set_size; 673 far_set_size += (geo->raid_disks % geo->far_set_size); 674 far_set_start = last_far_set_start; 675 } 676 } 677 678 offset = sector & geo->chunk_mask; 679 if (geo->far_offset) { 680 int fc; 681 chunk = sector >> geo->chunk_shift; 682 fc = sector_div(chunk, geo->far_copies); 683 dev -= fc * geo->near_copies; 684 if (dev < far_set_start) 685 dev += far_set_size; 686 } else { 687 while (sector >= geo->stride) { 688 sector -= geo->stride; 689 if (dev < (geo->near_copies + far_set_start)) 690 dev += far_set_size - geo->near_copies; 691 else 692 dev -= geo->near_copies; 693 } 694 chunk = sector >> geo->chunk_shift; 695 } 696 vchunk = chunk * geo->raid_disks + dev; 697 sector_div(vchunk, 
geo->near_copies); 698 return (vchunk << geo->chunk_shift) + offset; 699 } 700 701 /* 702 * This routine returns the disk from which the requested read should 703 * be done. There is a per-array 'next expected sequential IO' sector 704 * number - if this matches on the next IO then we use the last disk. 705 * There is also a per-disk 'last know head position' sector that is 706 * maintained from IRQ contexts, both the normal and the resync IO 707 * completion handlers update this position correctly. If there is no 708 * perfect sequential match then we pick the disk whose head is closest. 709 * 710 * If there are 2 mirrors in the same 2 devices, performance degrades 711 * because position is mirror, not device based. 712 * 713 * The rdev for the device selected will have nr_pending incremented. 714 */ 715 716 /* 717 * FIXME: possibly should rethink readbalancing and do it differently 718 * depending on near_copies / far_copies geometry. 719 */ 720 static struct md_rdev *read_balance(struct r10conf *conf, 721 struct r10bio *r10_bio, 722 int *max_sectors) 723 { 724 const sector_t this_sector = r10_bio->sector; 725 int disk, slot; 726 int sectors = r10_bio->sectors; 727 int best_good_sectors; 728 sector_t new_distance, best_dist; 729 struct md_rdev *best_rdev, *rdev = NULL; 730 int do_balance; 731 int best_slot; 732 struct geom *geo = &conf->geo; 733 734 raid10_find_phys(conf, r10_bio); 735 rcu_read_lock(); 736 sectors = r10_bio->sectors; 737 best_slot = -1; 738 best_rdev = NULL; 739 best_dist = MaxSector; 740 best_good_sectors = 0; 741 do_balance = 1; 742 clear_bit(R10BIO_FailFast, &r10_bio->state); 743 /* 744 * Check if we can balance. We can balance on the whole 745 * device if no resync is going on (recovery is ok), or below 746 * the resync window. We take the first readable disk when 747 * above the resync window. 748 */ 749 if (conf->mddev->recovery_cp < MaxSector 750 && (this_sector + sectors >= conf->next_resync)) 751 do_balance = 0; 752 753 for (slot = 0; slot < conf->copies ; slot++) { 754 sector_t first_bad; 755 int bad_sectors; 756 sector_t dev_sector; 757 758 if (r10_bio->devs[slot].bio == IO_BLOCKED) 759 continue; 760 disk = r10_bio->devs[slot].devnum; 761 rdev = rcu_dereference(conf->mirrors[disk].replacement); 762 if (rdev == NULL || test_bit(Faulty, &rdev->flags) || 763 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) 764 rdev = rcu_dereference(conf->mirrors[disk].rdev); 765 if (rdev == NULL || 766 test_bit(Faulty, &rdev->flags)) 767 continue; 768 if (!test_bit(In_sync, &rdev->flags) && 769 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) 770 continue; 771 772 dev_sector = r10_bio->devs[slot].addr; 773 if (is_badblock(rdev, dev_sector, sectors, 774 &first_bad, &bad_sectors)) { 775 if (best_dist < MaxSector) 776 /* Already have a better slot */ 777 continue; 778 if (first_bad <= dev_sector) { 779 /* Cannot read here. If this is the 780 * 'primary' device, then we must not read 781 * beyond 'bad_sectors' from another device. 
782 */ 783 bad_sectors -= (dev_sector - first_bad); 784 if (!do_balance && sectors > bad_sectors) 785 sectors = bad_sectors; 786 if (best_good_sectors > sectors) 787 best_good_sectors = sectors; 788 } else { 789 sector_t good_sectors = 790 first_bad - dev_sector; 791 if (good_sectors > best_good_sectors) { 792 best_good_sectors = good_sectors; 793 best_slot = slot; 794 best_rdev = rdev; 795 } 796 if (!do_balance) 797 /* Must read from here */ 798 break; 799 } 800 continue; 801 } else 802 best_good_sectors = sectors; 803 804 if (!do_balance) 805 break; 806 807 if (best_slot >= 0) 808 /* At least 2 disks to choose from so failfast is OK */ 809 set_bit(R10BIO_FailFast, &r10_bio->state); 810 /* This optimisation is debatable, and completely destroys 811 * sequential read speed for 'far copies' arrays. So only 812 * keep it for 'near' arrays, and review those later. 813 */ 814 if (geo->near_copies > 1 && !atomic_read(&rdev->nr_pending)) 815 new_distance = 0; 816 817 /* for far > 1 always use the lowest address */ 818 else if (geo->far_copies > 1) 819 new_distance = r10_bio->devs[slot].addr; 820 else 821 new_distance = abs(r10_bio->devs[slot].addr - 822 conf->mirrors[disk].head_position); 823 if (new_distance < best_dist) { 824 best_dist = new_distance; 825 best_slot = slot; 826 best_rdev = rdev; 827 } 828 } 829 if (slot >= conf->copies) { 830 slot = best_slot; 831 rdev = best_rdev; 832 } 833 834 if (slot >= 0) { 835 atomic_inc(&rdev->nr_pending); 836 r10_bio->read_slot = slot; 837 } else 838 rdev = NULL; 839 rcu_read_unlock(); 840 *max_sectors = best_good_sectors; 841 842 return rdev; 843 } 844 845 static int raid10_congested(struct mddev *mddev, int bits) 846 { 847 struct r10conf *conf = mddev->private; 848 int i, ret = 0; 849 850 if ((bits & (1 << WB_async_congested)) && 851 conf->pending_count >= max_queued_requests) 852 return 1; 853 854 rcu_read_lock(); 855 for (i = 0; 856 (i < conf->geo.raid_disks || i < conf->prev.raid_disks) 857 && ret == 0; 858 i++) { 859 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); 860 if (rdev && !test_bit(Faulty, &rdev->flags)) { 861 struct request_queue *q = bdev_get_queue(rdev->bdev); 862 863 ret |= bdi_congested(q->backing_dev_info, bits); 864 } 865 } 866 rcu_read_unlock(); 867 return ret; 868 } 869 870 static void flush_pending_writes(struct r10conf *conf) 871 { 872 /* Any writes that have been queued but are awaiting 873 * bitmap updates get flushed here. 874 */ 875 spin_lock_irq(&conf->device_lock); 876 877 if (conf->pending_bio_list.head) { 878 struct bio *bio; 879 bio = bio_list_get(&conf->pending_bio_list); 880 conf->pending_count = 0; 881 spin_unlock_irq(&conf->device_lock); 882 /* flush any pending bitmap writes to disk 883 * before proceeding w/ I/O */ 884 bitmap_unplug(conf->mddev->bitmap); 885 wake_up(&conf->wait_barrier); 886 887 while (bio) { /* submit pending writes */ 888 struct bio *next = bio->bi_next; 889 struct md_rdev *rdev = (void*)bio->bi_bdev; 890 bio->bi_next = NULL; 891 bio->bi_bdev = rdev->bdev; 892 if (test_bit(Faulty, &rdev->flags)) { 893 bio->bi_error = -EIO; 894 bio_endio(bio); 895 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && 896 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) 897 /* Just ignore it */ 898 bio_endio(bio); 899 else 900 generic_make_request(bio); 901 bio = next; 902 } 903 } else 904 spin_unlock_irq(&conf->device_lock); 905 } 906 907 /* Barriers.... 908 * Sometimes we need to suspend IO while we do something else, 909 * either some resync/recovery, or reconfigure the array. 
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down. This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'. When that returns there
 * is no background IO happening. It must arrange to call
 * allow_barrier when it has finished its IO.
 * Background IO calls must call raise_barrier. Once that returns
 * there is no normal IO happening. It must arrange to call
 * lower_barrier when the particular background IO completes.
 */

static void raise_barrier(struct r10conf *conf, int force)
{
	BUG_ON(force && !conf->barrier);
	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting (unless 'force') */
	wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
			    conf->resync_lock);

	/* block any new IO from starting */
	conf->barrier++;

	/* Now wait for all pending IO to complete */
	wait_event_lock_irq(conf->wait_barrier,
			    !atomic_read(&conf->nr_pending) && conf->barrier < RESYNC_DEPTH,
			    conf->resync_lock);

	spin_unlock_irq(&conf->resync_lock);
}

static void lower_barrier(struct r10conf *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->barrier--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void wait_barrier(struct r10conf *conf)
{
	spin_lock_irq(&conf->resync_lock);
	if (conf->barrier) {
		conf->nr_waiting++;
		/* Wait for the barrier to drop.
		 * However if there are already pending
		 * requests (preventing the barrier from
		 * rising completely), and the
		 * pre-process bio queue isn't empty,
		 * then don't wait, as we need to empty
		 * that queue to get the nr_pending
		 * count down.
		 */
		raid10_log(conf->mddev, "wait barrier");
		wait_event_lock_irq(conf->wait_barrier,
				    !conf->barrier ||
				    (atomic_read(&conf->nr_pending) &&
				     current->bio_list &&
				     !bio_list_empty(current->bio_list)),
				    conf->resync_lock);
		conf->nr_waiting--;
		if (!conf->nr_waiting)
			wake_up(&conf->wait_barrier);
	}
	atomic_inc(&conf->nr_pending);
	spin_unlock_irq(&conf->resync_lock);
}

static void allow_barrier(struct r10conf *conf)
{
	if ((atomic_dec_and_test(&conf->nr_pending)) ||
	    (conf->array_freeze_pending))
		wake_up(&conf->wait_barrier);
}

static void freeze_array(struct r10conf *conf, int extra)
{
	/* stop syncio and normal IO and wait for everything to
	 * go quiet.
	 * We increment barrier and nr_waiting, and then
	 * wait until nr_pending matches nr_queued+extra.
	 * This is called in the context of one normal IO request
	 * that has failed. Thus any sync request that might be pending
	 * will be blocked by nr_pending, and we need to wait for
	 * pending IO requests to complete or be queued for re-try.
	 * Thus the number queued (nr_queued) plus this request (extra)
	 * must match the number of pending IOs (nr_pending) before
	 * we continue.
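	 * (Illustrative example, not part of the original comment: with this
	 * failed request counting as extra == 1 and two bios already queued
	 * for retry (nr_queued == 2), the wait below finishes when nr_pending
	 * falls to 3, i.e. when all other normal IO has completed or been
	 * queued for retry.)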
1007 */ 1008 spin_lock_irq(&conf->resync_lock); 1009 conf->array_freeze_pending++; 1010 conf->barrier++; 1011 conf->nr_waiting++; 1012 wait_event_lock_irq_cmd(conf->wait_barrier, 1013 atomic_read(&conf->nr_pending) == conf->nr_queued+extra, 1014 conf->resync_lock, 1015 flush_pending_writes(conf)); 1016 1017 conf->array_freeze_pending--; 1018 spin_unlock_irq(&conf->resync_lock); 1019 } 1020 1021 static void unfreeze_array(struct r10conf *conf) 1022 { 1023 /* reverse the effect of the freeze */ 1024 spin_lock_irq(&conf->resync_lock); 1025 conf->barrier--; 1026 conf->nr_waiting--; 1027 wake_up(&conf->wait_barrier); 1028 spin_unlock_irq(&conf->resync_lock); 1029 } 1030 1031 static sector_t choose_data_offset(struct r10bio *r10_bio, 1032 struct md_rdev *rdev) 1033 { 1034 if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) || 1035 test_bit(R10BIO_Previous, &r10_bio->state)) 1036 return rdev->data_offset; 1037 else 1038 return rdev->new_data_offset; 1039 } 1040 1041 struct raid10_plug_cb { 1042 struct blk_plug_cb cb; 1043 struct bio_list pending; 1044 int pending_cnt; 1045 }; 1046 1047 static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) 1048 { 1049 struct raid10_plug_cb *plug = container_of(cb, struct raid10_plug_cb, 1050 cb); 1051 struct mddev *mddev = plug->cb.data; 1052 struct r10conf *conf = mddev->private; 1053 struct bio *bio; 1054 1055 if (from_schedule || current->bio_list) { 1056 spin_lock_irq(&conf->device_lock); 1057 bio_list_merge(&conf->pending_bio_list, &plug->pending); 1058 conf->pending_count += plug->pending_cnt; 1059 spin_unlock_irq(&conf->device_lock); 1060 wake_up(&conf->wait_barrier); 1061 md_wakeup_thread(mddev->thread); 1062 kfree(plug); 1063 return; 1064 } 1065 1066 /* we aren't scheduling, so we can do the write-out directly. */ 1067 bio = bio_list_get(&plug->pending); 1068 bitmap_unplug(mddev->bitmap); 1069 wake_up(&conf->wait_barrier); 1070 1071 while (bio) { /* submit pending writes */ 1072 struct bio *next = bio->bi_next; 1073 struct md_rdev *rdev = (void*)bio->bi_bdev; 1074 bio->bi_next = NULL; 1075 bio->bi_bdev = rdev->bdev; 1076 if (test_bit(Faulty, &rdev->flags)) { 1077 bio->bi_error = -EIO; 1078 bio_endio(bio); 1079 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && 1080 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) 1081 /* Just ignore it */ 1082 bio_endio(bio); 1083 else 1084 generic_make_request(bio); 1085 bio = next; 1086 } 1087 kfree(plug); 1088 } 1089 1090 static void raid10_read_request(struct mddev *mddev, struct bio *bio, 1091 struct r10bio *r10_bio) 1092 { 1093 struct r10conf *conf = mddev->private; 1094 struct bio *read_bio; 1095 const int op = bio_op(bio); 1096 const unsigned long do_sync = (bio->bi_opf & REQ_SYNC); 1097 int sectors_handled; 1098 int max_sectors; 1099 sector_t sectors; 1100 struct md_rdev *rdev; 1101 int slot; 1102 1103 /* 1104 * Register the new request and wait if the reconstruction 1105 * thread has put up a bar for new requests. 1106 * Continue immediately if no resync is active currently. 1107 */ 1108 wait_barrier(conf); 1109 1110 sectors = bio_sectors(bio); 1111 while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 1112 bio->bi_iter.bi_sector < conf->reshape_progress && 1113 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) { 1114 /* 1115 * IO spans the reshape position. 
Need to wait for reshape to 1116 * pass 1117 */ 1118 raid10_log(conf->mddev, "wait reshape"); 1119 allow_barrier(conf); 1120 wait_event(conf->wait_barrier, 1121 conf->reshape_progress <= bio->bi_iter.bi_sector || 1122 conf->reshape_progress >= bio->bi_iter.bi_sector + 1123 sectors); 1124 wait_barrier(conf); 1125 } 1126 1127 read_again: 1128 rdev = read_balance(conf, r10_bio, &max_sectors); 1129 if (!rdev) { 1130 raid_end_bio_io(r10_bio); 1131 return; 1132 } 1133 slot = r10_bio->read_slot; 1134 1135 read_bio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set); 1136 bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector, 1137 max_sectors); 1138 1139 r10_bio->devs[slot].bio = read_bio; 1140 r10_bio->devs[slot].rdev = rdev; 1141 1142 read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr + 1143 choose_data_offset(r10_bio, rdev); 1144 read_bio->bi_bdev = rdev->bdev; 1145 read_bio->bi_end_io = raid10_end_read_request; 1146 bio_set_op_attrs(read_bio, op, do_sync); 1147 if (test_bit(FailFast, &rdev->flags) && 1148 test_bit(R10BIO_FailFast, &r10_bio->state)) 1149 read_bio->bi_opf |= MD_FAILFAST; 1150 read_bio->bi_private = r10_bio; 1151 1152 if (mddev->gendisk) 1153 trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev), 1154 read_bio, disk_devt(mddev->gendisk), 1155 r10_bio->sector); 1156 if (max_sectors < r10_bio->sectors) { 1157 /* 1158 * Could not read all from this device, so we will need another 1159 * r10_bio. 1160 */ 1161 sectors_handled = (r10_bio->sector + max_sectors 1162 - bio->bi_iter.bi_sector); 1163 r10_bio->sectors = max_sectors; 1164 spin_lock_irq(&conf->device_lock); 1165 if (bio->bi_phys_segments == 0) 1166 bio->bi_phys_segments = 2; 1167 else 1168 bio->bi_phys_segments++; 1169 spin_unlock_irq(&conf->device_lock); 1170 /* 1171 * Cannot call generic_make_request directly as that will be 1172 * queued in __generic_make_request and subsequent 1173 * mempool_alloc might block waiting for it. so hand bio over 1174 * to raid10d. 1175 */ 1176 reschedule_retry(r10_bio); 1177 1178 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO); 1179 1180 r10_bio->master_bio = bio; 1181 r10_bio->sectors = bio_sectors(bio) - sectors_handled; 1182 r10_bio->state = 0; 1183 r10_bio->mddev = mddev; 1184 r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled; 1185 goto read_again; 1186 } else 1187 generic_make_request(read_bio); 1188 return; 1189 } 1190 1191 static void raid10_write_request(struct mddev *mddev, struct bio *bio, 1192 struct r10bio *r10_bio) 1193 { 1194 struct r10conf *conf = mddev->private; 1195 int i; 1196 const int op = bio_op(bio); 1197 const unsigned long do_sync = (bio->bi_opf & REQ_SYNC); 1198 const unsigned long do_fua = (bio->bi_opf & REQ_FUA); 1199 unsigned long flags; 1200 struct md_rdev *blocked_rdev; 1201 struct blk_plug_cb *cb; 1202 struct raid10_plug_cb *plug = NULL; 1203 sector_t sectors; 1204 int sectors_handled; 1205 int max_sectors; 1206 1207 md_write_start(mddev, bio); 1208 1209 /* 1210 * Register the new request and wait if the reconstruction 1211 * thread has put up a bar for new requests. 1212 * Continue immediately if no resync is active currently. 1213 */ 1214 wait_barrier(conf); 1215 1216 sectors = bio_sectors(bio); 1217 while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 1218 bio->bi_iter.bi_sector < conf->reshape_progress && 1219 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) { 1220 /* 1221 * IO spans the reshape position. 
Need to wait for reshape to
		 * pass
		 */
		raid10_log(conf->mddev, "wait reshape");
		allow_barrier(conf);
		wait_event(conf->wait_barrier,
			   conf->reshape_progress <= bio->bi_iter.bi_sector ||
			   conf->reshape_progress >= bio->bi_iter.bi_sector +
			   sectors);
		wait_barrier(conf);
	}

	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
	    (mddev->reshape_backwards
	     ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
		bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
	     : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
		bio->bi_iter.bi_sector < conf->reshape_progress))) {
		/* Need to update reshape_position in metadata */
		mddev->reshape_position = conf->reshape_progress;
		set_mask_bits(&mddev->sb_flags, 0,
			      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
		md_wakeup_thread(mddev->thread);
		raid10_log(conf->mddev, "wait reshape metadata");
		wait_event(mddev->sb_wait,
			   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));

		conf->reshape_safe = mddev->reshape_position;
	}

	if (conf->pending_count >= max_queued_requests) {
		md_wakeup_thread(mddev->thread);
		raid10_log(mddev, "wait queued");
		wait_event(conf->wait_barrier,
			   conf->pending_count < max_queued_requests);
	}
	/* first select target devices under rcu_lock and
	 * inc refcount on their rdev. Record them by setting
	 * bios[x] to bio
	 * If there are known/acknowledged bad blocks on any device
	 * on which we have seen a write error, we want to avoid
	 * writing to those blocks. This potentially requires several
	 * writes to write around the bad blocks. Each set of writes
	 * gets its own r10_bio with a set of bios attached. The number
	 * of r10_bios is recorded in bio->bi_phys_segments just as with
	 * the read case.
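	 * (Illustrative example, not part of the original comment: if one
	 * copy has an acknowledged bad block 16 sectors into a 64-sector
	 * write, max_sectors is clipped to 16 below, the first r10_bio
	 * covers those 16 sectors on every copy, and the remaining 48
	 * sectors are handled by a further r10_bio via the retry_write
	 * loop.)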
1267 */ 1268 1269 r10_bio->read_slot = -1; /* make sure repl_bio gets freed */ 1270 raid10_find_phys(conf, r10_bio); 1271 retry_write: 1272 blocked_rdev = NULL; 1273 rcu_read_lock(); 1274 max_sectors = r10_bio->sectors; 1275 1276 for (i = 0; i < conf->copies; i++) { 1277 int d = r10_bio->devs[i].devnum; 1278 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev); 1279 struct md_rdev *rrdev = rcu_dereference( 1280 conf->mirrors[d].replacement); 1281 if (rdev == rrdev) 1282 rrdev = NULL; 1283 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { 1284 atomic_inc(&rdev->nr_pending); 1285 blocked_rdev = rdev; 1286 break; 1287 } 1288 if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) { 1289 atomic_inc(&rrdev->nr_pending); 1290 blocked_rdev = rrdev; 1291 break; 1292 } 1293 if (rdev && (test_bit(Faulty, &rdev->flags))) 1294 rdev = NULL; 1295 if (rrdev && (test_bit(Faulty, &rrdev->flags))) 1296 rrdev = NULL; 1297 1298 r10_bio->devs[i].bio = NULL; 1299 r10_bio->devs[i].repl_bio = NULL; 1300 1301 if (!rdev && !rrdev) { 1302 set_bit(R10BIO_Degraded, &r10_bio->state); 1303 continue; 1304 } 1305 if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) { 1306 sector_t first_bad; 1307 sector_t dev_sector = r10_bio->devs[i].addr; 1308 int bad_sectors; 1309 int is_bad; 1310 1311 is_bad = is_badblock(rdev, dev_sector, max_sectors, 1312 &first_bad, &bad_sectors); 1313 if (is_bad < 0) { 1314 /* Mustn't write here until the bad block 1315 * is acknowledged 1316 */ 1317 atomic_inc(&rdev->nr_pending); 1318 set_bit(BlockedBadBlocks, &rdev->flags); 1319 blocked_rdev = rdev; 1320 break; 1321 } 1322 if (is_bad && first_bad <= dev_sector) { 1323 /* Cannot write here at all */ 1324 bad_sectors -= (dev_sector - first_bad); 1325 if (bad_sectors < max_sectors) 1326 /* Mustn't write more than bad_sectors 1327 * to other devices yet 1328 */ 1329 max_sectors = bad_sectors; 1330 /* We don't set R10BIO_Degraded as that 1331 * only applies if the disk is missing, 1332 * so it might be re-added, and we want to 1333 * know to recover this chunk. 1334 * In this case the device is here, and the 1335 * fact that this chunk is not in-sync is 1336 * recorded in the bad block log. 
1337 */ 1338 continue; 1339 } 1340 if (is_bad) { 1341 int good_sectors = first_bad - dev_sector; 1342 if (good_sectors < max_sectors) 1343 max_sectors = good_sectors; 1344 } 1345 } 1346 if (rdev) { 1347 r10_bio->devs[i].bio = bio; 1348 atomic_inc(&rdev->nr_pending); 1349 } 1350 if (rrdev) { 1351 r10_bio->devs[i].repl_bio = bio; 1352 atomic_inc(&rrdev->nr_pending); 1353 } 1354 } 1355 rcu_read_unlock(); 1356 1357 if (unlikely(blocked_rdev)) { 1358 /* Have to wait for this device to get unblocked, then retry */ 1359 int j; 1360 int d; 1361 1362 for (j = 0; j < i; j++) { 1363 if (r10_bio->devs[j].bio) { 1364 d = r10_bio->devs[j].devnum; 1365 rdev_dec_pending(conf->mirrors[d].rdev, mddev); 1366 } 1367 if (r10_bio->devs[j].repl_bio) { 1368 struct md_rdev *rdev; 1369 d = r10_bio->devs[j].devnum; 1370 rdev = conf->mirrors[d].replacement; 1371 if (!rdev) { 1372 /* Race with remove_disk */ 1373 smp_mb(); 1374 rdev = conf->mirrors[d].rdev; 1375 } 1376 rdev_dec_pending(rdev, mddev); 1377 } 1378 } 1379 allow_barrier(conf); 1380 raid10_log(conf->mddev, "wait rdev %d blocked", blocked_rdev->raid_disk); 1381 md_wait_for_blocked_rdev(blocked_rdev, mddev); 1382 wait_barrier(conf); 1383 goto retry_write; 1384 } 1385 1386 if (max_sectors < r10_bio->sectors) { 1387 /* We are splitting this into multiple parts, so 1388 * we need to prepare for allocating another r10_bio. 1389 */ 1390 r10_bio->sectors = max_sectors; 1391 spin_lock_irq(&conf->device_lock); 1392 if (bio->bi_phys_segments == 0) 1393 bio->bi_phys_segments = 2; 1394 else 1395 bio->bi_phys_segments++; 1396 spin_unlock_irq(&conf->device_lock); 1397 } 1398 sectors_handled = r10_bio->sector + max_sectors - 1399 bio->bi_iter.bi_sector; 1400 1401 atomic_set(&r10_bio->remaining, 1); 1402 bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0); 1403 1404 for (i = 0; i < conf->copies; i++) { 1405 struct bio *mbio; 1406 int d = r10_bio->devs[i].devnum; 1407 if (r10_bio->devs[i].bio) { 1408 struct md_rdev *rdev = conf->mirrors[d].rdev; 1409 mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set); 1410 bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector, 1411 max_sectors); 1412 r10_bio->devs[i].bio = mbio; 1413 1414 mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+ 1415 choose_data_offset(r10_bio, rdev)); 1416 mbio->bi_bdev = rdev->bdev; 1417 mbio->bi_end_io = raid10_end_write_request; 1418 bio_set_op_attrs(mbio, op, do_sync | do_fua); 1419 if (test_bit(FailFast, &conf->mirrors[d].rdev->flags) && 1420 enough(conf, d)) 1421 mbio->bi_opf |= MD_FAILFAST; 1422 mbio->bi_private = r10_bio; 1423 1424 if (conf->mddev->gendisk) 1425 trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev), 1426 mbio, disk_devt(conf->mddev->gendisk), 1427 r10_bio->sector); 1428 /* flush_pending_writes() needs access to the rdev so...*/ 1429 mbio->bi_bdev = (void*)rdev; 1430 1431 atomic_inc(&r10_bio->remaining); 1432 1433 cb = blk_check_plugged(raid10_unplug, mddev, 1434 sizeof(*plug)); 1435 if (cb) 1436 plug = container_of(cb, struct raid10_plug_cb, 1437 cb); 1438 else 1439 plug = NULL; 1440 spin_lock_irqsave(&conf->device_lock, flags); 1441 if (plug) { 1442 bio_list_add(&plug->pending, mbio); 1443 plug->pending_cnt++; 1444 } else { 1445 bio_list_add(&conf->pending_bio_list, mbio); 1446 conf->pending_count++; 1447 } 1448 spin_unlock_irqrestore(&conf->device_lock, flags); 1449 if (!plug) 1450 md_wakeup_thread(mddev->thread); 1451 } 1452 1453 if (r10_bio->devs[i].repl_bio) { 1454 struct md_rdev *rdev = conf->mirrors[d].replacement; 1455 if (rdev == NULL) { 1456 /* Replacement 
just got moved to main 'rdev' */ 1457 smp_mb(); 1458 rdev = conf->mirrors[d].rdev; 1459 } 1460 mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set); 1461 bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector, 1462 max_sectors); 1463 r10_bio->devs[i].repl_bio = mbio; 1464 1465 mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr + 1466 choose_data_offset(r10_bio, rdev)); 1467 mbio->bi_bdev = rdev->bdev; 1468 mbio->bi_end_io = raid10_end_write_request; 1469 bio_set_op_attrs(mbio, op, do_sync | do_fua); 1470 mbio->bi_private = r10_bio; 1471 1472 if (conf->mddev->gendisk) 1473 trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev), 1474 mbio, disk_devt(conf->mddev->gendisk), 1475 r10_bio->sector); 1476 /* flush_pending_writes() needs access to the rdev so...*/ 1477 mbio->bi_bdev = (void*)rdev; 1478 1479 atomic_inc(&r10_bio->remaining); 1480 spin_lock_irqsave(&conf->device_lock, flags); 1481 bio_list_add(&conf->pending_bio_list, mbio); 1482 conf->pending_count++; 1483 spin_unlock_irqrestore(&conf->device_lock, flags); 1484 if (!mddev_check_plugged(mddev)) 1485 md_wakeup_thread(mddev->thread); 1486 } 1487 } 1488 1489 /* Don't remove the bias on 'remaining' (one_write_done) until 1490 * after checking if we need to go around again. 1491 */ 1492 1493 if (sectors_handled < bio_sectors(bio)) { 1494 one_write_done(r10_bio); 1495 /* We need another r10_bio. It has already been counted 1496 * in bio->bi_phys_segments. 1497 */ 1498 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO); 1499 1500 r10_bio->master_bio = bio; 1501 r10_bio->sectors = bio_sectors(bio) - sectors_handled; 1502 1503 r10_bio->mddev = mddev; 1504 r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled; 1505 r10_bio->state = 0; 1506 goto retry_write; 1507 } 1508 one_write_done(r10_bio); 1509 } 1510 1511 static void __make_request(struct mddev *mddev, struct bio *bio) 1512 { 1513 struct r10conf *conf = mddev->private; 1514 struct r10bio *r10_bio; 1515 1516 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO); 1517 1518 r10_bio->master_bio = bio; 1519 r10_bio->sectors = bio_sectors(bio); 1520 1521 r10_bio->mddev = mddev; 1522 r10_bio->sector = bio->bi_iter.bi_sector; 1523 r10_bio->state = 0; 1524 1525 /* 1526 * We might need to issue multiple reads to different devices if there 1527 * are bad blocks around, so we keep track of the number of reads in 1528 * bio->bi_phys_segments. If this is 0, there is only one r10_bio and 1529 * no locking will be needed when the request completes. If it is 1530 * non-zero, then it is the number of not-completed requests. 1531 */ 1532 bio->bi_phys_segments = 0; 1533 bio_clear_flag(bio, BIO_SEG_VALID); 1534 1535 if (bio_data_dir(bio) == READ) 1536 raid10_read_request(mddev, bio, r10_bio); 1537 else 1538 raid10_write_request(mddev, bio, r10_bio); 1539 } 1540 1541 static void raid10_make_request(struct mddev *mddev, struct bio *bio) 1542 { 1543 struct r10conf *conf = mddev->private; 1544 sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask); 1545 int chunk_sects = chunk_mask + 1; 1546 1547 struct bio *split; 1548 1549 if (unlikely(bio->bi_opf & REQ_PREFLUSH)) { 1550 md_flush_request(mddev, bio); 1551 return; 1552 } 1553 1554 do { 1555 1556 /* 1557 * If this request crosses a chunk boundary, we need to split 1558 * it. 
1559 */ 1560 if (unlikely((bio->bi_iter.bi_sector & chunk_mask) + 1561 bio_sectors(bio) > chunk_sects 1562 && (conf->geo.near_copies < conf->geo.raid_disks 1563 || conf->prev.near_copies < 1564 conf->prev.raid_disks))) { 1565 split = bio_split(bio, chunk_sects - 1566 (bio->bi_iter.bi_sector & 1567 (chunk_sects - 1)), 1568 GFP_NOIO, fs_bio_set); 1569 bio_chain(split, bio); 1570 } else { 1571 split = bio; 1572 } 1573 1574 __make_request(mddev, split); 1575 } while (split != bio); 1576 1577 /* In case raid10d snuck in to freeze_array */ 1578 wake_up(&conf->wait_barrier); 1579 } 1580 1581 static void raid10_status(struct seq_file *seq, struct mddev *mddev) 1582 { 1583 struct r10conf *conf = mddev->private; 1584 int i; 1585 1586 if (conf->geo.near_copies < conf->geo.raid_disks) 1587 seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2); 1588 if (conf->geo.near_copies > 1) 1589 seq_printf(seq, " %d near-copies", conf->geo.near_copies); 1590 if (conf->geo.far_copies > 1) { 1591 if (conf->geo.far_offset) 1592 seq_printf(seq, " %d offset-copies", conf->geo.far_copies); 1593 else 1594 seq_printf(seq, " %d far-copies", conf->geo.far_copies); 1595 if (conf->geo.far_set_size != conf->geo.raid_disks) 1596 seq_printf(seq, " %d devices per set", conf->geo.far_set_size); 1597 } 1598 seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks, 1599 conf->geo.raid_disks - mddev->degraded); 1600 rcu_read_lock(); 1601 for (i = 0; i < conf->geo.raid_disks; i++) { 1602 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); 1603 seq_printf(seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_"); 1604 } 1605 rcu_read_unlock(); 1606 seq_printf(seq, "]"); 1607 } 1608 1609 /* check if there are enough drives for 1610 * every block to appear on atleast one. 1611 * Don't consider the device numbered 'ignore' 1612 * as we might be about to remove it. 1613 */ 1614 static int _enough(struct r10conf *conf, int previous, int ignore) 1615 { 1616 int first = 0; 1617 int has_enough = 0; 1618 int disks, ncopies; 1619 if (previous) { 1620 disks = conf->prev.raid_disks; 1621 ncopies = conf->prev.near_copies; 1622 } else { 1623 disks = conf->geo.raid_disks; 1624 ncopies = conf->geo.near_copies; 1625 } 1626 1627 rcu_read_lock(); 1628 do { 1629 int n = conf->copies; 1630 int cnt = 0; 1631 int this = first; 1632 while (n--) { 1633 struct md_rdev *rdev; 1634 if (this != ignore && 1635 (rdev = rcu_dereference(conf->mirrors[this].rdev)) && 1636 test_bit(In_sync, &rdev->flags)) 1637 cnt++; 1638 this = (this+1) % disks; 1639 } 1640 if (cnt == 0) 1641 goto out; 1642 first = (first + ncopies) % disks; 1643 } while (first != 0); 1644 has_enough = 1; 1645 out: 1646 rcu_read_unlock(); 1647 return has_enough; 1648 } 1649 1650 static int enough(struct r10conf *conf, int ignore) 1651 { 1652 /* when calling 'enough', both 'prev' and 'geo' must 1653 * be stable. 1654 * This is ensured if ->reconfig_mutex or ->device_lock 1655 * is held. 1656 */ 1657 return _enough(conf, 0, ignore) && 1658 _enough(conf, 1, ignore); 1659 } 1660 1661 static void raid10_error(struct mddev *mddev, struct md_rdev *rdev) 1662 { 1663 char b[BDEVNAME_SIZE]; 1664 struct r10conf *conf = mddev->private; 1665 unsigned long flags; 1666 1667 /* 1668 * If it is not operational, then we have already marked it as dead 1669 * else if it is the last working disks, ignore the error, let the 1670 * next level up know. 
1671 * else mark the drive as failed 1672 */ 1673 spin_lock_irqsave(&conf->device_lock, flags); 1674 if (test_bit(In_sync, &rdev->flags) 1675 && !enough(conf, rdev->raid_disk)) { 1676 /* 1677 * Don't fail the drive, just return an IO error. 1678 */ 1679 spin_unlock_irqrestore(&conf->device_lock, flags); 1680 return; 1681 } 1682 if (test_and_clear_bit(In_sync, &rdev->flags)) 1683 mddev->degraded++; 1684 /* 1685 * If recovery is running, make sure it aborts. 1686 */ 1687 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 1688 set_bit(Blocked, &rdev->flags); 1689 set_bit(Faulty, &rdev->flags); 1690 set_mask_bits(&mddev->sb_flags, 0, 1691 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING)); 1692 spin_unlock_irqrestore(&conf->device_lock, flags); 1693 pr_crit("md/raid10:%s: Disk failure on %s, disabling device.\n" 1694 "md/raid10:%s: Operation continuing on %d devices.\n", 1695 mdname(mddev), bdevname(rdev->bdev, b), 1696 mdname(mddev), conf->geo.raid_disks - mddev->degraded); 1697 } 1698 1699 static void print_conf(struct r10conf *conf) 1700 { 1701 int i; 1702 struct md_rdev *rdev; 1703 1704 pr_debug("RAID10 conf printout:\n"); 1705 if (!conf) { 1706 pr_debug("(!conf)\n"); 1707 return; 1708 } 1709 pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded, 1710 conf->geo.raid_disks); 1711 1712 /* This is only called with ->reconfix_mutex held, so 1713 * rcu protection of rdev is not needed */ 1714 for (i = 0; i < conf->geo.raid_disks; i++) { 1715 char b[BDEVNAME_SIZE]; 1716 rdev = conf->mirrors[i].rdev; 1717 if (rdev) 1718 pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n", 1719 i, !test_bit(In_sync, &rdev->flags), 1720 !test_bit(Faulty, &rdev->flags), 1721 bdevname(rdev->bdev,b)); 1722 } 1723 } 1724 1725 static void close_sync(struct r10conf *conf) 1726 { 1727 wait_barrier(conf); 1728 allow_barrier(conf); 1729 1730 mempool_destroy(conf->r10buf_pool); 1731 conf->r10buf_pool = NULL; 1732 } 1733 1734 static int raid10_spare_active(struct mddev *mddev) 1735 { 1736 int i; 1737 struct r10conf *conf = mddev->private; 1738 struct raid10_info *tmp; 1739 int count = 0; 1740 unsigned long flags; 1741 1742 /* 1743 * Find all non-in_sync disks within the RAID10 configuration 1744 * and mark them in_sync 1745 */ 1746 for (i = 0; i < conf->geo.raid_disks; i++) { 1747 tmp = conf->mirrors + i; 1748 if (tmp->replacement 1749 && tmp->replacement->recovery_offset == MaxSector 1750 && !test_bit(Faulty, &tmp->replacement->flags) 1751 && !test_and_set_bit(In_sync, &tmp->replacement->flags)) { 1752 /* Replacement has just become active */ 1753 if (!tmp->rdev 1754 || !test_and_clear_bit(In_sync, &tmp->rdev->flags)) 1755 count++; 1756 if (tmp->rdev) { 1757 /* Replaced device not technically faulty, 1758 * but we need to be sure it gets removed 1759 * and never re-added. 
1760 */ 1761 set_bit(Faulty, &tmp->rdev->flags); 1762 sysfs_notify_dirent_safe( 1763 tmp->rdev->sysfs_state); 1764 } 1765 sysfs_notify_dirent_safe(tmp->replacement->sysfs_state); 1766 } else if (tmp->rdev 1767 && tmp->rdev->recovery_offset == MaxSector 1768 && !test_bit(Faulty, &tmp->rdev->flags) 1769 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { 1770 count++; 1771 sysfs_notify_dirent_safe(tmp->rdev->sysfs_state); 1772 } 1773 } 1774 spin_lock_irqsave(&conf->device_lock, flags); 1775 mddev->degraded -= count; 1776 spin_unlock_irqrestore(&conf->device_lock, flags); 1777 1778 print_conf(conf); 1779 return count; 1780 } 1781 1782 static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) 1783 { 1784 struct r10conf *conf = mddev->private; 1785 int err = -EEXIST; 1786 int mirror; 1787 int first = 0; 1788 int last = conf->geo.raid_disks - 1; 1789 1790 if (mddev->recovery_cp < MaxSector) 1791 /* only hot-add to in-sync arrays, as recovery is 1792 * very different from resync 1793 */ 1794 return -EBUSY; 1795 if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1)) 1796 return -EINVAL; 1797 1798 if (md_integrity_add_rdev(rdev, mddev)) 1799 return -ENXIO; 1800 1801 if (rdev->raid_disk >= 0) 1802 first = last = rdev->raid_disk; 1803 1804 if (rdev->saved_raid_disk >= first && 1805 conf->mirrors[rdev->saved_raid_disk].rdev == NULL) 1806 mirror = rdev->saved_raid_disk; 1807 else 1808 mirror = first; 1809 for ( ; mirror <= last ; mirror++) { 1810 struct raid10_info *p = &conf->mirrors[mirror]; 1811 if (p->recovery_disabled == mddev->recovery_disabled) 1812 continue; 1813 if (p->rdev) { 1814 if (!test_bit(WantReplacement, &p->rdev->flags) || 1815 p->replacement != NULL) 1816 continue; 1817 clear_bit(In_sync, &rdev->flags); 1818 set_bit(Replacement, &rdev->flags); 1819 rdev->raid_disk = mirror; 1820 err = 0; 1821 if (mddev->gendisk) 1822 disk_stack_limits(mddev->gendisk, rdev->bdev, 1823 rdev->data_offset << 9); 1824 conf->fullsync = 1; 1825 rcu_assign_pointer(p->replacement, rdev); 1826 break; 1827 } 1828 1829 if (mddev->gendisk) 1830 disk_stack_limits(mddev->gendisk, rdev->bdev, 1831 rdev->data_offset << 9); 1832 1833 p->head_position = 0; 1834 p->recovery_disabled = mddev->recovery_disabled - 1; 1835 rdev->raid_disk = mirror; 1836 err = 0; 1837 if (rdev->saved_raid_disk != mirror) 1838 conf->fullsync = 1; 1839 rcu_assign_pointer(p->rdev, rdev); 1840 break; 1841 } 1842 if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev))) 1843 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); 1844 1845 print_conf(conf); 1846 return err; 1847 } 1848 1849 static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev) 1850 { 1851 struct r10conf *conf = mddev->private; 1852 int err = 0; 1853 int number = rdev->raid_disk; 1854 struct md_rdev **rdevp; 1855 struct raid10_info *p = conf->mirrors + number; 1856 1857 print_conf(conf); 1858 if (rdev == p->rdev) 1859 rdevp = &p->rdev; 1860 else if (rdev == p->replacement) 1861 rdevp = &p->replacement; 1862 else 1863 return 0; 1864 1865 if (test_bit(In_sync, &rdev->flags) || 1866 atomic_read(&rdev->nr_pending)) { 1867 err = -EBUSY; 1868 goto abort; 1869 } 1870 /* Only remove non-faulty devices if recovery 1871 * is not possible. 
1872 */ 1873 if (!test_bit(Faulty, &rdev->flags) && 1874 mddev->recovery_disabled != p->recovery_disabled && 1875 (!p->replacement || p->replacement == rdev) && 1876 number < conf->geo.raid_disks && 1877 enough(conf, -1)) { 1878 err = -EBUSY; 1879 goto abort; 1880 } 1881 *rdevp = NULL; 1882 if (!test_bit(RemoveSynchronized, &rdev->flags)) { 1883 synchronize_rcu(); 1884 if (atomic_read(&rdev->nr_pending)) { 1885 /* lost the race, try later */ 1886 err = -EBUSY; 1887 *rdevp = rdev; 1888 goto abort; 1889 } 1890 } 1891 if (p->replacement) { 1892 /* We must have just cleared 'rdev' */ 1893 p->rdev = p->replacement; 1894 clear_bit(Replacement, &p->replacement->flags); 1895 smp_mb(); /* Make sure other CPUs may see both as identical 1896 * but will never see neither -- if they are careful. 1897 */ 1898 p->replacement = NULL; 1899 clear_bit(WantReplacement, &rdev->flags); 1900 } else 1901 /* We might have just removed the Replacement as faulty. 1902 * Clear the flag just in case. 1903 */ 1904 clear_bit(WantReplacement, &rdev->flags); 1905 1906 err = md_integrity_register(mddev); 1907 1908 abort: 1909 1910 print_conf(conf); 1911 return err; 1912 } 1913 1914 static void end_sync_read(struct bio *bio) 1915 { 1916 struct r10bio *r10_bio = bio->bi_private; 1917 struct r10conf *conf = r10_bio->mddev->private; 1918 int d; 1919 1920 if (bio == r10_bio->master_bio) { 1921 /* this is a reshape read */ 1922 d = r10_bio->read_slot; /* really the read dev */ 1923 } else 1924 d = find_bio_disk(conf, r10_bio, bio, NULL, NULL); 1925 1926 if (!bio->bi_error) 1927 set_bit(R10BIO_Uptodate, &r10_bio->state); 1928 else 1929 /* The write handler will notice the lack of 1930 * R10BIO_Uptodate and record any errors etc 1931 */ 1932 atomic_add(r10_bio->sectors, 1933 &conf->mirrors[d].rdev->corrected_errors); 1934 1935 /* for reconstruct, we always reschedule after a read.
1936 * for resync, only after all reads 1937 */ 1938 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev); 1939 if (test_bit(R10BIO_IsRecover, &r10_bio->state) || 1940 atomic_dec_and_test(&r10_bio->remaining)) { 1941 /* we have read all the blocks, 1942 * do the comparison in process context in raid10d 1943 */ 1944 reschedule_retry(r10_bio); 1945 } 1946 } 1947 1948 static void end_sync_request(struct r10bio *r10_bio) 1949 { 1950 struct mddev *mddev = r10_bio->mddev; 1951 1952 while (atomic_dec_and_test(&r10_bio->remaining)) { 1953 if (r10_bio->master_bio == NULL) { 1954 /* the primary of several recovery bios */ 1955 sector_t s = r10_bio->sectors; 1956 if (test_bit(R10BIO_MadeGood, &r10_bio->state) || 1957 test_bit(R10BIO_WriteError, &r10_bio->state)) 1958 reschedule_retry(r10_bio); 1959 else 1960 put_buf(r10_bio); 1961 md_done_sync(mddev, s, 1); 1962 break; 1963 } else { 1964 struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio; 1965 if (test_bit(R10BIO_MadeGood, &r10_bio->state) || 1966 test_bit(R10BIO_WriteError, &r10_bio->state)) 1967 reschedule_retry(r10_bio); 1968 else 1969 put_buf(r10_bio); 1970 r10_bio = r10_bio2; 1971 } 1972 } 1973 } 1974 1975 static void end_sync_write(struct bio *bio) 1976 { 1977 struct r10bio *r10_bio = bio->bi_private; 1978 struct mddev *mddev = r10_bio->mddev; 1979 struct r10conf *conf = mddev->private; 1980 int d; 1981 sector_t first_bad; 1982 int bad_sectors; 1983 int slot; 1984 int repl; 1985 struct md_rdev *rdev = NULL; 1986 1987 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); 1988 if (repl) 1989 rdev = conf->mirrors[d].replacement; 1990 else 1991 rdev = conf->mirrors[d].rdev; 1992 1993 if (bio->bi_error) { 1994 if (repl) 1995 md_error(mddev, rdev); 1996 else { 1997 set_bit(WriteErrorSeen, &rdev->flags); 1998 if (!test_and_set_bit(WantReplacement, &rdev->flags)) 1999 set_bit(MD_RECOVERY_NEEDED, 2000 &rdev->mddev->recovery); 2001 set_bit(R10BIO_WriteError, &r10_bio->state); 2002 } 2003 } else if (is_badblock(rdev, 2004 r10_bio->devs[slot].addr, 2005 r10_bio->sectors, 2006 &first_bad, &bad_sectors)) 2007 set_bit(R10BIO_MadeGood, &r10_bio->state); 2008 2009 rdev_dec_pending(rdev, mddev); 2010 2011 end_sync_request(r10_bio); 2012 } 2013 2014 /* 2015 * Note: sync and recovery are handled very differently for raid10. 2016 * This code is for resync. 2017 * For resync, we read through virtual addresses and read all blocks. 2018 * If there is any error, we schedule a write. The lowest numbered 2019 * drive is authoritative. 2020 * However, requests come in for physical addresses, so we need to map. 2021 * For every physical address there are raid_disks/copies virtual addresses, 2022 * which is always at least one, but is not necessarily an integer. 2023 * This means that a physical address can span multiple chunks, so we may 2024 * have to submit multiple io requests for a single sync request.
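 * For example (an illustrative sketch, not a geometry taken from this
 * code): with raid_disks = 5 and near_copies * far_copies = 2, each
 * physical address corresponds to 5/2 = 2.5 virtual addresses, so a
 * physically contiguous sync request can straddle a chunk boundary in
 * the virtual address space and must then be issued as two reads.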
2025 */ 2026 /* 2027 * We check if all blocks are in-sync and only write to blocks that 2028 * aren't in sync 2029 */ 2030 static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) 2031 { 2032 struct r10conf *conf = mddev->private; 2033 int i, first; 2034 struct bio *tbio, *fbio; 2035 int vcnt; 2036 2037 atomic_set(&r10_bio->remaining, 1); 2038 2039 /* find the first device with a block */ 2040 for (i=0; i<conf->copies; i++) 2041 if (!r10_bio->devs[i].bio->bi_error) 2042 break; 2043 2044 if (i == conf->copies) 2045 goto done; 2046 2047 first = i; 2048 fbio = r10_bio->devs[i].bio; 2049 fbio->bi_iter.bi_size = r10_bio->sectors << 9; 2050 fbio->bi_iter.bi_idx = 0; 2051 2052 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9); 2053 /* now find blocks with errors */ 2054 for (i=0 ; i < conf->copies ; i++) { 2055 int j, d; 2056 struct md_rdev *rdev; 2057 2058 tbio = r10_bio->devs[i].bio; 2059 2060 if (tbio->bi_end_io != end_sync_read) 2061 continue; 2062 if (i == first) 2063 continue; 2064 d = r10_bio->devs[i].devnum; 2065 rdev = conf->mirrors[d].rdev; 2066 if (!r10_bio->devs[i].bio->bi_error) { 2067 /* We know that the bi_io_vec layout is the same for 2068 * both 'first' and 'i', so we just compare them. 2069 * All vec entries are PAGE_SIZE; 2070 */ 2071 int sectors = r10_bio->sectors; 2072 for (j = 0; j < vcnt; j++) { 2073 int len = PAGE_SIZE; 2074 if (sectors < (len / 512)) 2075 len = sectors * 512; 2076 if (memcmp(page_address(fbio->bi_io_vec[j].bv_page), 2077 page_address(tbio->bi_io_vec[j].bv_page), 2078 len)) 2079 break; 2080 sectors -= len/512; 2081 } 2082 if (j == vcnt) 2083 continue; 2084 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches); 2085 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) 2086 /* Don't fix anything. */ 2087 continue; 2088 } else if (test_bit(FailFast, &rdev->flags)) { 2089 /* Just give up on this device */ 2090 md_error(rdev->mddev, rdev); 2091 continue; 2092 } 2093 /* Ok, we need to write this bio, either to correct an 2094 * inconsistency or to correct an unreadable block. 
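 * (Here an "inconsistency" means the memcmp() above found a mismatch
 * against the first good copy, and an "unreadable block" means the
 * read completed with bi_error set.)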
2095 * First we need to fixup bv_offset, bv_len and 2096 * bi_vecs, as the read request might have corrupted these 2097 */ 2098 bio_reset(tbio); 2099 2100 tbio->bi_vcnt = vcnt; 2101 tbio->bi_iter.bi_size = fbio->bi_iter.bi_size; 2102 tbio->bi_private = r10_bio; 2103 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; 2104 tbio->bi_end_io = end_sync_write; 2105 bio_set_op_attrs(tbio, REQ_OP_WRITE, 0); 2106 2107 bio_copy_data(tbio, fbio); 2108 2109 atomic_inc(&conf->mirrors[d].rdev->nr_pending); 2110 atomic_inc(&r10_bio->remaining); 2111 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio)); 2112 2113 if (test_bit(FailFast, &conf->mirrors[d].rdev->flags)) 2114 tbio->bi_opf |= MD_FAILFAST; 2115 tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset; 2116 tbio->bi_bdev = conf->mirrors[d].rdev->bdev; 2117 generic_make_request(tbio); 2118 } 2119 2120 /* Now write out to any replacement devices 2121 * that are active 2122 */ 2123 for (i = 0; i < conf->copies; i++) { 2124 int d; 2125 2126 tbio = r10_bio->devs[i].repl_bio; 2127 if (!tbio || !tbio->bi_end_io) 2128 continue; 2129 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write 2130 && r10_bio->devs[i].bio != fbio) 2131 bio_copy_data(tbio, fbio); 2132 d = r10_bio->devs[i].devnum; 2133 atomic_inc(&r10_bio->remaining); 2134 md_sync_acct(conf->mirrors[d].replacement->bdev, 2135 bio_sectors(tbio)); 2136 generic_make_request(tbio); 2137 } 2138 2139 done: 2140 if (atomic_dec_and_test(&r10_bio->remaining)) { 2141 md_done_sync(mddev, r10_bio->sectors, 1); 2142 put_buf(r10_bio); 2143 } 2144 } 2145 2146 /* 2147 * Now for the recovery code. 2148 * Recovery happens across physical sectors. 2149 * We recover all non-in_sync drives by finding the virtual address of 2150 * each, and then choose a working drive that also has that virt address. 2151 * There is a separate r10_bio for each non-in_sync drive. 2152 * Only the first two slots are in use, the first for reading, 2153 * the second for writing. 2154 * 2155 */ 2156 static void fix_recovery_read_error(struct r10bio *r10_bio) 2157 { 2158 /* We got a read error during recovery. 2159 * We repeat the read in smaller page-sized sections. 2160 * If a read succeeds, write it to the new device or record 2161 * a bad block if we cannot. 2162 * If a read fails, record a bad block on both old and 2163 * new devices.
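 * Working in page-sized pieces normally means a single bad sector
 * only costs one PAGE_SIZE range of the recovery, rather than
 * poisoning the whole r10_bio.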
2164 */ 2165 struct mddev *mddev = r10_bio->mddev; 2166 struct r10conf *conf = mddev->private; 2167 struct bio *bio = r10_bio->devs[0].bio; 2168 sector_t sect = 0; 2169 int sectors = r10_bio->sectors; 2170 int idx = 0; 2171 int dr = r10_bio->devs[0].devnum; 2172 int dw = r10_bio->devs[1].devnum; 2173 2174 while (sectors) { 2175 int s = sectors; 2176 struct md_rdev *rdev; 2177 sector_t addr; 2178 int ok; 2179 2180 if (s > (PAGE_SIZE>>9)) 2181 s = PAGE_SIZE >> 9; 2182 2183 rdev = conf->mirrors[dr].rdev; 2184 addr = r10_bio->devs[0].addr + sect; 2185 ok = sync_page_io(rdev, 2186 addr, 2187 s << 9, 2188 bio->bi_io_vec[idx].bv_page, 2189 REQ_OP_READ, 0, false); 2190 if (ok) { 2191 rdev = conf->mirrors[dw].rdev; 2192 addr = r10_bio->devs[1].addr + sect; 2193 ok = sync_page_io(rdev, 2194 addr, 2195 s << 9, 2196 bio->bi_io_vec[idx].bv_page, 2197 REQ_OP_WRITE, 0, false); 2198 if (!ok) { 2199 set_bit(WriteErrorSeen, &rdev->flags); 2200 if (!test_and_set_bit(WantReplacement, 2201 &rdev->flags)) 2202 set_bit(MD_RECOVERY_NEEDED, 2203 &rdev->mddev->recovery); 2204 } 2205 } 2206 if (!ok) { 2207 /* We don't worry if we cannot set a bad block - 2208 * it really is bad so there is no loss in not 2209 * recording it yet 2210 */ 2211 rdev_set_badblocks(rdev, addr, s, 0); 2212 2213 if (rdev != conf->mirrors[dw].rdev) { 2214 /* need bad block on destination too */ 2215 struct md_rdev *rdev2 = conf->mirrors[dw].rdev; 2216 addr = r10_bio->devs[1].addr + sect; 2217 ok = rdev_set_badblocks(rdev2, addr, s, 0); 2218 if (!ok) { 2219 /* just abort the recovery */ 2220 pr_notice("md/raid10:%s: recovery aborted due to read error\n", 2221 mdname(mddev)); 2222 2223 conf->mirrors[dw].recovery_disabled 2224 = mddev->recovery_disabled; 2225 set_bit(MD_RECOVERY_INTR, 2226 &mddev->recovery); 2227 break; 2228 } 2229 } 2230 } 2231 2232 sectors -= s; 2233 sect += s; 2234 idx++; 2235 } 2236 } 2237 2238 static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio) 2239 { 2240 struct r10conf *conf = mddev->private; 2241 int d; 2242 struct bio *wbio, *wbio2; 2243 2244 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) { 2245 fix_recovery_read_error(r10_bio); 2246 end_sync_request(r10_bio); 2247 return; 2248 } 2249 2250 /* 2251 * share the pages with the first bio 2252 * and submit the write request 2253 */ 2254 d = r10_bio->devs[1].devnum; 2255 wbio = r10_bio->devs[1].bio; 2256 wbio2 = r10_bio->devs[1].repl_bio; 2257 /* Need to test wbio2->bi_end_io before we call 2258 * generic_make_request, because if the former is NULL, 2259 * the latter is free to free wbio2. 2260 */ 2261 if (wbio2 && !wbio2->bi_end_io) 2262 wbio2 = NULL; 2263 if (wbio->bi_end_io) { 2264 atomic_inc(&conf->mirrors[d].rdev->nr_pending); 2265 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio)); 2266 generic_make_request(wbio); 2267 } 2268 if (wbio2) { 2269 atomic_inc(&conf->mirrors[d].replacement->nr_pending); 2270 md_sync_acct(conf->mirrors[d].replacement->bdev, 2271 bio_sectors(wbio2)); 2272 generic_make_request(wbio2); 2273 } 2274 } 2275 2276 /* 2277 * Used by fix_read_error() to decay the per rdev read_errors. 2278 * We halve the read error count for every hour that has elapsed 2279 * since the last recorded read error.
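 * For example (illustrative numbers only): a device with
 * read_errors == 40 whose last recorded error was a little over three
 * hours ago is decayed to 40 >> 3 == 5 before the new error is
 * counted against max_corr_read_errors.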
2280 * 2281 */ 2282 static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev) 2283 { 2284 long cur_time_mon; 2285 unsigned long hours_since_last; 2286 unsigned int read_errors = atomic_read(&rdev->read_errors); 2287 2288 cur_time_mon = ktime_get_seconds(); 2289 2290 if (rdev->last_read_error == 0) { 2291 /* first time we've seen a read error */ 2292 rdev->last_read_error = cur_time_mon; 2293 return; 2294 } 2295 2296 hours_since_last = (long)(cur_time_mon - 2297 rdev->last_read_error) / 3600; 2298 2299 rdev->last_read_error = cur_time_mon; 2300 2301 /* 2302 * if hours_since_last is > the number of bits in read_errors 2303 * just set read errors to 0. We do this to avoid 2304 * overflowing the shift of read_errors by hours_since_last. 2305 */ 2306 if (hours_since_last >= 8 * sizeof(read_errors)) 2307 atomic_set(&rdev->read_errors, 0); 2308 else 2309 atomic_set(&rdev->read_errors, read_errors >> hours_since_last); 2310 } 2311 2312 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector, 2313 int sectors, struct page *page, int rw) 2314 { 2315 sector_t first_bad; 2316 int bad_sectors; 2317 2318 if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors) 2319 && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags))) 2320 return -1; 2321 if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false)) 2322 /* success */ 2323 return 1; 2324 if (rw == WRITE) { 2325 set_bit(WriteErrorSeen, &rdev->flags); 2326 if (!test_and_set_bit(WantReplacement, &rdev->flags)) 2327 set_bit(MD_RECOVERY_NEEDED, 2328 &rdev->mddev->recovery); 2329 } 2330 /* need to record an error - either for the block or the device */ 2331 if (!rdev_set_badblocks(rdev, sector, sectors, 0)) 2332 md_error(rdev->mddev, rdev); 2333 return 0; 2334 } 2335 2336 /* 2337 * This is a kernel thread which: 2338 * 2339 * 1. Retries failed read operations on working mirrors. 2340 * 2. Updates the raid superblock when problems are encountered. 2341 * 3. Performs writes following reads for array synchronising. 2342 */ 2343 2344 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio) 2345 { 2346 int sect = 0; /* Offset from r10_bio->sector */ 2347 int sectors = r10_bio->sectors; 2348 struct md_rdev *rdev; 2349 int max_read_errors = atomic_read(&mddev->max_corr_read_errors); 2350 int d = r10_bio->devs[r10_bio->read_slot].devnum; 2351 2352 /* still own a reference to this rdev, so it cannot 2353 * have been cleared recently.
2354 */ 2355 rdev = conf->mirrors[d].rdev; 2356 2357 if (test_bit(Faulty, &rdev->flags)) 2358 /* drive has already been failed, just ignore any 2359 more fix_read_error() attempts */ 2360 return; 2361 2362 check_decay_read_errors(mddev, rdev); 2363 atomic_inc(&rdev->read_errors); 2364 if (atomic_read(&rdev->read_errors) > max_read_errors) { 2365 char b[BDEVNAME_SIZE]; 2366 bdevname(rdev->bdev, b); 2367 2368 pr_notice("md/raid10:%s: %s: Raid device exceeded read_error threshold [cur %d:max %d]\n", 2369 mdname(mddev), b, 2370 atomic_read(&rdev->read_errors), max_read_errors); 2371 pr_notice("md/raid10:%s: %s: Failing raid device\n", 2372 mdname(mddev), b); 2373 md_error(mddev, rdev); 2374 r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED; 2375 return; 2376 } 2377 2378 while(sectors) { 2379 int s = sectors; 2380 int sl = r10_bio->read_slot; 2381 int success = 0; 2382 int start; 2383 2384 if (s > (PAGE_SIZE>>9)) 2385 s = PAGE_SIZE >> 9; 2386 2387 rcu_read_lock(); 2388 do { 2389 sector_t first_bad; 2390 int bad_sectors; 2391 2392 d = r10_bio->devs[sl].devnum; 2393 rdev = rcu_dereference(conf->mirrors[d].rdev); 2394 if (rdev && 2395 test_bit(In_sync, &rdev->flags) && 2396 !test_bit(Faulty, &rdev->flags) && 2397 is_badblock(rdev, r10_bio->devs[sl].addr + sect, s, 2398 &first_bad, &bad_sectors) == 0) { 2399 atomic_inc(&rdev->nr_pending); 2400 rcu_read_unlock(); 2401 success = sync_page_io(rdev, 2402 r10_bio->devs[sl].addr + 2403 sect, 2404 s<<9, 2405 conf->tmppage, 2406 REQ_OP_READ, 0, false); 2407 rdev_dec_pending(rdev, mddev); 2408 rcu_read_lock(); 2409 if (success) 2410 break; 2411 } 2412 sl++; 2413 if (sl == conf->copies) 2414 sl = 0; 2415 } while (!success && sl != r10_bio->read_slot); 2416 rcu_read_unlock(); 2417 2418 if (!success) { 2419 /* Cannot read from anywhere, just mark the block 2420 * as bad on the first device to discourage future 2421 * reads. 
2422 */ 2423 int dn = r10_bio->devs[r10_bio->read_slot].devnum; 2424 rdev = conf->mirrors[dn].rdev; 2425 2426 if (!rdev_set_badblocks( 2427 rdev, 2428 r10_bio->devs[r10_bio->read_slot].addr 2429 + sect, 2430 s, 0)) { 2431 md_error(mddev, rdev); 2432 r10_bio->devs[r10_bio->read_slot].bio 2433 = IO_BLOCKED; 2434 } 2435 break; 2436 } 2437 2438 start = sl; 2439 /* write it back and re-read */ 2440 rcu_read_lock(); 2441 while (sl != r10_bio->read_slot) { 2442 char b[BDEVNAME_SIZE]; 2443 2444 if (sl==0) 2445 sl = conf->copies; 2446 sl--; 2447 d = r10_bio->devs[sl].devnum; 2448 rdev = rcu_dereference(conf->mirrors[d].rdev); 2449 if (!rdev || 2450 test_bit(Faulty, &rdev->flags) || 2451 !test_bit(In_sync, &rdev->flags)) 2452 continue; 2453 2454 atomic_inc(&rdev->nr_pending); 2455 rcu_read_unlock(); 2456 if (r10_sync_page_io(rdev, 2457 r10_bio->devs[sl].addr + 2458 sect, 2459 s, conf->tmppage, WRITE) 2460 == 0) { 2461 /* Well, this device is dead */ 2462 pr_notice("md/raid10:%s: read correction write failed (%d sectors at %llu on %s)\n", 2463 mdname(mddev), s, 2464 (unsigned long long)( 2465 sect + 2466 choose_data_offset(r10_bio, 2467 rdev)), 2468 bdevname(rdev->bdev, b)); 2469 pr_notice("md/raid10:%s: %s: failing drive\n", 2470 mdname(mddev), 2471 bdevname(rdev->bdev, b)); 2472 } 2473 rdev_dec_pending(rdev, mddev); 2474 rcu_read_lock(); 2475 } 2476 sl = start; 2477 while (sl != r10_bio->read_slot) { 2478 char b[BDEVNAME_SIZE]; 2479 2480 if (sl==0) 2481 sl = conf->copies; 2482 sl--; 2483 d = r10_bio->devs[sl].devnum; 2484 rdev = rcu_dereference(conf->mirrors[d].rdev); 2485 if (!rdev || 2486 test_bit(Faulty, &rdev->flags) || 2487 !test_bit(In_sync, &rdev->flags)) 2488 continue; 2489 2490 atomic_inc(&rdev->nr_pending); 2491 rcu_read_unlock(); 2492 switch (r10_sync_page_io(rdev, 2493 r10_bio->devs[sl].addr + 2494 sect, 2495 s, conf->tmppage, 2496 READ)) { 2497 case 0: 2498 /* Well, this device is dead */ 2499 pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sectors at %llu on %s)\n", 2500 mdname(mddev), s, 2501 (unsigned long long)( 2502 sect + 2503 choose_data_offset(r10_bio, rdev)), 2504 bdevname(rdev->bdev, b)); 2505 pr_notice("md/raid10:%s: %s: failing drive\n", 2506 mdname(mddev), 2507 bdevname(rdev->bdev, b)); 2508 break; 2509 case 1: 2510 pr_info("md/raid10:%s: read error corrected (%d sectors at %llu on %s)\n", 2511 mdname(mddev), s, 2512 (unsigned long long)( 2513 sect + 2514 choose_data_offset(r10_bio, rdev)), 2515 bdevname(rdev->bdev, b)); 2516 atomic_add(s, &rdev->corrected_errors); 2517 } 2518 2519 rdev_dec_pending(rdev, mddev); 2520 rcu_read_lock(); 2521 } 2522 rcu_read_unlock(); 2523 2524 sectors -= s; 2525 sect += s; 2526 } 2527 } 2528 2529 static int narrow_write_error(struct r10bio *r10_bio, int i) 2530 { 2531 struct bio *bio = r10_bio->master_bio; 2532 struct mddev *mddev = r10_bio->mddev; 2533 struct r10conf *conf = mddev->private; 2534 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev; 2535 /* bio has the data to be written to slot 'i' where 2536 * we just recently had a write error. 2537 * We repeatedly clone the bio and trim down to one block, 2538 * then try the write. Where the write fails we record 2539 * a bad block. 2540 * It is conceivable that the bio doesn't exactly align with 2541 * blocks. We must handle this. 2542 * 2543 * We currently own a reference to the rdev. 
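 * As a worked example (hypothetical values, assuming 512-byte logical
 * blocks): with badblocks.shift = 3 (8-sector granularity) and
 * r10_bio->sector = 21, block_sectors is 8 and the first clone covers
 * sectors 21-23 (3 sectors, up to the boundary at 24); each following
 * clone covers a full 8-sector block until sect_to_write is exhausted.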
2544 */ 2545 2546 int block_sectors; 2547 sector_t sector; 2548 int sectors; 2549 int sect_to_write = r10_bio->sectors; 2550 int ok = 1; 2551 2552 if (rdev->badblocks.shift < 0) 2553 return 0; 2554 2555 block_sectors = roundup(1 << rdev->badblocks.shift, 2556 bdev_logical_block_size(rdev->bdev) >> 9); 2557 sector = r10_bio->sector; 2558 sectors = ((r10_bio->sector + block_sectors) 2559 & ~(sector_t)(block_sectors - 1)) 2560 - sector; 2561 2562 while (sect_to_write) { 2563 struct bio *wbio; 2564 sector_t wsector; 2565 if (sectors > sect_to_write) 2566 sectors = sect_to_write; 2567 /* Write at 'sector' for 'sectors' */ 2568 wbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set); 2569 bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors); 2570 wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector); 2571 wbio->bi_iter.bi_sector = wsector + 2572 choose_data_offset(r10_bio, rdev); 2573 wbio->bi_bdev = rdev->bdev; 2574 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); 2575 2576 if (submit_bio_wait(wbio) < 0) 2577 /* Failure! */ 2578 ok = rdev_set_badblocks(rdev, wsector, 2579 sectors, 0) 2580 && ok; 2581 2582 bio_put(wbio); 2583 sect_to_write -= sectors; 2584 sector += sectors; 2585 sectors = block_sectors; 2586 } 2587 return ok; 2588 } 2589 2590 static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio) 2591 { 2592 int slot = r10_bio->read_slot; 2593 struct bio *bio; 2594 struct r10conf *conf = mddev->private; 2595 struct md_rdev *rdev = r10_bio->devs[slot].rdev; 2596 char b[BDEVNAME_SIZE]; 2597 unsigned long do_sync; 2598 int max_sectors; 2599 dev_t bio_dev; 2600 sector_t bio_last_sector; 2601 2602 /* we got a read error. Maybe the drive is bad. Maybe just 2603 * the block and we can fix it. 2604 * We freeze all other IO, and try reading the block from 2605 * other devices. When we find one, we re-write 2606 * and check if that fixes the read error. 2607 * This is all done synchronously while the array is 2608 * frozen.
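 * (The exception, visible below, is a FailFast device: there we skip
 * the expensive freeze-and-repair cycle and simply fail the device.)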
2609 */ 2610 bio = r10_bio->devs[slot].bio; 2611 bdevname(bio->bi_bdev, b); 2612 bio_dev = bio->bi_bdev->bd_dev; 2613 bio_last_sector = r10_bio->devs[slot].addr + rdev->data_offset + r10_bio->sectors; 2614 bio_put(bio); 2615 r10_bio->devs[slot].bio = NULL; 2616 2617 if (mddev->ro) 2618 r10_bio->devs[slot].bio = IO_BLOCKED; 2619 else if (!test_bit(FailFast, &rdev->flags)) { 2620 freeze_array(conf, 1); 2621 fix_read_error(conf, mddev, r10_bio); 2622 unfreeze_array(conf); 2623 } else 2624 md_error(mddev, rdev); 2625 2626 rdev_dec_pending(rdev, mddev); 2627 2628 read_more: 2629 rdev = read_balance(conf, r10_bio, &max_sectors); 2630 if (rdev == NULL) { 2631 pr_crit_ratelimited("md/raid10:%s: %s: unrecoverable I/O read error for block %llu\n", 2632 mdname(mddev), b, 2633 (unsigned long long)r10_bio->sector); 2634 raid_end_bio_io(r10_bio); 2635 return; 2636 } 2637 2638 do_sync = (r10_bio->master_bio->bi_opf & REQ_SYNC); 2639 slot = r10_bio->read_slot; 2640 pr_err_ratelimited("md/raid10:%s: %s: redirecting sector %llu to another mirror\n", 2641 mdname(mddev), 2642 bdevname(rdev->bdev, b), 2643 (unsigned long long)r10_bio->sector); 2644 bio = bio_clone_fast(r10_bio->master_bio, GFP_NOIO, mddev->bio_set); 2645 bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors); 2646 r10_bio->devs[slot].bio = bio; 2647 r10_bio->devs[slot].rdev = rdev; 2648 bio->bi_iter.bi_sector = r10_bio->devs[slot].addr 2649 + choose_data_offset(r10_bio, rdev); 2650 bio->bi_bdev = rdev->bdev; 2651 bio_set_op_attrs(bio, REQ_OP_READ, do_sync); 2652 if (test_bit(FailFast, &rdev->flags) && 2653 test_bit(R10BIO_FailFast, &r10_bio->state)) 2654 bio->bi_opf |= MD_FAILFAST; 2655 bio->bi_private = r10_bio; 2656 bio->bi_end_io = raid10_end_read_request; 2657 trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), 2658 bio, bio_dev, 2659 bio_last_sector - r10_bio->sectors); 2660 2661 if (max_sectors < r10_bio->sectors) { 2662 /* Drat - have to split this up more */ 2663 struct bio *mbio = r10_bio->master_bio; 2664 int sectors_handled = 2665 r10_bio->sector + max_sectors 2666 - mbio->bi_iter.bi_sector; 2667 r10_bio->sectors = max_sectors; 2668 spin_lock_irq(&conf->device_lock); 2669 if (mbio->bi_phys_segments == 0) 2670 mbio->bi_phys_segments = 2; 2671 else 2672 mbio->bi_phys_segments++; 2673 spin_unlock_irq(&conf->device_lock); 2674 generic_make_request(bio); 2675 2676 r10_bio = mempool_alloc(conf->r10bio_pool, 2677 GFP_NOIO); 2678 r10_bio->master_bio = mbio; 2679 r10_bio->sectors = bio_sectors(mbio) - sectors_handled; 2680 r10_bio->state = 0; 2681 set_bit(R10BIO_ReadError, 2682 &r10_bio->state); 2683 r10_bio->mddev = mddev; 2684 r10_bio->sector = mbio->bi_iter.bi_sector 2685 + sectors_handled; 2686 2687 goto read_more; 2688 } else 2689 generic_make_request(bio); 2690 } 2691 2692 static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) 2693 { 2694 /* Some sort of write request has finished and it 2695 * succeeded in writing where we thought there was a 2696 * bad block. So forget the bad block. 2697 * Or possibly it failed, and we need to record 2698 * a bad block.
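 * Concretely, for the normal-write case below: devs[m].bio set to
 * IO_MADE_GOOD means a write to a known-bad range succeeded, so the
 * bad-block record is cleared; a bio that ended with bi_error set is
 * retried block-by-block via narrow_write_error(), and only if that
 * also fails is the device failed with md_error().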
2699 */ 2700 int m; 2701 struct md_rdev *rdev; 2702 2703 if (test_bit(R10BIO_IsSync, &r10_bio->state) || 2704 test_bit(R10BIO_IsRecover, &r10_bio->state)) { 2705 for (m = 0; m < conf->copies; m++) { 2706 int dev = r10_bio->devs[m].devnum; 2707 rdev = conf->mirrors[dev].rdev; 2708 if (r10_bio->devs[m].bio == NULL) 2709 continue; 2710 if (!r10_bio->devs[m].bio->bi_error) { 2711 rdev_clear_badblocks( 2712 rdev, 2713 r10_bio->devs[m].addr, 2714 r10_bio->sectors, 0); 2715 } else { 2716 if (!rdev_set_badblocks( 2717 rdev, 2718 r10_bio->devs[m].addr, 2719 r10_bio->sectors, 0)) 2720 md_error(conf->mddev, rdev); 2721 } 2722 rdev = conf->mirrors[dev].replacement; 2723 if (r10_bio->devs[m].repl_bio == NULL) 2724 continue; 2725 2726 if (!r10_bio->devs[m].repl_bio->bi_error) { 2727 rdev_clear_badblocks( 2728 rdev, 2729 r10_bio->devs[m].addr, 2730 r10_bio->sectors, 0); 2731 } else { 2732 if (!rdev_set_badblocks( 2733 rdev, 2734 r10_bio->devs[m].addr, 2735 r10_bio->sectors, 0)) 2736 md_error(conf->mddev, rdev); 2737 } 2738 } 2739 put_buf(r10_bio); 2740 } else { 2741 bool fail = false; 2742 for (m = 0; m < conf->copies; m++) { 2743 int dev = r10_bio->devs[m].devnum; 2744 struct bio *bio = r10_bio->devs[m].bio; 2745 rdev = conf->mirrors[dev].rdev; 2746 if (bio == IO_MADE_GOOD) { 2747 rdev_clear_badblocks( 2748 rdev, 2749 r10_bio->devs[m].addr, 2750 r10_bio->sectors, 0); 2751 rdev_dec_pending(rdev, conf->mddev); 2752 } else if (bio != NULL && bio->bi_error) { 2753 fail = true; 2754 if (!narrow_write_error(r10_bio, m)) { 2755 md_error(conf->mddev, rdev); 2756 set_bit(R10BIO_Degraded, 2757 &r10_bio->state); 2758 } 2759 rdev_dec_pending(rdev, conf->mddev); 2760 } 2761 bio = r10_bio->devs[m].repl_bio; 2762 rdev = conf->mirrors[dev].replacement; 2763 if (rdev && bio == IO_MADE_GOOD) { 2764 rdev_clear_badblocks( 2765 rdev, 2766 r10_bio->devs[m].addr, 2767 r10_bio->sectors, 0); 2768 rdev_dec_pending(rdev, conf->mddev); 2769 } 2770 } 2771 if (fail) { 2772 spin_lock_irq(&conf->device_lock); 2773 list_add(&r10_bio->retry_list, &conf->bio_end_io_list); 2774 conf->nr_queued++; 2775 spin_unlock_irq(&conf->device_lock); 2776 md_wakeup_thread(conf->mddev->thread); 2777 } else { 2778 if (test_bit(R10BIO_WriteError, 2779 &r10_bio->state)) 2780 close_write(r10_bio); 2781 raid_end_bio_io(r10_bio); 2782 } 2783 } 2784 } 2785 2786 static void raid10d(struct md_thread *thread) 2787 { 2788 struct mddev *mddev = thread->mddev; 2789 struct r10bio *r10_bio; 2790 unsigned long flags; 2791 struct r10conf *conf = mddev->private; 2792 struct list_head *head = &conf->retry_list; 2793 struct blk_plug plug; 2794 2795 md_check_recovery(mddev); 2796 2797 if (!list_empty_careful(&conf->bio_end_io_list) && 2798 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { 2799 LIST_HEAD(tmp); 2800 spin_lock_irqsave(&conf->device_lock, flags); 2801 if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { 2802 while (!list_empty(&conf->bio_end_io_list)) { 2803 list_move(conf->bio_end_io_list.prev, &tmp); 2804 conf->nr_queued--; 2805 } 2806 } 2807 spin_unlock_irqrestore(&conf->device_lock, flags); 2808 while (!list_empty(&tmp)) { 2809 r10_bio = list_first_entry(&tmp, struct r10bio, 2810 retry_list); 2811 list_del(&r10_bio->retry_list); 2812 if (mddev->degraded) 2813 set_bit(R10BIO_Degraded, &r10_bio->state); 2814 2815 if (test_bit(R10BIO_WriteError, 2816 &r10_bio->state)) 2817 close_write(r10_bio); 2818 raid_end_bio_io(r10_bio); 2819 } 2820 } 2821 2822 blk_start_plug(&plug); 2823 for (;;) { 2824 2825 flush_pending_writes(conf); 2826 2827 
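		/* Main dispatch loop: take one r10_bio at a time off
		 * conf->retry_list under device_lock, then route it by its
		 * state bits (write-completion fixups, reshape, resync
		 * writes, recovery writes, or read-error handling).
		 */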
spin_lock_irqsave(&conf->device_lock, flags); 2828 if (list_empty(head)) { 2829 spin_unlock_irqrestore(&conf->device_lock, flags); 2830 break; 2831 } 2832 r10_bio = list_entry(head->prev, struct r10bio, retry_list); 2833 list_del(head->prev); 2834 conf->nr_queued--; 2835 spin_unlock_irqrestore(&conf->device_lock, flags); 2836 2837 mddev = r10_bio->mddev; 2838 conf = mddev->private; 2839 if (test_bit(R10BIO_MadeGood, &r10_bio->state) || 2840 test_bit(R10BIO_WriteError, &r10_bio->state)) 2841 handle_write_completed(conf, r10_bio); 2842 else if (test_bit(R10BIO_IsReshape, &r10_bio->state)) 2843 reshape_request_write(mddev, r10_bio); 2844 else if (test_bit(R10BIO_IsSync, &r10_bio->state)) 2845 sync_request_write(mddev, r10_bio); 2846 else if (test_bit(R10BIO_IsRecover, &r10_bio->state)) 2847 recovery_request_write(mddev, r10_bio); 2848 else if (test_bit(R10BIO_ReadError, &r10_bio->state)) 2849 handle_read_error(mddev, r10_bio); 2850 else { 2851 /* just a partial read to be scheduled from a 2852 * separate context 2853 */ 2854 int slot = r10_bio->read_slot; 2855 generic_make_request(r10_bio->devs[slot].bio); 2856 } 2857 2858 cond_resched(); 2859 if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING)) 2860 md_check_recovery(mddev); 2861 } 2862 blk_finish_plug(&plug); 2863 } 2864 2865 static int init_resync(struct r10conf *conf) 2866 { 2867 int buffs; 2868 int i; 2869 2870 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE; 2871 BUG_ON(conf->r10buf_pool); 2872 conf->have_replacement = 0; 2873 for (i = 0; i < conf->geo.raid_disks; i++) 2874 if (conf->mirrors[i].replacement) 2875 conf->have_replacement = 1; 2876 conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf); 2877 if (!conf->r10buf_pool) 2878 return -ENOMEM; 2879 conf->next_resync = 0; 2880 return 0; 2881 } 2882 2883 /* 2884 * perform a "sync" on one "block" 2885 * 2886 * We need to make sure that no normal I/O request - particularly write 2887 * requests - conflict with active sync requests. 2888 * 2889 * This is achieved by tracking pending requests and a 'barrier' concept 2890 * that can be installed to exclude normal IO requests. 2891 * 2892 * Resync and recovery are handled very differently. 2893 * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery. 2894 * 2895 * For resync, we iterate over virtual addresses, read all copies, 2896 * and update if there are differences. If only one copy is live, 2897 * skip it. 2898 * For recovery, we iterate over physical addresses, read a good 2899 * value for each non-in_sync drive, and over-write. 2900 * 2901 * So, for recovery we may have several outstanding complex requests for a 2902 * given address, one for each out-of-sync device. We model this by allocating 2903 * a number of r10_bio structures, one for each out-of-sync device. 2904 * As we setup these structures, we collect all bio's together into a list 2905 * which we then process collectively to add pages, and then process again 2906 * to pass to generic_make_request. 2907 * 2908 * The r10_bio structures are linked using a borrowed master_bio pointer. 2909 * This link is counted in ->remaining. When the r10_bio that points to NULL 2910 * has its remaining count decremented to 0, the whole complex operation 2911 * is complete. 
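 * For example (sketch): if two drives are out of sync at one virtual
 * address, r10_bio A is allocated first with master_bio == NULL, then
 * r10_bio B with master_bio == (struct bio *)A; A->remaining is
 * incremented for B, and the whole operation finishes only when A's
 * count finally drops to zero.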
2912 * 2913 */ 2914 2915 static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, 2916 int *skipped) 2917 { 2918 struct r10conf *conf = mddev->private; 2919 struct r10bio *r10_bio; 2920 struct bio *biolist = NULL, *bio; 2921 sector_t max_sector, nr_sectors; 2922 int i; 2923 int max_sync; 2924 sector_t sync_blocks; 2925 sector_t sectors_skipped = 0; 2926 int chunks_skipped = 0; 2927 sector_t chunk_mask = conf->geo.chunk_mask; 2928 2929 if (!conf->r10buf_pool) 2930 if (init_resync(conf)) 2931 return 0; 2932 2933 /* 2934 * Allow skipping a full rebuild for incremental assembly 2935 * of a clean array, like RAID1 does. 2936 */ 2937 if (mddev->bitmap == NULL && 2938 mddev->recovery_cp == MaxSector && 2939 mddev->reshape_position == MaxSector && 2940 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && 2941 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && 2942 !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 2943 conf->fullsync == 0) { 2944 *skipped = 1; 2945 return mddev->dev_sectors - sector_nr; 2946 } 2947 2948 skipped: 2949 max_sector = mddev->dev_sectors; 2950 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 2951 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 2952 max_sector = mddev->resync_max_sectors; 2953 if (sector_nr >= max_sector) { 2954 /* If we aborted, we need to abort the 2955 * sync on the 'current' bitmap chunks (there can 2956 * be several when recovering multiple devices), 2957 * as we may have started syncing them but not finished. 2958 * We can find the current address in 2959 * mddev->curr_resync, but for recovery, 2960 * we need to convert that to several 2961 * virtual addresses. 2962 */ 2963 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { 2964 end_reshape(conf); 2965 close_sync(conf); 2966 return 0; 2967 } 2968 2969 if (mddev->curr_resync < max_sector) { /* aborted */ 2970 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 2971 bitmap_end_sync(mddev->bitmap, mddev->curr_resync, 2972 &sync_blocks, 1); 2973 else for (i = 0; i < conf->geo.raid_disks; i++) { 2974 sector_t sect = 2975 raid10_find_virt(conf, mddev->curr_resync, i); 2976 bitmap_end_sync(mddev->bitmap, sect, 2977 &sync_blocks, 1); 2978 } 2979 } else { 2980 /* completed sync */ 2981 if ((!mddev->bitmap || conf->fullsync) 2982 && conf->have_replacement 2983 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 2984 /* Completed a full sync so the replacements 2985 * are now fully recovered. 2986 */ 2987 rcu_read_lock(); 2988 for (i = 0; i < conf->geo.raid_disks; i++) { 2989 struct md_rdev *rdev = 2990 rcu_dereference(conf->mirrors[i].replacement); 2991 if (rdev) 2992 rdev->recovery_offset = MaxSector; 2993 } 2994 rcu_read_unlock(); 2995 } 2996 conf->fullsync = 0; 2997 } 2998 bitmap_close_sync(mddev->bitmap); 2999 close_sync(conf); 3000 *skipped = 1; 3001 return sectors_skipped; 3002 } 3003 3004 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 3005 return reshape_request(mddev, sector_nr, skipped); 3006 3007 if (chunks_skipped >= conf->geo.raid_disks) { 3008 /* if there has been nothing to do on any drive, 3009 * then there is nothing to do at all..
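 * (chunks_skipped is incremented each time the 'giveup' path below
 * finds nowhere to read from or write to in a chunk; once it reaches
 * raid_disks we stop probing and report the remainder as done.)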
3010 */ 3011 *skipped = 1; 3012 return (max_sector - sector_nr) + sectors_skipped; 3013 } 3014 3015 if (max_sector > mddev->resync_max) 3016 max_sector = mddev->resync_max; /* Don't do IO beyond here */ 3017 3018 /* make sure whole request will fit in a chunk - if chunks 3019 * are meaningful 3020 */ 3021 if (conf->geo.near_copies < conf->geo.raid_disks && 3022 max_sector > (sector_nr | chunk_mask)) 3023 max_sector = (sector_nr | chunk_mask) + 1; 3024 3025 /* 3026 * If there is non-resync activity waiting for a turn, then let it 3027 * through before starting on this new sync request. 3028 */ 3029 if (conf->nr_waiting) 3030 schedule_timeout_uninterruptible(1); 3031 3032 /* Again, very different code for resync and recovery. 3033 * Both must result in an r10bio with a list of bios that 3034 * have bi_end_io, bi_sector, bi_bdev set, 3035 * and bi_private set to the r10bio. 3036 * For recovery, we may actually create several r10bios 3037 * with 2 bios in each, that correspond to the bios in the main one. 3038 * In this case, the subordinate r10bios link back through a 3039 * borrowed master_bio pointer, and the counter in the master 3040 * includes a ref from each subordinate. 3041 */ 3042 /* First, we decide what to do and set ->bi_end_io 3043 * to end_sync_read if we want to read, and 3044 * end_sync_write if we will want to write. 3045 */ 3046 3047 max_sync = RESYNC_PAGES << (PAGE_SHIFT-9); 3048 if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 3049 /* recovery... the complicated one */ 3050 int j; 3051 r10_bio = NULL; 3052 3053 for (i = 0 ; i < conf->geo.raid_disks; i++) { 3054 int still_degraded; 3055 struct r10bio *rb2; 3056 sector_t sect; 3057 int must_sync; 3058 int any_working; 3059 struct raid10_info *mirror = &conf->mirrors[i]; 3060 struct md_rdev *mrdev, *mreplace; 3061 3062 rcu_read_lock(); 3063 mrdev = rcu_dereference(mirror->rdev); 3064 mreplace = rcu_dereference(mirror->replacement); 3065 3066 if ((mrdev == NULL || 3067 test_bit(Faulty, &mrdev->flags) || 3068 test_bit(In_sync, &mrdev->flags)) && 3069 (mreplace == NULL || 3070 test_bit(Faulty, &mreplace->flags))) { 3071 rcu_read_unlock(); 3072 continue; 3073 } 3074 3075 still_degraded = 0; 3076 /* want to reconstruct this device */ 3077 rb2 = r10_bio; 3078 sect = raid10_find_virt(conf, sector_nr, i); 3079 if (sect >= mddev->resync_max_sectors) { 3080 /* last stripe is not complete - don't 3081 * try to recover this sector.
3082 */ 3083 rcu_read_unlock(); 3084 continue; 3085 } 3086 if (mreplace && test_bit(Faulty, &mreplace->flags)) 3087 mreplace = NULL; 3088 /* Unless we are doing a full sync, or a replacement 3089 * we only need to recover the block if it is set in 3090 * the bitmap 3091 */ 3092 must_sync = bitmap_start_sync(mddev->bitmap, sect, 3093 &sync_blocks, 1); 3094 if (sync_blocks < max_sync) 3095 max_sync = sync_blocks; 3096 if (!must_sync && 3097 mreplace == NULL && 3098 !conf->fullsync) { 3099 /* yep, skip the sync_blocks here, but don't assume 3100 * that there will never be anything to do here 3101 */ 3102 chunks_skipped = -1; 3103 rcu_read_unlock(); 3104 continue; 3105 } 3106 atomic_inc(&mrdev->nr_pending); 3107 if (mreplace) 3108 atomic_inc(&mreplace->nr_pending); 3109 rcu_read_unlock(); 3110 3111 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); 3112 r10_bio->state = 0; 3113 raise_barrier(conf, rb2 != NULL); 3114 atomic_set(&r10_bio->remaining, 0); 3115 3116 r10_bio->master_bio = (struct bio*)rb2; 3117 if (rb2) 3118 atomic_inc(&rb2->remaining); 3119 r10_bio->mddev = mddev; 3120 set_bit(R10BIO_IsRecover, &r10_bio->state); 3121 r10_bio->sector = sect; 3122 3123 raid10_find_phys(conf, r10_bio); 3124 3125 /* Need to check if the array will still be 3126 * degraded 3127 */ 3128 rcu_read_lock(); 3129 for (j = 0; j < conf->geo.raid_disks; j++) { 3130 struct md_rdev *rdev = rcu_dereference( 3131 conf->mirrors[j].rdev); 3132 if (rdev == NULL || test_bit(Faulty, &rdev->flags)) { 3133 still_degraded = 1; 3134 break; 3135 } 3136 } 3137 3138 must_sync = bitmap_start_sync(mddev->bitmap, sect, 3139 &sync_blocks, still_degraded); 3140 3141 any_working = 0; 3142 for (j=0; j<conf->copies;j++) { 3143 int k; 3144 int d = r10_bio->devs[j].devnum; 3145 sector_t from_addr, to_addr; 3146 struct md_rdev *rdev = 3147 rcu_dereference(conf->mirrors[d].rdev); 3148 sector_t sector, first_bad; 3149 int bad_sectors; 3150 if (!rdev || 3151 !test_bit(In_sync, &rdev->flags)) 3152 continue; 3153 /* This is where we read from */ 3154 any_working = 1; 3155 sector = r10_bio->devs[j].addr; 3156 3157 if (is_badblock(rdev, sector, max_sync, 3158 &first_bad, &bad_sectors)) { 3159 if (first_bad > sector) 3160 max_sync = first_bad - sector; 3161 else { 3162 bad_sectors -= (sector 3163 - first_bad); 3164 if (max_sync > bad_sectors) 3165 max_sync = bad_sectors; 3166 continue; 3167 } 3168 } 3169 bio = r10_bio->devs[0].bio; 3170 bio_reset(bio); 3171 bio->bi_next = biolist; 3172 biolist = bio; 3173 bio->bi_private = r10_bio; 3174 bio->bi_end_io = end_sync_read; 3175 bio_set_op_attrs(bio, REQ_OP_READ, 0); 3176 if (test_bit(FailFast, &rdev->flags)) 3177 bio->bi_opf |= MD_FAILFAST; 3178 from_addr = r10_bio->devs[j].addr; 3179 bio->bi_iter.bi_sector = from_addr + 3180 rdev->data_offset; 3181 bio->bi_bdev = rdev->bdev; 3182 atomic_inc(&rdev->nr_pending); 3183 /* and we write to 'i' (if not in_sync) */ 3184 3185 for (k=0; k<conf->copies; k++) 3186 if (r10_bio->devs[k].devnum == i) 3187 break; 3188 BUG_ON(k == conf->copies); 3189 to_addr = r10_bio->devs[k].addr; 3190 r10_bio->devs[0].devnum = d; 3191 r10_bio->devs[0].addr = from_addr; 3192 r10_bio->devs[1].devnum = i; 3193 r10_bio->devs[1].addr = to_addr; 3194 3195 if (!test_bit(In_sync, &mrdev->flags)) { 3196 bio = r10_bio->devs[1].bio; 3197 bio_reset(bio); 3198 bio->bi_next = biolist; 3199 biolist = bio; 3200 bio->bi_private = r10_bio; 3201 bio->bi_end_io = end_sync_write; 3202 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 3203 bio->bi_iter.bi_sector = to_addr 3204 + mrdev->data_offset; 3205 
bio->bi_bdev = mrdev->bdev; 3206 atomic_inc(&r10_bio->remaining); 3207 } else 3208 r10_bio->devs[1].bio->bi_end_io = NULL; 3209 3210 /* and maybe write to replacement */ 3211 bio = r10_bio->devs[1].repl_bio; 3212 if (bio) 3213 bio->bi_end_io = NULL; 3214 /* Note: if mreplace != NULL, then bio 3215 * cannot be NULL as r10buf_pool_alloc will 3216 * have allocated it. 3217 * So the second test here is pointless. 3218 * But it keeps semantic-checkers happy, and 3219 * this comment keeps human reviewers 3220 * happy. 3221 */ 3222 if (mreplace == NULL || bio == NULL || 3223 test_bit(Faulty, &mreplace->flags)) 3224 break; 3225 bio_reset(bio); 3226 bio->bi_next = biolist; 3227 biolist = bio; 3228 bio->bi_private = r10_bio; 3229 bio->bi_end_io = end_sync_write; 3230 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 3231 bio->bi_iter.bi_sector = to_addr + 3232 mreplace->data_offset; 3233 bio->bi_bdev = mreplace->bdev; 3234 atomic_inc(&r10_bio->remaining); 3235 break; 3236 } 3237 rcu_read_unlock(); 3238 if (j == conf->copies) { 3239 /* Cannot recover, so abort the recovery or 3240 * record a bad block */ 3241 if (any_working) { 3242 /* problem is that there are bad blocks 3243 * on other device(s) 3244 */ 3245 int k; 3246 for (k = 0; k < conf->copies; k++) 3247 if (r10_bio->devs[k].devnum == i) 3248 break; 3249 if (!test_bit(In_sync, 3250 &mrdev->flags) 3251 && !rdev_set_badblocks( 3252 mrdev, 3253 r10_bio->devs[k].addr, 3254 max_sync, 0)) 3255 any_working = 0; 3256 if (mreplace && 3257 !rdev_set_badblocks( 3258 mreplace, 3259 r10_bio->devs[k].addr, 3260 max_sync, 0)) 3261 any_working = 0; 3262 } 3263 if (!any_working) { 3264 if (!test_and_set_bit(MD_RECOVERY_INTR, 3265 &mddev->recovery)) 3266 pr_warn("md/raid10:%s: insufficient working devices for recovery.\n", 3267 mdname(mddev)); 3268 mirror->recovery_disabled 3269 = mddev->recovery_disabled; 3270 } 3271 put_buf(r10_bio); 3272 if (rb2) 3273 atomic_dec(&rb2->remaining); 3274 r10_bio = rb2; 3275 rdev_dec_pending(mrdev, mddev); 3276 if (mreplace) 3277 rdev_dec_pending(mreplace, mddev); 3278 break; 3279 } 3280 rdev_dec_pending(mrdev, mddev); 3281 if (mreplace) 3282 rdev_dec_pending(mreplace, mddev); 3283 if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) { 3284 /* Only want this if there is elsewhere to 3285 * read from. 'j' is currently the first 3286 * readable copy. 3287 */ 3288 int targets = 1; 3289 for (; j < conf->copies; j++) { 3290 int d = r10_bio->devs[j].devnum; 3291 if (conf->mirrors[d].rdev && 3292 test_bit(In_sync, 3293 &conf->mirrors[d].rdev->flags)) 3294 targets++; 3295 } 3296 if (targets == 1) 3297 r10_bio->devs[0].bio->bi_opf 3298 &= ~MD_FAILFAST; 3299 } 3300 } 3301 if (biolist == NULL) { 3302 while (r10_bio) { 3303 struct r10bio *rb2 = r10_bio; 3304 r10_bio = (struct r10bio*) rb2->master_bio; 3305 rb2->master_bio = NULL; 3306 put_buf(rb2); 3307 } 3308 goto giveup; 3309 } 3310 } else { 3311 /* resync. 
Schedule a read for every block at this virt offset */ 3312 int count = 0; 3313 3314 bitmap_cond_end_sync(mddev->bitmap, sector_nr, 0); 3315 3316 if (!bitmap_start_sync(mddev->bitmap, sector_nr, 3317 &sync_blocks, mddev->degraded) && 3318 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, 3319 &mddev->recovery)) { 3320 /* We can skip this block */ 3321 *skipped = 1; 3322 return sync_blocks + sectors_skipped; 3323 } 3324 if (sync_blocks < max_sync) 3325 max_sync = sync_blocks; 3326 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); 3327 r10_bio->state = 0; 3328 3329 r10_bio->mddev = mddev; 3330 atomic_set(&r10_bio->remaining, 0); 3331 raise_barrier(conf, 0); 3332 conf->next_resync = sector_nr; 3333 3334 r10_bio->master_bio = NULL; 3335 r10_bio->sector = sector_nr; 3336 set_bit(R10BIO_IsSync, &r10_bio->state); 3337 raid10_find_phys(conf, r10_bio); 3338 r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1; 3339 3340 for (i = 0; i < conf->copies; i++) { 3341 int d = r10_bio->devs[i].devnum; 3342 sector_t first_bad, sector; 3343 int bad_sectors; 3344 struct md_rdev *rdev; 3345 3346 if (r10_bio->devs[i].repl_bio) 3347 r10_bio->devs[i].repl_bio->bi_end_io = NULL; 3348 3349 bio = r10_bio->devs[i].bio; 3350 bio_reset(bio); 3351 bio->bi_error = -EIO; 3352 rcu_read_lock(); 3353 rdev = rcu_dereference(conf->mirrors[d].rdev); 3354 if (rdev == NULL || test_bit(Faulty, &rdev->flags)) { 3355 rcu_read_unlock(); 3356 continue; 3357 } 3358 sector = r10_bio->devs[i].addr; 3359 if (is_badblock(rdev, sector, max_sync, 3360 &first_bad, &bad_sectors)) { 3361 if (first_bad > sector) 3362 max_sync = first_bad - sector; 3363 else { 3364 bad_sectors -= (sector - first_bad); 3365 if (max_sync > bad_sectors) 3366 max_sync = bad_sectors; 3367 rcu_read_unlock(); 3368 continue; 3369 } 3370 } 3371 atomic_inc(&rdev->nr_pending); 3372 atomic_inc(&r10_bio->remaining); 3373 bio->bi_next = biolist; 3374 biolist = bio; 3375 bio->bi_private = r10_bio; 3376 bio->bi_end_io = end_sync_read; 3377 bio_set_op_attrs(bio, REQ_OP_READ, 0); 3378 if (test_bit(FailFast, &conf->mirrors[d].rdev->flags)) 3379 bio->bi_opf |= MD_FAILFAST; 3380 bio->bi_iter.bi_sector = sector + rdev->data_offset; 3381 bio->bi_bdev = rdev->bdev; 3382 count++; 3383 3384 rdev = rcu_dereference(conf->mirrors[d].replacement); 3385 if (rdev == NULL || test_bit(Faulty, &rdev->flags)) { 3386 rcu_read_unlock(); 3387 continue; 3388 } 3389 atomic_inc(&rdev->nr_pending); 3390 rcu_read_unlock(); 3391 3392 /* Need to set up for writing to the replacement */ 3393 bio = r10_bio->devs[i].repl_bio; 3394 bio_reset(bio); 3395 bio->bi_error = -EIO; 3396 3397 sector = r10_bio->devs[i].addr; 3398 bio->bi_next = biolist; 3399 biolist = bio; 3400 bio->bi_private = r10_bio; 3401 bio->bi_end_io = end_sync_write; 3402 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 3403 if (test_bit(FailFast, &conf->mirrors[d].rdev->flags)) 3404 bio->bi_opf |= MD_FAILFAST; 3405 bio->bi_iter.bi_sector = sector + rdev->data_offset; 3406 bio->bi_bdev = rdev->bdev; 3407 count++; 3408 } 3409 3410 if (count < 2) { 3411 for (i=0; i<conf->copies; i++) { 3412 int d = r10_bio->devs[i].devnum; 3413 if (r10_bio->devs[i].bio->bi_end_io) 3414 rdev_dec_pending(conf->mirrors[d].rdev, 3415 mddev); 3416 if (r10_bio->devs[i].repl_bio && 3417 r10_bio->devs[i].repl_bio->bi_end_io) 3418 rdev_dec_pending( 3419 conf->mirrors[d].replacement, 3420 mddev); 3421 } 3422 put_buf(r10_bio); 3423 biolist = NULL; 3424 goto giveup; 3425 } 3426 } 3427 3428 nr_sectors = 0; 3429 if (sector_nr + max_sync < max_sector) 3430 max_sector = 
sector_nr + max_sync; 3431 do { 3432 struct page *page; 3433 int len = PAGE_SIZE; 3434 if (sector_nr + (len>>9) > max_sector) 3435 len = (max_sector - sector_nr) << 9; 3436 if (len == 0) 3437 break; 3438 for (bio= biolist ; bio ; bio=bio->bi_next) { 3439 struct bio *bio2; 3440 page = bio->bi_io_vec[bio->bi_vcnt].bv_page; 3441 if (bio_add_page(bio, page, len, 0)) 3442 continue; 3443 3444 /* stop here */ 3445 bio->bi_io_vec[bio->bi_vcnt].bv_page = page; 3446 for (bio2 = biolist; 3447 bio2 && bio2 != bio; 3448 bio2 = bio2->bi_next) { 3449 /* remove last page from this bio */ 3450 bio2->bi_vcnt--; 3451 bio2->bi_iter.bi_size -= len; 3452 bio_clear_flag(bio2, BIO_SEG_VALID); 3453 } 3454 goto bio_full; 3455 } 3456 nr_sectors += len>>9; 3457 sector_nr += len>>9; 3458 } while (biolist->bi_vcnt < RESYNC_PAGES); 3459 bio_full: 3460 r10_bio->sectors = nr_sectors; 3461 3462 while (biolist) { 3463 bio = biolist; 3464 biolist = biolist->bi_next; 3465 3466 bio->bi_next = NULL; 3467 r10_bio = bio->bi_private; 3468 r10_bio->sectors = nr_sectors; 3469 3470 if (bio->bi_end_io == end_sync_read) { 3471 md_sync_acct(bio->bi_bdev, nr_sectors); 3472 bio->bi_error = 0; 3473 generic_make_request(bio); 3474 } 3475 } 3476 3477 if (sectors_skipped) 3478 /* pretend they weren't skipped, it makes 3479 * no important difference in this case 3480 */ 3481 md_done_sync(mddev, sectors_skipped, 1); 3482 3483 return sectors_skipped + nr_sectors; 3484 giveup: 3485 /* There is nowhere to write, so all non-sync 3486 * drives must be failed or in resync, all drives 3487 * have a bad block, so try the next chunk... 3488 */ 3489 if (sector_nr + max_sync < max_sector) 3490 max_sector = sector_nr + max_sync; 3491 3492 sectors_skipped += (max_sector - sector_nr); 3493 chunks_skipped ++; 3494 sector_nr = max_sector; 3495 goto skipped; 3496 } 3497 3498 static sector_t 3499 raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks) 3500 { 3501 sector_t size; 3502 struct r10conf *conf = mddev->private; 3503 3504 if (!raid_disks) 3505 raid_disks = min(conf->geo.raid_disks, 3506 conf->prev.raid_disks); 3507 if (!sectors) 3508 sectors = conf->dev_sectors; 3509 3510 size = sectors >> conf->geo.chunk_shift; 3511 sector_div(size, conf->geo.far_copies); 3512 size = size * raid_disks; 3513 sector_div(size, conf->geo.near_copies); 3514 3515 return size << conf->geo.chunk_shift; 3516 } 3517 3518 static void calc_sectors(struct r10conf *conf, sector_t size) 3519 { 3520 /* Calculate the number of sectors-per-device that will 3521 * actually be used, and set conf->dev_sectors and 3522 * conf->stride 3523 */ 3524 3525 size = size >> conf->geo.chunk_shift; 3526 sector_div(size, conf->geo.far_copies); 3527 size = size * conf->geo.raid_disks; 3528 sector_div(size, conf->geo.near_copies); 3529 /* 'size' is now the number of chunks in the array */ 3530 /* calculate "used chunks per device" */ 3531 size = size * conf->copies; 3532 3533 /* We need to round up when dividing by raid_disks to 3534 * get the stride size. 
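 * As an illustrative example (hypothetical geometry): 4 disks,
 * near_copies = 2, far_copies = 2 (so copies = 4) and 1024 chunks per
 * device give 1024 / 2 * 4 / 2 = 1024 data chunks in the array; those
 * use DIV_ROUND_UP(1024 * 4, 4) = 1024 chunks on each device, and
 * without far_offset the stride is then 1024 / 2 = 512 chunks.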
3535 */ 3536 size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks); 3537 3538 conf->dev_sectors = size << conf->geo.chunk_shift; 3539 3540 if (conf->geo.far_offset) 3541 conf->geo.stride = 1 << conf->geo.chunk_shift; 3542 else { 3543 sector_div(size, conf->geo.far_copies); 3544 conf->geo.stride = size << conf->geo.chunk_shift; 3545 } 3546 } 3547 3548 enum geo_type {geo_new, geo_old, geo_start}; 3549 static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new) 3550 { 3551 int nc, fc, fo; 3552 int layout, chunk, disks; 3553 switch (new) { 3554 case geo_old: 3555 layout = mddev->layout; 3556 chunk = mddev->chunk_sectors; 3557 disks = mddev->raid_disks - mddev->delta_disks; 3558 break; 3559 case geo_new: 3560 layout = mddev->new_layout; 3561 chunk = mddev->new_chunk_sectors; 3562 disks = mddev->raid_disks; 3563 break; 3564 default: /* avoid 'may be unused' warnings */ 3565 case geo_start: /* new when starting reshape - raid_disks not 3566 * updated yet. */ 3567 layout = mddev->new_layout; 3568 chunk = mddev->new_chunk_sectors; 3569 disks = mddev->raid_disks + mddev->delta_disks; 3570 break; 3571 } 3572 if (layout >> 19) 3573 return -1; 3574 if (chunk < (PAGE_SIZE >> 9) || 3575 !is_power_of_2(chunk)) 3576 return -2; 3577 nc = layout & 255; 3578 fc = (layout >> 8) & 255; 3579 fo = layout & (1<<16); 3580 geo->raid_disks = disks; 3581 geo->near_copies = nc; 3582 geo->far_copies = fc; 3583 geo->far_offset = fo; 3584 switch (layout >> 17) { 3585 case 0: /* original layout. simple but not always optimal */ 3586 geo->far_set_size = disks; 3587 break; 3588 case 1: /* "improved" layout which was buggy. Hopefully no-one is 3589 * actually using this, but leave code here just in case.*/ 3590 geo->far_set_size = disks/fc; 3591 WARN(geo->far_set_size < fc, 3592 "This RAID10 layout does not provide data safety - please backup and create new array\n"); 3593 break; 3594 case 2: /* "improved" layout fixed to match documentation */ 3595 geo->far_set_size = fc * nc; 3596 break; 3597 default: /* Not a valid layout */ 3598 return -1; 3599 } 3600 geo->chunk_mask = chunk - 1; 3601 geo->chunk_shift = ffz(~chunk); 3602 return nc*fc; 3603 } 3604 3605 static struct r10conf *setup_conf(struct mddev *mddev) 3606 { 3607 struct r10conf *conf = NULL; 3608 int err = -EINVAL; 3609 struct geom geo; 3610 int copies; 3611 3612 copies = setup_geo(&geo, mddev, geo_new); 3613 3614 if (copies == -2) { 3615 pr_warn("md/raid10:%s: chunk size must be at least PAGE_SIZE(%ld) and be a power of 2.\n", 3616 mdname(mddev), PAGE_SIZE); 3617 goto out; 3618 } 3619 3620 if (copies < 2 || copies > mddev->raid_disks) { 3621 pr_warn("md/raid10:%s: unsupported raid10 layout: 0x%8x\n", 3622 mdname(mddev), mddev->new_layout); 3623 goto out; 3624 } 3625 3626 err = -ENOMEM; 3627 conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL); 3628 if (!conf) 3629 goto out; 3630 3631 /* FIXME calc properly */ 3632 conf->mirrors = kzalloc(sizeof(struct raid10_info)*(mddev->raid_disks + 3633 max(0,-mddev->delta_disks)), 3634 GFP_KERNEL); 3635 if (!conf->mirrors) 3636 goto out; 3637 3638 conf->tmppage = alloc_page(GFP_KERNEL); 3639 if (!conf->tmppage) 3640 goto out; 3641 3642 conf->geo = geo; 3643 conf->copies = copies; 3644 conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc, 3645 r10bio_pool_free, conf); 3646 if (!conf->r10bio_pool) 3647 goto out; 3648 3649 calc_sectors(conf, mddev->dev_sectors); 3650 if (mddev->reshape_position == MaxSector) { 3651 conf->prev = conf->geo; 3652 conf->reshape_progress = MaxSector; 3653 } else { 3654 
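/* A reshape is in progress, so the old geometry must describe the same
 * total number of copies. For illustration (a hypothetical value):
 * layout 0x102 decodes in setup_geo() above to near_copies = 2,
 * far_copies = 1, far_offset = 0 - the common 'n2' layout - giving
 * copies == 2.
 */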
if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) { 3655 err = -EINVAL; 3656 goto out; 3657 } 3658 conf->reshape_progress = mddev->reshape_position; 3659 if (conf->prev.far_offset) 3660 conf->prev.stride = 1 << conf->prev.chunk_shift; 3661 else 3662 /* far_copies must be 1 */ 3663 conf->prev.stride = conf->dev_sectors; 3664 } 3665 conf->reshape_safe = conf->reshape_progress; 3666 spin_lock_init(&conf->device_lock); 3667 INIT_LIST_HEAD(&conf->retry_list); 3668 INIT_LIST_HEAD(&conf->bio_end_io_list); 3669 3670 spin_lock_init(&conf->resync_lock); 3671 init_waitqueue_head(&conf->wait_barrier); 3672 atomic_set(&conf->nr_pending, 0); 3673 3674 conf->thread = md_register_thread(raid10d, mddev, "raid10"); 3675 if (!conf->thread) 3676 goto out; 3677 3678 conf->mddev = mddev; 3679 return conf; 3680 3681 out: 3682 if (conf) { 3683 mempool_destroy(conf->r10bio_pool); 3684 kfree(conf->mirrors); 3685 safe_put_page(conf->tmppage); 3686 kfree(conf); 3687 } 3688 return ERR_PTR(err); 3689 } 3690 3691 static int raid10_run(struct mddev *mddev) 3692 { 3693 struct r10conf *conf; 3694 int i, disk_idx, chunk_size; 3695 struct raid10_info *disk; 3696 struct md_rdev *rdev; 3697 sector_t size; 3698 sector_t min_offset_diff = 0; 3699 int first = 1; 3700 bool discard_supported = false; 3701 3702 if (mddev->private == NULL) { 3703 conf = setup_conf(mddev); 3704 if (IS_ERR(conf)) 3705 return PTR_ERR(conf); 3706 mddev->private = conf; 3707 } 3708 conf = mddev->private; 3709 if (!conf) 3710 goto out; 3711 3712 mddev->thread = conf->thread; 3713 conf->thread = NULL; 3714 3715 chunk_size = mddev->chunk_sectors << 9; 3716 if (mddev->queue) { 3717 blk_queue_max_discard_sectors(mddev->queue, 3718 mddev->chunk_sectors); 3719 blk_queue_max_write_same_sectors(mddev->queue, 0); 3720 blk_queue_io_min(mddev->queue, chunk_size); 3721 if (conf->geo.raid_disks % conf->geo.near_copies) 3722 blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); 3723 else 3724 blk_queue_io_opt(mddev->queue, chunk_size * 3725 (conf->geo.raid_disks / conf->geo.near_copies)); 3726 } 3727 3728 rdev_for_each(rdev, mddev) { 3729 long long diff; 3730 struct request_queue *q; 3731 3732 disk_idx = rdev->raid_disk; 3733 if (disk_idx < 0) 3734 continue; 3735 if (disk_idx >= conf->geo.raid_disks && 3736 disk_idx >= conf->prev.raid_disks) 3737 continue; 3738 disk = conf->mirrors + disk_idx; 3739 3740 if (test_bit(Replacement, &rdev->flags)) { 3741 if (disk->replacement) 3742 goto out_free_conf; 3743 disk->replacement = rdev; 3744 } else { 3745 if (disk->rdev) 3746 goto out_free_conf; 3747 disk->rdev = rdev; 3748 } 3749 q = bdev_get_queue(rdev->bdev); 3750 diff = (rdev->new_data_offset - rdev->data_offset); 3751 if (!mddev->reshape_backwards) 3752 diff = -diff; 3753 if (diff < 0) 3754 diff = 0; 3755 if (first || diff < min_offset_diff) 3756 min_offset_diff = diff; 3757 3758 if (mddev->gendisk) 3759 disk_stack_limits(mddev->gendisk, rdev->bdev, 3760 rdev->data_offset << 9); 3761 3762 disk->head_position = 0; 3763 3764 if (blk_queue_discard(bdev_get_queue(rdev->bdev))) 3765 discard_supported = true; 3766 } 3767 3768 if (mddev->queue) { 3769 if (discard_supported) 3770 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, 3771 mddev->queue); 3772 else 3773 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, 3774 mddev->queue); 3775 } 3776 /* need to check that every block has at least one working mirror */ 3777 if (!enough(conf, -1)) { 3778 pr_err("md/raid10:%s: not enough operational mirrors.\n", 3779 mdname(mddev)); 3780 goto out_free_conf; 3781 } 3782 3783 if 
(conf->reshape_progress != MaxSector) { 3784 /* must ensure that shape change is supported */ 3785 if (conf->geo.far_copies != 1 && 3786 conf->geo.far_offset == 0) 3787 goto out_free_conf; 3788 if (conf->prev.far_copies != 1 && 3789 conf->prev.far_offset == 0) 3790 goto out_free_conf; 3791 } 3792 3793 mddev->degraded = 0; 3794 for (i = 0; 3795 i < conf->geo.raid_disks 3796 || i < conf->prev.raid_disks; 3797 i++) { 3798 3799 disk = conf->mirrors + i; 3800 3801 if (!disk->rdev && disk->replacement) { 3802 /* The replacement is all we have - use it */ 3803 disk->rdev = disk->replacement; 3804 disk->replacement = NULL; 3805 clear_bit(Replacement, &disk->rdev->flags); 3806 } 3807 3808 if (!disk->rdev || 3809 !test_bit(In_sync, &disk->rdev->flags)) { 3810 disk->head_position = 0; 3811 mddev->degraded++; 3812 if (disk->rdev && 3813 disk->rdev->saved_raid_disk < 0) 3814 conf->fullsync = 1; 3815 } 3816 disk->recovery_disabled = mddev->recovery_disabled - 1; 3817 } 3818 3819 if (mddev->recovery_cp != MaxSector) 3820 pr_notice("md/raid10:%s: not clean -- starting background reconstruction\n", 3821 mdname(mddev)); 3822 pr_info("md/raid10:%s: active with %d out of %d devices\n", 3823 mdname(mddev), conf->geo.raid_disks - mddev->degraded, 3824 conf->geo.raid_disks); 3825 /* 3826 * Ok, everything is just fine now 3827 */ 3828 mddev->dev_sectors = conf->dev_sectors; 3829 size = raid10_size(mddev, 0, 0); 3830 md_set_array_sectors(mddev, size); 3831 mddev->resync_max_sectors = size; 3832 set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags); 3833 3834 if (mddev->queue) { 3835 int stripe = conf->geo.raid_disks * 3836 ((mddev->chunk_sectors << 9) / PAGE_SIZE); 3837 3838 /* Calculate max read-ahead size. 3839 * We need to readahead at least twice a whole stripe.... 3840 * maybe... 
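 * As a rough illustration (hypothetical geometry, 4KiB pages): with 6
 * devices, near_copies=2 and 512KiB chunks this gives 6 * 128 / 2 = 384
 * pages per stripe, so ra_pages is raised to at least 768 pages (3MiB)
 * when it is currently smaller.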
3841 */ 3842 stripe /= conf->geo.near_copies; 3843 if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe) 3844 mddev->queue->backing_dev_info->ra_pages = 2 * stripe; 3845 } 3846 3847 if (md_integrity_register(mddev)) 3848 goto out_free_conf; 3849 3850 if (conf->reshape_progress != MaxSector) { 3851 unsigned long before_length, after_length; 3852 3853 before_length = ((1 << conf->prev.chunk_shift) * 3854 conf->prev.far_copies); 3855 after_length = ((1 << conf->geo.chunk_shift) * 3856 conf->geo.far_copies); 3857 3858 if (max(before_length, after_length) > min_offset_diff) { 3859 /* This cannot work */ 3860 pr_warn("md/raid10: offset difference not enough to continue reshape\n"); 3861 goto out_free_conf; 3862 } 3863 conf->offset_diff = min_offset_diff; 3864 3865 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 3866 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 3867 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 3868 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 3869 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 3870 "reshape"); 3871 } 3872 3873 return 0; 3874 3875 out_free_conf: 3876 md_unregister_thread(&mddev->thread); 3877 mempool_destroy(conf->r10bio_pool); 3878 safe_put_page(conf->tmppage); 3879 kfree(conf->mirrors); 3880 kfree(conf); 3881 mddev->private = NULL; 3882 out: 3883 return -EIO; 3884 } 3885 3886 static void raid10_free(struct mddev *mddev, void *priv) 3887 { 3888 struct r10conf *conf = priv; 3889 3890 mempool_destroy(conf->r10bio_pool); 3891 safe_put_page(conf->tmppage); 3892 kfree(conf->mirrors); 3893 kfree(conf->mirrors_old); 3894 kfree(conf->mirrors_new); 3895 kfree(conf); 3896 } 3897 3898 static void raid10_quiesce(struct mddev *mddev, int state) 3899 { 3900 struct r10conf *conf = mddev->private; 3901 3902 switch(state) { 3903 case 1: 3904 raise_barrier(conf, 0); 3905 break; 3906 case 0: 3907 lower_barrier(conf); 3908 break; 3909 } 3910 } 3911 3912 static int raid10_resize(struct mddev *mddev, sector_t sectors) 3913 { 3914 /* Resize of 'far' arrays is not supported. 3915 * For 'near' and 'offset' arrays we can set the 3916 * number of sectors used to be an appropriate multiple 3917 * of the chunk size. 3918 * For 'offset', this is far_copies*chunksize. 3919 * For 'near' the multiplier is the LCM of 3920 * near_copies and raid_disks. 3921 * So if far_copies > 1 && !far_offset, fail. 3922 * Else find LCM(raid_disks, near_copy)*far_copies and 3923 * multiply by chunk_size. Then round to this number. 
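 * For example (illustrative numbers, following the rule above): with
 * raid_disks=5, near_copies=2, far_copies=1 and a 1024-sector (512KiB)
 * chunk, the usable size is rounded to a multiple of
 * LCM(5, 2) * 1 * 1024 = 10240 sectors.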
3924 * This is mostly done by raid10_size() 3925 */ 3926 struct r10conf *conf = mddev->private; 3927 sector_t oldsize, size; 3928 3929 if (mddev->reshape_position != MaxSector) 3930 return -EBUSY; 3931 3932 if (conf->geo.far_copies > 1 && !conf->geo.far_offset) 3933 return -EINVAL; 3934 3935 oldsize = raid10_size(mddev, 0, 0); 3936 size = raid10_size(mddev, sectors, 0); 3937 if (mddev->external_size && 3938 mddev->array_sectors > size) 3939 return -EINVAL; 3940 if (mddev->bitmap) { 3941 int ret = bitmap_resize(mddev->bitmap, size, 0, 0); 3942 if (ret) 3943 return ret; 3944 } 3945 md_set_array_sectors(mddev, size); 3946 if (mddev->queue) { 3947 set_capacity(mddev->gendisk, mddev->array_sectors); 3948 revalidate_disk(mddev->gendisk); 3949 } 3950 if (sectors > mddev->dev_sectors && 3951 mddev->recovery_cp > oldsize) { 3952 mddev->recovery_cp = oldsize; 3953 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3954 } 3955 calc_sectors(conf, sectors); 3956 mddev->dev_sectors = conf->dev_sectors; 3957 mddev->resync_max_sectors = size; 3958 return 0; 3959 } 3960 3961 static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs) 3962 { 3963 struct md_rdev *rdev; 3964 struct r10conf *conf; 3965 3966 if (mddev->degraded > 0) { 3967 pr_warn("md/raid10:%s: Error: degraded raid0!\n", 3968 mdname(mddev)); 3969 return ERR_PTR(-EINVAL); 3970 } 3971 sector_div(size, devs); 3972 3973 /* Set new parameters */ 3974 mddev->new_level = 10; 3975 /* new layout: far_copies = 1, near_copies = 2 */ 3976 mddev->new_layout = (1<<8) + 2; 3977 mddev->new_chunk_sectors = mddev->chunk_sectors; 3978 mddev->delta_disks = mddev->raid_disks; 3979 mddev->raid_disks *= 2; 3980 /* make sure it will be not marked as dirty */ 3981 mddev->recovery_cp = MaxSector; 3982 mddev->dev_sectors = size; 3983 3984 conf = setup_conf(mddev); 3985 if (!IS_ERR(conf)) { 3986 rdev_for_each(rdev, mddev) 3987 if (rdev->raid_disk >= 0) { 3988 rdev->new_raid_disk = rdev->raid_disk * 2; 3989 rdev->sectors = size; 3990 } 3991 conf->barrier = 1; 3992 } 3993 3994 return conf; 3995 } 3996 3997 static void *raid10_takeover(struct mddev *mddev) 3998 { 3999 struct r0conf *raid0_conf; 4000 4001 /* raid10 can take over: 4002 * raid0 - providing it has only two drives 4003 */ 4004 if (mddev->level == 0) { 4005 /* for raid0 takeover only one zone is supported */ 4006 raid0_conf = mddev->private; 4007 if (raid0_conf->nr_strip_zones > 1) { 4008 pr_warn("md/raid10:%s: cannot takeover raid 0 with more than one zone.\n", 4009 mdname(mddev)); 4010 return ERR_PTR(-EINVAL); 4011 } 4012 return raid10_takeover_raid0(mddev, 4013 raid0_conf->strip_zone->zone_end, 4014 raid0_conf->strip_zone->nb_dev); 4015 } 4016 return ERR_PTR(-EINVAL); 4017 } 4018 4019 static int raid10_check_reshape(struct mddev *mddev) 4020 { 4021 /* Called when there is a request to change 4022 * - layout (to ->new_layout) 4023 * - chunk size (to ->new_chunk_sectors) 4024 * - raid_disks (by delta_disks) 4025 * or when trying to restart a reshape that was ongoing. 4026 * 4027 * We need to validate the request and possibly allocate 4028 * space if that might be an issue later. 4029 * 4030 * Currently we reject any reshape of a 'far' mode array, 4031 * allow chunk size to change if new is generally acceptable, 4032 * allow raid_disks to increase, and allow 4033 * a switch between 'near' mode and 'offset' mode. 
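 * For example, a 4-device near-2 array may be grown to 6 devices or
 * switched to an offset-2 layout, but a request that changes the number
 * of copies, or that asks for far_copies > 1 without far_offset, is
 * rejected with -EINVAL.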
4034 */ 4035 struct r10conf *conf = mddev->private; 4036 struct geom geo; 4037 4038 if (conf->geo.far_copies != 1 && !conf->geo.far_offset) 4039 return -EINVAL; 4040 4041 if (setup_geo(&geo, mddev, geo_start) != conf->copies) 4042 /* mustn't change number of copies */ 4043 return -EINVAL; 4044 if (geo.far_copies > 1 && !geo.far_offset) 4045 /* Cannot switch to 'far' mode */ 4046 return -EINVAL; 4047 4048 if (mddev->array_sectors & geo.chunk_mask) 4049 /* not factor of array size */ 4050 return -EINVAL; 4051 4052 if (!enough(conf, -1)) 4053 return -EINVAL; 4054 4055 kfree(conf->mirrors_new); 4056 conf->mirrors_new = NULL; 4057 if (mddev->delta_disks > 0) { 4058 /* allocate new 'mirrors' list */ 4059 conf->mirrors_new = kzalloc( 4060 sizeof(struct raid10_info) 4061 *(mddev->raid_disks + 4062 mddev->delta_disks), 4063 GFP_KERNEL); 4064 if (!conf->mirrors_new) 4065 return -ENOMEM; 4066 } 4067 return 0; 4068 } 4069 4070 /* 4071 * Need to check if array has failed when deciding whether to: 4072 * - start an array 4073 * - remove non-faulty devices 4074 * - add a spare 4075 * - allow a reshape 4076 * This determination is simple when no reshape is happening. 4077 * However if there is a reshape, we need to carefully check 4078 * both the before and after sections. 4079 * This is because some failed devices may only affect one 4080 * of the two sections, and some non-in_sync devices may 4081 * be insync in the section most affected by failed devices. 4082 */ 4083 static int calc_degraded(struct r10conf *conf) 4084 { 4085 int degraded, degraded2; 4086 int i; 4087 4088 rcu_read_lock(); 4089 degraded = 0; 4090 /* 'prev' section first */ 4091 for (i = 0; i < conf->prev.raid_disks; i++) { 4092 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); 4093 if (!rdev || test_bit(Faulty, &rdev->flags)) 4094 degraded++; 4095 else if (!test_bit(In_sync, &rdev->flags)) 4096 /* When we can reduce the number of devices in 4097 * an array, this might not contribute to 4098 * 'degraded'. It does now. 4099 */ 4100 degraded++; 4101 } 4102 rcu_read_unlock(); 4103 if (conf->geo.raid_disks == conf->prev.raid_disks) 4104 return degraded; 4105 rcu_read_lock(); 4106 degraded2 = 0; 4107 for (i = 0; i < conf->geo.raid_disks; i++) { 4108 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); 4109 if (!rdev || test_bit(Faulty, &rdev->flags)) 4110 degraded2++; 4111 else if (!test_bit(In_sync, &rdev->flags)) { 4112 /* If reshape is increasing the number of devices, 4113 * this section has already been recovered, so 4114 * it doesn't contribute to degraded. 4115 * else it does. 4116 */ 4117 if (conf->geo.raid_disks <= conf->prev.raid_disks) 4118 degraded2++; 4119 } 4120 } 4121 rcu_read_unlock(); 4122 if (degraded2 > degraded) 4123 return degraded2; 4124 return degraded; 4125 } 4126 4127 static int raid10_start_reshape(struct mddev *mddev) 4128 { 4129 /* A 'reshape' has been requested. This commits 4130 * the various 'new' fields and sets MD_RECOVER_RESHAPE 4131 * This also checks if there are enough spares and adds them 4132 * to the array. 4133 * We currently require enough spares to make the final 4134 * array non-degraded. We also require that the difference 4135 * between old and new data_offset - on each device - is 4136 * enough that we never risk over-writing. 
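 * Concretely, the smallest per-device change in data_offset must cover
 * at least a chunk's worth of far copies for both the old and the new
 * layout (the before_length/after_length check below); otherwise the
 * request is rejected with -EINVAL.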
4137 */ 4138 4139 unsigned long before_length, after_length; 4140 sector_t min_offset_diff = 0; 4141 int first = 1; 4142 struct geom new; 4143 struct r10conf *conf = mddev->private; 4144 struct md_rdev *rdev; 4145 int spares = 0; 4146 int ret; 4147 4148 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4149 return -EBUSY; 4150 4151 if (setup_geo(&new, mddev, geo_start) != conf->copies) 4152 return -EINVAL; 4153 4154 before_length = ((1 << conf->prev.chunk_shift) * 4155 conf->prev.far_copies); 4156 after_length = ((1 << conf->geo.chunk_shift) * 4157 conf->geo.far_copies); 4158 4159 rdev_for_each(rdev, mddev) { 4160 if (!test_bit(In_sync, &rdev->flags) 4161 && !test_bit(Faulty, &rdev->flags)) 4162 spares++; 4163 if (rdev->raid_disk >= 0) { 4164 long long diff = (rdev->new_data_offset 4165 - rdev->data_offset); 4166 if (!mddev->reshape_backwards) 4167 diff = -diff; 4168 if (diff < 0) 4169 diff = 0; 4170 if (first || diff < min_offset_diff) 4171 min_offset_diff = diff; first = 0; 4172 } 4173 } 4174 4175 if (max(before_length, after_length) > min_offset_diff) 4176 return -EINVAL; 4177 4178 if (spares < mddev->delta_disks) 4179 return -EINVAL; 4180 4181 conf->offset_diff = min_offset_diff; 4182 spin_lock_irq(&conf->device_lock); 4183 if (conf->mirrors_new) { 4184 memcpy(conf->mirrors_new, conf->mirrors, 4185 sizeof(struct raid10_info)*conf->prev.raid_disks); 4186 smp_mb(); 4187 kfree(conf->mirrors_old); 4188 conf->mirrors_old = conf->mirrors; 4189 conf->mirrors = conf->mirrors_new; 4190 conf->mirrors_new = NULL; 4191 } 4192 setup_geo(&conf->geo, mddev, geo_start); 4193 smp_mb(); 4194 if (mddev->reshape_backwards) { 4195 sector_t size = raid10_size(mddev, 0, 0); 4196 if (size < mddev->array_sectors) { 4197 spin_unlock_irq(&conf->device_lock); 4198 pr_warn("md/raid10:%s: array size must be reduced before the number of disks\n", 4199 mdname(mddev)); 4200 return -EINVAL; 4201 } 4202 mddev->resync_max_sectors = size; 4203 conf->reshape_progress = size; 4204 } else 4205 conf->reshape_progress = 0; 4206 conf->reshape_safe = conf->reshape_progress; 4207 spin_unlock_irq(&conf->device_lock); 4208 4209 if (mddev->delta_disks && mddev->bitmap) { 4210 ret = bitmap_resize(mddev->bitmap, 4211 raid10_size(mddev, 0, 4212 conf->geo.raid_disks), 4213 0, 0); 4214 if (ret) 4215 goto abort; 4216 } 4217 if (mddev->delta_disks > 0) { 4218 rdev_for_each(rdev, mddev) 4219 if (rdev->raid_disk < 0 && 4220 !test_bit(Faulty, &rdev->flags)) { 4221 if (raid10_add_disk(mddev, rdev) == 0) { 4222 if (rdev->raid_disk >= 4223 conf->prev.raid_disks) 4224 set_bit(In_sync, &rdev->flags); 4225 else 4226 rdev->recovery_offset = 0; 4227 4228 if (sysfs_link_rdev(mddev, rdev)) 4229 /* Failure here is OK */; 4230 } 4231 } else if (rdev->raid_disk >= conf->prev.raid_disks 4232 && !test_bit(Faulty, &rdev->flags)) { 4233 /* This is a spare that was manually added */ 4234 set_bit(In_sync, &rdev->flags); 4235 } 4236 } 4237 /* When a reshape changes the number of devices, 4238 * ->degraded is measured against the larger of the 4239 * pre and post numbers.
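 * calc_degraded() above walks both the old and the new device ranges
 * and returns whichever count is worse.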
4240 */ 4241 spin_lock_irq(&conf->device_lock); 4242 mddev->degraded = calc_degraded(conf); 4243 spin_unlock_irq(&conf->device_lock); 4244 mddev->raid_disks = conf->geo.raid_disks; 4245 mddev->reshape_position = conf->reshape_progress; 4246 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 4247 4248 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4249 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 4250 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); 4251 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 4252 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 4253 4254 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 4255 "reshape"); 4256 if (!mddev->sync_thread) { 4257 ret = -EAGAIN; 4258 goto abort; 4259 } 4260 conf->reshape_checkpoint = jiffies; 4261 md_wakeup_thread(mddev->sync_thread); 4262 md_new_event(mddev); 4263 return 0; 4264 4265 abort: 4266 mddev->recovery = 0; 4267 spin_lock_irq(&conf->device_lock); 4268 conf->geo = conf->prev; 4269 mddev->raid_disks = conf->geo.raid_disks; 4270 rdev_for_each(rdev, mddev) 4271 rdev->new_data_offset = rdev->data_offset; 4272 smp_wmb(); 4273 conf->reshape_progress = MaxSector; 4274 conf->reshape_safe = MaxSector; 4275 mddev->reshape_position = MaxSector; 4276 spin_unlock_irq(&conf->device_lock); 4277 return ret; 4278 } 4279 4280 /* Calculate the last device-address that could contain 4281 * any block from the chunk that includes the array-address 's' 4282 * and report the next address. 4283 * i.e. the address returned will be chunk-aligned and after 4284 * any data that is in the chunk containing 's'. 4285 */ 4286 static sector_t last_dev_address(sector_t s, struct geom *geo) 4287 { 4288 s = (s | geo->chunk_mask) + 1; 4289 s >>= geo->chunk_shift; 4290 s *= geo->near_copies; 4291 s = DIV_ROUND_UP_SECTOR_T(s, geo->raid_disks); 4292 s *= geo->far_copies; 4293 s <<= geo->chunk_shift; 4294 return s; 4295 } 4296 4297 /* Calculate the first device-address that could contain 4298 * any block from the chunk that includes the array-address 's'. 4299 * This too will be the start of a chunk 4300 */ 4301 static sector_t first_dev_address(sector_t s, struct geom *geo) 4302 { 4303 s >>= geo->chunk_shift; 4304 s *= geo->near_copies; 4305 sector_div(s, geo->raid_disks); 4306 s *= geo->far_copies; 4307 s <<= geo->chunk_shift; 4308 return s; 4309 } 4310 4311 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, 4312 int *skipped) 4313 { 4314 /* We simply copy at most one chunk (smallest of old and new) 4315 * at a time, possibly less if that exceeds RESYNC_PAGES, 4316 * or we hit a bad block or something. 4317 * This might mean we pause for normal IO in the middle of 4318 * a chunk, but that is not a problem as mddev->reshape_position 4319 * can record any location. 4320 * 4321 * If we will want to write to a location that isn't 4322 * yet recorded as 'safe' (i.e. in metadata on disk) then 4323 * we need to flush all reshape requests and update the metadata. 4324 * 4325 * When reshaping forwards (e.g. to more devices), we interpret 4326 * 'safe' as the earliest block which might not have been copied 4327 * down yet. We divide this by previous stripe size and multiply 4328 * by previous stripe length to get lowest device offset that we 4329 * cannot write to yet. 4330 * We interpret 'sector_nr' as an address that we want to write to. 4331 * From this we use last_device_address() to find where we might 4332 * write to, and first_device_address on the 'safe' position. 
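 * (Worked example with made-up numbers: 4 devices, near_copies=2,
 * far_copies=1, 64-sector chunks. For array address 1000,
 * last_dev_address() rounds up to the chunk boundary at 1024, i.e.
 * 16 chunks, doubles that for the two near copies, spreads the 32
 * copies over 4 devices rounding up to 8 chunks per device, and
 * returns 8 * 64 = 512; first_dev_address() rounds down instead,
 * giving 15 chunks -> 30 copies -> 7 per device -> 448 sectors.)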
* If this 'next' write position is after the 'safe' position, 4334 * we must update the metadata to increase the 'safe' position. 4335 * 4336 * When reshaping backwards, we round in the opposite direction 4337 * and perform the reverse test: next write position must not be 4338 * less than current safe position. 4339 * 4340 * In all this, the minimum difference in data offsets 4341 * (conf->offset_diff - always positive) allows a bit of slack, 4342 * so next can be after 'safe', but not by more than offset_diff. 4343 * 4344 * We need to prepare all the bios here before we start any IO 4345 * to ensure the size we choose is acceptable to all devices. 4346 * That means one for each copy for write-out and an extra one for 4347 * read-in. 4348 * We store the read-in bio in ->master_bio and the others in 4349 * ->devs[x].bio and ->devs[x].repl_bio. 4350 */ 4351 struct r10conf *conf = mddev->private; 4352 struct r10bio *r10_bio; 4353 sector_t next, safe, last; 4354 int max_sectors; 4355 int nr_sectors; 4356 int s; 4357 struct md_rdev *rdev; 4358 int need_flush = 0; 4359 struct bio *blist; 4360 struct bio *bio, *read_bio; 4361 int sectors_done = 0; 4362 4363 if (sector_nr == 0) { 4364 /* If restarting in the middle, skip the initial sectors */ 4365 if (mddev->reshape_backwards && 4366 conf->reshape_progress < raid10_size(mddev, 0, 0)) { 4367 sector_nr = (raid10_size(mddev, 0, 0) 4368 - conf->reshape_progress); 4369 } else if (!mddev->reshape_backwards && 4370 conf->reshape_progress > 0) 4371 sector_nr = conf->reshape_progress; 4372 if (sector_nr) { 4373 mddev->curr_resync_completed = sector_nr; 4374 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 4375 *skipped = 1; 4376 return sector_nr; 4377 } 4378 } 4379 4380 /* We don't use sector_nr to track where we are up to 4381 * as that doesn't work well for ->reshape_backwards. 4382 * So just use ->reshape_progress.
4383 */ 4384 if (mddev->reshape_backwards) { 4385 /* 'next' is the earliest device address that we might 4386 * write to for this chunk in the new layout 4387 */ 4388 next = first_dev_address(conf->reshape_progress - 1, 4389 &conf->geo); 4390 4391 /* 'safe' is the last device address that we might read from 4392 * in the old layout after a restart 4393 */ 4394 safe = last_dev_address(conf->reshape_safe - 1, 4395 &conf->prev); 4396 4397 if (next + conf->offset_diff < safe) 4398 need_flush = 1; 4399 4400 last = conf->reshape_progress - 1; 4401 sector_nr = last & ~(sector_t)(conf->geo.chunk_mask 4402 & conf->prev.chunk_mask); 4403 if (sector_nr + RESYNC_BLOCK_SIZE/512 < last) 4404 sector_nr = last + 1 - RESYNC_BLOCK_SIZE/512; 4405 } else { 4406 /* 'next' is after the last device address that we 4407 * might write to for this chunk in the new layout 4408 */ 4409 next = last_dev_address(conf->reshape_progress, &conf->geo); 4410 4411 /* 'safe' is the earliest device address that we might 4412 * read from in the old layout after a restart 4413 */ 4414 safe = first_dev_address(conf->reshape_safe, &conf->prev); 4415 4416 /* Need to update metadata if 'next' might be beyond 'safe' 4417 * as that would possibly corrupt data 4418 */ 4419 if (next > safe + conf->offset_diff) 4420 need_flush = 1; 4421 4422 sector_nr = conf->reshape_progress; 4423 last = sector_nr | (conf->geo.chunk_mask 4424 & conf->prev.chunk_mask); 4425 4426 if (sector_nr + RESYNC_BLOCK_SIZE/512 <= last) 4427 last = sector_nr + RESYNC_BLOCK_SIZE/512 - 1; 4428 } 4429 4430 if (need_flush || 4431 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { 4432 /* Need to update reshape_position in metadata */ 4433 wait_barrier(conf); 4434 mddev->reshape_position = conf->reshape_progress; 4435 if (mddev->reshape_backwards) 4436 mddev->curr_resync_completed = raid10_size(mddev, 0, 0) 4437 - conf->reshape_progress; 4438 else 4439 mddev->curr_resync_completed = conf->reshape_progress; 4440 conf->reshape_checkpoint = jiffies; 4441 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 4442 md_wakeup_thread(mddev->thread); 4443 wait_event(mddev->sb_wait, mddev->sb_flags == 0 || 4444 test_bit(MD_RECOVERY_INTR, &mddev->recovery)); 4445 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 4446 allow_barrier(conf); 4447 return sectors_done; 4448 } 4449 conf->reshape_safe = mddev->reshape_position; 4450 allow_barrier(conf); 4451 } 4452 4453 read_more: 4454 /* Now schedule reads for blocks from sector_nr to last */ 4455 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); 4456 r10_bio->state = 0; 4457 raise_barrier(conf, sectors_done != 0); 4458 atomic_set(&r10_bio->remaining, 0); 4459 r10_bio->mddev = mddev; 4460 r10_bio->sector = sector_nr; 4461 set_bit(R10BIO_IsReshape, &r10_bio->state); 4462 r10_bio->sectors = last - sector_nr + 1; 4463 rdev = read_balance(conf, r10_bio, &max_sectors); 4464 BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state)); 4465 4466 if (!rdev) { 4467 /* Cannot read from here, so need to record bad blocks 4468 * on all the target devices. 
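 * Recording those bad blocks is not implemented yet (see the FIXME
 * below); for now the reshape is simply marked interrupted with
 * MD_RECOVERY_INTR and the request is freed.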
4469 */ 4470 // FIXME 4471 mempool_free(r10_bio, conf->r10buf_pool); 4472 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4473 return sectors_done; 4474 } 4475 4476 read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev); 4477 4478 read_bio->bi_bdev = rdev->bdev; 4479 read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr 4480 + rdev->data_offset); 4481 read_bio->bi_private = r10_bio; 4482 read_bio->bi_end_io = end_sync_read; 4483 bio_set_op_attrs(read_bio, REQ_OP_READ, 0); 4484 read_bio->bi_flags &= (~0UL << BIO_RESET_BITS); 4485 read_bio->bi_error = 0; 4486 read_bio->bi_vcnt = 0; 4487 read_bio->bi_iter.bi_size = 0; 4488 r10_bio->master_bio = read_bio; 4489 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum; 4490 4491 /* Now find the locations in the new layout */ 4492 __raid10_find_phys(&conf->geo, r10_bio); 4493 4494 blist = read_bio; 4495 read_bio->bi_next = NULL; 4496 4497 rcu_read_lock(); 4498 for (s = 0; s < conf->copies*2; s++) { 4499 struct bio *b; 4500 int d = r10_bio->devs[s/2].devnum; 4501 struct md_rdev *rdev2; 4502 if (s&1) { 4503 rdev2 = rcu_dereference(conf->mirrors[d].replacement); 4504 b = r10_bio->devs[s/2].repl_bio; 4505 } else { 4506 rdev2 = rcu_dereference(conf->mirrors[d].rdev); 4507 b = r10_bio->devs[s/2].bio; 4508 } 4509 if (!rdev2 || test_bit(Faulty, &rdev2->flags)) 4510 continue; 4511 4512 bio_reset(b); 4513 b->bi_bdev = rdev2->bdev; 4514 b->bi_iter.bi_sector = r10_bio->devs[s/2].addr + 4515 rdev2->new_data_offset; 4516 b->bi_private = r10_bio; 4517 b->bi_end_io = end_reshape_write; 4518 bio_set_op_attrs(b, REQ_OP_WRITE, 0); 4519 b->bi_next = blist; 4520 blist = b; 4521 } 4522 4523 /* Now add as many pages as possible to all of these bios. */ 4524 4525 nr_sectors = 0; 4526 for (s = 0 ; s < max_sectors; s += PAGE_SIZE >> 9) { 4527 struct page *page = r10_bio->devs[0].bio->bi_io_vec[s/(PAGE_SIZE>>9)].bv_page; 4528 int len = (max_sectors - s) << 9; 4529 if (len > PAGE_SIZE) 4530 len = PAGE_SIZE; 4531 for (bio = blist; bio ; bio = bio->bi_next) { 4532 struct bio *bio2; 4533 if (bio_add_page(bio, page, len, 0)) 4534 continue; 4535 4536 /* Didn't fit, must stop */ 4537 for (bio2 = blist; 4538 bio2 && bio2 != bio; 4539 bio2 = bio2->bi_next) { 4540 /* Remove last page from this bio */ 4541 bio2->bi_vcnt--; 4542 bio2->bi_iter.bi_size -= len; 4543 bio_clear_flag(bio2, BIO_SEG_VALID); 4544 } 4545 goto bio_full; 4546 } 4547 sector_nr += len >> 9; 4548 nr_sectors += len >> 9; 4549 } 4550 bio_full: 4551 rcu_read_unlock(); 4552 r10_bio->sectors = nr_sectors; 4553 4554 /* Now submit the read */ 4555 md_sync_acct(read_bio->bi_bdev, r10_bio->sectors); 4556 atomic_inc(&r10_bio->remaining); 4557 read_bio->bi_next = NULL; 4558 generic_make_request(read_bio); 4559 sector_nr += nr_sectors; 4560 sectors_done += nr_sectors; 4561 if (sector_nr <= last) 4562 goto read_more; 4563 4564 /* Now that we have done the whole section we can 4565 * update reshape_progress 4566 */ 4567 if (mddev->reshape_backwards) 4568 conf->reshape_progress -= sectors_done; 4569 else 4570 conf->reshape_progress += sectors_done; 4571 4572 return sectors_done; 4573 } 4574 4575 static void end_reshape_request(struct r10bio *r10_bio); 4576 static int handle_reshape_read_error(struct mddev *mddev, 4577 struct r10bio *r10_bio); 4578 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio) 4579 { 4580 /* Reshape read completed. Hopefully we have a block 4581 * to write out. 
4582 * If we got a read error then we do sync 1-page reads from 4583 * elsewhere until we find the data - or give up. 4584 */ 4585 struct r10conf *conf = mddev->private; 4586 int s; 4587 4588 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) 4589 if (handle_reshape_read_error(mddev, r10_bio) < 0) { 4590 /* Reshape has been aborted */ 4591 md_done_sync(mddev, r10_bio->sectors, 0); 4592 return; 4593 } 4594 4595 /* We definitely have the data in the pages, schedule the 4596 * writes. 4597 */ 4598 atomic_set(&r10_bio->remaining, 1); 4599 for (s = 0; s < conf->copies*2; s++) { 4600 struct bio *b; 4601 int d = r10_bio->devs[s/2].devnum; 4602 struct md_rdev *rdev; 4603 rcu_read_lock(); 4604 if (s&1) { 4605 rdev = rcu_dereference(conf->mirrors[d].replacement); 4606 b = r10_bio->devs[s/2].repl_bio; 4607 } else { 4608 rdev = rcu_dereference(conf->mirrors[d].rdev); 4609 b = r10_bio->devs[s/2].bio; 4610 } 4611 if (!rdev || test_bit(Faulty, &rdev->flags)) { 4612 rcu_read_unlock(); 4613 continue; 4614 } 4615 atomic_inc(&rdev->nr_pending); 4616 rcu_read_unlock(); 4617 md_sync_acct(b->bi_bdev, r10_bio->sectors); 4618 atomic_inc(&r10_bio->remaining); 4619 b->bi_next = NULL; 4620 generic_make_request(b); 4621 } 4622 end_reshape_request(r10_bio); 4623 } 4624 4625 static void end_reshape(struct r10conf *conf) 4626 { 4627 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) 4628 return; 4629 4630 spin_lock_irq(&conf->device_lock); 4631 conf->prev = conf->geo; 4632 md_finish_reshape(conf->mddev); 4633 smp_wmb(); 4634 conf->reshape_progress = MaxSector; 4635 conf->reshape_safe = MaxSector; 4636 spin_unlock_irq(&conf->device_lock); 4637 4638 /* read-ahead size must cover two whole stripes, which is 4639 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices 4640 */ 4641 if (conf->mddev->queue) { 4642 int stripe = conf->geo.raid_disks * 4643 ((conf->mddev->chunk_sectors << 9) / PAGE_SIZE); 4644 stripe /= conf->geo.near_copies; 4645 if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe) 4646 conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe; 4647 } 4648 conf->fullsync = 0; 4649 } 4650 4651 static int handle_reshape_read_error(struct mddev *mddev, 4652 struct r10bio *r10_bio) 4653 { 4654 /* Use sync reads to get the blocks from somewhere else */ 4655 int sectors = r10_bio->sectors; 4656 struct r10conf *conf = mddev->private; 4657 struct { 4658 struct r10bio r10_bio; 4659 struct r10dev devs[conf->copies]; 4660 } on_stack; 4661 struct r10bio *r10b = &on_stack.r10_bio; 4662 int slot = 0; 4663 int idx = 0; 4664 struct bio_vec *bvec = r10_bio->master_bio->bi_io_vec; 4665 4666 r10b->sector = r10_bio->sector; 4667 __raid10_find_phys(&conf->prev, r10b); 4668 4669 while (sectors) { 4670 int s = sectors; 4671 int success = 0; 4672 int first_slot = slot; 4673 4674 if (s > (PAGE_SIZE >> 9)) 4675 s = PAGE_SIZE >> 9; 4676 4677 rcu_read_lock(); 4678 while (!success) { 4679 int d = r10b->devs[slot].devnum; 4680 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev); 4681 sector_t addr; 4682 if (rdev == NULL || 4683 test_bit(Faulty, &rdev->flags) || 4684 !test_bit(In_sync, &rdev->flags)) 4685 goto failed; 4686 4687 addr = r10b->devs[slot].addr + idx * PAGE_SIZE; 4688 atomic_inc(&rdev->nr_pending); 4689 rcu_read_unlock(); 4690 success = sync_page_io(rdev, 4691 addr, 4692 s << 9, 4693 bvec[idx].bv_page, 4694 REQ_OP_READ, 0, false); 4695 rdev_dec_pending(rdev, mddev); 4696 rcu_read_lock(); 4697 if (success) 4698 break; 4699 failed: 4700 slot++; 4701 if (slot >= conf->copies) 4702 slot = 0; 
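/* stop once every copy in the old layout has been tried */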
4703 if (slot == first_slot) 4704 break; 4705 } 4706 rcu_read_unlock(); 4707 if (!success) { 4708 /* couldn't read this block, must give up */ 4709 set_bit(MD_RECOVERY_INTR, 4710 &mddev->recovery); 4711 return -EIO; 4712 } 4713 sectors -= s; 4714 idx++; 4715 } 4716 return 0; 4717 } 4718 4719 static void end_reshape_write(struct bio *bio) 4720 { 4721 struct r10bio *r10_bio = bio->bi_private; 4722 struct mddev *mddev = r10_bio->mddev; 4723 struct r10conf *conf = mddev->private; 4724 int d; 4725 int slot; 4726 int repl; 4727 struct md_rdev *rdev = NULL; 4728 4729 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); 4730 if (repl) 4731 rdev = conf->mirrors[d].replacement; 4732 if (!rdev) { 4733 smp_mb(); 4734 rdev = conf->mirrors[d].rdev; 4735 } 4736 4737 if (bio->bi_error) { 4738 /* FIXME should record badblock */ 4739 md_error(mddev, rdev); 4740 } 4741 4742 rdev_dec_pending(rdev, mddev); 4743 end_reshape_request(r10_bio); 4744 } 4745 4746 static void end_reshape_request(struct r10bio *r10_bio) 4747 { 4748 if (!atomic_dec_and_test(&r10_bio->remaining)) 4749 return; 4750 md_done_sync(r10_bio->mddev, r10_bio->sectors, 1); 4751 bio_put(r10_bio->master_bio); 4752 put_buf(r10_bio); 4753 } 4754 4755 static void raid10_finish_reshape(struct mddev *mddev) 4756 { 4757 struct r10conf *conf = mddev->private; 4758 4759 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 4760 return; 4761 4762 if (mddev->delta_disks > 0) { 4763 sector_t size = raid10_size(mddev, 0, 0); 4764 md_set_array_sectors(mddev, size); 4765 if (mddev->recovery_cp > mddev->resync_max_sectors) { 4766 mddev->recovery_cp = mddev->resync_max_sectors; 4767 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4768 } 4769 mddev->resync_max_sectors = size; 4770 if (mddev->queue) { 4771 set_capacity(mddev->gendisk, mddev->array_sectors); 4772 revalidate_disk(mddev->gendisk); 4773 } 4774 } else { 4775 int d; 4776 rcu_read_lock(); 4777 for (d = conf->geo.raid_disks ; 4778 d < conf->geo.raid_disks - mddev->delta_disks; 4779 d++) { 4780 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev); 4781 if (rdev) 4782 clear_bit(In_sync, &rdev->flags); 4783 rdev = rcu_dereference(conf->mirrors[d].replacement); 4784 if (rdev) 4785 clear_bit(In_sync, &rdev->flags); 4786 } 4787 rcu_read_unlock(); 4788 } 4789 mddev->layout = mddev->new_layout; 4790 mddev->chunk_sectors = 1 << conf->geo.chunk_shift; 4791 mddev->reshape_position = MaxSector; 4792 mddev->delta_disks = 0; 4793 mddev->reshape_backwards = 0; 4794 } 4795 4796 static struct md_personality raid10_personality = 4797 { 4798 .name = "raid10", 4799 .level = 10, 4800 .owner = THIS_MODULE, 4801 .make_request = raid10_make_request, 4802 .run = raid10_run, 4803 .free = raid10_free, 4804 .status = raid10_status, 4805 .error_handler = raid10_error, 4806 .hot_add_disk = raid10_add_disk, 4807 .hot_remove_disk= raid10_remove_disk, 4808 .spare_active = raid10_spare_active, 4809 .sync_request = raid10_sync_request, 4810 .quiesce = raid10_quiesce, 4811 .size = raid10_size, 4812 .resize = raid10_resize, 4813 .takeover = raid10_takeover, 4814 .check_reshape = raid10_check_reshape, 4815 .start_reshape = raid10_start_reshape, 4816 .finish_reshape = raid10_finish_reshape, 4817 .congested = raid10_congested, 4818 }; 4819 4820 static int __init raid_init(void) 4821 { 4822 return register_md_personality(&raid10_personality); 4823 } 4824 4825 static void raid_exit(void) 4826 { 4827 unregister_md_personality(&raid10_personality); 4828 } 4829 4830 module_init(raid_init); 4831 module_exit(raid_exit); 4832 MODULE_LICENSE("GPL"); 
4833 MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD"); 4834 MODULE_ALIAS("md-personality-9"); /* RAID10 */ 4835 MODULE_ALIAS("md-raid10"); 4836 MODULE_ALIAS("md-level-10"); 4837 4838 module_param(max_queued_requests, int, S_IRUGO|S_IWUSR); 4839