/*
 * raid10.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 2000-2004 Neil Brown
 *
 * RAID-10 support for md.
 *
 * Based on code in raid1.c.  See raid1.c for further copyright information.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include "md.h"
#include "raid10.h"
#include "raid0.h"
#include "bitmap.h"

/*
 * RAID10 provides a combination of RAID0 and RAID1 functionality.
 * The layout of data is defined by
 *    chunk_size
 *    raid_disks
 *    near_copies (stored in low byte of layout)
 *    far_copies (stored in second byte of layout)
 *    far_offset (stored in bit 16 of layout)
 *
 * The data to be stored is divided into chunks using chunksize.
 * Each device is divided into far_copies sections.
 * In each section, chunks are laid out in a style similar to raid0, but
 * near_copies copies of each chunk are stored (each on a different drive).
 * The starting device for each section is offset near_copies from the starting
 * device of the previous section.
 * Thus there are (near_copies*far_copies) copies of each chunk, and each is
 * on a different drive.
 * near_copies and far_copies must be at least one, and their product is at most
 * raid_disks.
 *
 * If far_offset is true, then the far_copies are handled a bit differently.
 * The copies are still in different stripes, but instead of being very far
 * apart on disk, they are in adjacent stripes.
 */

/*
 * Number of guaranteed r10bios in case of extreme VM load:
 */
#define NR_RAID10_BIOS 256

static void allow_barrier(conf_t *conf);
static void lower_barrier(conf_t *conf);

static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
{
        conf_t *conf = data;
        int size = offsetof(struct r10bio_s, devs[conf->copies]);

        /* allocate a r10bio with room for raid_disks entries in the bios array */
        return kzalloc(size, gfp_flags);
}

static void r10bio_pool_free(void *r10_bio, void *data)
{
        kfree(r10_bio);
}

/* Maximum size of each resync request */
#define RESYNC_BLOCK_SIZE (64*1024)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
/* amount of memory to reserve for resync requests */
#define RESYNC_WINDOW (1024*1024)
/* maximum number of concurrent requests, memory permitting */
#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
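/*
 * For reference (added commentary, not from the original source): with a
 * 4K PAGE_SIZE these work out to RESYNC_PAGES == 64K/4K == 16 pages per
 * resync bio and RESYNC_DEPTH == 32M/64K == 512 concurrent resync requests.
 */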
/*
 * When performing a resync, we need to read and compare, so
 * we need as many pages as there are copies.
 * When performing a recovery, we need 2 bios, one for read,
 * one for write (we recover only one drive per r10buf)
 *
 */
static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
{
        conf_t *conf = data;
        struct page *page;
        r10bio_t *r10_bio;
        struct bio *bio;
        int i, j;
        int nalloc;

        r10_bio = r10bio_pool_alloc(gfp_flags, conf);
        if (!r10_bio)
                return NULL;

        if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
                nalloc = conf->copies; /* resync */
        else
                nalloc = 2; /* recovery */

        /*
         * Allocate bios.
         */
        for (j = nalloc ; j-- ; ) {
                bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
                if (!bio)
                        goto out_free_bio;
                r10_bio->devs[j].bio = bio;
        }
        /*
         * Allocate RESYNC_PAGES data pages and attach them
         * where needed.
         */
        for (j = 0 ; j < nalloc; j++) {
                bio = r10_bio->devs[j].bio;
                for (i = 0; i < RESYNC_PAGES; i++) {
                        if (j == 1 && !test_bit(MD_RECOVERY_SYNC,
                                                &conf->mddev->recovery)) {
                                /* we can share bv_page's during recovery */
                                struct bio *rbio = r10_bio->devs[0].bio;
                                page = rbio->bi_io_vec[i].bv_page;
                                get_page(page);
                        } else
                                page = alloc_page(gfp_flags);
                        if (unlikely(!page))
                                goto out_free_pages;

                        bio->bi_io_vec[i].bv_page = page;
                }
        }

        return r10_bio;

out_free_pages:
        for ( ; i > 0 ; i--)
                safe_put_page(bio->bi_io_vec[i-1].bv_page);
        while (j--)
                for (i = 0; i < RESYNC_PAGES ; i++)
                        safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
        j = -1;
out_free_bio:
        while (++j < nalloc)
                bio_put(r10_bio->devs[j].bio);
        r10bio_pool_free(r10_bio, conf);
        return NULL;
}

static void r10buf_pool_free(void *__r10_bio, void *data)
{
        int i;
        conf_t *conf = data;
        r10bio_t *r10bio = __r10_bio;
        int j;

        for (j = 0; j < conf->copies; j++) {
                struct bio *bio = r10bio->devs[j].bio;
                if (bio) {
                        for (i = 0; i < RESYNC_PAGES; i++) {
                                safe_put_page(bio->bi_io_vec[i].bv_page);
                                bio->bi_io_vec[i].bv_page = NULL;
                        }
                        bio_put(bio);
                }
        }
        r10bio_pool_free(r10bio, conf);
}

static void put_all_bios(conf_t *conf, r10bio_t *r10_bio)
{
        int i;

        for (i = 0; i < conf->copies; i++) {
                struct bio **bio = &r10_bio->devs[i].bio;
                if (!BIO_SPECIAL(*bio))
                        bio_put(*bio);
                *bio = NULL;
        }
}

static void free_r10bio(r10bio_t *r10_bio)
{
        conf_t *conf = r10_bio->mddev->private;

        put_all_bios(conf, r10_bio);
        mempool_free(r10_bio, conf->r10bio_pool);
}

static void put_buf(r10bio_t *r10_bio)
{
        conf_t *conf = r10_bio->mddev->private;

        mempool_free(r10_bio, conf->r10buf_pool);

        lower_barrier(conf);
}

static void reschedule_retry(r10bio_t *r10_bio)
{
        unsigned long flags;
        mddev_t *mddev = r10_bio->mddev;
        conf_t *conf = mddev->private;

        spin_lock_irqsave(&conf->device_lock, flags);
        list_add(&r10_bio->retry_list, &conf->retry_list);
        conf->nr_queued++;
        spin_unlock_irqrestore(&conf->device_lock, flags);

        /* wake up frozen array... */
        wake_up(&conf->wait_barrier);

        md_wakeup_thread(mddev->thread);
}
/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void raid_end_bio_io(r10bio_t *r10_bio)
{
        struct bio *bio = r10_bio->master_bio;
        int done;
        conf_t *conf = r10_bio->mddev->private;

        if (bio->bi_phys_segments) {
                unsigned long flags;
                spin_lock_irqsave(&conf->device_lock, flags);
                bio->bi_phys_segments--;
                done = (bio->bi_phys_segments == 0);
                spin_unlock_irqrestore(&conf->device_lock, flags);
        } else
                done = 1;
        if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
                clear_bit(BIO_UPTODATE, &bio->bi_flags);
        if (done) {
                bio_endio(bio, 0);
                /*
                 * Wake up any possible resync thread that waits for the device
                 * to go idle.
                 */
                allow_barrier(conf);
        }
        free_r10bio(r10_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int slot, r10bio_t *r10_bio)
{
        conf_t *conf = r10_bio->mddev->private;

        conf->mirrors[r10_bio->devs[slot].devnum].head_position =
                r10_bio->devs[slot].addr + (r10_bio->sectors);
}

/*
 * Find the disk number which triggered given bio
 */
static int find_bio_disk(conf_t *conf, r10bio_t *r10_bio,
                         struct bio *bio, int *slotp)
{
        int slot;

        for (slot = 0; slot < conf->copies; slot++)
                if (r10_bio->devs[slot].bio == bio)
                        break;

        BUG_ON(slot == conf->copies);
        update_head_pos(slot, r10_bio);

        if (slotp)
                *slotp = slot;
        return r10_bio->devs[slot].devnum;
}
static void raid10_end_read_request(struct bio *bio, int error)
{
        int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        r10bio_t *r10_bio = bio->bi_private;
        int slot, dev;
        conf_t *conf = r10_bio->mddev->private;


        slot = r10_bio->read_slot;
        dev = r10_bio->devs[slot].devnum;
        /*
         * this branch is our 'one mirror IO has finished' event handler:
         */
        update_head_pos(slot, r10_bio);

        if (uptodate) {
                /*
                 * Set R10BIO_Uptodate in our master bio, so that
                 * we will return a good error code to the higher
                 * levels even if IO on some other mirrored buffer fails.
                 *
                 * The 'master' represents the composite IO operation to
                 * user-side. So if something waits for IO, then it will
                 * wait for the 'master' bio.
                 */
                set_bit(R10BIO_Uptodate, &r10_bio->state);
                raid_end_bio_io(r10_bio);
                rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
        } else {
                /*
                 * oops, read error - keep the refcount on the rdev
                 */
                char b[BDEVNAME_SIZE];
                printk_ratelimited(KERN_ERR
                                   "md/raid10:%s: %s: rescheduling sector %llu\n",
                                   mdname(conf->mddev),
                                   bdevname(conf->mirrors[dev].rdev->bdev, b),
                                   (unsigned long long)r10_bio->sector);
                set_bit(R10BIO_ReadError, &r10_bio->state);
                reschedule_retry(r10_bio);
        }
}

static void close_write(r10bio_t *r10_bio)
{
        /* clear the bitmap if all writes complete successfully */
        bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
                        r10_bio->sectors,
                        !test_bit(R10BIO_Degraded, &r10_bio->state),
                        0);
        md_write_end(r10_bio->mddev);
}

static void one_write_done(r10bio_t *r10_bio)
{
        if (atomic_dec_and_test(&r10_bio->remaining)) {
                if (test_bit(R10BIO_WriteError, &r10_bio->state))
                        reschedule_retry(r10_bio);
                else {
                        close_write(r10_bio);
                        if (test_bit(R10BIO_MadeGood, &r10_bio->state))
                                reschedule_retry(r10_bio);
                        else
                                raid_end_bio_io(r10_bio);
                }
        }
}

static void raid10_end_write_request(struct bio *bio, int error)
{
        int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        r10bio_t *r10_bio = bio->bi_private;
        int dev;
        int dec_rdev = 1;
        conf_t *conf = r10_bio->mddev->private;
        int slot;

        dev = find_bio_disk(conf, r10_bio, bio, &slot);

        /*
         * this branch is our 'one mirror IO has finished' event handler:
         */
        if (!uptodate) {
                set_bit(WriteErrorSeen, &conf->mirrors[dev].rdev->flags);
                set_bit(R10BIO_WriteError, &r10_bio->state);
                dec_rdev = 0;
        } else {
                /*
                 * Set R10BIO_Uptodate in our master bio, so that
                 * we will return a good error code to the higher
                 * levels even if IO on some other mirrored buffer fails.
                 *
                 * The 'master' represents the composite IO operation to
                 * user-side. So if something waits for IO, then it will
                 * wait for the 'master' bio.
                 */
                sector_t first_bad;
                int bad_sectors;

                set_bit(R10BIO_Uptodate, &r10_bio->state);

                /* Maybe we can clear some bad blocks. */
                if (is_badblock(conf->mirrors[dev].rdev,
                                r10_bio->devs[slot].addr,
                                r10_bio->sectors,
                                &first_bad, &bad_sectors)) {
                        bio_put(bio);
                        r10_bio->devs[slot].bio = IO_MADE_GOOD;
                        dec_rdev = 0;
                        set_bit(R10BIO_MadeGood, &r10_bio->state);
                }
        }

        /*
         * Let's see if all mirrored write operations have finished
         * already.
         */
        one_write_done(r10_bio);
        if (dec_rdev)
                rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
}


/*
 * RAID10 layout manager
 * As well as the chunksize and raid_disks count, there are two
 * parameters: near_copies and far_copies.
 * near_copies * far_copies must be <= raid_disks.
 * Normally one of these will be 1.
 * If both are 1, we get raid0.
 * If near_copies == raid_disks, we get raid1.
 *
 * Chunks are laid out in raid0 style with near_copies copies of the
 * first chunk, followed by near_copies copies of the next chunk and
 * so on.
 * If far_copies > 1, then after 1/far_copies of the array has been assigned
 * as described above, we start again with a device offset of near_copies.
 * So we effectively have another copy of the whole array further down all
 * the drives, but with blocks on different drives.
 * With this layout, a block is never stored twice on the same device.
 *
 * raid10_find_phys finds the sector offset of a given virtual sector
 * on each device that it is on.
 *
 * raid10_find_virt does the reverse mapping, from a device and a
 * sector offset to a virtual address
 */
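/*
 * Illustrative example (added commentary, not from the original source):
 * with 4 drives, near_copies=2, far_copies=1 (the 'n2' layout), chunks
 * A,B,C,D,... are laid out as
 *
 *      disk0   disk1   disk2   disk3
 *        A       A       B       B
 *        C       C       D       D
 *
 * With near_copies=1, far_copies=2 (the 'f2' layout) each drive instead
 * holds a second section whose starting device is shifted by near_copies:
 *
 *      disk0   disk1   disk2   disk3
 *        A       B       C       D      <- first section
 *        ...
 *        D       A       B       C      <- second section (the far copies)
 */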
static void raid10_find_phys(conf_t *conf, r10bio_t *r10bio)
{
        int n, f;
        sector_t sector;
        sector_t chunk;
        sector_t stripe;
        int dev;

        int slot = 0;

        /* now calculate first sector/dev */
        chunk = r10bio->sector >> conf->chunk_shift;
        sector = r10bio->sector & conf->chunk_mask;

        chunk *= conf->near_copies;
        stripe = chunk;
        dev = sector_div(stripe, conf->raid_disks);
        if (conf->far_offset)
                stripe *= conf->far_copies;

        sector += stripe << conf->chunk_shift;

        /* and calculate all the others */
        for (n = 0; n < conf->near_copies; n++) {
                int d = dev;
                sector_t s = sector;
                r10bio->devs[slot].addr = sector;
                r10bio->devs[slot].devnum = d;
                slot++;

                for (f = 1; f < conf->far_copies; f++) {
                        d += conf->near_copies;
                        if (d >= conf->raid_disks)
                                d -= conf->raid_disks;
                        s += conf->stride;
                        r10bio->devs[slot].devnum = d;
                        r10bio->devs[slot].addr = s;
                        slot++;
                }
                dev++;
                if (dev >= conf->raid_disks) {
                        dev = 0;
                        sector += (conf->chunk_mask + 1);
                }
        }
        BUG_ON(slot != conf->copies);
}

static sector_t raid10_find_virt(conf_t *conf, sector_t sector, int dev)
{
        sector_t offset, chunk, vchunk;

        offset = sector & conf->chunk_mask;
        if (conf->far_offset) {
                int fc;
                chunk = sector >> conf->chunk_shift;
                fc = sector_div(chunk, conf->far_copies);
                dev -= fc * conf->near_copies;
                if (dev < 0)
                        dev += conf->raid_disks;
        } else {
                while (sector >= conf->stride) {
                        sector -= conf->stride;
                        if (dev < conf->near_copies)
                                dev += conf->raid_disks - conf->near_copies;
                        else
                                dev -= conf->near_copies;
                }
                chunk = sector >> conf->chunk_shift;
        }
        vchunk = chunk * conf->raid_disks + dev;
        sector_div(vchunk, conf->near_copies);
        return (vchunk << conf->chunk_shift) + offset;
}

/**
 *      raid10_mergeable_bvec -- tell bio layer if two requests can be merged
 *      @q: request queue
 *      @bvm: properties of new bio
 *      @biovec: the request that could be merged to it.
 *
 *      Return amount of bytes we can accept at this offset
 *      If near_copies == raid_disks, there are no striping issues,
 *      but in that case, the function isn't called at all.
 */
static int raid10_mergeable_bvec(struct request_queue *q,
                                 struct bvec_merge_data *bvm,
                                 struct bio_vec *biovec)
{
        mddev_t *mddev = q->queuedata;
        sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
        int max;
        unsigned int chunk_sectors = mddev->chunk_sectors;
        unsigned int bio_sectors = bvm->bi_size >> 9;

        max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
        if (max < 0)
                max = 0; /* bio_add cannot handle a negative return */
        if (max <= biovec->bv_len && bio_sectors == 0)
                return biovec->bv_len;
        else
                return max;
}
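/*
 * Worked example for raid10_mergeable_bvec() above (added commentary, not
 * from the original source): with 64K chunks (chunk_sectors == 128) and a
 * bio already holding 4 sectors at offset 120 within its chunk, max is
 * (128 - (120 + 4)) << 9 == 2048, so at most 2048 more bytes fit before
 * the request would cross a chunk boundary.
 */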
/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */

/*
 * FIXME: possibly should rethink readbalancing and do it differently
 * depending on near_copies / far_copies geometry.
 */
static int read_balance(conf_t *conf, r10bio_t *r10_bio, int *max_sectors)
{
        const sector_t this_sector = r10_bio->sector;
        int disk, slot;
        int sectors = r10_bio->sectors;
        int best_good_sectors;
        sector_t new_distance, best_dist;
        mdk_rdev_t *rdev;
        int do_balance;
        int best_slot;

        raid10_find_phys(conf, r10_bio);
        rcu_read_lock();
retry:
        sectors = r10_bio->sectors;
        best_slot = -1;
        best_dist = MaxSector;
        best_good_sectors = 0;
        do_balance = 1;
        /*
         * Check if we can balance. We can balance on the whole
         * device if no resync is going on (recovery is ok), or below
         * the resync window. We take the first readable disk when
         * above the resync window.
         */
        if (conf->mddev->recovery_cp < MaxSector
            && (this_sector + sectors >= conf->next_resync))
                do_balance = 0;

        for (slot = 0; slot < conf->copies ; slot++) {
                sector_t first_bad;
                int bad_sectors;
                sector_t dev_sector;

                if (r10_bio->devs[slot].bio == IO_BLOCKED)
                        continue;
                disk = r10_bio->devs[slot].devnum;
                rdev = rcu_dereference(conf->mirrors[disk].rdev);
                if (rdev == NULL)
                        continue;
                if (!test_bit(In_sync, &rdev->flags))
                        continue;

                dev_sector = r10_bio->devs[slot].addr;
                if (is_badblock(rdev, dev_sector, sectors,
                                &first_bad, &bad_sectors)) {
                        if (best_dist < MaxSector)
                                /* Already have a better slot */
                                continue;
                        if (first_bad <= dev_sector) {
                                /* Cannot read here. If this is the
                                 * 'primary' device, then we must not read
                                 * beyond 'bad_sectors' from another device.
                                 */
                                bad_sectors -= (dev_sector - first_bad);
                                if (!do_balance && sectors > bad_sectors)
                                        sectors = bad_sectors;
                                if (best_good_sectors > sectors)
                                        best_good_sectors = sectors;
                        } else {
                                sector_t good_sectors =
                                        first_bad - dev_sector;
                                if (good_sectors > best_good_sectors) {
                                        best_good_sectors = good_sectors;
                                        best_slot = slot;
                                }
                                if (!do_balance)
                                        /* Must read from here */
                                        break;
                        }
                        continue;
                } else
                        best_good_sectors = sectors;

                if (!do_balance)
                        break;

                /* This optimisation is debatable, and completely destroys
                 * sequential read speed for 'far copies' arrays. So only
                 * keep it for 'near' arrays, and review those later.
                 */
                if (conf->near_copies > 1 && !atomic_read(&rdev->nr_pending))
                        break;

                /* for far > 1 always use the lowest address */
                if (conf->far_copies > 1)
                        new_distance = r10_bio->devs[slot].addr;
                else
                        new_distance = abs(r10_bio->devs[slot].addr -
                                           conf->mirrors[disk].head_position);
                if (new_distance < best_dist) {
                        best_dist = new_distance;
                        best_slot = slot;
                }
        }
        if (slot == conf->copies)
                slot = best_slot;

        if (slot >= 0) {
                disk = r10_bio->devs[slot].devnum;
                rdev = rcu_dereference(conf->mirrors[disk].rdev);
                if (!rdev)
                        goto retry;
                atomic_inc(&rdev->nr_pending);
                if (test_bit(Faulty, &rdev->flags)) {
                        /* Cannot risk returning a device that failed
                         * before we inc'ed nr_pending
                         */
                        rdev_dec_pending(rdev, conf->mddev);
                        goto retry;
                }
                r10_bio->read_slot = slot;
        } else
                disk = -1;
        rcu_read_unlock();
        *max_sectors = best_good_sectors;

        return disk;
}

static int raid10_congested(void *data, int bits)
{
        mddev_t *mddev = data;
        conf_t *conf = mddev->private;
        int i, ret = 0;

        if (mddev_congested(mddev, bits))
                return 1;
        rcu_read_lock();
        for (i = 0; i < conf->raid_disks && ret == 0; i++) {
                mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
                if (rdev && !test_bit(Faulty, &rdev->flags)) {
                        struct request_queue *q = bdev_get_queue(rdev->bdev);

                        ret |= bdi_congested(&q->backing_dev_info, bits);
                }
        }
        rcu_read_unlock();
        return ret;
}

static void flush_pending_writes(conf_t *conf)
{
        /* Any writes that have been queued but are awaiting
         * bitmap updates get flushed here.
         */
        spin_lock_irq(&conf->device_lock);

        if (conf->pending_bio_list.head) {
                struct bio *bio;
                bio = bio_list_get(&conf->pending_bio_list);
                spin_unlock_irq(&conf->device_lock);
                /* flush any pending bitmap writes to disk
                 * before proceeding w/ I/O */
                bitmap_unplug(conf->mddev->bitmap);

                while (bio) { /* submit pending writes */
                        struct bio *next = bio->bi_next;
                        bio->bi_next = NULL;
                        generic_make_request(bio);
                        bio = next;
                }
        } else
                spin_unlock_irq(&conf->device_lock);
}

/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down. This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'. When that returns there
 * is no background IO happening. It must arrange to call
 * allow_barrier when it has finished its IO.
 * background IO calls must call raise_barrier. Once that returns
 * there is no normal IO happening. It must arrange to call
 * lower_barrier when the particular background IO completes.
 */
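/*
 * Summary of the book-keeping below (added commentary, not from the
 * original source):
 *      conf->barrier    count of active resync/recovery barriers
 *      conf->nr_pending count of regular IO requests in flight
 *      conf->nr_waiting count of requests waiting for the barrier to drop
 * raise_barrier() waits for nr_pending to reach zero; wait_barrier() waits
 * for barrier to reach zero; so regular IO and background IO exclude each
 * other, with waiting regular IO given priority over new barriers.
 */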
static void raise_barrier(conf_t *conf, int force)
{
        BUG_ON(force && !conf->barrier);
        spin_lock_irq(&conf->resync_lock);

        /* Wait until no block IO is waiting (unless 'force') */
        wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
                            conf->resync_lock, );

        /* block any new IO from starting */
        conf->barrier++;

        /* Now wait for all pending IO to complete */
        wait_event_lock_irq(conf->wait_barrier,
                            !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
                            conf->resync_lock, );

        spin_unlock_irq(&conf->resync_lock);
}

static void lower_barrier(conf_t *conf)
{
        unsigned long flags;
        spin_lock_irqsave(&conf->resync_lock, flags);
        conf->barrier--;
        spin_unlock_irqrestore(&conf->resync_lock, flags);
        wake_up(&conf->wait_barrier);
}

static void wait_barrier(conf_t *conf)
{
        spin_lock_irq(&conf->resync_lock);
        if (conf->barrier) {
                conf->nr_waiting++;
                wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
                                    conf->resync_lock,
                                    );
                conf->nr_waiting--;
        }
        conf->nr_pending++;
        spin_unlock_irq(&conf->resync_lock);
}

static void allow_barrier(conf_t *conf)
{
        unsigned long flags;
        spin_lock_irqsave(&conf->resync_lock, flags);
        conf->nr_pending--;
        spin_unlock_irqrestore(&conf->resync_lock, flags);
        wake_up(&conf->wait_barrier);
}

static void freeze_array(conf_t *conf)
{
        /* stop syncio and normal IO and wait for everything to
         * go quiet.
         * We increment barrier and nr_waiting, and then
         * wait until nr_pending matches nr_queued+1.
         * This is called in the context of one normal IO request
         * that has failed. Thus any sync request that might be pending
         * will be blocked by nr_pending, and we need to wait for
         * pending IO requests to complete or be queued for re-try.
         * Thus the number queued (nr_queued) plus this request (1)
         * must match the number of pending IOs (nr_pending) before
         * we continue.
         */
        spin_lock_irq(&conf->resync_lock);
        conf->barrier++;
        conf->nr_waiting++;
        wait_event_lock_irq(conf->wait_barrier,
                            conf->nr_pending == conf->nr_queued+1,
                            conf->resync_lock,
                            flush_pending_writes(conf));

        spin_unlock_irq(&conf->resync_lock);
}

static void unfreeze_array(conf_t *conf)
{
        /* reverse the effect of the freeze */
        spin_lock_irq(&conf->resync_lock);
        conf->barrier--;
        conf->nr_waiting--;
        wake_up(&conf->wait_barrier);
        spin_unlock_irq(&conf->resync_lock);
}
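/*
 * Added note (not from the original source): freeze_array()/unfreeze_array()
 * bracket the synchronous error-recovery work; see handle_read_error()
 * below, which does
 *
 *      freeze_array(conf);
 *      fix_read_error(conf, mddev, r10_bio);
 *      unfreeze_array(conf);
 *
 * so that no other IO is in flight while a bad block is re-written.
 */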
static int make_request(mddev_t *mddev, struct bio * bio)
{
        conf_t *conf = mddev->private;
        mirror_info_t *mirror;
        r10bio_t *r10_bio;
        struct bio *read_bio;
        int i;
        int chunk_sects = conf->chunk_mask + 1;
        const int rw = bio_data_dir(bio);
        const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
        const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
        unsigned long flags;
        mdk_rdev_t *blocked_rdev;
        int plugged;
        int sectors_handled;
        int max_sectors;

        if (unlikely(bio->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bio);
                return 0;
        }

        /* If this request crosses a chunk boundary, we need to
         * split it. This will only happen for 1 PAGE (or less) requests.
         */
        if (unlikely((bio->bi_sector & conf->chunk_mask) + (bio->bi_size >> 9)
                     > chunk_sects &&
                     conf->near_copies < conf->raid_disks)) {
                struct bio_pair *bp;
                /* Sanity check -- queue functions should prevent this happening */
                if (bio->bi_vcnt != 1 ||
                    bio->bi_idx != 0)
                        goto bad_map;
                /* This is a one page bio that upper layers
                 * refuse to split for us, so we need to split it.
                 */
                bp = bio_split(bio,
                               chunk_sects - (bio->bi_sector & (chunk_sects - 1)));

                /* Each of these 'make_request' calls will call 'wait_barrier'.
                 * If the first succeeds but the second blocks due to the resync
                 * thread raising the barrier, we will deadlock because the
                 * IO to the underlying device will be queued in generic_make_request
                 * and will never complete, so will never reduce nr_pending.
                 * So increment nr_waiting here so no new raise_barriers will
                 * succeed, and so the second wait_barrier cannot block.
                 */
                spin_lock_irq(&conf->resync_lock);
                conf->nr_waiting++;
                spin_unlock_irq(&conf->resync_lock);

                if (make_request(mddev, &bp->bio1))
                        generic_make_request(&bp->bio1);
                if (make_request(mddev, &bp->bio2))
                        generic_make_request(&bp->bio2);

                spin_lock_irq(&conf->resync_lock);
                conf->nr_waiting--;
                wake_up(&conf->wait_barrier);
                spin_unlock_irq(&conf->resync_lock);

                bio_pair_release(bp);
                return 0;
        bad_map:
                printk("md/raid10:%s: make_request bug: can't convert block across chunks"
                       " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
                       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);

                bio_io_error(bio);
                return 0;
        }

        md_write_start(mddev, bio);

        /*
         * Register the new request and wait if the reconstruction
         * thread has put up a bar for new requests.
         * Continue immediately if no resync is active currently.
         */
        wait_barrier(conf);

        r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);

        r10_bio->master_bio = bio;
        r10_bio->sectors = bio->bi_size >> 9;

        r10_bio->mddev = mddev;
        r10_bio->sector = bio->bi_sector;
        r10_bio->state = 0;

        /* We might need to issue multiple reads to different
         * devices if there are bad blocks around, so we keep
         * track of the number of reads in bio->bi_phys_segments.
         * If this is 0, there is only one r10_bio and no locking
         * will be needed when the request completes. If it is
         * non-zero, then it is the number of not-completed requests.
         */
        bio->bi_phys_segments = 0;
        clear_bit(BIO_SEG_VALID, &bio->bi_flags);

        if (rw == READ) {
                /*
                 * read balancing logic:
                 */
                int disk;
                int slot;

read_again:
                disk = read_balance(conf, r10_bio, &max_sectors);
                slot = r10_bio->read_slot;
                if (disk < 0) {
                        raid_end_bio_io(r10_bio);
                        return 0;
                }
                mirror = conf->mirrors + disk;

                read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
                md_trim_bio(read_bio, r10_bio->sector - bio->bi_sector,
                            max_sectors);

                r10_bio->devs[slot].bio = read_bio;

                read_bio->bi_sector = r10_bio->devs[slot].addr +
                        mirror->rdev->data_offset;
                read_bio->bi_bdev = mirror->rdev->bdev;
                read_bio->bi_end_io = raid10_end_read_request;
                read_bio->bi_rw = READ | do_sync;
                read_bio->bi_private = r10_bio;

                if (max_sectors < r10_bio->sectors) {
                        /* Could not read all from this device, so we will
                         * need another r10_bio.
                         */
                        sectors_handled = (r10_bio->sector + max_sectors
                                           - bio->bi_sector);
                        r10_bio->sectors = max_sectors;
                        spin_lock_irq(&conf->device_lock);
                        if (bio->bi_phys_segments == 0)
                                bio->bi_phys_segments = 2;
                        else
                                bio->bi_phys_segments++;
                        spin_unlock_irq(&conf->device_lock);
                        /* Cannot call generic_make_request directly
                         * as that will be queued in __generic_make_request
                         * and subsequent mempool_alloc might block
                         * waiting for it. so hand bio over to raid10d.
                         */
                        reschedule_retry(r10_bio);

                        r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);

                        r10_bio->master_bio = bio;
                        r10_bio->sectors = ((bio->bi_size >> 9)
                                            - sectors_handled);
                        r10_bio->state = 0;
                        r10_bio->mddev = mddev;
                        r10_bio->sector = bio->bi_sector + sectors_handled;
                        goto read_again;
                } else
                        generic_make_request(read_bio);
                return 0;
        }
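        /*
         * Added note (not from the original source): bi_phys_segments is
         * re-used above as a count of outstanding r10_bios for the master
         * bio.  E.g. a read served by three r10_bios (because of bad
         * blocks) ends up with bi_phys_segments == 3: the first split sets
         * it to 2, each further split increments it, and raid_end_bio_io()
         * decrements once per completed r10_bio, finishing the master bio
         * when the count reaches zero.
         */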
        /*
         * WRITE:
         */
        /* first select target devices under rcu_lock and
         * inc refcount on their rdev. Record them by setting
         * bios[x] to bio
         * If there are known/acknowledged bad blocks on any device
         * on which we have seen a write error, we want to avoid
         * writing to those blocks. This potentially requires several
         * writes to write around the bad blocks. Each set of writes
         * gets its own r10_bio with a set of bios attached. The number
         * of r10_bios is recorded in bio->bi_phys_segments just as with
         * the read case.
         */
        plugged = mddev_check_plugged(mddev);

        raid10_find_phys(conf, r10_bio);
retry_write:
        blocked_rdev = NULL;
        rcu_read_lock();
        max_sectors = r10_bio->sectors;

        for (i = 0; i < conf->copies; i++) {
                int d = r10_bio->devs[i].devnum;
                mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[d].rdev);
                if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
                        atomic_inc(&rdev->nr_pending);
                        blocked_rdev = rdev;
                        break;
                }
                r10_bio->devs[i].bio = NULL;
                if (!rdev || test_bit(Faulty, &rdev->flags)) {
                        set_bit(R10BIO_Degraded, &r10_bio->state);
                        continue;
                }
                if (test_bit(WriteErrorSeen, &rdev->flags)) {
                        sector_t first_bad;
                        sector_t dev_sector = r10_bio->devs[i].addr;
                        int bad_sectors;
                        int is_bad;

                        is_bad = is_badblock(rdev, dev_sector,
                                             max_sectors,
                                             &first_bad, &bad_sectors);
                        if (is_bad < 0) {
                                /* Mustn't write here until the bad block
                                 * is acknowledged
                                 */
                                atomic_inc(&rdev->nr_pending);
                                set_bit(BlockedBadBlocks, &rdev->flags);
                                blocked_rdev = rdev;
                                break;
                        }
                        if (is_bad && first_bad <= dev_sector) {
                                /* Cannot write here at all */
                                bad_sectors -= (dev_sector - first_bad);
                                if (bad_sectors < max_sectors)
                                        /* Mustn't write more than bad_sectors
                                         * to other devices yet
                                         */
                                        max_sectors = bad_sectors;
                                /* We don't set R10BIO_Degraded as that
                                 * only applies if the disk is missing,
                                 * so it might be re-added, and we want to
                                 * know to recover this chunk.
                                 * In this case the device is here, and the
                                 * fact that this chunk is not in-sync is
                                 * recorded in the bad block log.
                                 */
                                continue;
                        }
                        if (is_bad) {
                                int good_sectors = first_bad - dev_sector;
                                if (good_sectors < max_sectors)
                                        max_sectors = good_sectors;
                        }
                }
                r10_bio->devs[i].bio = bio;
                atomic_inc(&rdev->nr_pending);
        }
        rcu_read_unlock();

        if (unlikely(blocked_rdev)) {
                /* Have to wait for this device to get unblocked, then retry */
                int j;
                int d;

                for (j = 0; j < i; j++)
                        if (r10_bio->devs[j].bio) {
                                d = r10_bio->devs[j].devnum;
                                rdev_dec_pending(conf->mirrors[d].rdev, mddev);
                        }
                allow_barrier(conf);
                md_wait_for_blocked_rdev(blocked_rdev, mddev);
                wait_barrier(conf);
                goto retry_write;
        }

        if (max_sectors < r10_bio->sectors) {
                /* We are splitting this into multiple parts, so
                 * we need to prepare for allocating another r10_bio.
                 */
                r10_bio->sectors = max_sectors;
                spin_lock_irq(&conf->device_lock);
                if (bio->bi_phys_segments == 0)
                        bio->bi_phys_segments = 2;
                else
                        bio->bi_phys_segments++;
                spin_unlock_irq(&conf->device_lock);
        }
        sectors_handled = r10_bio->sector + max_sectors - bio->bi_sector;

        atomic_set(&r10_bio->remaining, 1);
        bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);

        for (i = 0; i < conf->copies; i++) {
                struct bio *mbio;
                int d = r10_bio->devs[i].devnum;
                if (!r10_bio->devs[i].bio)
                        continue;

                mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
                md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
                            max_sectors);
                r10_bio->devs[i].bio = mbio;

                mbio->bi_sector = (r10_bio->devs[i].addr +
                                   conf->mirrors[d].rdev->data_offset);
                mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
                mbio->bi_end_io = raid10_end_write_request;
                mbio->bi_rw = WRITE | do_sync | do_fua;
                mbio->bi_private = r10_bio;

                atomic_inc(&r10_bio->remaining);
                spin_lock_irqsave(&conf->device_lock, flags);
                bio_list_add(&conf->pending_bio_list, mbio);
                spin_unlock_irqrestore(&conf->device_lock, flags);
        }
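        /*
         * Added note (not from the original source): the mbios queued on
         * pending_bio_list above are not submitted here.  flush_pending_writes()
         * (run from the raid10d thread) first writes out the bitmap via
         * bitmap_unplug() and only then issues the queued writes, so the
         * on-disk bitmap always covers any write that may be in flight.
         */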
        /* Don't remove the bias on 'remaining' (one_write_done) until
         * after checking if we need to go around again.
         */

        if (sectors_handled < (bio->bi_size >> 9)) {
                one_write_done(r10_bio);
                /* We need another r10_bio. It has already been counted
                 * in bio->bi_phys_segments.
                 */
                r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);

                r10_bio->master_bio = bio;
                r10_bio->sectors = (bio->bi_size >> 9) - sectors_handled;

                r10_bio->mddev = mddev;
                r10_bio->sector = bio->bi_sector + sectors_handled;
                r10_bio->state = 0;
                goto retry_write;
        }
        one_write_done(r10_bio);

        /* In case raid10d snuck in to freeze_array */
        wake_up(&conf->wait_barrier);

        if (do_sync || !mddev->bitmap || !plugged)
                md_wakeup_thread(mddev->thread);
        return 0;
}

static void status(struct seq_file *seq, mddev_t *mddev)
{
        conf_t *conf = mddev->private;
        int i;

        if (conf->near_copies < conf->raid_disks)
                seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
        if (conf->near_copies > 1)
                seq_printf(seq, " %d near-copies", conf->near_copies);
        if (conf->far_copies > 1) {
                if (conf->far_offset)
                        seq_printf(seq, " %d offset-copies", conf->far_copies);
                else
                        seq_printf(seq, " %d far-copies", conf->far_copies);
        }
        seq_printf(seq, " [%d/%d] [", conf->raid_disks,
                   conf->raid_disks - mddev->degraded);
        for (i = 0; i < conf->raid_disks; i++)
                seq_printf(seq, "%s",
                           conf->mirrors[i].rdev &&
                           test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
        seq_printf(seq, "]");
}

/* check if there are enough drives for
 * every block to appear on at least one.
 * Don't consider the device numbered 'ignore'
 * as we might be about to remove it.
 */
static int enough(conf_t *conf, int ignore)
{
        int first = 0;

        do {
                int n = conf->copies;
                int cnt = 0;
                while (n--) {
                        if (conf->mirrors[first].rdev &&
                            first != ignore)
                                cnt++;
                        first = (first+1) % conf->raid_disks;
                }
                if (cnt == 0)
                        return 0;
        } while (first != 0);
        return 1;
}
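/*
 * Illustrative example for enough() above (added commentary, not from the
 * original source): with 4 drives and copies == 2 the loop checks the
 * windows {0,1} and {2,3}, which for a 'near=2' layout are exactly the
 * mirror pairs.  Losing one drive from each pair (say 1 and 2) still
 * leaves every window populated, but losing both drives of one pair
 * (say 0 and 1) empties a window, so some chunks would have no surviving
 * copy and enough() returns 0.
 */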
static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
        char b[BDEVNAME_SIZE];
        conf_t *conf = mddev->private;

        /*
         * If it is not operational, then we have already marked it as dead
         * else if it is the last working disk, ignore the error, let the
         * next level up know.
         * else mark the drive as failed
         */
        if (test_bit(In_sync, &rdev->flags)
            && !enough(conf, rdev->raid_disk))
                /*
                 * Don't fail the drive, just return an IO error.
                 */
                return;
        if (test_and_clear_bit(In_sync, &rdev->flags)) {
                unsigned long flags;
                spin_lock_irqsave(&conf->device_lock, flags);
                mddev->degraded++;
                spin_unlock_irqrestore(&conf->device_lock, flags);
                /*
                 * if recovery is running, make sure it aborts.
                 */
                set_bit(MD_RECOVERY_INTR, &mddev->recovery);
        }
        set_bit(Blocked, &rdev->flags);
        set_bit(Faulty, &rdev->flags);
        set_bit(MD_CHANGE_DEVS, &mddev->flags);
        printk(KERN_ALERT
               "md/raid10:%s: Disk failure on %s, disabling device.\n"
               "md/raid10:%s: Operation continuing on %d devices.\n",
               mdname(mddev), bdevname(rdev->bdev, b),
               mdname(mddev), conf->raid_disks - mddev->degraded);
}

static void print_conf(conf_t *conf)
{
        int i;
        mirror_info_t *tmp;

        printk(KERN_DEBUG "RAID10 conf printout:\n");
        if (!conf) {
                printk(KERN_DEBUG "(!conf)\n");
                return;
        }
        printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
               conf->raid_disks);

        for (i = 0; i < conf->raid_disks; i++) {
                char b[BDEVNAME_SIZE];
                tmp = conf->mirrors + i;
                if (tmp->rdev)
                        printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
                               i, !test_bit(In_sync, &tmp->rdev->flags),
                               !test_bit(Faulty, &tmp->rdev->flags),
                               bdevname(tmp->rdev->bdev, b));
        }
}

static void close_sync(conf_t *conf)
{
        wait_barrier(conf);
        allow_barrier(conf);

        mempool_destroy(conf->r10buf_pool);
        conf->r10buf_pool = NULL;
}

static int raid10_spare_active(mddev_t *mddev)
{
        int i;
        conf_t *conf = mddev->private;
        mirror_info_t *tmp;
        int count = 0;
        unsigned long flags;

        /*
         * Find all non-in_sync disks within the RAID10 configuration
         * and mark them in_sync
         */
        for (i = 0; i < conf->raid_disks; i++) {
                tmp = conf->mirrors + i;
                if (tmp->rdev
                    && !test_bit(Faulty, &tmp->rdev->flags)
                    && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
                        count++;
                        sysfs_notify_dirent(tmp->rdev->sysfs_state);
                }
        }
        spin_lock_irqsave(&conf->device_lock, flags);
        mddev->degraded -= count;
        spin_unlock_irqrestore(&conf->device_lock, flags);

        print_conf(conf);
        return count;
}


static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
        conf_t *conf = mddev->private;
        int err = -EEXIST;
        int mirror;
        int first = 0;
        int last = conf->raid_disks - 1;

        if (mddev->recovery_cp < MaxSector)
                /* only hot-add to in-sync arrays, as recovery is
                 * very different from resync
                 */
                return -EBUSY;
        if (!enough(conf, -1))
                return -EINVAL;

        if (rdev->raid_disk >= 0)
                first = last = rdev->raid_disk;

        if (rdev->saved_raid_disk >= first &&
            conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
                mirror = rdev->saved_raid_disk;
        else
                mirror = first;
        for ( ; mirror <= last ; mirror++) {
                mirror_info_t *p = &conf->mirrors[mirror];
                if (p->recovery_disabled == mddev->recovery_disabled)
                        continue;
                if (p->rdev)
                        continue;

                disk_stack_limits(mddev->gendisk, rdev->bdev,
                                  rdev->data_offset << 9);
                /* as we don't honour merge_bvec_fn, we must
                 * never risk violating it, so limit
                 * ->max_segments to one lying with a single
                 * page, as a one page request is never in
                 * violation.
                 */
                if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
                        blk_queue_max_segments(mddev->queue, 1);
                        blk_queue_segment_boundary(mddev->queue,
                                                   PAGE_CACHE_SIZE - 1);
                }

                p->head_position = 0;
                rdev->raid_disk = mirror;
                err = 0;
                if (rdev->saved_raid_disk != mirror)
                        conf->fullsync = 1;
                rcu_assign_pointer(p->rdev, rdev);
                break;
        }

        md_integrity_add_rdev(rdev, mddev);
        print_conf(conf);
        return err;
}

static int raid10_remove_disk(mddev_t *mddev, int number)
{
        conf_t *conf = mddev->private;
        int err = 0;
        mdk_rdev_t *rdev;
        mirror_info_t *p = conf->mirrors + number;

        print_conf(conf);
        rdev = p->rdev;
        if (rdev) {
                if (test_bit(In_sync, &rdev->flags) ||
                    atomic_read(&rdev->nr_pending)) {
                        err = -EBUSY;
                        goto abort;
                }
                /* Only remove faulty devices if recovery
                 * is not possible.
                 */
                if (!test_bit(Faulty, &rdev->flags) &&
                    mddev->recovery_disabled != p->recovery_disabled &&
                    enough(conf, -1)) {
                        err = -EBUSY;
                        goto abort;
                }
                p->rdev = NULL;
                synchronize_rcu();
                if (atomic_read(&rdev->nr_pending)) {
                        /* lost the race, try later */
                        err = -EBUSY;
                        p->rdev = rdev;
                        goto abort;
                }
                err = md_integrity_register(mddev);
        }
abort:

        print_conf(conf);
        return err;
}


static void end_sync_read(struct bio *bio, int error)
{
        r10bio_t *r10_bio = bio->bi_private;
        conf_t *conf = r10_bio->mddev->private;
        int d;

        d = find_bio_disk(conf, r10_bio, bio, NULL);

        if (test_bit(BIO_UPTODATE, &bio->bi_flags))
                set_bit(R10BIO_Uptodate, &r10_bio->state);
        else
                /* The write handler will notice the lack of
                 * R10BIO_Uptodate and record any errors etc
                 */
                atomic_add(r10_bio->sectors,
                           &conf->mirrors[d].rdev->corrected_errors);

        /* for reconstruct, we always reschedule after a read.
         * for resync, only after all reads
         */
        rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
        if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
            atomic_dec_and_test(&r10_bio->remaining)) {
                /* we have read all the blocks,
                 * do the comparison in process context in raid10d
                 */
                reschedule_retry(r10_bio);
        }
}
static void end_sync_request(r10bio_t *r10_bio)
{
        mddev_t *mddev = r10_bio->mddev;

        while (atomic_dec_and_test(&r10_bio->remaining)) {
                if (r10_bio->master_bio == NULL) {
                        /* the primary of several recovery bios */
                        sector_t s = r10_bio->sectors;
                        if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
                            test_bit(R10BIO_WriteError, &r10_bio->state))
                                reschedule_retry(r10_bio);
                        else
                                put_buf(r10_bio);
                        md_done_sync(mddev, s, 1);
                        break;
                } else {
                        r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio;
                        if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
                            test_bit(R10BIO_WriteError, &r10_bio->state))
                                reschedule_retry(r10_bio);
                        else
                                put_buf(r10_bio);
                        r10_bio = r10_bio2;
                }
        }
}

static void end_sync_write(struct bio *bio, int error)
{
        int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        r10bio_t *r10_bio = bio->bi_private;
        mddev_t *mddev = r10_bio->mddev;
        conf_t *conf = mddev->private;
        int d;
        sector_t first_bad;
        int bad_sectors;
        int slot;

        d = find_bio_disk(conf, r10_bio, bio, &slot);

        if (!uptodate) {
                set_bit(WriteErrorSeen, &conf->mirrors[d].rdev->flags);
                set_bit(R10BIO_WriteError, &r10_bio->state);
        } else if (is_badblock(conf->mirrors[d].rdev,
                               r10_bio->devs[slot].addr,
                               r10_bio->sectors,
                               &first_bad, &bad_sectors))
                set_bit(R10BIO_MadeGood, &r10_bio->state);

        rdev_dec_pending(conf->mirrors[d].rdev, mddev);

        end_sync_request(r10_bio);
}

/*
 * Note: sync and recovery are handled very differently for raid10.
 * This code is for resync.
 * For resync, we read through virtual addresses and read all blocks.
 * If there is any error, we schedule a write. The lowest numbered
 * drive is authoritative.
 * However requests come for physical addresses, so we need to map.
 * For every physical address there are raid_disks/copies virtual addresses,
 * which is always at least one, but is not necessarily an integer.
 * This means that a physical address can span multiple chunks, so we may
 * have to submit multiple io requests for a single sync request.
 */
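/*
 * Illustrative example (added commentary, not from the original source):
 * with raid_disks == 4 and copies == 2 there are 4/2 == 2 virtual
 * addresses per physical address, an integer.  With raid_disks == 3 and
 * copies == 2 the ratio is 1.5, so a physical range on one device can
 * map onto parts of two different virtual chunks, and resync must then
 * issue more than one IO for it.
 */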
/*
 * We check if all blocks are in-sync and only write to blocks that
 * aren't in sync
 */
static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
{
        conf_t *conf = mddev->private;
        int i, first;
        struct bio *tbio, *fbio;

        atomic_set(&r10_bio->remaining, 1);

        /* find the first device with a block */
        for (i = 0; i < conf->copies; i++)
                if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags))
                        break;

        if (i == conf->copies)
                goto done;

        first = i;
        fbio = r10_bio->devs[i].bio;

        /* now find blocks with errors */
        for (i = 0 ; i < conf->copies ; i++) {
                int j, d;
                int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);

                tbio = r10_bio->devs[i].bio;

                if (tbio->bi_end_io != end_sync_read)
                        continue;
                if (i == first)
                        continue;
                if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) {
                        /* We know that the bi_io_vec layout is the same for
                         * both 'first' and 'i', so we just compare them.
                         * All vec entries are PAGE_SIZE;
                         */
                        for (j = 0; j < vcnt; j++)
                                if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
                                           page_address(tbio->bi_io_vec[j].bv_page),
                                           PAGE_SIZE))
                                        break;
                        if (j == vcnt)
                                continue;
                        mddev->resync_mismatches += r10_bio->sectors;
                        if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
                                /* Don't fix anything. */
                                continue;
                }
                /* Ok, we need to write this bio, either to correct an
                 * inconsistency or to correct an unreadable block.
                 * First we need to fixup bv_offset, bv_len and
                 * bi_vecs, as the read request might have corrupted these
                 */
                tbio->bi_vcnt = vcnt;
                tbio->bi_size = r10_bio->sectors << 9;
                tbio->bi_idx = 0;
                tbio->bi_phys_segments = 0;
                tbio->bi_flags &= ~(BIO_POOL_MASK - 1);
                tbio->bi_flags |= 1 << BIO_UPTODATE;
                tbio->bi_next = NULL;
                tbio->bi_rw = WRITE;
                tbio->bi_private = r10_bio;
                tbio->bi_sector = r10_bio->devs[i].addr;

                for (j = 0; j < vcnt ; j++) {
                        tbio->bi_io_vec[j].bv_offset = 0;
                        tbio->bi_io_vec[j].bv_len = PAGE_SIZE;

                        memcpy(page_address(tbio->bi_io_vec[j].bv_page),
                               page_address(fbio->bi_io_vec[j].bv_page),
                               PAGE_SIZE);
                }
                tbio->bi_end_io = end_sync_write;

                d = r10_bio->devs[i].devnum;
                atomic_inc(&conf->mirrors[d].rdev->nr_pending);
                atomic_inc(&r10_bio->remaining);
                md_sync_acct(conf->mirrors[d].rdev->bdev, tbio->bi_size >> 9);

                tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
                tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
                generic_make_request(tbio);
        }

done:
        if (atomic_dec_and_test(&r10_bio->remaining)) {
                md_done_sync(mddev, r10_bio->sectors, 1);
                put_buf(r10_bio);
        }
}

/*
 * Now for the recovery code.
 * Recovery happens across physical sectors.
 * We recover all non-in_sync drives by finding the virtual address of
 * each, and then choosing a working drive that also has that virt address.
 * There is a separate r10_bio for each non-in_sync drive.
 * Only the first two slots are in use. The first for reading,
 * The second for writing.
 *
 */
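/*
 * Added note (not from the original source): this matches the nalloc == 2
 * path in r10buf_pool_alloc() above -- during recovery only devs[0] (the
 * read source) and devs[1] (the write target) of each r10_bio are used,
 * and the two bios share their data pages.
 */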
static void fix_recovery_read_error(r10bio_t *r10_bio)
{
        /* We got a read error during recovery.
         * We repeat the read in smaller page-sized sections.
         * If a read succeeds, write it to the new device or record
         * a bad block if we cannot.
         * If a read fails, record a bad block on both old and
         * new devices.
         */
        mddev_t *mddev = r10_bio->mddev;
        conf_t *conf = mddev->private;
        struct bio *bio = r10_bio->devs[0].bio;
        sector_t sect = 0;
        int sectors = r10_bio->sectors;
        int idx = 0;
        int dr = r10_bio->devs[0].devnum;
        int dw = r10_bio->devs[1].devnum;

        while (sectors) {
                int s = sectors;
                mdk_rdev_t *rdev;
                sector_t addr;
                int ok;

                if (s > (PAGE_SIZE>>9))
                        s = PAGE_SIZE >> 9;

                rdev = conf->mirrors[dr].rdev;
                addr = r10_bio->devs[0].addr + sect;
                ok = sync_page_io(rdev,
                                  addr,
                                  s << 9,
                                  bio->bi_io_vec[idx].bv_page,
                                  READ, false);
                if (ok) {
                        rdev = conf->mirrors[dw].rdev;
                        addr = r10_bio->devs[1].addr + sect;
                        ok = sync_page_io(rdev,
                                          addr,
                                          s << 9,
                                          bio->bi_io_vec[idx].bv_page,
                                          WRITE, false);
                        if (!ok)
                                set_bit(WriteErrorSeen, &rdev->flags);
                }
                if (!ok) {
                        /* We don't worry if we cannot set a bad block -
                         * it really is bad so there is no loss in not
                         * recording it yet
                         */
                        rdev_set_badblocks(rdev, addr, s, 0);

                        if (rdev != conf->mirrors[dw].rdev) {
                                /* need bad block on destination too */
                                mdk_rdev_t *rdev2 = conf->mirrors[dw].rdev;
                                addr = r10_bio->devs[1].addr + sect;
                                ok = rdev_set_badblocks(rdev2, addr, s, 0);
                                if (!ok) {
                                        /* just abort the recovery */
                                        printk(KERN_NOTICE
                                               "md/raid10:%s: recovery aborted"
                                               " due to read error\n",
                                               mdname(mddev));

                                        conf->mirrors[dw].recovery_disabled
                                                = mddev->recovery_disabled;
                                        set_bit(MD_RECOVERY_INTR,
                                                &mddev->recovery);
                                        break;
                                }
                        }
                }

                sectors -= s;
                sect += s;
                idx++;
        }
}

static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
{
        conf_t *conf = mddev->private;
        int d;
        struct bio *wbio;

        if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
                fix_recovery_read_error(r10_bio);
                end_sync_request(r10_bio);
                return;
        }

        /*
         * share the pages with the first bio
         * and submit the write request
         */
        wbio = r10_bio->devs[1].bio;
        d = r10_bio->devs[1].devnum;

        atomic_inc(&conf->mirrors[d].rdev->nr_pending);
        md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
        generic_make_request(wbio);
}


/*
 * Used by fix_read_error() to decay the per rdev read_errors.
 * We halve the read error count for every hour that has elapsed
 * since the last recorded read error.
 *
 */
static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev)
{
        struct timespec cur_time_mon;
        unsigned long hours_since_last;
        unsigned int read_errors = atomic_read(&rdev->read_errors);

        ktime_get_ts(&cur_time_mon);

        if (rdev->last_read_error.tv_sec == 0 &&
            rdev->last_read_error.tv_nsec == 0) {
                /* first time we've seen a read error */
                rdev->last_read_error = cur_time_mon;
                return;
        }

        hours_since_last = (cur_time_mon.tv_sec -
                            rdev->last_read_error.tv_sec) / 3600;

        rdev->last_read_error = cur_time_mon;

        /*
         * if hours_since_last is > the number of bits in read_errors
         * just set read errors to 0. We do this to avoid
         * overflowing the shift of read_errors by hours_since_last.
         */
        if (hours_since_last >= 8 * sizeof(read_errors))
                atomic_set(&rdev->read_errors, 0);
        else
                atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
}
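/*
 * Worked example for check_decay_read_errors() above (added commentary,
 * not from the original source): a device with read_errors == 40 whose
 * last error was three hours ago decays to 40 >> 3 == 5, i.e. the count
 * halves for each elapsed hour before the new error is counted.
 */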
static int r10_sync_page_io(mdk_rdev_t *rdev, sector_t sector,
                            int sectors, struct page *page, int rw)
{
        sector_t first_bad;
        int bad_sectors;

        if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
            && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
                return -1;
        if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
                /* success */
                return 1;
        if (rw == WRITE)
                set_bit(WriteErrorSeen, &rdev->flags);
        /* need to record an error - either for the block or the device */
        if (!rdev_set_badblocks(rdev, sector, sectors, 0))
                md_error(rdev->mddev, rdev);
        return 0;
}

/*
 * This is a kernel thread which:
 *
 *      1.      Retries failed read operations on working mirrors.
 *      2.      Updates the raid superblock when problems are encountered.
 *      3.      Performs writes following reads for array synchronising.
 */

static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
{
        int sect = 0; /* Offset from r10_bio->sector */
        int sectors = r10_bio->sectors;
        mdk_rdev_t *rdev;
        int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
        int d = r10_bio->devs[r10_bio->read_slot].devnum;

        /* still own a reference to this rdev, so it cannot
         * have been cleared recently.
         */
        rdev = conf->mirrors[d].rdev;

        if (test_bit(Faulty, &rdev->flags))
                /* drive has already been failed, just ignore any
                   more fix_read_error() attempts */
                return;

        check_decay_read_errors(mddev, rdev);
        atomic_inc(&rdev->read_errors);
        if (atomic_read(&rdev->read_errors) > max_read_errors) {
                char b[BDEVNAME_SIZE];
                bdevname(rdev->bdev, b);

                printk(KERN_NOTICE
                       "md/raid10:%s: %s: Raid device exceeded "
                       "read_error threshold [cur %d:max %d]\n",
                       mdname(mddev), b,
                       atomic_read(&rdev->read_errors), max_read_errors);
                printk(KERN_NOTICE
                       "md/raid10:%s: %s: Failing raid device\n",
                       mdname(mddev), b);
                md_error(mddev, conf->mirrors[d].rdev);
                return;
        }

        while (sectors) {
                int s = sectors;
                int sl = r10_bio->read_slot;
                int success = 0;
                int start;

                if (s > (PAGE_SIZE>>9))
                        s = PAGE_SIZE >> 9;

                rcu_read_lock();
                do {
                        sector_t first_bad;
                        int bad_sectors;

                        d = r10_bio->devs[sl].devnum;
                        rdev = rcu_dereference(conf->mirrors[d].rdev);
                        if (rdev &&
                            test_bit(In_sync, &rdev->flags) &&
                            is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
                                        &first_bad, &bad_sectors) == 0) {
                                atomic_inc(&rdev->nr_pending);
                                rcu_read_unlock();
                                success = sync_page_io(rdev,
                                                       r10_bio->devs[sl].addr +
                                                       sect,
                                                       s<<9,
                                                       conf->tmppage, READ, false);
                                rdev_dec_pending(rdev, mddev);
                                rcu_read_lock();
                                if (success)
                                        break;
                        }
                        sl++;
                        if (sl == conf->copies)
                                sl = 0;
                } while (!success && sl != r10_bio->read_slot);
                rcu_read_unlock();

                if (!success) {
                        /* Cannot read from anywhere, just mark the block
                         * as bad on the first device to discourage future
                         * reads.
                         */
                        int dn = r10_bio->devs[r10_bio->read_slot].devnum;
                        rdev = conf->mirrors[dn].rdev;

                        if (!rdev_set_badblocks(
                                    rdev,
                                    r10_bio->devs[r10_bio->read_slot].addr
                                    + sect,
                                    s, 0))
                                md_error(mddev, rdev);
                        break;
                }

                start = sl;
                /* write it back and re-read */
                rcu_read_lock();
                while (sl != r10_bio->read_slot) {
                        char b[BDEVNAME_SIZE];

                        if (sl == 0)
                                sl = conf->copies;
                        sl--;
                        d = r10_bio->devs[sl].devnum;
                        rdev = rcu_dereference(conf->mirrors[d].rdev);
                        if (!rdev ||
                            !test_bit(In_sync, &rdev->flags))
                                continue;

                        atomic_inc(&rdev->nr_pending);
                        rcu_read_unlock();
                        if (r10_sync_page_io(rdev,
                                             r10_bio->devs[sl].addr +
                                             sect,
                                             s<<9, conf->tmppage, WRITE)
                            == 0) {
                                /* Well, this device is dead */
                                printk(KERN_NOTICE
                                       "md/raid10:%s: read correction "
                                       "write failed"
                                       " (%d sectors at %llu on %s)\n",
                                       mdname(mddev), s,
                                       (unsigned long long)(
                                               sect + rdev->data_offset),
                                       bdevname(rdev->bdev, b));
                                printk(KERN_NOTICE "md/raid10:%s: %s: failing "
                                       "drive\n",
                                       mdname(mddev),
                                       bdevname(rdev->bdev, b));
                        }
                        rdev_dec_pending(rdev, mddev);
                        rcu_read_lock();
                }
                sl = start;
                while (sl != r10_bio->read_slot) {
                        char b[BDEVNAME_SIZE];

                        if (sl == 0)
                                sl = conf->copies;
                        sl--;
                        d = r10_bio->devs[sl].devnum;
                        rdev = rcu_dereference(conf->mirrors[d].rdev);
                        if (!rdev ||
                            !test_bit(In_sync, &rdev->flags))
                                continue;

                        atomic_inc(&rdev->nr_pending);
                        rcu_read_unlock();
                        switch (r10_sync_page_io(rdev,
                                                 r10_bio->devs[sl].addr +
                                                 sect,
                                                 s<<9, conf->tmppage,
                                                 READ)) {
                        case 0:
                                /* Well, this device is dead */
                                printk(KERN_NOTICE
                                       "md/raid10:%s: unable to read back "
                                       "corrected sectors"
                                       " (%d sectors at %llu on %s)\n",
                                       mdname(mddev), s,
                                       (unsigned long long)(
                                               sect + rdev->data_offset),
                                       bdevname(rdev->bdev, b));
                                printk(KERN_NOTICE "md/raid10:%s: %s: failing "
                                       "drive\n",
                                       mdname(mddev),
                                       bdevname(rdev->bdev, b));
                                break;
                        case 1:
                                printk(KERN_INFO
                                       "md/raid10:%s: read error corrected"
                                       " (%d sectors at %llu on %s)\n",
                                       mdname(mddev), s,
                                       (unsigned long long)(
                                               sect + rdev->data_offset),
                                       bdevname(rdev->bdev, b));
                                atomic_add(s, &rdev->corrected_errors);
                        }

                        rdev_dec_pending(rdev, mddev);
                        rcu_read_lock();
                }
                rcu_read_unlock();

                sectors -= s;
                sect += s;
        }
}

static void bi_complete(struct bio *bio, int error)
{
        complete((struct completion *)bio->bi_private);
}

static int submit_bio_wait(int rw, struct bio *bio)
{
        struct completion event;
        rw |= REQ_SYNC;

        init_completion(&event);
        bio->bi_private = &event;
        bio->bi_end_io = bi_complete;
        submit_bio(rw, bio);
        wait_for_completion(&event);

        return test_bit(BIO_UPTODATE, &bio->bi_flags);
}
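/*
 * Worked example for narrow_write_error() below (added commentary, not
 * from the original source): with badblocks.shift == 3 (eight 512-byte
 * sectors per bad-block unit), a failed write starting at sector 1005 is
 * retried as a 3-sector write up to the boundary at 1008 and then in
 * aligned 8-sector pieces; each piece that still fails gets its own
 * bad-block record.
 */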
Where the write fails we record 1996 * a bad block. 1997 * It is conceivable that the bio doesn't exactly align with 1998 * blocks. We must handle this. 1999 * 2000 * We currently own a reference to the rdev. 2001 */ 2002 2003 int block_sectors; 2004 sector_t sector; 2005 int sectors; 2006 int sect_to_write = r10_bio->sectors; 2007 int ok = 1; 2008 2009 if (rdev->badblocks.shift < 0) 2010 return 0; 2011 2012 block_sectors = 1 << rdev->badblocks.shift; 2013 sector = r10_bio->sector; 2014 sectors = ((r10_bio->sector + block_sectors) 2015 & ~(sector_t)(block_sectors - 1)) 2016 - sector; 2017 2018 while (sect_to_write) { 2019 struct bio *wbio; 2020 if (sectors > sect_to_write) 2021 sectors = sect_to_write; 2022 /* Write at 'sector' for 'sectors' */ 2023 wbio = bio_clone_mddev(bio, GFP_NOIO, mddev); 2024 md_trim_bio(wbio, sector - bio->bi_sector, sectors); 2025 wbio->bi_sector = (r10_bio->devs[i].addr+ 2026 rdev->data_offset+ 2027 (sector - r10_bio->sector)); 2028 wbio->bi_bdev = rdev->bdev; 2029 if (submit_bio_wait(WRITE, wbio) == 0) 2030 /* Failure! */ 2031 ok = rdev_set_badblocks(rdev, sector, 2032 sectors, 0) 2033 && ok; 2034 2035 bio_put(wbio); 2036 sect_to_write -= sectors; 2037 sector += sectors; 2038 sectors = block_sectors; 2039 } 2040 return ok; 2041 } 2042 2043 static void handle_read_error(mddev_t *mddev, r10bio_t *r10_bio) 2044 { 2045 int slot = r10_bio->read_slot; 2046 int mirror = r10_bio->devs[slot].devnum; 2047 struct bio *bio; 2048 conf_t *conf = mddev->private; 2049 mdk_rdev_t *rdev; 2050 char b[BDEVNAME_SIZE]; 2051 unsigned long do_sync; 2052 int max_sectors; 2053 2054 /* we got a read error. Maybe the drive is bad. Maybe just 2055 * the block and we can fix it. 2056 * We freeze all other IO, and try reading the block from 2057 * other devices. When we find one, we re-write 2058 * and check whether that fixes the read error. 2059 * This is all done synchronously while the array is 2060 * frozen. 2061 */ 2062 if (mddev->ro == 0) { 2063 freeze_array(conf); 2064 fix_read_error(conf, mddev, r10_bio); 2065 unfreeze_array(conf); 2066 } 2067 rdev_dec_pending(conf->mirrors[mirror].rdev, mddev); 2068 2069 bio = r10_bio->devs[slot].bio; 2070 bdevname(bio->bi_bdev, b); 2071 r10_bio->devs[slot].bio = 2072 mddev->ro ?
IO_BLOCKED : NULL; 2073 read_more: 2074 mirror = read_balance(conf, r10_bio, &max_sectors); 2075 if (mirror == -1) { 2076 printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O" 2077 " read error for block %llu\n", 2078 mdname(mddev), b, 2079 (unsigned long long)r10_bio->sector); 2080 raid_end_bio_io(r10_bio); 2081 bio_put(bio); 2082 return; 2083 } 2084 2085 do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC); 2086 if (bio) 2087 bio_put(bio); 2088 slot = r10_bio->read_slot; 2089 rdev = conf->mirrors[mirror].rdev; 2090 printk_ratelimited( 2091 KERN_ERR 2092 "md/raid10:%s: %s: redirecting" 2093 " sector %llu to another mirror\n", 2094 mdname(mddev), 2095 bdevname(rdev->bdev, b), 2096 (unsigned long long)r10_bio->sector); 2097 bio = bio_clone_mddev(r10_bio->master_bio, 2098 GFP_NOIO, mddev); 2099 md_trim_bio(bio, 2100 r10_bio->sector - bio->bi_sector, 2101 max_sectors); 2102 r10_bio->devs[slot].bio = bio; 2103 bio->bi_sector = r10_bio->devs[slot].addr 2104 + rdev->data_offset; 2105 bio->bi_bdev = rdev->bdev; 2106 bio->bi_rw = READ | do_sync; 2107 bio->bi_private = r10_bio; 2108 bio->bi_end_io = raid10_end_read_request; 2109 if (max_sectors < r10_bio->sectors) { 2110 /* Drat - have to split this up more */ 2111 struct bio *mbio = r10_bio->master_bio; 2112 int sectors_handled = 2113 r10_bio->sector + max_sectors 2114 - mbio->bi_sector; 2115 r10_bio->sectors = max_sectors; 2116 spin_lock_irq(&conf->device_lock); 2117 if (mbio->bi_phys_segments == 0) 2118 mbio->bi_phys_segments = 2; 2119 else 2120 mbio->bi_phys_segments++; 2121 spin_unlock_irq(&conf->device_lock); 2122 generic_make_request(bio); 2123 bio = NULL; 2124 2125 r10_bio = mempool_alloc(conf->r10bio_pool, 2126 GFP_NOIO); 2127 r10_bio->master_bio = mbio; 2128 r10_bio->sectors = (mbio->bi_size >> 9) 2129 - sectors_handled; 2130 r10_bio->state = 0; 2131 set_bit(R10BIO_ReadError, 2132 &r10_bio->state); 2133 r10_bio->mddev = mddev; 2134 r10_bio->sector = mbio->bi_sector 2135 + sectors_handled; 2136 2137 goto read_more; 2138 } else 2139 generic_make_request(bio); 2140 } 2141 2142 static void handle_write_completed(conf_t *conf, r10bio_t *r10_bio) 2143 { 2144 /* Some sort of write request has finished and it 2145 * succeeded in writing where we thought there was a 2146 * bad block. So forget the bad block. 2147 * Or possibly it failed and we need to record 2148 * a bad block.
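* (These two outcomes are flagged as R10BIO_MadeGood and R10BIO_WriteError respectively; raid10d below checks both bits before calling in here.)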
2149 */ 2150 int m; 2151 mdk_rdev_t *rdev; 2152 2153 if (test_bit(R10BIO_IsSync, &r10_bio->state) || 2154 test_bit(R10BIO_IsRecover, &r10_bio->state)) { 2155 for (m = 0; m < conf->copies; m++) { 2156 int dev = r10_bio->devs[m].devnum; 2157 rdev = conf->mirrors[dev].rdev; 2158 if (r10_bio->devs[m].bio == NULL) 2159 continue; 2160 if (test_bit(BIO_UPTODATE, 2161 &r10_bio->devs[m].bio->bi_flags)) { 2162 rdev_clear_badblocks( 2163 rdev, 2164 r10_bio->devs[m].addr, 2165 r10_bio->sectors); 2166 } else { 2167 if (!rdev_set_badblocks( 2168 rdev, 2169 r10_bio->devs[m].addr, 2170 r10_bio->sectors, 0)) 2171 md_error(conf->mddev, rdev); 2172 } 2173 } 2174 put_buf(r10_bio); 2175 } else { 2176 for (m = 0; m < conf->copies; m++) { 2177 int dev = r10_bio->devs[m].devnum; 2178 struct bio *bio = r10_bio->devs[m].bio; 2179 rdev = conf->mirrors[dev].rdev; 2180 if (bio == IO_MADE_GOOD) { 2181 rdev_clear_badblocks( 2182 rdev, 2183 r10_bio->devs[m].addr, 2184 r10_bio->sectors); 2185 rdev_dec_pending(rdev, conf->mddev); 2186 } else if (bio != NULL && 2187 !test_bit(BIO_UPTODATE, &bio->bi_flags)) { 2188 if (!narrow_write_error(r10_bio, m)) { 2189 md_error(conf->mddev, rdev); 2190 set_bit(R10BIO_Degraded, 2191 &r10_bio->state); 2192 } 2193 rdev_dec_pending(rdev, conf->mddev); 2194 } 2195 } 2196 if (test_bit(R10BIO_WriteError, 2197 &r10_bio->state)) 2198 close_write(r10_bio); 2199 raid_end_bio_io(r10_bio); 2200 } 2201 } 2202 2203 static void raid10d(mddev_t *mddev) 2204 { 2205 r10bio_t *r10_bio; 2206 unsigned long flags; 2207 conf_t *conf = mddev->private; 2208 struct list_head *head = &conf->retry_list; 2209 struct blk_plug plug; 2210 2211 md_check_recovery(mddev); 2212 2213 blk_start_plug(&plug); 2214 for (;;) { 2215 2216 flush_pending_writes(conf); 2217 2218 spin_lock_irqsave(&conf->device_lock, flags); 2219 if (list_empty(head)) { 2220 spin_unlock_irqrestore(&conf->device_lock, flags); 2221 break; 2222 } 2223 r10_bio = list_entry(head->prev, r10bio_t, retry_list); 2224 list_del(head->prev); 2225 conf->nr_queued--; 2226 spin_unlock_irqrestore(&conf->device_lock, flags); 2227 2228 mddev = r10_bio->mddev; 2229 conf = mddev->private; 2230 if (test_bit(R10BIO_MadeGood, &r10_bio->state) || 2231 test_bit(R10BIO_WriteError, &r10_bio->state)) 2232 handle_write_completed(conf, r10_bio); 2233 else if (test_bit(R10BIO_IsSync, &r10_bio->state)) 2234 sync_request_write(mddev, r10_bio); 2235 else if (test_bit(R10BIO_IsRecover, &r10_bio->state)) 2236 recovery_request_write(mddev, r10_bio); 2237 else if (test_bit(R10BIO_ReadError, &r10_bio->state)) 2238 handle_read_error(mddev, r10_bio); 2239 else { 2240 /* just a partial read to be scheduled from a 2241 * separate context 2242 */ 2243 int slot = r10_bio->read_slot; 2244 generic_make_request(r10_bio->devs[slot].bio); 2245 } 2246 2247 cond_resched(); 2248 if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) 2249 md_check_recovery(mddev); 2250 } 2251 blk_finish_plug(&plug); 2252 } 2253 2254 2255 static int init_resync(conf_t *conf) 2256 { 2257 int buffs; 2258 2259 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE; 2260 BUG_ON(conf->r10buf_pool); 2261 conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf); 2262 if (!conf->r10buf_pool) 2263 return -ENOMEM; 2264 conf->next_resync = 0; 2265 return 0; 2266 } 2267 2268 /* 2269 * perform a "sync" on one "block" 2270 * 2271 * We need to make sure that no normal I/O request - particularly write 2272 * requests - conflicts with active sync requests.
2273 * 2274 * This is achieved by tracking pending requests and a 'barrier' concept 2275 * that can be installed to exclude normal IO requests. 2276 * 2277 * Resync and recovery are handled very differently. 2278 * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery. 2279 * 2280 * For resync, we iterate over virtual addresses, read all copies, 2281 * and update if there are differences. If only one copy is live, 2282 * skip it. 2283 * For recovery, we iterate over physical addresses, read a good 2284 * value for each non-in_sync drive, and over-write. 2285 * 2286 * So, for recovery we may have several outstanding complex requests for a 2287 * given address, one for each out-of-sync device. We model this by allocating 2288 * a number of r10_bio structures, one for each out-of-sync device. 2289 * As we set up these structures, we collect all bios together into a list 2290 * which we then process collectively to add pages, and then process again 2291 * to pass to generic_make_request. 2292 * 2293 * The r10_bio structures are linked using a borrowed master_bio pointer. 2294 * This link is counted in ->remaining. When the r10_bio that points to NULL 2295 * has its remaining count decremented to 0, the whole complex operation 2296 * is complete. 2297 * 2298 */ 2299 2300 static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, 2301 int *skipped, int go_faster) 2302 { 2303 conf_t *conf = mddev->private; 2304 r10bio_t *r10_bio; 2305 struct bio *biolist = NULL, *bio; 2306 sector_t max_sector, nr_sectors; 2307 int i; 2308 int max_sync; 2309 sector_t sync_blocks; 2310 sector_t sectors_skipped = 0; 2311 int chunks_skipped = 0; 2312 2313 if (!conf->r10buf_pool) 2314 if (init_resync(conf)) 2315 return 0; 2316 2317 skipped: 2318 max_sector = mddev->dev_sectors; 2319 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 2320 max_sector = mddev->resync_max_sectors; 2321 if (sector_nr >= max_sector) { 2322 /* If we aborted, we need to abort the 2323 * sync on the 'current' bitmap chunks (there can 2324 * be several when recovering multiple devices), 2325 * as we may have started syncing them but not finished. 2326 * We can find the current address in 2327 * mddev->curr_resync, but for recovery, 2328 * we need to convert that to several 2329 * virtual addresses. 2330 */ 2331 if (mddev->curr_resync < max_sector) { /* aborted */ 2332 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 2333 bitmap_end_sync(mddev->bitmap, mddev->curr_resync, 2334 &sync_blocks, 1); 2335 else for (i=0; i<conf->raid_disks; i++) { 2336 sector_t sect = 2337 raid10_find_virt(conf, mddev->curr_resync, i); 2338 bitmap_end_sync(mddev->bitmap, sect, 2339 &sync_blocks, 1); 2340 } 2341 } else /* completed sync */ 2342 conf->fullsync = 0; 2343 2344 bitmap_close_sync(mddev->bitmap); 2345 close_sync(conf); 2346 *skipped = 1; 2347 return sectors_skipped; 2348 } 2349 if (chunks_skipped >= conf->raid_disks) { 2350 /* if there has been nothing to do on any drive, 2351 * then there is nothing to do at all..
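* (chunks_skipped is advanced by the 'giveup' path whenever a chunk yields no work; once raid_disks successive chunks have been skipped, every drive has been tried.)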
2352 */ 2353 *skipped = 1; 2354 return (max_sector - sector_nr) + sectors_skipped; 2355 } 2356 2357 if (max_sector > mddev->resync_max) 2358 max_sector = mddev->resync_max; /* Don't do IO beyond here */ 2359 2360 /* make sure whole request will fit in a chunk - if chunks 2361 * are meaningful 2362 */ 2363 if (conf->near_copies < conf->raid_disks && 2364 max_sector > (sector_nr | conf->chunk_mask)) 2365 max_sector = (sector_nr | conf->chunk_mask) + 1; 2366 /* 2367 * If there is non-resync activity waiting for us then 2368 * put in a delay to throttle resync. 2369 */ 2370 if (!go_faster && conf->nr_waiting) 2371 msleep_interruptible(1000); 2372 2373 /* Again, very different code for resync and recovery. 2374 * Both must result in an r10bio with a list of bios that 2375 * have bi_end_io, bi_sector, bi_bdev set, 2376 * and bi_private set to the r10bio. 2377 * For recovery, we may actually create several r10bios 2378 * with 2 bios in each, that correspond to the bios in the main one. 2379 * In this case, the subordinate r10bios link back through a 2380 * borrowed master_bio pointer, and the counter in the master 2381 * includes a ref from each subordinate. 2382 */ 2383 /* First, we decide what to do and set ->bi_end_io 2384 * to end_sync_read if we want to read, and 2385 * end_sync_write if we will want to write. 2386 */ 2387 2388 max_sync = RESYNC_PAGES << (PAGE_SHIFT-9); 2389 if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 2390 /* recovery... the complicated one */ 2391 int j; 2392 r10_bio = NULL; 2393 2394 for (i=0 ; i<conf->raid_disks; i++) { 2395 int still_degraded; 2396 r10bio_t *rb2; 2397 sector_t sect; 2398 int must_sync; 2399 int any_working; 2400 2401 if (conf->mirrors[i].rdev == NULL || 2402 test_bit(In_sync, &conf->mirrors[i].rdev->flags)) 2403 continue; 2404 2405 still_degraded = 0; 2406 /* want to reconstruct this device */ 2407 rb2 = r10_bio; 2408 sect = raid10_find_virt(conf, sector_nr, i); 2409 /* Unless we are doing a full sync, we only need 2410 * to recover the block if it is set in the bitmap 2411 */ 2412 must_sync = bitmap_start_sync(mddev->bitmap, sect, 2413 &sync_blocks, 1); 2414 if (sync_blocks < max_sync) 2415 max_sync = sync_blocks; 2416 if (!must_sync && 2417 !conf->fullsync) { 2418 /* yep, skip the sync_blocks here, but don't assume 2419 * that there will never be anything to do here 2420 */ 2421 chunks_skipped = -1; 2422 continue; 2423 } 2424 2425 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); 2426 raise_barrier(conf, rb2 != NULL); 2427 atomic_set(&r10_bio->remaining, 0); 2428 2429 r10_bio->master_bio = (struct bio*)rb2; 2430 if (rb2) 2431 atomic_inc(&rb2->remaining); 2432 r10_bio->mddev = mddev; 2433 set_bit(R10BIO_IsRecover, &r10_bio->state); 2434 r10_bio->sector = sect; 2435 2436 raid10_find_phys(conf, r10_bio); 2437 2438 /* Need to check if the array will still be 2439 * degraded 2440 */ 2441 for (j=0; j<conf->raid_disks; j++) 2442 if (conf->mirrors[j].rdev == NULL || 2443 test_bit(Faulty, &conf->mirrors[j].rdev->flags)) { 2444 still_degraded = 1; 2445 break; 2446 } 2447 2448 must_sync = bitmap_start_sync(mddev->bitmap, sect, 2449 &sync_blocks, still_degraded); 2450 2451 any_working = 0; 2452 for (j=0; j<conf->copies;j++) { 2453 int k; 2454 int d = r10_bio->devs[j].devnum; 2455 sector_t from_addr, to_addr; 2456 mdk_rdev_t *rdev; 2457 sector_t sector, first_bad; 2458 int bad_sectors; 2459 if (!conf->mirrors[d].rdev || 2460 !test_bit(In_sync, &conf->mirrors[d].rdev->flags)) 2461 continue; 2462 /* This is where we read from */ 2463 any_working = 1;
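/* We read from this In_sync copy; the matching write to the device being rebuilt ('i') is set up below. */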
2464 rdev = conf->mirrors[d].rdev; 2465 sector = r10_bio->devs[j].addr; 2466 2467 if (is_badblock(rdev, sector, max_sync, 2468 &first_bad, &bad_sectors)) { 2469 if (first_bad > sector) 2470 max_sync = first_bad - sector; 2471 else { 2472 bad_sectors -= (sector 2473 - first_bad); 2474 if (max_sync > bad_sectors) 2475 max_sync = bad_sectors; 2476 continue; 2477 } 2478 } 2479 bio = r10_bio->devs[0].bio; 2480 bio->bi_next = biolist; 2481 biolist = bio; 2482 bio->bi_private = r10_bio; 2483 bio->bi_end_io = end_sync_read; 2484 bio->bi_rw = READ; 2485 from_addr = r10_bio->devs[j].addr; 2486 bio->bi_sector = from_addr + 2487 conf->mirrors[d].rdev->data_offset; 2488 bio->bi_bdev = conf->mirrors[d].rdev->bdev; 2489 atomic_inc(&conf->mirrors[d].rdev->nr_pending); 2490 atomic_inc(&r10_bio->remaining); 2491 /* and we write to 'i' */ 2492 2493 for (k=0; k<conf->copies; k++) 2494 if (r10_bio->devs[k].devnum == i) 2495 break; 2496 BUG_ON(k == conf->copies); 2497 bio = r10_bio->devs[1].bio; 2498 bio->bi_next = biolist; 2499 biolist = bio; 2500 bio->bi_private = r10_bio; 2501 bio->bi_end_io = end_sync_write; 2502 bio->bi_rw = WRITE; 2503 to_addr = r10_bio->devs[k].addr; 2504 bio->bi_sector = to_addr + 2505 conf->mirrors[i].rdev->data_offset; 2506 bio->bi_bdev = conf->mirrors[i].rdev->bdev; 2507 2508 r10_bio->devs[0].devnum = d; 2509 r10_bio->devs[0].addr = from_addr; 2510 r10_bio->devs[1].devnum = i; 2511 r10_bio->devs[1].addr = to_addr; 2512 2513 break; 2514 } 2515 if (j == conf->copies) { 2516 /* Cannot recover, so abort the recovery or 2517 * record a bad block */ 2518 put_buf(r10_bio); 2519 if (rb2) 2520 atomic_dec(&rb2->remaining); 2521 r10_bio = rb2; 2522 if (any_working) { 2523 /* problem is that there are bad blocks 2524 * on other device(s) 2525 */ 2526 int k; 2527 for (k = 0; k < conf->copies; k++) 2528 if (r10_bio->devs[k].devnum == i) 2529 break; 2530 if (!rdev_set_badblocks( 2531 conf->mirrors[i].rdev, 2532 r10_bio->devs[k].addr, 2533 max_sync, 0)) 2534 any_working = 0; 2535 } 2536 if (!any_working) { 2537 if (!test_and_set_bit(MD_RECOVERY_INTR, 2538 &mddev->recovery)) 2539 printk(KERN_INFO "md/raid10:%s: insufficient " 2540 "working devices for recovery.\n", 2541 mdname(mddev)); 2542 conf->mirrors[i].recovery_disabled 2543 = mddev->recovery_disabled; 2544 } 2545 break; 2546 } 2547 } 2548 if (biolist == NULL) { 2549 while (r10_bio) { 2550 r10bio_t *rb2 = r10_bio; 2551 r10_bio = (r10bio_t*) rb2->master_bio; 2552 rb2->master_bio = NULL; 2553 put_buf(rb2); 2554 } 2555 goto giveup; 2556 } 2557 } else { 2558 /* resync. 
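(Unlike recovery above, which walks each device's physical addresses, resync walks the array's virtual addresses.)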
Schedule a read for every block at this virt offset */ 2559 int count = 0; 2560 2561 bitmap_cond_end_sync(mddev->bitmap, sector_nr); 2562 2563 if (!bitmap_start_sync(mddev->bitmap, sector_nr, 2564 &sync_blocks, mddev->degraded) && 2565 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, 2566 &mddev->recovery)) { 2567 /* We can skip this block */ 2568 *skipped = 1; 2569 return sync_blocks + sectors_skipped; 2570 } 2571 if (sync_blocks < max_sync) 2572 max_sync = sync_blocks; 2573 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); 2574 2575 r10_bio->mddev = mddev; 2576 atomic_set(&r10_bio->remaining, 0); 2577 raise_barrier(conf, 0); 2578 conf->next_resync = sector_nr; 2579 2580 r10_bio->master_bio = NULL; 2581 r10_bio->sector = sector_nr; 2582 set_bit(R10BIO_IsSync, &r10_bio->state); 2583 raid10_find_phys(conf, r10_bio); 2584 r10_bio->sectors = (sector_nr | conf->chunk_mask) - sector_nr +1; 2585 2586 for (i=0; i<conf->copies; i++) { 2587 int d = r10_bio->devs[i].devnum; 2588 sector_t first_bad, sector; 2589 int bad_sectors; 2590 2591 bio = r10_bio->devs[i].bio; 2592 bio->bi_end_io = NULL; 2593 clear_bit(BIO_UPTODATE, &bio->bi_flags); 2594 if (conf->mirrors[d].rdev == NULL || 2595 test_bit(Faulty, &conf->mirrors[d].rdev->flags)) 2596 continue; 2597 sector = r10_bio->devs[i].addr; 2598 if (is_badblock(conf->mirrors[d].rdev, 2599 sector, max_sync, 2600 &first_bad, &bad_sectors)) { 2601 if (first_bad > sector) 2602 max_sync = first_bad - sector; 2603 else { 2604 bad_sectors -= (sector - first_bad); 2605 if (max_sync > bad_sectors) 2606 max_sync = bad_sectors; 2607 continue; 2608 } 2609 } 2610 atomic_inc(&conf->mirrors[d].rdev->nr_pending); 2611 atomic_inc(&r10_bio->remaining); 2612 bio->bi_next = biolist; 2613 biolist = bio; 2614 bio->bi_private = r10_bio; 2615 bio->bi_end_io = end_sync_read; 2616 bio->bi_rw = READ; 2617 bio->bi_sector = sector + 2618 conf->mirrors[d].rdev->data_offset; 2619 bio->bi_bdev = conf->mirrors[d].rdev->bdev; 2620 count++; 2621 } 2622 2623 if (count < 2) { 2624 for (i=0; i<conf->copies; i++) { 2625 int d = r10_bio->devs[i].devnum; 2626 if (r10_bio->devs[i].bio->bi_end_io) 2627 rdev_dec_pending(conf->mirrors[d].rdev, 2628 mddev); 2629 } 2630 put_buf(r10_bio); 2631 biolist = NULL; 2632 goto giveup; 2633 } 2634 } 2635 2636 for (bio = biolist; bio ; bio=bio->bi_next) { 2637 2638 bio->bi_flags &= ~(BIO_POOL_MASK - 1); 2639 if (bio->bi_end_io) 2640 bio->bi_flags |= 1 << BIO_UPTODATE; 2641 bio->bi_vcnt = 0; 2642 bio->bi_idx = 0; 2643 bio->bi_phys_segments = 0; 2644 bio->bi_size = 0; 2645 } 2646 2647 nr_sectors = 0; 2648 if (sector_nr + max_sync < max_sector) 2649 max_sector = sector_nr + max_sync; 2650 do { 2651 struct page *page; 2652 int len = PAGE_SIZE; 2653 if (sector_nr + (len>>9) > max_sector) 2654 len = (max_sector - sector_nr) << 9; 2655 if (len == 0) 2656 break; 2657 for (bio= biolist ; bio ; bio=bio->bi_next) { 2658 struct bio *bio2; 2659 page = bio->bi_io_vec[bio->bi_vcnt].bv_page; 2660 if (bio_add_page(bio, page, len, 0)) 2661 continue; 2662 2663 /* stop here */ 2664 bio->bi_io_vec[bio->bi_vcnt].bv_page = page; 2665 for (bio2 = biolist; 2666 bio2 && bio2 != bio; 2667 bio2 = bio2->bi_next) { 2668 /* remove last page from this bio */ 2669 bio2->bi_vcnt--; 2670 bio2->bi_size -= len; 2671 bio2->bi_flags &= ~(1<< BIO_SEG_VALID); 2672 } 2673 goto bio_full; 2674 } 2675 nr_sectors += len>>9; 2676 sector_nr += len>>9; 2677 } while (biolist->bi_vcnt < RESYNC_PAGES); 2678 bio_full: 2679 r10_bio->sectors = nr_sectors; 2680 2681 while (biolist) { 2682 bio = biolist; 2683
biolist = biolist->bi_next; 2684 2685 bio->bi_next = NULL; 2686 r10_bio = bio->bi_private; 2687 r10_bio->sectors = nr_sectors; 2688 2689 if (bio->bi_end_io == end_sync_read) { 2690 md_sync_acct(bio->bi_bdev, nr_sectors); 2691 generic_make_request(bio); 2692 } 2693 } 2694 2695 if (sectors_skipped) 2696 /* pretend they weren't skipped, it makes 2697 * no important difference in this case 2698 */ 2699 md_done_sync(mddev, sectors_skipped, 1); 2700 2701 return sectors_skipped + nr_sectors; 2702 giveup: 2703 /* There is nowhere to write, so all non-sync 2704 * drives must be failed or in resync, all drives 2705 * have a bad block, so try the next chunk... 2706 */ 2707 if (sector_nr + max_sync < max_sector) 2708 max_sector = sector_nr + max_sync; 2709 2710 sectors_skipped += (max_sector - sector_nr); 2711 chunks_skipped ++; 2712 sector_nr = max_sector; 2713 goto skipped; 2714 } 2715 2716 static sector_t 2717 raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks) 2718 { 2719 sector_t size; 2720 conf_t *conf = mddev->private; 2721 2722 if (!raid_disks) 2723 raid_disks = conf->raid_disks; 2724 if (!sectors) 2725 sectors = conf->dev_sectors; 2726 2727 size = sectors >> conf->chunk_shift; 2728 sector_div(size, conf->far_copies); 2729 size = size * raid_disks; 2730 sector_div(size, conf->near_copies); 2731 2732 return size << conf->chunk_shift; 2733 } 2734 2735 2736 static conf_t *setup_conf(mddev_t *mddev) 2737 { 2738 conf_t *conf = NULL; 2739 int nc, fc, fo; 2740 sector_t stride, size; 2741 int err = -EINVAL; 2742 2743 if (mddev->new_chunk_sectors < (PAGE_SIZE >> 9) || 2744 !is_power_of_2(mddev->new_chunk_sectors)) { 2745 printk(KERN_ERR "md/raid10:%s: chunk size must be " 2746 "at least PAGE_SIZE(%ld) and be a power of 2.\n", 2747 mdname(mddev), PAGE_SIZE); 2748 goto out; 2749 } 2750 2751 nc = mddev->new_layout & 255; 2752 fc = (mddev->new_layout >> 8) & 255; 2753 fo = mddev->new_layout & (1<<16); 2754 2755 if ((nc*fc) <2 || (nc*fc) > mddev->raid_disks || 2756 (mddev->new_layout >> 17)) { 2757 printk(KERN_ERR "md/raid10:%s: unsupported raid10 layout: 0x%8x\n", 2758 mdname(mddev), mddev->new_layout); 2759 goto out; 2760 } 2761 2762 err = -ENOMEM; 2763 conf = kzalloc(sizeof(conf_t), GFP_KERNEL); 2764 if (!conf) 2765 goto out; 2766 2767 conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks, 2768 GFP_KERNEL); 2769 if (!conf->mirrors) 2770 goto out; 2771 2772 conf->tmppage = alloc_page(GFP_KERNEL); 2773 if (!conf->tmppage) 2774 goto out; 2775 2776 2777 conf->raid_disks = mddev->raid_disks; 2778 conf->near_copies = nc; 2779 conf->far_copies = fc; 2780 conf->copies = nc*fc; 2781 conf->far_offset = fo; 2782 conf->chunk_mask = mddev->new_chunk_sectors - 1; 2783 conf->chunk_shift = ffz(~mddev->new_chunk_sectors); 2784 2785 conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc, 2786 r10bio_pool_free, conf); 2787 if (!conf->r10bio_pool) 2788 goto out; 2789 2790 size = mddev->dev_sectors >> conf->chunk_shift; 2791 sector_div(size, fc); 2792 size = size * conf->raid_disks; 2793 sector_div(size, nc); 2794 /* 'size' is now the number of chunks in the array */ 2795 /* calculate "used chunks per device" in 'stride' */ 2796 stride = size * conf->copies; 2797 2798 /* We need to round up when dividing by raid_disks to 2799 * get the stride size. 
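* For example (illustrative numbers only): with raid_disks=4, near_copies=2, far_copies=1 and 100 chunks per device, 'size' above is 100/1*4/2 = 200 data chunks, so stride becomes 200*2/4 = 100 chunks used per device and dev_sectors = 100 << chunk_shift.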
2800 */ 2801 stride += conf->raid_disks - 1; 2802 sector_div(stride, conf->raid_disks); 2803 2804 conf->dev_sectors = stride << conf->chunk_shift; 2805 2806 if (fo) 2807 stride = 1; 2808 else 2809 sector_div(stride, fc); 2810 conf->stride = stride << conf->chunk_shift; 2811 2812 2813 spin_lock_init(&conf->device_lock); 2814 INIT_LIST_HEAD(&conf->retry_list); 2815 2816 spin_lock_init(&conf->resync_lock); 2817 init_waitqueue_head(&conf->wait_barrier); 2818 2819 conf->thread = md_register_thread(raid10d, mddev, NULL); 2820 if (!conf->thread) 2821 goto out; 2822 2823 conf->mddev = mddev; 2824 return conf; 2825 2826 out: 2827 printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n", 2828 mdname(mddev)); 2829 if (conf) { 2830 if (conf->r10bio_pool) 2831 mempool_destroy(conf->r10bio_pool); 2832 kfree(conf->mirrors); 2833 safe_put_page(conf->tmppage); 2834 kfree(conf); 2835 } 2836 return ERR_PTR(err); 2837 } 2838 2839 static int run(mddev_t *mddev) 2840 { 2841 conf_t *conf; 2842 int i, disk_idx, chunk_size; 2843 mirror_info_t *disk; 2844 mdk_rdev_t *rdev; 2845 sector_t size; 2846 2847 /* 2848 * copy the already verified devices into our private RAID10 2849 * bookkeeping area. [whatever we allocate in run(), 2850 * should be freed in stop()] 2851 */ 2852 2853 if (mddev->private == NULL) { 2854 conf = setup_conf(mddev); 2855 if (IS_ERR(conf)) 2856 return PTR_ERR(conf); 2857 mddev->private = conf; 2858 } 2859 conf = mddev->private; 2860 if (!conf) 2861 goto out; 2862 2863 mddev->thread = conf->thread; 2864 conf->thread = NULL; 2865 2866 chunk_size = mddev->chunk_sectors << 9; 2867 blk_queue_io_min(mddev->queue, chunk_size); 2868 if (conf->raid_disks % conf->near_copies) 2869 blk_queue_io_opt(mddev->queue, chunk_size * conf->raid_disks); 2870 else 2871 blk_queue_io_opt(mddev->queue, chunk_size * 2872 (conf->raid_disks / conf->near_copies)); 2873 2874 list_for_each_entry(rdev, &mddev->disks, same_set) { 2875 2876 disk_idx = rdev->raid_disk; 2877 if (disk_idx >= conf->raid_disks 2878 || disk_idx < 0) 2879 continue; 2880 disk = conf->mirrors + disk_idx; 2881 2882 disk->rdev = rdev; 2883 disk_stack_limits(mddev->gendisk, rdev->bdev, 2884 rdev->data_offset << 9); 2885 /* as we don't honour merge_bvec_fn, we must never risk 2886 * violating it, so limit max_segments to 1 lying 2887 * within a single page. 
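* (With a single segment bounded to one page, no request seen by the lower device can straddle a page, so its merge_bvec_fn can never be violated.)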
2888 */ 2889 if (rdev->bdev->bd_disk->queue->merge_bvec_fn) { 2890 blk_queue_max_segments(mddev->queue, 1); 2891 blk_queue_segment_boundary(mddev->queue, 2892 PAGE_CACHE_SIZE - 1); 2893 } 2894 2895 disk->head_position = 0; 2896 } 2897 /* need to check that every block has at least one working mirror */ 2898 if (!enough(conf, -1)) { 2899 printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n", 2900 mdname(mddev)); 2901 goto out_free_conf; 2902 } 2903 2904 mddev->degraded = 0; 2905 for (i = 0; i < conf->raid_disks; i++) { 2906 2907 disk = conf->mirrors + i; 2908 2909 if (!disk->rdev || 2910 !test_bit(In_sync, &disk->rdev->flags)) { 2911 disk->head_position = 0; 2912 mddev->degraded++; 2913 if (disk->rdev) 2914 conf->fullsync = 1; 2915 } 2916 } 2917 2918 if (mddev->recovery_cp != MaxSector) 2919 printk(KERN_NOTICE "md/raid10:%s: not clean" 2920 " -- starting background reconstruction\n", 2921 mdname(mddev)); 2922 printk(KERN_INFO 2923 "md/raid10:%s: active with %d out of %d devices\n", 2924 mdname(mddev), conf->raid_disks - mddev->degraded, 2925 conf->raid_disks); 2926 /* 2927 * Ok, everything is just fine now 2928 */ 2929 mddev->dev_sectors = conf->dev_sectors; 2930 size = raid10_size(mddev, 0, 0); 2931 md_set_array_sectors(mddev, size); 2932 mddev->resync_max_sectors = size; 2933 2934 mddev->queue->backing_dev_info.congested_fn = raid10_congested; 2935 mddev->queue->backing_dev_info.congested_data = mddev; 2936 2937 /* Calculate max read-ahead size. 2938 * We need to readahead at least twice a whole stripe.... 2939 * maybe... 2940 */ 2941 { 2942 int stripe = conf->raid_disks * 2943 ((mddev->chunk_sectors << 9) / PAGE_SIZE); 2944 stripe /= conf->near_copies; 2945 if (mddev->queue->backing_dev_info.ra_pages < 2* stripe) 2946 mddev->queue->backing_dev_info.ra_pages = 2* stripe; 2947 } 2948 2949 if (conf->near_copies < conf->raid_disks) 2950 blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec); 2951 2952 if (md_integrity_register(mddev)) 2953 goto out_free_conf; 2954 2955 return 0; 2956 2957 out_free_conf: 2958 md_unregister_thread(mddev->thread); 2959 if (conf->r10bio_pool) 2960 mempool_destroy(conf->r10bio_pool); 2961 safe_put_page(conf->tmppage); 2962 kfree(conf->mirrors); 2963 kfree(conf); 2964 mddev->private = NULL; 2965 out: 2966 return -EIO; 2967 } 2968 2969 static int stop(mddev_t *mddev) 2970 { 2971 conf_t *conf = mddev->private; 2972 2973 raise_barrier(conf, 0); 2974 lower_barrier(conf); 2975 2976 md_unregister_thread(mddev->thread); 2977 mddev->thread = NULL; 2978 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 2979 if (conf->r10bio_pool) 2980 mempool_destroy(conf->r10bio_pool); 2981 kfree(conf->mirrors); 2982 kfree(conf); 2983 mddev->private = NULL; 2984 return 0; 2985 } 2986 2987 static void raid10_quiesce(mddev_t *mddev, int state) 2988 { 2989 conf_t *conf = mddev->private; 2990 2991 switch(state) { 2992 case 1: 2993 raise_barrier(conf, 0); 2994 break; 2995 case 0: 2996 lower_barrier(conf); 2997 break; 2998 } 2999 } 3000 3001 static void *raid10_takeover_raid0(mddev_t *mddev) 3002 { 3003 mdk_rdev_t *rdev; 3004 conf_t *conf; 3005 3006 if (mddev->degraded > 0) { 3007 printk(KERN_ERR "md/raid10:%s: Error: degraded raid0!\n", 3008 mdname(mddev)); 3009 return ERR_PTR(-EINVAL); 3010 } 3011 3012 /* Set new parameters */ 3013 mddev->new_level = 10; 3014 /* new layout: far_copies = 1, near_copies = 2 */ 3015 mddev->new_layout = (1<<8) + 2; 3016 mddev->new_chunk_sectors = mddev->chunk_sectors; 3017 mddev->delta_disks = mddev->raid_disks; 3018 
mddev->raid_disks *= 2; 3019 /* make sure it will be not marked as dirty */ 3020 mddev->recovery_cp = MaxSector; 3021 3022 conf = setup_conf(mddev); 3023 if (!IS_ERR(conf)) { 3024 list_for_each_entry(rdev, &mddev->disks, same_set) 3025 if (rdev->raid_disk >= 0) 3026 rdev->new_raid_disk = rdev->raid_disk * 2; 3027 conf->barrier = 1; 3028 } 3029 3030 return conf; 3031 } 3032 3033 static void *raid10_takeover(mddev_t *mddev) 3034 { 3035 struct raid0_private_data *raid0_priv; 3036 3037 /* raid10 can take over: 3038 * raid0 - providing it has only two drives 3039 */ 3040 if (mddev->level == 0) { 3041 /* for raid0 takeover only one zone is supported */ 3042 raid0_priv = mddev->private; 3043 if (raid0_priv->nr_strip_zones > 1) { 3044 printk(KERN_ERR "md/raid10:%s: cannot takeover raid 0" 3045 " with more than one zone.\n", 3046 mdname(mddev)); 3047 return ERR_PTR(-EINVAL); 3048 } 3049 return raid10_takeover_raid0(mddev); 3050 } 3051 return ERR_PTR(-EINVAL); 3052 } 3053 3054 static struct mdk_personality raid10_personality = 3055 { 3056 .name = "raid10", 3057 .level = 10, 3058 .owner = THIS_MODULE, 3059 .make_request = make_request, 3060 .run = run, 3061 .stop = stop, 3062 .status = status, 3063 .error_handler = error, 3064 .hot_add_disk = raid10_add_disk, 3065 .hot_remove_disk= raid10_remove_disk, 3066 .spare_active = raid10_spare_active, 3067 .sync_request = sync_request, 3068 .quiesce = raid10_quiesce, 3069 .size = raid10_size, 3070 .takeover = raid10_takeover, 3071 }; 3072 3073 static int __init raid_init(void) 3074 { 3075 return register_md_personality(&raid10_personality); 3076 } 3077 3078 static void raid_exit(void) 3079 { 3080 unregister_md_personality(&raid10_personality); 3081 } 3082 3083 module_init(raid_init); 3084 module_exit(raid_exit); 3085 MODULE_LICENSE("GPL"); 3086 MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD"); 3087 MODULE_ALIAS("md-personality-9"); /* RAID10 */ 3088 MODULE_ALIAS("md-raid10"); 3089 MODULE_ALIAS("md-level-10"); 3090