1 /* 2 * raid1.c : Multiple Devices driver for Linux 3 * 4 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat 5 * 6 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman 7 * 8 * RAID-1 management functions. 9 * 10 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000 11 * 12 * Fixes to reconstruction by Jakob Østergaard" <jakob@ostenfeld.dk> 13 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au> 14 * 15 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support 16 * bitmapped intelligence in resync: 17 * 18 * - bitmap marked during normal i/o 19 * - bitmap used to skip nondirty blocks during sync 20 * 21 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology: 22 * - persistent bitmap code 23 * 24 * This program is free software; you can redistribute it and/or modify 25 * it under the terms of the GNU General Public License as published by 26 * the Free Software Foundation; either version 2, or (at your option) 27 * any later version. 28 * 29 * You should have received a copy of the GNU General Public License 30 * (for example /usr/src/linux/COPYING); if not, write to the Free 31 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 32 */ 33 34 #include <linux/delay.h> 35 #include <linux/blkdev.h> 36 #include <linux/seq_file.h> 37 #include "md.h" 38 #include "dm-bio-list.h" 39 #include "raid1.h" 40 #include "bitmap.h" 41 42 #define DEBUG 0 43 #if DEBUG 44 #define PRINTK(x...) printk(x) 45 #else 46 #define PRINTK(x...) 47 #endif 48 49 /* 50 * Number of guaranteed r1bios in case of extreme VM load: 51 */ 52 #define NR_RAID1_BIOS 256 53 54 55 static void unplug_slaves(mddev_t *mddev); 56 57 static void allow_barrier(conf_t *conf); 58 static void lower_barrier(conf_t *conf); 59 60 static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) 61 { 62 struct pool_info *pi = data; 63 r1bio_t *r1_bio; 64 int size = offsetof(r1bio_t, bios[pi->raid_disks]); 65 66 /* allocate a r1bio with room for raid_disks entries in the bios array */ 67 r1_bio = kzalloc(size, gfp_flags); 68 if (!r1_bio) 69 unplug_slaves(pi->mddev); 70 71 return r1_bio; 72 } 73 74 static void r1bio_pool_free(void *r1_bio, void *data) 75 { 76 kfree(r1_bio); 77 } 78 79 #define RESYNC_BLOCK_SIZE (64*1024) 80 //#define RESYNC_BLOCK_SIZE PAGE_SIZE 81 #define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9) 82 #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE) 83 #define RESYNC_WINDOW (2048*1024) 84 85 static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) 86 { 87 struct pool_info *pi = data; 88 struct page *page; 89 r1bio_t *r1_bio; 90 struct bio *bio; 91 int i, j; 92 93 r1_bio = r1bio_pool_alloc(gfp_flags, pi); 94 if (!r1_bio) { 95 unplug_slaves(pi->mddev); 96 return NULL; 97 } 98 99 /* 100 * Allocate bios : 1 for reading, n-1 for writing 101 */ 102 for (j = pi->raid_disks ; j-- ; ) { 103 bio = bio_alloc(gfp_flags, RESYNC_PAGES); 104 if (!bio) 105 goto out_free_bio; 106 r1_bio->bios[j] = bio; 107 } 108 /* 109 * Allocate RESYNC_PAGES data pages and attach them to 110 * the first bio. 111 * If this is a user-requested check/repair, allocate 112 * RESYNC_PAGES for each bio. 113 */ 114 if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) 115 j = pi->raid_disks; 116 else 117 j = 1; 118 while(j--) { 119 bio = r1_bio->bios[j]; 120 for (i = 0; i < RESYNC_PAGES; i++) { 121 page = alloc_page(gfp_flags); 122 if (unlikely(!page)) 123 goto out_free_pages; 124 125 bio->bi_io_vec[i].bv_page = page; 126 bio->bi_vcnt = i+1; 127 } 128 } 129 /* If not user-requests, copy the page pointers to all bios */ 130 if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) { 131 for (i=0; i<RESYNC_PAGES ; i++) 132 for (j=1; j<pi->raid_disks; j++) 133 r1_bio->bios[j]->bi_io_vec[i].bv_page = 134 r1_bio->bios[0]->bi_io_vec[i].bv_page; 135 } 136 137 r1_bio->master_bio = NULL; 138 139 return r1_bio; 140 141 out_free_pages: 142 for (j=0 ; j < pi->raid_disks; j++) 143 for (i=0; i < r1_bio->bios[j]->bi_vcnt ; i++) 144 put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page); 145 j = -1; 146 out_free_bio: 147 while ( ++j < pi->raid_disks ) 148 bio_put(r1_bio->bios[j]); 149 r1bio_pool_free(r1_bio, data); 150 return NULL; 151 } 152 153 static void r1buf_pool_free(void *__r1_bio, void *data) 154 { 155 struct pool_info *pi = data; 156 int i,j; 157 r1bio_t *r1bio = __r1_bio; 158 159 for (i = 0; i < RESYNC_PAGES; i++) 160 for (j = pi->raid_disks; j-- ;) { 161 if (j == 0 || 162 r1bio->bios[j]->bi_io_vec[i].bv_page != 163 r1bio->bios[0]->bi_io_vec[i].bv_page) 164 safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page); 165 } 166 for (i=0 ; i < pi->raid_disks; i++) 167 bio_put(r1bio->bios[i]); 168 169 r1bio_pool_free(r1bio, data); 170 } 171 172 static void put_all_bios(conf_t *conf, r1bio_t *r1_bio) 173 { 174 int i; 175 176 for (i = 0; i < conf->raid_disks; i++) { 177 struct bio **bio = r1_bio->bios + i; 178 if (*bio && *bio != IO_BLOCKED) 179 bio_put(*bio); 180 *bio = NULL; 181 } 182 } 183 184 static void free_r1bio(r1bio_t *r1_bio) 185 { 186 conf_t *conf = mddev_to_conf(r1_bio->mddev); 187 188 /* 189 * Wake up any possible resync thread that waits for the device 190 * to go idle. 191 */ 192 allow_barrier(conf); 193 194 put_all_bios(conf, r1_bio); 195 mempool_free(r1_bio, conf->r1bio_pool); 196 } 197 198 static void put_buf(r1bio_t *r1_bio) 199 { 200 conf_t *conf = mddev_to_conf(r1_bio->mddev); 201 int i; 202 203 for (i=0; i<conf->raid_disks; i++) { 204 struct bio *bio = r1_bio->bios[i]; 205 if (bio->bi_end_io) 206 rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev); 207 } 208 209 mempool_free(r1_bio, conf->r1buf_pool); 210 211 lower_barrier(conf); 212 } 213 214 static void reschedule_retry(r1bio_t *r1_bio) 215 { 216 unsigned long flags; 217 mddev_t *mddev = r1_bio->mddev; 218 conf_t *conf = mddev_to_conf(mddev); 219 220 spin_lock_irqsave(&conf->device_lock, flags); 221 list_add(&r1_bio->retry_list, &conf->retry_list); 222 conf->nr_queued ++; 223 spin_unlock_irqrestore(&conf->device_lock, flags); 224 225 wake_up(&conf->wait_barrier); 226 md_wakeup_thread(mddev->thread); 227 } 228 229 /* 230 * raid_end_bio_io() is called when we have finished servicing a mirrored 231 * operation and are ready to return a success/failure code to the buffer 232 * cache layer. 233 */ 234 static void raid_end_bio_io(r1bio_t *r1_bio) 235 { 236 struct bio *bio = r1_bio->master_bio; 237 238 /* if nobody has done the final endio yet, do it now */ 239 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { 240 PRINTK(KERN_DEBUG "raid1: sync end %s on sectors %llu-%llu\n", 241 (bio_data_dir(bio) == WRITE) ? "write" : "read", 242 (unsigned long long) bio->bi_sector, 243 (unsigned long long) bio->bi_sector + 244 (bio->bi_size >> 9) - 1); 245 246 bio_endio(bio, 247 test_bit(R1BIO_Uptodate, &r1_bio->state) ? 0 : -EIO); 248 } 249 free_r1bio(r1_bio); 250 } 251 252 /* 253 * Update disk head position estimator based on IRQ completion info. 254 */ 255 static inline void update_head_pos(int disk, r1bio_t *r1_bio) 256 { 257 conf_t *conf = mddev_to_conf(r1_bio->mddev); 258 259 conf->mirrors[disk].head_position = 260 r1_bio->sector + (r1_bio->sectors); 261 } 262 263 static void raid1_end_read_request(struct bio *bio, int error) 264 { 265 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 266 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); 267 int mirror; 268 conf_t *conf = mddev_to_conf(r1_bio->mddev); 269 270 mirror = r1_bio->read_disk; 271 /* 272 * this branch is our 'one mirror IO has finished' event handler: 273 */ 274 update_head_pos(mirror, r1_bio); 275 276 if (uptodate) 277 set_bit(R1BIO_Uptodate, &r1_bio->state); 278 else { 279 /* If all other devices have failed, we want to return 280 * the error upwards rather than fail the last device. 281 * Here we redefine "uptodate" to mean "Don't want to retry" 282 */ 283 unsigned long flags; 284 spin_lock_irqsave(&conf->device_lock, flags); 285 if (r1_bio->mddev->degraded == conf->raid_disks || 286 (r1_bio->mddev->degraded == conf->raid_disks-1 && 287 !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags))) 288 uptodate = 1; 289 spin_unlock_irqrestore(&conf->device_lock, flags); 290 } 291 292 if (uptodate) 293 raid_end_bio_io(r1_bio); 294 else { 295 /* 296 * oops, read error: 297 */ 298 char b[BDEVNAME_SIZE]; 299 if (printk_ratelimit()) 300 printk(KERN_ERR "raid1: %s: rescheduling sector %llu\n", 301 bdevname(conf->mirrors[mirror].rdev->bdev,b), (unsigned long long)r1_bio->sector); 302 reschedule_retry(r1_bio); 303 } 304 305 rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev); 306 } 307 308 static void raid1_end_write_request(struct bio *bio, int error) 309 { 310 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 311 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); 312 int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state); 313 conf_t *conf = mddev_to_conf(r1_bio->mddev); 314 struct bio *to_put = NULL; 315 316 317 for (mirror = 0; mirror < conf->raid_disks; mirror++) 318 if (r1_bio->bios[mirror] == bio) 319 break; 320 321 if (error == -EOPNOTSUPP && test_bit(R1BIO_Barrier, &r1_bio->state)) { 322 set_bit(BarriersNotsupp, &conf->mirrors[mirror].rdev->flags); 323 set_bit(R1BIO_BarrierRetry, &r1_bio->state); 324 r1_bio->mddev->barriers_work = 0; 325 /* Don't rdev_dec_pending in this branch - keep it for the retry */ 326 } else { 327 /* 328 * this branch is our 'one mirror IO has finished' event handler: 329 */ 330 r1_bio->bios[mirror] = NULL; 331 to_put = bio; 332 if (!uptodate) { 333 md_error(r1_bio->mddev, conf->mirrors[mirror].rdev); 334 /* an I/O failed, we can't clear the bitmap */ 335 set_bit(R1BIO_Degraded, &r1_bio->state); 336 } else 337 /* 338 * Set R1BIO_Uptodate in our master bio, so that 339 * we will return a good error code for to the higher 340 * levels even if IO on some other mirrored buffer fails. 341 * 342 * The 'master' represents the composite IO operation to 343 * user-side. So if something waits for IO, then it will 344 * wait for the 'master' bio. 345 */ 346 set_bit(R1BIO_Uptodate, &r1_bio->state); 347 348 update_head_pos(mirror, r1_bio); 349 350 if (behind) { 351 if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags)) 352 atomic_dec(&r1_bio->behind_remaining); 353 354 /* In behind mode, we ACK the master bio once the I/O has safely 355 * reached all non-writemostly disks. Setting the Returned bit 356 * ensures that this gets done only once -- we don't ever want to 357 * return -EIO here, instead we'll wait */ 358 359 if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) && 360 test_bit(R1BIO_Uptodate, &r1_bio->state)) { 361 /* Maybe we can return now */ 362 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { 363 struct bio *mbio = r1_bio->master_bio; 364 PRINTK(KERN_DEBUG "raid1: behind end write sectors %llu-%llu\n", 365 (unsigned long long) mbio->bi_sector, 366 (unsigned long long) mbio->bi_sector + 367 (mbio->bi_size >> 9) - 1); 368 bio_endio(mbio, 0); 369 } 370 } 371 } 372 rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev); 373 } 374 /* 375 * 376 * Let's see if all mirrored write operations have finished 377 * already. 378 */ 379 if (atomic_dec_and_test(&r1_bio->remaining)) { 380 if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) 381 reschedule_retry(r1_bio); 382 else { 383 /* it really is the end of this request */ 384 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { 385 /* free extra copy of the data pages */ 386 int i = bio->bi_vcnt; 387 while (i--) 388 safe_put_page(bio->bi_io_vec[i].bv_page); 389 } 390 /* clear the bitmap if all writes complete successfully */ 391 bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector, 392 r1_bio->sectors, 393 !test_bit(R1BIO_Degraded, &r1_bio->state), 394 behind); 395 md_write_end(r1_bio->mddev); 396 raid_end_bio_io(r1_bio); 397 } 398 } 399 400 if (to_put) 401 bio_put(to_put); 402 } 403 404 405 /* 406 * This routine returns the disk from which the requested read should 407 * be done. There is a per-array 'next expected sequential IO' sector 408 * number - if this matches on the next IO then we use the last disk. 409 * There is also a per-disk 'last know head position' sector that is 410 * maintained from IRQ contexts, both the normal and the resync IO 411 * completion handlers update this position correctly. If there is no 412 * perfect sequential match then we pick the disk whose head is closest. 413 * 414 * If there are 2 mirrors in the same 2 devices, performance degrades 415 * because position is mirror, not device based. 416 * 417 * The rdev for the device selected will have nr_pending incremented. 418 */ 419 static int read_balance(conf_t *conf, r1bio_t *r1_bio) 420 { 421 const unsigned long this_sector = r1_bio->sector; 422 int new_disk = conf->last_used, disk = new_disk; 423 int wonly_disk = -1; 424 const int sectors = r1_bio->sectors; 425 sector_t new_distance, current_distance; 426 mdk_rdev_t *rdev; 427 428 rcu_read_lock(); 429 /* 430 * Check if we can balance. We can balance on the whole 431 * device if no resync is going on, or below the resync window. 432 * We take the first readable disk when above the resync window. 433 */ 434 retry: 435 if (conf->mddev->recovery_cp < MaxSector && 436 (this_sector + sectors >= conf->next_resync)) { 437 /* Choose the first operation device, for consistancy */ 438 new_disk = 0; 439 440 for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev); 441 r1_bio->bios[new_disk] == IO_BLOCKED || 442 !rdev || !test_bit(In_sync, &rdev->flags) 443 || test_bit(WriteMostly, &rdev->flags); 444 rdev = rcu_dereference(conf->mirrors[++new_disk].rdev)) { 445 446 if (rdev && test_bit(In_sync, &rdev->flags) && 447 r1_bio->bios[new_disk] != IO_BLOCKED) 448 wonly_disk = new_disk; 449 450 if (new_disk == conf->raid_disks - 1) { 451 new_disk = wonly_disk; 452 break; 453 } 454 } 455 goto rb_out; 456 } 457 458 459 /* make sure the disk is operational */ 460 for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev); 461 r1_bio->bios[new_disk] == IO_BLOCKED || 462 !rdev || !test_bit(In_sync, &rdev->flags) || 463 test_bit(WriteMostly, &rdev->flags); 464 rdev = rcu_dereference(conf->mirrors[new_disk].rdev)) { 465 466 if (rdev && test_bit(In_sync, &rdev->flags) && 467 r1_bio->bios[new_disk] != IO_BLOCKED) 468 wonly_disk = new_disk; 469 470 if (new_disk <= 0) 471 new_disk = conf->raid_disks; 472 new_disk--; 473 if (new_disk == disk) { 474 new_disk = wonly_disk; 475 break; 476 } 477 } 478 479 if (new_disk < 0) 480 goto rb_out; 481 482 disk = new_disk; 483 /* now disk == new_disk == starting point for search */ 484 485 /* 486 * Don't change to another disk for sequential reads: 487 */ 488 if (conf->next_seq_sect == this_sector) 489 goto rb_out; 490 if (this_sector == conf->mirrors[new_disk].head_position) 491 goto rb_out; 492 493 current_distance = abs(this_sector - conf->mirrors[disk].head_position); 494 495 /* Find the disk whose head is closest */ 496 497 do { 498 if (disk <= 0) 499 disk = conf->raid_disks; 500 disk--; 501 502 rdev = rcu_dereference(conf->mirrors[disk].rdev); 503 504 if (!rdev || r1_bio->bios[disk] == IO_BLOCKED || 505 !test_bit(In_sync, &rdev->flags) || 506 test_bit(WriteMostly, &rdev->flags)) 507 continue; 508 509 if (!atomic_read(&rdev->nr_pending)) { 510 new_disk = disk; 511 break; 512 } 513 new_distance = abs(this_sector - conf->mirrors[disk].head_position); 514 if (new_distance < current_distance) { 515 current_distance = new_distance; 516 new_disk = disk; 517 } 518 } while (disk != conf->last_used); 519 520 rb_out: 521 522 523 if (new_disk >= 0) { 524 rdev = rcu_dereference(conf->mirrors[new_disk].rdev); 525 if (!rdev) 526 goto retry; 527 atomic_inc(&rdev->nr_pending); 528 if (!test_bit(In_sync, &rdev->flags)) { 529 /* cannot risk returning a device that failed 530 * before we inc'ed nr_pending 531 */ 532 rdev_dec_pending(rdev, conf->mddev); 533 goto retry; 534 } 535 conf->next_seq_sect = this_sector + sectors; 536 conf->last_used = new_disk; 537 } 538 rcu_read_unlock(); 539 540 return new_disk; 541 } 542 543 static void unplug_slaves(mddev_t *mddev) 544 { 545 conf_t *conf = mddev_to_conf(mddev); 546 int i; 547 548 rcu_read_lock(); 549 for (i=0; i<mddev->raid_disks; i++) { 550 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); 551 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { 552 struct request_queue *r_queue = bdev_get_queue(rdev->bdev); 553 554 atomic_inc(&rdev->nr_pending); 555 rcu_read_unlock(); 556 557 blk_unplug(r_queue); 558 559 rdev_dec_pending(rdev, mddev); 560 rcu_read_lock(); 561 } 562 } 563 rcu_read_unlock(); 564 } 565 566 static void raid1_unplug(struct request_queue *q) 567 { 568 mddev_t *mddev = q->queuedata; 569 570 unplug_slaves(mddev); 571 md_wakeup_thread(mddev->thread); 572 } 573 574 static int raid1_congested(void *data, int bits) 575 { 576 mddev_t *mddev = data; 577 conf_t *conf = mddev_to_conf(mddev); 578 int i, ret = 0; 579 580 rcu_read_lock(); 581 for (i = 0; i < mddev->raid_disks; i++) { 582 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); 583 if (rdev && !test_bit(Faulty, &rdev->flags)) { 584 struct request_queue *q = bdev_get_queue(rdev->bdev); 585 586 /* Note the '|| 1' - when read_balance prefers 587 * non-congested targets, it can be removed 588 */ 589 if ((bits & (1<<BDI_async_congested)) || 1) 590 ret |= bdi_congested(&q->backing_dev_info, bits); 591 else 592 ret &= bdi_congested(&q->backing_dev_info, bits); 593 } 594 } 595 rcu_read_unlock(); 596 return ret; 597 } 598 599 600 static int flush_pending_writes(conf_t *conf) 601 { 602 /* Any writes that have been queued but are awaiting 603 * bitmap updates get flushed here. 604 * We return 1 if any requests were actually submitted. 605 */ 606 int rv = 0; 607 608 spin_lock_irq(&conf->device_lock); 609 610 if (conf->pending_bio_list.head) { 611 struct bio *bio; 612 bio = bio_list_get(&conf->pending_bio_list); 613 blk_remove_plug(conf->mddev->queue); 614 spin_unlock_irq(&conf->device_lock); 615 /* flush any pending bitmap writes to 616 * disk before proceeding w/ I/O */ 617 bitmap_unplug(conf->mddev->bitmap); 618 619 while (bio) { /* submit pending writes */ 620 struct bio *next = bio->bi_next; 621 bio->bi_next = NULL; 622 generic_make_request(bio); 623 bio = next; 624 } 625 rv = 1; 626 } else 627 spin_unlock_irq(&conf->device_lock); 628 return rv; 629 } 630 631 /* Barriers.... 632 * Sometimes we need to suspend IO while we do something else, 633 * either some resync/recovery, or reconfigure the array. 634 * To do this we raise a 'barrier'. 635 * The 'barrier' is a counter that can be raised multiple times 636 * to count how many activities are happening which preclude 637 * normal IO. 638 * We can only raise the barrier if there is no pending IO. 639 * i.e. if nr_pending == 0. 640 * We choose only to raise the barrier if no-one is waiting for the 641 * barrier to go down. This means that as soon as an IO request 642 * is ready, no other operations which require a barrier will start 643 * until the IO request has had a chance. 644 * 645 * So: regular IO calls 'wait_barrier'. When that returns there 646 * is no backgroup IO happening, It must arrange to call 647 * allow_barrier when it has finished its IO. 648 * backgroup IO calls must call raise_barrier. Once that returns 649 * there is no normal IO happeing. It must arrange to call 650 * lower_barrier when the particular background IO completes. 651 */ 652 #define RESYNC_DEPTH 32 653 654 static void raise_barrier(conf_t *conf) 655 { 656 spin_lock_irq(&conf->resync_lock); 657 658 /* Wait until no block IO is waiting */ 659 wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting, 660 conf->resync_lock, 661 raid1_unplug(conf->mddev->queue)); 662 663 /* block any new IO from starting */ 664 conf->barrier++; 665 666 /* No wait for all pending IO to complete */ 667 wait_event_lock_irq(conf->wait_barrier, 668 !conf->nr_pending && conf->barrier < RESYNC_DEPTH, 669 conf->resync_lock, 670 raid1_unplug(conf->mddev->queue)); 671 672 spin_unlock_irq(&conf->resync_lock); 673 } 674 675 static void lower_barrier(conf_t *conf) 676 { 677 unsigned long flags; 678 spin_lock_irqsave(&conf->resync_lock, flags); 679 conf->barrier--; 680 spin_unlock_irqrestore(&conf->resync_lock, flags); 681 wake_up(&conf->wait_barrier); 682 } 683 684 static void wait_barrier(conf_t *conf) 685 { 686 spin_lock_irq(&conf->resync_lock); 687 if (conf->barrier) { 688 conf->nr_waiting++; 689 wait_event_lock_irq(conf->wait_barrier, !conf->barrier, 690 conf->resync_lock, 691 raid1_unplug(conf->mddev->queue)); 692 conf->nr_waiting--; 693 } 694 conf->nr_pending++; 695 spin_unlock_irq(&conf->resync_lock); 696 } 697 698 static void allow_barrier(conf_t *conf) 699 { 700 unsigned long flags; 701 spin_lock_irqsave(&conf->resync_lock, flags); 702 conf->nr_pending--; 703 spin_unlock_irqrestore(&conf->resync_lock, flags); 704 wake_up(&conf->wait_barrier); 705 } 706 707 static void freeze_array(conf_t *conf) 708 { 709 /* stop syncio and normal IO and wait for everything to 710 * go quite. 711 * We increment barrier and nr_waiting, and then 712 * wait until nr_pending match nr_queued+1 713 * This is called in the context of one normal IO request 714 * that has failed. Thus any sync request that might be pending 715 * will be blocked by nr_pending, and we need to wait for 716 * pending IO requests to complete or be queued for re-try. 717 * Thus the number queued (nr_queued) plus this request (1) 718 * must match the number of pending IOs (nr_pending) before 719 * we continue. 720 */ 721 spin_lock_irq(&conf->resync_lock); 722 conf->barrier++; 723 conf->nr_waiting++; 724 wait_event_lock_irq(conf->wait_barrier, 725 conf->nr_pending == conf->nr_queued+1, 726 conf->resync_lock, 727 ({ flush_pending_writes(conf); 728 raid1_unplug(conf->mddev->queue); })); 729 spin_unlock_irq(&conf->resync_lock); 730 } 731 static void unfreeze_array(conf_t *conf) 732 { 733 /* reverse the effect of the freeze */ 734 spin_lock_irq(&conf->resync_lock); 735 conf->barrier--; 736 conf->nr_waiting--; 737 wake_up(&conf->wait_barrier); 738 spin_unlock_irq(&conf->resync_lock); 739 } 740 741 742 /* duplicate the data pages for behind I/O */ 743 static struct page **alloc_behind_pages(struct bio *bio) 744 { 745 int i; 746 struct bio_vec *bvec; 747 struct page **pages = kzalloc(bio->bi_vcnt * sizeof(struct page *), 748 GFP_NOIO); 749 if (unlikely(!pages)) 750 goto do_sync_io; 751 752 bio_for_each_segment(bvec, bio, i) { 753 pages[i] = alloc_page(GFP_NOIO); 754 if (unlikely(!pages[i])) 755 goto do_sync_io; 756 memcpy(kmap(pages[i]) + bvec->bv_offset, 757 kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len); 758 kunmap(pages[i]); 759 kunmap(bvec->bv_page); 760 } 761 762 return pages; 763 764 do_sync_io: 765 if (pages) 766 for (i = 0; i < bio->bi_vcnt && pages[i]; i++) 767 put_page(pages[i]); 768 kfree(pages); 769 PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size); 770 return NULL; 771 } 772 773 static int make_request(struct request_queue *q, struct bio * bio) 774 { 775 mddev_t *mddev = q->queuedata; 776 conf_t *conf = mddev_to_conf(mddev); 777 mirror_info_t *mirror; 778 r1bio_t *r1_bio; 779 struct bio *read_bio; 780 int i, targets = 0, disks; 781 struct bitmap *bitmap; 782 unsigned long flags; 783 struct bio_list bl; 784 struct page **behind_pages = NULL; 785 const int rw = bio_data_dir(bio); 786 const int do_sync = bio_sync(bio); 787 int cpu, do_barriers; 788 mdk_rdev_t *blocked_rdev; 789 790 /* 791 * Register the new request and wait if the reconstruction 792 * thread has put up a bar for new requests. 793 * Continue immediately if no resync is active currently. 794 * We test barriers_work *after* md_write_start as md_write_start 795 * may cause the first superblock write, and that will check out 796 * if barriers work. 797 */ 798 799 md_write_start(mddev, bio); /* wait on superblock update early */ 800 801 if (unlikely(!mddev->barriers_work && bio_barrier(bio))) { 802 if (rw == WRITE) 803 md_write_end(mddev); 804 bio_endio(bio, -EOPNOTSUPP); 805 return 0; 806 } 807 808 wait_barrier(conf); 809 810 bitmap = mddev->bitmap; 811 812 cpu = part_stat_lock(); 813 part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); 814 part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], 815 bio_sectors(bio)); 816 part_stat_unlock(); 817 818 /* 819 * make_request() can abort the operation when READA is being 820 * used and no empty request is available. 821 * 822 */ 823 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO); 824 825 r1_bio->master_bio = bio; 826 r1_bio->sectors = bio->bi_size >> 9; 827 r1_bio->state = 0; 828 r1_bio->mddev = mddev; 829 r1_bio->sector = bio->bi_sector; 830 831 if (rw == READ) { 832 /* 833 * read balancing logic: 834 */ 835 int rdisk = read_balance(conf, r1_bio); 836 837 if (rdisk < 0) { 838 /* couldn't find anywhere to read from */ 839 raid_end_bio_io(r1_bio); 840 return 0; 841 } 842 mirror = conf->mirrors + rdisk; 843 844 r1_bio->read_disk = rdisk; 845 846 read_bio = bio_clone(bio, GFP_NOIO); 847 848 r1_bio->bios[rdisk] = read_bio; 849 850 read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset; 851 read_bio->bi_bdev = mirror->rdev->bdev; 852 read_bio->bi_end_io = raid1_end_read_request; 853 read_bio->bi_rw = READ | do_sync; 854 read_bio->bi_private = r1_bio; 855 856 generic_make_request(read_bio); 857 return 0; 858 } 859 860 /* 861 * WRITE: 862 */ 863 /* first select target devices under spinlock and 864 * inc refcount on their rdev. Record them by setting 865 * bios[x] to bio 866 */ 867 disks = conf->raid_disks; 868 #if 0 869 { static int first=1; 870 if (first) printk("First Write sector %llu disks %d\n", 871 (unsigned long long)r1_bio->sector, disks); 872 first = 0; 873 } 874 #endif 875 retry_write: 876 blocked_rdev = NULL; 877 rcu_read_lock(); 878 for (i = 0; i < disks; i++) { 879 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); 880 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { 881 atomic_inc(&rdev->nr_pending); 882 blocked_rdev = rdev; 883 break; 884 } 885 if (rdev && !test_bit(Faulty, &rdev->flags)) { 886 atomic_inc(&rdev->nr_pending); 887 if (test_bit(Faulty, &rdev->flags)) { 888 rdev_dec_pending(rdev, mddev); 889 r1_bio->bios[i] = NULL; 890 } else 891 r1_bio->bios[i] = bio; 892 targets++; 893 } else 894 r1_bio->bios[i] = NULL; 895 } 896 rcu_read_unlock(); 897 898 if (unlikely(blocked_rdev)) { 899 /* Wait for this device to become unblocked */ 900 int j; 901 902 for (j = 0; j < i; j++) 903 if (r1_bio->bios[j]) 904 rdev_dec_pending(conf->mirrors[j].rdev, mddev); 905 906 allow_barrier(conf); 907 md_wait_for_blocked_rdev(blocked_rdev, mddev); 908 wait_barrier(conf); 909 goto retry_write; 910 } 911 912 BUG_ON(targets == 0); /* we never fail the last device */ 913 914 if (targets < conf->raid_disks) { 915 /* array is degraded, we will not clear the bitmap 916 * on I/O completion (see raid1_end_write_request) */ 917 set_bit(R1BIO_Degraded, &r1_bio->state); 918 } 919 920 /* do behind I/O ? */ 921 if (bitmap && 922 atomic_read(&bitmap->behind_writes) < bitmap->max_write_behind && 923 (behind_pages = alloc_behind_pages(bio)) != NULL) 924 set_bit(R1BIO_BehindIO, &r1_bio->state); 925 926 atomic_set(&r1_bio->remaining, 0); 927 atomic_set(&r1_bio->behind_remaining, 0); 928 929 do_barriers = bio_barrier(bio); 930 if (do_barriers) 931 set_bit(R1BIO_Barrier, &r1_bio->state); 932 933 bio_list_init(&bl); 934 for (i = 0; i < disks; i++) { 935 struct bio *mbio; 936 if (!r1_bio->bios[i]) 937 continue; 938 939 mbio = bio_clone(bio, GFP_NOIO); 940 r1_bio->bios[i] = mbio; 941 942 mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset; 943 mbio->bi_bdev = conf->mirrors[i].rdev->bdev; 944 mbio->bi_end_io = raid1_end_write_request; 945 mbio->bi_rw = WRITE | do_barriers | do_sync; 946 mbio->bi_private = r1_bio; 947 948 if (behind_pages) { 949 struct bio_vec *bvec; 950 int j; 951 952 /* Yes, I really want the '__' version so that 953 * we clear any unused pointer in the io_vec, rather 954 * than leave them unchanged. This is important 955 * because when we come to free the pages, we won't 956 * know the originial bi_idx, so we just free 957 * them all 958 */ 959 __bio_for_each_segment(bvec, mbio, j, 0) 960 bvec->bv_page = behind_pages[j]; 961 if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags)) 962 atomic_inc(&r1_bio->behind_remaining); 963 } 964 965 atomic_inc(&r1_bio->remaining); 966 967 bio_list_add(&bl, mbio); 968 } 969 kfree(behind_pages); /* the behind pages are attached to the bios now */ 970 971 bitmap_startwrite(bitmap, bio->bi_sector, r1_bio->sectors, 972 test_bit(R1BIO_BehindIO, &r1_bio->state)); 973 spin_lock_irqsave(&conf->device_lock, flags); 974 bio_list_merge(&conf->pending_bio_list, &bl); 975 bio_list_init(&bl); 976 977 blk_plug_device(mddev->queue); 978 spin_unlock_irqrestore(&conf->device_lock, flags); 979 980 /* In case raid1d snuck into freeze_array */ 981 wake_up(&conf->wait_barrier); 982 983 if (do_sync) 984 md_wakeup_thread(mddev->thread); 985 #if 0 986 while ((bio = bio_list_pop(&bl)) != NULL) 987 generic_make_request(bio); 988 #endif 989 990 return 0; 991 } 992 993 static void status(struct seq_file *seq, mddev_t *mddev) 994 { 995 conf_t *conf = mddev_to_conf(mddev); 996 int i; 997 998 seq_printf(seq, " [%d/%d] [", conf->raid_disks, 999 conf->raid_disks - mddev->degraded); 1000 rcu_read_lock(); 1001 for (i = 0; i < conf->raid_disks; i++) { 1002 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); 1003 seq_printf(seq, "%s", 1004 rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_"); 1005 } 1006 rcu_read_unlock(); 1007 seq_printf(seq, "]"); 1008 } 1009 1010 1011 static void error(mddev_t *mddev, mdk_rdev_t *rdev) 1012 { 1013 char b[BDEVNAME_SIZE]; 1014 conf_t *conf = mddev_to_conf(mddev); 1015 1016 /* 1017 * If it is not operational, then we have already marked it as dead 1018 * else if it is the last working disks, ignore the error, let the 1019 * next level up know. 1020 * else mark the drive as failed 1021 */ 1022 if (test_bit(In_sync, &rdev->flags) 1023 && (conf->raid_disks - mddev->degraded) == 1) { 1024 /* 1025 * Don't fail the drive, act as though we were just a 1026 * normal single drive. 1027 * However don't try a recovery from this drive as 1028 * it is very likely to fail. 1029 */ 1030 mddev->recovery_disabled = 1; 1031 return; 1032 } 1033 if (test_and_clear_bit(In_sync, &rdev->flags)) { 1034 unsigned long flags; 1035 spin_lock_irqsave(&conf->device_lock, flags); 1036 mddev->degraded++; 1037 set_bit(Faulty, &rdev->flags); 1038 spin_unlock_irqrestore(&conf->device_lock, flags); 1039 /* 1040 * if recovery is running, make sure it aborts. 1041 */ 1042 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 1043 } else 1044 set_bit(Faulty, &rdev->flags); 1045 set_bit(MD_CHANGE_DEVS, &mddev->flags); 1046 printk(KERN_ALERT "raid1: Disk failure on %s, disabling device.\n" 1047 "raid1: Operation continuing on %d devices.\n", 1048 bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded); 1049 } 1050 1051 static void print_conf(conf_t *conf) 1052 { 1053 int i; 1054 1055 printk("RAID1 conf printout:\n"); 1056 if (!conf) { 1057 printk("(!conf)\n"); 1058 return; 1059 } 1060 printk(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded, 1061 conf->raid_disks); 1062 1063 rcu_read_lock(); 1064 for (i = 0; i < conf->raid_disks; i++) { 1065 char b[BDEVNAME_SIZE]; 1066 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); 1067 if (rdev) 1068 printk(" disk %d, wo:%d, o:%d, dev:%s\n", 1069 i, !test_bit(In_sync, &rdev->flags), 1070 !test_bit(Faulty, &rdev->flags), 1071 bdevname(rdev->bdev,b)); 1072 } 1073 rcu_read_unlock(); 1074 } 1075 1076 static void close_sync(conf_t *conf) 1077 { 1078 wait_barrier(conf); 1079 allow_barrier(conf); 1080 1081 mempool_destroy(conf->r1buf_pool); 1082 conf->r1buf_pool = NULL; 1083 } 1084 1085 static int raid1_spare_active(mddev_t *mddev) 1086 { 1087 int i; 1088 conf_t *conf = mddev->private; 1089 1090 /* 1091 * Find all failed disks within the RAID1 configuration 1092 * and mark them readable. 1093 * Called under mddev lock, so rcu protection not needed. 1094 */ 1095 for (i = 0; i < conf->raid_disks; i++) { 1096 mdk_rdev_t *rdev = conf->mirrors[i].rdev; 1097 if (rdev 1098 && !test_bit(Faulty, &rdev->flags) 1099 && !test_and_set_bit(In_sync, &rdev->flags)) { 1100 unsigned long flags; 1101 spin_lock_irqsave(&conf->device_lock, flags); 1102 mddev->degraded--; 1103 spin_unlock_irqrestore(&conf->device_lock, flags); 1104 } 1105 } 1106 1107 print_conf(conf); 1108 return 0; 1109 } 1110 1111 1112 static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) 1113 { 1114 conf_t *conf = mddev->private; 1115 int err = -EEXIST; 1116 int mirror = 0; 1117 mirror_info_t *p; 1118 int first = 0; 1119 int last = mddev->raid_disks - 1; 1120 1121 if (rdev->raid_disk >= 0) 1122 first = last = rdev->raid_disk; 1123 1124 for (mirror = first; mirror <= last; mirror++) 1125 if ( !(p=conf->mirrors+mirror)->rdev) { 1126 1127 blk_queue_stack_limits(mddev->queue, 1128 rdev->bdev->bd_disk->queue); 1129 /* as we don't honour merge_bvec_fn, we must never risk 1130 * violating it, so limit ->max_sector to one PAGE, as 1131 * a one page request is never in violation. 1132 */ 1133 if (rdev->bdev->bd_disk->queue->merge_bvec_fn && 1134 mddev->queue->max_sectors > (PAGE_SIZE>>9)) 1135 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); 1136 1137 p->head_position = 0; 1138 rdev->raid_disk = mirror; 1139 err = 0; 1140 /* As all devices are equivalent, we don't need a full recovery 1141 * if this was recently any drive of the array 1142 */ 1143 if (rdev->saved_raid_disk < 0) 1144 conf->fullsync = 1; 1145 rcu_assign_pointer(p->rdev, rdev); 1146 break; 1147 } 1148 1149 print_conf(conf); 1150 return err; 1151 } 1152 1153 static int raid1_remove_disk(mddev_t *mddev, int number) 1154 { 1155 conf_t *conf = mddev->private; 1156 int err = 0; 1157 mdk_rdev_t *rdev; 1158 mirror_info_t *p = conf->mirrors+ number; 1159 1160 print_conf(conf); 1161 rdev = p->rdev; 1162 if (rdev) { 1163 if (test_bit(In_sync, &rdev->flags) || 1164 atomic_read(&rdev->nr_pending)) { 1165 err = -EBUSY; 1166 goto abort; 1167 } 1168 /* Only remove non-faulty devices is recovery 1169 * is not possible. 1170 */ 1171 if (!test_bit(Faulty, &rdev->flags) && 1172 mddev->degraded < conf->raid_disks) { 1173 err = -EBUSY; 1174 goto abort; 1175 } 1176 p->rdev = NULL; 1177 synchronize_rcu(); 1178 if (atomic_read(&rdev->nr_pending)) { 1179 /* lost the race, try later */ 1180 err = -EBUSY; 1181 p->rdev = rdev; 1182 } 1183 } 1184 abort: 1185 1186 print_conf(conf); 1187 return err; 1188 } 1189 1190 1191 static void end_sync_read(struct bio *bio, int error) 1192 { 1193 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); 1194 int i; 1195 1196 for (i=r1_bio->mddev->raid_disks; i--; ) 1197 if (r1_bio->bios[i] == bio) 1198 break; 1199 BUG_ON(i < 0); 1200 update_head_pos(i, r1_bio); 1201 /* 1202 * we have read a block, now it needs to be re-written, 1203 * or re-read if the read failed. 1204 * We don't do much here, just schedule handling by raid1d 1205 */ 1206 if (test_bit(BIO_UPTODATE, &bio->bi_flags)) 1207 set_bit(R1BIO_Uptodate, &r1_bio->state); 1208 1209 if (atomic_dec_and_test(&r1_bio->remaining)) 1210 reschedule_retry(r1_bio); 1211 } 1212 1213 static void end_sync_write(struct bio *bio, int error) 1214 { 1215 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 1216 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); 1217 mddev_t *mddev = r1_bio->mddev; 1218 conf_t *conf = mddev_to_conf(mddev); 1219 int i; 1220 int mirror=0; 1221 1222 for (i = 0; i < conf->raid_disks; i++) 1223 if (r1_bio->bios[i] == bio) { 1224 mirror = i; 1225 break; 1226 } 1227 if (!uptodate) { 1228 int sync_blocks = 0; 1229 sector_t s = r1_bio->sector; 1230 long sectors_to_go = r1_bio->sectors; 1231 /* make sure these bits doesn't get cleared. */ 1232 do { 1233 bitmap_end_sync(mddev->bitmap, s, 1234 &sync_blocks, 1); 1235 s += sync_blocks; 1236 sectors_to_go -= sync_blocks; 1237 } while (sectors_to_go > 0); 1238 md_error(mddev, conf->mirrors[mirror].rdev); 1239 } 1240 1241 update_head_pos(mirror, r1_bio); 1242 1243 if (atomic_dec_and_test(&r1_bio->remaining)) { 1244 sector_t s = r1_bio->sectors; 1245 put_buf(r1_bio); 1246 md_done_sync(mddev, s, uptodate); 1247 } 1248 } 1249 1250 static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) 1251 { 1252 conf_t *conf = mddev_to_conf(mddev); 1253 int i; 1254 int disks = conf->raid_disks; 1255 struct bio *bio, *wbio; 1256 1257 bio = r1_bio->bios[r1_bio->read_disk]; 1258 1259 1260 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 1261 /* We have read all readable devices. If we haven't 1262 * got the block, then there is no hope left. 1263 * If we have, then we want to do a comparison 1264 * and skip the write if everything is the same. 1265 * If any blocks failed to read, then we need to 1266 * attempt an over-write 1267 */ 1268 int primary; 1269 if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) { 1270 for (i=0; i<mddev->raid_disks; i++) 1271 if (r1_bio->bios[i]->bi_end_io == end_sync_read) 1272 md_error(mddev, conf->mirrors[i].rdev); 1273 1274 md_done_sync(mddev, r1_bio->sectors, 1); 1275 put_buf(r1_bio); 1276 return; 1277 } 1278 for (primary=0; primary<mddev->raid_disks; primary++) 1279 if (r1_bio->bios[primary]->bi_end_io == end_sync_read && 1280 test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) { 1281 r1_bio->bios[primary]->bi_end_io = NULL; 1282 rdev_dec_pending(conf->mirrors[primary].rdev, mddev); 1283 break; 1284 } 1285 r1_bio->read_disk = primary; 1286 for (i=0; i<mddev->raid_disks; i++) 1287 if (r1_bio->bios[i]->bi_end_io == end_sync_read) { 1288 int j; 1289 int vcnt = r1_bio->sectors >> (PAGE_SHIFT- 9); 1290 struct bio *pbio = r1_bio->bios[primary]; 1291 struct bio *sbio = r1_bio->bios[i]; 1292 1293 if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) { 1294 for (j = vcnt; j-- ; ) { 1295 struct page *p, *s; 1296 p = pbio->bi_io_vec[j].bv_page; 1297 s = sbio->bi_io_vec[j].bv_page; 1298 if (memcmp(page_address(p), 1299 page_address(s), 1300 PAGE_SIZE)) 1301 break; 1302 } 1303 } else 1304 j = 0; 1305 if (j >= 0) 1306 mddev->resync_mismatches += r1_bio->sectors; 1307 if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) 1308 && test_bit(BIO_UPTODATE, &sbio->bi_flags))) { 1309 sbio->bi_end_io = NULL; 1310 rdev_dec_pending(conf->mirrors[i].rdev, mddev); 1311 } else { 1312 /* fixup the bio for reuse */ 1313 int size; 1314 sbio->bi_vcnt = vcnt; 1315 sbio->bi_size = r1_bio->sectors << 9; 1316 sbio->bi_idx = 0; 1317 sbio->bi_phys_segments = 0; 1318 sbio->bi_flags &= ~(BIO_POOL_MASK - 1); 1319 sbio->bi_flags |= 1 << BIO_UPTODATE; 1320 sbio->bi_next = NULL; 1321 sbio->bi_sector = r1_bio->sector + 1322 conf->mirrors[i].rdev->data_offset; 1323 sbio->bi_bdev = conf->mirrors[i].rdev->bdev; 1324 size = sbio->bi_size; 1325 for (j = 0; j < vcnt ; j++) { 1326 struct bio_vec *bi; 1327 bi = &sbio->bi_io_vec[j]; 1328 bi->bv_offset = 0; 1329 if (size > PAGE_SIZE) 1330 bi->bv_len = PAGE_SIZE; 1331 else 1332 bi->bv_len = size; 1333 size -= PAGE_SIZE; 1334 memcpy(page_address(bi->bv_page), 1335 page_address(pbio->bi_io_vec[j].bv_page), 1336 PAGE_SIZE); 1337 } 1338 1339 } 1340 } 1341 } 1342 if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) { 1343 /* ouch - failed to read all of that. 1344 * Try some synchronous reads of other devices to get 1345 * good data, much like with normal read errors. Only 1346 * read into the pages we already have so we don't 1347 * need to re-issue the read request. 1348 * We don't need to freeze the array, because being in an 1349 * active sync request, there is no normal IO, and 1350 * no overlapping syncs. 1351 */ 1352 sector_t sect = r1_bio->sector; 1353 int sectors = r1_bio->sectors; 1354 int idx = 0; 1355 1356 while(sectors) { 1357 int s = sectors; 1358 int d = r1_bio->read_disk; 1359 int success = 0; 1360 mdk_rdev_t *rdev; 1361 1362 if (s > (PAGE_SIZE>>9)) 1363 s = PAGE_SIZE >> 9; 1364 do { 1365 if (r1_bio->bios[d]->bi_end_io == end_sync_read) { 1366 /* No rcu protection needed here devices 1367 * can only be removed when no resync is 1368 * active, and resync is currently active 1369 */ 1370 rdev = conf->mirrors[d].rdev; 1371 if (sync_page_io(rdev->bdev, 1372 sect + rdev->data_offset, 1373 s<<9, 1374 bio->bi_io_vec[idx].bv_page, 1375 READ)) { 1376 success = 1; 1377 break; 1378 } 1379 } 1380 d++; 1381 if (d == conf->raid_disks) 1382 d = 0; 1383 } while (!success && d != r1_bio->read_disk); 1384 1385 if (success) { 1386 int start = d; 1387 /* write it back and re-read */ 1388 set_bit(R1BIO_Uptodate, &r1_bio->state); 1389 while (d != r1_bio->read_disk) { 1390 if (d == 0) 1391 d = conf->raid_disks; 1392 d--; 1393 if (r1_bio->bios[d]->bi_end_io != end_sync_read) 1394 continue; 1395 rdev = conf->mirrors[d].rdev; 1396 atomic_add(s, &rdev->corrected_errors); 1397 if (sync_page_io(rdev->bdev, 1398 sect + rdev->data_offset, 1399 s<<9, 1400 bio->bi_io_vec[idx].bv_page, 1401 WRITE) == 0) 1402 md_error(mddev, rdev); 1403 } 1404 d = start; 1405 while (d != r1_bio->read_disk) { 1406 if (d == 0) 1407 d = conf->raid_disks; 1408 d--; 1409 if (r1_bio->bios[d]->bi_end_io != end_sync_read) 1410 continue; 1411 rdev = conf->mirrors[d].rdev; 1412 if (sync_page_io(rdev->bdev, 1413 sect + rdev->data_offset, 1414 s<<9, 1415 bio->bi_io_vec[idx].bv_page, 1416 READ) == 0) 1417 md_error(mddev, rdev); 1418 } 1419 } else { 1420 char b[BDEVNAME_SIZE]; 1421 /* Cannot read from anywhere, array is toast */ 1422 md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev); 1423 printk(KERN_ALERT "raid1: %s: unrecoverable I/O read error" 1424 " for block %llu\n", 1425 bdevname(bio->bi_bdev,b), 1426 (unsigned long long)r1_bio->sector); 1427 md_done_sync(mddev, r1_bio->sectors, 0); 1428 put_buf(r1_bio); 1429 return; 1430 } 1431 sectors -= s; 1432 sect += s; 1433 idx ++; 1434 } 1435 } 1436 1437 /* 1438 * schedule writes 1439 */ 1440 atomic_set(&r1_bio->remaining, 1); 1441 for (i = 0; i < disks ; i++) { 1442 wbio = r1_bio->bios[i]; 1443 if (wbio->bi_end_io == NULL || 1444 (wbio->bi_end_io == end_sync_read && 1445 (i == r1_bio->read_disk || 1446 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery)))) 1447 continue; 1448 1449 wbio->bi_rw = WRITE; 1450 wbio->bi_end_io = end_sync_write; 1451 atomic_inc(&r1_bio->remaining); 1452 md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9); 1453 1454 generic_make_request(wbio); 1455 } 1456 1457 if (atomic_dec_and_test(&r1_bio->remaining)) { 1458 /* if we're here, all write(s) have completed, so clean up */ 1459 md_done_sync(mddev, r1_bio->sectors, 1); 1460 put_buf(r1_bio); 1461 } 1462 } 1463 1464 /* 1465 * This is a kernel thread which: 1466 * 1467 * 1. Retries failed read operations on working mirrors. 1468 * 2. Updates the raid superblock when problems encounter. 1469 * 3. Performs writes following reads for array syncronising. 1470 */ 1471 1472 static void fix_read_error(conf_t *conf, int read_disk, 1473 sector_t sect, int sectors) 1474 { 1475 mddev_t *mddev = conf->mddev; 1476 while(sectors) { 1477 int s = sectors; 1478 int d = read_disk; 1479 int success = 0; 1480 int start; 1481 mdk_rdev_t *rdev; 1482 1483 if (s > (PAGE_SIZE>>9)) 1484 s = PAGE_SIZE >> 9; 1485 1486 do { 1487 /* Note: no rcu protection needed here 1488 * as this is synchronous in the raid1d thread 1489 * which is the thread that might remove 1490 * a device. If raid1d ever becomes multi-threaded.... 1491 */ 1492 rdev = conf->mirrors[d].rdev; 1493 if (rdev && 1494 test_bit(In_sync, &rdev->flags) && 1495 sync_page_io(rdev->bdev, 1496 sect + rdev->data_offset, 1497 s<<9, 1498 conf->tmppage, READ)) 1499 success = 1; 1500 else { 1501 d++; 1502 if (d == conf->raid_disks) 1503 d = 0; 1504 } 1505 } while (!success && d != read_disk); 1506 1507 if (!success) { 1508 /* Cannot read from anywhere -- bye bye array */ 1509 md_error(mddev, conf->mirrors[read_disk].rdev); 1510 break; 1511 } 1512 /* write it back and re-read */ 1513 start = d; 1514 while (d != read_disk) { 1515 if (d==0) 1516 d = conf->raid_disks; 1517 d--; 1518 rdev = conf->mirrors[d].rdev; 1519 if (rdev && 1520 test_bit(In_sync, &rdev->flags)) { 1521 if (sync_page_io(rdev->bdev, 1522 sect + rdev->data_offset, 1523 s<<9, conf->tmppage, WRITE) 1524 == 0) 1525 /* Well, this device is dead */ 1526 md_error(mddev, rdev); 1527 } 1528 } 1529 d = start; 1530 while (d != read_disk) { 1531 char b[BDEVNAME_SIZE]; 1532 if (d==0) 1533 d = conf->raid_disks; 1534 d--; 1535 rdev = conf->mirrors[d].rdev; 1536 if (rdev && 1537 test_bit(In_sync, &rdev->flags)) { 1538 if (sync_page_io(rdev->bdev, 1539 sect + rdev->data_offset, 1540 s<<9, conf->tmppage, READ) 1541 == 0) 1542 /* Well, this device is dead */ 1543 md_error(mddev, rdev); 1544 else { 1545 atomic_add(s, &rdev->corrected_errors); 1546 printk(KERN_INFO 1547 "raid1:%s: read error corrected " 1548 "(%d sectors at %llu on %s)\n", 1549 mdname(mddev), s, 1550 (unsigned long long)(sect + 1551 rdev->data_offset), 1552 bdevname(rdev->bdev, b)); 1553 } 1554 } 1555 } 1556 sectors -= s; 1557 sect += s; 1558 } 1559 } 1560 1561 static void raid1d(mddev_t *mddev) 1562 { 1563 r1bio_t *r1_bio; 1564 struct bio *bio; 1565 unsigned long flags; 1566 conf_t *conf = mddev_to_conf(mddev); 1567 struct list_head *head = &conf->retry_list; 1568 int unplug=0; 1569 mdk_rdev_t *rdev; 1570 1571 md_check_recovery(mddev); 1572 1573 for (;;) { 1574 char b[BDEVNAME_SIZE]; 1575 1576 unplug += flush_pending_writes(conf); 1577 1578 spin_lock_irqsave(&conf->device_lock, flags); 1579 if (list_empty(head)) { 1580 spin_unlock_irqrestore(&conf->device_lock, flags); 1581 break; 1582 } 1583 r1_bio = list_entry(head->prev, r1bio_t, retry_list); 1584 list_del(head->prev); 1585 conf->nr_queued--; 1586 spin_unlock_irqrestore(&conf->device_lock, flags); 1587 1588 mddev = r1_bio->mddev; 1589 conf = mddev_to_conf(mddev); 1590 if (test_bit(R1BIO_IsSync, &r1_bio->state)) { 1591 sync_request_write(mddev, r1_bio); 1592 unplug = 1; 1593 } else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) { 1594 /* some requests in the r1bio were BIO_RW_BARRIER 1595 * requests which failed with -EOPNOTSUPP. Hohumm.. 1596 * Better resubmit without the barrier. 1597 * We know which devices to resubmit for, because 1598 * all others have had their bios[] entry cleared. 1599 * We already have a nr_pending reference on these rdevs. 1600 */ 1601 int i; 1602 const int do_sync = bio_sync(r1_bio->master_bio); 1603 clear_bit(R1BIO_BarrierRetry, &r1_bio->state); 1604 clear_bit(R1BIO_Barrier, &r1_bio->state); 1605 for (i=0; i < conf->raid_disks; i++) 1606 if (r1_bio->bios[i]) 1607 atomic_inc(&r1_bio->remaining); 1608 for (i=0; i < conf->raid_disks; i++) 1609 if (r1_bio->bios[i]) { 1610 struct bio_vec *bvec; 1611 int j; 1612 1613 bio = bio_clone(r1_bio->master_bio, GFP_NOIO); 1614 /* copy pages from the failed bio, as 1615 * this might be a write-behind device */ 1616 __bio_for_each_segment(bvec, bio, j, 0) 1617 bvec->bv_page = bio_iovec_idx(r1_bio->bios[i], j)->bv_page; 1618 bio_put(r1_bio->bios[i]); 1619 bio->bi_sector = r1_bio->sector + 1620 conf->mirrors[i].rdev->data_offset; 1621 bio->bi_bdev = conf->mirrors[i].rdev->bdev; 1622 bio->bi_end_io = raid1_end_write_request; 1623 bio->bi_rw = WRITE | do_sync; 1624 bio->bi_private = r1_bio; 1625 r1_bio->bios[i] = bio; 1626 generic_make_request(bio); 1627 } 1628 } else { 1629 int disk; 1630 1631 /* we got a read error. Maybe the drive is bad. Maybe just 1632 * the block and we can fix it. 1633 * We freeze all other IO, and try reading the block from 1634 * other devices. When we find one, we re-write 1635 * and check it that fixes the read error. 1636 * This is all done synchronously while the array is 1637 * frozen 1638 */ 1639 if (mddev->ro == 0) { 1640 freeze_array(conf); 1641 fix_read_error(conf, r1_bio->read_disk, 1642 r1_bio->sector, 1643 r1_bio->sectors); 1644 unfreeze_array(conf); 1645 } 1646 1647 bio = r1_bio->bios[r1_bio->read_disk]; 1648 if ((disk=read_balance(conf, r1_bio)) == -1 || 1649 disk == r1_bio->read_disk) { 1650 printk(KERN_ALERT "raid1: %s: unrecoverable I/O" 1651 " read error for block %llu\n", 1652 bdevname(bio->bi_bdev,b), 1653 (unsigned long long)r1_bio->sector); 1654 raid_end_bio_io(r1_bio); 1655 } else { 1656 const int do_sync = bio_sync(r1_bio->master_bio); 1657 r1_bio->bios[r1_bio->read_disk] = 1658 mddev->ro ? IO_BLOCKED : NULL; 1659 r1_bio->read_disk = disk; 1660 bio_put(bio); 1661 bio = bio_clone(r1_bio->master_bio, GFP_NOIO); 1662 r1_bio->bios[r1_bio->read_disk] = bio; 1663 rdev = conf->mirrors[disk].rdev; 1664 if (printk_ratelimit()) 1665 printk(KERN_ERR "raid1: %s: redirecting sector %llu to" 1666 " another mirror\n", 1667 bdevname(rdev->bdev,b), 1668 (unsigned long long)r1_bio->sector); 1669 bio->bi_sector = r1_bio->sector + rdev->data_offset; 1670 bio->bi_bdev = rdev->bdev; 1671 bio->bi_end_io = raid1_end_read_request; 1672 bio->bi_rw = READ | do_sync; 1673 bio->bi_private = r1_bio; 1674 unplug = 1; 1675 generic_make_request(bio); 1676 } 1677 } 1678 } 1679 if (unplug) 1680 unplug_slaves(mddev); 1681 } 1682 1683 1684 static int init_resync(conf_t *conf) 1685 { 1686 int buffs; 1687 1688 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE; 1689 BUG_ON(conf->r1buf_pool); 1690 conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free, 1691 conf->poolinfo); 1692 if (!conf->r1buf_pool) 1693 return -ENOMEM; 1694 conf->next_resync = 0; 1695 return 0; 1696 } 1697 1698 /* 1699 * perform a "sync" on one "block" 1700 * 1701 * We need to make sure that no normal I/O request - particularly write 1702 * requests - conflict with active sync requests. 1703 * 1704 * This is achieved by tracking pending requests and a 'barrier' concept 1705 * that can be installed to exclude normal IO requests. 1706 */ 1707 1708 static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster) 1709 { 1710 conf_t *conf = mddev_to_conf(mddev); 1711 r1bio_t *r1_bio; 1712 struct bio *bio; 1713 sector_t max_sector, nr_sectors; 1714 int disk = -1; 1715 int i; 1716 int wonly = -1; 1717 int write_targets = 0, read_targets = 0; 1718 int sync_blocks; 1719 int still_degraded = 0; 1720 1721 if (!conf->r1buf_pool) 1722 { 1723 /* 1724 printk("sync start - bitmap %p\n", mddev->bitmap); 1725 */ 1726 if (init_resync(conf)) 1727 return 0; 1728 } 1729 1730 max_sector = mddev->dev_sectors; 1731 if (sector_nr >= max_sector) { 1732 /* If we aborted, we need to abort the 1733 * sync on the 'current' bitmap chunk (there will 1734 * only be one in raid1 resync. 1735 * We can find the current addess in mddev->curr_resync 1736 */ 1737 if (mddev->curr_resync < max_sector) /* aborted */ 1738 bitmap_end_sync(mddev->bitmap, mddev->curr_resync, 1739 &sync_blocks, 1); 1740 else /* completed sync */ 1741 conf->fullsync = 0; 1742 1743 bitmap_close_sync(mddev->bitmap); 1744 close_sync(conf); 1745 return 0; 1746 } 1747 1748 if (mddev->bitmap == NULL && 1749 mddev->recovery_cp == MaxSector && 1750 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && 1751 conf->fullsync == 0) { 1752 *skipped = 1; 1753 return max_sector - sector_nr; 1754 } 1755 /* before building a request, check if we can skip these blocks.. 1756 * This call the bitmap_start_sync doesn't actually record anything 1757 */ 1758 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && 1759 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 1760 /* We can skip this block, and probably several more */ 1761 *skipped = 1; 1762 return sync_blocks; 1763 } 1764 /* 1765 * If there is non-resync activity waiting for a turn, 1766 * and resync is going fast enough, 1767 * then let it though before starting on this new sync request. 1768 */ 1769 if (!go_faster && conf->nr_waiting) 1770 msleep_interruptible(1000); 1771 1772 bitmap_cond_end_sync(mddev->bitmap, sector_nr); 1773 raise_barrier(conf); 1774 1775 conf->next_resync = sector_nr; 1776 1777 r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO); 1778 rcu_read_lock(); 1779 /* 1780 * If we get a correctably read error during resync or recovery, 1781 * we might want to read from a different device. So we 1782 * flag all drives that could conceivably be read from for READ, 1783 * and any others (which will be non-In_sync devices) for WRITE. 1784 * If a read fails, we try reading from something else for which READ 1785 * is OK. 1786 */ 1787 1788 r1_bio->mddev = mddev; 1789 r1_bio->sector = sector_nr; 1790 r1_bio->state = 0; 1791 set_bit(R1BIO_IsSync, &r1_bio->state); 1792 1793 for (i=0; i < conf->raid_disks; i++) { 1794 mdk_rdev_t *rdev; 1795 bio = r1_bio->bios[i]; 1796 1797 /* take from bio_init */ 1798 bio->bi_next = NULL; 1799 bio->bi_flags |= 1 << BIO_UPTODATE; 1800 bio->bi_rw = READ; 1801 bio->bi_vcnt = 0; 1802 bio->bi_idx = 0; 1803 bio->bi_phys_segments = 0; 1804 bio->bi_size = 0; 1805 bio->bi_end_io = NULL; 1806 bio->bi_private = NULL; 1807 1808 rdev = rcu_dereference(conf->mirrors[i].rdev); 1809 if (rdev == NULL || 1810 test_bit(Faulty, &rdev->flags)) { 1811 still_degraded = 1; 1812 continue; 1813 } else if (!test_bit(In_sync, &rdev->flags)) { 1814 bio->bi_rw = WRITE; 1815 bio->bi_end_io = end_sync_write; 1816 write_targets ++; 1817 } else { 1818 /* may need to read from here */ 1819 bio->bi_rw = READ; 1820 bio->bi_end_io = end_sync_read; 1821 if (test_bit(WriteMostly, &rdev->flags)) { 1822 if (wonly < 0) 1823 wonly = i; 1824 } else { 1825 if (disk < 0) 1826 disk = i; 1827 } 1828 read_targets++; 1829 } 1830 atomic_inc(&rdev->nr_pending); 1831 bio->bi_sector = sector_nr + rdev->data_offset; 1832 bio->bi_bdev = rdev->bdev; 1833 bio->bi_private = r1_bio; 1834 } 1835 rcu_read_unlock(); 1836 if (disk < 0) 1837 disk = wonly; 1838 r1_bio->read_disk = disk; 1839 1840 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0) 1841 /* extra read targets are also write targets */ 1842 write_targets += read_targets-1; 1843 1844 if (write_targets == 0 || read_targets == 0) { 1845 /* There is nowhere to write, so all non-sync 1846 * drives must be failed - so we are finished 1847 */ 1848 sector_t rv = max_sector - sector_nr; 1849 *skipped = 1; 1850 put_buf(r1_bio); 1851 return rv; 1852 } 1853 1854 if (max_sector > mddev->resync_max) 1855 max_sector = mddev->resync_max; /* Don't do IO beyond here */ 1856 nr_sectors = 0; 1857 sync_blocks = 0; 1858 do { 1859 struct page *page; 1860 int len = PAGE_SIZE; 1861 if (sector_nr + (len>>9) > max_sector) 1862 len = (max_sector - sector_nr) << 9; 1863 if (len == 0) 1864 break; 1865 if (sync_blocks == 0) { 1866 if (!bitmap_start_sync(mddev->bitmap, sector_nr, 1867 &sync_blocks, still_degraded) && 1868 !conf->fullsync && 1869 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 1870 break; 1871 BUG_ON(sync_blocks < (PAGE_SIZE>>9)); 1872 if (len > (sync_blocks<<9)) 1873 len = sync_blocks<<9; 1874 } 1875 1876 for (i=0 ; i < conf->raid_disks; i++) { 1877 bio = r1_bio->bios[i]; 1878 if (bio->bi_end_io) { 1879 page = bio->bi_io_vec[bio->bi_vcnt].bv_page; 1880 if (bio_add_page(bio, page, len, 0) == 0) { 1881 /* stop here */ 1882 bio->bi_io_vec[bio->bi_vcnt].bv_page = page; 1883 while (i > 0) { 1884 i--; 1885 bio = r1_bio->bios[i]; 1886 if (bio->bi_end_io==NULL) 1887 continue; 1888 /* remove last page from this bio */ 1889 bio->bi_vcnt--; 1890 bio->bi_size -= len; 1891 bio->bi_flags &= ~(1<< BIO_SEG_VALID); 1892 } 1893 goto bio_full; 1894 } 1895 } 1896 } 1897 nr_sectors += len>>9; 1898 sector_nr += len>>9; 1899 sync_blocks -= (len>>9); 1900 } while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES); 1901 bio_full: 1902 r1_bio->sectors = nr_sectors; 1903 1904 /* For a user-requested sync, we read all readable devices and do a 1905 * compare 1906 */ 1907 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 1908 atomic_set(&r1_bio->remaining, read_targets); 1909 for (i=0; i<conf->raid_disks; i++) { 1910 bio = r1_bio->bios[i]; 1911 if (bio->bi_end_io == end_sync_read) { 1912 md_sync_acct(bio->bi_bdev, nr_sectors); 1913 generic_make_request(bio); 1914 } 1915 } 1916 } else { 1917 atomic_set(&r1_bio->remaining, 1); 1918 bio = r1_bio->bios[r1_bio->read_disk]; 1919 md_sync_acct(bio->bi_bdev, nr_sectors); 1920 generic_make_request(bio); 1921 1922 } 1923 return nr_sectors; 1924 } 1925 1926 static sector_t raid1_size(mddev_t *mddev, sector_t sectors, int raid_disks) 1927 { 1928 if (sectors) 1929 return sectors; 1930 1931 return mddev->dev_sectors; 1932 } 1933 1934 static int run(mddev_t *mddev) 1935 { 1936 conf_t *conf; 1937 int i, j, disk_idx; 1938 mirror_info_t *disk; 1939 mdk_rdev_t *rdev; 1940 1941 if (mddev->level != 1) { 1942 printk("raid1: %s: raid level not set to mirroring (%d)\n", 1943 mdname(mddev), mddev->level); 1944 goto out; 1945 } 1946 if (mddev->reshape_position != MaxSector) { 1947 printk("raid1: %s: reshape_position set but not supported\n", 1948 mdname(mddev)); 1949 goto out; 1950 } 1951 /* 1952 * copy the already verified devices into our private RAID1 1953 * bookkeeping area. [whatever we allocate in run(), 1954 * should be freed in stop()] 1955 */ 1956 conf = kzalloc(sizeof(conf_t), GFP_KERNEL); 1957 mddev->private = conf; 1958 if (!conf) 1959 goto out_no_mem; 1960 1961 conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks, 1962 GFP_KERNEL); 1963 if (!conf->mirrors) 1964 goto out_no_mem; 1965 1966 conf->tmppage = alloc_page(GFP_KERNEL); 1967 if (!conf->tmppage) 1968 goto out_no_mem; 1969 1970 conf->poolinfo = kmalloc(sizeof(*conf->poolinfo), GFP_KERNEL); 1971 if (!conf->poolinfo) 1972 goto out_no_mem; 1973 conf->poolinfo->mddev = mddev; 1974 conf->poolinfo->raid_disks = mddev->raid_disks; 1975 conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc, 1976 r1bio_pool_free, 1977 conf->poolinfo); 1978 if (!conf->r1bio_pool) 1979 goto out_no_mem; 1980 1981 spin_lock_init(&conf->device_lock); 1982 mddev->queue->queue_lock = &conf->device_lock; 1983 1984 list_for_each_entry(rdev, &mddev->disks, same_set) { 1985 disk_idx = rdev->raid_disk; 1986 if (disk_idx >= mddev->raid_disks 1987 || disk_idx < 0) 1988 continue; 1989 disk = conf->mirrors + disk_idx; 1990 1991 disk->rdev = rdev; 1992 1993 blk_queue_stack_limits(mddev->queue, 1994 rdev->bdev->bd_disk->queue); 1995 /* as we don't honour merge_bvec_fn, we must never risk 1996 * violating it, so limit ->max_sector to one PAGE, as 1997 * a one page request is never in violation. 1998 */ 1999 if (rdev->bdev->bd_disk->queue->merge_bvec_fn && 2000 mddev->queue->max_sectors > (PAGE_SIZE>>9)) 2001 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); 2002 2003 disk->head_position = 0; 2004 } 2005 conf->raid_disks = mddev->raid_disks; 2006 conf->mddev = mddev; 2007 INIT_LIST_HEAD(&conf->retry_list); 2008 2009 spin_lock_init(&conf->resync_lock); 2010 init_waitqueue_head(&conf->wait_barrier); 2011 2012 bio_list_init(&conf->pending_bio_list); 2013 bio_list_init(&conf->flushing_bio_list); 2014 2015 2016 mddev->degraded = 0; 2017 for (i = 0; i < conf->raid_disks; i++) { 2018 2019 disk = conf->mirrors + i; 2020 2021 if (!disk->rdev || 2022 !test_bit(In_sync, &disk->rdev->flags)) { 2023 disk->head_position = 0; 2024 mddev->degraded++; 2025 if (disk->rdev) 2026 conf->fullsync = 1; 2027 } 2028 } 2029 if (mddev->degraded == conf->raid_disks) { 2030 printk(KERN_ERR "raid1: no operational mirrors for %s\n", 2031 mdname(mddev)); 2032 goto out_free_conf; 2033 } 2034 if (conf->raid_disks - mddev->degraded == 1) 2035 mddev->recovery_cp = MaxSector; 2036 2037 /* 2038 * find the first working one and use it as a starting point 2039 * to read balancing. 2040 */ 2041 for (j = 0; j < conf->raid_disks && 2042 (!conf->mirrors[j].rdev || 2043 !test_bit(In_sync, &conf->mirrors[j].rdev->flags)) ; j++) 2044 /* nothing */; 2045 conf->last_used = j; 2046 2047 2048 mddev->thread = md_register_thread(raid1d, mddev, "%s_raid1"); 2049 if (!mddev->thread) { 2050 printk(KERN_ERR 2051 "raid1: couldn't allocate thread for %s\n", 2052 mdname(mddev)); 2053 goto out_free_conf; 2054 } 2055 2056 printk(KERN_INFO 2057 "raid1: raid set %s active with %d out of %d mirrors\n", 2058 mdname(mddev), mddev->raid_disks - mddev->degraded, 2059 mddev->raid_disks); 2060 /* 2061 * Ok, everything is just fine now 2062 */ 2063 md_set_array_sectors(mddev, raid1_size(mddev, 0, 0)); 2064 2065 mddev->queue->unplug_fn = raid1_unplug; 2066 mddev->queue->backing_dev_info.congested_fn = raid1_congested; 2067 mddev->queue->backing_dev_info.congested_data = mddev; 2068 2069 return 0; 2070 2071 out_no_mem: 2072 printk(KERN_ERR "raid1: couldn't allocate memory for %s\n", 2073 mdname(mddev)); 2074 2075 out_free_conf: 2076 if (conf) { 2077 if (conf->r1bio_pool) 2078 mempool_destroy(conf->r1bio_pool); 2079 kfree(conf->mirrors); 2080 safe_put_page(conf->tmppage); 2081 kfree(conf->poolinfo); 2082 kfree(conf); 2083 mddev->private = NULL; 2084 } 2085 out: 2086 return -EIO; 2087 } 2088 2089 static int stop(mddev_t *mddev) 2090 { 2091 conf_t *conf = mddev_to_conf(mddev); 2092 struct bitmap *bitmap = mddev->bitmap; 2093 int behind_wait = 0; 2094 2095 /* wait for behind writes to complete */ 2096 while (bitmap && atomic_read(&bitmap->behind_writes) > 0) { 2097 behind_wait++; 2098 printk(KERN_INFO "raid1: behind writes in progress on device %s, waiting to stop (%d)\n", mdname(mddev), behind_wait); 2099 set_current_state(TASK_UNINTERRUPTIBLE); 2100 schedule_timeout(HZ); /* wait a second */ 2101 /* need to kick something here to make sure I/O goes? */ 2102 } 2103 2104 raise_barrier(conf); 2105 lower_barrier(conf); 2106 2107 md_unregister_thread(mddev->thread); 2108 mddev->thread = NULL; 2109 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 2110 if (conf->r1bio_pool) 2111 mempool_destroy(conf->r1bio_pool); 2112 kfree(conf->mirrors); 2113 kfree(conf->poolinfo); 2114 kfree(conf); 2115 mddev->private = NULL; 2116 return 0; 2117 } 2118 2119 static int raid1_resize(mddev_t *mddev, sector_t sectors) 2120 { 2121 /* no resync is happening, and there is enough space 2122 * on all devices, so we can resize. 2123 * We need to make sure resync covers any new space. 2124 * If the array is shrinking we should possibly wait until 2125 * any io in the removed space completes, but it hardly seems 2126 * worth it. 2127 */ 2128 md_set_array_sectors(mddev, raid1_size(mddev, sectors, 0)); 2129 if (mddev->array_sectors > raid1_size(mddev, sectors, 0)) 2130 return -EINVAL; 2131 set_capacity(mddev->gendisk, mddev->array_sectors); 2132 mddev->changed = 1; 2133 if (sectors > mddev->dev_sectors && 2134 mddev->recovery_cp == MaxSector) { 2135 mddev->recovery_cp = mddev->dev_sectors; 2136 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2137 } 2138 mddev->dev_sectors = sectors; 2139 mddev->resync_max_sectors = sectors; 2140 return 0; 2141 } 2142 2143 static int raid1_reshape(mddev_t *mddev) 2144 { 2145 /* We need to: 2146 * 1/ resize the r1bio_pool 2147 * 2/ resize conf->mirrors 2148 * 2149 * We allocate a new r1bio_pool if we can. 2150 * Then raise a device barrier and wait until all IO stops. 2151 * Then resize conf->mirrors and swap in the new r1bio pool. 2152 * 2153 * At the same time, we "pack" the devices so that all the missing 2154 * devices have the higher raid_disk numbers. 2155 */ 2156 mempool_t *newpool, *oldpool; 2157 struct pool_info *newpoolinfo; 2158 mirror_info_t *newmirrors; 2159 conf_t *conf = mddev_to_conf(mddev); 2160 int cnt, raid_disks; 2161 unsigned long flags; 2162 int d, d2, err; 2163 2164 /* Cannot change chunk_size, layout, or level */ 2165 if (mddev->chunk_size != mddev->new_chunk || 2166 mddev->layout != mddev->new_layout || 2167 mddev->level != mddev->new_level) { 2168 mddev->new_chunk = mddev->chunk_size; 2169 mddev->new_layout = mddev->layout; 2170 mddev->new_level = mddev->level; 2171 return -EINVAL; 2172 } 2173 2174 err = md_allow_write(mddev); 2175 if (err) 2176 return err; 2177 2178 raid_disks = mddev->raid_disks + mddev->delta_disks; 2179 2180 if (raid_disks < conf->raid_disks) { 2181 cnt=0; 2182 for (d= 0; d < conf->raid_disks; d++) 2183 if (conf->mirrors[d].rdev) 2184 cnt++; 2185 if (cnt > raid_disks) 2186 return -EBUSY; 2187 } 2188 2189 newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL); 2190 if (!newpoolinfo) 2191 return -ENOMEM; 2192 newpoolinfo->mddev = mddev; 2193 newpoolinfo->raid_disks = raid_disks; 2194 2195 newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc, 2196 r1bio_pool_free, newpoolinfo); 2197 if (!newpool) { 2198 kfree(newpoolinfo); 2199 return -ENOMEM; 2200 } 2201 newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks, GFP_KERNEL); 2202 if (!newmirrors) { 2203 kfree(newpoolinfo); 2204 mempool_destroy(newpool); 2205 return -ENOMEM; 2206 } 2207 2208 raise_barrier(conf); 2209 2210 /* ok, everything is stopped */ 2211 oldpool = conf->r1bio_pool; 2212 conf->r1bio_pool = newpool; 2213 2214 for (d = d2 = 0; d < conf->raid_disks; d++) { 2215 mdk_rdev_t *rdev = conf->mirrors[d].rdev; 2216 if (rdev && rdev->raid_disk != d2) { 2217 char nm[20]; 2218 sprintf(nm, "rd%d", rdev->raid_disk); 2219 sysfs_remove_link(&mddev->kobj, nm); 2220 rdev->raid_disk = d2; 2221 sprintf(nm, "rd%d", rdev->raid_disk); 2222 sysfs_remove_link(&mddev->kobj, nm); 2223 if (sysfs_create_link(&mddev->kobj, 2224 &rdev->kobj, nm)) 2225 printk(KERN_WARNING 2226 "md/raid1: cannot register " 2227 "%s for %s\n", 2228 nm, mdname(mddev)); 2229 } 2230 if (rdev) 2231 newmirrors[d2++].rdev = rdev; 2232 } 2233 kfree(conf->mirrors); 2234 conf->mirrors = newmirrors; 2235 kfree(conf->poolinfo); 2236 conf->poolinfo = newpoolinfo; 2237 2238 spin_lock_irqsave(&conf->device_lock, flags); 2239 mddev->degraded += (raid_disks - conf->raid_disks); 2240 spin_unlock_irqrestore(&conf->device_lock, flags); 2241 conf->raid_disks = mddev->raid_disks = raid_disks; 2242 mddev->delta_disks = 0; 2243 2244 conf->last_used = 0; /* just make sure it is in-range */ 2245 lower_barrier(conf); 2246 2247 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2248 md_wakeup_thread(mddev->thread); 2249 2250 mempool_destroy(oldpool); 2251 return 0; 2252 } 2253 2254 static void raid1_quiesce(mddev_t *mddev, int state) 2255 { 2256 conf_t *conf = mddev_to_conf(mddev); 2257 2258 switch(state) { 2259 case 1: 2260 raise_barrier(conf); 2261 break; 2262 case 0: 2263 lower_barrier(conf); 2264 break; 2265 } 2266 } 2267 2268 2269 static struct mdk_personality raid1_personality = 2270 { 2271 .name = "raid1", 2272 .level = 1, 2273 .owner = THIS_MODULE, 2274 .make_request = make_request, 2275 .run = run, 2276 .stop = stop, 2277 .status = status, 2278 .error_handler = error, 2279 .hot_add_disk = raid1_add_disk, 2280 .hot_remove_disk= raid1_remove_disk, 2281 .spare_active = raid1_spare_active, 2282 .sync_request = sync_request, 2283 .resize = raid1_resize, 2284 .size = raid1_size, 2285 .check_reshape = raid1_reshape, 2286 .quiesce = raid1_quiesce, 2287 }; 2288 2289 static int __init raid_init(void) 2290 { 2291 return register_md_personality(&raid1_personality); 2292 } 2293 2294 static void raid_exit(void) 2295 { 2296 unregister_md_personality(&raid1_personality); 2297 } 2298 2299 module_init(raid_init); 2300 module_exit(raid_exit); 2301 MODULE_LICENSE("GPL"); 2302 MODULE_ALIAS("md-personality-3"); /* RAID1 */ 2303 MODULE_ALIAS("md-raid1"); 2304 MODULE_ALIAS("md-level-1"); 2305