1 /* 2 * raid1.c : Multiple Devices driver for Linux 3 * 4 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat 5 * 6 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman 7 * 8 * RAID-1 management functions. 9 * 10 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000 11 * 12 * Fixes to reconstruction by Jakob Østergaard" <jakob@ostenfeld.dk> 13 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au> 14 * 15 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support 16 * bitmapped intelligence in resync: 17 * 18 * - bitmap marked during normal i/o 19 * - bitmap used to skip nondirty blocks during sync 20 * 21 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology: 22 * - persistent bitmap code 23 * 24 * This program is free software; you can redistribute it and/or modify 25 * it under the terms of the GNU General Public License as published by 26 * the Free Software Foundation; either version 2, or (at your option) 27 * any later version. 28 * 29 * You should have received a copy of the GNU General Public License 30 * (for example /usr/src/linux/COPYING); if not, write to the Free 31 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 32 */ 33 34 #include "dm-bio-list.h" 35 #include <linux/raid/raid1.h> 36 #include <linux/raid/bitmap.h> 37 38 #define DEBUG 0 39 #if DEBUG 40 #define PRINTK(x...) printk(x) 41 #else 42 #define PRINTK(x...) 43 #endif 44 45 /* 46 * Number of guaranteed r1bios in case of extreme VM load: 47 */ 48 #define NR_RAID1_BIOS 256 49 50 51 static void unplug_slaves(mddev_t *mddev); 52 53 static void allow_barrier(conf_t *conf); 54 static void lower_barrier(conf_t *conf); 55 56 static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) 57 { 58 struct pool_info *pi = data; 59 r1bio_t *r1_bio; 60 int size = offsetof(r1bio_t, bios[pi->raid_disks]); 61 62 /* allocate a r1bio with room for raid_disks entries in the bios array */ 63 r1_bio = kzalloc(size, gfp_flags); 64 if (!r1_bio) 65 unplug_slaves(pi->mddev); 66 67 return r1_bio; 68 } 69 70 static void r1bio_pool_free(void *r1_bio, void *data) 71 { 72 kfree(r1_bio); 73 } 74 75 #define RESYNC_BLOCK_SIZE (64*1024) 76 //#define RESYNC_BLOCK_SIZE PAGE_SIZE 77 #define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9) 78 #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE) 79 #define RESYNC_WINDOW (2048*1024) 80 81 static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) 82 { 83 struct pool_info *pi = data; 84 struct page *page; 85 r1bio_t *r1_bio; 86 struct bio *bio; 87 int i, j; 88 89 r1_bio = r1bio_pool_alloc(gfp_flags, pi); 90 if (!r1_bio) { 91 unplug_slaves(pi->mddev); 92 return NULL; 93 } 94 95 /* 96 * Allocate bios : 1 for reading, n-1 for writing 97 */ 98 for (j = pi->raid_disks ; j-- ; ) { 99 bio = bio_alloc(gfp_flags, RESYNC_PAGES); 100 if (!bio) 101 goto out_free_bio; 102 r1_bio->bios[j] = bio; 103 } 104 /* 105 * Allocate RESYNC_PAGES data pages and attach them to 106 * the first bio. 107 * If this is a user-requested check/repair, allocate 108 * RESYNC_PAGES for each bio. 109 */ 110 if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) 111 j = pi->raid_disks; 112 else 113 j = 1; 114 while(j--) { 115 bio = r1_bio->bios[j]; 116 for (i = 0; i < RESYNC_PAGES; i++) { 117 page = alloc_page(gfp_flags); 118 if (unlikely(!page)) 119 goto out_free_pages; 120 121 bio->bi_io_vec[i].bv_page = page; 122 } 123 } 124 /* If not user-requests, copy the page pointers to all bios */ 125 if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) { 126 for (i=0; i<RESYNC_PAGES ; i++) 127 for (j=1; j<pi->raid_disks; j++) 128 r1_bio->bios[j]->bi_io_vec[i].bv_page = 129 r1_bio->bios[0]->bi_io_vec[i].bv_page; 130 } 131 132 r1_bio->master_bio = NULL; 133 134 return r1_bio; 135 136 out_free_pages: 137 for (i=0; i < RESYNC_PAGES ; i++) 138 for (j=0 ; j < pi->raid_disks; j++) 139 safe_put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page); 140 j = -1; 141 out_free_bio: 142 while ( ++j < pi->raid_disks ) 143 bio_put(r1_bio->bios[j]); 144 r1bio_pool_free(r1_bio, data); 145 return NULL; 146 } 147 148 static void r1buf_pool_free(void *__r1_bio, void *data) 149 { 150 struct pool_info *pi = data; 151 int i,j; 152 r1bio_t *r1bio = __r1_bio; 153 154 for (i = 0; i < RESYNC_PAGES; i++) 155 for (j = pi->raid_disks; j-- ;) { 156 if (j == 0 || 157 r1bio->bios[j]->bi_io_vec[i].bv_page != 158 r1bio->bios[0]->bi_io_vec[i].bv_page) 159 safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page); 160 } 161 for (i=0 ; i < pi->raid_disks; i++) 162 bio_put(r1bio->bios[i]); 163 164 r1bio_pool_free(r1bio, data); 165 } 166 167 static void put_all_bios(conf_t *conf, r1bio_t *r1_bio) 168 { 169 int i; 170 171 for (i = 0; i < conf->raid_disks; i++) { 172 struct bio **bio = r1_bio->bios + i; 173 if (*bio && *bio != IO_BLOCKED) 174 bio_put(*bio); 175 *bio = NULL; 176 } 177 } 178 179 static void free_r1bio(r1bio_t *r1_bio) 180 { 181 conf_t *conf = mddev_to_conf(r1_bio->mddev); 182 183 /* 184 * Wake up any possible resync thread that waits for the device 185 * to go idle. 186 */ 187 allow_barrier(conf); 188 189 put_all_bios(conf, r1_bio); 190 mempool_free(r1_bio, conf->r1bio_pool); 191 } 192 193 static void put_buf(r1bio_t *r1_bio) 194 { 195 conf_t *conf = mddev_to_conf(r1_bio->mddev); 196 int i; 197 198 for (i=0; i<conf->raid_disks; i++) { 199 struct bio *bio = r1_bio->bios[i]; 200 if (bio->bi_end_io) 201 rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev); 202 } 203 204 mempool_free(r1_bio, conf->r1buf_pool); 205 206 lower_barrier(conf); 207 } 208 209 static void reschedule_retry(r1bio_t *r1_bio) 210 { 211 unsigned long flags; 212 mddev_t *mddev = r1_bio->mddev; 213 conf_t *conf = mddev_to_conf(mddev); 214 215 spin_lock_irqsave(&conf->device_lock, flags); 216 list_add(&r1_bio->retry_list, &conf->retry_list); 217 conf->nr_queued ++; 218 spin_unlock_irqrestore(&conf->device_lock, flags); 219 220 wake_up(&conf->wait_barrier); 221 md_wakeup_thread(mddev->thread); 222 } 223 224 /* 225 * raid_end_bio_io() is called when we have finished servicing a mirrored 226 * operation and are ready to return a success/failure code to the buffer 227 * cache layer. 228 */ 229 static void raid_end_bio_io(r1bio_t *r1_bio) 230 { 231 struct bio *bio = r1_bio->master_bio; 232 233 /* if nobody has done the final endio yet, do it now */ 234 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { 235 PRINTK(KERN_DEBUG "raid1: sync end %s on sectors %llu-%llu\n", 236 (bio_data_dir(bio) == WRITE) ? "write" : "read", 237 (unsigned long long) bio->bi_sector, 238 (unsigned long long) bio->bi_sector + 239 (bio->bi_size >> 9) - 1); 240 241 bio_endio(bio, 242 test_bit(R1BIO_Uptodate, &r1_bio->state) ? 0 : -EIO); 243 } 244 free_r1bio(r1_bio); 245 } 246 247 /* 248 * Update disk head position estimator based on IRQ completion info. 249 */ 250 static inline void update_head_pos(int disk, r1bio_t *r1_bio) 251 { 252 conf_t *conf = mddev_to_conf(r1_bio->mddev); 253 254 conf->mirrors[disk].head_position = 255 r1_bio->sector + (r1_bio->sectors); 256 } 257 258 static void raid1_end_read_request(struct bio *bio, int error) 259 { 260 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 261 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); 262 int mirror; 263 conf_t *conf = mddev_to_conf(r1_bio->mddev); 264 265 mirror = r1_bio->read_disk; 266 /* 267 * this branch is our 'one mirror IO has finished' event handler: 268 */ 269 update_head_pos(mirror, r1_bio); 270 271 if (uptodate) 272 set_bit(R1BIO_Uptodate, &r1_bio->state); 273 else { 274 /* If all other devices have failed, we want to return 275 * the error upwards rather than fail the last device. 276 * Here we redefine "uptodate" to mean "Don't want to retry" 277 */ 278 unsigned long flags; 279 spin_lock_irqsave(&conf->device_lock, flags); 280 if (r1_bio->mddev->degraded == conf->raid_disks || 281 (r1_bio->mddev->degraded == conf->raid_disks-1 && 282 !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags))) 283 uptodate = 1; 284 spin_unlock_irqrestore(&conf->device_lock, flags); 285 } 286 287 if (uptodate) 288 raid_end_bio_io(r1_bio); 289 else { 290 /* 291 * oops, read error: 292 */ 293 char b[BDEVNAME_SIZE]; 294 if (printk_ratelimit()) 295 printk(KERN_ERR "raid1: %s: rescheduling sector %llu\n", 296 bdevname(conf->mirrors[mirror].rdev->bdev,b), (unsigned long long)r1_bio->sector); 297 reschedule_retry(r1_bio); 298 } 299 300 rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev); 301 } 302 303 static void raid1_end_write_request(struct bio *bio, int error) 304 { 305 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 306 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); 307 int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state); 308 conf_t *conf = mddev_to_conf(r1_bio->mddev); 309 struct bio *to_put = NULL; 310 311 312 for (mirror = 0; mirror < conf->raid_disks; mirror++) 313 if (r1_bio->bios[mirror] == bio) 314 break; 315 316 if (error == -EOPNOTSUPP && test_bit(R1BIO_Barrier, &r1_bio->state)) { 317 set_bit(BarriersNotsupp, &conf->mirrors[mirror].rdev->flags); 318 set_bit(R1BIO_BarrierRetry, &r1_bio->state); 319 r1_bio->mddev->barriers_work = 0; 320 /* Don't rdev_dec_pending in this branch - keep it for the retry */ 321 } else { 322 /* 323 * this branch is our 'one mirror IO has finished' event handler: 324 */ 325 r1_bio->bios[mirror] = NULL; 326 to_put = bio; 327 if (!uptodate) { 328 md_error(r1_bio->mddev, conf->mirrors[mirror].rdev); 329 /* an I/O failed, we can't clear the bitmap */ 330 set_bit(R1BIO_Degraded, &r1_bio->state); 331 } else 332 /* 333 * Set R1BIO_Uptodate in our master bio, so that 334 * we will return a good error code for to the higher 335 * levels even if IO on some other mirrored buffer fails. 336 * 337 * The 'master' represents the composite IO operation to 338 * user-side. So if something waits for IO, then it will 339 * wait for the 'master' bio. 340 */ 341 set_bit(R1BIO_Uptodate, &r1_bio->state); 342 343 update_head_pos(mirror, r1_bio); 344 345 if (behind) { 346 if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags)) 347 atomic_dec(&r1_bio->behind_remaining); 348 349 /* In behind mode, we ACK the master bio once the I/O has safely 350 * reached all non-writemostly disks. Setting the Returned bit 351 * ensures that this gets done only once -- we don't ever want to 352 * return -EIO here, instead we'll wait */ 353 354 if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) && 355 test_bit(R1BIO_Uptodate, &r1_bio->state)) { 356 /* Maybe we can return now */ 357 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { 358 struct bio *mbio = r1_bio->master_bio; 359 PRINTK(KERN_DEBUG "raid1: behind end write sectors %llu-%llu\n", 360 (unsigned long long) mbio->bi_sector, 361 (unsigned long long) mbio->bi_sector + 362 (mbio->bi_size >> 9) - 1); 363 bio_endio(mbio, 0); 364 } 365 } 366 } 367 rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev); 368 } 369 /* 370 * 371 * Let's see if all mirrored write operations have finished 372 * already. 373 */ 374 if (atomic_dec_and_test(&r1_bio->remaining)) { 375 if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) 376 reschedule_retry(r1_bio); 377 else { 378 /* it really is the end of this request */ 379 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { 380 /* free extra copy of the data pages */ 381 int i = bio->bi_vcnt; 382 while (i--) 383 safe_put_page(bio->bi_io_vec[i].bv_page); 384 } 385 /* clear the bitmap if all writes complete successfully */ 386 bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector, 387 r1_bio->sectors, 388 !test_bit(R1BIO_Degraded, &r1_bio->state), 389 behind); 390 md_write_end(r1_bio->mddev); 391 raid_end_bio_io(r1_bio); 392 } 393 } 394 395 if (to_put) 396 bio_put(to_put); 397 } 398 399 400 /* 401 * This routine returns the disk from which the requested read should 402 * be done. There is a per-array 'next expected sequential IO' sector 403 * number - if this matches on the next IO then we use the last disk. 404 * There is also a per-disk 'last know head position' sector that is 405 * maintained from IRQ contexts, both the normal and the resync IO 406 * completion handlers update this position correctly. If there is no 407 * perfect sequential match then we pick the disk whose head is closest. 408 * 409 * If there are 2 mirrors in the same 2 devices, performance degrades 410 * because position is mirror, not device based. 411 * 412 * The rdev for the device selected will have nr_pending incremented. 413 */ 414 static int read_balance(conf_t *conf, r1bio_t *r1_bio) 415 { 416 const unsigned long this_sector = r1_bio->sector; 417 int new_disk = conf->last_used, disk = new_disk; 418 int wonly_disk = -1; 419 const int sectors = r1_bio->sectors; 420 sector_t new_distance, current_distance; 421 mdk_rdev_t *rdev; 422 423 rcu_read_lock(); 424 /* 425 * Check if we can balance. We can balance on the whole 426 * device if no resync is going on, or below the resync window. 427 * We take the first readable disk when above the resync window. 428 */ 429 retry: 430 if (conf->mddev->recovery_cp < MaxSector && 431 (this_sector + sectors >= conf->next_resync)) { 432 /* Choose the first operation device, for consistancy */ 433 new_disk = 0; 434 435 for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev); 436 r1_bio->bios[new_disk] == IO_BLOCKED || 437 !rdev || !test_bit(In_sync, &rdev->flags) 438 || test_bit(WriteMostly, &rdev->flags); 439 rdev = rcu_dereference(conf->mirrors[++new_disk].rdev)) { 440 441 if (rdev && test_bit(In_sync, &rdev->flags) && 442 r1_bio->bios[new_disk] != IO_BLOCKED) 443 wonly_disk = new_disk; 444 445 if (new_disk == conf->raid_disks - 1) { 446 new_disk = wonly_disk; 447 break; 448 } 449 } 450 goto rb_out; 451 } 452 453 454 /* make sure the disk is operational */ 455 for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev); 456 r1_bio->bios[new_disk] == IO_BLOCKED || 457 !rdev || !test_bit(In_sync, &rdev->flags) || 458 test_bit(WriteMostly, &rdev->flags); 459 rdev = rcu_dereference(conf->mirrors[new_disk].rdev)) { 460 461 if (rdev && test_bit(In_sync, &rdev->flags) && 462 r1_bio->bios[new_disk] != IO_BLOCKED) 463 wonly_disk = new_disk; 464 465 if (new_disk <= 0) 466 new_disk = conf->raid_disks; 467 new_disk--; 468 if (new_disk == disk) { 469 new_disk = wonly_disk; 470 break; 471 } 472 } 473 474 if (new_disk < 0) 475 goto rb_out; 476 477 disk = new_disk; 478 /* now disk == new_disk == starting point for search */ 479 480 /* 481 * Don't change to another disk for sequential reads: 482 */ 483 if (conf->next_seq_sect == this_sector) 484 goto rb_out; 485 if (this_sector == conf->mirrors[new_disk].head_position) 486 goto rb_out; 487 488 current_distance = abs(this_sector - conf->mirrors[disk].head_position); 489 490 /* Find the disk whose head is closest */ 491 492 do { 493 if (disk <= 0) 494 disk = conf->raid_disks; 495 disk--; 496 497 rdev = rcu_dereference(conf->mirrors[disk].rdev); 498 499 if (!rdev || r1_bio->bios[disk] == IO_BLOCKED || 500 !test_bit(In_sync, &rdev->flags) || 501 test_bit(WriteMostly, &rdev->flags)) 502 continue; 503 504 if (!atomic_read(&rdev->nr_pending)) { 505 new_disk = disk; 506 break; 507 } 508 new_distance = abs(this_sector - conf->mirrors[disk].head_position); 509 if (new_distance < current_distance) { 510 current_distance = new_distance; 511 new_disk = disk; 512 } 513 } while (disk != conf->last_used); 514 515 rb_out: 516 517 518 if (new_disk >= 0) { 519 rdev = rcu_dereference(conf->mirrors[new_disk].rdev); 520 if (!rdev) 521 goto retry; 522 atomic_inc(&rdev->nr_pending); 523 if (!test_bit(In_sync, &rdev->flags)) { 524 /* cannot risk returning a device that failed 525 * before we inc'ed nr_pending 526 */ 527 rdev_dec_pending(rdev, conf->mddev); 528 goto retry; 529 } 530 conf->next_seq_sect = this_sector + sectors; 531 conf->last_used = new_disk; 532 } 533 rcu_read_unlock(); 534 535 return new_disk; 536 } 537 538 static void unplug_slaves(mddev_t *mddev) 539 { 540 conf_t *conf = mddev_to_conf(mddev); 541 int i; 542 543 rcu_read_lock(); 544 for (i=0; i<mddev->raid_disks; i++) { 545 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); 546 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { 547 struct request_queue *r_queue = bdev_get_queue(rdev->bdev); 548 549 atomic_inc(&rdev->nr_pending); 550 rcu_read_unlock(); 551 552 blk_unplug(r_queue); 553 554 rdev_dec_pending(rdev, mddev); 555 rcu_read_lock(); 556 } 557 } 558 rcu_read_unlock(); 559 } 560 561 static void raid1_unplug(struct request_queue *q) 562 { 563 mddev_t *mddev = q->queuedata; 564 565 unplug_slaves(mddev); 566 md_wakeup_thread(mddev->thread); 567 } 568 569 static int raid1_congested(void *data, int bits) 570 { 571 mddev_t *mddev = data; 572 conf_t *conf = mddev_to_conf(mddev); 573 int i, ret = 0; 574 575 rcu_read_lock(); 576 for (i = 0; i < mddev->raid_disks; i++) { 577 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); 578 if (rdev && !test_bit(Faulty, &rdev->flags)) { 579 struct request_queue *q = bdev_get_queue(rdev->bdev); 580 581 /* Note the '|| 1' - when read_balance prefers 582 * non-congested targets, it can be removed 583 */ 584 if ((bits & (1<<BDI_write_congested)) || 1) 585 ret |= bdi_congested(&q->backing_dev_info, bits); 586 else 587 ret &= bdi_congested(&q->backing_dev_info, bits); 588 } 589 } 590 rcu_read_unlock(); 591 return ret; 592 } 593 594 595 static int flush_pending_writes(conf_t *conf) 596 { 597 /* Any writes that have been queued but are awaiting 598 * bitmap updates get flushed here. 599 * We return 1 if any requests were actually submitted. 600 */ 601 int rv = 0; 602 603 spin_lock_irq(&conf->device_lock); 604 605 if (conf->pending_bio_list.head) { 606 struct bio *bio; 607 bio = bio_list_get(&conf->pending_bio_list); 608 blk_remove_plug(conf->mddev->queue); 609 spin_unlock_irq(&conf->device_lock); 610 /* flush any pending bitmap writes to 611 * disk before proceeding w/ I/O */ 612 bitmap_unplug(conf->mddev->bitmap); 613 614 while (bio) { /* submit pending writes */ 615 struct bio *next = bio->bi_next; 616 bio->bi_next = NULL; 617 generic_make_request(bio); 618 bio = next; 619 } 620 rv = 1; 621 } else 622 spin_unlock_irq(&conf->device_lock); 623 return rv; 624 } 625 626 /* Barriers.... 627 * Sometimes we need to suspend IO while we do something else, 628 * either some resync/recovery, or reconfigure the array. 629 * To do this we raise a 'barrier'. 630 * The 'barrier' is a counter that can be raised multiple times 631 * to count how many activities are happening which preclude 632 * normal IO. 633 * We can only raise the barrier if there is no pending IO. 634 * i.e. if nr_pending == 0. 635 * We choose only to raise the barrier if no-one is waiting for the 636 * barrier to go down. This means that as soon as an IO request 637 * is ready, no other operations which require a barrier will start 638 * until the IO request has had a chance. 639 * 640 * So: regular IO calls 'wait_barrier'. When that returns there 641 * is no backgroup IO happening, It must arrange to call 642 * allow_barrier when it has finished its IO. 643 * backgroup IO calls must call raise_barrier. Once that returns 644 * there is no normal IO happeing. It must arrange to call 645 * lower_barrier when the particular background IO completes. 646 */ 647 #define RESYNC_DEPTH 32 648 649 static void raise_barrier(conf_t *conf) 650 { 651 spin_lock_irq(&conf->resync_lock); 652 653 /* Wait until no block IO is waiting */ 654 wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting, 655 conf->resync_lock, 656 raid1_unplug(conf->mddev->queue)); 657 658 /* block any new IO from starting */ 659 conf->barrier++; 660 661 /* No wait for all pending IO to complete */ 662 wait_event_lock_irq(conf->wait_barrier, 663 !conf->nr_pending && conf->barrier < RESYNC_DEPTH, 664 conf->resync_lock, 665 raid1_unplug(conf->mddev->queue)); 666 667 spin_unlock_irq(&conf->resync_lock); 668 } 669 670 static void lower_barrier(conf_t *conf) 671 { 672 unsigned long flags; 673 spin_lock_irqsave(&conf->resync_lock, flags); 674 conf->barrier--; 675 spin_unlock_irqrestore(&conf->resync_lock, flags); 676 wake_up(&conf->wait_barrier); 677 } 678 679 static void wait_barrier(conf_t *conf) 680 { 681 spin_lock_irq(&conf->resync_lock); 682 if (conf->barrier) { 683 conf->nr_waiting++; 684 wait_event_lock_irq(conf->wait_barrier, !conf->barrier, 685 conf->resync_lock, 686 raid1_unplug(conf->mddev->queue)); 687 conf->nr_waiting--; 688 } 689 conf->nr_pending++; 690 spin_unlock_irq(&conf->resync_lock); 691 } 692 693 static void allow_barrier(conf_t *conf) 694 { 695 unsigned long flags; 696 spin_lock_irqsave(&conf->resync_lock, flags); 697 conf->nr_pending--; 698 spin_unlock_irqrestore(&conf->resync_lock, flags); 699 wake_up(&conf->wait_barrier); 700 } 701 702 static void freeze_array(conf_t *conf) 703 { 704 /* stop syncio and normal IO and wait for everything to 705 * go quite. 706 * We increment barrier and nr_waiting, and then 707 * wait until nr_pending match nr_queued+1 708 * This is called in the context of one normal IO request 709 * that has failed. Thus any sync request that might be pending 710 * will be blocked by nr_pending, and we need to wait for 711 * pending IO requests to complete or be queued for re-try. 712 * Thus the number queued (nr_queued) plus this request (1) 713 * must match the number of pending IOs (nr_pending) before 714 * we continue. 715 */ 716 spin_lock_irq(&conf->resync_lock); 717 conf->barrier++; 718 conf->nr_waiting++; 719 wait_event_lock_irq(conf->wait_barrier, 720 conf->nr_pending == conf->nr_queued+1, 721 conf->resync_lock, 722 ({ flush_pending_writes(conf); 723 raid1_unplug(conf->mddev->queue); })); 724 spin_unlock_irq(&conf->resync_lock); 725 } 726 static void unfreeze_array(conf_t *conf) 727 { 728 /* reverse the effect of the freeze */ 729 spin_lock_irq(&conf->resync_lock); 730 conf->barrier--; 731 conf->nr_waiting--; 732 wake_up(&conf->wait_barrier); 733 spin_unlock_irq(&conf->resync_lock); 734 } 735 736 737 /* duplicate the data pages for behind I/O */ 738 static struct page **alloc_behind_pages(struct bio *bio) 739 { 740 int i; 741 struct bio_vec *bvec; 742 struct page **pages = kzalloc(bio->bi_vcnt * sizeof(struct page *), 743 GFP_NOIO); 744 if (unlikely(!pages)) 745 goto do_sync_io; 746 747 bio_for_each_segment(bvec, bio, i) { 748 pages[i] = alloc_page(GFP_NOIO); 749 if (unlikely(!pages[i])) 750 goto do_sync_io; 751 memcpy(kmap(pages[i]) + bvec->bv_offset, 752 kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len); 753 kunmap(pages[i]); 754 kunmap(bvec->bv_page); 755 } 756 757 return pages; 758 759 do_sync_io: 760 if (pages) 761 for (i = 0; i < bio->bi_vcnt && pages[i]; i++) 762 put_page(pages[i]); 763 kfree(pages); 764 PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size); 765 return NULL; 766 } 767 768 static int make_request(struct request_queue *q, struct bio * bio) 769 { 770 mddev_t *mddev = q->queuedata; 771 conf_t *conf = mddev_to_conf(mddev); 772 mirror_info_t *mirror; 773 r1bio_t *r1_bio; 774 struct bio *read_bio; 775 int i, targets = 0, disks; 776 struct bitmap *bitmap; 777 unsigned long flags; 778 struct bio_list bl; 779 struct page **behind_pages = NULL; 780 const int rw = bio_data_dir(bio); 781 const int do_sync = bio_sync(bio); 782 int cpu, do_barriers; 783 mdk_rdev_t *blocked_rdev; 784 785 /* 786 * Register the new request and wait if the reconstruction 787 * thread has put up a bar for new requests. 788 * Continue immediately if no resync is active currently. 789 * We test barriers_work *after* md_write_start as md_write_start 790 * may cause the first superblock write, and that will check out 791 * if barriers work. 792 */ 793 794 md_write_start(mddev, bio); /* wait on superblock update early */ 795 796 if (unlikely(!mddev->barriers_work && bio_barrier(bio))) { 797 if (rw == WRITE) 798 md_write_end(mddev); 799 bio_endio(bio, -EOPNOTSUPP); 800 return 0; 801 } 802 803 wait_barrier(conf); 804 805 bitmap = mddev->bitmap; 806 807 cpu = part_stat_lock(); 808 part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); 809 part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], 810 bio_sectors(bio)); 811 part_stat_unlock(); 812 813 /* 814 * make_request() can abort the operation when READA is being 815 * used and no empty request is available. 816 * 817 */ 818 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO); 819 820 r1_bio->master_bio = bio; 821 r1_bio->sectors = bio->bi_size >> 9; 822 r1_bio->state = 0; 823 r1_bio->mddev = mddev; 824 r1_bio->sector = bio->bi_sector; 825 826 if (rw == READ) { 827 /* 828 * read balancing logic: 829 */ 830 int rdisk = read_balance(conf, r1_bio); 831 832 if (rdisk < 0) { 833 /* couldn't find anywhere to read from */ 834 raid_end_bio_io(r1_bio); 835 return 0; 836 } 837 mirror = conf->mirrors + rdisk; 838 839 r1_bio->read_disk = rdisk; 840 841 read_bio = bio_clone(bio, GFP_NOIO); 842 843 r1_bio->bios[rdisk] = read_bio; 844 845 read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset; 846 read_bio->bi_bdev = mirror->rdev->bdev; 847 read_bio->bi_end_io = raid1_end_read_request; 848 read_bio->bi_rw = READ | do_sync; 849 read_bio->bi_private = r1_bio; 850 851 generic_make_request(read_bio); 852 return 0; 853 } 854 855 /* 856 * WRITE: 857 */ 858 /* first select target devices under spinlock and 859 * inc refcount on their rdev. Record them by setting 860 * bios[x] to bio 861 */ 862 disks = conf->raid_disks; 863 #if 0 864 { static int first=1; 865 if (first) printk("First Write sector %llu disks %d\n", 866 (unsigned long long)r1_bio->sector, disks); 867 first = 0; 868 } 869 #endif 870 retry_write: 871 blocked_rdev = NULL; 872 rcu_read_lock(); 873 for (i = 0; i < disks; i++) { 874 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); 875 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { 876 atomic_inc(&rdev->nr_pending); 877 blocked_rdev = rdev; 878 break; 879 } 880 if (rdev && !test_bit(Faulty, &rdev->flags)) { 881 atomic_inc(&rdev->nr_pending); 882 if (test_bit(Faulty, &rdev->flags)) { 883 rdev_dec_pending(rdev, mddev); 884 r1_bio->bios[i] = NULL; 885 } else 886 r1_bio->bios[i] = bio; 887 targets++; 888 } else 889 r1_bio->bios[i] = NULL; 890 } 891 rcu_read_unlock(); 892 893 if (unlikely(blocked_rdev)) { 894 /* Wait for this device to become unblocked */ 895 int j; 896 897 for (j = 0; j < i; j++) 898 if (r1_bio->bios[j]) 899 rdev_dec_pending(conf->mirrors[j].rdev, mddev); 900 901 allow_barrier(conf); 902 md_wait_for_blocked_rdev(blocked_rdev, mddev); 903 wait_barrier(conf); 904 goto retry_write; 905 } 906 907 BUG_ON(targets == 0); /* we never fail the last device */ 908 909 if (targets < conf->raid_disks) { 910 /* array is degraded, we will not clear the bitmap 911 * on I/O completion (see raid1_end_write_request) */ 912 set_bit(R1BIO_Degraded, &r1_bio->state); 913 } 914 915 /* do behind I/O ? */ 916 if (bitmap && 917 atomic_read(&bitmap->behind_writes) < bitmap->max_write_behind && 918 (behind_pages = alloc_behind_pages(bio)) != NULL) 919 set_bit(R1BIO_BehindIO, &r1_bio->state); 920 921 atomic_set(&r1_bio->remaining, 0); 922 atomic_set(&r1_bio->behind_remaining, 0); 923 924 do_barriers = bio_barrier(bio); 925 if (do_barriers) 926 set_bit(R1BIO_Barrier, &r1_bio->state); 927 928 bio_list_init(&bl); 929 for (i = 0; i < disks; i++) { 930 struct bio *mbio; 931 if (!r1_bio->bios[i]) 932 continue; 933 934 mbio = bio_clone(bio, GFP_NOIO); 935 r1_bio->bios[i] = mbio; 936 937 mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset; 938 mbio->bi_bdev = conf->mirrors[i].rdev->bdev; 939 mbio->bi_end_io = raid1_end_write_request; 940 mbio->bi_rw = WRITE | do_barriers | do_sync; 941 mbio->bi_private = r1_bio; 942 943 if (behind_pages) { 944 struct bio_vec *bvec; 945 int j; 946 947 /* Yes, I really want the '__' version so that 948 * we clear any unused pointer in the io_vec, rather 949 * than leave them unchanged. This is important 950 * because when we come to free the pages, we won't 951 * know the originial bi_idx, so we just free 952 * them all 953 */ 954 __bio_for_each_segment(bvec, mbio, j, 0) 955 bvec->bv_page = behind_pages[j]; 956 if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags)) 957 atomic_inc(&r1_bio->behind_remaining); 958 } 959 960 atomic_inc(&r1_bio->remaining); 961 962 bio_list_add(&bl, mbio); 963 } 964 kfree(behind_pages); /* the behind pages are attached to the bios now */ 965 966 bitmap_startwrite(bitmap, bio->bi_sector, r1_bio->sectors, 967 test_bit(R1BIO_BehindIO, &r1_bio->state)); 968 spin_lock_irqsave(&conf->device_lock, flags); 969 bio_list_merge(&conf->pending_bio_list, &bl); 970 bio_list_init(&bl); 971 972 blk_plug_device(mddev->queue); 973 spin_unlock_irqrestore(&conf->device_lock, flags); 974 975 /* In case raid1d snuck into freeze_array */ 976 wake_up(&conf->wait_barrier); 977 978 if (do_sync) 979 md_wakeup_thread(mddev->thread); 980 #if 0 981 while ((bio = bio_list_pop(&bl)) != NULL) 982 generic_make_request(bio); 983 #endif 984 985 return 0; 986 } 987 988 static void status(struct seq_file *seq, mddev_t *mddev) 989 { 990 conf_t *conf = mddev_to_conf(mddev); 991 int i; 992 993 seq_printf(seq, " [%d/%d] [", conf->raid_disks, 994 conf->raid_disks - mddev->degraded); 995 rcu_read_lock(); 996 for (i = 0; i < conf->raid_disks; i++) { 997 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); 998 seq_printf(seq, "%s", 999 rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_"); 1000 } 1001 rcu_read_unlock(); 1002 seq_printf(seq, "]"); 1003 } 1004 1005 1006 static void error(mddev_t *mddev, mdk_rdev_t *rdev) 1007 { 1008 char b[BDEVNAME_SIZE]; 1009 conf_t *conf = mddev_to_conf(mddev); 1010 1011 /* 1012 * If it is not operational, then we have already marked it as dead 1013 * else if it is the last working disks, ignore the error, let the 1014 * next level up know. 1015 * else mark the drive as failed 1016 */ 1017 if (test_bit(In_sync, &rdev->flags) 1018 && (conf->raid_disks - mddev->degraded) == 1) 1019 /* 1020 * Don't fail the drive, act as though we were just a 1021 * normal single drive 1022 */ 1023 return; 1024 if (test_and_clear_bit(In_sync, &rdev->flags)) { 1025 unsigned long flags; 1026 spin_lock_irqsave(&conf->device_lock, flags); 1027 mddev->degraded++; 1028 set_bit(Faulty, &rdev->flags); 1029 spin_unlock_irqrestore(&conf->device_lock, flags); 1030 /* 1031 * if recovery is running, make sure it aborts. 1032 */ 1033 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 1034 } else 1035 set_bit(Faulty, &rdev->flags); 1036 set_bit(MD_CHANGE_DEVS, &mddev->flags); 1037 printk(KERN_ALERT "raid1: Disk failure on %s, disabling device.\n" 1038 "raid1: Operation continuing on %d devices.\n", 1039 bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded); 1040 } 1041 1042 static void print_conf(conf_t *conf) 1043 { 1044 int i; 1045 1046 printk("RAID1 conf printout:\n"); 1047 if (!conf) { 1048 printk("(!conf)\n"); 1049 return; 1050 } 1051 printk(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded, 1052 conf->raid_disks); 1053 1054 rcu_read_lock(); 1055 for (i = 0; i < conf->raid_disks; i++) { 1056 char b[BDEVNAME_SIZE]; 1057 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); 1058 if (rdev) 1059 printk(" disk %d, wo:%d, o:%d, dev:%s\n", 1060 i, !test_bit(In_sync, &rdev->flags), 1061 !test_bit(Faulty, &rdev->flags), 1062 bdevname(rdev->bdev,b)); 1063 } 1064 rcu_read_unlock(); 1065 } 1066 1067 static void close_sync(conf_t *conf) 1068 { 1069 wait_barrier(conf); 1070 allow_barrier(conf); 1071 1072 mempool_destroy(conf->r1buf_pool); 1073 conf->r1buf_pool = NULL; 1074 } 1075 1076 static int raid1_spare_active(mddev_t *mddev) 1077 { 1078 int i; 1079 conf_t *conf = mddev->private; 1080 1081 /* 1082 * Find all failed disks within the RAID1 configuration 1083 * and mark them readable. 1084 * Called under mddev lock, so rcu protection not needed. 1085 */ 1086 for (i = 0; i < conf->raid_disks; i++) { 1087 mdk_rdev_t *rdev = conf->mirrors[i].rdev; 1088 if (rdev 1089 && !test_bit(Faulty, &rdev->flags) 1090 && !test_and_set_bit(In_sync, &rdev->flags)) { 1091 unsigned long flags; 1092 spin_lock_irqsave(&conf->device_lock, flags); 1093 mddev->degraded--; 1094 spin_unlock_irqrestore(&conf->device_lock, flags); 1095 } 1096 } 1097 1098 print_conf(conf); 1099 return 0; 1100 } 1101 1102 1103 static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) 1104 { 1105 conf_t *conf = mddev->private; 1106 int err = -EEXIST; 1107 int mirror = 0; 1108 mirror_info_t *p; 1109 int first = 0; 1110 int last = mddev->raid_disks - 1; 1111 1112 if (rdev->raid_disk >= 0) 1113 first = last = rdev->raid_disk; 1114 1115 for (mirror = first; mirror <= last; mirror++) 1116 if ( !(p=conf->mirrors+mirror)->rdev) { 1117 1118 blk_queue_stack_limits(mddev->queue, 1119 rdev->bdev->bd_disk->queue); 1120 /* as we don't honour merge_bvec_fn, we must never risk 1121 * violating it, so limit ->max_sector to one PAGE, as 1122 * a one page request is never in violation. 1123 */ 1124 if (rdev->bdev->bd_disk->queue->merge_bvec_fn && 1125 mddev->queue->max_sectors > (PAGE_SIZE>>9)) 1126 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); 1127 1128 p->head_position = 0; 1129 rdev->raid_disk = mirror; 1130 err = 0; 1131 /* As all devices are equivalent, we don't need a full recovery 1132 * if this was recently any drive of the array 1133 */ 1134 if (rdev->saved_raid_disk < 0) 1135 conf->fullsync = 1; 1136 rcu_assign_pointer(p->rdev, rdev); 1137 break; 1138 } 1139 1140 print_conf(conf); 1141 return err; 1142 } 1143 1144 static int raid1_remove_disk(mddev_t *mddev, int number) 1145 { 1146 conf_t *conf = mddev->private; 1147 int err = 0; 1148 mdk_rdev_t *rdev; 1149 mirror_info_t *p = conf->mirrors+ number; 1150 1151 print_conf(conf); 1152 rdev = p->rdev; 1153 if (rdev) { 1154 if (test_bit(In_sync, &rdev->flags) || 1155 atomic_read(&rdev->nr_pending)) { 1156 err = -EBUSY; 1157 goto abort; 1158 } 1159 /* Only remove non-faulty devices is recovery 1160 * is not possible. 1161 */ 1162 if (!test_bit(Faulty, &rdev->flags) && 1163 mddev->degraded < conf->raid_disks) { 1164 err = -EBUSY; 1165 goto abort; 1166 } 1167 p->rdev = NULL; 1168 synchronize_rcu(); 1169 if (atomic_read(&rdev->nr_pending)) { 1170 /* lost the race, try later */ 1171 err = -EBUSY; 1172 p->rdev = rdev; 1173 } 1174 } 1175 abort: 1176 1177 print_conf(conf); 1178 return err; 1179 } 1180 1181 1182 static void end_sync_read(struct bio *bio, int error) 1183 { 1184 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); 1185 int i; 1186 1187 for (i=r1_bio->mddev->raid_disks; i--; ) 1188 if (r1_bio->bios[i] == bio) 1189 break; 1190 BUG_ON(i < 0); 1191 update_head_pos(i, r1_bio); 1192 /* 1193 * we have read a block, now it needs to be re-written, 1194 * or re-read if the read failed. 1195 * We don't do much here, just schedule handling by raid1d 1196 */ 1197 if (test_bit(BIO_UPTODATE, &bio->bi_flags)) 1198 set_bit(R1BIO_Uptodate, &r1_bio->state); 1199 1200 if (atomic_dec_and_test(&r1_bio->remaining)) 1201 reschedule_retry(r1_bio); 1202 } 1203 1204 static void end_sync_write(struct bio *bio, int error) 1205 { 1206 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 1207 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); 1208 mddev_t *mddev = r1_bio->mddev; 1209 conf_t *conf = mddev_to_conf(mddev); 1210 int i; 1211 int mirror=0; 1212 1213 for (i = 0; i < conf->raid_disks; i++) 1214 if (r1_bio->bios[i] == bio) { 1215 mirror = i; 1216 break; 1217 } 1218 if (!uptodate) { 1219 int sync_blocks = 0; 1220 sector_t s = r1_bio->sector; 1221 long sectors_to_go = r1_bio->sectors; 1222 /* make sure these bits doesn't get cleared. */ 1223 do { 1224 bitmap_end_sync(mddev->bitmap, s, 1225 &sync_blocks, 1); 1226 s += sync_blocks; 1227 sectors_to_go -= sync_blocks; 1228 } while (sectors_to_go > 0); 1229 md_error(mddev, conf->mirrors[mirror].rdev); 1230 } 1231 1232 update_head_pos(mirror, r1_bio); 1233 1234 if (atomic_dec_and_test(&r1_bio->remaining)) { 1235 md_done_sync(mddev, r1_bio->sectors, uptodate); 1236 put_buf(r1_bio); 1237 } 1238 } 1239 1240 static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) 1241 { 1242 conf_t *conf = mddev_to_conf(mddev); 1243 int i; 1244 int disks = conf->raid_disks; 1245 struct bio *bio, *wbio; 1246 1247 bio = r1_bio->bios[r1_bio->read_disk]; 1248 1249 1250 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 1251 /* We have read all readable devices. If we haven't 1252 * got the block, then there is no hope left. 1253 * If we have, then we want to do a comparison 1254 * and skip the write if everything is the same. 1255 * If any blocks failed to read, then we need to 1256 * attempt an over-write 1257 */ 1258 int primary; 1259 if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) { 1260 for (i=0; i<mddev->raid_disks; i++) 1261 if (r1_bio->bios[i]->bi_end_io == end_sync_read) 1262 md_error(mddev, conf->mirrors[i].rdev); 1263 1264 md_done_sync(mddev, r1_bio->sectors, 1); 1265 put_buf(r1_bio); 1266 return; 1267 } 1268 for (primary=0; primary<mddev->raid_disks; primary++) 1269 if (r1_bio->bios[primary]->bi_end_io == end_sync_read && 1270 test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) { 1271 r1_bio->bios[primary]->bi_end_io = NULL; 1272 rdev_dec_pending(conf->mirrors[primary].rdev, mddev); 1273 break; 1274 } 1275 r1_bio->read_disk = primary; 1276 for (i=0; i<mddev->raid_disks; i++) 1277 if (r1_bio->bios[i]->bi_end_io == end_sync_read) { 1278 int j; 1279 int vcnt = r1_bio->sectors >> (PAGE_SHIFT- 9); 1280 struct bio *pbio = r1_bio->bios[primary]; 1281 struct bio *sbio = r1_bio->bios[i]; 1282 1283 if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) { 1284 for (j = vcnt; j-- ; ) { 1285 struct page *p, *s; 1286 p = pbio->bi_io_vec[j].bv_page; 1287 s = sbio->bi_io_vec[j].bv_page; 1288 if (memcmp(page_address(p), 1289 page_address(s), 1290 PAGE_SIZE)) 1291 break; 1292 } 1293 } else 1294 j = 0; 1295 if (j >= 0) 1296 mddev->resync_mismatches += r1_bio->sectors; 1297 if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) 1298 && test_bit(BIO_UPTODATE, &sbio->bi_flags))) { 1299 sbio->bi_end_io = NULL; 1300 rdev_dec_pending(conf->mirrors[i].rdev, mddev); 1301 } else { 1302 /* fixup the bio for reuse */ 1303 int size; 1304 sbio->bi_vcnt = vcnt; 1305 sbio->bi_size = r1_bio->sectors << 9; 1306 sbio->bi_idx = 0; 1307 sbio->bi_phys_segments = 0; 1308 sbio->bi_flags &= ~(BIO_POOL_MASK - 1); 1309 sbio->bi_flags |= 1 << BIO_UPTODATE; 1310 sbio->bi_next = NULL; 1311 sbio->bi_sector = r1_bio->sector + 1312 conf->mirrors[i].rdev->data_offset; 1313 sbio->bi_bdev = conf->mirrors[i].rdev->bdev; 1314 size = sbio->bi_size; 1315 for (j = 0; j < vcnt ; j++) { 1316 struct bio_vec *bi; 1317 bi = &sbio->bi_io_vec[j]; 1318 bi->bv_offset = 0; 1319 if (size > PAGE_SIZE) 1320 bi->bv_len = PAGE_SIZE; 1321 else 1322 bi->bv_len = size; 1323 size -= PAGE_SIZE; 1324 memcpy(page_address(bi->bv_page), 1325 page_address(pbio->bi_io_vec[j].bv_page), 1326 PAGE_SIZE); 1327 } 1328 1329 } 1330 } 1331 } 1332 if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) { 1333 /* ouch - failed to read all of that. 1334 * Try some synchronous reads of other devices to get 1335 * good data, much like with normal read errors. Only 1336 * read into the pages we already have so we don't 1337 * need to re-issue the read request. 1338 * We don't need to freeze the array, because being in an 1339 * active sync request, there is no normal IO, and 1340 * no overlapping syncs. 1341 */ 1342 sector_t sect = r1_bio->sector; 1343 int sectors = r1_bio->sectors; 1344 int idx = 0; 1345 1346 while(sectors) { 1347 int s = sectors; 1348 int d = r1_bio->read_disk; 1349 int success = 0; 1350 mdk_rdev_t *rdev; 1351 1352 if (s > (PAGE_SIZE>>9)) 1353 s = PAGE_SIZE >> 9; 1354 do { 1355 if (r1_bio->bios[d]->bi_end_io == end_sync_read) { 1356 /* No rcu protection needed here devices 1357 * can only be removed when no resync is 1358 * active, and resync is currently active 1359 */ 1360 rdev = conf->mirrors[d].rdev; 1361 if (sync_page_io(rdev->bdev, 1362 sect + rdev->data_offset, 1363 s<<9, 1364 bio->bi_io_vec[idx].bv_page, 1365 READ)) { 1366 success = 1; 1367 break; 1368 } 1369 } 1370 d++; 1371 if (d == conf->raid_disks) 1372 d = 0; 1373 } while (!success && d != r1_bio->read_disk); 1374 1375 if (success) { 1376 int start = d; 1377 /* write it back and re-read */ 1378 set_bit(R1BIO_Uptodate, &r1_bio->state); 1379 while (d != r1_bio->read_disk) { 1380 if (d == 0) 1381 d = conf->raid_disks; 1382 d--; 1383 if (r1_bio->bios[d]->bi_end_io != end_sync_read) 1384 continue; 1385 rdev = conf->mirrors[d].rdev; 1386 atomic_add(s, &rdev->corrected_errors); 1387 if (sync_page_io(rdev->bdev, 1388 sect + rdev->data_offset, 1389 s<<9, 1390 bio->bi_io_vec[idx].bv_page, 1391 WRITE) == 0) 1392 md_error(mddev, rdev); 1393 } 1394 d = start; 1395 while (d != r1_bio->read_disk) { 1396 if (d == 0) 1397 d = conf->raid_disks; 1398 d--; 1399 if (r1_bio->bios[d]->bi_end_io != end_sync_read) 1400 continue; 1401 rdev = conf->mirrors[d].rdev; 1402 if (sync_page_io(rdev->bdev, 1403 sect + rdev->data_offset, 1404 s<<9, 1405 bio->bi_io_vec[idx].bv_page, 1406 READ) == 0) 1407 md_error(mddev, rdev); 1408 } 1409 } else { 1410 char b[BDEVNAME_SIZE]; 1411 /* Cannot read from anywhere, array is toast */ 1412 md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev); 1413 printk(KERN_ALERT "raid1: %s: unrecoverable I/O read error" 1414 " for block %llu\n", 1415 bdevname(bio->bi_bdev,b), 1416 (unsigned long long)r1_bio->sector); 1417 md_done_sync(mddev, r1_bio->sectors, 0); 1418 put_buf(r1_bio); 1419 return; 1420 } 1421 sectors -= s; 1422 sect += s; 1423 idx ++; 1424 } 1425 } 1426 1427 /* 1428 * schedule writes 1429 */ 1430 atomic_set(&r1_bio->remaining, 1); 1431 for (i = 0; i < disks ; i++) { 1432 wbio = r1_bio->bios[i]; 1433 if (wbio->bi_end_io == NULL || 1434 (wbio->bi_end_io == end_sync_read && 1435 (i == r1_bio->read_disk || 1436 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery)))) 1437 continue; 1438 1439 wbio->bi_rw = WRITE; 1440 wbio->bi_end_io = end_sync_write; 1441 atomic_inc(&r1_bio->remaining); 1442 md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9); 1443 1444 generic_make_request(wbio); 1445 } 1446 1447 if (atomic_dec_and_test(&r1_bio->remaining)) { 1448 /* if we're here, all write(s) have completed, so clean up */ 1449 md_done_sync(mddev, r1_bio->sectors, 1); 1450 put_buf(r1_bio); 1451 } 1452 } 1453 1454 /* 1455 * This is a kernel thread which: 1456 * 1457 * 1. Retries failed read operations on working mirrors. 1458 * 2. Updates the raid superblock when problems encounter. 1459 * 3. Performs writes following reads for array syncronising. 1460 */ 1461 1462 static void fix_read_error(conf_t *conf, int read_disk, 1463 sector_t sect, int sectors) 1464 { 1465 mddev_t *mddev = conf->mddev; 1466 while(sectors) { 1467 int s = sectors; 1468 int d = read_disk; 1469 int success = 0; 1470 int start; 1471 mdk_rdev_t *rdev; 1472 1473 if (s > (PAGE_SIZE>>9)) 1474 s = PAGE_SIZE >> 9; 1475 1476 do { 1477 /* Note: no rcu protection needed here 1478 * as this is synchronous in the raid1d thread 1479 * which is the thread that might remove 1480 * a device. If raid1d ever becomes multi-threaded.... 1481 */ 1482 rdev = conf->mirrors[d].rdev; 1483 if (rdev && 1484 test_bit(In_sync, &rdev->flags) && 1485 sync_page_io(rdev->bdev, 1486 sect + rdev->data_offset, 1487 s<<9, 1488 conf->tmppage, READ)) 1489 success = 1; 1490 else { 1491 d++; 1492 if (d == conf->raid_disks) 1493 d = 0; 1494 } 1495 } while (!success && d != read_disk); 1496 1497 if (!success) { 1498 /* Cannot read from anywhere -- bye bye array */ 1499 md_error(mddev, conf->mirrors[read_disk].rdev); 1500 break; 1501 } 1502 /* write it back and re-read */ 1503 start = d; 1504 while (d != read_disk) { 1505 if (d==0) 1506 d = conf->raid_disks; 1507 d--; 1508 rdev = conf->mirrors[d].rdev; 1509 if (rdev && 1510 test_bit(In_sync, &rdev->flags)) { 1511 if (sync_page_io(rdev->bdev, 1512 sect + rdev->data_offset, 1513 s<<9, conf->tmppage, WRITE) 1514 == 0) 1515 /* Well, this device is dead */ 1516 md_error(mddev, rdev); 1517 } 1518 } 1519 d = start; 1520 while (d != read_disk) { 1521 char b[BDEVNAME_SIZE]; 1522 if (d==0) 1523 d = conf->raid_disks; 1524 d--; 1525 rdev = conf->mirrors[d].rdev; 1526 if (rdev && 1527 test_bit(In_sync, &rdev->flags)) { 1528 if (sync_page_io(rdev->bdev, 1529 sect + rdev->data_offset, 1530 s<<9, conf->tmppage, READ) 1531 == 0) 1532 /* Well, this device is dead */ 1533 md_error(mddev, rdev); 1534 else { 1535 atomic_add(s, &rdev->corrected_errors); 1536 printk(KERN_INFO 1537 "raid1:%s: read error corrected " 1538 "(%d sectors at %llu on %s)\n", 1539 mdname(mddev), s, 1540 (unsigned long long)(sect + 1541 rdev->data_offset), 1542 bdevname(rdev->bdev, b)); 1543 } 1544 } 1545 } 1546 sectors -= s; 1547 sect += s; 1548 } 1549 } 1550 1551 static void raid1d(mddev_t *mddev) 1552 { 1553 r1bio_t *r1_bio; 1554 struct bio *bio; 1555 unsigned long flags; 1556 conf_t *conf = mddev_to_conf(mddev); 1557 struct list_head *head = &conf->retry_list; 1558 int unplug=0; 1559 mdk_rdev_t *rdev; 1560 1561 md_check_recovery(mddev); 1562 1563 for (;;) { 1564 char b[BDEVNAME_SIZE]; 1565 1566 unplug += flush_pending_writes(conf); 1567 1568 spin_lock_irqsave(&conf->device_lock, flags); 1569 if (list_empty(head)) { 1570 spin_unlock_irqrestore(&conf->device_lock, flags); 1571 break; 1572 } 1573 r1_bio = list_entry(head->prev, r1bio_t, retry_list); 1574 list_del(head->prev); 1575 conf->nr_queued--; 1576 spin_unlock_irqrestore(&conf->device_lock, flags); 1577 1578 mddev = r1_bio->mddev; 1579 conf = mddev_to_conf(mddev); 1580 if (test_bit(R1BIO_IsSync, &r1_bio->state)) { 1581 sync_request_write(mddev, r1_bio); 1582 unplug = 1; 1583 } else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) { 1584 /* some requests in the r1bio were BIO_RW_BARRIER 1585 * requests which failed with -EOPNOTSUPP. Hohumm.. 1586 * Better resubmit without the barrier. 1587 * We know which devices to resubmit for, because 1588 * all others have had their bios[] entry cleared. 1589 * We already have a nr_pending reference on these rdevs. 1590 */ 1591 int i; 1592 const int do_sync = bio_sync(r1_bio->master_bio); 1593 clear_bit(R1BIO_BarrierRetry, &r1_bio->state); 1594 clear_bit(R1BIO_Barrier, &r1_bio->state); 1595 for (i=0; i < conf->raid_disks; i++) 1596 if (r1_bio->bios[i]) 1597 atomic_inc(&r1_bio->remaining); 1598 for (i=0; i < conf->raid_disks; i++) 1599 if (r1_bio->bios[i]) { 1600 struct bio_vec *bvec; 1601 int j; 1602 1603 bio = bio_clone(r1_bio->master_bio, GFP_NOIO); 1604 /* copy pages from the failed bio, as 1605 * this might be a write-behind device */ 1606 __bio_for_each_segment(bvec, bio, j, 0) 1607 bvec->bv_page = bio_iovec_idx(r1_bio->bios[i], j)->bv_page; 1608 bio_put(r1_bio->bios[i]); 1609 bio->bi_sector = r1_bio->sector + 1610 conf->mirrors[i].rdev->data_offset; 1611 bio->bi_bdev = conf->mirrors[i].rdev->bdev; 1612 bio->bi_end_io = raid1_end_write_request; 1613 bio->bi_rw = WRITE | do_sync; 1614 bio->bi_private = r1_bio; 1615 r1_bio->bios[i] = bio; 1616 generic_make_request(bio); 1617 } 1618 } else { 1619 int disk; 1620 1621 /* we got a read error. Maybe the drive is bad. Maybe just 1622 * the block and we can fix it. 1623 * We freeze all other IO, and try reading the block from 1624 * other devices. When we find one, we re-write 1625 * and check it that fixes the read error. 1626 * This is all done synchronously while the array is 1627 * frozen 1628 */ 1629 if (mddev->ro == 0) { 1630 freeze_array(conf); 1631 fix_read_error(conf, r1_bio->read_disk, 1632 r1_bio->sector, 1633 r1_bio->sectors); 1634 unfreeze_array(conf); 1635 } 1636 1637 bio = r1_bio->bios[r1_bio->read_disk]; 1638 if ((disk=read_balance(conf, r1_bio)) == -1) { 1639 printk(KERN_ALERT "raid1: %s: unrecoverable I/O" 1640 " read error for block %llu\n", 1641 bdevname(bio->bi_bdev,b), 1642 (unsigned long long)r1_bio->sector); 1643 raid_end_bio_io(r1_bio); 1644 } else { 1645 const int do_sync = bio_sync(r1_bio->master_bio); 1646 r1_bio->bios[r1_bio->read_disk] = 1647 mddev->ro ? IO_BLOCKED : NULL; 1648 r1_bio->read_disk = disk; 1649 bio_put(bio); 1650 bio = bio_clone(r1_bio->master_bio, GFP_NOIO); 1651 r1_bio->bios[r1_bio->read_disk] = bio; 1652 rdev = conf->mirrors[disk].rdev; 1653 if (printk_ratelimit()) 1654 printk(KERN_ERR "raid1: %s: redirecting sector %llu to" 1655 " another mirror\n", 1656 bdevname(rdev->bdev,b), 1657 (unsigned long long)r1_bio->sector); 1658 bio->bi_sector = r1_bio->sector + rdev->data_offset; 1659 bio->bi_bdev = rdev->bdev; 1660 bio->bi_end_io = raid1_end_read_request; 1661 bio->bi_rw = READ | do_sync; 1662 bio->bi_private = r1_bio; 1663 unplug = 1; 1664 generic_make_request(bio); 1665 } 1666 } 1667 } 1668 if (unplug) 1669 unplug_slaves(mddev); 1670 } 1671 1672 1673 static int init_resync(conf_t *conf) 1674 { 1675 int buffs; 1676 1677 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE; 1678 BUG_ON(conf->r1buf_pool); 1679 conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free, 1680 conf->poolinfo); 1681 if (!conf->r1buf_pool) 1682 return -ENOMEM; 1683 conf->next_resync = 0; 1684 return 0; 1685 } 1686 1687 /* 1688 * perform a "sync" on one "block" 1689 * 1690 * We need to make sure that no normal I/O request - particularly write 1691 * requests - conflict with active sync requests. 1692 * 1693 * This is achieved by tracking pending requests and a 'barrier' concept 1694 * that can be installed to exclude normal IO requests. 1695 */ 1696 1697 static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster) 1698 { 1699 conf_t *conf = mddev_to_conf(mddev); 1700 r1bio_t *r1_bio; 1701 struct bio *bio; 1702 sector_t max_sector, nr_sectors; 1703 int disk = -1; 1704 int i; 1705 int wonly = -1; 1706 int write_targets = 0, read_targets = 0; 1707 int sync_blocks; 1708 int still_degraded = 0; 1709 1710 if (!conf->r1buf_pool) 1711 { 1712 /* 1713 printk("sync start - bitmap %p\n", mddev->bitmap); 1714 */ 1715 if (init_resync(conf)) 1716 return 0; 1717 } 1718 1719 max_sector = mddev->size << 1; 1720 if (sector_nr >= max_sector) { 1721 /* If we aborted, we need to abort the 1722 * sync on the 'current' bitmap chunk (there will 1723 * only be one in raid1 resync. 1724 * We can find the current addess in mddev->curr_resync 1725 */ 1726 if (mddev->curr_resync < max_sector) /* aborted */ 1727 bitmap_end_sync(mddev->bitmap, mddev->curr_resync, 1728 &sync_blocks, 1); 1729 else /* completed sync */ 1730 conf->fullsync = 0; 1731 1732 bitmap_close_sync(mddev->bitmap); 1733 close_sync(conf); 1734 return 0; 1735 } 1736 1737 if (mddev->bitmap == NULL && 1738 mddev->recovery_cp == MaxSector && 1739 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && 1740 conf->fullsync == 0) { 1741 *skipped = 1; 1742 return max_sector - sector_nr; 1743 } 1744 /* before building a request, check if we can skip these blocks.. 1745 * This call the bitmap_start_sync doesn't actually record anything 1746 */ 1747 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && 1748 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 1749 /* We can skip this block, and probably several more */ 1750 *skipped = 1; 1751 return sync_blocks; 1752 } 1753 /* 1754 * If there is non-resync activity waiting for a turn, 1755 * and resync is going fast enough, 1756 * then let it though before starting on this new sync request. 1757 */ 1758 if (!go_faster && conf->nr_waiting) 1759 msleep_interruptible(1000); 1760 1761 bitmap_cond_end_sync(mddev->bitmap, sector_nr); 1762 raise_barrier(conf); 1763 1764 conf->next_resync = sector_nr; 1765 1766 r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO); 1767 rcu_read_lock(); 1768 /* 1769 * If we get a correctably read error during resync or recovery, 1770 * we might want to read from a different device. So we 1771 * flag all drives that could conceivably be read from for READ, 1772 * and any others (which will be non-In_sync devices) for WRITE. 1773 * If a read fails, we try reading from something else for which READ 1774 * is OK. 1775 */ 1776 1777 r1_bio->mddev = mddev; 1778 r1_bio->sector = sector_nr; 1779 r1_bio->state = 0; 1780 set_bit(R1BIO_IsSync, &r1_bio->state); 1781 1782 for (i=0; i < conf->raid_disks; i++) { 1783 mdk_rdev_t *rdev; 1784 bio = r1_bio->bios[i]; 1785 1786 /* take from bio_init */ 1787 bio->bi_next = NULL; 1788 bio->bi_flags |= 1 << BIO_UPTODATE; 1789 bio->bi_rw = READ; 1790 bio->bi_vcnt = 0; 1791 bio->bi_idx = 0; 1792 bio->bi_phys_segments = 0; 1793 bio->bi_size = 0; 1794 bio->bi_end_io = NULL; 1795 bio->bi_private = NULL; 1796 1797 rdev = rcu_dereference(conf->mirrors[i].rdev); 1798 if (rdev == NULL || 1799 test_bit(Faulty, &rdev->flags)) { 1800 still_degraded = 1; 1801 continue; 1802 } else if (!test_bit(In_sync, &rdev->flags)) { 1803 bio->bi_rw = WRITE; 1804 bio->bi_end_io = end_sync_write; 1805 write_targets ++; 1806 } else { 1807 /* may need to read from here */ 1808 bio->bi_rw = READ; 1809 bio->bi_end_io = end_sync_read; 1810 if (test_bit(WriteMostly, &rdev->flags)) { 1811 if (wonly < 0) 1812 wonly = i; 1813 } else { 1814 if (disk < 0) 1815 disk = i; 1816 } 1817 read_targets++; 1818 } 1819 atomic_inc(&rdev->nr_pending); 1820 bio->bi_sector = sector_nr + rdev->data_offset; 1821 bio->bi_bdev = rdev->bdev; 1822 bio->bi_private = r1_bio; 1823 } 1824 rcu_read_unlock(); 1825 if (disk < 0) 1826 disk = wonly; 1827 r1_bio->read_disk = disk; 1828 1829 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0) 1830 /* extra read targets are also write targets */ 1831 write_targets += read_targets-1; 1832 1833 if (write_targets == 0 || read_targets == 0) { 1834 /* There is nowhere to write, so all non-sync 1835 * drives must be failed - so we are finished 1836 */ 1837 sector_t rv = max_sector - sector_nr; 1838 *skipped = 1; 1839 put_buf(r1_bio); 1840 return rv; 1841 } 1842 1843 if (max_sector > mddev->resync_max) 1844 max_sector = mddev->resync_max; /* Don't do IO beyond here */ 1845 nr_sectors = 0; 1846 sync_blocks = 0; 1847 do { 1848 struct page *page; 1849 int len = PAGE_SIZE; 1850 if (sector_nr + (len>>9) > max_sector) 1851 len = (max_sector - sector_nr) << 9; 1852 if (len == 0) 1853 break; 1854 if (sync_blocks == 0) { 1855 if (!bitmap_start_sync(mddev->bitmap, sector_nr, 1856 &sync_blocks, still_degraded) && 1857 !conf->fullsync && 1858 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 1859 break; 1860 BUG_ON(sync_blocks < (PAGE_SIZE>>9)); 1861 if (len > (sync_blocks<<9)) 1862 len = sync_blocks<<9; 1863 } 1864 1865 for (i=0 ; i < conf->raid_disks; i++) { 1866 bio = r1_bio->bios[i]; 1867 if (bio->bi_end_io) { 1868 page = bio->bi_io_vec[bio->bi_vcnt].bv_page; 1869 if (bio_add_page(bio, page, len, 0) == 0) { 1870 /* stop here */ 1871 bio->bi_io_vec[bio->bi_vcnt].bv_page = page; 1872 while (i > 0) { 1873 i--; 1874 bio = r1_bio->bios[i]; 1875 if (bio->bi_end_io==NULL) 1876 continue; 1877 /* remove last page from this bio */ 1878 bio->bi_vcnt--; 1879 bio->bi_size -= len; 1880 bio->bi_flags &= ~(1<< BIO_SEG_VALID); 1881 } 1882 goto bio_full; 1883 } 1884 } 1885 } 1886 nr_sectors += len>>9; 1887 sector_nr += len>>9; 1888 sync_blocks -= (len>>9); 1889 } while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES); 1890 bio_full: 1891 r1_bio->sectors = nr_sectors; 1892 1893 /* For a user-requested sync, we read all readable devices and do a 1894 * compare 1895 */ 1896 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 1897 atomic_set(&r1_bio->remaining, read_targets); 1898 for (i=0; i<conf->raid_disks; i++) { 1899 bio = r1_bio->bios[i]; 1900 if (bio->bi_end_io == end_sync_read) { 1901 md_sync_acct(bio->bi_bdev, nr_sectors); 1902 generic_make_request(bio); 1903 } 1904 } 1905 } else { 1906 atomic_set(&r1_bio->remaining, 1); 1907 bio = r1_bio->bios[r1_bio->read_disk]; 1908 md_sync_acct(bio->bi_bdev, nr_sectors); 1909 generic_make_request(bio); 1910 1911 } 1912 return nr_sectors; 1913 } 1914 1915 static int run(mddev_t *mddev) 1916 { 1917 conf_t *conf; 1918 int i, j, disk_idx; 1919 mirror_info_t *disk; 1920 mdk_rdev_t *rdev; 1921 struct list_head *tmp; 1922 1923 if (mddev->level != 1) { 1924 printk("raid1: %s: raid level not set to mirroring (%d)\n", 1925 mdname(mddev), mddev->level); 1926 goto out; 1927 } 1928 if (mddev->reshape_position != MaxSector) { 1929 printk("raid1: %s: reshape_position set but not supported\n", 1930 mdname(mddev)); 1931 goto out; 1932 } 1933 /* 1934 * copy the already verified devices into our private RAID1 1935 * bookkeeping area. [whatever we allocate in run(), 1936 * should be freed in stop()] 1937 */ 1938 conf = kzalloc(sizeof(conf_t), GFP_KERNEL); 1939 mddev->private = conf; 1940 if (!conf) 1941 goto out_no_mem; 1942 1943 conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks, 1944 GFP_KERNEL); 1945 if (!conf->mirrors) 1946 goto out_no_mem; 1947 1948 conf->tmppage = alloc_page(GFP_KERNEL); 1949 if (!conf->tmppage) 1950 goto out_no_mem; 1951 1952 conf->poolinfo = kmalloc(sizeof(*conf->poolinfo), GFP_KERNEL); 1953 if (!conf->poolinfo) 1954 goto out_no_mem; 1955 conf->poolinfo->mddev = mddev; 1956 conf->poolinfo->raid_disks = mddev->raid_disks; 1957 conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc, 1958 r1bio_pool_free, 1959 conf->poolinfo); 1960 if (!conf->r1bio_pool) 1961 goto out_no_mem; 1962 1963 spin_lock_init(&conf->device_lock); 1964 mddev->queue->queue_lock = &conf->device_lock; 1965 1966 rdev_for_each(rdev, tmp, mddev) { 1967 disk_idx = rdev->raid_disk; 1968 if (disk_idx >= mddev->raid_disks 1969 || disk_idx < 0) 1970 continue; 1971 disk = conf->mirrors + disk_idx; 1972 1973 disk->rdev = rdev; 1974 1975 blk_queue_stack_limits(mddev->queue, 1976 rdev->bdev->bd_disk->queue); 1977 /* as we don't honour merge_bvec_fn, we must never risk 1978 * violating it, so limit ->max_sector to one PAGE, as 1979 * a one page request is never in violation. 1980 */ 1981 if (rdev->bdev->bd_disk->queue->merge_bvec_fn && 1982 mddev->queue->max_sectors > (PAGE_SIZE>>9)) 1983 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); 1984 1985 disk->head_position = 0; 1986 } 1987 conf->raid_disks = mddev->raid_disks; 1988 conf->mddev = mddev; 1989 INIT_LIST_HEAD(&conf->retry_list); 1990 1991 spin_lock_init(&conf->resync_lock); 1992 init_waitqueue_head(&conf->wait_barrier); 1993 1994 bio_list_init(&conf->pending_bio_list); 1995 bio_list_init(&conf->flushing_bio_list); 1996 1997 1998 mddev->degraded = 0; 1999 for (i = 0; i < conf->raid_disks; i++) { 2000 2001 disk = conf->mirrors + i; 2002 2003 if (!disk->rdev || 2004 !test_bit(In_sync, &disk->rdev->flags)) { 2005 disk->head_position = 0; 2006 mddev->degraded++; 2007 if (disk->rdev) 2008 conf->fullsync = 1; 2009 } 2010 } 2011 if (mddev->degraded == conf->raid_disks) { 2012 printk(KERN_ERR "raid1: no operational mirrors for %s\n", 2013 mdname(mddev)); 2014 goto out_free_conf; 2015 } 2016 if (conf->raid_disks - mddev->degraded == 1) 2017 mddev->recovery_cp = MaxSector; 2018 2019 /* 2020 * find the first working one and use it as a starting point 2021 * to read balancing. 2022 */ 2023 for (j = 0; j < conf->raid_disks && 2024 (!conf->mirrors[j].rdev || 2025 !test_bit(In_sync, &conf->mirrors[j].rdev->flags)) ; j++) 2026 /* nothing */; 2027 conf->last_used = j; 2028 2029 2030 mddev->thread = md_register_thread(raid1d, mddev, "%s_raid1"); 2031 if (!mddev->thread) { 2032 printk(KERN_ERR 2033 "raid1: couldn't allocate thread for %s\n", 2034 mdname(mddev)); 2035 goto out_free_conf; 2036 } 2037 2038 printk(KERN_INFO 2039 "raid1: raid set %s active with %d out of %d mirrors\n", 2040 mdname(mddev), mddev->raid_disks - mddev->degraded, 2041 mddev->raid_disks); 2042 /* 2043 * Ok, everything is just fine now 2044 */ 2045 mddev->array_sectors = mddev->size * 2; 2046 2047 mddev->queue->unplug_fn = raid1_unplug; 2048 mddev->queue->backing_dev_info.congested_fn = raid1_congested; 2049 mddev->queue->backing_dev_info.congested_data = mddev; 2050 2051 return 0; 2052 2053 out_no_mem: 2054 printk(KERN_ERR "raid1: couldn't allocate memory for %s\n", 2055 mdname(mddev)); 2056 2057 out_free_conf: 2058 if (conf) { 2059 if (conf->r1bio_pool) 2060 mempool_destroy(conf->r1bio_pool); 2061 kfree(conf->mirrors); 2062 safe_put_page(conf->tmppage); 2063 kfree(conf->poolinfo); 2064 kfree(conf); 2065 mddev->private = NULL; 2066 } 2067 out: 2068 return -EIO; 2069 } 2070 2071 static int stop(mddev_t *mddev) 2072 { 2073 conf_t *conf = mddev_to_conf(mddev); 2074 struct bitmap *bitmap = mddev->bitmap; 2075 int behind_wait = 0; 2076 2077 /* wait for behind writes to complete */ 2078 while (bitmap && atomic_read(&bitmap->behind_writes) > 0) { 2079 behind_wait++; 2080 printk(KERN_INFO "raid1: behind writes in progress on device %s, waiting to stop (%d)\n", mdname(mddev), behind_wait); 2081 set_current_state(TASK_UNINTERRUPTIBLE); 2082 schedule_timeout(HZ); /* wait a second */ 2083 /* need to kick something here to make sure I/O goes? */ 2084 } 2085 2086 md_unregister_thread(mddev->thread); 2087 mddev->thread = NULL; 2088 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 2089 if (conf->r1bio_pool) 2090 mempool_destroy(conf->r1bio_pool); 2091 kfree(conf->mirrors); 2092 kfree(conf->poolinfo); 2093 kfree(conf); 2094 mddev->private = NULL; 2095 return 0; 2096 } 2097 2098 static int raid1_resize(mddev_t *mddev, sector_t sectors) 2099 { 2100 /* no resync is happening, and there is enough space 2101 * on all devices, so we can resize. 2102 * We need to make sure resync covers any new space. 2103 * If the array is shrinking we should possibly wait until 2104 * any io in the removed space completes, but it hardly seems 2105 * worth it. 2106 */ 2107 mddev->array_sectors = sectors; 2108 set_capacity(mddev->gendisk, mddev->array_sectors); 2109 mddev->changed = 1; 2110 if (mddev->array_sectors / 2 > mddev->size && 2111 mddev->recovery_cp == MaxSector) { 2112 mddev->recovery_cp = mddev->size << 1; 2113 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2114 } 2115 mddev->size = mddev->array_sectors / 2; 2116 mddev->resync_max_sectors = sectors; 2117 return 0; 2118 } 2119 2120 static int raid1_reshape(mddev_t *mddev) 2121 { 2122 /* We need to: 2123 * 1/ resize the r1bio_pool 2124 * 2/ resize conf->mirrors 2125 * 2126 * We allocate a new r1bio_pool if we can. 2127 * Then raise a device barrier and wait until all IO stops. 2128 * Then resize conf->mirrors and swap in the new r1bio pool. 2129 * 2130 * At the same time, we "pack" the devices so that all the missing 2131 * devices have the higher raid_disk numbers. 2132 */ 2133 mempool_t *newpool, *oldpool; 2134 struct pool_info *newpoolinfo; 2135 mirror_info_t *newmirrors; 2136 conf_t *conf = mddev_to_conf(mddev); 2137 int cnt, raid_disks; 2138 unsigned long flags; 2139 int d, d2, err; 2140 2141 /* Cannot change chunk_size, layout, or level */ 2142 if (mddev->chunk_size != mddev->new_chunk || 2143 mddev->layout != mddev->new_layout || 2144 mddev->level != mddev->new_level) { 2145 mddev->new_chunk = mddev->chunk_size; 2146 mddev->new_layout = mddev->layout; 2147 mddev->new_level = mddev->level; 2148 return -EINVAL; 2149 } 2150 2151 err = md_allow_write(mddev); 2152 if (err) 2153 return err; 2154 2155 raid_disks = mddev->raid_disks + mddev->delta_disks; 2156 2157 if (raid_disks < conf->raid_disks) { 2158 cnt=0; 2159 for (d= 0; d < conf->raid_disks; d++) 2160 if (conf->mirrors[d].rdev) 2161 cnt++; 2162 if (cnt > raid_disks) 2163 return -EBUSY; 2164 } 2165 2166 newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL); 2167 if (!newpoolinfo) 2168 return -ENOMEM; 2169 newpoolinfo->mddev = mddev; 2170 newpoolinfo->raid_disks = raid_disks; 2171 2172 newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc, 2173 r1bio_pool_free, newpoolinfo); 2174 if (!newpool) { 2175 kfree(newpoolinfo); 2176 return -ENOMEM; 2177 } 2178 newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks, GFP_KERNEL); 2179 if (!newmirrors) { 2180 kfree(newpoolinfo); 2181 mempool_destroy(newpool); 2182 return -ENOMEM; 2183 } 2184 2185 raise_barrier(conf); 2186 2187 /* ok, everything is stopped */ 2188 oldpool = conf->r1bio_pool; 2189 conf->r1bio_pool = newpool; 2190 2191 for (d = d2 = 0; d < conf->raid_disks; d++) { 2192 mdk_rdev_t *rdev = conf->mirrors[d].rdev; 2193 if (rdev && rdev->raid_disk != d2) { 2194 char nm[20]; 2195 sprintf(nm, "rd%d", rdev->raid_disk); 2196 sysfs_remove_link(&mddev->kobj, nm); 2197 rdev->raid_disk = d2; 2198 sprintf(nm, "rd%d", rdev->raid_disk); 2199 sysfs_remove_link(&mddev->kobj, nm); 2200 if (sysfs_create_link(&mddev->kobj, 2201 &rdev->kobj, nm)) 2202 printk(KERN_WARNING 2203 "md/raid1: cannot register " 2204 "%s for %s\n", 2205 nm, mdname(mddev)); 2206 } 2207 if (rdev) 2208 newmirrors[d2++].rdev = rdev; 2209 } 2210 kfree(conf->mirrors); 2211 conf->mirrors = newmirrors; 2212 kfree(conf->poolinfo); 2213 conf->poolinfo = newpoolinfo; 2214 2215 spin_lock_irqsave(&conf->device_lock, flags); 2216 mddev->degraded += (raid_disks - conf->raid_disks); 2217 spin_unlock_irqrestore(&conf->device_lock, flags); 2218 conf->raid_disks = mddev->raid_disks = raid_disks; 2219 mddev->delta_disks = 0; 2220 2221 conf->last_used = 0; /* just make sure it is in-range */ 2222 lower_barrier(conf); 2223 2224 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2225 md_wakeup_thread(mddev->thread); 2226 2227 mempool_destroy(oldpool); 2228 return 0; 2229 } 2230 2231 static void raid1_quiesce(mddev_t *mddev, int state) 2232 { 2233 conf_t *conf = mddev_to_conf(mddev); 2234 2235 switch(state) { 2236 case 1: 2237 raise_barrier(conf); 2238 break; 2239 case 0: 2240 lower_barrier(conf); 2241 break; 2242 } 2243 } 2244 2245 2246 static struct mdk_personality raid1_personality = 2247 { 2248 .name = "raid1", 2249 .level = 1, 2250 .owner = THIS_MODULE, 2251 .make_request = make_request, 2252 .run = run, 2253 .stop = stop, 2254 .status = status, 2255 .error_handler = error, 2256 .hot_add_disk = raid1_add_disk, 2257 .hot_remove_disk= raid1_remove_disk, 2258 .spare_active = raid1_spare_active, 2259 .sync_request = sync_request, 2260 .resize = raid1_resize, 2261 .check_reshape = raid1_reshape, 2262 .quiesce = raid1_quiesce, 2263 }; 2264 2265 static int __init raid_init(void) 2266 { 2267 return register_md_personality(&raid1_personality); 2268 } 2269 2270 static void raid_exit(void) 2271 { 2272 unregister_md_personality(&raid1_personality); 2273 } 2274 2275 module_init(raid_init); 2276 module_exit(raid_exit); 2277 MODULE_LICENSE("GPL"); 2278 MODULE_ALIAS("md-personality-3"); /* RAID1 */ 2279 MODULE_ALIAS("md-raid1"); 2280 MODULE_ALIAS("md-level-1"); 2281