1 /* 2 * raid5.c : Multiple Devices driver for Linux 3 * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman 4 * Copyright (C) 1999, 2000 Ingo Molnar 5 * Copyright (C) 2002, 2003 H. Peter Anvin 6 * 7 * RAID-4/5/6 management functions. 8 * Thanks to Penguin Computing for making the RAID-6 development possible 9 * by donating a test server! 10 * 11 * This program is free software; you can redistribute it and/or modify 12 * it under the terms of the GNU General Public License as published by 13 * the Free Software Foundation; either version 2, or (at your option) 14 * any later version. 15 * 16 * You should have received a copy of the GNU General Public License 17 * (for example /usr/src/linux/COPYING); if not, write to the Free 18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19 */ 20 21 /* 22 * BITMAP UNPLUGGING: 23 * 24 * The sequencing for updating the bitmap reliably is a little 25 * subtle (and I got it wrong the first time) so it deserves some 26 * explanation. 27 * 28 * We group bitmap updates into batches. Each batch has a number. 29 * We may write out several batches at once, but that isn't very important. 30 * conf->bm_write is the number of the last batch successfully written. 31 * conf->bm_flush is the number of the last batch that was closed to 32 * new additions. 33 * When we discover that we will need to write to any block in a stripe 34 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq 35 * the number of the batch it will be in. This is bm_flush+1. 36 * When we are ready to do a write, if that batch hasn't been written yet, 37 * we plug the array and queue the stripe for later. 38 * When an unplug happens, we increment bm_flush, thus closing the current 39 * batch. 40 * When we notice that bm_flush > bm_write, we write out all pending updates 41 * to the bitmap, and advance bm_write to where bm_flush was. 42 * This may occasionally write a bit out twice, but is sure never to 43 * miss any bits. 44 */ 45 46 #include <linux/blkdev.h> 47 #include <linux/kthread.h> 48 #include <linux/raid/pq.h> 49 #include <linux/async_tx.h> 50 #include <linux/seq_file.h> 51 #include "md.h" 52 #include "raid5.h" 53 #include "bitmap.h" 54 55 /* 56 * Stripe cache 57 */ 58 59 #define NR_STRIPES 256 60 #define STRIPE_SIZE PAGE_SIZE 61 #define STRIPE_SHIFT (PAGE_SHIFT - 9) 62 #define STRIPE_SECTORS (STRIPE_SIZE>>9) 63 #define IO_THRESHOLD 1 64 #define BYPASS_THRESHOLD 1 65 #define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head)) 66 #define HASH_MASK (NR_HASH - 1) 67 68 #define stripe_hash(conf, sect) (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK])) 69 70 /* bio's attached to a stripe+device for I/O are linked together in bi_sector 71 * order without overlap. There may be several bio's per stripe+device, and 72 * a bio could span several devices. 73 * When walking this list for a particular stripe+device, we must never proceed 74 * beyond a bio that extends past this device, as the next bio might no longer 75 * be valid. 76 * This macro is used to determine the 'next' bio in the list, given the sector 77 * of the current stripe+device 78 */ 79 #define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? 
		(bio)->bi_next : NULL)
/*
 * The following can be used to debug the driver
 */
#define RAID5_PARANOIA	1
#if RAID5_PARANOIA && defined(CONFIG_SMP)
# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
#else
# define CHECK_DEVLOCK()
#endif

#ifdef DEBUG
#define inline
#define __inline__
#endif

#define printk_rl(args...) ((void) (printk_ratelimit() && printk(args)))

/*
 * We maintain a biased count of active stripes in the bottom 16 bits of
 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
 */
static inline int raid5_bi_phys_segments(struct bio *bio)
{
	return bio->bi_phys_segments & 0xffff;
}

static inline int raid5_bi_hw_segments(struct bio *bio)
{
	return (bio->bi_phys_segments >> 16) & 0xffff;
}

static inline int raid5_dec_bi_phys_segments(struct bio *bio)
{
	--bio->bi_phys_segments;
	return raid5_bi_phys_segments(bio);
}

static inline int raid5_dec_bi_hw_segments(struct bio *bio)
{
	unsigned short val = raid5_bi_hw_segments(bio);

	--val;
	bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
	return val;
}

static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
{
	/* bitwise OR keeps the active-stripe count intact in the low 16 bits */
	bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
}

/* Find first data disk in a raid6 stripe */
static inline int raid6_d0(struct stripe_head *sh)
{
	if (sh->ddf_layout)
		/* ddf always starts from the first device */
		return 0;
	/* md starts just after Q block */
	if (sh->qd_idx == sh->disks - 1)
		return 0;
	else
		return sh->qd_idx + 1;
}
static inline int raid6_next_disk(int disk, int raid_disks)
{
	disk++;
	return (disk < raid_disks) ? disk : 0;
}

/* When walking through the disks in a raid5, starting at raid6_d0,
 * we need to map each disk to a 'slot', where the data disks are slots
 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
 * is raid_disks-1.  This helper does that mapping.
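 *
 * For example, with 6 devices in the md-native (non-DDF) layout,
 * syndrome_disks is disks - 2 = 4; for a stripe with pd_idx == 4 and
 * qd_idx == 5, raid6_d0() returns 0, devices 0,1,2,3 are assigned
 * slots 0..3 in order, and the P and Q devices map to slots 4 and 5.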
153 */ 154 static int raid6_idx_to_slot(int idx, struct stripe_head *sh, 155 int *count, int syndrome_disks) 156 { 157 int slot; 158 159 if (idx == sh->pd_idx) 160 return syndrome_disks; 161 if (idx == sh->qd_idx) 162 return syndrome_disks + 1; 163 slot = (*count)++; 164 return slot; 165 } 166 167 static void return_io(struct bio *return_bi) 168 { 169 struct bio *bi = return_bi; 170 while (bi) { 171 172 return_bi = bi->bi_next; 173 bi->bi_next = NULL; 174 bi->bi_size = 0; 175 bio_endio(bi, 0); 176 bi = return_bi; 177 } 178 } 179 180 static void print_raid5_conf (raid5_conf_t *conf); 181 182 static int stripe_operations_active(struct stripe_head *sh) 183 { 184 return sh->check_state || sh->reconstruct_state || 185 test_bit(STRIPE_BIOFILL_RUN, &sh->state) || 186 test_bit(STRIPE_COMPUTE_RUN, &sh->state); 187 } 188 189 static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh) 190 { 191 if (atomic_dec_and_test(&sh->count)) { 192 BUG_ON(!list_empty(&sh->lru)); 193 BUG_ON(atomic_read(&conf->active_stripes)==0); 194 if (test_bit(STRIPE_HANDLE, &sh->state)) { 195 if (test_bit(STRIPE_DELAYED, &sh->state)) { 196 list_add_tail(&sh->lru, &conf->delayed_list); 197 blk_plug_device(conf->mddev->queue); 198 } else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && 199 sh->bm_seq - conf->seq_write > 0) { 200 list_add_tail(&sh->lru, &conf->bitmap_list); 201 blk_plug_device(conf->mddev->queue); 202 } else { 203 clear_bit(STRIPE_BIT_DELAY, &sh->state); 204 list_add_tail(&sh->lru, &conf->handle_list); 205 } 206 md_wakeup_thread(conf->mddev->thread); 207 } else { 208 BUG_ON(stripe_operations_active(sh)); 209 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 210 atomic_dec(&conf->preread_active_stripes); 211 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) 212 md_wakeup_thread(conf->mddev->thread); 213 } 214 atomic_dec(&conf->active_stripes); 215 if (!test_bit(STRIPE_EXPANDING, &sh->state)) { 216 list_add_tail(&sh->lru, &conf->inactive_list); 217 wake_up(&conf->wait_for_stripe); 218 if (conf->retry_read_aligned) 219 md_wakeup_thread(conf->mddev->thread); 220 } 221 } 222 } 223 } 224 225 static void release_stripe(struct stripe_head *sh) 226 { 227 raid5_conf_t *conf = sh->raid_conf; 228 unsigned long flags; 229 230 spin_lock_irqsave(&conf->device_lock, flags); 231 __release_stripe(conf, sh); 232 spin_unlock_irqrestore(&conf->device_lock, flags); 233 } 234 235 static inline void remove_hash(struct stripe_head *sh) 236 { 237 pr_debug("remove_hash(), stripe %llu\n", 238 (unsigned long long)sh->sector); 239 240 hlist_del_init(&sh->hash); 241 } 242 243 static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh) 244 { 245 struct hlist_head *hp = stripe_hash(conf, sh->sector); 246 247 pr_debug("insert_hash(), stripe %llu\n", 248 (unsigned long long)sh->sector); 249 250 CHECK_DEVLOCK(); 251 hlist_add_head(&sh->hash, hp); 252 } 253 254 255 /* find an idle stripe, make sure it is unhashed, and return it. 
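 * The caller must hold conf->device_lock (CHECK_DEVLOCK asserts this);
 * a NULL return simply means the inactive list is currently empty.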
*/ 256 static struct stripe_head *get_free_stripe(raid5_conf_t *conf) 257 { 258 struct stripe_head *sh = NULL; 259 struct list_head *first; 260 261 CHECK_DEVLOCK(); 262 if (list_empty(&conf->inactive_list)) 263 goto out; 264 first = conf->inactive_list.next; 265 sh = list_entry(first, struct stripe_head, lru); 266 list_del_init(first); 267 remove_hash(sh); 268 atomic_inc(&conf->active_stripes); 269 out: 270 return sh; 271 } 272 273 static void shrink_buffers(struct stripe_head *sh, int num) 274 { 275 struct page *p; 276 int i; 277 278 for (i=0; i<num ; i++) { 279 p = sh->dev[i].page; 280 if (!p) 281 continue; 282 sh->dev[i].page = NULL; 283 put_page(p); 284 } 285 } 286 287 static int grow_buffers(struct stripe_head *sh, int num) 288 { 289 int i; 290 291 for (i=0; i<num; i++) { 292 struct page *page; 293 294 if (!(page = alloc_page(GFP_KERNEL))) { 295 return 1; 296 } 297 sh->dev[i].page = page; 298 } 299 return 0; 300 } 301 302 static void raid5_build_block(struct stripe_head *sh, int i, int previous); 303 static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous, 304 struct stripe_head *sh); 305 306 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) 307 { 308 raid5_conf_t *conf = sh->raid_conf; 309 int i; 310 311 BUG_ON(atomic_read(&sh->count) != 0); 312 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); 313 BUG_ON(stripe_operations_active(sh)); 314 315 CHECK_DEVLOCK(); 316 pr_debug("init_stripe called, stripe %llu\n", 317 (unsigned long long)sh->sector); 318 319 remove_hash(sh); 320 321 sh->generation = conf->generation - previous; 322 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks; 323 sh->sector = sector; 324 stripe_set_idx(sector, conf, previous, sh); 325 sh->state = 0; 326 327 328 for (i = sh->disks; i--; ) { 329 struct r5dev *dev = &sh->dev[i]; 330 331 if (dev->toread || dev->read || dev->towrite || dev->written || 332 test_bit(R5_LOCKED, &dev->flags)) { 333 printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n", 334 (unsigned long long)sh->sector, i, dev->toread, 335 dev->read, dev->towrite, dev->written, 336 test_bit(R5_LOCKED, &dev->flags)); 337 BUG(); 338 } 339 dev->flags = 0; 340 raid5_build_block(sh, i, previous); 341 } 342 insert_hash(conf, sh); 343 } 344 345 static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, 346 short generation) 347 { 348 struct stripe_head *sh; 349 struct hlist_node *hn; 350 351 CHECK_DEVLOCK(); 352 pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector); 353 hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash) 354 if (sh->sector == sector && sh->generation == generation) 355 return sh; 356 pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector); 357 return NULL; 358 } 359 360 static void unplug_slaves(mddev_t *mddev); 361 static void raid5_unplug_device(struct request_queue *q); 362 363 static struct stripe_head * 364 get_active_stripe(raid5_conf_t *conf, sector_t sector, 365 int previous, int noblock) 366 { 367 struct stripe_head *sh; 368 369 pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector); 370 371 spin_lock_irq(&conf->device_lock); 372 373 do { 374 wait_event_lock_irq(conf->wait_for_stripe, 375 conf->quiesce == 0, 376 conf->device_lock, /* nothing */); 377 sh = __find_stripe(conf, sector, conf->generation - previous); 378 if (!sh) { 379 if (!conf->inactive_blocked) 380 sh = get_free_stripe(conf); 381 if (noblock && sh == NULL) 382 break; 383 if (!sh) { 384 conf->inactive_blocked = 1; 385 
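				/*
				 * Wait until a stripe can be reclaimed: the
				 * inactive list must be non-empty and either
				 * the number of active stripes has dropped
				 * below 3/4 of max_nr_stripes (e.g. below 192
				 * with the default NR_STRIPES of 256) or
				 * inactive_blocked has been cleared.  The
				 * unplug callback passed below kicks queued
				 * I/O along so busy stripes can complete and
				 * be released while we sleep.
				 */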
wait_event_lock_irq(conf->wait_for_stripe, 386 !list_empty(&conf->inactive_list) && 387 (atomic_read(&conf->active_stripes) 388 < (conf->max_nr_stripes *3/4) 389 || !conf->inactive_blocked), 390 conf->device_lock, 391 raid5_unplug_device(conf->mddev->queue) 392 ); 393 conf->inactive_blocked = 0; 394 } else 395 init_stripe(sh, sector, previous); 396 } else { 397 if (atomic_read(&sh->count)) { 398 BUG_ON(!list_empty(&sh->lru) 399 && !test_bit(STRIPE_EXPANDING, &sh->state)); 400 } else { 401 if (!test_bit(STRIPE_HANDLE, &sh->state)) 402 atomic_inc(&conf->active_stripes); 403 if (list_empty(&sh->lru) && 404 !test_bit(STRIPE_EXPANDING, &sh->state)) 405 BUG(); 406 list_del_init(&sh->lru); 407 } 408 } 409 } while (sh == NULL); 410 411 if (sh) 412 atomic_inc(&sh->count); 413 414 spin_unlock_irq(&conf->device_lock); 415 return sh; 416 } 417 418 static void 419 raid5_end_read_request(struct bio *bi, int error); 420 static void 421 raid5_end_write_request(struct bio *bi, int error); 422 423 static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) 424 { 425 raid5_conf_t *conf = sh->raid_conf; 426 int i, disks = sh->disks; 427 428 might_sleep(); 429 430 for (i = disks; i--; ) { 431 int rw; 432 struct bio *bi; 433 mdk_rdev_t *rdev; 434 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) 435 rw = WRITE; 436 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) 437 rw = READ; 438 else 439 continue; 440 441 bi = &sh->dev[i].req; 442 443 bi->bi_rw = rw; 444 if (rw == WRITE) 445 bi->bi_end_io = raid5_end_write_request; 446 else 447 bi->bi_end_io = raid5_end_read_request; 448 449 rcu_read_lock(); 450 rdev = rcu_dereference(conf->disks[i].rdev); 451 if (rdev && test_bit(Faulty, &rdev->flags)) 452 rdev = NULL; 453 if (rdev) 454 atomic_inc(&rdev->nr_pending); 455 rcu_read_unlock(); 456 457 if (rdev) { 458 if (s->syncing || s->expanding || s->expanded) 459 md_sync_acct(rdev->bdev, STRIPE_SECTORS); 460 461 set_bit(STRIPE_IO_STARTED, &sh->state); 462 463 bi->bi_bdev = rdev->bdev; 464 pr_debug("%s: for %llu schedule op %ld on disc %d\n", 465 __func__, (unsigned long long)sh->sector, 466 bi->bi_rw, i); 467 atomic_inc(&sh->count); 468 bi->bi_sector = sh->sector + rdev->data_offset; 469 bi->bi_flags = 1 << BIO_UPTODATE; 470 bi->bi_vcnt = 1; 471 bi->bi_max_vecs = 1; 472 bi->bi_idx = 0; 473 bi->bi_io_vec = &sh->dev[i].vec; 474 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 475 bi->bi_io_vec[0].bv_offset = 0; 476 bi->bi_size = STRIPE_SIZE; 477 bi->bi_next = NULL; 478 if (rw == WRITE && 479 test_bit(R5_ReWrite, &sh->dev[i].flags)) 480 atomic_add(STRIPE_SECTORS, 481 &rdev->corrected_errors); 482 generic_make_request(bi); 483 } else { 484 if (rw == WRITE) 485 set_bit(STRIPE_DEGRADED, &sh->state); 486 pr_debug("skip op %ld on disc %d for sector %llu\n", 487 bi->bi_rw, i, (unsigned long long)sh->sector); 488 clear_bit(R5_LOCKED, &sh->dev[i].flags); 489 set_bit(STRIPE_HANDLE, &sh->state); 490 } 491 } 492 } 493 494 static struct dma_async_tx_descriptor * 495 async_copy_data(int frombio, struct bio *bio, struct page *page, 496 sector_t sector, struct dma_async_tx_descriptor *tx) 497 { 498 struct bio_vec *bvl; 499 struct page *bio_page; 500 int i; 501 int page_offset; 502 503 if (bio->bi_sector >= sector) 504 page_offset = (signed)(bio->bi_sector - sector) * 512; 505 else 506 page_offset = (signed)(sector - bio->bi_sector) * -512; 507 bio_for_each_segment(bvl, bio, i) { 508 int len = bio_iovec_idx(bio, i)->bv_len; 509 int clen; 510 int b_offset = 0; 511 512 if (page_offset < 0) { 513 b_offset = -page_offset; 
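			/*
			 * Worked example: if this bio begins one sector
			 * (512 bytes) before the stripe page, page_offset
			 * enters the segment loop as -512; b_offset then
			 * becomes 512, page_offset is advanced to 0 and len
			 * is trimmed, so the copy starts at the beginning of
			 * the stripe page and skips the part of the bio_vec
			 * that belongs to the previous stripe.
			 */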
514 page_offset += b_offset; 515 len -= b_offset; 516 } 517 518 if (len > 0 && page_offset + len > STRIPE_SIZE) 519 clen = STRIPE_SIZE - page_offset; 520 else 521 clen = len; 522 523 if (clen > 0) { 524 b_offset += bio_iovec_idx(bio, i)->bv_offset; 525 bio_page = bio_iovec_idx(bio, i)->bv_page; 526 if (frombio) 527 tx = async_memcpy(page, bio_page, page_offset, 528 b_offset, clen, 529 ASYNC_TX_DEP_ACK, 530 tx, NULL, NULL); 531 else 532 tx = async_memcpy(bio_page, page, b_offset, 533 page_offset, clen, 534 ASYNC_TX_DEP_ACK, 535 tx, NULL, NULL); 536 } 537 if (clen < len) /* hit end of page */ 538 break; 539 page_offset += len; 540 } 541 542 return tx; 543 } 544 545 static void ops_complete_biofill(void *stripe_head_ref) 546 { 547 struct stripe_head *sh = stripe_head_ref; 548 struct bio *return_bi = NULL; 549 raid5_conf_t *conf = sh->raid_conf; 550 int i; 551 552 pr_debug("%s: stripe %llu\n", __func__, 553 (unsigned long long)sh->sector); 554 555 /* clear completed biofills */ 556 spin_lock_irq(&conf->device_lock); 557 for (i = sh->disks; i--; ) { 558 struct r5dev *dev = &sh->dev[i]; 559 560 /* acknowledge completion of a biofill operation */ 561 /* and check if we need to reply to a read request, 562 * new R5_Wantfill requests are held off until 563 * !STRIPE_BIOFILL_RUN 564 */ 565 if (test_and_clear_bit(R5_Wantfill, &dev->flags)) { 566 struct bio *rbi, *rbi2; 567 568 BUG_ON(!dev->read); 569 rbi = dev->read; 570 dev->read = NULL; 571 while (rbi && rbi->bi_sector < 572 dev->sector + STRIPE_SECTORS) { 573 rbi2 = r5_next_bio(rbi, dev->sector); 574 if (!raid5_dec_bi_phys_segments(rbi)) { 575 rbi->bi_next = return_bi; 576 return_bi = rbi; 577 } 578 rbi = rbi2; 579 } 580 } 581 } 582 spin_unlock_irq(&conf->device_lock); 583 clear_bit(STRIPE_BIOFILL_RUN, &sh->state); 584 585 return_io(return_bi); 586 587 set_bit(STRIPE_HANDLE, &sh->state); 588 release_stripe(sh); 589 } 590 591 static void ops_run_biofill(struct stripe_head *sh) 592 { 593 struct dma_async_tx_descriptor *tx = NULL; 594 raid5_conf_t *conf = sh->raid_conf; 595 int i; 596 597 pr_debug("%s: stripe %llu\n", __func__, 598 (unsigned long long)sh->sector); 599 600 for (i = sh->disks; i--; ) { 601 struct r5dev *dev = &sh->dev[i]; 602 if (test_bit(R5_Wantfill, &dev->flags)) { 603 struct bio *rbi; 604 spin_lock_irq(&conf->device_lock); 605 dev->read = rbi = dev->toread; 606 dev->toread = NULL; 607 spin_unlock_irq(&conf->device_lock); 608 while (rbi && rbi->bi_sector < 609 dev->sector + STRIPE_SECTORS) { 610 tx = async_copy_data(0, rbi, dev->page, 611 dev->sector, tx); 612 rbi = r5_next_bio(rbi, dev->sector); 613 } 614 } 615 } 616 617 atomic_inc(&sh->count); 618 async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx, 619 ops_complete_biofill, sh); 620 } 621 622 static void ops_complete_compute5(void *stripe_head_ref) 623 { 624 struct stripe_head *sh = stripe_head_ref; 625 int target = sh->ops.target; 626 struct r5dev *tgt = &sh->dev[target]; 627 628 pr_debug("%s: stripe %llu\n", __func__, 629 (unsigned long long)sh->sector); 630 631 set_bit(R5_UPTODATE, &tgt->flags); 632 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 633 clear_bit(R5_Wantcompute, &tgt->flags); 634 clear_bit(STRIPE_COMPUTE_RUN, &sh->state); 635 if (sh->check_state == check_state_compute_run) 636 sh->check_state = check_state_compute_result; 637 set_bit(STRIPE_HANDLE, &sh->state); 638 release_stripe(sh); 639 } 640 641 static struct dma_async_tx_descriptor *ops_run_compute5(struct stripe_head *sh) 642 { 643 /* kernel stack size limits the total number of disks */ 644 int disks 
= sh->disks; 645 struct page *xor_srcs[disks]; 646 int target = sh->ops.target; 647 struct r5dev *tgt = &sh->dev[target]; 648 struct page *xor_dest = tgt->page; 649 int count = 0; 650 struct dma_async_tx_descriptor *tx; 651 int i; 652 653 pr_debug("%s: stripe %llu block: %d\n", 654 __func__, (unsigned long long)sh->sector, target); 655 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 656 657 for (i = disks; i--; ) 658 if (i != target) 659 xor_srcs[count++] = sh->dev[i].page; 660 661 atomic_inc(&sh->count); 662 663 if (unlikely(count == 1)) 664 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, 665 0, NULL, ops_complete_compute5, sh); 666 else 667 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 668 ASYNC_TX_XOR_ZERO_DST, NULL, 669 ops_complete_compute5, sh); 670 671 return tx; 672 } 673 674 static void ops_complete_prexor(void *stripe_head_ref) 675 { 676 struct stripe_head *sh = stripe_head_ref; 677 678 pr_debug("%s: stripe %llu\n", __func__, 679 (unsigned long long)sh->sector); 680 } 681 682 static struct dma_async_tx_descriptor * 683 ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) 684 { 685 /* kernel stack size limits the total number of disks */ 686 int disks = sh->disks; 687 struct page *xor_srcs[disks]; 688 int count = 0, pd_idx = sh->pd_idx, i; 689 690 /* existing parity data subtracted */ 691 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 692 693 pr_debug("%s: stripe %llu\n", __func__, 694 (unsigned long long)sh->sector); 695 696 for (i = disks; i--; ) { 697 struct r5dev *dev = &sh->dev[i]; 698 /* Only process blocks that are known to be uptodate */ 699 if (test_bit(R5_Wantdrain, &dev->flags)) 700 xor_srcs[count++] = dev->page; 701 } 702 703 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 704 ASYNC_TX_DEP_ACK | ASYNC_TX_XOR_DROP_DST, tx, 705 ops_complete_prexor, sh); 706 707 return tx; 708 } 709 710 static struct dma_async_tx_descriptor * 711 ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) 712 { 713 int disks = sh->disks; 714 int i; 715 716 pr_debug("%s: stripe %llu\n", __func__, 717 (unsigned long long)sh->sector); 718 719 for (i = disks; i--; ) { 720 struct r5dev *dev = &sh->dev[i]; 721 struct bio *chosen; 722 723 if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) { 724 struct bio *wbi; 725 726 spin_lock(&sh->lock); 727 chosen = dev->towrite; 728 dev->towrite = NULL; 729 BUG_ON(dev->written); 730 wbi = dev->written = chosen; 731 spin_unlock(&sh->lock); 732 733 while (wbi && wbi->bi_sector < 734 dev->sector + STRIPE_SECTORS) { 735 tx = async_copy_data(1, wbi, dev->page, 736 dev->sector, tx); 737 wbi = r5_next_bio(wbi, dev->sector); 738 } 739 } 740 } 741 742 return tx; 743 } 744 745 static void ops_complete_postxor(void *stripe_head_ref) 746 { 747 struct stripe_head *sh = stripe_head_ref; 748 int disks = sh->disks, i, pd_idx = sh->pd_idx; 749 750 pr_debug("%s: stripe %llu\n", __func__, 751 (unsigned long long)sh->sector); 752 753 for (i = disks; i--; ) { 754 struct r5dev *dev = &sh->dev[i]; 755 if (dev->written || i == pd_idx) 756 set_bit(R5_UPTODATE, &dev->flags); 757 } 758 759 if (sh->reconstruct_state == reconstruct_state_drain_run) 760 sh->reconstruct_state = reconstruct_state_drain_result; 761 else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) 762 sh->reconstruct_state = reconstruct_state_prexor_drain_result; 763 else { 764 BUG_ON(sh->reconstruct_state != reconstruct_state_run); 765 sh->reconstruct_state = reconstruct_state_result; 766 } 767 768 
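	/*
	 * Mark the stripe for another pass through the stripe state machine
	 * so the completed parity update is acted upon (writes issued, or
	 * the expand path continued), then drop the reference taken when
	 * the operation was scheduled.
	 */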
set_bit(STRIPE_HANDLE, &sh->state); 769 release_stripe(sh); 770 } 771 772 static void 773 ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) 774 { 775 /* kernel stack size limits the total number of disks */ 776 int disks = sh->disks; 777 struct page *xor_srcs[disks]; 778 779 int count = 0, pd_idx = sh->pd_idx, i; 780 struct page *xor_dest; 781 int prexor = 0; 782 unsigned long flags; 783 784 pr_debug("%s: stripe %llu\n", __func__, 785 (unsigned long long)sh->sector); 786 787 /* check if prexor is active which means only process blocks 788 * that are part of a read-modify-write (written) 789 */ 790 if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) { 791 prexor = 1; 792 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 793 for (i = disks; i--; ) { 794 struct r5dev *dev = &sh->dev[i]; 795 if (dev->written) 796 xor_srcs[count++] = dev->page; 797 } 798 } else { 799 xor_dest = sh->dev[pd_idx].page; 800 for (i = disks; i--; ) { 801 struct r5dev *dev = &sh->dev[i]; 802 if (i != pd_idx) 803 xor_srcs[count++] = dev->page; 804 } 805 } 806 807 /* 1/ if we prexor'd then the dest is reused as a source 808 * 2/ if we did not prexor then we are redoing the parity 809 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST 810 * for the synchronous xor case 811 */ 812 flags = ASYNC_TX_DEP_ACK | ASYNC_TX_ACK | 813 (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST); 814 815 atomic_inc(&sh->count); 816 817 if (unlikely(count == 1)) { 818 flags &= ~(ASYNC_TX_XOR_DROP_DST | ASYNC_TX_XOR_ZERO_DST); 819 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, 820 flags, tx, ops_complete_postxor, sh); 821 } else 822 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 823 flags, tx, ops_complete_postxor, sh); 824 } 825 826 static void ops_complete_check(void *stripe_head_ref) 827 { 828 struct stripe_head *sh = stripe_head_ref; 829 830 pr_debug("%s: stripe %llu\n", __func__, 831 (unsigned long long)sh->sector); 832 833 sh->check_state = check_state_check_result; 834 set_bit(STRIPE_HANDLE, &sh->state); 835 release_stripe(sh); 836 } 837 838 static void ops_run_check(struct stripe_head *sh) 839 { 840 /* kernel stack size limits the total number of disks */ 841 int disks = sh->disks; 842 struct page *xor_srcs[disks]; 843 struct dma_async_tx_descriptor *tx; 844 845 int count = 0, pd_idx = sh->pd_idx, i; 846 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 847 848 pr_debug("%s: stripe %llu\n", __func__, 849 (unsigned long long)sh->sector); 850 851 for (i = disks; i--; ) { 852 struct r5dev *dev = &sh->dev[i]; 853 if (i != pd_idx) 854 xor_srcs[count++] = dev->page; 855 } 856 857 tx = async_xor_zero_sum(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 858 &sh->ops.zero_sum_result, 0, NULL, NULL, NULL); 859 860 atomic_inc(&sh->count); 861 tx = async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx, 862 ops_complete_check, sh); 863 } 864 865 static void raid5_run_ops(struct stripe_head *sh, unsigned long ops_request) 866 { 867 int overlap_clear = 0, i, disks = sh->disks; 868 struct dma_async_tx_descriptor *tx = NULL; 869 870 if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) { 871 ops_run_biofill(sh); 872 overlap_clear++; 873 } 874 875 if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) { 876 tx = ops_run_compute5(sh); 877 /* terminate the chain if postxor is not set to be run */ 878 if (tx && !test_bit(STRIPE_OP_POSTXOR, &ops_request)) 879 async_tx_ack(tx); 880 } 881 882 if (test_bit(STRIPE_OP_PREXOR, &ops_request)) 883 tx = ops_run_prexor(sh, tx); 884 885 if 
(test_bit(STRIPE_OP_BIODRAIN, &ops_request)) { 886 tx = ops_run_biodrain(sh, tx); 887 overlap_clear++; 888 } 889 890 if (test_bit(STRIPE_OP_POSTXOR, &ops_request)) 891 ops_run_postxor(sh, tx); 892 893 if (test_bit(STRIPE_OP_CHECK, &ops_request)) 894 ops_run_check(sh); 895 896 if (overlap_clear) 897 for (i = disks; i--; ) { 898 struct r5dev *dev = &sh->dev[i]; 899 if (test_and_clear_bit(R5_Overlap, &dev->flags)) 900 wake_up(&sh->raid_conf->wait_for_overlap); 901 } 902 } 903 904 static int grow_one_stripe(raid5_conf_t *conf) 905 { 906 struct stripe_head *sh; 907 sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL); 908 if (!sh) 909 return 0; 910 memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev)); 911 sh->raid_conf = conf; 912 spin_lock_init(&sh->lock); 913 914 if (grow_buffers(sh, conf->raid_disks)) { 915 shrink_buffers(sh, conf->raid_disks); 916 kmem_cache_free(conf->slab_cache, sh); 917 return 0; 918 } 919 sh->disks = conf->raid_disks; 920 /* we just created an active stripe so... */ 921 atomic_set(&sh->count, 1); 922 atomic_inc(&conf->active_stripes); 923 INIT_LIST_HEAD(&sh->lru); 924 release_stripe(sh); 925 return 1; 926 } 927 928 static int grow_stripes(raid5_conf_t *conf, int num) 929 { 930 struct kmem_cache *sc; 931 int devs = conf->raid_disks; 932 933 sprintf(conf->cache_name[0], 934 "raid%d-%s", conf->level, mdname(conf->mddev)); 935 sprintf(conf->cache_name[1], 936 "raid%d-%s-alt", conf->level, mdname(conf->mddev)); 937 conf->active_name = 0; 938 sc = kmem_cache_create(conf->cache_name[conf->active_name], 939 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev), 940 0, 0, NULL); 941 if (!sc) 942 return 1; 943 conf->slab_cache = sc; 944 conf->pool_size = devs; 945 while (num--) 946 if (!grow_one_stripe(conf)) 947 return 1; 948 return 0; 949 } 950 951 static int resize_stripes(raid5_conf_t *conf, int newsize) 952 { 953 /* Make all the stripes able to hold 'newsize' devices. 954 * New slots in each stripe get 'page' set to a new page. 955 * 956 * This happens in stages: 957 * 1/ create a new kmem_cache and allocate the required number of 958 * stripe_heads. 959 * 2/ gather all the old stripe_heads and tranfer the pages across 960 * to the new stripe_heads. This will have the side effect of 961 * freezing the array as once all stripe_heads have been collected, 962 * no IO will be possible. Old stripe heads are freed once their 963 * pages have been transferred over, and the old kmem_cache is 964 * freed when all stripes are done. 965 * 3/ reallocate conf->disks to be suitable bigger. If this fails, 966 * we simple return a failre status - no need to clean anything up. 967 * 4/ allocate new pages for the new slots in the new stripe_heads. 968 * If this fails, we don't bother trying the shrink the 969 * stripe_heads down again, we just leave them as they are. 970 * As each stripe_head is processed the new one is released into 971 * active service. 972 * 973 * Once step2 is started, we cannot afford to wait for a write, 974 * so we use GFP_NOIO allocations. 
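 *
 * For example, growing an array from 5 to 7 devices gives each of the
 * conf->max_nr_stripes stripe_heads two extra r5dev slots, and two new
 * pages per stripe are allocated in step 4.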
975 */ 976 struct stripe_head *osh, *nsh; 977 LIST_HEAD(newstripes); 978 struct disk_info *ndisks; 979 int err; 980 struct kmem_cache *sc; 981 int i; 982 983 if (newsize <= conf->pool_size) 984 return 0; /* never bother to shrink */ 985 986 err = md_allow_write(conf->mddev); 987 if (err) 988 return err; 989 990 /* Step 1 */ 991 sc = kmem_cache_create(conf->cache_name[1-conf->active_name], 992 sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev), 993 0, 0, NULL); 994 if (!sc) 995 return -ENOMEM; 996 997 for (i = conf->max_nr_stripes; i; i--) { 998 nsh = kmem_cache_alloc(sc, GFP_KERNEL); 999 if (!nsh) 1000 break; 1001 1002 memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev)); 1003 1004 nsh->raid_conf = conf; 1005 spin_lock_init(&nsh->lock); 1006 1007 list_add(&nsh->lru, &newstripes); 1008 } 1009 if (i) { 1010 /* didn't get enough, give up */ 1011 while (!list_empty(&newstripes)) { 1012 nsh = list_entry(newstripes.next, struct stripe_head, lru); 1013 list_del(&nsh->lru); 1014 kmem_cache_free(sc, nsh); 1015 } 1016 kmem_cache_destroy(sc); 1017 return -ENOMEM; 1018 } 1019 /* Step 2 - Must use GFP_NOIO now. 1020 * OK, we have enough stripes, start collecting inactive 1021 * stripes and copying them over 1022 */ 1023 list_for_each_entry(nsh, &newstripes, lru) { 1024 spin_lock_irq(&conf->device_lock); 1025 wait_event_lock_irq(conf->wait_for_stripe, 1026 !list_empty(&conf->inactive_list), 1027 conf->device_lock, 1028 unplug_slaves(conf->mddev) 1029 ); 1030 osh = get_free_stripe(conf); 1031 spin_unlock_irq(&conf->device_lock); 1032 atomic_set(&nsh->count, 1); 1033 for(i=0; i<conf->pool_size; i++) 1034 nsh->dev[i].page = osh->dev[i].page; 1035 for( ; i<newsize; i++) 1036 nsh->dev[i].page = NULL; 1037 kmem_cache_free(conf->slab_cache, osh); 1038 } 1039 kmem_cache_destroy(conf->slab_cache); 1040 1041 /* Step 3. 1042 * At this point, we are holding all the stripes so the array 1043 * is completely stalled, so now is a good time to resize 1044 * conf->disks. 
1045 */ 1046 ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO); 1047 if (ndisks) { 1048 for (i=0; i<conf->raid_disks; i++) 1049 ndisks[i] = conf->disks[i]; 1050 kfree(conf->disks); 1051 conf->disks = ndisks; 1052 } else 1053 err = -ENOMEM; 1054 1055 /* Step 4, return new stripes to service */ 1056 while(!list_empty(&newstripes)) { 1057 nsh = list_entry(newstripes.next, struct stripe_head, lru); 1058 list_del_init(&nsh->lru); 1059 for (i=conf->raid_disks; i < newsize; i++) 1060 if (nsh->dev[i].page == NULL) { 1061 struct page *p = alloc_page(GFP_NOIO); 1062 nsh->dev[i].page = p; 1063 if (!p) 1064 err = -ENOMEM; 1065 } 1066 release_stripe(nsh); 1067 } 1068 /* critical section pass, GFP_NOIO no longer needed */ 1069 1070 conf->slab_cache = sc; 1071 conf->active_name = 1-conf->active_name; 1072 conf->pool_size = newsize; 1073 return err; 1074 } 1075 1076 static int drop_one_stripe(raid5_conf_t *conf) 1077 { 1078 struct stripe_head *sh; 1079 1080 spin_lock_irq(&conf->device_lock); 1081 sh = get_free_stripe(conf); 1082 spin_unlock_irq(&conf->device_lock); 1083 if (!sh) 1084 return 0; 1085 BUG_ON(atomic_read(&sh->count)); 1086 shrink_buffers(sh, conf->pool_size); 1087 kmem_cache_free(conf->slab_cache, sh); 1088 atomic_dec(&conf->active_stripes); 1089 return 1; 1090 } 1091 1092 static void shrink_stripes(raid5_conf_t *conf) 1093 { 1094 while (drop_one_stripe(conf)) 1095 ; 1096 1097 if (conf->slab_cache) 1098 kmem_cache_destroy(conf->slab_cache); 1099 conf->slab_cache = NULL; 1100 } 1101 1102 static void raid5_end_read_request(struct bio * bi, int error) 1103 { 1104 struct stripe_head *sh = bi->bi_private; 1105 raid5_conf_t *conf = sh->raid_conf; 1106 int disks = sh->disks, i; 1107 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 1108 char b[BDEVNAME_SIZE]; 1109 mdk_rdev_t *rdev; 1110 1111 1112 for (i=0 ; i<disks; i++) 1113 if (bi == &sh->dev[i].req) 1114 break; 1115 1116 pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n", 1117 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 1118 uptodate); 1119 if (i == disks) { 1120 BUG(); 1121 return; 1122 } 1123 1124 if (uptodate) { 1125 set_bit(R5_UPTODATE, &sh->dev[i].flags); 1126 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 1127 rdev = conf->disks[i].rdev; 1128 printk_rl(KERN_INFO "raid5:%s: read error corrected" 1129 " (%lu sectors at %llu on %s)\n", 1130 mdname(conf->mddev), STRIPE_SECTORS, 1131 (unsigned long long)(sh->sector 1132 + rdev->data_offset), 1133 bdevname(rdev->bdev, b)); 1134 clear_bit(R5_ReadError, &sh->dev[i].flags); 1135 clear_bit(R5_ReWrite, &sh->dev[i].flags); 1136 } 1137 if (atomic_read(&conf->disks[i].rdev->read_errors)) 1138 atomic_set(&conf->disks[i].rdev->read_errors, 0); 1139 } else { 1140 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b); 1141 int retry = 0; 1142 rdev = conf->disks[i].rdev; 1143 1144 clear_bit(R5_UPTODATE, &sh->dev[i].flags); 1145 atomic_inc(&rdev->read_errors); 1146 if (conf->mddev->degraded) 1147 printk_rl(KERN_WARNING 1148 "raid5:%s: read error not correctable " 1149 "(sector %llu on %s).\n", 1150 mdname(conf->mddev), 1151 (unsigned long long)(sh->sector 1152 + rdev->data_offset), 1153 bdn); 1154 else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) 1155 /* Oh, no!!! */ 1156 printk_rl(KERN_WARNING 1157 "raid5:%s: read error NOT corrected!! 
" 1158 "(sector %llu on %s).\n", 1159 mdname(conf->mddev), 1160 (unsigned long long)(sh->sector 1161 + rdev->data_offset), 1162 bdn); 1163 else if (atomic_read(&rdev->read_errors) 1164 > conf->max_nr_stripes) 1165 printk(KERN_WARNING 1166 "raid5:%s: Too many read errors, failing device %s.\n", 1167 mdname(conf->mddev), bdn); 1168 else 1169 retry = 1; 1170 if (retry) 1171 set_bit(R5_ReadError, &sh->dev[i].flags); 1172 else { 1173 clear_bit(R5_ReadError, &sh->dev[i].flags); 1174 clear_bit(R5_ReWrite, &sh->dev[i].flags); 1175 md_error(conf->mddev, rdev); 1176 } 1177 } 1178 rdev_dec_pending(conf->disks[i].rdev, conf->mddev); 1179 clear_bit(R5_LOCKED, &sh->dev[i].flags); 1180 set_bit(STRIPE_HANDLE, &sh->state); 1181 release_stripe(sh); 1182 } 1183 1184 static void raid5_end_write_request(struct bio *bi, int error) 1185 { 1186 struct stripe_head *sh = bi->bi_private; 1187 raid5_conf_t *conf = sh->raid_conf; 1188 int disks = sh->disks, i; 1189 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 1190 1191 for (i=0 ; i<disks; i++) 1192 if (bi == &sh->dev[i].req) 1193 break; 1194 1195 pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n", 1196 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 1197 uptodate); 1198 if (i == disks) { 1199 BUG(); 1200 return; 1201 } 1202 1203 if (!uptodate) 1204 md_error(conf->mddev, conf->disks[i].rdev); 1205 1206 rdev_dec_pending(conf->disks[i].rdev, conf->mddev); 1207 1208 clear_bit(R5_LOCKED, &sh->dev[i].flags); 1209 set_bit(STRIPE_HANDLE, &sh->state); 1210 release_stripe(sh); 1211 } 1212 1213 1214 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous); 1215 1216 static void raid5_build_block(struct stripe_head *sh, int i, int previous) 1217 { 1218 struct r5dev *dev = &sh->dev[i]; 1219 1220 bio_init(&dev->req); 1221 dev->req.bi_io_vec = &dev->vec; 1222 dev->req.bi_vcnt++; 1223 dev->req.bi_max_vecs++; 1224 dev->vec.bv_page = dev->page; 1225 dev->vec.bv_len = STRIPE_SIZE; 1226 dev->vec.bv_offset = 0; 1227 1228 dev->req.bi_sector = sh->sector; 1229 dev->req.bi_private = sh; 1230 1231 dev->flags = 0; 1232 dev->sector = compute_blocknr(sh, i, previous); 1233 } 1234 1235 static void error(mddev_t *mddev, mdk_rdev_t *rdev) 1236 { 1237 char b[BDEVNAME_SIZE]; 1238 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 1239 pr_debug("raid5: error called\n"); 1240 1241 if (!test_bit(Faulty, &rdev->flags)) { 1242 set_bit(MD_CHANGE_DEVS, &mddev->flags); 1243 if (test_and_clear_bit(In_sync, &rdev->flags)) { 1244 unsigned long flags; 1245 spin_lock_irqsave(&conf->device_lock, flags); 1246 mddev->degraded++; 1247 spin_unlock_irqrestore(&conf->device_lock, flags); 1248 /* 1249 * if recovery was running, make sure it aborts. 1250 */ 1251 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 1252 } 1253 set_bit(Faulty, &rdev->flags); 1254 printk(KERN_ALERT 1255 "raid5: Disk failure on %s, disabling device.\n" 1256 "raid5: Operation continuing on %d devices.\n", 1257 bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded); 1258 } 1259 } 1260 1261 /* 1262 * Input: a 'big' sector number, 1263 * Output: index of the data and parity disk, and the sector # in them. 1264 */ 1265 static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, 1266 int previous, int *dd_idx, 1267 struct stripe_head *sh) 1268 { 1269 long stripe; 1270 unsigned long chunk_number; 1271 unsigned int chunk_offset; 1272 int pd_idx, qd_idx; 1273 int ddf_layout = 0; 1274 sector_t new_sector; 1275 int algorithm = previous ? 
conf->prev_algo 1276 : conf->algorithm; 1277 int sectors_per_chunk = previous ? (conf->prev_chunk >> 9) 1278 : (conf->chunk_size >> 9); 1279 int raid_disks = previous ? conf->previous_raid_disks 1280 : conf->raid_disks; 1281 int data_disks = raid_disks - conf->max_degraded; 1282 1283 /* First compute the information on this sector */ 1284 1285 /* 1286 * Compute the chunk number and the sector offset inside the chunk 1287 */ 1288 chunk_offset = sector_div(r_sector, sectors_per_chunk); 1289 chunk_number = r_sector; 1290 BUG_ON(r_sector != chunk_number); 1291 1292 /* 1293 * Compute the stripe number 1294 */ 1295 stripe = chunk_number / data_disks; 1296 1297 /* 1298 * Compute the data disk and parity disk indexes inside the stripe 1299 */ 1300 *dd_idx = chunk_number % data_disks; 1301 1302 /* 1303 * Select the parity disk based on the user selected algorithm. 1304 */ 1305 pd_idx = qd_idx = ~0; 1306 switch(conf->level) { 1307 case 4: 1308 pd_idx = data_disks; 1309 break; 1310 case 5: 1311 switch (algorithm) { 1312 case ALGORITHM_LEFT_ASYMMETRIC: 1313 pd_idx = data_disks - stripe % raid_disks; 1314 if (*dd_idx >= pd_idx) 1315 (*dd_idx)++; 1316 break; 1317 case ALGORITHM_RIGHT_ASYMMETRIC: 1318 pd_idx = stripe % raid_disks; 1319 if (*dd_idx >= pd_idx) 1320 (*dd_idx)++; 1321 break; 1322 case ALGORITHM_LEFT_SYMMETRIC: 1323 pd_idx = data_disks - stripe % raid_disks; 1324 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 1325 break; 1326 case ALGORITHM_RIGHT_SYMMETRIC: 1327 pd_idx = stripe % raid_disks; 1328 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 1329 break; 1330 case ALGORITHM_PARITY_0: 1331 pd_idx = 0; 1332 (*dd_idx)++; 1333 break; 1334 case ALGORITHM_PARITY_N: 1335 pd_idx = data_disks; 1336 break; 1337 default: 1338 printk(KERN_ERR "raid5: unsupported algorithm %d\n", 1339 algorithm); 1340 BUG(); 1341 } 1342 break; 1343 case 6: 1344 1345 switch (algorithm) { 1346 case ALGORITHM_LEFT_ASYMMETRIC: 1347 pd_idx = raid_disks - 1 - (stripe % raid_disks); 1348 qd_idx = pd_idx + 1; 1349 if (pd_idx == raid_disks-1) { 1350 (*dd_idx)++; /* Q D D D P */ 1351 qd_idx = 0; 1352 } else if (*dd_idx >= pd_idx) 1353 (*dd_idx) += 2; /* D D P Q D */ 1354 break; 1355 case ALGORITHM_RIGHT_ASYMMETRIC: 1356 pd_idx = stripe % raid_disks; 1357 qd_idx = pd_idx + 1; 1358 if (pd_idx == raid_disks-1) { 1359 (*dd_idx)++; /* Q D D D P */ 1360 qd_idx = 0; 1361 } else if (*dd_idx >= pd_idx) 1362 (*dd_idx) += 2; /* D D P Q D */ 1363 break; 1364 case ALGORITHM_LEFT_SYMMETRIC: 1365 pd_idx = raid_disks - 1 - (stripe % raid_disks); 1366 qd_idx = (pd_idx + 1) % raid_disks; 1367 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; 1368 break; 1369 case ALGORITHM_RIGHT_SYMMETRIC: 1370 pd_idx = stripe % raid_disks; 1371 qd_idx = (pd_idx + 1) % raid_disks; 1372 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; 1373 break; 1374 1375 case ALGORITHM_PARITY_0: 1376 pd_idx = 0; 1377 qd_idx = 1; 1378 (*dd_idx) += 2; 1379 break; 1380 case ALGORITHM_PARITY_N: 1381 pd_idx = data_disks; 1382 qd_idx = data_disks + 1; 1383 break; 1384 1385 case ALGORITHM_ROTATING_ZERO_RESTART: 1386 /* Exactly the same as RIGHT_ASYMMETRIC, but or 1387 * of blocks for computing Q is different. 
1388 */ 1389 pd_idx = stripe % raid_disks; 1390 qd_idx = pd_idx + 1; 1391 if (pd_idx == raid_disks-1) { 1392 (*dd_idx)++; /* Q D D D P */ 1393 qd_idx = 0; 1394 } else if (*dd_idx >= pd_idx) 1395 (*dd_idx) += 2; /* D D P Q D */ 1396 ddf_layout = 1; 1397 break; 1398 1399 case ALGORITHM_ROTATING_N_RESTART: 1400 /* Same a left_asymmetric, by first stripe is 1401 * D D D P Q rather than 1402 * Q D D D P 1403 */ 1404 pd_idx = raid_disks - 1 - ((stripe + 1) % raid_disks); 1405 qd_idx = pd_idx + 1; 1406 if (pd_idx == raid_disks-1) { 1407 (*dd_idx)++; /* Q D D D P */ 1408 qd_idx = 0; 1409 } else if (*dd_idx >= pd_idx) 1410 (*dd_idx) += 2; /* D D P Q D */ 1411 ddf_layout = 1; 1412 break; 1413 1414 case ALGORITHM_ROTATING_N_CONTINUE: 1415 /* Same as left_symmetric but Q is before P */ 1416 pd_idx = raid_disks - 1 - (stripe % raid_disks); 1417 qd_idx = (pd_idx + raid_disks - 1) % raid_disks; 1418 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 1419 ddf_layout = 1; 1420 break; 1421 1422 case ALGORITHM_LEFT_ASYMMETRIC_6: 1423 /* RAID5 left_asymmetric, with Q on last device */ 1424 pd_idx = data_disks - stripe % (raid_disks-1); 1425 if (*dd_idx >= pd_idx) 1426 (*dd_idx)++; 1427 qd_idx = raid_disks - 1; 1428 break; 1429 1430 case ALGORITHM_RIGHT_ASYMMETRIC_6: 1431 pd_idx = stripe % (raid_disks-1); 1432 if (*dd_idx >= pd_idx) 1433 (*dd_idx)++; 1434 qd_idx = raid_disks - 1; 1435 break; 1436 1437 case ALGORITHM_LEFT_SYMMETRIC_6: 1438 pd_idx = data_disks - stripe % (raid_disks-1); 1439 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); 1440 qd_idx = raid_disks - 1; 1441 break; 1442 1443 case ALGORITHM_RIGHT_SYMMETRIC_6: 1444 pd_idx = stripe % (raid_disks-1); 1445 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); 1446 qd_idx = raid_disks - 1; 1447 break; 1448 1449 case ALGORITHM_PARITY_0_6: 1450 pd_idx = 0; 1451 (*dd_idx)++; 1452 qd_idx = raid_disks - 1; 1453 break; 1454 1455 1456 default: 1457 printk(KERN_CRIT "raid6: unsupported algorithm %d\n", 1458 algorithm); 1459 BUG(); 1460 } 1461 break; 1462 } 1463 1464 if (sh) { 1465 sh->pd_idx = pd_idx; 1466 sh->qd_idx = qd_idx; 1467 sh->ddf_layout = ddf_layout; 1468 } 1469 /* 1470 * Finally, compute the new sector number 1471 */ 1472 new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset; 1473 return new_sector; 1474 } 1475 1476 1477 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) 1478 { 1479 raid5_conf_t *conf = sh->raid_conf; 1480 int raid_disks = sh->disks; 1481 int data_disks = raid_disks - conf->max_degraded; 1482 sector_t new_sector = sh->sector, check; 1483 int sectors_per_chunk = previous ? (conf->prev_chunk >> 9) 1484 : (conf->chunk_size >> 9); 1485 int algorithm = previous ? 
conf->prev_algo 1486 : conf->algorithm; 1487 sector_t stripe; 1488 int chunk_offset; 1489 int chunk_number, dummy1, dd_idx = i; 1490 sector_t r_sector; 1491 struct stripe_head sh2; 1492 1493 1494 chunk_offset = sector_div(new_sector, sectors_per_chunk); 1495 stripe = new_sector; 1496 BUG_ON(new_sector != stripe); 1497 1498 if (i == sh->pd_idx) 1499 return 0; 1500 switch(conf->level) { 1501 case 4: break; 1502 case 5: 1503 switch (algorithm) { 1504 case ALGORITHM_LEFT_ASYMMETRIC: 1505 case ALGORITHM_RIGHT_ASYMMETRIC: 1506 if (i > sh->pd_idx) 1507 i--; 1508 break; 1509 case ALGORITHM_LEFT_SYMMETRIC: 1510 case ALGORITHM_RIGHT_SYMMETRIC: 1511 if (i < sh->pd_idx) 1512 i += raid_disks; 1513 i -= (sh->pd_idx + 1); 1514 break; 1515 case ALGORITHM_PARITY_0: 1516 i -= 1; 1517 break; 1518 case ALGORITHM_PARITY_N: 1519 break; 1520 default: 1521 printk(KERN_ERR "raid5: unsupported algorithm %d\n", 1522 algorithm); 1523 BUG(); 1524 } 1525 break; 1526 case 6: 1527 if (i == sh->qd_idx) 1528 return 0; /* It is the Q disk */ 1529 switch (algorithm) { 1530 case ALGORITHM_LEFT_ASYMMETRIC: 1531 case ALGORITHM_RIGHT_ASYMMETRIC: 1532 case ALGORITHM_ROTATING_ZERO_RESTART: 1533 case ALGORITHM_ROTATING_N_RESTART: 1534 if (sh->pd_idx == raid_disks-1) 1535 i--; /* Q D D D P */ 1536 else if (i > sh->pd_idx) 1537 i -= 2; /* D D P Q D */ 1538 break; 1539 case ALGORITHM_LEFT_SYMMETRIC: 1540 case ALGORITHM_RIGHT_SYMMETRIC: 1541 if (sh->pd_idx == raid_disks-1) 1542 i--; /* Q D D D P */ 1543 else { 1544 /* D D P Q D */ 1545 if (i < sh->pd_idx) 1546 i += raid_disks; 1547 i -= (sh->pd_idx + 2); 1548 } 1549 break; 1550 case ALGORITHM_PARITY_0: 1551 i -= 2; 1552 break; 1553 case ALGORITHM_PARITY_N: 1554 break; 1555 case ALGORITHM_ROTATING_N_CONTINUE: 1556 if (sh->pd_idx == 0) 1557 i--; /* P D D D Q */ 1558 else if (i > sh->pd_idx) 1559 i -= 2; /* D D Q P D */ 1560 break; 1561 case ALGORITHM_LEFT_ASYMMETRIC_6: 1562 case ALGORITHM_RIGHT_ASYMMETRIC_6: 1563 if (i > sh->pd_idx) 1564 i--; 1565 break; 1566 case ALGORITHM_LEFT_SYMMETRIC_6: 1567 case ALGORITHM_RIGHT_SYMMETRIC_6: 1568 if (i < sh->pd_idx) 1569 i += data_disks + 1; 1570 i -= (sh->pd_idx + 1); 1571 break; 1572 case ALGORITHM_PARITY_0_6: 1573 i -= 1; 1574 break; 1575 default: 1576 printk(KERN_CRIT "raid6: unsupported algorithm %d\n", 1577 algorithm); 1578 BUG(); 1579 } 1580 break; 1581 } 1582 1583 chunk_number = stripe * data_disks + i; 1584 r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset; 1585 1586 check = raid5_compute_sector(conf, r_sector, 1587 previous, &dummy1, &sh2); 1588 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx 1589 || sh2.qd_idx != sh->qd_idx) { 1590 printk(KERN_ERR "compute_blocknr: map not correct\n"); 1591 return 0; 1592 } 1593 return r_sector; 1594 } 1595 1596 1597 1598 /* 1599 * Copy data between a page in the stripe cache, and one or more bion 1600 * The page could align with the middle of the bio, or there could be 1601 * several bion, each with several bio_vecs, which cover part of the page 1602 * Multiple bion are linked together on bi_next. There may be extras 1603 * at the end of this list. We ignore them. 
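 *
 * This is the synchronous (memcpy-based) counterpart of async_copy_data()
 * above; the RAID-6 paths such as compute_parity6() use it directly.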
1604 */ 1605 static void copy_data(int frombio, struct bio *bio, 1606 struct page *page, 1607 sector_t sector) 1608 { 1609 char *pa = page_address(page); 1610 struct bio_vec *bvl; 1611 int i; 1612 int page_offset; 1613 1614 if (bio->bi_sector >= sector) 1615 page_offset = (signed)(bio->bi_sector - sector) * 512; 1616 else 1617 page_offset = (signed)(sector - bio->bi_sector) * -512; 1618 bio_for_each_segment(bvl, bio, i) { 1619 int len = bio_iovec_idx(bio,i)->bv_len; 1620 int clen; 1621 int b_offset = 0; 1622 1623 if (page_offset < 0) { 1624 b_offset = -page_offset; 1625 page_offset += b_offset; 1626 len -= b_offset; 1627 } 1628 1629 if (len > 0 && page_offset + len > STRIPE_SIZE) 1630 clen = STRIPE_SIZE - page_offset; 1631 else clen = len; 1632 1633 if (clen > 0) { 1634 char *ba = __bio_kmap_atomic(bio, i, KM_USER0); 1635 if (frombio) 1636 memcpy(pa+page_offset, ba+b_offset, clen); 1637 else 1638 memcpy(ba+b_offset, pa+page_offset, clen); 1639 __bio_kunmap_atomic(ba, KM_USER0); 1640 } 1641 if (clen < len) /* hit end of page */ 1642 break; 1643 page_offset += len; 1644 } 1645 } 1646 1647 #define check_xor() do { \ 1648 if (count == MAX_XOR_BLOCKS) { \ 1649 xor_blocks(count, STRIPE_SIZE, dest, ptr);\ 1650 count = 0; \ 1651 } \ 1652 } while(0) 1653 1654 static void compute_parity6(struct stripe_head *sh, int method) 1655 { 1656 raid5_conf_t *conf = sh->raid_conf; 1657 int i, pd_idx, qd_idx, d0_idx, disks = sh->disks, count; 1658 int syndrome_disks = sh->ddf_layout ? disks : (disks - 2); 1659 struct bio *chosen; 1660 /**** FIX THIS: This could be very bad if disks is close to 256 ****/ 1661 void *ptrs[syndrome_disks+2]; 1662 1663 pd_idx = sh->pd_idx; 1664 qd_idx = sh->qd_idx; 1665 d0_idx = raid6_d0(sh); 1666 1667 pr_debug("compute_parity, stripe %llu, method %d\n", 1668 (unsigned long long)sh->sector, method); 1669 1670 switch(method) { 1671 case READ_MODIFY_WRITE: 1672 BUG(); /* READ_MODIFY_WRITE N/A for RAID-6 */ 1673 case RECONSTRUCT_WRITE: 1674 for (i= disks; i-- ;) 1675 if ( i != pd_idx && i != qd_idx && sh->dev[i].towrite ) { 1676 chosen = sh->dev[i].towrite; 1677 sh->dev[i].towrite = NULL; 1678 1679 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 1680 wake_up(&conf->wait_for_overlap); 1681 1682 BUG_ON(sh->dev[i].written); 1683 sh->dev[i].written = chosen; 1684 } 1685 break; 1686 case CHECK_PARITY: 1687 BUG(); /* Not implemented yet */ 1688 } 1689 1690 for (i = disks; i--;) 1691 if (sh->dev[i].written) { 1692 sector_t sector = sh->dev[i].sector; 1693 struct bio *wbi = sh->dev[i].written; 1694 while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) { 1695 copy_data(1, wbi, sh->dev[i].page, sector); 1696 wbi = r5_next_bio(wbi, sector); 1697 } 1698 1699 set_bit(R5_LOCKED, &sh->dev[i].flags); 1700 set_bit(R5_UPTODATE, &sh->dev[i].flags); 1701 } 1702 1703 /* Note that unlike RAID-5, the ordering of the disks matters greatly.*/ 1704 1705 for (i = 0; i < disks; i++) 1706 ptrs[i] = (void *)raid6_empty_zero_page; 1707 1708 count = 0; 1709 i = d0_idx; 1710 do { 1711 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); 1712 1713 ptrs[slot] = page_address(sh->dev[i].page); 1714 if (slot < syndrome_disks && 1715 !test_bit(R5_UPTODATE, &sh->dev[i].flags)) { 1716 printk(KERN_ERR "block %d/%d not uptodate " 1717 "on parity calc\n", i, count); 1718 BUG(); 1719 } 1720 1721 i = raid6_next_disk(i, disks); 1722 } while (i != d0_idx); 1723 BUG_ON(count != syndrome_disks); 1724 1725 raid6_call.gen_syndrome(syndrome_disks+2, STRIPE_SIZE, ptrs); 1726 1727 switch(method) { 1728 case 
RECONSTRUCT_WRITE: 1729 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 1730 set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags); 1731 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); 1732 set_bit(R5_LOCKED, &sh->dev[qd_idx].flags); 1733 break; 1734 case UPDATE_PARITY: 1735 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 1736 set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags); 1737 break; 1738 } 1739 } 1740 1741 1742 /* Compute one missing block */ 1743 static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero) 1744 { 1745 int i, count, disks = sh->disks; 1746 void *ptr[MAX_XOR_BLOCKS], *dest, *p; 1747 int qd_idx = sh->qd_idx; 1748 1749 pr_debug("compute_block_1, stripe %llu, idx %d\n", 1750 (unsigned long long)sh->sector, dd_idx); 1751 1752 if ( dd_idx == qd_idx ) { 1753 /* We're actually computing the Q drive */ 1754 compute_parity6(sh, UPDATE_PARITY); 1755 } else { 1756 dest = page_address(sh->dev[dd_idx].page); 1757 if (!nozero) memset(dest, 0, STRIPE_SIZE); 1758 count = 0; 1759 for (i = disks ; i--; ) { 1760 if (i == dd_idx || i == qd_idx) 1761 continue; 1762 p = page_address(sh->dev[i].page); 1763 if (test_bit(R5_UPTODATE, &sh->dev[i].flags)) 1764 ptr[count++] = p; 1765 else 1766 printk("compute_block() %d, stripe %llu, %d" 1767 " not present\n", dd_idx, 1768 (unsigned long long)sh->sector, i); 1769 1770 check_xor(); 1771 } 1772 if (count) 1773 xor_blocks(count, STRIPE_SIZE, dest, ptr); 1774 if (!nozero) set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); 1775 else clear_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); 1776 } 1777 } 1778 1779 /* Compute two missing blocks */ 1780 static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2) 1781 { 1782 int i, count, disks = sh->disks; 1783 int syndrome_disks = sh->ddf_layout ? disks : disks-2; 1784 int d0_idx = raid6_d0(sh); 1785 int faila = -1, failb = -1; 1786 /**** FIX THIS: This could be very bad if disks is close to 256 ****/ 1787 void *ptrs[syndrome_disks+2]; 1788 1789 for (i = 0; i < disks ; i++) 1790 ptrs[i] = (void *)raid6_empty_zero_page; 1791 count = 0; 1792 i = d0_idx; 1793 do { 1794 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); 1795 1796 ptrs[slot] = page_address(sh->dev[i].page); 1797 1798 if (i == dd_idx1) 1799 faila = slot; 1800 if (i == dd_idx2) 1801 failb = slot; 1802 i = raid6_next_disk(i, disks); 1803 } while (i != d0_idx); 1804 BUG_ON(count != syndrome_disks); 1805 1806 BUG_ON(faila == failb); 1807 if ( failb < faila ) { int tmp = faila; faila = failb; failb = tmp; } 1808 1809 pr_debug("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n", 1810 (unsigned long long)sh->sector, dd_idx1, dd_idx2, 1811 faila, failb); 1812 1813 if (failb == syndrome_disks+1) { 1814 /* Q disk is one of the missing disks */ 1815 if (faila == syndrome_disks) { 1816 /* Missing P+Q, just recompute */ 1817 compute_parity6(sh, UPDATE_PARITY); 1818 return; 1819 } else { 1820 /* We're missing D+Q; recompute D from P */ 1821 compute_block_1(sh, ((dd_idx1 == sh->qd_idx) ? 1822 dd_idx2 : dd_idx1), 1823 0); 1824 compute_parity6(sh, UPDATE_PARITY); /* Is this necessary? */ 1825 return; 1826 } 1827 } 1828 1829 /* We're missing D+P or D+D; */ 1830 if (failb == syndrome_disks) { 1831 /* We're missing D+P. */ 1832 raid6_datap_recov(syndrome_disks+2, STRIPE_SIZE, faila, ptrs); 1833 } else { 1834 /* We're missing D+D. 
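 * raid6_2data_recov() rebuilds both missing data blocks from P, Q and
 * the surviving data pointed to by ptrs.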
*/ 1835 raid6_2data_recov(syndrome_disks+2, STRIPE_SIZE, faila, failb, 1836 ptrs); 1837 } 1838 1839 /* Both the above update both missing blocks */ 1840 set_bit(R5_UPTODATE, &sh->dev[dd_idx1].flags); 1841 set_bit(R5_UPTODATE, &sh->dev[dd_idx2].flags); 1842 } 1843 1844 static void 1845 schedule_reconstruction5(struct stripe_head *sh, struct stripe_head_state *s, 1846 int rcw, int expand) 1847 { 1848 int i, pd_idx = sh->pd_idx, disks = sh->disks; 1849 1850 if (rcw) { 1851 /* if we are not expanding this is a proper write request, and 1852 * there will be bios with new data to be drained into the 1853 * stripe cache 1854 */ 1855 if (!expand) { 1856 sh->reconstruct_state = reconstruct_state_drain_run; 1857 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); 1858 } else 1859 sh->reconstruct_state = reconstruct_state_run; 1860 1861 set_bit(STRIPE_OP_POSTXOR, &s->ops_request); 1862 1863 for (i = disks; i--; ) { 1864 struct r5dev *dev = &sh->dev[i]; 1865 1866 if (dev->towrite) { 1867 set_bit(R5_LOCKED, &dev->flags); 1868 set_bit(R5_Wantdrain, &dev->flags); 1869 if (!expand) 1870 clear_bit(R5_UPTODATE, &dev->flags); 1871 s->locked++; 1872 } 1873 } 1874 if (s->locked + 1 == disks) 1875 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) 1876 atomic_inc(&sh->raid_conf->pending_full_writes); 1877 } else { 1878 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || 1879 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); 1880 1881 sh->reconstruct_state = reconstruct_state_prexor_drain_run; 1882 set_bit(STRIPE_OP_PREXOR, &s->ops_request); 1883 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); 1884 set_bit(STRIPE_OP_POSTXOR, &s->ops_request); 1885 1886 for (i = disks; i--; ) { 1887 struct r5dev *dev = &sh->dev[i]; 1888 if (i == pd_idx) 1889 continue; 1890 1891 if (dev->towrite && 1892 (test_bit(R5_UPTODATE, &dev->flags) || 1893 test_bit(R5_Wantcompute, &dev->flags))) { 1894 set_bit(R5_Wantdrain, &dev->flags); 1895 set_bit(R5_LOCKED, &dev->flags); 1896 clear_bit(R5_UPTODATE, &dev->flags); 1897 s->locked++; 1898 } 1899 } 1900 } 1901 1902 /* keep the parity disk locked while asynchronous operations 1903 * are in flight 1904 */ 1905 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); 1906 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 1907 s->locked++; 1908 1909 pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n", 1910 __func__, (unsigned long long)sh->sector, 1911 s->locked, s->ops_request); 1912 } 1913 1914 /* 1915 * Each stripe/dev can have one or more bion attached. 1916 * toread/towrite point to the first in a chain. 1917 * The bi_next chain must be in order. 
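 *
 * add_stripe_bio() refuses a bio that would overlap one already queued
 * for the same stripe+device: it sets R5_Overlap, returns 0, and expects
 * the caller to wait on conf->wait_for_overlap before retrying.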
1918 */ 1919 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite) 1920 { 1921 struct bio **bip; 1922 raid5_conf_t *conf = sh->raid_conf; 1923 int firstwrite=0; 1924 1925 pr_debug("adding bh b#%llu to stripe s#%llu\n", 1926 (unsigned long long)bi->bi_sector, 1927 (unsigned long long)sh->sector); 1928 1929 1930 spin_lock(&sh->lock); 1931 spin_lock_irq(&conf->device_lock); 1932 if (forwrite) { 1933 bip = &sh->dev[dd_idx].towrite; 1934 if (*bip == NULL && sh->dev[dd_idx].written == NULL) 1935 firstwrite = 1; 1936 } else 1937 bip = &sh->dev[dd_idx].toread; 1938 while (*bip && (*bip)->bi_sector < bi->bi_sector) { 1939 if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector) 1940 goto overlap; 1941 bip = & (*bip)->bi_next; 1942 } 1943 if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9)) 1944 goto overlap; 1945 1946 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next); 1947 if (*bip) 1948 bi->bi_next = *bip; 1949 *bip = bi; 1950 bi->bi_phys_segments++; 1951 spin_unlock_irq(&conf->device_lock); 1952 spin_unlock(&sh->lock); 1953 1954 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", 1955 (unsigned long long)bi->bi_sector, 1956 (unsigned long long)sh->sector, dd_idx); 1957 1958 if (conf->mddev->bitmap && firstwrite) { 1959 bitmap_startwrite(conf->mddev->bitmap, sh->sector, 1960 STRIPE_SECTORS, 0); 1961 sh->bm_seq = conf->seq_flush+1; 1962 set_bit(STRIPE_BIT_DELAY, &sh->state); 1963 } 1964 1965 if (forwrite) { 1966 /* check if page is covered */ 1967 sector_t sector = sh->dev[dd_idx].sector; 1968 for (bi=sh->dev[dd_idx].towrite; 1969 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && 1970 bi && bi->bi_sector <= sector; 1971 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { 1972 if (bi->bi_sector + (bi->bi_size>>9) >= sector) 1973 sector = bi->bi_sector + (bi->bi_size>>9); 1974 } 1975 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) 1976 set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags); 1977 } 1978 return 1; 1979 1980 overlap: 1981 set_bit(R5_Overlap, &sh->dev[dd_idx].flags); 1982 spin_unlock_irq(&conf->device_lock); 1983 spin_unlock(&sh->lock); 1984 return 0; 1985 } 1986 1987 static void end_reshape(raid5_conf_t *conf); 1988 1989 static int page_is_zero(struct page *p) 1990 { 1991 char *a = page_address(p); 1992 return ((*(u32*)a) == 0 && 1993 memcmp(a, a+4, STRIPE_SIZE-4)==0); 1994 } 1995 1996 static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous, 1997 struct stripe_head *sh) 1998 { 1999 int sectors_per_chunk = 2000 previous ? (conf->prev_chunk >> 9) 2001 : (conf->chunk_size >> 9); 2002 int dd_idx; 2003 int chunk_offset = sector_div(stripe, sectors_per_chunk); 2004 int disks = previous ? 
conf->previous_raid_disks : conf->raid_disks; 2005 2006 raid5_compute_sector(conf, 2007 stripe * (disks - conf->max_degraded) 2008 *sectors_per_chunk + chunk_offset, 2009 previous, 2010 &dd_idx, sh); 2011 } 2012 2013 static void 2014 handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh, 2015 struct stripe_head_state *s, int disks, 2016 struct bio **return_bi) 2017 { 2018 int i; 2019 for (i = disks; i--; ) { 2020 struct bio *bi; 2021 int bitmap_end = 0; 2022 2023 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 2024 mdk_rdev_t *rdev; 2025 rcu_read_lock(); 2026 rdev = rcu_dereference(conf->disks[i].rdev); 2027 if (rdev && test_bit(In_sync, &rdev->flags)) 2028 /* multiple read failures in one stripe */ 2029 md_error(conf->mddev, rdev); 2030 rcu_read_unlock(); 2031 } 2032 spin_lock_irq(&conf->device_lock); 2033 /* fail all writes first */ 2034 bi = sh->dev[i].towrite; 2035 sh->dev[i].towrite = NULL; 2036 if (bi) { 2037 s->to_write--; 2038 bitmap_end = 1; 2039 } 2040 2041 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 2042 wake_up(&conf->wait_for_overlap); 2043 2044 while (bi && bi->bi_sector < 2045 sh->dev[i].sector + STRIPE_SECTORS) { 2046 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 2047 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2048 if (!raid5_dec_bi_phys_segments(bi)) { 2049 md_write_end(conf->mddev); 2050 bi->bi_next = *return_bi; 2051 *return_bi = bi; 2052 } 2053 bi = nextbi; 2054 } 2055 /* and fail all 'written' */ 2056 bi = sh->dev[i].written; 2057 sh->dev[i].written = NULL; 2058 if (bi) bitmap_end = 1; 2059 while (bi && bi->bi_sector < 2060 sh->dev[i].sector + STRIPE_SECTORS) { 2061 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); 2062 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2063 if (!raid5_dec_bi_phys_segments(bi)) { 2064 md_write_end(conf->mddev); 2065 bi->bi_next = *return_bi; 2066 *return_bi = bi; 2067 } 2068 bi = bi2; 2069 } 2070 2071 /* fail any reads if this device is non-operational and 2072 * the data has not reached the cache yet. 2073 */ 2074 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && 2075 (!test_bit(R5_Insync, &sh->dev[i].flags) || 2076 test_bit(R5_ReadError, &sh->dev[i].flags))) { 2077 bi = sh->dev[i].toread; 2078 sh->dev[i].toread = NULL; 2079 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 2080 wake_up(&conf->wait_for_overlap); 2081 if (bi) s->to_read--; 2082 while (bi && bi->bi_sector < 2083 sh->dev[i].sector + STRIPE_SECTORS) { 2084 struct bio *nextbi = 2085 r5_next_bio(bi, sh->dev[i].sector); 2086 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2087 if (!raid5_dec_bi_phys_segments(bi)) { 2088 bi->bi_next = *return_bi; 2089 *return_bi = bi; 2090 } 2091 bi = nextbi; 2092 } 2093 } 2094 spin_unlock_irq(&conf->device_lock); 2095 if (bitmap_end) 2096 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 2097 STRIPE_SECTORS, 0, 0); 2098 } 2099 2100 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 2101 if (atomic_dec_and_test(&conf->pending_full_writes)) 2102 md_wakeup_thread(conf->mddev->thread); 2103 } 2104 2105 /* fetch_block5 - checks the given member device to see if its data needs 2106 * to be read or computed to satisfy a request. 
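 * The block is computed in-core (R5_Wantcompute) only when it belongs to
 * the lone failed device and every other block is already up to date
 * (uptodate == disks - 1); otherwise it is simply read from the backing
 * disk, provided that disk is in-sync.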
2107 * 2108 * Returns 1 when no more member devices need to be checked, otherwise returns 2109 * 0 to tell the loop in handle_stripe_fill5 to continue 2110 */ 2111 static int fetch_block5(struct stripe_head *sh, struct stripe_head_state *s, 2112 int disk_idx, int disks) 2113 { 2114 struct r5dev *dev = &sh->dev[disk_idx]; 2115 struct r5dev *failed_dev = &sh->dev[s->failed_num]; 2116 2117 /* is the data in this block needed, and can we get it? */ 2118 if (!test_bit(R5_LOCKED, &dev->flags) && 2119 !test_bit(R5_UPTODATE, &dev->flags) && 2120 (dev->toread || 2121 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) || 2122 s->syncing || s->expanding || 2123 (s->failed && 2124 (failed_dev->toread || 2125 (failed_dev->towrite && 2126 !test_bit(R5_OVERWRITE, &failed_dev->flags)))))) { 2127 /* We would like to get this block, possibly by computing it, 2128 * otherwise read it if the backing disk is insync 2129 */ 2130 if ((s->uptodate == disks - 1) && 2131 (s->failed && disk_idx == s->failed_num)) { 2132 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 2133 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 2134 set_bit(R5_Wantcompute, &dev->flags); 2135 sh->ops.target = disk_idx; 2136 s->req_compute = 1; 2137 /* Careful: from this point on 'uptodate' is in the eye 2138 * of raid5_run_ops which services 'compute' operations 2139 * before writes. R5_Wantcompute flags a block that will 2140 * be R5_UPTODATE by the time it is needed for a 2141 * subsequent operation. 2142 */ 2143 s->uptodate++; 2144 return 1; /* uptodate + compute == disks */ 2145 } else if (test_bit(R5_Insync, &dev->flags)) { 2146 set_bit(R5_LOCKED, &dev->flags); 2147 set_bit(R5_Wantread, &dev->flags); 2148 s->locked++; 2149 pr_debug("Reading block %d (sync=%d)\n", disk_idx, 2150 s->syncing); 2151 } 2152 } 2153 2154 return 0; 2155 } 2156 2157 /** 2158 * handle_stripe_fill5 - read or compute data to satisfy pending requests. 
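 *
 * Fills are skipped while a compute is already in flight
 * (STRIPE_COMPUTE_RUN) or while a check/reconstruct is changing the
 * stripe contents; the scan stops as soon as fetch_block5() reports that
 * no further devices need to be examined.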
2159 */ 2160 static void handle_stripe_fill5(struct stripe_head *sh, 2161 struct stripe_head_state *s, int disks) 2162 { 2163 int i; 2164 2165 /* look for blocks to read/compute, skip this if a compute 2166 * is already in flight, or if the stripe contents are in the 2167 * midst of changing due to a write 2168 */ 2169 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && 2170 !sh->reconstruct_state) 2171 for (i = disks; i--; ) 2172 if (fetch_block5(sh, s, i, disks)) 2173 break; 2174 set_bit(STRIPE_HANDLE, &sh->state); 2175 } 2176 2177 static void handle_stripe_fill6(struct stripe_head *sh, 2178 struct stripe_head_state *s, struct r6_state *r6s, 2179 int disks) 2180 { 2181 int i; 2182 for (i = disks; i--; ) { 2183 struct r5dev *dev = &sh->dev[i]; 2184 if (!test_bit(R5_LOCKED, &dev->flags) && 2185 !test_bit(R5_UPTODATE, &dev->flags) && 2186 (dev->toread || (dev->towrite && 2187 !test_bit(R5_OVERWRITE, &dev->flags)) || 2188 s->syncing || s->expanding || 2189 (s->failed >= 1 && 2190 (sh->dev[r6s->failed_num[0]].toread || 2191 s->to_write)) || 2192 (s->failed >= 2 && 2193 (sh->dev[r6s->failed_num[1]].toread || 2194 s->to_write)))) { 2195 /* we would like to get this block, possibly 2196 * by computing it, but we might not be able to 2197 */ 2198 if ((s->uptodate == disks - 1) && 2199 (s->failed && (i == r6s->failed_num[0] || 2200 i == r6s->failed_num[1]))) { 2201 pr_debug("Computing stripe %llu block %d\n", 2202 (unsigned long long)sh->sector, i); 2203 compute_block_1(sh, i, 0); 2204 s->uptodate++; 2205 } else if ( s->uptodate == disks-2 && s->failed >= 2 ) { 2206 /* Computing 2-failure is *very* expensive; only 2207 * do it if failed >= 2 2208 */ 2209 int other; 2210 for (other = disks; other--; ) { 2211 if (other == i) 2212 continue; 2213 if (!test_bit(R5_UPTODATE, 2214 &sh->dev[other].flags)) 2215 break; 2216 } 2217 BUG_ON(other < 0); 2218 pr_debug("Computing stripe %llu blocks %d,%d\n", 2219 (unsigned long long)sh->sector, 2220 i, other); 2221 compute_block_2(sh, i, other); 2222 s->uptodate += 2; 2223 } else if (test_bit(R5_Insync, &dev->flags)) { 2224 set_bit(R5_LOCKED, &dev->flags); 2225 set_bit(R5_Wantread, &dev->flags); 2226 s->locked++; 2227 pr_debug("Reading block %d (sync=%d)\n", 2228 i, s->syncing); 2229 } 2230 } 2231 } 2232 set_bit(STRIPE_HANDLE, &sh->state); 2233 } 2234 2235 2236 /* handle_stripe_clean_event 2237 * any written block on an uptodate or failed drive can be returned. 2238 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but 2239 * never LOCKED, so we don't need to test 'failed' directly. 
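 * Completed write bios are unhooked from dev->written under device_lock
 * and chained onto *return_bi; once the last write for a device has
 * drained (dev->towrite == NULL) the matching bitmap range is closed with
 * bitmap_endwrite().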
2240 */ 2241 static void handle_stripe_clean_event(raid5_conf_t *conf, 2242 struct stripe_head *sh, int disks, struct bio **return_bi) 2243 { 2244 int i; 2245 struct r5dev *dev; 2246 2247 for (i = disks; i--; ) 2248 if (sh->dev[i].written) { 2249 dev = &sh->dev[i]; 2250 if (!test_bit(R5_LOCKED, &dev->flags) && 2251 test_bit(R5_UPTODATE, &dev->flags)) { 2252 /* We can return any write requests */ 2253 struct bio *wbi, *wbi2; 2254 int bitmap_end = 0; 2255 pr_debug("Return write for disc %d\n", i); 2256 spin_lock_irq(&conf->device_lock); 2257 wbi = dev->written; 2258 dev->written = NULL; 2259 while (wbi && wbi->bi_sector < 2260 dev->sector + STRIPE_SECTORS) { 2261 wbi2 = r5_next_bio(wbi, dev->sector); 2262 if (!raid5_dec_bi_phys_segments(wbi)) { 2263 md_write_end(conf->mddev); 2264 wbi->bi_next = *return_bi; 2265 *return_bi = wbi; 2266 } 2267 wbi = wbi2; 2268 } 2269 if (dev->towrite == NULL) 2270 bitmap_end = 1; 2271 spin_unlock_irq(&conf->device_lock); 2272 if (bitmap_end) 2273 bitmap_endwrite(conf->mddev->bitmap, 2274 sh->sector, 2275 STRIPE_SECTORS, 2276 !test_bit(STRIPE_DEGRADED, &sh->state), 2277 0); 2278 } 2279 } 2280 2281 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 2282 if (atomic_dec_and_test(&conf->pending_full_writes)) 2283 md_wakeup_thread(conf->mddev->thread); 2284 } 2285 2286 static void handle_stripe_dirtying5(raid5_conf_t *conf, 2287 struct stripe_head *sh, struct stripe_head_state *s, int disks) 2288 { 2289 int rmw = 0, rcw = 0, i; 2290 for (i = disks; i--; ) { 2291 /* would I have to read this buffer for read_modify_write */ 2292 struct r5dev *dev = &sh->dev[i]; 2293 if ((dev->towrite || i == sh->pd_idx) && 2294 !test_bit(R5_LOCKED, &dev->flags) && 2295 !(test_bit(R5_UPTODATE, &dev->flags) || 2296 test_bit(R5_Wantcompute, &dev->flags))) { 2297 if (test_bit(R5_Insync, &dev->flags)) 2298 rmw++; 2299 else 2300 rmw += 2*disks; /* cannot read it */ 2301 } 2302 /* Would I have to read this buffer for reconstruct_write */ 2303 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx && 2304 !test_bit(R5_LOCKED, &dev->flags) && 2305 !(test_bit(R5_UPTODATE, &dev->flags) || 2306 test_bit(R5_Wantcompute, &dev->flags))) { 2307 if (test_bit(R5_Insync, &dev->flags)) rcw++; 2308 else 2309 rcw += 2*disks; 2310 } 2311 } 2312 pr_debug("for sector %llu, rmw=%d rcw=%d\n", 2313 (unsigned long long)sh->sector, rmw, rcw); 2314 set_bit(STRIPE_HANDLE, &sh->state); 2315 if (rmw < rcw && rmw > 0) 2316 /* prefer read-modify-write, but need to get some data */ 2317 for (i = disks; i--; ) { 2318 struct r5dev *dev = &sh->dev[i]; 2319 if ((dev->towrite || i == sh->pd_idx) && 2320 !test_bit(R5_LOCKED, &dev->flags) && 2321 !(test_bit(R5_UPTODATE, &dev->flags) || 2322 test_bit(R5_Wantcompute, &dev->flags)) && 2323 test_bit(R5_Insync, &dev->flags)) { 2324 if ( 2325 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2326 pr_debug("Read_old block " 2327 "%d for r-m-w\n", i); 2328 set_bit(R5_LOCKED, &dev->flags); 2329 set_bit(R5_Wantread, &dev->flags); 2330 s->locked++; 2331 } else { 2332 set_bit(STRIPE_DELAYED, &sh->state); 2333 set_bit(STRIPE_HANDLE, &sh->state); 2334 } 2335 } 2336 } 2337 if (rcw <= rmw && rcw > 0) 2338 /* want reconstruct write, but need to get some data */ 2339 for (i = disks; i--; ) { 2340 struct r5dev *dev = &sh->dev[i]; 2341 if (!test_bit(R5_OVERWRITE, &dev->flags) && 2342 i != sh->pd_idx && 2343 !test_bit(R5_LOCKED, &dev->flags) && 2344 !(test_bit(R5_UPTODATE, &dev->flags) || 2345 test_bit(R5_Wantcompute, &dev->flags)) && 2346 test_bit(R5_Insync, &dev->flags)) { 2347 if ( 2348 
test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2349 pr_debug("Read_old block " 2350 "%d for Reconstruct\n", i); 2351 set_bit(R5_LOCKED, &dev->flags); 2352 set_bit(R5_Wantread, &dev->flags); 2353 s->locked++; 2354 } else { 2355 set_bit(STRIPE_DELAYED, &sh->state); 2356 set_bit(STRIPE_HANDLE, &sh->state); 2357 } 2358 } 2359 } 2360 /* now if nothing is locked, and if we have enough data, 2361 * we can start a write request 2362 */ 2363 /* since handle_stripe can be called at any time we need to handle the 2364 * case where a compute block operation has been submitted and then a 2365 * subsequent call wants to start a write request. raid5_run_ops only 2366 * handles the case where compute block and postxor are requested 2367 * simultaneously. If this is not the case then new writes need to be 2368 * held off until the compute completes. 2369 */ 2370 if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && 2371 (s->locked == 0 && (rcw == 0 || rmw == 0) && 2372 !test_bit(STRIPE_BIT_DELAY, &sh->state))) 2373 schedule_reconstruction5(sh, s, rcw == 0, 0); 2374 } 2375 2376 static void handle_stripe_dirtying6(raid5_conf_t *conf, 2377 struct stripe_head *sh, struct stripe_head_state *s, 2378 struct r6_state *r6s, int disks) 2379 { 2380 int rcw = 0, must_compute = 0, pd_idx = sh->pd_idx, i; 2381 int qd_idx = sh->qd_idx; 2382 for (i = disks; i--; ) { 2383 struct r5dev *dev = &sh->dev[i]; 2384 /* Would I have to read this buffer for reconstruct_write */ 2385 if (!test_bit(R5_OVERWRITE, &dev->flags) 2386 && i != pd_idx && i != qd_idx 2387 && (!test_bit(R5_LOCKED, &dev->flags) 2388 ) && 2389 !test_bit(R5_UPTODATE, &dev->flags)) { 2390 if (test_bit(R5_Insync, &dev->flags)) rcw++; 2391 else { 2392 pr_debug("raid6: must_compute: " 2393 "disk %d flags=%#lx\n", i, dev->flags); 2394 must_compute++; 2395 } 2396 } 2397 } 2398 pr_debug("for sector %llu, rcw=%d, must_compute=%d\n", 2399 (unsigned long long)sh->sector, rcw, must_compute); 2400 set_bit(STRIPE_HANDLE, &sh->state); 2401 2402 if (rcw > 0) 2403 /* want reconstruct write, but need to get some data */ 2404 for (i = disks; i--; ) { 2405 struct r5dev *dev = &sh->dev[i]; 2406 if (!test_bit(R5_OVERWRITE, &dev->flags) 2407 && !(s->failed == 0 && (i == pd_idx || i == qd_idx)) 2408 && !test_bit(R5_LOCKED, &dev->flags) && 2409 !test_bit(R5_UPTODATE, &dev->flags) && 2410 test_bit(R5_Insync, &dev->flags)) { 2411 if ( 2412 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2413 pr_debug("Read_old stripe %llu " 2414 "block %d for Reconstruct\n", 2415 (unsigned long long)sh->sector, i); 2416 set_bit(R5_LOCKED, &dev->flags); 2417 set_bit(R5_Wantread, &dev->flags); 2418 s->locked++; 2419 } else { 2420 pr_debug("Request delayed stripe %llu " 2421 "block %d for Reconstruct\n", 2422 (unsigned long long)sh->sector, i); 2423 set_bit(STRIPE_DELAYED, &sh->state); 2424 set_bit(STRIPE_HANDLE, &sh->state); 2425 } 2426 } 2427 } 2428 /* now if nothing is locked, and if we have enough data, we can start a 2429 * write request 2430 */ 2431 if (s->locked == 0 && rcw == 0 && 2432 !test_bit(STRIPE_BIT_DELAY, &sh->state)) { 2433 if (must_compute > 0) { 2434 /* We have failed blocks and need to compute them */ 2435 switch (s->failed) { 2436 case 0: 2437 BUG(); 2438 case 1: 2439 compute_block_1(sh, r6s->failed_num[0], 0); 2440 break; 2441 case 2: 2442 compute_block_2(sh, r6s->failed_num[0], 2443 r6s->failed_num[1]); 2444 break; 2445 default: /* This request should have been failed? 
*/ 2446 BUG(); 2447 } 2448 } 2449 2450 pr_debug("Computing parity for stripe %llu\n", 2451 (unsigned long long)sh->sector); 2452 compute_parity6(sh, RECONSTRUCT_WRITE); 2453 /* now every locked buffer is ready to be written */ 2454 for (i = disks; i--; ) 2455 if (test_bit(R5_LOCKED, &sh->dev[i].flags)) { 2456 pr_debug("Writing stripe %llu block %d\n", 2457 (unsigned long long)sh->sector, i); 2458 s->locked++; 2459 set_bit(R5_Wantwrite, &sh->dev[i].flags); 2460 } 2461 if (s->locked == disks) 2462 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) 2463 atomic_inc(&conf->pending_full_writes); 2464 /* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */ 2465 set_bit(STRIPE_INSYNC, &sh->state); 2466 2467 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2468 atomic_dec(&conf->preread_active_stripes); 2469 if (atomic_read(&conf->preread_active_stripes) < 2470 IO_THRESHOLD) 2471 md_wakeup_thread(conf->mddev->thread); 2472 } 2473 } 2474 } 2475 2476 static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh, 2477 struct stripe_head_state *s, int disks) 2478 { 2479 struct r5dev *dev = NULL; 2480 2481 set_bit(STRIPE_HANDLE, &sh->state); 2482 2483 switch (sh->check_state) { 2484 case check_state_idle: 2485 /* start a new check operation if there are no failures */ 2486 if (s->failed == 0) { 2487 BUG_ON(s->uptodate != disks); 2488 sh->check_state = check_state_run; 2489 set_bit(STRIPE_OP_CHECK, &s->ops_request); 2490 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); 2491 s->uptodate--; 2492 break; 2493 } 2494 dev = &sh->dev[s->failed_num]; 2495 /* fall through */ 2496 case check_state_compute_result: 2497 sh->check_state = check_state_idle; 2498 if (!dev) 2499 dev = &sh->dev[sh->pd_idx]; 2500 2501 /* check that a write has not made the stripe insync */ 2502 if (test_bit(STRIPE_INSYNC, &sh->state)) 2503 break; 2504 2505 /* either failed parity check, or recovery is happening */ 2506 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags)); 2507 BUG_ON(s->uptodate != disks); 2508 2509 set_bit(R5_LOCKED, &dev->flags); 2510 s->locked++; 2511 set_bit(R5_Wantwrite, &dev->flags); 2512 2513 clear_bit(STRIPE_DEGRADED, &sh->state); 2514 set_bit(STRIPE_INSYNC, &sh->state); 2515 break; 2516 case check_state_run: 2517 break; /* we will be called again upon completion */ 2518 case check_state_check_result: 2519 sh->check_state = check_state_idle; 2520 2521 /* if a failure occurred during the check operation, leave 2522 * STRIPE_INSYNC not set and let the stripe be handled again 2523 */ 2524 if (s->failed) 2525 break; 2526 2527 /* handle a successful check operation, if parity is correct 2528 * we are done. Otherwise update the mismatch count and repair 2529 * parity if !MD_RECOVERY_CHECK 2530 */ 2531 if (sh->ops.zero_sum_result == 0) 2532 /* parity is correct (on disc, 2533 * not in buffer any more) 2534 */ 2535 set_bit(STRIPE_INSYNC, &sh->state); 2536 else { 2537 conf->mddev->resync_mismatches += STRIPE_SECTORS; 2538 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 2539 /* don't try to repair!! 
*/ 2540 set_bit(STRIPE_INSYNC, &sh->state); 2541 else { 2542 sh->check_state = check_state_compute_run; 2543 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 2544 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 2545 set_bit(R5_Wantcompute, 2546 &sh->dev[sh->pd_idx].flags); 2547 sh->ops.target = sh->pd_idx; 2548 s->uptodate++; 2549 } 2550 } 2551 break; 2552 case check_state_compute_run: 2553 break; 2554 default: 2555 printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n", 2556 __func__, sh->check_state, 2557 (unsigned long long) sh->sector); 2558 BUG(); 2559 } 2560 } 2561 2562 2563 static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh, 2564 struct stripe_head_state *s, 2565 struct r6_state *r6s, struct page *tmp_page, 2566 int disks) 2567 { 2568 int update_p = 0, update_q = 0; 2569 struct r5dev *dev; 2570 int pd_idx = sh->pd_idx; 2571 int qd_idx = sh->qd_idx; 2572 2573 set_bit(STRIPE_HANDLE, &sh->state); 2574 2575 BUG_ON(s->failed > 2); 2576 BUG_ON(s->uptodate < disks); 2577 /* Want to check and possibly repair P and Q. 2578 * However there could be one 'failed' device, in which 2579 * case we can only check one of them, possibly using the 2580 * other to generate missing data 2581 */ 2582 2583 /* If !tmp_page, we cannot do the calculations, 2584 * but as we have set STRIPE_HANDLE, we will soon be called 2585 * by stripe_handle with a tmp_page - just wait until then. 2586 */ 2587 if (tmp_page) { 2588 if (s->failed == r6s->q_failed) { 2589 /* The only possible failed device holds 'Q', so it 2590 * makes sense to check P (If anything else were failed, 2591 * we would have used P to recreate it). 2592 */ 2593 compute_block_1(sh, pd_idx, 1); 2594 if (!page_is_zero(sh->dev[pd_idx].page)) { 2595 compute_block_1(sh, pd_idx, 0); 2596 update_p = 1; 2597 } 2598 } 2599 if (!r6s->q_failed && s->failed < 2) { 2600 /* q is not failed, and we didn't use it to generate 2601 * anything, so it makes sense to check it 2602 */ 2603 memcpy(page_address(tmp_page), 2604 page_address(sh->dev[qd_idx].page), 2605 STRIPE_SIZE); 2606 compute_parity6(sh, UPDATE_PARITY); 2607 if (memcmp(page_address(tmp_page), 2608 page_address(sh->dev[qd_idx].page), 2609 STRIPE_SIZE) != 0) { 2610 clear_bit(STRIPE_INSYNC, &sh->state); 2611 update_q = 1; 2612 } 2613 } 2614 if (update_p || update_q) { 2615 conf->mddev->resync_mismatches += STRIPE_SECTORS; 2616 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 2617 /* don't try to repair!! 
*/ 2618 update_p = update_q = 0; 2619 } 2620 2621 /* now write out any block on a failed drive, 2622 * or P or Q if they need it 2623 */ 2624 2625 if (s->failed == 2) { 2626 dev = &sh->dev[r6s->failed_num[1]]; 2627 s->locked++; 2628 set_bit(R5_LOCKED, &dev->flags); 2629 set_bit(R5_Wantwrite, &dev->flags); 2630 } 2631 if (s->failed >= 1) { 2632 dev = &sh->dev[r6s->failed_num[0]]; 2633 s->locked++; 2634 set_bit(R5_LOCKED, &dev->flags); 2635 set_bit(R5_Wantwrite, &dev->flags); 2636 } 2637 2638 if (update_p) { 2639 dev = &sh->dev[pd_idx]; 2640 s->locked++; 2641 set_bit(R5_LOCKED, &dev->flags); 2642 set_bit(R5_Wantwrite, &dev->flags); 2643 } 2644 if (update_q) { 2645 dev = &sh->dev[qd_idx]; 2646 s->locked++; 2647 set_bit(R5_LOCKED, &dev->flags); 2648 set_bit(R5_Wantwrite, &dev->flags); 2649 } 2650 clear_bit(STRIPE_DEGRADED, &sh->state); 2651 2652 set_bit(STRIPE_INSYNC, &sh->state); 2653 } 2654 } 2655 2656 static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh, 2657 struct r6_state *r6s) 2658 { 2659 int i; 2660 2661 /* We have read all the blocks in this stripe and now we need to 2662 * copy some of them into a target stripe for expand. 2663 */ 2664 struct dma_async_tx_descriptor *tx = NULL; 2665 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state); 2666 for (i = 0; i < sh->disks; i++) 2667 if (i != sh->pd_idx && i != sh->qd_idx) { 2668 int dd_idx, j; 2669 struct stripe_head *sh2; 2670 2671 sector_t bn = compute_blocknr(sh, i, 1); 2672 sector_t s = raid5_compute_sector(conf, bn, 0, 2673 &dd_idx, NULL); 2674 sh2 = get_active_stripe(conf, s, 0, 1); 2675 if (sh2 == NULL) 2676 /* so far only the early blocks of this stripe 2677 * have been requested. When later blocks 2678 * get requested, we will try again 2679 */ 2680 continue; 2681 if (!test_bit(STRIPE_EXPANDING, &sh2->state) || 2682 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) { 2683 /* must have already done this block */ 2684 release_stripe(sh2); 2685 continue; 2686 } 2687 2688 /* place all the copies on one channel */ 2689 tx = async_memcpy(sh2->dev[dd_idx].page, 2690 sh->dev[i].page, 0, 0, STRIPE_SIZE, 2691 ASYNC_TX_DEP_ACK, tx, NULL, NULL); 2692 2693 set_bit(R5_Expanded, &sh2->dev[dd_idx].flags); 2694 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags); 2695 for (j = 0; j < conf->raid_disks; j++) 2696 if (j != sh2->pd_idx && 2697 (!r6s || j != sh2->qd_idx) && 2698 !test_bit(R5_Expanded, &sh2->dev[j].flags)) 2699 break; 2700 if (j == conf->raid_disks) { 2701 set_bit(STRIPE_EXPAND_READY, &sh2->state); 2702 set_bit(STRIPE_HANDLE, &sh2->state); 2703 } 2704 release_stripe(sh2); 2705 2706 } 2707 /* done submitting copies, wait for them to complete */ 2708 if (tx) { 2709 async_tx_ack(tx); 2710 dma_wait_for_async_tx(tx); 2711 } 2712 } 2713 2714 2715 /* 2716 * handle_stripe - do things to a stripe. 2717 * 2718 * We lock the stripe and then examine the state of various bits 2719 * to see what needs to be done. 2720 * Possible results: 2721 * return some read request which now have data 2722 * return some write requests which are safely on disc 2723 * schedule a read on some buffers 2724 * schedule a write of some buffers 2725 * return confirmation of parity correctness 2726 * 2727 * buffers are taken off read_list or write_list, and bh_cache buffers 2728 * get BH_Lock set before the stripe lock is released. 
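 *
 * In outline, handle_stripe5() counts per-device state under rcu, fails
 * outstanding requests if more than one device is gone, returns writes
 * that are safely on disk, reads or computes missing blocks, finishes any
 * completed reconstruction, and only then considers new writes and parity
 * checks.  For new writes, handle_stripe_dirtying5() weighs the pre-reads
 * needed for read-modify-write (the old copies of the blocks being
 * written, plus the old parity) against those needed for reconstruct-write
 * (the data blocks that are not being overwritten) and schedules the
 * cheaper plan; e.g. on a 5-drive array with nothing cached, rewriting a
 * single block costs two pre-reads as r-m-w but three as reconstruct.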
2729 * 2730 */ 2731 2732 static bool handle_stripe5(struct stripe_head *sh) 2733 { 2734 raid5_conf_t *conf = sh->raid_conf; 2735 int disks = sh->disks, i; 2736 struct bio *return_bi = NULL; 2737 struct stripe_head_state s; 2738 struct r5dev *dev; 2739 mdk_rdev_t *blocked_rdev = NULL; 2740 int prexor; 2741 2742 memset(&s, 0, sizeof(s)); 2743 pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d " 2744 "reconstruct:%d\n", (unsigned long long)sh->sector, sh->state, 2745 atomic_read(&sh->count), sh->pd_idx, sh->check_state, 2746 sh->reconstruct_state); 2747 2748 spin_lock(&sh->lock); 2749 clear_bit(STRIPE_HANDLE, &sh->state); 2750 clear_bit(STRIPE_DELAYED, &sh->state); 2751 2752 s.syncing = test_bit(STRIPE_SYNCING, &sh->state); 2753 s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); 2754 s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); 2755 2756 /* Now to look around and see what can be done */ 2757 rcu_read_lock(); 2758 for (i=disks; i--; ) { 2759 mdk_rdev_t *rdev; 2760 struct r5dev *dev = &sh->dev[i]; 2761 clear_bit(R5_Insync, &dev->flags); 2762 2763 pr_debug("check %d: state 0x%lx toread %p read %p write %p " 2764 "written %p\n", i, dev->flags, dev->toread, dev->read, 2765 dev->towrite, dev->written); 2766 2767 /* maybe we can request a biofill operation 2768 * 2769 * new wantfill requests are only permitted while 2770 * ops_complete_biofill is guaranteed to be inactive 2771 */ 2772 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread && 2773 !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) 2774 set_bit(R5_Wantfill, &dev->flags); 2775 2776 /* now count some things */ 2777 if (test_bit(R5_LOCKED, &dev->flags)) s.locked++; 2778 if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++; 2779 if (test_bit(R5_Wantcompute, &dev->flags)) s.compute++; 2780 2781 if (test_bit(R5_Wantfill, &dev->flags)) 2782 s.to_fill++; 2783 else if (dev->toread) 2784 s.to_read++; 2785 if (dev->towrite) { 2786 s.to_write++; 2787 if (!test_bit(R5_OVERWRITE, &dev->flags)) 2788 s.non_overwrite++; 2789 } 2790 if (dev->written) 2791 s.written++; 2792 rdev = rcu_dereference(conf->disks[i].rdev); 2793 if (blocked_rdev == NULL && 2794 rdev && unlikely(test_bit(Blocked, &rdev->flags))) { 2795 blocked_rdev = rdev; 2796 atomic_inc(&rdev->nr_pending); 2797 } 2798 if (!rdev || !test_bit(In_sync, &rdev->flags)) { 2799 /* The ReadError flag will just be confusing now */ 2800 clear_bit(R5_ReadError, &dev->flags); 2801 clear_bit(R5_ReWrite, &dev->flags); 2802 } 2803 if (!rdev || !test_bit(In_sync, &rdev->flags) 2804 || test_bit(R5_ReadError, &dev->flags)) { 2805 s.failed++; 2806 s.failed_num = i; 2807 } else 2808 set_bit(R5_Insync, &dev->flags); 2809 } 2810 rcu_read_unlock(); 2811 2812 if (unlikely(blocked_rdev)) { 2813 if (s.syncing || s.expanding || s.expanded || 2814 s.to_write || s.written) { 2815 set_bit(STRIPE_HANDLE, &sh->state); 2816 goto unlock; 2817 } 2818 /* There is nothing for the blocked_rdev to block */ 2819 rdev_dec_pending(blocked_rdev, conf->mddev); 2820 blocked_rdev = NULL; 2821 } 2822 2823 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { 2824 set_bit(STRIPE_OP_BIOFILL, &s.ops_request); 2825 set_bit(STRIPE_BIOFILL_RUN, &sh->state); 2826 } 2827 2828 pr_debug("locked=%d uptodate=%d to_read=%d" 2829 " to_write=%d failed=%d failed_num=%d\n", 2830 s.locked, s.uptodate, s.to_read, s.to_write, 2831 s.failed, s.failed_num); 2832 /* check if the array has lost two devices and, if so, some requests might 2833 * need to be failed 2834 */ 2835 if (s.failed > 1 && s.to_read+s.to_write+s.written) 
2836 handle_failed_stripe(conf, sh, &s, disks, &return_bi); 2837 if (s.failed > 1 && s.syncing) { 2838 md_done_sync(conf->mddev, STRIPE_SECTORS,0); 2839 clear_bit(STRIPE_SYNCING, &sh->state); 2840 s.syncing = 0; 2841 } 2842 2843 /* might be able to return some write requests if the parity block 2844 * is safe, or on a failed drive 2845 */ 2846 dev = &sh->dev[sh->pd_idx]; 2847 if ( s.written && 2848 ((test_bit(R5_Insync, &dev->flags) && 2849 !test_bit(R5_LOCKED, &dev->flags) && 2850 test_bit(R5_UPTODATE, &dev->flags)) || 2851 (s.failed == 1 && s.failed_num == sh->pd_idx))) 2852 handle_stripe_clean_event(conf, sh, disks, &return_bi); 2853 2854 /* Now we might consider reading some blocks, either to check/generate 2855 * parity, or to satisfy requests 2856 * or to load a block that is being partially written. 2857 */ 2858 if (s.to_read || s.non_overwrite || 2859 (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding) 2860 handle_stripe_fill5(sh, &s, disks); 2861 2862 /* Now we check to see if any write operations have recently 2863 * completed 2864 */ 2865 prexor = 0; 2866 if (sh->reconstruct_state == reconstruct_state_prexor_drain_result) 2867 prexor = 1; 2868 if (sh->reconstruct_state == reconstruct_state_drain_result || 2869 sh->reconstruct_state == reconstruct_state_prexor_drain_result) { 2870 sh->reconstruct_state = reconstruct_state_idle; 2871 2872 /* All the 'written' buffers and the parity block are ready to 2873 * be written back to disk 2874 */ 2875 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags)); 2876 for (i = disks; i--; ) { 2877 dev = &sh->dev[i]; 2878 if (test_bit(R5_LOCKED, &dev->flags) && 2879 (i == sh->pd_idx || dev->written)) { 2880 pr_debug("Writing block %d\n", i); 2881 set_bit(R5_Wantwrite, &dev->flags); 2882 if (prexor) 2883 continue; 2884 if (!test_bit(R5_Insync, &dev->flags) || 2885 (i == sh->pd_idx && s.failed == 0)) 2886 set_bit(STRIPE_INSYNC, &sh->state); 2887 } 2888 } 2889 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2890 atomic_dec(&conf->preread_active_stripes); 2891 if (atomic_read(&conf->preread_active_stripes) < 2892 IO_THRESHOLD) 2893 md_wakeup_thread(conf->mddev->thread); 2894 } 2895 } 2896 2897 /* Now to consider new write requests and what else, if anything 2898 * should be read. We do not handle new writes when: 2899 * 1/ A 'write' operation (copy+xor) is already in flight. 2900 * 2/ A 'check' operation is in flight, as it may clobber the parity 2901 * block. 2902 */ 2903 if (s.to_write && !sh->reconstruct_state && !sh->check_state) 2904 handle_stripe_dirtying5(conf, sh, &s, disks); 2905 2906 /* maybe we need to check and possibly fix the parity for this stripe 2907 * Any reads will already have been scheduled, so we just see if enough 2908 * data is available. The parity check is held off while parity 2909 * dependent operations are in flight. 
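 * handle_parity_checks5() is a small state machine: check_state_run waits
 * for the xor check to complete, check_state_check_result either marks
 * the stripe in-sync or (unless MD_RECOVERY_CHECK is set) schedules a
 * recompute of the parity block, and check_state_compute_result locks the
 * repaired block and flags it for write-out.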
2910 */ 2911 if (sh->check_state || 2912 (s.syncing && s.locked == 0 && 2913 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && 2914 !test_bit(STRIPE_INSYNC, &sh->state))) 2915 handle_parity_checks5(conf, sh, &s, disks); 2916 2917 if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { 2918 md_done_sync(conf->mddev, STRIPE_SECTORS,1); 2919 clear_bit(STRIPE_SYNCING, &sh->state); 2920 } 2921 2922 /* If the failed drive is just a ReadError, then we might need to progress 2923 * the repair/check process 2924 */ 2925 if (s.failed == 1 && !conf->mddev->ro && 2926 test_bit(R5_ReadError, &sh->dev[s.failed_num].flags) 2927 && !test_bit(R5_LOCKED, &sh->dev[s.failed_num].flags) 2928 && test_bit(R5_UPTODATE, &sh->dev[s.failed_num].flags) 2929 ) { 2930 dev = &sh->dev[s.failed_num]; 2931 if (!test_bit(R5_ReWrite, &dev->flags)) { 2932 set_bit(R5_Wantwrite, &dev->flags); 2933 set_bit(R5_ReWrite, &dev->flags); 2934 set_bit(R5_LOCKED, &dev->flags); 2935 s.locked++; 2936 } else { 2937 /* let's read it back */ 2938 set_bit(R5_Wantread, &dev->flags); 2939 set_bit(R5_LOCKED, &dev->flags); 2940 s.locked++; 2941 } 2942 } 2943 2944 /* Finish reconstruct operations initiated by the expansion process */ 2945 if (sh->reconstruct_state == reconstruct_state_result) { 2946 struct stripe_head *sh2 2947 = get_active_stripe(conf, sh->sector, 1, 1); 2948 if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) { 2949 /* sh cannot be written until sh2 has been read. 2950 * so arrange for sh to be delayed a little 2951 */ 2952 set_bit(STRIPE_DELAYED, &sh->state); 2953 set_bit(STRIPE_HANDLE, &sh->state); 2954 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, 2955 &sh2->state)) 2956 atomic_inc(&conf->preread_active_stripes); 2957 release_stripe(sh2); 2958 goto unlock; 2959 } 2960 if (sh2) 2961 release_stripe(sh2); 2962 2963 sh->reconstruct_state = reconstruct_state_idle; 2964 clear_bit(STRIPE_EXPANDING, &sh->state); 2965 for (i = conf->raid_disks; i--; ) { 2966 set_bit(R5_Wantwrite, &sh->dev[i].flags); 2967 set_bit(R5_LOCKED, &sh->dev[i].flags); 2968 s.locked++; 2969 } 2970 } 2971 2972 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && 2973 !sh->reconstruct_state) { 2974 /* Need to write out all blocks after computing parity */ 2975 sh->disks = conf->raid_disks; 2976 stripe_set_idx(sh->sector, conf, 0, sh); 2977 schedule_reconstruction5(sh, &s, 1, 1); 2978 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { 2979 clear_bit(STRIPE_EXPAND_READY, &sh->state); 2980 atomic_dec(&conf->reshape_stripes); 2981 wake_up(&conf->wait_for_overlap); 2982 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 2983 } 2984 2985 if (s.expanding && s.locked == 0 && 2986 !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) 2987 handle_stripe_expansion(conf, sh, NULL); 2988 2989 unlock: 2990 spin_unlock(&sh->lock); 2991 2992 /* wait for this device to become unblocked */ 2993 if (unlikely(blocked_rdev)) 2994 md_wait_for_blocked_rdev(blocked_rdev, conf->mddev); 2995 2996 if (s.ops_request) 2997 raid5_run_ops(sh, s.ops_request); 2998 2999 ops_run_io(sh, &s); 3000 3001 return_io(return_bi); 3002 3003 return blocked_rdev == NULL; 3004 } 3005 3006 static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page) 3007 { 3008 raid5_conf_t *conf = sh->raid_conf; 3009 int disks = sh->disks; 3010 struct bio *return_bi = NULL; 3011 int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx; 3012 struct stripe_head_state s; 3013 struct r6_state r6s; 3014 struct r5dev *dev, *pdev, *qdev; 3015 mdk_rdev_t *blocked_rdev = NULL; 3016 3017 pr_debug("handling 
stripe %llu, state=%#lx cnt=%d, " 3018 "pd_idx=%d, qd_idx=%d\n", 3019 (unsigned long long)sh->sector, sh->state, 3020 atomic_read(&sh->count), pd_idx, qd_idx); 3021 memset(&s, 0, sizeof(s)); 3022 3023 spin_lock(&sh->lock); 3024 clear_bit(STRIPE_HANDLE, &sh->state); 3025 clear_bit(STRIPE_DELAYED, &sh->state); 3026 3027 s.syncing = test_bit(STRIPE_SYNCING, &sh->state); 3028 s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); 3029 s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); 3030 /* Now to look around and see what can be done */ 3031 3032 rcu_read_lock(); 3033 for (i=disks; i--; ) { 3034 mdk_rdev_t *rdev; 3035 dev = &sh->dev[i]; 3036 clear_bit(R5_Insync, &dev->flags); 3037 3038 pr_debug("check %d: state 0x%lx read %p write %p written %p\n", 3039 i, dev->flags, dev->toread, dev->towrite, dev->written); 3040 /* maybe we can reply to a read */ 3041 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) { 3042 struct bio *rbi, *rbi2; 3043 pr_debug("Return read for disc %d\n", i); 3044 spin_lock_irq(&conf->device_lock); 3045 rbi = dev->toread; 3046 dev->toread = NULL; 3047 if (test_and_clear_bit(R5_Overlap, &dev->flags)) 3048 wake_up(&conf->wait_for_overlap); 3049 spin_unlock_irq(&conf->device_lock); 3050 while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) { 3051 copy_data(0, rbi, dev->page, dev->sector); 3052 rbi2 = r5_next_bio(rbi, dev->sector); 3053 spin_lock_irq(&conf->device_lock); 3054 if (!raid5_dec_bi_phys_segments(rbi)) { 3055 rbi->bi_next = return_bi; 3056 return_bi = rbi; 3057 } 3058 spin_unlock_irq(&conf->device_lock); 3059 rbi = rbi2; 3060 } 3061 } 3062 3063 /* now count some things */ 3064 if (test_bit(R5_LOCKED, &dev->flags)) s.locked++; 3065 if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++; 3066 3067 3068 if (dev->toread) 3069 s.to_read++; 3070 if (dev->towrite) { 3071 s.to_write++; 3072 if (!test_bit(R5_OVERWRITE, &dev->flags)) 3073 s.non_overwrite++; 3074 } 3075 if (dev->written) 3076 s.written++; 3077 rdev = rcu_dereference(conf->disks[i].rdev); 3078 if (blocked_rdev == NULL && 3079 rdev && unlikely(test_bit(Blocked, &rdev->flags))) { 3080 blocked_rdev = rdev; 3081 atomic_inc(&rdev->nr_pending); 3082 } 3083 if (!rdev || !test_bit(In_sync, &rdev->flags)) { 3084 /* The ReadError flag will just be confusing now */ 3085 clear_bit(R5_ReadError, &dev->flags); 3086 clear_bit(R5_ReWrite, &dev->flags); 3087 } 3088 if (!rdev || !test_bit(In_sync, &rdev->flags) 3089 || test_bit(R5_ReadError, &dev->flags)) { 3090 if (s.failed < 2) 3091 r6s.failed_num[s.failed] = i; 3092 s.failed++; 3093 } else 3094 set_bit(R5_Insync, &dev->flags); 3095 } 3096 rcu_read_unlock(); 3097 3098 if (unlikely(blocked_rdev)) { 3099 if (s.syncing || s.expanding || s.expanded || 3100 s.to_write || s.written) { 3101 set_bit(STRIPE_HANDLE, &sh->state); 3102 goto unlock; 3103 } 3104 /* There is nothing for the blocked_rdev to block */ 3105 rdev_dec_pending(blocked_rdev, conf->mddev); 3106 blocked_rdev = NULL; 3107 } 3108 3109 pr_debug("locked=%d uptodate=%d to_read=%d" 3110 " to_write=%d failed=%d failed_num=%d,%d\n", 3111 s.locked, s.uptodate, s.to_read, s.to_write, s.failed, 3112 r6s.failed_num[0], r6s.failed_num[1]); 3113 /* check if the array has lost >2 devices and, if so, some requests 3114 * might need to be failed 3115 */ 3116 if (s.failed > 2 && s.to_read+s.to_write+s.written) 3117 handle_failed_stripe(conf, sh, &s, disks, &return_bi); 3118 if (s.failed > 2 && s.syncing) { 3119 md_done_sync(conf->mddev, STRIPE_SECTORS,0); 3120 clear_bit(STRIPE_SYNCING, &sh->state); 3121 s.syncing = 
0; 3122 } 3123 3124 /* 3125 * might be able to return some write requests if the parity blocks 3126 * are safe, or on a failed drive 3127 */ 3128 pdev = &sh->dev[pd_idx]; 3129 r6s.p_failed = (s.failed >= 1 && r6s.failed_num[0] == pd_idx) 3130 || (s.failed >= 2 && r6s.failed_num[1] == pd_idx); 3131 qdev = &sh->dev[qd_idx]; 3132 r6s.q_failed = (s.failed >= 1 && r6s.failed_num[0] == qd_idx) 3133 || (s.failed >= 2 && r6s.failed_num[1] == qd_idx); 3134 3135 if ( s.written && 3136 ( r6s.p_failed || ((test_bit(R5_Insync, &pdev->flags) 3137 && !test_bit(R5_LOCKED, &pdev->flags) 3138 && test_bit(R5_UPTODATE, &pdev->flags)))) && 3139 ( r6s.q_failed || ((test_bit(R5_Insync, &qdev->flags) 3140 && !test_bit(R5_LOCKED, &qdev->flags) 3141 && test_bit(R5_UPTODATE, &qdev->flags))))) 3142 handle_stripe_clean_event(conf, sh, disks, &return_bi); 3143 3144 /* Now we might consider reading some blocks, either to check/generate 3145 * parity, or to satisfy requests 3146 * or to load a block that is being partially written. 3147 */ 3148 if (s.to_read || s.non_overwrite || (s.to_write && s.failed) || 3149 (s.syncing && (s.uptodate < disks)) || s.expanding) 3150 handle_stripe_fill6(sh, &s, &r6s, disks); 3151 3152 /* now to consider writing and what else, if anything should be read */ 3153 if (s.to_write) 3154 handle_stripe_dirtying6(conf, sh, &s, &r6s, disks); 3155 3156 /* maybe we need to check and possibly fix the parity for this stripe 3157 * Any reads will already have been scheduled, so we just see if enough 3158 * data is available 3159 */ 3160 if (s.syncing && s.locked == 0 && !test_bit(STRIPE_INSYNC, &sh->state)) 3161 handle_parity_checks6(conf, sh, &s, &r6s, tmp_page, disks); 3162 3163 if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { 3164 md_done_sync(conf->mddev, STRIPE_SECTORS,1); 3165 clear_bit(STRIPE_SYNCING, &sh->state); 3166 } 3167 3168 /* If the failed drives are just a ReadError, then we might need 3169 * to progress the repair/check process 3170 */ 3171 if (s.failed <= 2 && !conf->mddev->ro) 3172 for (i = 0; i < s.failed; i++) { 3173 dev = &sh->dev[r6s.failed_num[i]]; 3174 if (test_bit(R5_ReadError, &dev->flags) 3175 && !test_bit(R5_LOCKED, &dev->flags) 3176 && test_bit(R5_UPTODATE, &dev->flags) 3177 ) { 3178 if (!test_bit(R5_ReWrite, &dev->flags)) { 3179 set_bit(R5_Wantwrite, &dev->flags); 3180 set_bit(R5_ReWrite, &dev->flags); 3181 set_bit(R5_LOCKED, &dev->flags); 3182 } else { 3183 /* let's read it back */ 3184 set_bit(R5_Wantread, &dev->flags); 3185 set_bit(R5_LOCKED, &dev->flags); 3186 } 3187 } 3188 } 3189 3190 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) { 3191 struct stripe_head *sh2 3192 = get_active_stripe(conf, sh->sector, 1, 1); 3193 if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) { 3194 /* sh cannot be written until sh2 has been read. 
3195 * so arrange for sh to be delayed a little 3196 */ 3197 set_bit(STRIPE_DELAYED, &sh->state); 3198 set_bit(STRIPE_HANDLE, &sh->state); 3199 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, 3200 &sh2->state)) 3201 atomic_inc(&conf->preread_active_stripes); 3202 release_stripe(sh2); 3203 goto unlock; 3204 } 3205 if (sh2) 3206 release_stripe(sh2); 3207 3208 /* Need to write out all blocks after computing P&Q */ 3209 sh->disks = conf->raid_disks; 3210 stripe_set_idx(sh->sector, conf, 0, sh); 3211 compute_parity6(sh, RECONSTRUCT_WRITE); 3212 for (i = conf->raid_disks ; i-- ; ) { 3213 set_bit(R5_LOCKED, &sh->dev[i].flags); 3214 s.locked++; 3215 set_bit(R5_Wantwrite, &sh->dev[i].flags); 3216 } 3217 clear_bit(STRIPE_EXPANDING, &sh->state); 3218 } else if (s.expanded) { 3219 clear_bit(STRIPE_EXPAND_READY, &sh->state); 3220 atomic_dec(&conf->reshape_stripes); 3221 wake_up(&conf->wait_for_overlap); 3222 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 3223 } 3224 3225 if (s.expanding && s.locked == 0 && 3226 !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) 3227 handle_stripe_expansion(conf, sh, &r6s); 3228 3229 unlock: 3230 spin_unlock(&sh->lock); 3231 3232 /* wait for this device to become unblocked */ 3233 if (unlikely(blocked_rdev)) 3234 md_wait_for_blocked_rdev(blocked_rdev, conf->mddev); 3235 3236 ops_run_io(sh, &s); 3237 3238 return_io(return_bi); 3239 3240 return blocked_rdev == NULL; 3241 } 3242 3243 /* returns true if the stripe was handled */ 3244 static bool handle_stripe(struct stripe_head *sh, struct page *tmp_page) 3245 { 3246 if (sh->raid_conf->level == 6) 3247 return handle_stripe6(sh, tmp_page); 3248 else 3249 return handle_stripe5(sh); 3250 } 3251 3252 3253 3254 static void raid5_activate_delayed(raid5_conf_t *conf) 3255 { 3256 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { 3257 while (!list_empty(&conf->delayed_list)) { 3258 struct list_head *l = conf->delayed_list.next; 3259 struct stripe_head *sh; 3260 sh = list_entry(l, struct stripe_head, lru); 3261 list_del_init(l); 3262 clear_bit(STRIPE_DELAYED, &sh->state); 3263 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 3264 atomic_inc(&conf->preread_active_stripes); 3265 list_add_tail(&sh->lru, &conf->hold_list); 3266 } 3267 } else 3268 blk_plug_device(conf->mddev->queue); 3269 } 3270 3271 static void activate_bit_delay(raid5_conf_t *conf) 3272 { 3273 /* device_lock is held */ 3274 struct list_head head; 3275 list_add(&head, &conf->bitmap_list); 3276 list_del_init(&conf->bitmap_list); 3277 while (!list_empty(&head)) { 3278 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); 3279 list_del_init(&sh->lru); 3280 atomic_inc(&sh->count); 3281 __release_stripe(conf, sh); 3282 } 3283 } 3284 3285 static void unplug_slaves(mddev_t *mddev) 3286 { 3287 raid5_conf_t *conf = mddev_to_conf(mddev); 3288 int i; 3289 3290 rcu_read_lock(); 3291 for (i=0; i<mddev->raid_disks; i++) { 3292 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); 3293 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { 3294 struct request_queue *r_queue = bdev_get_queue(rdev->bdev); 3295 3296 atomic_inc(&rdev->nr_pending); 3297 rcu_read_unlock(); 3298 3299 blk_unplug(r_queue); 3300 3301 rdev_dec_pending(rdev, mddev); 3302 rcu_read_lock(); 3303 } 3304 } 3305 rcu_read_unlock(); 3306 } 3307 3308 static void raid5_unplug_device(struct request_queue *q) 3309 { 3310 mddev_t *mddev = q->queuedata; 3311 raid5_conf_t *conf = mddev_to_conf(mddev); 3312 unsigned long flags; 3313 3314 
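	/* Dropping the plug below bumps conf->seq_flush and, via
	 * raid5_activate_delayed(), moves delayed stripes onto the hold
	 * list (preread count permitting) before raid5d is woken to
	 * handle them.
	 */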
spin_lock_irqsave(&conf->device_lock, flags); 3315 3316 if (blk_remove_plug(q)) { 3317 conf->seq_flush++; 3318 raid5_activate_delayed(conf); 3319 } 3320 md_wakeup_thread(mddev->thread); 3321 3322 spin_unlock_irqrestore(&conf->device_lock, flags); 3323 3324 unplug_slaves(mddev); 3325 } 3326 3327 static int raid5_congested(void *data, int bits) 3328 { 3329 mddev_t *mddev = data; 3330 raid5_conf_t *conf = mddev_to_conf(mddev); 3331 3332 /* No difference between reads and writes. Just check 3333 * how busy the stripe_cache is 3334 */ 3335 if (conf->inactive_blocked) 3336 return 1; 3337 if (conf->quiesce) 3338 return 1; 3339 if (list_empty_careful(&conf->inactive_list)) 3340 return 1; 3341 3342 return 0; 3343 } 3344 3345 /* We want read requests to align with chunks where possible, 3346 * but write requests don't need to. 3347 */ 3348 static int raid5_mergeable_bvec(struct request_queue *q, 3349 struct bvec_merge_data *bvm, 3350 struct bio_vec *biovec) 3351 { 3352 mddev_t *mddev = q->queuedata; 3353 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); 3354 int max; 3355 unsigned int chunk_sectors = mddev->chunk_size >> 9; 3356 unsigned int bio_sectors = bvm->bi_size >> 9; 3357 3358 if ((bvm->bi_rw & 1) == WRITE) 3359 return biovec->bv_len; /* always allow writes to be mergeable */ 3360 3361 if (mddev->new_chunk < mddev->chunk_size) 3362 chunk_sectors = mddev->new_chunk >> 9; 3363 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; 3364 if (max < 0) max = 0; 3365 if (max <= biovec->bv_len && bio_sectors == 0) 3366 return biovec->bv_len; 3367 else 3368 return max; 3369 } 3370 3371 3372 static int in_chunk_boundary(mddev_t *mddev, struct bio *bio) 3373 { 3374 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); 3375 unsigned int chunk_sectors = mddev->chunk_size >> 9; 3376 unsigned int bio_sectors = bio->bi_size >> 9; 3377 3378 if (mddev->new_chunk < mddev->chunk_size) 3379 chunk_sectors = mddev->new_chunk >> 9; 3380 return chunk_sectors >= 3381 ((sector & (chunk_sectors - 1)) + bio_sectors); 3382 } 3383 3384 /* 3385 * add bio to the retry LIFO ( in O(1) ... we are in interrupt ) 3386 * later sampled by raid5d. 3387 */ 3388 static void add_bio_to_retry(struct bio *bi, raid5_conf_t *conf) 3389 { 3390 unsigned long flags; 3391 3392 spin_lock_irqsave(&conf->device_lock, flags); 3393 3394 bi->bi_next = conf->retry_read_aligned_list; 3395 conf->retry_read_aligned_list = bi; 3396 3397 spin_unlock_irqrestore(&conf->device_lock, flags); 3398 md_wakeup_thread(conf->mddev->thread); 3399 } 3400 3401 3402 static struct bio *remove_bio_from_retry(raid5_conf_t *conf) 3403 { 3404 struct bio *bi; 3405 3406 bi = conf->retry_read_aligned; 3407 if (bi) { 3408 conf->retry_read_aligned = NULL; 3409 return bi; 3410 } 3411 bi = conf->retry_read_aligned_list; 3412 if (bi) { 3413 conf->retry_read_aligned_list = bi->bi_next; 3414 bi->bi_next = NULL; 3415 /* 3416 * this sets the active stripe count to 1 and the processed 3417 * stripe count to zero (upper 16 bits) 3418 */ 3419 bi->bi_phys_segments = 1; /* biased count of active stripes */ 3420 } 3421 3422 return bi; 3423 } 3424 3425 3426 /* 3427 * The "raid5_align_endio" should check if the read succeeded and if it 3428 * did, call bio_endio on the original bio (having bio_put the new bio 3429 * first). 3430 * If the read failed..
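 * it is handed to add_bio_to_retry() so that raid5d can pick it up and
 * retry it later.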
3431 */ 3432 static void raid5_align_endio(struct bio *bi, int error) 3433 { 3434 struct bio* raid_bi = bi->bi_private; 3435 mddev_t *mddev; 3436 raid5_conf_t *conf; 3437 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 3438 mdk_rdev_t *rdev; 3439 3440 bio_put(bi); 3441 3442 mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata; 3443 conf = mddev_to_conf(mddev); 3444 rdev = (void*)raid_bi->bi_next; 3445 raid_bi->bi_next = NULL; 3446 3447 rdev_dec_pending(rdev, conf->mddev); 3448 3449 if (!error && uptodate) { 3450 bio_endio(raid_bi, 0); 3451 if (atomic_dec_and_test(&conf->active_aligned_reads)) 3452 wake_up(&conf->wait_for_stripe); 3453 return; 3454 } 3455 3456 3457 pr_debug("raid5_align_endio : io error...handing IO for a retry\n"); 3458 3459 add_bio_to_retry(raid_bi, conf); 3460 } 3461 3462 static int bio_fits_rdev(struct bio *bi) 3463 { 3464 struct request_queue *q = bdev_get_queue(bi->bi_bdev); 3465 3466 if ((bi->bi_size>>9) > q->max_sectors) 3467 return 0; 3468 blk_recount_segments(q, bi); 3469 if (bi->bi_phys_segments > q->max_phys_segments) 3470 return 0; 3471 3472 if (q->merge_bvec_fn) 3473 /* it's too hard to apply the merge_bvec_fn at this stage, 3474 * just just give up 3475 */ 3476 return 0; 3477 3478 return 1; 3479 } 3480 3481 3482 static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio) 3483 { 3484 mddev_t *mddev = q->queuedata; 3485 raid5_conf_t *conf = mddev_to_conf(mddev); 3486 unsigned int dd_idx; 3487 struct bio* align_bi; 3488 mdk_rdev_t *rdev; 3489 3490 if (!in_chunk_boundary(mddev, raid_bio)) { 3491 pr_debug("chunk_aligned_read : non aligned\n"); 3492 return 0; 3493 } 3494 /* 3495 * use bio_clone to make a copy of the bio 3496 */ 3497 align_bi = bio_clone(raid_bio, GFP_NOIO); 3498 if (!align_bi) 3499 return 0; 3500 /* 3501 * set bi_end_io to a new function, and set bi_private to the 3502 * original bio. 3503 */ 3504 align_bi->bi_end_io = raid5_align_endio; 3505 align_bi->bi_private = raid_bio; 3506 /* 3507 * compute position 3508 */ 3509 align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector, 3510 0, 3511 &dd_idx, NULL); 3512 3513 rcu_read_lock(); 3514 rdev = rcu_dereference(conf->disks[dd_idx].rdev); 3515 if (rdev && test_bit(In_sync, &rdev->flags)) { 3516 atomic_inc(&rdev->nr_pending); 3517 rcu_read_unlock(); 3518 raid_bio->bi_next = (void*)rdev; 3519 align_bi->bi_bdev = rdev->bdev; 3520 align_bi->bi_flags &= ~(1 << BIO_SEG_VALID); 3521 align_bi->bi_sector += rdev->data_offset; 3522 3523 if (!bio_fits_rdev(align_bi)) { 3524 /* too big in some way */ 3525 bio_put(align_bi); 3526 rdev_dec_pending(rdev, mddev); 3527 return 0; 3528 } 3529 3530 spin_lock_irq(&conf->device_lock); 3531 wait_event_lock_irq(conf->wait_for_stripe, 3532 conf->quiesce == 0, 3533 conf->device_lock, /* nothing */); 3534 atomic_inc(&conf->active_aligned_reads); 3535 spin_unlock_irq(&conf->device_lock); 3536 3537 generic_make_request(align_bi); 3538 return 1; 3539 } else { 3540 rcu_read_unlock(); 3541 bio_put(align_bi); 3542 return 0; 3543 } 3544 } 3545 3546 /* __get_priority_stripe - get the next stripe to process 3547 * 3548 * Full stripe writes are allowed to pass preread active stripes up until 3549 * the bypass_threshold is exceeded. In general the bypass_count 3550 * increments when the handle_list is handled before the hold_list; however, it 3551 * will not be incremented when STRIPE_IO_STARTED is sampled set signifying a 3552 * stripe with in flight i/o. The bypass_count will be reset when the 3553 * head of the hold_list has changed, i.e. 
the head was promoted to the 3554 * handle_list. 3555 */ 3556 static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf) 3557 { 3558 struct stripe_head *sh; 3559 3560 pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n", 3561 __func__, 3562 list_empty(&conf->handle_list) ? "empty" : "busy", 3563 list_empty(&conf->hold_list) ? "empty" : "busy", 3564 atomic_read(&conf->pending_full_writes), conf->bypass_count); 3565 3566 if (!list_empty(&conf->handle_list)) { 3567 sh = list_entry(conf->handle_list.next, typeof(*sh), lru); 3568 3569 if (list_empty(&conf->hold_list)) 3570 conf->bypass_count = 0; 3571 else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) { 3572 if (conf->hold_list.next == conf->last_hold) 3573 conf->bypass_count++; 3574 else { 3575 conf->last_hold = conf->hold_list.next; 3576 conf->bypass_count -= conf->bypass_threshold; 3577 if (conf->bypass_count < 0) 3578 conf->bypass_count = 0; 3579 } 3580 } 3581 } else if (!list_empty(&conf->hold_list) && 3582 ((conf->bypass_threshold && 3583 conf->bypass_count > conf->bypass_threshold) || 3584 atomic_read(&conf->pending_full_writes) == 0)) { 3585 sh = list_entry(conf->hold_list.next, 3586 typeof(*sh), lru); 3587 conf->bypass_count -= conf->bypass_threshold; 3588 if (conf->bypass_count < 0) 3589 conf->bypass_count = 0; 3590 } else 3591 return NULL; 3592 3593 list_del_init(&sh->lru); 3594 atomic_inc(&sh->count); 3595 BUG_ON(atomic_read(&sh->count) != 1); 3596 return sh; 3597 } 3598 3599 static int make_request(struct request_queue *q, struct bio * bi) 3600 { 3601 mddev_t *mddev = q->queuedata; 3602 raid5_conf_t *conf = mddev_to_conf(mddev); 3603 int dd_idx; 3604 sector_t new_sector; 3605 sector_t logical_sector, last_sector; 3606 struct stripe_head *sh; 3607 const int rw = bio_data_dir(bi); 3608 int cpu, remaining; 3609 3610 if (unlikely(bio_barrier(bi))) { 3611 bio_endio(bi, -EOPNOTSUPP); 3612 return 0; 3613 } 3614 3615 md_write_start(mddev, bi); 3616 3617 cpu = part_stat_lock(); 3618 part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); 3619 part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], 3620 bio_sectors(bi)); 3621 part_stat_unlock(); 3622 3623 if (rw == READ && 3624 mddev->reshape_position == MaxSector && 3625 chunk_aligned_read(q,bi)) 3626 return 0; 3627 3628 logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 3629 last_sector = bi->bi_sector + (bi->bi_size>>9); 3630 bi->bi_next = NULL; 3631 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ 3632 3633 for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { 3634 DEFINE_WAIT(w); 3635 int disks, data_disks; 3636 int previous; 3637 3638 retry: 3639 previous = 0; 3640 disks = conf->raid_disks; 3641 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); 3642 if (unlikely(conf->reshape_progress != MaxSector)) { 3643 /* spinlock is needed as reshape_progress may be 3644 * 64bit on a 32bit platform, and so it might be 3645 * possible to see a half-updated value 3646 * Ofcourse reshape_progress could change after 3647 * the lock is dropped, so once we get a reference 3648 * to the stripe that we think it is, we will have 3649 * to check again. 3650 */ 3651 spin_lock_irq(&conf->device_lock); 3652 if (mddev->delta_disks < 0 3653 ? logical_sector < conf->reshape_progress 3654 : logical_sector >= conf->reshape_progress) { 3655 disks = conf->previous_raid_disks; 3656 previous = 1; 3657 } else { 3658 if (mddev->delta_disks < 0 3659 ? 
logical_sector < conf->reshape_safe 3660 : logical_sector >= conf->reshape_safe) { 3661 spin_unlock_irq(&conf->device_lock); 3662 schedule(); 3663 goto retry; 3664 } 3665 } 3666 spin_unlock_irq(&conf->device_lock); 3667 } 3668 data_disks = disks - conf->max_degraded; 3669 3670 new_sector = raid5_compute_sector(conf, logical_sector, 3671 previous, 3672 &dd_idx, NULL); 3673 pr_debug("raid5: make_request, sector %llu logical %llu\n", 3674 (unsigned long long)new_sector, 3675 (unsigned long long)logical_sector); 3676 3677 sh = get_active_stripe(conf, new_sector, previous, 3678 (bi->bi_rw&RWA_MASK)); 3679 if (sh) { 3680 if (unlikely(previous)) { 3681 /* expansion might have moved on while waiting for a 3682 * stripe, so we must do the range check again. 3683 * Expansion could still move past after this 3684 * test, but as we are holding a reference to 3685 * 'sh', we know that if that happens, 3686 * STRIPE_EXPANDING will get set and the expansion 3687 * won't proceed until we finish with the stripe. 3688 */ 3689 int must_retry = 0; 3690 spin_lock_irq(&conf->device_lock); 3691 if (mddev->delta_disks < 0 3692 ? logical_sector >= conf->reshape_progress 3693 : logical_sector < conf->reshape_progress) 3694 /* mismatch, need to try again */ 3695 must_retry = 1; 3696 spin_unlock_irq(&conf->device_lock); 3697 if (must_retry) { 3698 release_stripe(sh); 3699 goto retry; 3700 } 3701 } 3702 /* FIXME what if we get a false positive because these 3703 * are being updated. 3704 */ 3705 if (logical_sector >= mddev->suspend_lo && 3706 logical_sector < mddev->suspend_hi) { 3707 release_stripe(sh); 3708 schedule(); 3709 goto retry; 3710 } 3711 3712 if (test_bit(STRIPE_EXPANDING, &sh->state) || 3713 !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) { 3714 /* Stripe is busy expanding or 3715 * add failed due to overlap. Flush everything 3716 * and wait a while 3717 */ 3718 raid5_unplug_device(mddev->queue); 3719 release_stripe(sh); 3720 schedule(); 3721 goto retry; 3722 } 3723 finish_wait(&conf->wait_for_overlap, &w); 3724 set_bit(STRIPE_HANDLE, &sh->state); 3725 clear_bit(STRIPE_DELAYED, &sh->state); 3726 release_stripe(sh); 3727 } else { 3728 /* cannot get stripe for read-ahead, just give-up */ 3729 clear_bit(BIO_UPTODATE, &bi->bi_flags); 3730 finish_wait(&conf->wait_for_overlap, &w); 3731 break; 3732 } 3733 3734 } 3735 spin_lock_irq(&conf->device_lock); 3736 remaining = raid5_dec_bi_phys_segments(bi); 3737 spin_unlock_irq(&conf->device_lock); 3738 if (remaining == 0) { 3739 3740 if ( rw == WRITE ) 3741 md_write_end(mddev); 3742 3743 bio_endio(bi, 0); 3744 } 3745 return 0; 3746 } 3747 3748 static sector_t raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks); 3749 3750 static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped) 3751 { 3752 /* reshaping is quite different to recovery/resync so it is 3753 * handled quite separately ... here. 3754 * 3755 * On each call to sync_request, we gather one chunk worth of 3756 * destination stripes and flag them as expanding. 3757 * Then we find all the source stripes and request reads. 3758 * As the reads complete, handle_stripe will copy the data 3759 * into the destination stripe and release that stripe. 
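 * The destination stripes are kept on a local list and only released
 * after the corresponding source range has been marked
 * STRIPE_EXPAND_SOURCE, and reshape_progress is checkpointed to the
 * superblock (becoming reshape_safe) before any data that might still be
 * needed could be overwritten.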
3760 */ 3761 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 3762 struct stripe_head *sh; 3763 sector_t first_sector, last_sector; 3764 int raid_disks = conf->previous_raid_disks; 3765 int data_disks = raid_disks - conf->max_degraded; 3766 int new_data_disks = conf->raid_disks - conf->max_degraded; 3767 int i; 3768 int dd_idx; 3769 sector_t writepos, readpos, safepos; 3770 sector_t stripe_addr; 3771 int reshape_sectors; 3772 struct list_head stripes; 3773 3774 if (sector_nr == 0) { 3775 /* If restarting in the middle, skip the initial sectors */ 3776 if (mddev->delta_disks < 0 && 3777 conf->reshape_progress < raid5_size(mddev, 0, 0)) { 3778 sector_nr = raid5_size(mddev, 0, 0) 3779 - conf->reshape_progress; 3780 } else if (mddev->delta_disks > 0 && 3781 conf->reshape_progress > 0) 3782 sector_nr = conf->reshape_progress; 3783 sector_div(sector_nr, new_data_disks); 3784 if (sector_nr) { 3785 *skipped = 1; 3786 return sector_nr; 3787 } 3788 } 3789 3790 /* We need to process a full chunk at a time. 3791 * If old and new chunk sizes differ, we need to process the 3792 * largest of these 3793 */ 3794 if (mddev->new_chunk > mddev->chunk_size) 3795 reshape_sectors = mddev->new_chunk / 512; 3796 else 3797 reshape_sectors = mddev->chunk_size / 512; 3798 3799 /* we update the metadata when there is more than 3Meg 3800 * in the block range (that is rather arbitrary, should 3801 * probably be time based) or when the data about to be 3802 * copied would over-write the source of the data at 3803 * the front of the range. 3804 * i.e. one new_stripe along from reshape_progress new_maps 3805 * to after where reshape_safe old_maps to 3806 */ 3807 writepos = conf->reshape_progress; 3808 sector_div(writepos, new_data_disks); 3809 readpos = conf->reshape_progress; 3810 sector_div(readpos, data_disks); 3811 safepos = conf->reshape_safe; 3812 sector_div(safepos, data_disks); 3813 if (mddev->delta_disks < 0) { 3814 writepos -= reshape_sectors; 3815 readpos += reshape_sectors; 3816 safepos += reshape_sectors; 3817 } else { 3818 writepos += reshape_sectors; 3819 readpos -= reshape_sectors; 3820 safepos -= reshape_sectors; 3821 } 3822 3823 /* 'writepos' is the most advanced device address we might write. 3824 * 'readpos' is the least advanced device address we might read. 3825 * 'safepos' is the least address recorded in the metadata as having 3826 * been reshaped. 3827 * If 'readpos' is behind 'writepos', then there is no way that we can 3828 * ensure safety in the face of a crash - that must be done by userspace 3829 * making a backup of the data. So in that case there is no particular 3830 * rush to update metadata. 3831 * Otherwise if 'safepos' is behind 'writepos', then we really need to 3832 * update the metadata to advance 'safepos' to match 'readpos' so that 3833 * we can be safe in the event of a crash. 3834 * So we insist on updating metadata if safepos is behind writepos and 3835 * readpos is beyond writepos. 3836 * In any case, update the metadata every 10 seconds. 3837 * Maybe that number should be configurable, but I'm not sure it is 3838 * worth it.... maybe it could be a multiple of safemode_delay??? 3839 */ 3840 if ((mddev->delta_disks < 0 3841 ? (safepos > writepos && readpos < writepos) 3842 : (safepos < writepos && readpos > writepos)) || 3843 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { 3844 /* Cannot proceed until we've updated the superblock... 
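* The checkpoint sequence is: wait for all in-flight reshape stripes
* to drain, publish reshape_progress as mddev->reshape_position, ask
* md to write the superblock, and only then advance reshape_safe, so
* reshape_safe never gets ahead of what has been recorded on disk.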
*/ 3845 wait_event(conf->wait_for_overlap, 3846 atomic_read(&conf->reshape_stripes)==0); 3847 mddev->reshape_position = conf->reshape_progress; 3848 conf->reshape_checkpoint = jiffies; 3849 set_bit(MD_CHANGE_DEVS, &mddev->flags); 3850 md_wakeup_thread(mddev->thread); 3851 wait_event(mddev->sb_wait, mddev->flags == 0 || 3852 kthread_should_stop()); 3853 spin_lock_irq(&conf->device_lock); 3854 conf->reshape_safe = mddev->reshape_position; 3855 spin_unlock_irq(&conf->device_lock); 3856 wake_up(&conf->wait_for_overlap); 3857 } 3858 3859 if (mddev->delta_disks < 0) { 3860 BUG_ON(conf->reshape_progress == 0); 3861 stripe_addr = writepos; 3862 BUG_ON((mddev->dev_sectors & 3863 ~((sector_t)reshape_sectors - 1)) 3864 - reshape_sectors - stripe_addr 3865 != sector_nr); 3866 } else { 3867 BUG_ON(writepos != sector_nr + reshape_sectors); 3868 stripe_addr = sector_nr; 3869 } 3870 INIT_LIST_HEAD(&stripes); 3871 for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) { 3872 int j; 3873 int skipped = 0; 3874 sh = get_active_stripe(conf, stripe_addr+i, 0, 0); 3875 set_bit(STRIPE_EXPANDING, &sh->state); 3876 atomic_inc(&conf->reshape_stripes); 3877 /* If any of this stripe is beyond the end of the old 3878 * array, then we need to zero those blocks 3879 */ 3880 for (j=sh->disks; j--;) { 3881 sector_t s; 3882 if (j == sh->pd_idx) 3883 continue; 3884 if (conf->level == 6 && 3885 j == sh->qd_idx) 3886 continue; 3887 s = compute_blocknr(sh, j, 0); 3888 if (s < raid5_size(mddev, 0, 0)) { 3889 skipped = 1; 3890 continue; 3891 } 3892 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE); 3893 set_bit(R5_Expanded, &sh->dev[j].flags); 3894 set_bit(R5_UPTODATE, &sh->dev[j].flags); 3895 } 3896 if (!skipped) { 3897 set_bit(STRIPE_EXPAND_READY, &sh->state); 3898 set_bit(STRIPE_HANDLE, &sh->state); 3899 } 3900 list_add(&sh->lru, &stripes); 3901 } 3902 spin_lock_irq(&conf->device_lock); 3903 if (mddev->delta_disks < 0) 3904 conf->reshape_progress -= reshape_sectors * new_data_disks; 3905 else 3906 conf->reshape_progress += reshape_sectors * new_data_disks; 3907 spin_unlock_irq(&conf->device_lock); 3908 /* Ok, those stripe are ready. We can start scheduling 3909 * reads on the source stripes. 3910 * The source stripes are determined by mapping the first and last 3911 * block on the destination stripes. 3912 */ 3913 first_sector = 3914 raid5_compute_sector(conf, stripe_addr*(new_data_disks), 3915 1, &dd_idx, NULL); 3916 last_sector = 3917 raid5_compute_sector(conf, ((stripe_addr+conf->chunk_size/512) 3918 *(new_data_disks) - 1), 3919 1, &dd_idx, NULL); 3920 if (last_sector >= mddev->dev_sectors) 3921 last_sector = mddev->dev_sectors - 1; 3922 while (first_sector <= last_sector) { 3923 sh = get_active_stripe(conf, first_sector, 1, 0); 3924 set_bit(STRIPE_EXPAND_SOURCE, &sh->state); 3925 set_bit(STRIPE_HANDLE, &sh->state); 3926 release_stripe(sh); 3927 first_sector += STRIPE_SECTORS; 3928 } 3929 /* Now that the sources are clearly marked, we can release 3930 * the destination stripes 3931 */ 3932 while (!list_empty(&stripes)) { 3933 sh = list_entry(stripes.next, struct stripe_head, lru); 3934 list_del_init(&sh->lru); 3935 release_stripe(sh); 3936 } 3937 /* If this takes us to the resync_max point where we have to pause, 3938 * then we need to write out the superblock. 3939 */ 3940 sector_nr += reshape_sectors; 3941 if (sector_nr >= mddev->resync_max) { 3942 /* Cannot proceed until we've updated the superblock... 
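* (This is the same checkpoint sequence as at the top of this
* function; md_do_sync will pause once we reach resync_max, and the
* on-disk metadata must be up to date before that happens.)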
*/ 3943 wait_event(conf->wait_for_overlap, 3944 atomic_read(&conf->reshape_stripes) == 0); 3945 mddev->reshape_position = conf->reshape_progress; 3946 conf->reshape_checkpoint = jiffies; 3947 set_bit(MD_CHANGE_DEVS, &mddev->flags); 3948 md_wakeup_thread(mddev->thread); 3949 wait_event(mddev->sb_wait, 3950 !test_bit(MD_CHANGE_DEVS, &mddev->flags) 3951 || kthread_should_stop()); 3952 spin_lock_irq(&conf->device_lock); 3953 conf->reshape_safe = mddev->reshape_position; 3954 spin_unlock_irq(&conf->device_lock); 3955 wake_up(&conf->wait_for_overlap); 3956 } 3957 return reshape_sectors; 3958 } 3959 3960 /* FIXME go_faster isn't used */ 3961 static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster) 3962 { 3963 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 3964 struct stripe_head *sh; 3965 sector_t max_sector = mddev->dev_sectors; 3966 int sync_blocks; 3967 int still_degraded = 0; 3968 int i; 3969 3970 if (sector_nr >= max_sector) { 3971 /* just being told to finish up .. nothing much to do */ 3972 unplug_slaves(mddev); 3973 3974 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { 3975 end_reshape(conf); 3976 return 0; 3977 } 3978 3979 if (mddev->curr_resync < max_sector) /* aborted */ 3980 bitmap_end_sync(mddev->bitmap, mddev->curr_resync, 3981 &sync_blocks, 1); 3982 else /* completed sync */ 3983 conf->fullsync = 0; 3984 bitmap_close_sync(mddev->bitmap); 3985 3986 return 0; 3987 } 3988 3989 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 3990 return reshape_request(mddev, sector_nr, skipped); 3991 3992 /* No need to check resync_max as we never do more than one 3993 * stripe, and as resync_max will always be on a chunk boundary, 3994 * if the check in md_do_sync didn't fire, there is no chance 3995 * of overstepping resync_max here 3996 */ 3997 3998 /* if there are too many failed drives and we are trying 3999 * to resync, then assert that we are finished, because there is 4000 * nothing we can do. 4001 */ 4002 if (mddev->degraded >= conf->max_degraded && 4003 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 4004 sector_t rv = mddev->dev_sectors - sector_nr; 4005 *skipped = 1; 4006 return rv; 4007 } 4008 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && 4009 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && 4010 !conf->fullsync && sync_blocks >= STRIPE_SECTORS) { 4011 /* we can skip this block, and probably more */ 4012 sync_blocks /= STRIPE_SECTORS; 4013 *skipped = 1; 4014 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */ 4015 } 4016 4017 4018 bitmap_cond_end_sync(mddev->bitmap, sector_nr); 4019 4020 sh = get_active_stripe(conf, sector_nr, 0, 1); 4021 if (sh == NULL) { 4022 sh = get_active_stripe(conf, sector_nr, 0, 0); 4023 /* make sure we don't swamp the stripe cache if someone else 4024 * is trying to get access 4025 */ 4026 schedule_timeout_uninterruptible(1); 4027 } 4028 /* Need to check if array will still be degraded after recovery/resync 4029 * We don't need to check the 'failed' flag as when that gets set, 4030 * recovery aborts.
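* still_degraded is passed to bitmap_start_sync() below so that, while
* a device is missing, the bitmap does not treat this pass as having
* brought those blocks fully in-sync.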
4031 */ 4032 for (i=0; i<mddev->raid_disks; i++) 4033 if (conf->disks[i].rdev == NULL) 4034 still_degraded = 1; 4035 4036 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); 4037 4038 spin_lock(&sh->lock); 4039 set_bit(STRIPE_SYNCING, &sh->state); 4040 clear_bit(STRIPE_INSYNC, &sh->state); 4041 spin_unlock(&sh->lock); 4042 4043 /* wait for any blocked device to be handled */ 4044 while(unlikely(!handle_stripe(sh, NULL))) 4045 ; 4046 release_stripe(sh); 4047 4048 return STRIPE_SECTORS; 4049 } 4050 4051 static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio) 4052 { 4053 /* We may not be able to submit a whole bio at once as there 4054 * may not be enough stripe_heads available. 4055 * We cannot pre-allocate enough stripe_heads as we may need 4056 * more than exist in the cache (if we allow ever large chunks). 4057 * So we do one stripe head at a time and record in 4058 * ->bi_hw_segments how many have been done. 4059 * 4060 * We *know* that this entire raid_bio is in one chunk, so 4061 * it will be only one 'dd_idx' and only need one call to raid5_compute_sector. 4062 */ 4063 struct stripe_head *sh; 4064 int dd_idx; 4065 sector_t sector, logical_sector, last_sector; 4066 int scnt = 0; 4067 int remaining; 4068 int handled = 0; 4069 4070 logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 4071 sector = raid5_compute_sector(conf, logical_sector, 4072 0, &dd_idx, NULL); 4073 last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9); 4074 4075 for (; logical_sector < last_sector; 4076 logical_sector += STRIPE_SECTORS, 4077 sector += STRIPE_SECTORS, 4078 scnt++) { 4079 4080 if (scnt < raid5_bi_hw_segments(raid_bio)) 4081 /* already done this stripe */ 4082 continue; 4083 4084 sh = get_active_stripe(conf, sector, 0, 1); 4085 4086 if (!sh) { 4087 /* failed to get a stripe - must wait */ 4088 raid5_set_bi_hw_segments(raid_bio, scnt); 4089 conf->retry_read_aligned = raid_bio; 4090 return handled; 4091 } 4092 4093 set_bit(R5_ReadError, &sh->dev[dd_idx].flags); 4094 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) { 4095 release_stripe(sh); 4096 raid5_set_bi_hw_segments(raid_bio, scnt); 4097 conf->retry_read_aligned = raid_bio; 4098 return handled; 4099 } 4100 4101 handle_stripe(sh, NULL); 4102 release_stripe(sh); 4103 handled++; 4104 } 4105 spin_lock_irq(&conf->device_lock); 4106 remaining = raid5_dec_bi_phys_segments(raid_bio); 4107 spin_unlock_irq(&conf->device_lock); 4108 if (remaining == 0) 4109 bio_endio(raid_bio, 0); 4110 if (atomic_dec_and_test(&conf->active_aligned_reads)) 4111 wake_up(&conf->wait_for_stripe); 4112 return handled; 4113 } 4114 4115 4116 4117 /* 4118 * This is our raid5 kernel thread. 4119 * 4120 * We scan the hash table for stripes which can be handled now. 4121 * During the scan, completed stripes are saved for us by the interrupt 4122 * handler, so that they will not have to wait for our next wakeup. 
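*
* The loop below runs with conf->device_lock held and drops it around
* the real work: flushing the bitmap when a new batch has been closed,
* retrying any queued aligned reads, and handling whichever stripe
* __get_priority_stripe() selects next.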
4123 */ 4124 static void raid5d(mddev_t *mddev) 4125 { 4126 struct stripe_head *sh; 4127 raid5_conf_t *conf = mddev_to_conf(mddev); 4128 int handled; 4129 4130 pr_debug("+++ raid5d active\n"); 4131 4132 md_check_recovery(mddev); 4133 4134 handled = 0; 4135 spin_lock_irq(&conf->device_lock); 4136 while (1) { 4137 struct bio *bio; 4138 4139 if (conf->seq_flush != conf->seq_write) { 4140 int seq = conf->seq_flush; 4141 spin_unlock_irq(&conf->device_lock); 4142 bitmap_unplug(mddev->bitmap); 4143 spin_lock_irq(&conf->device_lock); 4144 conf->seq_write = seq; 4145 activate_bit_delay(conf); 4146 } 4147 4148 while ((bio = remove_bio_from_retry(conf))) { 4149 int ok; 4150 spin_unlock_irq(&conf->device_lock); 4151 ok = retry_aligned_read(conf, bio); 4152 spin_lock_irq(&conf->device_lock); 4153 if (!ok) 4154 break; 4155 handled++; 4156 } 4157 4158 sh = __get_priority_stripe(conf); 4159 4160 if (!sh) 4161 break; 4162 spin_unlock_irq(&conf->device_lock); 4163 4164 handled++; 4165 handle_stripe(sh, conf->spare_page); 4166 release_stripe(sh); 4167 4168 spin_lock_irq(&conf->device_lock); 4169 } 4170 pr_debug("%d stripes handled\n", handled); 4171 4172 spin_unlock_irq(&conf->device_lock); 4173 4174 async_tx_issue_pending_all(); 4175 unplug_slaves(mddev); 4176 4177 pr_debug("--- raid5d inactive\n"); 4178 } 4179 4180 static ssize_t 4181 raid5_show_stripe_cache_size(mddev_t *mddev, char *page) 4182 { 4183 raid5_conf_t *conf = mddev_to_conf(mddev); 4184 if (conf) 4185 return sprintf(page, "%d\n", conf->max_nr_stripes); 4186 else 4187 return 0; 4188 } 4189 4190 static ssize_t 4191 raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len) 4192 { 4193 raid5_conf_t *conf = mddev_to_conf(mddev); 4194 unsigned long new; 4195 int err; 4196 4197 if (len >= PAGE_SIZE) 4198 return -EINVAL; 4199 if (!conf) 4200 return -ENODEV; 4201 4202 if (strict_strtoul(page, 10, &new)) 4203 return -EINVAL; 4204 if (new <= 16 || new > 32768) 4205 return -EINVAL; 4206 while (new < conf->max_nr_stripes) { 4207 if (drop_one_stripe(conf)) 4208 conf->max_nr_stripes--; 4209 else 4210 break; 4211 } 4212 err = md_allow_write(mddev); 4213 if (err) 4214 return err; 4215 while (new > conf->max_nr_stripes) { 4216 if (grow_one_stripe(conf)) 4217 conf->max_nr_stripes++; 4218 else break; 4219 } 4220 return len; 4221 } 4222 4223 static struct md_sysfs_entry 4224 raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR, 4225 raid5_show_stripe_cache_size, 4226 raid5_store_stripe_cache_size); 4227 4228 static ssize_t 4229 raid5_show_preread_threshold(mddev_t *mddev, char *page) 4230 { 4231 raid5_conf_t *conf = mddev_to_conf(mddev); 4232 if (conf) 4233 return sprintf(page, "%d\n", conf->bypass_threshold); 4234 else 4235 return 0; 4236 } 4237 4238 static ssize_t 4239 raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len) 4240 { 4241 raid5_conf_t *conf = mddev_to_conf(mddev); 4242 unsigned long new; 4243 if (len >= PAGE_SIZE) 4244 return -EINVAL; 4245 if (!conf) 4246 return -ENODEV; 4247 4248 if (strict_strtoul(page, 10, &new)) 4249 return -EINVAL; 4250 if (new > conf->max_nr_stripes) 4251 return -EINVAL; 4252 conf->bypass_threshold = new; 4253 return len; 4254 } 4255 4256 static struct md_sysfs_entry 4257 raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold, 4258 S_IRUGO | S_IWUSR, 4259 raid5_show_preread_threshold, 4260 raid5_store_preread_threshold); 4261 4262 static ssize_t 4263 stripe_cache_active_show(mddev_t *mddev, char *page) 4264 { 4265 raid5_conf_t *conf = mddev_to_conf(mddev); 4266 
if (conf) 4267 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); 4268 else 4269 return 0; 4270 } 4271 4272 static struct md_sysfs_entry 4273 raid5_stripecache_active = __ATTR_RO(stripe_cache_active); 4274 4275 static struct attribute *raid5_attrs[] = { 4276 &raid5_stripecache_size.attr, 4277 &raid5_stripecache_active.attr, 4278 &raid5_preread_bypass_threshold.attr, 4279 NULL, 4280 }; 4281 static struct attribute_group raid5_attrs_group = { 4282 .name = NULL, 4283 .attrs = raid5_attrs, 4284 }; 4285 4286 static sector_t 4287 raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks) 4288 { 4289 raid5_conf_t *conf = mddev_to_conf(mddev); 4290 4291 if (!sectors) 4292 sectors = mddev->dev_sectors; 4293 if (!raid_disks) { 4294 /* size is defined by the smallest of previous and new size */ 4295 if (conf->raid_disks < conf->previous_raid_disks) 4296 raid_disks = conf->raid_disks; 4297 else 4298 raid_disks = conf->previous_raid_disks; 4299 } 4300 4301 sectors &= ~((sector_t)mddev->chunk_size/512 - 1); 4302 sectors &= ~((sector_t)mddev->new_chunk/512 - 1); 4303 return sectors * (raid_disks - conf->max_degraded); 4304 } 4305 4306 static raid5_conf_t *setup_conf(mddev_t *mddev) 4307 { 4308 raid5_conf_t *conf; 4309 int raid_disk, memory; 4310 mdk_rdev_t *rdev; 4311 struct disk_info *disk; 4312 4313 if (mddev->new_level != 5 4314 && mddev->new_level != 4 4315 && mddev->new_level != 6) { 4316 printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n", 4317 mdname(mddev), mddev->new_level); 4318 return ERR_PTR(-EIO); 4319 } 4320 if ((mddev->new_level == 5 4321 && !algorithm_valid_raid5(mddev->new_layout)) || 4322 (mddev->new_level == 6 4323 && !algorithm_valid_raid6(mddev->new_layout))) { 4324 printk(KERN_ERR "raid5: %s: layout %d not supported\n", 4325 mdname(mddev), mddev->new_layout); 4326 return ERR_PTR(-EIO); 4327 } 4328 if (mddev->new_level == 6 && mddev->raid_disks < 4) { 4329 printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n", 4330 mdname(mddev), mddev->raid_disks); 4331 return ERR_PTR(-EINVAL); 4332 } 4333 4334 if (!mddev->new_chunk || mddev->new_chunk % PAGE_SIZE) { 4335 printk(KERN_ERR "raid5: invalid chunk size %d for %s\n", 4336 mddev->new_chunk, mdname(mddev)); 4337 return ERR_PTR(-EINVAL); 4338 } 4339 4340 conf = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL); 4341 if (conf == NULL) 4342 goto abort; 4343 4344 conf->raid_disks = mddev->raid_disks; 4345 if (mddev->reshape_position == MaxSector) 4346 conf->previous_raid_disks = mddev->raid_disks; 4347 else 4348 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; 4349 4350 conf->disks = kzalloc(conf->raid_disks * sizeof(struct disk_info), 4351 GFP_KERNEL); 4352 if (!conf->disks) 4353 goto abort; 4354 4355 conf->mddev = mddev; 4356 4357 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) 4358 goto abort; 4359 4360 if (mddev->new_level == 6) { 4361 conf->spare_page = alloc_page(GFP_KERNEL); 4362 if (!conf->spare_page) 4363 goto abort; 4364 } 4365 spin_lock_init(&conf->device_lock); 4366 init_waitqueue_head(&conf->wait_for_stripe); 4367 init_waitqueue_head(&conf->wait_for_overlap); 4368 INIT_LIST_HEAD(&conf->handle_list); 4369 INIT_LIST_HEAD(&conf->hold_list); 4370 INIT_LIST_HEAD(&conf->delayed_list); 4371 INIT_LIST_HEAD(&conf->bitmap_list); 4372 INIT_LIST_HEAD(&conf->inactive_list); 4373 atomic_set(&conf->active_stripes, 0); 4374 atomic_set(&conf->preread_active_stripes, 0); 4375 atomic_set(&conf->active_aligned_reads, 0); 4376 conf->bypass_threshold = 
BYPASS_THRESHOLD; 4377 4378 pr_debug("raid5: run(%s) called.\n", mdname(mddev)); 4379 4380 list_for_each_entry(rdev, &mddev->disks, same_set) { 4381 raid_disk = rdev->raid_disk; 4382 if (raid_disk >= conf->raid_disks 4383 || raid_disk < 0) 4384 continue; 4385 disk = conf->disks + raid_disk; 4386 4387 disk->rdev = rdev; 4388 4389 if (test_bit(In_sync, &rdev->flags)) { 4390 char b[BDEVNAME_SIZE]; 4391 printk(KERN_INFO "raid5: device %s operational as raid" 4392 " disk %d\n", bdevname(rdev->bdev,b), 4393 raid_disk); 4394 } else 4395 /* Cannot rely on bitmap to complete recovery */ 4396 conf->fullsync = 1; 4397 } 4398 4399 conf->chunk_size = mddev->new_chunk; 4400 conf->level = mddev->new_level; 4401 if (conf->level == 6) 4402 conf->max_degraded = 2; 4403 else 4404 conf->max_degraded = 1; 4405 conf->algorithm = mddev->new_layout; 4406 conf->max_nr_stripes = NR_STRIPES; 4407 conf->reshape_progress = mddev->reshape_position; 4408 if (conf->reshape_progress != MaxSector) { 4409 conf->prev_chunk = mddev->chunk_size; 4410 conf->prev_algo = mddev->layout; 4411 } 4412 4413 memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + 4414 conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; 4415 if (grow_stripes(conf, conf->max_nr_stripes)) { 4416 printk(KERN_ERR 4417 "raid5: couldn't allocate %dkB for buffers\n", memory); 4418 goto abort; 4419 } else 4420 printk(KERN_INFO "raid5: allocated %dkB for %s\n", 4421 memory, mdname(mddev)); 4422 4423 conf->thread = md_register_thread(raid5d, mddev, "%s_raid5"); 4424 if (!conf->thread) { 4425 printk(KERN_ERR 4426 "raid5: couldn't allocate thread for %s\n", 4427 mdname(mddev)); 4428 goto abort; 4429 } 4430 4431 return conf; 4432 4433 abort: 4434 if (conf) { 4435 shrink_stripes(conf); 4436 safe_put_page(conf->spare_page); 4437 kfree(conf->disks); 4438 kfree(conf->stripe_hashtbl); 4439 kfree(conf); 4440 return ERR_PTR(-EIO); 4441 } else 4442 return ERR_PTR(-ENOMEM); 4443 } 4444 4445 static int run(mddev_t *mddev) 4446 { 4447 raid5_conf_t *conf; 4448 int working_disks = 0; 4449 mdk_rdev_t *rdev; 4450 4451 if (mddev->reshape_position != MaxSector) { 4452 /* Check that we can continue the reshape. 4453 * Currently only disks can change, it must 4454 * increase, and we must be past the point where 4455 * a stripe over-writes itself 4456 */ 4457 sector_t here_new, here_old; 4458 int old_disks; 4459 int max_degraded = (mddev->level == 6 ? 2 : 1); 4460 4461 if (mddev->new_level != mddev->level) { 4462 printk(KERN_ERR "raid5: %s: unsupported reshape " 4463 "required - aborting.\n", 4464 mdname(mddev)); 4465 return -EINVAL; 4466 } 4467 old_disks = mddev->raid_disks - mddev->delta_disks; 4468 /* reshape_position must be on a new-stripe boundary, and one 4469 * further up in new geometry must map after here in old 4470 * geometry. 
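* In the code below, here_new is reshape_position expressed in units
* of one new-geometry stripe (new_chunk sectors times the number of
* data disks) and here_old is the same position in old-geometry
* stripes; requiring here_new < here_old ensures the region being
* written has not yet reached the region still to be read.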
4471 */ 4472 here_new = mddev->reshape_position; 4473 if (sector_div(here_new, (mddev->new_chunk>>9)* 4474 (mddev->raid_disks - max_degraded))) { 4475 printk(KERN_ERR "raid5: reshape_position not " 4476 "on a stripe boundary\n"); 4477 return -EINVAL; 4478 } 4479 /* here_new is the stripe we will write to */ 4480 here_old = mddev->reshape_position; 4481 sector_div(here_old, (mddev->chunk_size>>9)* 4482 (old_disks-max_degraded)); 4483 /* here_old is the first stripe that we might need to read 4484 * from */ 4485 if (here_new >= here_old) { 4486 /* Reading from the same stripe as writing to - bad */ 4487 printk(KERN_ERR "raid5: reshape_position too early for " 4488 "auto-recovery - aborting.\n"); 4489 return -EINVAL; 4490 } 4491 printk(KERN_INFO "raid5: reshape will continue\n"); 4492 /* OK, we should be able to continue; */ 4493 } else { 4494 BUG_ON(mddev->level != mddev->new_level); 4495 BUG_ON(mddev->layout != mddev->new_layout); 4496 BUG_ON(mddev->chunk_size != mddev->new_chunk); 4497 BUG_ON(mddev->delta_disks != 0); 4498 } 4499 4500 if (mddev->private == NULL) 4501 conf = setup_conf(mddev); 4502 else 4503 conf = mddev->private; 4504 4505 if (IS_ERR(conf)) 4506 return PTR_ERR(conf); 4507 4508 mddev->thread = conf->thread; 4509 conf->thread = NULL; 4510 mddev->private = conf; 4511 4512 /* 4513 * 0 for a fully functional array, 1 or 2 for a degraded array. 4514 */ 4515 list_for_each_entry(rdev, &mddev->disks, same_set) 4516 if (rdev->raid_disk >= 0 && 4517 test_bit(In_sync, &rdev->flags)) 4518 working_disks++; 4519 4520 mddev->degraded = conf->raid_disks - working_disks; 4521 4522 if (mddev->degraded > conf->max_degraded) { 4523 printk(KERN_ERR "raid5: not enough operational devices for %s" 4524 " (%d/%d failed)\n", 4525 mdname(mddev), mddev->degraded, conf->raid_disks); 4526 goto abort; 4527 } 4528 4529 /* device size must be a multiple of chunk size */ 4530 mddev->dev_sectors &= ~(mddev->chunk_size / 512 - 1); 4531 mddev->resync_max_sectors = mddev->dev_sectors; 4532 4533 if (mddev->degraded > 0 && 4534 mddev->recovery_cp != MaxSector) { 4535 if (mddev->ok_start_degraded) 4536 printk(KERN_WARNING 4537 "raid5: starting dirty degraded array: %s" 4538 "- data corruption possible.\n", 4539 mdname(mddev)); 4540 else { 4541 printk(KERN_ERR 4542 "raid5: cannot start dirty degraded array for %s\n", 4543 mdname(mddev)); 4544 goto abort; 4545 } 4546 } 4547 4548 if (mddev->degraded == 0) 4549 printk("raid5: raid level %d set %s active with %d out of %d" 4550 " devices, algorithm %d\n", conf->level, mdname(mddev), 4551 mddev->raid_disks-mddev->degraded, mddev->raid_disks, 4552 mddev->new_layout); 4553 else 4554 printk(KERN_ALERT "raid5: raid level %d set %s active with %d" 4555 " out of %d devices, algorithm %d\n", conf->level, 4556 mdname(mddev), mddev->raid_disks - mddev->degraded, 4557 mddev->raid_disks, mddev->new_layout); 4558 4559 print_raid5_conf(conf); 4560 4561 if (conf->reshape_progress != MaxSector) { 4562 printk("...ok start reshape thread\n"); 4563 conf->reshape_safe = conf->reshape_progress; 4564 atomic_set(&conf->reshape_stripes, 0); 4565 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4566 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 4567 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 4568 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 4569 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 4570 "%s_reshape"); 4571 } 4572 4573 /* read-ahead size must cover two whole stripes, which is 4574 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices 4575 */ 4576 { 
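/* One stripe of read-ahead covers chunk_size bytes on each data
* disk; express that in pages and ask for at least twice that.
*/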
4577 int data_disks = conf->previous_raid_disks - conf->max_degraded; 4578 int stripe = data_disks * 4579 (mddev->chunk_size / PAGE_SIZE); 4580 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 4581 mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 4582 } 4583 4584 /* Ok, everything is just fine now */ 4585 if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group)) 4586 printk(KERN_WARNING 4587 "raid5: failed to create sysfs attributes for %s\n", 4588 mdname(mddev)); 4589 4590 mddev->queue->queue_lock = &conf->device_lock; 4591 4592 mddev->queue->unplug_fn = raid5_unplug_device; 4593 mddev->queue->backing_dev_info.congested_data = mddev; 4594 mddev->queue->backing_dev_info.congested_fn = raid5_congested; 4595 4596 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); 4597 4598 blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec); 4599 4600 return 0; 4601 abort: 4602 md_unregister_thread(mddev->thread); 4603 mddev->thread = NULL; 4604 if (conf) { 4605 shrink_stripes(conf); 4606 print_raid5_conf(conf); 4607 safe_put_page(conf->spare_page); 4608 kfree(conf->disks); 4609 kfree(conf->stripe_hashtbl); 4610 kfree(conf); 4611 } 4612 mddev->private = NULL; 4613 printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev)); 4614 return -EIO; 4615 } 4616 4617 4618 4619 static int stop(mddev_t *mddev) 4620 { 4621 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 4622 4623 md_unregister_thread(mddev->thread); 4624 mddev->thread = NULL; 4625 shrink_stripes(conf); 4626 kfree(conf->stripe_hashtbl); 4627 mddev->queue->backing_dev_info.congested_fn = NULL; 4628 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 4629 sysfs_remove_group(&mddev->kobj, &raid5_attrs_group); 4630 kfree(conf->disks); 4631 kfree(conf); 4632 mddev->private = NULL; 4633 return 0; 4634 } 4635 4636 #ifdef DEBUG 4637 static void print_sh(struct seq_file *seq, struct stripe_head *sh) 4638 { 4639 int i; 4640 4641 seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n", 4642 (unsigned long long)sh->sector, sh->pd_idx, sh->state); 4643 seq_printf(seq, "sh %llu, count %d.\n", 4644 (unsigned long long)sh->sector, atomic_read(&sh->count)); 4645 seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector); 4646 for (i = 0; i < sh->disks; i++) { 4647 seq_printf(seq, "(cache%d: %p %ld) ", 4648 i, sh->dev[i].page, sh->dev[i].flags); 4649 } 4650 seq_printf(seq, "\n"); 4651 } 4652 4653 static void printall(struct seq_file *seq, raid5_conf_t *conf) 4654 { 4655 struct stripe_head *sh; 4656 struct hlist_node *hn; 4657 int i; 4658 4659 spin_lock_irq(&conf->device_lock); 4660 for (i = 0; i < NR_HASH; i++) { 4661 hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) { 4662 if (sh->raid_conf != conf) 4663 continue; 4664 print_sh(seq, sh); 4665 } 4666 } 4667 spin_unlock_irq(&conf->device_lock); 4668 } 4669 #endif 4670 4671 static void status(struct seq_file *seq, mddev_t *mddev) 4672 { 4673 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 4674 int i; 4675 4676 seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout); 4677 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); 4678 for (i = 0; i < conf->raid_disks; i++) 4679 seq_printf (seq, "%s", 4680 conf->disks[i].rdev && 4681 test_bit(In_sync, &conf->disks[i].rdev->flags) ? 
"U" : "_"); 4682 seq_printf (seq, "]"); 4683 #ifdef DEBUG 4684 seq_printf (seq, "\n"); 4685 printall(seq, conf); 4686 #endif 4687 } 4688 4689 static void print_raid5_conf (raid5_conf_t *conf) 4690 { 4691 int i; 4692 struct disk_info *tmp; 4693 4694 printk("RAID5 conf printout:\n"); 4695 if (!conf) { 4696 printk("(conf==NULL)\n"); 4697 return; 4698 } 4699 printk(" --- rd:%d wd:%d\n", conf->raid_disks, 4700 conf->raid_disks - conf->mddev->degraded); 4701 4702 for (i = 0; i < conf->raid_disks; i++) { 4703 char b[BDEVNAME_SIZE]; 4704 tmp = conf->disks + i; 4705 if (tmp->rdev) 4706 printk(" disk %d, o:%d, dev:%s\n", 4707 i, !test_bit(Faulty, &tmp->rdev->flags), 4708 bdevname(tmp->rdev->bdev,b)); 4709 } 4710 } 4711 4712 static int raid5_spare_active(mddev_t *mddev) 4713 { 4714 int i; 4715 raid5_conf_t *conf = mddev->private; 4716 struct disk_info *tmp; 4717 4718 for (i = 0; i < conf->raid_disks; i++) { 4719 tmp = conf->disks + i; 4720 if (tmp->rdev 4721 && !test_bit(Faulty, &tmp->rdev->flags) 4722 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { 4723 unsigned long flags; 4724 spin_lock_irqsave(&conf->device_lock, flags); 4725 mddev->degraded--; 4726 spin_unlock_irqrestore(&conf->device_lock, flags); 4727 } 4728 } 4729 print_raid5_conf(conf); 4730 return 0; 4731 } 4732 4733 static int raid5_remove_disk(mddev_t *mddev, int number) 4734 { 4735 raid5_conf_t *conf = mddev->private; 4736 int err = 0; 4737 mdk_rdev_t *rdev; 4738 struct disk_info *p = conf->disks + number; 4739 4740 print_raid5_conf(conf); 4741 rdev = p->rdev; 4742 if (rdev) { 4743 if (number >= conf->raid_disks && 4744 conf->reshape_progress == MaxSector) 4745 clear_bit(In_sync, &rdev->flags); 4746 4747 if (test_bit(In_sync, &rdev->flags) || 4748 atomic_read(&rdev->nr_pending)) { 4749 err = -EBUSY; 4750 goto abort; 4751 } 4752 /* Only remove non-faulty devices if recovery 4753 * isn't possible. 4754 */ 4755 if (!test_bit(Faulty, &rdev->flags) && 4756 mddev->degraded <= conf->max_degraded && 4757 number < conf->raid_disks) { 4758 err = -EBUSY; 4759 goto abort; 4760 } 4761 p->rdev = NULL; 4762 synchronize_rcu(); 4763 if (atomic_read(&rdev->nr_pending)) { 4764 /* lost the race, try later */ 4765 err = -EBUSY; 4766 p->rdev = rdev; 4767 } 4768 } 4769 abort: 4770 4771 print_raid5_conf(conf); 4772 return err; 4773 } 4774 4775 static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) 4776 { 4777 raid5_conf_t *conf = mddev->private; 4778 int err = -EEXIST; 4779 int disk; 4780 struct disk_info *p; 4781 int first = 0; 4782 int last = conf->raid_disks - 1; 4783 4784 if (mddev->degraded > conf->max_degraded) 4785 /* no point adding a device */ 4786 return -EINVAL; 4787 4788 if (rdev->raid_disk >= 0) 4789 first = last = rdev->raid_disk; 4790 4791 /* 4792 * find the disk ... but prefer rdev->saved_raid_disk 4793 * if possible. 4794 */ 4795 if (rdev->saved_raid_disk >= 0 && 4796 rdev->saved_raid_disk >= first && 4797 conf->disks[rdev->saved_raid_disk].rdev == NULL) 4798 disk = rdev->saved_raid_disk; 4799 else 4800 disk = first; 4801 for ( ; disk <= last ; disk++) 4802 if ((p=conf->disks + disk)->rdev == NULL) { 4803 clear_bit(In_sync, &rdev->flags); 4804 rdev->raid_disk = disk; 4805 err = 0; 4806 if (rdev->saved_raid_disk != disk) 4807 conf->fullsync = 1; 4808 rcu_assign_pointer(p->rdev, rdev); 4809 break; 4810 } 4811 print_raid5_conf(conf); 4812 return err; 4813 } 4814 4815 static int raid5_resize(mddev_t *mddev, sector_t sectors) 4816 { 4817 /* no resync is happening, and there is enough space 4818 * on all devices, so we can resize. 
4819 * We need to make sure resync covers any new space. 4820 * If the array is shrinking we should possibly wait until 4821 * any io in the removed space completes, but it hardly seems 4822 * worth it. 4823 */ 4824 sectors &= ~((sector_t)mddev->chunk_size/512 - 1); 4825 md_set_array_sectors(mddev, raid5_size(mddev, sectors, 4826 mddev->raid_disks)); 4827 if (mddev->array_sectors > 4828 raid5_size(mddev, sectors, mddev->raid_disks)) 4829 return -EINVAL; 4830 set_capacity(mddev->gendisk, mddev->array_sectors); 4831 mddev->changed = 1; 4832 if (sectors > mddev->dev_sectors && mddev->recovery_cp == MaxSector) { 4833 mddev->recovery_cp = mddev->dev_sectors; 4834 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4835 } 4836 mddev->dev_sectors = sectors; 4837 mddev->resync_max_sectors = sectors; 4838 return 0; 4839 } 4840 4841 static int raid5_check_reshape(mddev_t *mddev) 4842 { 4843 raid5_conf_t *conf = mddev_to_conf(mddev); 4844 4845 if (mddev->delta_disks == 0 && 4846 mddev->new_layout == mddev->layout && 4847 mddev->new_chunk == mddev->chunk_size) 4848 return -EINVAL; /* nothing to do */ 4849 if (mddev->bitmap) 4850 /* Cannot grow a bitmap yet */ 4851 return -EBUSY; 4852 if (mddev->degraded > conf->max_degraded) 4853 return -EINVAL; 4854 if (mddev->delta_disks < 0) { 4855 /* We might be able to shrink, but the devices must 4856 * be made bigger first. 4857 * For raid6, 4 is the minimum size. 4858 * Otherwise 2 is the minimum. 4859 */ 4860 int min = 2; 4861 if (mddev->level == 6) 4862 min = 4; 4863 if (mddev->raid_disks + mddev->delta_disks < min) 4864 return -EINVAL; 4865 } 4866 4867 /* Can only proceed if there are plenty of stripe_heads. 4868 * We need a minimum of one full stripe, and for sensible progress 4869 * it is best to have about 4 times that. 4870 * If we require 4 times, then the default 256 4K stripe_heads will 4871 * allow for chunk sizes up to 256K, which is probably OK. 4872 * If the chunk size is greater, user-space should request more 4873 * stripe_heads first. 4874 */ 4875 if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes || 4876 (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) { 4877 printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n", 4878 (max(mddev->chunk_size, mddev->new_chunk) 4879 / STRIPE_SIZE)*4); 4880 return -ENOSPC; 4881 } 4882 4883 return resize_stripes(conf, conf->raid_disks + mddev->delta_disks); 4884 } 4885 4886 static int raid5_start_reshape(mddev_t *mddev) 4887 { 4888 raid5_conf_t *conf = mddev_to_conf(mddev); 4889 mdk_rdev_t *rdev; 4890 int spares = 0; 4891 int added_devices = 0; 4892 unsigned long flags; 4893 4894 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4895 return -EBUSY; 4896 4897 list_for_each_entry(rdev, &mddev->disks, same_set) 4898 if (rdev->raid_disk < 0 && 4899 !test_bit(Faulty, &rdev->flags)) 4900 spares++; 4901 4902 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) 4903 /* Not enough devices even to make a degraded array 4904 * of that size 4905 */ 4906 return -EINVAL; 4907 4908 /* Refuse to reduce size of the array. Any reductions in 4909 * array size must be through explicit setting of array_size 4910 * attribute.
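* The check below compares raid5_size() for the post-reshape disk
* count against the current array_sectors to enforce this.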
4911 */ 4912 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks) 4913 < mddev->array_sectors) { 4914 printk(KERN_ERR "md: %s: array size must be reduced " 4915 "before number of disks\n", mdname(mddev)); 4916 return -EINVAL; 4917 } 4918 4919 atomic_set(&conf->reshape_stripes, 0); 4920 spin_lock_irq(&conf->device_lock); 4921 conf->previous_raid_disks = conf->raid_disks; 4922 conf->raid_disks += mddev->delta_disks; 4923 conf->prev_chunk = conf->chunk_size; 4924 conf->chunk_size = mddev->new_chunk; 4925 conf->prev_algo = conf->algorithm; 4926 conf->algorithm = mddev->new_layout; 4927 if (mddev->delta_disks < 0) 4928 conf->reshape_progress = raid5_size(mddev, 0, 0); 4929 else 4930 conf->reshape_progress = 0; 4931 conf->reshape_safe = conf->reshape_progress; 4932 conf->generation++; 4933 spin_unlock_irq(&conf->device_lock); 4934 4935 /* Add some new drives, as many as will fit. 4936 * We know there are enough to make the newly sized array work. 4937 */ 4938 list_for_each_entry(rdev, &mddev->disks, same_set) 4939 if (rdev->raid_disk < 0 && 4940 !test_bit(Faulty, &rdev->flags)) { 4941 if (raid5_add_disk(mddev, rdev) == 0) { 4942 char nm[20]; 4943 set_bit(In_sync, &rdev->flags); 4944 added_devices++; 4945 rdev->recovery_offset = 0; 4946 sprintf(nm, "rd%d", rdev->raid_disk); 4947 if (sysfs_create_link(&mddev->kobj, 4948 &rdev->kobj, nm)) 4949 printk(KERN_WARNING 4950 "raid5: failed to create " 4951 " link %s for %s\n", 4952 nm, mdname(mddev)); 4953 } else 4954 break; 4955 } 4956 4957 if (mddev->delta_disks > 0) { 4958 spin_lock_irqsave(&conf->device_lock, flags); 4959 mddev->degraded = (conf->raid_disks - conf->previous_raid_disks) 4960 - added_devices; 4961 spin_unlock_irqrestore(&conf->device_lock, flags); 4962 } 4963 mddev->raid_disks = conf->raid_disks; 4964 mddev->reshape_position = 0; 4965 set_bit(MD_CHANGE_DEVS, &mddev->flags); 4966 4967 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4968 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 4969 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 4970 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 4971 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 4972 "%s_reshape"); 4973 if (!mddev->sync_thread) { 4974 mddev->recovery = 0; 4975 spin_lock_irq(&conf->device_lock); 4976 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; 4977 conf->reshape_progress = MaxSector; 4978 spin_unlock_irq(&conf->device_lock); 4979 return -EAGAIN; 4980 } 4981 conf->reshape_checkpoint = jiffies; 4982 md_wakeup_thread(mddev->sync_thread); 4983 md_new_event(mddev); 4984 return 0; 4985 } 4986 4987 /* This is called from the reshape thread and should make any 4988 * changes needed in 'conf' 4989 */ 4990 static void end_reshape(raid5_conf_t *conf) 4991 { 4992 4993 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { 4994 4995 spin_lock_irq(&conf->device_lock); 4996 conf->previous_raid_disks = conf->raid_disks; 4997 conf->reshape_progress = MaxSector; 4998 spin_unlock_irq(&conf->device_lock); 4999 wake_up(&conf->wait_for_overlap); 5000 5001 /* read-ahead size must cover two whole stripes, which is 5002 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices 5003 */ 5004 { 5005 int data_disks = conf->raid_disks - conf->max_degraded; 5006 int stripe = data_disks * (conf->chunk_size 5007 / PAGE_SIZE); 5008 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 5009 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 5010 } 5011 } 5012 } 5013 5014 /* This is called from the raid5d thread with mddev_lock held. 
5015 * It makes config changes to the device. 5016 */ 5017 static void raid5_finish_reshape(mddev_t *mddev) 5018 { 5019 struct block_device *bdev; 5020 raid5_conf_t *conf = mddev_to_conf(mddev); 5021 5022 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 5023 5024 if (mddev->delta_disks > 0) { 5025 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); 5026 set_capacity(mddev->gendisk, mddev->array_sectors); 5027 mddev->changed = 1; 5028 5029 bdev = bdget_disk(mddev->gendisk, 0); 5030 if (bdev) { 5031 mutex_lock(&bdev->bd_inode->i_mutex); 5032 i_size_write(bdev->bd_inode, 5033 (loff_t)mddev->array_sectors << 9); 5034 mutex_unlock(&bdev->bd_inode->i_mutex); 5035 bdput(bdev); 5036 } 5037 } else { 5038 int d; 5039 mddev->degraded = conf->raid_disks; 5040 for (d = 0; d < conf->raid_disks ; d++) 5041 if (conf->disks[d].rdev && 5042 test_bit(In_sync, 5043 &conf->disks[d].rdev->flags)) 5044 mddev->degraded--; 5045 for (d = conf->raid_disks ; 5046 d < conf->raid_disks - mddev->delta_disks; 5047 d++) 5048 raid5_remove_disk(mddev, d); 5049 } 5050 mddev->layout = conf->algorithm; 5051 mddev->chunk_size = conf->chunk_size; 5052 mddev->reshape_position = MaxSector; 5053 mddev->delta_disks = 0; 5054 } 5055 } 5056 5057 static void raid5_quiesce(mddev_t *mddev, int state) 5058 { 5059 raid5_conf_t *conf = mddev_to_conf(mddev); 5060 5061 switch(state) { 5062 case 2: /* resume for a suspend */ 5063 wake_up(&conf->wait_for_overlap); 5064 break; 5065 5066 case 1: /* stop all writes */ 5067 spin_lock_irq(&conf->device_lock); 5068 conf->quiesce = 1; 5069 wait_event_lock_irq(conf->wait_for_stripe, 5070 atomic_read(&conf->active_stripes) == 0 && 5071 atomic_read(&conf->active_aligned_reads) == 0, 5072 conf->device_lock, /* nothing */); 5073 spin_unlock_irq(&conf->device_lock); 5074 break; 5075 5076 case 0: /* re-enable writes */ 5077 spin_lock_irq(&conf->device_lock); 5078 conf->quiesce = 0; 5079 wake_up(&conf->wait_for_stripe); 5080 wake_up(&conf->wait_for_overlap); 5081 spin_unlock_irq(&conf->device_lock); 5082 break; 5083 } 5084 } 5085 5086 5087 static void *raid5_takeover_raid1(mddev_t *mddev) 5088 { 5089 int chunksect; 5090 5091 if (mddev->raid_disks != 2 || 5092 mddev->degraded > 1) 5093 return ERR_PTR(-EINVAL); 5094 5095 /* Should check if there are write-behind devices? 
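* raid5 has no equivalent of raid1's write-behind mode, so such a
* configuration could not be carried across this takeover.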
*/ 5096 5097 chunksect = 64*2; /* 64K by default */ 5098 5099 /* The array must be an exact multiple of chunksize */ 5100 while (chunksect && (mddev->array_sectors & (chunksect-1))) 5101 chunksect >>= 1; 5102 5103 if ((chunksect<<9) < STRIPE_SIZE) 5104 /* array size does not allow a suitable chunk size */ 5105 return ERR_PTR(-EINVAL); 5106 5107 mddev->new_level = 5; 5108 mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC; 5109 mddev->new_chunk = chunksect << 9; 5110 5111 return setup_conf(mddev); 5112 } 5113 5114 static void *raid5_takeover_raid6(mddev_t *mddev) 5115 { 5116 int new_layout; 5117 5118 switch (mddev->layout) { 5119 case ALGORITHM_LEFT_ASYMMETRIC_6: 5120 new_layout = ALGORITHM_LEFT_ASYMMETRIC; 5121 break; 5122 case ALGORITHM_RIGHT_ASYMMETRIC_6: 5123 new_layout = ALGORITHM_RIGHT_ASYMMETRIC; 5124 break; 5125 case ALGORITHM_LEFT_SYMMETRIC_6: 5126 new_layout = ALGORITHM_LEFT_SYMMETRIC; 5127 break; 5128 case ALGORITHM_RIGHT_SYMMETRIC_6: 5129 new_layout = ALGORITHM_RIGHT_SYMMETRIC; 5130 break; 5131 case ALGORITHM_PARITY_0_6: 5132 new_layout = ALGORITHM_PARITY_0; 5133 break; 5134 case ALGORITHM_PARITY_N: 5135 new_layout = ALGORITHM_PARITY_N; 5136 break; 5137 default: 5138 return ERR_PTR(-EINVAL); 5139 } 5140 mddev->new_level = 5; 5141 mddev->new_layout = new_layout; 5142 mddev->delta_disks = -1; 5143 mddev->raid_disks -= 1; 5144 return setup_conf(mddev); 5145 } 5146 5147 5148 static int raid5_reconfig(mddev_t *mddev, int new_layout, int new_chunk) 5149 { 5150 /* For a 2-drive array, the layout and chunk size can be changed 5151 * immediately as not restriping is needed. 5152 * For larger arrays we record the new value - after validation 5153 * to be used by a reshape pass. 5154 */ 5155 raid5_conf_t *conf = mddev_to_conf(mddev); 5156 5157 if (new_layout >= 0 && !algorithm_valid_raid5(new_layout)) 5158 return -EINVAL; 5159 if (new_chunk > 0) { 5160 if (new_chunk & (new_chunk-1)) 5161 /* not a power of 2 */ 5162 return -EINVAL; 5163 if (new_chunk < PAGE_SIZE) 5164 return -EINVAL; 5165 if (mddev->array_sectors & ((new_chunk>>9)-1)) 5166 /* not factor of array size */ 5167 return -EINVAL; 5168 } 5169 5170 /* They look valid */ 5171 5172 if (mddev->raid_disks == 2) { 5173 5174 if (new_layout >= 0) { 5175 conf->algorithm = new_layout; 5176 mddev->layout = mddev->new_layout = new_layout; 5177 } 5178 if (new_chunk > 0) { 5179 conf->chunk_size = new_chunk; 5180 mddev->chunk_size = mddev->new_chunk = new_chunk; 5181 } 5182 set_bit(MD_CHANGE_DEVS, &mddev->flags); 5183 md_wakeup_thread(mddev->thread); 5184 } else { 5185 if (new_layout >= 0) 5186 mddev->new_layout = new_layout; 5187 if (new_chunk > 0) 5188 mddev->new_chunk = new_chunk; 5189 } 5190 return 0; 5191 } 5192 5193 static int raid6_reconfig(mddev_t *mddev, int new_layout, int new_chunk) 5194 { 5195 if (new_layout >= 0 && !algorithm_valid_raid6(new_layout)) 5196 return -EINVAL; 5197 if (new_chunk > 0) { 5198 if (new_chunk & (new_chunk-1)) 5199 /* not a power of 2 */ 5200 return -EINVAL; 5201 if (new_chunk < PAGE_SIZE) 5202 return -EINVAL; 5203 if (mddev->array_sectors & ((new_chunk>>9)-1)) 5204 /* not factor of array size */ 5205 return -EINVAL; 5206 } 5207 5208 /* They look valid */ 5209 5210 if (new_layout >= 0) 5211 mddev->new_layout = new_layout; 5212 if (new_chunk > 0) 5213 mddev->new_chunk = new_chunk; 5214 5215 return 0; 5216 } 5217 5218 static void *raid5_takeover(mddev_t *mddev) 5219 { 5220 /* raid5 can take over: 5221 * raid0 - if all devices are the same - make it a raid4 layout 5222 * raid1 - if there are two drives. 
We need to know the chunk size 5223 * raid4 - trivial - just use a raid4 layout. 5224 * raid6 - Providing it is a *_6 layout 5225 * 5226 * For now, just do raid1 5227 */ 5228 5229 if (mddev->level == 1) 5230 return raid5_takeover_raid1(mddev); 5231 if (mddev->level == 4) { 5232 mddev->new_layout = ALGORITHM_PARITY_N; 5233 mddev->new_level = 5; 5234 return setup_conf(mddev); 5235 } 5236 if (mddev->level == 6) 5237 return raid5_takeover_raid6(mddev); 5238 5239 return ERR_PTR(-EINVAL); 5240 } 5241 5242 5243 static struct mdk_personality raid5_personality; 5244 5245 static void *raid6_takeover(mddev_t *mddev) 5246 { 5247 /* Currently can only take over a raid5. We map the 5248 * personality to an equivalent raid6 personality 5249 * with the Q block at the end. 5250 */ 5251 int new_layout; 5252 5253 if (mddev->pers != &raid5_personality) 5254 return ERR_PTR(-EINVAL); 5255 if (mddev->degraded > 1) 5256 return ERR_PTR(-EINVAL); 5257 if (mddev->raid_disks > 253) 5258 return ERR_PTR(-EINVAL); 5259 if (mddev->raid_disks < 3) 5260 return ERR_PTR(-EINVAL); 5261 5262 switch (mddev->layout) { 5263 case ALGORITHM_LEFT_ASYMMETRIC: 5264 new_layout = ALGORITHM_LEFT_ASYMMETRIC_6; 5265 break; 5266 case ALGORITHM_RIGHT_ASYMMETRIC: 5267 new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6; 5268 break; 5269 case ALGORITHM_LEFT_SYMMETRIC: 5270 new_layout = ALGORITHM_LEFT_SYMMETRIC_6; 5271 break; 5272 case ALGORITHM_RIGHT_SYMMETRIC: 5273 new_layout = ALGORITHM_RIGHT_SYMMETRIC_6; 5274 break; 5275 case ALGORITHM_PARITY_0: 5276 new_layout = ALGORITHM_PARITY_0_6; 5277 break; 5278 case ALGORITHM_PARITY_N: 5279 new_layout = ALGORITHM_PARITY_N; 5280 break; 5281 default: 5282 return ERR_PTR(-EINVAL); 5283 } 5284 mddev->new_level = 6; 5285 mddev->new_layout = new_layout; 5286 mddev->delta_disks = 1; 5287 mddev->raid_disks += 1; 5288 return setup_conf(mddev); 5289 } 5290 5291 5292 static struct mdk_personality raid6_personality = 5293 { 5294 .name = "raid6", 5295 .level = 6, 5296 .owner = THIS_MODULE, 5297 .make_request = make_request, 5298 .run = run, 5299 .stop = stop, 5300 .status = status, 5301 .error_handler = error, 5302 .hot_add_disk = raid5_add_disk, 5303 .hot_remove_disk= raid5_remove_disk, 5304 .spare_active = raid5_spare_active, 5305 .sync_request = sync_request, 5306 .resize = raid5_resize, 5307 .size = raid5_size, 5308 .check_reshape = raid5_check_reshape, 5309 .start_reshape = raid5_start_reshape, 5310 .finish_reshape = raid5_finish_reshape, 5311 .quiesce = raid5_quiesce, 5312 .takeover = raid6_takeover, 5313 .reconfig = raid6_reconfig, 5314 }; 5315 static struct mdk_personality raid5_personality = 5316 { 5317 .name = "raid5", 5318 .level = 5, 5319 .owner = THIS_MODULE, 5320 .make_request = make_request, 5321 .run = run, 5322 .stop = stop, 5323 .status = status, 5324 .error_handler = error, 5325 .hot_add_disk = raid5_add_disk, 5326 .hot_remove_disk= raid5_remove_disk, 5327 .spare_active = raid5_spare_active, 5328 .sync_request = sync_request, 5329 .resize = raid5_resize, 5330 .size = raid5_size, 5331 .check_reshape = raid5_check_reshape, 5332 .start_reshape = raid5_start_reshape, 5333 .finish_reshape = raid5_finish_reshape, 5334 .quiesce = raid5_quiesce, 5335 .takeover = raid5_takeover, 5336 .reconfig = raid5_reconfig, 5337 }; 5338 5339 static struct mdk_personality raid4_personality = 5340 { 5341 .name = "raid4", 5342 .level = 4, 5343 .owner = THIS_MODULE, 5344 .make_request = make_request, 5345 .run = run, 5346 .stop = stop, 5347 .status = status, 5348 .error_handler = error, 5349 .hot_add_disk = raid5_add_disk, 5350 
.hot_remove_disk= raid5_remove_disk, 5351 .spare_active = raid5_spare_active, 5352 .sync_request = sync_request, 5353 .resize = raid5_resize, 5354 .size = raid5_size, 5355 .check_reshape = raid5_check_reshape, 5356 .start_reshape = raid5_start_reshape, 5357 .finish_reshape = raid5_finish_reshape, 5358 .quiesce = raid5_quiesce, 5359 }; 5360 5361 static int __init raid5_init(void) 5362 { 5363 register_md_personality(&raid6_personality); 5364 register_md_personality(&raid5_personality); 5365 register_md_personality(&raid4_personality); 5366 return 0; 5367 } 5368 5369 static void raid5_exit(void) 5370 { 5371 unregister_md_personality(&raid6_personality); 5372 unregister_md_personality(&raid5_personality); 5373 unregister_md_personality(&raid4_personality); 5374 } 5375 5376 module_init(raid5_init); 5377 module_exit(raid5_exit); 5378 MODULE_LICENSE("GPL"); 5379 MODULE_ALIAS("md-personality-4"); /* RAID5 */ 5380 MODULE_ALIAS("md-raid5"); 5381 MODULE_ALIAS("md-raid4"); 5382 MODULE_ALIAS("md-level-5"); 5383 MODULE_ALIAS("md-level-4"); 5384 MODULE_ALIAS("md-personality-8"); /* RAID6 */ 5385 MODULE_ALIAS("md-raid6"); 5386 MODULE_ALIAS("md-level-6"); 5387 5388 /* This used to be two separate modules, they were: */ 5389 MODULE_ALIAS("raid5"); 5390 MODULE_ALIAS("raid6"); 5391