1 /* 2 * raid5.c : Multiple Devices driver for Linux 3 * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman 4 * Copyright (C) 1999, 2000 Ingo Molnar 5 * Copyright (C) 2002, 2003 H. Peter Anvin 6 * 7 * RAID-4/5/6 management functions. 8 * Thanks to Penguin Computing for making the RAID-6 development possible 9 * by donating a test server! 10 * 11 * This program is free software; you can redistribute it and/or modify 12 * it under the terms of the GNU General Public License as published by 13 * the Free Software Foundation; either version 2, or (at your option) 14 * any later version. 15 * 16 * You should have received a copy of the GNU General Public License 17 * (for example /usr/src/linux/COPYING); if not, write to the Free 18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19 */ 20 21 /* 22 * BITMAP UNPLUGGING: 23 * 24 * The sequencing for updating the bitmap reliably is a little 25 * subtle (and I got it wrong the first time) so it deserves some 26 * explanation. 27 * 28 * We group bitmap updates into batches. Each batch has a number. 29 * We may write out several batches at once, but that isn't very important. 30 * conf->bm_write is the number of the last batch successfully written. 31 * conf->bm_flush is the number of the last batch that was closed to 32 * new additions. 33 * When we discover that we will need to write to any block in a stripe 34 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq 35 * the number of the batch it will be in. This is bm_flush+1. 36 * When we are ready to do a write, if that batch hasn't been written yet, 37 * we plug the array and queue the stripe for later. 38 * When an unplug happens, we increment bm_flush, thus closing the current 39 * batch. 40 * When we notice that bm_flush > bm_write, we write out all pending updates 41 * to the bitmap, and advance bm_write to where bm_flush was. 42 * This may occasionally write a bit out twice, but is sure never to 43 * miss any bits. 44 */ 45 46 #include <linux/blkdev.h> 47 #include <linux/kthread.h> 48 #include <linux/raid/pq.h> 49 #include <linux/async_tx.h> 50 #include <linux/seq_file.h> 51 #include "md.h" 52 #include "raid5.h" 53 #include "bitmap.h" 54 55 /* 56 * Stripe cache 57 */ 58 59 #define NR_STRIPES 256 60 #define STRIPE_SIZE PAGE_SIZE 61 #define STRIPE_SHIFT (PAGE_SHIFT - 9) 62 #define STRIPE_SECTORS (STRIPE_SIZE>>9) 63 #define IO_THRESHOLD 1 64 #define BYPASS_THRESHOLD 1 65 #define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head)) 66 #define HASH_MASK (NR_HASH - 1) 67 68 #define stripe_hash(conf, sect) (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK])) 69 70 /* bio's attached to a stripe+device for I/O are linked together in bi_sector 71 * order without overlap. There may be several bio's per stripe+device, and 72 * a bio could span several devices. 73 * When walking this list for a particular stripe+device, we must never proceed 74 * beyond a bio that extends past this device, as the next bio might no longer 75 * be valid. 76 * This macro is used to determine the 'next' bio in the list, given the sector 77 * of the current stripe+device 78 */ 79 #define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? 
(bio)->bi_next : NULL)
/*
 * The following can be used to debug the driver
 */
#define RAID5_PARANOIA	1
#if RAID5_PARANOIA && defined(CONFIG_SMP)
# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
#else
# define CHECK_DEVLOCK()
#endif

#ifdef DEBUG
#define inline
#define __inline__
#endif

#define printk_rl(args...) ((void) (printk_ratelimit() && printk(args)))

/*
 * We maintain a biased count of active stripes in the bottom 16 bits of
 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
 */
static inline int raid5_bi_phys_segments(struct bio *bio)
{
	return bio->bi_phys_segments & 0xffff;
}

static inline int raid5_bi_hw_segments(struct bio *bio)
{
	return (bio->bi_phys_segments >> 16) & 0xffff;
}

static inline int raid5_dec_bi_phys_segments(struct bio *bio)
{
	--bio->bi_phys_segments;
	return raid5_bi_phys_segments(bio);
}

static inline int raid5_dec_bi_hw_segments(struct bio *bio)
{
	unsigned short val = raid5_bi_hw_segments(bio);

	--val;
	bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
	return val;
}

static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
{
	bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
}

/* Find first data disk in a raid6 stripe */
static inline int raid6_d0(struct stripe_head *sh)
{
	if (sh->ddf_layout)
		/* ddf always starts from first device */
		return 0;
	/* md starts just after Q block */
	if (sh->qd_idx == sh->disks - 1)
		return 0;
	else
		return sh->qd_idx + 1;
}
static inline int raid6_next_disk(int disk, int raid_disks)
{
	disk++;
	return (disk < raid_disks) ? disk : 0;
}

/* When walking through the disks in a raid6, starting at raid6_d0,
 * we need to map each disk to a 'slot', where the data disks are slot
 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
 * is raid_disks-1.  This helper does that mapping.
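 *
 * For example (illustrative values, not taken from any particular array):
 * with 6 devices, a non-DDF layout, pd_idx == 4 and qd_idx == 5,
 * raid6_d0() returns 0 and walking the devices with raid6_next_disk()
 * visits 0,1,2,3,4,5; raid6_idx_to_slot() then hands out data slots 0..3,
 * returns syndrome_disks (4) for the P device and syndrome_disks+1 (5)
 * for the Q device, which is the ordering gen_syndrome() expects.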
153 */ 154 static int raid6_idx_to_slot(int idx, struct stripe_head *sh, 155 int *count, int syndrome_disks) 156 { 157 int slot; 158 159 if (idx == sh->pd_idx) 160 return syndrome_disks; 161 if (idx == sh->qd_idx) 162 return syndrome_disks + 1; 163 slot = (*count)++; 164 return slot; 165 } 166 167 static void return_io(struct bio *return_bi) 168 { 169 struct bio *bi = return_bi; 170 while (bi) { 171 172 return_bi = bi->bi_next; 173 bi->bi_next = NULL; 174 bi->bi_size = 0; 175 bio_endio(bi, 0); 176 bi = return_bi; 177 } 178 } 179 180 static void print_raid5_conf (raid5_conf_t *conf); 181 182 static int stripe_operations_active(struct stripe_head *sh) 183 { 184 return sh->check_state || sh->reconstruct_state || 185 test_bit(STRIPE_BIOFILL_RUN, &sh->state) || 186 test_bit(STRIPE_COMPUTE_RUN, &sh->state); 187 } 188 189 static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh) 190 { 191 if (atomic_dec_and_test(&sh->count)) { 192 BUG_ON(!list_empty(&sh->lru)); 193 BUG_ON(atomic_read(&conf->active_stripes)==0); 194 if (test_bit(STRIPE_HANDLE, &sh->state)) { 195 if (test_bit(STRIPE_DELAYED, &sh->state)) { 196 list_add_tail(&sh->lru, &conf->delayed_list); 197 blk_plug_device(conf->mddev->queue); 198 } else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && 199 sh->bm_seq - conf->seq_write > 0) { 200 list_add_tail(&sh->lru, &conf->bitmap_list); 201 blk_plug_device(conf->mddev->queue); 202 } else { 203 clear_bit(STRIPE_BIT_DELAY, &sh->state); 204 list_add_tail(&sh->lru, &conf->handle_list); 205 } 206 md_wakeup_thread(conf->mddev->thread); 207 } else { 208 BUG_ON(stripe_operations_active(sh)); 209 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 210 atomic_dec(&conf->preread_active_stripes); 211 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) 212 md_wakeup_thread(conf->mddev->thread); 213 } 214 atomic_dec(&conf->active_stripes); 215 if (!test_bit(STRIPE_EXPANDING, &sh->state)) { 216 list_add_tail(&sh->lru, &conf->inactive_list); 217 wake_up(&conf->wait_for_stripe); 218 if (conf->retry_read_aligned) 219 md_wakeup_thread(conf->mddev->thread); 220 } 221 } 222 } 223 } 224 225 static void release_stripe(struct stripe_head *sh) 226 { 227 raid5_conf_t *conf = sh->raid_conf; 228 unsigned long flags; 229 230 spin_lock_irqsave(&conf->device_lock, flags); 231 __release_stripe(conf, sh); 232 spin_unlock_irqrestore(&conf->device_lock, flags); 233 } 234 235 static inline void remove_hash(struct stripe_head *sh) 236 { 237 pr_debug("remove_hash(), stripe %llu\n", 238 (unsigned long long)sh->sector); 239 240 hlist_del_init(&sh->hash); 241 } 242 243 static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh) 244 { 245 struct hlist_head *hp = stripe_hash(conf, sh->sector); 246 247 pr_debug("insert_hash(), stripe %llu\n", 248 (unsigned long long)sh->sector); 249 250 CHECK_DEVLOCK(); 251 hlist_add_head(&sh->hash, hp); 252 } 253 254 255 /* find an idle stripe, make sure it is unhashed, and return it. 
*/ 256 static struct stripe_head *get_free_stripe(raid5_conf_t *conf) 257 { 258 struct stripe_head *sh = NULL; 259 struct list_head *first; 260 261 CHECK_DEVLOCK(); 262 if (list_empty(&conf->inactive_list)) 263 goto out; 264 first = conf->inactive_list.next; 265 sh = list_entry(first, struct stripe_head, lru); 266 list_del_init(first); 267 remove_hash(sh); 268 atomic_inc(&conf->active_stripes); 269 out: 270 return sh; 271 } 272 273 static void shrink_buffers(struct stripe_head *sh, int num) 274 { 275 struct page *p; 276 int i; 277 278 for (i=0; i<num ; i++) { 279 p = sh->dev[i].page; 280 if (!p) 281 continue; 282 sh->dev[i].page = NULL; 283 put_page(p); 284 } 285 } 286 287 static int grow_buffers(struct stripe_head *sh, int num) 288 { 289 int i; 290 291 for (i=0; i<num; i++) { 292 struct page *page; 293 294 if (!(page = alloc_page(GFP_KERNEL))) { 295 return 1; 296 } 297 sh->dev[i].page = page; 298 } 299 return 0; 300 } 301 302 static void raid5_build_block(struct stripe_head *sh, int i, int previous); 303 static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous, 304 struct stripe_head *sh); 305 306 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) 307 { 308 raid5_conf_t *conf = sh->raid_conf; 309 int i; 310 311 BUG_ON(atomic_read(&sh->count) != 0); 312 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); 313 BUG_ON(stripe_operations_active(sh)); 314 315 CHECK_DEVLOCK(); 316 pr_debug("init_stripe called, stripe %llu\n", 317 (unsigned long long)sh->sector); 318 319 remove_hash(sh); 320 321 sh->generation = conf->generation - previous; 322 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks; 323 sh->sector = sector; 324 stripe_set_idx(sector, conf, previous, sh); 325 sh->state = 0; 326 327 328 for (i = sh->disks; i--; ) { 329 struct r5dev *dev = &sh->dev[i]; 330 331 if (dev->toread || dev->read || dev->towrite || dev->written || 332 test_bit(R5_LOCKED, &dev->flags)) { 333 printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n", 334 (unsigned long long)sh->sector, i, dev->toread, 335 dev->read, dev->towrite, dev->written, 336 test_bit(R5_LOCKED, &dev->flags)); 337 BUG(); 338 } 339 dev->flags = 0; 340 raid5_build_block(sh, i, previous); 341 } 342 insert_hash(conf, sh); 343 } 344 345 static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, 346 short generation) 347 { 348 struct stripe_head *sh; 349 struct hlist_node *hn; 350 351 CHECK_DEVLOCK(); 352 pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector); 353 hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash) 354 if (sh->sector == sector && sh->generation == generation) 355 return sh; 356 pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector); 357 return NULL; 358 } 359 360 static void unplug_slaves(mddev_t *mddev); 361 static void raid5_unplug_device(struct request_queue *q); 362 363 static struct stripe_head * 364 get_active_stripe(raid5_conf_t *conf, sector_t sector, 365 int previous, int noblock, int noquiesce) 366 { 367 struct stripe_head *sh; 368 369 pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector); 370 371 spin_lock_irq(&conf->device_lock); 372 373 do { 374 wait_event_lock_irq(conf->wait_for_stripe, 375 conf->quiesce == 0 || noquiesce, 376 conf->device_lock, /* nothing */); 377 sh = __find_stripe(conf, sector, conf->generation - previous); 378 if (!sh) { 379 if (!conf->inactive_blocked) 380 sh = get_free_stripe(conf); 381 if (noblock && sh == NULL) 382 break; 383 if (!sh) { 384 conf->inactive_blocked = 1; 385 
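				/* No inactive stripe is available: wait until
				 * one is freed and the number of active
				 * stripes has dropped below 3/4 of the cache
				 * (or inactive_blocked has been cleared), so
				 * the cache is not immediately exhausted again.
				 */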
wait_event_lock_irq(conf->wait_for_stripe, 386 !list_empty(&conf->inactive_list) && 387 (atomic_read(&conf->active_stripes) 388 < (conf->max_nr_stripes *3/4) 389 || !conf->inactive_blocked), 390 conf->device_lock, 391 raid5_unplug_device(conf->mddev->queue) 392 ); 393 conf->inactive_blocked = 0; 394 } else 395 init_stripe(sh, sector, previous); 396 } else { 397 if (atomic_read(&sh->count)) { 398 BUG_ON(!list_empty(&sh->lru) 399 && !test_bit(STRIPE_EXPANDING, &sh->state)); 400 } else { 401 if (!test_bit(STRIPE_HANDLE, &sh->state)) 402 atomic_inc(&conf->active_stripes); 403 if (list_empty(&sh->lru) && 404 !test_bit(STRIPE_EXPANDING, &sh->state)) 405 BUG(); 406 list_del_init(&sh->lru); 407 } 408 } 409 } while (sh == NULL); 410 411 if (sh) 412 atomic_inc(&sh->count); 413 414 spin_unlock_irq(&conf->device_lock); 415 return sh; 416 } 417 418 static void 419 raid5_end_read_request(struct bio *bi, int error); 420 static void 421 raid5_end_write_request(struct bio *bi, int error); 422 423 static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) 424 { 425 raid5_conf_t *conf = sh->raid_conf; 426 int i, disks = sh->disks; 427 428 might_sleep(); 429 430 for (i = disks; i--; ) { 431 int rw; 432 struct bio *bi; 433 mdk_rdev_t *rdev; 434 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) 435 rw = WRITE; 436 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) 437 rw = READ; 438 else 439 continue; 440 441 bi = &sh->dev[i].req; 442 443 bi->bi_rw = rw; 444 if (rw == WRITE) 445 bi->bi_end_io = raid5_end_write_request; 446 else 447 bi->bi_end_io = raid5_end_read_request; 448 449 rcu_read_lock(); 450 rdev = rcu_dereference(conf->disks[i].rdev); 451 if (rdev && test_bit(Faulty, &rdev->flags)) 452 rdev = NULL; 453 if (rdev) 454 atomic_inc(&rdev->nr_pending); 455 rcu_read_unlock(); 456 457 if (rdev) { 458 if (s->syncing || s->expanding || s->expanded) 459 md_sync_acct(rdev->bdev, STRIPE_SECTORS); 460 461 set_bit(STRIPE_IO_STARTED, &sh->state); 462 463 bi->bi_bdev = rdev->bdev; 464 pr_debug("%s: for %llu schedule op %ld on disc %d\n", 465 __func__, (unsigned long long)sh->sector, 466 bi->bi_rw, i); 467 atomic_inc(&sh->count); 468 bi->bi_sector = sh->sector + rdev->data_offset; 469 bi->bi_flags = 1 << BIO_UPTODATE; 470 bi->bi_vcnt = 1; 471 bi->bi_max_vecs = 1; 472 bi->bi_idx = 0; 473 bi->bi_io_vec = &sh->dev[i].vec; 474 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 475 bi->bi_io_vec[0].bv_offset = 0; 476 bi->bi_size = STRIPE_SIZE; 477 bi->bi_next = NULL; 478 if (rw == WRITE && 479 test_bit(R5_ReWrite, &sh->dev[i].flags)) 480 atomic_add(STRIPE_SECTORS, 481 &rdev->corrected_errors); 482 generic_make_request(bi); 483 } else { 484 if (rw == WRITE) 485 set_bit(STRIPE_DEGRADED, &sh->state); 486 pr_debug("skip op %ld on disc %d for sector %llu\n", 487 bi->bi_rw, i, (unsigned long long)sh->sector); 488 clear_bit(R5_LOCKED, &sh->dev[i].flags); 489 set_bit(STRIPE_HANDLE, &sh->state); 490 } 491 } 492 } 493 494 static struct dma_async_tx_descriptor * 495 async_copy_data(int frombio, struct bio *bio, struct page *page, 496 sector_t sector, struct dma_async_tx_descriptor *tx) 497 { 498 struct bio_vec *bvl; 499 struct page *bio_page; 500 int i; 501 int page_offset; 502 503 if (bio->bi_sector >= sector) 504 page_offset = (signed)(bio->bi_sector - sector) * 512; 505 else 506 page_offset = (signed)(sector - bio->bi_sector) * -512; 507 bio_for_each_segment(bvl, bio, i) { 508 int len = bio_iovec_idx(bio, i)->bv_len; 509 int clen; 510 int b_offset = 0; 511 512 if (page_offset < 0) { 513 b_offset = -page_offset; 
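			/* This segment starts before the stripe page: skip
			 * its first b_offset bytes and copy the remainder
			 * (if any) starting at offset 0 of the page.
			 */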
514 page_offset += b_offset; 515 len -= b_offset; 516 } 517 518 if (len > 0 && page_offset + len > STRIPE_SIZE) 519 clen = STRIPE_SIZE - page_offset; 520 else 521 clen = len; 522 523 if (clen > 0) { 524 b_offset += bio_iovec_idx(bio, i)->bv_offset; 525 bio_page = bio_iovec_idx(bio, i)->bv_page; 526 if (frombio) 527 tx = async_memcpy(page, bio_page, page_offset, 528 b_offset, clen, 529 ASYNC_TX_DEP_ACK, 530 tx, NULL, NULL); 531 else 532 tx = async_memcpy(bio_page, page, b_offset, 533 page_offset, clen, 534 ASYNC_TX_DEP_ACK, 535 tx, NULL, NULL); 536 } 537 if (clen < len) /* hit end of page */ 538 break; 539 page_offset += len; 540 } 541 542 return tx; 543 } 544 545 static void ops_complete_biofill(void *stripe_head_ref) 546 { 547 struct stripe_head *sh = stripe_head_ref; 548 struct bio *return_bi = NULL; 549 raid5_conf_t *conf = sh->raid_conf; 550 int i; 551 552 pr_debug("%s: stripe %llu\n", __func__, 553 (unsigned long long)sh->sector); 554 555 /* clear completed biofills */ 556 spin_lock_irq(&conf->device_lock); 557 for (i = sh->disks; i--; ) { 558 struct r5dev *dev = &sh->dev[i]; 559 560 /* acknowledge completion of a biofill operation */ 561 /* and check if we need to reply to a read request, 562 * new R5_Wantfill requests are held off until 563 * !STRIPE_BIOFILL_RUN 564 */ 565 if (test_and_clear_bit(R5_Wantfill, &dev->flags)) { 566 struct bio *rbi, *rbi2; 567 568 BUG_ON(!dev->read); 569 rbi = dev->read; 570 dev->read = NULL; 571 while (rbi && rbi->bi_sector < 572 dev->sector + STRIPE_SECTORS) { 573 rbi2 = r5_next_bio(rbi, dev->sector); 574 if (!raid5_dec_bi_phys_segments(rbi)) { 575 rbi->bi_next = return_bi; 576 return_bi = rbi; 577 } 578 rbi = rbi2; 579 } 580 } 581 } 582 spin_unlock_irq(&conf->device_lock); 583 clear_bit(STRIPE_BIOFILL_RUN, &sh->state); 584 585 return_io(return_bi); 586 587 set_bit(STRIPE_HANDLE, &sh->state); 588 release_stripe(sh); 589 } 590 591 static void ops_run_biofill(struct stripe_head *sh) 592 { 593 struct dma_async_tx_descriptor *tx = NULL; 594 raid5_conf_t *conf = sh->raid_conf; 595 int i; 596 597 pr_debug("%s: stripe %llu\n", __func__, 598 (unsigned long long)sh->sector); 599 600 for (i = sh->disks; i--; ) { 601 struct r5dev *dev = &sh->dev[i]; 602 if (test_bit(R5_Wantfill, &dev->flags)) { 603 struct bio *rbi; 604 spin_lock_irq(&conf->device_lock); 605 dev->read = rbi = dev->toread; 606 dev->toread = NULL; 607 spin_unlock_irq(&conf->device_lock); 608 while (rbi && rbi->bi_sector < 609 dev->sector + STRIPE_SECTORS) { 610 tx = async_copy_data(0, rbi, dev->page, 611 dev->sector, tx); 612 rbi = r5_next_bio(rbi, dev->sector); 613 } 614 } 615 } 616 617 atomic_inc(&sh->count); 618 async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx, 619 ops_complete_biofill, sh); 620 } 621 622 static void ops_complete_compute5(void *stripe_head_ref) 623 { 624 struct stripe_head *sh = stripe_head_ref; 625 int target = sh->ops.target; 626 struct r5dev *tgt = &sh->dev[target]; 627 628 pr_debug("%s: stripe %llu\n", __func__, 629 (unsigned long long)sh->sector); 630 631 set_bit(R5_UPTODATE, &tgt->flags); 632 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 633 clear_bit(R5_Wantcompute, &tgt->flags); 634 clear_bit(STRIPE_COMPUTE_RUN, &sh->state); 635 if (sh->check_state == check_state_compute_run) 636 sh->check_state = check_state_compute_result; 637 set_bit(STRIPE_HANDLE, &sh->state); 638 release_stripe(sh); 639 } 640 641 static struct dma_async_tx_descriptor *ops_run_compute5(struct stripe_head *sh) 642 { 643 /* kernel stack size limits the total number of disks */ 644 int disks 
= sh->disks; 645 struct page *xor_srcs[disks]; 646 int target = sh->ops.target; 647 struct r5dev *tgt = &sh->dev[target]; 648 struct page *xor_dest = tgt->page; 649 int count = 0; 650 struct dma_async_tx_descriptor *tx; 651 int i; 652 653 pr_debug("%s: stripe %llu block: %d\n", 654 __func__, (unsigned long long)sh->sector, target); 655 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 656 657 for (i = disks; i--; ) 658 if (i != target) 659 xor_srcs[count++] = sh->dev[i].page; 660 661 atomic_inc(&sh->count); 662 663 if (unlikely(count == 1)) 664 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, 665 0, NULL, ops_complete_compute5, sh); 666 else 667 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 668 ASYNC_TX_XOR_ZERO_DST, NULL, 669 ops_complete_compute5, sh); 670 671 return tx; 672 } 673 674 static void ops_complete_prexor(void *stripe_head_ref) 675 { 676 struct stripe_head *sh = stripe_head_ref; 677 678 pr_debug("%s: stripe %llu\n", __func__, 679 (unsigned long long)sh->sector); 680 } 681 682 static struct dma_async_tx_descriptor * 683 ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) 684 { 685 /* kernel stack size limits the total number of disks */ 686 int disks = sh->disks; 687 struct page *xor_srcs[disks]; 688 int count = 0, pd_idx = sh->pd_idx, i; 689 690 /* existing parity data subtracted */ 691 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 692 693 pr_debug("%s: stripe %llu\n", __func__, 694 (unsigned long long)sh->sector); 695 696 for (i = disks; i--; ) { 697 struct r5dev *dev = &sh->dev[i]; 698 /* Only process blocks that are known to be uptodate */ 699 if (test_bit(R5_Wantdrain, &dev->flags)) 700 xor_srcs[count++] = dev->page; 701 } 702 703 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 704 ASYNC_TX_DEP_ACK | ASYNC_TX_XOR_DROP_DST, tx, 705 ops_complete_prexor, sh); 706 707 return tx; 708 } 709 710 static struct dma_async_tx_descriptor * 711 ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) 712 { 713 int disks = sh->disks; 714 int i; 715 716 pr_debug("%s: stripe %llu\n", __func__, 717 (unsigned long long)sh->sector); 718 719 for (i = disks; i--; ) { 720 struct r5dev *dev = &sh->dev[i]; 721 struct bio *chosen; 722 723 if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) { 724 struct bio *wbi; 725 726 spin_lock(&sh->lock); 727 chosen = dev->towrite; 728 dev->towrite = NULL; 729 BUG_ON(dev->written); 730 wbi = dev->written = chosen; 731 spin_unlock(&sh->lock); 732 733 while (wbi && wbi->bi_sector < 734 dev->sector + STRIPE_SECTORS) { 735 tx = async_copy_data(1, wbi, dev->page, 736 dev->sector, tx); 737 wbi = r5_next_bio(wbi, dev->sector); 738 } 739 } 740 } 741 742 return tx; 743 } 744 745 static void ops_complete_postxor(void *stripe_head_ref) 746 { 747 struct stripe_head *sh = stripe_head_ref; 748 int disks = sh->disks, i, pd_idx = sh->pd_idx; 749 750 pr_debug("%s: stripe %llu\n", __func__, 751 (unsigned long long)sh->sector); 752 753 for (i = disks; i--; ) { 754 struct r5dev *dev = &sh->dev[i]; 755 if (dev->written || i == pd_idx) 756 set_bit(R5_UPTODATE, &dev->flags); 757 } 758 759 if (sh->reconstruct_state == reconstruct_state_drain_run) 760 sh->reconstruct_state = reconstruct_state_drain_result; 761 else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) 762 sh->reconstruct_state = reconstruct_state_prexor_drain_result; 763 else { 764 BUG_ON(sh->reconstruct_state != reconstruct_state_run); 765 sh->reconstruct_state = reconstruct_state_result; 766 } 767 768 
set_bit(STRIPE_HANDLE, &sh->state); 769 release_stripe(sh); 770 } 771 772 static void 773 ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) 774 { 775 /* kernel stack size limits the total number of disks */ 776 int disks = sh->disks; 777 struct page *xor_srcs[disks]; 778 779 int count = 0, pd_idx = sh->pd_idx, i; 780 struct page *xor_dest; 781 int prexor = 0; 782 unsigned long flags; 783 784 pr_debug("%s: stripe %llu\n", __func__, 785 (unsigned long long)sh->sector); 786 787 /* check if prexor is active which means only process blocks 788 * that are part of a read-modify-write (written) 789 */ 790 if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) { 791 prexor = 1; 792 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 793 for (i = disks; i--; ) { 794 struct r5dev *dev = &sh->dev[i]; 795 if (dev->written) 796 xor_srcs[count++] = dev->page; 797 } 798 } else { 799 xor_dest = sh->dev[pd_idx].page; 800 for (i = disks; i--; ) { 801 struct r5dev *dev = &sh->dev[i]; 802 if (i != pd_idx) 803 xor_srcs[count++] = dev->page; 804 } 805 } 806 807 /* 1/ if we prexor'd then the dest is reused as a source 808 * 2/ if we did not prexor then we are redoing the parity 809 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST 810 * for the synchronous xor case 811 */ 812 flags = ASYNC_TX_DEP_ACK | ASYNC_TX_ACK | 813 (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST); 814 815 atomic_inc(&sh->count); 816 817 if (unlikely(count == 1)) { 818 flags &= ~(ASYNC_TX_XOR_DROP_DST | ASYNC_TX_XOR_ZERO_DST); 819 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, 820 flags, tx, ops_complete_postxor, sh); 821 } else 822 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 823 flags, tx, ops_complete_postxor, sh); 824 } 825 826 static void ops_complete_check(void *stripe_head_ref) 827 { 828 struct stripe_head *sh = stripe_head_ref; 829 830 pr_debug("%s: stripe %llu\n", __func__, 831 (unsigned long long)sh->sector); 832 833 sh->check_state = check_state_check_result; 834 set_bit(STRIPE_HANDLE, &sh->state); 835 release_stripe(sh); 836 } 837 838 static void ops_run_check(struct stripe_head *sh) 839 { 840 /* kernel stack size limits the total number of disks */ 841 int disks = sh->disks; 842 struct page *xor_srcs[disks]; 843 struct dma_async_tx_descriptor *tx; 844 845 int count = 0, pd_idx = sh->pd_idx, i; 846 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 847 848 pr_debug("%s: stripe %llu\n", __func__, 849 (unsigned long long)sh->sector); 850 851 for (i = disks; i--; ) { 852 struct r5dev *dev = &sh->dev[i]; 853 if (i != pd_idx) 854 xor_srcs[count++] = dev->page; 855 } 856 857 tx = async_xor_zero_sum(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 858 &sh->ops.zero_sum_result, 0, NULL, NULL, NULL); 859 860 atomic_inc(&sh->count); 861 tx = async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx, 862 ops_complete_check, sh); 863 } 864 865 static void raid5_run_ops(struct stripe_head *sh, unsigned long ops_request) 866 { 867 int overlap_clear = 0, i, disks = sh->disks; 868 struct dma_async_tx_descriptor *tx = NULL; 869 870 if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) { 871 ops_run_biofill(sh); 872 overlap_clear++; 873 } 874 875 if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) { 876 tx = ops_run_compute5(sh); 877 /* terminate the chain if postxor is not set to be run */ 878 if (tx && !test_bit(STRIPE_OP_POSTXOR, &ops_request)) 879 async_tx_ack(tx); 880 } 881 882 if (test_bit(STRIPE_OP_PREXOR, &ops_request)) 883 tx = ops_run_prexor(sh, tx); 884 885 if 
(test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
		tx = ops_run_biodrain(sh, tx);
		overlap_clear++;
	}

	if (test_bit(STRIPE_OP_POSTXOR, &ops_request))
		ops_run_postxor(sh, tx);

	if (test_bit(STRIPE_OP_CHECK, &ops_request))
		ops_run_check(sh);

	if (overlap_clear)
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (test_and_clear_bit(R5_Overlap, &dev->flags))
				wake_up(&sh->raid_conf->wait_for_overlap);
		}
}

static int grow_one_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;
	sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
	if (!sh)
		return 0;
	memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev));
	sh->raid_conf = conf;
	spin_lock_init(&sh->lock);

	if (grow_buffers(sh, conf->raid_disks)) {
		shrink_buffers(sh, conf->raid_disks);
		kmem_cache_free(conf->slab_cache, sh);
		return 0;
	}
	sh->disks = conf->raid_disks;
	/* we just created an active stripe so... */
	atomic_set(&sh->count, 1);
	atomic_inc(&conf->active_stripes);
	INIT_LIST_HEAD(&sh->lru);
	release_stripe(sh);
	return 1;
}

static int grow_stripes(raid5_conf_t *conf, int num)
{
	struct kmem_cache *sc;
	int devs = conf->raid_disks;

	sprintf(conf->cache_name[0],
		"raid%d-%s", conf->level, mdname(conf->mddev));
	sprintf(conf->cache_name[1],
		"raid%d-%s-alt", conf->level, mdname(conf->mddev));
	conf->active_name = 0;
	sc = kmem_cache_create(conf->cache_name[conf->active_name],
			       sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return 1;
	conf->slab_cache = sc;
	conf->pool_size = devs;
	while (num--)
		if (!grow_one_stripe(conf))
			return 1;
	return 0;
}

static int resize_stripes(raid5_conf_t *conf, int newsize)
{
	/* Make all the stripes able to hold 'newsize' devices.
	 * New slots in each stripe get 'page' set to a new page.
	 *
	 * This happens in stages:
	 * 1/ create a new kmem_cache and allocate the required number of
	 *    stripe_heads.
	 * 2/ gather all the old stripe_heads and transfer the pages across
	 *    to the new stripe_heads.  This will have the side effect of
	 *    freezing the array as once all stripe_heads have been collected,
	 *    no IO will be possible.  Old stripe heads are freed once their
	 *    pages have been transferred over, and the old kmem_cache is
	 *    freed when all stripes are done.
	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
	 *    we simply return a failure status - no need to clean anything up.
	 * 4/ allocate new pages for the new slots in the new stripe_heads.
	 *    If this fails, we don't bother trying to shrink the
	 *    stripe_heads down again, we just leave them as they are.
	 *    As each stripe_head is processed the new one is released into
	 *    active service.
	 *
	 * Once step2 is started, we cannot afford to wait for a write,
	 * so we use GFP_NOIO allocations.
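	 *
	 * (A caller growing the array would be expected to call this with the
	 *  new total number of devices before any stripe needs to reference
	 *  the extra slots; on -ENOMEM it can simply decline to start the
	 *  reshape.)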
975 */ 976 struct stripe_head *osh, *nsh; 977 LIST_HEAD(newstripes); 978 struct disk_info *ndisks; 979 int err; 980 struct kmem_cache *sc; 981 int i; 982 983 if (newsize <= conf->pool_size) 984 return 0; /* never bother to shrink */ 985 986 err = md_allow_write(conf->mddev); 987 if (err) 988 return err; 989 990 /* Step 1 */ 991 sc = kmem_cache_create(conf->cache_name[1-conf->active_name], 992 sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev), 993 0, 0, NULL); 994 if (!sc) 995 return -ENOMEM; 996 997 for (i = conf->max_nr_stripes; i; i--) { 998 nsh = kmem_cache_alloc(sc, GFP_KERNEL); 999 if (!nsh) 1000 break; 1001 1002 memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev)); 1003 1004 nsh->raid_conf = conf; 1005 spin_lock_init(&nsh->lock); 1006 1007 list_add(&nsh->lru, &newstripes); 1008 } 1009 if (i) { 1010 /* didn't get enough, give up */ 1011 while (!list_empty(&newstripes)) { 1012 nsh = list_entry(newstripes.next, struct stripe_head, lru); 1013 list_del(&nsh->lru); 1014 kmem_cache_free(sc, nsh); 1015 } 1016 kmem_cache_destroy(sc); 1017 return -ENOMEM; 1018 } 1019 /* Step 2 - Must use GFP_NOIO now. 1020 * OK, we have enough stripes, start collecting inactive 1021 * stripes and copying them over 1022 */ 1023 list_for_each_entry(nsh, &newstripes, lru) { 1024 spin_lock_irq(&conf->device_lock); 1025 wait_event_lock_irq(conf->wait_for_stripe, 1026 !list_empty(&conf->inactive_list), 1027 conf->device_lock, 1028 unplug_slaves(conf->mddev) 1029 ); 1030 osh = get_free_stripe(conf); 1031 spin_unlock_irq(&conf->device_lock); 1032 atomic_set(&nsh->count, 1); 1033 for(i=0; i<conf->pool_size; i++) 1034 nsh->dev[i].page = osh->dev[i].page; 1035 for( ; i<newsize; i++) 1036 nsh->dev[i].page = NULL; 1037 kmem_cache_free(conf->slab_cache, osh); 1038 } 1039 kmem_cache_destroy(conf->slab_cache); 1040 1041 /* Step 3. 1042 * At this point, we are holding all the stripes so the array 1043 * is completely stalled, so now is a good time to resize 1044 * conf->disks. 
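	 * conf->disks is indexed by raid disk number, so it must be able to
	 * hold 'newsize' entries before any additional devices can be
	 * attached to the array.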
1045 */ 1046 ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO); 1047 if (ndisks) { 1048 for (i=0; i<conf->raid_disks; i++) 1049 ndisks[i] = conf->disks[i]; 1050 kfree(conf->disks); 1051 conf->disks = ndisks; 1052 } else 1053 err = -ENOMEM; 1054 1055 /* Step 4, return new stripes to service */ 1056 while(!list_empty(&newstripes)) { 1057 nsh = list_entry(newstripes.next, struct stripe_head, lru); 1058 list_del_init(&nsh->lru); 1059 for (i=conf->raid_disks; i < newsize; i++) 1060 if (nsh->dev[i].page == NULL) { 1061 struct page *p = alloc_page(GFP_NOIO); 1062 nsh->dev[i].page = p; 1063 if (!p) 1064 err = -ENOMEM; 1065 } 1066 release_stripe(nsh); 1067 } 1068 /* critical section pass, GFP_NOIO no longer needed */ 1069 1070 conf->slab_cache = sc; 1071 conf->active_name = 1-conf->active_name; 1072 conf->pool_size = newsize; 1073 return err; 1074 } 1075 1076 static int drop_one_stripe(raid5_conf_t *conf) 1077 { 1078 struct stripe_head *sh; 1079 1080 spin_lock_irq(&conf->device_lock); 1081 sh = get_free_stripe(conf); 1082 spin_unlock_irq(&conf->device_lock); 1083 if (!sh) 1084 return 0; 1085 BUG_ON(atomic_read(&sh->count)); 1086 shrink_buffers(sh, conf->pool_size); 1087 kmem_cache_free(conf->slab_cache, sh); 1088 atomic_dec(&conf->active_stripes); 1089 return 1; 1090 } 1091 1092 static void shrink_stripes(raid5_conf_t *conf) 1093 { 1094 while (drop_one_stripe(conf)) 1095 ; 1096 1097 if (conf->slab_cache) 1098 kmem_cache_destroy(conf->slab_cache); 1099 conf->slab_cache = NULL; 1100 } 1101 1102 static void raid5_end_read_request(struct bio * bi, int error) 1103 { 1104 struct stripe_head *sh = bi->bi_private; 1105 raid5_conf_t *conf = sh->raid_conf; 1106 int disks = sh->disks, i; 1107 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 1108 char b[BDEVNAME_SIZE]; 1109 mdk_rdev_t *rdev; 1110 1111 1112 for (i=0 ; i<disks; i++) 1113 if (bi == &sh->dev[i].req) 1114 break; 1115 1116 pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n", 1117 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 1118 uptodate); 1119 if (i == disks) { 1120 BUG(); 1121 return; 1122 } 1123 1124 if (uptodate) { 1125 set_bit(R5_UPTODATE, &sh->dev[i].flags); 1126 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 1127 rdev = conf->disks[i].rdev; 1128 printk_rl(KERN_INFO "raid5:%s: read error corrected" 1129 " (%lu sectors at %llu on %s)\n", 1130 mdname(conf->mddev), STRIPE_SECTORS, 1131 (unsigned long long)(sh->sector 1132 + rdev->data_offset), 1133 bdevname(rdev->bdev, b)); 1134 clear_bit(R5_ReadError, &sh->dev[i].flags); 1135 clear_bit(R5_ReWrite, &sh->dev[i].flags); 1136 } 1137 if (atomic_read(&conf->disks[i].rdev->read_errors)) 1138 atomic_set(&conf->disks[i].rdev->read_errors, 0); 1139 } else { 1140 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b); 1141 int retry = 0; 1142 rdev = conf->disks[i].rdev; 1143 1144 clear_bit(R5_UPTODATE, &sh->dev[i].flags); 1145 atomic_inc(&rdev->read_errors); 1146 if (conf->mddev->degraded) 1147 printk_rl(KERN_WARNING 1148 "raid5:%s: read error not correctable " 1149 "(sector %llu on %s).\n", 1150 mdname(conf->mddev), 1151 (unsigned long long)(sh->sector 1152 + rdev->data_offset), 1153 bdn); 1154 else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) 1155 /* Oh, no!!! */ 1156 printk_rl(KERN_WARNING 1157 "raid5:%s: read error NOT corrected!! 
" 1158 "(sector %llu on %s).\n", 1159 mdname(conf->mddev), 1160 (unsigned long long)(sh->sector 1161 + rdev->data_offset), 1162 bdn); 1163 else if (atomic_read(&rdev->read_errors) 1164 > conf->max_nr_stripes) 1165 printk(KERN_WARNING 1166 "raid5:%s: Too many read errors, failing device %s.\n", 1167 mdname(conf->mddev), bdn); 1168 else 1169 retry = 1; 1170 if (retry) 1171 set_bit(R5_ReadError, &sh->dev[i].flags); 1172 else { 1173 clear_bit(R5_ReadError, &sh->dev[i].flags); 1174 clear_bit(R5_ReWrite, &sh->dev[i].flags); 1175 md_error(conf->mddev, rdev); 1176 } 1177 } 1178 rdev_dec_pending(conf->disks[i].rdev, conf->mddev); 1179 clear_bit(R5_LOCKED, &sh->dev[i].flags); 1180 set_bit(STRIPE_HANDLE, &sh->state); 1181 release_stripe(sh); 1182 } 1183 1184 static void raid5_end_write_request(struct bio *bi, int error) 1185 { 1186 struct stripe_head *sh = bi->bi_private; 1187 raid5_conf_t *conf = sh->raid_conf; 1188 int disks = sh->disks, i; 1189 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 1190 1191 for (i=0 ; i<disks; i++) 1192 if (bi == &sh->dev[i].req) 1193 break; 1194 1195 pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n", 1196 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 1197 uptodate); 1198 if (i == disks) { 1199 BUG(); 1200 return; 1201 } 1202 1203 if (!uptodate) 1204 md_error(conf->mddev, conf->disks[i].rdev); 1205 1206 rdev_dec_pending(conf->disks[i].rdev, conf->mddev); 1207 1208 clear_bit(R5_LOCKED, &sh->dev[i].flags); 1209 set_bit(STRIPE_HANDLE, &sh->state); 1210 release_stripe(sh); 1211 } 1212 1213 1214 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous); 1215 1216 static void raid5_build_block(struct stripe_head *sh, int i, int previous) 1217 { 1218 struct r5dev *dev = &sh->dev[i]; 1219 1220 bio_init(&dev->req); 1221 dev->req.bi_io_vec = &dev->vec; 1222 dev->req.bi_vcnt++; 1223 dev->req.bi_max_vecs++; 1224 dev->vec.bv_page = dev->page; 1225 dev->vec.bv_len = STRIPE_SIZE; 1226 dev->vec.bv_offset = 0; 1227 1228 dev->req.bi_sector = sh->sector; 1229 dev->req.bi_private = sh; 1230 1231 dev->flags = 0; 1232 dev->sector = compute_blocknr(sh, i, previous); 1233 } 1234 1235 static void error(mddev_t *mddev, mdk_rdev_t *rdev) 1236 { 1237 char b[BDEVNAME_SIZE]; 1238 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 1239 pr_debug("raid5: error called\n"); 1240 1241 if (!test_bit(Faulty, &rdev->flags)) { 1242 set_bit(MD_CHANGE_DEVS, &mddev->flags); 1243 if (test_and_clear_bit(In_sync, &rdev->flags)) { 1244 unsigned long flags; 1245 spin_lock_irqsave(&conf->device_lock, flags); 1246 mddev->degraded++; 1247 spin_unlock_irqrestore(&conf->device_lock, flags); 1248 /* 1249 * if recovery was running, make sure it aborts. 1250 */ 1251 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 1252 } 1253 set_bit(Faulty, &rdev->flags); 1254 printk(KERN_ALERT 1255 "raid5: Disk failure on %s, disabling device.\n" 1256 "raid5: Operation continuing on %d devices.\n", 1257 bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded); 1258 } 1259 } 1260 1261 /* 1262 * Input: a 'big' sector number, 1263 * Output: index of the data and parity disk, and the sector # in them. 1264 */ 1265 static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, 1266 int previous, int *dd_idx, 1267 struct stripe_head *sh) 1268 { 1269 long stripe; 1270 unsigned long chunk_number; 1271 unsigned int chunk_offset; 1272 int pd_idx, qd_idx; 1273 int ddf_layout = 0; 1274 sector_t new_sector; 1275 int algorithm = previous ? 
conf->prev_algo 1276 : conf->algorithm; 1277 int sectors_per_chunk = previous ? conf->prev_chunk_sectors 1278 : conf->chunk_sectors; 1279 int raid_disks = previous ? conf->previous_raid_disks 1280 : conf->raid_disks; 1281 int data_disks = raid_disks - conf->max_degraded; 1282 1283 /* First compute the information on this sector */ 1284 1285 /* 1286 * Compute the chunk number and the sector offset inside the chunk 1287 */ 1288 chunk_offset = sector_div(r_sector, sectors_per_chunk); 1289 chunk_number = r_sector; 1290 BUG_ON(r_sector != chunk_number); 1291 1292 /* 1293 * Compute the stripe number 1294 */ 1295 stripe = chunk_number / data_disks; 1296 1297 /* 1298 * Compute the data disk and parity disk indexes inside the stripe 1299 */ 1300 *dd_idx = chunk_number % data_disks; 1301 1302 /* 1303 * Select the parity disk based on the user selected algorithm. 1304 */ 1305 pd_idx = qd_idx = ~0; 1306 switch(conf->level) { 1307 case 4: 1308 pd_idx = data_disks; 1309 break; 1310 case 5: 1311 switch (algorithm) { 1312 case ALGORITHM_LEFT_ASYMMETRIC: 1313 pd_idx = data_disks - stripe % raid_disks; 1314 if (*dd_idx >= pd_idx) 1315 (*dd_idx)++; 1316 break; 1317 case ALGORITHM_RIGHT_ASYMMETRIC: 1318 pd_idx = stripe % raid_disks; 1319 if (*dd_idx >= pd_idx) 1320 (*dd_idx)++; 1321 break; 1322 case ALGORITHM_LEFT_SYMMETRIC: 1323 pd_idx = data_disks - stripe % raid_disks; 1324 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 1325 break; 1326 case ALGORITHM_RIGHT_SYMMETRIC: 1327 pd_idx = stripe % raid_disks; 1328 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 1329 break; 1330 case ALGORITHM_PARITY_0: 1331 pd_idx = 0; 1332 (*dd_idx)++; 1333 break; 1334 case ALGORITHM_PARITY_N: 1335 pd_idx = data_disks; 1336 break; 1337 default: 1338 printk(KERN_ERR "raid5: unsupported algorithm %d\n", 1339 algorithm); 1340 BUG(); 1341 } 1342 break; 1343 case 6: 1344 1345 switch (algorithm) { 1346 case ALGORITHM_LEFT_ASYMMETRIC: 1347 pd_idx = raid_disks - 1 - (stripe % raid_disks); 1348 qd_idx = pd_idx + 1; 1349 if (pd_idx == raid_disks-1) { 1350 (*dd_idx)++; /* Q D D D P */ 1351 qd_idx = 0; 1352 } else if (*dd_idx >= pd_idx) 1353 (*dd_idx) += 2; /* D D P Q D */ 1354 break; 1355 case ALGORITHM_RIGHT_ASYMMETRIC: 1356 pd_idx = stripe % raid_disks; 1357 qd_idx = pd_idx + 1; 1358 if (pd_idx == raid_disks-1) { 1359 (*dd_idx)++; /* Q D D D P */ 1360 qd_idx = 0; 1361 } else if (*dd_idx >= pd_idx) 1362 (*dd_idx) += 2; /* D D P Q D */ 1363 break; 1364 case ALGORITHM_LEFT_SYMMETRIC: 1365 pd_idx = raid_disks - 1 - (stripe % raid_disks); 1366 qd_idx = (pd_idx + 1) % raid_disks; 1367 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; 1368 break; 1369 case ALGORITHM_RIGHT_SYMMETRIC: 1370 pd_idx = stripe % raid_disks; 1371 qd_idx = (pd_idx + 1) % raid_disks; 1372 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; 1373 break; 1374 1375 case ALGORITHM_PARITY_0: 1376 pd_idx = 0; 1377 qd_idx = 1; 1378 (*dd_idx) += 2; 1379 break; 1380 case ALGORITHM_PARITY_N: 1381 pd_idx = data_disks; 1382 qd_idx = data_disks + 1; 1383 break; 1384 1385 case ALGORITHM_ROTATING_ZERO_RESTART: 1386 /* Exactly the same as RIGHT_ASYMMETRIC, but or 1387 * of blocks for computing Q is different. 
1388 */ 1389 pd_idx = stripe % raid_disks; 1390 qd_idx = pd_idx + 1; 1391 if (pd_idx == raid_disks-1) { 1392 (*dd_idx)++; /* Q D D D P */ 1393 qd_idx = 0; 1394 } else if (*dd_idx >= pd_idx) 1395 (*dd_idx) += 2; /* D D P Q D */ 1396 ddf_layout = 1; 1397 break; 1398 1399 case ALGORITHM_ROTATING_N_RESTART: 1400 /* Same a left_asymmetric, by first stripe is 1401 * D D D P Q rather than 1402 * Q D D D P 1403 */ 1404 pd_idx = raid_disks - 1 - ((stripe + 1) % raid_disks); 1405 qd_idx = pd_idx + 1; 1406 if (pd_idx == raid_disks-1) { 1407 (*dd_idx)++; /* Q D D D P */ 1408 qd_idx = 0; 1409 } else if (*dd_idx >= pd_idx) 1410 (*dd_idx) += 2; /* D D P Q D */ 1411 ddf_layout = 1; 1412 break; 1413 1414 case ALGORITHM_ROTATING_N_CONTINUE: 1415 /* Same as left_symmetric but Q is before P */ 1416 pd_idx = raid_disks - 1 - (stripe % raid_disks); 1417 qd_idx = (pd_idx + raid_disks - 1) % raid_disks; 1418 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 1419 ddf_layout = 1; 1420 break; 1421 1422 case ALGORITHM_LEFT_ASYMMETRIC_6: 1423 /* RAID5 left_asymmetric, with Q on last device */ 1424 pd_idx = data_disks - stripe % (raid_disks-1); 1425 if (*dd_idx >= pd_idx) 1426 (*dd_idx)++; 1427 qd_idx = raid_disks - 1; 1428 break; 1429 1430 case ALGORITHM_RIGHT_ASYMMETRIC_6: 1431 pd_idx = stripe % (raid_disks-1); 1432 if (*dd_idx >= pd_idx) 1433 (*dd_idx)++; 1434 qd_idx = raid_disks - 1; 1435 break; 1436 1437 case ALGORITHM_LEFT_SYMMETRIC_6: 1438 pd_idx = data_disks - stripe % (raid_disks-1); 1439 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); 1440 qd_idx = raid_disks - 1; 1441 break; 1442 1443 case ALGORITHM_RIGHT_SYMMETRIC_6: 1444 pd_idx = stripe % (raid_disks-1); 1445 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); 1446 qd_idx = raid_disks - 1; 1447 break; 1448 1449 case ALGORITHM_PARITY_0_6: 1450 pd_idx = 0; 1451 (*dd_idx)++; 1452 qd_idx = raid_disks - 1; 1453 break; 1454 1455 1456 default: 1457 printk(KERN_CRIT "raid6: unsupported algorithm %d\n", 1458 algorithm); 1459 BUG(); 1460 } 1461 break; 1462 } 1463 1464 if (sh) { 1465 sh->pd_idx = pd_idx; 1466 sh->qd_idx = qd_idx; 1467 sh->ddf_layout = ddf_layout; 1468 } 1469 /* 1470 * Finally, compute the new sector number 1471 */ 1472 new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset; 1473 return new_sector; 1474 } 1475 1476 1477 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) 1478 { 1479 raid5_conf_t *conf = sh->raid_conf; 1480 int raid_disks = sh->disks; 1481 int data_disks = raid_disks - conf->max_degraded; 1482 sector_t new_sector = sh->sector, check; 1483 int sectors_per_chunk = previous ? conf->prev_chunk_sectors 1484 : conf->chunk_sectors; 1485 int algorithm = previous ? 
conf->prev_algo 1486 : conf->algorithm; 1487 sector_t stripe; 1488 int chunk_offset; 1489 int chunk_number, dummy1, dd_idx = i; 1490 sector_t r_sector; 1491 struct stripe_head sh2; 1492 1493 1494 chunk_offset = sector_div(new_sector, sectors_per_chunk); 1495 stripe = new_sector; 1496 BUG_ON(new_sector != stripe); 1497 1498 if (i == sh->pd_idx) 1499 return 0; 1500 switch(conf->level) { 1501 case 4: break; 1502 case 5: 1503 switch (algorithm) { 1504 case ALGORITHM_LEFT_ASYMMETRIC: 1505 case ALGORITHM_RIGHT_ASYMMETRIC: 1506 if (i > sh->pd_idx) 1507 i--; 1508 break; 1509 case ALGORITHM_LEFT_SYMMETRIC: 1510 case ALGORITHM_RIGHT_SYMMETRIC: 1511 if (i < sh->pd_idx) 1512 i += raid_disks; 1513 i -= (sh->pd_idx + 1); 1514 break; 1515 case ALGORITHM_PARITY_0: 1516 i -= 1; 1517 break; 1518 case ALGORITHM_PARITY_N: 1519 break; 1520 default: 1521 printk(KERN_ERR "raid5: unsupported algorithm %d\n", 1522 algorithm); 1523 BUG(); 1524 } 1525 break; 1526 case 6: 1527 if (i == sh->qd_idx) 1528 return 0; /* It is the Q disk */ 1529 switch (algorithm) { 1530 case ALGORITHM_LEFT_ASYMMETRIC: 1531 case ALGORITHM_RIGHT_ASYMMETRIC: 1532 case ALGORITHM_ROTATING_ZERO_RESTART: 1533 case ALGORITHM_ROTATING_N_RESTART: 1534 if (sh->pd_idx == raid_disks-1) 1535 i--; /* Q D D D P */ 1536 else if (i > sh->pd_idx) 1537 i -= 2; /* D D P Q D */ 1538 break; 1539 case ALGORITHM_LEFT_SYMMETRIC: 1540 case ALGORITHM_RIGHT_SYMMETRIC: 1541 if (sh->pd_idx == raid_disks-1) 1542 i--; /* Q D D D P */ 1543 else { 1544 /* D D P Q D */ 1545 if (i < sh->pd_idx) 1546 i += raid_disks; 1547 i -= (sh->pd_idx + 2); 1548 } 1549 break; 1550 case ALGORITHM_PARITY_0: 1551 i -= 2; 1552 break; 1553 case ALGORITHM_PARITY_N: 1554 break; 1555 case ALGORITHM_ROTATING_N_CONTINUE: 1556 if (sh->pd_idx == 0) 1557 i--; /* P D D D Q */ 1558 else if (i > sh->pd_idx) 1559 i -= 2; /* D D Q P D */ 1560 break; 1561 case ALGORITHM_LEFT_ASYMMETRIC_6: 1562 case ALGORITHM_RIGHT_ASYMMETRIC_6: 1563 if (i > sh->pd_idx) 1564 i--; 1565 break; 1566 case ALGORITHM_LEFT_SYMMETRIC_6: 1567 case ALGORITHM_RIGHT_SYMMETRIC_6: 1568 if (i < sh->pd_idx) 1569 i += data_disks + 1; 1570 i -= (sh->pd_idx + 1); 1571 break; 1572 case ALGORITHM_PARITY_0_6: 1573 i -= 1; 1574 break; 1575 default: 1576 printk(KERN_CRIT "raid6: unsupported algorithm %d\n", 1577 algorithm); 1578 BUG(); 1579 } 1580 break; 1581 } 1582 1583 chunk_number = stripe * data_disks + i; 1584 r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset; 1585 1586 check = raid5_compute_sector(conf, r_sector, 1587 previous, &dummy1, &sh2); 1588 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx 1589 || sh2.qd_idx != sh->qd_idx) { 1590 printk(KERN_ERR "compute_blocknr: map not correct\n"); 1591 return 0; 1592 } 1593 return r_sector; 1594 } 1595 1596 1597 1598 /* 1599 * Copy data between a page in the stripe cache, and one or more bion 1600 * The page could align with the middle of the bio, or there could be 1601 * several bion, each with several bio_vecs, which cover part of the page 1602 * Multiple bion are linked together on bi_next. There may be extras 1603 * at the end of this list. We ignore them. 
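 *
 * For example (illustrative): a bio that begins 2 sectors after the page's
 * sector gives page_offset = 2*512 = 1024, so its data is copied to/from
 * byte 1024 of the page; a bio that begins 2 sectors earlier gives
 * page_offset = -1024, and the first 1024 bytes of bio data are skipped
 * (b_offset) before copying starts at byte 0 of the page.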
1604 */ 1605 static void copy_data(int frombio, struct bio *bio, 1606 struct page *page, 1607 sector_t sector) 1608 { 1609 char *pa = page_address(page); 1610 struct bio_vec *bvl; 1611 int i; 1612 int page_offset; 1613 1614 if (bio->bi_sector >= sector) 1615 page_offset = (signed)(bio->bi_sector - sector) * 512; 1616 else 1617 page_offset = (signed)(sector - bio->bi_sector) * -512; 1618 bio_for_each_segment(bvl, bio, i) { 1619 int len = bio_iovec_idx(bio,i)->bv_len; 1620 int clen; 1621 int b_offset = 0; 1622 1623 if (page_offset < 0) { 1624 b_offset = -page_offset; 1625 page_offset += b_offset; 1626 len -= b_offset; 1627 } 1628 1629 if (len > 0 && page_offset + len > STRIPE_SIZE) 1630 clen = STRIPE_SIZE - page_offset; 1631 else clen = len; 1632 1633 if (clen > 0) { 1634 char *ba = __bio_kmap_atomic(bio, i, KM_USER0); 1635 if (frombio) 1636 memcpy(pa+page_offset, ba+b_offset, clen); 1637 else 1638 memcpy(ba+b_offset, pa+page_offset, clen); 1639 __bio_kunmap_atomic(ba, KM_USER0); 1640 } 1641 if (clen < len) /* hit end of page */ 1642 break; 1643 page_offset += len; 1644 } 1645 } 1646 1647 #define check_xor() do { \ 1648 if (count == MAX_XOR_BLOCKS) { \ 1649 xor_blocks(count, STRIPE_SIZE, dest, ptr);\ 1650 count = 0; \ 1651 } \ 1652 } while(0) 1653 1654 static void compute_parity6(struct stripe_head *sh, int method) 1655 { 1656 raid5_conf_t *conf = sh->raid_conf; 1657 int i, pd_idx, qd_idx, d0_idx, disks = sh->disks, count; 1658 int syndrome_disks = sh->ddf_layout ? disks : (disks - 2); 1659 struct bio *chosen; 1660 /**** FIX THIS: This could be very bad if disks is close to 256 ****/ 1661 void *ptrs[syndrome_disks+2]; 1662 1663 pd_idx = sh->pd_idx; 1664 qd_idx = sh->qd_idx; 1665 d0_idx = raid6_d0(sh); 1666 1667 pr_debug("compute_parity, stripe %llu, method %d\n", 1668 (unsigned long long)sh->sector, method); 1669 1670 switch(method) { 1671 case READ_MODIFY_WRITE: 1672 BUG(); /* READ_MODIFY_WRITE N/A for RAID-6 */ 1673 case RECONSTRUCT_WRITE: 1674 for (i= disks; i-- ;) 1675 if ( i != pd_idx && i != qd_idx && sh->dev[i].towrite ) { 1676 chosen = sh->dev[i].towrite; 1677 sh->dev[i].towrite = NULL; 1678 1679 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 1680 wake_up(&conf->wait_for_overlap); 1681 1682 BUG_ON(sh->dev[i].written); 1683 sh->dev[i].written = chosen; 1684 } 1685 break; 1686 case CHECK_PARITY: 1687 BUG(); /* Not implemented yet */ 1688 } 1689 1690 for (i = disks; i--;) 1691 if (sh->dev[i].written) { 1692 sector_t sector = sh->dev[i].sector; 1693 struct bio *wbi = sh->dev[i].written; 1694 while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) { 1695 copy_data(1, wbi, sh->dev[i].page, sector); 1696 wbi = r5_next_bio(wbi, sector); 1697 } 1698 1699 set_bit(R5_LOCKED, &sh->dev[i].flags); 1700 set_bit(R5_UPTODATE, &sh->dev[i].flags); 1701 } 1702 1703 /* Note that unlike RAID-5, the ordering of the disks matters greatly.*/ 1704 1705 for (i = 0; i < disks; i++) 1706 ptrs[i] = (void *)raid6_empty_zero_page; 1707 1708 count = 0; 1709 i = d0_idx; 1710 do { 1711 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); 1712 1713 ptrs[slot] = page_address(sh->dev[i].page); 1714 if (slot < syndrome_disks && 1715 !test_bit(R5_UPTODATE, &sh->dev[i].flags)) { 1716 printk(KERN_ERR "block %d/%d not uptodate " 1717 "on parity calc\n", i, count); 1718 BUG(); 1719 } 1720 1721 i = raid6_next_disk(i, disks); 1722 } while (i != d0_idx); 1723 BUG_ON(count != syndrome_disks); 1724 1725 raid6_call.gen_syndrome(syndrome_disks+2, STRIPE_SIZE, ptrs); 1726 1727 switch(method) { 1728 case 
RECONSTRUCT_WRITE: 1729 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 1730 set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags); 1731 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); 1732 set_bit(R5_LOCKED, &sh->dev[qd_idx].flags); 1733 break; 1734 case UPDATE_PARITY: 1735 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 1736 set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags); 1737 break; 1738 } 1739 } 1740 1741 1742 /* Compute one missing block */ 1743 static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero) 1744 { 1745 int i, count, disks = sh->disks; 1746 void *ptr[MAX_XOR_BLOCKS], *dest, *p; 1747 int qd_idx = sh->qd_idx; 1748 1749 pr_debug("compute_block_1, stripe %llu, idx %d\n", 1750 (unsigned long long)sh->sector, dd_idx); 1751 1752 if ( dd_idx == qd_idx ) { 1753 /* We're actually computing the Q drive */ 1754 compute_parity6(sh, UPDATE_PARITY); 1755 } else { 1756 dest = page_address(sh->dev[dd_idx].page); 1757 if (!nozero) memset(dest, 0, STRIPE_SIZE); 1758 count = 0; 1759 for (i = disks ; i--; ) { 1760 if (i == dd_idx || i == qd_idx) 1761 continue; 1762 p = page_address(sh->dev[i].page); 1763 if (test_bit(R5_UPTODATE, &sh->dev[i].flags)) 1764 ptr[count++] = p; 1765 else 1766 printk("compute_block() %d, stripe %llu, %d" 1767 " not present\n", dd_idx, 1768 (unsigned long long)sh->sector, i); 1769 1770 check_xor(); 1771 } 1772 if (count) 1773 xor_blocks(count, STRIPE_SIZE, dest, ptr); 1774 if (!nozero) set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); 1775 else clear_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); 1776 } 1777 } 1778 1779 /* Compute two missing blocks */ 1780 static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2) 1781 { 1782 int i, count, disks = sh->disks; 1783 int syndrome_disks = sh->ddf_layout ? disks : disks-2; 1784 int d0_idx = raid6_d0(sh); 1785 int faila = -1, failb = -1; 1786 /**** FIX THIS: This could be very bad if disks is close to 256 ****/ 1787 void *ptrs[syndrome_disks+2]; 1788 1789 for (i = 0; i < disks ; i++) 1790 ptrs[i] = (void *)raid6_empty_zero_page; 1791 count = 0; 1792 i = d0_idx; 1793 do { 1794 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); 1795 1796 ptrs[slot] = page_address(sh->dev[i].page); 1797 1798 if (i == dd_idx1) 1799 faila = slot; 1800 if (i == dd_idx2) 1801 failb = slot; 1802 i = raid6_next_disk(i, disks); 1803 } while (i != d0_idx); 1804 BUG_ON(count != syndrome_disks); 1805 1806 BUG_ON(faila == failb); 1807 if ( failb < faila ) { int tmp = faila; faila = failb; failb = tmp; } 1808 1809 pr_debug("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n", 1810 (unsigned long long)sh->sector, dd_idx1, dd_idx2, 1811 faila, failb); 1812 1813 if (failb == syndrome_disks+1) { 1814 /* Q disk is one of the missing disks */ 1815 if (faila == syndrome_disks) { 1816 /* Missing P+Q, just recompute */ 1817 compute_parity6(sh, UPDATE_PARITY); 1818 return; 1819 } else { 1820 /* We're missing D+Q; recompute D from P */ 1821 compute_block_1(sh, ((dd_idx1 == sh->qd_idx) ? 1822 dd_idx2 : dd_idx1), 1823 0); 1824 compute_parity6(sh, UPDATE_PARITY); /* Is this necessary? */ 1825 return; 1826 } 1827 } 1828 1829 /* We're missing D+P or D+D; */ 1830 if (failb == syndrome_disks) { 1831 /* We're missing D+P. */ 1832 raid6_datap_recov(syndrome_disks+2, STRIPE_SIZE, faila, ptrs); 1833 } else { 1834 /* We're missing D+D. 
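		 * raid6_2data_recov() regenerates both missing data blocks in
		 * place, into the pages referenced by ptrs[faila] and
		 * ptrs[failb], using the surviving data plus P and Q.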
*/ 1835 raid6_2data_recov(syndrome_disks+2, STRIPE_SIZE, faila, failb, 1836 ptrs); 1837 } 1838 1839 /* Both the above update both missing blocks */ 1840 set_bit(R5_UPTODATE, &sh->dev[dd_idx1].flags); 1841 set_bit(R5_UPTODATE, &sh->dev[dd_idx2].flags); 1842 } 1843 1844 static void 1845 schedule_reconstruction5(struct stripe_head *sh, struct stripe_head_state *s, 1846 int rcw, int expand) 1847 { 1848 int i, pd_idx = sh->pd_idx, disks = sh->disks; 1849 1850 if (rcw) { 1851 /* if we are not expanding this is a proper write request, and 1852 * there will be bios with new data to be drained into the 1853 * stripe cache 1854 */ 1855 if (!expand) { 1856 sh->reconstruct_state = reconstruct_state_drain_run; 1857 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); 1858 } else 1859 sh->reconstruct_state = reconstruct_state_run; 1860 1861 set_bit(STRIPE_OP_POSTXOR, &s->ops_request); 1862 1863 for (i = disks; i--; ) { 1864 struct r5dev *dev = &sh->dev[i]; 1865 1866 if (dev->towrite) { 1867 set_bit(R5_LOCKED, &dev->flags); 1868 set_bit(R5_Wantdrain, &dev->flags); 1869 if (!expand) 1870 clear_bit(R5_UPTODATE, &dev->flags); 1871 s->locked++; 1872 } 1873 } 1874 if (s->locked + 1 == disks) 1875 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) 1876 atomic_inc(&sh->raid_conf->pending_full_writes); 1877 } else { 1878 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || 1879 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); 1880 1881 sh->reconstruct_state = reconstruct_state_prexor_drain_run; 1882 set_bit(STRIPE_OP_PREXOR, &s->ops_request); 1883 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); 1884 set_bit(STRIPE_OP_POSTXOR, &s->ops_request); 1885 1886 for (i = disks; i--; ) { 1887 struct r5dev *dev = &sh->dev[i]; 1888 if (i == pd_idx) 1889 continue; 1890 1891 if (dev->towrite && 1892 (test_bit(R5_UPTODATE, &dev->flags) || 1893 test_bit(R5_Wantcompute, &dev->flags))) { 1894 set_bit(R5_Wantdrain, &dev->flags); 1895 set_bit(R5_LOCKED, &dev->flags); 1896 clear_bit(R5_UPTODATE, &dev->flags); 1897 s->locked++; 1898 } 1899 } 1900 } 1901 1902 /* keep the parity disk locked while asynchronous operations 1903 * are in flight 1904 */ 1905 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); 1906 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 1907 s->locked++; 1908 1909 pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n", 1910 __func__, (unsigned long long)sh->sector, 1911 s->locked, s->ops_request); 1912 } 1913 1914 /* 1915 * Each stripe/dev can have one or more bion attached. 1916 * toread/towrite point to the first in a chain. 1917 * The bi_next chain must be in order. 
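 *
 * For example (illustrative): two non-overlapping writes covering sectors
 * [s, s+4) and [s+4, s+8) of the same stripe/dev end up linked on ->towrite
 * in that order; add_stripe_bio() below walks the chain by bi_sector to
 * find the insertion point and returns 0 (setting R5_Overlap) if the new
 * bio overlaps one already queued.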
1918 */ 1919 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite) 1920 { 1921 struct bio **bip; 1922 raid5_conf_t *conf = sh->raid_conf; 1923 int firstwrite=0; 1924 1925 pr_debug("adding bh b#%llu to stripe s#%llu\n", 1926 (unsigned long long)bi->bi_sector, 1927 (unsigned long long)sh->sector); 1928 1929 1930 spin_lock(&sh->lock); 1931 spin_lock_irq(&conf->device_lock); 1932 if (forwrite) { 1933 bip = &sh->dev[dd_idx].towrite; 1934 if (*bip == NULL && sh->dev[dd_idx].written == NULL) 1935 firstwrite = 1; 1936 } else 1937 bip = &sh->dev[dd_idx].toread; 1938 while (*bip && (*bip)->bi_sector < bi->bi_sector) { 1939 if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector) 1940 goto overlap; 1941 bip = & (*bip)->bi_next; 1942 } 1943 if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9)) 1944 goto overlap; 1945 1946 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next); 1947 if (*bip) 1948 bi->bi_next = *bip; 1949 *bip = bi; 1950 bi->bi_phys_segments++; 1951 spin_unlock_irq(&conf->device_lock); 1952 spin_unlock(&sh->lock); 1953 1954 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", 1955 (unsigned long long)bi->bi_sector, 1956 (unsigned long long)sh->sector, dd_idx); 1957 1958 if (conf->mddev->bitmap && firstwrite) { 1959 bitmap_startwrite(conf->mddev->bitmap, sh->sector, 1960 STRIPE_SECTORS, 0); 1961 sh->bm_seq = conf->seq_flush+1; 1962 set_bit(STRIPE_BIT_DELAY, &sh->state); 1963 } 1964 1965 if (forwrite) { 1966 /* check if page is covered */ 1967 sector_t sector = sh->dev[dd_idx].sector; 1968 for (bi=sh->dev[dd_idx].towrite; 1969 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && 1970 bi && bi->bi_sector <= sector; 1971 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { 1972 if (bi->bi_sector + (bi->bi_size>>9) >= sector) 1973 sector = bi->bi_sector + (bi->bi_size>>9); 1974 } 1975 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) 1976 set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags); 1977 } 1978 return 1; 1979 1980 overlap: 1981 set_bit(R5_Overlap, &sh->dev[dd_idx].flags); 1982 spin_unlock_irq(&conf->device_lock); 1983 spin_unlock(&sh->lock); 1984 return 0; 1985 } 1986 1987 static void end_reshape(raid5_conf_t *conf); 1988 1989 static int page_is_zero(struct page *p) 1990 { 1991 char *a = page_address(p); 1992 return ((*(u32*)a) == 0 && 1993 memcmp(a, a+4, STRIPE_SIZE-4)==0); 1994 } 1995 1996 static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous, 1997 struct stripe_head *sh) 1998 { 1999 int sectors_per_chunk = 2000 previous ? conf->prev_chunk_sectors : conf->chunk_sectors; 2001 int dd_idx; 2002 int chunk_offset = sector_div(stripe, sectors_per_chunk); 2003 int disks = previous ? 
conf->previous_raid_disks : conf->raid_disks; 2004 2005 raid5_compute_sector(conf, 2006 stripe * (disks - conf->max_degraded) 2007 *sectors_per_chunk + chunk_offset, 2008 previous, 2009 &dd_idx, sh); 2010 } 2011 2012 static void 2013 handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh, 2014 struct stripe_head_state *s, int disks, 2015 struct bio **return_bi) 2016 { 2017 int i; 2018 for (i = disks; i--; ) { 2019 struct bio *bi; 2020 int bitmap_end = 0; 2021 2022 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 2023 mdk_rdev_t *rdev; 2024 rcu_read_lock(); 2025 rdev = rcu_dereference(conf->disks[i].rdev); 2026 if (rdev && test_bit(In_sync, &rdev->flags)) 2027 /* multiple read failures in one stripe */ 2028 md_error(conf->mddev, rdev); 2029 rcu_read_unlock(); 2030 } 2031 spin_lock_irq(&conf->device_lock); 2032 /* fail all writes first */ 2033 bi = sh->dev[i].towrite; 2034 sh->dev[i].towrite = NULL; 2035 if (bi) { 2036 s->to_write--; 2037 bitmap_end = 1; 2038 } 2039 2040 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 2041 wake_up(&conf->wait_for_overlap); 2042 2043 while (bi && bi->bi_sector < 2044 sh->dev[i].sector + STRIPE_SECTORS) { 2045 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 2046 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2047 if (!raid5_dec_bi_phys_segments(bi)) { 2048 md_write_end(conf->mddev); 2049 bi->bi_next = *return_bi; 2050 *return_bi = bi; 2051 } 2052 bi = nextbi; 2053 } 2054 /* and fail all 'written' */ 2055 bi = sh->dev[i].written; 2056 sh->dev[i].written = NULL; 2057 if (bi) bitmap_end = 1; 2058 while (bi && bi->bi_sector < 2059 sh->dev[i].sector + STRIPE_SECTORS) { 2060 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); 2061 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2062 if (!raid5_dec_bi_phys_segments(bi)) { 2063 md_write_end(conf->mddev); 2064 bi->bi_next = *return_bi; 2065 *return_bi = bi; 2066 } 2067 bi = bi2; 2068 } 2069 2070 /* fail any reads if this device is non-operational and 2071 * the data has not reached the cache yet. 2072 */ 2073 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && 2074 (!test_bit(R5_Insync, &sh->dev[i].flags) || 2075 test_bit(R5_ReadError, &sh->dev[i].flags))) { 2076 bi = sh->dev[i].toread; 2077 sh->dev[i].toread = NULL; 2078 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 2079 wake_up(&conf->wait_for_overlap); 2080 if (bi) s->to_read--; 2081 while (bi && bi->bi_sector < 2082 sh->dev[i].sector + STRIPE_SECTORS) { 2083 struct bio *nextbi = 2084 r5_next_bio(bi, sh->dev[i].sector); 2085 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2086 if (!raid5_dec_bi_phys_segments(bi)) { 2087 bi->bi_next = *return_bi; 2088 *return_bi = bi; 2089 } 2090 bi = nextbi; 2091 } 2092 } 2093 spin_unlock_irq(&conf->device_lock); 2094 if (bitmap_end) 2095 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 2096 STRIPE_SECTORS, 0, 0); 2097 } 2098 2099 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 2100 if (atomic_dec_and_test(&conf->pending_full_writes)) 2101 md_wakeup_thread(conf->mddev->thread); 2102 } 2103 2104 /* fetch_block5 - checks the given member device to see if its data needs 2105 * to be read or computed to satisfy a request. 
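 * A block is wanted if it has a pending read, a partial (non-overwrite)
 * write, if the stripe is being synced or expanded, or if a failed
 * device has outstanding requests so that its missing block must be
 * computed from the surviving ones.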
2106 * 2107 * Returns 1 when no more member devices need to be checked, otherwise returns 2108 * 0 to tell the loop in handle_stripe_fill5 to continue 2109 */ 2110 static int fetch_block5(struct stripe_head *sh, struct stripe_head_state *s, 2111 int disk_idx, int disks) 2112 { 2113 struct r5dev *dev = &sh->dev[disk_idx]; 2114 struct r5dev *failed_dev = &sh->dev[s->failed_num]; 2115 2116 /* is the data in this block needed, and can we get it? */ 2117 if (!test_bit(R5_LOCKED, &dev->flags) && 2118 !test_bit(R5_UPTODATE, &dev->flags) && 2119 (dev->toread || 2120 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) || 2121 s->syncing || s->expanding || 2122 (s->failed && 2123 (failed_dev->toread || 2124 (failed_dev->towrite && 2125 !test_bit(R5_OVERWRITE, &failed_dev->flags)))))) { 2126 /* We would like to get this block, possibly by computing it, 2127 * otherwise read it if the backing disk is insync 2128 */ 2129 if ((s->uptodate == disks - 1) && 2130 (s->failed && disk_idx == s->failed_num)) { 2131 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 2132 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 2133 set_bit(R5_Wantcompute, &dev->flags); 2134 sh->ops.target = disk_idx; 2135 s->req_compute = 1; 2136 /* Careful: from this point on 'uptodate' is in the eye 2137 * of raid5_run_ops which services 'compute' operations 2138 * before writes. R5_Wantcompute flags a block that will 2139 * be R5_UPTODATE by the time it is needed for a 2140 * subsequent operation. 2141 */ 2142 s->uptodate++; 2143 return 1; /* uptodate + compute == disks */ 2144 } else if (test_bit(R5_Insync, &dev->flags)) { 2145 set_bit(R5_LOCKED, &dev->flags); 2146 set_bit(R5_Wantread, &dev->flags); 2147 s->locked++; 2148 pr_debug("Reading block %d (sync=%d)\n", disk_idx, 2149 s->syncing); 2150 } 2151 } 2152 2153 return 0; 2154 } 2155 2156 /** 2157 * handle_stripe_fill5 - read or compute data to satisfy pending requests. 
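 *
 * Walks every device in the stripe and calls fetch_block5() for each,
 * stopping as soon as fetch_block5() reports that no further devices
 * need to be checked.  Nothing new is scheduled while a compute, check
 * or reconstruct operation is already in flight for this stripe.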
2158 */ 2159 static void handle_stripe_fill5(struct stripe_head *sh, 2160 struct stripe_head_state *s, int disks) 2161 { 2162 int i; 2163 2164 /* look for blocks to read/compute, skip this if a compute 2165 * is already in flight, or if the stripe contents are in the 2166 * midst of changing due to a write 2167 */ 2168 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && 2169 !sh->reconstruct_state) 2170 for (i = disks; i--; ) 2171 if (fetch_block5(sh, s, i, disks)) 2172 break; 2173 set_bit(STRIPE_HANDLE, &sh->state); 2174 } 2175 2176 static void handle_stripe_fill6(struct stripe_head *sh, 2177 struct stripe_head_state *s, struct r6_state *r6s, 2178 int disks) 2179 { 2180 int i; 2181 for (i = disks; i--; ) { 2182 struct r5dev *dev = &sh->dev[i]; 2183 if (!test_bit(R5_LOCKED, &dev->flags) && 2184 !test_bit(R5_UPTODATE, &dev->flags) && 2185 (dev->toread || (dev->towrite && 2186 !test_bit(R5_OVERWRITE, &dev->flags)) || 2187 s->syncing || s->expanding || 2188 (s->failed >= 1 && 2189 (sh->dev[r6s->failed_num[0]].toread || 2190 s->to_write)) || 2191 (s->failed >= 2 && 2192 (sh->dev[r6s->failed_num[1]].toread || 2193 s->to_write)))) { 2194 /* we would like to get this block, possibly 2195 * by computing it, but we might not be able to 2196 */ 2197 if ((s->uptodate == disks - 1) && 2198 (s->failed && (i == r6s->failed_num[0] || 2199 i == r6s->failed_num[1]))) { 2200 pr_debug("Computing stripe %llu block %d\n", 2201 (unsigned long long)sh->sector, i); 2202 compute_block_1(sh, i, 0); 2203 s->uptodate++; 2204 } else if ( s->uptodate == disks-2 && s->failed >= 2 ) { 2205 /* Computing 2-failure is *very* expensive; only 2206 * do it if failed >= 2 2207 */ 2208 int other; 2209 for (other = disks; other--; ) { 2210 if (other == i) 2211 continue; 2212 if (!test_bit(R5_UPTODATE, 2213 &sh->dev[other].flags)) 2214 break; 2215 } 2216 BUG_ON(other < 0); 2217 pr_debug("Computing stripe %llu blocks %d,%d\n", 2218 (unsigned long long)sh->sector, 2219 i, other); 2220 compute_block_2(sh, i, other); 2221 s->uptodate += 2; 2222 } else if (test_bit(R5_Insync, &dev->flags)) { 2223 set_bit(R5_LOCKED, &dev->flags); 2224 set_bit(R5_Wantread, &dev->flags); 2225 s->locked++; 2226 pr_debug("Reading block %d (sync=%d)\n", 2227 i, s->syncing); 2228 } 2229 } 2230 } 2231 set_bit(STRIPE_HANDLE, &sh->state); 2232 } 2233 2234 2235 /* handle_stripe_clean_event 2236 * any written block on an uptodate or failed drive can be returned. 2237 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but 2238 * never LOCKED, so we don't need to test 'failed' directly. 
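 *
 * Completed write bios are moved onto *return_bi so the caller can end
 * them once the stripe lock has been dropped, and bitmap_endwrite() is
 * called as soon as no further writes are pending for that stripe+device.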
2239 */ 2240 static void handle_stripe_clean_event(raid5_conf_t *conf, 2241 struct stripe_head *sh, int disks, struct bio **return_bi) 2242 { 2243 int i; 2244 struct r5dev *dev; 2245 2246 for (i = disks; i--; ) 2247 if (sh->dev[i].written) { 2248 dev = &sh->dev[i]; 2249 if (!test_bit(R5_LOCKED, &dev->flags) && 2250 test_bit(R5_UPTODATE, &dev->flags)) { 2251 /* We can return any write requests */ 2252 struct bio *wbi, *wbi2; 2253 int bitmap_end = 0; 2254 pr_debug("Return write for disc %d\n", i); 2255 spin_lock_irq(&conf->device_lock); 2256 wbi = dev->written; 2257 dev->written = NULL; 2258 while (wbi && wbi->bi_sector < 2259 dev->sector + STRIPE_SECTORS) { 2260 wbi2 = r5_next_bio(wbi, dev->sector); 2261 if (!raid5_dec_bi_phys_segments(wbi)) { 2262 md_write_end(conf->mddev); 2263 wbi->bi_next = *return_bi; 2264 *return_bi = wbi; 2265 } 2266 wbi = wbi2; 2267 } 2268 if (dev->towrite == NULL) 2269 bitmap_end = 1; 2270 spin_unlock_irq(&conf->device_lock); 2271 if (bitmap_end) 2272 bitmap_endwrite(conf->mddev->bitmap, 2273 sh->sector, 2274 STRIPE_SECTORS, 2275 !test_bit(STRIPE_DEGRADED, &sh->state), 2276 0); 2277 } 2278 } 2279 2280 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 2281 if (atomic_dec_and_test(&conf->pending_full_writes)) 2282 md_wakeup_thread(conf->mddev->thread); 2283 } 2284 2285 static void handle_stripe_dirtying5(raid5_conf_t *conf, 2286 struct stripe_head *sh, struct stripe_head_state *s, int disks) 2287 { 2288 int rmw = 0, rcw = 0, i; 2289 for (i = disks; i--; ) { 2290 /* would I have to read this buffer for read_modify_write */ 2291 struct r5dev *dev = &sh->dev[i]; 2292 if ((dev->towrite || i == sh->pd_idx) && 2293 !test_bit(R5_LOCKED, &dev->flags) && 2294 !(test_bit(R5_UPTODATE, &dev->flags) || 2295 test_bit(R5_Wantcompute, &dev->flags))) { 2296 if (test_bit(R5_Insync, &dev->flags)) 2297 rmw++; 2298 else 2299 rmw += 2*disks; /* cannot read it */ 2300 } 2301 /* Would I have to read this buffer for reconstruct_write */ 2302 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx && 2303 !test_bit(R5_LOCKED, &dev->flags) && 2304 !(test_bit(R5_UPTODATE, &dev->flags) || 2305 test_bit(R5_Wantcompute, &dev->flags))) { 2306 if (test_bit(R5_Insync, &dev->flags)) rcw++; 2307 else 2308 rcw += 2*disks; 2309 } 2310 } 2311 pr_debug("for sector %llu, rmw=%d rcw=%d\n", 2312 (unsigned long long)sh->sector, rmw, rcw); 2313 set_bit(STRIPE_HANDLE, &sh->state); 2314 if (rmw < rcw && rmw > 0) 2315 /* prefer read-modify-write, but need to get some data */ 2316 for (i = disks; i--; ) { 2317 struct r5dev *dev = &sh->dev[i]; 2318 if ((dev->towrite || i == sh->pd_idx) && 2319 !test_bit(R5_LOCKED, &dev->flags) && 2320 !(test_bit(R5_UPTODATE, &dev->flags) || 2321 test_bit(R5_Wantcompute, &dev->flags)) && 2322 test_bit(R5_Insync, &dev->flags)) { 2323 if ( 2324 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2325 pr_debug("Read_old block " 2326 "%d for r-m-w\n", i); 2327 set_bit(R5_LOCKED, &dev->flags); 2328 set_bit(R5_Wantread, &dev->flags); 2329 s->locked++; 2330 } else { 2331 set_bit(STRIPE_DELAYED, &sh->state); 2332 set_bit(STRIPE_HANDLE, &sh->state); 2333 } 2334 } 2335 } 2336 if (rcw <= rmw && rcw > 0) 2337 /* want reconstruct write, but need to get some data */ 2338 for (i = disks; i--; ) { 2339 struct r5dev *dev = &sh->dev[i]; 2340 if (!test_bit(R5_OVERWRITE, &dev->flags) && 2341 i != sh->pd_idx && 2342 !test_bit(R5_LOCKED, &dev->flags) && 2343 !(test_bit(R5_UPTODATE, &dev->flags) || 2344 test_bit(R5_Wantcompute, &dev->flags)) && 2345 test_bit(R5_Insync, &dev->flags)) { 2346 if ( 2347 
test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2348 pr_debug("Read_old block " 2349 "%d for Reconstruct\n", i); 2350 set_bit(R5_LOCKED, &dev->flags); 2351 set_bit(R5_Wantread, &dev->flags); 2352 s->locked++; 2353 } else { 2354 set_bit(STRIPE_DELAYED, &sh->state); 2355 set_bit(STRIPE_HANDLE, &sh->state); 2356 } 2357 } 2358 } 2359 /* now if nothing is locked, and if we have enough data, 2360 * we can start a write request 2361 */ 2362 /* since handle_stripe can be called at any time we need to handle the 2363 * case where a compute block operation has been submitted and then a 2364 * subsequent call wants to start a write request. raid5_run_ops only 2365 * handles the case where compute block and postxor are requested 2366 * simultaneously. If this is not the case then new writes need to be 2367 * held off until the compute completes. 2368 */ 2369 if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && 2370 (s->locked == 0 && (rcw == 0 || rmw == 0) && 2371 !test_bit(STRIPE_BIT_DELAY, &sh->state))) 2372 schedule_reconstruction5(sh, s, rcw == 0, 0); 2373 } 2374 2375 static void handle_stripe_dirtying6(raid5_conf_t *conf, 2376 struct stripe_head *sh, struct stripe_head_state *s, 2377 struct r6_state *r6s, int disks) 2378 { 2379 int rcw = 0, must_compute = 0, pd_idx = sh->pd_idx, i; 2380 int qd_idx = sh->qd_idx; 2381 for (i = disks; i--; ) { 2382 struct r5dev *dev = &sh->dev[i]; 2383 /* Would I have to read this buffer for reconstruct_write */ 2384 if (!test_bit(R5_OVERWRITE, &dev->flags) 2385 && i != pd_idx && i != qd_idx 2386 && (!test_bit(R5_LOCKED, &dev->flags) 2387 ) && 2388 !test_bit(R5_UPTODATE, &dev->flags)) { 2389 if (test_bit(R5_Insync, &dev->flags)) rcw++; 2390 else { 2391 pr_debug("raid6: must_compute: " 2392 "disk %d flags=%#lx\n", i, dev->flags); 2393 must_compute++; 2394 } 2395 } 2396 } 2397 pr_debug("for sector %llu, rcw=%d, must_compute=%d\n", 2398 (unsigned long long)sh->sector, rcw, must_compute); 2399 set_bit(STRIPE_HANDLE, &sh->state); 2400 2401 if (rcw > 0) 2402 /* want reconstruct write, but need to get some data */ 2403 for (i = disks; i--; ) { 2404 struct r5dev *dev = &sh->dev[i]; 2405 if (!test_bit(R5_OVERWRITE, &dev->flags) 2406 && !(s->failed == 0 && (i == pd_idx || i == qd_idx)) 2407 && !test_bit(R5_LOCKED, &dev->flags) && 2408 !test_bit(R5_UPTODATE, &dev->flags) && 2409 test_bit(R5_Insync, &dev->flags)) { 2410 if ( 2411 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2412 pr_debug("Read_old stripe %llu " 2413 "block %d for Reconstruct\n", 2414 (unsigned long long)sh->sector, i); 2415 set_bit(R5_LOCKED, &dev->flags); 2416 set_bit(R5_Wantread, &dev->flags); 2417 s->locked++; 2418 } else { 2419 pr_debug("Request delayed stripe %llu " 2420 "block %d for Reconstruct\n", 2421 (unsigned long long)sh->sector, i); 2422 set_bit(STRIPE_DELAYED, &sh->state); 2423 set_bit(STRIPE_HANDLE, &sh->state); 2424 } 2425 } 2426 } 2427 /* now if nothing is locked, and if we have enough data, we can start a 2428 * write request 2429 */ 2430 if (s->locked == 0 && rcw == 0 && 2431 !test_bit(STRIPE_BIT_DELAY, &sh->state)) { 2432 if (must_compute > 0) { 2433 /* We have failed blocks and need to compute them */ 2434 switch (s->failed) { 2435 case 0: 2436 BUG(); 2437 case 1: 2438 compute_block_1(sh, r6s->failed_num[0], 0); 2439 break; 2440 case 2: 2441 compute_block_2(sh, r6s->failed_num[0], 2442 r6s->failed_num[1]); 2443 break; 2444 default: /* This request should have been failed? 
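 * handle_failed_stripe() should already have failed these
 * requests when more than two devices were lost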
*/ 2445 BUG(); 2446 } 2447 } 2448 2449 pr_debug("Computing parity for stripe %llu\n", 2450 (unsigned long long)sh->sector); 2451 compute_parity6(sh, RECONSTRUCT_WRITE); 2452 /* now every locked buffer is ready to be written */ 2453 for (i = disks; i--; ) 2454 if (test_bit(R5_LOCKED, &sh->dev[i].flags)) { 2455 pr_debug("Writing stripe %llu block %d\n", 2456 (unsigned long long)sh->sector, i); 2457 s->locked++; 2458 set_bit(R5_Wantwrite, &sh->dev[i].flags); 2459 } 2460 if (s->locked == disks) 2461 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) 2462 atomic_inc(&conf->pending_full_writes); 2463 /* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */ 2464 set_bit(STRIPE_INSYNC, &sh->state); 2465 2466 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2467 atomic_dec(&conf->preread_active_stripes); 2468 if (atomic_read(&conf->preread_active_stripes) < 2469 IO_THRESHOLD) 2470 md_wakeup_thread(conf->mddev->thread); 2471 } 2472 } 2473 } 2474 2475 static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh, 2476 struct stripe_head_state *s, int disks) 2477 { 2478 struct r5dev *dev = NULL; 2479 2480 set_bit(STRIPE_HANDLE, &sh->state); 2481 2482 switch (sh->check_state) { 2483 case check_state_idle: 2484 /* start a new check operation if there are no failures */ 2485 if (s->failed == 0) { 2486 BUG_ON(s->uptodate != disks); 2487 sh->check_state = check_state_run; 2488 set_bit(STRIPE_OP_CHECK, &s->ops_request); 2489 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); 2490 s->uptodate--; 2491 break; 2492 } 2493 dev = &sh->dev[s->failed_num]; 2494 /* fall through */ 2495 case check_state_compute_result: 2496 sh->check_state = check_state_idle; 2497 if (!dev) 2498 dev = &sh->dev[sh->pd_idx]; 2499 2500 /* check that a write has not made the stripe insync */ 2501 if (test_bit(STRIPE_INSYNC, &sh->state)) 2502 break; 2503 2504 /* either failed parity check, or recovery is happening */ 2505 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags)); 2506 BUG_ON(s->uptodate != disks); 2507 2508 set_bit(R5_LOCKED, &dev->flags); 2509 s->locked++; 2510 set_bit(R5_Wantwrite, &dev->flags); 2511 2512 clear_bit(STRIPE_DEGRADED, &sh->state); 2513 set_bit(STRIPE_INSYNC, &sh->state); 2514 break; 2515 case check_state_run: 2516 break; /* we will be called again upon completion */ 2517 case check_state_check_result: 2518 sh->check_state = check_state_idle; 2519 2520 /* if a failure occurred during the check operation, leave 2521 * STRIPE_INSYNC not set and let the stripe be handled again 2522 */ 2523 if (s->failed) 2524 break; 2525 2526 /* handle a successful check operation, if parity is correct 2527 * we are done. Otherwise update the mismatch count and repair 2528 * parity if !MD_RECOVERY_CHECK 2529 */ 2530 if (sh->ops.zero_sum_result == 0) 2531 /* parity is correct (on disc, 2532 * not in buffer any more) 2533 */ 2534 set_bit(STRIPE_INSYNC, &sh->state); 2535 else { 2536 conf->mddev->resync_mismatches += STRIPE_SECTORS; 2537 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 2538 /* don't try to repair!! 
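 * a 'check' pass only counts mismatches; parity is
 * only rewritten during an explicit 'repair' or resync run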
*/ 2539 set_bit(STRIPE_INSYNC, &sh->state); 2540 else { 2541 sh->check_state = check_state_compute_run; 2542 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 2543 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 2544 set_bit(R5_Wantcompute, 2545 &sh->dev[sh->pd_idx].flags); 2546 sh->ops.target = sh->pd_idx; 2547 s->uptodate++; 2548 } 2549 } 2550 break; 2551 case check_state_compute_run: 2552 break; 2553 default: 2554 printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n", 2555 __func__, sh->check_state, 2556 (unsigned long long) sh->sector); 2557 BUG(); 2558 } 2559 } 2560 2561 2562 static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh, 2563 struct stripe_head_state *s, 2564 struct r6_state *r6s, struct page *tmp_page, 2565 int disks) 2566 { 2567 int update_p = 0, update_q = 0; 2568 struct r5dev *dev; 2569 int pd_idx = sh->pd_idx; 2570 int qd_idx = sh->qd_idx; 2571 2572 set_bit(STRIPE_HANDLE, &sh->state); 2573 2574 BUG_ON(s->failed > 2); 2575 BUG_ON(s->uptodate < disks); 2576 /* Want to check and possibly repair P and Q. 2577 * However there could be one 'failed' device, in which 2578 * case we can only check one of them, possibly using the 2579 * other to generate missing data 2580 */ 2581 2582 /* If !tmp_page, we cannot do the calculations, 2583 * but as we have set STRIPE_HANDLE, we will soon be called 2584 * by stripe_handle with a tmp_page - just wait until then. 2585 */ 2586 if (tmp_page) { 2587 if (s->failed == r6s->q_failed) { 2588 /* The only possible failed device holds 'Q', so it 2589 * makes sense to check P (If anything else were failed, 2590 * we would have used P to recreate it). 2591 */ 2592 compute_block_1(sh, pd_idx, 1); 2593 if (!page_is_zero(sh->dev[pd_idx].page)) { 2594 compute_block_1(sh, pd_idx, 0); 2595 update_p = 1; 2596 } 2597 } 2598 if (!r6s->q_failed && s->failed < 2) { 2599 /* q is not failed, and we didn't use it to generate 2600 * anything, so it makes sense to check it 2601 */ 2602 memcpy(page_address(tmp_page), 2603 page_address(sh->dev[qd_idx].page), 2604 STRIPE_SIZE); 2605 compute_parity6(sh, UPDATE_PARITY); 2606 if (memcmp(page_address(tmp_page), 2607 page_address(sh->dev[qd_idx].page), 2608 STRIPE_SIZE) != 0) { 2609 clear_bit(STRIPE_INSYNC, &sh->state); 2610 update_q = 1; 2611 } 2612 } 2613 if (update_p || update_q) { 2614 conf->mddev->resync_mismatches += STRIPE_SECTORS; 2615 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 2616 /* don't try to repair!! 
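 * just record the mismatch and clear update_p/update_q
 * so that P and Q are not rewritten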
*/ 2617 update_p = update_q = 0; 2618 } 2619 2620 /* now write out any block on a failed drive, 2621 * or P or Q if they need it 2622 */ 2623 2624 if (s->failed == 2) { 2625 dev = &sh->dev[r6s->failed_num[1]]; 2626 s->locked++; 2627 set_bit(R5_LOCKED, &dev->flags); 2628 set_bit(R5_Wantwrite, &dev->flags); 2629 } 2630 if (s->failed >= 1) { 2631 dev = &sh->dev[r6s->failed_num[0]]; 2632 s->locked++; 2633 set_bit(R5_LOCKED, &dev->flags); 2634 set_bit(R5_Wantwrite, &dev->flags); 2635 } 2636 2637 if (update_p) { 2638 dev = &sh->dev[pd_idx]; 2639 s->locked++; 2640 set_bit(R5_LOCKED, &dev->flags); 2641 set_bit(R5_Wantwrite, &dev->flags); 2642 } 2643 if (update_q) { 2644 dev = &sh->dev[qd_idx]; 2645 s->locked++; 2646 set_bit(R5_LOCKED, &dev->flags); 2647 set_bit(R5_Wantwrite, &dev->flags); 2648 } 2649 clear_bit(STRIPE_DEGRADED, &sh->state); 2650 2651 set_bit(STRIPE_INSYNC, &sh->state); 2652 } 2653 } 2654 2655 static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh, 2656 struct r6_state *r6s) 2657 { 2658 int i; 2659 2660 /* We have read all the blocks in this stripe and now we need to 2661 * copy some of them into a target stripe for expand. 2662 */ 2663 struct dma_async_tx_descriptor *tx = NULL; 2664 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state); 2665 for (i = 0; i < sh->disks; i++) 2666 if (i != sh->pd_idx && i != sh->qd_idx) { 2667 int dd_idx, j; 2668 struct stripe_head *sh2; 2669 2670 sector_t bn = compute_blocknr(sh, i, 1); 2671 sector_t s = raid5_compute_sector(conf, bn, 0, 2672 &dd_idx, NULL); 2673 sh2 = get_active_stripe(conf, s, 0, 1, 1); 2674 if (sh2 == NULL) 2675 /* so far only the early blocks of this stripe 2676 * have been requested. When later blocks 2677 * get requested, we will try again 2678 */ 2679 continue; 2680 if (!test_bit(STRIPE_EXPANDING, &sh2->state) || 2681 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) { 2682 /* must have already done this block */ 2683 release_stripe(sh2); 2684 continue; 2685 } 2686 2687 /* place all the copies on one channel */ 2688 tx = async_memcpy(sh2->dev[dd_idx].page, 2689 sh->dev[i].page, 0, 0, STRIPE_SIZE, 2690 ASYNC_TX_DEP_ACK, tx, NULL, NULL); 2691 2692 set_bit(R5_Expanded, &sh2->dev[dd_idx].flags); 2693 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags); 2694 for (j = 0; j < conf->raid_disks; j++) 2695 if (j != sh2->pd_idx && 2696 (!r6s || j != sh2->qd_idx) && 2697 !test_bit(R5_Expanded, &sh2->dev[j].flags)) 2698 break; 2699 if (j == conf->raid_disks) { 2700 set_bit(STRIPE_EXPAND_READY, &sh2->state); 2701 set_bit(STRIPE_HANDLE, &sh2->state); 2702 } 2703 release_stripe(sh2); 2704 2705 } 2706 /* done submitting copies, wait for them to complete */ 2707 if (tx) { 2708 async_tx_ack(tx); 2709 dma_wait_for_async_tx(tx); 2710 } 2711 } 2712 2713 2714 /* 2715 * handle_stripe - do things to a stripe. 2716 * 2717 * We lock the stripe and then examine the state of various bits 2718 * to see what needs to be done. 2719 * Possible results: 2720 * return some read request which now have data 2721 * return some write requests which are safely on disc 2722 * schedule a read on some buffers 2723 * schedule a write of some buffers 2724 * return confirmation of parity correctness 2725 * 2726 * buffers are taken off read_list or write_list, and bh_cache buffers 2727 * get BH_Lock set before the stripe lock is released. 
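 *
 * In the RAID-5 path the heavy lifting is deferred: handle_stripe5()
 * only records what is wanted in s.ops_request, and after the stripe
 * lock is dropped raid5_run_ops() performs the copy/xor work and
 * ops_run_io() submits the resulting I/O.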
2728 * 2729 */ 2730 2731 static bool handle_stripe5(struct stripe_head *sh) 2732 { 2733 raid5_conf_t *conf = sh->raid_conf; 2734 int disks = sh->disks, i; 2735 struct bio *return_bi = NULL; 2736 struct stripe_head_state s; 2737 struct r5dev *dev; 2738 mdk_rdev_t *blocked_rdev = NULL; 2739 int prexor; 2740 2741 memset(&s, 0, sizeof(s)); 2742 pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d " 2743 "reconstruct:%d\n", (unsigned long long)sh->sector, sh->state, 2744 atomic_read(&sh->count), sh->pd_idx, sh->check_state, 2745 sh->reconstruct_state); 2746 2747 spin_lock(&sh->lock); 2748 clear_bit(STRIPE_HANDLE, &sh->state); 2749 clear_bit(STRIPE_DELAYED, &sh->state); 2750 2751 s.syncing = test_bit(STRIPE_SYNCING, &sh->state); 2752 s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); 2753 s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); 2754 2755 /* Now to look around and see what can be done */ 2756 rcu_read_lock(); 2757 for (i=disks; i--; ) { 2758 mdk_rdev_t *rdev; 2759 struct r5dev *dev = &sh->dev[i]; 2760 clear_bit(R5_Insync, &dev->flags); 2761 2762 pr_debug("check %d: state 0x%lx toread %p read %p write %p " 2763 "written %p\n", i, dev->flags, dev->toread, dev->read, 2764 dev->towrite, dev->written); 2765 2766 /* maybe we can request a biofill operation 2767 * 2768 * new wantfill requests are only permitted while 2769 * ops_complete_biofill is guaranteed to be inactive 2770 */ 2771 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread && 2772 !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) 2773 set_bit(R5_Wantfill, &dev->flags); 2774 2775 /* now count some things */ 2776 if (test_bit(R5_LOCKED, &dev->flags)) s.locked++; 2777 if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++; 2778 if (test_bit(R5_Wantcompute, &dev->flags)) s.compute++; 2779 2780 if (test_bit(R5_Wantfill, &dev->flags)) 2781 s.to_fill++; 2782 else if (dev->toread) 2783 s.to_read++; 2784 if (dev->towrite) { 2785 s.to_write++; 2786 if (!test_bit(R5_OVERWRITE, &dev->flags)) 2787 s.non_overwrite++; 2788 } 2789 if (dev->written) 2790 s.written++; 2791 rdev = rcu_dereference(conf->disks[i].rdev); 2792 if (blocked_rdev == NULL && 2793 rdev && unlikely(test_bit(Blocked, &rdev->flags))) { 2794 blocked_rdev = rdev; 2795 atomic_inc(&rdev->nr_pending); 2796 } 2797 if (!rdev || !test_bit(In_sync, &rdev->flags)) { 2798 /* The ReadError flag will just be confusing now */ 2799 clear_bit(R5_ReadError, &dev->flags); 2800 clear_bit(R5_ReWrite, &dev->flags); 2801 } 2802 if (!rdev || !test_bit(In_sync, &rdev->flags) 2803 || test_bit(R5_ReadError, &dev->flags)) { 2804 s.failed++; 2805 s.failed_num = i; 2806 } else 2807 set_bit(R5_Insync, &dev->flags); 2808 } 2809 rcu_read_unlock(); 2810 2811 if (unlikely(blocked_rdev)) { 2812 if (s.syncing || s.expanding || s.expanded || 2813 s.to_write || s.written) { 2814 set_bit(STRIPE_HANDLE, &sh->state); 2815 goto unlock; 2816 } 2817 /* There is nothing for the blocked_rdev to block */ 2818 rdev_dec_pending(blocked_rdev, conf->mddev); 2819 blocked_rdev = NULL; 2820 } 2821 2822 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { 2823 set_bit(STRIPE_OP_BIOFILL, &s.ops_request); 2824 set_bit(STRIPE_BIOFILL_RUN, &sh->state); 2825 } 2826 2827 pr_debug("locked=%d uptodate=%d to_read=%d" 2828 " to_write=%d failed=%d failed_num=%d\n", 2829 s.locked, s.uptodate, s.to_read, s.to_write, 2830 s.failed, s.failed_num); 2831 /* check if the array has lost two devices and, if so, some requests might 2832 * need to be failed 2833 */ 2834 if (s.failed > 1 && s.to_read+s.to_write+s.written) 
2835 handle_failed_stripe(conf, sh, &s, disks, &return_bi); 2836 if (s.failed > 1 && s.syncing) { 2837 md_done_sync(conf->mddev, STRIPE_SECTORS,0); 2838 clear_bit(STRIPE_SYNCING, &sh->state); 2839 s.syncing = 0; 2840 } 2841 2842 /* might be able to return some write requests if the parity block 2843 * is safe, or on a failed drive 2844 */ 2845 dev = &sh->dev[sh->pd_idx]; 2846 if ( s.written && 2847 ((test_bit(R5_Insync, &dev->flags) && 2848 !test_bit(R5_LOCKED, &dev->flags) && 2849 test_bit(R5_UPTODATE, &dev->flags)) || 2850 (s.failed == 1 && s.failed_num == sh->pd_idx))) 2851 handle_stripe_clean_event(conf, sh, disks, &return_bi); 2852 2853 /* Now we might consider reading some blocks, either to check/generate 2854 * parity, or to satisfy requests 2855 * or to load a block that is being partially written. 2856 */ 2857 if (s.to_read || s.non_overwrite || 2858 (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding) 2859 handle_stripe_fill5(sh, &s, disks); 2860 2861 /* Now we check to see if any write operations have recently 2862 * completed 2863 */ 2864 prexor = 0; 2865 if (sh->reconstruct_state == reconstruct_state_prexor_drain_result) 2866 prexor = 1; 2867 if (sh->reconstruct_state == reconstruct_state_drain_result || 2868 sh->reconstruct_state == reconstruct_state_prexor_drain_result) { 2869 sh->reconstruct_state = reconstruct_state_idle; 2870 2871 /* All the 'written' buffers and the parity block are ready to 2872 * be written back to disk 2873 */ 2874 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags)); 2875 for (i = disks; i--; ) { 2876 dev = &sh->dev[i]; 2877 if (test_bit(R5_LOCKED, &dev->flags) && 2878 (i == sh->pd_idx || dev->written)) { 2879 pr_debug("Writing block %d\n", i); 2880 set_bit(R5_Wantwrite, &dev->flags); 2881 if (prexor) 2882 continue; 2883 if (!test_bit(R5_Insync, &dev->flags) || 2884 (i == sh->pd_idx && s.failed == 0)) 2885 set_bit(STRIPE_INSYNC, &sh->state); 2886 } 2887 } 2888 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2889 atomic_dec(&conf->preread_active_stripes); 2890 if (atomic_read(&conf->preread_active_stripes) < 2891 IO_THRESHOLD) 2892 md_wakeup_thread(conf->mddev->thread); 2893 } 2894 } 2895 2896 /* Now to consider new write requests and what else, if anything 2897 * should be read. We do not handle new writes when: 2898 * 1/ A 'write' operation (copy+xor) is already in flight. 2899 * 2/ A 'check' operation is in flight, as it may clobber the parity 2900 * block. 2901 */ 2902 if (s.to_write && !sh->reconstruct_state && !sh->check_state) 2903 handle_stripe_dirtying5(conf, sh, &s, disks); 2904 2905 /* maybe we need to check and possibly fix the parity for this stripe 2906 * Any reads will already have been scheduled, so we just see if enough 2907 * data is available. The parity check is held off while parity 2908 * dependent operations are in flight. 
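 * (i.e. while STRIPE_COMPUTE_RUN is set or any block is still locked)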
2909 */ 2910 if (sh->check_state || 2911 (s.syncing && s.locked == 0 && 2912 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && 2913 !test_bit(STRIPE_INSYNC, &sh->state))) 2914 handle_parity_checks5(conf, sh, &s, disks); 2915 2916 if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { 2917 md_done_sync(conf->mddev, STRIPE_SECTORS,1); 2918 clear_bit(STRIPE_SYNCING, &sh->state); 2919 } 2920 2921 /* If the failed drive is just a ReadError, then we might need to progress 2922 * the repair/check process 2923 */ 2924 if (s.failed == 1 && !conf->mddev->ro && 2925 test_bit(R5_ReadError, &sh->dev[s.failed_num].flags) 2926 && !test_bit(R5_LOCKED, &sh->dev[s.failed_num].flags) 2927 && test_bit(R5_UPTODATE, &sh->dev[s.failed_num].flags) 2928 ) { 2929 dev = &sh->dev[s.failed_num]; 2930 if (!test_bit(R5_ReWrite, &dev->flags)) { 2931 set_bit(R5_Wantwrite, &dev->flags); 2932 set_bit(R5_ReWrite, &dev->flags); 2933 set_bit(R5_LOCKED, &dev->flags); 2934 s.locked++; 2935 } else { 2936 /* let's read it back */ 2937 set_bit(R5_Wantread, &dev->flags); 2938 set_bit(R5_LOCKED, &dev->flags); 2939 s.locked++; 2940 } 2941 } 2942 2943 /* Finish reconstruct operations initiated by the expansion process */ 2944 if (sh->reconstruct_state == reconstruct_state_result) { 2945 struct stripe_head *sh2 2946 = get_active_stripe(conf, sh->sector, 1, 1, 1); 2947 if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) { 2948 /* sh cannot be written until sh2 has been read. 2949 * so arrange for sh to be delayed a little 2950 */ 2951 set_bit(STRIPE_DELAYED, &sh->state); 2952 set_bit(STRIPE_HANDLE, &sh->state); 2953 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, 2954 &sh2->state)) 2955 atomic_inc(&conf->preread_active_stripes); 2956 release_stripe(sh2); 2957 goto unlock; 2958 } 2959 if (sh2) 2960 release_stripe(sh2); 2961 2962 sh->reconstruct_state = reconstruct_state_idle; 2963 clear_bit(STRIPE_EXPANDING, &sh->state); 2964 for (i = conf->raid_disks; i--; ) { 2965 set_bit(R5_Wantwrite, &sh->dev[i].flags); 2966 set_bit(R5_LOCKED, &sh->dev[i].flags); 2967 s.locked++; 2968 } 2969 } 2970 2971 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && 2972 !sh->reconstruct_state) { 2973 /* Need to write out all blocks after computing parity */ 2974 sh->disks = conf->raid_disks; 2975 stripe_set_idx(sh->sector, conf, 0, sh); 2976 schedule_reconstruction5(sh, &s, 1, 1); 2977 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { 2978 clear_bit(STRIPE_EXPAND_READY, &sh->state); 2979 atomic_dec(&conf->reshape_stripes); 2980 wake_up(&conf->wait_for_overlap); 2981 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 2982 } 2983 2984 if (s.expanding && s.locked == 0 && 2985 !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) 2986 handle_stripe_expansion(conf, sh, NULL); 2987 2988 unlock: 2989 spin_unlock(&sh->lock); 2990 2991 /* wait for this device to become unblocked */ 2992 if (unlikely(blocked_rdev)) 2993 md_wait_for_blocked_rdev(blocked_rdev, conf->mddev); 2994 2995 if (s.ops_request) 2996 raid5_run_ops(sh, s.ops_request); 2997 2998 ops_run_io(sh, &s); 2999 3000 return_io(return_bi); 3001 3002 return blocked_rdev == NULL; 3003 } 3004 3005 static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page) 3006 { 3007 raid5_conf_t *conf = sh->raid_conf; 3008 int disks = sh->disks; 3009 struct bio *return_bi = NULL; 3010 int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx; 3011 struct stripe_head_state s; 3012 struct r6_state r6s; 3013 struct r5dev *dev, *pdev, *qdev; 3014 mdk_rdev_t *blocked_rdev = NULL; 3015 3016 
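	/*
	 * Unlike handle_stripe5(), this RAID-6 path still does its parity
	 * and recovery calculations synchronously via compute_parity6()
	 * and compute_block_1()/compute_block_2() rather than deferring
	 * them to raid5_run_ops().
	 */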
pr_debug("handling stripe %llu, state=%#lx cnt=%d, " 3017 "pd_idx=%d, qd_idx=%d\n", 3018 (unsigned long long)sh->sector, sh->state, 3019 atomic_read(&sh->count), pd_idx, qd_idx); 3020 memset(&s, 0, sizeof(s)); 3021 3022 spin_lock(&sh->lock); 3023 clear_bit(STRIPE_HANDLE, &sh->state); 3024 clear_bit(STRIPE_DELAYED, &sh->state); 3025 3026 s.syncing = test_bit(STRIPE_SYNCING, &sh->state); 3027 s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); 3028 s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); 3029 /* Now to look around and see what can be done */ 3030 3031 rcu_read_lock(); 3032 for (i=disks; i--; ) { 3033 mdk_rdev_t *rdev; 3034 dev = &sh->dev[i]; 3035 clear_bit(R5_Insync, &dev->flags); 3036 3037 pr_debug("check %d: state 0x%lx read %p write %p written %p\n", 3038 i, dev->flags, dev->toread, dev->towrite, dev->written); 3039 /* maybe we can reply to a read */ 3040 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) { 3041 struct bio *rbi, *rbi2; 3042 pr_debug("Return read for disc %d\n", i); 3043 spin_lock_irq(&conf->device_lock); 3044 rbi = dev->toread; 3045 dev->toread = NULL; 3046 if (test_and_clear_bit(R5_Overlap, &dev->flags)) 3047 wake_up(&conf->wait_for_overlap); 3048 spin_unlock_irq(&conf->device_lock); 3049 while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) { 3050 copy_data(0, rbi, dev->page, dev->sector); 3051 rbi2 = r5_next_bio(rbi, dev->sector); 3052 spin_lock_irq(&conf->device_lock); 3053 if (!raid5_dec_bi_phys_segments(rbi)) { 3054 rbi->bi_next = return_bi; 3055 return_bi = rbi; 3056 } 3057 spin_unlock_irq(&conf->device_lock); 3058 rbi = rbi2; 3059 } 3060 } 3061 3062 /* now count some things */ 3063 if (test_bit(R5_LOCKED, &dev->flags)) s.locked++; 3064 if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++; 3065 3066 3067 if (dev->toread) 3068 s.to_read++; 3069 if (dev->towrite) { 3070 s.to_write++; 3071 if (!test_bit(R5_OVERWRITE, &dev->flags)) 3072 s.non_overwrite++; 3073 } 3074 if (dev->written) 3075 s.written++; 3076 rdev = rcu_dereference(conf->disks[i].rdev); 3077 if (blocked_rdev == NULL && 3078 rdev && unlikely(test_bit(Blocked, &rdev->flags))) { 3079 blocked_rdev = rdev; 3080 atomic_inc(&rdev->nr_pending); 3081 } 3082 if (!rdev || !test_bit(In_sync, &rdev->flags)) { 3083 /* The ReadError flag will just be confusing now */ 3084 clear_bit(R5_ReadError, &dev->flags); 3085 clear_bit(R5_ReWrite, &dev->flags); 3086 } 3087 if (!rdev || !test_bit(In_sync, &rdev->flags) 3088 || test_bit(R5_ReadError, &dev->flags)) { 3089 if (s.failed < 2) 3090 r6s.failed_num[s.failed] = i; 3091 s.failed++; 3092 } else 3093 set_bit(R5_Insync, &dev->flags); 3094 } 3095 rcu_read_unlock(); 3096 3097 if (unlikely(blocked_rdev)) { 3098 if (s.syncing || s.expanding || s.expanded || 3099 s.to_write || s.written) { 3100 set_bit(STRIPE_HANDLE, &sh->state); 3101 goto unlock; 3102 } 3103 /* There is nothing for the blocked_rdev to block */ 3104 rdev_dec_pending(blocked_rdev, conf->mddev); 3105 blocked_rdev = NULL; 3106 } 3107 3108 pr_debug("locked=%d uptodate=%d to_read=%d" 3109 " to_write=%d failed=%d failed_num=%d,%d\n", 3110 s.locked, s.uptodate, s.to_read, s.to_write, s.failed, 3111 r6s.failed_num[0], r6s.failed_num[1]); 3112 /* check if the array has lost >2 devices and, if so, some requests 3113 * might need to be failed 3114 */ 3115 if (s.failed > 2 && s.to_read+s.to_write+s.written) 3116 handle_failed_stripe(conf, sh, &s, disks, &return_bi); 3117 if (s.failed > 2 && s.syncing) { 3118 md_done_sync(conf->mddev, STRIPE_SECTORS,0); 3119 clear_bit(STRIPE_SYNCING, &sh->state); 
3120 s.syncing = 0; 3121 } 3122 3123 /* 3124 * might be able to return some write requests if the parity blocks 3125 * are safe, or on a failed drive 3126 */ 3127 pdev = &sh->dev[pd_idx]; 3128 r6s.p_failed = (s.failed >= 1 && r6s.failed_num[0] == pd_idx) 3129 || (s.failed >= 2 && r6s.failed_num[1] == pd_idx); 3130 qdev = &sh->dev[qd_idx]; 3131 r6s.q_failed = (s.failed >= 1 && r6s.failed_num[0] == qd_idx) 3132 || (s.failed >= 2 && r6s.failed_num[1] == qd_idx); 3133 3134 if ( s.written && 3135 ( r6s.p_failed || ((test_bit(R5_Insync, &pdev->flags) 3136 && !test_bit(R5_LOCKED, &pdev->flags) 3137 && test_bit(R5_UPTODATE, &pdev->flags)))) && 3138 ( r6s.q_failed || ((test_bit(R5_Insync, &qdev->flags) 3139 && !test_bit(R5_LOCKED, &qdev->flags) 3140 && test_bit(R5_UPTODATE, &qdev->flags))))) 3141 handle_stripe_clean_event(conf, sh, disks, &return_bi); 3142 3143 /* Now we might consider reading some blocks, either to check/generate 3144 * parity, or to satisfy requests 3145 * or to load a block that is being partially written. 3146 */ 3147 if (s.to_read || s.non_overwrite || (s.to_write && s.failed) || 3148 (s.syncing && (s.uptodate < disks)) || s.expanding) 3149 handle_stripe_fill6(sh, &s, &r6s, disks); 3150 3151 /* now to consider writing and what else, if anything should be read */ 3152 if (s.to_write) 3153 handle_stripe_dirtying6(conf, sh, &s, &r6s, disks); 3154 3155 /* maybe we need to check and possibly fix the parity for this stripe 3156 * Any reads will already have been scheduled, so we just see if enough 3157 * data is available 3158 */ 3159 if (s.syncing && s.locked == 0 && !test_bit(STRIPE_INSYNC, &sh->state)) 3160 handle_parity_checks6(conf, sh, &s, &r6s, tmp_page, disks); 3161 3162 if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { 3163 md_done_sync(conf->mddev, STRIPE_SECTORS,1); 3164 clear_bit(STRIPE_SYNCING, &sh->state); 3165 } 3166 3167 /* If the failed drives are just a ReadError, then we might need 3168 * to progress the repair/check process 3169 */ 3170 if (s.failed <= 2 && !conf->mddev->ro) 3171 for (i = 0; i < s.failed; i++) { 3172 dev = &sh->dev[r6s.failed_num[i]]; 3173 if (test_bit(R5_ReadError, &dev->flags) 3174 && !test_bit(R5_LOCKED, &dev->flags) 3175 && test_bit(R5_UPTODATE, &dev->flags) 3176 ) { 3177 if (!test_bit(R5_ReWrite, &dev->flags)) { 3178 set_bit(R5_Wantwrite, &dev->flags); 3179 set_bit(R5_ReWrite, &dev->flags); 3180 set_bit(R5_LOCKED, &dev->flags); 3181 } else { 3182 /* let's read it back */ 3183 set_bit(R5_Wantread, &dev->flags); 3184 set_bit(R5_LOCKED, &dev->flags); 3185 } 3186 } 3187 } 3188 3189 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) { 3190 struct stripe_head *sh2 3191 = get_active_stripe(conf, sh->sector, 1, 1, 1); 3192 if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) { 3193 /* sh cannot be written until sh2 has been read. 
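 * (sh2 is the stripe covering these sectors in the old layout; while
 * it is still flagged STRIPE_EXPAND_SOURCE its data has not yet been
 * copied out)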
3194 * so arrange for sh to be delayed a little 3195 */ 3196 set_bit(STRIPE_DELAYED, &sh->state); 3197 set_bit(STRIPE_HANDLE, &sh->state); 3198 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, 3199 &sh2->state)) 3200 atomic_inc(&conf->preread_active_stripes); 3201 release_stripe(sh2); 3202 goto unlock; 3203 } 3204 if (sh2) 3205 release_stripe(sh2); 3206 3207 /* Need to write out all blocks after computing P&Q */ 3208 sh->disks = conf->raid_disks; 3209 stripe_set_idx(sh->sector, conf, 0, sh); 3210 compute_parity6(sh, RECONSTRUCT_WRITE); 3211 for (i = conf->raid_disks ; i-- ; ) { 3212 set_bit(R5_LOCKED, &sh->dev[i].flags); 3213 s.locked++; 3214 set_bit(R5_Wantwrite, &sh->dev[i].flags); 3215 } 3216 clear_bit(STRIPE_EXPANDING, &sh->state); 3217 } else if (s.expanded) { 3218 clear_bit(STRIPE_EXPAND_READY, &sh->state); 3219 atomic_dec(&conf->reshape_stripes); 3220 wake_up(&conf->wait_for_overlap); 3221 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 3222 } 3223 3224 if (s.expanding && s.locked == 0 && 3225 !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) 3226 handle_stripe_expansion(conf, sh, &r6s); 3227 3228 unlock: 3229 spin_unlock(&sh->lock); 3230 3231 /* wait for this device to become unblocked */ 3232 if (unlikely(blocked_rdev)) 3233 md_wait_for_blocked_rdev(blocked_rdev, conf->mddev); 3234 3235 ops_run_io(sh, &s); 3236 3237 return_io(return_bi); 3238 3239 return blocked_rdev == NULL; 3240 } 3241 3242 /* returns true if the stripe was handled */ 3243 static bool handle_stripe(struct stripe_head *sh, struct page *tmp_page) 3244 { 3245 if (sh->raid_conf->level == 6) 3246 return handle_stripe6(sh, tmp_page); 3247 else 3248 return handle_stripe5(sh); 3249 } 3250 3251 3252 3253 static void raid5_activate_delayed(raid5_conf_t *conf) 3254 { 3255 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { 3256 while (!list_empty(&conf->delayed_list)) { 3257 struct list_head *l = conf->delayed_list.next; 3258 struct stripe_head *sh; 3259 sh = list_entry(l, struct stripe_head, lru); 3260 list_del_init(l); 3261 clear_bit(STRIPE_DELAYED, &sh->state); 3262 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 3263 atomic_inc(&conf->preread_active_stripes); 3264 list_add_tail(&sh->lru, &conf->hold_list); 3265 } 3266 } else 3267 blk_plug_device(conf->mddev->queue); 3268 } 3269 3270 static void activate_bit_delay(raid5_conf_t *conf) 3271 { 3272 /* device_lock is held */ 3273 struct list_head head; 3274 list_add(&head, &conf->bitmap_list); 3275 list_del_init(&conf->bitmap_list); 3276 while (!list_empty(&head)) { 3277 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); 3278 list_del_init(&sh->lru); 3279 atomic_inc(&sh->count); 3280 __release_stripe(conf, sh); 3281 } 3282 } 3283 3284 static void unplug_slaves(mddev_t *mddev) 3285 { 3286 raid5_conf_t *conf = mddev->private; 3287 int i; 3288 3289 rcu_read_lock(); 3290 for (i = 0; i < conf->raid_disks; i++) { 3291 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); 3292 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { 3293 struct request_queue *r_queue = bdev_get_queue(rdev->bdev); 3294 3295 atomic_inc(&rdev->nr_pending); 3296 rcu_read_unlock(); 3297 3298 blk_unplug(r_queue); 3299 3300 rdev_dec_pending(rdev, mddev); 3301 rcu_read_lock(); 3302 } 3303 } 3304 rcu_read_unlock(); 3305 } 3306 3307 static void raid5_unplug_device(struct request_queue *q) 3308 { 3309 mddev_t *mddev = q->queuedata; 3310 raid5_conf_t *conf = mddev->private; 3311 unsigned long flags; 3312 3313 spin_lock_irqsave(&conf->device_lock, 
flags); 3314 3315 if (blk_remove_plug(q)) { 3316 conf->seq_flush++; 3317 raid5_activate_delayed(conf); 3318 } 3319 md_wakeup_thread(mddev->thread); 3320 3321 spin_unlock_irqrestore(&conf->device_lock, flags); 3322 3323 unplug_slaves(mddev); 3324 } 3325 3326 static int raid5_congested(void *data, int bits) 3327 { 3328 mddev_t *mddev = data; 3329 raid5_conf_t *conf = mddev->private; 3330 3331 /* No difference between reads and writes. Just check 3332 * how busy the stripe_cache is 3333 */ 3334 if (conf->inactive_blocked) 3335 return 1; 3336 if (conf->quiesce) 3337 return 1; 3338 if (list_empty_careful(&conf->inactive_list)) 3339 return 1; 3340 3341 return 0; 3342 } 3343 3344 /* We want read requests to align with chunks where possible, 3345 * but write requests don't need to. 3346 */ 3347 static int raid5_mergeable_bvec(struct request_queue *q, 3348 struct bvec_merge_data *bvm, 3349 struct bio_vec *biovec) 3350 { 3351 mddev_t *mddev = q->queuedata; 3352 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); 3353 int max; 3354 unsigned int chunk_sectors = mddev->chunk_sectors; 3355 unsigned int bio_sectors = bvm->bi_size >> 9; 3356 3357 if ((bvm->bi_rw & 1) == WRITE) 3358 return biovec->bv_len; /* always allow writes to be mergeable */ 3359 3360 if (mddev->new_chunk_sectors < mddev->chunk_sectors) 3361 chunk_sectors = mddev->new_chunk_sectors; 3362 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; 3363 if (max < 0) max = 0; 3364 if (max <= biovec->bv_len && bio_sectors == 0) 3365 return biovec->bv_len; 3366 else 3367 return max; 3368 } 3369 3370 3371 static int in_chunk_boundary(mddev_t *mddev, struct bio *bio) 3372 { 3373 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); 3374 unsigned int chunk_sectors = mddev->chunk_sectors; 3375 unsigned int bio_sectors = bio->bi_size >> 9; 3376 3377 if (mddev->new_chunk_sectors < mddev->chunk_sectors) 3378 chunk_sectors = mddev->new_chunk_sectors; 3379 return chunk_sectors >= 3380 ((sector & (chunk_sectors - 1)) + bio_sectors); 3381 } 3382 3383 /* 3384 * add bio to the retry LIFO ( in O(1) ... we are in interrupt ) 3385 * later sampled by raid5d. 3386 */ 3387 static void add_bio_to_retry(struct bio *bi,raid5_conf_t *conf) 3388 { 3389 unsigned long flags; 3390 3391 spin_lock_irqsave(&conf->device_lock, flags); 3392 3393 bi->bi_next = conf->retry_read_aligned_list; 3394 conf->retry_read_aligned_list = bi; 3395 3396 spin_unlock_irqrestore(&conf->device_lock, flags); 3397 md_wakeup_thread(conf->mddev->thread); 3398 } 3399 3400 3401 static struct bio *remove_bio_from_retry(raid5_conf_t *conf) 3402 { 3403 struct bio *bi; 3404 3405 bi = conf->retry_read_aligned; 3406 if (bi) { 3407 conf->retry_read_aligned = NULL; 3408 return bi; 3409 } 3410 bi = conf->retry_read_aligned_list; 3411 if(bi) { 3412 conf->retry_read_aligned_list = bi->bi_next; 3413 bi->bi_next = NULL; 3414 /* 3415 * this sets the active strip count to 1 and the processed 3416 * strip count to zero (upper 8 bits) 3417 */ 3418 bi->bi_phys_segments = 1; /* biased count of active stripes */ 3419 } 3420 3421 return bi; 3422 } 3423 3424 3425 /* 3426 * The "raid5_align_endio" should check if the read succeeded and if it 3427 * did, call bio_endio on the original bio (having bio_put the new bio 3428 * first). 3429 * If the read failed.. 
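 * the original bio is queued via add_bio_to_retry() so that raid5d
 * can pick it up later and resubmit it through the regular stripe
 * cache path.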
3430 */ 3431 static void raid5_align_endio(struct bio *bi, int error) 3432 { 3433 struct bio* raid_bi = bi->bi_private; 3434 mddev_t *mddev; 3435 raid5_conf_t *conf; 3436 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 3437 mdk_rdev_t *rdev; 3438 3439 bio_put(bi); 3440 3441 mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata; 3442 conf = mddev->private; 3443 rdev = (void*)raid_bi->bi_next; 3444 raid_bi->bi_next = NULL; 3445 3446 rdev_dec_pending(rdev, conf->mddev); 3447 3448 if (!error && uptodate) { 3449 bio_endio(raid_bi, 0); 3450 if (atomic_dec_and_test(&conf->active_aligned_reads)) 3451 wake_up(&conf->wait_for_stripe); 3452 return; 3453 } 3454 3455 3456 pr_debug("raid5_align_endio : io error...handing IO for a retry\n"); 3457 3458 add_bio_to_retry(raid_bi, conf); 3459 } 3460 3461 static int bio_fits_rdev(struct bio *bi) 3462 { 3463 struct request_queue *q = bdev_get_queue(bi->bi_bdev); 3464 3465 if ((bi->bi_size>>9) > queue_max_sectors(q)) 3466 return 0; 3467 blk_recount_segments(q, bi); 3468 if (bi->bi_phys_segments > queue_max_phys_segments(q)) 3469 return 0; 3470 3471 if (q->merge_bvec_fn) 3472 /* it's too hard to apply the merge_bvec_fn at this stage, 3473 * just just give up 3474 */ 3475 return 0; 3476 3477 return 1; 3478 } 3479 3480 3481 static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio) 3482 { 3483 mddev_t *mddev = q->queuedata; 3484 raid5_conf_t *conf = mddev->private; 3485 unsigned int dd_idx; 3486 struct bio* align_bi; 3487 mdk_rdev_t *rdev; 3488 3489 if (!in_chunk_boundary(mddev, raid_bio)) { 3490 pr_debug("chunk_aligned_read : non aligned\n"); 3491 return 0; 3492 } 3493 /* 3494 * use bio_clone to make a copy of the bio 3495 */ 3496 align_bi = bio_clone(raid_bio, GFP_NOIO); 3497 if (!align_bi) 3498 return 0; 3499 /* 3500 * set bi_end_io to a new function, and set bi_private to the 3501 * original bio. 3502 */ 3503 align_bi->bi_end_io = raid5_align_endio; 3504 align_bi->bi_private = raid_bio; 3505 /* 3506 * compute position 3507 */ 3508 align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector, 3509 0, 3510 &dd_idx, NULL); 3511 3512 rcu_read_lock(); 3513 rdev = rcu_dereference(conf->disks[dd_idx].rdev); 3514 if (rdev && test_bit(In_sync, &rdev->flags)) { 3515 atomic_inc(&rdev->nr_pending); 3516 rcu_read_unlock(); 3517 raid_bio->bi_next = (void*)rdev; 3518 align_bi->bi_bdev = rdev->bdev; 3519 align_bi->bi_flags &= ~(1 << BIO_SEG_VALID); 3520 align_bi->bi_sector += rdev->data_offset; 3521 3522 if (!bio_fits_rdev(align_bi)) { 3523 /* too big in some way */ 3524 bio_put(align_bi); 3525 rdev_dec_pending(rdev, mddev); 3526 return 0; 3527 } 3528 3529 spin_lock_irq(&conf->device_lock); 3530 wait_event_lock_irq(conf->wait_for_stripe, 3531 conf->quiesce == 0, 3532 conf->device_lock, /* nothing */); 3533 atomic_inc(&conf->active_aligned_reads); 3534 spin_unlock_irq(&conf->device_lock); 3535 3536 generic_make_request(align_bi); 3537 return 1; 3538 } else { 3539 rcu_read_unlock(); 3540 bio_put(align_bi); 3541 return 0; 3542 } 3543 } 3544 3545 /* __get_priority_stripe - get the next stripe to process 3546 * 3547 * Full stripe writes are allowed to pass preread active stripes up until 3548 * the bypass_threshold is exceeded. In general the bypass_count 3549 * increments when the handle_list is handled before the hold_list; however, it 3550 * will not be incremented when STRIPE_IO_STARTED is sampled set signifying a 3551 * stripe with in flight i/o. The bypass_count will be reset when the 3552 * head of the hold_list has changed, i.e. 
the head was promoted to the 3553 * handle_list. 3554 */ 3555 static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf) 3556 { 3557 struct stripe_head *sh; 3558 3559 pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n", 3560 __func__, 3561 list_empty(&conf->handle_list) ? "empty" : "busy", 3562 list_empty(&conf->hold_list) ? "empty" : "busy", 3563 atomic_read(&conf->pending_full_writes), conf->bypass_count); 3564 3565 if (!list_empty(&conf->handle_list)) { 3566 sh = list_entry(conf->handle_list.next, typeof(*sh), lru); 3567 3568 if (list_empty(&conf->hold_list)) 3569 conf->bypass_count = 0; 3570 else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) { 3571 if (conf->hold_list.next == conf->last_hold) 3572 conf->bypass_count++; 3573 else { 3574 conf->last_hold = conf->hold_list.next; 3575 conf->bypass_count -= conf->bypass_threshold; 3576 if (conf->bypass_count < 0) 3577 conf->bypass_count = 0; 3578 } 3579 } 3580 } else if (!list_empty(&conf->hold_list) && 3581 ((conf->bypass_threshold && 3582 conf->bypass_count > conf->bypass_threshold) || 3583 atomic_read(&conf->pending_full_writes) == 0)) { 3584 sh = list_entry(conf->hold_list.next, 3585 typeof(*sh), lru); 3586 conf->bypass_count -= conf->bypass_threshold; 3587 if (conf->bypass_count < 0) 3588 conf->bypass_count = 0; 3589 } else 3590 return NULL; 3591 3592 list_del_init(&sh->lru); 3593 atomic_inc(&sh->count); 3594 BUG_ON(atomic_read(&sh->count) != 1); 3595 return sh; 3596 } 3597 3598 static int make_request(struct request_queue *q, struct bio * bi) 3599 { 3600 mddev_t *mddev = q->queuedata; 3601 raid5_conf_t *conf = mddev->private; 3602 int dd_idx; 3603 sector_t new_sector; 3604 sector_t logical_sector, last_sector; 3605 struct stripe_head *sh; 3606 const int rw = bio_data_dir(bi); 3607 int cpu, remaining; 3608 3609 if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) { 3610 bio_endio(bi, -EOPNOTSUPP); 3611 return 0; 3612 } 3613 3614 md_write_start(mddev, bi); 3615 3616 cpu = part_stat_lock(); 3617 part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); 3618 part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], 3619 bio_sectors(bi)); 3620 part_stat_unlock(); 3621 3622 if (rw == READ && 3623 mddev->reshape_position == MaxSector && 3624 chunk_aligned_read(q,bi)) 3625 return 0; 3626 3627 logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 3628 last_sector = bi->bi_sector + (bi->bi_size>>9); 3629 bi->bi_next = NULL; 3630 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ 3631 3632 for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { 3633 DEFINE_WAIT(w); 3634 int disks, data_disks; 3635 int previous; 3636 3637 retry: 3638 previous = 0; 3639 disks = conf->raid_disks; 3640 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); 3641 if (unlikely(conf->reshape_progress != MaxSector)) { 3642 /* spinlock is needed as reshape_progress may be 3643 * 64bit on a 32bit platform, and so it might be 3644 * possible to see a half-updated value 3645 * Ofcourse reshape_progress could change after 3646 * the lock is dropped, so once we get a reference 3647 * to the stripe that we think it is, we will have 3648 * to check again. 3649 */ 3650 spin_lock_irq(&conf->device_lock); 3651 if (mddev->delta_disks < 0 3652 ? logical_sector < conf->reshape_progress 3653 : logical_sector >= conf->reshape_progress) { 3654 disks = conf->previous_raid_disks; 3655 previous = 1; 3656 } else { 3657 if (mddev->delta_disks < 0 3658 ? 
logical_sector < conf->reshape_safe 3659 : logical_sector >= conf->reshape_safe) { 3660 spin_unlock_irq(&conf->device_lock); 3661 schedule(); 3662 goto retry; 3663 } 3664 } 3665 spin_unlock_irq(&conf->device_lock); 3666 } 3667 data_disks = disks - conf->max_degraded; 3668 3669 new_sector = raid5_compute_sector(conf, logical_sector, 3670 previous, 3671 &dd_idx, NULL); 3672 pr_debug("raid5: make_request, sector %llu logical %llu\n", 3673 (unsigned long long)new_sector, 3674 (unsigned long long)logical_sector); 3675 3676 sh = get_active_stripe(conf, new_sector, previous, 3677 (bi->bi_rw&RWA_MASK), 0); 3678 if (sh) { 3679 if (unlikely(previous)) { 3680 /* expansion might have moved on while waiting for a 3681 * stripe, so we must do the range check again. 3682 * Expansion could still move past after this 3683 * test, but as we are holding a reference to 3684 * 'sh', we know that if that happens, 3685 * STRIPE_EXPANDING will get set and the expansion 3686 * won't proceed until we finish with the stripe. 3687 */ 3688 int must_retry = 0; 3689 spin_lock_irq(&conf->device_lock); 3690 if (mddev->delta_disks < 0 3691 ? logical_sector >= conf->reshape_progress 3692 : logical_sector < conf->reshape_progress) 3693 /* mismatch, need to try again */ 3694 must_retry = 1; 3695 spin_unlock_irq(&conf->device_lock); 3696 if (must_retry) { 3697 release_stripe(sh); 3698 schedule(); 3699 goto retry; 3700 } 3701 } 3702 3703 if (bio_data_dir(bi) == WRITE && 3704 logical_sector >= mddev->suspend_lo && 3705 logical_sector < mddev->suspend_hi) { 3706 release_stripe(sh); 3707 /* As the suspend_* range is controlled by 3708 * userspace, we want an interruptible 3709 * wait. 3710 */ 3711 flush_signals(current); 3712 prepare_to_wait(&conf->wait_for_overlap, 3713 &w, TASK_INTERRUPTIBLE); 3714 if (logical_sector >= mddev->suspend_lo && 3715 logical_sector < mddev->suspend_hi) 3716 schedule(); 3717 goto retry; 3718 } 3719 3720 if (test_bit(STRIPE_EXPANDING, &sh->state) || 3721 !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) { 3722 /* Stripe is busy expanding or 3723 * add failed due to overlap. Flush everything 3724 * and wait a while 3725 */ 3726 raid5_unplug_device(mddev->queue); 3727 release_stripe(sh); 3728 schedule(); 3729 goto retry; 3730 } 3731 finish_wait(&conf->wait_for_overlap, &w); 3732 set_bit(STRIPE_HANDLE, &sh->state); 3733 clear_bit(STRIPE_DELAYED, &sh->state); 3734 release_stripe(sh); 3735 } else { 3736 /* cannot get stripe for read-ahead, just give-up */ 3737 clear_bit(BIO_UPTODATE, &bi->bi_flags); 3738 finish_wait(&conf->wait_for_overlap, &w); 3739 break; 3740 } 3741 3742 } 3743 spin_lock_irq(&conf->device_lock); 3744 remaining = raid5_dec_bi_phys_segments(bi); 3745 spin_unlock_irq(&conf->device_lock); 3746 if (remaining == 0) { 3747 3748 if ( rw == WRITE ) 3749 md_write_end(mddev); 3750 3751 bio_endio(bi, 0); 3752 } 3753 return 0; 3754 } 3755 3756 static sector_t raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks); 3757 3758 static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped) 3759 { 3760 /* reshaping is quite different to recovery/resync so it is 3761 * handled quite separately ... here. 3762 * 3763 * On each call to sync_request, we gather one chunk worth of 3764 * destination stripes and flag them as expanding. 3765 * Then we find all the source stripes and request reads. 3766 * As the reads complete, handle_stripe will copy the data 3767 * into the destination stripe and release that stripe. 
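 *
 * The unit of work ('reshape_sectors') is one chunk of the larger of
 * the old and new chunk sizes, and progress is periodically
 * checkpointed in the superblock so that an interrupted reshape can
 * be restarted safely.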
3768 */ 3769 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 3770 struct stripe_head *sh; 3771 sector_t first_sector, last_sector; 3772 int raid_disks = conf->previous_raid_disks; 3773 int data_disks = raid_disks - conf->max_degraded; 3774 int new_data_disks = conf->raid_disks - conf->max_degraded; 3775 int i; 3776 int dd_idx; 3777 sector_t writepos, readpos, safepos; 3778 sector_t stripe_addr; 3779 int reshape_sectors; 3780 struct list_head stripes; 3781 3782 if (sector_nr == 0) { 3783 /* If restarting in the middle, skip the initial sectors */ 3784 if (mddev->delta_disks < 0 && 3785 conf->reshape_progress < raid5_size(mddev, 0, 0)) { 3786 sector_nr = raid5_size(mddev, 0, 0) 3787 - conf->reshape_progress; 3788 } else if (mddev->delta_disks >= 0 && 3789 conf->reshape_progress > 0) 3790 sector_nr = conf->reshape_progress; 3791 sector_div(sector_nr, new_data_disks); 3792 if (sector_nr) { 3793 *skipped = 1; 3794 return sector_nr; 3795 } 3796 } 3797 3798 /* We need to process a full chunk at a time. 3799 * If old and new chunk sizes differ, we need to process the 3800 * largest of these 3801 */ 3802 if (mddev->new_chunk_sectors > mddev->chunk_sectors) 3803 reshape_sectors = mddev->new_chunk_sectors; 3804 else 3805 reshape_sectors = mddev->chunk_sectors; 3806 3807 /* we update the metadata when there is more than 3Meg 3808 * in the block range (that is rather arbitrary, should 3809 * probably be time based) or when the data about to be 3810 * copied would over-write the source of the data at 3811 * the front of the range. 3812 * i.e. one new_stripe along from reshape_progress new_maps 3813 * to after where reshape_safe old_maps to 3814 */ 3815 writepos = conf->reshape_progress; 3816 sector_div(writepos, new_data_disks); 3817 readpos = conf->reshape_progress; 3818 sector_div(readpos, data_disks); 3819 safepos = conf->reshape_safe; 3820 sector_div(safepos, data_disks); 3821 if (mddev->delta_disks < 0) { 3822 writepos -= min_t(sector_t, reshape_sectors, writepos); 3823 readpos += reshape_sectors; 3824 safepos += reshape_sectors; 3825 } else { 3826 writepos += reshape_sectors; 3827 readpos -= min_t(sector_t, reshape_sectors, readpos); 3828 safepos -= min_t(sector_t, reshape_sectors, safepos); 3829 } 3830 3831 /* 'writepos' is the most advanced device address we might write. 3832 * 'readpos' is the least advanced device address we might read. 3833 * 'safepos' is the least address recorded in the metadata as having 3834 * been reshaped. 3835 * If 'readpos' is behind 'writepos', then there is no way that we can 3836 * ensure safety in the face of a crash - that must be done by userspace 3837 * making a backup of the data. So in that case there is no particular 3838 * rush to update metadata. 3839 * Otherwise if 'safepos' is behind 'writepos', then we really need to 3840 * update the metadata to advance 'safepos' to match 'readpos' so that 3841 * we can be safe in the event of a crash. 3842 * So we insist on updating metadata if safepos is behind writepos and 3843 * readpos is beyond writepos. 3844 * In any case, update the metadata every 10 seconds. 3845 * Maybe that number should be configurable, but I'm not sure it is 3846 * worth it.... maybe it could be a multiple of safemode_delay??? 3847 */ 3848 if ((mddev->delta_disks < 0 3849 ? (safepos > writepos && readpos < writepos) 3850 : (safepos < writepos && readpos > writepos)) || 3851 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { 3852 /* Cannot proceed until we've updated the superblock... 
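* (we first wait for all in-flight reshape stripes to drain, copy reshape_progress into
* mddev->reshape_position, let md write out the superblock, and only then advance
* reshape_safe and wake any requests waiting in make_request)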
*/ 3853 wait_event(conf->wait_for_overlap, 3854 atomic_read(&conf->reshape_stripes)==0); 3855 mddev->reshape_position = conf->reshape_progress; 3856 mddev->curr_resync_completed = mddev->curr_resync; 3857 conf->reshape_checkpoint = jiffies; 3858 set_bit(MD_CHANGE_DEVS, &mddev->flags); 3859 md_wakeup_thread(mddev->thread); 3860 wait_event(mddev->sb_wait, mddev->flags == 0 || 3861 kthread_should_stop()); 3862 spin_lock_irq(&conf->device_lock); 3863 conf->reshape_safe = mddev->reshape_position; 3864 spin_unlock_irq(&conf->device_lock); 3865 wake_up(&conf->wait_for_overlap); 3866 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 3867 } 3868 3869 if (mddev->delta_disks < 0) { 3870 BUG_ON(conf->reshape_progress == 0); 3871 stripe_addr = writepos; 3872 BUG_ON((mddev->dev_sectors & 3873 ~((sector_t)reshape_sectors - 1)) 3874 - reshape_sectors - stripe_addr 3875 != sector_nr); 3876 } else { 3877 BUG_ON(writepos != sector_nr + reshape_sectors); 3878 stripe_addr = sector_nr; 3879 } 3880 INIT_LIST_HEAD(&stripes); 3881 for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) { 3882 int j; 3883 int skipped = 0; 3884 sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1); 3885 set_bit(STRIPE_EXPANDING, &sh->state); 3886 atomic_inc(&conf->reshape_stripes); 3887 /* If any of this stripe is beyond the end of the old 3888 * array, then we need to zero those blocks 3889 */ 3890 for (j=sh->disks; j--;) { 3891 sector_t s; 3892 if (j == sh->pd_idx) 3893 continue; 3894 if (conf->level == 6 && 3895 j == sh->qd_idx) 3896 continue; 3897 s = compute_blocknr(sh, j, 0); 3898 if (s < raid5_size(mddev, 0, 0)) { 3899 skipped = 1; 3900 continue; 3901 } 3902 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE); 3903 set_bit(R5_Expanded, &sh->dev[j].flags); 3904 set_bit(R5_UPTODATE, &sh->dev[j].flags); 3905 } 3906 if (!skipped) { 3907 set_bit(STRIPE_EXPAND_READY, &sh->state); 3908 set_bit(STRIPE_HANDLE, &sh->state); 3909 } 3910 list_add(&sh->lru, &stripes); 3911 } 3912 spin_lock_irq(&conf->device_lock); 3913 if (mddev->delta_disks < 0) 3914 conf->reshape_progress -= reshape_sectors * new_data_disks; 3915 else 3916 conf->reshape_progress += reshape_sectors * new_data_disks; 3917 spin_unlock_irq(&conf->device_lock); 3918 /* Ok, those stripe are ready. We can start scheduling 3919 * reads on the source stripes. 3920 * The source stripes are determined by mapping the first and last 3921 * block on the destination stripes. 3922 */ 3923 first_sector = 3924 raid5_compute_sector(conf, stripe_addr*(new_data_disks), 3925 1, &dd_idx, NULL); 3926 last_sector = 3927 raid5_compute_sector(conf, ((stripe_addr+reshape_sectors) 3928 * new_data_disks - 1), 3929 1, &dd_idx, NULL); 3930 if (last_sector >= mddev->dev_sectors) 3931 last_sector = mddev->dev_sectors - 1; 3932 while (first_sector <= last_sector) { 3933 sh = get_active_stripe(conf, first_sector, 1, 0, 1); 3934 set_bit(STRIPE_EXPAND_SOURCE, &sh->state); 3935 set_bit(STRIPE_HANDLE, &sh->state); 3936 release_stripe(sh); 3937 first_sector += STRIPE_SECTORS; 3938 } 3939 /* Now that the sources are clearly marked, we can release 3940 * the destination stripes 3941 */ 3942 while (!list_empty(&stripes)) { 3943 sh = list_entry(stripes.next, struct stripe_head, lru); 3944 list_del_init(&sh->lru); 3945 release_stripe(sh); 3946 } 3947 /* If this takes us to the resync_max point where we have to pause, 3948 * then we need to write out the superblock. 
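* (writing it here keeps the recorded reshape_position current, so progress made before the
* pause is not lost if the array is stopped while waiting at resync_max)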
3949 */ 3950 sector_nr += reshape_sectors; 3951 if ((sector_nr - mddev->curr_resync_completed) * 2 3952 >= mddev->resync_max - mddev->curr_resync_completed) { 3953 /* Cannot proceed until we've updated the superblock... */ 3954 wait_event(conf->wait_for_overlap, 3955 atomic_read(&conf->reshape_stripes) == 0); 3956 mddev->reshape_position = conf->reshape_progress; 3957 mddev->curr_resync_completed = mddev->curr_resync + reshape_sectors; 3958 conf->reshape_checkpoint = jiffies; 3959 set_bit(MD_CHANGE_DEVS, &mddev->flags); 3960 md_wakeup_thread(mddev->thread); 3961 wait_event(mddev->sb_wait, 3962 !test_bit(MD_CHANGE_DEVS, &mddev->flags) 3963 || kthread_should_stop()); 3964 spin_lock_irq(&conf->device_lock); 3965 conf->reshape_safe = mddev->reshape_position; 3966 spin_unlock_irq(&conf->device_lock); 3967 wake_up(&conf->wait_for_overlap); 3968 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 3969 } 3970 return reshape_sectors; 3971 } 3972 3973 /* FIXME go_faster isn't used */ 3974 static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster) 3975 { 3976 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 3977 struct stripe_head *sh; 3978 sector_t max_sector = mddev->dev_sectors; 3979 int sync_blocks; 3980 int still_degraded = 0; 3981 int i; 3982 3983 if (sector_nr >= max_sector) { 3984 /* just being told to finish up .. nothing much to do */ 3985 unplug_slaves(mddev); 3986 3987 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { 3988 end_reshape(conf); 3989 return 0; 3990 } 3991 3992 if (mddev->curr_resync < max_sector) /* aborted */ 3993 bitmap_end_sync(mddev->bitmap, mddev->curr_resync, 3994 &sync_blocks, 1); 3995 else /* completed sync */ 3996 conf->fullsync = 0; 3997 bitmap_close_sync(mddev->bitmap); 3998 3999 return 0; 4000 } 4001 4002 /* Allow raid5_quiesce to complete */ 4003 wait_event(conf->wait_for_overlap, conf->quiesce != 2); 4004 4005 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 4006 return reshape_request(mddev, sector_nr, skipped); 4007 4008 /* No need to check resync_max as we never do more than one 4009 * stripe, and as resync_max will always be on a chunk boundary, 4010 * if the check in md_do_sync didn't fire, there is no chance 4011 * of overstepping resync_max here 4012 */ 4013 4014 /* if there are too many failed drives and we are trying 4015 * to resync, then assert that we are finished, because there is 4016 * nothing we can do. 
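* (we simply report the remainder as done: *skipped is set and the number of sectors left is
* returned, so md_do_sync advances past them)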
4017 */ 4018 if (mddev->degraded >= conf->max_degraded && 4019 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 4020 sector_t rv = mddev->dev_sectors - sector_nr; 4021 *skipped = 1; 4022 return rv; 4023 } 4024 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && 4025 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && 4026 !conf->fullsync && sync_blocks >= STRIPE_SECTORS) { 4027 /* we can skip this block, and probably more */ 4028 sync_blocks /= STRIPE_SECTORS; 4029 *skipped = 1; 4030 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */ 4031 } 4032 4033 4034 bitmap_cond_end_sync(mddev->bitmap, sector_nr); 4035 4036 sh = get_active_stripe(conf, sector_nr, 0, 1, 0); 4037 if (sh == NULL) { 4038 sh = get_active_stripe(conf, sector_nr, 0, 0, 0); 4039 /* make sure we don't swamp the stripe cache if someone else 4040 * is trying to get access 4041 */ 4042 schedule_timeout_uninterruptible(1); 4043 } 4044 /* Need to check if array will still be degraded after recovery/resync 4045 * We don't need to check the 'failed' flag as when that gets set, 4046 * recovery aborts. 4047 */ 4048 for (i = 0; i < conf->raid_disks; i++) 4049 if (conf->disks[i].rdev == NULL) 4050 still_degraded = 1; 4051 4052 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); 4053 4054 spin_lock(&sh->lock); 4055 set_bit(STRIPE_SYNCING, &sh->state); 4056 clear_bit(STRIPE_INSYNC, &sh->state); 4057 spin_unlock(&sh->lock); 4058 4059 /* wait for any blocked device to be handled */ 4060 while(unlikely(!handle_stripe(sh, NULL))) 4061 ; 4062 release_stripe(sh); 4063 4064 return STRIPE_SECTORS; 4065 } 4066 4067 static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio) 4068 { 4069 /* We may not be able to submit a whole bio at once as there 4070 * may not be enough stripe_heads available. 4071 * We cannot pre-allocate enough stripe_heads as we may need 4072 * more than exist in the cache (if we allow ever larger chunks). 4073 * So we do one stripe head at a time and record in 4074 * ->bi_hw_segments how many have been done. 4075 * 4076 * We *know* that this entire raid_bio is in one chunk, so 4077 * it will be only one 'dd_idx' and only need one call to raid5_compute_sector. 
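* (scnt below counts stripes within the bio, and the hw_segments counter remembers how many
* have already been handled, so a later retry can skip straight to the first unfinished stripe)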
4078 */ 4079 struct stripe_head *sh; 4080 int dd_idx; 4081 sector_t sector, logical_sector, last_sector; 4082 int scnt = 0; 4083 int remaining; 4084 int handled = 0; 4085 4086 logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 4087 sector = raid5_compute_sector(conf, logical_sector, 4088 0, &dd_idx, NULL); 4089 last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9); 4090 4091 for (; logical_sector < last_sector; 4092 logical_sector += STRIPE_SECTORS, 4093 sector += STRIPE_SECTORS, 4094 scnt++) { 4095 4096 if (scnt < raid5_bi_hw_segments(raid_bio)) 4097 /* already done this stripe */ 4098 continue; 4099 4100 sh = get_active_stripe(conf, sector, 0, 1, 0); 4101 4102 if (!sh) { 4103 /* failed to get a stripe - must wait */ 4104 raid5_set_bi_hw_segments(raid_bio, scnt); 4105 conf->retry_read_aligned = raid_bio; 4106 return handled; 4107 } 4108 4109 set_bit(R5_ReadError, &sh->dev[dd_idx].flags); 4110 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) { 4111 release_stripe(sh); 4112 raid5_set_bi_hw_segments(raid_bio, scnt); 4113 conf->retry_read_aligned = raid_bio; 4114 return handled; 4115 } 4116 4117 handle_stripe(sh, NULL); 4118 release_stripe(sh); 4119 handled++; 4120 } 4121 spin_lock_irq(&conf->device_lock); 4122 remaining = raid5_dec_bi_phys_segments(raid_bio); 4123 spin_unlock_irq(&conf->device_lock); 4124 if (remaining == 0) 4125 bio_endio(raid_bio, 0); 4126 if (atomic_dec_and_test(&conf->active_aligned_reads)) 4127 wake_up(&conf->wait_for_stripe); 4128 return handled; 4129 } 4130 4131 4132 4133 /* 4134 * This is our raid5 kernel thread. 4135 * 4136 * We scan the hash table for stripes which can be handled now. 4137 * During the scan, completed stripes are saved for us by the interrupt 4138 * handler, so that they will not have to wait for our next wakeup. 
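* (each pass of the main loop below flushes any completed bitmap batch, retries queued
* chunk-aligned reads, then pulls stripes one at a time from the handle/hold lists via
* __get_priority_stripe() and feeds them to handle_stripe())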
4139 */ 4140 static void raid5d(mddev_t *mddev) 4141 { 4142 struct stripe_head *sh; 4143 raid5_conf_t *conf = mddev->private; 4144 int handled; 4145 4146 pr_debug("+++ raid5d active\n"); 4147 4148 md_check_recovery(mddev); 4149 4150 handled = 0; 4151 spin_lock_irq(&conf->device_lock); 4152 while (1) { 4153 struct bio *bio; 4154 4155 if (conf->seq_flush != conf->seq_write) { 4156 int seq = conf->seq_flush; 4157 spin_unlock_irq(&conf->device_lock); 4158 bitmap_unplug(mddev->bitmap); 4159 spin_lock_irq(&conf->device_lock); 4160 conf->seq_write = seq; 4161 activate_bit_delay(conf); 4162 } 4163 4164 while ((bio = remove_bio_from_retry(conf))) { 4165 int ok; 4166 spin_unlock_irq(&conf->device_lock); 4167 ok = retry_aligned_read(conf, bio); 4168 spin_lock_irq(&conf->device_lock); 4169 if (!ok) 4170 break; 4171 handled++; 4172 } 4173 4174 sh = __get_priority_stripe(conf); 4175 4176 if (!sh) 4177 break; 4178 spin_unlock_irq(&conf->device_lock); 4179 4180 handled++; 4181 handle_stripe(sh, conf->spare_page); 4182 release_stripe(sh); 4183 4184 spin_lock_irq(&conf->device_lock); 4185 } 4186 pr_debug("%d stripes handled\n", handled); 4187 4188 spin_unlock_irq(&conf->device_lock); 4189 4190 async_tx_issue_pending_all(); 4191 unplug_slaves(mddev); 4192 4193 pr_debug("--- raid5d inactive\n"); 4194 } 4195 4196 static ssize_t 4197 raid5_show_stripe_cache_size(mddev_t *mddev, char *page) 4198 { 4199 raid5_conf_t *conf = mddev->private; 4200 if (conf) 4201 return sprintf(page, "%d\n", conf->max_nr_stripes); 4202 else 4203 return 0; 4204 } 4205 4206 static ssize_t 4207 raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len) 4208 { 4209 raid5_conf_t *conf = mddev->private; 4210 unsigned long new; 4211 int err; 4212 4213 if (len >= PAGE_SIZE) 4214 return -EINVAL; 4215 if (!conf) 4216 return -ENODEV; 4217 4218 if (strict_strtoul(page, 10, &new)) 4219 return -EINVAL; 4220 if (new <= 16 || new > 32768) 4221 return -EINVAL; 4222 while (new < conf->max_nr_stripes) { 4223 if (drop_one_stripe(conf)) 4224 conf->max_nr_stripes--; 4225 else 4226 break; 4227 } 4228 err = md_allow_write(mddev); 4229 if (err) 4230 return err; 4231 while (new > conf->max_nr_stripes) { 4232 if (grow_one_stripe(conf)) 4233 conf->max_nr_stripes++; 4234 else break; 4235 } 4236 return len; 4237 } 4238 4239 static struct md_sysfs_entry 4240 raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR, 4241 raid5_show_stripe_cache_size, 4242 raid5_store_stripe_cache_size); 4243 4244 static ssize_t 4245 raid5_show_preread_threshold(mddev_t *mddev, char *page) 4246 { 4247 raid5_conf_t *conf = mddev->private; 4248 if (conf) 4249 return sprintf(page, "%d\n", conf->bypass_threshold); 4250 else 4251 return 0; 4252 } 4253 4254 static ssize_t 4255 raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len) 4256 { 4257 raid5_conf_t *conf = mddev->private; 4258 unsigned long new; 4259 if (len >= PAGE_SIZE) 4260 return -EINVAL; 4261 if (!conf) 4262 return -ENODEV; 4263 4264 if (strict_strtoul(page, 10, &new)) 4265 return -EINVAL; 4266 if (new > conf->max_nr_stripes) 4267 return -EINVAL; 4268 conf->bypass_threshold = new; 4269 return len; 4270 } 4271 4272 static struct md_sysfs_entry 4273 raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold, 4274 S_IRUGO | S_IWUSR, 4275 raid5_show_preread_threshold, 4276 raid5_store_preread_threshold); 4277 4278 static ssize_t 4279 stripe_cache_active_show(mddev_t *mddev, char *page) 4280 { 4281 raid5_conf_t *conf = mddev->private; 4282 if (conf) 4283 return sprintf(page, 
"%d\n", atomic_read(&conf->active_stripes)); 4284 else 4285 return 0; 4286 } 4287 4288 static struct md_sysfs_entry 4289 raid5_stripecache_active = __ATTR_RO(stripe_cache_active); 4290 4291 static struct attribute *raid5_attrs[] = { 4292 &raid5_stripecache_size.attr, 4293 &raid5_stripecache_active.attr, 4294 &raid5_preread_bypass_threshold.attr, 4295 NULL, 4296 }; 4297 static struct attribute_group raid5_attrs_group = { 4298 .name = NULL, 4299 .attrs = raid5_attrs, 4300 }; 4301 4302 static sector_t 4303 raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks) 4304 { 4305 raid5_conf_t *conf = mddev->private; 4306 4307 if (!sectors) 4308 sectors = mddev->dev_sectors; 4309 if (!raid_disks) { 4310 /* size is defined by the smallest of previous and new size */ 4311 if (conf->raid_disks < conf->previous_raid_disks) 4312 raid_disks = conf->raid_disks; 4313 else 4314 raid_disks = conf->previous_raid_disks; 4315 } 4316 4317 sectors &= ~((sector_t)mddev->chunk_sectors - 1); 4318 sectors &= ~((sector_t)mddev->new_chunk_sectors - 1); 4319 return sectors * (raid_disks - conf->max_degraded); 4320 } 4321 4322 static void free_conf(raid5_conf_t *conf) 4323 { 4324 shrink_stripes(conf); 4325 safe_put_page(conf->spare_page); 4326 kfree(conf->disks); 4327 kfree(conf->stripe_hashtbl); 4328 kfree(conf); 4329 } 4330 4331 static raid5_conf_t *setup_conf(mddev_t *mddev) 4332 { 4333 raid5_conf_t *conf; 4334 int raid_disk, memory; 4335 mdk_rdev_t *rdev; 4336 struct disk_info *disk; 4337 4338 if (mddev->new_level != 5 4339 && mddev->new_level != 4 4340 && mddev->new_level != 6) { 4341 printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n", 4342 mdname(mddev), mddev->new_level); 4343 return ERR_PTR(-EIO); 4344 } 4345 if ((mddev->new_level == 5 4346 && !algorithm_valid_raid5(mddev->new_layout)) || 4347 (mddev->new_level == 6 4348 && !algorithm_valid_raid6(mddev->new_layout))) { 4349 printk(KERN_ERR "raid5: %s: layout %d not supported\n", 4350 mdname(mddev), mddev->new_layout); 4351 return ERR_PTR(-EIO); 4352 } 4353 if (mddev->new_level == 6 && mddev->raid_disks < 4) { 4354 printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n", 4355 mdname(mddev), mddev->raid_disks); 4356 return ERR_PTR(-EINVAL); 4357 } 4358 4359 if (!mddev->new_chunk_sectors || 4360 (mddev->new_chunk_sectors << 9) % PAGE_SIZE || 4361 !is_power_of_2(mddev->new_chunk_sectors)) { 4362 printk(KERN_ERR "raid5: invalid chunk size %d for %s\n", 4363 mddev->new_chunk_sectors << 9, mdname(mddev)); 4364 return ERR_PTR(-EINVAL); 4365 } 4366 4367 conf = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL); 4368 if (conf == NULL) 4369 goto abort; 4370 4371 conf->raid_disks = mddev->raid_disks; 4372 if (mddev->reshape_position == MaxSector) 4373 conf->previous_raid_disks = mddev->raid_disks; 4374 else 4375 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; 4376 4377 conf->disks = kzalloc(conf->raid_disks * sizeof(struct disk_info), 4378 GFP_KERNEL); 4379 if (!conf->disks) 4380 goto abort; 4381 4382 conf->mddev = mddev; 4383 4384 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) 4385 goto abort; 4386 4387 if (mddev->new_level == 6) { 4388 conf->spare_page = alloc_page(GFP_KERNEL); 4389 if (!conf->spare_page) 4390 goto abort; 4391 } 4392 spin_lock_init(&conf->device_lock); 4393 init_waitqueue_head(&conf->wait_for_stripe); 4394 init_waitqueue_head(&conf->wait_for_overlap); 4395 INIT_LIST_HEAD(&conf->handle_list); 4396 INIT_LIST_HEAD(&conf->hold_list); 4397 INIT_LIST_HEAD(&conf->delayed_list); 4398 
INIT_LIST_HEAD(&conf->bitmap_list); 4399 INIT_LIST_HEAD(&conf->inactive_list); 4400 atomic_set(&conf->active_stripes, 0); 4401 atomic_set(&conf->preread_active_stripes, 0); 4402 atomic_set(&conf->active_aligned_reads, 0); 4403 conf->bypass_threshold = BYPASS_THRESHOLD; 4404 4405 pr_debug("raid5: run(%s) called.\n", mdname(mddev)); 4406 4407 list_for_each_entry(rdev, &mddev->disks, same_set) { 4408 raid_disk = rdev->raid_disk; 4409 if (raid_disk >= conf->raid_disks 4410 || raid_disk < 0) 4411 continue; 4412 disk = conf->disks + raid_disk; 4413 4414 disk->rdev = rdev; 4415 4416 if (test_bit(In_sync, &rdev->flags)) { 4417 char b[BDEVNAME_SIZE]; 4418 printk(KERN_INFO "raid5: device %s operational as raid" 4419 " disk %d\n", bdevname(rdev->bdev,b), 4420 raid_disk); 4421 } else 4422 /* Cannot rely on bitmap to complete recovery */ 4423 conf->fullsync = 1; 4424 } 4425 4426 conf->chunk_sectors = mddev->new_chunk_sectors; 4427 conf->level = mddev->new_level; 4428 if (conf->level == 6) 4429 conf->max_degraded = 2; 4430 else 4431 conf->max_degraded = 1; 4432 conf->algorithm = mddev->new_layout; 4433 conf->max_nr_stripes = NR_STRIPES; 4434 conf->reshape_progress = mddev->reshape_position; 4435 if (conf->reshape_progress != MaxSector) { 4436 conf->prev_chunk_sectors = mddev->chunk_sectors; 4437 conf->prev_algo = mddev->layout; 4438 } 4439 4440 memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + 4441 conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; 4442 if (grow_stripes(conf, conf->max_nr_stripes)) { 4443 printk(KERN_ERR 4444 "raid5: couldn't allocate %dkB for buffers\n", memory); 4445 goto abort; 4446 } else 4447 printk(KERN_INFO "raid5: allocated %dkB for %s\n", 4448 memory, mdname(mddev)); 4449 4450 conf->thread = md_register_thread(raid5d, mddev, "%s_raid5"); 4451 if (!conf->thread) { 4452 printk(KERN_ERR 4453 "raid5: couldn't allocate thread for %s\n", 4454 mdname(mddev)); 4455 goto abort; 4456 } 4457 4458 return conf; 4459 4460 abort: 4461 if (conf) { 4462 free_conf(conf); 4463 return ERR_PTR(-EIO); 4464 } else 4465 return ERR_PTR(-ENOMEM); 4466 } 4467 4468 static int run(mddev_t *mddev) 4469 { 4470 raid5_conf_t *conf; 4471 int working_disks = 0, chunk_size; 4472 mdk_rdev_t *rdev; 4473 4474 if (mddev->recovery_cp != MaxSector) 4475 printk(KERN_NOTICE "raid5: %s is not clean" 4476 " -- starting background reconstruction\n", 4477 mdname(mddev)); 4478 if (mddev->reshape_position != MaxSector) { 4479 /* Check that we can continue the reshape. 4480 * Currently only disks can change, it must 4481 * increase, and we must be past the point where 4482 * a stripe over-writes itself 4483 */ 4484 sector_t here_new, here_old; 4485 int old_disks; 4486 int max_degraded = (mddev->level == 6 ? 2 : 1); 4487 4488 if (mddev->new_level != mddev->level) { 4489 printk(KERN_ERR "raid5: %s: unsupported reshape " 4490 "required - aborting.\n", 4491 mdname(mddev)); 4492 return -EINVAL; 4493 } 4494 old_disks = mddev->raid_disks - mddev->delta_disks; 4495 /* reshape_position must be on a new-stripe boundary, and one 4496 * further up in new geometry must map after here in old 4497 * geometry. 
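* (here_new and here_old below are stripe numbers in the new and old geometry respectively;
* multiplying them back by the corresponding chunk size gives the per-device offsets that
* are actually compared)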
4498 */ 4499 here_new = mddev->reshape_position; 4500 if (sector_div(here_new, mddev->new_chunk_sectors * 4501 (mddev->raid_disks - max_degraded))) { 4502 printk(KERN_ERR "raid5: reshape_position not " 4503 "on a stripe boundary\n"); 4504 return -EINVAL; 4505 } 4506 /* here_new is the stripe we will write to */ 4507 here_old = mddev->reshape_position; 4508 sector_div(here_old, mddev->chunk_sectors * 4509 (old_disks-max_degraded)); 4510 /* here_old is the first stripe that we might need to read 4511 * from */ 4512 if (mddev->delta_disks == 0) { 4513 /* We cannot be sure it is safe to start an in-place 4514 * reshape. It is only safe if user-space is monitoring 4515 * and taking constant backups. 4516 * mdadm always starts a situation like this in 4517 * readonly mode so it can take control before 4518 * allowing any writes. So just check for that. 4519 */ 4520 if ((here_new * mddev->new_chunk_sectors != 4521 here_old * mddev->chunk_sectors) || 4522 mddev->ro == 0) { 4523 printk(KERN_ERR "raid5: in-place reshape must be started" 4524 " in read-only mode - aborting\n"); 4525 return -EINVAL; 4526 } 4527 } else if (mddev->delta_disks < 0 4528 ? (here_new * mddev->new_chunk_sectors <= 4529 here_old * mddev->chunk_sectors) 4530 : (here_new * mddev->new_chunk_sectors >= 4531 here_old * mddev->chunk_sectors)) { 4532 /* Reading from the same stripe as writing to - bad */ 4533 printk(KERN_ERR "raid5: reshape_position too early for " 4534 "auto-recovery - aborting.\n"); 4535 return -EINVAL; 4536 } 4537 printk(KERN_INFO "raid5: reshape will continue\n"); 4538 /* OK, we should be able to continue; */ 4539 } else { 4540 BUG_ON(mddev->level != mddev->new_level); 4541 BUG_ON(mddev->layout != mddev->new_layout); 4542 BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors); 4543 BUG_ON(mddev->delta_disks != 0); 4544 } 4545 4546 if (mddev->private == NULL) 4547 conf = setup_conf(mddev); 4548 else 4549 conf = mddev->private; 4550 4551 if (IS_ERR(conf)) 4552 return PTR_ERR(conf); 4553 4554 mddev->thread = conf->thread; 4555 conf->thread = NULL; 4556 mddev->private = conf; 4557 4558 /* 4559 * 0 for a fully functional array, 1 or 2 for a degraded array. 
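* (mddev->degraded is recomputed just below as raid_disks minus the number of In_sync
* devices found on the rdev list)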
4560 */ 4561 list_for_each_entry(rdev, &mddev->disks, same_set) 4562 if (rdev->raid_disk >= 0 && 4563 test_bit(In_sync, &rdev->flags)) 4564 working_disks++; 4565 4566 mddev->degraded = conf->raid_disks - working_disks; 4567 4568 if (mddev->degraded > conf->max_degraded) { 4569 printk(KERN_ERR "raid5: not enough operational devices for %s" 4570 " (%d/%d failed)\n", 4571 mdname(mddev), mddev->degraded, conf->raid_disks); 4572 goto abort; 4573 } 4574 4575 /* device size must be a multiple of chunk size */ 4576 mddev->dev_sectors &= ~(mddev->chunk_sectors - 1); 4577 mddev->resync_max_sectors = mddev->dev_sectors; 4578 4579 if (mddev->degraded > 0 && 4580 mddev->recovery_cp != MaxSector) { 4581 if (mddev->ok_start_degraded) 4582 printk(KERN_WARNING 4583 "raid5: starting dirty degraded array: %s" 4584 "- data corruption possible.\n", 4585 mdname(mddev)); 4586 else { 4587 printk(KERN_ERR 4588 "raid5: cannot start dirty degraded array for %s\n", 4589 mdname(mddev)); 4590 goto abort; 4591 } 4592 } 4593 4594 if (mddev->degraded == 0) 4595 printk("raid5: raid level %d set %s active with %d out of %d" 4596 " devices, algorithm %d\n", conf->level, mdname(mddev), 4597 mddev->raid_disks-mddev->degraded, mddev->raid_disks, 4598 mddev->new_layout); 4599 else 4600 printk(KERN_ALERT "raid5: raid level %d set %s active with %d" 4601 " out of %d devices, algorithm %d\n", conf->level, 4602 mdname(mddev), mddev->raid_disks - mddev->degraded, 4603 mddev->raid_disks, mddev->new_layout); 4604 4605 print_raid5_conf(conf); 4606 4607 if (conf->reshape_progress != MaxSector) { 4608 printk("...ok start reshape thread\n"); 4609 conf->reshape_safe = conf->reshape_progress; 4610 atomic_set(&conf->reshape_stripes, 0); 4611 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4612 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 4613 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 4614 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 4615 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 4616 "%s_reshape"); 4617 } 4618 4619 /* read-ahead size must cover two whole stripes, which is 4620 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices 4621 */ 4622 { 4623 int data_disks = conf->previous_raid_disks - conf->max_degraded; 4624 int stripe = data_disks * 4625 ((mddev->chunk_sectors << 9) / PAGE_SIZE); 4626 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 4627 mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 4628 } 4629 4630 /* Ok, everything is just fine now */ 4631 if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group)) 4632 printk(KERN_WARNING 4633 "raid5: failed to create sysfs attributes for %s\n", 4634 mdname(mddev)); 4635 4636 mddev->queue->queue_lock = &conf->device_lock; 4637 4638 mddev->queue->unplug_fn = raid5_unplug_device; 4639 mddev->queue->backing_dev_info.congested_data = mddev; 4640 mddev->queue->backing_dev_info.congested_fn = raid5_congested; 4641 4642 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); 4643 4644 blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec); 4645 chunk_size = mddev->chunk_sectors << 9; 4646 blk_queue_io_min(mddev->queue, chunk_size); 4647 blk_queue_io_opt(mddev->queue, chunk_size * 4648 (conf->raid_disks - conf->max_degraded)); 4649 4650 list_for_each_entry(rdev, &mddev->disks, same_set) 4651 disk_stack_limits(mddev->gendisk, rdev->bdev, 4652 rdev->data_offset << 9); 4653 4654 return 0; 4655 abort: 4656 md_unregister_thread(mddev->thread); 4657 mddev->thread = NULL; 4658 if (conf) { 4659 print_raid5_conf(conf); 4660 free_conf(conf); 4661 } 4662 
mddev->private = NULL; 4663 printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev)); 4664 return -EIO; 4665 } 4666 4667 4668 4669 static int stop(mddev_t *mddev) 4670 { 4671 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 4672 4673 md_unregister_thread(mddev->thread); 4674 mddev->thread = NULL; 4675 mddev->queue->backing_dev_info.congested_fn = NULL; 4676 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 4677 sysfs_remove_group(&mddev->kobj, &raid5_attrs_group); 4678 free_conf(conf); 4679 mddev->private = NULL; 4680 return 0; 4681 } 4682 4683 #ifdef DEBUG 4684 static void print_sh(struct seq_file *seq, struct stripe_head *sh) 4685 { 4686 int i; 4687 4688 seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n", 4689 (unsigned long long)sh->sector, sh->pd_idx, sh->state); 4690 seq_printf(seq, "sh %llu, count %d.\n", 4691 (unsigned long long)sh->sector, atomic_read(&sh->count)); 4692 seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector); 4693 for (i = 0; i < sh->disks; i++) { 4694 seq_printf(seq, "(cache%d: %p %ld) ", 4695 i, sh->dev[i].page, sh->dev[i].flags); 4696 } 4697 seq_printf(seq, "\n"); 4698 } 4699 4700 static void printall(struct seq_file *seq, raid5_conf_t *conf) 4701 { 4702 struct stripe_head *sh; 4703 struct hlist_node *hn; 4704 int i; 4705 4706 spin_lock_irq(&conf->device_lock); 4707 for (i = 0; i < NR_HASH; i++) { 4708 hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) { 4709 if (sh->raid_conf != conf) 4710 continue; 4711 print_sh(seq, sh); 4712 } 4713 } 4714 spin_unlock_irq(&conf->device_lock); 4715 } 4716 #endif 4717 4718 static void status(struct seq_file *seq, mddev_t *mddev) 4719 { 4720 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 4721 int i; 4722 4723 seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level, 4724 mddev->chunk_sectors / 2, mddev->layout); 4725 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); 4726 for (i = 0; i < conf->raid_disks; i++) 4727 seq_printf (seq, "%s", 4728 conf->disks[i].rdev && 4729 test_bit(In_sync, &conf->disks[i].rdev->flags) ? 
"U" : "_"); 4730 seq_printf (seq, "]"); 4731 #ifdef DEBUG 4732 seq_printf (seq, "\n"); 4733 printall(seq, conf); 4734 #endif 4735 } 4736 4737 static void print_raid5_conf (raid5_conf_t *conf) 4738 { 4739 int i; 4740 struct disk_info *tmp; 4741 4742 printk("RAID5 conf printout:\n"); 4743 if (!conf) { 4744 printk("(conf==NULL)\n"); 4745 return; 4746 } 4747 printk(" --- rd:%d wd:%d\n", conf->raid_disks, 4748 conf->raid_disks - conf->mddev->degraded); 4749 4750 for (i = 0; i < conf->raid_disks; i++) { 4751 char b[BDEVNAME_SIZE]; 4752 tmp = conf->disks + i; 4753 if (tmp->rdev) 4754 printk(" disk %d, o:%d, dev:%s\n", 4755 i, !test_bit(Faulty, &tmp->rdev->flags), 4756 bdevname(tmp->rdev->bdev,b)); 4757 } 4758 } 4759 4760 static int raid5_spare_active(mddev_t *mddev) 4761 { 4762 int i; 4763 raid5_conf_t *conf = mddev->private; 4764 struct disk_info *tmp; 4765 4766 for (i = 0; i < conf->raid_disks; i++) { 4767 tmp = conf->disks + i; 4768 if (tmp->rdev 4769 && !test_bit(Faulty, &tmp->rdev->flags) 4770 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { 4771 unsigned long flags; 4772 spin_lock_irqsave(&conf->device_lock, flags); 4773 mddev->degraded--; 4774 spin_unlock_irqrestore(&conf->device_lock, flags); 4775 } 4776 } 4777 print_raid5_conf(conf); 4778 return 0; 4779 } 4780 4781 static int raid5_remove_disk(mddev_t *mddev, int number) 4782 { 4783 raid5_conf_t *conf = mddev->private; 4784 int err = 0; 4785 mdk_rdev_t *rdev; 4786 struct disk_info *p = conf->disks + number; 4787 4788 print_raid5_conf(conf); 4789 rdev = p->rdev; 4790 if (rdev) { 4791 if (number >= conf->raid_disks && 4792 conf->reshape_progress == MaxSector) 4793 clear_bit(In_sync, &rdev->flags); 4794 4795 if (test_bit(In_sync, &rdev->flags) || 4796 atomic_read(&rdev->nr_pending)) { 4797 err = -EBUSY; 4798 goto abort; 4799 } 4800 /* Only remove non-faulty devices if recovery 4801 * isn't possible. 4802 */ 4803 if (!test_bit(Faulty, &rdev->flags) && 4804 mddev->degraded <= conf->max_degraded && 4805 number < conf->raid_disks) { 4806 err = -EBUSY; 4807 goto abort; 4808 } 4809 p->rdev = NULL; 4810 synchronize_rcu(); 4811 if (atomic_read(&rdev->nr_pending)) { 4812 /* lost the race, try later */ 4813 err = -EBUSY; 4814 p->rdev = rdev; 4815 } 4816 } 4817 abort: 4818 4819 print_raid5_conf(conf); 4820 return err; 4821 } 4822 4823 static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) 4824 { 4825 raid5_conf_t *conf = mddev->private; 4826 int err = -EEXIST; 4827 int disk; 4828 struct disk_info *p; 4829 int first = 0; 4830 int last = conf->raid_disks - 1; 4831 4832 if (mddev->degraded > conf->max_degraded) 4833 /* no point adding a device */ 4834 return -EINVAL; 4835 4836 if (rdev->raid_disk >= 0) 4837 first = last = rdev->raid_disk; 4838 4839 /* 4840 * find the disk ... but prefer rdev->saved_raid_disk 4841 * if possible. 4842 */ 4843 if (rdev->saved_raid_disk >= 0 && 4844 rdev->saved_raid_disk >= first && 4845 conf->disks[rdev->saved_raid_disk].rdev == NULL) 4846 disk = rdev->saved_raid_disk; 4847 else 4848 disk = first; 4849 for ( ; disk <= last ; disk++) 4850 if ((p=conf->disks + disk)->rdev == NULL) { 4851 clear_bit(In_sync, &rdev->flags); 4852 rdev->raid_disk = disk; 4853 err = 0; 4854 if (rdev->saved_raid_disk != disk) 4855 conf->fullsync = 1; 4856 rcu_assign_pointer(p->rdev, rdev); 4857 break; 4858 } 4859 print_raid5_conf(conf); 4860 return err; 4861 } 4862 4863 static int raid5_resize(mddev_t *mddev, sector_t sectors) 4864 { 4865 /* no resync is happening, and there is enough space 4866 * on all devices, so we can resize. 
4867 * We need to make sure resync covers any new space. 4868 * If the array is shrinking we should possibly wait until 4869 * any io in the removed space completes, but it hardly seems 4870 * worth it. 4871 */ 4872 sectors &= ~((sector_t)mddev->chunk_sectors - 1); 4873 md_set_array_sectors(mddev, raid5_size(mddev, sectors, 4874 mddev->raid_disks)); 4875 if (mddev->array_sectors > 4876 raid5_size(mddev, sectors, mddev->raid_disks)) 4877 return -EINVAL; 4878 set_capacity(mddev->gendisk, mddev->array_sectors); 4879 mddev->changed = 1; 4880 revalidate_disk(mddev->gendisk); 4881 if (sectors > mddev->dev_sectors && mddev->recovery_cp == MaxSector) { 4882 mddev->recovery_cp = mddev->dev_sectors; 4883 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4884 } 4885 mddev->dev_sectors = sectors; 4886 mddev->resync_max_sectors = sectors; 4887 return 0; 4888 } 4889 4890 static int check_stripe_cache(mddev_t *mddev) 4891 { 4892 /* Can only proceed if there are plenty of stripe_heads. 4893 * We need a minimum of one full stripe,, and for sensible progress 4894 * it is best to have about 4 times that. 4895 * If we require 4 times, then the default 256 4K stripe_heads will 4896 * allow for chunk sizes up to 256K, which is probably OK. 4897 * If the chunk size is greater, user-space should request more 4898 * stripe_heads first. 4899 */ 4900 raid5_conf_t *conf = mddev->private; 4901 if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4 4902 > conf->max_nr_stripes || 4903 ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4 4904 > conf->max_nr_stripes) { 4905 printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n", 4906 ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9) 4907 / STRIPE_SIZE)*4); 4908 return 0; 4909 } 4910 return 1; 4911 } 4912 4913 static int check_reshape(mddev_t *mddev) 4914 { 4915 raid5_conf_t *conf = mddev->private; 4916 4917 if (mddev->delta_disks == 0 && 4918 mddev->new_layout == mddev->layout && 4919 mddev->new_chunk_sectors == mddev->chunk_sectors) 4920 return 0; /* nothing to do */ 4921 if (mddev->bitmap) 4922 /* Cannot grow a bitmap yet */ 4923 return -EBUSY; 4924 if (mddev->degraded > conf->max_degraded) 4925 return -EINVAL; 4926 if (mddev->delta_disks < 0) { 4927 /* We might be able to shrink, but the devices must 4928 * be made bigger first. 4929 * For raid6, 4 is the minimum size. 4930 * Otherwise 2 is the minimum 4931 */ 4932 int min = 2; 4933 if (mddev->level == 6) 4934 min = 4; 4935 if (mddev->raid_disks + mddev->delta_disks < min) 4936 return -EINVAL; 4937 } 4938 4939 if (!check_stripe_cache(mddev)) 4940 return -ENOSPC; 4941 4942 return resize_stripes(conf, conf->raid_disks + mddev->delta_disks); 4943 } 4944 4945 static int raid5_start_reshape(mddev_t *mddev) 4946 { 4947 raid5_conf_t *conf = mddev->private; 4948 mdk_rdev_t *rdev; 4949 int spares = 0; 4950 int added_devices = 0; 4951 unsigned long flags; 4952 4953 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4954 return -EBUSY; 4955 4956 if (!check_stripe_cache(mddev)) 4957 return -ENOSPC; 4958 4959 list_for_each_entry(rdev, &mddev->disks, same_set) 4960 if (rdev->raid_disk < 0 && 4961 !test_bit(Faulty, &rdev->flags)) 4962 spares++; 4963 4964 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) 4965 /* Not enough devices even to make a degraded array 4966 * of that size 4967 */ 4968 return -EINVAL; 4969 4970 /* Refuse to reduce size of the array. Any reductions in 4971 * array size must be through explicit setting of array_size 4972 * attribute. 
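* (in other words, shrink the exported size via the array_size sysfs attribute first, and
* only then reduce the number of devices)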
4973 */ 4974 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks) 4975 < mddev->array_sectors) { 4976 printk(KERN_ERR "md: %s: array size must be reduced " 4977 "before number of disks\n", mdname(mddev)); 4978 return -EINVAL; 4979 } 4980 4981 atomic_set(&conf->reshape_stripes, 0); 4982 spin_lock_irq(&conf->device_lock); 4983 conf->previous_raid_disks = conf->raid_disks; 4984 conf->raid_disks += mddev->delta_disks; 4985 conf->prev_chunk_sectors = conf->chunk_sectors; 4986 conf->chunk_sectors = mddev->new_chunk_sectors; 4987 conf->prev_algo = conf->algorithm; 4988 conf->algorithm = mddev->new_layout; 4989 if (mddev->delta_disks < 0) 4990 conf->reshape_progress = raid5_size(mddev, 0, 0); 4991 else 4992 conf->reshape_progress = 0; 4993 conf->reshape_safe = conf->reshape_progress; 4994 conf->generation++; 4995 spin_unlock_irq(&conf->device_lock); 4996 4997 /* Add some new drives, as many as will fit. 4998 * We know there are enough to make the newly sized array work. 4999 */ 5000 list_for_each_entry(rdev, &mddev->disks, same_set) 5001 if (rdev->raid_disk < 0 && 5002 !test_bit(Faulty, &rdev->flags)) { 5003 if (raid5_add_disk(mddev, rdev) == 0) { 5004 char nm[20]; 5005 set_bit(In_sync, &rdev->flags); 5006 added_devices++; 5007 rdev->recovery_offset = 0; 5008 sprintf(nm, "rd%d", rdev->raid_disk); 5009 if (sysfs_create_link(&mddev->kobj, 5010 &rdev->kobj, nm)) 5011 printk(KERN_WARNING 5012 "raid5: failed to create " 5013 " link %s for %s\n", 5014 nm, mdname(mddev)); 5015 } else 5016 break; 5017 } 5018 5019 if (mddev->delta_disks > 0) { 5020 spin_lock_irqsave(&conf->device_lock, flags); 5021 mddev->degraded = (conf->raid_disks - conf->previous_raid_disks) 5022 - added_devices; 5023 spin_unlock_irqrestore(&conf->device_lock, flags); 5024 } 5025 mddev->raid_disks = conf->raid_disks; 5026 mddev->reshape_position = conf->reshape_progress; 5027 set_bit(MD_CHANGE_DEVS, &mddev->flags); 5028 5029 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 5030 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 5031 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 5032 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 5033 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 5034 "%s_reshape"); 5035 if (!mddev->sync_thread) { 5036 mddev->recovery = 0; 5037 spin_lock_irq(&conf->device_lock); 5038 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; 5039 conf->reshape_progress = MaxSector; 5040 spin_unlock_irq(&conf->device_lock); 5041 return -EAGAIN; 5042 } 5043 conf->reshape_checkpoint = jiffies; 5044 md_wakeup_thread(mddev->sync_thread); 5045 md_new_event(mddev); 5046 return 0; 5047 } 5048 5049 /* This is called from the reshape thread and should make any 5050 * changes needed in 'conf' 5051 */ 5052 static void end_reshape(raid5_conf_t *conf) 5053 { 5054 5055 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { 5056 5057 spin_lock_irq(&conf->device_lock); 5058 conf->previous_raid_disks = conf->raid_disks; 5059 conf->reshape_progress = MaxSector; 5060 spin_unlock_irq(&conf->device_lock); 5061 wake_up(&conf->wait_for_overlap); 5062 5063 /* read-ahead size must cover two whole stripes, which is 5064 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices 5065 */ 5066 { 5067 int data_disks = conf->raid_disks - conf->max_degraded; 5068 int stripe = data_disks * ((conf->chunk_sectors << 9) 5069 / PAGE_SIZE); 5070 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 5071 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 5072 } 5073 } 5074 } 5075 5076 /* This is 
called from the raid5d thread with mddev_lock held. 5077 * It makes config changes to the device. 5078 */ 5079 static void raid5_finish_reshape(mddev_t *mddev) 5080 { 5081 raid5_conf_t *conf = mddev->private; 5082 5083 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 5084 5085 if (mddev->delta_disks > 0) { 5086 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); 5087 set_capacity(mddev->gendisk, mddev->array_sectors); 5088 mddev->changed = 1; 5089 revalidate_disk(mddev->gendisk); 5090 } else { 5091 int d; 5092 mddev->degraded = conf->raid_disks; 5093 for (d = 0; d < conf->raid_disks ; d++) 5094 if (conf->disks[d].rdev && 5095 test_bit(In_sync, 5096 &conf->disks[d].rdev->flags)) 5097 mddev->degraded--; 5098 for (d = conf->raid_disks ; 5099 d < conf->raid_disks - mddev->delta_disks; 5100 d++) { 5101 mdk_rdev_t *rdev = conf->disks[d].rdev; 5102 if (rdev && raid5_remove_disk(mddev, d) == 0) { 5103 char nm[20]; 5104 sprintf(nm, "rd%d", rdev->raid_disk); 5105 sysfs_remove_link(&mddev->kobj, nm); 5106 rdev->raid_disk = -1; 5107 } 5108 } 5109 } 5110 mddev->layout = conf->algorithm; 5111 mddev->chunk_sectors = conf->chunk_sectors; 5112 mddev->reshape_position = MaxSector; 5113 mddev->delta_disks = 0; 5114 } 5115 } 5116 5117 static void raid5_quiesce(mddev_t *mddev, int state) 5118 { 5119 raid5_conf_t *conf = mddev->private; 5120 5121 switch(state) { 5122 case 2: /* resume for a suspend */ 5123 wake_up(&conf->wait_for_overlap); 5124 break; 5125 5126 case 1: /* stop all writes */ 5127 spin_lock_irq(&conf->device_lock); 5128 /* '2' tells resync/reshape to pause so that all 5129 * active stripes can drain 5130 */ 5131 conf->quiesce = 2; 5132 wait_event_lock_irq(conf->wait_for_stripe, 5133 atomic_read(&conf->active_stripes) == 0 && 5134 atomic_read(&conf->active_aligned_reads) == 0, 5135 conf->device_lock, /* nothing */); 5136 conf->quiesce = 1; 5137 spin_unlock_irq(&conf->device_lock); 5138 /* allow reshape to continue */ 5139 wake_up(&conf->wait_for_overlap); 5140 break; 5141 5142 case 0: /* re-enable writes */ 5143 spin_lock_irq(&conf->device_lock); 5144 conf->quiesce = 0; 5145 wake_up(&conf->wait_for_stripe); 5146 wake_up(&conf->wait_for_overlap); 5147 spin_unlock_irq(&conf->device_lock); 5148 break; 5149 } 5150 } 5151 5152 5153 static void *raid5_takeover_raid1(mddev_t *mddev) 5154 { 5155 int chunksect; 5156 5157 if (mddev->raid_disks != 2 || 5158 mddev->degraded > 1) 5159 return ERR_PTR(-EINVAL); 5160 5161 /* Should check if there are write-behind devices? 
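* (write-behind is a raid1-specific bitmap feature with no raid5 equivalent, so such devices
* would presumably need special handling before a takeover)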
*/ 5162 5163 chunksect = 64*2; /* 64K by default */ 5164 5165 /* The array must be an exact multiple of chunksize */ 5166 while (chunksect && (mddev->array_sectors & (chunksect-1))) 5167 chunksect >>= 1; 5168 5169 if ((chunksect<<9) < STRIPE_SIZE) 5170 /* array size does not allow a suitable chunk size */ 5171 return ERR_PTR(-EINVAL); 5172 5173 mddev->new_level = 5; 5174 mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC; 5175 mddev->new_chunk_sectors = chunksect; 5176 5177 return setup_conf(mddev); 5178 } 5179 5180 static void *raid5_takeover_raid6(mddev_t *mddev) 5181 { 5182 int new_layout; 5183 5184 switch (mddev->layout) { 5185 case ALGORITHM_LEFT_ASYMMETRIC_6: 5186 new_layout = ALGORITHM_LEFT_ASYMMETRIC; 5187 break; 5188 case ALGORITHM_RIGHT_ASYMMETRIC_6: 5189 new_layout = ALGORITHM_RIGHT_ASYMMETRIC; 5190 break; 5191 case ALGORITHM_LEFT_SYMMETRIC_6: 5192 new_layout = ALGORITHM_LEFT_SYMMETRIC; 5193 break; 5194 case ALGORITHM_RIGHT_SYMMETRIC_6: 5195 new_layout = ALGORITHM_RIGHT_SYMMETRIC; 5196 break; 5197 case ALGORITHM_PARITY_0_6: 5198 new_layout = ALGORITHM_PARITY_0; 5199 break; 5200 case ALGORITHM_PARITY_N: 5201 new_layout = ALGORITHM_PARITY_N; 5202 break; 5203 default: 5204 return ERR_PTR(-EINVAL); 5205 } 5206 mddev->new_level = 5; 5207 mddev->new_layout = new_layout; 5208 mddev->delta_disks = -1; 5209 mddev->raid_disks -= 1; 5210 return setup_conf(mddev); 5211 } 5212 5213 5214 static int raid5_check_reshape(mddev_t *mddev) 5215 { 5216 /* For a 2-drive array, the layout and chunk size can be changed 5217 * immediately as not restriping is needed. 5218 * For larger arrays we record the new value - after validation 5219 * to be used by a reshape pass. 5220 */ 5221 raid5_conf_t *conf = mddev->private; 5222 int new_chunk = mddev->new_chunk_sectors; 5223 5224 if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout)) 5225 return -EINVAL; 5226 if (new_chunk > 0) { 5227 if (!is_power_of_2(new_chunk)) 5228 return -EINVAL; 5229 if (new_chunk < (PAGE_SIZE>>9)) 5230 return -EINVAL; 5231 if (mddev->array_sectors & (new_chunk-1)) 5232 /* not factor of array size */ 5233 return -EINVAL; 5234 } 5235 5236 /* They look valid */ 5237 5238 if (mddev->raid_disks == 2) { 5239 /* can make the change immediately */ 5240 if (mddev->new_layout >= 0) { 5241 conf->algorithm = mddev->new_layout; 5242 mddev->layout = mddev->new_layout; 5243 } 5244 if (new_chunk > 0) { 5245 conf->chunk_sectors = new_chunk ; 5246 mddev->chunk_sectors = new_chunk; 5247 } 5248 set_bit(MD_CHANGE_DEVS, &mddev->flags); 5249 md_wakeup_thread(mddev->thread); 5250 } 5251 return check_reshape(mddev); 5252 } 5253 5254 static int raid6_check_reshape(mddev_t *mddev) 5255 { 5256 int new_chunk = mddev->new_chunk_sectors; 5257 5258 if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout)) 5259 return -EINVAL; 5260 if (new_chunk > 0) { 5261 if (!is_power_of_2(new_chunk)) 5262 return -EINVAL; 5263 if (new_chunk < (PAGE_SIZE >> 9)) 5264 return -EINVAL; 5265 if (mddev->array_sectors & (new_chunk-1)) 5266 /* not factor of array size */ 5267 return -EINVAL; 5268 } 5269 5270 /* They look valid */ 5271 return check_reshape(mddev); 5272 } 5273 5274 static void *raid5_takeover(mddev_t *mddev) 5275 { 5276 /* raid5 can take over: 5277 * raid0 - if all devices are the same - make it a raid4 layout 5278 * raid1 - if there are two drives. We need to know the chunk size 5279 * raid4 - trivial - just use a raid4 layout. 
5280 * raid6 - Providing it is a *_6 layout 5281 */ 5282 5283 if (mddev->level == 1) 5284 return raid5_takeover_raid1(mddev); 5285 if (mddev->level == 4) { 5286 mddev->new_layout = ALGORITHM_PARITY_N; 5287 mddev->new_level = 5; 5288 return setup_conf(mddev); 5289 } 5290 if (mddev->level == 6) 5291 return raid5_takeover_raid6(mddev); 5292 5293 return ERR_PTR(-EINVAL); 5294 } 5295 5296 5297 static struct mdk_personality raid5_personality; 5298 5299 static void *raid6_takeover(mddev_t *mddev) 5300 { 5301 /* Currently can only take over a raid5. We map the 5302 * personality to an equivalent raid6 personality 5303 * with the Q block at the end. 5304 */ 5305 int new_layout; 5306 5307 if (mddev->pers != &raid5_personality) 5308 return ERR_PTR(-EINVAL); 5309 if (mddev->degraded > 1) 5310 return ERR_PTR(-EINVAL); 5311 if (mddev->raid_disks > 253) 5312 return ERR_PTR(-EINVAL); 5313 if (mddev->raid_disks < 3) 5314 return ERR_PTR(-EINVAL); 5315 5316 switch (mddev->layout) { 5317 case ALGORITHM_LEFT_ASYMMETRIC: 5318 new_layout = ALGORITHM_LEFT_ASYMMETRIC_6; 5319 break; 5320 case ALGORITHM_RIGHT_ASYMMETRIC: 5321 new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6; 5322 break; 5323 case ALGORITHM_LEFT_SYMMETRIC: 5324 new_layout = ALGORITHM_LEFT_SYMMETRIC_6; 5325 break; 5326 case ALGORITHM_RIGHT_SYMMETRIC: 5327 new_layout = ALGORITHM_RIGHT_SYMMETRIC_6; 5328 break; 5329 case ALGORITHM_PARITY_0: 5330 new_layout = ALGORITHM_PARITY_0_6; 5331 break; 5332 case ALGORITHM_PARITY_N: 5333 new_layout = ALGORITHM_PARITY_N; 5334 break; 5335 default: 5336 return ERR_PTR(-EINVAL); 5337 } 5338 mddev->new_level = 6; 5339 mddev->new_layout = new_layout; 5340 mddev->delta_disks = 1; 5341 mddev->raid_disks += 1; 5342 return setup_conf(mddev); 5343 } 5344 5345 5346 static struct mdk_personality raid6_personality = 5347 { 5348 .name = "raid6", 5349 .level = 6, 5350 .owner = THIS_MODULE, 5351 .make_request = make_request, 5352 .run = run, 5353 .stop = stop, 5354 .status = status, 5355 .error_handler = error, 5356 .hot_add_disk = raid5_add_disk, 5357 .hot_remove_disk= raid5_remove_disk, 5358 .spare_active = raid5_spare_active, 5359 .sync_request = sync_request, 5360 .resize = raid5_resize, 5361 .size = raid5_size, 5362 .check_reshape = raid6_check_reshape, 5363 .start_reshape = raid5_start_reshape, 5364 .finish_reshape = raid5_finish_reshape, 5365 .quiesce = raid5_quiesce, 5366 .takeover = raid6_takeover, 5367 }; 5368 static struct mdk_personality raid5_personality = 5369 { 5370 .name = "raid5", 5371 .level = 5, 5372 .owner = THIS_MODULE, 5373 .make_request = make_request, 5374 .run = run, 5375 .stop = stop, 5376 .status = status, 5377 .error_handler = error, 5378 .hot_add_disk = raid5_add_disk, 5379 .hot_remove_disk= raid5_remove_disk, 5380 .spare_active = raid5_spare_active, 5381 .sync_request = sync_request, 5382 .resize = raid5_resize, 5383 .size = raid5_size, 5384 .check_reshape = raid5_check_reshape, 5385 .start_reshape = raid5_start_reshape, 5386 .finish_reshape = raid5_finish_reshape, 5387 .quiesce = raid5_quiesce, 5388 .takeover = raid5_takeover, 5389 }; 5390 5391 static struct mdk_personality raid4_personality = 5392 { 5393 .name = "raid4", 5394 .level = 4, 5395 .owner = THIS_MODULE, 5396 .make_request = make_request, 5397 .run = run, 5398 .stop = stop, 5399 .status = status, 5400 .error_handler = error, 5401 .hot_add_disk = raid5_add_disk, 5402 .hot_remove_disk= raid5_remove_disk, 5403 .spare_active = raid5_spare_active, 5404 .sync_request = sync_request, 5405 .resize = raid5_resize, 5406 .size = raid5_size, 5407 
.check_reshape = raid5_check_reshape, 5408 .start_reshape = raid5_start_reshape, 5409 .finish_reshape = raid5_finish_reshape, 5410 .quiesce = raid5_quiesce, 5411 }; 5412 5413 static int __init raid5_init(void) 5414 { 5415 register_md_personality(&raid6_personality); 5416 register_md_personality(&raid5_personality); 5417 register_md_personality(&raid4_personality); 5418 return 0; 5419 } 5420 5421 static void raid5_exit(void) 5422 { 5423 unregister_md_personality(&raid6_personality); 5424 unregister_md_personality(&raid5_personality); 5425 unregister_md_personality(&raid4_personality); 5426 } 5427 5428 module_init(raid5_init); 5429 module_exit(raid5_exit); 5430 MODULE_LICENSE("GPL"); 5431 MODULE_ALIAS("md-personality-4"); /* RAID5 */ 5432 MODULE_ALIAS("md-raid5"); 5433 MODULE_ALIAS("md-raid4"); 5434 MODULE_ALIAS("md-level-5"); 5435 MODULE_ALIAS("md-level-4"); 5436 MODULE_ALIAS("md-personality-8"); /* RAID6 */ 5437 MODULE_ALIAS("md-raid6"); 5438 MODULE_ALIAS("md-level-6"); 5439 5440 /* This used to be two separate modules, they were: */ 5441 MODULE_ALIAS("raid5"); 5442 MODULE_ALIAS("raid6"); 5443
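/*
 * Example (not part of the driver): a minimal user-space sketch showing how the sysfs
 * attributes defined above - stripe_cache_size, stripe_cache_active and
 * preread_bypass_threshold - can be inspected.  It assumes an array named md0, so the
 * /sys/block/md0/md/ path is illustrative only, and it is kept inside #if 0 so it never
 * reaches the kernel build.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

static long read_md_attr(const char *attr)
{
	char path[128], buf[64];
	long val = -1;
	FILE *f;

	/* md exposes the per-array raid5 attributes under /sys/block/<dev>/md/ */
	snprintf(path, sizeof(path), "/sys/block/md0/md/%s", attr);
	f = fopen(path, "r");
	if (f) {
		if (fgets(buf, sizeof(buf), f))
			val = strtol(buf, NULL, 10);
		fclose(f);
	}
	return val;
}

int main(void)
{
	/* number of stripe_heads in the cache (writable, see raid5_store_stripe_cache_size) */
	printf("stripe_cache_size:        %ld\n", read_md_attr("stripe_cache_size"));
	/* stripes currently in use (read-only) */
	printf("stripe_cache_active:      %ld\n", read_md_attr("stripe_cache_active"));
	/* how many times preread-pending stripes may be bypassed in favour of
	 * full stripe writes (see __get_priority_stripe above) */
	printf("preread_bypass_threshold: %ld\n", read_md_attr("preread_bypass_threshold"));
	return 0;
}
#endif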