1 /* 2 * raid5.c : Multiple Devices driver for Linux 3 * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman 4 * Copyright (C) 1999, 2000 Ingo Molnar 5 * Copyright (C) 2002, 2003 H. Peter Anvin 6 * 7 * RAID-4/5/6 management functions. 8 * Thanks to Penguin Computing for making the RAID-6 development possible 9 * by donating a test server! 10 * 11 * This program is free software; you can redistribute it and/or modify 12 * it under the terms of the GNU General Public License as published by 13 * the Free Software Foundation; either version 2, or (at your option) 14 * any later version. 15 * 16 * You should have received a copy of the GNU General Public License 17 * (for example /usr/src/linux/COPYING); if not, write to the Free 18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19 */ 20 21 /* 22 * BITMAP UNPLUGGING: 23 * 24 * The sequencing for updating the bitmap reliably is a little 25 * subtle (and I got it wrong the first time) so it deserves some 26 * explanation. 27 * 28 * We group bitmap updates into batches. Each batch has a number. 29 * We may write out several batches at once, but that isn't very important. 30 * conf->bm_write is the number of the last batch successfully written. 31 * conf->bm_flush is the number of the last batch that was closed to 32 * new additions. 33 * When we discover that we will need to write to any block in a stripe 34 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq 35 * the number of the batch it will be in. This is bm_flush+1. 36 * When we are ready to do a write, if that batch hasn't been written yet, 37 * we plug the array and queue the stripe for later. 38 * When an unplug happens, we increment bm_flush, thus closing the current 39 * batch. 40 * When we notice that bm_flush > bm_write, we write out all pending updates 41 * to the bitmap, and advance bm_write to where bm_flush was. 42 * This may occasionally write a bit out twice, but is sure never to 43 * miss any bits. 44 */ 45 46 #include <linux/blkdev.h> 47 #include <linux/kthread.h> 48 #include <linux/raid/pq.h> 49 #include <linux/async_tx.h> 50 #include <linux/seq_file.h> 51 #include "md.h" 52 #include "raid5.h" 53 #include "bitmap.h" 54 55 /* 56 * Stripe cache 57 */ 58 59 #define NR_STRIPES 256 60 #define STRIPE_SIZE PAGE_SIZE 61 #define STRIPE_SHIFT (PAGE_SHIFT - 9) 62 #define STRIPE_SECTORS (STRIPE_SIZE>>9) 63 #define IO_THRESHOLD 1 64 #define BYPASS_THRESHOLD 1 65 #define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head)) 66 #define HASH_MASK (NR_HASH - 1) 67 68 #define stripe_hash(conf, sect) (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK])) 69 70 /* bio's attached to a stripe+device for I/O are linked together in bi_sector 71 * order without overlap. There may be several bio's per stripe+device, and 72 * a bio could span several devices. 73 * When walking this list for a particular stripe+device, we must never proceed 74 * beyond a bio that extends past this device, as the next bio might no longer 75 * be valid. 76 * This macro is used to determine the 'next' bio in the list, given the sector 77 * of the current stripe+device 78 */ 79 #define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? 
(bio)->bi_next : NULL) 80 /* 81 * The following can be used to debug the driver 82 */ 83 #define RAID5_PARANOIA 1 84 #if RAID5_PARANOIA && defined(CONFIG_SMP) 85 # define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock) 86 #else 87 # define CHECK_DEVLOCK() 88 #endif 89 90 #ifdef DEBUG 91 #define inline 92 #define __inline__ 93 #endif 94 95 #define printk_rl(args...) ((void) (printk_ratelimit() && printk(args))) 96 97 /* 98 * We maintain a biased count of active stripes in the bottom 16 bits of 99 * bi_phys_segments, and a count of processed stripes in the upper 16 bits 100 */ 101 static inline int raid5_bi_phys_segments(struct bio *bio) 102 { 103 return bio->bi_phys_segments & 0xffff; 104 } 105 106 static inline int raid5_bi_hw_segments(struct bio *bio) 107 { 108 return (bio->bi_phys_segments >> 16) & 0xffff; 109 } 110 111 static inline int raid5_dec_bi_phys_segments(struct bio *bio) 112 { 113 --bio->bi_phys_segments; 114 return raid5_bi_phys_segments(bio); 115 } 116 117 static inline int raid5_dec_bi_hw_segments(struct bio *bio) 118 { 119 unsigned short val = raid5_bi_hw_segments(bio); 120 121 --val; 122 bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio); 123 return val; 124 } 125 126 static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt) 127 { 128 bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16); 129 } 130 131 /* Find first data disk in a raid6 stripe */ 132 static inline int raid6_d0(struct stripe_head *sh) 133 { 134 if (sh->ddf_layout) 135 /* ddf always starts from first device */ 136 return 0; 137 /* md starts just after Q block */ 138 if (sh->qd_idx == sh->disks - 1) 139 return 0; 140 else 141 return sh->qd_idx + 1; 142 } 143 static inline int raid6_next_disk(int disk, int raid_disks) 144 { 145 disk++; 146 return (disk < raid_disks) ? disk : 0; 147 } 148 149 /* When walking through the disks in a raid5, starting at raid6_d0, 150 * we need to map each disk to a 'slot', where the data disks are slot 151 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk 152 * is raid_disks-1. This helper does that mapping.
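 *
 * As an illustration (device numbers invented for this comment, md layout
 * with ddf_layout == 0): with disks == 6, pd_idx == 4 and qd_idx == 5,
 * raid6_d0() starts the walk at device 0; devices 0..3 receive slots 0..3
 * from the incrementing *count, pd_idx maps to slot syndrome_disks (4) and
 * qd_idx to slot syndrome_disks + 1 (5).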
153 */ 154 static int raid6_idx_to_slot(int idx, struct stripe_head *sh, 155 int *count, int syndrome_disks) 156 { 157 int slot = *count; 158 159 /* in the DDF layouts P and Q occupy slots too, so every device advances the count */ 160 if (sh->ddf_layout) (*count)++; 161 if (idx == sh->pd_idx) return syndrome_disks; 162 if (idx == sh->qd_idx) return syndrome_disks + 1; 163 /* md layouts: only data devices consume slots */ 164 if (!sh->ddf_layout) (*count)++; return slot; 165 } 166 167 static void return_io(struct bio *return_bi) 168 { 169 struct bio *bi = return_bi; 170 while (bi) { 171 172 return_bi = bi->bi_next; 173 bi->bi_next = NULL; 174 bi->bi_size = 0; 175 bio_endio(bi, 0); 176 bi = return_bi; 177 } 178 } 179 180 static void print_raid5_conf (raid5_conf_t *conf); 181 182 static int stripe_operations_active(struct stripe_head *sh) 183 { 184 return sh->check_state || sh->reconstruct_state || 185 test_bit(STRIPE_BIOFILL_RUN, &sh->state) || 186 test_bit(STRIPE_COMPUTE_RUN, &sh->state); 187 } 188 189 static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh) 190 { 191 if (atomic_dec_and_test(&sh->count)) { 192 BUG_ON(!list_empty(&sh->lru)); 193 BUG_ON(atomic_read(&conf->active_stripes)==0); 194 if (test_bit(STRIPE_HANDLE, &sh->state)) { 195 if (test_bit(STRIPE_DELAYED, &sh->state)) { 196 list_add_tail(&sh->lru, &conf->delayed_list); 197 blk_plug_device(conf->mddev->queue); 198 } else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && 199 sh->bm_seq - conf->seq_write > 0) { 200 list_add_tail(&sh->lru, &conf->bitmap_list); 201 blk_plug_device(conf->mddev->queue); 202 } else { 203 clear_bit(STRIPE_BIT_DELAY, &sh->state); 204 list_add_tail(&sh->lru, &conf->handle_list); 205 } 206 md_wakeup_thread(conf->mddev->thread); 207 } else { 208 BUG_ON(stripe_operations_active(sh)); 209 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 210 atomic_dec(&conf->preread_active_stripes); 211 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) 212 md_wakeup_thread(conf->mddev->thread); 213 } 214 atomic_dec(&conf->active_stripes); 215 if (!test_bit(STRIPE_EXPANDING, &sh->state)) { 216 list_add_tail(&sh->lru, &conf->inactive_list); 217 wake_up(&conf->wait_for_stripe); 218 if (conf->retry_read_aligned) 219 md_wakeup_thread(conf->mddev->thread); 220 } 221 } 222 } 223 } 224 225 static void release_stripe(struct stripe_head *sh) 226 { 227 raid5_conf_t *conf = sh->raid_conf; 228 unsigned long flags; 229 230 spin_lock_irqsave(&conf->device_lock, flags); 231 __release_stripe(conf, sh); 232 spin_unlock_irqrestore(&conf->device_lock, flags); 233 } 234 235 static inline void remove_hash(struct stripe_head *sh) 236 { 237 pr_debug("remove_hash(), stripe %llu\n", 238 (unsigned long long)sh->sector); 239 240 hlist_del_init(&sh->hash); 241 } 242 243 static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh) 244 { 245 struct hlist_head *hp = stripe_hash(conf, sh->sector); 246 247 pr_debug("insert_hash(), stripe %llu\n", 248 (unsigned long long)sh->sector); 249 250 CHECK_DEVLOCK(); 251 hlist_add_head(&sh->hash, hp); 252 } 253 254 255 /* find an idle stripe, make sure it is unhashed, and return it.
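 * Must be called with conf->device_lock held (CHECK_DEVLOCK() asserts
 * this); get_active_stripe(), resize_stripes() and drop_one_stripe()
 * below all take the lock before calling it.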
*/ 256 static struct stripe_head *get_free_stripe(raid5_conf_t *conf) 257 { 258 struct stripe_head *sh = NULL; 259 struct list_head *first; 260 261 CHECK_DEVLOCK(); 262 if (list_empty(&conf->inactive_list)) 263 goto out; 264 first = conf->inactive_list.next; 265 sh = list_entry(first, struct stripe_head, lru); 266 list_del_init(first); 267 remove_hash(sh); 268 atomic_inc(&conf->active_stripes); 269 out: 270 return sh; 271 } 272 273 static void shrink_buffers(struct stripe_head *sh, int num) 274 { 275 struct page *p; 276 int i; 277 278 for (i=0; i<num ; i++) { 279 p = sh->dev[i].page; 280 if (!p) 281 continue; 282 sh->dev[i].page = NULL; 283 put_page(p); 284 } 285 } 286 287 static int grow_buffers(struct stripe_head *sh, int num) 288 { 289 int i; 290 291 for (i=0; i<num; i++) { 292 struct page *page; 293 294 if (!(page = alloc_page(GFP_KERNEL))) { 295 return 1; 296 } 297 sh->dev[i].page = page; 298 } 299 return 0; 300 } 301 302 static void raid5_build_block(struct stripe_head *sh, int i, int previous); 303 static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous, 304 struct stripe_head *sh); 305 306 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) 307 { 308 raid5_conf_t *conf = sh->raid_conf; 309 int i; 310 311 BUG_ON(atomic_read(&sh->count) != 0); 312 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); 313 BUG_ON(stripe_operations_active(sh)); 314 315 CHECK_DEVLOCK(); 316 pr_debug("init_stripe called, stripe %llu\n", 317 (unsigned long long)sh->sector); 318 319 remove_hash(sh); 320 321 sh->generation = conf->generation - previous; 322 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks; 323 sh->sector = sector; 324 stripe_set_idx(sector, conf, previous, sh); 325 sh->state = 0; 326 327 328 for (i = sh->disks; i--; ) { 329 struct r5dev *dev = &sh->dev[i]; 330 331 if (dev->toread || dev->read || dev->towrite || dev->written || 332 test_bit(R5_LOCKED, &dev->flags)) { 333 printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n", 334 (unsigned long long)sh->sector, i, dev->toread, 335 dev->read, dev->towrite, dev->written, 336 test_bit(R5_LOCKED, &dev->flags)); 337 BUG(); 338 } 339 dev->flags = 0; 340 raid5_build_block(sh, i, previous); 341 } 342 insert_hash(conf, sh); 343 } 344 345 static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, 346 short generation) 347 { 348 struct stripe_head *sh; 349 struct hlist_node *hn; 350 351 CHECK_DEVLOCK(); 352 pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector); 353 hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash) 354 if (sh->sector == sector && sh->generation == generation) 355 return sh; 356 pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector); 357 return NULL; 358 } 359 360 static void unplug_slaves(mddev_t *mddev); 361 static void raid5_unplug_device(struct request_queue *q); 362 363 static struct stripe_head * 364 get_active_stripe(raid5_conf_t *conf, sector_t sector, 365 int previous, int noblock, int noquiesce) 366 { 367 struct stripe_head *sh; 368 369 pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector); 370 371 spin_lock_irq(&conf->device_lock); 372 373 do { 374 wait_event_lock_irq(conf->wait_for_stripe, 375 conf->quiesce == 0 || noquiesce, 376 conf->device_lock, /* nothing */); 377 sh = __find_stripe(conf, sector, conf->generation - previous); 378 if (!sh) { 379 if (!conf->inactive_blocked) 380 sh = get_free_stripe(conf); 381 if (noblock && sh == NULL) 382 break; 383 if (!sh) { 384 conf->inactive_blocked = 1; 385 
wait_event_lock_irq(conf->wait_for_stripe, 386 !list_empty(&conf->inactive_list) && 387 (atomic_read(&conf->active_stripes) 388 < (conf->max_nr_stripes *3/4) 389 || !conf->inactive_blocked), 390 conf->device_lock, 391 raid5_unplug_device(conf->mddev->queue) 392 ); 393 conf->inactive_blocked = 0; 394 } else 395 init_stripe(sh, sector, previous); 396 } else { 397 if (atomic_read(&sh->count)) { 398 BUG_ON(!list_empty(&sh->lru) 399 && !test_bit(STRIPE_EXPANDING, &sh->state)); 400 } else { 401 if (!test_bit(STRIPE_HANDLE, &sh->state)) 402 atomic_inc(&conf->active_stripes); 403 if (list_empty(&sh->lru) && 404 !test_bit(STRIPE_EXPANDING, &sh->state)) 405 BUG(); 406 list_del_init(&sh->lru); 407 } 408 } 409 } while (sh == NULL); 410 411 if (sh) 412 atomic_inc(&sh->count); 413 414 spin_unlock_irq(&conf->device_lock); 415 return sh; 416 } 417 418 static void 419 raid5_end_read_request(struct bio *bi, int error); 420 static void 421 raid5_end_write_request(struct bio *bi, int error); 422 423 static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) 424 { 425 raid5_conf_t *conf = sh->raid_conf; 426 int i, disks = sh->disks; 427 428 might_sleep(); 429 430 for (i = disks; i--; ) { 431 int rw; 432 struct bio *bi; 433 mdk_rdev_t *rdev; 434 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) 435 rw = WRITE; 436 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) 437 rw = READ; 438 else 439 continue; 440 441 bi = &sh->dev[i].req; 442 443 bi->bi_rw = rw; 444 if (rw == WRITE) 445 bi->bi_end_io = raid5_end_write_request; 446 else 447 bi->bi_end_io = raid5_end_read_request; 448 449 rcu_read_lock(); 450 rdev = rcu_dereference(conf->disks[i].rdev); 451 if (rdev && test_bit(Faulty, &rdev->flags)) 452 rdev = NULL; 453 if (rdev) 454 atomic_inc(&rdev->nr_pending); 455 rcu_read_unlock(); 456 457 if (rdev) { 458 if (s->syncing || s->expanding || s->expanded) 459 md_sync_acct(rdev->bdev, STRIPE_SECTORS); 460 461 set_bit(STRIPE_IO_STARTED, &sh->state); 462 463 bi->bi_bdev = rdev->bdev; 464 pr_debug("%s: for %llu schedule op %ld on disc %d\n", 465 __func__, (unsigned long long)sh->sector, 466 bi->bi_rw, i); 467 atomic_inc(&sh->count); 468 bi->bi_sector = sh->sector + rdev->data_offset; 469 bi->bi_flags = 1 << BIO_UPTODATE; 470 bi->bi_vcnt = 1; 471 bi->bi_max_vecs = 1; 472 bi->bi_idx = 0; 473 bi->bi_io_vec = &sh->dev[i].vec; 474 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 475 bi->bi_io_vec[0].bv_offset = 0; 476 bi->bi_size = STRIPE_SIZE; 477 bi->bi_next = NULL; 478 if (rw == WRITE && 479 test_bit(R5_ReWrite, &sh->dev[i].flags)) 480 atomic_add(STRIPE_SECTORS, 481 &rdev->corrected_errors); 482 generic_make_request(bi); 483 } else { 484 if (rw == WRITE) 485 set_bit(STRIPE_DEGRADED, &sh->state); 486 pr_debug("skip op %ld on disc %d for sector %llu\n", 487 bi->bi_rw, i, (unsigned long long)sh->sector); 488 clear_bit(R5_LOCKED, &sh->dev[i].flags); 489 set_bit(STRIPE_HANDLE, &sh->state); 490 } 491 } 492 } 493 494 static struct dma_async_tx_descriptor * 495 async_copy_data(int frombio, struct bio *bio, struct page *page, 496 sector_t sector, struct dma_async_tx_descriptor *tx) 497 { 498 struct bio_vec *bvl; 499 struct page *bio_page; 500 int i; 501 int page_offset; 502 503 if (bio->bi_sector >= sector) 504 page_offset = (signed)(bio->bi_sector - sector) * 512; 505 else 506 page_offset = (signed)(sector - bio->bi_sector) * -512; 507 bio_for_each_segment(bvl, bio, i) { 508 int len = bio_iovec_idx(bio, i)->bv_len; 509 int clen; 510 int b_offset = 0; 511 512 if (page_offset < 0) { 513 b_offset = -page_offset; 
514 page_offset += b_offset; 515 len -= b_offset; 516 } 517 518 if (len > 0 && page_offset + len > STRIPE_SIZE) 519 clen = STRIPE_SIZE - page_offset; 520 else 521 clen = len; 522 523 if (clen > 0) { 524 b_offset += bio_iovec_idx(bio, i)->bv_offset; 525 bio_page = bio_iovec_idx(bio, i)->bv_page; 526 if (frombio) 527 tx = async_memcpy(page, bio_page, page_offset, 528 b_offset, clen, 529 ASYNC_TX_DEP_ACK, 530 tx, NULL, NULL); 531 else 532 tx = async_memcpy(bio_page, page, b_offset, 533 page_offset, clen, 534 ASYNC_TX_DEP_ACK, 535 tx, NULL, NULL); 536 } 537 if (clen < len) /* hit end of page */ 538 break; 539 page_offset += len; 540 } 541 542 return tx; 543 } 544 545 static void ops_complete_biofill(void *stripe_head_ref) 546 { 547 struct stripe_head *sh = stripe_head_ref; 548 struct bio *return_bi = NULL; 549 raid5_conf_t *conf = sh->raid_conf; 550 int i; 551 552 pr_debug("%s: stripe %llu\n", __func__, 553 (unsigned long long)sh->sector); 554 555 /* clear completed biofills */ 556 spin_lock_irq(&conf->device_lock); 557 for (i = sh->disks; i--; ) { 558 struct r5dev *dev = &sh->dev[i]; 559 560 /* acknowledge completion of a biofill operation */ 561 /* and check if we need to reply to a read request, 562 * new R5_Wantfill requests are held off until 563 * !STRIPE_BIOFILL_RUN 564 */ 565 if (test_and_clear_bit(R5_Wantfill, &dev->flags)) { 566 struct bio *rbi, *rbi2; 567 568 BUG_ON(!dev->read); 569 rbi = dev->read; 570 dev->read = NULL; 571 while (rbi && rbi->bi_sector < 572 dev->sector + STRIPE_SECTORS) { 573 rbi2 = r5_next_bio(rbi, dev->sector); 574 if (!raid5_dec_bi_phys_segments(rbi)) { 575 rbi->bi_next = return_bi; 576 return_bi = rbi; 577 } 578 rbi = rbi2; 579 } 580 } 581 } 582 spin_unlock_irq(&conf->device_lock); 583 clear_bit(STRIPE_BIOFILL_RUN, &sh->state); 584 585 return_io(return_bi); 586 587 set_bit(STRIPE_HANDLE, &sh->state); 588 release_stripe(sh); 589 } 590 591 static void ops_run_biofill(struct stripe_head *sh) 592 { 593 struct dma_async_tx_descriptor *tx = NULL; 594 raid5_conf_t *conf = sh->raid_conf; 595 int i; 596 597 pr_debug("%s: stripe %llu\n", __func__, 598 (unsigned long long)sh->sector); 599 600 for (i = sh->disks; i--; ) { 601 struct r5dev *dev = &sh->dev[i]; 602 if (test_bit(R5_Wantfill, &dev->flags)) { 603 struct bio *rbi; 604 spin_lock_irq(&conf->device_lock); 605 dev->read = rbi = dev->toread; 606 dev->toread = NULL; 607 spin_unlock_irq(&conf->device_lock); 608 while (rbi && rbi->bi_sector < 609 dev->sector + STRIPE_SECTORS) { 610 tx = async_copy_data(0, rbi, dev->page, 611 dev->sector, tx); 612 rbi = r5_next_bio(rbi, dev->sector); 613 } 614 } 615 } 616 617 atomic_inc(&sh->count); 618 async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx, 619 ops_complete_biofill, sh); 620 } 621 622 static void ops_complete_compute5(void *stripe_head_ref) 623 { 624 struct stripe_head *sh = stripe_head_ref; 625 int target = sh->ops.target; 626 struct r5dev *tgt = &sh->dev[target]; 627 628 pr_debug("%s: stripe %llu\n", __func__, 629 (unsigned long long)sh->sector); 630 631 set_bit(R5_UPTODATE, &tgt->flags); 632 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 633 clear_bit(R5_Wantcompute, &tgt->flags); 634 clear_bit(STRIPE_COMPUTE_RUN, &sh->state); 635 if (sh->check_state == check_state_compute_run) 636 sh->check_state = check_state_compute_result; 637 set_bit(STRIPE_HANDLE, &sh->state); 638 release_stripe(sh); 639 } 640 641 static struct dma_async_tx_descriptor *ops_run_compute5(struct stripe_head *sh) 642 { 643 /* kernel stack size limits the total number of disks */ 644 int disks 
= sh->disks; 645 struct page *xor_srcs[disks]; 646 int target = sh->ops.target; 647 struct r5dev *tgt = &sh->dev[target]; 648 struct page *xor_dest = tgt->page; 649 int count = 0; 650 struct dma_async_tx_descriptor *tx; 651 int i; 652 653 pr_debug("%s: stripe %llu block: %d\n", 654 __func__, (unsigned long long)sh->sector, target); 655 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 656 657 for (i = disks; i--; ) 658 if (i != target) 659 xor_srcs[count++] = sh->dev[i].page; 660 661 atomic_inc(&sh->count); 662 663 if (unlikely(count == 1)) 664 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, 665 0, NULL, ops_complete_compute5, sh); 666 else 667 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 668 ASYNC_TX_XOR_ZERO_DST, NULL, 669 ops_complete_compute5, sh); 670 671 return tx; 672 } 673 674 static void ops_complete_prexor(void *stripe_head_ref) 675 { 676 struct stripe_head *sh = stripe_head_ref; 677 678 pr_debug("%s: stripe %llu\n", __func__, 679 (unsigned long long)sh->sector); 680 } 681 682 static struct dma_async_tx_descriptor * 683 ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) 684 { 685 /* kernel stack size limits the total number of disks */ 686 int disks = sh->disks; 687 struct page *xor_srcs[disks]; 688 int count = 0, pd_idx = sh->pd_idx, i; 689 690 /* existing parity data subtracted */ 691 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 692 693 pr_debug("%s: stripe %llu\n", __func__, 694 (unsigned long long)sh->sector); 695 696 for (i = disks; i--; ) { 697 struct r5dev *dev = &sh->dev[i]; 698 /* Only process blocks that are known to be uptodate */ 699 if (test_bit(R5_Wantdrain, &dev->flags)) 700 xor_srcs[count++] = dev->page; 701 } 702 703 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 704 ASYNC_TX_DEP_ACK | ASYNC_TX_XOR_DROP_DST, tx, 705 ops_complete_prexor, sh); 706 707 return tx; 708 } 709 710 static struct dma_async_tx_descriptor * 711 ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) 712 { 713 int disks = sh->disks; 714 int i; 715 716 pr_debug("%s: stripe %llu\n", __func__, 717 (unsigned long long)sh->sector); 718 719 for (i = disks; i--; ) { 720 struct r5dev *dev = &sh->dev[i]; 721 struct bio *chosen; 722 723 if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) { 724 struct bio *wbi; 725 726 spin_lock(&sh->lock); 727 chosen = dev->towrite; 728 dev->towrite = NULL; 729 BUG_ON(dev->written); 730 wbi = dev->written = chosen; 731 spin_unlock(&sh->lock); 732 733 while (wbi && wbi->bi_sector < 734 dev->sector + STRIPE_SECTORS) { 735 tx = async_copy_data(1, wbi, dev->page, 736 dev->sector, tx); 737 wbi = r5_next_bio(wbi, dev->sector); 738 } 739 } 740 } 741 742 return tx; 743 } 744 745 static void ops_complete_postxor(void *stripe_head_ref) 746 { 747 struct stripe_head *sh = stripe_head_ref; 748 int disks = sh->disks, i, pd_idx = sh->pd_idx; 749 750 pr_debug("%s: stripe %llu\n", __func__, 751 (unsigned long long)sh->sector); 752 753 for (i = disks; i--; ) { 754 struct r5dev *dev = &sh->dev[i]; 755 if (dev->written || i == pd_idx) 756 set_bit(R5_UPTODATE, &dev->flags); 757 } 758 759 if (sh->reconstruct_state == reconstruct_state_drain_run) 760 sh->reconstruct_state = reconstruct_state_drain_result; 761 else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) 762 sh->reconstruct_state = reconstruct_state_prexor_drain_result; 763 else { 764 BUG_ON(sh->reconstruct_state != reconstruct_state_run); 765 sh->reconstruct_state = reconstruct_state_result; 766 } 767 768 
set_bit(STRIPE_HANDLE, &sh->state); 769 release_stripe(sh); 770 } 771 772 static void 773 ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) 774 { 775 /* kernel stack size limits the total number of disks */ 776 int disks = sh->disks; 777 struct page *xor_srcs[disks]; 778 779 int count = 0, pd_idx = sh->pd_idx, i; 780 struct page *xor_dest; 781 int prexor = 0; 782 unsigned long flags; 783 784 pr_debug("%s: stripe %llu\n", __func__, 785 (unsigned long long)sh->sector); 786 787 /* check if prexor is active which means only process blocks 788 * that are part of a read-modify-write (written) 789 */ 790 if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) { 791 prexor = 1; 792 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 793 for (i = disks; i--; ) { 794 struct r5dev *dev = &sh->dev[i]; 795 if (dev->written) 796 xor_srcs[count++] = dev->page; 797 } 798 } else { 799 xor_dest = sh->dev[pd_idx].page; 800 for (i = disks; i--; ) { 801 struct r5dev *dev = &sh->dev[i]; 802 if (i != pd_idx) 803 xor_srcs[count++] = dev->page; 804 } 805 } 806 807 /* 1/ if we prexor'd then the dest is reused as a source 808 * 2/ if we did not prexor then we are redoing the parity 809 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST 810 * for the synchronous xor case 811 */ 812 flags = ASYNC_TX_DEP_ACK | ASYNC_TX_ACK | 813 (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST); 814 815 atomic_inc(&sh->count); 816 817 if (unlikely(count == 1)) { 818 flags &= ~(ASYNC_TX_XOR_DROP_DST | ASYNC_TX_XOR_ZERO_DST); 819 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, 820 flags, tx, ops_complete_postxor, sh); 821 } else 822 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 823 flags, tx, ops_complete_postxor, sh); 824 } 825 826 static void ops_complete_check(void *stripe_head_ref) 827 { 828 struct stripe_head *sh = stripe_head_ref; 829 830 pr_debug("%s: stripe %llu\n", __func__, 831 (unsigned long long)sh->sector); 832 833 sh->check_state = check_state_check_result; 834 set_bit(STRIPE_HANDLE, &sh->state); 835 release_stripe(sh); 836 } 837 838 static void ops_run_check(struct stripe_head *sh) 839 { 840 /* kernel stack size limits the total number of disks */ 841 int disks = sh->disks; 842 struct page *xor_srcs[disks]; 843 struct dma_async_tx_descriptor *tx; 844 845 int count = 0, pd_idx = sh->pd_idx, i; 846 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 847 848 pr_debug("%s: stripe %llu\n", __func__, 849 (unsigned long long)sh->sector); 850 851 for (i = disks; i--; ) { 852 struct r5dev *dev = &sh->dev[i]; 853 if (i != pd_idx) 854 xor_srcs[count++] = dev->page; 855 } 856 857 tx = async_xor_zero_sum(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 858 &sh->ops.zero_sum_result, 0, NULL, NULL, NULL); 859 860 atomic_inc(&sh->count); 861 tx = async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx, 862 ops_complete_check, sh); 863 } 864 865 static void raid5_run_ops(struct stripe_head *sh, unsigned long ops_request) 866 { 867 int overlap_clear = 0, i, disks = sh->disks; 868 struct dma_async_tx_descriptor *tx = NULL; 869 870 if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) { 871 ops_run_biofill(sh); 872 overlap_clear++; 873 } 874 875 if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) { 876 tx = ops_run_compute5(sh); 877 /* terminate the chain if postxor is not set to be run */ 878 if (tx && !test_bit(STRIPE_OP_POSTXOR, &ops_request)) 879 async_tx_ack(tx); 880 } 881 882 if (test_bit(STRIPE_OP_PREXOR, &ops_request)) 883 tx = ops_run_prexor(sh, tx); 884 885 if 
(test_bit(STRIPE_OP_BIODRAIN, &ops_request)) { 886 tx = ops_run_biodrain(sh, tx); 887 overlap_clear++; 888 } 889 890 if (test_bit(STRIPE_OP_POSTXOR, &ops_request)) 891 ops_run_postxor(sh, tx); 892 893 if (test_bit(STRIPE_OP_CHECK, &ops_request)) 894 ops_run_check(sh); 895 896 if (overlap_clear) 897 for (i = disks; i--; ) { 898 struct r5dev *dev = &sh->dev[i]; 899 if (test_and_clear_bit(R5_Overlap, &dev->flags)) 900 wake_up(&sh->raid_conf->wait_for_overlap); 901 } 902 } 903 904 static int grow_one_stripe(raid5_conf_t *conf) 905 { 906 struct stripe_head *sh; 907 sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL); 908 if (!sh) 909 return 0; 910 memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev)); 911 sh->raid_conf = conf; 912 spin_lock_init(&sh->lock); 913 914 if (grow_buffers(sh, conf->raid_disks)) { 915 shrink_buffers(sh, conf->raid_disks); 916 kmem_cache_free(conf->slab_cache, sh); 917 return 0; 918 } 919 sh->disks = conf->raid_disks; 920 /* we just created an active stripe so... */ 921 atomic_set(&sh->count, 1); 922 atomic_inc(&conf->active_stripes); 923 INIT_LIST_HEAD(&sh->lru); 924 release_stripe(sh); 925 return 1; 926 } 927 928 static int grow_stripes(raid5_conf_t *conf, int num) 929 { 930 struct kmem_cache *sc; 931 int devs = conf->raid_disks; 932 933 sprintf(conf->cache_name[0], 934 "raid%d-%s", conf->level, mdname(conf->mddev)); 935 sprintf(conf->cache_name[1], 936 "raid%d-%s-alt", conf->level, mdname(conf->mddev)); 937 conf->active_name = 0; 938 sc = kmem_cache_create(conf->cache_name[conf->active_name], 939 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev), 940 0, 0, NULL); 941 if (!sc) 942 return 1; 943 conf->slab_cache = sc; 944 conf->pool_size = devs; 945 while (num--) 946 if (!grow_one_stripe(conf)) 947 return 1; 948 return 0; 949 } 950 951 static int resize_stripes(raid5_conf_t *conf, int newsize) 952 { 953 /* Make all the stripes able to hold 'newsize' devices. 954 * New slots in each stripe get 'page' set to a new page. 955 * 956 * This happens in stages: 957 * 1/ create a new kmem_cache and allocate the required number of 958 * stripe_heads. 959 * 2/ gather all the old stripe_heads and transfer the pages across 960 * to the new stripe_heads. This will have the side effect of 961 * freezing the array as once all stripe_heads have been collected, 962 * no IO will be possible. Old stripe heads are freed once their 963 * pages have been transferred over, and the old kmem_cache is 964 * freed when all stripes are done. 965 * 3/ reallocate conf->disks to be suitably bigger. If this fails, 966 * we simply return a failure status - no need to clean anything up. 967 * 4/ allocate new pages for the new slots in the new stripe_heads. 968 * If this fails, we don't bother trying to shrink the 969 * stripe_heads down again, we just leave them as they are. 970 * As each stripe_head is processed the new one is released into 971 * active service. 972 * 973 * Once step 2 is started, we cannot afford to wait for a write, 974 * so we use GFP_NOIO allocations.
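 *
 * (Worked example with invented numbers: growing from 4 to 5 devices means
 * the replacement cache below is created with objects of
 * sizeof(struct stripe_head) + 4*sizeof(struct r5dev), i.e. (newsize-1)
 * r5dev slots beyond the one embedded in the struct, and step 4 then
 * allocates one extra page per stripe_head for the new slot.)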
975 */ 976 struct stripe_head *osh, *nsh; 977 LIST_HEAD(newstripes); 978 struct disk_info *ndisks; 979 int err; 980 struct kmem_cache *sc; 981 int i; 982 983 if (newsize <= conf->pool_size) 984 return 0; /* never bother to shrink */ 985 986 err = md_allow_write(conf->mddev); 987 if (err) 988 return err; 989 990 /* Step 1 */ 991 sc = kmem_cache_create(conf->cache_name[1-conf->active_name], 992 sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev), 993 0, 0, NULL); 994 if (!sc) 995 return -ENOMEM; 996 997 for (i = conf->max_nr_stripes; i; i--) { 998 nsh = kmem_cache_alloc(sc, GFP_KERNEL); 999 if (!nsh) 1000 break; 1001 1002 memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev)); 1003 1004 nsh->raid_conf = conf; 1005 spin_lock_init(&nsh->lock); 1006 1007 list_add(&nsh->lru, &newstripes); 1008 } 1009 if (i) { 1010 /* didn't get enough, give up */ 1011 while (!list_empty(&newstripes)) { 1012 nsh = list_entry(newstripes.next, struct stripe_head, lru); 1013 list_del(&nsh->lru); 1014 kmem_cache_free(sc, nsh); 1015 } 1016 kmem_cache_destroy(sc); 1017 return -ENOMEM; 1018 } 1019 /* Step 2 - Must use GFP_NOIO now. 1020 * OK, we have enough stripes, start collecting inactive 1021 * stripes and copying them over 1022 */ 1023 list_for_each_entry(nsh, &newstripes, lru) { 1024 spin_lock_irq(&conf->device_lock); 1025 wait_event_lock_irq(conf->wait_for_stripe, 1026 !list_empty(&conf->inactive_list), 1027 conf->device_lock, 1028 unplug_slaves(conf->mddev) 1029 ); 1030 osh = get_free_stripe(conf); 1031 spin_unlock_irq(&conf->device_lock); 1032 atomic_set(&nsh->count, 1); 1033 for(i=0; i<conf->pool_size; i++) 1034 nsh->dev[i].page = osh->dev[i].page; 1035 for( ; i<newsize; i++) 1036 nsh->dev[i].page = NULL; 1037 kmem_cache_free(conf->slab_cache, osh); 1038 } 1039 kmem_cache_destroy(conf->slab_cache); 1040 1041 /* Step 3. 1042 * At this point, we are holding all the stripes so the array 1043 * is completely stalled, so now is a good time to resize 1044 * conf->disks. 
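 * If the kzalloc() below fails we keep the old, smaller conf->disks and
 * just record -ENOMEM in err; step 4 still runs, so the collected stripes
 * are returned to service before the error is reported to the caller.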
1045 */ 1046 ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO); 1047 if (ndisks) { 1048 for (i=0; i<conf->raid_disks; i++) 1049 ndisks[i] = conf->disks[i]; 1050 kfree(conf->disks); 1051 conf->disks = ndisks; 1052 } else 1053 err = -ENOMEM; 1054 1055 /* Step 4, return new stripes to service */ 1056 while(!list_empty(&newstripes)) { 1057 nsh = list_entry(newstripes.next, struct stripe_head, lru); 1058 list_del_init(&nsh->lru); 1059 for (i=conf->raid_disks; i < newsize; i++) 1060 if (nsh->dev[i].page == NULL) { 1061 struct page *p = alloc_page(GFP_NOIO); 1062 nsh->dev[i].page = p; 1063 if (!p) 1064 err = -ENOMEM; 1065 } 1066 release_stripe(nsh); 1067 } 1068 /* critical section pass, GFP_NOIO no longer needed */ 1069 1070 conf->slab_cache = sc; 1071 conf->active_name = 1-conf->active_name; 1072 conf->pool_size = newsize; 1073 return err; 1074 } 1075 1076 static int drop_one_stripe(raid5_conf_t *conf) 1077 { 1078 struct stripe_head *sh; 1079 1080 spin_lock_irq(&conf->device_lock); 1081 sh = get_free_stripe(conf); 1082 spin_unlock_irq(&conf->device_lock); 1083 if (!sh) 1084 return 0; 1085 BUG_ON(atomic_read(&sh->count)); 1086 shrink_buffers(sh, conf->pool_size); 1087 kmem_cache_free(conf->slab_cache, sh); 1088 atomic_dec(&conf->active_stripes); 1089 return 1; 1090 } 1091 1092 static void shrink_stripes(raid5_conf_t *conf) 1093 { 1094 while (drop_one_stripe(conf)) 1095 ; 1096 1097 if (conf->slab_cache) 1098 kmem_cache_destroy(conf->slab_cache); 1099 conf->slab_cache = NULL; 1100 } 1101 1102 static void raid5_end_read_request(struct bio * bi, int error) 1103 { 1104 struct stripe_head *sh = bi->bi_private; 1105 raid5_conf_t *conf = sh->raid_conf; 1106 int disks = sh->disks, i; 1107 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 1108 char b[BDEVNAME_SIZE]; 1109 mdk_rdev_t *rdev; 1110 1111 1112 for (i=0 ; i<disks; i++) 1113 if (bi == &sh->dev[i].req) 1114 break; 1115 1116 pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n", 1117 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 1118 uptodate); 1119 if (i == disks) { 1120 BUG(); 1121 return; 1122 } 1123 1124 if (uptodate) { 1125 set_bit(R5_UPTODATE, &sh->dev[i].flags); 1126 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 1127 rdev = conf->disks[i].rdev; 1128 printk_rl(KERN_INFO "raid5:%s: read error corrected" 1129 " (%lu sectors at %llu on %s)\n", 1130 mdname(conf->mddev), STRIPE_SECTORS, 1131 (unsigned long long)(sh->sector 1132 + rdev->data_offset), 1133 bdevname(rdev->bdev, b)); 1134 clear_bit(R5_ReadError, &sh->dev[i].flags); 1135 clear_bit(R5_ReWrite, &sh->dev[i].flags); 1136 } 1137 if (atomic_read(&conf->disks[i].rdev->read_errors)) 1138 atomic_set(&conf->disks[i].rdev->read_errors, 0); 1139 } else { 1140 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b); 1141 int retry = 0; 1142 rdev = conf->disks[i].rdev; 1143 1144 clear_bit(R5_UPTODATE, &sh->dev[i].flags); 1145 atomic_inc(&rdev->read_errors); 1146 if (conf->mddev->degraded) 1147 printk_rl(KERN_WARNING 1148 "raid5:%s: read error not correctable " 1149 "(sector %llu on %s).\n", 1150 mdname(conf->mddev), 1151 (unsigned long long)(sh->sector 1152 + rdev->data_offset), 1153 bdn); 1154 else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) 1155 /* Oh, no!!! */ 1156 printk_rl(KERN_WARNING 1157 "raid5:%s: read error NOT corrected!! 
" 1158 "(sector %llu on %s).\n", 1159 mdname(conf->mddev), 1160 (unsigned long long)(sh->sector 1161 + rdev->data_offset), 1162 bdn); 1163 else if (atomic_read(&rdev->read_errors) 1164 > conf->max_nr_stripes) 1165 printk(KERN_WARNING 1166 "raid5:%s: Too many read errors, failing device %s.\n", 1167 mdname(conf->mddev), bdn); 1168 else 1169 retry = 1; 1170 if (retry) 1171 set_bit(R5_ReadError, &sh->dev[i].flags); 1172 else { 1173 clear_bit(R5_ReadError, &sh->dev[i].flags); 1174 clear_bit(R5_ReWrite, &sh->dev[i].flags); 1175 md_error(conf->mddev, rdev); 1176 } 1177 } 1178 rdev_dec_pending(conf->disks[i].rdev, conf->mddev); 1179 clear_bit(R5_LOCKED, &sh->dev[i].flags); 1180 set_bit(STRIPE_HANDLE, &sh->state); 1181 release_stripe(sh); 1182 } 1183 1184 static void raid5_end_write_request(struct bio *bi, int error) 1185 { 1186 struct stripe_head *sh = bi->bi_private; 1187 raid5_conf_t *conf = sh->raid_conf; 1188 int disks = sh->disks, i; 1189 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 1190 1191 for (i=0 ; i<disks; i++) 1192 if (bi == &sh->dev[i].req) 1193 break; 1194 1195 pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n", 1196 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 1197 uptodate); 1198 if (i == disks) { 1199 BUG(); 1200 return; 1201 } 1202 1203 if (!uptodate) 1204 md_error(conf->mddev, conf->disks[i].rdev); 1205 1206 rdev_dec_pending(conf->disks[i].rdev, conf->mddev); 1207 1208 clear_bit(R5_LOCKED, &sh->dev[i].flags); 1209 set_bit(STRIPE_HANDLE, &sh->state); 1210 release_stripe(sh); 1211 } 1212 1213 1214 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous); 1215 1216 static void raid5_build_block(struct stripe_head *sh, int i, int previous) 1217 { 1218 struct r5dev *dev = &sh->dev[i]; 1219 1220 bio_init(&dev->req); 1221 dev->req.bi_io_vec = &dev->vec; 1222 dev->req.bi_vcnt++; 1223 dev->req.bi_max_vecs++; 1224 dev->vec.bv_page = dev->page; 1225 dev->vec.bv_len = STRIPE_SIZE; 1226 dev->vec.bv_offset = 0; 1227 1228 dev->req.bi_sector = sh->sector; 1229 dev->req.bi_private = sh; 1230 1231 dev->flags = 0; 1232 dev->sector = compute_blocknr(sh, i, previous); 1233 } 1234 1235 static void error(mddev_t *mddev, mdk_rdev_t *rdev) 1236 { 1237 char b[BDEVNAME_SIZE]; 1238 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 1239 pr_debug("raid5: error called\n"); 1240 1241 if (!test_bit(Faulty, &rdev->flags)) { 1242 set_bit(MD_CHANGE_DEVS, &mddev->flags); 1243 if (test_and_clear_bit(In_sync, &rdev->flags)) { 1244 unsigned long flags; 1245 spin_lock_irqsave(&conf->device_lock, flags); 1246 mddev->degraded++; 1247 spin_unlock_irqrestore(&conf->device_lock, flags); 1248 /* 1249 * if recovery was running, make sure it aborts. 1250 */ 1251 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 1252 } 1253 set_bit(Faulty, &rdev->flags); 1254 printk(KERN_ALERT 1255 "raid5: Disk failure on %s, disabling device.\n" 1256 "raid5: Operation continuing on %d devices.\n", 1257 bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded); 1258 } 1259 } 1260 1261 /* 1262 * Input: a 'big' sector number, 1263 * Output: index of the data and parity disk, and the sector # in them. 1264 */ 1265 static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, 1266 int previous, int *dd_idx, 1267 struct stripe_head *sh) 1268 { 1269 long stripe; 1270 unsigned long chunk_number; 1271 unsigned int chunk_offset; 1272 int pd_idx, qd_idx; 1273 int ddf_layout = 0; 1274 sector_t new_sector; 1275 int algorithm = previous ? 
conf->prev_algo 1276 : conf->algorithm; 1277 int sectors_per_chunk = previous ? (conf->prev_chunk >> 9) 1278 : (conf->chunk_size >> 9); 1279 int raid_disks = previous ? conf->previous_raid_disks 1280 : conf->raid_disks; 1281 int data_disks = raid_disks - conf->max_degraded; 1282 1283 /* First compute the information on this sector */ 1284 1285 /* 1286 * Compute the chunk number and the sector offset inside the chunk 1287 */ 1288 chunk_offset = sector_div(r_sector, sectors_per_chunk); 1289 chunk_number = r_sector; 1290 BUG_ON(r_sector != chunk_number); 1291 1292 /* 1293 * Compute the stripe number 1294 */ 1295 stripe = chunk_number / data_disks; 1296 1297 /* 1298 * Compute the data disk and parity disk indexes inside the stripe 1299 */ 1300 *dd_idx = chunk_number % data_disks; 1301 1302 /* 1303 * Select the parity disk based on the user selected algorithm. 1304 */ 1305 pd_idx = qd_idx = ~0; 1306 switch(conf->level) { 1307 case 4: 1308 pd_idx = data_disks; 1309 break; 1310 case 5: 1311 switch (algorithm) { 1312 case ALGORITHM_LEFT_ASYMMETRIC: 1313 pd_idx = data_disks - stripe % raid_disks; 1314 if (*dd_idx >= pd_idx) 1315 (*dd_idx)++; 1316 break; 1317 case ALGORITHM_RIGHT_ASYMMETRIC: 1318 pd_idx = stripe % raid_disks; 1319 if (*dd_idx >= pd_idx) 1320 (*dd_idx)++; 1321 break; 1322 case ALGORITHM_LEFT_SYMMETRIC: 1323 pd_idx = data_disks - stripe % raid_disks; 1324 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 1325 break; 1326 case ALGORITHM_RIGHT_SYMMETRIC: 1327 pd_idx = stripe % raid_disks; 1328 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 1329 break; 1330 case ALGORITHM_PARITY_0: 1331 pd_idx = 0; 1332 (*dd_idx)++; 1333 break; 1334 case ALGORITHM_PARITY_N: 1335 pd_idx = data_disks; 1336 break; 1337 default: 1338 printk(KERN_ERR "raid5: unsupported algorithm %d\n", 1339 algorithm); 1340 BUG(); 1341 } 1342 break; 1343 case 6: 1344 1345 switch (algorithm) { 1346 case ALGORITHM_LEFT_ASYMMETRIC: 1347 pd_idx = raid_disks - 1 - (stripe % raid_disks); 1348 qd_idx = pd_idx + 1; 1349 if (pd_idx == raid_disks-1) { 1350 (*dd_idx)++; /* Q D D D P */ 1351 qd_idx = 0; 1352 } else if (*dd_idx >= pd_idx) 1353 (*dd_idx) += 2; /* D D P Q D */ 1354 break; 1355 case ALGORITHM_RIGHT_ASYMMETRIC: 1356 pd_idx = stripe % raid_disks; 1357 qd_idx = pd_idx + 1; 1358 if (pd_idx == raid_disks-1) { 1359 (*dd_idx)++; /* Q D D D P */ 1360 qd_idx = 0; 1361 } else if (*dd_idx >= pd_idx) 1362 (*dd_idx) += 2; /* D D P Q D */ 1363 break; 1364 case ALGORITHM_LEFT_SYMMETRIC: 1365 pd_idx = raid_disks - 1 - (stripe % raid_disks); 1366 qd_idx = (pd_idx + 1) % raid_disks; 1367 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; 1368 break; 1369 case ALGORITHM_RIGHT_SYMMETRIC: 1370 pd_idx = stripe % raid_disks; 1371 qd_idx = (pd_idx + 1) % raid_disks; 1372 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; 1373 break; 1374 1375 case ALGORITHM_PARITY_0: 1376 pd_idx = 0; 1377 qd_idx = 1; 1378 (*dd_idx) += 2; 1379 break; 1380 case ALGORITHM_PARITY_N: 1381 pd_idx = data_disks; 1382 qd_idx = data_disks + 1; 1383 break; 1384 1385 case ALGORITHM_ROTATING_ZERO_RESTART: 1386 /* Exactly the same as RIGHT_ASYMMETRIC, but the order 1387 * of blocks for computing Q is different.
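 * Like the other DDF cases below, this sets ddf_layout = 1, which
 * raid6_d0() and the syndrome helpers use to walk the devices in
 * plain 0..disks-1 order when building the Q syndrome.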
1388 */ 1389 pd_idx = stripe % raid_disks; 1390 qd_idx = pd_idx + 1; 1391 if (pd_idx == raid_disks-1) { 1392 (*dd_idx)++; /* Q D D D P */ 1393 qd_idx = 0; 1394 } else if (*dd_idx >= pd_idx) 1395 (*dd_idx) += 2; /* D D P Q D */ 1396 ddf_layout = 1; 1397 break; 1398 1399 case ALGORITHM_ROTATING_N_RESTART: 1400 /* Same as left_asymmetric, but the first stripe is 1401 * D D D P Q rather than 1402 * Q D D D P 1403 */ 1404 pd_idx = raid_disks - 1 - ((stripe + 1) % raid_disks); 1405 qd_idx = pd_idx + 1; 1406 if (pd_idx == raid_disks-1) { 1407 (*dd_idx)++; /* Q D D D P */ 1408 qd_idx = 0; 1409 } else if (*dd_idx >= pd_idx) 1410 (*dd_idx) += 2; /* D D P Q D */ 1411 ddf_layout = 1; 1412 break; 1413 1414 case ALGORITHM_ROTATING_N_CONTINUE: 1415 /* Same as left_symmetric but Q is before P */ 1416 pd_idx = raid_disks - 1 - (stripe % raid_disks); 1417 qd_idx = (pd_idx + raid_disks - 1) % raid_disks; 1418 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 1419 ddf_layout = 1; 1420 break; 1421 1422 case ALGORITHM_LEFT_ASYMMETRIC_6: 1423 /* RAID5 left_asymmetric, with Q on last device */ 1424 pd_idx = data_disks - stripe % (raid_disks-1); 1425 if (*dd_idx >= pd_idx) 1426 (*dd_idx)++; 1427 qd_idx = raid_disks - 1; 1428 break; 1429 1430 case ALGORITHM_RIGHT_ASYMMETRIC_6: 1431 pd_idx = stripe % (raid_disks-1); 1432 if (*dd_idx >= pd_idx) 1433 (*dd_idx)++; 1434 qd_idx = raid_disks - 1; 1435 break; 1436 1437 case ALGORITHM_LEFT_SYMMETRIC_6: 1438 pd_idx = data_disks - stripe % (raid_disks-1); 1439 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); 1440 qd_idx = raid_disks - 1; 1441 break; 1442 1443 case ALGORITHM_RIGHT_SYMMETRIC_6: 1444 pd_idx = stripe % (raid_disks-1); 1445 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); 1446 qd_idx = raid_disks - 1; 1447 break; 1448 1449 case ALGORITHM_PARITY_0_6: 1450 pd_idx = 0; 1451 (*dd_idx)++; 1452 qd_idx = raid_disks - 1; 1453 break; 1454 1455 1456 default: 1457 printk(KERN_CRIT "raid6: unsupported algorithm %d\n", 1458 algorithm); 1459 BUG(); 1460 } 1461 break; 1462 } 1463 1464 if (sh) { 1465 sh->pd_idx = pd_idx; 1466 sh->qd_idx = qd_idx; 1467 sh->ddf_layout = ddf_layout; 1468 } 1469 /* 1470 * Finally, compute the new sector number 1471 */ 1472 new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset; 1473 return new_sector; 1474 } 1475 1476 1477 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) 1478 { 1479 raid5_conf_t *conf = sh->raid_conf; 1480 int raid_disks = sh->disks; 1481 int data_disks = raid_disks - conf->max_degraded; 1482 sector_t new_sector = sh->sector, check; 1483 int sectors_per_chunk = previous ? (conf->prev_chunk >> 9) 1484 : (conf->chunk_size >> 9); 1485 int algorithm = previous ?
conf->prev_algo 1486 : conf->algorithm; 1487 sector_t stripe; 1488 int chunk_offset; 1489 int chunk_number, dummy1, dd_idx = i; 1490 sector_t r_sector; 1491 struct stripe_head sh2; 1492 1493 1494 chunk_offset = sector_div(new_sector, sectors_per_chunk); 1495 stripe = new_sector; 1496 BUG_ON(new_sector != stripe); 1497 1498 if (i == sh->pd_idx) 1499 return 0; 1500 switch(conf->level) { 1501 case 4: break; 1502 case 5: 1503 switch (algorithm) { 1504 case ALGORITHM_LEFT_ASYMMETRIC: 1505 case ALGORITHM_RIGHT_ASYMMETRIC: 1506 if (i > sh->pd_idx) 1507 i--; 1508 break; 1509 case ALGORITHM_LEFT_SYMMETRIC: 1510 case ALGORITHM_RIGHT_SYMMETRIC: 1511 if (i < sh->pd_idx) 1512 i += raid_disks; 1513 i -= (sh->pd_idx + 1); 1514 break; 1515 case ALGORITHM_PARITY_0: 1516 i -= 1; 1517 break; 1518 case ALGORITHM_PARITY_N: 1519 break; 1520 default: 1521 printk(KERN_ERR "raid5: unsupported algorithm %d\n", 1522 algorithm); 1523 BUG(); 1524 } 1525 break; 1526 case 6: 1527 if (i == sh->qd_idx) 1528 return 0; /* It is the Q disk */ 1529 switch (algorithm) { 1530 case ALGORITHM_LEFT_ASYMMETRIC: 1531 case ALGORITHM_RIGHT_ASYMMETRIC: 1532 case ALGORITHM_ROTATING_ZERO_RESTART: 1533 case ALGORITHM_ROTATING_N_RESTART: 1534 if (sh->pd_idx == raid_disks-1) 1535 i--; /* Q D D D P */ 1536 else if (i > sh->pd_idx) 1537 i -= 2; /* D D P Q D */ 1538 break; 1539 case ALGORITHM_LEFT_SYMMETRIC: 1540 case ALGORITHM_RIGHT_SYMMETRIC: 1541 if (sh->pd_idx == raid_disks-1) 1542 i--; /* Q D D D P */ 1543 else { 1544 /* D D P Q D */ 1545 if (i < sh->pd_idx) 1546 i += raid_disks; 1547 i -= (sh->pd_idx + 2); 1548 } 1549 break; 1550 case ALGORITHM_PARITY_0: 1551 i -= 2; 1552 break; 1553 case ALGORITHM_PARITY_N: 1554 break; 1555 case ALGORITHM_ROTATING_N_CONTINUE: 1556 if (sh->pd_idx == 0) 1557 i--; /* P D D D Q */ 1558 else if (i > sh->pd_idx) 1559 i -= 2; /* D D Q P D */ 1560 break; 1561 case ALGORITHM_LEFT_ASYMMETRIC_6: 1562 case ALGORITHM_RIGHT_ASYMMETRIC_6: 1563 if (i > sh->pd_idx) 1564 i--; 1565 break; 1566 case ALGORITHM_LEFT_SYMMETRIC_6: 1567 case ALGORITHM_RIGHT_SYMMETRIC_6: 1568 if (i < sh->pd_idx) 1569 i += data_disks + 1; 1570 i -= (sh->pd_idx + 1); 1571 break; 1572 case ALGORITHM_PARITY_0_6: 1573 i -= 1; 1574 break; 1575 default: 1576 printk(KERN_CRIT "raid6: unsupported algorithm %d\n", 1577 algorithm); 1578 BUG(); 1579 } 1580 break; 1581 } 1582 1583 chunk_number = stripe * data_disks + i; 1584 r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset; 1585 1586 check = raid5_compute_sector(conf, r_sector, 1587 previous, &dummy1, &sh2); 1588 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx 1589 || sh2.qd_idx != sh->qd_idx) { 1590 printk(KERN_ERR "compute_blocknr: map not correct\n"); 1591 return 0; 1592 } 1593 return r_sector; 1594 } 1595 1596 1597 1598 /* 1599 * Copy data between a page in the stripe cache, and one or more bion 1600 * The page could align with the middle of the bio, or there could be 1601 * several bion, each with several bio_vecs, which cover part of the page 1602 * Multiple bion are linked together on bi_next. There may be extras 1603 * at the end of this list. We ignore them. 
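 *
 * Small worked example (sector offsets invented for this comment): a bio
 * whose bi_sector is 2 sectors past 'sector' starts copying at
 * page_offset = 2*512 = 1024 bytes into the stripe page; one that starts
 * 2 sectors before it begins with page_offset = -1024, so the first
 * 1024 bytes of the bio's data are skipped via b_offset before copying.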
1604 */ 1605 static void copy_data(int frombio, struct bio *bio, 1606 struct page *page, 1607 sector_t sector) 1608 { 1609 char *pa = page_address(page); 1610 struct bio_vec *bvl; 1611 int i; 1612 int page_offset; 1613 1614 if (bio->bi_sector >= sector) 1615 page_offset = (signed)(bio->bi_sector - sector) * 512; 1616 else 1617 page_offset = (signed)(sector - bio->bi_sector) * -512; 1618 bio_for_each_segment(bvl, bio, i) { 1619 int len = bio_iovec_idx(bio,i)->bv_len; 1620 int clen; 1621 int b_offset = 0; 1622 1623 if (page_offset < 0) { 1624 b_offset = -page_offset; 1625 page_offset += b_offset; 1626 len -= b_offset; 1627 } 1628 1629 if (len > 0 && page_offset + len > STRIPE_SIZE) 1630 clen = STRIPE_SIZE - page_offset; 1631 else clen = len; 1632 1633 if (clen > 0) { 1634 char *ba = __bio_kmap_atomic(bio, i, KM_USER0); 1635 if (frombio) 1636 memcpy(pa+page_offset, ba+b_offset, clen); 1637 else 1638 memcpy(ba+b_offset, pa+page_offset, clen); 1639 __bio_kunmap_atomic(ba, KM_USER0); 1640 } 1641 if (clen < len) /* hit end of page */ 1642 break; 1643 page_offset += len; 1644 } 1645 } 1646 1647 #define check_xor() do { \ 1648 if (count == MAX_XOR_BLOCKS) { \ 1649 xor_blocks(count, STRIPE_SIZE, dest, ptr);\ 1650 count = 0; \ 1651 } \ 1652 } while(0) 1653 1654 static void compute_parity6(struct stripe_head *sh, int method) 1655 { 1656 raid5_conf_t *conf = sh->raid_conf; 1657 int i, pd_idx, qd_idx, d0_idx, disks = sh->disks, count; 1658 int syndrome_disks = sh->ddf_layout ? disks : (disks - 2); 1659 struct bio *chosen; 1660 /**** FIX THIS: This could be very bad if disks is close to 256 ****/ 1661 void *ptrs[syndrome_disks+2]; 1662 1663 pd_idx = sh->pd_idx; 1664 qd_idx = sh->qd_idx; 1665 d0_idx = raid6_d0(sh); 1666 1667 pr_debug("compute_parity, stripe %llu, method %d\n", 1668 (unsigned long long)sh->sector, method); 1669 1670 switch(method) { 1671 case READ_MODIFY_WRITE: 1672 BUG(); /* READ_MODIFY_WRITE N/A for RAID-6 */ 1673 case RECONSTRUCT_WRITE: 1674 for (i= disks; i-- ;) 1675 if ( i != pd_idx && i != qd_idx && sh->dev[i].towrite ) { 1676 chosen = sh->dev[i].towrite; 1677 sh->dev[i].towrite = NULL; 1678 1679 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 1680 wake_up(&conf->wait_for_overlap); 1681 1682 BUG_ON(sh->dev[i].written); 1683 sh->dev[i].written = chosen; 1684 } 1685 break; 1686 case CHECK_PARITY: 1687 BUG(); /* Not implemented yet */ 1688 } 1689 1690 for (i = disks; i--;) 1691 if (sh->dev[i].written) { 1692 sector_t sector = sh->dev[i].sector; 1693 struct bio *wbi = sh->dev[i].written; 1694 while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) { 1695 copy_data(1, wbi, sh->dev[i].page, sector); 1696 wbi = r5_next_bio(wbi, sector); 1697 } 1698 1699 set_bit(R5_LOCKED, &sh->dev[i].flags); 1700 set_bit(R5_UPTODATE, &sh->dev[i].flags); 1701 } 1702 1703 /* Note that unlike RAID-5, the ordering of the disks matters greatly.*/ 1704 1705 for (i = 0; i < disks; i++) 1706 ptrs[i] = (void *)raid6_empty_zero_page; 1707 1708 count = 0; 1709 i = d0_idx; 1710 do { 1711 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); 1712 1713 ptrs[slot] = page_address(sh->dev[i].page); 1714 if (slot < syndrome_disks && 1715 !test_bit(R5_UPTODATE, &sh->dev[i].flags)) { 1716 printk(KERN_ERR "block %d/%d not uptodate " 1717 "on parity calc\n", i, count); 1718 BUG(); 1719 } 1720 1721 i = raid6_next_disk(i, disks); 1722 } while (i != d0_idx); 1723 BUG_ON(count != syndrome_disks); 1724 1725 raid6_call.gen_syndrome(syndrome_disks+2, STRIPE_SIZE, ptrs); 1726 1727 switch(method) { 1728 case 
RECONSTRUCT_WRITE: 1729 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 1730 set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags); 1731 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); 1732 set_bit(R5_LOCKED, &sh->dev[qd_idx].flags); 1733 break; 1734 case UPDATE_PARITY: 1735 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 1736 set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags); 1737 break; 1738 } 1739 } 1740 1741 1742 /* Compute one missing block */ 1743 static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero) 1744 { 1745 int i, count, disks = sh->disks; 1746 void *ptr[MAX_XOR_BLOCKS], *dest, *p; 1747 int qd_idx = sh->qd_idx; 1748 1749 pr_debug("compute_block_1, stripe %llu, idx %d\n", 1750 (unsigned long long)sh->sector, dd_idx); 1751 1752 if ( dd_idx == qd_idx ) { 1753 /* We're actually computing the Q drive */ 1754 compute_parity6(sh, UPDATE_PARITY); 1755 } else { 1756 dest = page_address(sh->dev[dd_idx].page); 1757 if (!nozero) memset(dest, 0, STRIPE_SIZE); 1758 count = 0; 1759 for (i = disks ; i--; ) { 1760 if (i == dd_idx || i == qd_idx) 1761 continue; 1762 p = page_address(sh->dev[i].page); 1763 if (test_bit(R5_UPTODATE, &sh->dev[i].flags)) 1764 ptr[count++] = p; 1765 else 1766 printk("compute_block() %d, stripe %llu, %d" 1767 " not present\n", dd_idx, 1768 (unsigned long long)sh->sector, i); 1769 1770 check_xor(); 1771 } 1772 if (count) 1773 xor_blocks(count, STRIPE_SIZE, dest, ptr); 1774 if (!nozero) set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); 1775 else clear_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); 1776 } 1777 } 1778 1779 /* Compute two missing blocks */ 1780 static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2) 1781 { 1782 int i, count, disks = sh->disks; 1783 int syndrome_disks = sh->ddf_layout ? disks : disks-2; 1784 int d0_idx = raid6_d0(sh); 1785 int faila = -1, failb = -1; 1786 /**** FIX THIS: This could be very bad if disks is close to 256 ****/ 1787 void *ptrs[syndrome_disks+2]; 1788 1789 for (i = 0; i < disks ; i++) 1790 ptrs[i] = (void *)raid6_empty_zero_page; 1791 count = 0; 1792 i = d0_idx; 1793 do { 1794 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); 1795 1796 ptrs[slot] = page_address(sh->dev[i].page); 1797 1798 if (i == dd_idx1) 1799 faila = slot; 1800 if (i == dd_idx2) 1801 failb = slot; 1802 i = raid6_next_disk(i, disks); 1803 } while (i != d0_idx); 1804 BUG_ON(count != syndrome_disks); 1805 1806 BUG_ON(faila == failb); 1807 if ( failb < faila ) { int tmp = faila; faila = failb; failb = tmp; } 1808 1809 pr_debug("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n", 1810 (unsigned long long)sh->sector, dd_idx1, dd_idx2, 1811 faila, failb); 1812 1813 if (failb == syndrome_disks+1) { 1814 /* Q disk is one of the missing disks */ 1815 if (faila == syndrome_disks) { 1816 /* Missing P+Q, just recompute */ 1817 compute_parity6(sh, UPDATE_PARITY); 1818 return; 1819 } else { 1820 /* We're missing D+Q; recompute D from P */ 1821 compute_block_1(sh, ((dd_idx1 == sh->qd_idx) ? 1822 dd_idx2 : dd_idx1), 1823 0); 1824 compute_parity6(sh, UPDATE_PARITY); /* Is this necessary? */ 1825 return; 1826 } 1827 } 1828 1829 /* We're missing D+P or D+D; */ 1830 if (failb == syndrome_disks) { 1831 /* We're missing D+P. */ 1832 raid6_datap_recov(syndrome_disks+2, STRIPE_SIZE, faila, ptrs); 1833 } else { 1834 /* We're missing D+D. 
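 * Note that faila/failb passed to the recovery routines are slot
 * numbers in the syndrome layout (assigned by raid6_idx_to_slot
 * above), not raw device indexes.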
*/ 1835 raid6_2data_recov(syndrome_disks+2, STRIPE_SIZE, faila, failb, 1836 ptrs); 1837 } 1838 1839 /* Both the above update both missing blocks */ 1840 set_bit(R5_UPTODATE, &sh->dev[dd_idx1].flags); 1841 set_bit(R5_UPTODATE, &sh->dev[dd_idx2].flags); 1842 } 1843 1844 static void 1845 schedule_reconstruction5(struct stripe_head *sh, struct stripe_head_state *s, 1846 int rcw, int expand) 1847 { 1848 int i, pd_idx = sh->pd_idx, disks = sh->disks; 1849 1850 if (rcw) { 1851 /* if we are not expanding this is a proper write request, and 1852 * there will be bios with new data to be drained into the 1853 * stripe cache 1854 */ 1855 if (!expand) { 1856 sh->reconstruct_state = reconstruct_state_drain_run; 1857 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); 1858 } else 1859 sh->reconstruct_state = reconstruct_state_run; 1860 1861 set_bit(STRIPE_OP_POSTXOR, &s->ops_request); 1862 1863 for (i = disks; i--; ) { 1864 struct r5dev *dev = &sh->dev[i]; 1865 1866 if (dev->towrite) { 1867 set_bit(R5_LOCKED, &dev->flags); 1868 set_bit(R5_Wantdrain, &dev->flags); 1869 if (!expand) 1870 clear_bit(R5_UPTODATE, &dev->flags); 1871 s->locked++; 1872 } 1873 } 1874 if (s->locked + 1 == disks) 1875 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) 1876 atomic_inc(&sh->raid_conf->pending_full_writes); 1877 } else { 1878 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || 1879 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); 1880 1881 sh->reconstruct_state = reconstruct_state_prexor_drain_run; 1882 set_bit(STRIPE_OP_PREXOR, &s->ops_request); 1883 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); 1884 set_bit(STRIPE_OP_POSTXOR, &s->ops_request); 1885 1886 for (i = disks; i--; ) { 1887 struct r5dev *dev = &sh->dev[i]; 1888 if (i == pd_idx) 1889 continue; 1890 1891 if (dev->towrite && 1892 (test_bit(R5_UPTODATE, &dev->flags) || 1893 test_bit(R5_Wantcompute, &dev->flags))) { 1894 set_bit(R5_Wantdrain, &dev->flags); 1895 set_bit(R5_LOCKED, &dev->flags); 1896 clear_bit(R5_UPTODATE, &dev->flags); 1897 s->locked++; 1898 } 1899 } 1900 } 1901 1902 /* keep the parity disk locked while asynchronous operations 1903 * are in flight 1904 */ 1905 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); 1906 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 1907 s->locked++; 1908 1909 pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n", 1910 __func__, (unsigned long long)sh->sector, 1911 s->locked, s->ops_request); 1912 } 1913 1914 /* 1915 * Each stripe/dev can have one or more bion attached. 1916 * toread/towrite point to the first in a chain. 1917 * The bi_next chain must be in order. 
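 * add_stripe_bio() below inserts a bio into that chain in bi_sector order,
 * rejects overlapping requests (setting R5_Overlap and returning 0), and
 * takes a reference on the bio for this stripe by incrementing
 * bi_phys_segments, matching the biased count described near the top of
 * this file.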
1918 */ 1919 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite) 1920 { 1921 struct bio **bip; 1922 raid5_conf_t *conf = sh->raid_conf; 1923 int firstwrite=0; 1924 1925 pr_debug("adding bh b#%llu to stripe s#%llu\n", 1926 (unsigned long long)bi->bi_sector, 1927 (unsigned long long)sh->sector); 1928 1929 1930 spin_lock(&sh->lock); 1931 spin_lock_irq(&conf->device_lock); 1932 if (forwrite) { 1933 bip = &sh->dev[dd_idx].towrite; 1934 if (*bip == NULL && sh->dev[dd_idx].written == NULL) 1935 firstwrite = 1; 1936 } else 1937 bip = &sh->dev[dd_idx].toread; 1938 while (*bip && (*bip)->bi_sector < bi->bi_sector) { 1939 if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector) 1940 goto overlap; 1941 bip = & (*bip)->bi_next; 1942 } 1943 if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9)) 1944 goto overlap; 1945 1946 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next); 1947 if (*bip) 1948 bi->bi_next = *bip; 1949 *bip = bi; 1950 bi->bi_phys_segments++; 1951 spin_unlock_irq(&conf->device_lock); 1952 spin_unlock(&sh->lock); 1953 1954 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", 1955 (unsigned long long)bi->bi_sector, 1956 (unsigned long long)sh->sector, dd_idx); 1957 1958 if (conf->mddev->bitmap && firstwrite) { 1959 bitmap_startwrite(conf->mddev->bitmap, sh->sector, 1960 STRIPE_SECTORS, 0); 1961 sh->bm_seq = conf->seq_flush+1; 1962 set_bit(STRIPE_BIT_DELAY, &sh->state); 1963 } 1964 1965 if (forwrite) { 1966 /* check if page is covered */ 1967 sector_t sector = sh->dev[dd_idx].sector; 1968 for (bi=sh->dev[dd_idx].towrite; 1969 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && 1970 bi && bi->bi_sector <= sector; 1971 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { 1972 if (bi->bi_sector + (bi->bi_size>>9) >= sector) 1973 sector = bi->bi_sector + (bi->bi_size>>9); 1974 } 1975 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) 1976 set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags); 1977 } 1978 return 1; 1979 1980 overlap: 1981 set_bit(R5_Overlap, &sh->dev[dd_idx].flags); 1982 spin_unlock_irq(&conf->device_lock); 1983 spin_unlock(&sh->lock); 1984 return 0; 1985 } 1986 1987 static void end_reshape(raid5_conf_t *conf); 1988 1989 static int page_is_zero(struct page *p) 1990 { 1991 char *a = page_address(p); 1992 return ((*(u32*)a) == 0 && 1993 memcmp(a, a+4, STRIPE_SIZE-4)==0); 1994 } 1995 1996 static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous, 1997 struct stripe_head *sh) 1998 { 1999 int sectors_per_chunk = 2000 previous ? (conf->prev_chunk >> 9) 2001 : (conf->chunk_size >> 9); 2002 int dd_idx; 2003 int chunk_offset = sector_div(stripe, sectors_per_chunk); 2004 int disks = previous ? 
conf->previous_raid_disks : conf->raid_disks; 2005 2006 raid5_compute_sector(conf, 2007 stripe * (disks - conf->max_degraded) 2008 *sectors_per_chunk + chunk_offset, 2009 previous, 2010 &dd_idx, sh); 2011 } 2012 2013 static void 2014 handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh, 2015 struct stripe_head_state *s, int disks, 2016 struct bio **return_bi) 2017 { 2018 int i; 2019 for (i = disks; i--; ) { 2020 struct bio *bi; 2021 int bitmap_end = 0; 2022 2023 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 2024 mdk_rdev_t *rdev; 2025 rcu_read_lock(); 2026 rdev = rcu_dereference(conf->disks[i].rdev); 2027 if (rdev && test_bit(In_sync, &rdev->flags)) 2028 /* multiple read failures in one stripe */ 2029 md_error(conf->mddev, rdev); 2030 rcu_read_unlock(); 2031 } 2032 spin_lock_irq(&conf->device_lock); 2033 /* fail all writes first */ 2034 bi = sh->dev[i].towrite; 2035 sh->dev[i].towrite = NULL; 2036 if (bi) { 2037 s->to_write--; 2038 bitmap_end = 1; 2039 } 2040 2041 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 2042 wake_up(&conf->wait_for_overlap); 2043 2044 while (bi && bi->bi_sector < 2045 sh->dev[i].sector + STRIPE_SECTORS) { 2046 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 2047 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2048 if (!raid5_dec_bi_phys_segments(bi)) { 2049 md_write_end(conf->mddev); 2050 bi->bi_next = *return_bi; 2051 *return_bi = bi; 2052 } 2053 bi = nextbi; 2054 } 2055 /* and fail all 'written' */ 2056 bi = sh->dev[i].written; 2057 sh->dev[i].written = NULL; 2058 if (bi) bitmap_end = 1; 2059 while (bi && bi->bi_sector < 2060 sh->dev[i].sector + STRIPE_SECTORS) { 2061 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); 2062 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2063 if (!raid5_dec_bi_phys_segments(bi)) { 2064 md_write_end(conf->mddev); 2065 bi->bi_next = *return_bi; 2066 *return_bi = bi; 2067 } 2068 bi = bi2; 2069 } 2070 2071 /* fail any reads if this device is non-operational and 2072 * the data has not reached the cache yet. 2073 */ 2074 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && 2075 (!test_bit(R5_Insync, &sh->dev[i].flags) || 2076 test_bit(R5_ReadError, &sh->dev[i].flags))) { 2077 bi = sh->dev[i].toread; 2078 sh->dev[i].toread = NULL; 2079 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 2080 wake_up(&conf->wait_for_overlap); 2081 if (bi) s->to_read--; 2082 while (bi && bi->bi_sector < 2083 sh->dev[i].sector + STRIPE_SECTORS) { 2084 struct bio *nextbi = 2085 r5_next_bio(bi, sh->dev[i].sector); 2086 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2087 if (!raid5_dec_bi_phys_segments(bi)) { 2088 bi->bi_next = *return_bi; 2089 *return_bi = bi; 2090 } 2091 bi = nextbi; 2092 } 2093 } 2094 spin_unlock_irq(&conf->device_lock); 2095 if (bitmap_end) 2096 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 2097 STRIPE_SECTORS, 0, 0); 2098 } 2099 2100 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 2101 if (atomic_dec_and_test(&conf->pending_full_writes)) 2102 md_wakeup_thread(conf->mddev->thread); 2103 } 2104 2105 /* fetch_block5 - checks the given member device to see if its data needs 2106 * to be read or computed to satisfy a request. 
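 * (For example, with disks == 5 and a single failed device: once the four
 * surviving blocks are up to date, s->uptodate == disks - 1, so the block
 * on the failed device is scheduled as a compute rather than a read, and
 * counting that pending compute gives uptodate + compute == disks.)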
2107 * 2108 * Returns 1 when no more member devices need to be checked, otherwise returns 2109 * 0 to tell the loop in handle_stripe_fill5 to continue 2110 */ 2111 static int fetch_block5(struct stripe_head *sh, struct stripe_head_state *s, 2112 int disk_idx, int disks) 2113 { 2114 struct r5dev *dev = &sh->dev[disk_idx]; 2115 struct r5dev *failed_dev = &sh->dev[s->failed_num]; 2116 2117 /* is the data in this block needed, and can we get it? */ 2118 if (!test_bit(R5_LOCKED, &dev->flags) && 2119 !test_bit(R5_UPTODATE, &dev->flags) && 2120 (dev->toread || 2121 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) || 2122 s->syncing || s->expanding || 2123 (s->failed && 2124 (failed_dev->toread || 2125 (failed_dev->towrite && 2126 !test_bit(R5_OVERWRITE, &failed_dev->flags)))))) { 2127 /* We would like to get this block, possibly by computing it, 2128 * otherwise read it if the backing disk is insync 2129 */ 2130 if ((s->uptodate == disks - 1) && 2131 (s->failed && disk_idx == s->failed_num)) { 2132 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 2133 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 2134 set_bit(R5_Wantcompute, &dev->flags); 2135 sh->ops.target = disk_idx; 2136 s->req_compute = 1; 2137 /* Careful: from this point on 'uptodate' is in the eye 2138 * of raid5_run_ops which services 'compute' operations 2139 * before writes. R5_Wantcompute flags a block that will 2140 * be R5_UPTODATE by the time it is needed for a 2141 * subsequent operation. 2142 */ 2143 s->uptodate++; 2144 return 1; /* uptodate + compute == disks */ 2145 } else if (test_bit(R5_Insync, &dev->flags)) { 2146 set_bit(R5_LOCKED, &dev->flags); 2147 set_bit(R5_Wantread, &dev->flags); 2148 s->locked++; 2149 pr_debug("Reading block %d (sync=%d)\n", disk_idx, 2150 s->syncing); 2151 } 2152 } 2153 2154 return 0; 2155 } 2156 2157 /** 2158 * handle_stripe_fill5 - read or compute data to satisfy pending requests. 
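 *
 * Iterates fetch_block5() over every device, stopping early once a compute
 * has been scheduled that will bring the whole stripe up to date.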
2159 */ 2160 static void handle_stripe_fill5(struct stripe_head *sh, 2161 struct stripe_head_state *s, int disks) 2162 { 2163 int i; 2164 2165 /* look for blocks to read/compute, skip this if a compute 2166 * is already in flight, or if the stripe contents are in the 2167 * midst of changing due to a write 2168 */ 2169 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && 2170 !sh->reconstruct_state) 2171 for (i = disks; i--; ) 2172 if (fetch_block5(sh, s, i, disks)) 2173 break; 2174 set_bit(STRIPE_HANDLE, &sh->state); 2175 } 2176 2177 static void handle_stripe_fill6(struct stripe_head *sh, 2178 struct stripe_head_state *s, struct r6_state *r6s, 2179 int disks) 2180 { 2181 int i; 2182 for (i = disks; i--; ) { 2183 struct r5dev *dev = &sh->dev[i]; 2184 if (!test_bit(R5_LOCKED, &dev->flags) && 2185 !test_bit(R5_UPTODATE, &dev->flags) && 2186 (dev->toread || (dev->towrite && 2187 !test_bit(R5_OVERWRITE, &dev->flags)) || 2188 s->syncing || s->expanding || 2189 (s->failed >= 1 && 2190 (sh->dev[r6s->failed_num[0]].toread || 2191 s->to_write)) || 2192 (s->failed >= 2 && 2193 (sh->dev[r6s->failed_num[1]].toread || 2194 s->to_write)))) { 2195 /* we would like to get this block, possibly 2196 * by computing it, but we might not be able to 2197 */ 2198 if ((s->uptodate == disks - 1) && 2199 (s->failed && (i == r6s->failed_num[0] || 2200 i == r6s->failed_num[1]))) { 2201 pr_debug("Computing stripe %llu block %d\n", 2202 (unsigned long long)sh->sector, i); 2203 compute_block_1(sh, i, 0); 2204 s->uptodate++; 2205 } else if ( s->uptodate == disks-2 && s->failed >= 2 ) { 2206 /* Computing 2-failure is *very* expensive; only 2207 * do it if failed >= 2 2208 */ 2209 int other; 2210 for (other = disks; other--; ) { 2211 if (other == i) 2212 continue; 2213 if (!test_bit(R5_UPTODATE, 2214 &sh->dev[other].flags)) 2215 break; 2216 } 2217 BUG_ON(other < 0); 2218 pr_debug("Computing stripe %llu blocks %d,%d\n", 2219 (unsigned long long)sh->sector, 2220 i, other); 2221 compute_block_2(sh, i, other); 2222 s->uptodate += 2; 2223 } else if (test_bit(R5_Insync, &dev->flags)) { 2224 set_bit(R5_LOCKED, &dev->flags); 2225 set_bit(R5_Wantread, &dev->flags); 2226 s->locked++; 2227 pr_debug("Reading block %d (sync=%d)\n", 2228 i, s->syncing); 2229 } 2230 } 2231 } 2232 set_bit(STRIPE_HANDLE, &sh->state); 2233 } 2234 2235 2236 /* handle_stripe_clean_event 2237 * any written block on an uptodate or failed drive can be returned. 2238 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but 2239 * never LOCKED, so we don't need to test 'failed' directly. 
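 *
 * Worked example of the completion accounting (illustrative numbers): a
 * write bio covering three stripes enters make_request() with its biased
 * bi_phys_segments count set to 1, is raised to 4 as add_stripe_bio()
 * attaches it to three stripe+devices, and is returned to its submitter
 * only when the final decrement (one in make_request() itself, plus one
 * per stripe as that stripe's data becomes safe) brings the count to zero.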
2240 */ 2241 static void handle_stripe_clean_event(raid5_conf_t *conf, 2242 struct stripe_head *sh, int disks, struct bio **return_bi) 2243 { 2244 int i; 2245 struct r5dev *dev; 2246 2247 for (i = disks; i--; ) 2248 if (sh->dev[i].written) { 2249 dev = &sh->dev[i]; 2250 if (!test_bit(R5_LOCKED, &dev->flags) && 2251 test_bit(R5_UPTODATE, &dev->flags)) { 2252 /* We can return any write requests */ 2253 struct bio *wbi, *wbi2; 2254 int bitmap_end = 0; 2255 pr_debug("Return write for disc %d\n", i); 2256 spin_lock_irq(&conf->device_lock); 2257 wbi = dev->written; 2258 dev->written = NULL; 2259 while (wbi && wbi->bi_sector < 2260 dev->sector + STRIPE_SECTORS) { 2261 wbi2 = r5_next_bio(wbi, dev->sector); 2262 if (!raid5_dec_bi_phys_segments(wbi)) { 2263 md_write_end(conf->mddev); 2264 wbi->bi_next = *return_bi; 2265 *return_bi = wbi; 2266 } 2267 wbi = wbi2; 2268 } 2269 if (dev->towrite == NULL) 2270 bitmap_end = 1; 2271 spin_unlock_irq(&conf->device_lock); 2272 if (bitmap_end) 2273 bitmap_endwrite(conf->mddev->bitmap, 2274 sh->sector, 2275 STRIPE_SECTORS, 2276 !test_bit(STRIPE_DEGRADED, &sh->state), 2277 0); 2278 } 2279 } 2280 2281 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 2282 if (atomic_dec_and_test(&conf->pending_full_writes)) 2283 md_wakeup_thread(conf->mddev->thread); 2284 } 2285 2286 static void handle_stripe_dirtying5(raid5_conf_t *conf, 2287 struct stripe_head *sh, struct stripe_head_state *s, int disks) 2288 { 2289 int rmw = 0, rcw = 0, i; 2290 for (i = disks; i--; ) { 2291 /* would I have to read this buffer for read_modify_write */ 2292 struct r5dev *dev = &sh->dev[i]; 2293 if ((dev->towrite || i == sh->pd_idx) && 2294 !test_bit(R5_LOCKED, &dev->flags) && 2295 !(test_bit(R5_UPTODATE, &dev->flags) || 2296 test_bit(R5_Wantcompute, &dev->flags))) { 2297 if (test_bit(R5_Insync, &dev->flags)) 2298 rmw++; 2299 else 2300 rmw += 2*disks; /* cannot read it */ 2301 } 2302 /* Would I have to read this buffer for reconstruct_write */ 2303 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx && 2304 !test_bit(R5_LOCKED, &dev->flags) && 2305 !(test_bit(R5_UPTODATE, &dev->flags) || 2306 test_bit(R5_Wantcompute, &dev->flags))) { 2307 if (test_bit(R5_Insync, &dev->flags)) rcw++; 2308 else 2309 rcw += 2*disks; 2310 } 2311 } 2312 pr_debug("for sector %llu, rmw=%d rcw=%d\n", 2313 (unsigned long long)sh->sector, rmw, rcw); 2314 set_bit(STRIPE_HANDLE, &sh->state); 2315 if (rmw < rcw && rmw > 0) 2316 /* prefer read-modify-write, but need to get some data */ 2317 for (i = disks; i--; ) { 2318 struct r5dev *dev = &sh->dev[i]; 2319 if ((dev->towrite || i == sh->pd_idx) && 2320 !test_bit(R5_LOCKED, &dev->flags) && 2321 !(test_bit(R5_UPTODATE, &dev->flags) || 2322 test_bit(R5_Wantcompute, &dev->flags)) && 2323 test_bit(R5_Insync, &dev->flags)) { 2324 if ( 2325 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2326 pr_debug("Read_old block " 2327 "%d for r-m-w\n", i); 2328 set_bit(R5_LOCKED, &dev->flags); 2329 set_bit(R5_Wantread, &dev->flags); 2330 s->locked++; 2331 } else { 2332 set_bit(STRIPE_DELAYED, &sh->state); 2333 set_bit(STRIPE_HANDLE, &sh->state); 2334 } 2335 } 2336 } 2337 if (rcw <= rmw && rcw > 0) 2338 /* want reconstruct write, but need to get some data */ 2339 for (i = disks; i--; ) { 2340 struct r5dev *dev = &sh->dev[i]; 2341 if (!test_bit(R5_OVERWRITE, &dev->flags) && 2342 i != sh->pd_idx && 2343 !test_bit(R5_LOCKED, &dev->flags) && 2344 !(test_bit(R5_UPTODATE, &dev->flags) || 2345 test_bit(R5_Wantcompute, &dev->flags)) && 2346 test_bit(R5_Insync, &dev->flags)) { 2347 if ( 2348 
test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2349 pr_debug("Read_old block " 2350 "%d for Reconstruct\n", i); 2351 set_bit(R5_LOCKED, &dev->flags); 2352 set_bit(R5_Wantread, &dev->flags); 2353 s->locked++; 2354 } else { 2355 set_bit(STRIPE_DELAYED, &sh->state); 2356 set_bit(STRIPE_HANDLE, &sh->state); 2357 } 2358 } 2359 } 2360 /* now if nothing is locked, and if we have enough data, 2361 * we can start a write request 2362 */ 2363 /* since handle_stripe can be called at any time we need to handle the 2364 * case where a compute block operation has been submitted and then a 2365 * subsequent call wants to start a write request. raid5_run_ops only 2366 * handles the case where compute block and postxor are requested 2367 * simultaneously. If this is not the case then new writes need to be 2368 * held off until the compute completes. 2369 */ 2370 if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && 2371 (s->locked == 0 && (rcw == 0 || rmw == 0) && 2372 !test_bit(STRIPE_BIT_DELAY, &sh->state))) 2373 schedule_reconstruction5(sh, s, rcw == 0, 0); 2374 } 2375 2376 static void handle_stripe_dirtying6(raid5_conf_t *conf, 2377 struct stripe_head *sh, struct stripe_head_state *s, 2378 struct r6_state *r6s, int disks) 2379 { 2380 int rcw = 0, must_compute = 0, pd_idx = sh->pd_idx, i; 2381 int qd_idx = sh->qd_idx; 2382 for (i = disks; i--; ) { 2383 struct r5dev *dev = &sh->dev[i]; 2384 /* Would I have to read this buffer for reconstruct_write */ 2385 if (!test_bit(R5_OVERWRITE, &dev->flags) 2386 && i != pd_idx && i != qd_idx 2387 && (!test_bit(R5_LOCKED, &dev->flags) 2388 ) && 2389 !test_bit(R5_UPTODATE, &dev->flags)) { 2390 if (test_bit(R5_Insync, &dev->flags)) rcw++; 2391 else { 2392 pr_debug("raid6: must_compute: " 2393 "disk %d flags=%#lx\n", i, dev->flags); 2394 must_compute++; 2395 } 2396 } 2397 } 2398 pr_debug("for sector %llu, rcw=%d, must_compute=%d\n", 2399 (unsigned long long)sh->sector, rcw, must_compute); 2400 set_bit(STRIPE_HANDLE, &sh->state); 2401 2402 if (rcw > 0) 2403 /* want reconstruct write, but need to get some data */ 2404 for (i = disks; i--; ) { 2405 struct r5dev *dev = &sh->dev[i]; 2406 if (!test_bit(R5_OVERWRITE, &dev->flags) 2407 && !(s->failed == 0 && (i == pd_idx || i == qd_idx)) 2408 && !test_bit(R5_LOCKED, &dev->flags) && 2409 !test_bit(R5_UPTODATE, &dev->flags) && 2410 test_bit(R5_Insync, &dev->flags)) { 2411 if ( 2412 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2413 pr_debug("Read_old stripe %llu " 2414 "block %d for Reconstruct\n", 2415 (unsigned long long)sh->sector, i); 2416 set_bit(R5_LOCKED, &dev->flags); 2417 set_bit(R5_Wantread, &dev->flags); 2418 s->locked++; 2419 } else { 2420 pr_debug("Request delayed stripe %llu " 2421 "block %d for Reconstruct\n", 2422 (unsigned long long)sh->sector, i); 2423 set_bit(STRIPE_DELAYED, &sh->state); 2424 set_bit(STRIPE_HANDLE, &sh->state); 2425 } 2426 } 2427 } 2428 /* now if nothing is locked, and if we have enough data, we can start a 2429 * write request 2430 */ 2431 if (s->locked == 0 && rcw == 0 && 2432 !test_bit(STRIPE_BIT_DELAY, &sh->state)) { 2433 if (must_compute > 0) { 2434 /* We have failed blocks and need to compute them */ 2435 switch (s->failed) { 2436 case 0: 2437 BUG(); 2438 case 1: 2439 compute_block_1(sh, r6s->failed_num[0], 0); 2440 break; 2441 case 2: 2442 compute_block_2(sh, r6s->failed_num[0], 2443 r6s->failed_num[1]); 2444 break; 2445 default: /* This request should have been failed? 
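 * (RAID-6 cannot reconstruct more than two missing blocks, so a
 * stripe that reaches this point should already have been failed)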
*/ 2446 BUG(); 2447 } 2448 } 2449 2450 pr_debug("Computing parity for stripe %llu\n", 2451 (unsigned long long)sh->sector); 2452 compute_parity6(sh, RECONSTRUCT_WRITE); 2453 /* now every locked buffer is ready to be written */ 2454 for (i = disks; i--; ) 2455 if (test_bit(R5_LOCKED, &sh->dev[i].flags)) { 2456 pr_debug("Writing stripe %llu block %d\n", 2457 (unsigned long long)sh->sector, i); 2458 s->locked++; 2459 set_bit(R5_Wantwrite, &sh->dev[i].flags); 2460 } 2461 if (s->locked == disks) 2462 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) 2463 atomic_inc(&conf->pending_full_writes); 2464 /* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */ 2465 set_bit(STRIPE_INSYNC, &sh->state); 2466 2467 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2468 atomic_dec(&conf->preread_active_stripes); 2469 if (atomic_read(&conf->preread_active_stripes) < 2470 IO_THRESHOLD) 2471 md_wakeup_thread(conf->mddev->thread); 2472 } 2473 } 2474 } 2475 2476 static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh, 2477 struct stripe_head_state *s, int disks) 2478 { 2479 struct r5dev *dev = NULL; 2480 2481 set_bit(STRIPE_HANDLE, &sh->state); 2482 2483 switch (sh->check_state) { 2484 case check_state_idle: 2485 /* start a new check operation if there are no failures */ 2486 if (s->failed == 0) { 2487 BUG_ON(s->uptodate != disks); 2488 sh->check_state = check_state_run; 2489 set_bit(STRIPE_OP_CHECK, &s->ops_request); 2490 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); 2491 s->uptodate--; 2492 break; 2493 } 2494 dev = &sh->dev[s->failed_num]; 2495 /* fall through */ 2496 case check_state_compute_result: 2497 sh->check_state = check_state_idle; 2498 if (!dev) 2499 dev = &sh->dev[sh->pd_idx]; 2500 2501 /* check that a write has not made the stripe insync */ 2502 if (test_bit(STRIPE_INSYNC, &sh->state)) 2503 break; 2504 2505 /* either failed parity check, or recovery is happening */ 2506 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags)); 2507 BUG_ON(s->uptodate != disks); 2508 2509 set_bit(R5_LOCKED, &dev->flags); 2510 s->locked++; 2511 set_bit(R5_Wantwrite, &dev->flags); 2512 2513 clear_bit(STRIPE_DEGRADED, &sh->state); 2514 set_bit(STRIPE_INSYNC, &sh->state); 2515 break; 2516 case check_state_run: 2517 break; /* we will be called again upon completion */ 2518 case check_state_check_result: 2519 sh->check_state = check_state_idle; 2520 2521 /* if a failure occurred during the check operation, leave 2522 * STRIPE_INSYNC not set and let the stripe be handled again 2523 */ 2524 if (s->failed) 2525 break; 2526 2527 /* handle a successful check operation, if parity is correct 2528 * we are done. Otherwise update the mismatch count and repair 2529 * parity if !MD_RECOVERY_CHECK 2530 */ 2531 if (sh->ops.zero_sum_result == 0) 2532 /* parity is correct (on disc, 2533 * not in buffer any more) 2534 */ 2535 set_bit(STRIPE_INSYNC, &sh->state); 2536 else { 2537 conf->mddev->resync_mismatches += STRIPE_SECTORS; 2538 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 2539 /* don't try to repair!! 
*/ 2540 set_bit(STRIPE_INSYNC, &sh->state); 2541 else { 2542 sh->check_state = check_state_compute_run; 2543 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 2544 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 2545 set_bit(R5_Wantcompute, 2546 &sh->dev[sh->pd_idx].flags); 2547 sh->ops.target = sh->pd_idx; 2548 s->uptodate++; 2549 } 2550 } 2551 break; 2552 case check_state_compute_run: 2553 break; 2554 default: 2555 printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n", 2556 __func__, sh->check_state, 2557 (unsigned long long) sh->sector); 2558 BUG(); 2559 } 2560 } 2561 2562 2563 static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh, 2564 struct stripe_head_state *s, 2565 struct r6_state *r6s, struct page *tmp_page, 2566 int disks) 2567 { 2568 int update_p = 0, update_q = 0; 2569 struct r5dev *dev; 2570 int pd_idx = sh->pd_idx; 2571 int qd_idx = sh->qd_idx; 2572 2573 set_bit(STRIPE_HANDLE, &sh->state); 2574 2575 BUG_ON(s->failed > 2); 2576 BUG_ON(s->uptodate < disks); 2577 /* Want to check and possibly repair P and Q. 2578 * However there could be one 'failed' device, in which 2579 * case we can only check one of them, possibly using the 2580 * other to generate missing data 2581 */ 2582 2583 /* If !tmp_page, we cannot do the calculations, 2584 * but as we have set STRIPE_HANDLE, we will soon be called 2585 * by stripe_handle with a tmp_page - just wait until then. 2586 */ 2587 if (tmp_page) { 2588 if (s->failed == r6s->q_failed) { 2589 /* The only possible failed device holds 'Q', so it 2590 * makes sense to check P (If anything else were failed, 2591 * we would have used P to recreate it). 2592 */ 2593 compute_block_1(sh, pd_idx, 1); 2594 if (!page_is_zero(sh->dev[pd_idx].page)) { 2595 compute_block_1(sh, pd_idx, 0); 2596 update_p = 1; 2597 } 2598 } 2599 if (!r6s->q_failed && s->failed < 2) { 2600 /* q is not failed, and we didn't use it to generate 2601 * anything, so it makes sense to check it 2602 */ 2603 memcpy(page_address(tmp_page), 2604 page_address(sh->dev[qd_idx].page), 2605 STRIPE_SIZE); 2606 compute_parity6(sh, UPDATE_PARITY); 2607 if (memcmp(page_address(tmp_page), 2608 page_address(sh->dev[qd_idx].page), 2609 STRIPE_SIZE) != 0) { 2610 clear_bit(STRIPE_INSYNC, &sh->state); 2611 update_q = 1; 2612 } 2613 } 2614 if (update_p || update_q) { 2615 conf->mddev->resync_mismatches += STRIPE_SECTORS; 2616 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 2617 /* don't try to repair!! 
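 * (a 'check' pass only counts mismatches in resync_mismatches;
 * parity is rewritten only when MD_RECOVERY_CHECK is clear,
 * i.e. on a 'repair' pass)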
*/ 2618 update_p = update_q = 0; 2619 } 2620 2621 /* now write out any block on a failed drive, 2622 * or P or Q if they need it 2623 */ 2624 2625 if (s->failed == 2) { 2626 dev = &sh->dev[r6s->failed_num[1]]; 2627 s->locked++; 2628 set_bit(R5_LOCKED, &dev->flags); 2629 set_bit(R5_Wantwrite, &dev->flags); 2630 } 2631 if (s->failed >= 1) { 2632 dev = &sh->dev[r6s->failed_num[0]]; 2633 s->locked++; 2634 set_bit(R5_LOCKED, &dev->flags); 2635 set_bit(R5_Wantwrite, &dev->flags); 2636 } 2637 2638 if (update_p) { 2639 dev = &sh->dev[pd_idx]; 2640 s->locked++; 2641 set_bit(R5_LOCKED, &dev->flags); 2642 set_bit(R5_Wantwrite, &dev->flags); 2643 } 2644 if (update_q) { 2645 dev = &sh->dev[qd_idx]; 2646 s->locked++; 2647 set_bit(R5_LOCKED, &dev->flags); 2648 set_bit(R5_Wantwrite, &dev->flags); 2649 } 2650 clear_bit(STRIPE_DEGRADED, &sh->state); 2651 2652 set_bit(STRIPE_INSYNC, &sh->state); 2653 } 2654 } 2655 2656 static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh, 2657 struct r6_state *r6s) 2658 { 2659 int i; 2660 2661 /* We have read all the blocks in this stripe and now we need to 2662 * copy some of them into a target stripe for expand. 2663 */ 2664 struct dma_async_tx_descriptor *tx = NULL; 2665 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state); 2666 for (i = 0; i < sh->disks; i++) 2667 if (i != sh->pd_idx && i != sh->qd_idx) { 2668 int dd_idx, j; 2669 struct stripe_head *sh2; 2670 2671 sector_t bn = compute_blocknr(sh, i, 1); 2672 sector_t s = raid5_compute_sector(conf, bn, 0, 2673 &dd_idx, NULL); 2674 sh2 = get_active_stripe(conf, s, 0, 1, 1); 2675 if (sh2 == NULL) 2676 /* so far only the early blocks of this stripe 2677 * have been requested. When later blocks 2678 * get requested, we will try again 2679 */ 2680 continue; 2681 if (!test_bit(STRIPE_EXPANDING, &sh2->state) || 2682 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) { 2683 /* must have already done this block */ 2684 release_stripe(sh2); 2685 continue; 2686 } 2687 2688 /* place all the copies on one channel */ 2689 tx = async_memcpy(sh2->dev[dd_idx].page, 2690 sh->dev[i].page, 0, 0, STRIPE_SIZE, 2691 ASYNC_TX_DEP_ACK, tx, NULL, NULL); 2692 2693 set_bit(R5_Expanded, &sh2->dev[dd_idx].flags); 2694 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags); 2695 for (j = 0; j < conf->raid_disks; j++) 2696 if (j != sh2->pd_idx && 2697 (!r6s || j != sh2->qd_idx) && 2698 !test_bit(R5_Expanded, &sh2->dev[j].flags)) 2699 break; 2700 if (j == conf->raid_disks) { 2701 set_bit(STRIPE_EXPAND_READY, &sh2->state); 2702 set_bit(STRIPE_HANDLE, &sh2->state); 2703 } 2704 release_stripe(sh2); 2705 2706 } 2707 /* done submitting copies, wait for them to complete */ 2708 if (tx) { 2709 async_tx_ack(tx); 2710 dma_wait_for_async_tx(tx); 2711 } 2712 } 2713 2714 2715 /* 2716 * handle_stripe - do things to a stripe. 2717 * 2718 * We lock the stripe and then examine the state of various bits 2719 * to see what needs to be done. 2720 * Possible results: 2721 * return some read request which now have data 2722 * return some write requests which are safely on disc 2723 * schedule a read on some buffers 2724 * schedule a write of some buffers 2725 * return confirmation of parity correctness 2726 * 2727 * buffers are taken off read_list or write_list, and bh_cache buffers 2728 * get BH_Lock set before the stripe lock is released. 
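 *
 * Illustrative sketch only of how a stripe typically reaches this code:
 * the md thread (raid5d(), outside this excerpt) pulls a stripe off the
 * queues and calls the level-appropriate handler through handle_stripe().
 * conf->spare_page is assumed here to be the per-array scratch page used
 * for RAID-6 parity checks; locking and error paths are elided:
 *
 *	sh = __get_priority_stripe(conf);
 *	if (sh) {
 *		handle_stripe(sh, conf->spare_page);
 *		release_stripe(sh);
 *	}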
2729 * 2730 */ 2731 2732 static bool handle_stripe5(struct stripe_head *sh) 2733 { 2734 raid5_conf_t *conf = sh->raid_conf; 2735 int disks = sh->disks, i; 2736 struct bio *return_bi = NULL; 2737 struct stripe_head_state s; 2738 struct r5dev *dev; 2739 mdk_rdev_t *blocked_rdev = NULL; 2740 int prexor; 2741 2742 memset(&s, 0, sizeof(s)); 2743 pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d " 2744 "reconstruct:%d\n", (unsigned long long)sh->sector, sh->state, 2745 atomic_read(&sh->count), sh->pd_idx, sh->check_state, 2746 sh->reconstruct_state); 2747 2748 spin_lock(&sh->lock); 2749 clear_bit(STRIPE_HANDLE, &sh->state); 2750 clear_bit(STRIPE_DELAYED, &sh->state); 2751 2752 s.syncing = test_bit(STRIPE_SYNCING, &sh->state); 2753 s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); 2754 s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); 2755 2756 /* Now to look around and see what can be done */ 2757 rcu_read_lock(); 2758 for (i=disks; i--; ) { 2759 mdk_rdev_t *rdev; 2760 struct r5dev *dev = &sh->dev[i]; 2761 clear_bit(R5_Insync, &dev->flags); 2762 2763 pr_debug("check %d: state 0x%lx toread %p read %p write %p " 2764 "written %p\n", i, dev->flags, dev->toread, dev->read, 2765 dev->towrite, dev->written); 2766 2767 /* maybe we can request a biofill operation 2768 * 2769 * new wantfill requests are only permitted while 2770 * ops_complete_biofill is guaranteed to be inactive 2771 */ 2772 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread && 2773 !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) 2774 set_bit(R5_Wantfill, &dev->flags); 2775 2776 /* now count some things */ 2777 if (test_bit(R5_LOCKED, &dev->flags)) s.locked++; 2778 if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++; 2779 if (test_bit(R5_Wantcompute, &dev->flags)) s.compute++; 2780 2781 if (test_bit(R5_Wantfill, &dev->flags)) 2782 s.to_fill++; 2783 else if (dev->toread) 2784 s.to_read++; 2785 if (dev->towrite) { 2786 s.to_write++; 2787 if (!test_bit(R5_OVERWRITE, &dev->flags)) 2788 s.non_overwrite++; 2789 } 2790 if (dev->written) 2791 s.written++; 2792 rdev = rcu_dereference(conf->disks[i].rdev); 2793 if (blocked_rdev == NULL && 2794 rdev && unlikely(test_bit(Blocked, &rdev->flags))) { 2795 blocked_rdev = rdev; 2796 atomic_inc(&rdev->nr_pending); 2797 } 2798 if (!rdev || !test_bit(In_sync, &rdev->flags)) { 2799 /* The ReadError flag will just be confusing now */ 2800 clear_bit(R5_ReadError, &dev->flags); 2801 clear_bit(R5_ReWrite, &dev->flags); 2802 } 2803 if (!rdev || !test_bit(In_sync, &rdev->flags) 2804 || test_bit(R5_ReadError, &dev->flags)) { 2805 s.failed++; 2806 s.failed_num = i; 2807 } else 2808 set_bit(R5_Insync, &dev->flags); 2809 } 2810 rcu_read_unlock(); 2811 2812 if (unlikely(blocked_rdev)) { 2813 if (s.syncing || s.expanding || s.expanded || 2814 s.to_write || s.written) { 2815 set_bit(STRIPE_HANDLE, &sh->state); 2816 goto unlock; 2817 } 2818 /* There is nothing for the blocked_rdev to block */ 2819 rdev_dec_pending(blocked_rdev, conf->mddev); 2820 blocked_rdev = NULL; 2821 } 2822 2823 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { 2824 set_bit(STRIPE_OP_BIOFILL, &s.ops_request); 2825 set_bit(STRIPE_BIOFILL_RUN, &sh->state); 2826 } 2827 2828 pr_debug("locked=%d uptodate=%d to_read=%d" 2829 " to_write=%d failed=%d failed_num=%d\n", 2830 s.locked, s.uptodate, s.to_read, s.to_write, 2831 s.failed, s.failed_num); 2832 /* check if the array has lost two devices and, if so, some requests might 2833 * need to be failed 2834 */ 2835 if (s.failed > 1 && s.to_read+s.to_write+s.written) 
2836 handle_failed_stripe(conf, sh, &s, disks, &return_bi); 2837 if (s.failed > 1 && s.syncing) { 2838 md_done_sync(conf->mddev, STRIPE_SECTORS,0); 2839 clear_bit(STRIPE_SYNCING, &sh->state); 2840 s.syncing = 0; 2841 } 2842 2843 /* might be able to return some write requests if the parity block 2844 * is safe, or on a failed drive 2845 */ 2846 dev = &sh->dev[sh->pd_idx]; 2847 if ( s.written && 2848 ((test_bit(R5_Insync, &dev->flags) && 2849 !test_bit(R5_LOCKED, &dev->flags) && 2850 test_bit(R5_UPTODATE, &dev->flags)) || 2851 (s.failed == 1 && s.failed_num == sh->pd_idx))) 2852 handle_stripe_clean_event(conf, sh, disks, &return_bi); 2853 2854 /* Now we might consider reading some blocks, either to check/generate 2855 * parity, or to satisfy requests 2856 * or to load a block that is being partially written. 2857 */ 2858 if (s.to_read || s.non_overwrite || 2859 (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding) 2860 handle_stripe_fill5(sh, &s, disks); 2861 2862 /* Now we check to see if any write operations have recently 2863 * completed 2864 */ 2865 prexor = 0; 2866 if (sh->reconstruct_state == reconstruct_state_prexor_drain_result) 2867 prexor = 1; 2868 if (sh->reconstruct_state == reconstruct_state_drain_result || 2869 sh->reconstruct_state == reconstruct_state_prexor_drain_result) { 2870 sh->reconstruct_state = reconstruct_state_idle; 2871 2872 /* All the 'written' buffers and the parity block are ready to 2873 * be written back to disk 2874 */ 2875 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags)); 2876 for (i = disks; i--; ) { 2877 dev = &sh->dev[i]; 2878 if (test_bit(R5_LOCKED, &dev->flags) && 2879 (i == sh->pd_idx || dev->written)) { 2880 pr_debug("Writing block %d\n", i); 2881 set_bit(R5_Wantwrite, &dev->flags); 2882 if (prexor) 2883 continue; 2884 if (!test_bit(R5_Insync, &dev->flags) || 2885 (i == sh->pd_idx && s.failed == 0)) 2886 set_bit(STRIPE_INSYNC, &sh->state); 2887 } 2888 } 2889 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2890 atomic_dec(&conf->preread_active_stripes); 2891 if (atomic_read(&conf->preread_active_stripes) < 2892 IO_THRESHOLD) 2893 md_wakeup_thread(conf->mddev->thread); 2894 } 2895 } 2896 2897 /* Now to consider new write requests and what else, if anything 2898 * should be read. We do not handle new writes when: 2899 * 1/ A 'write' operation (copy+xor) is already in flight. 2900 * 2/ A 'check' operation is in flight, as it may clobber the parity 2901 * block. 2902 */ 2903 if (s.to_write && !sh->reconstruct_state && !sh->check_state) 2904 handle_stripe_dirtying5(conf, sh, &s, disks); 2905 2906 /* maybe we need to check and possibly fix the parity for this stripe 2907 * Any reads will already have been scheduled, so we just see if enough 2908 * data is available. The parity check is held off while parity 2909 * dependent operations are in flight. 
2910 */ 2911 if (sh->check_state || 2912 (s.syncing && s.locked == 0 && 2913 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && 2914 !test_bit(STRIPE_INSYNC, &sh->state))) 2915 handle_parity_checks5(conf, sh, &s, disks); 2916 2917 if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { 2918 md_done_sync(conf->mddev, STRIPE_SECTORS,1); 2919 clear_bit(STRIPE_SYNCING, &sh->state); 2920 } 2921 2922 /* If the failed drive is just a ReadError, then we might need to progress 2923 * the repair/check process 2924 */ 2925 if (s.failed == 1 && !conf->mddev->ro && 2926 test_bit(R5_ReadError, &sh->dev[s.failed_num].flags) 2927 && !test_bit(R5_LOCKED, &sh->dev[s.failed_num].flags) 2928 && test_bit(R5_UPTODATE, &sh->dev[s.failed_num].flags) 2929 ) { 2930 dev = &sh->dev[s.failed_num]; 2931 if (!test_bit(R5_ReWrite, &dev->flags)) { 2932 set_bit(R5_Wantwrite, &dev->flags); 2933 set_bit(R5_ReWrite, &dev->flags); 2934 set_bit(R5_LOCKED, &dev->flags); 2935 s.locked++; 2936 } else { 2937 /* let's read it back */ 2938 set_bit(R5_Wantread, &dev->flags); 2939 set_bit(R5_LOCKED, &dev->flags); 2940 s.locked++; 2941 } 2942 } 2943 2944 /* Finish reconstruct operations initiated by the expansion process */ 2945 if (sh->reconstruct_state == reconstruct_state_result) { 2946 struct stripe_head *sh2 2947 = get_active_stripe(conf, sh->sector, 1, 1, 1); 2948 if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) { 2949 /* sh cannot be written until sh2 has been read. 2950 * so arrange for sh to be delayed a little 2951 */ 2952 set_bit(STRIPE_DELAYED, &sh->state); 2953 set_bit(STRIPE_HANDLE, &sh->state); 2954 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, 2955 &sh2->state)) 2956 atomic_inc(&conf->preread_active_stripes); 2957 release_stripe(sh2); 2958 goto unlock; 2959 } 2960 if (sh2) 2961 release_stripe(sh2); 2962 2963 sh->reconstruct_state = reconstruct_state_idle; 2964 clear_bit(STRIPE_EXPANDING, &sh->state); 2965 for (i = conf->raid_disks; i--; ) { 2966 set_bit(R5_Wantwrite, &sh->dev[i].flags); 2967 set_bit(R5_LOCKED, &sh->dev[i].flags); 2968 s.locked++; 2969 } 2970 } 2971 2972 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && 2973 !sh->reconstruct_state) { 2974 /* Need to write out all blocks after computing parity */ 2975 sh->disks = conf->raid_disks; 2976 stripe_set_idx(sh->sector, conf, 0, sh); 2977 schedule_reconstruction5(sh, &s, 1, 1); 2978 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { 2979 clear_bit(STRIPE_EXPAND_READY, &sh->state); 2980 atomic_dec(&conf->reshape_stripes); 2981 wake_up(&conf->wait_for_overlap); 2982 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 2983 } 2984 2985 if (s.expanding && s.locked == 0 && 2986 !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) 2987 handle_stripe_expansion(conf, sh, NULL); 2988 2989 unlock: 2990 spin_unlock(&sh->lock); 2991 2992 /* wait for this device to become unblocked */ 2993 if (unlikely(blocked_rdev)) 2994 md_wait_for_blocked_rdev(blocked_rdev, conf->mddev); 2995 2996 if (s.ops_request) 2997 raid5_run_ops(sh, s.ops_request); 2998 2999 ops_run_io(sh, &s); 3000 3001 return_io(return_bi); 3002 3003 return blocked_rdev == NULL; 3004 } 3005 3006 static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page) 3007 { 3008 raid5_conf_t *conf = sh->raid_conf; 3009 int disks = sh->disks; 3010 struct bio *return_bi = NULL; 3011 int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx; 3012 struct stripe_head_state s; 3013 struct r6_state r6s; 3014 struct r5dev *dev, *pdev, *qdev; 3015 mdk_rdev_t *blocked_rdev = NULL; 3016 3017 
pr_debug("handling stripe %llu, state=%#lx cnt=%d, " 3018 "pd_idx=%d, qd_idx=%d\n", 3019 (unsigned long long)sh->sector, sh->state, 3020 atomic_read(&sh->count), pd_idx, qd_idx); 3021 memset(&s, 0, sizeof(s)); 3022 3023 spin_lock(&sh->lock); 3024 clear_bit(STRIPE_HANDLE, &sh->state); 3025 clear_bit(STRIPE_DELAYED, &sh->state); 3026 3027 s.syncing = test_bit(STRIPE_SYNCING, &sh->state); 3028 s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); 3029 s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); 3030 /* Now to look around and see what can be done */ 3031 3032 rcu_read_lock(); 3033 for (i=disks; i--; ) { 3034 mdk_rdev_t *rdev; 3035 dev = &sh->dev[i]; 3036 clear_bit(R5_Insync, &dev->flags); 3037 3038 pr_debug("check %d: state 0x%lx read %p write %p written %p\n", 3039 i, dev->flags, dev->toread, dev->towrite, dev->written); 3040 /* maybe we can reply to a read */ 3041 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) { 3042 struct bio *rbi, *rbi2; 3043 pr_debug("Return read for disc %d\n", i); 3044 spin_lock_irq(&conf->device_lock); 3045 rbi = dev->toread; 3046 dev->toread = NULL; 3047 if (test_and_clear_bit(R5_Overlap, &dev->flags)) 3048 wake_up(&conf->wait_for_overlap); 3049 spin_unlock_irq(&conf->device_lock); 3050 while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) { 3051 copy_data(0, rbi, dev->page, dev->sector); 3052 rbi2 = r5_next_bio(rbi, dev->sector); 3053 spin_lock_irq(&conf->device_lock); 3054 if (!raid5_dec_bi_phys_segments(rbi)) { 3055 rbi->bi_next = return_bi; 3056 return_bi = rbi; 3057 } 3058 spin_unlock_irq(&conf->device_lock); 3059 rbi = rbi2; 3060 } 3061 } 3062 3063 /* now count some things */ 3064 if (test_bit(R5_LOCKED, &dev->flags)) s.locked++; 3065 if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++; 3066 3067 3068 if (dev->toread) 3069 s.to_read++; 3070 if (dev->towrite) { 3071 s.to_write++; 3072 if (!test_bit(R5_OVERWRITE, &dev->flags)) 3073 s.non_overwrite++; 3074 } 3075 if (dev->written) 3076 s.written++; 3077 rdev = rcu_dereference(conf->disks[i].rdev); 3078 if (blocked_rdev == NULL && 3079 rdev && unlikely(test_bit(Blocked, &rdev->flags))) { 3080 blocked_rdev = rdev; 3081 atomic_inc(&rdev->nr_pending); 3082 } 3083 if (!rdev || !test_bit(In_sync, &rdev->flags)) { 3084 /* The ReadError flag will just be confusing now */ 3085 clear_bit(R5_ReadError, &dev->flags); 3086 clear_bit(R5_ReWrite, &dev->flags); 3087 } 3088 if (!rdev || !test_bit(In_sync, &rdev->flags) 3089 || test_bit(R5_ReadError, &dev->flags)) { 3090 if (s.failed < 2) 3091 r6s.failed_num[s.failed] = i; 3092 s.failed++; 3093 } else 3094 set_bit(R5_Insync, &dev->flags); 3095 } 3096 rcu_read_unlock(); 3097 3098 if (unlikely(blocked_rdev)) { 3099 if (s.syncing || s.expanding || s.expanded || 3100 s.to_write || s.written) { 3101 set_bit(STRIPE_HANDLE, &sh->state); 3102 goto unlock; 3103 } 3104 /* There is nothing for the blocked_rdev to block */ 3105 rdev_dec_pending(blocked_rdev, conf->mddev); 3106 blocked_rdev = NULL; 3107 } 3108 3109 pr_debug("locked=%d uptodate=%d to_read=%d" 3110 " to_write=%d failed=%d failed_num=%d,%d\n", 3111 s.locked, s.uptodate, s.to_read, s.to_write, s.failed, 3112 r6s.failed_num[0], r6s.failed_num[1]); 3113 /* check if the array has lost >2 devices and, if so, some requests 3114 * might need to be failed 3115 */ 3116 if (s.failed > 2 && s.to_read+s.to_write+s.written) 3117 handle_failed_stripe(conf, sh, &s, disks, &return_bi); 3118 if (s.failed > 2 && s.syncing) { 3119 md_done_sync(conf->mddev, STRIPE_SECTORS,0); 3120 clear_bit(STRIPE_SYNCING, &sh->state); 
3121 s.syncing = 0; 3122 } 3123 3124 /* 3125 * might be able to return some write requests if the parity blocks 3126 * are safe, or on a failed drive 3127 */ 3128 pdev = &sh->dev[pd_idx]; 3129 r6s.p_failed = (s.failed >= 1 && r6s.failed_num[0] == pd_idx) 3130 || (s.failed >= 2 && r6s.failed_num[1] == pd_idx); 3131 qdev = &sh->dev[qd_idx]; 3132 r6s.q_failed = (s.failed >= 1 && r6s.failed_num[0] == qd_idx) 3133 || (s.failed >= 2 && r6s.failed_num[1] == qd_idx); 3134 3135 if ( s.written && 3136 ( r6s.p_failed || ((test_bit(R5_Insync, &pdev->flags) 3137 && !test_bit(R5_LOCKED, &pdev->flags) 3138 && test_bit(R5_UPTODATE, &pdev->flags)))) && 3139 ( r6s.q_failed || ((test_bit(R5_Insync, &qdev->flags) 3140 && !test_bit(R5_LOCKED, &qdev->flags) 3141 && test_bit(R5_UPTODATE, &qdev->flags))))) 3142 handle_stripe_clean_event(conf, sh, disks, &return_bi); 3143 3144 /* Now we might consider reading some blocks, either to check/generate 3145 * parity, or to satisfy requests 3146 * or to load a block that is being partially written. 3147 */ 3148 if (s.to_read || s.non_overwrite || (s.to_write && s.failed) || 3149 (s.syncing && (s.uptodate < disks)) || s.expanding) 3150 handle_stripe_fill6(sh, &s, &r6s, disks); 3151 3152 /* now to consider writing and what else, if anything should be read */ 3153 if (s.to_write) 3154 handle_stripe_dirtying6(conf, sh, &s, &r6s, disks); 3155 3156 /* maybe we need to check and possibly fix the parity for this stripe 3157 * Any reads will already have been scheduled, so we just see if enough 3158 * data is available 3159 */ 3160 if (s.syncing && s.locked == 0 && !test_bit(STRIPE_INSYNC, &sh->state)) 3161 handle_parity_checks6(conf, sh, &s, &r6s, tmp_page, disks); 3162 3163 if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { 3164 md_done_sync(conf->mddev, STRIPE_SECTORS,1); 3165 clear_bit(STRIPE_SYNCING, &sh->state); 3166 } 3167 3168 /* If the failed drives are just a ReadError, then we might need 3169 * to progress the repair/check process 3170 */ 3171 if (s.failed <= 2 && !conf->mddev->ro) 3172 for (i = 0; i < s.failed; i++) { 3173 dev = &sh->dev[r6s.failed_num[i]]; 3174 if (test_bit(R5_ReadError, &dev->flags) 3175 && !test_bit(R5_LOCKED, &dev->flags) 3176 && test_bit(R5_UPTODATE, &dev->flags) 3177 ) { 3178 if (!test_bit(R5_ReWrite, &dev->flags)) { 3179 set_bit(R5_Wantwrite, &dev->flags); 3180 set_bit(R5_ReWrite, &dev->flags); 3181 set_bit(R5_LOCKED, &dev->flags); 3182 } else { 3183 /* let's read it back */ 3184 set_bit(R5_Wantread, &dev->flags); 3185 set_bit(R5_LOCKED, &dev->flags); 3186 } 3187 } 3188 } 3189 3190 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) { 3191 struct stripe_head *sh2 3192 = get_active_stripe(conf, sh->sector, 1, 1, 1); 3193 if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) { 3194 /* sh cannot be written until sh2 has been read. 
3195 * so arrange for sh to be delayed a little 3196 */ 3197 set_bit(STRIPE_DELAYED, &sh->state); 3198 set_bit(STRIPE_HANDLE, &sh->state); 3199 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, 3200 &sh2->state)) 3201 atomic_inc(&conf->preread_active_stripes); 3202 release_stripe(sh2); 3203 goto unlock; 3204 } 3205 if (sh2) 3206 release_stripe(sh2); 3207 3208 /* Need to write out all blocks after computing P&Q */ 3209 sh->disks = conf->raid_disks; 3210 stripe_set_idx(sh->sector, conf, 0, sh); 3211 compute_parity6(sh, RECONSTRUCT_WRITE); 3212 for (i = conf->raid_disks ; i-- ; ) { 3213 set_bit(R5_LOCKED, &sh->dev[i].flags); 3214 s.locked++; 3215 set_bit(R5_Wantwrite, &sh->dev[i].flags); 3216 } 3217 clear_bit(STRIPE_EXPANDING, &sh->state); 3218 } else if (s.expanded) { 3219 clear_bit(STRIPE_EXPAND_READY, &sh->state); 3220 atomic_dec(&conf->reshape_stripes); 3221 wake_up(&conf->wait_for_overlap); 3222 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 3223 } 3224 3225 if (s.expanding && s.locked == 0 && 3226 !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) 3227 handle_stripe_expansion(conf, sh, &r6s); 3228 3229 unlock: 3230 spin_unlock(&sh->lock); 3231 3232 /* wait for this device to become unblocked */ 3233 if (unlikely(blocked_rdev)) 3234 md_wait_for_blocked_rdev(blocked_rdev, conf->mddev); 3235 3236 ops_run_io(sh, &s); 3237 3238 return_io(return_bi); 3239 3240 return blocked_rdev == NULL; 3241 } 3242 3243 /* returns true if the stripe was handled */ 3244 static bool handle_stripe(struct stripe_head *sh, struct page *tmp_page) 3245 { 3246 if (sh->raid_conf->level == 6) 3247 return handle_stripe6(sh, tmp_page); 3248 else 3249 return handle_stripe5(sh); 3250 } 3251 3252 3253 3254 static void raid5_activate_delayed(raid5_conf_t *conf) 3255 { 3256 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { 3257 while (!list_empty(&conf->delayed_list)) { 3258 struct list_head *l = conf->delayed_list.next; 3259 struct stripe_head *sh; 3260 sh = list_entry(l, struct stripe_head, lru); 3261 list_del_init(l); 3262 clear_bit(STRIPE_DELAYED, &sh->state); 3263 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 3264 atomic_inc(&conf->preread_active_stripes); 3265 list_add_tail(&sh->lru, &conf->hold_list); 3266 } 3267 } else 3268 blk_plug_device(conf->mddev->queue); 3269 } 3270 3271 static void activate_bit_delay(raid5_conf_t *conf) 3272 { 3273 /* device_lock is held */ 3274 struct list_head head; 3275 list_add(&head, &conf->bitmap_list); 3276 list_del_init(&conf->bitmap_list); 3277 while (!list_empty(&head)) { 3278 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); 3279 list_del_init(&sh->lru); 3280 atomic_inc(&sh->count); 3281 __release_stripe(conf, sh); 3282 } 3283 } 3284 3285 static void unplug_slaves(mddev_t *mddev) 3286 { 3287 raid5_conf_t *conf = mddev_to_conf(mddev); 3288 int i; 3289 3290 rcu_read_lock(); 3291 for (i = 0; i < conf->raid_disks; i++) { 3292 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); 3293 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { 3294 struct request_queue *r_queue = bdev_get_queue(rdev->bdev); 3295 3296 atomic_inc(&rdev->nr_pending); 3297 rcu_read_unlock(); 3298 3299 blk_unplug(r_queue); 3300 3301 rdev_dec_pending(rdev, mddev); 3302 rcu_read_lock(); 3303 } 3304 } 3305 rcu_read_unlock(); 3306 } 3307 3308 static void raid5_unplug_device(struct request_queue *q) 3309 { 3310 mddev_t *mddev = q->queuedata; 3311 raid5_conf_t *conf = mddev_to_conf(mddev); 3312 unsigned long flags; 3313 3314 
spin_lock_irqsave(&conf->device_lock, flags); 3315 3316 if (blk_remove_plug(q)) { 3317 conf->seq_flush++; 3318 raid5_activate_delayed(conf); 3319 } 3320 md_wakeup_thread(mddev->thread); 3321 3322 spin_unlock_irqrestore(&conf->device_lock, flags); 3323 3324 unplug_slaves(mddev); 3325 } 3326 3327 static int raid5_congested(void *data, int bits) 3328 { 3329 mddev_t *mddev = data; 3330 raid5_conf_t *conf = mddev_to_conf(mddev); 3331 3332 /* No difference between reads and writes. Just check 3333 * how busy the stripe_cache is 3334 */ 3335 if (conf->inactive_blocked) 3336 return 1; 3337 if (conf->quiesce) 3338 return 1; 3339 if (list_empty_careful(&conf->inactive_list)) 3340 return 1; 3341 3342 return 0; 3343 } 3344 3345 /* We want read requests to align with chunks where possible, 3346 * but write requests don't need to. 3347 */ 3348 static int raid5_mergeable_bvec(struct request_queue *q, 3349 struct bvec_merge_data *bvm, 3350 struct bio_vec *biovec) 3351 { 3352 mddev_t *mddev = q->queuedata; 3353 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); 3354 int max; 3355 unsigned int chunk_sectors = mddev->chunk_size >> 9; 3356 unsigned int bio_sectors = bvm->bi_size >> 9; 3357 3358 if ((bvm->bi_rw & 1) == WRITE) 3359 return biovec->bv_len; /* always allow writes to be mergeable */ 3360 3361 if (mddev->new_chunk < mddev->chunk_size) 3362 chunk_sectors = mddev->new_chunk >> 9; 3363 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; 3364 if (max < 0) max = 0; 3365 if (max <= biovec->bv_len && bio_sectors == 0) 3366 return biovec->bv_len; 3367 else 3368 return max; 3369 } 3370 3371 3372 static int in_chunk_boundary(mddev_t *mddev, struct bio *bio) 3373 { 3374 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); 3375 unsigned int chunk_sectors = mddev->chunk_size >> 9; 3376 unsigned int bio_sectors = bio->bi_size >> 9; 3377 3378 if (mddev->new_chunk < mddev->chunk_size) 3379 chunk_sectors = mddev->new_chunk >> 9; 3380 return chunk_sectors >= 3381 ((sector & (chunk_sectors - 1)) + bio_sectors); 3382 } 3383 3384 /* 3385 * add bio to the retry LIFO ( in O(1) ... we are in interrupt ) 3386 * later sampled by raid5d. 3387 */ 3388 static void add_bio_to_retry(struct bio *bi,raid5_conf_t *conf) 3389 { 3390 unsigned long flags; 3391 3392 spin_lock_irqsave(&conf->device_lock, flags); 3393 3394 bi->bi_next = conf->retry_read_aligned_list; 3395 conf->retry_read_aligned_list = bi; 3396 3397 spin_unlock_irqrestore(&conf->device_lock, flags); 3398 md_wakeup_thread(conf->mddev->thread); 3399 } 3400 3401 3402 static struct bio *remove_bio_from_retry(raid5_conf_t *conf) 3403 { 3404 struct bio *bi; 3405 3406 bi = conf->retry_read_aligned; 3407 if (bi) { 3408 conf->retry_read_aligned = NULL; 3409 return bi; 3410 } 3411 bi = conf->retry_read_aligned_list; 3412 if(bi) { 3413 conf->retry_read_aligned_list = bi->bi_next; 3414 bi->bi_next = NULL; 3415 /* 3416 * this sets the active stripe count to 1 and the processed 3417 * stripe count to zero (upper 16 bits) 3418 */ 3419 bi->bi_phys_segments = 1; /* biased count of active stripes */ 3420 } 3421 3422 return bi; 3423 } 3424 3425 3426 /* 3427 * The "raid5_align_endio" should check if the read succeeded and if it 3428 * did, call bio_endio on the original bio (having bio_put the new bio 3429 * first). 3430 * If the read failed, the original bio is instead handed to add_bio_to_retry() so that raid5d can resubmit it through the normal stripe cache path.
3431 */ 3432 static void raid5_align_endio(struct bio *bi, int error) 3433 { 3434 struct bio* raid_bi = bi->bi_private; 3435 mddev_t *mddev; 3436 raid5_conf_t *conf; 3437 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 3438 mdk_rdev_t *rdev; 3439 3440 bio_put(bi); 3441 3442 mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata; 3443 conf = mddev_to_conf(mddev); 3444 rdev = (void*)raid_bi->bi_next; 3445 raid_bi->bi_next = NULL; 3446 3447 rdev_dec_pending(rdev, conf->mddev); 3448 3449 if (!error && uptodate) { 3450 bio_endio(raid_bi, 0); 3451 if (atomic_dec_and_test(&conf->active_aligned_reads)) 3452 wake_up(&conf->wait_for_stripe); 3453 return; 3454 } 3455 3456 3457 pr_debug("raid5_align_endio : io error...handing IO for a retry\n"); 3458 3459 add_bio_to_retry(raid_bi, conf); 3460 } 3461 3462 static int bio_fits_rdev(struct bio *bi) 3463 { 3464 struct request_queue *q = bdev_get_queue(bi->bi_bdev); 3465 3466 if ((bi->bi_size>>9) > queue_max_sectors(q)) 3467 return 0; 3468 blk_recount_segments(q, bi); 3469 if (bi->bi_phys_segments > queue_max_phys_segments(q)) 3470 return 0; 3471 3472 if (q->merge_bvec_fn) 3473 /* it's too hard to apply the merge_bvec_fn at this stage, 3474 * just just give up 3475 */ 3476 return 0; 3477 3478 return 1; 3479 } 3480 3481 3482 static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio) 3483 { 3484 mddev_t *mddev = q->queuedata; 3485 raid5_conf_t *conf = mddev_to_conf(mddev); 3486 unsigned int dd_idx; 3487 struct bio* align_bi; 3488 mdk_rdev_t *rdev; 3489 3490 if (!in_chunk_boundary(mddev, raid_bio)) { 3491 pr_debug("chunk_aligned_read : non aligned\n"); 3492 return 0; 3493 } 3494 /* 3495 * use bio_clone to make a copy of the bio 3496 */ 3497 align_bi = bio_clone(raid_bio, GFP_NOIO); 3498 if (!align_bi) 3499 return 0; 3500 /* 3501 * set bi_end_io to a new function, and set bi_private to the 3502 * original bio. 3503 */ 3504 align_bi->bi_end_io = raid5_align_endio; 3505 align_bi->bi_private = raid_bio; 3506 /* 3507 * compute position 3508 */ 3509 align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector, 3510 0, 3511 &dd_idx, NULL); 3512 3513 rcu_read_lock(); 3514 rdev = rcu_dereference(conf->disks[dd_idx].rdev); 3515 if (rdev && test_bit(In_sync, &rdev->flags)) { 3516 atomic_inc(&rdev->nr_pending); 3517 rcu_read_unlock(); 3518 raid_bio->bi_next = (void*)rdev; 3519 align_bi->bi_bdev = rdev->bdev; 3520 align_bi->bi_flags &= ~(1 << BIO_SEG_VALID); 3521 align_bi->bi_sector += rdev->data_offset; 3522 3523 if (!bio_fits_rdev(align_bi)) { 3524 /* too big in some way */ 3525 bio_put(align_bi); 3526 rdev_dec_pending(rdev, mddev); 3527 return 0; 3528 } 3529 3530 spin_lock_irq(&conf->device_lock); 3531 wait_event_lock_irq(conf->wait_for_stripe, 3532 conf->quiesce == 0, 3533 conf->device_lock, /* nothing */); 3534 atomic_inc(&conf->active_aligned_reads); 3535 spin_unlock_irq(&conf->device_lock); 3536 3537 generic_make_request(align_bi); 3538 return 1; 3539 } else { 3540 rcu_read_unlock(); 3541 bio_put(align_bi); 3542 return 0; 3543 } 3544 } 3545 3546 /* __get_priority_stripe - get the next stripe to process 3547 * 3548 * Full stripe writes are allowed to pass preread active stripes up until 3549 * the bypass_threshold is exceeded. In general the bypass_count 3550 * increments when the handle_list is handled before the hold_list; however, it 3551 * will not be incremented when STRIPE_IO_STARTED is sampled set signifying a 3552 * stripe with in flight i/o. The bypass_count will be reset when the 3553 * head of the hold_list has changed, i.e. 
the head was promoted to the 3554 * handle_list. 3555 */ 3556 static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf) 3557 { 3558 struct stripe_head *sh; 3559 3560 pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n", 3561 __func__, 3562 list_empty(&conf->handle_list) ? "empty" : "busy", 3563 list_empty(&conf->hold_list) ? "empty" : "busy", 3564 atomic_read(&conf->pending_full_writes), conf->bypass_count); 3565 3566 if (!list_empty(&conf->handle_list)) { 3567 sh = list_entry(conf->handle_list.next, typeof(*sh), lru); 3568 3569 if (list_empty(&conf->hold_list)) 3570 conf->bypass_count = 0; 3571 else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) { 3572 if (conf->hold_list.next == conf->last_hold) 3573 conf->bypass_count++; 3574 else { 3575 conf->last_hold = conf->hold_list.next; 3576 conf->bypass_count -= conf->bypass_threshold; 3577 if (conf->bypass_count < 0) 3578 conf->bypass_count = 0; 3579 } 3580 } 3581 } else if (!list_empty(&conf->hold_list) && 3582 ((conf->bypass_threshold && 3583 conf->bypass_count > conf->bypass_threshold) || 3584 atomic_read(&conf->pending_full_writes) == 0)) { 3585 sh = list_entry(conf->hold_list.next, 3586 typeof(*sh), lru); 3587 conf->bypass_count -= conf->bypass_threshold; 3588 if (conf->bypass_count < 0) 3589 conf->bypass_count = 0; 3590 } else 3591 return NULL; 3592 3593 list_del_init(&sh->lru); 3594 atomic_inc(&sh->count); 3595 BUG_ON(atomic_read(&sh->count) != 1); 3596 return sh; 3597 } 3598 3599 static int make_request(struct request_queue *q, struct bio * bi) 3600 { 3601 mddev_t *mddev = q->queuedata; 3602 raid5_conf_t *conf = mddev_to_conf(mddev); 3603 int dd_idx; 3604 sector_t new_sector; 3605 sector_t logical_sector, last_sector; 3606 struct stripe_head *sh; 3607 const int rw = bio_data_dir(bi); 3608 int cpu, remaining; 3609 3610 if (unlikely(bio_barrier(bi))) { 3611 bio_endio(bi, -EOPNOTSUPP); 3612 return 0; 3613 } 3614 3615 md_write_start(mddev, bi); 3616 3617 cpu = part_stat_lock(); 3618 part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); 3619 part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], 3620 bio_sectors(bi)); 3621 part_stat_unlock(); 3622 3623 if (rw == READ && 3624 mddev->reshape_position == MaxSector && 3625 chunk_aligned_read(q,bi)) 3626 return 0; 3627 3628 logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 3629 last_sector = bi->bi_sector + (bi->bi_size>>9); 3630 bi->bi_next = NULL; 3631 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ 3632 3633 for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { 3634 DEFINE_WAIT(w); 3635 int disks, data_disks; 3636 int previous; 3637 3638 retry: 3639 previous = 0; 3640 disks = conf->raid_disks; 3641 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); 3642 if (unlikely(conf->reshape_progress != MaxSector)) { 3643 /* spinlock is needed as reshape_progress may be 3644 * 64bit on a 32bit platform, and so it might be 3645 * possible to see a half-updated value 3646 * Ofcourse reshape_progress could change after 3647 * the lock is dropped, so once we get a reference 3648 * to the stripe that we think it is, we will have 3649 * to check again. 3650 */ 3651 spin_lock_irq(&conf->device_lock); 3652 if (mddev->delta_disks < 0 3653 ? logical_sector < conf->reshape_progress 3654 : logical_sector >= conf->reshape_progress) { 3655 disks = conf->previous_raid_disks; 3656 previous = 1; 3657 } else { 3658 if (mddev->delta_disks < 0 3659 ? 
logical_sector < conf->reshape_safe 3660 : logical_sector >= conf->reshape_safe) { 3661 spin_unlock_irq(&conf->device_lock); 3662 schedule(); 3663 goto retry; 3664 } 3665 } 3666 spin_unlock_irq(&conf->device_lock); 3667 } 3668 data_disks = disks - conf->max_degraded; 3669 3670 new_sector = raid5_compute_sector(conf, logical_sector, 3671 previous, 3672 &dd_idx, NULL); 3673 pr_debug("raid5: make_request, sector %llu logical %llu\n", 3674 (unsigned long long)new_sector, 3675 (unsigned long long)logical_sector); 3676 3677 sh = get_active_stripe(conf, new_sector, previous, 3678 (bi->bi_rw&RWA_MASK), 0); 3679 if (sh) { 3680 if (unlikely(previous)) { 3681 /* expansion might have moved on while waiting for a 3682 * stripe, so we must do the range check again. 3683 * Expansion could still move past after this 3684 * test, but as we are holding a reference to 3685 * 'sh', we know that if that happens, 3686 * STRIPE_EXPANDING will get set and the expansion 3687 * won't proceed until we finish with the stripe. 3688 */ 3689 int must_retry = 0; 3690 spin_lock_irq(&conf->device_lock); 3691 if (mddev->delta_disks < 0 3692 ? logical_sector >= conf->reshape_progress 3693 : logical_sector < conf->reshape_progress) 3694 /* mismatch, need to try again */ 3695 must_retry = 1; 3696 spin_unlock_irq(&conf->device_lock); 3697 if (must_retry) { 3698 release_stripe(sh); 3699 goto retry; 3700 } 3701 } 3702 /* FIXME what if we get a false positive because these 3703 * are being updated. 3704 */ 3705 if (logical_sector >= mddev->suspend_lo && 3706 logical_sector < mddev->suspend_hi) { 3707 release_stripe(sh); 3708 schedule(); 3709 goto retry; 3710 } 3711 3712 if (test_bit(STRIPE_EXPANDING, &sh->state) || 3713 !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) { 3714 /* Stripe is busy expanding or 3715 * add failed due to overlap. Flush everything 3716 * and wait a while 3717 */ 3718 raid5_unplug_device(mddev->queue); 3719 release_stripe(sh); 3720 schedule(); 3721 goto retry; 3722 } 3723 finish_wait(&conf->wait_for_overlap, &w); 3724 set_bit(STRIPE_HANDLE, &sh->state); 3725 clear_bit(STRIPE_DELAYED, &sh->state); 3726 release_stripe(sh); 3727 } else { 3728 /* cannot get stripe for read-ahead, just give-up */ 3729 clear_bit(BIO_UPTODATE, &bi->bi_flags); 3730 finish_wait(&conf->wait_for_overlap, &w); 3731 break; 3732 } 3733 3734 } 3735 spin_lock_irq(&conf->device_lock); 3736 remaining = raid5_dec_bi_phys_segments(bi); 3737 spin_unlock_irq(&conf->device_lock); 3738 if (remaining == 0) { 3739 3740 if ( rw == WRITE ) 3741 md_write_end(mddev); 3742 3743 bio_endio(bi, 0); 3744 } 3745 return 0; 3746 } 3747 3748 static sector_t raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks); 3749 3750 static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped) 3751 { 3752 /* reshaping is quite different to recovery/resync so it is 3753 * handled quite separately ... here. 3754 * 3755 * On each call to sync_request, we gather one chunk worth of 3756 * destination stripes and flag them as expanding. 3757 * Then we find all the source stripes and request reads. 3758 * As the reads complete, handle_stripe will copy the data 3759 * into the destination stripe and release that stripe. 
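 *
 * A worked example with illustrative numbers: growing a 3-drive RAID5
 * (2 data disks) to 4 drives (3 data disks) with 64KiB chunks gives
 * reshape_sectors = 128, so each call flags one chunk's worth of
 * destination stripes as expanding and then advances
 * conf->reshape_progress by reshape_sectors * new_data_disks = 384
 * sectors of array address space.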
3760 */ 3761 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 3762 struct stripe_head *sh; 3763 sector_t first_sector, last_sector; 3764 int raid_disks = conf->previous_raid_disks; 3765 int data_disks = raid_disks - conf->max_degraded; 3766 int new_data_disks = conf->raid_disks - conf->max_degraded; 3767 int i; 3768 int dd_idx; 3769 sector_t writepos, readpos, safepos; 3770 sector_t stripe_addr; 3771 int reshape_sectors; 3772 struct list_head stripes; 3773 3774 if (sector_nr == 0) { 3775 /* If restarting in the middle, skip the initial sectors */ 3776 if (mddev->delta_disks < 0 && 3777 conf->reshape_progress < raid5_size(mddev, 0, 0)) { 3778 sector_nr = raid5_size(mddev, 0, 0) 3779 - conf->reshape_progress; 3780 } else if (mddev->delta_disks > 0 && 3781 conf->reshape_progress > 0) 3782 sector_nr = conf->reshape_progress; 3783 sector_div(sector_nr, new_data_disks); 3784 if (sector_nr) { 3785 *skipped = 1; 3786 return sector_nr; 3787 } 3788 } 3789 3790 /* We need to process a full chunk at a time. 3791 * If old and new chunk sizes differ, we need to process the 3792 * largest of these 3793 */ 3794 if (mddev->new_chunk > mddev->chunk_size) 3795 reshape_sectors = mddev->new_chunk / 512; 3796 else 3797 reshape_sectors = mddev->chunk_size / 512; 3798 3799 /* we update the metadata when there is more than 3Meg 3800 * in the block range (that is rather arbitrary, should 3801 * probably be time based) or when the data about to be 3802 * copied would over-write the source of the data at 3803 * the front of the range. 3804 * i.e. one new_stripe along from reshape_progress new_maps 3805 * to after where reshape_safe old_maps to 3806 */ 3807 writepos = conf->reshape_progress; 3808 sector_div(writepos, new_data_disks); 3809 readpos = conf->reshape_progress; 3810 sector_div(readpos, data_disks); 3811 safepos = conf->reshape_safe; 3812 sector_div(safepos, data_disks); 3813 if (mddev->delta_disks < 0) { 3814 writepos -= min_t(sector_t, reshape_sectors, writepos); 3815 readpos += reshape_sectors; 3816 safepos += reshape_sectors; 3817 } else { 3818 writepos += reshape_sectors; 3819 readpos -= min_t(sector_t, reshape_sectors, readpos); 3820 safepos -= min_t(sector_t, reshape_sectors, safepos); 3821 } 3822 3823 /* 'writepos' is the most advanced device address we might write. 3824 * 'readpos' is the least advanced device address we might read. 3825 * 'safepos' is the least address recorded in the metadata as having 3826 * been reshaped. 3827 * If 'readpos' is behind 'writepos', then there is no way that we can 3828 * ensure safety in the face of a crash - that must be done by userspace 3829 * making a backup of the data. So in that case there is no particular 3830 * rush to update metadata. 3831 * Otherwise if 'safepos' is behind 'writepos', then we really need to 3832 * update the metadata to advance 'safepos' to match 'readpos' so that 3833 * we can be safe in the event of a crash. 3834 * So we insist on updating metadata if safepos is behind writepos and 3835 * readpos is beyond writepos. 3836 * In any case, update the metadata every 10 seconds. 3837 * Maybe that number should be configurable, but I'm not sure it is 3838 * worth it.... maybe it could be a multiple of safemode_delay??? 3839 */ 3840 if ((mddev->delta_disks < 0 3841 ? (safepos > writepos && readpos < writepos) 3842 : (safepos < writepos && readpos > writepos)) || 3843 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { 3844 /* Cannot proceed until we've updated the superblock... 
*/ 3845 wait_event(conf->wait_for_overlap, 3846 atomic_read(&conf->reshape_stripes)==0); 3847 mddev->reshape_position = conf->reshape_progress; 3848 mddev->curr_resync_completed = mddev->curr_resync; 3849 conf->reshape_checkpoint = jiffies; 3850 set_bit(MD_CHANGE_DEVS, &mddev->flags); 3851 md_wakeup_thread(mddev->thread); 3852 wait_event(mddev->sb_wait, mddev->flags == 0 || 3853 kthread_should_stop()); 3854 spin_lock_irq(&conf->device_lock); 3855 conf->reshape_safe = mddev->reshape_position; 3856 spin_unlock_irq(&conf->device_lock); 3857 wake_up(&conf->wait_for_overlap); 3858 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 3859 } 3860 3861 if (mddev->delta_disks < 0) { 3862 BUG_ON(conf->reshape_progress == 0); 3863 stripe_addr = writepos; 3864 BUG_ON((mddev->dev_sectors & 3865 ~((sector_t)reshape_sectors - 1)) 3866 - reshape_sectors - stripe_addr 3867 != sector_nr); 3868 } else { 3869 BUG_ON(writepos != sector_nr + reshape_sectors); 3870 stripe_addr = sector_nr; 3871 } 3872 INIT_LIST_HEAD(&stripes); 3873 for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) { 3874 int j; 3875 int skipped = 0; 3876 sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1); 3877 set_bit(STRIPE_EXPANDING, &sh->state); 3878 atomic_inc(&conf->reshape_stripes); 3879 /* If any of this stripe is beyond the end of the old 3880 * array, then we need to zero those blocks 3881 */ 3882 for (j=sh->disks; j--;) { 3883 sector_t s; 3884 if (j == sh->pd_idx) 3885 continue; 3886 if (conf->level == 6 && 3887 j == sh->qd_idx) 3888 continue; 3889 s = compute_blocknr(sh, j, 0); 3890 if (s < raid5_size(mddev, 0, 0)) { 3891 skipped = 1; 3892 continue; 3893 } 3894 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE); 3895 set_bit(R5_Expanded, &sh->dev[j].flags); 3896 set_bit(R5_UPTODATE, &sh->dev[j].flags); 3897 } 3898 if (!skipped) { 3899 set_bit(STRIPE_EXPAND_READY, &sh->state); 3900 set_bit(STRIPE_HANDLE, &sh->state); 3901 } 3902 list_add(&sh->lru, &stripes); 3903 } 3904 spin_lock_irq(&conf->device_lock); 3905 if (mddev->delta_disks < 0) 3906 conf->reshape_progress -= reshape_sectors * new_data_disks; 3907 else 3908 conf->reshape_progress += reshape_sectors * new_data_disks; 3909 spin_unlock_irq(&conf->device_lock); 3910 /* Ok, those stripes are ready. We can start scheduling 3911 * reads on the source stripes. 3912 * The source stripes are determined by mapping the first and last 3913 * block on the destination stripes. 3914 */ 3915 first_sector = 3916 raid5_compute_sector(conf, stripe_addr*(new_data_disks), 3917 1, &dd_idx, NULL); 3918 last_sector = 3919 raid5_compute_sector(conf, ((stripe_addr+reshape_sectors) 3920 *(new_data_disks) - 1), 3921 1, &dd_idx, NULL); 3922 if (last_sector >= mddev->dev_sectors) 3923 last_sector = mddev->dev_sectors - 1; 3924 while (first_sector <= last_sector) { 3925 sh = get_active_stripe(conf, first_sector, 1, 0, 1); 3926 set_bit(STRIPE_EXPAND_SOURCE, &sh->state); 3927 set_bit(STRIPE_HANDLE, &sh->state); 3928 release_stripe(sh); 3929 first_sector += STRIPE_SECTORS; 3930 } 3931 /* Now that the sources are clearly marked, we can release 3932 * the destination stripes 3933 */ 3934 while (!list_empty(&stripes)) { 3935 sh = list_entry(stripes.next, struct stripe_head, lru); 3936 list_del_init(&sh->lru); 3937 release_stripe(sh); 3938 } 3939 /* If this takes us to the resync_max point where we have to pause, 3940 * then we need to write out the superblock.
*/ 3941 sector_nr += reshape_sectors; 3942 if ((sector_nr - mddev->curr_resync_completed) * 2 3943 >= mddev->resync_max - mddev->curr_resync_completed) { 3944 /* Cannot proceed until we've updated the superblock... */ 3945 wait_event(conf->wait_for_overlap, 3946 atomic_read(&conf->reshape_stripes) == 0); 3947 mddev->reshape_position = conf->reshape_progress; 3948 mddev->curr_resync_completed = mddev->curr_resync; 3949 conf->reshape_checkpoint = jiffies; 3950 set_bit(MD_CHANGE_DEVS, &mddev->flags); 3951 md_wakeup_thread(mddev->thread); 3952 wait_event(mddev->sb_wait, 3953 !test_bit(MD_CHANGE_DEVS, &mddev->flags) 3954 || kthread_should_stop()); 3955 spin_lock_irq(&conf->device_lock); 3956 conf->reshape_safe = mddev->reshape_position; 3957 spin_unlock_irq(&conf->device_lock); 3958 wake_up(&conf->wait_for_overlap); 3959 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 3960 } 3961 return reshape_sectors; 3962 } 3963 3964 3965 /* FIXME go_faster isn't used */ 3966 static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster) 3967 { 3968 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 3969 struct stripe_head *sh; 3970 sector_t max_sector = mddev->dev_sectors; 3971 int sync_blocks; 3972 int still_degraded = 0; 3973 int i; 3974 3975 if (sector_nr >= max_sector) { 3976 /* just being told to finish up .. nothing much to do */ 3977 unplug_slaves(mddev); 3978 3979 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { 3980 end_reshape(conf); 3981 return 0; 3982 } 3983 3984 if (mddev->curr_resync < max_sector) /* aborted */ 3985 bitmap_end_sync(mddev->bitmap, mddev->curr_resync, 3986 &sync_blocks, 1); 3987 else /* completed sync */ 3988 conf->fullsync = 0; 3989 bitmap_close_sync(mddev->bitmap); 3990 3991 return 0; 3992 } 3993 3994 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 3995 return reshape_request(mddev, sector_nr, skipped); 3996 3997 /* No need to check resync_max as we never do more than one 3998 * stripe, and as resync_max will always be on a chunk boundary, 3999 * if the check in md_do_sync didn't fire, there is no chance 4000 * of overstepping resync_max here 4001 */ 4002 4003 /* if there are too many failed drives and we are trying 4004 * to resync, then assert that we are finished, because there is 4005 * nothing we can do. 4006 */ 4007 if (mddev->degraded >= conf->max_degraded && 4008 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 4009 sector_t rv = mddev->dev_sectors - sector_nr; 4010 *skipped = 1; 4011 return rv; 4012 } 4013 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && 4014 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && 4015 !conf->fullsync && sync_blocks >= STRIPE_SECTORS) { 4016 /* we can skip this block, and probably more */ 4017 sync_blocks /= STRIPE_SECTORS; 4018 *skipped = 1; 4019 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */ 4020 } 4021 4022 4023 bitmap_cond_end_sync(mddev->bitmap, sector_nr); 4024 4025 sh = get_active_stripe(conf, sector_nr, 0, 1, 0); 4026 if (sh == NULL) { 4027 sh = get_active_stripe(conf, sector_nr, 0, 0, 0); 4028 /* make sure we don't swamp the stripe cache if someone else 4029 * is trying to get access 4030 */ 4031 schedule_timeout_uninterruptible(1); 4032 } 4033 /* Need to check if array will still be degraded after recovery/resync 4034 * We don't need to check the 'failed' flag as when that gets set, 4035 * recovery aborts.
4036 */ 4037 for (i = 0; i < conf->raid_disks; i++) 4038 if (conf->disks[i].rdev == NULL) 4039 still_degraded = 1; 4040 4041 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); 4042 4043 spin_lock(&sh->lock); 4044 set_bit(STRIPE_SYNCING, &sh->state); 4045 clear_bit(STRIPE_INSYNC, &sh->state); 4046 spin_unlock(&sh->lock); 4047 4048 /* wait for any blocked device to be handled */ 4049 while(unlikely(!handle_stripe(sh, NULL))) 4050 ; 4051 release_stripe(sh); 4052 4053 return STRIPE_SECTORS; 4054 } 4055 4056 static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio) 4057 { 4058 /* We may not be able to submit a whole bio at once as there 4059 * may not be enough stripe_heads available. 4060 * We cannot pre-allocate enough stripe_heads as we may need 4061 * more than exist in the cache (if we allow ever large chunks). 4062 * So we do one stripe head at a time and record in 4063 * ->bi_hw_segments how many have been done. 4064 * 4065 * We *know* that this entire raid_bio is in one chunk, so 4066 * it will be only one 'dd_idx' and only need one call to raid5_compute_sector. 4067 */ 4068 struct stripe_head *sh; 4069 int dd_idx; 4070 sector_t sector, logical_sector, last_sector; 4071 int scnt = 0; 4072 int remaining; 4073 int handled = 0; 4074 4075 logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 4076 sector = raid5_compute_sector(conf, logical_sector, 4077 0, &dd_idx, NULL); 4078 last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9); 4079 4080 for (; logical_sector < last_sector; 4081 logical_sector += STRIPE_SECTORS, 4082 sector += STRIPE_SECTORS, 4083 scnt++) { 4084 4085 if (scnt < raid5_bi_hw_segments(raid_bio)) 4086 /* already done this stripe */ 4087 continue; 4088 4089 sh = get_active_stripe(conf, sector, 0, 1, 0); 4090 4091 if (!sh) { 4092 /* failed to get a stripe - must wait */ 4093 raid5_set_bi_hw_segments(raid_bio, scnt); 4094 conf->retry_read_aligned = raid_bio; 4095 return handled; 4096 } 4097 4098 set_bit(R5_ReadError, &sh->dev[dd_idx].flags); 4099 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) { 4100 release_stripe(sh); 4101 raid5_set_bi_hw_segments(raid_bio, scnt); 4102 conf->retry_read_aligned = raid_bio; 4103 return handled; 4104 } 4105 4106 handle_stripe(sh, NULL); 4107 release_stripe(sh); 4108 handled++; 4109 } 4110 spin_lock_irq(&conf->device_lock); 4111 remaining = raid5_dec_bi_phys_segments(raid_bio); 4112 spin_unlock_irq(&conf->device_lock); 4113 if (remaining == 0) 4114 bio_endio(raid_bio, 0); 4115 if (atomic_dec_and_test(&conf->active_aligned_reads)) 4116 wake_up(&conf->wait_for_stripe); 4117 return handled; 4118 } 4119 4120 4121 4122 /* 4123 * This is our raid5 kernel thread. 4124 * 4125 * We scan the hash table for stripes which can be handled now. 4126 * During the scan, completed stripes are saved for us by the interrupt 4127 * handler, so that they will not have to wait for our next wakeup. 
4128 */ 4129 static void raid5d(mddev_t *mddev) 4130 { 4131 struct stripe_head *sh; 4132 raid5_conf_t *conf = mddev_to_conf(mddev); 4133 int handled; 4134 4135 pr_debug("+++ raid5d active\n"); 4136 4137 md_check_recovery(mddev); 4138 4139 handled = 0; 4140 spin_lock_irq(&conf->device_lock); 4141 while (1) { 4142 struct bio *bio; 4143 4144 if (conf->seq_flush != conf->seq_write) { 4145 int seq = conf->seq_flush; 4146 spin_unlock_irq(&conf->device_lock); 4147 bitmap_unplug(mddev->bitmap); 4148 spin_lock_irq(&conf->device_lock); 4149 conf->seq_write = seq; 4150 activate_bit_delay(conf); 4151 } 4152 4153 while ((bio = remove_bio_from_retry(conf))) { 4154 int ok; 4155 spin_unlock_irq(&conf->device_lock); 4156 ok = retry_aligned_read(conf, bio); 4157 spin_lock_irq(&conf->device_lock); 4158 if (!ok) 4159 break; 4160 handled++; 4161 } 4162 4163 sh = __get_priority_stripe(conf); 4164 4165 if (!sh) 4166 break; 4167 spin_unlock_irq(&conf->device_lock); 4168 4169 handled++; 4170 handle_stripe(sh, conf->spare_page); 4171 release_stripe(sh); 4172 4173 spin_lock_irq(&conf->device_lock); 4174 } 4175 pr_debug("%d stripes handled\n", handled); 4176 4177 spin_unlock_irq(&conf->device_lock); 4178 4179 async_tx_issue_pending_all(); 4180 unplug_slaves(mddev); 4181 4182 pr_debug("--- raid5d inactive\n"); 4183 } 4184 4185 static ssize_t 4186 raid5_show_stripe_cache_size(mddev_t *mddev, char *page) 4187 { 4188 raid5_conf_t *conf = mddev_to_conf(mddev); 4189 if (conf) 4190 return sprintf(page, "%d\n", conf->max_nr_stripes); 4191 else 4192 return 0; 4193 } 4194 4195 static ssize_t 4196 raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len) 4197 { 4198 raid5_conf_t *conf = mddev_to_conf(mddev); 4199 unsigned long new; 4200 int err; 4201 4202 if (len >= PAGE_SIZE) 4203 return -EINVAL; 4204 if (!conf) 4205 return -ENODEV; 4206 4207 if (strict_strtoul(page, 10, &new)) 4208 return -EINVAL; 4209 if (new <= 16 || new > 32768) 4210 return -EINVAL; 4211 while (new < conf->max_nr_stripes) { 4212 if (drop_one_stripe(conf)) 4213 conf->max_nr_stripes--; 4214 else 4215 break; 4216 } 4217 err = md_allow_write(mddev); 4218 if (err) 4219 return err; 4220 while (new > conf->max_nr_stripes) { 4221 if (grow_one_stripe(conf)) 4222 conf->max_nr_stripes++; 4223 else break; 4224 } 4225 return len; 4226 } 4227 4228 static struct md_sysfs_entry 4229 raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR, 4230 raid5_show_stripe_cache_size, 4231 raid5_store_stripe_cache_size); 4232 4233 static ssize_t 4234 raid5_show_preread_threshold(mddev_t *mddev, char *page) 4235 { 4236 raid5_conf_t *conf = mddev_to_conf(mddev); 4237 if (conf) 4238 return sprintf(page, "%d\n", conf->bypass_threshold); 4239 else 4240 return 0; 4241 } 4242 4243 static ssize_t 4244 raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len) 4245 { 4246 raid5_conf_t *conf = mddev_to_conf(mddev); 4247 unsigned long new; 4248 if (len >= PAGE_SIZE) 4249 return -EINVAL; 4250 if (!conf) 4251 return -ENODEV; 4252 4253 if (strict_strtoul(page, 10, &new)) 4254 return -EINVAL; 4255 if (new > conf->max_nr_stripes) 4256 return -EINVAL; 4257 conf->bypass_threshold = new; 4258 return len; 4259 } 4260 4261 static struct md_sysfs_entry 4262 raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold, 4263 S_IRUGO | S_IWUSR, 4264 raid5_show_preread_threshold, 4265 raid5_store_preread_threshold); 4266 4267 static ssize_t 4268 stripe_cache_active_show(mddev_t *mddev, char *page) 4269 { 4270 raid5_conf_t *conf = mddev_to_conf(mddev); 4271 
if (conf) 4272 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); 4273 else 4274 return 0; 4275 } 4276 4277 static struct md_sysfs_entry 4278 raid5_stripecache_active = __ATTR_RO(stripe_cache_active); 4279 4280 static struct attribute *raid5_attrs[] = { 4281 &raid5_stripecache_size.attr, 4282 &raid5_stripecache_active.attr, 4283 &raid5_preread_bypass_threshold.attr, 4284 NULL, 4285 }; 4286 static struct attribute_group raid5_attrs_group = { 4287 .name = NULL, 4288 .attrs = raid5_attrs, 4289 }; 4290 4291 static sector_t 4292 raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks) 4293 { 4294 raid5_conf_t *conf = mddev_to_conf(mddev); 4295 4296 if (!sectors) 4297 sectors = mddev->dev_sectors; 4298 if (!raid_disks) { 4299 /* size is defined by the smallest of previous and new size */ 4300 if (conf->raid_disks < conf->previous_raid_disks) 4301 raid_disks = conf->raid_disks; 4302 else 4303 raid_disks = conf->previous_raid_disks; 4304 } 4305 4306 sectors &= ~((sector_t)mddev->chunk_size/512 - 1); 4307 sectors &= ~((sector_t)mddev->new_chunk/512 - 1); 4308 return sectors * (raid_disks - conf->max_degraded); 4309 } 4310 4311 static raid5_conf_t *setup_conf(mddev_t *mddev) 4312 { 4313 raid5_conf_t *conf; 4314 int raid_disk, memory; 4315 mdk_rdev_t *rdev; 4316 struct disk_info *disk; 4317 4318 if (mddev->new_level != 5 4319 && mddev->new_level != 4 4320 && mddev->new_level != 6) { 4321 printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n", 4322 mdname(mddev), mddev->new_level); 4323 return ERR_PTR(-EIO); 4324 } 4325 if ((mddev->new_level == 5 4326 && !algorithm_valid_raid5(mddev->new_layout)) || 4327 (mddev->new_level == 6 4328 && !algorithm_valid_raid6(mddev->new_layout))) { 4329 printk(KERN_ERR "raid5: %s: layout %d not supported\n", 4330 mdname(mddev), mddev->new_layout); 4331 return ERR_PTR(-EIO); 4332 } 4333 if (mddev->new_level == 6 && mddev->raid_disks < 4) { 4334 printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n", 4335 mdname(mddev), mddev->raid_disks); 4336 return ERR_PTR(-EINVAL); 4337 } 4338 4339 if (!mddev->new_chunk || mddev->new_chunk % PAGE_SIZE) { 4340 printk(KERN_ERR "raid5: invalid chunk size %d for %s\n", 4341 mddev->new_chunk, mdname(mddev)); 4342 return ERR_PTR(-EINVAL); 4343 } 4344 4345 conf = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL); 4346 if (conf == NULL) 4347 goto abort; 4348 4349 conf->raid_disks = mddev->raid_disks; 4350 if (mddev->reshape_position == MaxSector) 4351 conf->previous_raid_disks = mddev->raid_disks; 4352 else 4353 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; 4354 4355 conf->disks = kzalloc(conf->raid_disks * sizeof(struct disk_info), 4356 GFP_KERNEL); 4357 if (!conf->disks) 4358 goto abort; 4359 4360 conf->mddev = mddev; 4361 4362 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) 4363 goto abort; 4364 4365 if (mddev->new_level == 6) { 4366 conf->spare_page = alloc_page(GFP_KERNEL); 4367 if (!conf->spare_page) 4368 goto abort; 4369 } 4370 spin_lock_init(&conf->device_lock); 4371 init_waitqueue_head(&conf->wait_for_stripe); 4372 init_waitqueue_head(&conf->wait_for_overlap); 4373 INIT_LIST_HEAD(&conf->handle_list); 4374 INIT_LIST_HEAD(&conf->hold_list); 4375 INIT_LIST_HEAD(&conf->delayed_list); 4376 INIT_LIST_HEAD(&conf->bitmap_list); 4377 INIT_LIST_HEAD(&conf->inactive_list); 4378 atomic_set(&conf->active_stripes, 0); 4379 atomic_set(&conf->preread_active_stripes, 0); 4380 atomic_set(&conf->active_aligned_reads, 0); 4381 conf->bypass_threshold = 
BYPASS_THRESHOLD; 4382 4383 pr_debug("raid5: run(%s) called.\n", mdname(mddev)); 4384 4385 list_for_each_entry(rdev, &mddev->disks, same_set) { 4386 raid_disk = rdev->raid_disk; 4387 if (raid_disk >= conf->raid_disks 4388 || raid_disk < 0) 4389 continue; 4390 disk = conf->disks + raid_disk; 4391 4392 disk->rdev = rdev; 4393 4394 if (test_bit(In_sync, &rdev->flags)) { 4395 char b[BDEVNAME_SIZE]; 4396 printk(KERN_INFO "raid5: device %s operational as raid" 4397 " disk %d\n", bdevname(rdev->bdev,b), 4398 raid_disk); 4399 } else 4400 /* Cannot rely on bitmap to complete recovery */ 4401 conf->fullsync = 1; 4402 } 4403 4404 conf->chunk_size = mddev->new_chunk; 4405 conf->level = mddev->new_level; 4406 if (conf->level == 6) 4407 conf->max_degraded = 2; 4408 else 4409 conf->max_degraded = 1; 4410 conf->algorithm = mddev->new_layout; 4411 conf->max_nr_stripes = NR_STRIPES; 4412 conf->reshape_progress = mddev->reshape_position; 4413 if (conf->reshape_progress != MaxSector) { 4414 conf->prev_chunk = mddev->chunk_size; 4415 conf->prev_algo = mddev->layout; 4416 } 4417 4418 memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + 4419 conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; 4420 if (grow_stripes(conf, conf->max_nr_stripes)) { 4421 printk(KERN_ERR 4422 "raid5: couldn't allocate %dkB for buffers\n", memory); 4423 goto abort; 4424 } else 4425 printk(KERN_INFO "raid5: allocated %dkB for %s\n", 4426 memory, mdname(mddev)); 4427 4428 conf->thread = md_register_thread(raid5d, mddev, "%s_raid5"); 4429 if (!conf->thread) { 4430 printk(KERN_ERR 4431 "raid5: couldn't allocate thread for %s\n", 4432 mdname(mddev)); 4433 goto abort; 4434 } 4435 4436 return conf; 4437 4438 abort: 4439 if (conf) { 4440 shrink_stripes(conf); 4441 safe_put_page(conf->spare_page); 4442 kfree(conf->disks); 4443 kfree(conf->stripe_hashtbl); 4444 kfree(conf); 4445 return ERR_PTR(-EIO); 4446 } else 4447 return ERR_PTR(-ENOMEM); 4448 } 4449 4450 static int run(mddev_t *mddev) 4451 { 4452 raid5_conf_t *conf; 4453 int working_disks = 0; 4454 mdk_rdev_t *rdev; 4455 4456 if (mddev->reshape_position != MaxSector) { 4457 /* Check that we can continue the reshape. 4458 * Currently only disks can change, it must 4459 * increase, and we must be past the point where 4460 * a stripe over-writes itself 4461 */ 4462 sector_t here_new, here_old; 4463 int old_disks; 4464 int max_degraded = (mddev->level == 6 ? 2 : 1); 4465 4466 if (mddev->new_level != mddev->level) { 4467 printk(KERN_ERR "raid5: %s: unsupported reshape " 4468 "required - aborting.\n", 4469 mdname(mddev)); 4470 return -EINVAL; 4471 } 4472 old_disks = mddev->raid_disks - mddev->delta_disks; 4473 /* reshape_position must be on a new-stripe boundary, and one 4474 * further up in new geometry must map after here in old 4475 * geometry. 
4476 */ 4477 here_new = mddev->reshape_position; 4478 if (sector_div(here_new, (mddev->new_chunk>>9)* 4479 (mddev->raid_disks - max_degraded))) { 4480 printk(KERN_ERR "raid5: reshape_position not " 4481 "on a stripe boundary\n"); 4482 return -EINVAL; 4483 } 4484 /* here_new is the stripe we will write to */ 4485 here_old = mddev->reshape_position; 4486 sector_div(here_old, (mddev->chunk_size>>9)* 4487 (old_disks-max_degraded)); 4488 /* here_old is the first stripe that we might need to read 4489 * from */ 4490 if (here_new >= here_old) { 4491 /* Reading from the same stripe as writing to - bad */ 4492 printk(KERN_ERR "raid5: reshape_position too early for " 4493 "auto-recovery - aborting.\n"); 4494 return -EINVAL; 4495 } 4496 printk(KERN_INFO "raid5: reshape will continue\n"); 4497 /* OK, we should be able to continue; */ 4498 } else { 4499 BUG_ON(mddev->level != mddev->new_level); 4500 BUG_ON(mddev->layout != mddev->new_layout); 4501 BUG_ON(mddev->chunk_size != mddev->new_chunk); 4502 BUG_ON(mddev->delta_disks != 0); 4503 } 4504 4505 if (mddev->private == NULL) 4506 conf = setup_conf(mddev); 4507 else 4508 conf = mddev->private; 4509 4510 if (IS_ERR(conf)) 4511 return PTR_ERR(conf); 4512 4513 mddev->thread = conf->thread; 4514 conf->thread = NULL; 4515 mddev->private = conf; 4516 4517 /* 4518 * 0 for a fully functional array, 1 or 2 for a degraded array. 4519 */ 4520 list_for_each_entry(rdev, &mddev->disks, same_set) 4521 if (rdev->raid_disk >= 0 && 4522 test_bit(In_sync, &rdev->flags)) 4523 working_disks++; 4524 4525 mddev->degraded = conf->raid_disks - working_disks; 4526 4527 if (mddev->degraded > conf->max_degraded) { 4528 printk(KERN_ERR "raid5: not enough operational devices for %s" 4529 " (%d/%d failed)\n", 4530 mdname(mddev), mddev->degraded, conf->raid_disks); 4531 goto abort; 4532 } 4533 4534 /* device size must be a multiple of chunk size */ 4535 mddev->dev_sectors &= ~(mddev->chunk_size / 512 - 1); 4536 mddev->resync_max_sectors = mddev->dev_sectors; 4537 4538 if (mddev->degraded > 0 && 4539 mddev->recovery_cp != MaxSector) { 4540 if (mddev->ok_start_degraded) 4541 printk(KERN_WARNING 4542 "raid5: starting dirty degraded array: %s" 4543 "- data corruption possible.\n", 4544 mdname(mddev)); 4545 else { 4546 printk(KERN_ERR 4547 "raid5: cannot start dirty degraded array for %s\n", 4548 mdname(mddev)); 4549 goto abort; 4550 } 4551 } 4552 4553 if (mddev->degraded == 0) 4554 printk("raid5: raid level %d set %s active with %d out of %d" 4555 " devices, algorithm %d\n", conf->level, mdname(mddev), 4556 mddev->raid_disks-mddev->degraded, mddev->raid_disks, 4557 mddev->new_layout); 4558 else 4559 printk(KERN_ALERT "raid5: raid level %d set %s active with %d" 4560 " out of %d devices, algorithm %d\n", conf->level, 4561 mdname(mddev), mddev->raid_disks - mddev->degraded, 4562 mddev->raid_disks, mddev->new_layout); 4563 4564 print_raid5_conf(conf); 4565 4566 if (conf->reshape_progress != MaxSector) { 4567 printk("...ok start reshape thread\n"); 4568 conf->reshape_safe = conf->reshape_progress; 4569 atomic_set(&conf->reshape_stripes, 0); 4570 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4571 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 4572 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 4573 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 4574 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 4575 "%s_reshape"); 4576 } 4577 4578 /* read-ahead size must cover two whole stripes, which is 4579 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices 4580 */ 4581 { 
4582 int data_disks = conf->previous_raid_disks - conf->max_degraded; 4583 int stripe = data_disks * 4584 (mddev->chunk_size / PAGE_SIZE); 4585 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 4586 mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 4587 } 4588 4589 /* Ok, everything is just fine now */ 4590 if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group)) 4591 printk(KERN_WARNING 4592 "raid5: failed to create sysfs attributes for %s\n", 4593 mdname(mddev)); 4594 4595 mddev->queue->queue_lock = &conf->device_lock; 4596 4597 mddev->queue->unplug_fn = raid5_unplug_device; 4598 mddev->queue->backing_dev_info.congested_data = mddev; 4599 mddev->queue->backing_dev_info.congested_fn = raid5_congested; 4600 4601 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); 4602 4603 blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec); 4604 4605 return 0; 4606 abort: 4607 md_unregister_thread(mddev->thread); 4608 mddev->thread = NULL; 4609 if (conf) { 4610 shrink_stripes(conf); 4611 print_raid5_conf(conf); 4612 safe_put_page(conf->spare_page); 4613 kfree(conf->disks); 4614 kfree(conf->stripe_hashtbl); 4615 kfree(conf); 4616 } 4617 mddev->private = NULL; 4618 printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev)); 4619 return -EIO; 4620 } 4621 4622 4623 4624 static int stop(mddev_t *mddev) 4625 { 4626 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 4627 4628 md_unregister_thread(mddev->thread); 4629 mddev->thread = NULL; 4630 shrink_stripes(conf); 4631 kfree(conf->stripe_hashtbl); 4632 mddev->queue->backing_dev_info.congested_fn = NULL; 4633 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 4634 sysfs_remove_group(&mddev->kobj, &raid5_attrs_group); 4635 kfree(conf->disks); 4636 kfree(conf); 4637 mddev->private = NULL; 4638 return 0; 4639 } 4640 4641 #ifdef DEBUG 4642 static void print_sh(struct seq_file *seq, struct stripe_head *sh) 4643 { 4644 int i; 4645 4646 seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n", 4647 (unsigned long long)sh->sector, sh->pd_idx, sh->state); 4648 seq_printf(seq, "sh %llu, count %d.\n", 4649 (unsigned long long)sh->sector, atomic_read(&sh->count)); 4650 seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector); 4651 for (i = 0; i < sh->disks; i++) { 4652 seq_printf(seq, "(cache%d: %p %ld) ", 4653 i, sh->dev[i].page, sh->dev[i].flags); 4654 } 4655 seq_printf(seq, "\n"); 4656 } 4657 4658 static void printall(struct seq_file *seq, raid5_conf_t *conf) 4659 { 4660 struct stripe_head *sh; 4661 struct hlist_node *hn; 4662 int i; 4663 4664 spin_lock_irq(&conf->device_lock); 4665 for (i = 0; i < NR_HASH; i++) { 4666 hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) { 4667 if (sh->raid_conf != conf) 4668 continue; 4669 print_sh(seq, sh); 4670 } 4671 } 4672 spin_unlock_irq(&conf->device_lock); 4673 } 4674 #endif 4675 4676 static void status(struct seq_file *seq, mddev_t *mddev) 4677 { 4678 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 4679 int i; 4680 4681 seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout); 4682 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); 4683 for (i = 0; i < conf->raid_disks; i++) 4684 seq_printf (seq, "%s", 4685 conf->disks[i].rdev && 4686 test_bit(In_sync, &conf->disks[i].rdev->flags) ? 
"U" : "_"); 4687 seq_printf (seq, "]"); 4688 #ifdef DEBUG 4689 seq_printf (seq, "\n"); 4690 printall(seq, conf); 4691 #endif 4692 } 4693 4694 static void print_raid5_conf (raid5_conf_t *conf) 4695 { 4696 int i; 4697 struct disk_info *tmp; 4698 4699 printk("RAID5 conf printout:\n"); 4700 if (!conf) { 4701 printk("(conf==NULL)\n"); 4702 return; 4703 } 4704 printk(" --- rd:%d wd:%d\n", conf->raid_disks, 4705 conf->raid_disks - conf->mddev->degraded); 4706 4707 for (i = 0; i < conf->raid_disks; i++) { 4708 char b[BDEVNAME_SIZE]; 4709 tmp = conf->disks + i; 4710 if (tmp->rdev) 4711 printk(" disk %d, o:%d, dev:%s\n", 4712 i, !test_bit(Faulty, &tmp->rdev->flags), 4713 bdevname(tmp->rdev->bdev,b)); 4714 } 4715 } 4716 4717 static int raid5_spare_active(mddev_t *mddev) 4718 { 4719 int i; 4720 raid5_conf_t *conf = mddev->private; 4721 struct disk_info *tmp; 4722 4723 for (i = 0; i < conf->raid_disks; i++) { 4724 tmp = conf->disks + i; 4725 if (tmp->rdev 4726 && !test_bit(Faulty, &tmp->rdev->flags) 4727 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { 4728 unsigned long flags; 4729 spin_lock_irqsave(&conf->device_lock, flags); 4730 mddev->degraded--; 4731 spin_unlock_irqrestore(&conf->device_lock, flags); 4732 } 4733 } 4734 print_raid5_conf(conf); 4735 return 0; 4736 } 4737 4738 static int raid5_remove_disk(mddev_t *mddev, int number) 4739 { 4740 raid5_conf_t *conf = mddev->private; 4741 int err = 0; 4742 mdk_rdev_t *rdev; 4743 struct disk_info *p = conf->disks + number; 4744 4745 print_raid5_conf(conf); 4746 rdev = p->rdev; 4747 if (rdev) { 4748 if (number >= conf->raid_disks && 4749 conf->reshape_progress == MaxSector) 4750 clear_bit(In_sync, &rdev->flags); 4751 4752 if (test_bit(In_sync, &rdev->flags) || 4753 atomic_read(&rdev->nr_pending)) { 4754 err = -EBUSY; 4755 goto abort; 4756 } 4757 /* Only remove non-faulty devices if recovery 4758 * isn't possible. 4759 */ 4760 if (!test_bit(Faulty, &rdev->flags) && 4761 mddev->degraded <= conf->max_degraded && 4762 number < conf->raid_disks) { 4763 err = -EBUSY; 4764 goto abort; 4765 } 4766 p->rdev = NULL; 4767 synchronize_rcu(); 4768 if (atomic_read(&rdev->nr_pending)) { 4769 /* lost the race, try later */ 4770 err = -EBUSY; 4771 p->rdev = rdev; 4772 } 4773 } 4774 abort: 4775 4776 print_raid5_conf(conf); 4777 return err; 4778 } 4779 4780 static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) 4781 { 4782 raid5_conf_t *conf = mddev->private; 4783 int err = -EEXIST; 4784 int disk; 4785 struct disk_info *p; 4786 int first = 0; 4787 int last = conf->raid_disks - 1; 4788 4789 if (mddev->degraded > conf->max_degraded) 4790 /* no point adding a device */ 4791 return -EINVAL; 4792 4793 if (rdev->raid_disk >= 0) 4794 first = last = rdev->raid_disk; 4795 4796 /* 4797 * find the disk ... but prefer rdev->saved_raid_disk 4798 * if possible. 4799 */ 4800 if (rdev->saved_raid_disk >= 0 && 4801 rdev->saved_raid_disk >= first && 4802 conf->disks[rdev->saved_raid_disk].rdev == NULL) 4803 disk = rdev->saved_raid_disk; 4804 else 4805 disk = first; 4806 for ( ; disk <= last ; disk++) 4807 if ((p=conf->disks + disk)->rdev == NULL) { 4808 clear_bit(In_sync, &rdev->flags); 4809 rdev->raid_disk = disk; 4810 err = 0; 4811 if (rdev->saved_raid_disk != disk) 4812 conf->fullsync = 1; 4813 rcu_assign_pointer(p->rdev, rdev); 4814 break; 4815 } 4816 print_raid5_conf(conf); 4817 return err; 4818 } 4819 4820 static int raid5_resize(mddev_t *mddev, sector_t sectors) 4821 { 4822 /* no resync is happening, and there is enough space 4823 * on all devices, so we can resize. 
* We need to make sure resync covers any new space. 4825 * If the array is shrinking we should possibly wait until 4826 * any io in the removed space completes, but it hardly seems 4827 * worth it. 4828 */ 4829 sectors &= ~((sector_t)mddev->chunk_size/512 - 1); 4830 md_set_array_sectors(mddev, raid5_size(mddev, sectors, 4831 mddev->raid_disks)); 4832 if (mddev->array_sectors > 4833 raid5_size(mddev, sectors, mddev->raid_disks)) 4834 return -EINVAL; 4835 set_capacity(mddev->gendisk, mddev->array_sectors); 4836 mddev->changed = 1; 4837 if (sectors > mddev->dev_sectors && mddev->recovery_cp == MaxSector) { 4838 mddev->recovery_cp = mddev->dev_sectors; 4839 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4840 } 4841 mddev->dev_sectors = sectors; 4842 mddev->resync_max_sectors = sectors; 4843 return 0; 4844 } 4845 4846 static int raid5_check_reshape(mddev_t *mddev) 4847 { 4848 raid5_conf_t *conf = mddev_to_conf(mddev); 4849 4850 if (mddev->delta_disks == 0 && 4851 mddev->new_layout == mddev->layout && 4852 mddev->new_chunk == mddev->chunk_size) 4853 return -EINVAL; /* nothing to do */ 4854 if (mddev->bitmap) 4855 /* Cannot grow a bitmap yet */ 4856 return -EBUSY; 4857 if (mddev->degraded > conf->max_degraded) 4858 return -EINVAL; 4859 if (mddev->delta_disks < 0) { 4860 /* We might be able to shrink, but the devices must 4861 * be made bigger first. 4862 * For raid6, 4 is the minimum size. 4863 * Otherwise 2 is the minimum 4864 */ 4865 int min = 2; 4866 if (mddev->level == 6) 4867 min = 4; 4868 if (mddev->raid_disks + mddev->delta_disks < min) 4869 return -EINVAL; 4870 } 4871 4872 /* Can only proceed if there are plenty of stripe_heads. 4873 * We need a minimum of one full stripe, and for sensible progress 4874 * it is best to have about 4 times that. 4875 * If we require 4 times, then the default 256 4K stripe_heads will 4876 * allow for chunk sizes up to 256K, which is probably OK. 4877 * If the chunk size is greater, user-space should request more 4878 * stripe_heads first. 4879 */ 4880 if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes || 4881 (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) { 4882 printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n", 4883 (max(mddev->chunk_size, mddev->new_chunk) 4884 / STRIPE_SIZE)*4); 4885 return -ENOSPC; 4886 } 4887 4888 return resize_stripes(conf, conf->raid_disks + mddev->delta_disks); 4889 } 4890 4891 static int raid5_start_reshape(mddev_t *mddev) 4892 { 4893 raid5_conf_t *conf = mddev_to_conf(mddev); 4894 mdk_rdev_t *rdev; 4895 int spares = 0; 4896 int added_devices = 0; 4897 unsigned long flags; 4898 4899 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4900 return -EBUSY; 4901 4902 list_for_each_entry(rdev, &mddev->disks, same_set) 4903 if (rdev->raid_disk < 0 && 4904 !test_bit(Faulty, &rdev->flags)) 4905 spares++; 4906 4907 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) 4908 /* Not enough devices even to make a degraded array 4909 * of that size 4910 */ 4911 return -EINVAL; 4912 4913 /* Refuse to reduce size of the array. Any reductions in 4914 * array size must be through explicit setting of array_size 4915 * attribute.
4916 */ 4917 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks) 4918 < mddev->array_sectors) { 4919 printk(KERN_ERR "md: %s: array size must be reduced " 4920 "before number of disks\n", mdname(mddev)); 4921 return -EINVAL; 4922 } 4923 4924 atomic_set(&conf->reshape_stripes, 0); 4925 spin_lock_irq(&conf->device_lock); 4926 conf->previous_raid_disks = conf->raid_disks; 4927 conf->raid_disks += mddev->delta_disks; 4928 conf->prev_chunk = conf->chunk_size; 4929 conf->chunk_size = mddev->new_chunk; 4930 conf->prev_algo = conf->algorithm; 4931 conf->algorithm = mddev->new_layout; 4932 if (mddev->delta_disks < 0) 4933 conf->reshape_progress = raid5_size(mddev, 0, 0); 4934 else 4935 conf->reshape_progress = 0; 4936 conf->reshape_safe = conf->reshape_progress; 4937 conf->generation++; 4938 spin_unlock_irq(&conf->device_lock); 4939 4940 /* Add some new drives, as many as will fit. 4941 * We know there are enough to make the newly sized array work. 4942 */ 4943 list_for_each_entry(rdev, &mddev->disks, same_set) 4944 if (rdev->raid_disk < 0 && 4945 !test_bit(Faulty, &rdev->flags)) { 4946 if (raid5_add_disk(mddev, rdev) == 0) { 4947 char nm[20]; 4948 set_bit(In_sync, &rdev->flags); 4949 added_devices++; 4950 rdev->recovery_offset = 0; 4951 sprintf(nm, "rd%d", rdev->raid_disk); 4952 if (sysfs_create_link(&mddev->kobj, 4953 &rdev->kobj, nm)) 4954 printk(KERN_WARNING 4955 "raid5: failed to create " 4956 " link %s for %s\n", 4957 nm, mdname(mddev)); 4958 } else 4959 break; 4960 } 4961 4962 if (mddev->delta_disks > 0) { 4963 spin_lock_irqsave(&conf->device_lock, flags); 4964 mddev->degraded = (conf->raid_disks - conf->previous_raid_disks) 4965 - added_devices; 4966 spin_unlock_irqrestore(&conf->device_lock, flags); 4967 } 4968 mddev->raid_disks = conf->raid_disks; 4969 mddev->reshape_position = 0; 4970 set_bit(MD_CHANGE_DEVS, &mddev->flags); 4971 4972 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4973 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 4974 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 4975 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 4976 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 4977 "%s_reshape"); 4978 if (!mddev->sync_thread) { 4979 mddev->recovery = 0; 4980 spin_lock_irq(&conf->device_lock); 4981 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; 4982 conf->reshape_progress = MaxSector; 4983 spin_unlock_irq(&conf->device_lock); 4984 return -EAGAIN; 4985 } 4986 conf->reshape_checkpoint = jiffies; 4987 md_wakeup_thread(mddev->sync_thread); 4988 md_new_event(mddev); 4989 return 0; 4990 } 4991 4992 /* This is called from the reshape thread and should make any 4993 * changes needed in 'conf' 4994 */ 4995 static void end_reshape(raid5_conf_t *conf) 4996 { 4997 4998 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { 4999 5000 spin_lock_irq(&conf->device_lock); 5001 conf->previous_raid_disks = conf->raid_disks; 5002 conf->reshape_progress = MaxSector; 5003 spin_unlock_irq(&conf->device_lock); 5004 wake_up(&conf->wait_for_overlap); 5005 5006 /* read-ahead size must cover two whole stripes, which is 5007 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices 5008 */ 5009 { 5010 int data_disks = conf->raid_disks - conf->max_degraded; 5011 int stripe = data_disks * (conf->chunk_size 5012 / PAGE_SIZE); 5013 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 5014 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 5015 } 5016 } 5017 } 5018 5019 /* This is called from the raid5d thread with mddev_lock held. 
5020 * It makes config changes to the device. 5021 */ 5022 static void raid5_finish_reshape(mddev_t *mddev) 5023 { 5024 struct block_device *bdev; 5025 raid5_conf_t *conf = mddev_to_conf(mddev); 5026 5027 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 5028 5029 if (mddev->delta_disks > 0) { 5030 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); 5031 set_capacity(mddev->gendisk, mddev->array_sectors); 5032 mddev->changed = 1; 5033 5034 bdev = bdget_disk(mddev->gendisk, 0); 5035 if (bdev) { 5036 mutex_lock(&bdev->bd_inode->i_mutex); 5037 i_size_write(bdev->bd_inode, 5038 (loff_t)mddev->array_sectors << 9); 5039 mutex_unlock(&bdev->bd_inode->i_mutex); 5040 bdput(bdev); 5041 } 5042 } else { 5043 int d; 5044 mddev->degraded = conf->raid_disks; 5045 for (d = 0; d < conf->raid_disks ; d++) 5046 if (conf->disks[d].rdev && 5047 test_bit(In_sync, 5048 &conf->disks[d].rdev->flags)) 5049 mddev->degraded--; 5050 for (d = conf->raid_disks ; 5051 d < conf->raid_disks - mddev->delta_disks; 5052 d++) 5053 raid5_remove_disk(mddev, d); 5054 } 5055 mddev->layout = conf->algorithm; 5056 mddev->chunk_size = conf->chunk_size; 5057 mddev->reshape_position = MaxSector; 5058 mddev->delta_disks = 0; 5059 } 5060 } 5061 5062 static void raid5_quiesce(mddev_t *mddev, int state) 5063 { 5064 raid5_conf_t *conf = mddev_to_conf(mddev); 5065 5066 switch(state) { 5067 case 2: /* resume for a suspend */ 5068 wake_up(&conf->wait_for_overlap); 5069 break; 5070 5071 case 1: /* stop all writes */ 5072 spin_lock_irq(&conf->device_lock); 5073 conf->quiesce = 1; 5074 wait_event_lock_irq(conf->wait_for_stripe, 5075 atomic_read(&conf->active_stripes) == 0 && 5076 atomic_read(&conf->active_aligned_reads) == 0, 5077 conf->device_lock, /* nothing */); 5078 spin_unlock_irq(&conf->device_lock); 5079 break; 5080 5081 case 0: /* re-enable writes */ 5082 spin_lock_irq(&conf->device_lock); 5083 conf->quiesce = 0; 5084 wake_up(&conf->wait_for_stripe); 5085 wake_up(&conf->wait_for_overlap); 5086 spin_unlock_irq(&conf->device_lock); 5087 break; 5088 } 5089 } 5090 5091 5092 static void *raid5_takeover_raid1(mddev_t *mddev) 5093 { 5094 int chunksect; 5095 5096 if (mddev->raid_disks != 2 || 5097 mddev->degraded > 1) 5098 return ERR_PTR(-EINVAL); 5099 5100 /* Should check if there are write-behind devices? 
*/ 5101 5102 chunksect = 64*2; /* 64K by default */ 5103 5104 /* The array must be an exact multiple of chunksize */ 5105 while (chunksect && (mddev->array_sectors & (chunksect-1))) 5106 chunksect >>= 1; 5107 5108 if ((chunksect<<9) < STRIPE_SIZE) 5109 /* array size does not allow a suitable chunk size */ 5110 return ERR_PTR(-EINVAL); 5111 5112 mddev->new_level = 5; 5113 mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC; 5114 mddev->new_chunk = chunksect << 9; 5115 5116 return setup_conf(mddev); 5117 } 5118 5119 static void *raid5_takeover_raid6(mddev_t *mddev) 5120 { 5121 int new_layout; 5122 5123 switch (mddev->layout) { 5124 case ALGORITHM_LEFT_ASYMMETRIC_6: 5125 new_layout = ALGORITHM_LEFT_ASYMMETRIC; 5126 break; 5127 case ALGORITHM_RIGHT_ASYMMETRIC_6: 5128 new_layout = ALGORITHM_RIGHT_ASYMMETRIC; 5129 break; 5130 case ALGORITHM_LEFT_SYMMETRIC_6: 5131 new_layout = ALGORITHM_LEFT_SYMMETRIC; 5132 break; 5133 case ALGORITHM_RIGHT_SYMMETRIC_6: 5134 new_layout = ALGORITHM_RIGHT_SYMMETRIC; 5135 break; 5136 case ALGORITHM_PARITY_0_6: 5137 new_layout = ALGORITHM_PARITY_0; 5138 break; 5139 case ALGORITHM_PARITY_N: 5140 new_layout = ALGORITHM_PARITY_N; 5141 break; 5142 default: 5143 return ERR_PTR(-EINVAL); 5144 } 5145 mddev->new_level = 5; 5146 mddev->new_layout = new_layout; 5147 mddev->delta_disks = -1; 5148 mddev->raid_disks -= 1; 5149 return setup_conf(mddev); 5150 } 5151 5152 5153 static int raid5_reconfig(mddev_t *mddev, int new_layout, int new_chunk) 5154 { 5155 /* For a 2-drive array, the layout and chunk size can be changed 5156 * immediately, as no restriping is needed. 5157 * For larger arrays we record the new value - after validation 5158 * to be used by a reshape pass. 5159 */ 5160 raid5_conf_t *conf = mddev_to_conf(mddev); 5161 5162 if (new_layout >= 0 && !algorithm_valid_raid5(new_layout)) 5163 return -EINVAL; 5164 if (new_chunk > 0) { 5165 if (new_chunk & (new_chunk-1)) 5166 /* not a power of 2 */ 5167 return -EINVAL; 5168 if (new_chunk < PAGE_SIZE) 5169 return -EINVAL; 5170 if (mddev->array_sectors & ((new_chunk>>9)-1)) 5171 /* not factor of array size */ 5172 return -EINVAL; 5173 } 5174 5175 /* They look valid */ 5176 5177 if (mddev->raid_disks == 2) { 5178 5179 if (new_layout >= 0) { 5180 conf->algorithm = new_layout; 5181 mddev->layout = mddev->new_layout = new_layout; 5182 } 5183 if (new_chunk > 0) { 5184 conf->chunk_size = new_chunk; 5185 mddev->chunk_size = mddev->new_chunk = new_chunk; 5186 } 5187 set_bit(MD_CHANGE_DEVS, &mddev->flags); 5188 md_wakeup_thread(mddev->thread); 5189 } else { 5190 if (new_layout >= 0) 5191 mddev->new_layout = new_layout; 5192 if (new_chunk > 0) 5193 mddev->new_chunk = new_chunk; 5194 } 5195 return 0; 5196 } 5197 5198 static int raid6_reconfig(mddev_t *mddev, int new_layout, int new_chunk) 5199 { 5200 if (new_layout >= 0 && !algorithm_valid_raid6(new_layout)) 5201 return -EINVAL; 5202 if (new_chunk > 0) { 5203 if (new_chunk & (new_chunk-1)) 5204 /* not a power of 2 */ 5205 return -EINVAL; 5206 if (new_chunk < PAGE_SIZE) 5207 return -EINVAL; 5208 if (mddev->array_sectors & ((new_chunk>>9)-1)) 5209 /* not factor of array size */ 5210 return -EINVAL; 5211 } 5212 5213 /* They look valid */ 5214 5215 if (new_layout >= 0) 5216 mddev->new_layout = new_layout; 5217 if (new_chunk > 0) 5218 mddev->new_chunk = new_chunk; 5219 5220 return 0; 5221 } 5222 5223 static void *raid5_takeover(mddev_t *mddev) 5224 { 5225 /* raid5 can take over: 5226 * raid0 - if all devices are the same - make it a raid4 layout 5227 * raid1 - if there are two drives.
We need to know the chunk size 5228 * raid4 - trivial - just use a raid4 layout. 5229 * raid6 - Providing it is a *_6 layout 5230 * 5231 * For now, just do raid1 5232 */ 5233 5234 if (mddev->level == 1) 5235 return raid5_takeover_raid1(mddev); 5236 if (mddev->level == 4) { 5237 mddev->new_layout = ALGORITHM_PARITY_N; 5238 mddev->new_level = 5; 5239 return setup_conf(mddev); 5240 } 5241 if (mddev->level == 6) 5242 return raid5_takeover_raid6(mddev); 5243 5244 return ERR_PTR(-EINVAL); 5245 } 5246 5247 5248 static struct mdk_personality raid5_personality; 5249 5250 static void *raid6_takeover(mddev_t *mddev) 5251 { 5252 /* Currently can only take over a raid5. We map the 5253 * personality to an equivalent raid6 personality 5254 * with the Q block at the end. 5255 */ 5256 int new_layout; 5257 5258 if (mddev->pers != &raid5_personality) 5259 return ERR_PTR(-EINVAL); 5260 if (mddev->degraded > 1) 5261 return ERR_PTR(-EINVAL); 5262 if (mddev->raid_disks > 253) 5263 return ERR_PTR(-EINVAL); 5264 if (mddev->raid_disks < 3) 5265 return ERR_PTR(-EINVAL); 5266 5267 switch (mddev->layout) { 5268 case ALGORITHM_LEFT_ASYMMETRIC: 5269 new_layout = ALGORITHM_LEFT_ASYMMETRIC_6; 5270 break; 5271 case ALGORITHM_RIGHT_ASYMMETRIC: 5272 new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6; 5273 break; 5274 case ALGORITHM_LEFT_SYMMETRIC: 5275 new_layout = ALGORITHM_LEFT_SYMMETRIC_6; 5276 break; 5277 case ALGORITHM_RIGHT_SYMMETRIC: 5278 new_layout = ALGORITHM_RIGHT_SYMMETRIC_6; 5279 break; 5280 case ALGORITHM_PARITY_0: 5281 new_layout = ALGORITHM_PARITY_0_6; 5282 break; 5283 case ALGORITHM_PARITY_N: 5284 new_layout = ALGORITHM_PARITY_N; 5285 break; 5286 default: 5287 return ERR_PTR(-EINVAL); 5288 } 5289 mddev->new_level = 6; 5290 mddev->new_layout = new_layout; 5291 mddev->delta_disks = 1; 5292 mddev->raid_disks += 1; 5293 return setup_conf(mddev); 5294 } 5295 5296 5297 static struct mdk_personality raid6_personality = 5298 { 5299 .name = "raid6", 5300 .level = 6, 5301 .owner = THIS_MODULE, 5302 .make_request = make_request, 5303 .run = run, 5304 .stop = stop, 5305 .status = status, 5306 .error_handler = error, 5307 .hot_add_disk = raid5_add_disk, 5308 .hot_remove_disk= raid5_remove_disk, 5309 .spare_active = raid5_spare_active, 5310 .sync_request = sync_request, 5311 .resize = raid5_resize, 5312 .size = raid5_size, 5313 .check_reshape = raid5_check_reshape, 5314 .start_reshape = raid5_start_reshape, 5315 .finish_reshape = raid5_finish_reshape, 5316 .quiesce = raid5_quiesce, 5317 .takeover = raid6_takeover, 5318 .reconfig = raid6_reconfig, 5319 }; 5320 static struct mdk_personality raid5_personality = 5321 { 5322 .name = "raid5", 5323 .level = 5, 5324 .owner = THIS_MODULE, 5325 .make_request = make_request, 5326 .run = run, 5327 .stop = stop, 5328 .status = status, 5329 .error_handler = error, 5330 .hot_add_disk = raid5_add_disk, 5331 .hot_remove_disk= raid5_remove_disk, 5332 .spare_active = raid5_spare_active, 5333 .sync_request = sync_request, 5334 .resize = raid5_resize, 5335 .size = raid5_size, 5336 .check_reshape = raid5_check_reshape, 5337 .start_reshape = raid5_start_reshape, 5338 .finish_reshape = raid5_finish_reshape, 5339 .quiesce = raid5_quiesce, 5340 .takeover = raid5_takeover, 5341 .reconfig = raid5_reconfig, 5342 }; 5343 5344 static struct mdk_personality raid4_personality = 5345 { 5346 .name = "raid4", 5347 .level = 4, 5348 .owner = THIS_MODULE, 5349 .make_request = make_request, 5350 .run = run, 5351 .stop = stop, 5352 .status = status, 5353 .error_handler = error, 5354 .hot_add_disk = raid5_add_disk, 5355 
.hot_remove_disk= raid5_remove_disk, 5356 .spare_active = raid5_spare_active, 5357 .sync_request = sync_request, 5358 .resize = raid5_resize, 5359 .size = raid5_size, 5360 .check_reshape = raid5_check_reshape, 5361 .start_reshape = raid5_start_reshape, 5362 .finish_reshape = raid5_finish_reshape, 5363 .quiesce = raid5_quiesce, 5364 }; 5365 5366 static int __init raid5_init(void) 5367 { 5368 register_md_personality(&raid6_personality); 5369 register_md_personality(&raid5_personality); 5370 register_md_personality(&raid4_personality); 5371 return 0; 5372 } 5373 5374 static void raid5_exit(void) 5375 { 5376 unregister_md_personality(&raid6_personality); 5377 unregister_md_personality(&raid5_personality); 5378 unregister_md_personality(&raid4_personality); 5379 } 5380 5381 module_init(raid5_init); 5382 module_exit(raid5_exit); 5383 MODULE_LICENSE("GPL"); 5384 MODULE_ALIAS("md-personality-4"); /* RAID5 */ 5385 MODULE_ALIAS("md-raid5"); 5386 MODULE_ALIAS("md-raid4"); 5387 MODULE_ALIAS("md-level-5"); 5388 MODULE_ALIAS("md-level-4"); 5389 MODULE_ALIAS("md-personality-8"); /* RAID6 */ 5390 MODULE_ALIAS("md-raid6"); 5391 MODULE_ALIAS("md-level-6"); 5392 5393 /* This used to be two separate modules, they were: */ 5394 MODULE_ALIAS("raid5"); 5395 MODULE_ALIAS("raid6"); 5396