/*
 * raid5.c : Multiple Devices driver for Linux
 *	Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *	Copyright (C) 1999, 2000 Ingo Molnar
 *	Copyright (C) 2002, 2003 H. Peter Anvin
 *
 * RAID-4/5/6 management functions.
 * Thanks to Penguin Computing for making the RAID-6 development possible
 * by donating a test server!
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * BITMAP UNPLUGGING:
 *
 * The sequencing for updating the bitmap reliably is a little
 * subtle (and I got it wrong the first time) so it deserves some
 * explanation.
 *
 * We group bitmap updates into batches.  Each batch has a number.
 * We may write out several batches at once, but that isn't very important.
 * conf->bm_write is the number of the last batch successfully written.
 * conf->bm_flush is the number of the last batch that was closed to
 *    new additions.
 * When we discover that we will need to write to any block in a stripe
 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
 * the number of the batch it will be in. This is bm_flush+1.
 * When we are ready to do a write, if that batch hasn't been written yet,
 *   we plug the array and queue the stripe for later.
 * When an unplug happens, we increment bm_flush, thus closing the current
 *   batch.
 * When we notice that bm_flush > bm_write, we write out all pending updates
 * to the bitmap, and advance bm_write to where bm_flush was.
 * This may occasionally write a bit out twice, but is sure never to
 * miss any bits.
 */
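/*
 * Illustrative walk-through of the batching above (example numbers only,
 * not taken from the code): suppose bm_write == bm_flush == 5.  A write
 * arriving now is tagged with sh->bm_seq = 6 (bm_flush+1) and its stripe
 * waits on bitmap_list.  The next unplug closes the batch (bm_flush
 * becomes 6); once bm_flush > bm_write the pending bitmap updates are
 * written out, bm_write is advanced to 6, and only then are those
 * stripes released to write their data.
 */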
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/bitops.h>
#include <linux/kthread.h>
#include <asm/atomic.h>
#include "raid6.h"

#include <linux/raid/bitmap.h>

/*
 * Stripe cache
 */

#define NR_STRIPES		256
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
#define	IO_THRESHOLD		1
#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK		(NR_HASH - 1)

#define stripe_hash(conf, sect)	(&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))

/* bio's attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap.  There may be several bio's per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This macro is used to determine the 'next' bio in the list, given the sector
 * of the current stripe+device
 */
#define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
/*
 * The following can be used to debug the driver
 */
#define RAID5_DEBUG	0
#define RAID5_PARANOIA	1
#if RAID5_PARANOIA && defined(CONFIG_SMP)
# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
#else
# define CHECK_DEVLOCK()
#endif

#define PRINTK(x...) ((void)(RAID5_DEBUG && printk(x)))
#if RAID5_DEBUG
#define inline
#define __inline__
#endif

#if !RAID6_USE_EMPTY_ZERO_PAGE
/* In .bss so it's zeroed */
const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
#endif

static inline int raid6_next_disk(int disk, int raid_disks)
{
	disk++;
	return (disk < raid_disks) ? disk : 0;
}
static void print_raid5_conf (raid5_conf_t *conf);

static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
{
	if (atomic_dec_and_test(&sh->count)) {
		BUG_ON(!list_empty(&sh->lru));
		BUG_ON(atomic_read(&conf->active_stripes)==0);
		if (test_bit(STRIPE_HANDLE, &sh->state)) {
			if (test_bit(STRIPE_DELAYED, &sh->state)) {
				list_add_tail(&sh->lru, &conf->delayed_list);
				blk_plug_device(conf->mddev->queue);
			} else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
				   sh->bm_seq - conf->seq_write > 0) {
				list_add_tail(&sh->lru, &conf->bitmap_list);
				blk_plug_device(conf->mddev->queue);
			} else {
				clear_bit(STRIPE_BIT_DELAY, &sh->state);
				list_add_tail(&sh->lru, &conf->handle_list);
			}
			md_wakeup_thread(conf->mddev->thread);
		} else {
			if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
				atomic_dec(&conf->preread_active_stripes);
				if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
					md_wakeup_thread(conf->mddev->thread);
			}
			atomic_dec(&conf->active_stripes);
			if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
				list_add_tail(&sh->lru, &conf->inactive_list);
				wake_up(&conf->wait_for_stripe);
			}
		}
	}
}
static void release_stripe(struct stripe_head *sh)
{
	raid5_conf_t *conf = sh->raid_conf;
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);
	__release_stripe(conf, sh);
	spin_unlock_irqrestore(&conf->device_lock, flags);
}

static inline void remove_hash(struct stripe_head *sh)
{
	PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector);

	hlist_del_init(&sh->hash);
}

static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
{
	struct hlist_head *hp = stripe_hash(conf, sh->sector);

	PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector);

	CHECK_DEVLOCK();
	hlist_add_head(&sh->hash, hp);
}


/* find an idle stripe, make sure it is unhashed, and return it.
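 * The caller must hold conf->device_lock (CHECK_DEVLOCK below); returns
 * NULL when the inactive list is empty.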
*/ 170 static struct stripe_head *get_free_stripe(raid5_conf_t *conf) 171 { 172 struct stripe_head *sh = NULL; 173 struct list_head *first; 174 175 CHECK_DEVLOCK(); 176 if (list_empty(&conf->inactive_list)) 177 goto out; 178 first = conf->inactive_list.next; 179 sh = list_entry(first, struct stripe_head, lru); 180 list_del_init(first); 181 remove_hash(sh); 182 atomic_inc(&conf->active_stripes); 183 out: 184 return sh; 185 } 186 187 static void shrink_buffers(struct stripe_head *sh, int num) 188 { 189 struct page *p; 190 int i; 191 192 for (i=0; i<num ; i++) { 193 p = sh->dev[i].page; 194 if (!p) 195 continue; 196 sh->dev[i].page = NULL; 197 put_page(p); 198 } 199 } 200 201 static int grow_buffers(struct stripe_head *sh, int num) 202 { 203 int i; 204 205 for (i=0; i<num; i++) { 206 struct page *page; 207 208 if (!(page = alloc_page(GFP_KERNEL))) { 209 return 1; 210 } 211 sh->dev[i].page = page; 212 } 213 return 0; 214 } 215 216 static void raid5_build_block (struct stripe_head *sh, int i); 217 218 static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx, int disks) 219 { 220 raid5_conf_t *conf = sh->raid_conf; 221 int i; 222 223 BUG_ON(atomic_read(&sh->count) != 0); 224 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); 225 226 CHECK_DEVLOCK(); 227 PRINTK("init_stripe called, stripe %llu\n", 228 (unsigned long long)sh->sector); 229 230 remove_hash(sh); 231 232 sh->sector = sector; 233 sh->pd_idx = pd_idx; 234 sh->state = 0; 235 236 sh->disks = disks; 237 238 for (i = sh->disks; i--; ) { 239 struct r5dev *dev = &sh->dev[i]; 240 241 if (dev->toread || dev->towrite || dev->written || 242 test_bit(R5_LOCKED, &dev->flags)) { 243 printk("sector=%llx i=%d %p %p %p %d\n", 244 (unsigned long long)sh->sector, i, dev->toread, 245 dev->towrite, dev->written, 246 test_bit(R5_LOCKED, &dev->flags)); 247 BUG(); 248 } 249 dev->flags = 0; 250 raid5_build_block(sh, i); 251 } 252 insert_hash(conf, sh); 253 } 254 255 static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, int disks) 256 { 257 struct stripe_head *sh; 258 struct hlist_node *hn; 259 260 CHECK_DEVLOCK(); 261 PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector); 262 hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash) 263 if (sh->sector == sector && sh->disks == disks) 264 return sh; 265 PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector); 266 return NULL; 267 } 268 269 static void unplug_slaves(mddev_t *mddev); 270 static void raid5_unplug_device(request_queue_t *q); 271 272 static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, int disks, 273 int pd_idx, int noblock) 274 { 275 struct stripe_head *sh; 276 277 PRINTK("get_stripe, sector %llu\n", (unsigned long long)sector); 278 279 spin_lock_irq(&conf->device_lock); 280 281 do { 282 wait_event_lock_irq(conf->wait_for_stripe, 283 conf->quiesce == 0, 284 conf->device_lock, /* nothing */); 285 sh = __find_stripe(conf, sector, disks); 286 if (!sh) { 287 if (!conf->inactive_blocked) 288 sh = get_free_stripe(conf); 289 if (noblock && sh == NULL) 290 break; 291 if (!sh) { 292 conf->inactive_blocked = 1; 293 wait_event_lock_irq(conf->wait_for_stripe, 294 !list_empty(&conf->inactive_list) && 295 (atomic_read(&conf->active_stripes) 296 < (conf->max_nr_stripes *3/4) 297 || !conf->inactive_blocked), 298 conf->device_lock, 299 raid5_unplug_device(conf->mddev->queue) 300 ); 301 conf->inactive_blocked = 0; 302 } else 303 init_stripe(sh, sector, pd_idx, disks); 304 } else { 305 if (atomic_read(&sh->count)) { 306 
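				/* someone else already holds a reference:
				 * an active stripe must never sit on an lru list
				 */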
				BUG_ON(!list_empty(&sh->lru));
			} else {
				if (!test_bit(STRIPE_HANDLE, &sh->state))
					atomic_inc(&conf->active_stripes);
				if (list_empty(&sh->lru) &&
				    !test_bit(STRIPE_EXPANDING, &sh->state))
					BUG();
				list_del_init(&sh->lru);
			}
		}
	} while (sh == NULL);

	if (sh)
		atomic_inc(&sh->count);

	spin_unlock_irq(&conf->device_lock);
	return sh;
}

static int grow_one_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;
	sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
	if (!sh)
		return 0;
	memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev));
	sh->raid_conf = conf;
	spin_lock_init(&sh->lock);

	if (grow_buffers(sh, conf->raid_disks)) {
		shrink_buffers(sh, conf->raid_disks);
		kmem_cache_free(conf->slab_cache, sh);
		return 0;
	}
	sh->disks = conf->raid_disks;
	/* we just created an active stripe so... */
	atomic_set(&sh->count, 1);
	atomic_inc(&conf->active_stripes);
	INIT_LIST_HEAD(&sh->lru);
	release_stripe(sh);
	return 1;
}

static int grow_stripes(raid5_conf_t *conf, int num)
{
	kmem_cache_t *sc;
	int devs = conf->raid_disks;

	sprintf(conf->cache_name[0], "raid5/%s", mdname(conf->mddev));
	sprintf(conf->cache_name[1], "raid5/%s-alt", mdname(conf->mddev));
	conf->active_name = 0;
	sc = kmem_cache_create(conf->cache_name[conf->active_name],
			       sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
			       0, 0, NULL, NULL);
	if (!sc)
		return 1;
	conf->slab_cache = sc;
	conf->pool_size = devs;
	while (num--)
		if (!grow_one_stripe(conf))
			return 1;
	return 0;
}

#ifdef CONFIG_MD_RAID5_RESHAPE
static int resize_stripes(raid5_conf_t *conf, int newsize)
{
	/* Make all the stripes able to hold 'newsize' devices.
	 * New slots in each stripe get 'page' set to a new page.
	 *
	 * This happens in stages:
	 * 1/ create a new kmem_cache and allocate the required number of
	 *    stripe_heads.
	 * 2/ gather all the old stripe_heads and transfer the pages across
	 *    to the new stripe_heads.  This will have the side effect of
	 *    freezing the array as once all stripe_heads have been collected,
	 *    no IO will be possible.  Old stripe heads are freed once their
	 *    pages have been transferred over, and the old kmem_cache is
	 *    freed when all stripes are done.
	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
	 *    we simply return a failure status - no need to clean anything up.
	 * 4/ allocate new pages for the new slots in the new stripe_heads.
	 *    If this fails, we don't bother trying to shrink the
	 *    stripe_heads down again, we just leave them as they are.
	 *    As each stripe_head is processed the new one is released into
	 *    active service.
	 *
	 * Once step2 is started, we cannot afford to wait for a write,
	 * so we use GFP_NOIO allocations.
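	 *
	 * (Illustrative example, not tied to any particular array: growing
	 * from 4 to 5 devices makes each new stripe_head carry 5 r5dev slots
	 * instead of 4; step 2 hands the 4 existing pages across, and step 4
	 * only has to allocate a page for the one slot that inherited none.)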
395 */ 396 struct stripe_head *osh, *nsh; 397 LIST_HEAD(newstripes); 398 struct disk_info *ndisks; 399 int err = 0; 400 kmem_cache_t *sc; 401 int i; 402 403 if (newsize <= conf->pool_size) 404 return 0; /* never bother to shrink */ 405 406 /* Step 1 */ 407 sc = kmem_cache_create(conf->cache_name[1-conf->active_name], 408 sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev), 409 0, 0, NULL, NULL); 410 if (!sc) 411 return -ENOMEM; 412 413 for (i = conf->max_nr_stripes; i; i--) { 414 nsh = kmem_cache_alloc(sc, GFP_KERNEL); 415 if (!nsh) 416 break; 417 418 memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev)); 419 420 nsh->raid_conf = conf; 421 spin_lock_init(&nsh->lock); 422 423 list_add(&nsh->lru, &newstripes); 424 } 425 if (i) { 426 /* didn't get enough, give up */ 427 while (!list_empty(&newstripes)) { 428 nsh = list_entry(newstripes.next, struct stripe_head, lru); 429 list_del(&nsh->lru); 430 kmem_cache_free(sc, nsh); 431 } 432 kmem_cache_destroy(sc); 433 return -ENOMEM; 434 } 435 /* Step 2 - Must use GFP_NOIO now. 436 * OK, we have enough stripes, start collecting inactive 437 * stripes and copying them over 438 */ 439 list_for_each_entry(nsh, &newstripes, lru) { 440 spin_lock_irq(&conf->device_lock); 441 wait_event_lock_irq(conf->wait_for_stripe, 442 !list_empty(&conf->inactive_list), 443 conf->device_lock, 444 unplug_slaves(conf->mddev) 445 ); 446 osh = get_free_stripe(conf); 447 spin_unlock_irq(&conf->device_lock); 448 atomic_set(&nsh->count, 1); 449 for(i=0; i<conf->pool_size; i++) 450 nsh->dev[i].page = osh->dev[i].page; 451 for( ; i<newsize; i++) 452 nsh->dev[i].page = NULL; 453 kmem_cache_free(conf->slab_cache, osh); 454 } 455 kmem_cache_destroy(conf->slab_cache); 456 457 /* Step 3. 458 * At this point, we are holding all the stripes so the array 459 * is completely stalled, so now is a good time to resize 460 * conf->disks. 
461 */ 462 ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO); 463 if (ndisks) { 464 for (i=0; i<conf->raid_disks; i++) 465 ndisks[i] = conf->disks[i]; 466 kfree(conf->disks); 467 conf->disks = ndisks; 468 } else 469 err = -ENOMEM; 470 471 /* Step 4, return new stripes to service */ 472 while(!list_empty(&newstripes)) { 473 nsh = list_entry(newstripes.next, struct stripe_head, lru); 474 list_del_init(&nsh->lru); 475 for (i=conf->raid_disks; i < newsize; i++) 476 if (nsh->dev[i].page == NULL) { 477 struct page *p = alloc_page(GFP_NOIO); 478 nsh->dev[i].page = p; 479 if (!p) 480 err = -ENOMEM; 481 } 482 release_stripe(nsh); 483 } 484 /* critical section pass, GFP_NOIO no longer needed */ 485 486 conf->slab_cache = sc; 487 conf->active_name = 1-conf->active_name; 488 conf->pool_size = newsize; 489 return err; 490 } 491 #endif 492 493 static int drop_one_stripe(raid5_conf_t *conf) 494 { 495 struct stripe_head *sh; 496 497 spin_lock_irq(&conf->device_lock); 498 sh = get_free_stripe(conf); 499 spin_unlock_irq(&conf->device_lock); 500 if (!sh) 501 return 0; 502 BUG_ON(atomic_read(&sh->count)); 503 shrink_buffers(sh, conf->pool_size); 504 kmem_cache_free(conf->slab_cache, sh); 505 atomic_dec(&conf->active_stripes); 506 return 1; 507 } 508 509 static void shrink_stripes(raid5_conf_t *conf) 510 { 511 while (drop_one_stripe(conf)) 512 ; 513 514 if (conf->slab_cache) 515 kmem_cache_destroy(conf->slab_cache); 516 conf->slab_cache = NULL; 517 } 518 519 static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done, 520 int error) 521 { 522 struct stripe_head *sh = bi->bi_private; 523 raid5_conf_t *conf = sh->raid_conf; 524 int disks = sh->disks, i; 525 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 526 char b[BDEVNAME_SIZE]; 527 mdk_rdev_t *rdev; 528 529 if (bi->bi_size) 530 return 1; 531 532 for (i=0 ; i<disks; i++) 533 if (bi == &sh->dev[i].req) 534 break; 535 536 PRINTK("end_read_request %llu/%d, count: %d, uptodate %d.\n", 537 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 538 uptodate); 539 if (i == disks) { 540 BUG(); 541 return 0; 542 } 543 544 if (uptodate) { 545 #if 0 546 struct bio *bio; 547 unsigned long flags; 548 spin_lock_irqsave(&conf->device_lock, flags); 549 /* we can return a buffer if we bypassed the cache or 550 * if the top buffer is not in highmem. 
If there are 551 * multiple buffers, leave the extra work to 552 * handle_stripe 553 */ 554 buffer = sh->bh_read[i]; 555 if (buffer && 556 (!PageHighMem(buffer->b_page) 557 || buffer->b_page == bh->b_page ) 558 ) { 559 sh->bh_read[i] = buffer->b_reqnext; 560 buffer->b_reqnext = NULL; 561 } else 562 buffer = NULL; 563 spin_unlock_irqrestore(&conf->device_lock, flags); 564 if (sh->bh_page[i]==bh->b_page) 565 set_buffer_uptodate(bh); 566 if (buffer) { 567 if (buffer->b_page != bh->b_page) 568 memcpy(buffer->b_data, bh->b_data, bh->b_size); 569 buffer->b_end_io(buffer, 1); 570 } 571 #else 572 set_bit(R5_UPTODATE, &sh->dev[i].flags); 573 #endif 574 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 575 rdev = conf->disks[i].rdev; 576 printk(KERN_INFO "raid5:%s: read error corrected (%lu sectors at %llu on %s)\n", 577 mdname(conf->mddev), STRIPE_SECTORS, 578 (unsigned long long)sh->sector + rdev->data_offset, 579 bdevname(rdev->bdev, b)); 580 clear_bit(R5_ReadError, &sh->dev[i].flags); 581 clear_bit(R5_ReWrite, &sh->dev[i].flags); 582 } 583 if (atomic_read(&conf->disks[i].rdev->read_errors)) 584 atomic_set(&conf->disks[i].rdev->read_errors, 0); 585 } else { 586 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b); 587 int retry = 0; 588 rdev = conf->disks[i].rdev; 589 590 clear_bit(R5_UPTODATE, &sh->dev[i].flags); 591 atomic_inc(&rdev->read_errors); 592 if (conf->mddev->degraded) 593 printk(KERN_WARNING "raid5:%s: read error not correctable (sector %llu on %s).\n", 594 mdname(conf->mddev), 595 (unsigned long long)sh->sector + rdev->data_offset, 596 bdn); 597 else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) 598 /* Oh, no!!! */ 599 printk(KERN_WARNING "raid5:%s: read error NOT corrected!! (sector %llu on %s).\n", 600 mdname(conf->mddev), 601 (unsigned long long)sh->sector + rdev->data_offset, 602 bdn); 603 else if (atomic_read(&rdev->read_errors) 604 > conf->max_nr_stripes) 605 printk(KERN_WARNING 606 "raid5:%s: Too many read errors, failing device %s.\n", 607 mdname(conf->mddev), bdn); 608 else 609 retry = 1; 610 if (retry) 611 set_bit(R5_ReadError, &sh->dev[i].flags); 612 else { 613 clear_bit(R5_ReadError, &sh->dev[i].flags); 614 clear_bit(R5_ReWrite, &sh->dev[i].flags); 615 md_error(conf->mddev, rdev); 616 } 617 } 618 rdev_dec_pending(conf->disks[i].rdev, conf->mddev); 619 #if 0 620 /* must restore b_page before unlocking buffer... 
*/ 621 if (sh->bh_page[i] != bh->b_page) { 622 bh->b_page = sh->bh_page[i]; 623 bh->b_data = page_address(bh->b_page); 624 clear_buffer_uptodate(bh); 625 } 626 #endif 627 clear_bit(R5_LOCKED, &sh->dev[i].flags); 628 set_bit(STRIPE_HANDLE, &sh->state); 629 release_stripe(sh); 630 return 0; 631 } 632 633 static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done, 634 int error) 635 { 636 struct stripe_head *sh = bi->bi_private; 637 raid5_conf_t *conf = sh->raid_conf; 638 int disks = sh->disks, i; 639 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 640 641 if (bi->bi_size) 642 return 1; 643 644 for (i=0 ; i<disks; i++) 645 if (bi == &sh->dev[i].req) 646 break; 647 648 PRINTK("end_write_request %llu/%d, count %d, uptodate: %d.\n", 649 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 650 uptodate); 651 if (i == disks) { 652 BUG(); 653 return 0; 654 } 655 656 if (!uptodate) 657 md_error(conf->mddev, conf->disks[i].rdev); 658 659 rdev_dec_pending(conf->disks[i].rdev, conf->mddev); 660 661 clear_bit(R5_LOCKED, &sh->dev[i].flags); 662 set_bit(STRIPE_HANDLE, &sh->state); 663 release_stripe(sh); 664 return 0; 665 } 666 667 668 static sector_t compute_blocknr(struct stripe_head *sh, int i); 669 670 static void raid5_build_block (struct stripe_head *sh, int i) 671 { 672 struct r5dev *dev = &sh->dev[i]; 673 674 bio_init(&dev->req); 675 dev->req.bi_io_vec = &dev->vec; 676 dev->req.bi_vcnt++; 677 dev->req.bi_max_vecs++; 678 dev->vec.bv_page = dev->page; 679 dev->vec.bv_len = STRIPE_SIZE; 680 dev->vec.bv_offset = 0; 681 682 dev->req.bi_sector = sh->sector; 683 dev->req.bi_private = sh; 684 685 dev->flags = 0; 686 dev->sector = compute_blocknr(sh, i); 687 } 688 689 static void error(mddev_t *mddev, mdk_rdev_t *rdev) 690 { 691 char b[BDEVNAME_SIZE]; 692 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 693 PRINTK("raid5: error called\n"); 694 695 if (!test_bit(Faulty, &rdev->flags)) { 696 set_bit(MD_CHANGE_DEVS, &mddev->flags); 697 if (test_and_clear_bit(In_sync, &rdev->flags)) { 698 unsigned long flags; 699 spin_lock_irqsave(&conf->device_lock, flags); 700 mddev->degraded++; 701 spin_unlock_irqrestore(&conf->device_lock, flags); 702 /* 703 * if recovery was running, make sure it aborts. 704 */ 705 set_bit(MD_RECOVERY_ERR, &mddev->recovery); 706 } 707 set_bit(Faulty, &rdev->flags); 708 printk (KERN_ALERT 709 "raid5: Disk failure on %s, disabling device." 710 " Operation continuing on %d devices\n", 711 bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded); 712 } 713 } 714 715 /* 716 * Input: a 'big' sector number, 717 * Output: index of the data and parity disk, and the sector # in them. 
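 *
 * Worked example (illustrative only): with 5 devices, a 64KiB chunk
 * (128 sectors) and ALGORITHM_LEFT_SYMMETRIC, r_sector 1000 splits into
 * chunk_number 7 and chunk_offset 104, so stripe 1 and an initial dd_idx
 * of 3; parity lands on disk 3, the data slot rotates to disk 2, and the
 * returned device sector is 1*128 + 104 = 232.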
718 */ 719 static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks, 720 unsigned int data_disks, unsigned int * dd_idx, 721 unsigned int * pd_idx, raid5_conf_t *conf) 722 { 723 long stripe; 724 unsigned long chunk_number; 725 unsigned int chunk_offset; 726 sector_t new_sector; 727 int sectors_per_chunk = conf->chunk_size >> 9; 728 729 /* First compute the information on this sector */ 730 731 /* 732 * Compute the chunk number and the sector offset inside the chunk 733 */ 734 chunk_offset = sector_div(r_sector, sectors_per_chunk); 735 chunk_number = r_sector; 736 BUG_ON(r_sector != chunk_number); 737 738 /* 739 * Compute the stripe number 740 */ 741 stripe = chunk_number / data_disks; 742 743 /* 744 * Compute the data disk and parity disk indexes inside the stripe 745 */ 746 *dd_idx = chunk_number % data_disks; 747 748 /* 749 * Select the parity disk based on the user selected algorithm. 750 */ 751 switch(conf->level) { 752 case 4: 753 *pd_idx = data_disks; 754 break; 755 case 5: 756 switch (conf->algorithm) { 757 case ALGORITHM_LEFT_ASYMMETRIC: 758 *pd_idx = data_disks - stripe % raid_disks; 759 if (*dd_idx >= *pd_idx) 760 (*dd_idx)++; 761 break; 762 case ALGORITHM_RIGHT_ASYMMETRIC: 763 *pd_idx = stripe % raid_disks; 764 if (*dd_idx >= *pd_idx) 765 (*dd_idx)++; 766 break; 767 case ALGORITHM_LEFT_SYMMETRIC: 768 *pd_idx = data_disks - stripe % raid_disks; 769 *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks; 770 break; 771 case ALGORITHM_RIGHT_SYMMETRIC: 772 *pd_idx = stripe % raid_disks; 773 *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks; 774 break; 775 default: 776 printk(KERN_ERR "raid5: unsupported algorithm %d\n", 777 conf->algorithm); 778 } 779 break; 780 case 6: 781 782 /**** FIX THIS ****/ 783 switch (conf->algorithm) { 784 case ALGORITHM_LEFT_ASYMMETRIC: 785 *pd_idx = raid_disks - 1 - (stripe % raid_disks); 786 if (*pd_idx == raid_disks-1) 787 (*dd_idx)++; /* Q D D D P */ 788 else if (*dd_idx >= *pd_idx) 789 (*dd_idx) += 2; /* D D P Q D */ 790 break; 791 case ALGORITHM_RIGHT_ASYMMETRIC: 792 *pd_idx = stripe % raid_disks; 793 if (*pd_idx == raid_disks-1) 794 (*dd_idx)++; /* Q D D D P */ 795 else if (*dd_idx >= *pd_idx) 796 (*dd_idx) += 2; /* D D P Q D */ 797 break; 798 case ALGORITHM_LEFT_SYMMETRIC: 799 *pd_idx = raid_disks - 1 - (stripe % raid_disks); 800 *dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks; 801 break; 802 case ALGORITHM_RIGHT_SYMMETRIC: 803 *pd_idx = stripe % raid_disks; 804 *dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks; 805 break; 806 default: 807 printk (KERN_CRIT "raid6: unsupported algorithm %d\n", 808 conf->algorithm); 809 } 810 break; 811 } 812 813 /* 814 * Finally, compute the new sector number 815 */ 816 new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset; 817 return new_sector; 818 } 819 820 821 static sector_t compute_blocknr(struct stripe_head *sh, int i) 822 { 823 raid5_conf_t *conf = sh->raid_conf; 824 int raid_disks = sh->disks, data_disks = raid_disks - 1; 825 sector_t new_sector = sh->sector, check; 826 int sectors_per_chunk = conf->chunk_size >> 9; 827 sector_t stripe; 828 int chunk_offset; 829 int chunk_number, dummy1, dummy2, dd_idx = i; 830 sector_t r_sector; 831 832 833 chunk_offset = sector_div(new_sector, sectors_per_chunk); 834 stripe = new_sector; 835 BUG_ON(new_sector != stripe); 836 837 if (i == sh->pd_idx) 838 return 0; 839 switch(conf->level) { 840 case 4: break; 841 case 5: 842 switch (conf->algorithm) { 843 case ALGORITHM_LEFT_ASYMMETRIC: 844 case ALGORITHM_RIGHT_ASYMMETRIC: 845 if (i > sh->pd_idx) 846 
i--; 847 break; 848 case ALGORITHM_LEFT_SYMMETRIC: 849 case ALGORITHM_RIGHT_SYMMETRIC: 850 if (i < sh->pd_idx) 851 i += raid_disks; 852 i -= (sh->pd_idx + 1); 853 break; 854 default: 855 printk(KERN_ERR "raid5: unsupported algorithm %d\n", 856 conf->algorithm); 857 } 858 break; 859 case 6: 860 data_disks = raid_disks - 2; 861 if (i == raid6_next_disk(sh->pd_idx, raid_disks)) 862 return 0; /* It is the Q disk */ 863 switch (conf->algorithm) { 864 case ALGORITHM_LEFT_ASYMMETRIC: 865 case ALGORITHM_RIGHT_ASYMMETRIC: 866 if (sh->pd_idx == raid_disks-1) 867 i--; /* Q D D D P */ 868 else if (i > sh->pd_idx) 869 i -= 2; /* D D P Q D */ 870 break; 871 case ALGORITHM_LEFT_SYMMETRIC: 872 case ALGORITHM_RIGHT_SYMMETRIC: 873 if (sh->pd_idx == raid_disks-1) 874 i--; /* Q D D D P */ 875 else { 876 /* D D P Q D */ 877 if (i < sh->pd_idx) 878 i += raid_disks; 879 i -= (sh->pd_idx + 2); 880 } 881 break; 882 default: 883 printk (KERN_CRIT "raid6: unsupported algorithm %d\n", 884 conf->algorithm); 885 } 886 break; 887 } 888 889 chunk_number = stripe * data_disks + i; 890 r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset; 891 892 check = raid5_compute_sector (r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf); 893 if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) { 894 printk(KERN_ERR "compute_blocknr: map not correct\n"); 895 return 0; 896 } 897 return r_sector; 898 } 899 900 901 902 /* 903 * Copy data between a page in the stripe cache, and one or more bion 904 * The page could align with the middle of the bio, or there could be 905 * several bion, each with several bio_vecs, which cover part of the page 906 * Multiple bion are linked together on bi_next. There may be extras 907 * at the end of this list. We ignore them. 908 */ 909 static void copy_data(int frombio, struct bio *bio, 910 struct page *page, 911 sector_t sector) 912 { 913 char *pa = page_address(page); 914 struct bio_vec *bvl; 915 int i; 916 int page_offset; 917 918 if (bio->bi_sector >= sector) 919 page_offset = (signed)(bio->bi_sector - sector) * 512; 920 else 921 page_offset = (signed)(sector - bio->bi_sector) * -512; 922 bio_for_each_segment(bvl, bio, i) { 923 int len = bio_iovec_idx(bio,i)->bv_len; 924 int clen; 925 int b_offset = 0; 926 927 if (page_offset < 0) { 928 b_offset = -page_offset; 929 page_offset += b_offset; 930 len -= b_offset; 931 } 932 933 if (len > 0 && page_offset + len > STRIPE_SIZE) 934 clen = STRIPE_SIZE - page_offset; 935 else clen = len; 936 937 if (clen > 0) { 938 char *ba = __bio_kmap_atomic(bio, i, KM_USER0); 939 if (frombio) 940 memcpy(pa+page_offset, ba+b_offset, clen); 941 else 942 memcpy(ba+b_offset, pa+page_offset, clen); 943 __bio_kunmap_atomic(ba, KM_USER0); 944 } 945 if (clen < len) /* hit end of page */ 946 break; 947 page_offset += len; 948 } 949 } 950 951 #define check_xor() do { \ 952 if (count == MAX_XOR_BLOCKS) { \ 953 xor_block(count, STRIPE_SIZE, ptr); \ 954 count = 1; \ 955 } \ 956 } while(0) 957 958 959 static void compute_block(struct stripe_head *sh, int dd_idx) 960 { 961 int i, count, disks = sh->disks; 962 void *ptr[MAX_XOR_BLOCKS], *p; 963 964 PRINTK("compute_block, stripe %llu, idx %d\n", 965 (unsigned long long)sh->sector, dd_idx); 966 967 ptr[0] = page_address(sh->dev[dd_idx].page); 968 memset(ptr[0], 0, STRIPE_SIZE); 969 count = 1; 970 for (i = disks ; i--; ) { 971 if (i == dd_idx) 972 continue; 973 p = page_address(sh->dev[i].page); 974 if (test_bit(R5_UPTODATE, &sh->dev[i].flags)) 975 ptr[count++] = p; 976 else 977 printk(KERN_ERR 
"compute_block() %d, stripe %llu, %d" 978 " not present\n", dd_idx, 979 (unsigned long long)sh->sector, i); 980 981 check_xor(); 982 } 983 if (count != 1) 984 xor_block(count, STRIPE_SIZE, ptr); 985 set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); 986 } 987 988 static void compute_parity5(struct stripe_head *sh, int method) 989 { 990 raid5_conf_t *conf = sh->raid_conf; 991 int i, pd_idx = sh->pd_idx, disks = sh->disks, count; 992 void *ptr[MAX_XOR_BLOCKS]; 993 struct bio *chosen; 994 995 PRINTK("compute_parity5, stripe %llu, method %d\n", 996 (unsigned long long)sh->sector, method); 997 998 count = 1; 999 ptr[0] = page_address(sh->dev[pd_idx].page); 1000 switch(method) { 1001 case READ_MODIFY_WRITE: 1002 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags)); 1003 for (i=disks ; i-- ;) { 1004 if (i==pd_idx) 1005 continue; 1006 if (sh->dev[i].towrite && 1007 test_bit(R5_UPTODATE, &sh->dev[i].flags)) { 1008 ptr[count++] = page_address(sh->dev[i].page); 1009 chosen = sh->dev[i].towrite; 1010 sh->dev[i].towrite = NULL; 1011 1012 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 1013 wake_up(&conf->wait_for_overlap); 1014 1015 BUG_ON(sh->dev[i].written); 1016 sh->dev[i].written = chosen; 1017 check_xor(); 1018 } 1019 } 1020 break; 1021 case RECONSTRUCT_WRITE: 1022 memset(ptr[0], 0, STRIPE_SIZE); 1023 for (i= disks; i-- ;) 1024 if (i!=pd_idx && sh->dev[i].towrite) { 1025 chosen = sh->dev[i].towrite; 1026 sh->dev[i].towrite = NULL; 1027 1028 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 1029 wake_up(&conf->wait_for_overlap); 1030 1031 BUG_ON(sh->dev[i].written); 1032 sh->dev[i].written = chosen; 1033 } 1034 break; 1035 case CHECK_PARITY: 1036 break; 1037 } 1038 if (count>1) { 1039 xor_block(count, STRIPE_SIZE, ptr); 1040 count = 1; 1041 } 1042 1043 for (i = disks; i--;) 1044 if (sh->dev[i].written) { 1045 sector_t sector = sh->dev[i].sector; 1046 struct bio *wbi = sh->dev[i].written; 1047 while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) { 1048 copy_data(1, wbi, sh->dev[i].page, sector); 1049 wbi = r5_next_bio(wbi, sector); 1050 } 1051 1052 set_bit(R5_LOCKED, &sh->dev[i].flags); 1053 set_bit(R5_UPTODATE, &sh->dev[i].flags); 1054 } 1055 1056 switch(method) { 1057 case RECONSTRUCT_WRITE: 1058 case CHECK_PARITY: 1059 for (i=disks; i--;) 1060 if (i != pd_idx) { 1061 ptr[count++] = page_address(sh->dev[i].page); 1062 check_xor(); 1063 } 1064 break; 1065 case READ_MODIFY_WRITE: 1066 for (i = disks; i--;) 1067 if (sh->dev[i].written) { 1068 ptr[count++] = page_address(sh->dev[i].page); 1069 check_xor(); 1070 } 1071 } 1072 if (count != 1) 1073 xor_block(count, STRIPE_SIZE, ptr); 1074 1075 if (method != CHECK_PARITY) { 1076 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 1077 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); 1078 } else 1079 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 1080 } 1081 1082 static void compute_parity6(struct stripe_head *sh, int method) 1083 { 1084 raid6_conf_t *conf = sh->raid_conf; 1085 int i, pd_idx = sh->pd_idx, qd_idx, d0_idx, disks = conf->raid_disks, count; 1086 struct bio *chosen; 1087 /**** FIX THIS: This could be very bad if disks is close to 256 ****/ 1088 void *ptrs[disks]; 1089 1090 qd_idx = raid6_next_disk(pd_idx, disks); 1091 d0_idx = raid6_next_disk(qd_idx, disks); 1092 1093 PRINTK("compute_parity, stripe %llu, method %d\n", 1094 (unsigned long long)sh->sector, method); 1095 1096 switch(method) { 1097 case READ_MODIFY_WRITE: 1098 BUG(); /* READ_MODIFY_WRITE N/A for RAID-6 */ 1099 case RECONSTRUCT_WRITE: 1100 for (i= disks; i-- ;) 1101 if ( i != 
pd_idx && i != qd_idx && sh->dev[i].towrite ) { 1102 chosen = sh->dev[i].towrite; 1103 sh->dev[i].towrite = NULL; 1104 1105 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 1106 wake_up(&conf->wait_for_overlap); 1107 1108 BUG_ON(sh->dev[i].written); 1109 sh->dev[i].written = chosen; 1110 } 1111 break; 1112 case CHECK_PARITY: 1113 BUG(); /* Not implemented yet */ 1114 } 1115 1116 for (i = disks; i--;) 1117 if (sh->dev[i].written) { 1118 sector_t sector = sh->dev[i].sector; 1119 struct bio *wbi = sh->dev[i].written; 1120 while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) { 1121 copy_data(1, wbi, sh->dev[i].page, sector); 1122 wbi = r5_next_bio(wbi, sector); 1123 } 1124 1125 set_bit(R5_LOCKED, &sh->dev[i].flags); 1126 set_bit(R5_UPTODATE, &sh->dev[i].flags); 1127 } 1128 1129 // switch(method) { 1130 // case RECONSTRUCT_WRITE: 1131 // case CHECK_PARITY: 1132 // case UPDATE_PARITY: 1133 /* Note that unlike RAID-5, the ordering of the disks matters greatly. */ 1134 /* FIX: Is this ordering of drives even remotely optimal? */ 1135 count = 0; 1136 i = d0_idx; 1137 do { 1138 ptrs[count++] = page_address(sh->dev[i].page); 1139 if (count <= disks-2 && !test_bit(R5_UPTODATE, &sh->dev[i].flags)) 1140 printk("block %d/%d not uptodate on parity calc\n", i,count); 1141 i = raid6_next_disk(i, disks); 1142 } while ( i != d0_idx ); 1143 // break; 1144 // } 1145 1146 raid6_call.gen_syndrome(disks, STRIPE_SIZE, ptrs); 1147 1148 switch(method) { 1149 case RECONSTRUCT_WRITE: 1150 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 1151 set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags); 1152 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); 1153 set_bit(R5_LOCKED, &sh->dev[qd_idx].flags); 1154 break; 1155 case UPDATE_PARITY: 1156 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 1157 set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags); 1158 break; 1159 } 1160 } 1161 1162 1163 /* Compute one missing block */ 1164 static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero) 1165 { 1166 raid6_conf_t *conf = sh->raid_conf; 1167 int i, count, disks = conf->raid_disks; 1168 void *ptr[MAX_XOR_BLOCKS], *p; 1169 int pd_idx = sh->pd_idx; 1170 int qd_idx = raid6_next_disk(pd_idx, disks); 1171 1172 PRINTK("compute_block_1, stripe %llu, idx %d\n", 1173 (unsigned long long)sh->sector, dd_idx); 1174 1175 if ( dd_idx == qd_idx ) { 1176 /* We're actually computing the Q drive */ 1177 compute_parity6(sh, UPDATE_PARITY); 1178 } else { 1179 ptr[0] = page_address(sh->dev[dd_idx].page); 1180 if (!nozero) memset(ptr[0], 0, STRIPE_SIZE); 1181 count = 1; 1182 for (i = disks ; i--; ) { 1183 if (i == dd_idx || i == qd_idx) 1184 continue; 1185 p = page_address(sh->dev[i].page); 1186 if (test_bit(R5_UPTODATE, &sh->dev[i].flags)) 1187 ptr[count++] = p; 1188 else 1189 printk("compute_block() %d, stripe %llu, %d" 1190 " not present\n", dd_idx, 1191 (unsigned long long)sh->sector, i); 1192 1193 check_xor(); 1194 } 1195 if (count != 1) 1196 xor_block(count, STRIPE_SIZE, ptr); 1197 if (!nozero) set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); 1198 else clear_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); 1199 } 1200 } 1201 1202 /* Compute two missing blocks */ 1203 static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2) 1204 { 1205 raid6_conf_t *conf = sh->raid_conf; 1206 int i, count, disks = conf->raid_disks; 1207 int pd_idx = sh->pd_idx; 1208 int qd_idx = raid6_next_disk(pd_idx, disks); 1209 int d0_idx = raid6_next_disk(qd_idx, disks); 1210 int faila, failb; 1211 1212 /* faila and failb are disk numbers relative to d0_idx */ 1213 /* 
pd_idx become disks-2 and qd_idx become disks-1 */ 1214 faila = (dd_idx1 < d0_idx) ? dd_idx1+(disks-d0_idx) : dd_idx1-d0_idx; 1215 failb = (dd_idx2 < d0_idx) ? dd_idx2+(disks-d0_idx) : dd_idx2-d0_idx; 1216 1217 BUG_ON(faila == failb); 1218 if ( failb < faila ) { int tmp = faila; faila = failb; failb = tmp; } 1219 1220 PRINTK("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n", 1221 (unsigned long long)sh->sector, dd_idx1, dd_idx2, faila, failb); 1222 1223 if ( failb == disks-1 ) { 1224 /* Q disk is one of the missing disks */ 1225 if ( faila == disks-2 ) { 1226 /* Missing P+Q, just recompute */ 1227 compute_parity6(sh, UPDATE_PARITY); 1228 return; 1229 } else { 1230 /* We're missing D+Q; recompute D from P */ 1231 compute_block_1(sh, (dd_idx1 == qd_idx) ? dd_idx2 : dd_idx1, 0); 1232 compute_parity6(sh, UPDATE_PARITY); /* Is this necessary? */ 1233 return; 1234 } 1235 } 1236 1237 /* We're missing D+P or D+D; build pointer table */ 1238 { 1239 /**** FIX THIS: This could be very bad if disks is close to 256 ****/ 1240 void *ptrs[disks]; 1241 1242 count = 0; 1243 i = d0_idx; 1244 do { 1245 ptrs[count++] = page_address(sh->dev[i].page); 1246 i = raid6_next_disk(i, disks); 1247 if (i != dd_idx1 && i != dd_idx2 && 1248 !test_bit(R5_UPTODATE, &sh->dev[i].flags)) 1249 printk("compute_2 with missing block %d/%d\n", count, i); 1250 } while ( i != d0_idx ); 1251 1252 if ( failb == disks-2 ) { 1253 /* We're missing D+P. */ 1254 raid6_datap_recov(disks, STRIPE_SIZE, faila, ptrs); 1255 } else { 1256 /* We're missing D+D. */ 1257 raid6_2data_recov(disks, STRIPE_SIZE, faila, failb, ptrs); 1258 } 1259 1260 /* Both the above update both missing blocks */ 1261 set_bit(R5_UPTODATE, &sh->dev[dd_idx1].flags); 1262 set_bit(R5_UPTODATE, &sh->dev[dd_idx2].flags); 1263 } 1264 } 1265 1266 1267 1268 /* 1269 * Each stripe/dev can have one or more bion attached. 1270 * toread/towrite point to the first in a chain. 1271 * The bi_next chain must be in order. 
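 *
 * For example (illustrative): two 4-sector writes starting at sectors 8
 * and 12 of the same stripe+device are chained 8 -> 12 on bi_next; a
 * later bio covering sectors 10-13 would overlap them, so add_stripe_bio
 * returns 0, sets R5_Overlap, and the submitter must wait and retry.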
1272 */ 1273 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite) 1274 { 1275 struct bio **bip; 1276 raid5_conf_t *conf = sh->raid_conf; 1277 int firstwrite=0; 1278 1279 PRINTK("adding bh b#%llu to stripe s#%llu\n", 1280 (unsigned long long)bi->bi_sector, 1281 (unsigned long long)sh->sector); 1282 1283 1284 spin_lock(&sh->lock); 1285 spin_lock_irq(&conf->device_lock); 1286 if (forwrite) { 1287 bip = &sh->dev[dd_idx].towrite; 1288 if (*bip == NULL && sh->dev[dd_idx].written == NULL) 1289 firstwrite = 1; 1290 } else 1291 bip = &sh->dev[dd_idx].toread; 1292 while (*bip && (*bip)->bi_sector < bi->bi_sector) { 1293 if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector) 1294 goto overlap; 1295 bip = & (*bip)->bi_next; 1296 } 1297 if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9)) 1298 goto overlap; 1299 1300 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next); 1301 if (*bip) 1302 bi->bi_next = *bip; 1303 *bip = bi; 1304 bi->bi_phys_segments ++; 1305 spin_unlock_irq(&conf->device_lock); 1306 spin_unlock(&sh->lock); 1307 1308 PRINTK("added bi b#%llu to stripe s#%llu, disk %d.\n", 1309 (unsigned long long)bi->bi_sector, 1310 (unsigned long long)sh->sector, dd_idx); 1311 1312 if (conf->mddev->bitmap && firstwrite) { 1313 bitmap_startwrite(conf->mddev->bitmap, sh->sector, 1314 STRIPE_SECTORS, 0); 1315 sh->bm_seq = conf->seq_flush+1; 1316 set_bit(STRIPE_BIT_DELAY, &sh->state); 1317 } 1318 1319 if (forwrite) { 1320 /* check if page is covered */ 1321 sector_t sector = sh->dev[dd_idx].sector; 1322 for (bi=sh->dev[dd_idx].towrite; 1323 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && 1324 bi && bi->bi_sector <= sector; 1325 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { 1326 if (bi->bi_sector + (bi->bi_size>>9) >= sector) 1327 sector = bi->bi_sector + (bi->bi_size>>9); 1328 } 1329 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) 1330 set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags); 1331 } 1332 return 1; 1333 1334 overlap: 1335 set_bit(R5_Overlap, &sh->dev[dd_idx].flags); 1336 spin_unlock_irq(&conf->device_lock); 1337 spin_unlock(&sh->lock); 1338 return 0; 1339 } 1340 1341 static void end_reshape(raid5_conf_t *conf); 1342 1343 static int page_is_zero(struct page *p) 1344 { 1345 char *a = page_address(p); 1346 return ((*(u32*)a) == 0 && 1347 memcmp(a, a+4, STRIPE_SIZE-4)==0); 1348 } 1349 1350 static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int disks) 1351 { 1352 int sectors_per_chunk = conf->chunk_size >> 9; 1353 int pd_idx, dd_idx; 1354 int chunk_offset = sector_div(stripe, sectors_per_chunk); 1355 1356 raid5_compute_sector(stripe*(disks-1)*sectors_per_chunk 1357 + chunk_offset, disks, disks-1, &dd_idx, &pd_idx, conf); 1358 return pd_idx; 1359 } 1360 1361 1362 /* 1363 * handle_stripe - do things to a stripe. 1364 * 1365 * We lock the stripe and then examine the state of various bits 1366 * to see what needs to be done. 1367 * Possible results: 1368 * return some read request which now have data 1369 * return some write requests which are safely on disc 1370 * schedule a read on some buffers 1371 * schedule a write of some buffers 1372 * return confirmation of parity correctness 1373 * 1374 * Parity calculations are done inside the stripe lock 1375 * buffers are taken off read_list or write_list, and bh_cache buffers 1376 * get BH_Lock set before the stripe lock is released. 
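 *
 * For a partial-stripe write, handle_stripe5 weighs read-modify-write
 * (pre-read the old data block(s) being overwritten plus the old parity)
 * against reconstruct-write (pre-read every data block that is not being
 * overwritten) and schedules reads for whichever needs fewer blocks.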
1377 * 1378 */ 1379 1380 static void handle_stripe5(struct stripe_head *sh) 1381 { 1382 raid5_conf_t *conf = sh->raid_conf; 1383 int disks = sh->disks; 1384 struct bio *return_bi= NULL; 1385 struct bio *bi; 1386 int i; 1387 int syncing, expanding, expanded; 1388 int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0; 1389 int non_overwrite = 0; 1390 int failed_num=0; 1391 struct r5dev *dev; 1392 1393 PRINTK("handling stripe %llu, cnt=%d, pd_idx=%d\n", 1394 (unsigned long long)sh->sector, atomic_read(&sh->count), 1395 sh->pd_idx); 1396 1397 spin_lock(&sh->lock); 1398 clear_bit(STRIPE_HANDLE, &sh->state); 1399 clear_bit(STRIPE_DELAYED, &sh->state); 1400 1401 syncing = test_bit(STRIPE_SYNCING, &sh->state); 1402 expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); 1403 expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); 1404 /* Now to look around and see what can be done */ 1405 1406 rcu_read_lock(); 1407 for (i=disks; i--; ) { 1408 mdk_rdev_t *rdev; 1409 dev = &sh->dev[i]; 1410 clear_bit(R5_Insync, &dev->flags); 1411 1412 PRINTK("check %d: state 0x%lx read %p write %p written %p\n", 1413 i, dev->flags, dev->toread, dev->towrite, dev->written); 1414 /* maybe we can reply to a read */ 1415 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) { 1416 struct bio *rbi, *rbi2; 1417 PRINTK("Return read for disc %d\n", i); 1418 spin_lock_irq(&conf->device_lock); 1419 rbi = dev->toread; 1420 dev->toread = NULL; 1421 if (test_and_clear_bit(R5_Overlap, &dev->flags)) 1422 wake_up(&conf->wait_for_overlap); 1423 spin_unlock_irq(&conf->device_lock); 1424 while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) { 1425 copy_data(0, rbi, dev->page, dev->sector); 1426 rbi2 = r5_next_bio(rbi, dev->sector); 1427 spin_lock_irq(&conf->device_lock); 1428 if (--rbi->bi_phys_segments == 0) { 1429 rbi->bi_next = return_bi; 1430 return_bi = rbi; 1431 } 1432 spin_unlock_irq(&conf->device_lock); 1433 rbi = rbi2; 1434 } 1435 } 1436 1437 /* now count some things */ 1438 if (test_bit(R5_LOCKED, &dev->flags)) locked++; 1439 if (test_bit(R5_UPTODATE, &dev->flags)) uptodate++; 1440 1441 1442 if (dev->toread) to_read++; 1443 if (dev->towrite) { 1444 to_write++; 1445 if (!test_bit(R5_OVERWRITE, &dev->flags)) 1446 non_overwrite++; 1447 } 1448 if (dev->written) written++; 1449 rdev = rcu_dereference(conf->disks[i].rdev); 1450 if (!rdev || !test_bit(In_sync, &rdev->flags)) { 1451 /* The ReadError flag will just be confusing now */ 1452 clear_bit(R5_ReadError, &dev->flags); 1453 clear_bit(R5_ReWrite, &dev->flags); 1454 } 1455 if (!rdev || !test_bit(In_sync, &rdev->flags) 1456 || test_bit(R5_ReadError, &dev->flags)) { 1457 failed++; 1458 failed_num = i; 1459 } else 1460 set_bit(R5_Insync, &dev->flags); 1461 } 1462 rcu_read_unlock(); 1463 PRINTK("locked=%d uptodate=%d to_read=%d" 1464 " to_write=%d failed=%d failed_num=%d\n", 1465 locked, uptodate, to_read, to_write, failed, failed_num); 1466 /* check if the array has lost two devices and, if so, some requests might 1467 * need to be failed 1468 */ 1469 if (failed > 1 && to_read+to_write+written) { 1470 for (i=disks; i--; ) { 1471 int bitmap_end = 0; 1472 1473 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 1474 mdk_rdev_t *rdev; 1475 rcu_read_lock(); 1476 rdev = rcu_dereference(conf->disks[i].rdev); 1477 if (rdev && test_bit(In_sync, &rdev->flags)) 1478 /* multiple read failures in one stripe */ 1479 md_error(conf->mddev, rdev); 1480 rcu_read_unlock(); 1481 } 1482 1483 spin_lock_irq(&conf->device_lock); 1484 /* fail all writes first */ 1485 bi = 
sh->dev[i].towrite; 1486 sh->dev[i].towrite = NULL; 1487 if (bi) { to_write--; bitmap_end = 1; } 1488 1489 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 1490 wake_up(&conf->wait_for_overlap); 1491 1492 while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){ 1493 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 1494 clear_bit(BIO_UPTODATE, &bi->bi_flags); 1495 if (--bi->bi_phys_segments == 0) { 1496 md_write_end(conf->mddev); 1497 bi->bi_next = return_bi; 1498 return_bi = bi; 1499 } 1500 bi = nextbi; 1501 } 1502 /* and fail all 'written' */ 1503 bi = sh->dev[i].written; 1504 sh->dev[i].written = NULL; 1505 if (bi) bitmap_end = 1; 1506 while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS) { 1507 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); 1508 clear_bit(BIO_UPTODATE, &bi->bi_flags); 1509 if (--bi->bi_phys_segments == 0) { 1510 md_write_end(conf->mddev); 1511 bi->bi_next = return_bi; 1512 return_bi = bi; 1513 } 1514 bi = bi2; 1515 } 1516 1517 /* fail any reads if this device is non-operational */ 1518 if (!test_bit(R5_Insync, &sh->dev[i].flags) || 1519 test_bit(R5_ReadError, &sh->dev[i].flags)) { 1520 bi = sh->dev[i].toread; 1521 sh->dev[i].toread = NULL; 1522 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 1523 wake_up(&conf->wait_for_overlap); 1524 if (bi) to_read--; 1525 while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){ 1526 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 1527 clear_bit(BIO_UPTODATE, &bi->bi_flags); 1528 if (--bi->bi_phys_segments == 0) { 1529 bi->bi_next = return_bi; 1530 return_bi = bi; 1531 } 1532 bi = nextbi; 1533 } 1534 } 1535 spin_unlock_irq(&conf->device_lock); 1536 if (bitmap_end) 1537 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 1538 STRIPE_SECTORS, 0, 0); 1539 } 1540 } 1541 if (failed > 1 && syncing) { 1542 md_done_sync(conf->mddev, STRIPE_SECTORS,0); 1543 clear_bit(STRIPE_SYNCING, &sh->state); 1544 syncing = 0; 1545 } 1546 1547 /* might be able to return some write requests if the parity block 1548 * is safe, or on a failed drive 1549 */ 1550 dev = &sh->dev[sh->pd_idx]; 1551 if ( written && 1552 ( (test_bit(R5_Insync, &dev->flags) && !test_bit(R5_LOCKED, &dev->flags) && 1553 test_bit(R5_UPTODATE, &dev->flags)) 1554 || (failed == 1 && failed_num == sh->pd_idx)) 1555 ) { 1556 /* any written block on an uptodate or failed drive can be returned. 1557 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but 1558 * never LOCKED, so we don't need to test 'failed' directly. 
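 * (When the single failed device is the parity disk, the data blocks have
 * already reached their own working devices, so those writes can complete
 * even though the parity update itself never reaches the media.)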
1559 */ 1560 for (i=disks; i--; ) 1561 if (sh->dev[i].written) { 1562 dev = &sh->dev[i]; 1563 if (!test_bit(R5_LOCKED, &dev->flags) && 1564 test_bit(R5_UPTODATE, &dev->flags) ) { 1565 /* We can return any write requests */ 1566 struct bio *wbi, *wbi2; 1567 int bitmap_end = 0; 1568 PRINTK("Return write for disc %d\n", i); 1569 spin_lock_irq(&conf->device_lock); 1570 wbi = dev->written; 1571 dev->written = NULL; 1572 while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) { 1573 wbi2 = r5_next_bio(wbi, dev->sector); 1574 if (--wbi->bi_phys_segments == 0) { 1575 md_write_end(conf->mddev); 1576 wbi->bi_next = return_bi; 1577 return_bi = wbi; 1578 } 1579 wbi = wbi2; 1580 } 1581 if (dev->towrite == NULL) 1582 bitmap_end = 1; 1583 spin_unlock_irq(&conf->device_lock); 1584 if (bitmap_end) 1585 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 1586 STRIPE_SECTORS, 1587 !test_bit(STRIPE_DEGRADED, &sh->state), 0); 1588 } 1589 } 1590 } 1591 1592 /* Now we might consider reading some blocks, either to check/generate 1593 * parity, or to satisfy requests 1594 * or to load a block that is being partially written. 1595 */ 1596 if (to_read || non_overwrite || (syncing && (uptodate < disks)) || expanding) { 1597 for (i=disks; i--;) { 1598 dev = &sh->dev[i]; 1599 if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) && 1600 (dev->toread || 1601 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) || 1602 syncing || 1603 expanding || 1604 (failed && (sh->dev[failed_num].toread || 1605 (sh->dev[failed_num].towrite && !test_bit(R5_OVERWRITE, &sh->dev[failed_num].flags)))) 1606 ) 1607 ) { 1608 /* we would like to get this block, possibly 1609 * by computing it, but we might not be able to 1610 */ 1611 if (uptodate == disks-1) { 1612 PRINTK("Computing block %d\n", i); 1613 compute_block(sh, i); 1614 uptodate++; 1615 } else if (test_bit(R5_Insync, &dev->flags)) { 1616 set_bit(R5_LOCKED, &dev->flags); 1617 set_bit(R5_Wantread, &dev->flags); 1618 #if 0 1619 /* if I am just reading this block and we don't have 1620 a failed drive, or any pending writes then sidestep the cache */ 1621 if (sh->bh_read[i] && !sh->bh_read[i]->b_reqnext && 1622 ! 
syncing && !failed && !to_write) { 1623 sh->bh_cache[i]->b_page = sh->bh_read[i]->b_page; 1624 sh->bh_cache[i]->b_data = sh->bh_read[i]->b_data; 1625 } 1626 #endif 1627 locked++; 1628 PRINTK("Reading block %d (sync=%d)\n", 1629 i, syncing); 1630 } 1631 } 1632 } 1633 set_bit(STRIPE_HANDLE, &sh->state); 1634 } 1635 1636 /* now to consider writing and what else, if anything should be read */ 1637 if (to_write) { 1638 int rmw=0, rcw=0; 1639 for (i=disks ; i--;) { 1640 /* would I have to read this buffer for read_modify_write */ 1641 dev = &sh->dev[i]; 1642 if ((dev->towrite || i == sh->pd_idx) && 1643 (!test_bit(R5_LOCKED, &dev->flags) 1644 #if 0 1645 || sh->bh_page[i]!=bh->b_page 1646 #endif 1647 ) && 1648 !test_bit(R5_UPTODATE, &dev->flags)) { 1649 if (test_bit(R5_Insync, &dev->flags) 1650 /* && !(!mddev->insync && i == sh->pd_idx) */ 1651 ) 1652 rmw++; 1653 else rmw += 2*disks; /* cannot read it */ 1654 } 1655 /* Would I have to read this buffer for reconstruct_write */ 1656 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx && 1657 (!test_bit(R5_LOCKED, &dev->flags) 1658 #if 0 1659 || sh->bh_page[i] != bh->b_page 1660 #endif 1661 ) && 1662 !test_bit(R5_UPTODATE, &dev->flags)) { 1663 if (test_bit(R5_Insync, &dev->flags)) rcw++; 1664 else rcw += 2*disks; 1665 } 1666 } 1667 PRINTK("for sector %llu, rmw=%d rcw=%d\n", 1668 (unsigned long long)sh->sector, rmw, rcw); 1669 set_bit(STRIPE_HANDLE, &sh->state); 1670 if (rmw < rcw && rmw > 0) 1671 /* prefer read-modify-write, but need to get some data */ 1672 for (i=disks; i--;) { 1673 dev = &sh->dev[i]; 1674 if ((dev->towrite || i == sh->pd_idx) && 1675 !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) && 1676 test_bit(R5_Insync, &dev->flags)) { 1677 if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 1678 { 1679 PRINTK("Read_old block %d for r-m-w\n", i); 1680 set_bit(R5_LOCKED, &dev->flags); 1681 set_bit(R5_Wantread, &dev->flags); 1682 locked++; 1683 } else { 1684 set_bit(STRIPE_DELAYED, &sh->state); 1685 set_bit(STRIPE_HANDLE, &sh->state); 1686 } 1687 } 1688 } 1689 if (rcw <= rmw && rcw > 0) 1690 /* want reconstruct write, but need to get some data */ 1691 for (i=disks; i--;) { 1692 dev = &sh->dev[i]; 1693 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx && 1694 !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) && 1695 test_bit(R5_Insync, &dev->flags)) { 1696 if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 1697 { 1698 PRINTK("Read_old block %d for Reconstruct\n", i); 1699 set_bit(R5_LOCKED, &dev->flags); 1700 set_bit(R5_Wantread, &dev->flags); 1701 locked++; 1702 } else { 1703 set_bit(STRIPE_DELAYED, &sh->state); 1704 set_bit(STRIPE_HANDLE, &sh->state); 1705 } 1706 } 1707 } 1708 /* now if nothing is locked, and if we have enough data, we can start a write request */ 1709 if (locked == 0 && (rcw == 0 ||rmw == 0) && 1710 !test_bit(STRIPE_BIT_DELAY, &sh->state)) { 1711 PRINTK("Computing parity...\n"); 1712 compute_parity5(sh, rcw==0 ? 
RECONSTRUCT_WRITE : READ_MODIFY_WRITE); 1713 /* now every locked buffer is ready to be written */ 1714 for (i=disks; i--;) 1715 if (test_bit(R5_LOCKED, &sh->dev[i].flags)) { 1716 PRINTK("Writing block %d\n", i); 1717 locked++; 1718 set_bit(R5_Wantwrite, &sh->dev[i].flags); 1719 if (!test_bit(R5_Insync, &sh->dev[i].flags) 1720 || (i==sh->pd_idx && failed == 0)) 1721 set_bit(STRIPE_INSYNC, &sh->state); 1722 } 1723 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 1724 atomic_dec(&conf->preread_active_stripes); 1725 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) 1726 md_wakeup_thread(conf->mddev->thread); 1727 } 1728 } 1729 } 1730 1731 /* maybe we need to check and possibly fix the parity for this stripe 1732 * Any reads will already have been scheduled, so we just see if enough data 1733 * is available 1734 */ 1735 if (syncing && locked == 0 && 1736 !test_bit(STRIPE_INSYNC, &sh->state)) { 1737 set_bit(STRIPE_HANDLE, &sh->state); 1738 if (failed == 0) { 1739 BUG_ON(uptodate != disks); 1740 compute_parity5(sh, CHECK_PARITY); 1741 uptodate--; 1742 if (page_is_zero(sh->dev[sh->pd_idx].page)) { 1743 /* parity is correct (on disc, not in buffer any more) */ 1744 set_bit(STRIPE_INSYNC, &sh->state); 1745 } else { 1746 conf->mddev->resync_mismatches += STRIPE_SECTORS; 1747 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 1748 /* don't try to repair!! */ 1749 set_bit(STRIPE_INSYNC, &sh->state); 1750 else { 1751 compute_block(sh, sh->pd_idx); 1752 uptodate++; 1753 } 1754 } 1755 } 1756 if (!test_bit(STRIPE_INSYNC, &sh->state)) { 1757 /* either failed parity check, or recovery is happening */ 1758 if (failed==0) 1759 failed_num = sh->pd_idx; 1760 dev = &sh->dev[failed_num]; 1761 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags)); 1762 BUG_ON(uptodate != disks); 1763 1764 set_bit(R5_LOCKED, &dev->flags); 1765 set_bit(R5_Wantwrite, &dev->flags); 1766 clear_bit(STRIPE_DEGRADED, &sh->state); 1767 locked++; 1768 set_bit(STRIPE_INSYNC, &sh->state); 1769 } 1770 } 1771 if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { 1772 md_done_sync(conf->mddev, STRIPE_SECTORS,1); 1773 clear_bit(STRIPE_SYNCING, &sh->state); 1774 } 1775 1776 /* If the failed drive is just a ReadError, then we might need to progress 1777 * the repair/check process 1778 */ 1779 if (failed == 1 && ! 
conf->mddev->ro && 1780 test_bit(R5_ReadError, &sh->dev[failed_num].flags) 1781 && !test_bit(R5_LOCKED, &sh->dev[failed_num].flags) 1782 && test_bit(R5_UPTODATE, &sh->dev[failed_num].flags) 1783 ) { 1784 dev = &sh->dev[failed_num]; 1785 if (!test_bit(R5_ReWrite, &dev->flags)) { 1786 set_bit(R5_Wantwrite, &dev->flags); 1787 set_bit(R5_ReWrite, &dev->flags); 1788 set_bit(R5_LOCKED, &dev->flags); 1789 locked++; 1790 } else { 1791 /* let's read it back */ 1792 set_bit(R5_Wantread, &dev->flags); 1793 set_bit(R5_LOCKED, &dev->flags); 1794 locked++; 1795 } 1796 } 1797 1798 if (expanded && test_bit(STRIPE_EXPANDING, &sh->state)) { 1799 /* Need to write out all blocks after computing parity */ 1800 sh->disks = conf->raid_disks; 1801 sh->pd_idx = stripe_to_pdidx(sh->sector, conf, conf->raid_disks); 1802 compute_parity5(sh, RECONSTRUCT_WRITE); 1803 for (i= conf->raid_disks; i--;) { 1804 set_bit(R5_LOCKED, &sh->dev[i].flags); 1805 locked++; 1806 set_bit(R5_Wantwrite, &sh->dev[i].flags); 1807 } 1808 clear_bit(STRIPE_EXPANDING, &sh->state); 1809 } else if (expanded) { 1810 clear_bit(STRIPE_EXPAND_READY, &sh->state); 1811 atomic_dec(&conf->reshape_stripes); 1812 wake_up(&conf->wait_for_overlap); 1813 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 1814 } 1815 1816 if (expanding && locked == 0) { 1817 /* We have read all the blocks in this stripe and now we need to 1818 * copy some of them into a target stripe for expand. 1819 */ 1820 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state); 1821 for (i=0; i< sh->disks; i++) 1822 if (i != sh->pd_idx) { 1823 int dd_idx, pd_idx, j; 1824 struct stripe_head *sh2; 1825 1826 sector_t bn = compute_blocknr(sh, i); 1827 sector_t s = raid5_compute_sector(bn, conf->raid_disks, 1828 conf->raid_disks-1, 1829 &dd_idx, &pd_idx, conf); 1830 sh2 = get_active_stripe(conf, s, conf->raid_disks, pd_idx, 1); 1831 if (sh2 == NULL) 1832 /* so far only the early blocks of this stripe 1833 * have been requested. 
When later blocks 1834 * get requested, we will try again 1835 */ 1836 continue; 1837 if(!test_bit(STRIPE_EXPANDING, &sh2->state) || 1838 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) { 1839 /* must have already done this block */ 1840 release_stripe(sh2); 1841 continue; 1842 } 1843 memcpy(page_address(sh2->dev[dd_idx].page), 1844 page_address(sh->dev[i].page), 1845 STRIPE_SIZE); 1846 set_bit(R5_Expanded, &sh2->dev[dd_idx].flags); 1847 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags); 1848 for (j=0; j<conf->raid_disks; j++) 1849 if (j != sh2->pd_idx && 1850 !test_bit(R5_Expanded, &sh2->dev[j].flags)) 1851 break; 1852 if (j == conf->raid_disks) { 1853 set_bit(STRIPE_EXPAND_READY, &sh2->state); 1854 set_bit(STRIPE_HANDLE, &sh2->state); 1855 } 1856 release_stripe(sh2); 1857 } 1858 } 1859 1860 spin_unlock(&sh->lock); 1861 1862 while ((bi=return_bi)) { 1863 int bytes = bi->bi_size; 1864 1865 return_bi = bi->bi_next; 1866 bi->bi_next = NULL; 1867 bi->bi_size = 0; 1868 bi->bi_end_io(bi, bytes, 0); 1869 } 1870 for (i=disks; i-- ;) { 1871 int rw; 1872 struct bio *bi; 1873 mdk_rdev_t *rdev; 1874 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) 1875 rw = 1; 1876 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) 1877 rw = 0; 1878 else 1879 continue; 1880 1881 bi = &sh->dev[i].req; 1882 1883 bi->bi_rw = rw; 1884 if (rw) 1885 bi->bi_end_io = raid5_end_write_request; 1886 else 1887 bi->bi_end_io = raid5_end_read_request; 1888 1889 rcu_read_lock(); 1890 rdev = rcu_dereference(conf->disks[i].rdev); 1891 if (rdev && test_bit(Faulty, &rdev->flags)) 1892 rdev = NULL; 1893 if (rdev) 1894 atomic_inc(&rdev->nr_pending); 1895 rcu_read_unlock(); 1896 1897 if (rdev) { 1898 if (syncing || expanding || expanded) 1899 md_sync_acct(rdev->bdev, STRIPE_SECTORS); 1900 1901 bi->bi_bdev = rdev->bdev; 1902 PRINTK("for %llu schedule op %ld on disc %d\n", 1903 (unsigned long long)sh->sector, bi->bi_rw, i); 1904 atomic_inc(&sh->count); 1905 bi->bi_sector = sh->sector + rdev->data_offset; 1906 bi->bi_flags = 1 << BIO_UPTODATE; 1907 bi->bi_vcnt = 1; 1908 bi->bi_max_vecs = 1; 1909 bi->bi_idx = 0; 1910 bi->bi_io_vec = &sh->dev[i].vec; 1911 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 1912 bi->bi_io_vec[0].bv_offset = 0; 1913 bi->bi_size = STRIPE_SIZE; 1914 bi->bi_next = NULL; 1915 if (rw == WRITE && 1916 test_bit(R5_ReWrite, &sh->dev[i].flags)) 1917 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); 1918 generic_make_request(bi); 1919 } else { 1920 if (rw == 1) 1921 set_bit(STRIPE_DEGRADED, &sh->state); 1922 PRINTK("skip op %ld on disc %d for sector %llu\n", 1923 bi->bi_rw, i, (unsigned long long)sh->sector); 1924 clear_bit(R5_LOCKED, &sh->dev[i].flags); 1925 set_bit(STRIPE_HANDLE, &sh->state); 1926 } 1927 } 1928 } 1929 1930 static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page) 1931 { 1932 raid6_conf_t *conf = sh->raid_conf; 1933 int disks = conf->raid_disks; 1934 struct bio *return_bi= NULL; 1935 struct bio *bi; 1936 int i; 1937 int syncing; 1938 int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0; 1939 int non_overwrite = 0; 1940 int failed_num[2] = {0, 0}; 1941 struct r5dev *dev, *pdev, *qdev; 1942 int pd_idx = sh->pd_idx; 1943 int qd_idx = raid6_next_disk(pd_idx, disks); 1944 int p_failed, q_failed; 1945 1946 PRINTK("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d, qd_idx=%d\n", 1947 (unsigned long long)sh->sector, sh->state, atomic_read(&sh->count), 1948 pd_idx, qd_idx); 1949 1950 spin_lock(&sh->lock); 1951 clear_bit(STRIPE_HANDLE, &sh->state); 1952 
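        /*
         * Layout note: in this RAID-6 code the Q syndrome block always sits
         * on the disk immediately after the P (parity) disk, wrapping at the
         * end of the array (see the raid6_next_disk() call above).  For
         * example, with disks == 5, pd_idx == 3 gives qd_idx == 4, while
         * pd_idx == 4 wraps around to qd_idx == 0.
         */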
clear_bit(STRIPE_DELAYED, &sh->state); 1953 1954 syncing = test_bit(STRIPE_SYNCING, &sh->state); 1955 /* Now to look around and see what can be done */ 1956 1957 rcu_read_lock(); 1958 for (i=disks; i--; ) { 1959 mdk_rdev_t *rdev; 1960 dev = &sh->dev[i]; 1961 clear_bit(R5_Insync, &dev->flags); 1962 1963 PRINTK("check %d: state 0x%lx read %p write %p written %p\n", 1964 i, dev->flags, dev->toread, dev->towrite, dev->written); 1965 /* maybe we can reply to a read */ 1966 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) { 1967 struct bio *rbi, *rbi2; 1968 PRINTK("Return read for disc %d\n", i); 1969 spin_lock_irq(&conf->device_lock); 1970 rbi = dev->toread; 1971 dev->toread = NULL; 1972 if (test_and_clear_bit(R5_Overlap, &dev->flags)) 1973 wake_up(&conf->wait_for_overlap); 1974 spin_unlock_irq(&conf->device_lock); 1975 while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) { 1976 copy_data(0, rbi, dev->page, dev->sector); 1977 rbi2 = r5_next_bio(rbi, dev->sector); 1978 spin_lock_irq(&conf->device_lock); 1979 if (--rbi->bi_phys_segments == 0) { 1980 rbi->bi_next = return_bi; 1981 return_bi = rbi; 1982 } 1983 spin_unlock_irq(&conf->device_lock); 1984 rbi = rbi2; 1985 } 1986 } 1987 1988 /* now count some things */ 1989 if (test_bit(R5_LOCKED, &dev->flags)) locked++; 1990 if (test_bit(R5_UPTODATE, &dev->flags)) uptodate++; 1991 1992 1993 if (dev->toread) to_read++; 1994 if (dev->towrite) { 1995 to_write++; 1996 if (!test_bit(R5_OVERWRITE, &dev->flags)) 1997 non_overwrite++; 1998 } 1999 if (dev->written) written++; 2000 rdev = rcu_dereference(conf->disks[i].rdev); 2001 if (!rdev || !test_bit(In_sync, &rdev->flags)) { 2002 /* The ReadError flag will just be confusing now */ 2003 clear_bit(R5_ReadError, &dev->flags); 2004 clear_bit(R5_ReWrite, &dev->flags); 2005 } 2006 if (!rdev || !test_bit(In_sync, &rdev->flags) 2007 || test_bit(R5_ReadError, &dev->flags)) { 2008 if ( failed < 2 ) 2009 failed_num[failed] = i; 2010 failed++; 2011 } else 2012 set_bit(R5_Insync, &dev->flags); 2013 } 2014 rcu_read_unlock(); 2015 PRINTK("locked=%d uptodate=%d to_read=%d" 2016 " to_write=%d failed=%d failed_num=%d,%d\n", 2017 locked, uptodate, to_read, to_write, failed, 2018 failed_num[0], failed_num[1]); 2019 /* check if the array has lost >2 devices and, if so, some requests might 2020 * need to be failed 2021 */ 2022 if (failed > 2 && to_read+to_write+written) { 2023 for (i=disks; i--; ) { 2024 int bitmap_end = 0; 2025 2026 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 2027 mdk_rdev_t *rdev; 2028 rcu_read_lock(); 2029 rdev = rcu_dereference(conf->disks[i].rdev); 2030 if (rdev && test_bit(In_sync, &rdev->flags)) 2031 /* multiple read failures in one stripe */ 2032 md_error(conf->mddev, rdev); 2033 rcu_read_unlock(); 2034 } 2035 2036 spin_lock_irq(&conf->device_lock); 2037 /* fail all writes first */ 2038 bi = sh->dev[i].towrite; 2039 sh->dev[i].towrite = NULL; 2040 if (bi) { to_write--; bitmap_end = 1; } 2041 2042 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 2043 wake_up(&conf->wait_for_overlap); 2044 2045 while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){ 2046 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 2047 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2048 if (--bi->bi_phys_segments == 0) { 2049 md_write_end(conf->mddev); 2050 bi->bi_next = return_bi; 2051 return_bi = bi; 2052 } 2053 bi = nextbi; 2054 } 2055 /* and fail all 'written' */ 2056 bi = sh->dev[i].written; 2057 sh->dev[i].written = NULL; 2058 if (bi) bitmap_end = 1; 2059 while (bi && bi->bi_sector < 
sh->dev[i].sector + STRIPE_SECTORS) { 2060 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); 2061 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2062 if (--bi->bi_phys_segments == 0) { 2063 md_write_end(conf->mddev); 2064 bi->bi_next = return_bi; 2065 return_bi = bi; 2066 } 2067 bi = bi2; 2068 } 2069 2070 /* fail any reads if this device is non-operational */ 2071 if (!test_bit(R5_Insync, &sh->dev[i].flags) || 2072 test_bit(R5_ReadError, &sh->dev[i].flags)) { 2073 bi = sh->dev[i].toread; 2074 sh->dev[i].toread = NULL; 2075 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 2076 wake_up(&conf->wait_for_overlap); 2077 if (bi) to_read--; 2078 while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){ 2079 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 2080 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2081 if (--bi->bi_phys_segments == 0) { 2082 bi->bi_next = return_bi; 2083 return_bi = bi; 2084 } 2085 bi = nextbi; 2086 } 2087 } 2088 spin_unlock_irq(&conf->device_lock); 2089 if (bitmap_end) 2090 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 2091 STRIPE_SECTORS, 0, 0); 2092 } 2093 } 2094 if (failed > 2 && syncing) { 2095 md_done_sync(conf->mddev, STRIPE_SECTORS,0); 2096 clear_bit(STRIPE_SYNCING, &sh->state); 2097 syncing = 0; 2098 } 2099 2100 /* 2101 * might be able to return some write requests if the parity blocks 2102 * are safe, or on a failed drive 2103 */ 2104 pdev = &sh->dev[pd_idx]; 2105 p_failed = (failed >= 1 && failed_num[0] == pd_idx) 2106 || (failed >= 2 && failed_num[1] == pd_idx); 2107 qdev = &sh->dev[qd_idx]; 2108 q_failed = (failed >= 1 && failed_num[0] == qd_idx) 2109 || (failed >= 2 && failed_num[1] == qd_idx); 2110 2111 if ( written && 2112 ( p_failed || ((test_bit(R5_Insync, &pdev->flags) 2113 && !test_bit(R5_LOCKED, &pdev->flags) 2114 && test_bit(R5_UPTODATE, &pdev->flags))) ) && 2115 ( q_failed || ((test_bit(R5_Insync, &qdev->flags) 2116 && !test_bit(R5_LOCKED, &qdev->flags) 2117 && test_bit(R5_UPTODATE, &qdev->flags))) ) ) { 2118 /* any written block on an uptodate or failed drive can be 2119 * returned. Note that if we 'wrote' to a failed drive, 2120 * it will be UPTODATE, but never LOCKED, so we don't need 2121 * to test 'failed' directly. 2122 */ 2123 for (i=disks; i--; ) 2124 if (sh->dev[i].written) { 2125 dev = &sh->dev[i]; 2126 if (!test_bit(R5_LOCKED, &dev->flags) && 2127 test_bit(R5_UPTODATE, &dev->flags) ) { 2128 /* We can return any write requests */ 2129 int bitmap_end = 0; 2130 struct bio *wbi, *wbi2; 2131 PRINTK("Return write for stripe %llu disc %d\n", 2132 (unsigned long long)sh->sector, i); 2133 spin_lock_irq(&conf->device_lock); 2134 wbi = dev->written; 2135 dev->written = NULL; 2136 while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) { 2137 wbi2 = r5_next_bio(wbi, dev->sector); 2138 if (--wbi->bi_phys_segments == 0) { 2139 md_write_end(conf->mddev); 2140 wbi->bi_next = return_bi; 2141 return_bi = wbi; 2142 } 2143 wbi = wbi2; 2144 } 2145 if (dev->towrite == NULL) 2146 bitmap_end = 1; 2147 spin_unlock_irq(&conf->device_lock); 2148 if (bitmap_end) 2149 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 2150 STRIPE_SECTORS, 2151 !test_bit(STRIPE_DEGRADED, &sh->state), 0); 2152 } 2153 } 2154 } 2155 2156 /* Now we might consider reading some blocks, either to check/generate 2157 * parity, or to satisfy requests 2158 * or to load a block that is being partially written. 
2159 */ 2160 if (to_read || non_overwrite || (to_write && failed) || (syncing && (uptodate < disks))) { 2161 for (i=disks; i--;) { 2162 dev = &sh->dev[i]; 2163 if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) && 2164 (dev->toread || 2165 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) || 2166 syncing || 2167 (failed >= 1 && (sh->dev[failed_num[0]].toread || to_write)) || 2168 (failed >= 2 && (sh->dev[failed_num[1]].toread || to_write)) 2169 ) 2170 ) { 2171 /* we would like to get this block, possibly 2172 * by computing it, but we might not be able to 2173 */ 2174 if (uptodate == disks-1) { 2175 PRINTK("Computing stripe %llu block %d\n", 2176 (unsigned long long)sh->sector, i); 2177 compute_block_1(sh, i, 0); 2178 uptodate++; 2179 } else if ( uptodate == disks-2 && failed >= 2 ) { 2180 /* Computing 2-failure is *very* expensive; only do it if failed >= 2 */ 2181 int other; 2182 for (other=disks; other--;) { 2183 if ( other == i ) 2184 continue; 2185 if ( !test_bit(R5_UPTODATE, &sh->dev[other].flags) ) 2186 break; 2187 } 2188 BUG_ON(other < 0); 2189 PRINTK("Computing stripe %llu blocks %d,%d\n", 2190 (unsigned long long)sh->sector, i, other); 2191 compute_block_2(sh, i, other); 2192 uptodate += 2; 2193 } else if (test_bit(R5_Insync, &dev->flags)) { 2194 set_bit(R5_LOCKED, &dev->flags); 2195 set_bit(R5_Wantread, &dev->flags); 2196 #if 0 2197 /* if I am just reading this block and we don't have 2198 a failed drive, or any pending writes then sidestep the cache */ 2199 if (sh->bh_read[i] && !sh->bh_read[i]->b_reqnext && 2200 ! syncing && !failed && !to_write) { 2201 sh->bh_cache[i]->b_page = sh->bh_read[i]->b_page; 2202 sh->bh_cache[i]->b_data = sh->bh_read[i]->b_data; 2203 } 2204 #endif 2205 locked++; 2206 PRINTK("Reading block %d (sync=%d)\n", 2207 i, syncing); 2208 } 2209 } 2210 } 2211 set_bit(STRIPE_HANDLE, &sh->state); 2212 } 2213 2214 /* now to consider writing and what else, if anything should be read */ 2215 if (to_write) { 2216 int rcw=0, must_compute=0; 2217 for (i=disks ; i--;) { 2218 dev = &sh->dev[i]; 2219 /* Would I have to read this buffer for reconstruct_write */ 2220 if (!test_bit(R5_OVERWRITE, &dev->flags) 2221 && i != pd_idx && i != qd_idx 2222 && (!test_bit(R5_LOCKED, &dev->flags) 2223 #if 0 2224 || sh->bh_page[i] != bh->b_page 2225 #endif 2226 ) && 2227 !test_bit(R5_UPTODATE, &dev->flags)) { 2228 if (test_bit(R5_Insync, &dev->flags)) rcw++; 2229 else { 2230 PRINTK("raid6: must_compute: disk %d flags=%#lx\n", i, dev->flags); 2231 must_compute++; 2232 } 2233 } 2234 } 2235 PRINTK("for sector %llu, rcw=%d, must_compute=%d\n", 2236 (unsigned long long)sh->sector, rcw, must_compute); 2237 set_bit(STRIPE_HANDLE, &sh->state); 2238 2239 if (rcw > 0) 2240 /* want reconstruct write, but need to get some data */ 2241 for (i=disks; i--;) { 2242 dev = &sh->dev[i]; 2243 if (!test_bit(R5_OVERWRITE, &dev->flags) 2244 && !(failed == 0 && (i == pd_idx || i == qd_idx)) 2245 && !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) && 2246 test_bit(R5_Insync, &dev->flags)) { 2247 if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 2248 { 2249 PRINTK("Read_old stripe %llu block %d for Reconstruct\n", 2250 (unsigned long long)sh->sector, i); 2251 set_bit(R5_LOCKED, &dev->flags); 2252 set_bit(R5_Wantread, &dev->flags); 2253 locked++; 2254 } else { 2255 PRINTK("Request delayed stripe %llu block %d for Reconstruct\n", 2256 (unsigned long long)sh->sector, i); 2257 set_bit(STRIPE_DELAYED, &sh->state); 2258 set_bit(STRIPE_HANDLE, &sh->state); 2259 } 
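                                /*
                                 * Unlike the RAID-5 path there is no
                                 * read-modify-write option here: parity
                                 * updates in handle_stripe6 always use a
                                 * full reconstruct-write (compute_parity6 is
                                 * only called with RECONSTRUCT_WRITE or
                                 * UPDATE_PARITY), so any in-sync block that
                                 * is not being completely overwritten has to
                                 * be read in, or computed, first.
                                 */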
2260 } 2261 } 2262 /* now if nothing is locked, and if we have enough data, we can start a write request */ 2263 if (locked == 0 && rcw == 0 && 2264 !test_bit(STRIPE_BIT_DELAY, &sh->state)) { 2265 if ( must_compute > 0 ) { 2266 /* We have failed blocks and need to compute them */ 2267 switch ( failed ) { 2268 case 0: BUG(); 2269 case 1: compute_block_1(sh, failed_num[0], 0); break; 2270 case 2: compute_block_2(sh, failed_num[0], failed_num[1]); break; 2271 default: BUG(); /* This request should have been failed? */ 2272 } 2273 } 2274 2275 PRINTK("Computing parity for stripe %llu\n", (unsigned long long)sh->sector); 2276 compute_parity6(sh, RECONSTRUCT_WRITE); 2277 /* now every locked buffer is ready to be written */ 2278 for (i=disks; i--;) 2279 if (test_bit(R5_LOCKED, &sh->dev[i].flags)) { 2280 PRINTK("Writing stripe %llu block %d\n", 2281 (unsigned long long)sh->sector, i); 2282 locked++; 2283 set_bit(R5_Wantwrite, &sh->dev[i].flags); 2284 } 2285 /* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */ 2286 set_bit(STRIPE_INSYNC, &sh->state); 2287 2288 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2289 atomic_dec(&conf->preread_active_stripes); 2290 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) 2291 md_wakeup_thread(conf->mddev->thread); 2292 } 2293 } 2294 } 2295 2296 /* maybe we need to check and possibly fix the parity for this stripe 2297 * Any reads will already have been scheduled, so we just see if enough data 2298 * is available 2299 */ 2300 if (syncing && locked == 0 && !test_bit(STRIPE_INSYNC, &sh->state)) { 2301 int update_p = 0, update_q = 0; 2302 struct r5dev *dev; 2303 2304 set_bit(STRIPE_HANDLE, &sh->state); 2305 2306 BUG_ON(failed>2); 2307 BUG_ON(uptodate < disks); 2308 /* Want to check and possibly repair P and Q. 2309 * However there could be one 'failed' device, in which 2310 * case we can only check one of them, possibly using the 2311 * other to generate missing data 2312 */ 2313 2314 /* If !tmp_page, we cannot do the calculations, 2315 * but as we have set STRIPE_HANDLE, we will soon be called 2316 * by stripe_handle with a tmp_page - just wait until then. 2317 */ 2318 if (tmp_page) { 2319 if (failed == q_failed) { 2320 /* The only possible failed device holds 'Q', so it makes 2321 * sense to check P (If anything else were failed, we would 2322 * have used P to recreate it). 2323 */ 2324 compute_block_1(sh, pd_idx, 1); 2325 if (!page_is_zero(sh->dev[pd_idx].page)) { 2326 compute_block_1(sh,pd_idx,0); 2327 update_p = 1; 2328 } 2329 } 2330 if (!q_failed && failed < 2) { 2331 /* q is not failed, and we didn't use it to generate 2332 * anything, so it makes sense to check it 2333 */ 2334 memcpy(page_address(tmp_page), 2335 page_address(sh->dev[qd_idx].page), 2336 STRIPE_SIZE); 2337 compute_parity6(sh, UPDATE_PARITY); 2338 if (memcmp(page_address(tmp_page), 2339 page_address(sh->dev[qd_idx].page), 2340 STRIPE_SIZE)!= 0) { 2341 clear_bit(STRIPE_INSYNC, &sh->state); 2342 update_q = 1; 2343 } 2344 } 2345 if (update_p || update_q) { 2346 conf->mddev->resync_mismatches += STRIPE_SECTORS; 2347 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 2348 /* don't try to repair!! 
*/ 2349 update_p = update_q = 0; 2350 } 2351 2352 /* now write out any block on a failed drive, 2353 * or P or Q if they need it 2354 */ 2355 2356 if (failed == 2) { 2357 dev = &sh->dev[failed_num[1]]; 2358 locked++; 2359 set_bit(R5_LOCKED, &dev->flags); 2360 set_bit(R5_Wantwrite, &dev->flags); 2361 } 2362 if (failed >= 1) { 2363 dev = &sh->dev[failed_num[0]]; 2364 locked++; 2365 set_bit(R5_LOCKED, &dev->flags); 2366 set_bit(R5_Wantwrite, &dev->flags); 2367 } 2368 2369 if (update_p) { 2370 dev = &sh->dev[pd_idx]; 2371 locked ++; 2372 set_bit(R5_LOCKED, &dev->flags); 2373 set_bit(R5_Wantwrite, &dev->flags); 2374 } 2375 if (update_q) { 2376 dev = &sh->dev[qd_idx]; 2377 locked++; 2378 set_bit(R5_LOCKED, &dev->flags); 2379 set_bit(R5_Wantwrite, &dev->flags); 2380 } 2381 clear_bit(STRIPE_DEGRADED, &sh->state); 2382 2383 set_bit(STRIPE_INSYNC, &sh->state); 2384 } 2385 } 2386 2387 if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { 2388 md_done_sync(conf->mddev, STRIPE_SECTORS,1); 2389 clear_bit(STRIPE_SYNCING, &sh->state); 2390 } 2391 2392 /* If the failed drives are just a ReadError, then we might need 2393 * to progress the repair/check process 2394 */ 2395 if (failed <= 2 && ! conf->mddev->ro) 2396 for (i=0; i<failed;i++) { 2397 dev = &sh->dev[failed_num[i]]; 2398 if (test_bit(R5_ReadError, &dev->flags) 2399 && !test_bit(R5_LOCKED, &dev->flags) 2400 && test_bit(R5_UPTODATE, &dev->flags) 2401 ) { 2402 if (!test_bit(R5_ReWrite, &dev->flags)) { 2403 set_bit(R5_Wantwrite, &dev->flags); 2404 set_bit(R5_ReWrite, &dev->flags); 2405 set_bit(R5_LOCKED, &dev->flags); 2406 } else { 2407 /* let's read it back */ 2408 set_bit(R5_Wantread, &dev->flags); 2409 set_bit(R5_LOCKED, &dev->flags); 2410 } 2411 } 2412 } 2413 spin_unlock(&sh->lock); 2414 2415 while ((bi=return_bi)) { 2416 int bytes = bi->bi_size; 2417 2418 return_bi = bi->bi_next; 2419 bi->bi_next = NULL; 2420 bi->bi_size = 0; 2421 bi->bi_end_io(bi, bytes, 0); 2422 } 2423 for (i=disks; i-- ;) { 2424 int rw; 2425 struct bio *bi; 2426 mdk_rdev_t *rdev; 2427 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) 2428 rw = 1; 2429 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) 2430 rw = 0; 2431 else 2432 continue; 2433 2434 bi = &sh->dev[i].req; 2435 2436 bi->bi_rw = rw; 2437 if (rw) 2438 bi->bi_end_io = raid5_end_write_request; 2439 else 2440 bi->bi_end_io = raid5_end_read_request; 2441 2442 rcu_read_lock(); 2443 rdev = rcu_dereference(conf->disks[i].rdev); 2444 if (rdev && test_bit(Faulty, &rdev->flags)) 2445 rdev = NULL; 2446 if (rdev) 2447 atomic_inc(&rdev->nr_pending); 2448 rcu_read_unlock(); 2449 2450 if (rdev) { 2451 if (syncing) 2452 md_sync_acct(rdev->bdev, STRIPE_SECTORS); 2453 2454 bi->bi_bdev = rdev->bdev; 2455 PRINTK("for %llu schedule op %ld on disc %d\n", 2456 (unsigned long long)sh->sector, bi->bi_rw, i); 2457 atomic_inc(&sh->count); 2458 bi->bi_sector = sh->sector + rdev->data_offset; 2459 bi->bi_flags = 1 << BIO_UPTODATE; 2460 bi->bi_vcnt = 1; 2461 bi->bi_max_vecs = 1; 2462 bi->bi_idx = 0; 2463 bi->bi_io_vec = &sh->dev[i].vec; 2464 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 2465 bi->bi_io_vec[0].bv_offset = 0; 2466 bi->bi_size = STRIPE_SIZE; 2467 bi->bi_next = NULL; 2468 if (rw == WRITE && 2469 test_bit(R5_ReWrite, &sh->dev[i].flags)) 2470 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); 2471 generic_make_request(bi); 2472 } else { 2473 if (rw == 1) 2474 set_bit(STRIPE_DEGRADED, &sh->state); 2475 PRINTK("skip op %ld on disc %d for sector %llu\n", 2476 bi->bi_rw, i, (unsigned long long)sh->sector); 2477 
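                        /*
                         * No usable device: just unlock the buffer and mark
                         * the stripe for another pass of handle_stripe().
                         * For a write, the STRIPE_DEGRADED bit set just above
                         * makes the later bitmap_endwrite() call treat the
                         * stripe as not fully written, so its bitmap bit is
                         * not cleared.
                         */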
clear_bit(R5_LOCKED, &sh->dev[i].flags); 2478 set_bit(STRIPE_HANDLE, &sh->state); 2479 } 2480 } 2481 } 2482 2483 static void handle_stripe(struct stripe_head *sh, struct page *tmp_page) 2484 { 2485 if (sh->raid_conf->level == 6) 2486 handle_stripe6(sh, tmp_page); 2487 else 2488 handle_stripe5(sh); 2489 } 2490 2491 2492 2493 static void raid5_activate_delayed(raid5_conf_t *conf) 2494 { 2495 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { 2496 while (!list_empty(&conf->delayed_list)) { 2497 struct list_head *l = conf->delayed_list.next; 2498 struct stripe_head *sh; 2499 sh = list_entry(l, struct stripe_head, lru); 2500 list_del_init(l); 2501 clear_bit(STRIPE_DELAYED, &sh->state); 2502 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 2503 atomic_inc(&conf->preread_active_stripes); 2504 list_add_tail(&sh->lru, &conf->handle_list); 2505 } 2506 } 2507 } 2508 2509 static void activate_bit_delay(raid5_conf_t *conf) 2510 { 2511 /* device_lock is held */ 2512 struct list_head head; 2513 list_add(&head, &conf->bitmap_list); 2514 list_del_init(&conf->bitmap_list); 2515 while (!list_empty(&head)) { 2516 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); 2517 list_del_init(&sh->lru); 2518 atomic_inc(&sh->count); 2519 __release_stripe(conf, sh); 2520 } 2521 } 2522 2523 static void unplug_slaves(mddev_t *mddev) 2524 { 2525 raid5_conf_t *conf = mddev_to_conf(mddev); 2526 int i; 2527 2528 rcu_read_lock(); 2529 for (i=0; i<mddev->raid_disks; i++) { 2530 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); 2531 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { 2532 request_queue_t *r_queue = bdev_get_queue(rdev->bdev); 2533 2534 atomic_inc(&rdev->nr_pending); 2535 rcu_read_unlock(); 2536 2537 if (r_queue->unplug_fn) 2538 r_queue->unplug_fn(r_queue); 2539 2540 rdev_dec_pending(rdev, mddev); 2541 rcu_read_lock(); 2542 } 2543 } 2544 rcu_read_unlock(); 2545 } 2546 2547 static void raid5_unplug_device(request_queue_t *q) 2548 { 2549 mddev_t *mddev = q->queuedata; 2550 raid5_conf_t *conf = mddev_to_conf(mddev); 2551 unsigned long flags; 2552 2553 spin_lock_irqsave(&conf->device_lock, flags); 2554 2555 if (blk_remove_plug(q)) { 2556 conf->seq_flush++; 2557 raid5_activate_delayed(conf); 2558 } 2559 md_wakeup_thread(mddev->thread); 2560 2561 spin_unlock_irqrestore(&conf->device_lock, flags); 2562 2563 unplug_slaves(mddev); 2564 } 2565 2566 static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk, 2567 sector_t *error_sector) 2568 { 2569 mddev_t *mddev = q->queuedata; 2570 raid5_conf_t *conf = mddev_to_conf(mddev); 2571 int i, ret = 0; 2572 2573 rcu_read_lock(); 2574 for (i=0; i<mddev->raid_disks && ret == 0; i++) { 2575 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); 2576 if (rdev && !test_bit(Faulty, &rdev->flags)) { 2577 struct block_device *bdev = rdev->bdev; 2578 request_queue_t *r_queue = bdev_get_queue(bdev); 2579 2580 if (!r_queue->issue_flush_fn) 2581 ret = -EOPNOTSUPP; 2582 else { 2583 atomic_inc(&rdev->nr_pending); 2584 rcu_read_unlock(); 2585 ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk, 2586 error_sector); 2587 rdev_dec_pending(rdev, mddev); 2588 rcu_read_lock(); 2589 } 2590 } 2591 } 2592 rcu_read_unlock(); 2593 return ret; 2594 } 2595 2596 static int raid5_congested(void *data, int bits) 2597 { 2598 mddev_t *mddev = data; 2599 raid5_conf_t *conf = mddev_to_conf(mddev); 2600 2601 /* No difference between reads and writes. 
Just check 2602 * how busy the stripe_cache is 2603 */ 2604 if (conf->inactive_blocked) 2605 return 1; 2606 if (conf->quiesce) 2607 return 1; 2608 if (list_empty_careful(&conf->inactive_list)) 2609 return 1; 2610 2611 return 0; 2612 } 2613 2614 static int make_request(request_queue_t *q, struct bio * bi) 2615 { 2616 mddev_t *mddev = q->queuedata; 2617 raid5_conf_t *conf = mddev_to_conf(mddev); 2618 unsigned int dd_idx, pd_idx; 2619 sector_t new_sector; 2620 sector_t logical_sector, last_sector; 2621 struct stripe_head *sh; 2622 const int rw = bio_data_dir(bi); 2623 int remaining; 2624 2625 if (unlikely(bio_barrier(bi))) { 2626 bio_endio(bi, bi->bi_size, -EOPNOTSUPP); 2627 return 0; 2628 } 2629 2630 md_write_start(mddev, bi); 2631 2632 disk_stat_inc(mddev->gendisk, ios[rw]); 2633 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi)); 2634 2635 logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 2636 last_sector = bi->bi_sector + (bi->bi_size>>9); 2637 bi->bi_next = NULL; 2638 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ 2639 2640 for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { 2641 DEFINE_WAIT(w); 2642 int disks, data_disks; 2643 2644 retry: 2645 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); 2646 if (likely(conf->expand_progress == MaxSector)) 2647 disks = conf->raid_disks; 2648 else { 2649 /* spinlock is needed as expand_progress may be 2650 * 64bit on a 32bit platform, and so it might be 2651 * possible to see a half-updated value 2652 * Ofcourse expand_progress could change after 2653 * the lock is dropped, so once we get a reference 2654 * to the stripe that we think it is, we will have 2655 * to check again. 2656 */ 2657 spin_lock_irq(&conf->device_lock); 2658 disks = conf->raid_disks; 2659 if (logical_sector >= conf->expand_progress) 2660 disks = conf->previous_raid_disks; 2661 else { 2662 if (logical_sector >= conf->expand_lo) { 2663 spin_unlock_irq(&conf->device_lock); 2664 schedule(); 2665 goto retry; 2666 } 2667 } 2668 spin_unlock_irq(&conf->device_lock); 2669 } 2670 data_disks = disks - conf->max_degraded; 2671 2672 new_sector = raid5_compute_sector(logical_sector, disks, data_disks, 2673 &dd_idx, &pd_idx, conf); 2674 PRINTK("raid5: make_request, sector %llu logical %llu\n", 2675 (unsigned long long)new_sector, 2676 (unsigned long long)logical_sector); 2677 2678 sh = get_active_stripe(conf, new_sector, disks, pd_idx, (bi->bi_rw&RWA_MASK)); 2679 if (sh) { 2680 if (unlikely(conf->expand_progress != MaxSector)) { 2681 /* expansion might have moved on while waiting for a 2682 * stripe, so we must do the range check again. 2683 * Expansion could still move past after this 2684 * test, but as we are holding a reference to 2685 * 'sh', we know that if that happens, 2686 * STRIPE_EXPANDING will get set and the expansion 2687 * won't proceed until we finish with the stripe. 2688 */ 2689 int must_retry = 0; 2690 spin_lock_irq(&conf->device_lock); 2691 if (logical_sector < conf->expand_progress && 2692 disks == conf->previous_raid_disks) 2693 /* mismatch, need to try again */ 2694 must_retry = 1; 2695 spin_unlock_irq(&conf->device_lock); 2696 if (must_retry) { 2697 release_stripe(sh); 2698 goto retry; 2699 } 2700 } 2701 /* FIXME what if we get a false positive because these 2702 * are being updated. 
2703 */ 2704 if (logical_sector >= mddev->suspend_lo && 2705 logical_sector < mddev->suspend_hi) { 2706 release_stripe(sh); 2707 schedule(); 2708 goto retry; 2709 } 2710 2711 if (test_bit(STRIPE_EXPANDING, &sh->state) || 2712 !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) { 2713 /* Stripe is busy expanding or 2714 * add failed due to overlap. Flush everything 2715 * and wait a while 2716 */ 2717 raid5_unplug_device(mddev->queue); 2718 release_stripe(sh); 2719 schedule(); 2720 goto retry; 2721 } 2722 finish_wait(&conf->wait_for_overlap, &w); 2723 handle_stripe(sh, NULL); 2724 release_stripe(sh); 2725 } else { 2726 /* cannot get stripe for read-ahead, just give-up */ 2727 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2728 finish_wait(&conf->wait_for_overlap, &w); 2729 break; 2730 } 2731 2732 } 2733 spin_lock_irq(&conf->device_lock); 2734 remaining = --bi->bi_phys_segments; 2735 spin_unlock_irq(&conf->device_lock); 2736 if (remaining == 0) { 2737 int bytes = bi->bi_size; 2738 2739 if ( rw == WRITE ) 2740 md_write_end(mddev); 2741 bi->bi_size = 0; 2742 bi->bi_end_io(bi, bytes, 0); 2743 } 2744 return 0; 2745 } 2746 2747 static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped) 2748 { 2749 /* reshaping is quite different to recovery/resync so it is 2750 * handled quite separately ... here. 2751 * 2752 * On each call to sync_request, we gather one chunk worth of 2753 * destination stripes and flag them as expanding. 2754 * Then we find all the source stripes and request reads. 2755 * As the reads complete, handle_stripe will copy the data 2756 * into the destination stripe and release that stripe. 2757 */ 2758 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 2759 struct stripe_head *sh; 2760 int pd_idx; 2761 sector_t first_sector, last_sector; 2762 int raid_disks; 2763 int data_disks; 2764 int i; 2765 int dd_idx; 2766 sector_t writepos, safepos, gap; 2767 2768 if (sector_nr == 0 && 2769 conf->expand_progress != 0) { 2770 /* restarting in the middle, skip the initial sectors */ 2771 sector_nr = conf->expand_progress; 2772 sector_div(sector_nr, conf->raid_disks-1); 2773 *skipped = 1; 2774 return sector_nr; 2775 } 2776 2777 /* we update the metadata when there is more than 3Meg 2778 * in the block range (that is rather arbitrary, should 2779 * probably be time based) or when the data about to be 2780 * copied would over-write the source of the data at 2781 * the front of the range. 2782 * i.e. one new_stripe forward from expand_progress new_maps 2783 * to after where expand_lo old_maps to 2784 */ 2785 writepos = conf->expand_progress + 2786 conf->chunk_size/512*(conf->raid_disks-1); 2787 sector_div(writepos, conf->raid_disks-1); 2788 safepos = conf->expand_lo; 2789 sector_div(safepos, conf->previous_raid_disks-1); 2790 gap = conf->expand_progress - conf->expand_lo; 2791 2792 if (writepos >= safepos || 2793 gap > (conf->raid_disks-1)*3000*2 /*3Meg*/) { 2794 /* Cannot proceed until we've updated the superblock... 
 */
                wait_event(conf->wait_for_overlap,
                           atomic_read(&conf->reshape_stripes)==0);
                mddev->reshape_position = conf->expand_progress;
                set_bit(MD_CHANGE_DEVS, &mddev->flags);
                md_wakeup_thread(mddev->thread);
                wait_event(mddev->sb_wait, mddev->flags == 0 ||
                           kthread_should_stop());
                spin_lock_irq(&conf->device_lock);
                conf->expand_lo = mddev->reshape_position;
                spin_unlock_irq(&conf->device_lock);
                wake_up(&conf->wait_for_overlap);
        }

        for (i=0; i < conf->chunk_size/512; i+= STRIPE_SECTORS) {
                int j;
                int skipped = 0;
                pd_idx = stripe_to_pdidx(sector_nr+i, conf, conf->raid_disks);
                sh = get_active_stripe(conf, sector_nr+i,
                                       conf->raid_disks, pd_idx, 0);
                set_bit(STRIPE_EXPANDING, &sh->state);
                atomic_inc(&conf->reshape_stripes);
                /* If any of this stripe is beyond the end of the old
                 * array, then we need to zero those blocks
                 */
                for (j=sh->disks; j--;) {
                        sector_t s;
                        if (j == sh->pd_idx)
                                continue;
                        s = compute_blocknr(sh, j);
                        if (s < (mddev->array_size<<1)) {
                                skipped = 1;
                                continue;
                        }
                        memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
                        set_bit(R5_Expanded, &sh->dev[j].flags);
                        set_bit(R5_UPTODATE, &sh->dev[j].flags);
                }
                if (!skipped) {
                        set_bit(STRIPE_EXPAND_READY, &sh->state);
                        set_bit(STRIPE_HANDLE, &sh->state);
                }
                release_stripe(sh);
        }
        spin_lock_irq(&conf->device_lock);
        conf->expand_progress = (sector_nr + i)*(conf->raid_disks-1);
        spin_unlock_irq(&conf->device_lock);
        /* Ok, those stripes are ready. We can start scheduling
         * reads on the source stripes.
         * The source stripes are determined by mapping the first and last
         * block on the destination stripes.
         */
        raid_disks = conf->previous_raid_disks;
        data_disks = raid_disks - 1;
        first_sector =
                raid5_compute_sector(sector_nr*(conf->raid_disks-1),
                                     raid_disks, data_disks,
                                     &dd_idx, &pd_idx, conf);
        last_sector =
                raid5_compute_sector((sector_nr+conf->chunk_size/512)
                                     *(conf->raid_disks-1) -1,
                                     raid_disks, data_disks,
                                     &dd_idx, &pd_idx, conf);
        if (last_sector >= (mddev->size<<1))
                last_sector = (mddev->size<<1)-1;
        while (first_sector <= last_sector) {
                pd_idx = stripe_to_pdidx(first_sector, conf, conf->previous_raid_disks);
                sh = get_active_stripe(conf, first_sector,
                                       conf->previous_raid_disks, pd_idx, 0);
                set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
                set_bit(STRIPE_HANDLE, &sh->state);
                release_stripe(sh);
                first_sector += STRIPE_SECTORS;
        }
        return conf->chunk_size>>9;
}

/* FIXME go_faster isn't used */
static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
        raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
        struct stripe_head *sh;
        int pd_idx;
        int raid_disks = conf->raid_disks;
        sector_t max_sector = mddev->size << 1;
        int sync_blocks;
        int still_degraded = 0;
        int i;

        if (sector_nr >= max_sector) {
                /* just being told to finish up .. nothing much to do */
                unplug_slaves(mddev);
                if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
                        end_reshape(conf);
                        return 0;
                }

                if (mddev->curr_resync < max_sector) /* aborted */
                        bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
                                        &sync_blocks, 1);
                else /* completed sync */
                        conf->fullsync = 0;
                bitmap_close_sync(mddev->bitmap);

                return 0;
        }

        if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
                return reshape_request(mddev, sector_nr, skipped);

        /* if there are too many failed drives and we are trying
         * to resync, then assert that we are finished, because there is
         * nothing we can do.
         */
        if (mddev->degraded >= conf->max_degraded &&
            test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
                sector_t rv = (mddev->size << 1) - sector_nr;
                *skipped = 1;
                return rv;
        }
        if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
            !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
            !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
                /* we can skip this block, and probably more */
                sync_blocks /= STRIPE_SECTORS;
                *skipped = 1;
                return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
        }

        pd_idx = stripe_to_pdidx(sector_nr, conf, raid_disks);
        sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 1);
        if (sh == NULL) {
                sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 0);
                /* make sure we don't swamp the stripe cache if someone else
                 * is trying to get access
                 */
                schedule_timeout_uninterruptible(1);
        }
        /* Need to check if array will still be degraded after recovery/resync
         * We don't need to check the 'failed' flag as when that gets set,
         * recovery aborts.
         */
        for (i=0; i<mddev->raid_disks; i++)
                if (conf->disks[i].rdev == NULL)
                        still_degraded = 1;

        bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);

        spin_lock(&sh->lock);
        set_bit(STRIPE_SYNCING, &sh->state);
        clear_bit(STRIPE_INSYNC, &sh->state);
        spin_unlock(&sh->lock);

        handle_stripe(sh, NULL);
        release_stripe(sh);

        return STRIPE_SECTORS;
}

/*
 * This is our raid5 kernel thread.
 *
 * We scan the hash table for stripes which can be handled now.
 * During the scan, completed stripes are saved for us by the interrupt
 * handler, so that they will not have to wait for our next wakeup.
2959 */ 2960 static void raid5d (mddev_t *mddev) 2961 { 2962 struct stripe_head *sh; 2963 raid5_conf_t *conf = mddev_to_conf(mddev); 2964 int handled; 2965 2966 PRINTK("+++ raid5d active\n"); 2967 2968 md_check_recovery(mddev); 2969 2970 handled = 0; 2971 spin_lock_irq(&conf->device_lock); 2972 while (1) { 2973 struct list_head *first; 2974 2975 if (conf->seq_flush != conf->seq_write) { 2976 int seq = conf->seq_flush; 2977 spin_unlock_irq(&conf->device_lock); 2978 bitmap_unplug(mddev->bitmap); 2979 spin_lock_irq(&conf->device_lock); 2980 conf->seq_write = seq; 2981 activate_bit_delay(conf); 2982 } 2983 2984 if (list_empty(&conf->handle_list) && 2985 atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD && 2986 !blk_queue_plugged(mddev->queue) && 2987 !list_empty(&conf->delayed_list)) 2988 raid5_activate_delayed(conf); 2989 2990 if (list_empty(&conf->handle_list)) 2991 break; 2992 2993 first = conf->handle_list.next; 2994 sh = list_entry(first, struct stripe_head, lru); 2995 2996 list_del_init(first); 2997 atomic_inc(&sh->count); 2998 BUG_ON(atomic_read(&sh->count)!= 1); 2999 spin_unlock_irq(&conf->device_lock); 3000 3001 handled++; 3002 handle_stripe(sh, conf->spare_page); 3003 release_stripe(sh); 3004 3005 spin_lock_irq(&conf->device_lock); 3006 } 3007 PRINTK("%d stripes handled\n", handled); 3008 3009 spin_unlock_irq(&conf->device_lock); 3010 3011 unplug_slaves(mddev); 3012 3013 PRINTK("--- raid5d inactive\n"); 3014 } 3015 3016 static ssize_t 3017 raid5_show_stripe_cache_size(mddev_t *mddev, char *page) 3018 { 3019 raid5_conf_t *conf = mddev_to_conf(mddev); 3020 if (conf) 3021 return sprintf(page, "%d\n", conf->max_nr_stripes); 3022 else 3023 return 0; 3024 } 3025 3026 static ssize_t 3027 raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len) 3028 { 3029 raid5_conf_t *conf = mddev_to_conf(mddev); 3030 char *end; 3031 int new; 3032 if (len >= PAGE_SIZE) 3033 return -EINVAL; 3034 if (!conf) 3035 return -ENODEV; 3036 3037 new = simple_strtoul(page, &end, 10); 3038 if (!*page || (*end && *end != '\n') ) 3039 return -EINVAL; 3040 if (new <= 16 || new > 32768) 3041 return -EINVAL; 3042 while (new < conf->max_nr_stripes) { 3043 if (drop_one_stripe(conf)) 3044 conf->max_nr_stripes--; 3045 else 3046 break; 3047 } 3048 while (new > conf->max_nr_stripes) { 3049 if (grow_one_stripe(conf)) 3050 conf->max_nr_stripes++; 3051 else break; 3052 } 3053 return len; 3054 } 3055 3056 static struct md_sysfs_entry 3057 raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR, 3058 raid5_show_stripe_cache_size, 3059 raid5_store_stripe_cache_size); 3060 3061 static ssize_t 3062 stripe_cache_active_show(mddev_t *mddev, char *page) 3063 { 3064 raid5_conf_t *conf = mddev_to_conf(mddev); 3065 if (conf) 3066 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); 3067 else 3068 return 0; 3069 } 3070 3071 static struct md_sysfs_entry 3072 raid5_stripecache_active = __ATTR_RO(stripe_cache_active); 3073 3074 static struct attribute *raid5_attrs[] = { 3075 &raid5_stripecache_size.attr, 3076 &raid5_stripecache_active.attr, 3077 NULL, 3078 }; 3079 static struct attribute_group raid5_attrs_group = { 3080 .name = NULL, 3081 .attrs = raid5_attrs, 3082 }; 3083 3084 static int run(mddev_t *mddev) 3085 { 3086 raid5_conf_t *conf; 3087 int raid_disk, memory; 3088 mdk_rdev_t *rdev; 3089 struct disk_info *disk; 3090 struct list_head *tmp; 3091 int working_disks = 0; 3092 3093 if (mddev->level != 5 && mddev->level != 4 && mddev->level != 6) { 3094 printk(KERN_ERR "raid5: %s: raid level 
not set to 4/5/6 (%d)\n", 3095 mdname(mddev), mddev->level); 3096 return -EIO; 3097 } 3098 3099 if (mddev->reshape_position != MaxSector) { 3100 /* Check that we can continue the reshape. 3101 * Currently only disks can change, it must 3102 * increase, and we must be past the point where 3103 * a stripe over-writes itself 3104 */ 3105 sector_t here_new, here_old; 3106 int old_disks; 3107 3108 if (mddev->new_level != mddev->level || 3109 mddev->new_layout != mddev->layout || 3110 mddev->new_chunk != mddev->chunk_size) { 3111 printk(KERN_ERR "raid5: %s: unsupported reshape required - aborting.\n", 3112 mdname(mddev)); 3113 return -EINVAL; 3114 } 3115 if (mddev->delta_disks <= 0) { 3116 printk(KERN_ERR "raid5: %s: unsupported reshape (reduce disks) required - aborting.\n", 3117 mdname(mddev)); 3118 return -EINVAL; 3119 } 3120 old_disks = mddev->raid_disks - mddev->delta_disks; 3121 /* reshape_position must be on a new-stripe boundary, and one 3122 * further up in new geometry must map after here in old geometry. 3123 */ 3124 here_new = mddev->reshape_position; 3125 if (sector_div(here_new, (mddev->chunk_size>>9)*(mddev->raid_disks-1))) { 3126 printk(KERN_ERR "raid5: reshape_position not on a stripe boundary\n"); 3127 return -EINVAL; 3128 } 3129 /* here_new is the stripe we will write to */ 3130 here_old = mddev->reshape_position; 3131 sector_div(here_old, (mddev->chunk_size>>9)*(old_disks-1)); 3132 /* here_old is the first stripe that we might need to read from */ 3133 if (here_new >= here_old) { 3134 /* Reading from the same stripe as writing to - bad */ 3135 printk(KERN_ERR "raid5: reshape_position too early for auto-recovery - aborting.\n"); 3136 return -EINVAL; 3137 } 3138 printk(KERN_INFO "raid5: reshape will continue\n"); 3139 /* OK, we should be able to continue; */ 3140 } 3141 3142 3143 mddev->private = kzalloc(sizeof (raid5_conf_t), GFP_KERNEL); 3144 if ((conf = mddev->private) == NULL) 3145 goto abort; 3146 if (mddev->reshape_position == MaxSector) { 3147 conf->previous_raid_disks = conf->raid_disks = mddev->raid_disks; 3148 } else { 3149 conf->raid_disks = mddev->raid_disks; 3150 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; 3151 } 3152 3153 conf->disks = kzalloc(conf->raid_disks * sizeof(struct disk_info), 3154 GFP_KERNEL); 3155 if (!conf->disks) 3156 goto abort; 3157 3158 conf->mddev = mddev; 3159 3160 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) 3161 goto abort; 3162 3163 if (mddev->level == 6) { 3164 conf->spare_page = alloc_page(GFP_KERNEL); 3165 if (!conf->spare_page) 3166 goto abort; 3167 } 3168 spin_lock_init(&conf->device_lock); 3169 init_waitqueue_head(&conf->wait_for_stripe); 3170 init_waitqueue_head(&conf->wait_for_overlap); 3171 INIT_LIST_HEAD(&conf->handle_list); 3172 INIT_LIST_HEAD(&conf->delayed_list); 3173 INIT_LIST_HEAD(&conf->bitmap_list); 3174 INIT_LIST_HEAD(&conf->inactive_list); 3175 atomic_set(&conf->active_stripes, 0); 3176 atomic_set(&conf->preread_active_stripes, 0); 3177 3178 PRINTK("raid5: run(%s) called.\n", mdname(mddev)); 3179 3180 ITERATE_RDEV(mddev,rdev,tmp) { 3181 raid_disk = rdev->raid_disk; 3182 if (raid_disk >= conf->raid_disks 3183 || raid_disk < 0) 3184 continue; 3185 disk = conf->disks + raid_disk; 3186 3187 disk->rdev = rdev; 3188 3189 if (test_bit(In_sync, &rdev->flags)) { 3190 char b[BDEVNAME_SIZE]; 3191 printk(KERN_INFO "raid5: device %s operational as raid" 3192 " disk %d\n", bdevname(rdev->bdev,b), 3193 raid_disk); 3194 working_disks++; 3195 } 3196 } 3197 3198 /* 3199 * 0 for a fully 
functional array, 1 or 2 for a degraded array. 3200 */ 3201 mddev->degraded = conf->raid_disks - working_disks; 3202 conf->mddev = mddev; 3203 conf->chunk_size = mddev->chunk_size; 3204 conf->level = mddev->level; 3205 if (conf->level == 6) 3206 conf->max_degraded = 2; 3207 else 3208 conf->max_degraded = 1; 3209 conf->algorithm = mddev->layout; 3210 conf->max_nr_stripes = NR_STRIPES; 3211 conf->expand_progress = mddev->reshape_position; 3212 3213 /* device size must be a multiple of chunk size */ 3214 mddev->size &= ~(mddev->chunk_size/1024 -1); 3215 mddev->resync_max_sectors = mddev->size << 1; 3216 3217 if (conf->level == 6 && conf->raid_disks < 4) { 3218 printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n", 3219 mdname(mddev), conf->raid_disks); 3220 goto abort; 3221 } 3222 if (!conf->chunk_size || conf->chunk_size % 4) { 3223 printk(KERN_ERR "raid5: invalid chunk size %d for %s\n", 3224 conf->chunk_size, mdname(mddev)); 3225 goto abort; 3226 } 3227 if (conf->algorithm > ALGORITHM_RIGHT_SYMMETRIC) { 3228 printk(KERN_ERR 3229 "raid5: unsupported parity algorithm %d for %s\n", 3230 conf->algorithm, mdname(mddev)); 3231 goto abort; 3232 } 3233 if (mddev->degraded > conf->max_degraded) { 3234 printk(KERN_ERR "raid5: not enough operational devices for %s" 3235 " (%d/%d failed)\n", 3236 mdname(mddev), mddev->degraded, conf->raid_disks); 3237 goto abort; 3238 } 3239 3240 if (mddev->degraded > 0 && 3241 mddev->recovery_cp != MaxSector) { 3242 if (mddev->ok_start_degraded) 3243 printk(KERN_WARNING 3244 "raid5: starting dirty degraded array: %s" 3245 "- data corruption possible.\n", 3246 mdname(mddev)); 3247 else { 3248 printk(KERN_ERR 3249 "raid5: cannot start dirty degraded array for %s\n", 3250 mdname(mddev)); 3251 goto abort; 3252 } 3253 } 3254 3255 { 3256 mddev->thread = md_register_thread(raid5d, mddev, "%s_raid5"); 3257 if (!mddev->thread) { 3258 printk(KERN_ERR 3259 "raid5: couldn't allocate thread for %s\n", 3260 mdname(mddev)); 3261 goto abort; 3262 } 3263 } 3264 memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + 3265 conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; 3266 if (grow_stripes(conf, conf->max_nr_stripes)) { 3267 printk(KERN_ERR 3268 "raid5: couldn't allocate %dkB for buffers\n", memory); 3269 shrink_stripes(conf); 3270 md_unregister_thread(mddev->thread); 3271 goto abort; 3272 } else 3273 printk(KERN_INFO "raid5: allocated %dkB for %s\n", 3274 memory, mdname(mddev)); 3275 3276 if (mddev->degraded == 0) 3277 printk("raid5: raid level %d set %s active with %d out of %d" 3278 " devices, algorithm %d\n", conf->level, mdname(mddev), 3279 mddev->raid_disks-mddev->degraded, mddev->raid_disks, 3280 conf->algorithm); 3281 else 3282 printk(KERN_ALERT "raid5: raid level %d set %s active with %d" 3283 " out of %d devices, algorithm %d\n", conf->level, 3284 mdname(mddev), mddev->raid_disks - mddev->degraded, 3285 mddev->raid_disks, conf->algorithm); 3286 3287 print_raid5_conf(conf); 3288 3289 if (conf->expand_progress != MaxSector) { 3290 printk("...ok start reshape thread\n"); 3291 conf->expand_lo = conf->expand_progress; 3292 atomic_set(&conf->reshape_stripes, 0); 3293 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 3294 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 3295 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 3296 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 3297 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 3298 "%s_reshape"); 3299 } 3300 3301 /* read-ahead size must cover two whole stripes, which is 3302 * 2 
* (datadisks) * chunksize where 'n' is the number of raid devices 3303 */ 3304 { 3305 int data_disks = conf->previous_raid_disks - conf->max_degraded; 3306 int stripe = data_disks * 3307 (mddev->chunk_size / PAGE_SIZE); 3308 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 3309 mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 3310 } 3311 3312 /* Ok, everything is just fine now */ 3313 sysfs_create_group(&mddev->kobj, &raid5_attrs_group); 3314 3315 mddev->queue->unplug_fn = raid5_unplug_device; 3316 mddev->queue->issue_flush_fn = raid5_issue_flush; 3317 mddev->queue->backing_dev_info.congested_fn = raid5_congested; 3318 mddev->queue->backing_dev_info.congested_data = mddev; 3319 3320 mddev->array_size = mddev->size * (conf->previous_raid_disks - 3321 conf->max_degraded); 3322 3323 return 0; 3324 abort: 3325 if (conf) { 3326 print_raid5_conf(conf); 3327 safe_put_page(conf->spare_page); 3328 kfree(conf->disks); 3329 kfree(conf->stripe_hashtbl); 3330 kfree(conf); 3331 } 3332 mddev->private = NULL; 3333 printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev)); 3334 return -EIO; 3335 } 3336 3337 3338 3339 static int stop(mddev_t *mddev) 3340 { 3341 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 3342 3343 md_unregister_thread(mddev->thread); 3344 mddev->thread = NULL; 3345 shrink_stripes(conf); 3346 kfree(conf->stripe_hashtbl); 3347 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 3348 sysfs_remove_group(&mddev->kobj, &raid5_attrs_group); 3349 kfree(conf->disks); 3350 kfree(conf); 3351 mddev->private = NULL; 3352 return 0; 3353 } 3354 3355 #if RAID5_DEBUG 3356 static void print_sh (struct seq_file *seq, struct stripe_head *sh) 3357 { 3358 int i; 3359 3360 seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n", 3361 (unsigned long long)sh->sector, sh->pd_idx, sh->state); 3362 seq_printf(seq, "sh %llu, count %d.\n", 3363 (unsigned long long)sh->sector, atomic_read(&sh->count)); 3364 seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector); 3365 for (i = 0; i < sh->disks; i++) { 3366 seq_printf(seq, "(cache%d: %p %ld) ", 3367 i, sh->dev[i].page, sh->dev[i].flags); 3368 } 3369 seq_printf(seq, "\n"); 3370 } 3371 3372 static void printall (struct seq_file *seq, raid5_conf_t *conf) 3373 { 3374 struct stripe_head *sh; 3375 struct hlist_node *hn; 3376 int i; 3377 3378 spin_lock_irq(&conf->device_lock); 3379 for (i = 0; i < NR_HASH; i++) { 3380 hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) { 3381 if (sh->raid_conf != conf) 3382 continue; 3383 print_sh(seq, sh); 3384 } 3385 } 3386 spin_unlock_irq(&conf->device_lock); 3387 } 3388 #endif 3389 3390 static void status (struct seq_file *seq, mddev_t *mddev) 3391 { 3392 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 3393 int i; 3394 3395 seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout); 3396 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); 3397 for (i = 0; i < conf->raid_disks; i++) 3398 seq_printf (seq, "%s", 3399 conf->disks[i].rdev && 3400 test_bit(In_sync, &conf->disks[i].rdev->flags) ? 
"U" : "_"); 3401 seq_printf (seq, "]"); 3402 #if RAID5_DEBUG 3403 seq_printf (seq, "\n"); 3404 printall(seq, conf); 3405 #endif 3406 } 3407 3408 static void print_raid5_conf (raid5_conf_t *conf) 3409 { 3410 int i; 3411 struct disk_info *tmp; 3412 3413 printk("RAID5 conf printout:\n"); 3414 if (!conf) { 3415 printk("(conf==NULL)\n"); 3416 return; 3417 } 3418 printk(" --- rd:%d wd:%d\n", conf->raid_disks, 3419 conf->raid_disks - conf->mddev->degraded); 3420 3421 for (i = 0; i < conf->raid_disks; i++) { 3422 char b[BDEVNAME_SIZE]; 3423 tmp = conf->disks + i; 3424 if (tmp->rdev) 3425 printk(" disk %d, o:%d, dev:%s\n", 3426 i, !test_bit(Faulty, &tmp->rdev->flags), 3427 bdevname(tmp->rdev->bdev,b)); 3428 } 3429 } 3430 3431 static int raid5_spare_active(mddev_t *mddev) 3432 { 3433 int i; 3434 raid5_conf_t *conf = mddev->private; 3435 struct disk_info *tmp; 3436 3437 for (i = 0; i < conf->raid_disks; i++) { 3438 tmp = conf->disks + i; 3439 if (tmp->rdev 3440 && !test_bit(Faulty, &tmp->rdev->flags) 3441 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { 3442 unsigned long flags; 3443 spin_lock_irqsave(&conf->device_lock, flags); 3444 mddev->degraded--; 3445 spin_unlock_irqrestore(&conf->device_lock, flags); 3446 } 3447 } 3448 print_raid5_conf(conf); 3449 return 0; 3450 } 3451 3452 static int raid5_remove_disk(mddev_t *mddev, int number) 3453 { 3454 raid5_conf_t *conf = mddev->private; 3455 int err = 0; 3456 mdk_rdev_t *rdev; 3457 struct disk_info *p = conf->disks + number; 3458 3459 print_raid5_conf(conf); 3460 rdev = p->rdev; 3461 if (rdev) { 3462 if (test_bit(In_sync, &rdev->flags) || 3463 atomic_read(&rdev->nr_pending)) { 3464 err = -EBUSY; 3465 goto abort; 3466 } 3467 p->rdev = NULL; 3468 synchronize_rcu(); 3469 if (atomic_read(&rdev->nr_pending)) { 3470 /* lost the race, try later */ 3471 err = -EBUSY; 3472 p->rdev = rdev; 3473 } 3474 } 3475 abort: 3476 3477 print_raid5_conf(conf); 3478 return err; 3479 } 3480 3481 static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) 3482 { 3483 raid5_conf_t *conf = mddev->private; 3484 int found = 0; 3485 int disk; 3486 struct disk_info *p; 3487 3488 if (mddev->degraded > conf->max_degraded) 3489 /* no point adding a device */ 3490 return 0; 3491 3492 /* 3493 * find the disk ... but prefer rdev->saved_raid_disk 3494 * if possible. 3495 */ 3496 if (rdev->saved_raid_disk >= 0 && 3497 conf->disks[rdev->saved_raid_disk].rdev == NULL) 3498 disk = rdev->saved_raid_disk; 3499 else 3500 disk = 0; 3501 for ( ; disk < conf->raid_disks; disk++) 3502 if ((p=conf->disks + disk)->rdev == NULL) { 3503 clear_bit(In_sync, &rdev->flags); 3504 rdev->raid_disk = disk; 3505 found = 1; 3506 if (rdev->saved_raid_disk != disk) 3507 conf->fullsync = 1; 3508 rcu_assign_pointer(p->rdev, rdev); 3509 break; 3510 } 3511 print_raid5_conf(conf); 3512 return found; 3513 } 3514 3515 static int raid5_resize(mddev_t *mddev, sector_t sectors) 3516 { 3517 /* no resync is happening, and there is enough space 3518 * on all devices, so we can resize. 3519 * We need to make sure resync covers any new space. 3520 * If the array is shrinking we should possibly wait until 3521 * any io in the removed space completes, but it hardly seems 3522 * worth it. 
 */
        raid5_conf_t *conf = mddev_to_conf(mddev);

        sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
        mddev->array_size = (sectors * (mddev->raid_disks-conf->max_degraded))>>1;
        set_capacity(mddev->gendisk, mddev->array_size << 1);
        mddev->changed = 1;
        if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) {
                mddev->recovery_cp = mddev->size << 1;
                set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        }
        mddev->size = sectors /2;
        mddev->resync_max_sectors = sectors;
        return 0;
}

#ifdef CONFIG_MD_RAID5_RESHAPE
static int raid5_check_reshape(mddev_t *mddev)
{
        raid5_conf_t *conf = mddev_to_conf(mddev);
        int err;

        if (mddev->delta_disks < 0 ||
            mddev->new_level != mddev->level)
                return -EINVAL; /* Cannot shrink array or change level yet */
        if (mddev->delta_disks == 0)
                return 0; /* nothing to do */

        /* Can only proceed if there are plenty of stripe_heads.
         * We need a minimum of one full stripe, and for sensible progress
         * it is best to have about 4 times that.
         * If we require 4 times, then the default 256 4K stripe_heads will
         * allow for chunk sizes up to 256K, which is probably OK.
         * If the chunk size is greater, user-space should request more
         * stripe_heads first.
         */
        if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes ||
            (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) {
                printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n",
                       (mddev->chunk_size / STRIPE_SIZE)*4);
                return -ENOSPC;
        }

        err = resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
        if (err)
                return err;

        /* looks like we might be able to manage this */
        return 0;
}

static int raid5_start_reshape(mddev_t *mddev)
{
        raid5_conf_t *conf = mddev_to_conf(mddev);
        mdk_rdev_t *rdev;
        struct list_head *rtmp;
        int spares = 0;
        int added_devices = 0;
        unsigned long flags;

        if (mddev->degraded ||
            test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
                return -EBUSY;

        ITERATE_RDEV(mddev, rdev, rtmp)
                if (rdev->raid_disk < 0 &&
                    !test_bit(Faulty, &rdev->flags))
                        spares++;

        if (spares < mddev->delta_disks-1)
                /* Not enough devices even to make a degraded array
                 * of that size
                 */
                return -EINVAL;

        atomic_set(&conf->reshape_stripes, 0);
        spin_lock_irq(&conf->device_lock);
        conf->previous_raid_disks = conf->raid_disks;
        conf->raid_disks += mddev->delta_disks;
        conf->expand_progress = 0;
        conf->expand_lo = 0;
        spin_unlock_irq(&conf->device_lock);

        /* Add some new drives, as many as will fit.
         * We know there are enough to make the newly sized array work.
3608 */ 3609 ITERATE_RDEV(mddev, rdev, rtmp) 3610 if (rdev->raid_disk < 0 && 3611 !test_bit(Faulty, &rdev->flags)) { 3612 if (raid5_add_disk(mddev, rdev)) { 3613 char nm[20]; 3614 set_bit(In_sync, &rdev->flags); 3615 added_devices++; 3616 rdev->recovery_offset = 0; 3617 sprintf(nm, "rd%d", rdev->raid_disk); 3618 sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); 3619 } else 3620 break; 3621 } 3622 3623 spin_lock_irqsave(&conf->device_lock, flags); 3624 mddev->degraded = (conf->raid_disks - conf->previous_raid_disks) - added_devices; 3625 spin_unlock_irqrestore(&conf->device_lock, flags); 3626 mddev->raid_disks = conf->raid_disks; 3627 mddev->reshape_position = 0; 3628 set_bit(MD_CHANGE_DEVS, &mddev->flags); 3629 3630 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 3631 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 3632 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 3633 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 3634 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 3635 "%s_reshape"); 3636 if (!mddev->sync_thread) { 3637 mddev->recovery = 0; 3638 spin_lock_irq(&conf->device_lock); 3639 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; 3640 conf->expand_progress = MaxSector; 3641 spin_unlock_irq(&conf->device_lock); 3642 return -EAGAIN; 3643 } 3644 md_wakeup_thread(mddev->sync_thread); 3645 md_new_event(mddev); 3646 return 0; 3647 } 3648 #endif 3649 3650 static void end_reshape(raid5_conf_t *conf) 3651 { 3652 struct block_device *bdev; 3653 3654 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { 3655 conf->mddev->array_size = conf->mddev->size * (conf->raid_disks-1); 3656 set_capacity(conf->mddev->gendisk, conf->mddev->array_size << 1); 3657 conf->mddev->changed = 1; 3658 3659 bdev = bdget_disk(conf->mddev->gendisk, 0); 3660 if (bdev) { 3661 mutex_lock(&bdev->bd_inode->i_mutex); 3662 i_size_write(bdev->bd_inode, conf->mddev->array_size << 10); 3663 mutex_unlock(&bdev->bd_inode->i_mutex); 3664 bdput(bdev); 3665 } 3666 spin_lock_irq(&conf->device_lock); 3667 conf->expand_progress = MaxSector; 3668 spin_unlock_irq(&conf->device_lock); 3669 conf->mddev->reshape_position = MaxSector; 3670 3671 /* read-ahead size must cover two whole stripes, which is 3672 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices 3673 */ 3674 { 3675 int data_disks = conf->previous_raid_disks - conf->max_degraded; 3676 int stripe = data_disks * 3677 (conf->mddev->chunk_size / PAGE_SIZE); 3678 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 3679 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 3680 } 3681 } 3682 } 3683 3684 static void raid5_quiesce(mddev_t *mddev, int state) 3685 { 3686 raid5_conf_t *conf = mddev_to_conf(mddev); 3687 3688 switch(state) { 3689 case 2: /* resume for a suspend */ 3690 wake_up(&conf->wait_for_overlap); 3691 break; 3692 3693 case 1: /* stop all writes */ 3694 spin_lock_irq(&conf->device_lock); 3695 conf->quiesce = 1; 3696 wait_event_lock_irq(conf->wait_for_stripe, 3697 atomic_read(&conf->active_stripes) == 0, 3698 conf->device_lock, /* nothing */); 3699 spin_unlock_irq(&conf->device_lock); 3700 break; 3701 3702 case 0: /* re-enable writes */ 3703 spin_lock_irq(&conf->device_lock); 3704 conf->quiesce = 0; 3705 wake_up(&conf->wait_for_stripe); 3706 wake_up(&conf->wait_for_overlap); 3707 spin_unlock_irq(&conf->device_lock); 3708 break; 3709 } 3710 } 3711 3712 static struct mdk_personality raid6_personality = 3713 { 3714 .name = "raid6", 3715 .level = 6, 3716 .owner = THIS_MODULE, 3717 .make_request = 
make_request, 3718 .run = run, 3719 .stop = stop, 3720 .status = status, 3721 .error_handler = error, 3722 .hot_add_disk = raid5_add_disk, 3723 .hot_remove_disk= raid5_remove_disk, 3724 .spare_active = raid5_spare_active, 3725 .sync_request = sync_request, 3726 .resize = raid5_resize, 3727 .quiesce = raid5_quiesce, 3728 }; 3729 static struct mdk_personality raid5_personality = 3730 { 3731 .name = "raid5", 3732 .level = 5, 3733 .owner = THIS_MODULE, 3734 .make_request = make_request, 3735 .run = run, 3736 .stop = stop, 3737 .status = status, 3738 .error_handler = error, 3739 .hot_add_disk = raid5_add_disk, 3740 .hot_remove_disk= raid5_remove_disk, 3741 .spare_active = raid5_spare_active, 3742 .sync_request = sync_request, 3743 .resize = raid5_resize, 3744 #ifdef CONFIG_MD_RAID5_RESHAPE 3745 .check_reshape = raid5_check_reshape, 3746 .start_reshape = raid5_start_reshape, 3747 #endif 3748 .quiesce = raid5_quiesce, 3749 }; 3750 3751 static struct mdk_personality raid4_personality = 3752 { 3753 .name = "raid4", 3754 .level = 4, 3755 .owner = THIS_MODULE, 3756 .make_request = make_request, 3757 .run = run, 3758 .stop = stop, 3759 .status = status, 3760 .error_handler = error, 3761 .hot_add_disk = raid5_add_disk, 3762 .hot_remove_disk= raid5_remove_disk, 3763 .spare_active = raid5_spare_active, 3764 .sync_request = sync_request, 3765 .resize = raid5_resize, 3766 .quiesce = raid5_quiesce, 3767 }; 3768 3769 static int __init raid5_init(void) 3770 { 3771 int e; 3772 3773 e = raid6_select_algo(); 3774 if ( e ) 3775 return e; 3776 register_md_personality(&raid6_personality); 3777 register_md_personality(&raid5_personality); 3778 register_md_personality(&raid4_personality); 3779 return 0; 3780 } 3781 3782 static void raid5_exit(void) 3783 { 3784 unregister_md_personality(&raid6_personality); 3785 unregister_md_personality(&raid5_personality); 3786 unregister_md_personality(&raid4_personality); 3787 } 3788 3789 module_init(raid5_init); 3790 module_exit(raid5_exit); 3791 MODULE_LICENSE("GPL"); 3792 MODULE_ALIAS("md-personality-4"); /* RAID5 */ 3793 MODULE_ALIAS("md-raid5"); 3794 MODULE_ALIAS("md-raid4"); 3795 MODULE_ALIAS("md-level-5"); 3796 MODULE_ALIAS("md-level-4"); 3797 MODULE_ALIAS("md-personality-8"); /* RAID6 */ 3798 MODULE_ALIAS("md-raid6"); 3799 MODULE_ALIAS("md-level-6"); 3800 3801 /* This used to be two separate modules, they were: */ 3802 MODULE_ALIAS("raid5"); 3803 MODULE_ALIAS("raid6"); 3804
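
/*
 * Typical usage, for illustration only (device names are examples and the
 * exact commands depend on the mdadm version installed):
 *
 *   # create a 4-drive RAID-6 array; the "md-level-6" alias above is what
 *   # allows the md core to autoload this module for such an array
 *   mdadm --create /dev/md0 --level=6 --raid-devices=4 /dev/sd[bcde]1
 *
 *   # enlarge the stripe cache (see raid5_store_stripe_cache_size();
 *   # the value is a stripe_head count and must be between 17 and 32768)
 *   echo 4096 > /sys/block/md0/md/stripe_cache_size
 */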