/*
 * raid5.c : Multiple Devices driver for Linux
 *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *	   Copyright (C) 1999, 2000 Ingo Molnar
 *	   Copyright (C) 2002, 2003 H. Peter Anvin
 *
 * RAID-4/5/6 management functions.
 * Thanks to Penguin Computing for making the RAID-6 development possible
 * by donating a test server!
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * BITMAP UNPLUGGING:
 *
 * The sequencing for updating the bitmap reliably is a little
 * subtle (and I got it wrong the first time) so it deserves some
 * explanation.
 *
 * We group bitmap updates into batches.  Each batch has a number.
 * We may write out several batches at once, but that isn't very important.
 * conf->seq_write is the number of the last batch successfully written.
 * conf->seq_flush is the number of the last batch that was closed to
 *    new additions.
 * When we discover that we will need to write to any block in a stripe
 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
 * the number of the batch it will be in. This is seq_flush+1.
 * When we are ready to do a write, if that batch hasn't been written yet,
 *   we plug the array and queue the stripe for later.
 * When an unplug happens, we increment seq_flush, thus closing the current
 *   batch.
 * When we notice that seq_flush > seq_write, we write out all pending updates
 * to the bitmap, and advance seq_write to where seq_flush was.
 * This may occasionally write a bit out twice, but is sure never to
 *    miss any bits.
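 *
 * As an illustrative walk-through (the numbers are chosen only for the
 * example): suppose seq_write == seq_flush == 7.  A write added in
 * add_stripe_bio gets sh->bm_seq = 8 (seq_flush+1).  The next unplug closes
 * that batch by bumping seq_flush to 8; once seq_flush > seq_write the
 * pending bitmap updates are written out and seq_write advances to 8, and
 * only then is the queued stripe allowed to start its write.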
44 */ 45 46 #include <linux/blkdev.h> 47 #include <linux/kthread.h> 48 #include <linux/raid/pq.h> 49 #include <linux/async_tx.h> 50 #include <linux/module.h> 51 #include <linux/async.h> 52 #include <linux/seq_file.h> 53 #include <linux/cpu.h> 54 #include <linux/slab.h> 55 #include <linux/ratelimit.h> 56 #include <linux/nodemask.h> 57 #include <trace/events/block.h> 58 59 #include "md.h" 60 #include "raid5.h" 61 #include "raid0.h" 62 #include "bitmap.h" 63 64 #define cpu_to_group(cpu) cpu_to_node(cpu) 65 #define ANY_GROUP NUMA_NO_NODE 66 67 static struct workqueue_struct *raid5_wq; 68 /* 69 * Stripe cache 70 */ 71 72 #define NR_STRIPES 256 73 #define STRIPE_SIZE PAGE_SIZE 74 #define STRIPE_SHIFT (PAGE_SHIFT - 9) 75 #define STRIPE_SECTORS (STRIPE_SIZE>>9) 76 #define IO_THRESHOLD 1 77 #define BYPASS_THRESHOLD 1 78 #define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head)) 79 #define HASH_MASK (NR_HASH - 1) 80 #define MAX_STRIPE_BATCH 8 81 82 static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect) 83 { 84 int hash = (sect >> STRIPE_SHIFT) & HASH_MASK; 85 return &conf->stripe_hashtbl[hash]; 86 } 87 88 static inline int stripe_hash_locks_hash(sector_t sect) 89 { 90 return (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK; 91 } 92 93 static inline void lock_device_hash_lock(struct r5conf *conf, int hash) 94 { 95 spin_lock_irq(conf->hash_locks + hash); 96 spin_lock(&conf->device_lock); 97 } 98 99 static inline void unlock_device_hash_lock(struct r5conf *conf, int hash) 100 { 101 spin_unlock(&conf->device_lock); 102 spin_unlock_irq(conf->hash_locks + hash); 103 } 104 105 static inline void lock_all_device_hash_locks_irq(struct r5conf *conf) 106 { 107 int i; 108 local_irq_disable(); 109 spin_lock(conf->hash_locks); 110 for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++) 111 spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks); 112 spin_lock(&conf->device_lock); 113 } 114 115 static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf) 116 { 117 int i; 118 spin_unlock(&conf->device_lock); 119 for (i = NR_STRIPE_HASH_LOCKS; i; i--) 120 spin_unlock(conf->hash_locks + i - 1); 121 local_irq_enable(); 122 } 123 124 /* bio's attached to a stripe+device for I/O are linked together in bi_sector 125 * order without overlap. There may be several bio's per stripe+device, and 126 * a bio could span several devices. 127 * When walking this list for a particular stripe+device, we must never proceed 128 * beyond a bio that extends past this device, as the next bio might no longer 129 * be valid. 
 * This function is used to determine the 'next' bio in the list, given the
 * sector of the current stripe+device
 */
static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
{
	int sectors = bio_sectors(bio);
	if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
		return bio->bi_next;
	else
		return NULL;
}

/*
 * We maintain a biased count of active stripes in the bottom 16 bits of
 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
 */
static inline int raid5_bi_processed_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	return (atomic_read(segments) >> 16) & 0xffff;
}

static inline int raid5_dec_bi_active_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	return atomic_sub_return(1, segments) & 0xffff;
}

static inline void raid5_inc_bi_active_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	atomic_inc(segments);
}

static inline void raid5_set_bi_processed_stripes(struct bio *bio,
	unsigned int cnt)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	int old, new;

	do {
		old = atomic_read(segments);
		new = (old & 0xffff) | (cnt << 16);
	} while (atomic_cmpxchg(segments, old, new) != old);
}

static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	atomic_set(segments, cnt);
}

/* Find first data disk in a raid6 stripe */
static inline int raid6_d0(struct stripe_head *sh)
{
	if (sh->ddf_layout)
		/* ddf always starts from first device */
		return 0;
	/* md starts just after Q block */
	if (sh->qd_idx == sh->disks - 1)
		return 0;
	else
		return sh->qd_idx + 1;
}
static inline int raid6_next_disk(int disk, int raid_disks)
{
	disk++;
	return (disk < raid_disks) ? disk : 0;
}

/* When walking through the disks in a raid5, starting at raid6_d0,
 * we need to map each disk to a 'slot', where the data disks are slot
 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
 * is raid_disks-1. This helper does that mapping.
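 *
 * For example (illustrative only), in a 6-device md-layout (non-DDF) stripe
 * with pd_idx == 4 and qd_idx == 5, raid6_d0() is 0 and the walk visits
 * disks 0,1,2,3,4,5: the four data disks map to slots 0..3, the parity
 * disk to slot 4 (syndrome_disks) and the Q disk to slot 5
 * (syndrome_disks + 1).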
204 */ 205 static int raid6_idx_to_slot(int idx, struct stripe_head *sh, 206 int *count, int syndrome_disks) 207 { 208 int slot = *count; 209 210 if (sh->ddf_layout) 211 (*count)++; 212 if (idx == sh->pd_idx) 213 return syndrome_disks; 214 if (idx == sh->qd_idx) 215 return syndrome_disks + 1; 216 if (!sh->ddf_layout) 217 (*count)++; 218 return slot; 219 } 220 221 static void return_io(struct bio *return_bi) 222 { 223 struct bio *bi = return_bi; 224 while (bi) { 225 226 return_bi = bi->bi_next; 227 bi->bi_next = NULL; 228 bi->bi_iter.bi_size = 0; 229 trace_block_bio_complete(bdev_get_queue(bi->bi_bdev), 230 bi, 0); 231 bio_endio(bi, 0); 232 bi = return_bi; 233 } 234 } 235 236 static void print_raid5_conf (struct r5conf *conf); 237 238 static int stripe_operations_active(struct stripe_head *sh) 239 { 240 return sh->check_state || sh->reconstruct_state || 241 test_bit(STRIPE_BIOFILL_RUN, &sh->state) || 242 test_bit(STRIPE_COMPUTE_RUN, &sh->state); 243 } 244 245 static void raid5_wakeup_stripe_thread(struct stripe_head *sh) 246 { 247 struct r5conf *conf = sh->raid_conf; 248 struct r5worker_group *group; 249 int thread_cnt; 250 int i, cpu = sh->cpu; 251 252 if (!cpu_online(cpu)) { 253 cpu = cpumask_any(cpu_online_mask); 254 sh->cpu = cpu; 255 } 256 257 if (list_empty(&sh->lru)) { 258 struct r5worker_group *group; 259 group = conf->worker_groups + cpu_to_group(cpu); 260 list_add_tail(&sh->lru, &group->handle_list); 261 group->stripes_cnt++; 262 sh->group = group; 263 } 264 265 if (conf->worker_cnt_per_group == 0) { 266 md_wakeup_thread(conf->mddev->thread); 267 return; 268 } 269 270 group = conf->worker_groups + cpu_to_group(sh->cpu); 271 272 group->workers[0].working = true; 273 /* at least one worker should run to avoid race */ 274 queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work); 275 276 thread_cnt = group->stripes_cnt / MAX_STRIPE_BATCH - 1; 277 /* wakeup more workers */ 278 for (i = 1; i < conf->worker_cnt_per_group && thread_cnt > 0; i++) { 279 if (group->workers[i].working == false) { 280 group->workers[i].working = true; 281 queue_work_on(sh->cpu, raid5_wq, 282 &group->workers[i].work); 283 thread_cnt--; 284 } 285 } 286 } 287 288 static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh, 289 struct list_head *temp_inactive_list) 290 { 291 BUG_ON(!list_empty(&sh->lru)); 292 BUG_ON(atomic_read(&conf->active_stripes)==0); 293 if (test_bit(STRIPE_HANDLE, &sh->state)) { 294 if (test_bit(STRIPE_DELAYED, &sh->state) && 295 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 296 list_add_tail(&sh->lru, &conf->delayed_list); 297 if (atomic_read(&conf->preread_active_stripes) 298 < IO_THRESHOLD) 299 md_wakeup_thread(conf->mddev->thread); 300 } else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && 301 sh->bm_seq - conf->seq_write > 0) 302 list_add_tail(&sh->lru, &conf->bitmap_list); 303 else { 304 clear_bit(STRIPE_DELAYED, &sh->state); 305 clear_bit(STRIPE_BIT_DELAY, &sh->state); 306 if (conf->worker_cnt_per_group == 0) { 307 list_add_tail(&sh->lru, &conf->handle_list); 308 } else { 309 raid5_wakeup_stripe_thread(sh); 310 return; 311 } 312 } 313 md_wakeup_thread(conf->mddev->thread); 314 } else { 315 BUG_ON(stripe_operations_active(sh)); 316 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 317 if (atomic_dec_return(&conf->preread_active_stripes) 318 < IO_THRESHOLD) 319 md_wakeup_thread(conf->mddev->thread); 320 atomic_dec(&conf->active_stripes); 321 if (!test_bit(STRIPE_EXPANDING, &sh->state)) 322 list_add_tail(&sh->lru, temp_inactive_list); 323 } 324 } 325 326 static 
void __release_stripe(struct r5conf *conf, struct stripe_head *sh,
		      struct list_head *temp_inactive_list)
{
	if (atomic_dec_and_test(&sh->count))
		do_release_stripe(conf, sh, temp_inactive_list);
}

/*
 * @hash could be NR_STRIPE_HASH_LOCKS, then we have a list of inactive_list
 *
 * Be careful: Only one task can add/delete stripes from temp_inactive_list at
 * a given time. Adding stripes only takes the device lock, while deleting
 * stripes only takes the hash lock.
 */
static void release_inactive_stripe_list(struct r5conf *conf,
					 struct list_head *temp_inactive_list,
					 int hash)
{
	int size;
	bool do_wakeup = false;
	unsigned long flags;

	if (hash == NR_STRIPE_HASH_LOCKS) {
		size = NR_STRIPE_HASH_LOCKS;
		hash = NR_STRIPE_HASH_LOCKS - 1;
	} else
		size = 1;
	while (size) {
		struct list_head *list = &temp_inactive_list[size - 1];

		/*
		 * We don't hold any lock here yet, get_active_stripe() might
		 * remove stripes from the list
		 */
		if (!list_empty_careful(list)) {
			spin_lock_irqsave(conf->hash_locks + hash, flags);
			if (list_empty(conf->inactive_list + hash) &&
			    !list_empty(list))
				atomic_dec(&conf->empty_inactive_list_nr);
			list_splice_tail_init(list, conf->inactive_list + hash);
			do_wakeup = true;
			spin_unlock_irqrestore(conf->hash_locks + hash, flags);
		}
		size--;
		hash--;
	}

	if (do_wakeup) {
		wake_up(&conf->wait_for_stripe);
		if (conf->retry_read_aligned)
			md_wakeup_thread(conf->mddev->thread);
	}
}

/* should hold conf->device_lock already */
static int release_stripe_list(struct r5conf *conf,
			       struct list_head *temp_inactive_list)
{
	struct stripe_head *sh;
	int count = 0;
	struct llist_node *head;

	head = llist_del_all(&conf->released_stripes);
	head = llist_reverse_order(head);
	while (head) {
		int hash;

		sh = llist_entry(head, struct stripe_head, release_list);
		head = llist_next(head);
		/* sh could be readded after STRIPE_ON_RELEASE_LIST is cleared */
		smp_mb();
		clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state);
		/*
		 * Don't worry if the bit is set here, because if it is set
		 * again, the count is always > 1. This is true for the
		 * STRIPE_ON_UNPLUG_LIST bit too.
		 */
		hash = sh->hash_lock_index;
		__release_stripe(conf, sh, &temp_inactive_list[hash]);
		count++;
	}

	return count;
}

static void release_stripe(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	unsigned long flags;
	struct list_head list;
	int hash;
	bool wakeup;

	/* Avoid release_list until the last reference.
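	 * atomic_add_unless(&sh->count, -1, 1) only succeeds while the count
	 * is still greater than 1; the final reference fails that fast exit
	 * and is handled below, either via conf->released_stripes or the
	 * locked slow path.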
420 */ 421 if (atomic_add_unless(&sh->count, -1, 1)) 422 return; 423 424 if (unlikely(!conf->mddev->thread) || 425 test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state)) 426 goto slow_path; 427 wakeup = llist_add(&sh->release_list, &conf->released_stripes); 428 if (wakeup) 429 md_wakeup_thread(conf->mddev->thread); 430 return; 431 slow_path: 432 local_irq_save(flags); 433 /* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */ 434 if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) { 435 INIT_LIST_HEAD(&list); 436 hash = sh->hash_lock_index; 437 do_release_stripe(conf, sh, &list); 438 spin_unlock(&conf->device_lock); 439 release_inactive_stripe_list(conf, &list, hash); 440 } 441 local_irq_restore(flags); 442 } 443 444 static inline void remove_hash(struct stripe_head *sh) 445 { 446 pr_debug("remove_hash(), stripe %llu\n", 447 (unsigned long long)sh->sector); 448 449 hlist_del_init(&sh->hash); 450 } 451 452 static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh) 453 { 454 struct hlist_head *hp = stripe_hash(conf, sh->sector); 455 456 pr_debug("insert_hash(), stripe %llu\n", 457 (unsigned long long)sh->sector); 458 459 hlist_add_head(&sh->hash, hp); 460 } 461 462 463 /* find an idle stripe, make sure it is unhashed, and return it. */ 464 static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash) 465 { 466 struct stripe_head *sh = NULL; 467 struct list_head *first; 468 469 if (list_empty(conf->inactive_list + hash)) 470 goto out; 471 first = (conf->inactive_list + hash)->next; 472 sh = list_entry(first, struct stripe_head, lru); 473 list_del_init(first); 474 remove_hash(sh); 475 atomic_inc(&conf->active_stripes); 476 BUG_ON(hash != sh->hash_lock_index); 477 if (list_empty(conf->inactive_list + hash)) 478 atomic_inc(&conf->empty_inactive_list_nr); 479 out: 480 return sh; 481 } 482 483 static void shrink_buffers(struct stripe_head *sh) 484 { 485 struct page *p; 486 int i; 487 int num = sh->raid_conf->pool_size; 488 489 for (i = 0; i < num ; i++) { 490 WARN_ON(sh->dev[i].page != sh->dev[i].orig_page); 491 p = sh->dev[i].page; 492 if (!p) 493 continue; 494 sh->dev[i].page = NULL; 495 put_page(p); 496 } 497 } 498 499 static int grow_buffers(struct stripe_head *sh) 500 { 501 int i; 502 int num = sh->raid_conf->pool_size; 503 504 for (i = 0; i < num; i++) { 505 struct page *page; 506 507 if (!(page = alloc_page(GFP_KERNEL))) { 508 return 1; 509 } 510 sh->dev[i].page = page; 511 sh->dev[i].orig_page = page; 512 } 513 return 0; 514 } 515 516 static void raid5_build_block(struct stripe_head *sh, int i, int previous); 517 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, 518 struct stripe_head *sh); 519 520 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) 521 { 522 struct r5conf *conf = sh->raid_conf; 523 int i, seq; 524 525 BUG_ON(atomic_read(&sh->count) != 0); 526 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); 527 BUG_ON(stripe_operations_active(sh)); 528 529 pr_debug("init_stripe called, stripe %llu\n", 530 (unsigned long long)sh->sector); 531 532 remove_hash(sh); 533 retry: 534 seq = read_seqcount_begin(&conf->gen_lock); 535 sh->generation = conf->generation - previous; 536 sh->disks = previous ? 
conf->previous_raid_disks : conf->raid_disks; 537 sh->sector = sector; 538 stripe_set_idx(sector, conf, previous, sh); 539 sh->state = 0; 540 541 542 for (i = sh->disks; i--; ) { 543 struct r5dev *dev = &sh->dev[i]; 544 545 if (dev->toread || dev->read || dev->towrite || dev->written || 546 test_bit(R5_LOCKED, &dev->flags)) { 547 printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n", 548 (unsigned long long)sh->sector, i, dev->toread, 549 dev->read, dev->towrite, dev->written, 550 test_bit(R5_LOCKED, &dev->flags)); 551 WARN_ON(1); 552 } 553 dev->flags = 0; 554 raid5_build_block(sh, i, previous); 555 } 556 if (read_seqcount_retry(&conf->gen_lock, seq)) 557 goto retry; 558 insert_hash(conf, sh); 559 sh->cpu = smp_processor_id(); 560 } 561 562 static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector, 563 short generation) 564 { 565 struct stripe_head *sh; 566 567 pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector); 568 hlist_for_each_entry(sh, stripe_hash(conf, sector), hash) 569 if (sh->sector == sector && sh->generation == generation) 570 return sh; 571 pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector); 572 return NULL; 573 } 574 575 /* 576 * Need to check if array has failed when deciding whether to: 577 * - start an array 578 * - remove non-faulty devices 579 * - add a spare 580 * - allow a reshape 581 * This determination is simple when no reshape is happening. 582 * However if there is a reshape, we need to carefully check 583 * both the before and after sections. 584 * This is because some failed devices may only affect one 585 * of the two sections, and some non-in_sync devices may 586 * be insync in the section most affected by failed devices. 587 */ 588 static int calc_degraded(struct r5conf *conf) 589 { 590 int degraded, degraded2; 591 int i; 592 593 rcu_read_lock(); 594 degraded = 0; 595 for (i = 0; i < conf->previous_raid_disks; i++) { 596 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); 597 if (rdev && test_bit(Faulty, &rdev->flags)) 598 rdev = rcu_dereference(conf->disks[i].replacement); 599 if (!rdev || test_bit(Faulty, &rdev->flags)) 600 degraded++; 601 else if (test_bit(In_sync, &rdev->flags)) 602 ; 603 else 604 /* not in-sync or faulty. 605 * If the reshape increases the number of devices, 606 * this is being recovered by the reshape, so 607 * this 'previous' section is not in_sync. 608 * If the number of devices is being reduced however, 609 * the device can only be part of the array if 610 * we are reverting a reshape, so this section will 611 * be in-sync. 612 */ 613 if (conf->raid_disks >= conf->previous_raid_disks) 614 degraded++; 615 } 616 rcu_read_unlock(); 617 if (conf->raid_disks == conf->previous_raid_disks) 618 return degraded; 619 rcu_read_lock(); 620 degraded2 = 0; 621 for (i = 0; i < conf->raid_disks; i++) { 622 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); 623 if (rdev && test_bit(Faulty, &rdev->flags)) 624 rdev = rcu_dereference(conf->disks[i].replacement); 625 if (!rdev || test_bit(Faulty, &rdev->flags)) 626 degraded2++; 627 else if (test_bit(In_sync, &rdev->flags)) 628 ; 629 else 630 /* not in-sync or faulty. 631 * If reshape increases the number of devices, this 632 * section has already been recovered, else it 633 * almost certainly hasn't. 
634 */ 635 if (conf->raid_disks <= conf->previous_raid_disks) 636 degraded2++; 637 } 638 rcu_read_unlock(); 639 if (degraded2 > degraded) 640 return degraded2; 641 return degraded; 642 } 643 644 static int has_failed(struct r5conf *conf) 645 { 646 int degraded; 647 648 if (conf->mddev->reshape_position == MaxSector) 649 return conf->mddev->degraded > conf->max_degraded; 650 651 degraded = calc_degraded(conf); 652 if (degraded > conf->max_degraded) 653 return 1; 654 return 0; 655 } 656 657 static struct stripe_head * 658 get_active_stripe(struct r5conf *conf, sector_t sector, 659 int previous, int noblock, int noquiesce) 660 { 661 struct stripe_head *sh; 662 int hash = stripe_hash_locks_hash(sector); 663 664 pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector); 665 666 spin_lock_irq(conf->hash_locks + hash); 667 668 do { 669 wait_event_lock_irq(conf->wait_for_stripe, 670 conf->quiesce == 0 || noquiesce, 671 *(conf->hash_locks + hash)); 672 sh = __find_stripe(conf, sector, conf->generation - previous); 673 if (!sh) { 674 if (!conf->inactive_blocked) 675 sh = get_free_stripe(conf, hash); 676 if (noblock && sh == NULL) 677 break; 678 if (!sh) { 679 conf->inactive_blocked = 1; 680 wait_event_lock_irq( 681 conf->wait_for_stripe, 682 !list_empty(conf->inactive_list + hash) && 683 (atomic_read(&conf->active_stripes) 684 < (conf->max_nr_stripes * 3 / 4) 685 || !conf->inactive_blocked), 686 *(conf->hash_locks + hash)); 687 conf->inactive_blocked = 0; 688 } else { 689 init_stripe(sh, sector, previous); 690 atomic_inc(&sh->count); 691 } 692 } else if (!atomic_inc_not_zero(&sh->count)) { 693 spin_lock(&conf->device_lock); 694 if (!atomic_read(&sh->count)) { 695 if (!test_bit(STRIPE_HANDLE, &sh->state)) 696 atomic_inc(&conf->active_stripes); 697 BUG_ON(list_empty(&sh->lru) && 698 !test_bit(STRIPE_EXPANDING, &sh->state)); 699 list_del_init(&sh->lru); 700 if (sh->group) { 701 sh->group->stripes_cnt--; 702 sh->group = NULL; 703 } 704 } 705 atomic_inc(&sh->count); 706 spin_unlock(&conf->device_lock); 707 } 708 } while (sh == NULL); 709 710 spin_unlock_irq(conf->hash_locks + hash); 711 return sh; 712 } 713 714 /* Determine if 'data_offset' or 'new_data_offset' should be used 715 * in this stripe_head. 716 */ 717 static int use_new_offset(struct r5conf *conf, struct stripe_head *sh) 718 { 719 sector_t progress = conf->reshape_progress; 720 /* Need a memory barrier to make sure we see the value 721 * of conf->generation, or ->data_offset that was set before 722 * reshape_progress was updated. 723 */ 724 smp_rmb(); 725 if (progress == MaxSector) 726 return 0; 727 if (sh->generation == conf->generation - 1) 728 return 0; 729 /* We are in a reshape, and this is a new-generation stripe, 730 * so use new_data_offset. 
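	 * (For example, if conf->generation is currently 3, a stripe
	 * initialised before the most recent reshape switch has
	 * sh->generation == 2 and keeps the old data_offset above, while a
	 * generation-3 stripe falls through to here and uses
	 * new_data_offset.  The numbers are illustrative only.)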
731 */ 732 return 1; 733 } 734 735 static void 736 raid5_end_read_request(struct bio *bi, int error); 737 static void 738 raid5_end_write_request(struct bio *bi, int error); 739 740 static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) 741 { 742 struct r5conf *conf = sh->raid_conf; 743 int i, disks = sh->disks; 744 745 might_sleep(); 746 747 for (i = disks; i--; ) { 748 int rw; 749 int replace_only = 0; 750 struct bio *bi, *rbi; 751 struct md_rdev *rdev, *rrdev = NULL; 752 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) { 753 if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags)) 754 rw = WRITE_FUA; 755 else 756 rw = WRITE; 757 if (test_bit(R5_Discard, &sh->dev[i].flags)) 758 rw |= REQ_DISCARD; 759 } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) 760 rw = READ; 761 else if (test_and_clear_bit(R5_WantReplace, 762 &sh->dev[i].flags)) { 763 rw = WRITE; 764 replace_only = 1; 765 } else 766 continue; 767 if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags)) 768 rw |= REQ_SYNC; 769 770 bi = &sh->dev[i].req; 771 rbi = &sh->dev[i].rreq; /* For writing to replacement */ 772 773 rcu_read_lock(); 774 rrdev = rcu_dereference(conf->disks[i].replacement); 775 smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */ 776 rdev = rcu_dereference(conf->disks[i].rdev); 777 if (!rdev) { 778 rdev = rrdev; 779 rrdev = NULL; 780 } 781 if (rw & WRITE) { 782 if (replace_only) 783 rdev = NULL; 784 if (rdev == rrdev) 785 /* We raced and saw duplicates */ 786 rrdev = NULL; 787 } else { 788 if (test_bit(R5_ReadRepl, &sh->dev[i].flags) && rrdev) 789 rdev = rrdev; 790 rrdev = NULL; 791 } 792 793 if (rdev && test_bit(Faulty, &rdev->flags)) 794 rdev = NULL; 795 if (rdev) 796 atomic_inc(&rdev->nr_pending); 797 if (rrdev && test_bit(Faulty, &rrdev->flags)) 798 rrdev = NULL; 799 if (rrdev) 800 atomic_inc(&rrdev->nr_pending); 801 rcu_read_unlock(); 802 803 /* We have already checked bad blocks for reads. Now 804 * need to check for writes. We never accept write errors 805 * on the replacement, so we don't to check rrdev. 806 */ 807 while ((rw & WRITE) && rdev && 808 test_bit(WriteErrorSeen, &rdev->flags)) { 809 sector_t first_bad; 810 int bad_sectors; 811 int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, 812 &first_bad, &bad_sectors); 813 if (!bad) 814 break; 815 816 if (bad < 0) { 817 set_bit(BlockedBadBlocks, &rdev->flags); 818 if (!conf->mddev->external && 819 conf->mddev->flags) { 820 /* It is very unlikely, but we might 821 * still need to write out the 822 * bad block log - better give it 823 * a chance*/ 824 md_check_recovery(conf->mddev); 825 } 826 /* 827 * Because md_wait_for_blocked_rdev 828 * will dec nr_pending, we must 829 * increment it first. 830 */ 831 atomic_inc(&rdev->nr_pending); 832 md_wait_for_blocked_rdev(rdev, conf->mddev); 833 } else { 834 /* Acknowledged bad block - skip the write */ 835 rdev_dec_pending(rdev, conf->mddev); 836 rdev = NULL; 837 } 838 } 839 840 if (rdev) { 841 if (s->syncing || s->expanding || s->expanded 842 || s->replacing) 843 md_sync_acct(rdev->bdev, STRIPE_SECTORS); 844 845 set_bit(STRIPE_IO_STARTED, &sh->state); 846 847 bio_reset(bi); 848 bi->bi_bdev = rdev->bdev; 849 bi->bi_rw = rw; 850 bi->bi_end_io = (rw & WRITE) 851 ? 
raid5_end_write_request 852 : raid5_end_read_request; 853 bi->bi_private = sh; 854 855 pr_debug("%s: for %llu schedule op %ld on disc %d\n", 856 __func__, (unsigned long long)sh->sector, 857 bi->bi_rw, i); 858 atomic_inc(&sh->count); 859 if (use_new_offset(conf, sh)) 860 bi->bi_iter.bi_sector = (sh->sector 861 + rdev->new_data_offset); 862 else 863 bi->bi_iter.bi_sector = (sh->sector 864 + rdev->data_offset); 865 if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) 866 bi->bi_rw |= REQ_NOMERGE; 867 868 if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) 869 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); 870 sh->dev[i].vec.bv_page = sh->dev[i].page; 871 bi->bi_vcnt = 1; 872 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 873 bi->bi_io_vec[0].bv_offset = 0; 874 bi->bi_iter.bi_size = STRIPE_SIZE; 875 /* 876 * If this is discard request, set bi_vcnt 0. We don't 877 * want to confuse SCSI because SCSI will replace payload 878 */ 879 if (rw & REQ_DISCARD) 880 bi->bi_vcnt = 0; 881 if (rrdev) 882 set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); 883 884 if (conf->mddev->gendisk) 885 trace_block_bio_remap(bdev_get_queue(bi->bi_bdev), 886 bi, disk_devt(conf->mddev->gendisk), 887 sh->dev[i].sector); 888 generic_make_request(bi); 889 } 890 if (rrdev) { 891 if (s->syncing || s->expanding || s->expanded 892 || s->replacing) 893 md_sync_acct(rrdev->bdev, STRIPE_SECTORS); 894 895 set_bit(STRIPE_IO_STARTED, &sh->state); 896 897 bio_reset(rbi); 898 rbi->bi_bdev = rrdev->bdev; 899 rbi->bi_rw = rw; 900 BUG_ON(!(rw & WRITE)); 901 rbi->bi_end_io = raid5_end_write_request; 902 rbi->bi_private = sh; 903 904 pr_debug("%s: for %llu schedule op %ld on " 905 "replacement disc %d\n", 906 __func__, (unsigned long long)sh->sector, 907 rbi->bi_rw, i); 908 atomic_inc(&sh->count); 909 if (use_new_offset(conf, sh)) 910 rbi->bi_iter.bi_sector = (sh->sector 911 + rrdev->new_data_offset); 912 else 913 rbi->bi_iter.bi_sector = (sh->sector 914 + rrdev->data_offset); 915 if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) 916 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); 917 sh->dev[i].rvec.bv_page = sh->dev[i].page; 918 rbi->bi_vcnt = 1; 919 rbi->bi_io_vec[0].bv_len = STRIPE_SIZE; 920 rbi->bi_io_vec[0].bv_offset = 0; 921 rbi->bi_iter.bi_size = STRIPE_SIZE; 922 /* 923 * If this is discard request, set bi_vcnt 0. 
We don't 924 * want to confuse SCSI because SCSI will replace payload 925 */ 926 if (rw & REQ_DISCARD) 927 rbi->bi_vcnt = 0; 928 if (conf->mddev->gendisk) 929 trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev), 930 rbi, disk_devt(conf->mddev->gendisk), 931 sh->dev[i].sector); 932 generic_make_request(rbi); 933 } 934 if (!rdev && !rrdev) { 935 if (rw & WRITE) 936 set_bit(STRIPE_DEGRADED, &sh->state); 937 pr_debug("skip op %ld on disc %d for sector %llu\n", 938 bi->bi_rw, i, (unsigned long long)sh->sector); 939 clear_bit(R5_LOCKED, &sh->dev[i].flags); 940 set_bit(STRIPE_HANDLE, &sh->state); 941 } 942 } 943 } 944 945 static struct dma_async_tx_descriptor * 946 async_copy_data(int frombio, struct bio *bio, struct page **page, 947 sector_t sector, struct dma_async_tx_descriptor *tx, 948 struct stripe_head *sh) 949 { 950 struct bio_vec bvl; 951 struct bvec_iter iter; 952 struct page *bio_page; 953 int page_offset; 954 struct async_submit_ctl submit; 955 enum async_tx_flags flags = 0; 956 957 if (bio->bi_iter.bi_sector >= sector) 958 page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512; 959 else 960 page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512; 961 962 if (frombio) 963 flags |= ASYNC_TX_FENCE; 964 init_async_submit(&submit, flags, tx, NULL, NULL, NULL); 965 966 bio_for_each_segment(bvl, bio, iter) { 967 int len = bvl.bv_len; 968 int clen; 969 int b_offset = 0; 970 971 if (page_offset < 0) { 972 b_offset = -page_offset; 973 page_offset += b_offset; 974 len -= b_offset; 975 } 976 977 if (len > 0 && page_offset + len > STRIPE_SIZE) 978 clen = STRIPE_SIZE - page_offset; 979 else 980 clen = len; 981 982 if (clen > 0) { 983 b_offset += bvl.bv_offset; 984 bio_page = bvl.bv_page; 985 if (frombio) { 986 if (sh->raid_conf->skip_copy && 987 b_offset == 0 && page_offset == 0 && 988 clen == STRIPE_SIZE) 989 *page = bio_page; 990 else 991 tx = async_memcpy(*page, bio_page, page_offset, 992 b_offset, clen, &submit); 993 } else 994 tx = async_memcpy(bio_page, *page, b_offset, 995 page_offset, clen, &submit); 996 } 997 /* chain the operations */ 998 submit.depend_tx = tx; 999 1000 if (clen < len) /* hit end of page */ 1001 break; 1002 page_offset += len; 1003 } 1004 1005 return tx; 1006 } 1007 1008 static void ops_complete_biofill(void *stripe_head_ref) 1009 { 1010 struct stripe_head *sh = stripe_head_ref; 1011 struct bio *return_bi = NULL; 1012 int i; 1013 1014 pr_debug("%s: stripe %llu\n", __func__, 1015 (unsigned long long)sh->sector); 1016 1017 /* clear completed biofills */ 1018 for (i = sh->disks; i--; ) { 1019 struct r5dev *dev = &sh->dev[i]; 1020 1021 /* acknowledge completion of a biofill operation */ 1022 /* and check if we need to reply to a read request, 1023 * new R5_Wantfill requests are held off until 1024 * !STRIPE_BIOFILL_RUN 1025 */ 1026 if (test_and_clear_bit(R5_Wantfill, &dev->flags)) { 1027 struct bio *rbi, *rbi2; 1028 1029 BUG_ON(!dev->read); 1030 rbi = dev->read; 1031 dev->read = NULL; 1032 while (rbi && rbi->bi_iter.bi_sector < 1033 dev->sector + STRIPE_SECTORS) { 1034 rbi2 = r5_next_bio(rbi, dev->sector); 1035 if (!raid5_dec_bi_active_stripes(rbi)) { 1036 rbi->bi_next = return_bi; 1037 return_bi = rbi; 1038 } 1039 rbi = rbi2; 1040 } 1041 } 1042 } 1043 clear_bit(STRIPE_BIOFILL_RUN, &sh->state); 1044 1045 return_io(return_bi); 1046 1047 set_bit(STRIPE_HANDLE, &sh->state); 1048 release_stripe(sh); 1049 } 1050 1051 static void ops_run_biofill(struct stripe_head *sh) 1052 { 1053 struct dma_async_tx_descriptor *tx = NULL; 1054 struct async_submit_ctl submit; 1055 int 
i; 1056 1057 pr_debug("%s: stripe %llu\n", __func__, 1058 (unsigned long long)sh->sector); 1059 1060 for (i = sh->disks; i--; ) { 1061 struct r5dev *dev = &sh->dev[i]; 1062 if (test_bit(R5_Wantfill, &dev->flags)) { 1063 struct bio *rbi; 1064 spin_lock_irq(&sh->stripe_lock); 1065 dev->read = rbi = dev->toread; 1066 dev->toread = NULL; 1067 spin_unlock_irq(&sh->stripe_lock); 1068 while (rbi && rbi->bi_iter.bi_sector < 1069 dev->sector + STRIPE_SECTORS) { 1070 tx = async_copy_data(0, rbi, &dev->page, 1071 dev->sector, tx, sh); 1072 rbi = r5_next_bio(rbi, dev->sector); 1073 } 1074 } 1075 } 1076 1077 atomic_inc(&sh->count); 1078 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL); 1079 async_trigger_callback(&submit); 1080 } 1081 1082 static void mark_target_uptodate(struct stripe_head *sh, int target) 1083 { 1084 struct r5dev *tgt; 1085 1086 if (target < 0) 1087 return; 1088 1089 tgt = &sh->dev[target]; 1090 set_bit(R5_UPTODATE, &tgt->flags); 1091 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 1092 clear_bit(R5_Wantcompute, &tgt->flags); 1093 } 1094 1095 static void ops_complete_compute(void *stripe_head_ref) 1096 { 1097 struct stripe_head *sh = stripe_head_ref; 1098 1099 pr_debug("%s: stripe %llu\n", __func__, 1100 (unsigned long long)sh->sector); 1101 1102 /* mark the computed target(s) as uptodate */ 1103 mark_target_uptodate(sh, sh->ops.target); 1104 mark_target_uptodate(sh, sh->ops.target2); 1105 1106 clear_bit(STRIPE_COMPUTE_RUN, &sh->state); 1107 if (sh->check_state == check_state_compute_run) 1108 sh->check_state = check_state_compute_result; 1109 set_bit(STRIPE_HANDLE, &sh->state); 1110 release_stripe(sh); 1111 } 1112 1113 /* return a pointer to the address conversion region of the scribble buffer */ 1114 static addr_conv_t *to_addr_conv(struct stripe_head *sh, 1115 struct raid5_percpu *percpu) 1116 { 1117 return percpu->scribble + sizeof(struct page *) * (sh->disks + 2); 1118 } 1119 1120 static struct dma_async_tx_descriptor * 1121 ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu) 1122 { 1123 int disks = sh->disks; 1124 struct page **xor_srcs = percpu->scribble; 1125 int target = sh->ops.target; 1126 struct r5dev *tgt = &sh->dev[target]; 1127 struct page *xor_dest = tgt->page; 1128 int count = 0; 1129 struct dma_async_tx_descriptor *tx; 1130 struct async_submit_ctl submit; 1131 int i; 1132 1133 pr_debug("%s: stripe %llu block: %d\n", 1134 __func__, (unsigned long long)sh->sector, target); 1135 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 1136 1137 for (i = disks; i--; ) 1138 if (i != target) 1139 xor_srcs[count++] = sh->dev[i].page; 1140 1141 atomic_inc(&sh->count); 1142 1143 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL, 1144 ops_complete_compute, sh, to_addr_conv(sh, percpu)); 1145 if (unlikely(count == 1)) 1146 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit); 1147 else 1148 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); 1149 1150 return tx; 1151 } 1152 1153 /* set_syndrome_sources - populate source buffers for gen_syndrome 1154 * @srcs - (struct page *) array of size sh->disks 1155 * @sh - stripe_head to parse 1156 * 1157 * Populates srcs in proper layout order for the stripe and returns the 1158 * 'count' of sources to be used in a call to async_gen_syndrome. The P 1159 * destination buffer is recorded in srcs[count] and the Q destination 1160 * is recorded in srcs[count+1]]. 
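 *
 * As an illustrative example, for a 6-device md-layout (non-DDF) stripe
 * syndrome_disks is 4: srcs[0..3] receive the data pages in syndrome order,
 * srcs[4] the P page and srcs[5] the Q page, and the returned count is 4.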
1161 */ 1162 static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh) 1163 { 1164 int disks = sh->disks; 1165 int syndrome_disks = sh->ddf_layout ? disks : (disks - 2); 1166 int d0_idx = raid6_d0(sh); 1167 int count; 1168 int i; 1169 1170 for (i = 0; i < disks; i++) 1171 srcs[i] = NULL; 1172 1173 count = 0; 1174 i = d0_idx; 1175 do { 1176 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); 1177 1178 srcs[slot] = sh->dev[i].page; 1179 i = raid6_next_disk(i, disks); 1180 } while (i != d0_idx); 1181 1182 return syndrome_disks; 1183 } 1184 1185 static struct dma_async_tx_descriptor * 1186 ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu) 1187 { 1188 int disks = sh->disks; 1189 struct page **blocks = percpu->scribble; 1190 int target; 1191 int qd_idx = sh->qd_idx; 1192 struct dma_async_tx_descriptor *tx; 1193 struct async_submit_ctl submit; 1194 struct r5dev *tgt; 1195 struct page *dest; 1196 int i; 1197 int count; 1198 1199 if (sh->ops.target < 0) 1200 target = sh->ops.target2; 1201 else if (sh->ops.target2 < 0) 1202 target = sh->ops.target; 1203 else 1204 /* we should only have one valid target */ 1205 BUG(); 1206 BUG_ON(target < 0); 1207 pr_debug("%s: stripe %llu block: %d\n", 1208 __func__, (unsigned long long)sh->sector, target); 1209 1210 tgt = &sh->dev[target]; 1211 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 1212 dest = tgt->page; 1213 1214 atomic_inc(&sh->count); 1215 1216 if (target == qd_idx) { 1217 count = set_syndrome_sources(blocks, sh); 1218 blocks[count] = NULL; /* regenerating p is not necessary */ 1219 BUG_ON(blocks[count+1] != dest); /* q should already be set */ 1220 init_async_submit(&submit, ASYNC_TX_FENCE, NULL, 1221 ops_complete_compute, sh, 1222 to_addr_conv(sh, percpu)); 1223 tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit); 1224 } else { 1225 /* Compute any data- or p-drive using XOR */ 1226 count = 0; 1227 for (i = disks; i-- ; ) { 1228 if (i == target || i == qd_idx) 1229 continue; 1230 blocks[count++] = sh->dev[i].page; 1231 } 1232 1233 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, 1234 NULL, ops_complete_compute, sh, 1235 to_addr_conv(sh, percpu)); 1236 tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit); 1237 } 1238 1239 return tx; 1240 } 1241 1242 static struct dma_async_tx_descriptor * 1243 ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) 1244 { 1245 int i, count, disks = sh->disks; 1246 int syndrome_disks = sh->ddf_layout ? 
disks : disks-2; 1247 int d0_idx = raid6_d0(sh); 1248 int faila = -1, failb = -1; 1249 int target = sh->ops.target; 1250 int target2 = sh->ops.target2; 1251 struct r5dev *tgt = &sh->dev[target]; 1252 struct r5dev *tgt2 = &sh->dev[target2]; 1253 struct dma_async_tx_descriptor *tx; 1254 struct page **blocks = percpu->scribble; 1255 struct async_submit_ctl submit; 1256 1257 pr_debug("%s: stripe %llu block1: %d block2: %d\n", 1258 __func__, (unsigned long long)sh->sector, target, target2); 1259 BUG_ON(target < 0 || target2 < 0); 1260 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 1261 BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags)); 1262 1263 /* we need to open-code set_syndrome_sources to handle the 1264 * slot number conversion for 'faila' and 'failb' 1265 */ 1266 for (i = 0; i < disks ; i++) 1267 blocks[i] = NULL; 1268 count = 0; 1269 i = d0_idx; 1270 do { 1271 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); 1272 1273 blocks[slot] = sh->dev[i].page; 1274 1275 if (i == target) 1276 faila = slot; 1277 if (i == target2) 1278 failb = slot; 1279 i = raid6_next_disk(i, disks); 1280 } while (i != d0_idx); 1281 1282 BUG_ON(faila == failb); 1283 if (failb < faila) 1284 swap(faila, failb); 1285 pr_debug("%s: stripe: %llu faila: %d failb: %d\n", 1286 __func__, (unsigned long long)sh->sector, faila, failb); 1287 1288 atomic_inc(&sh->count); 1289 1290 if (failb == syndrome_disks+1) { 1291 /* Q disk is one of the missing disks */ 1292 if (faila == syndrome_disks) { 1293 /* Missing P+Q, just recompute */ 1294 init_async_submit(&submit, ASYNC_TX_FENCE, NULL, 1295 ops_complete_compute, sh, 1296 to_addr_conv(sh, percpu)); 1297 return async_gen_syndrome(blocks, 0, syndrome_disks+2, 1298 STRIPE_SIZE, &submit); 1299 } else { 1300 struct page *dest; 1301 int data_target; 1302 int qd_idx = sh->qd_idx; 1303 1304 /* Missing D+Q: recompute D from P, then recompute Q */ 1305 if (target == qd_idx) 1306 data_target = target2; 1307 else 1308 data_target = target; 1309 1310 count = 0; 1311 for (i = disks; i-- ; ) { 1312 if (i == data_target || i == qd_idx) 1313 continue; 1314 blocks[count++] = sh->dev[i].page; 1315 } 1316 dest = sh->dev[data_target].page; 1317 init_async_submit(&submit, 1318 ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, 1319 NULL, NULL, NULL, 1320 to_addr_conv(sh, percpu)); 1321 tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, 1322 &submit); 1323 1324 count = set_syndrome_sources(blocks, sh); 1325 init_async_submit(&submit, ASYNC_TX_FENCE, tx, 1326 ops_complete_compute, sh, 1327 to_addr_conv(sh, percpu)); 1328 return async_gen_syndrome(blocks, 0, count+2, 1329 STRIPE_SIZE, &submit); 1330 } 1331 } else { 1332 init_async_submit(&submit, ASYNC_TX_FENCE, NULL, 1333 ops_complete_compute, sh, 1334 to_addr_conv(sh, percpu)); 1335 if (failb == syndrome_disks) { 1336 /* We're missing D+P. */ 1337 return async_raid6_datap_recov(syndrome_disks+2, 1338 STRIPE_SIZE, faila, 1339 blocks, &submit); 1340 } else { 1341 /* We're missing D+D. 
*/ 1342 return async_raid6_2data_recov(syndrome_disks+2, 1343 STRIPE_SIZE, faila, failb, 1344 blocks, &submit); 1345 } 1346 } 1347 } 1348 1349 1350 static void ops_complete_prexor(void *stripe_head_ref) 1351 { 1352 struct stripe_head *sh = stripe_head_ref; 1353 1354 pr_debug("%s: stripe %llu\n", __func__, 1355 (unsigned long long)sh->sector); 1356 } 1357 1358 static struct dma_async_tx_descriptor * 1359 ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu, 1360 struct dma_async_tx_descriptor *tx) 1361 { 1362 int disks = sh->disks; 1363 struct page **xor_srcs = percpu->scribble; 1364 int count = 0, pd_idx = sh->pd_idx, i; 1365 struct async_submit_ctl submit; 1366 1367 /* existing parity data subtracted */ 1368 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 1369 1370 pr_debug("%s: stripe %llu\n", __func__, 1371 (unsigned long long)sh->sector); 1372 1373 for (i = disks; i--; ) { 1374 struct r5dev *dev = &sh->dev[i]; 1375 /* Only process blocks that are known to be uptodate */ 1376 if (test_bit(R5_Wantdrain, &dev->flags)) 1377 xor_srcs[count++] = dev->page; 1378 } 1379 1380 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, 1381 ops_complete_prexor, sh, to_addr_conv(sh, percpu)); 1382 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); 1383 1384 return tx; 1385 } 1386 1387 static struct dma_async_tx_descriptor * 1388 ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) 1389 { 1390 int disks = sh->disks; 1391 int i; 1392 1393 pr_debug("%s: stripe %llu\n", __func__, 1394 (unsigned long long)sh->sector); 1395 1396 for (i = disks; i--; ) { 1397 struct r5dev *dev = &sh->dev[i]; 1398 struct bio *chosen; 1399 1400 if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) { 1401 struct bio *wbi; 1402 1403 spin_lock_irq(&sh->stripe_lock); 1404 chosen = dev->towrite; 1405 dev->towrite = NULL; 1406 BUG_ON(dev->written); 1407 wbi = dev->written = chosen; 1408 spin_unlock_irq(&sh->stripe_lock); 1409 WARN_ON(dev->page != dev->orig_page); 1410 1411 while (wbi && wbi->bi_iter.bi_sector < 1412 dev->sector + STRIPE_SECTORS) { 1413 if (wbi->bi_rw & REQ_FUA) 1414 set_bit(R5_WantFUA, &dev->flags); 1415 if (wbi->bi_rw & REQ_SYNC) 1416 set_bit(R5_SyncIO, &dev->flags); 1417 if (wbi->bi_rw & REQ_DISCARD) 1418 set_bit(R5_Discard, &dev->flags); 1419 else { 1420 tx = async_copy_data(1, wbi, &dev->page, 1421 dev->sector, tx, sh); 1422 if (dev->page != dev->orig_page) { 1423 set_bit(R5_SkipCopy, &dev->flags); 1424 clear_bit(R5_UPTODATE, &dev->flags); 1425 clear_bit(R5_OVERWRITE, &dev->flags); 1426 } 1427 } 1428 wbi = r5_next_bio(wbi, dev->sector); 1429 } 1430 } 1431 } 1432 1433 return tx; 1434 } 1435 1436 static void ops_complete_reconstruct(void *stripe_head_ref) 1437 { 1438 struct stripe_head *sh = stripe_head_ref; 1439 int disks = sh->disks; 1440 int pd_idx = sh->pd_idx; 1441 int qd_idx = sh->qd_idx; 1442 int i; 1443 bool fua = false, sync = false, discard = false; 1444 1445 pr_debug("%s: stripe %llu\n", __func__, 1446 (unsigned long long)sh->sector); 1447 1448 for (i = disks; i--; ) { 1449 fua |= test_bit(R5_WantFUA, &sh->dev[i].flags); 1450 sync |= test_bit(R5_SyncIO, &sh->dev[i].flags); 1451 discard |= test_bit(R5_Discard, &sh->dev[i].flags); 1452 } 1453 1454 for (i = disks; i--; ) { 1455 struct r5dev *dev = &sh->dev[i]; 1456 1457 if (dev->written || i == pd_idx || i == qd_idx) { 1458 if (!discard && !test_bit(R5_SkipCopy, &dev->flags)) 1459 set_bit(R5_UPTODATE, &dev->flags); 1460 if (fua) 1461 set_bit(R5_WantFUA, &dev->flags); 1462 
if (sync) 1463 set_bit(R5_SyncIO, &dev->flags); 1464 } 1465 } 1466 1467 if (sh->reconstruct_state == reconstruct_state_drain_run) 1468 sh->reconstruct_state = reconstruct_state_drain_result; 1469 else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) 1470 sh->reconstruct_state = reconstruct_state_prexor_drain_result; 1471 else { 1472 BUG_ON(sh->reconstruct_state != reconstruct_state_run); 1473 sh->reconstruct_state = reconstruct_state_result; 1474 } 1475 1476 set_bit(STRIPE_HANDLE, &sh->state); 1477 release_stripe(sh); 1478 } 1479 1480 static void 1481 ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu, 1482 struct dma_async_tx_descriptor *tx) 1483 { 1484 int disks = sh->disks; 1485 struct page **xor_srcs = percpu->scribble; 1486 struct async_submit_ctl submit; 1487 int count = 0, pd_idx = sh->pd_idx, i; 1488 struct page *xor_dest; 1489 int prexor = 0; 1490 unsigned long flags; 1491 1492 pr_debug("%s: stripe %llu\n", __func__, 1493 (unsigned long long)sh->sector); 1494 1495 for (i = 0; i < sh->disks; i++) { 1496 if (pd_idx == i) 1497 continue; 1498 if (!test_bit(R5_Discard, &sh->dev[i].flags)) 1499 break; 1500 } 1501 if (i >= sh->disks) { 1502 atomic_inc(&sh->count); 1503 set_bit(R5_Discard, &sh->dev[pd_idx].flags); 1504 ops_complete_reconstruct(sh); 1505 return; 1506 } 1507 /* check if prexor is active which means only process blocks 1508 * that are part of a read-modify-write (written) 1509 */ 1510 if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) { 1511 prexor = 1; 1512 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 1513 for (i = disks; i--; ) { 1514 struct r5dev *dev = &sh->dev[i]; 1515 if (dev->written) 1516 xor_srcs[count++] = dev->page; 1517 } 1518 } else { 1519 xor_dest = sh->dev[pd_idx].page; 1520 for (i = disks; i--; ) { 1521 struct r5dev *dev = &sh->dev[i]; 1522 if (i != pd_idx) 1523 xor_srcs[count++] = dev->page; 1524 } 1525 } 1526 1527 /* 1/ if we prexor'd then the dest is reused as a source 1528 * 2/ if we did not prexor then we are redoing the parity 1529 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST 1530 * for the synchronous xor case 1531 */ 1532 flags = ASYNC_TX_ACK | 1533 (prexor ? 
ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST); 1534 1535 atomic_inc(&sh->count); 1536 1537 init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh, 1538 to_addr_conv(sh, percpu)); 1539 if (unlikely(count == 1)) 1540 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit); 1541 else 1542 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); 1543 } 1544 1545 static void 1546 ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu, 1547 struct dma_async_tx_descriptor *tx) 1548 { 1549 struct async_submit_ctl submit; 1550 struct page **blocks = percpu->scribble; 1551 int count, i; 1552 1553 pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); 1554 1555 for (i = 0; i < sh->disks; i++) { 1556 if (sh->pd_idx == i || sh->qd_idx == i) 1557 continue; 1558 if (!test_bit(R5_Discard, &sh->dev[i].flags)) 1559 break; 1560 } 1561 if (i >= sh->disks) { 1562 atomic_inc(&sh->count); 1563 set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); 1564 set_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); 1565 ops_complete_reconstruct(sh); 1566 return; 1567 } 1568 1569 count = set_syndrome_sources(blocks, sh); 1570 1571 atomic_inc(&sh->count); 1572 1573 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct, 1574 sh, to_addr_conv(sh, percpu)); 1575 async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit); 1576 } 1577 1578 static void ops_complete_check(void *stripe_head_ref) 1579 { 1580 struct stripe_head *sh = stripe_head_ref; 1581 1582 pr_debug("%s: stripe %llu\n", __func__, 1583 (unsigned long long)sh->sector); 1584 1585 sh->check_state = check_state_check_result; 1586 set_bit(STRIPE_HANDLE, &sh->state); 1587 release_stripe(sh); 1588 } 1589 1590 static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu) 1591 { 1592 int disks = sh->disks; 1593 int pd_idx = sh->pd_idx; 1594 int qd_idx = sh->qd_idx; 1595 struct page *xor_dest; 1596 struct page **xor_srcs = percpu->scribble; 1597 struct dma_async_tx_descriptor *tx; 1598 struct async_submit_ctl submit; 1599 int count; 1600 int i; 1601 1602 pr_debug("%s: stripe %llu\n", __func__, 1603 (unsigned long long)sh->sector); 1604 1605 count = 0; 1606 xor_dest = sh->dev[pd_idx].page; 1607 xor_srcs[count++] = xor_dest; 1608 for (i = disks; i--; ) { 1609 if (i == pd_idx || i == qd_idx) 1610 continue; 1611 xor_srcs[count++] = sh->dev[i].page; 1612 } 1613 1614 init_async_submit(&submit, 0, NULL, NULL, NULL, 1615 to_addr_conv(sh, percpu)); 1616 tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 1617 &sh->ops.zero_sum_result, &submit); 1618 1619 atomic_inc(&sh->count); 1620 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL); 1621 tx = async_trigger_callback(&submit); 1622 } 1623 1624 static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp) 1625 { 1626 struct page **srcs = percpu->scribble; 1627 struct async_submit_ctl submit; 1628 int count; 1629 1630 pr_debug("%s: stripe %llu checkp: %d\n", __func__, 1631 (unsigned long long)sh->sector, checkp); 1632 1633 count = set_syndrome_sources(srcs, sh); 1634 if (!checkp) 1635 srcs[count] = NULL; 1636 1637 atomic_inc(&sh->count); 1638 init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check, 1639 sh, to_addr_conv(sh, percpu)); 1640 async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE, 1641 &sh->ops.zero_sum_result, percpu->spare_page, &submit); 1642 } 1643 1644 static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) 1645 { 1646 int 
overlap_clear = 0, i, disks = sh->disks; 1647 struct dma_async_tx_descriptor *tx = NULL; 1648 struct r5conf *conf = sh->raid_conf; 1649 int level = conf->level; 1650 struct raid5_percpu *percpu; 1651 unsigned long cpu; 1652 1653 cpu = get_cpu(); 1654 percpu = per_cpu_ptr(conf->percpu, cpu); 1655 if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) { 1656 ops_run_biofill(sh); 1657 overlap_clear++; 1658 } 1659 1660 if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) { 1661 if (level < 6) 1662 tx = ops_run_compute5(sh, percpu); 1663 else { 1664 if (sh->ops.target2 < 0 || sh->ops.target < 0) 1665 tx = ops_run_compute6_1(sh, percpu); 1666 else 1667 tx = ops_run_compute6_2(sh, percpu); 1668 } 1669 /* terminate the chain if reconstruct is not set to be run */ 1670 if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) 1671 async_tx_ack(tx); 1672 } 1673 1674 if (test_bit(STRIPE_OP_PREXOR, &ops_request)) 1675 tx = ops_run_prexor(sh, percpu, tx); 1676 1677 if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) { 1678 tx = ops_run_biodrain(sh, tx); 1679 overlap_clear++; 1680 } 1681 1682 if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) { 1683 if (level < 6) 1684 ops_run_reconstruct5(sh, percpu, tx); 1685 else 1686 ops_run_reconstruct6(sh, percpu, tx); 1687 } 1688 1689 if (test_bit(STRIPE_OP_CHECK, &ops_request)) { 1690 if (sh->check_state == check_state_run) 1691 ops_run_check_p(sh, percpu); 1692 else if (sh->check_state == check_state_run_q) 1693 ops_run_check_pq(sh, percpu, 0); 1694 else if (sh->check_state == check_state_run_pq) 1695 ops_run_check_pq(sh, percpu, 1); 1696 else 1697 BUG(); 1698 } 1699 1700 if (overlap_clear) 1701 for (i = disks; i--; ) { 1702 struct r5dev *dev = &sh->dev[i]; 1703 if (test_and_clear_bit(R5_Overlap, &dev->flags)) 1704 wake_up(&sh->raid_conf->wait_for_overlap); 1705 } 1706 put_cpu(); 1707 } 1708 1709 static int grow_one_stripe(struct r5conf *conf, int hash) 1710 { 1711 struct stripe_head *sh; 1712 sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL); 1713 if (!sh) 1714 return 0; 1715 1716 sh->raid_conf = conf; 1717 1718 spin_lock_init(&sh->stripe_lock); 1719 1720 if (grow_buffers(sh)) { 1721 shrink_buffers(sh); 1722 kmem_cache_free(conf->slab_cache, sh); 1723 return 0; 1724 } 1725 sh->hash_lock_index = hash; 1726 /* we just created an active stripe so... 
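	 * the initial reference is ours: set count to 1, account it in
	 * active_stripes, and let release_stripe() below move it onto the
	 * per-hash inactive list.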
	 */
	atomic_set(&sh->count, 1);
	atomic_inc(&conf->active_stripes);
	INIT_LIST_HEAD(&sh->lru);
	release_stripe(sh);
	return 1;
}

static int grow_stripes(struct r5conf *conf, int num)
{
	struct kmem_cache *sc;
	int devs = max(conf->raid_disks, conf->previous_raid_disks);
	int hash;

	if (conf->mddev->gendisk)
		sprintf(conf->cache_name[0],
			"raid%d-%s", conf->level, mdname(conf->mddev));
	else
		sprintf(conf->cache_name[0],
			"raid%d-%p", conf->level, conf->mddev);
	sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);

	conf->active_name = 0;
	sc = kmem_cache_create(conf->cache_name[conf->active_name],
			       sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return 1;
	conf->slab_cache = sc;
	conf->pool_size = devs;
	hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
	while (num--) {
		if (!grow_one_stripe(conf, hash))
			return 1;
		conf->max_nr_stripes++;
		hash = (hash + 1) % NR_STRIPE_HASH_LOCKS;
	}
	return 0;
}

/**
 * scribble_len - return the required size of the scribble region
 * @num - total number of disks in the array
 *
 * The size must be enough to contain:
 * 1/ a struct page pointer for each device in the array +2
 * 2/ room to convert each entry in (1) to its corresponding dma
 *    (dma_map_page()) or page (page_address()) address.
 *
 * Note: the +2 is for the destination buffers of the ddf/raid6 case where we
 * calculate over all devices (not just the data blocks), using zeros in place
 * of the P and Q blocks.
 */
static size_t scribble_len(int num)
{
	size_t len;

	len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2);

	return len;
}

static int resize_stripes(struct r5conf *conf, int newsize)
{
	/* Make all the stripes able to hold 'newsize' devices.
	 * New slots in each stripe get 'page' set to a new page.
	 *
	 * This happens in stages:
	 * 1/ create a new kmem_cache and allocate the required number of
	 *    stripe_heads.
	 * 2/ gather all the old stripe_heads and transfer the pages across
	 *    to the new stripe_heads.  This will have the side effect of
	 *    freezing the array as once all stripe_heads have been collected,
	 *    no IO will be possible.  Old stripe heads are freed once their
	 *    pages have been transferred over, and the old kmem_cache is
	 *    freed when all stripes are done.
	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
	 *    we simply return a failure status - no need to clean anything up.
	 * 4/ allocate new pages for the new slots in the new stripe_heads.
	 *    If this fails, we don't bother trying to shrink the
	 *    stripe_heads down again, we just leave them as they are.
	 *    As each stripe_head is processed the new one is released into
	 *    active service.
	 *
	 * Once step2 is started, we cannot afford to wait for a write,
	 * so we use GFP_NOIO allocations.
1812 */ 1813 struct stripe_head *osh, *nsh; 1814 LIST_HEAD(newstripes); 1815 struct disk_info *ndisks; 1816 unsigned long cpu; 1817 int err; 1818 struct kmem_cache *sc; 1819 int i; 1820 int hash, cnt; 1821 1822 if (newsize <= conf->pool_size) 1823 return 0; /* never bother to shrink */ 1824 1825 err = md_allow_write(conf->mddev); 1826 if (err) 1827 return err; 1828 1829 /* Step 1 */ 1830 sc = kmem_cache_create(conf->cache_name[1-conf->active_name], 1831 sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev), 1832 0, 0, NULL); 1833 if (!sc) 1834 return -ENOMEM; 1835 1836 for (i = conf->max_nr_stripes; i; i--) { 1837 nsh = kmem_cache_zalloc(sc, GFP_KERNEL); 1838 if (!nsh) 1839 break; 1840 1841 nsh->raid_conf = conf; 1842 spin_lock_init(&nsh->stripe_lock); 1843 1844 list_add(&nsh->lru, &newstripes); 1845 } 1846 if (i) { 1847 /* didn't get enough, give up */ 1848 while (!list_empty(&newstripes)) { 1849 nsh = list_entry(newstripes.next, struct stripe_head, lru); 1850 list_del(&nsh->lru); 1851 kmem_cache_free(sc, nsh); 1852 } 1853 kmem_cache_destroy(sc); 1854 return -ENOMEM; 1855 } 1856 /* Step 2 - Must use GFP_NOIO now. 1857 * OK, we have enough stripes, start collecting inactive 1858 * stripes and copying them over 1859 */ 1860 hash = 0; 1861 cnt = 0; 1862 list_for_each_entry(nsh, &newstripes, lru) { 1863 lock_device_hash_lock(conf, hash); 1864 wait_event_cmd(conf->wait_for_stripe, 1865 !list_empty(conf->inactive_list + hash), 1866 unlock_device_hash_lock(conf, hash), 1867 lock_device_hash_lock(conf, hash)); 1868 osh = get_free_stripe(conf, hash); 1869 unlock_device_hash_lock(conf, hash); 1870 atomic_set(&nsh->count, 1); 1871 for(i=0; i<conf->pool_size; i++) { 1872 nsh->dev[i].page = osh->dev[i].page; 1873 nsh->dev[i].orig_page = osh->dev[i].page; 1874 } 1875 for( ; i<newsize; i++) 1876 nsh->dev[i].page = NULL; 1877 nsh->hash_lock_index = hash; 1878 kmem_cache_free(conf->slab_cache, osh); 1879 cnt++; 1880 if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS + 1881 !!((conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS) > hash)) { 1882 hash++; 1883 cnt = 0; 1884 } 1885 } 1886 kmem_cache_destroy(conf->slab_cache); 1887 1888 /* Step 3. 
1889 * At this point, we are holding all the stripes so the array 1890 * is completely stalled, so now is a good time to resize 1891 * conf->disks and the scribble region 1892 */ 1893 ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO); 1894 if (ndisks) { 1895 for (i=0; i<conf->raid_disks; i++) 1896 ndisks[i] = conf->disks[i]; 1897 kfree(conf->disks); 1898 conf->disks = ndisks; 1899 } else 1900 err = -ENOMEM; 1901 1902 get_online_cpus(); 1903 conf->scribble_len = scribble_len(newsize); 1904 for_each_present_cpu(cpu) { 1905 struct raid5_percpu *percpu; 1906 void *scribble; 1907 1908 percpu = per_cpu_ptr(conf->percpu, cpu); 1909 scribble = kmalloc(conf->scribble_len, GFP_NOIO); 1910 1911 if (scribble) { 1912 kfree(percpu->scribble); 1913 percpu->scribble = scribble; 1914 } else { 1915 err = -ENOMEM; 1916 break; 1917 } 1918 } 1919 put_online_cpus(); 1920 1921 /* Step 4, return new stripes to service */ 1922 while(!list_empty(&newstripes)) { 1923 nsh = list_entry(newstripes.next, struct stripe_head, lru); 1924 list_del_init(&nsh->lru); 1925 1926 for (i=conf->raid_disks; i < newsize; i++) 1927 if (nsh->dev[i].page == NULL) { 1928 struct page *p = alloc_page(GFP_NOIO); 1929 nsh->dev[i].page = p; 1930 nsh->dev[i].orig_page = p; 1931 if (!p) 1932 err = -ENOMEM; 1933 } 1934 release_stripe(nsh); 1935 } 1936 /* critical section pass, GFP_NOIO no longer needed */ 1937 1938 conf->slab_cache = sc; 1939 conf->active_name = 1-conf->active_name; 1940 conf->pool_size = newsize; 1941 return err; 1942 } 1943 1944 static int drop_one_stripe(struct r5conf *conf, int hash) 1945 { 1946 struct stripe_head *sh; 1947 1948 spin_lock_irq(conf->hash_locks + hash); 1949 sh = get_free_stripe(conf, hash); 1950 spin_unlock_irq(conf->hash_locks + hash); 1951 if (!sh) 1952 return 0; 1953 BUG_ON(atomic_read(&sh->count)); 1954 shrink_buffers(sh); 1955 kmem_cache_free(conf->slab_cache, sh); 1956 atomic_dec(&conf->active_stripes); 1957 return 1; 1958 } 1959 1960 static void shrink_stripes(struct r5conf *conf) 1961 { 1962 int hash; 1963 for (hash = 0; hash < NR_STRIPE_HASH_LOCKS; hash++) 1964 while (drop_one_stripe(conf, hash)) 1965 ; 1966 1967 if (conf->slab_cache) 1968 kmem_cache_destroy(conf->slab_cache); 1969 conf->slab_cache = NULL; 1970 } 1971 1972 static void raid5_end_read_request(struct bio * bi, int error) 1973 { 1974 struct stripe_head *sh = bi->bi_private; 1975 struct r5conf *conf = sh->raid_conf; 1976 int disks = sh->disks, i; 1977 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 1978 char b[BDEVNAME_SIZE]; 1979 struct md_rdev *rdev = NULL; 1980 sector_t s; 1981 1982 for (i=0 ; i<disks; i++) 1983 if (bi == &sh->dev[i].req) 1984 break; 1985 1986 pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n", 1987 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 1988 uptodate); 1989 if (i == disks) { 1990 BUG(); 1991 return; 1992 } 1993 if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) 1994 /* If replacement finished while this request was outstanding, 1995 * 'replacement' might be NULL already. 1996 * In that case it moved down to 'rdev'. 1997 * rdev is not removed until all requests are finished. 
1998 */ 1999 rdev = conf->disks[i].replacement; 2000 if (!rdev) 2001 rdev = conf->disks[i].rdev; 2002 2003 if (use_new_offset(conf, sh)) 2004 s = sh->sector + rdev->new_data_offset; 2005 else 2006 s = sh->sector + rdev->data_offset; 2007 if (uptodate) { 2008 set_bit(R5_UPTODATE, &sh->dev[i].flags); 2009 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 2010 /* Note that this cannot happen on a 2011 * replacement device. We just fail those on 2012 * any error 2013 */ 2014 printk_ratelimited( 2015 KERN_INFO 2016 "md/raid:%s: read error corrected" 2017 " (%lu sectors at %llu on %s)\n", 2018 mdname(conf->mddev), STRIPE_SECTORS, 2019 (unsigned long long)s, 2020 bdevname(rdev->bdev, b)); 2021 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); 2022 clear_bit(R5_ReadError, &sh->dev[i].flags); 2023 clear_bit(R5_ReWrite, &sh->dev[i].flags); 2024 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) 2025 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); 2026 2027 if (atomic_read(&rdev->read_errors)) 2028 atomic_set(&rdev->read_errors, 0); 2029 } else { 2030 const char *bdn = bdevname(rdev->bdev, b); 2031 int retry = 0; 2032 int set_bad = 0; 2033 2034 clear_bit(R5_UPTODATE, &sh->dev[i].flags); 2035 atomic_inc(&rdev->read_errors); 2036 if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) 2037 printk_ratelimited( 2038 KERN_WARNING 2039 "md/raid:%s: read error on replacement device " 2040 "(sector %llu on %s).\n", 2041 mdname(conf->mddev), 2042 (unsigned long long)s, 2043 bdn); 2044 else if (conf->mddev->degraded >= conf->max_degraded) { 2045 set_bad = 1; 2046 printk_ratelimited( 2047 KERN_WARNING 2048 "md/raid:%s: read error not correctable " 2049 "(sector %llu on %s).\n", 2050 mdname(conf->mddev), 2051 (unsigned long long)s, 2052 bdn); 2053 } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) { 2054 /* Oh, no!!! */ 2055 set_bad = 1; 2056 printk_ratelimited( 2057 KERN_WARNING 2058 "md/raid:%s: read error NOT corrected!! 
" 2059 "(sector %llu on %s).\n", 2060 mdname(conf->mddev), 2061 (unsigned long long)s, 2062 bdn); 2063 } else if (atomic_read(&rdev->read_errors) 2064 > conf->max_nr_stripes) 2065 printk(KERN_WARNING 2066 "md/raid:%s: Too many read errors, failing device %s.\n", 2067 mdname(conf->mddev), bdn); 2068 else 2069 retry = 1; 2070 if (set_bad && test_bit(In_sync, &rdev->flags) 2071 && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) 2072 retry = 1; 2073 if (retry) 2074 if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) { 2075 set_bit(R5_ReadError, &sh->dev[i].flags); 2076 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); 2077 } else 2078 set_bit(R5_ReadNoMerge, &sh->dev[i].flags); 2079 else { 2080 clear_bit(R5_ReadError, &sh->dev[i].flags); 2081 clear_bit(R5_ReWrite, &sh->dev[i].flags); 2082 if (!(set_bad 2083 && test_bit(In_sync, &rdev->flags) 2084 && rdev_set_badblocks( 2085 rdev, sh->sector, STRIPE_SECTORS, 0))) 2086 md_error(conf->mddev, rdev); 2087 } 2088 } 2089 rdev_dec_pending(rdev, conf->mddev); 2090 clear_bit(R5_LOCKED, &sh->dev[i].flags); 2091 set_bit(STRIPE_HANDLE, &sh->state); 2092 release_stripe(sh); 2093 } 2094 2095 static void raid5_end_write_request(struct bio *bi, int error) 2096 { 2097 struct stripe_head *sh = bi->bi_private; 2098 struct r5conf *conf = sh->raid_conf; 2099 int disks = sh->disks, i; 2100 struct md_rdev *uninitialized_var(rdev); 2101 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 2102 sector_t first_bad; 2103 int bad_sectors; 2104 int replacement = 0; 2105 2106 for (i = 0 ; i < disks; i++) { 2107 if (bi == &sh->dev[i].req) { 2108 rdev = conf->disks[i].rdev; 2109 break; 2110 } 2111 if (bi == &sh->dev[i].rreq) { 2112 rdev = conf->disks[i].replacement; 2113 if (rdev) 2114 replacement = 1; 2115 else 2116 /* rdev was removed and 'replacement' 2117 * replaced it. rdev is not removed 2118 * until all requests are finished. 2119 */ 2120 rdev = conf->disks[i].rdev; 2121 break; 2122 } 2123 } 2124 pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n", 2125 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 2126 uptodate); 2127 if (i == disks) { 2128 BUG(); 2129 return; 2130 } 2131 2132 if (replacement) { 2133 if (!uptodate) 2134 md_error(conf->mddev, rdev); 2135 else if (is_badblock(rdev, sh->sector, 2136 STRIPE_SECTORS, 2137 &first_bad, &bad_sectors)) 2138 set_bit(R5_MadeGoodRepl, &sh->dev[i].flags); 2139 } else { 2140 if (!uptodate) { 2141 set_bit(STRIPE_DEGRADED, &sh->state); 2142 set_bit(WriteErrorSeen, &rdev->flags); 2143 set_bit(R5_WriteError, &sh->dev[i].flags); 2144 if (!test_and_set_bit(WantReplacement, &rdev->flags)) 2145 set_bit(MD_RECOVERY_NEEDED, 2146 &rdev->mddev->recovery); 2147 } else if (is_badblock(rdev, sh->sector, 2148 STRIPE_SECTORS, 2149 &first_bad, &bad_sectors)) { 2150 set_bit(R5_MadeGood, &sh->dev[i].flags); 2151 if (test_bit(R5_ReadError, &sh->dev[i].flags)) 2152 /* That was a successful write so make 2153 * sure it looks like we already did 2154 * a re-write. 
2155 */ 2156 set_bit(R5_ReWrite, &sh->dev[i].flags); 2157 } 2158 } 2159 rdev_dec_pending(rdev, conf->mddev); 2160 2161 if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags)) 2162 clear_bit(R5_LOCKED, &sh->dev[i].flags); 2163 set_bit(STRIPE_HANDLE, &sh->state); 2164 release_stripe(sh); 2165 } 2166 2167 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous); 2168 2169 static void raid5_build_block(struct stripe_head *sh, int i, int previous) 2170 { 2171 struct r5dev *dev = &sh->dev[i]; 2172 2173 bio_init(&dev->req); 2174 dev->req.bi_io_vec = &dev->vec; 2175 dev->req.bi_max_vecs = 1; 2176 dev->req.bi_private = sh; 2177 2178 bio_init(&dev->rreq); 2179 dev->rreq.bi_io_vec = &dev->rvec; 2180 dev->rreq.bi_max_vecs = 1; 2181 dev->rreq.bi_private = sh; 2182 2183 dev->flags = 0; 2184 dev->sector = compute_blocknr(sh, i, previous); 2185 } 2186 2187 static void error(struct mddev *mddev, struct md_rdev *rdev) 2188 { 2189 char b[BDEVNAME_SIZE]; 2190 struct r5conf *conf = mddev->private; 2191 unsigned long flags; 2192 pr_debug("raid456: error called\n"); 2193 2194 spin_lock_irqsave(&conf->device_lock, flags); 2195 clear_bit(In_sync, &rdev->flags); 2196 mddev->degraded = calc_degraded(conf); 2197 spin_unlock_irqrestore(&conf->device_lock, flags); 2198 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 2199 2200 set_bit(Blocked, &rdev->flags); 2201 set_bit(Faulty, &rdev->flags); 2202 set_bit(MD_CHANGE_DEVS, &mddev->flags); 2203 printk(KERN_ALERT 2204 "md/raid:%s: Disk failure on %s, disabling device.\n" 2205 "md/raid:%s: Operation continuing on %d devices.\n", 2206 mdname(mddev), 2207 bdevname(rdev->bdev, b), 2208 mdname(mddev), 2209 conf->raid_disks - mddev->degraded); 2210 } 2211 2212 /* 2213 * Input: a 'big' sector number, 2214 * Output: index of the data and parity disk, and the sector # in them. 2215 */ 2216 static sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector, 2217 int previous, int *dd_idx, 2218 struct stripe_head *sh) 2219 { 2220 sector_t stripe, stripe2; 2221 sector_t chunk_number; 2222 unsigned int chunk_offset; 2223 int pd_idx, qd_idx; 2224 int ddf_layout = 0; 2225 sector_t new_sector; 2226 int algorithm = previous ? conf->prev_algo 2227 : conf->algorithm; 2228 int sectors_per_chunk = previous ? conf->prev_chunk_sectors 2229 : conf->chunk_sectors; 2230 int raid_disks = previous ? conf->previous_raid_disks 2231 : conf->raid_disks; 2232 int data_disks = raid_disks - conf->max_degraded; 2233 2234 /* First compute the information on this sector */ 2235 2236 /* 2237 * Compute the chunk number and the sector offset inside the chunk 2238 */ 2239 chunk_offset = sector_div(r_sector, sectors_per_chunk); 2240 chunk_number = r_sector; 2241 2242 /* 2243 * Compute the stripe number 2244 */ 2245 stripe = chunk_number; 2246 *dd_idx = sector_div(stripe, data_disks); 2247 stripe2 = stripe; 2248 /* 2249 * Select the parity disk based on the user selected algorithm. 
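 *
 * A worked example (hypothetical 4-device raid5 array using
 * ALGORITHM_LEFT_SYMMETRIC, the usual md default): parity rotates one
 * device to the left on each successive stripe and the data chunks
 * continue immediately after it, so the first four stripes map as
 *
 *	disk:     0    1    2    3
 *	stripe 0: D0   D1   D2   P
 *	stripe 1: D4   D5   P    D3
 *	stripe 2: D8   P    D6   D7
 *	stripe 3: P    D9   D10  D11
 *
 * which is what the case 5 / ALGORITHM_LEFT_SYMMETRIC branch below
 * computes via pd_idx and (pd_idx + 1 + *dd_idx) % raid_disks.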
2250 */ 2251 pd_idx = qd_idx = -1; 2252 switch(conf->level) { 2253 case 4: 2254 pd_idx = data_disks; 2255 break; 2256 case 5: 2257 switch (algorithm) { 2258 case ALGORITHM_LEFT_ASYMMETRIC: 2259 pd_idx = data_disks - sector_div(stripe2, raid_disks); 2260 if (*dd_idx >= pd_idx) 2261 (*dd_idx)++; 2262 break; 2263 case ALGORITHM_RIGHT_ASYMMETRIC: 2264 pd_idx = sector_div(stripe2, raid_disks); 2265 if (*dd_idx >= pd_idx) 2266 (*dd_idx)++; 2267 break; 2268 case ALGORITHM_LEFT_SYMMETRIC: 2269 pd_idx = data_disks - sector_div(stripe2, raid_disks); 2270 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 2271 break; 2272 case ALGORITHM_RIGHT_SYMMETRIC: 2273 pd_idx = sector_div(stripe2, raid_disks); 2274 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 2275 break; 2276 case ALGORITHM_PARITY_0: 2277 pd_idx = 0; 2278 (*dd_idx)++; 2279 break; 2280 case ALGORITHM_PARITY_N: 2281 pd_idx = data_disks; 2282 break; 2283 default: 2284 BUG(); 2285 } 2286 break; 2287 case 6: 2288 2289 switch (algorithm) { 2290 case ALGORITHM_LEFT_ASYMMETRIC: 2291 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 2292 qd_idx = pd_idx + 1; 2293 if (pd_idx == raid_disks-1) { 2294 (*dd_idx)++; /* Q D D D P */ 2295 qd_idx = 0; 2296 } else if (*dd_idx >= pd_idx) 2297 (*dd_idx) += 2; /* D D P Q D */ 2298 break; 2299 case ALGORITHM_RIGHT_ASYMMETRIC: 2300 pd_idx = sector_div(stripe2, raid_disks); 2301 qd_idx = pd_idx + 1; 2302 if (pd_idx == raid_disks-1) { 2303 (*dd_idx)++; /* Q D D D P */ 2304 qd_idx = 0; 2305 } else if (*dd_idx >= pd_idx) 2306 (*dd_idx) += 2; /* D D P Q D */ 2307 break; 2308 case ALGORITHM_LEFT_SYMMETRIC: 2309 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 2310 qd_idx = (pd_idx + 1) % raid_disks; 2311 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; 2312 break; 2313 case ALGORITHM_RIGHT_SYMMETRIC: 2314 pd_idx = sector_div(stripe2, raid_disks); 2315 qd_idx = (pd_idx + 1) % raid_disks; 2316 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; 2317 break; 2318 2319 case ALGORITHM_PARITY_0: 2320 pd_idx = 0; 2321 qd_idx = 1; 2322 (*dd_idx) += 2; 2323 break; 2324 case ALGORITHM_PARITY_N: 2325 pd_idx = data_disks; 2326 qd_idx = data_disks + 1; 2327 break; 2328 2329 case ALGORITHM_ROTATING_ZERO_RESTART: 2330 /* Exactly the same as RIGHT_ASYMMETRIC, but the order 2331 * of blocks for computing Q is different.
2332 */ 2333 pd_idx = sector_div(stripe2, raid_disks); 2334 qd_idx = pd_idx + 1; 2335 if (pd_idx == raid_disks-1) { 2336 (*dd_idx)++; /* Q D D D P */ 2337 qd_idx = 0; 2338 } else if (*dd_idx >= pd_idx) 2339 (*dd_idx) += 2; /* D D P Q D */ 2340 ddf_layout = 1; 2341 break; 2342 2343 case ALGORITHM_ROTATING_N_RESTART: 2344 /* Same as left_asymmetric, but the first stripe is 2345 * D D D P Q rather than 2346 * Q D D D P 2347 */ 2348 stripe2 += 1; 2349 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 2350 qd_idx = pd_idx + 1; 2351 if (pd_idx == raid_disks-1) { 2352 (*dd_idx)++; /* Q D D D P */ 2353 qd_idx = 0; 2354 } else if (*dd_idx >= pd_idx) 2355 (*dd_idx) += 2; /* D D P Q D */ 2356 ddf_layout = 1; 2357 break; 2358 2359 case ALGORITHM_ROTATING_N_CONTINUE: 2360 /* Same as left_symmetric but Q is before P */ 2361 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 2362 qd_idx = (pd_idx + raid_disks - 1) % raid_disks; 2363 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 2364 ddf_layout = 1; 2365 break; 2366 2367 case ALGORITHM_LEFT_ASYMMETRIC_6: 2368 /* RAID5 left_asymmetric, with Q on last device */ 2369 pd_idx = data_disks - sector_div(stripe2, raid_disks-1); 2370 if (*dd_idx >= pd_idx) 2371 (*dd_idx)++; 2372 qd_idx = raid_disks - 1; 2373 break; 2374 2375 case ALGORITHM_RIGHT_ASYMMETRIC_6: 2376 pd_idx = sector_div(stripe2, raid_disks-1); 2377 if (*dd_idx >= pd_idx) 2378 (*dd_idx)++; 2379 qd_idx = raid_disks - 1; 2380 break; 2381 2382 case ALGORITHM_LEFT_SYMMETRIC_6: 2383 pd_idx = data_disks - sector_div(stripe2, raid_disks-1); 2384 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); 2385 qd_idx = raid_disks - 1; 2386 break; 2387 2388 case ALGORITHM_RIGHT_SYMMETRIC_6: 2389 pd_idx = sector_div(stripe2, raid_disks-1); 2390 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); 2391 qd_idx = raid_disks - 1; 2392 break; 2393 2394 case ALGORITHM_PARITY_0_6: 2395 pd_idx = 0; 2396 (*dd_idx)++; 2397 qd_idx = raid_disks - 1; 2398 break; 2399 2400 default: 2401 BUG(); 2402 } 2403 break; 2404 } 2405 2406 if (sh) { 2407 sh->pd_idx = pd_idx; 2408 sh->qd_idx = qd_idx; 2409 sh->ddf_layout = ddf_layout; 2410 } 2411 /* 2412 * Finally, compute the new sector number 2413 */ 2414 new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset; 2415 return new_sector; 2416 } 2417 2418 2419 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) 2420 { 2421 struct r5conf *conf = sh->raid_conf; 2422 int raid_disks = sh->disks; 2423 int data_disks = raid_disks - conf->max_degraded; 2424 sector_t new_sector = sh->sector, check; 2425 int sectors_per_chunk = previous ? conf->prev_chunk_sectors 2426 : conf->chunk_sectors; 2427 int algorithm = previous ?
conf->prev_algo 2428 : conf->algorithm; 2429 sector_t stripe; 2430 int chunk_offset; 2431 sector_t chunk_number; 2432 int dummy1, dd_idx = i; 2433 sector_t r_sector; 2434 struct stripe_head sh2; 2435 2436 2437 chunk_offset = sector_div(new_sector, sectors_per_chunk); 2438 stripe = new_sector; 2439 2440 if (i == sh->pd_idx) 2441 return 0; 2442 switch(conf->level) { 2443 case 4: break; 2444 case 5: 2445 switch (algorithm) { 2446 case ALGORITHM_LEFT_ASYMMETRIC: 2447 case ALGORITHM_RIGHT_ASYMMETRIC: 2448 if (i > sh->pd_idx) 2449 i--; 2450 break; 2451 case ALGORITHM_LEFT_SYMMETRIC: 2452 case ALGORITHM_RIGHT_SYMMETRIC: 2453 if (i < sh->pd_idx) 2454 i += raid_disks; 2455 i -= (sh->pd_idx + 1); 2456 break; 2457 case ALGORITHM_PARITY_0: 2458 i -= 1; 2459 break; 2460 case ALGORITHM_PARITY_N: 2461 break; 2462 default: 2463 BUG(); 2464 } 2465 break; 2466 case 6: 2467 if (i == sh->qd_idx) 2468 return 0; /* It is the Q disk */ 2469 switch (algorithm) { 2470 case ALGORITHM_LEFT_ASYMMETRIC: 2471 case ALGORITHM_RIGHT_ASYMMETRIC: 2472 case ALGORITHM_ROTATING_ZERO_RESTART: 2473 case ALGORITHM_ROTATING_N_RESTART: 2474 if (sh->pd_idx == raid_disks-1) 2475 i--; /* Q D D D P */ 2476 else if (i > sh->pd_idx) 2477 i -= 2; /* D D P Q D */ 2478 break; 2479 case ALGORITHM_LEFT_SYMMETRIC: 2480 case ALGORITHM_RIGHT_SYMMETRIC: 2481 if (sh->pd_idx == raid_disks-1) 2482 i--; /* Q D D D P */ 2483 else { 2484 /* D D P Q D */ 2485 if (i < sh->pd_idx) 2486 i += raid_disks; 2487 i -= (sh->pd_idx + 2); 2488 } 2489 break; 2490 case ALGORITHM_PARITY_0: 2491 i -= 2; 2492 break; 2493 case ALGORITHM_PARITY_N: 2494 break; 2495 case ALGORITHM_ROTATING_N_CONTINUE: 2496 /* Like left_symmetric, but P is before Q */ 2497 if (sh->pd_idx == 0) 2498 i--; /* P D D D Q */ 2499 else { 2500 /* D D Q P D */ 2501 if (i < sh->pd_idx) 2502 i += raid_disks; 2503 i -= (sh->pd_idx + 1); 2504 } 2505 break; 2506 case ALGORITHM_LEFT_ASYMMETRIC_6: 2507 case ALGORITHM_RIGHT_ASYMMETRIC_6: 2508 if (i > sh->pd_idx) 2509 i--; 2510 break; 2511 case ALGORITHM_LEFT_SYMMETRIC_6: 2512 case ALGORITHM_RIGHT_SYMMETRIC_6: 2513 if (i < sh->pd_idx) 2514 i += data_disks + 1; 2515 i -= (sh->pd_idx + 1); 2516 break; 2517 case ALGORITHM_PARITY_0_6: 2518 i -= 1; 2519 break; 2520 default: 2521 BUG(); 2522 } 2523 break; 2524 } 2525 2526 chunk_number = stripe * data_disks + i; 2527 r_sector = chunk_number * sectors_per_chunk + chunk_offset; 2528 2529 check = raid5_compute_sector(conf, r_sector, 2530 previous, &dummy1, &sh2); 2531 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx 2532 || sh2.qd_idx != sh->qd_idx) { 2533 printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n", 2534 mdname(conf->mddev)); 2535 return 0; 2536 } 2537 return r_sector; 2538 } 2539 2540 2541 static void 2542 schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, 2543 int rcw, int expand) 2544 { 2545 int i, pd_idx = sh->pd_idx, disks = sh->disks; 2546 struct r5conf *conf = sh->raid_conf; 2547 int level = conf->level; 2548 2549 if (rcw) { 2550 2551 for (i = disks; i--; ) { 2552 struct r5dev *dev = &sh->dev[i]; 2553 2554 if (dev->towrite) { 2555 set_bit(R5_LOCKED, &dev->flags); 2556 set_bit(R5_Wantdrain, &dev->flags); 2557 if (!expand) 2558 clear_bit(R5_UPTODATE, &dev->flags); 2559 s->locked++; 2560 } 2561 } 2562 /* if we are not expanding this is a proper write request, and 2563 * there will be bios with new data to be drained into the 2564 * stripe cache 2565 */ 2566 if (!expand) { 2567 if (!s->locked) 2568 /* False alarm, nothing to do */ 2569 return; 
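			/* (Sketch of the two states chosen in this function:
			 * "drain_run" is the reconstruct-write path - queued bios are
			 * drained into the stripe cache and parity is then recomputed
			 * over every data block; the "prexor_drain_run" variant set in
			 * the else branch below is the read-modify-write path, which
			 * first subtracts the old data out of the parity.)
			 */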
2570 sh->reconstruct_state = reconstruct_state_drain_run; 2571 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); 2572 } else 2573 sh->reconstruct_state = reconstruct_state_run; 2574 2575 set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); 2576 2577 if (s->locked + conf->max_degraded == disks) 2578 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) 2579 atomic_inc(&conf->pending_full_writes); 2580 } else { 2581 BUG_ON(level == 6); 2582 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || 2583 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); 2584 2585 for (i = disks; i--; ) { 2586 struct r5dev *dev = &sh->dev[i]; 2587 if (i == pd_idx) 2588 continue; 2589 2590 if (dev->towrite && 2591 (test_bit(R5_UPTODATE, &dev->flags) || 2592 test_bit(R5_Wantcompute, &dev->flags))) { 2593 set_bit(R5_Wantdrain, &dev->flags); 2594 set_bit(R5_LOCKED, &dev->flags); 2595 clear_bit(R5_UPTODATE, &dev->flags); 2596 s->locked++; 2597 } 2598 } 2599 if (!s->locked) 2600 /* False alarm - nothing to do */ 2601 return; 2602 sh->reconstruct_state = reconstruct_state_prexor_drain_run; 2603 set_bit(STRIPE_OP_PREXOR, &s->ops_request); 2604 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); 2605 set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); 2606 } 2607 2608 /* keep the parity disk(s) locked while asynchronous operations 2609 * are in flight 2610 */ 2611 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); 2612 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 2613 s->locked++; 2614 2615 if (level == 6) { 2616 int qd_idx = sh->qd_idx; 2617 struct r5dev *dev = &sh->dev[qd_idx]; 2618 2619 set_bit(R5_LOCKED, &dev->flags); 2620 clear_bit(R5_UPTODATE, &dev->flags); 2621 s->locked++; 2622 } 2623 2624 pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n", 2625 __func__, (unsigned long long)sh->sector, 2626 s->locked, s->ops_request); 2627 } 2628 2629 /* 2630 * Each stripe/dev can have one or more bios attached. 2631 * toread/towrite point to the first in a chain. 2632 * The bi_next chain must be in order. 2633 */ 2634 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite) 2635 { 2636 struct bio **bip; 2637 struct r5conf *conf = sh->raid_conf; 2638 int firstwrite=0; 2639 2640 pr_debug("adding bi b#%llu to stripe s#%llu\n", 2641 (unsigned long long)bi->bi_iter.bi_sector, 2642 (unsigned long long)sh->sector); 2643 2644 /* 2645 * If several bios share a stripe, the bio bi_phys_segments field acts as a 2646 * reference count to avoid races. The reference count should already be 2647 * increased before this function is called (for example, in 2648 * make_request()), so other bios sharing this stripe will not free the 2649 * stripe. If a stripe is owned by a single bio, the stripe lock will 2650 * protect it.
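	 *
	 * Lifecycle sketch (assuming the usual make_request() path): the
	 * submitter initialises the count to 1, every stripe that links the
	 * bio here calls raid5_inc_bi_active_stripes(), and each completion
	 * path calls raid5_dec_bi_active_stripes(); only when the count
	 * finally drops to zero is the bio ended and handed back to its
	 * owner.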
2651 */ 2652 spin_lock_irq(&sh->stripe_lock); 2653 if (forwrite) { 2654 bip = &sh->dev[dd_idx].towrite; 2655 if (*bip == NULL) 2656 firstwrite = 1; 2657 } else 2658 bip = &sh->dev[dd_idx].toread; 2659 while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) { 2660 if (bio_end_sector(*bip) > bi->bi_iter.bi_sector) 2661 goto overlap; 2662 bip = & (*bip)->bi_next; 2663 } 2664 if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi)) 2665 goto overlap; 2666 2667 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next); 2668 if (*bip) 2669 bi->bi_next = *bip; 2670 *bip = bi; 2671 raid5_inc_bi_active_stripes(bi); 2672 2673 if (forwrite) { 2674 /* check if page is covered */ 2675 sector_t sector = sh->dev[dd_idx].sector; 2676 for (bi=sh->dev[dd_idx].towrite; 2677 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && 2678 bi && bi->bi_iter.bi_sector <= sector; 2679 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { 2680 if (bio_end_sector(bi) >= sector) 2681 sector = bio_end_sector(bi); 2682 } 2683 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) 2684 set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags); 2685 } 2686 2687 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", 2688 (unsigned long long)(*bip)->bi_iter.bi_sector, 2689 (unsigned long long)sh->sector, dd_idx); 2690 spin_unlock_irq(&sh->stripe_lock); 2691 2692 if (conf->mddev->bitmap && firstwrite) { 2693 bitmap_startwrite(conf->mddev->bitmap, sh->sector, 2694 STRIPE_SECTORS, 0); 2695 sh->bm_seq = conf->seq_flush+1; 2696 set_bit(STRIPE_BIT_DELAY, &sh->state); 2697 } 2698 return 1; 2699 2700 overlap: 2701 set_bit(R5_Overlap, &sh->dev[dd_idx].flags); 2702 spin_unlock_irq(&sh->stripe_lock); 2703 return 0; 2704 } 2705 2706 static void end_reshape(struct r5conf *conf); 2707 2708 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, 2709 struct stripe_head *sh) 2710 { 2711 int sectors_per_chunk = 2712 previous ? conf->prev_chunk_sectors : conf->chunk_sectors; 2713 int dd_idx; 2714 int chunk_offset = sector_div(stripe, sectors_per_chunk); 2715 int disks = previous ? 
conf->previous_raid_disks : conf->raid_disks; 2716 2717 raid5_compute_sector(conf, 2718 stripe * (disks - conf->max_degraded) 2719 *sectors_per_chunk + chunk_offset, 2720 previous, 2721 &dd_idx, sh); 2722 } 2723 2724 static void 2725 handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, 2726 struct stripe_head_state *s, int disks, 2727 struct bio **return_bi) 2728 { 2729 int i; 2730 for (i = disks; i--; ) { 2731 struct bio *bi; 2732 int bitmap_end = 0; 2733 2734 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 2735 struct md_rdev *rdev; 2736 rcu_read_lock(); 2737 rdev = rcu_dereference(conf->disks[i].rdev); 2738 if (rdev && test_bit(In_sync, &rdev->flags)) 2739 atomic_inc(&rdev->nr_pending); 2740 else 2741 rdev = NULL; 2742 rcu_read_unlock(); 2743 if (rdev) { 2744 if (!rdev_set_badblocks( 2745 rdev, 2746 sh->sector, 2747 STRIPE_SECTORS, 0)) 2748 md_error(conf->mddev, rdev); 2749 rdev_dec_pending(rdev, conf->mddev); 2750 } 2751 } 2752 spin_lock_irq(&sh->stripe_lock); 2753 /* fail all writes first */ 2754 bi = sh->dev[i].towrite; 2755 sh->dev[i].towrite = NULL; 2756 spin_unlock_irq(&sh->stripe_lock); 2757 if (bi) 2758 bitmap_end = 1; 2759 2760 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 2761 wake_up(&conf->wait_for_overlap); 2762 2763 while (bi && bi->bi_iter.bi_sector < 2764 sh->dev[i].sector + STRIPE_SECTORS) { 2765 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 2766 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2767 if (!raid5_dec_bi_active_stripes(bi)) { 2768 md_write_end(conf->mddev); 2769 bi->bi_next = *return_bi; 2770 *return_bi = bi; 2771 } 2772 bi = nextbi; 2773 } 2774 if (bitmap_end) 2775 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 2776 STRIPE_SECTORS, 0, 0); 2777 bitmap_end = 0; 2778 /* and fail all 'written' */ 2779 bi = sh->dev[i].written; 2780 sh->dev[i].written = NULL; 2781 if (test_and_clear_bit(R5_SkipCopy, &sh->dev[i].flags)) { 2782 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); 2783 sh->dev[i].page = sh->dev[i].orig_page; 2784 } 2785 2786 if (bi) bitmap_end = 1; 2787 while (bi && bi->bi_iter.bi_sector < 2788 sh->dev[i].sector + STRIPE_SECTORS) { 2789 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); 2790 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2791 if (!raid5_dec_bi_active_stripes(bi)) { 2792 md_write_end(conf->mddev); 2793 bi->bi_next = *return_bi; 2794 *return_bi = bi; 2795 } 2796 bi = bi2; 2797 } 2798 2799 /* fail any reads if this device is non-operational and 2800 * the data has not reached the cache yet. 
2801 */ 2802 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && 2803 (!test_bit(R5_Insync, &sh->dev[i].flags) || 2804 test_bit(R5_ReadError, &sh->dev[i].flags))) { 2805 spin_lock_irq(&sh->stripe_lock); 2806 bi = sh->dev[i].toread; 2807 sh->dev[i].toread = NULL; 2808 spin_unlock_irq(&sh->stripe_lock); 2809 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 2810 wake_up(&conf->wait_for_overlap); 2811 while (bi && bi->bi_iter.bi_sector < 2812 sh->dev[i].sector + STRIPE_SECTORS) { 2813 struct bio *nextbi = 2814 r5_next_bio(bi, sh->dev[i].sector); 2815 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2816 if (!raid5_dec_bi_active_stripes(bi)) { 2817 bi->bi_next = *return_bi; 2818 *return_bi = bi; 2819 } 2820 bi = nextbi; 2821 } 2822 } 2823 if (bitmap_end) 2824 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 2825 STRIPE_SECTORS, 0, 0); 2826 /* If we were in the middle of a write the parity block might 2827 * still be locked - so just clear all R5_LOCKED flags 2828 */ 2829 clear_bit(R5_LOCKED, &sh->dev[i].flags); 2830 } 2831 2832 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 2833 if (atomic_dec_and_test(&conf->pending_full_writes)) 2834 md_wakeup_thread(conf->mddev->thread); 2835 } 2836 2837 static void 2838 handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, 2839 struct stripe_head_state *s) 2840 { 2841 int abort = 0; 2842 int i; 2843 2844 clear_bit(STRIPE_SYNCING, &sh->state); 2845 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) 2846 wake_up(&conf->wait_for_overlap); 2847 s->syncing = 0; 2848 s->replacing = 0; 2849 /* There is nothing more to do for sync/check/repair. 2850 * Don't even need to abort as that is handled elsewhere 2851 * if needed, and not always wanted e.g. if there is a known 2852 * bad block here. 2853 * For recover/replace we need to record a bad block on all 2854 * non-sync devices, or abort the recovery 2855 */ 2856 if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) { 2857 /* During recovery devices cannot be removed, so 2858 * locking and refcounting of rdevs is not needed 2859 */ 2860 for (i = 0; i < conf->raid_disks; i++) { 2861 struct md_rdev *rdev = conf->disks[i].rdev; 2862 if (rdev 2863 && !test_bit(Faulty, &rdev->flags) 2864 && !test_bit(In_sync, &rdev->flags) 2865 && !rdev_set_badblocks(rdev, sh->sector, 2866 STRIPE_SECTORS, 0)) 2867 abort = 1; 2868 rdev = conf->disks[i].replacement; 2869 if (rdev 2870 && !test_bit(Faulty, &rdev->flags) 2871 && !test_bit(In_sync, &rdev->flags) 2872 && !rdev_set_badblocks(rdev, sh->sector, 2873 STRIPE_SECTORS, 0)) 2874 abort = 1; 2875 } 2876 if (abort) 2877 conf->recovery_disabled = 2878 conf->mddev->recovery_disabled; 2879 } 2880 md_done_sync(conf->mddev, STRIPE_SECTORS, !abort); 2881 } 2882 2883 static int want_replace(struct stripe_head *sh, int disk_idx) 2884 { 2885 struct md_rdev *rdev; 2886 int rv = 0; 2887 /* Doing recovery so rcu locking not required */ 2888 rdev = sh->raid_conf->disks[disk_idx].replacement; 2889 if (rdev 2890 && !test_bit(Faulty, &rdev->flags) 2891 && !test_bit(In_sync, &rdev->flags) 2892 && (rdev->recovery_offset <= sh->sector 2893 || rdev->mddev->recovery_cp <= sh->sector)) 2894 rv = 1; 2895 2896 return rv; 2897 } 2898 2899 /* fetch_block - checks the given member device to see if its data needs 2900 * to be read or computed to satisfy a request. 
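 * (Typical reasons a block is wanted, mirroring the big condition below:
 * there is a pending read for it, a sync/expand/replace pass needs the
 * whole stripe, or a failed device's data must be reconstructed from the
 * surviving blocks.)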
2901 * 2902 * Returns 1 when no more member devices need to be checked, otherwise returns 2903 * 0 to tell the loop in handle_stripe_fill to continue 2904 */ 2905 static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, 2906 int disk_idx, int disks) 2907 { 2908 struct r5dev *dev = &sh->dev[disk_idx]; 2909 struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]], 2910 &sh->dev[s->failed_num[1]] }; 2911 2912 /* is the data in this block needed, and can we get it? */ 2913 if (!test_bit(R5_LOCKED, &dev->flags) && 2914 !test_bit(R5_UPTODATE, &dev->flags) && 2915 (dev->toread || 2916 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) || 2917 s->syncing || s->expanding || 2918 (s->replacing && want_replace(sh, disk_idx)) || 2919 (s->failed >= 1 && fdev[0]->toread) || 2920 (s->failed >= 2 && fdev[1]->toread) || 2921 (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite && 2922 (!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) && 2923 !test_bit(R5_OVERWRITE, &fdev[0]->flags)) || 2924 (sh->raid_conf->level == 6 && s->failed && s->to_write && 2925 s->to_write < sh->raid_conf->raid_disks - 2 && 2926 (!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))))) { 2927 /* we would like to get this block, possibly by computing it, 2928 * otherwise read it if the backing disk is insync 2929 */ 2930 BUG_ON(test_bit(R5_Wantcompute, &dev->flags)); 2931 BUG_ON(test_bit(R5_Wantread, &dev->flags)); 2932 if ((s->uptodate == disks - 1) && 2933 (s->failed && (disk_idx == s->failed_num[0] || 2934 disk_idx == s->failed_num[1]))) { 2935 /* have disk failed, and we're requested to fetch it; 2936 * do compute it 2937 */ 2938 pr_debug("Computing stripe %llu block %d\n", 2939 (unsigned long long)sh->sector, disk_idx); 2940 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 2941 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 2942 set_bit(R5_Wantcompute, &dev->flags); 2943 sh->ops.target = disk_idx; 2944 sh->ops.target2 = -1; /* no 2nd target */ 2945 s->req_compute = 1; 2946 /* Careful: from this point on 'uptodate' is in the eye 2947 * of raid_run_ops which services 'compute' operations 2948 * before writes. R5_Wantcompute flags a block that will 2949 * be R5_UPTODATE by the time it is needed for a 2950 * subsequent operation. 2951 */ 2952 s->uptodate++; 2953 return 1; 2954 } else if (s->uptodate == disks-2 && s->failed >= 2) { 2955 /* Computing 2-failure is *very* expensive; only 2956 * do it if failed >= 2 2957 */ 2958 int other; 2959 for (other = disks; other--; ) { 2960 if (other == disk_idx) 2961 continue; 2962 if (!test_bit(R5_UPTODATE, 2963 &sh->dev[other].flags)) 2964 break; 2965 } 2966 BUG_ON(other < 0); 2967 pr_debug("Computing stripe %llu blocks %d,%d\n", 2968 (unsigned long long)sh->sector, 2969 disk_idx, other); 2970 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 2971 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 2972 set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags); 2973 set_bit(R5_Wantcompute, &sh->dev[other].flags); 2974 sh->ops.target = disk_idx; 2975 sh->ops.target2 = other; 2976 s->uptodate += 2; 2977 s->req_compute = 1; 2978 return 1; 2979 } else if (test_bit(R5_Insync, &dev->flags)) { 2980 set_bit(R5_LOCKED, &dev->flags); 2981 set_bit(R5_Wantread, &dev->flags); 2982 s->locked++; 2983 pr_debug("Reading block %d (sync=%d)\n", 2984 disk_idx, s->syncing); 2985 } 2986 } 2987 2988 return 0; 2989 } 2990 2991 /** 2992 * handle_stripe_fill - read or compute data to satisfy pending requests. 
2993 */ 2994 static void handle_stripe_fill(struct stripe_head *sh, 2995 struct stripe_head_state *s, 2996 int disks) 2997 { 2998 int i; 2999 3000 /* look for blocks to read/compute, skip this if a compute 3001 * is already in flight, or if the stripe contents are in the 3002 * midst of changing due to a write 3003 */ 3004 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && 3005 !sh->reconstruct_state) 3006 for (i = disks; i--; ) 3007 if (fetch_block(sh, s, i, disks)) 3008 break; 3009 set_bit(STRIPE_HANDLE, &sh->state); 3010 } 3011 3012 3013 /* handle_stripe_clean_event 3014 * any written block on an uptodate or failed drive can be returned. 3015 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but 3016 * never LOCKED, so we don't need to test 'failed' directly. 3017 */ 3018 static void handle_stripe_clean_event(struct r5conf *conf, 3019 struct stripe_head *sh, int disks, struct bio **return_bi) 3020 { 3021 int i; 3022 struct r5dev *dev; 3023 int discard_pending = 0; 3024 3025 for (i = disks; i--; ) 3026 if (sh->dev[i].written) { 3027 dev = &sh->dev[i]; 3028 if (!test_bit(R5_LOCKED, &dev->flags) && 3029 (test_bit(R5_UPTODATE, &dev->flags) || 3030 test_bit(R5_Discard, &dev->flags) || 3031 test_bit(R5_SkipCopy, &dev->flags))) { 3032 /* We can return any write requests */ 3033 struct bio *wbi, *wbi2; 3034 pr_debug("Return write for disc %d\n", i); 3035 if (test_and_clear_bit(R5_Discard, &dev->flags)) 3036 clear_bit(R5_UPTODATE, &dev->flags); 3037 if (test_and_clear_bit(R5_SkipCopy, &dev->flags)) { 3038 WARN_ON(test_bit(R5_UPTODATE, &dev->flags)); 3039 dev->page = dev->orig_page; 3040 } 3041 wbi = dev->written; 3042 dev->written = NULL; 3043 while (wbi && wbi->bi_iter.bi_sector < 3044 dev->sector + STRIPE_SECTORS) { 3045 wbi2 = r5_next_bio(wbi, dev->sector); 3046 if (!raid5_dec_bi_active_stripes(wbi)) { 3047 md_write_end(conf->mddev); 3048 wbi->bi_next = *return_bi; 3049 *return_bi = wbi; 3050 } 3051 wbi = wbi2; 3052 } 3053 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 3054 STRIPE_SECTORS, 3055 !test_bit(STRIPE_DEGRADED, &sh->state), 3056 0); 3057 } else if (test_bit(R5_Discard, &dev->flags)) 3058 discard_pending = 1; 3059 WARN_ON(test_bit(R5_SkipCopy, &dev->flags)); 3060 WARN_ON(dev->page != dev->orig_page); 3061 } 3062 if (!discard_pending && 3063 test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) { 3064 clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); 3065 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); 3066 if (sh->qd_idx >= 0) { 3067 clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); 3068 clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags); 3069 } 3070 /* now that discard is done we can proceed with any sync */ 3071 clear_bit(STRIPE_DISCARD, &sh->state); 3072 /* 3073 * SCSI discard will change some bio fields and the stripe has 3074 * no updated data, so remove it from hash list and the stripe 3075 * will be reinitialized 3076 */ 3077 spin_lock_irq(&conf->device_lock); 3078 remove_hash(sh); 3079 spin_unlock_irq(&conf->device_lock); 3080 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) 3081 set_bit(STRIPE_HANDLE, &sh->state); 3082 3083 } 3084 3085 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 3086 if (atomic_dec_and_test(&conf->pending_full_writes)) 3087 md_wakeup_thread(conf->mddev->thread); 3088 } 3089 3090 static void handle_stripe_dirtying(struct r5conf *conf, 3091 struct stripe_head *sh, 3092 struct stripe_head_state *s, 3093 int disks) 3094 { 3095 int rmw = 0, rcw = 0, i; 3096 sector_t recovery_cp = conf->mddev->recovery_cp; 3097 3098 
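	/* (Background, with hypothetical numbers: "rmw" = read-modify-write,
	 * i.e. read the old copies of the blocks being written plus the old
	 * parity and xor the difference in; "rcw" = reconstruct-write, i.e.
	 * read every data block that is *not* being written and recompute
	 * parity from scratch.  On a 5-device raid5, updating one chunk needs
	 * 2 pre-reads with rmw but 3 with rcw, while overwriting 3 chunks
	 * needs 4 pre-reads with rmw but only 1 with rcw.  The loop below
	 * counts the reads each scheme would actually need for this stripe.)
	 */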
/* RAID6 requires 'rcw' in current implementation. 3099 * Otherwise, check whether resync is now happening or should start. 3100 * If yes, then the array is dirty (after unclean shutdown or 3101 * initial creation), so parity in some stripes might be inconsistent. 3102 * In this case, we need to always do reconstruct-write, to ensure 3103 * that in case of drive failure or read-error correction, we 3104 * generate correct data from the parity. 3105 */ 3106 if (conf->max_degraded == 2 || 3107 (recovery_cp < MaxSector && sh->sector >= recovery_cp)) { 3108 /* Calculate the real rcw later - for now make it 3109 * look like rcw is cheaper 3110 */ 3111 rcw = 1; rmw = 2; 3112 pr_debug("force RCW max_degraded=%u, recovery_cp=%llu sh->sector=%llu\n", 3113 conf->max_degraded, (unsigned long long)recovery_cp, 3114 (unsigned long long)sh->sector); 3115 } else for (i = disks; i--; ) { 3116 /* would I have to read this buffer for read_modify_write */ 3117 struct r5dev *dev = &sh->dev[i]; 3118 if ((dev->towrite || i == sh->pd_idx) && 3119 !test_bit(R5_LOCKED, &dev->flags) && 3120 !(test_bit(R5_UPTODATE, &dev->flags) || 3121 test_bit(R5_Wantcompute, &dev->flags))) { 3122 if (test_bit(R5_Insync, &dev->flags)) 3123 rmw++; 3124 else 3125 rmw += 2*disks; /* cannot read it */ 3126 } 3127 /* Would I have to read this buffer for reconstruct_write */ 3128 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx && 3129 !test_bit(R5_LOCKED, &dev->flags) && 3130 !(test_bit(R5_UPTODATE, &dev->flags) || 3131 test_bit(R5_Wantcompute, &dev->flags))) { 3132 if (test_bit(R5_Insync, &dev->flags)) 3133 rcw++; 3134 else 3135 rcw += 2*disks; 3136 } 3137 } 3138 pr_debug("for sector %llu, rmw=%d rcw=%d\n", 3139 (unsigned long long)sh->sector, rmw, rcw); 3140 set_bit(STRIPE_HANDLE, &sh->state); 3141 if (rmw < rcw && rmw > 0) { 3142 /* prefer read-modify-write, but need to get some data */ 3143 if (conf->mddev->queue) 3144 blk_add_trace_msg(conf->mddev->queue, 3145 "raid5 rmw %llu %d", 3146 (unsigned long long)sh->sector, rmw); 3147 for (i = disks; i--; ) { 3148 struct r5dev *dev = &sh->dev[i]; 3149 if ((dev->towrite || i == sh->pd_idx) && 3150 !test_bit(R5_LOCKED, &dev->flags) && 3151 !(test_bit(R5_UPTODATE, &dev->flags) || 3152 test_bit(R5_Wantcompute, &dev->flags)) && 3153 test_bit(R5_Insync, &dev->flags)) { 3154 if (test_bit(STRIPE_PREREAD_ACTIVE, 3155 &sh->state)) { 3156 pr_debug("Read_old block %d for r-m-w\n", 3157 i); 3158 set_bit(R5_LOCKED, &dev->flags); 3159 set_bit(R5_Wantread, &dev->flags); 3160 s->locked++; 3161 } else { 3162 set_bit(STRIPE_DELAYED, &sh->state); 3163 set_bit(STRIPE_HANDLE, &sh->state); 3164 } 3165 } 3166 } 3167 } 3168 if (rcw <= rmw && rcw > 0) { 3169 /* want reconstruct write, but need to get some data */ 3170 int qread =0; 3171 rcw = 0; 3172 for (i = disks; i--; ) { 3173 struct r5dev *dev = &sh->dev[i]; 3174 if (!test_bit(R5_OVERWRITE, &dev->flags) && 3175 i != sh->pd_idx && i != sh->qd_idx && 3176 !test_bit(R5_LOCKED, &dev->flags) && 3177 !(test_bit(R5_UPTODATE, &dev->flags) || 3178 test_bit(R5_Wantcompute, &dev->flags))) { 3179 rcw++; 3180 if (test_bit(R5_Insync, &dev->flags) && 3181 test_bit(STRIPE_PREREAD_ACTIVE, 3182 &sh->state)) { 3183 pr_debug("Read_old block " 3184 "%d for Reconstruct\n", i); 3185 set_bit(R5_LOCKED, &dev->flags); 3186 set_bit(R5_Wantread, &dev->flags); 3187 s->locked++; 3188 qread++; 3189 } else { 3190 set_bit(STRIPE_DELAYED, &sh->state); 3191 set_bit(STRIPE_HANDLE, &sh->state); 3192 } 3193 } 3194 } 3195 if (rcw && conf->mddev->queue) 3196 
blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d", 3197 (unsigned long long)sh->sector, 3198 rcw, qread, test_bit(STRIPE_DELAYED, &sh->state)); 3199 } 3200 /* now if nothing is locked, and if we have enough data, 3201 * we can start a write request 3202 */ 3203 /* since handle_stripe can be called at any time we need to handle the 3204 * case where a compute block operation has been submitted and then a 3205 * subsequent call wants to start a write request. raid_run_ops only 3206 * handles the case where compute block and reconstruct are requested 3207 * simultaneously. If this is not the case then new writes need to be 3208 * held off until the compute completes. 3209 */ 3210 if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && 3211 (s->locked == 0 && (rcw == 0 || rmw == 0) && 3212 !test_bit(STRIPE_BIT_DELAY, &sh->state))) 3213 schedule_reconstruction(sh, s, rcw == 0, 0); 3214 } 3215 3216 static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, 3217 struct stripe_head_state *s, int disks) 3218 { 3219 struct r5dev *dev = NULL; 3220 3221 set_bit(STRIPE_HANDLE, &sh->state); 3222 3223 switch (sh->check_state) { 3224 case check_state_idle: 3225 /* start a new check operation if there are no failures */ 3226 if (s->failed == 0) { 3227 BUG_ON(s->uptodate != disks); 3228 sh->check_state = check_state_run; 3229 set_bit(STRIPE_OP_CHECK, &s->ops_request); 3230 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); 3231 s->uptodate--; 3232 break; 3233 } 3234 dev = &sh->dev[s->failed_num[0]]; 3235 /* fall through */ 3236 case check_state_compute_result: 3237 sh->check_state = check_state_idle; 3238 if (!dev) 3239 dev = &sh->dev[sh->pd_idx]; 3240 3241 /* check that a write has not made the stripe insync */ 3242 if (test_bit(STRIPE_INSYNC, &sh->state)) 3243 break; 3244 3245 /* either failed parity check, or recovery is happening */ 3246 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags)); 3247 BUG_ON(s->uptodate != disks); 3248 3249 set_bit(R5_LOCKED, &dev->flags); 3250 s->locked++; 3251 set_bit(R5_Wantwrite, &dev->flags); 3252 3253 clear_bit(STRIPE_DEGRADED, &sh->state); 3254 set_bit(STRIPE_INSYNC, &sh->state); 3255 break; 3256 case check_state_run: 3257 break; /* we will be called again upon completion */ 3258 case check_state_check_result: 3259 sh->check_state = check_state_idle; 3260 3261 /* if a failure occurred during the check operation, leave 3262 * STRIPE_INSYNC not set and let the stripe be handled again 3263 */ 3264 if (s->failed) 3265 break; 3266 3267 /* handle a successful check operation, if parity is correct 3268 * we are done. Otherwise update the mismatch count and repair 3269 * parity if !MD_RECOVERY_CHECK 3270 */ 3271 if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0) 3272 /* parity is correct (on disc, 3273 * not in buffer any more) 3274 */ 3275 set_bit(STRIPE_INSYNC, &sh->state); 3276 else { 3277 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); 3278 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 3279 /* don't try to repair!! 
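				 * (During a "check" scrub MD_RECOVERY_CHECK is set, so after
				 * bumping resync_mismatches above we simply mark the stripe
				 * in-sync; a "repair" pass leaves the flag clear and falls
				 * through to the compute branch below, which rewrites parity.)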
*/ 3280 set_bit(STRIPE_INSYNC, &sh->state); 3281 else { 3282 sh->check_state = check_state_compute_run; 3283 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 3284 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 3285 set_bit(R5_Wantcompute, 3286 &sh->dev[sh->pd_idx].flags); 3287 sh->ops.target = sh->pd_idx; 3288 sh->ops.target2 = -1; 3289 s->uptodate++; 3290 } 3291 } 3292 break; 3293 case check_state_compute_run: 3294 break; 3295 default: 3296 printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n", 3297 __func__, sh->check_state, 3298 (unsigned long long) sh->sector); 3299 BUG(); 3300 } 3301 } 3302 3303 3304 static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, 3305 struct stripe_head_state *s, 3306 int disks) 3307 { 3308 int pd_idx = sh->pd_idx; 3309 int qd_idx = sh->qd_idx; 3310 struct r5dev *dev; 3311 3312 set_bit(STRIPE_HANDLE, &sh->state); 3313 3314 BUG_ON(s->failed > 2); 3315 3316 /* Want to check and possibly repair P and Q. 3317 * However there could be one 'failed' device, in which 3318 * case we can only check one of them, possibly using the 3319 * other to generate missing data 3320 */ 3321 3322 switch (sh->check_state) { 3323 case check_state_idle: 3324 /* start a new check operation if there are < 2 failures */ 3325 if (s->failed == s->q_failed) { 3326 /* The only possible failed device holds Q, so it 3327 * makes sense to check P (If anything else were failed, 3328 * we would have used P to recreate it). 3329 */ 3330 sh->check_state = check_state_run; 3331 } 3332 if (!s->q_failed && s->failed < 2) { 3333 /* Q is not failed, and we didn't use it to generate 3334 * anything, so it makes sense to check it 3335 */ 3336 if (sh->check_state == check_state_run) 3337 sh->check_state = check_state_run_pq; 3338 else 3339 sh->check_state = check_state_run_q; 3340 } 3341 3342 /* discard potentially stale zero_sum_result */ 3343 sh->ops.zero_sum_result = 0; 3344 3345 if (sh->check_state == check_state_run) { 3346 /* async_xor_zero_sum destroys the contents of P */ 3347 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 3348 s->uptodate--; 3349 } 3350 if (sh->check_state >= check_state_run && 3351 sh->check_state <= check_state_run_pq) { 3352 /* async_syndrome_zero_sum preserves P and Q, so 3353 * no need to mark them !uptodate here 3354 */ 3355 set_bit(STRIPE_OP_CHECK, &s->ops_request); 3356 break; 3357 } 3358 3359 /* we have 2-disk failure */ 3360 BUG_ON(s->failed != 2); 3361 /* fall through */ 3362 case check_state_compute_result: 3363 sh->check_state = check_state_idle; 3364 3365 /* check that a write has not made the stripe insync */ 3366 if (test_bit(STRIPE_INSYNC, &sh->state)) 3367 break; 3368 3369 /* now write out any block on a failed drive, 3370 * or P or Q if they were recomputed 3371 */ 3372 BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */ 3373 if (s->failed == 2) { 3374 dev = &sh->dev[s->failed_num[1]]; 3375 s->locked++; 3376 set_bit(R5_LOCKED, &dev->flags); 3377 set_bit(R5_Wantwrite, &dev->flags); 3378 } 3379 if (s->failed >= 1) { 3380 dev = &sh->dev[s->failed_num[0]]; 3381 s->locked++; 3382 set_bit(R5_LOCKED, &dev->flags); 3383 set_bit(R5_Wantwrite, &dev->flags); 3384 } 3385 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { 3386 dev = &sh->dev[pd_idx]; 3387 s->locked++; 3388 set_bit(R5_LOCKED, &dev->flags); 3389 set_bit(R5_Wantwrite, &dev->flags); 3390 } 3391 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { 3392 dev = &sh->dev[qd_idx]; 3393 s->locked++; 3394 set_bit(R5_LOCKED, &dev->flags); 3395 set_bit(R5_Wantwrite, &dev->flags); 
3396 } 3397 clear_bit(STRIPE_DEGRADED, &sh->state); 3398 3399 set_bit(STRIPE_INSYNC, &sh->state); 3400 break; 3401 case check_state_run: 3402 case check_state_run_q: 3403 case check_state_run_pq: 3404 break; /* we will be called again upon completion */ 3405 case check_state_check_result: 3406 sh->check_state = check_state_idle; 3407 3408 /* handle a successful check operation, if parity is correct 3409 * we are done. Otherwise update the mismatch count and repair 3410 * parity if !MD_RECOVERY_CHECK 3411 */ 3412 if (sh->ops.zero_sum_result == 0) { 3413 /* both parities are correct */ 3414 if (!s->failed) 3415 set_bit(STRIPE_INSYNC, &sh->state); 3416 else { 3417 /* in contrast to the raid5 case we can validate 3418 * parity, but still have a failure to write 3419 * back 3420 */ 3421 sh->check_state = check_state_compute_result; 3422 /* Returning at this point means that we may go 3423 * off and bring p and/or q uptodate again so 3424 * we make sure to check zero_sum_result again 3425 * to verify if p or q need writeback 3426 */ 3427 } 3428 } else { 3429 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); 3430 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 3431 /* don't try to repair!! */ 3432 set_bit(STRIPE_INSYNC, &sh->state); 3433 else { 3434 int *target = &sh->ops.target; 3435 3436 sh->ops.target = -1; 3437 sh->ops.target2 = -1; 3438 sh->check_state = check_state_compute_run; 3439 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 3440 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 3441 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { 3442 set_bit(R5_Wantcompute, 3443 &sh->dev[pd_idx].flags); 3444 *target = pd_idx; 3445 target = &sh->ops.target2; 3446 s->uptodate++; 3447 } 3448 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { 3449 set_bit(R5_Wantcompute, 3450 &sh->dev[qd_idx].flags); 3451 *target = qd_idx; 3452 s->uptodate++; 3453 } 3454 } 3455 } 3456 break; 3457 case check_state_compute_run: 3458 break; 3459 default: 3460 printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n", 3461 __func__, sh->check_state, 3462 (unsigned long long) sh->sector); 3463 BUG(); 3464 } 3465 } 3466 3467 static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh) 3468 { 3469 int i; 3470 3471 /* We have read all the blocks in this stripe and now we need to 3472 * copy some of them into a target stripe for expand. 3473 */ 3474 struct dma_async_tx_descriptor *tx = NULL; 3475 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state); 3476 for (i = 0; i < sh->disks; i++) 3477 if (i != sh->pd_idx && i != sh->qd_idx) { 3478 int dd_idx, j; 3479 struct stripe_head *sh2; 3480 struct async_submit_ctl submit; 3481 3482 sector_t bn = compute_blocknr(sh, i, 1); 3483 sector_t s = raid5_compute_sector(conf, bn, 0, 3484 &dd_idx, NULL); 3485 sh2 = get_active_stripe(conf, s, 0, 1, 1); 3486 if (sh2 == NULL) 3487 /* so far only the early blocks of this stripe 3488 * have been requested. 
When later blocks 3489 * get requested, we will try again 3490 */ 3491 continue; 3492 if (!test_bit(STRIPE_EXPANDING, &sh2->state) || 3493 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) { 3494 /* must have already done this block */ 3495 release_stripe(sh2); 3496 continue; 3497 } 3498 3499 /* place all the copies on one channel */ 3500 init_async_submit(&submit, 0, tx, NULL, NULL, NULL); 3501 tx = async_memcpy(sh2->dev[dd_idx].page, 3502 sh->dev[i].page, 0, 0, STRIPE_SIZE, 3503 &submit); 3504 3505 set_bit(R5_Expanded, &sh2->dev[dd_idx].flags); 3506 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags); 3507 for (j = 0; j < conf->raid_disks; j++) 3508 if (j != sh2->pd_idx && 3509 j != sh2->qd_idx && 3510 !test_bit(R5_Expanded, &sh2->dev[j].flags)) 3511 break; 3512 if (j == conf->raid_disks) { 3513 set_bit(STRIPE_EXPAND_READY, &sh2->state); 3514 set_bit(STRIPE_HANDLE, &sh2->state); 3515 } 3516 release_stripe(sh2); 3517 3518 } 3519 /* done submitting copies, wait for them to complete */ 3520 async_tx_quiesce(&tx); 3521 } 3522 3523 /* 3524 * handle_stripe - do things to a stripe. 3525 * 3526 * We lock the stripe by setting STRIPE_ACTIVE and then examine the 3527 * state of various bits to see what needs to be done. 3528 * Possible results: 3529 * return some read requests which now have data 3530 * return some write requests which are safely on storage 3531 * schedule a read on some buffers 3532 * schedule a write of some buffers 3533 * return confirmation of parity correctness 3534 * 3535 */ 3536 3537 static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) 3538 { 3539 struct r5conf *conf = sh->raid_conf; 3540 int disks = sh->disks; 3541 struct r5dev *dev; 3542 int i; 3543 int do_recovery = 0; 3544 3545 memset(s, 0, sizeof(*s)); 3546 3547 s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); 3548 s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); 3549 s->failed_num[0] = -1; 3550 s->failed_num[1] = -1; 3551 3552 /* Now to look around and see what can be done */ 3553 rcu_read_lock(); 3554 for (i=disks; i--; ) { 3555 struct md_rdev *rdev; 3556 sector_t first_bad; 3557 int bad_sectors; 3558 int is_bad = 0; 3559 3560 dev = &sh->dev[i]; 3561 3562 pr_debug("check %d: state 0x%lx read %p write %p written %p\n", 3563 i, dev->flags, 3564 dev->toread, dev->towrite, dev->written); 3565 /* maybe we can reply to a read 3566 * 3567 * new wantfill requests are only permitted while 3568 * ops_complete_biofill is guaranteed to be inactive 3569 */ 3570 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread && 3571 !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) 3572 set_bit(R5_Wantfill, &dev->flags); 3573 3574 /* now count some things */ 3575 if (test_bit(R5_LOCKED, &dev->flags)) 3576 s->locked++; 3577 if (test_bit(R5_UPTODATE, &dev->flags)) 3578 s->uptodate++; 3579 if (test_bit(R5_Wantcompute, &dev->flags)) { 3580 s->compute++; 3581 BUG_ON(s->compute > 2); 3582 } 3583 3584 if (test_bit(R5_Wantfill, &dev->flags)) 3585 s->to_fill++; 3586 else if (dev->toread) 3587 s->to_read++; 3588 if (dev->towrite) { 3589 s->to_write++; 3590 if (!test_bit(R5_OVERWRITE, &dev->flags)) 3591 s->non_overwrite++; 3592 } 3593 if (dev->written) 3594 s->written++; 3595 /* Prefer to use the replacement for reads, but only 3596 * if it is recovered enough and has no bad blocks. 
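	 * (Concretely: the replacement is chosen only when its
	 * recovery_offset already covers this whole stripe and is_badblock()
	 * reports nothing in the range, as tested just below; otherwise the
	 * read falls back to the original rdev.)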
3597 */ 3598 rdev = rcu_dereference(conf->disks[i].replacement); 3599 if (rdev && !test_bit(Faulty, &rdev->flags) && 3600 rdev->recovery_offset >= sh->sector + STRIPE_SECTORS && 3601 !is_badblock(rdev, sh->sector, STRIPE_SECTORS, 3602 &first_bad, &bad_sectors)) 3603 set_bit(R5_ReadRepl, &dev->flags); 3604 else { 3605 if (rdev) 3606 set_bit(R5_NeedReplace, &dev->flags); 3607 rdev = rcu_dereference(conf->disks[i].rdev); 3608 clear_bit(R5_ReadRepl, &dev->flags); 3609 } 3610 if (rdev && test_bit(Faulty, &rdev->flags)) 3611 rdev = NULL; 3612 if (rdev) { 3613 is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, 3614 &first_bad, &bad_sectors); 3615 if (s->blocked_rdev == NULL 3616 && (test_bit(Blocked, &rdev->flags) 3617 || is_bad < 0)) { 3618 if (is_bad < 0) 3619 set_bit(BlockedBadBlocks, 3620 &rdev->flags); 3621 s->blocked_rdev = rdev; 3622 atomic_inc(&rdev->nr_pending); 3623 } 3624 } 3625 clear_bit(R5_Insync, &dev->flags); 3626 if (!rdev) 3627 /* Not in-sync */; 3628 else if (is_bad) { 3629 /* also not in-sync */ 3630 if (!test_bit(WriteErrorSeen, &rdev->flags) && 3631 test_bit(R5_UPTODATE, &dev->flags)) { 3632 /* treat as in-sync, but with a read error 3633 * which we can now try to correct 3634 */ 3635 set_bit(R5_Insync, &dev->flags); 3636 set_bit(R5_ReadError, &dev->flags); 3637 } 3638 } else if (test_bit(In_sync, &rdev->flags)) 3639 set_bit(R5_Insync, &dev->flags); 3640 else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) 3641 /* in sync if before recovery_offset */ 3642 set_bit(R5_Insync, &dev->flags); 3643 else if (test_bit(R5_UPTODATE, &dev->flags) && 3644 test_bit(R5_Expanded, &dev->flags)) 3645 /* If we've reshaped into here, we assume it is Insync. 3646 * We will shortly update recovery_offset to make 3647 * it official. 3648 */ 3649 set_bit(R5_Insync, &dev->flags); 3650 3651 if (test_bit(R5_WriteError, &dev->flags)) { 3652 /* This flag does not apply to '.replacement' 3653 * only to .rdev, so make sure to check that*/ 3654 struct md_rdev *rdev2 = rcu_dereference( 3655 conf->disks[i].rdev); 3656 if (rdev2 == rdev) 3657 clear_bit(R5_Insync, &dev->flags); 3658 if (rdev2 && !test_bit(Faulty, &rdev2->flags)) { 3659 s->handle_bad_blocks = 1; 3660 atomic_inc(&rdev2->nr_pending); 3661 } else 3662 clear_bit(R5_WriteError, &dev->flags); 3663 } 3664 if (test_bit(R5_MadeGood, &dev->flags)) { 3665 /* This flag does not apply to '.replacement' 3666 * only to .rdev, so make sure to check that*/ 3667 struct md_rdev *rdev2 = rcu_dereference( 3668 conf->disks[i].rdev); 3669 if (rdev2 && !test_bit(Faulty, &rdev2->flags)) { 3670 s->handle_bad_blocks = 1; 3671 atomic_inc(&rdev2->nr_pending); 3672 } else 3673 clear_bit(R5_MadeGood, &dev->flags); 3674 } 3675 if (test_bit(R5_MadeGoodRepl, &dev->flags)) { 3676 struct md_rdev *rdev2 = rcu_dereference( 3677 conf->disks[i].replacement); 3678 if (rdev2 && !test_bit(Faulty, &rdev2->flags)) { 3679 s->handle_bad_blocks = 1; 3680 atomic_inc(&rdev2->nr_pending); 3681 } else 3682 clear_bit(R5_MadeGoodRepl, &dev->flags); 3683 } 3684 if (!test_bit(R5_Insync, &dev->flags)) { 3685 /* The ReadError flag will just be confusing now */ 3686 clear_bit(R5_ReadError, &dev->flags); 3687 clear_bit(R5_ReWrite, &dev->flags); 3688 } 3689 if (test_bit(R5_ReadError, &dev->flags)) 3690 clear_bit(R5_Insync, &dev->flags); 3691 if (!test_bit(R5_Insync, &dev->flags)) { 3692 if (s->failed < 2) 3693 s->failed_num[s->failed] = i; 3694 s->failed++; 3695 if (rdev && !test_bit(Faulty, &rdev->flags)) 3696 do_recovery = 1; 3697 } 3698 } 3699 if (test_bit(STRIPE_SYNCING, &sh->state)) { 
3700 /* If there is a failed device being replaced, 3701 * we must be recovering. 3702 * else if we are after recovery_cp, we must be syncing 3703 * else if MD_RECOVERY_REQUESTED is set, we also are syncing. 3704 * else we can only be replacing 3705 * sync and recovery both need to read all devices, and so 3706 * use the same flag. 3707 */ 3708 if (do_recovery || 3709 sh->sector >= conf->mddev->recovery_cp || 3710 test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery))) 3711 s->syncing = 1; 3712 else 3713 s->replacing = 1; 3714 } 3715 rcu_read_unlock(); 3716 } 3717 3718 static void handle_stripe(struct stripe_head *sh) 3719 { 3720 struct stripe_head_state s; 3721 struct r5conf *conf = sh->raid_conf; 3722 int i; 3723 int prexor; 3724 int disks = sh->disks; 3725 struct r5dev *pdev, *qdev; 3726 3727 clear_bit(STRIPE_HANDLE, &sh->state); 3728 if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) { 3729 /* already being handled, ensure it gets handled 3730 * again when current action finishes */ 3731 set_bit(STRIPE_HANDLE, &sh->state); 3732 return; 3733 } 3734 3735 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { 3736 spin_lock(&sh->stripe_lock); 3737 /* Cannot process 'sync' concurrently with 'discard' */ 3738 if (!test_bit(STRIPE_DISCARD, &sh->state) && 3739 test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { 3740 set_bit(STRIPE_SYNCING, &sh->state); 3741 clear_bit(STRIPE_INSYNC, &sh->state); 3742 clear_bit(STRIPE_REPLACED, &sh->state); 3743 } 3744 spin_unlock(&sh->stripe_lock); 3745 } 3746 clear_bit(STRIPE_DELAYED, &sh->state); 3747 3748 pr_debug("handling stripe %llu, state=%#lx cnt=%d, " 3749 "pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n", 3750 (unsigned long long)sh->sector, sh->state, 3751 atomic_read(&sh->count), sh->pd_idx, sh->qd_idx, 3752 sh->check_state, sh->reconstruct_state); 3753 3754 analyse_stripe(sh, &s); 3755 3756 if (s.handle_bad_blocks) { 3757 set_bit(STRIPE_HANDLE, &sh->state); 3758 goto finish; 3759 } 3760 3761 if (unlikely(s.blocked_rdev)) { 3762 if (s.syncing || s.expanding || s.expanded || 3763 s.replacing || s.to_write || s.written) { 3764 set_bit(STRIPE_HANDLE, &sh->state); 3765 goto finish; 3766 } 3767 /* There is nothing for the blocked_rdev to block */ 3768 rdev_dec_pending(s.blocked_rdev, conf->mddev); 3769 s.blocked_rdev = NULL; 3770 } 3771 3772 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { 3773 set_bit(STRIPE_OP_BIOFILL, &s.ops_request); 3774 set_bit(STRIPE_BIOFILL_RUN, &sh->state); 3775 } 3776 3777 pr_debug("locked=%d uptodate=%d to_read=%d" 3778 " to_write=%d failed=%d failed_num=%d,%d\n", 3779 s.locked, s.uptodate, s.to_read, s.to_write, s.failed, 3780 s.failed_num[0], s.failed_num[1]); 3781 /* check if the array has lost more than max_degraded devices and, 3782 * if so, some requests might need to be failed. 
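 * (max_degraded is 1 for raid4/5 and 2 for raid6, so this means more
 * devices have failed in this stripe than the parity can reconstruct.)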
3783 */ 3784 if (s.failed > conf->max_degraded) { 3785 sh->check_state = 0; 3786 sh->reconstruct_state = 0; 3787 if (s.to_read+s.to_write+s.written) 3788 handle_failed_stripe(conf, sh, &s, disks, &s.return_bi); 3789 if (s.syncing + s.replacing) 3790 handle_failed_sync(conf, sh, &s); 3791 } 3792 3793 /* Now we check to see if any write operations have recently 3794 * completed 3795 */ 3796 prexor = 0; 3797 if (sh->reconstruct_state == reconstruct_state_prexor_drain_result) 3798 prexor = 1; 3799 if (sh->reconstruct_state == reconstruct_state_drain_result || 3800 sh->reconstruct_state == reconstruct_state_prexor_drain_result) { 3801 sh->reconstruct_state = reconstruct_state_idle; 3802 3803 /* All the 'written' buffers and the parity block are ready to 3804 * be written back to disk 3805 */ 3806 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) && 3807 !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)); 3808 BUG_ON(sh->qd_idx >= 0 && 3809 !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags) && 3810 !test_bit(R5_Discard, &sh->dev[sh->qd_idx].flags)); 3811 for (i = disks; i--; ) { 3812 struct r5dev *dev = &sh->dev[i]; 3813 if (test_bit(R5_LOCKED, &dev->flags) && 3814 (i == sh->pd_idx || i == sh->qd_idx || 3815 dev->written)) { 3816 pr_debug("Writing block %d\n", i); 3817 set_bit(R5_Wantwrite, &dev->flags); 3818 if (prexor) 3819 continue; 3820 if (!test_bit(R5_Insync, &dev->flags) || 3821 ((i == sh->pd_idx || i == sh->qd_idx) && 3822 s.failed == 0)) 3823 set_bit(STRIPE_INSYNC, &sh->state); 3824 } 3825 } 3826 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 3827 s.dec_preread_active = 1; 3828 } 3829 3830 /* 3831 * might be able to return some write requests if the parity blocks 3832 * are safe, or on a failed drive 3833 */ 3834 pdev = &sh->dev[sh->pd_idx]; 3835 s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx) 3836 || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx); 3837 qdev = &sh->dev[sh->qd_idx]; 3838 s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx) 3839 || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx) 3840 || conf->level < 6; 3841 3842 if (s.written && 3843 (s.p_failed || ((test_bit(R5_Insync, &pdev->flags) 3844 && !test_bit(R5_LOCKED, &pdev->flags) 3845 && (test_bit(R5_UPTODATE, &pdev->flags) || 3846 test_bit(R5_Discard, &pdev->flags))))) && 3847 (s.q_failed || ((test_bit(R5_Insync, &qdev->flags) 3848 && !test_bit(R5_LOCKED, &qdev->flags) 3849 && (test_bit(R5_UPTODATE, &qdev->flags) || 3850 test_bit(R5_Discard, &qdev->flags)))))) 3851 handle_stripe_clean_event(conf, sh, disks, &s.return_bi); 3852 3853 /* Now we might consider reading some blocks, either to check/generate 3854 * parity, or to satisfy requests 3855 * or to load a block that is being partially written. 3856 */ 3857 if (s.to_read || s.non_overwrite 3858 || (conf->level == 6 && s.to_write && s.failed) 3859 || (s.syncing && (s.uptodate + s.compute < disks)) 3860 || s.replacing 3861 || s.expanding) 3862 handle_stripe_fill(sh, &s, disks); 3863 3864 /* Now to consider new write requests and what else, if anything 3865 * should be read. We do not handle new writes when: 3866 * 1/ A 'write' operation (copy+xor) is already in flight. 3867 * 2/ A 'check' operation is in flight, as it may clobber the parity 3868 * block. 
3869 */ 3870 if (s.to_write && !sh->reconstruct_state && !sh->check_state) 3871 handle_stripe_dirtying(conf, sh, &s, disks); 3872 3873 /* maybe we need to check and possibly fix the parity for this stripe 3874 * Any reads will already have been scheduled, so we just see if enough 3875 * data is available. The parity check is held off while parity 3876 * dependent operations are in flight. 3877 */ 3878 if (sh->check_state || 3879 (s.syncing && s.locked == 0 && 3880 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && 3881 !test_bit(STRIPE_INSYNC, &sh->state))) { 3882 if (conf->level == 6) 3883 handle_parity_checks6(conf, sh, &s, disks); 3884 else 3885 handle_parity_checks5(conf, sh, &s, disks); 3886 } 3887 3888 if ((s.replacing || s.syncing) && s.locked == 0 3889 && !test_bit(STRIPE_COMPUTE_RUN, &sh->state) 3890 && !test_bit(STRIPE_REPLACED, &sh->state)) { 3891 /* Write out to replacement devices where possible */ 3892 for (i = 0; i < conf->raid_disks; i++) 3893 if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) { 3894 WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags)); 3895 set_bit(R5_WantReplace, &sh->dev[i].flags); 3896 set_bit(R5_LOCKED, &sh->dev[i].flags); 3897 s.locked++; 3898 } 3899 if (s.replacing) 3900 set_bit(STRIPE_INSYNC, &sh->state); 3901 set_bit(STRIPE_REPLACED, &sh->state); 3902 } 3903 if ((s.syncing || s.replacing) && s.locked == 0 && 3904 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && 3905 test_bit(STRIPE_INSYNC, &sh->state)) { 3906 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 3907 clear_bit(STRIPE_SYNCING, &sh->state); 3908 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) 3909 wake_up(&conf->wait_for_overlap); 3910 } 3911 3912 /* If the failed drives are just a ReadError, then we might need 3913 * to progress the repair/check process 3914 */ 3915 if (s.failed <= conf->max_degraded && !conf->mddev->ro) 3916 for (i = 0; i < s.failed; i++) { 3917 struct r5dev *dev = &sh->dev[s.failed_num[i]]; 3918 if (test_bit(R5_ReadError, &dev->flags) 3919 && !test_bit(R5_LOCKED, &dev->flags) 3920 && test_bit(R5_UPTODATE, &dev->flags) 3921 ) { 3922 if (!test_bit(R5_ReWrite, &dev->flags)) { 3923 set_bit(R5_Wantwrite, &dev->flags); 3924 set_bit(R5_ReWrite, &dev->flags); 3925 set_bit(R5_LOCKED, &dev->flags); 3926 s.locked++; 3927 } else { 3928 /* let's read it back */ 3929 set_bit(R5_Wantread, &dev->flags); 3930 set_bit(R5_LOCKED, &dev->flags); 3931 s.locked++; 3932 } 3933 } 3934 } 3935 3936 3937 /* Finish reconstruct operations initiated by the expansion process */ 3938 if (sh->reconstruct_state == reconstruct_state_result) { 3939 struct stripe_head *sh_src 3940 = get_active_stripe(conf, sh->sector, 1, 1, 1); 3941 if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) { 3942 /* sh cannot be written until sh_src has been read. 
3943 * so arrange for sh to be delayed a little 3944 */ 3945 set_bit(STRIPE_DELAYED, &sh->state); 3946 set_bit(STRIPE_HANDLE, &sh->state); 3947 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, 3948 &sh_src->state)) 3949 atomic_inc(&conf->preread_active_stripes); 3950 release_stripe(sh_src); 3951 goto finish; 3952 } 3953 if (sh_src) 3954 release_stripe(sh_src); 3955 3956 sh->reconstruct_state = reconstruct_state_idle; 3957 clear_bit(STRIPE_EXPANDING, &sh->state); 3958 for (i = conf->raid_disks; i--; ) { 3959 set_bit(R5_Wantwrite, &sh->dev[i].flags); 3960 set_bit(R5_LOCKED, &sh->dev[i].flags); 3961 s.locked++; 3962 } 3963 } 3964 3965 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && 3966 !sh->reconstruct_state) { 3967 /* Need to write out all blocks after computing parity */ 3968 sh->disks = conf->raid_disks; 3969 stripe_set_idx(sh->sector, conf, 0, sh); 3970 schedule_reconstruction(sh, &s, 1, 1); 3971 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { 3972 clear_bit(STRIPE_EXPAND_READY, &sh->state); 3973 atomic_dec(&conf->reshape_stripes); 3974 wake_up(&conf->wait_for_overlap); 3975 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 3976 } 3977 3978 if (s.expanding && s.locked == 0 && 3979 !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) 3980 handle_stripe_expansion(conf, sh); 3981 3982 finish: 3983 /* wait for this device to become unblocked */ 3984 if (unlikely(s.blocked_rdev)) { 3985 if (conf->mddev->external) 3986 md_wait_for_blocked_rdev(s.blocked_rdev, 3987 conf->mddev); 3988 else 3989 /* Internal metadata will immediately 3990 * be written by raid5d, so we don't 3991 * need to wait here. 3992 */ 3993 rdev_dec_pending(s.blocked_rdev, 3994 conf->mddev); 3995 } 3996 3997 if (s.handle_bad_blocks) 3998 for (i = disks; i--; ) { 3999 struct md_rdev *rdev; 4000 struct r5dev *dev = &sh->dev[i]; 4001 if (test_and_clear_bit(R5_WriteError, &dev->flags)) { 4002 /* We own a safe reference to the rdev */ 4003 rdev = conf->disks[i].rdev; 4004 if (!rdev_set_badblocks(rdev, sh->sector, 4005 STRIPE_SECTORS, 0)) 4006 md_error(conf->mddev, rdev); 4007 rdev_dec_pending(rdev, conf->mddev); 4008 } 4009 if (test_and_clear_bit(R5_MadeGood, &dev->flags)) { 4010 rdev = conf->disks[i].rdev; 4011 rdev_clear_badblocks(rdev, sh->sector, 4012 STRIPE_SECTORS, 0); 4013 rdev_dec_pending(rdev, conf->mddev); 4014 } 4015 if (test_and_clear_bit(R5_MadeGoodRepl, &dev->flags)) { 4016 rdev = conf->disks[i].replacement; 4017 if (!rdev) 4018 /* rdev have been moved down */ 4019 rdev = conf->disks[i].rdev; 4020 rdev_clear_badblocks(rdev, sh->sector, 4021 STRIPE_SECTORS, 0); 4022 rdev_dec_pending(rdev, conf->mddev); 4023 } 4024 } 4025 4026 if (s.ops_request) 4027 raid_run_ops(sh, s.ops_request); 4028 4029 ops_run_io(sh, &s); 4030 4031 if (s.dec_preread_active) { 4032 /* We delay this until after ops_run_io so that if make_request 4033 * is waiting on a flush, it won't continue until the writes 4034 * have actually been submitted. 
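 * (Once preread_active_stripes drops below IO_THRESHOLD, raid5d is
 * woken so that raid5_activate_delayed() can move delayed stripes
 * onto the hold list.)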
4035 */ 4036 atomic_dec(&conf->preread_active_stripes); 4037 if (atomic_read(&conf->preread_active_stripes) < 4038 IO_THRESHOLD) 4039 md_wakeup_thread(conf->mddev->thread); 4040 } 4041 4042 return_io(s.return_bi); 4043 4044 clear_bit_unlock(STRIPE_ACTIVE, &sh->state); 4045 } 4046 4047 static void raid5_activate_delayed(struct r5conf *conf) 4048 { 4049 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { 4050 while (!list_empty(&conf->delayed_list)) { 4051 struct list_head *l = conf->delayed_list.next; 4052 struct stripe_head *sh; 4053 sh = list_entry(l, struct stripe_head, lru); 4054 list_del_init(l); 4055 clear_bit(STRIPE_DELAYED, &sh->state); 4056 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 4057 atomic_inc(&conf->preread_active_stripes); 4058 list_add_tail(&sh->lru, &conf->hold_list); 4059 raid5_wakeup_stripe_thread(sh); 4060 } 4061 } 4062 } 4063 4064 static void activate_bit_delay(struct r5conf *conf, 4065 struct list_head *temp_inactive_list) 4066 { 4067 /* device_lock is held */ 4068 struct list_head head; 4069 list_add(&head, &conf->bitmap_list); 4070 list_del_init(&conf->bitmap_list); 4071 while (!list_empty(&head)) { 4072 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); 4073 int hash; 4074 list_del_init(&sh->lru); 4075 atomic_inc(&sh->count); 4076 hash = sh->hash_lock_index; 4077 __release_stripe(conf, sh, &temp_inactive_list[hash]); 4078 } 4079 } 4080 4081 int md_raid5_congested(struct mddev *mddev, int bits) 4082 { 4083 struct r5conf *conf = mddev->private; 4084 4085 /* No difference between reads and writes. Just check 4086 * how busy the stripe_cache is 4087 */ 4088 4089 if (conf->inactive_blocked) 4090 return 1; 4091 if (conf->quiesce) 4092 return 1; 4093 if (atomic_read(&conf->empty_inactive_list_nr)) 4094 return 1; 4095 4096 return 0; 4097 } 4098 EXPORT_SYMBOL_GPL(md_raid5_congested); 4099 4100 static int raid5_congested(void *data, int bits) 4101 { 4102 struct mddev *mddev = data; 4103 4104 return mddev_congested(mddev, bits) || 4105 md_raid5_congested(mddev, bits); 4106 } 4107 4108 /* We want read requests to align with chunks where possible, 4109 * but write requests don't need to. 4110 */ 4111 static int raid5_mergeable_bvec(struct request_queue *q, 4112 struct bvec_merge_data *bvm, 4113 struct bio_vec *biovec) 4114 { 4115 struct mddev *mddev = q->queuedata; 4116 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); 4117 int max; 4118 unsigned int chunk_sectors = mddev->chunk_sectors; 4119 unsigned int bio_sectors = bvm->bi_size >> 9; 4120 4121 if ((bvm->bi_rw & 1) == WRITE) 4122 return biovec->bv_len; /* always allow writes to be mergeable */ 4123 4124 if (mddev->new_chunk_sectors < mddev->chunk_sectors) 4125 chunk_sectors = mddev->new_chunk_sectors; 4126 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; 4127 if (max < 0) max = 0; 4128 if (max <= biovec->bv_len && bio_sectors == 0) 4129 return biovec->bv_len; 4130 else 4131 return max; 4132 } 4133 4134 4135 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) 4136 { 4137 sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev); 4138 unsigned int chunk_sectors = mddev->chunk_sectors; 4139 unsigned int bio_sectors = bio_sectors(bio); 4140 4141 if (mddev->new_chunk_sectors < mddev->chunk_sectors) 4142 chunk_sectors = mddev->new_chunk_sectors; 4143 return chunk_sectors >= 4144 ((sector & (chunk_sectors - 1)) + bio_sectors); 4145 } 4146 4147 /* 4148 * add bio to the retry LIFO ( in O(1) ... 
we are in interrupt ) 4149 * later sampled by raid5d. 4150 */ 4151 static void add_bio_to_retry(struct bio *bi,struct r5conf *conf) 4152 { 4153 unsigned long flags; 4154 4155 spin_lock_irqsave(&conf->device_lock, flags); 4156 4157 bi->bi_next = conf->retry_read_aligned_list; 4158 conf->retry_read_aligned_list = bi; 4159 4160 spin_unlock_irqrestore(&conf->device_lock, flags); 4161 md_wakeup_thread(conf->mddev->thread); 4162 } 4163 4164 4165 static struct bio *remove_bio_from_retry(struct r5conf *conf) 4166 { 4167 struct bio *bi; 4168 4169 bi = conf->retry_read_aligned; 4170 if (bi) { 4171 conf->retry_read_aligned = NULL; 4172 return bi; 4173 } 4174 bi = conf->retry_read_aligned_list; 4175 if(bi) { 4176 conf->retry_read_aligned_list = bi->bi_next; 4177 bi->bi_next = NULL; 4178 /* 4179 * this sets the active strip count to 1 and the processed 4180 * strip count to zero (upper 8 bits) 4181 */ 4182 raid5_set_bi_stripes(bi, 1); /* biased count of active stripes */ 4183 } 4184 4185 return bi; 4186 } 4187 4188 4189 /* 4190 * The "raid5_align_endio" should check if the read succeeded and if it 4191 * did, call bio_endio on the original bio (having bio_put the new bio 4192 * first). 4193 * If the read failed.. 4194 */ 4195 static void raid5_align_endio(struct bio *bi, int error) 4196 { 4197 struct bio* raid_bi = bi->bi_private; 4198 struct mddev *mddev; 4199 struct r5conf *conf; 4200 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 4201 struct md_rdev *rdev; 4202 4203 bio_put(bi); 4204 4205 rdev = (void*)raid_bi->bi_next; 4206 raid_bi->bi_next = NULL; 4207 mddev = rdev->mddev; 4208 conf = mddev->private; 4209 4210 rdev_dec_pending(rdev, conf->mddev); 4211 4212 if (!error && uptodate) { 4213 trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev), 4214 raid_bi, 0); 4215 bio_endio(raid_bi, 0); 4216 if (atomic_dec_and_test(&conf->active_aligned_reads)) 4217 wake_up(&conf->wait_for_stripe); 4218 return; 4219 } 4220 4221 4222 pr_debug("raid5_align_endio : io error...handing IO for a retry\n"); 4223 4224 add_bio_to_retry(raid_bi, conf); 4225 } 4226 4227 static int bio_fits_rdev(struct bio *bi) 4228 { 4229 struct request_queue *q = bdev_get_queue(bi->bi_bdev); 4230 4231 if (bio_sectors(bi) > queue_max_sectors(q)) 4232 return 0; 4233 blk_recount_segments(q, bi); 4234 if (bi->bi_phys_segments > queue_max_segments(q)) 4235 return 0; 4236 4237 if (q->merge_bvec_fn) 4238 /* it's too hard to apply the merge_bvec_fn at this stage, 4239 * just just give up 4240 */ 4241 return 0; 4242 4243 return 1; 4244 } 4245 4246 4247 static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio) 4248 { 4249 struct r5conf *conf = mddev->private; 4250 int dd_idx; 4251 struct bio* align_bi; 4252 struct md_rdev *rdev; 4253 sector_t end_sector; 4254 4255 if (!in_chunk_boundary(mddev, raid_bio)) { 4256 pr_debug("chunk_aligned_read : non aligned\n"); 4257 return 0; 4258 } 4259 /* 4260 * use bio_clone_mddev to make a copy of the bio 4261 */ 4262 align_bi = bio_clone_mddev(raid_bio, GFP_NOIO, mddev); 4263 if (!align_bi) 4264 return 0; 4265 /* 4266 * set bi_end_io to a new function, and set bi_private to the 4267 * original bio. 
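 * (raid5_align_endio() completes the original bio on success; on a
 * read error it queues the bio via add_bio_to_retry() so that raid5d
 * can retry it through the stripe cache.)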
4268 */ 4269 align_bi->bi_end_io = raid5_align_endio; 4270 align_bi->bi_private = raid_bio; 4271 /* 4272 * compute position 4273 */ 4274 align_bi->bi_iter.bi_sector = 4275 raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, 4276 0, &dd_idx, NULL); 4277 4278 end_sector = bio_end_sector(align_bi); 4279 rcu_read_lock(); 4280 rdev = rcu_dereference(conf->disks[dd_idx].replacement); 4281 if (!rdev || test_bit(Faulty, &rdev->flags) || 4282 rdev->recovery_offset < end_sector) { 4283 rdev = rcu_dereference(conf->disks[dd_idx].rdev); 4284 if (rdev && 4285 (test_bit(Faulty, &rdev->flags) || 4286 !(test_bit(In_sync, &rdev->flags) || 4287 rdev->recovery_offset >= end_sector))) 4288 rdev = NULL; 4289 } 4290 if (rdev) { 4291 sector_t first_bad; 4292 int bad_sectors; 4293 4294 atomic_inc(&rdev->nr_pending); 4295 rcu_read_unlock(); 4296 raid_bio->bi_next = (void*)rdev; 4297 align_bi->bi_bdev = rdev->bdev; 4298 align_bi->bi_flags &= ~(1 << BIO_SEG_VALID); 4299 4300 if (!bio_fits_rdev(align_bi) || 4301 is_badblock(rdev, align_bi->bi_iter.bi_sector, 4302 bio_sectors(align_bi), 4303 &first_bad, &bad_sectors)) { 4304 /* too big in some way, or has a known bad block */ 4305 bio_put(align_bi); 4306 rdev_dec_pending(rdev, mddev); 4307 return 0; 4308 } 4309 4310 /* No reshape active, so we can trust rdev->data_offset */ 4311 align_bi->bi_iter.bi_sector += rdev->data_offset; 4312 4313 spin_lock_irq(&conf->device_lock); 4314 wait_event_lock_irq(conf->wait_for_stripe, 4315 conf->quiesce == 0, 4316 conf->device_lock); 4317 atomic_inc(&conf->active_aligned_reads); 4318 spin_unlock_irq(&conf->device_lock); 4319 4320 if (mddev->gendisk) 4321 trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev), 4322 align_bi, disk_devt(mddev->gendisk), 4323 raid_bio->bi_iter.bi_sector); 4324 generic_make_request(align_bi); 4325 return 1; 4326 } else { 4327 rcu_read_unlock(); 4328 bio_put(align_bi); 4329 return 0; 4330 } 4331 } 4332 4333 /* __get_priority_stripe - get the next stripe to process 4334 * 4335 * Full stripe writes are allowed to pass preread active stripes up until 4336 * the bypass_threshold is exceeded. In general the bypass_count 4337 * increments when the handle_list is handled before the hold_list; however, it 4338 * will not be incremented when STRIPE_IO_STARTED is sampled set signifying a 4339 * stripe with in flight i/o. The bypass_count will be reset when the 4340 * head of the hold_list has changed, i.e. the head was promoted to the 4341 * handle_list. 4342 */ 4343 static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group) 4344 { 4345 struct stripe_head *sh = NULL, *tmp; 4346 struct list_head *handle_list = NULL; 4347 struct r5worker_group *wg = NULL; 4348 4349 if (conf->worker_cnt_per_group == 0) { 4350 handle_list = &conf->handle_list; 4351 } else if (group != ANY_GROUP) { 4352 handle_list = &conf->worker_groups[group].handle_list; 4353 wg = &conf->worker_groups[group]; 4354 } else { 4355 int i; 4356 for (i = 0; i < conf->group_cnt; i++) { 4357 handle_list = &conf->worker_groups[i].handle_list; 4358 wg = &conf->worker_groups[i]; 4359 if (!list_empty(handle_list)) 4360 break; 4361 } 4362 } 4363 4364 pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n", 4365 __func__, 4366 list_empty(handle_list) ? "empty" : "busy", 4367 list_empty(&conf->hold_list) ? 
"empty" : "busy", 4368 atomic_read(&conf->pending_full_writes), conf->bypass_count); 4369 4370 if (!list_empty(handle_list)) { 4371 sh = list_entry(handle_list->next, typeof(*sh), lru); 4372 4373 if (list_empty(&conf->hold_list)) 4374 conf->bypass_count = 0; 4375 else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) { 4376 if (conf->hold_list.next == conf->last_hold) 4377 conf->bypass_count++; 4378 else { 4379 conf->last_hold = conf->hold_list.next; 4380 conf->bypass_count -= conf->bypass_threshold; 4381 if (conf->bypass_count < 0) 4382 conf->bypass_count = 0; 4383 } 4384 } 4385 } else if (!list_empty(&conf->hold_list) && 4386 ((conf->bypass_threshold && 4387 conf->bypass_count > conf->bypass_threshold) || 4388 atomic_read(&conf->pending_full_writes) == 0)) { 4389 4390 list_for_each_entry(tmp, &conf->hold_list, lru) { 4391 if (conf->worker_cnt_per_group == 0 || 4392 group == ANY_GROUP || 4393 !cpu_online(tmp->cpu) || 4394 cpu_to_group(tmp->cpu) == group) { 4395 sh = tmp; 4396 break; 4397 } 4398 } 4399 4400 if (sh) { 4401 conf->bypass_count -= conf->bypass_threshold; 4402 if (conf->bypass_count < 0) 4403 conf->bypass_count = 0; 4404 } 4405 wg = NULL; 4406 } 4407 4408 if (!sh) 4409 return NULL; 4410 4411 if (wg) { 4412 wg->stripes_cnt--; 4413 sh->group = NULL; 4414 } 4415 list_del_init(&sh->lru); 4416 BUG_ON(atomic_inc_return(&sh->count) != 1); 4417 return sh; 4418 } 4419 4420 struct raid5_plug_cb { 4421 struct blk_plug_cb cb; 4422 struct list_head list; 4423 struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS]; 4424 }; 4425 4426 static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule) 4427 { 4428 struct raid5_plug_cb *cb = container_of( 4429 blk_cb, struct raid5_plug_cb, cb); 4430 struct stripe_head *sh; 4431 struct mddev *mddev = cb->cb.data; 4432 struct r5conf *conf = mddev->private; 4433 int cnt = 0; 4434 int hash; 4435 4436 if (cb->list.next && !list_empty(&cb->list)) { 4437 spin_lock_irq(&conf->device_lock); 4438 while (!list_empty(&cb->list)) { 4439 sh = list_first_entry(&cb->list, struct stripe_head, lru); 4440 list_del_init(&sh->lru); 4441 /* 4442 * avoid race release_stripe_plug() sees 4443 * STRIPE_ON_UNPLUG_LIST clear but the stripe 4444 * is still in our list 4445 */ 4446 smp_mb__before_atomic(); 4447 clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state); 4448 /* 4449 * STRIPE_ON_RELEASE_LIST could be set here. 
In that 4450 * case, the count is always > 1 here 4451 */ 4452 hash = sh->hash_lock_index; 4453 __release_stripe(conf, sh, &cb->temp_inactive_list[hash]); 4454 cnt++; 4455 } 4456 spin_unlock_irq(&conf->device_lock); 4457 } 4458 release_inactive_stripe_list(conf, cb->temp_inactive_list, 4459 NR_STRIPE_HASH_LOCKS); 4460 if (mddev->queue) 4461 trace_block_unplug(mddev->queue, cnt, !from_schedule); 4462 kfree(cb); 4463 } 4464 4465 static void release_stripe_plug(struct mddev *mddev, 4466 struct stripe_head *sh) 4467 { 4468 struct blk_plug_cb *blk_cb = blk_check_plugged( 4469 raid5_unplug, mddev, 4470 sizeof(struct raid5_plug_cb)); 4471 struct raid5_plug_cb *cb; 4472 4473 if (!blk_cb) { 4474 release_stripe(sh); 4475 return; 4476 } 4477 4478 cb = container_of(blk_cb, struct raid5_plug_cb, cb); 4479 4480 if (cb->list.next == NULL) { 4481 int i; 4482 INIT_LIST_HEAD(&cb->list); 4483 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) 4484 INIT_LIST_HEAD(cb->temp_inactive_list + i); 4485 } 4486 4487 if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)) 4488 list_add_tail(&sh->lru, &cb->list); 4489 else 4490 release_stripe(sh); 4491 } 4492 4493 static void make_discard_request(struct mddev *mddev, struct bio *bi) 4494 { 4495 struct r5conf *conf = mddev->private; 4496 sector_t logical_sector, last_sector; 4497 struct stripe_head *sh; 4498 int remaining; 4499 int stripe_sectors; 4500 4501 if (mddev->reshape_position != MaxSector) 4502 /* Skip discard while reshape is happening */ 4503 return; 4504 4505 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1); 4506 last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9); 4507 4508 bi->bi_next = NULL; 4509 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ 4510 4511 stripe_sectors = conf->chunk_sectors * 4512 (conf->raid_disks - conf->max_degraded); 4513 logical_sector = DIV_ROUND_UP_SECTOR_T(logical_sector, 4514 stripe_sectors); 4515 sector_div(last_sector, stripe_sectors); 4516 4517 logical_sector *= conf->chunk_sectors; 4518 last_sector *= conf->chunk_sectors; 4519 4520 for (; logical_sector < last_sector; 4521 logical_sector += STRIPE_SECTORS) { 4522 DEFINE_WAIT(w); 4523 int d; 4524 again: 4525 sh = get_active_stripe(conf, logical_sector, 0, 0, 0); 4526 prepare_to_wait(&conf->wait_for_overlap, &w, 4527 TASK_UNINTERRUPTIBLE); 4528 set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); 4529 if (test_bit(STRIPE_SYNCING, &sh->state)) { 4530 release_stripe(sh); 4531 schedule(); 4532 goto again; 4533 } 4534 clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); 4535 spin_lock_irq(&sh->stripe_lock); 4536 for (d = 0; d < conf->raid_disks; d++) { 4537 if (d == sh->pd_idx || d == sh->qd_idx) 4538 continue; 4539 if (sh->dev[d].towrite || sh->dev[d].toread) { 4540 set_bit(R5_Overlap, &sh->dev[d].flags); 4541 spin_unlock_irq(&sh->stripe_lock); 4542 release_stripe(sh); 4543 schedule(); 4544 goto again; 4545 } 4546 } 4547 set_bit(STRIPE_DISCARD, &sh->state); 4548 finish_wait(&conf->wait_for_overlap, &w); 4549 for (d = 0; d < conf->raid_disks; d++) { 4550 if (d == sh->pd_idx || d == sh->qd_idx) 4551 continue; 4552 sh->dev[d].towrite = bi; 4553 set_bit(R5_OVERWRITE, &sh->dev[d].flags); 4554 raid5_inc_bi_active_stripes(bi); 4555 } 4556 spin_unlock_irq(&sh->stripe_lock); 4557 if (conf->mddev->bitmap) { 4558 for (d = 0; 4559 d < conf->raid_disks - conf->max_degraded; 4560 d++) 4561 bitmap_startwrite(mddev->bitmap, 4562 sh->sector, 4563 STRIPE_SECTORS, 4564 0); 4565 sh->bm_seq = conf->seq_flush + 1; 4566 set_bit(STRIPE_BIT_DELAY, &sh->state); 4567 
} 4568 4569 set_bit(STRIPE_HANDLE, &sh->state); 4570 clear_bit(STRIPE_DELAYED, &sh->state); 4571 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 4572 atomic_inc(&conf->preread_active_stripes); 4573 release_stripe_plug(mddev, sh); 4574 } 4575 4576 remaining = raid5_dec_bi_active_stripes(bi); 4577 if (remaining == 0) { 4578 md_write_end(mddev); 4579 bio_endio(bi, 0); 4580 } 4581 } 4582 4583 static void make_request(struct mddev *mddev, struct bio * bi) 4584 { 4585 struct r5conf *conf = mddev->private; 4586 int dd_idx; 4587 sector_t new_sector; 4588 sector_t logical_sector, last_sector; 4589 struct stripe_head *sh; 4590 const int rw = bio_data_dir(bi); 4591 int remaining; 4592 DEFINE_WAIT(w); 4593 bool do_prepare; 4594 4595 if (unlikely(bi->bi_rw & REQ_FLUSH)) { 4596 md_flush_request(mddev, bi); 4597 return; 4598 } 4599 4600 md_write_start(mddev, bi); 4601 4602 if (rw == READ && 4603 mddev->reshape_position == MaxSector && 4604 chunk_aligned_read(mddev,bi)) 4605 return; 4606 4607 if (unlikely(bi->bi_rw & REQ_DISCARD)) { 4608 make_discard_request(mddev, bi); 4609 return; 4610 } 4611 4612 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1); 4613 last_sector = bio_end_sector(bi); 4614 bi->bi_next = NULL; 4615 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ 4616 4617 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); 4618 for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { 4619 int previous; 4620 int seq; 4621 4622 do_prepare = false; 4623 retry: 4624 seq = read_seqcount_begin(&conf->gen_lock); 4625 previous = 0; 4626 if (do_prepare) 4627 prepare_to_wait(&conf->wait_for_overlap, &w, 4628 TASK_UNINTERRUPTIBLE); 4629 if (unlikely(conf->reshape_progress != MaxSector)) { 4630 /* spinlock is needed as reshape_progress may be 4631 * 64bit on a 32bit platform, and so it might be 4632 * possible to see a half-updated value 4633 * Of course reshape_progress could change after 4634 * the lock is dropped, so once we get a reference 4635 * to the stripe that we think it is, we will have 4636 * to check again. 4637 */ 4638 spin_lock_irq(&conf->device_lock); 4639 if (mddev->reshape_backwards 4640 ? logical_sector < conf->reshape_progress 4641 : logical_sector >= conf->reshape_progress) { 4642 previous = 1; 4643 } else { 4644 if (mddev->reshape_backwards 4645 ? logical_sector < conf->reshape_safe 4646 : logical_sector >= conf->reshape_safe) { 4647 spin_unlock_irq(&conf->device_lock); 4648 schedule(); 4649 do_prepare = true; 4650 goto retry; 4651 } 4652 } 4653 spin_unlock_irq(&conf->device_lock); 4654 } 4655 4656 new_sector = raid5_compute_sector(conf, logical_sector, 4657 previous, 4658 &dd_idx, NULL); 4659 pr_debug("raid456: make_request, sector %llu logical %llu\n", 4660 (unsigned long long)new_sector, 4661 (unsigned long long)logical_sector); 4662 4663 sh = get_active_stripe(conf, new_sector, previous, 4664 (bi->bi_rw&RWA_MASK), 0); 4665 if (sh) { 4666 if (unlikely(previous)) { 4667 /* expansion might have moved on while waiting for a 4668 * stripe, so we must do the range check again. 4669 * Expansion could still move past after this 4670 * test, but as we are holding a reference to 4671 * 'sh', we know that if that happens, 4672 * STRIPE_EXPANDING will get set and the expansion 4673 * won't proceed until we finish with the stripe. 4674 */ 4675 int must_retry = 0; 4676 spin_lock_irq(&conf->device_lock); 4677 if (mddev->reshape_backwards 4678 ? 
logical_sector >= conf->reshape_progress 4679 : logical_sector < conf->reshape_progress) 4680 /* mismatch, need to try again */ 4681 must_retry = 1; 4682 spin_unlock_irq(&conf->device_lock); 4683 if (must_retry) { 4684 release_stripe(sh); 4685 schedule(); 4686 do_prepare = true; 4687 goto retry; 4688 } 4689 } 4690 if (read_seqcount_retry(&conf->gen_lock, seq)) { 4691 /* Might have got the wrong stripe_head 4692 * by accident 4693 */ 4694 release_stripe(sh); 4695 goto retry; 4696 } 4697 4698 if (rw == WRITE && 4699 logical_sector >= mddev->suspend_lo && 4700 logical_sector < mddev->suspend_hi) { 4701 release_stripe(sh); 4702 /* As the suspend_* range is controlled by 4703 * userspace, we want an interruptible 4704 * wait. 4705 */ 4706 flush_signals(current); 4707 prepare_to_wait(&conf->wait_for_overlap, 4708 &w, TASK_INTERRUPTIBLE); 4709 if (logical_sector >= mddev->suspend_lo && 4710 logical_sector < mddev->suspend_hi) { 4711 schedule(); 4712 do_prepare = true; 4713 } 4714 goto retry; 4715 } 4716 4717 if (test_bit(STRIPE_EXPANDING, &sh->state) || 4718 !add_stripe_bio(sh, bi, dd_idx, rw)) { 4719 /* Stripe is busy expanding or 4720 * add failed due to overlap. Flush everything 4721 * and wait a while 4722 */ 4723 md_wakeup_thread(mddev->thread); 4724 release_stripe(sh); 4725 schedule(); 4726 do_prepare = true; 4727 goto retry; 4728 } 4729 set_bit(STRIPE_HANDLE, &sh->state); 4730 clear_bit(STRIPE_DELAYED, &sh->state); 4731 if ((bi->bi_rw & REQ_SYNC) && 4732 !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 4733 atomic_inc(&conf->preread_active_stripes); 4734 release_stripe_plug(mddev, sh); 4735 } else { 4736 /* cannot get stripe for read-ahead, just give-up */ 4737 clear_bit(BIO_UPTODATE, &bi->bi_flags); 4738 break; 4739 } 4740 } 4741 finish_wait(&conf->wait_for_overlap, &w); 4742 4743 remaining = raid5_dec_bi_active_stripes(bi); 4744 if (remaining == 0) { 4745 4746 if ( rw == WRITE ) 4747 md_write_end(mddev); 4748 4749 trace_block_bio_complete(bdev_get_queue(bi->bi_bdev), 4750 bi, 0); 4751 bio_endio(bi, 0); 4752 } 4753 } 4754 4755 static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks); 4756 4757 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped) 4758 { 4759 /* reshaping is quite different to recovery/resync so it is 4760 * handled quite separately ... here. 4761 * 4762 * On each call to sync_request, we gather one chunk worth of 4763 * destination stripes and flag them as expanding. 4764 * Then we find all the source stripes and request reads. 4765 * As the reads complete, handle_stripe will copy the data 4766 * into the destination stripe and release that stripe. 
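 * Progress is checkpointed to the superblock periodically (see the
 * reshape_progress/reshape_safe handling below) so that an
 * interrupted reshape can be restarted safely.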
4767 */ 4768 struct r5conf *conf = mddev->private; 4769 struct stripe_head *sh; 4770 sector_t first_sector, last_sector; 4771 int raid_disks = conf->previous_raid_disks; 4772 int data_disks = raid_disks - conf->max_degraded; 4773 int new_data_disks = conf->raid_disks - conf->max_degraded; 4774 int i; 4775 int dd_idx; 4776 sector_t writepos, readpos, safepos; 4777 sector_t stripe_addr; 4778 int reshape_sectors; 4779 struct list_head stripes; 4780 4781 if (sector_nr == 0) { 4782 /* If restarting in the middle, skip the initial sectors */ 4783 if (mddev->reshape_backwards && 4784 conf->reshape_progress < raid5_size(mddev, 0, 0)) { 4785 sector_nr = raid5_size(mddev, 0, 0) 4786 - conf->reshape_progress; 4787 } else if (!mddev->reshape_backwards && 4788 conf->reshape_progress > 0) 4789 sector_nr = conf->reshape_progress; 4790 sector_div(sector_nr, new_data_disks); 4791 if (sector_nr) { 4792 mddev->curr_resync_completed = sector_nr; 4793 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 4794 *skipped = 1; 4795 return sector_nr; 4796 } 4797 } 4798 4799 /* We need to process a full chunk at a time. 4800 * If old and new chunk sizes differ, we need to process the 4801 * largest of these 4802 */ 4803 if (mddev->new_chunk_sectors > mddev->chunk_sectors) 4804 reshape_sectors = mddev->new_chunk_sectors; 4805 else 4806 reshape_sectors = mddev->chunk_sectors; 4807 4808 /* We update the metadata at least every 10 seconds, or when 4809 * the data about to be copied would over-write the source of 4810 * the data at the front of the range. i.e. one new_stripe 4811 * along from reshape_progress new_maps to after where 4812 * reshape_safe old_maps to 4813 */ 4814 writepos = conf->reshape_progress; 4815 sector_div(writepos, new_data_disks); 4816 readpos = conf->reshape_progress; 4817 sector_div(readpos, data_disks); 4818 safepos = conf->reshape_safe; 4819 sector_div(safepos, data_disks); 4820 if (mddev->reshape_backwards) { 4821 writepos -= min_t(sector_t, reshape_sectors, writepos); 4822 readpos += reshape_sectors; 4823 safepos += reshape_sectors; 4824 } else { 4825 writepos += reshape_sectors; 4826 readpos -= min_t(sector_t, reshape_sectors, readpos); 4827 safepos -= min_t(sector_t, reshape_sectors, safepos); 4828 } 4829 4830 /* Having calculated the 'writepos' possibly use it 4831 * to set 'stripe_addr' which is where we will write to. 4832 */ 4833 if (mddev->reshape_backwards) { 4834 BUG_ON(conf->reshape_progress == 0); 4835 stripe_addr = writepos; 4836 BUG_ON((mddev->dev_sectors & 4837 ~((sector_t)reshape_sectors - 1)) 4838 - reshape_sectors - stripe_addr 4839 != sector_nr); 4840 } else { 4841 BUG_ON(writepos != sector_nr + reshape_sectors); 4842 stripe_addr = sector_nr; 4843 } 4844 4845 /* 'writepos' is the most advanced device address we might write. 4846 * 'readpos' is the least advanced device address we might read. 4847 * 'safepos' is the least address recorded in the metadata as having 4848 * been reshaped. 4849 * If there is a min_offset_diff, these are adjusted either by 4850 * increasing the safepos/readpos if diff is negative, or 4851 * increasing writepos if diff is positive. 4852 * If 'readpos' is then behind 'writepos', there is no way that we can 4853 * ensure safety in the face of a crash - that must be done by userspace 4854 * making a backup of the data. So in that case there is no particular 4855 * rush to update metadata. 
4856 * Otherwise if 'safepos' is behind 'writepos', then we really need to 4857 * update the metadata to advance 'safepos' to match 'readpos' so that 4858 * we can be safe in the event of a crash. 4859 * So we insist on updating metadata if safepos is behind writepos and 4860 * readpos is beyond writepos. 4861 * In any case, update the metadata every 10 seconds. 4862 * Maybe that number should be configurable, but I'm not sure it is 4863 * worth it.... maybe it could be a multiple of safemode_delay??? 4864 */ 4865 if (conf->min_offset_diff < 0) { 4866 safepos += -conf->min_offset_diff; 4867 readpos += -conf->min_offset_diff; 4868 } else 4869 writepos += conf->min_offset_diff; 4870 4871 if ((mddev->reshape_backwards 4872 ? (safepos > writepos && readpos < writepos) 4873 : (safepos < writepos && readpos > writepos)) || 4874 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { 4875 /* Cannot proceed until we've updated the superblock... */ 4876 wait_event(conf->wait_for_overlap, 4877 atomic_read(&conf->reshape_stripes)==0 4878 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); 4879 if (atomic_read(&conf->reshape_stripes) != 0) 4880 return 0; 4881 mddev->reshape_position = conf->reshape_progress; 4882 mddev->curr_resync_completed = sector_nr; 4883 conf->reshape_checkpoint = jiffies; 4884 set_bit(MD_CHANGE_DEVS, &mddev->flags); 4885 md_wakeup_thread(mddev->thread); 4886 wait_event(mddev->sb_wait, mddev->flags == 0 || 4887 test_bit(MD_RECOVERY_INTR, &mddev->recovery)); 4888 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 4889 return 0; 4890 spin_lock_irq(&conf->device_lock); 4891 conf->reshape_safe = mddev->reshape_position; 4892 spin_unlock_irq(&conf->device_lock); 4893 wake_up(&conf->wait_for_overlap); 4894 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 4895 } 4896 4897 INIT_LIST_HEAD(&stripes); 4898 for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) { 4899 int j; 4900 int skipped_disk = 0; 4901 sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1); 4902 set_bit(STRIPE_EXPANDING, &sh->state); 4903 atomic_inc(&conf->reshape_stripes); 4904 /* If any of this stripe is beyond the end of the old 4905 * array, then we need to zero those blocks 4906 */ 4907 for (j=sh->disks; j--;) { 4908 sector_t s; 4909 if (j == sh->pd_idx) 4910 continue; 4911 if (conf->level == 6 && 4912 j == sh->qd_idx) 4913 continue; 4914 s = compute_blocknr(sh, j, 0); 4915 if (s < raid5_size(mddev, 0, 0)) { 4916 skipped_disk = 1; 4917 continue; 4918 } 4919 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE); 4920 set_bit(R5_Expanded, &sh->dev[j].flags); 4921 set_bit(R5_UPTODATE, &sh->dev[j].flags); 4922 } 4923 if (!skipped_disk) { 4924 set_bit(STRIPE_EXPAND_READY, &sh->state); 4925 set_bit(STRIPE_HANDLE, &sh->state); 4926 } 4927 list_add(&sh->lru, &stripes); 4928 } 4929 spin_lock_irq(&conf->device_lock); 4930 if (mddev->reshape_backwards) 4931 conf->reshape_progress -= reshape_sectors * new_data_disks; 4932 else 4933 conf->reshape_progress += reshape_sectors * new_data_disks; 4934 spin_unlock_irq(&conf->device_lock); 4935 /* Ok, those stripe are ready. We can start scheduling 4936 * reads on the source stripes. 4937 * The source stripes are determined by mapping the first and last 4938 * block on the destination stripes. 
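 * (The raid5_compute_sector() calls below pass 'previous' == 1, so the
 * source addresses are computed using the old array geometry.)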
4939 */ 4940 first_sector = 4941 raid5_compute_sector(conf, stripe_addr*(new_data_disks), 4942 1, &dd_idx, NULL); 4943 last_sector = 4944 raid5_compute_sector(conf, ((stripe_addr+reshape_sectors) 4945 * new_data_disks - 1), 4946 1, &dd_idx, NULL); 4947 if (last_sector >= mddev->dev_sectors) 4948 last_sector = mddev->dev_sectors - 1; 4949 while (first_sector <= last_sector) { 4950 sh = get_active_stripe(conf, first_sector, 1, 0, 1); 4951 set_bit(STRIPE_EXPAND_SOURCE, &sh->state); 4952 set_bit(STRIPE_HANDLE, &sh->state); 4953 release_stripe(sh); 4954 first_sector += STRIPE_SECTORS; 4955 } 4956 /* Now that the sources are clearly marked, we can release 4957 * the destination stripes 4958 */ 4959 while (!list_empty(&stripes)) { 4960 sh = list_entry(stripes.next, struct stripe_head, lru); 4961 list_del_init(&sh->lru); 4962 release_stripe(sh); 4963 } 4964 /* If this takes us to the resync_max point where we have to pause, 4965 * then we need to write out the superblock. 4966 */ 4967 sector_nr += reshape_sectors; 4968 if ((sector_nr - mddev->curr_resync_completed) * 2 4969 >= mddev->resync_max - mddev->curr_resync_completed) { 4970 /* Cannot proceed until we've updated the superblock... */ 4971 wait_event(conf->wait_for_overlap, 4972 atomic_read(&conf->reshape_stripes) == 0 4973 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); 4974 if (atomic_read(&conf->reshape_stripes) != 0) 4975 goto ret; 4976 mddev->reshape_position = conf->reshape_progress; 4977 mddev->curr_resync_completed = sector_nr; 4978 conf->reshape_checkpoint = jiffies; 4979 set_bit(MD_CHANGE_DEVS, &mddev->flags); 4980 md_wakeup_thread(mddev->thread); 4981 wait_event(mddev->sb_wait, 4982 !test_bit(MD_CHANGE_DEVS, &mddev->flags) 4983 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); 4984 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 4985 goto ret; 4986 spin_lock_irq(&conf->device_lock); 4987 conf->reshape_safe = mddev->reshape_position; 4988 spin_unlock_irq(&conf->device_lock); 4989 wake_up(&conf->wait_for_overlap); 4990 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 4991 } 4992 ret: 4993 return reshape_sectors; 4994 } 4995 4996 /* FIXME go_faster isn't used */ 4997 static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster) 4998 { 4999 struct r5conf *conf = mddev->private; 5000 struct stripe_head *sh; 5001 sector_t max_sector = mddev->dev_sectors; 5002 sector_t sync_blocks; 5003 int still_degraded = 0; 5004 int i; 5005 5006 if (sector_nr >= max_sector) { 5007 /* just being told to finish up .. 
nothing much to do */ 5008 5009 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { 5010 end_reshape(conf); 5011 return 0; 5012 } 5013 5014 if (mddev->curr_resync < max_sector) /* aborted */ 5015 bitmap_end_sync(mddev->bitmap, mddev->curr_resync, 5016 &sync_blocks, 1); 5017 else /* completed sync */ 5018 conf->fullsync = 0; 5019 bitmap_close_sync(mddev->bitmap); 5020 5021 return 0; 5022 } 5023 5024 /* Allow raid5_quiesce to complete */ 5025 wait_event(conf->wait_for_overlap, conf->quiesce != 2); 5026 5027 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 5028 return reshape_request(mddev, sector_nr, skipped); 5029 5030 /* No need to check resync_max as we never do more than one 5031 * stripe, and as resync_max will always be on a chunk boundary, 5032 * if the check in md_do_sync didn't fire, there is no chance 5033 * of overstepping resync_max here 5034 */ 5035 5036 /* if there is too many failed drives and we are trying 5037 * to resync, then assert that we are finished, because there is 5038 * nothing we can do. 5039 */ 5040 if (mddev->degraded >= conf->max_degraded && 5041 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 5042 sector_t rv = mddev->dev_sectors - sector_nr; 5043 *skipped = 1; 5044 return rv; 5045 } 5046 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && 5047 !conf->fullsync && 5048 !bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && 5049 sync_blocks >= STRIPE_SECTORS) { 5050 /* we can skip this block, and probably more */ 5051 sync_blocks /= STRIPE_SECTORS; 5052 *skipped = 1; 5053 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */ 5054 } 5055 5056 bitmap_cond_end_sync(mddev->bitmap, sector_nr); 5057 5058 sh = get_active_stripe(conf, sector_nr, 0, 1, 0); 5059 if (sh == NULL) { 5060 sh = get_active_stripe(conf, sector_nr, 0, 0, 0); 5061 /* make sure we don't swamp the stripe cache if someone else 5062 * is trying to get access 5063 */ 5064 schedule_timeout_uninterruptible(1); 5065 } 5066 /* Need to check if array will still be degraded after recovery/resync 5067 * We don't need to check the 'failed' flag as when that gets set, 5068 * recovery aborts. 5069 */ 5070 for (i = 0; i < conf->raid_disks; i++) 5071 if (conf->disks[i].rdev == NULL) 5072 still_degraded = 1; 5073 5074 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); 5075 5076 set_bit(STRIPE_SYNC_REQUESTED, &sh->state); 5077 set_bit(STRIPE_HANDLE, &sh->state); 5078 5079 release_stripe(sh); 5080 5081 return STRIPE_SECTORS; 5082 } 5083 5084 static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) 5085 { 5086 /* We may not be able to submit a whole bio at once as there 5087 * may not be enough stripe_heads available. 5088 * We cannot pre-allocate enough stripe_heads as we may need 5089 * more than exist in the cache (if we allow ever large chunks). 5090 * So we do one stripe head at a time and record in 5091 * ->bi_hw_segments how many have been done. 5092 * 5093 * We *know* that this entire raid_bio is in one chunk, so 5094 * it will be only one 'dd_idx' and only need one call to raid5_compute_sector. 
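 * (Progress is tracked with the raid5_set_bi_processed_stripes() /
 * raid5_bi_processed_stripes() helpers so a partially handled bio can
 * be picked up again later.)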
5095 */ 5096 struct stripe_head *sh; 5097 int dd_idx; 5098 sector_t sector, logical_sector, last_sector; 5099 int scnt = 0; 5100 int remaining; 5101 int handled = 0; 5102 5103 logical_sector = raid_bio->bi_iter.bi_sector & 5104 ~((sector_t)STRIPE_SECTORS-1); 5105 sector = raid5_compute_sector(conf, logical_sector, 5106 0, &dd_idx, NULL); 5107 last_sector = bio_end_sector(raid_bio); 5108 5109 for (; logical_sector < last_sector; 5110 logical_sector += STRIPE_SECTORS, 5111 sector += STRIPE_SECTORS, 5112 scnt++) { 5113 5114 if (scnt < raid5_bi_processed_stripes(raid_bio)) 5115 /* already done this stripe */ 5116 continue; 5117 5118 sh = get_active_stripe(conf, sector, 0, 1, 1); 5119 5120 if (!sh) { 5121 /* failed to get a stripe - must wait */ 5122 raid5_set_bi_processed_stripes(raid_bio, scnt); 5123 conf->retry_read_aligned = raid_bio; 5124 return handled; 5125 } 5126 5127 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) { 5128 release_stripe(sh); 5129 raid5_set_bi_processed_stripes(raid_bio, scnt); 5130 conf->retry_read_aligned = raid_bio; 5131 return handled; 5132 } 5133 5134 set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags); 5135 handle_stripe(sh); 5136 release_stripe(sh); 5137 handled++; 5138 } 5139 remaining = raid5_dec_bi_active_stripes(raid_bio); 5140 if (remaining == 0) { 5141 trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev), 5142 raid_bio, 0); 5143 bio_endio(raid_bio, 0); 5144 } 5145 if (atomic_dec_and_test(&conf->active_aligned_reads)) 5146 wake_up(&conf->wait_for_stripe); 5147 return handled; 5148 } 5149 5150 static int handle_active_stripes(struct r5conf *conf, int group, 5151 struct r5worker *worker, 5152 struct list_head *temp_inactive_list) 5153 { 5154 struct stripe_head *batch[MAX_STRIPE_BATCH], *sh; 5155 int i, batch_size = 0, hash; 5156 bool release_inactive = false; 5157 5158 while (batch_size < MAX_STRIPE_BATCH && 5159 (sh = __get_priority_stripe(conf, group)) != NULL) 5160 batch[batch_size++] = sh; 5161 5162 if (batch_size == 0) { 5163 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) 5164 if (!list_empty(temp_inactive_list + i)) 5165 break; 5166 if (i == NR_STRIPE_HASH_LOCKS) 5167 return batch_size; 5168 release_inactive = true; 5169 } 5170 spin_unlock_irq(&conf->device_lock); 5171 5172 release_inactive_stripe_list(conf, temp_inactive_list, 5173 NR_STRIPE_HASH_LOCKS); 5174 5175 if (release_inactive) { 5176 spin_lock_irq(&conf->device_lock); 5177 return 0; 5178 } 5179 5180 for (i = 0; i < batch_size; i++) 5181 handle_stripe(batch[i]); 5182 5183 cond_resched(); 5184 5185 spin_lock_irq(&conf->device_lock); 5186 for (i = 0; i < batch_size; i++) { 5187 hash = batch[i]->hash_lock_index; 5188 __release_stripe(conf, batch[i], &temp_inactive_list[hash]); 5189 } 5190 return batch_size; 5191 } 5192 5193 static void raid5_do_work(struct work_struct *work) 5194 { 5195 struct r5worker *worker = container_of(work, struct r5worker, work); 5196 struct r5worker_group *group = worker->group; 5197 struct r5conf *conf = group->conf; 5198 int group_id = group - conf->worker_groups; 5199 int handled; 5200 struct blk_plug plug; 5201 5202 pr_debug("+++ raid5worker active\n"); 5203 5204 blk_start_plug(&plug); 5205 handled = 0; 5206 spin_lock_irq(&conf->device_lock); 5207 while (1) { 5208 int batch_size, released; 5209 5210 released = release_stripe_list(conf, worker->temp_inactive_list); 5211 5212 batch_size = handle_active_stripes(conf, group_id, worker, 5213 worker->temp_inactive_list); 5214 worker->working = false; 5215 if (!batch_size && !released) 5216 break; 5217 handled += batch_size; 5218 
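		/* Loop until a pass neither releases nor handles any stripes,
		 * i.e. no work remains queued for this worker's group.
		 */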
} 5219 pr_debug("%d stripes handled\n", handled); 5220 5221 spin_unlock_irq(&conf->device_lock); 5222 blk_finish_plug(&plug); 5223 5224 pr_debug("--- raid5worker inactive\n"); 5225 } 5226 5227 /* 5228 * This is our raid5 kernel thread. 5229 * 5230 * We scan the hash table for stripes which can be handled now. 5231 * During the scan, completed stripes are saved for us by the interrupt 5232 * handler, so that they will not have to wait for our next wakeup. 5233 */ 5234 static void raid5d(struct md_thread *thread) 5235 { 5236 struct mddev *mddev = thread->mddev; 5237 struct r5conf *conf = mddev->private; 5238 int handled; 5239 struct blk_plug plug; 5240 5241 pr_debug("+++ raid5d active\n"); 5242 5243 md_check_recovery(mddev); 5244 5245 blk_start_plug(&plug); 5246 handled = 0; 5247 spin_lock_irq(&conf->device_lock); 5248 while (1) { 5249 struct bio *bio; 5250 int batch_size, released; 5251 5252 released = release_stripe_list(conf, conf->temp_inactive_list); 5253 5254 if ( 5255 !list_empty(&conf->bitmap_list)) { 5256 /* Now is a good time to flush some bitmap updates */ 5257 conf->seq_flush++; 5258 spin_unlock_irq(&conf->device_lock); 5259 bitmap_unplug(mddev->bitmap); 5260 spin_lock_irq(&conf->device_lock); 5261 conf->seq_write = conf->seq_flush; 5262 activate_bit_delay(conf, conf->temp_inactive_list); 5263 } 5264 raid5_activate_delayed(conf); 5265 5266 while ((bio = remove_bio_from_retry(conf))) { 5267 int ok; 5268 spin_unlock_irq(&conf->device_lock); 5269 ok = retry_aligned_read(conf, bio); 5270 spin_lock_irq(&conf->device_lock); 5271 if (!ok) 5272 break; 5273 handled++; 5274 } 5275 5276 batch_size = handle_active_stripes(conf, ANY_GROUP, NULL, 5277 conf->temp_inactive_list); 5278 if (!batch_size && !released) 5279 break; 5280 handled += batch_size; 5281 5282 if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) { 5283 spin_unlock_irq(&conf->device_lock); 5284 md_check_recovery(mddev); 5285 spin_lock_irq(&conf->device_lock); 5286 } 5287 } 5288 pr_debug("%d stripes handled\n", handled); 5289 5290 spin_unlock_irq(&conf->device_lock); 5291 5292 async_tx_issue_pending_all(); 5293 blk_finish_plug(&plug); 5294 5295 pr_debug("--- raid5d inactive\n"); 5296 } 5297 5298 static ssize_t 5299 raid5_show_stripe_cache_size(struct mddev *mddev, char *page) 5300 { 5301 struct r5conf *conf = mddev->private; 5302 if (conf) 5303 return sprintf(page, "%d\n", conf->max_nr_stripes); 5304 else 5305 return 0; 5306 } 5307 5308 int 5309 raid5_set_cache_size(struct mddev *mddev, int size) 5310 { 5311 struct r5conf *conf = mddev->private; 5312 int err; 5313 int hash; 5314 5315 if (size <= 16 || size > 32768) 5316 return -EINVAL; 5317 hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS; 5318 while (size < conf->max_nr_stripes) { 5319 if (drop_one_stripe(conf, hash)) 5320 conf->max_nr_stripes--; 5321 else 5322 break; 5323 hash--; 5324 if (hash < 0) 5325 hash = NR_STRIPE_HASH_LOCKS - 1; 5326 } 5327 err = md_allow_write(mddev); 5328 if (err) 5329 return err; 5330 hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS; 5331 while (size > conf->max_nr_stripes) { 5332 if (grow_one_stripe(conf, hash)) 5333 conf->max_nr_stripes++; 5334 else break; 5335 hash = (hash + 1) % NR_STRIPE_HASH_LOCKS; 5336 } 5337 return 0; 5338 } 5339 EXPORT_SYMBOL(raid5_set_cache_size); 5340 5341 static ssize_t 5342 raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len) 5343 { 5344 struct r5conf *conf = mddev->private; 5345 unsigned long new; 5346 int err; 5347 5348 if (len >= PAGE_SIZE) 5349 return -EINVAL; 5350 if (!conf) 5351 return 
-ENODEV; 5352 5353 if (kstrtoul(page, 10, &new)) 5354 return -EINVAL; 5355 err = raid5_set_cache_size(mddev, new); 5356 if (err) 5357 return err; 5358 return len; 5359 } 5360 5361 static struct md_sysfs_entry 5362 raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR, 5363 raid5_show_stripe_cache_size, 5364 raid5_store_stripe_cache_size); 5365 5366 static ssize_t 5367 raid5_show_preread_threshold(struct mddev *mddev, char *page) 5368 { 5369 struct r5conf *conf = mddev->private; 5370 if (conf) 5371 return sprintf(page, "%d\n", conf->bypass_threshold); 5372 else 5373 return 0; 5374 } 5375 5376 static ssize_t 5377 raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len) 5378 { 5379 struct r5conf *conf = mddev->private; 5380 unsigned long new; 5381 if (len >= PAGE_SIZE) 5382 return -EINVAL; 5383 if (!conf) 5384 return -ENODEV; 5385 5386 if (kstrtoul(page, 10, &new)) 5387 return -EINVAL; 5388 if (new > conf->max_nr_stripes) 5389 return -EINVAL; 5390 conf->bypass_threshold = new; 5391 return len; 5392 } 5393 5394 static struct md_sysfs_entry 5395 raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold, 5396 S_IRUGO | S_IWUSR, 5397 raid5_show_preread_threshold, 5398 raid5_store_preread_threshold); 5399 5400 static ssize_t 5401 raid5_show_skip_copy(struct mddev *mddev, char *page) 5402 { 5403 struct r5conf *conf = mddev->private; 5404 if (conf) 5405 return sprintf(page, "%d\n", conf->skip_copy); 5406 else 5407 return 0; 5408 } 5409 5410 static ssize_t 5411 raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len) 5412 { 5413 struct r5conf *conf = mddev->private; 5414 unsigned long new; 5415 if (len >= PAGE_SIZE) 5416 return -EINVAL; 5417 if (!conf) 5418 return -ENODEV; 5419 5420 if (kstrtoul(page, 10, &new)) 5421 return -EINVAL; 5422 new = !!new; 5423 if (new == conf->skip_copy) 5424 return len; 5425 5426 mddev_suspend(mddev); 5427 conf->skip_copy = new; 5428 if (new) 5429 mddev->queue->backing_dev_info.capabilities |= 5430 BDI_CAP_STABLE_WRITES; 5431 else 5432 mddev->queue->backing_dev_info.capabilities &= 5433 ~BDI_CAP_STABLE_WRITES; 5434 mddev_resume(mddev); 5435 return len; 5436 } 5437 5438 static struct md_sysfs_entry 5439 raid5_skip_copy = __ATTR(skip_copy, S_IRUGO | S_IWUSR, 5440 raid5_show_skip_copy, 5441 raid5_store_skip_copy); 5442 5443 5444 static ssize_t 5445 stripe_cache_active_show(struct mddev *mddev, char *page) 5446 { 5447 struct r5conf *conf = mddev->private; 5448 if (conf) 5449 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); 5450 else 5451 return 0; 5452 } 5453 5454 static struct md_sysfs_entry 5455 raid5_stripecache_active = __ATTR_RO(stripe_cache_active); 5456 5457 static ssize_t 5458 raid5_show_group_thread_cnt(struct mddev *mddev, char *page) 5459 { 5460 struct r5conf *conf = mddev->private; 5461 if (conf) 5462 return sprintf(page, "%d\n", conf->worker_cnt_per_group); 5463 else 5464 return 0; 5465 } 5466 5467 static int alloc_thread_groups(struct r5conf *conf, int cnt, 5468 int *group_cnt, 5469 int *worker_cnt_per_group, 5470 struct r5worker_group **worker_groups); 5471 static ssize_t 5472 raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) 5473 { 5474 struct r5conf *conf = mddev->private; 5475 unsigned long new; 5476 int err; 5477 struct r5worker_group *new_groups, *old_groups; 5478 int group_cnt, worker_cnt_per_group; 5479 5480 if (len >= PAGE_SIZE) 5481 return -EINVAL; 5482 if (!conf) 5483 return -ENODEV; 5484 5485 if (kstrtoul(page, 10, &new)) 5486 return -EINVAL; 
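	/* Changing the thread-group count quiesces the array: the code
	 * below suspends the mddev, installs freshly allocated worker
	 * groups under device_lock, then resumes.
	 */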
5487 5488 if (new == conf->worker_cnt_per_group) 5489 return len; 5490 5491 mddev_suspend(mddev); 5492 5493 old_groups = conf->worker_groups; 5494 if (old_groups) 5495 flush_workqueue(raid5_wq); 5496 5497 err = alloc_thread_groups(conf, new, 5498 &group_cnt, &worker_cnt_per_group, 5499 &new_groups); 5500 if (!err) { 5501 spin_lock_irq(&conf->device_lock); 5502 conf->group_cnt = group_cnt; 5503 conf->worker_cnt_per_group = worker_cnt_per_group; 5504 conf->worker_groups = new_groups; 5505 spin_unlock_irq(&conf->device_lock); 5506 5507 if (old_groups) 5508 kfree(old_groups[0].workers); 5509 kfree(old_groups); 5510 } 5511 5512 mddev_resume(mddev); 5513 5514 if (err) 5515 return err; 5516 return len; 5517 } 5518 5519 static struct md_sysfs_entry 5520 raid5_group_thread_cnt = __ATTR(group_thread_cnt, S_IRUGO | S_IWUSR, 5521 raid5_show_group_thread_cnt, 5522 raid5_store_group_thread_cnt); 5523 5524 static struct attribute *raid5_attrs[] = { 5525 &raid5_stripecache_size.attr, 5526 &raid5_stripecache_active.attr, 5527 &raid5_preread_bypass_threshold.attr, 5528 &raid5_group_thread_cnt.attr, 5529 &raid5_skip_copy.attr, 5530 NULL, 5531 }; 5532 static struct attribute_group raid5_attrs_group = { 5533 .name = NULL, 5534 .attrs = raid5_attrs, 5535 }; 5536 5537 static int alloc_thread_groups(struct r5conf *conf, int cnt, 5538 int *group_cnt, 5539 int *worker_cnt_per_group, 5540 struct r5worker_group **worker_groups) 5541 { 5542 int i, j, k; 5543 ssize_t size; 5544 struct r5worker *workers; 5545 5546 *worker_cnt_per_group = cnt; 5547 if (cnt == 0) { 5548 *group_cnt = 0; 5549 *worker_groups = NULL; 5550 return 0; 5551 } 5552 *group_cnt = num_possible_nodes(); 5553 size = sizeof(struct r5worker) * cnt; 5554 workers = kzalloc(size * *group_cnt, GFP_NOIO); 5555 *worker_groups = kzalloc(sizeof(struct r5worker_group) * 5556 *group_cnt, GFP_NOIO); 5557 if (!*worker_groups || !workers) { 5558 kfree(workers); 5559 kfree(*worker_groups); 5560 return -ENOMEM; 5561 } 5562 5563 for (i = 0; i < *group_cnt; i++) { 5564 struct r5worker_group *group; 5565 5566 group = &(*worker_groups)[i]; 5567 INIT_LIST_HEAD(&group->handle_list); 5568 group->conf = conf; 5569 group->workers = workers + i * cnt; 5570 5571 for (j = 0; j < cnt; j++) { 5572 struct r5worker *worker = group->workers + j; 5573 worker->group = group; 5574 INIT_WORK(&worker->work, raid5_do_work); 5575 5576 for (k = 0; k < NR_STRIPE_HASH_LOCKS; k++) 5577 INIT_LIST_HEAD(worker->temp_inactive_list + k); 5578 } 5579 } 5580 5581 return 0; 5582 } 5583 5584 static void free_thread_groups(struct r5conf *conf) 5585 { 5586 if (conf->worker_groups) 5587 kfree(conf->worker_groups[0].workers); 5588 kfree(conf->worker_groups); 5589 conf->worker_groups = NULL; 5590 } 5591 5592 static sector_t 5593 raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks) 5594 { 5595 struct r5conf *conf = mddev->private; 5596 5597 if (!sectors) 5598 sectors = mddev->dev_sectors; 5599 if (!raid_disks) 5600 /* size is defined by the smallest of previous and new size */ 5601 raid_disks = min(conf->raid_disks, conf->previous_raid_disks); 5602 5603 sectors &= ~((sector_t)mddev->chunk_sectors - 1); 5604 sectors &= ~((sector_t)mddev->new_chunk_sectors - 1); 5605 return sectors * (raid_disks - conf->max_degraded); 5606 } 5607 5608 static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) 5609 { 5610 safe_put_page(percpu->spare_page); 5611 kfree(percpu->scribble); 5612 percpu->spare_page = NULL; 5613 percpu->scribble = NULL; 5614 } 5615 5616 static int 
alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) 5617 { 5618 if (conf->level == 6 && !percpu->spare_page) 5619 percpu->spare_page = alloc_page(GFP_KERNEL); 5620 if (!percpu->scribble) 5621 percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL); 5622 5623 if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) { 5624 free_scratch_buffer(conf, percpu); 5625 return -ENOMEM; 5626 } 5627 5628 return 0; 5629 } 5630 5631 static void raid5_free_percpu(struct r5conf *conf) 5632 { 5633 unsigned long cpu; 5634 5635 if (!conf->percpu) 5636 return; 5637 5638 #ifdef CONFIG_HOTPLUG_CPU 5639 unregister_cpu_notifier(&conf->cpu_notify); 5640 #endif 5641 5642 get_online_cpus(); 5643 for_each_possible_cpu(cpu) 5644 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); 5645 put_online_cpus(); 5646 5647 free_percpu(conf->percpu); 5648 } 5649 5650 static void free_conf(struct r5conf *conf) 5651 { 5652 free_thread_groups(conf); 5653 shrink_stripes(conf); 5654 raid5_free_percpu(conf); 5655 kfree(conf->disks); 5656 kfree(conf->stripe_hashtbl); 5657 kfree(conf); 5658 } 5659 5660 #ifdef CONFIG_HOTPLUG_CPU 5661 static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action, 5662 void *hcpu) 5663 { 5664 struct r5conf *conf = container_of(nfb, struct r5conf, cpu_notify); 5665 long cpu = (long)hcpu; 5666 struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu); 5667 5668 switch (action) { 5669 case CPU_UP_PREPARE: 5670 case CPU_UP_PREPARE_FROZEN: 5671 if (alloc_scratch_buffer(conf, percpu)) { 5672 pr_err("%s: failed memory allocation for cpu%ld\n", 5673 __func__, cpu); 5674 return notifier_from_errno(-ENOMEM); 5675 } 5676 break; 5677 case CPU_DEAD: 5678 case CPU_DEAD_FROZEN: 5679 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); 5680 break; 5681 default: 5682 break; 5683 } 5684 return NOTIFY_OK; 5685 } 5686 #endif 5687 5688 static int raid5_alloc_percpu(struct r5conf *conf) 5689 { 5690 unsigned long cpu; 5691 int err = 0; 5692 5693 conf->percpu = alloc_percpu(struct raid5_percpu); 5694 if (!conf->percpu) 5695 return -ENOMEM; 5696 5697 #ifdef CONFIG_HOTPLUG_CPU 5698 conf->cpu_notify.notifier_call = raid456_cpu_notify; 5699 conf->cpu_notify.priority = 0; 5700 err = register_cpu_notifier(&conf->cpu_notify); 5701 if (err) 5702 return err; 5703 #endif 5704 5705 get_online_cpus(); 5706 for_each_present_cpu(cpu) { 5707 err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); 5708 if (err) { 5709 pr_err("%s: failed memory allocation for cpu%ld\n", 5710 __func__, cpu); 5711 break; 5712 } 5713 } 5714 put_online_cpus(); 5715 5716 return err; 5717 } 5718 5719 static struct r5conf *setup_conf(struct mddev *mddev) 5720 { 5721 struct r5conf *conf; 5722 int raid_disk, memory, max_disks; 5723 struct md_rdev *rdev; 5724 struct disk_info *disk; 5725 char pers_name[6]; 5726 int i; 5727 int group_cnt, worker_cnt_per_group; 5728 struct r5worker_group *new_group; 5729 5730 if (mddev->new_level != 5 5731 && mddev->new_level != 4 5732 && mddev->new_level != 6) { 5733 printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n", 5734 mdname(mddev), mddev->new_level); 5735 return ERR_PTR(-EIO); 5736 } 5737 if ((mddev->new_level == 5 5738 && !algorithm_valid_raid5(mddev->new_layout)) || 5739 (mddev->new_level == 6 5740 && !algorithm_valid_raid6(mddev->new_layout))) { 5741 printk(KERN_ERR "md/raid:%s: layout %d not supported\n", 5742 mdname(mddev), mddev->new_layout); 5743 return ERR_PTR(-EIO); 5744 } 5745 if (mddev->new_level == 6 && mddev->raid_disks < 4) { 5746 
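/* A RAID-6 stripe carries two parity blocks (P and Q), so at least two data devices plus the two parity devices are required. */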
printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n", 5747 mdname(mddev), mddev->raid_disks); 5748 return ERR_PTR(-EINVAL); 5749 } 5750 5751 if (!mddev->new_chunk_sectors || 5752 (mddev->new_chunk_sectors << 9) % PAGE_SIZE || 5753 !is_power_of_2(mddev->new_chunk_sectors)) { 5754 printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n", 5755 mdname(mddev), mddev->new_chunk_sectors << 9); 5756 return ERR_PTR(-EINVAL); 5757 } 5758 5759 conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL); 5760 if (conf == NULL) 5761 goto abort; 5762 /* Don't enable multi-threading by default*/ 5763 if (!alloc_thread_groups(conf, 0, &group_cnt, &worker_cnt_per_group, 5764 &new_group)) { 5765 conf->group_cnt = group_cnt; 5766 conf->worker_cnt_per_group = worker_cnt_per_group; 5767 conf->worker_groups = new_group; 5768 } else 5769 goto abort; 5770 spin_lock_init(&conf->device_lock); 5771 seqcount_init(&conf->gen_lock); 5772 init_waitqueue_head(&conf->wait_for_stripe); 5773 init_waitqueue_head(&conf->wait_for_overlap); 5774 INIT_LIST_HEAD(&conf->handle_list); 5775 INIT_LIST_HEAD(&conf->hold_list); 5776 INIT_LIST_HEAD(&conf->delayed_list); 5777 INIT_LIST_HEAD(&conf->bitmap_list); 5778 init_llist_head(&conf->released_stripes); 5779 atomic_set(&conf->active_stripes, 0); 5780 atomic_set(&conf->preread_active_stripes, 0); 5781 atomic_set(&conf->active_aligned_reads, 0); 5782 conf->bypass_threshold = BYPASS_THRESHOLD; 5783 conf->recovery_disabled = mddev->recovery_disabled - 1; 5784 5785 conf->raid_disks = mddev->raid_disks; 5786 if (mddev->reshape_position == MaxSector) 5787 conf->previous_raid_disks = mddev->raid_disks; 5788 else 5789 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; 5790 max_disks = max(conf->raid_disks, conf->previous_raid_disks); 5791 conf->scribble_len = scribble_len(max_disks); 5792 5793 conf->disks = kzalloc(max_disks * sizeof(struct disk_info), 5794 GFP_KERNEL); 5795 if (!conf->disks) 5796 goto abort; 5797 5798 conf->mddev = mddev; 5799 5800 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) 5801 goto abort; 5802 5803 /* We init hash_locks[0] separately to that it can be used 5804 * as the reference lock in the spin_lock_nest_lock() call 5805 * in lock_all_device_hash_locks_irq in order to convince 5806 * lockdep that we know what we are doing. 
5807 */ 5808 spin_lock_init(conf->hash_locks); 5809 for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++) 5810 spin_lock_init(conf->hash_locks + i); 5811 5812 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) 5813 INIT_LIST_HEAD(conf->inactive_list + i); 5814 5815 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) 5816 INIT_LIST_HEAD(conf->temp_inactive_list + i); 5817 5818 conf->level = mddev->new_level; 5819 if (raid5_alloc_percpu(conf) != 0) 5820 goto abort; 5821 5822 pr_debug("raid456: run(%s) called.\n", mdname(mddev)); 5823 5824 rdev_for_each(rdev, mddev) { 5825 raid_disk = rdev->raid_disk; 5826 if (raid_disk >= max_disks 5827 || raid_disk < 0) 5828 continue; 5829 disk = conf->disks + raid_disk; 5830 5831 if (test_bit(Replacement, &rdev->flags)) { 5832 if (disk->replacement) 5833 goto abort; 5834 disk->replacement = rdev; 5835 } else { 5836 if (disk->rdev) 5837 goto abort; 5838 disk->rdev = rdev; 5839 } 5840 5841 if (test_bit(In_sync, &rdev->flags)) { 5842 char b[BDEVNAME_SIZE]; 5843 printk(KERN_INFO "md/raid:%s: device %s operational as raid" 5844 " disk %d\n", 5845 mdname(mddev), bdevname(rdev->bdev, b), raid_disk); 5846 } else if (rdev->saved_raid_disk != raid_disk) 5847 /* Cannot rely on bitmap to complete recovery */ 5848 conf->fullsync = 1; 5849 } 5850 5851 conf->chunk_sectors = mddev->new_chunk_sectors; 5852 conf->level = mddev->new_level; 5853 if (conf->level == 6) 5854 conf->max_degraded = 2; 5855 else 5856 conf->max_degraded = 1; 5857 conf->algorithm = mddev->new_layout; 5858 conf->reshape_progress = mddev->reshape_position; 5859 if (conf->reshape_progress != MaxSector) { 5860 conf->prev_chunk_sectors = mddev->chunk_sectors; 5861 conf->prev_algo = mddev->layout; 5862 } 5863 5864 memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + 5865 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; 5866 atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS); 5867 if (grow_stripes(conf, NR_STRIPES)) { 5868 printk(KERN_ERR 5869 "md/raid:%s: couldn't allocate %dkB for buffers\n", 5870 mdname(mddev), memory); 5871 goto abort; 5872 } else 5873 printk(KERN_INFO "md/raid:%s: allocated %dkB\n", 5874 mdname(mddev), memory); 5875 5876 sprintf(pers_name, "raid%d", mddev->new_level); 5877 conf->thread = md_register_thread(raid5d, mddev, pers_name); 5878 if (!conf->thread) { 5879 printk(KERN_ERR 5880 "md/raid:%s: couldn't allocate thread.\n", 5881 mdname(mddev)); 5882 goto abort; 5883 } 5884 5885 return conf; 5886 5887 abort: 5888 if (conf) { 5889 free_conf(conf); 5890 return ERR_PTR(-EIO); 5891 } else 5892 return ERR_PTR(-ENOMEM); 5893 } 5894 5895 5896 static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded) 5897 { 5898 switch (algo) { 5899 case ALGORITHM_PARITY_0: 5900 if (raid_disk < max_degraded) 5901 return 1; 5902 break; 5903 case ALGORITHM_PARITY_N: 5904 if (raid_disk >= raid_disks - max_degraded) 5905 return 1; 5906 break; 5907 case ALGORITHM_PARITY_0_6: 5908 if (raid_disk == 0 || 5909 raid_disk == raid_disks - 1) 5910 return 1; 5911 break; 5912 case ALGORITHM_LEFT_ASYMMETRIC_6: 5913 case ALGORITHM_RIGHT_ASYMMETRIC_6: 5914 case ALGORITHM_LEFT_SYMMETRIC_6: 5915 case ALGORITHM_RIGHT_SYMMETRIC_6: 5916 if (raid_disk == raid_disks - 1) 5917 return 1; 5918 } 5919 return 0; 5920 } 5921 5922 static int run(struct mddev *mddev) 5923 { 5924 struct r5conf *conf; 5925 int working_disks = 0; 5926 int dirty_parity_disks = 0; 5927 struct md_rdev *rdev; 5928 sector_t reshape_offset = 0; 5929 int i; 5930 long long min_offset_diff = 0; 5931 int first = 1; 5932 5933 if (mddev->recovery_cp 
!= MaxSector) 5934 printk(KERN_NOTICE "md/raid:%s: not clean" 5935 " -- starting background reconstruction\n", 5936 mdname(mddev)); 5937 5938 rdev_for_each(rdev, mddev) { 5939 long long diff; 5940 if (rdev->raid_disk < 0) 5941 continue; 5942 diff = (rdev->new_data_offset - rdev->data_offset); 5943 if (first) { 5944 min_offset_diff = diff; 5945 first = 0; 5946 } else if (mddev->reshape_backwards && 5947 diff < min_offset_diff) 5948 min_offset_diff = diff; 5949 else if (!mddev->reshape_backwards && 5950 diff > min_offset_diff) 5951 min_offset_diff = diff; 5952 } 5953 5954 if (mddev->reshape_position != MaxSector) { 5955 /* Check that we can continue the reshape. 5956 * Difficulties arise if the stripe we would write to 5957 * next is at or after the stripe we would read from next. 5958 * For a reshape that changes the number of devices, this 5959 * is only possible for a very short time, and mdadm makes 5960 * sure that time appears to have passed before assembling 5961 * the array. So we fail if that time hasn't passed. 5962 * For a reshape that keeps the number of devices the same 5963 * mdadm must be monitoring the reshape and keeping the 5964 * critical areas read-only and backed up. It will start 5965 * the array in read-only mode, so we check for that. 5966 */ 5967 sector_t here_new, here_old; 5968 int old_disks; 5969 int max_degraded = (mddev->level == 6 ? 2 : 1); 5970 5971 if (mddev->new_level != mddev->level) { 5972 printk(KERN_ERR "md/raid:%s: unsupported reshape " 5973 "required - aborting.\n", 5974 mdname(mddev)); 5975 return -EINVAL; 5976 } 5977 old_disks = mddev->raid_disks - mddev->delta_disks; 5978 /* reshape_position must be on a new-stripe boundary, and one 5979 * further up in new geometry must map after here in old 5980 * geometry. 5981 */ 5982 here_new = mddev->reshape_position; 5983 if (sector_div(here_new, mddev->new_chunk_sectors * 5984 (mddev->raid_disks - max_degraded))) { 5985 printk(KERN_ERR "md/raid:%s: reshape_position not " 5986 "on a stripe boundary\n", mdname(mddev)); 5987 return -EINVAL; 5988 } 5989 reshape_offset = here_new * mddev->new_chunk_sectors; 5990 /* here_new is the stripe we will write to */ 5991 here_old = mddev->reshape_position; 5992 sector_div(here_old, mddev->chunk_sectors * 5993 (old_disks-max_degraded)); 5994 /* here_old is the first stripe that we might need to read 5995 * from */ 5996 if (mddev->delta_disks == 0) { 5997 if ((here_new * mddev->new_chunk_sectors != 5998 here_old * mddev->chunk_sectors)) { 5999 printk(KERN_ERR "md/raid:%s: reshape position is" 6000 " confused - aborting\n", mdname(mddev)); 6001 return -EINVAL; 6002 } 6003 /* We cannot be sure it is safe to start an in-place 6004 * reshape. It is only safe if user-space is monitoring 6005 * and taking constant backups. 6006 * mdadm always starts a situation like this in 6007 * readonly mode so it can take control before 6008 * allowing any writes. So just check for that. 6009 */ 6010 if (abs(min_offset_diff) >= mddev->chunk_sectors && 6011 abs(min_offset_diff) >= mddev->new_chunk_sectors) 6012 /* not really in-place - so OK */; 6013 else if (mddev->ro == 0) { 6014 printk(KERN_ERR "md/raid:%s: in-place reshape " 6015 "must be started in read-only mode " 6016 "- aborting\n", 6017 mdname(mddev)); 6018 return -EINVAL; 6019 } 6020 } else if (mddev->reshape_backwards 6021 ?
(here_new * mddev->new_chunk_sectors + min_offset_diff <= 6022 here_old * mddev->chunk_sectors) 6023 : (here_new * mddev->new_chunk_sectors >= 6024 here_old * mddev->chunk_sectors + (-min_offset_diff))) { 6025 /* Reading from the same stripe as writing to - bad */ 6026 printk(KERN_ERR "md/raid:%s: reshape_position too early for " 6027 "auto-recovery - aborting.\n", 6028 mdname(mddev)); 6029 return -EINVAL; 6030 } 6031 printk(KERN_INFO "md/raid:%s: reshape will continue\n", 6032 mdname(mddev)); 6033 /* OK, we should be able to continue; */ 6034 } else { 6035 BUG_ON(mddev->level != mddev->new_level); 6036 BUG_ON(mddev->layout != mddev->new_layout); 6037 BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors); 6038 BUG_ON(mddev->delta_disks != 0); 6039 } 6040 6041 if (mddev->private == NULL) 6042 conf = setup_conf(mddev); 6043 else 6044 conf = mddev->private; 6045 6046 if (IS_ERR(conf)) 6047 return PTR_ERR(conf); 6048 6049 conf->min_offset_diff = min_offset_diff; 6050 mddev->thread = conf->thread; 6051 conf->thread = NULL; 6052 mddev->private = conf; 6053 6054 for (i = 0; i < conf->raid_disks && conf->previous_raid_disks; 6055 i++) { 6056 rdev = conf->disks[i].rdev; 6057 if (!rdev && conf->disks[i].replacement) { 6058 /* The replacement is all we have yet */ 6059 rdev = conf->disks[i].replacement; 6060 conf->disks[i].replacement = NULL; 6061 clear_bit(Replacement, &rdev->flags); 6062 conf->disks[i].rdev = rdev; 6063 } 6064 if (!rdev) 6065 continue; 6066 if (conf->disks[i].replacement && 6067 conf->reshape_progress != MaxSector) { 6068 /* replacements and reshape simply do not mix. */ 6069 printk(KERN_ERR "md: cannot handle concurrent " 6070 "replacement and reshape.\n"); 6071 goto abort; 6072 } 6073 if (test_bit(In_sync, &rdev->flags)) { 6074 working_disks++; 6075 continue; 6076 } 6077 /* This disk is not fully in-sync. However if it 6078 * just stored parity (beyond the recovery_offset), 6079 * then we don't need to be concerned about the 6080 * array being dirty. 6081 * When reshape goes 'backwards', we never have 6082 * partially completed devices, so we only need 6083 * to worry about reshape going forwards. 6084 */ 6085 /* Hack because v0.91 doesn't store recovery_offset properly. */ 6086 if (mddev->major_version == 0 && 6087 mddev->minor_version > 90) 6088 rdev->recovery_offset = reshape_offset; 6089 6090 if (rdev->recovery_offset < reshape_offset) { 6091 /* We need to check old and new layout */ 6092 if (!only_parity(rdev->raid_disk, 6093 conf->algorithm, 6094 conf->raid_disks, 6095 conf->max_degraded)) 6096 continue; 6097 } 6098 if (!only_parity(rdev->raid_disk, 6099 conf->prev_algo, 6100 conf->previous_raid_disks, 6101 conf->max_degraded)) 6102 continue; 6103 dirty_parity_disks++; 6104 } 6105 6106 /* 6107 * 0 for a fully functional array, 1 or 2 for a degraded array.
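 * calc_degraded() derives this from how many member devices are missing or not In_sync.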
6108 */ 6109 mddev->degraded = calc_degraded(conf); 6110 6111 if (has_failed(conf)) { 6112 printk(KERN_ERR "md/raid:%s: not enough operational devices" 6113 " (%d/%d failed)\n", 6114 mdname(mddev), mddev->degraded, conf->raid_disks); 6115 goto abort; 6116 } 6117 6118 /* device size must be a multiple of chunk size */ 6119 mddev->dev_sectors &= ~(mddev->chunk_sectors - 1); 6120 mddev->resync_max_sectors = mddev->dev_sectors; 6121 6122 if (mddev->degraded > dirty_parity_disks && 6123 mddev->recovery_cp != MaxSector) { 6124 if (mddev->ok_start_degraded) 6125 printk(KERN_WARNING 6126 "md/raid:%s: starting dirty degraded array" 6127 " - data corruption possible.\n", 6128 mdname(mddev)); 6129 else { 6130 printk(KERN_ERR 6131 "md/raid:%s: cannot start dirty degraded array.\n", 6132 mdname(mddev)); 6133 goto abort; 6134 } 6135 } 6136 6137 if (mddev->degraded == 0) 6138 printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d" 6139 " devices, algorithm %d\n", mdname(mddev), conf->level, 6140 mddev->raid_disks-mddev->degraded, mddev->raid_disks, 6141 mddev->new_layout); 6142 else 6143 printk(KERN_ALERT "md/raid:%s: raid level %d active with %d" 6144 " out of %d devices, algorithm %d\n", 6145 mdname(mddev), conf->level, 6146 mddev->raid_disks - mddev->degraded, 6147 mddev->raid_disks, mddev->new_layout); 6148 6149 print_raid5_conf(conf); 6150 6151 if (conf->reshape_progress != MaxSector) { 6152 conf->reshape_safe = conf->reshape_progress; 6153 atomic_set(&conf->reshape_stripes, 0); 6154 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 6155 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 6156 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 6157 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 6158 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 6159 "reshape"); 6160 } 6161 6162 6163 /* Ok, everything is just fine now */ 6164 if (mddev->to_remove == &raid5_attrs_group) 6165 mddev->to_remove = NULL; 6166 else if (mddev->kobj.sd && 6167 sysfs_create_group(&mddev->kobj, &raid5_attrs_group)) 6168 printk(KERN_WARNING 6169 "raid5: failed to create sysfs attributes for %s\n", 6170 mdname(mddev)); 6171 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); 6172 6173 if (mddev->queue) { 6174 int chunk_size; 6175 bool discard_supported = true; 6176 /* read-ahead size must cover two whole stripes, which 6177 * is 2 * (datadisks) * chunksize where 'n' is the 6178 * number of raid devices 6179 */ 6180 int data_disks = conf->previous_raid_disks - conf->max_degraded; 6181 int stripe = data_disks * 6182 ((mddev->chunk_sectors << 9) / PAGE_SIZE); 6183 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 6184 mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 6185 6186 blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec); 6187 6188 mddev->queue->backing_dev_info.congested_data = mddev; 6189 mddev->queue->backing_dev_info.congested_fn = raid5_congested; 6190 6191 chunk_size = mddev->chunk_sectors << 9; 6192 blk_queue_io_min(mddev->queue, chunk_size); 6193 blk_queue_io_opt(mddev->queue, chunk_size * 6194 (conf->raid_disks - conf->max_degraded)); 6195 mddev->queue->limits.raid_partial_stripes_expensive = 1; 6196 /* 6197 * We can only discard a whole stripe. 
It doesn't make sense to 6198 * discard the data disks but write the parity disk 6199 */ 6200 stripe = stripe * PAGE_SIZE; 6201 /* Round up to power of 2, as discard handling 6202 * currently assumes that */ 6203 while ((stripe-1) & stripe) 6204 stripe = (stripe | (stripe-1)) + 1; 6205 mddev->queue->limits.discard_alignment = stripe; 6206 mddev->queue->limits.discard_granularity = stripe; 6207 /* 6208 * unaligned part of discard request will be ignored, so can't 6209 * guarantee discard_zeroes_data 6210 */ 6211 mddev->queue->limits.discard_zeroes_data = 0; 6212 6213 blk_queue_max_write_same_sectors(mddev->queue, 0); 6214 6215 rdev_for_each(rdev, mddev) { 6216 disk_stack_limits(mddev->gendisk, rdev->bdev, 6217 rdev->data_offset << 9); 6218 disk_stack_limits(mddev->gendisk, rdev->bdev, 6219 rdev->new_data_offset << 9); 6220 /* 6221 * discard_zeroes_data is required, otherwise data 6222 * could be lost. Consider a scenario: discard a stripe 6223 * (the stripe could be inconsistent if 6224 * discard_zeroes_data is 0); write one disk of the 6225 * stripe (the stripe could be inconsistent again 6226 * depending on which disks are used to calculate 6227 * parity); the disk is broken; the stripe data of this 6228 * disk is lost. 6229 */ 6230 if (!blk_queue_discard(bdev_get_queue(rdev->bdev)) || 6231 !bdev_get_queue(rdev->bdev)-> 6232 limits.discard_zeroes_data) 6233 discard_supported = false; 6234 } 6235 6236 if (discard_supported && 6237 mddev->queue->limits.max_discard_sectors >= stripe && 6238 mddev->queue->limits.discard_granularity >= stripe) 6239 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, 6240 mddev->queue); 6241 else 6242 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, 6243 mddev->queue); 6244 } 6245 6246 return 0; 6247 abort: 6248 md_unregister_thread(&mddev->thread); 6249 print_raid5_conf(conf); 6250 free_conf(conf); 6251 mddev->private = NULL; 6252 printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev)); 6253 return -EIO; 6254 } 6255 6256 static int stop(struct mddev *mddev) 6257 { 6258 struct r5conf *conf = mddev->private; 6259 6260 md_unregister_thread(&mddev->thread); 6261 if (mddev->queue) 6262 mddev->queue->backing_dev_info.congested_fn = NULL; 6263 free_conf(conf); 6264 mddev->private = NULL; 6265 mddev->to_remove = &raid5_attrs_group; 6266 return 0; 6267 } 6268 6269 static void status(struct seq_file *seq, struct mddev *mddev) 6270 { 6271 struct r5conf *conf = mddev->private; 6272 int i; 6273 6274 seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level, 6275 mddev->chunk_sectors / 2, mddev->layout); 6276 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); 6277 for (i = 0; i < conf->raid_disks; i++) 6278 seq_printf (seq, "%s", 6279 conf->disks[i].rdev && 6280 test_bit(In_sync, &conf->disks[i].rdev->flags) ?
"U" : "_"); 6281 seq_printf (seq, "]"); 6282 } 6283 6284 static void print_raid5_conf (struct r5conf *conf) 6285 { 6286 int i; 6287 struct disk_info *tmp; 6288 6289 printk(KERN_DEBUG "RAID conf printout:\n"); 6290 if (!conf) { 6291 printk("(conf==NULL)\n"); 6292 return; 6293 } 6294 printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level, 6295 conf->raid_disks, 6296 conf->raid_disks - conf->mddev->degraded); 6297 6298 for (i = 0; i < conf->raid_disks; i++) { 6299 char b[BDEVNAME_SIZE]; 6300 tmp = conf->disks + i; 6301 if (tmp->rdev) 6302 printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n", 6303 i, !test_bit(Faulty, &tmp->rdev->flags), 6304 bdevname(tmp->rdev->bdev, b)); 6305 } 6306 } 6307 6308 static int raid5_spare_active(struct mddev *mddev) 6309 { 6310 int i; 6311 struct r5conf *conf = mddev->private; 6312 struct disk_info *tmp; 6313 int count = 0; 6314 unsigned long flags; 6315 6316 for (i = 0; i < conf->raid_disks; i++) { 6317 tmp = conf->disks + i; 6318 if (tmp->replacement 6319 && tmp->replacement->recovery_offset == MaxSector 6320 && !test_bit(Faulty, &tmp->replacement->flags) 6321 && !test_and_set_bit(In_sync, &tmp->replacement->flags)) { 6322 /* Replacement has just become active. */ 6323 if (!tmp->rdev 6324 || !test_and_clear_bit(In_sync, &tmp->rdev->flags)) 6325 count++; 6326 if (tmp->rdev) { 6327 /* Replaced device not technically faulty, 6328 * but we need to be sure it gets removed 6329 * and never re-added. 6330 */ 6331 set_bit(Faulty, &tmp->rdev->flags); 6332 sysfs_notify_dirent_safe( 6333 tmp->rdev->sysfs_state); 6334 } 6335 sysfs_notify_dirent_safe(tmp->replacement->sysfs_state); 6336 } else if (tmp->rdev 6337 && tmp->rdev->recovery_offset == MaxSector 6338 && !test_bit(Faulty, &tmp->rdev->flags) 6339 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { 6340 count++; 6341 sysfs_notify_dirent_safe(tmp->rdev->sysfs_state); 6342 } 6343 } 6344 spin_lock_irqsave(&conf->device_lock, flags); 6345 mddev->degraded = calc_degraded(conf); 6346 spin_unlock_irqrestore(&conf->device_lock, flags); 6347 print_raid5_conf(conf); 6348 return count; 6349 } 6350 6351 static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev) 6352 { 6353 struct r5conf *conf = mddev->private; 6354 int err = 0; 6355 int number = rdev->raid_disk; 6356 struct md_rdev **rdevp; 6357 struct disk_info *p = conf->disks + number; 6358 6359 print_raid5_conf(conf); 6360 if (rdev == p->rdev) 6361 rdevp = &p->rdev; 6362 else if (rdev == p->replacement) 6363 rdevp = &p->replacement; 6364 else 6365 return 0; 6366 6367 if (number >= conf->raid_disks && 6368 conf->reshape_progress == MaxSector) 6369 clear_bit(In_sync, &rdev->flags); 6370 6371 if (test_bit(In_sync, &rdev->flags) || 6372 atomic_read(&rdev->nr_pending)) { 6373 err = -EBUSY; 6374 goto abort; 6375 } 6376 /* Only remove non-faulty devices if recovery 6377 * isn't possible. 
6378 */ 6379 if (!test_bit(Faulty, &rdev->flags) && 6380 mddev->recovery_disabled != conf->recovery_disabled && 6381 !has_failed(conf) && 6382 (!p->replacement || p->replacement == rdev) && 6383 number < conf->raid_disks) { 6384 err = -EBUSY; 6385 goto abort; 6386 } 6387 *rdevp = NULL; 6388 synchronize_rcu(); 6389 if (atomic_read(&rdev->nr_pending)) { 6390 /* lost the race, try later */ 6391 err = -EBUSY; 6392 *rdevp = rdev; 6393 } else if (p->replacement) { 6394 /* We must have just cleared 'rdev' */ 6395 p->rdev = p->replacement; 6396 clear_bit(Replacement, &p->replacement->flags); 6397 smp_mb(); /* Make sure other CPUs may see both as identical 6398 * but will never see neither - if they are careful 6399 */ 6400 p->replacement = NULL; 6401 clear_bit(WantReplacement, &rdev->flags); 6402 } else 6403 /* We might have just removed the Replacement as faulty- 6404 * clear the bit just in case 6405 */ 6406 clear_bit(WantReplacement, &rdev->flags); 6407 abort: 6408 6409 print_raid5_conf(conf); 6410 return err; 6411 } 6412 6413 static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) 6414 { 6415 struct r5conf *conf = mddev->private; 6416 int err = -EEXIST; 6417 int disk; 6418 struct disk_info *p; 6419 int first = 0; 6420 int last = conf->raid_disks - 1; 6421 6422 if (mddev->recovery_disabled == conf->recovery_disabled) 6423 return -EBUSY; 6424 6425 if (rdev->saved_raid_disk < 0 && has_failed(conf)) 6426 /* no point adding a device */ 6427 return -EINVAL; 6428 6429 if (rdev->raid_disk >= 0) 6430 first = last = rdev->raid_disk; 6431 6432 /* 6433 * find the disk ... but prefer rdev->saved_raid_disk 6434 * if possible. 6435 */ 6436 if (rdev->saved_raid_disk >= 0 && 6437 rdev->saved_raid_disk >= first && 6438 conf->disks[rdev->saved_raid_disk].rdev == NULL) 6439 first = rdev->saved_raid_disk; 6440 6441 for (disk = first; disk <= last; disk++) { 6442 p = conf->disks + disk; 6443 if (p->rdev == NULL) { 6444 clear_bit(In_sync, &rdev->flags); 6445 rdev->raid_disk = disk; 6446 err = 0; 6447 if (rdev->saved_raid_disk != disk) 6448 conf->fullsync = 1; 6449 rcu_assign_pointer(p->rdev, rdev); 6450 goto out; 6451 } 6452 } 6453 for (disk = first; disk <= last; disk++) { 6454 p = conf->disks + disk; 6455 if (test_bit(WantReplacement, &p->rdev->flags) && 6456 p->replacement == NULL) { 6457 clear_bit(In_sync, &rdev->flags); 6458 set_bit(Replacement, &rdev->flags); 6459 rdev->raid_disk = disk; 6460 err = 0; 6461 conf->fullsync = 1; 6462 rcu_assign_pointer(p->replacement, rdev); 6463 break; 6464 } 6465 } 6466 out: 6467 print_raid5_conf(conf); 6468 return err; 6469 } 6470 6471 static int raid5_resize(struct mddev *mddev, sector_t sectors) 6472 { 6473 /* no resync is happening, and there is enough space 6474 * on all devices, so we can resize. 6475 * We need to make sure resync covers any new space. 6476 * If the array is shrinking we should possibly wait until 6477 * any io in the removed space completes, but it hardly seems 6478 * worth it. 
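 * The bitmap, if present, is resized first so that it still covers the whole array before the new size is committed.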
6479 */ 6480 sector_t newsize; 6481 sectors &= ~((sector_t)mddev->chunk_sectors - 1); 6482 newsize = raid5_size(mddev, sectors, mddev->raid_disks); 6483 if (mddev->external_size && 6484 mddev->array_sectors > newsize) 6485 return -EINVAL; 6486 if (mddev->bitmap) { 6487 int ret = bitmap_resize(mddev->bitmap, sectors, 0, 0); 6488 if (ret) 6489 return ret; 6490 } 6491 md_set_array_sectors(mddev, newsize); 6492 set_capacity(mddev->gendisk, mddev->array_sectors); 6493 revalidate_disk(mddev->gendisk); 6494 if (sectors > mddev->dev_sectors && 6495 mddev->recovery_cp > mddev->dev_sectors) { 6496 mddev->recovery_cp = mddev->dev_sectors; 6497 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6498 } 6499 mddev->dev_sectors = sectors; 6500 mddev->resync_max_sectors = sectors; 6501 return 0; 6502 } 6503 6504 static int check_stripe_cache(struct mddev *mddev) 6505 { 6506 /* Can only proceed if there are plenty of stripe_heads. 6507 * We need a minimum of one full stripe, and for sensible progress 6508 * it is best to have about 4 times that. 6509 * If we require 4 times, then the default 256 4K stripe_heads will 6510 * allow for chunk sizes up to 256K, which is probably OK. 6511 * If the chunk size is greater, user-space should request more 6512 * stripe_heads first. 6513 */ 6514 struct r5conf *conf = mddev->private; 6515 if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4 6516 > conf->max_nr_stripes || 6517 ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4 6518 > conf->max_nr_stripes) { 6519 printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes. Needed %lu\n", 6520 mdname(mddev), 6521 ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9) 6522 / STRIPE_SIZE)*4); 6523 return 0; 6524 } 6525 return 1; 6526 } 6527 6528 static int check_reshape(struct mddev *mddev) 6529 { 6530 struct r5conf *conf = mddev->private; 6531 6532 if (mddev->delta_disks == 0 && 6533 mddev->new_layout == mddev->layout && 6534 mddev->new_chunk_sectors == mddev->chunk_sectors) 6535 return 0; /* nothing to do */ 6536 if (has_failed(conf)) 6537 return -EINVAL; 6538 if (mddev->delta_disks < 0 && mddev->reshape_position == MaxSector) { 6539 /* We might be able to shrink, but the devices must 6540 * be made bigger first. 6541 * For raid6, 4 is the minimum size. 6542 * Otherwise 2 is the minimum. 6543 */ 6544 int min = 2; 6545 if (mddev->level == 6) 6546 min = 4; 6547 if (mddev->raid_disks + mddev->delta_disks < min) 6548 return -EINVAL; 6549 } 6550 6551 if (!check_stripe_cache(mddev)) 6552 return -ENOSPC; 6553 6554 return resize_stripes(conf, (conf->previous_raid_disks 6555 + mddev->delta_disks)); 6556 } 6557 6558 static int raid5_start_reshape(struct mddev *mddev) 6559 { 6560 struct r5conf *conf = mddev->private; 6561 struct md_rdev *rdev; 6562 int spares = 0; 6563 unsigned long flags; 6564 6565 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 6566 return -EBUSY; 6567 6568 if (!check_stripe_cache(mddev)) 6569 return -ENOSPC; 6570 6571 if (has_failed(conf)) 6572 return -EINVAL; 6573 6574 rdev_for_each(rdev, mddev) { 6575 if (!test_bit(In_sync, &rdev->flags) 6576 && !test_bit(Faulty, &rdev->flags)) 6577 spares++; 6578 } 6579 6580 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) 6581 /* Not enough devices even to make a degraded array 6582 * of that size 6583 */ 6584 return -EINVAL; 6585 6586 /* Refuse to reduce size of the array. Any reductions in 6587 * array size must be through explicit setting of array_size 6588 * attribute.
6589 */ 6590 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks) 6591 < mddev->array_sectors) { 6592 printk(KERN_ERR "md/raid:%s: array size must be reduced " 6593 "before number of disks\n", mdname(mddev)); 6594 return -EINVAL; 6595 } 6596 6597 atomic_set(&conf->reshape_stripes, 0); 6598 spin_lock_irq(&conf->device_lock); 6599 write_seqcount_begin(&conf->gen_lock); 6600 conf->previous_raid_disks = conf->raid_disks; 6601 conf->raid_disks += mddev->delta_disks; 6602 conf->prev_chunk_sectors = conf->chunk_sectors; 6603 conf->chunk_sectors = mddev->new_chunk_sectors; 6604 conf->prev_algo = conf->algorithm; 6605 conf->algorithm = mddev->new_layout; 6606 conf->generation++; 6607 /* Code that selects data_offset needs to see the generation update 6608 * if reshape_progress has been set - so a memory barrier needed. 6609 */ 6610 smp_mb(); 6611 if (mddev->reshape_backwards) 6612 conf->reshape_progress = raid5_size(mddev, 0, 0); 6613 else 6614 conf->reshape_progress = 0; 6615 conf->reshape_safe = conf->reshape_progress; 6616 write_seqcount_end(&conf->gen_lock); 6617 spin_unlock_irq(&conf->device_lock); 6618 6619 /* Now make sure any requests that proceeded on the assumption 6620 * the reshape wasn't running - like Discard or Read - have 6621 * completed. 6622 */ 6623 mddev_suspend(mddev); 6624 mddev_resume(mddev); 6625 6626 /* Add some new drives, as many as will fit. 6627 * We know there are enough to make the newly sized array work. 6628 * Don't add devices if we are reducing the number of 6629 * devices in the array. This is because it is not possible 6630 * to correctly record the "partially reconstructed" state of 6631 * such devices during the reshape and confusion could result. 6632 */ 6633 if (mddev->delta_disks >= 0) { 6634 rdev_for_each(rdev, mddev) 6635 if (rdev->raid_disk < 0 && 6636 !test_bit(Faulty, &rdev->flags)) { 6637 if (raid5_add_disk(mddev, rdev) == 0) { 6638 if (rdev->raid_disk 6639 >= conf->previous_raid_disks) 6640 set_bit(In_sync, &rdev->flags); 6641 else 6642 rdev->recovery_offset = 0; 6643 6644 if (sysfs_link_rdev(mddev, rdev)) 6645 /* Failure here is OK */; 6646 } 6647 } else if (rdev->raid_disk >= conf->previous_raid_disks 6648 && !test_bit(Faulty, &rdev->flags)) { 6649 /* This is a spare that was manually added */ 6650 set_bit(In_sync, &rdev->flags); 6651 } 6652 6653 /* When a reshape changes the number of devices, 6654 * ->degraded is measured against the larger of the 6655 * pre and post number of devices. 
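 * It is therefore recomputed here, under device_lock, once the new devices are in place.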
6656 */ 6657 spin_lock_irqsave(&conf->device_lock, flags); 6658 mddev->degraded = calc_degraded(conf); 6659 spin_unlock_irqrestore(&conf->device_lock, flags); 6660 } 6661 mddev->raid_disks = conf->raid_disks; 6662 mddev->reshape_position = conf->reshape_progress; 6663 set_bit(MD_CHANGE_DEVS, &mddev->flags); 6664 6665 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 6666 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 6667 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 6668 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 6669 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 6670 "reshape"); 6671 if (!mddev->sync_thread) { 6672 mddev->recovery = 0; 6673 spin_lock_irq(&conf->device_lock); 6674 write_seqcount_begin(&conf->gen_lock); 6675 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; 6676 mddev->new_chunk_sectors = 6677 conf->chunk_sectors = conf->prev_chunk_sectors; 6678 mddev->new_layout = conf->algorithm = conf->prev_algo; 6679 rdev_for_each(rdev, mddev) 6680 rdev->new_data_offset = rdev->data_offset; 6681 smp_wmb(); 6682 conf->generation --; 6683 conf->reshape_progress = MaxSector; 6684 mddev->reshape_position = MaxSector; 6685 write_seqcount_end(&conf->gen_lock); 6686 spin_unlock_irq(&conf->device_lock); 6687 return -EAGAIN; 6688 } 6689 conf->reshape_checkpoint = jiffies; 6690 md_wakeup_thread(mddev->sync_thread); 6691 md_new_event(mddev); 6692 return 0; 6693 } 6694 6695 /* This is called from the reshape thread and should make any 6696 * changes needed in 'conf' 6697 */ 6698 static void end_reshape(struct r5conf *conf) 6699 { 6700 6701 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { 6702 struct md_rdev *rdev; 6703 6704 spin_lock_irq(&conf->device_lock); 6705 conf->previous_raid_disks = conf->raid_disks; 6706 rdev_for_each(rdev, conf->mddev) 6707 rdev->data_offset = rdev->new_data_offset; 6708 smp_wmb(); 6709 conf->reshape_progress = MaxSector; 6710 spin_unlock_irq(&conf->device_lock); 6711 wake_up(&conf->wait_for_overlap); 6712 6713 /* read-ahead size must cover two whole stripes, which is 6714 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices 6715 */ 6716 if (conf->mddev->queue) { 6717 int data_disks = conf->raid_disks - conf->max_degraded; 6718 int stripe = data_disks * ((conf->chunk_sectors << 9) 6719 / PAGE_SIZE); 6720 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 6721 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 6722 } 6723 } 6724 } 6725 6726 /* This is called from the raid5d thread with mddev_lock held. 6727 * It makes config changes to the device. 
6728 */ 6729 static void raid5_finish_reshape(struct mddev *mddev) 6730 { 6731 struct r5conf *conf = mddev->private; 6732 6733 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 6734 6735 if (mddev->delta_disks > 0) { 6736 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); 6737 set_capacity(mddev->gendisk, mddev->array_sectors); 6738 revalidate_disk(mddev->gendisk); 6739 } else { 6740 int d; 6741 spin_lock_irq(&conf->device_lock); 6742 mddev->degraded = calc_degraded(conf); 6743 spin_unlock_irq(&conf->device_lock); 6744 for (d = conf->raid_disks ; 6745 d < conf->raid_disks - mddev->delta_disks; 6746 d++) { 6747 struct md_rdev *rdev = conf->disks[d].rdev; 6748 if (rdev) 6749 clear_bit(In_sync, &rdev->flags); 6750 rdev = conf->disks[d].replacement; 6751 if (rdev) 6752 clear_bit(In_sync, &rdev->flags); 6753 } 6754 } 6755 mddev->layout = conf->algorithm; 6756 mddev->chunk_sectors = conf->chunk_sectors; 6757 mddev->reshape_position = MaxSector; 6758 mddev->delta_disks = 0; 6759 mddev->reshape_backwards = 0; 6760 } 6761 } 6762 6763 static void raid5_quiesce(struct mddev *mddev, int state) 6764 { 6765 struct r5conf *conf = mddev->private; 6766 6767 switch(state) { 6768 case 2: /* resume for a suspend */ 6769 wake_up(&conf->wait_for_overlap); 6770 break; 6771 6772 case 1: /* stop all writes */ 6773 lock_all_device_hash_locks_irq(conf); 6774 /* '2' tells resync/reshape to pause so that all 6775 * active stripes can drain 6776 */ 6777 conf->quiesce = 2; 6778 wait_event_cmd(conf->wait_for_stripe, 6779 atomic_read(&conf->active_stripes) == 0 && 6780 atomic_read(&conf->active_aligned_reads) == 0, 6781 unlock_all_device_hash_locks_irq(conf), 6782 lock_all_device_hash_locks_irq(conf)); 6783 conf->quiesce = 1; 6784 unlock_all_device_hash_locks_irq(conf); 6785 /* allow reshape to continue */ 6786 wake_up(&conf->wait_for_overlap); 6787 break; 6788 6789 case 0: /* re-enable writes */ 6790 lock_all_device_hash_locks_irq(conf); 6791 conf->quiesce = 0; 6792 wake_up(&conf->wait_for_stripe); 6793 wake_up(&conf->wait_for_overlap); 6794 unlock_all_device_hash_locks_irq(conf); 6795 break; 6796 } 6797 } 6798 6799 6800 static void *raid45_takeover_raid0(struct mddev *mddev, int level) 6801 { 6802 struct r0conf *raid0_conf = mddev->private; 6803 sector_t sectors; 6804 6805 /* for raid0 takeover only one zone is supported */ 6806 if (raid0_conf->nr_strip_zones > 1) { 6807 printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n", 6808 mdname(mddev)); 6809 return ERR_PTR(-EINVAL); 6810 } 6811 6812 sectors = raid0_conf->strip_zone[0].zone_end; 6813 sector_div(sectors, raid0_conf->strip_zone[0].nb_dev); 6814 mddev->dev_sectors = sectors; 6815 mddev->new_level = level; 6816 mddev->new_layout = ALGORITHM_PARITY_N; 6817 mddev->new_chunk_sectors = mddev->chunk_sectors; 6818 mddev->raid_disks += 1; 6819 mddev->delta_disks = 1; 6820 /* make sure it will be not marked as dirty */ 6821 mddev->recovery_cp = MaxSector; 6822 6823 return setup_conf(mddev); 6824 } 6825 6826 6827 static void *raid5_takeover_raid1(struct mddev *mddev) 6828 { 6829 int chunksect; 6830 6831 if (mddev->raid_disks != 2 || 6832 mddev->degraded > 1) 6833 return ERR_PTR(-EINVAL); 6834 6835 /* Should check if there are write-behind devices? 
*/ 6836 6837 chunksect = 64*2; /* 64K by default */ 6838 6839 /* The array must be an exact multiple of chunksize */ 6840 while (chunksect && (mddev->array_sectors & (chunksect-1))) 6841 chunksect >>= 1; 6842 6843 if ((chunksect<<9) < STRIPE_SIZE) 6844 /* array size does not allow a suitable chunk size */ 6845 return ERR_PTR(-EINVAL); 6846 6847 mddev->new_level = 5; 6848 mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC; 6849 mddev->new_chunk_sectors = chunksect; 6850 6851 return setup_conf(mddev); 6852 } 6853 6854 static void *raid5_takeover_raid6(struct mddev *mddev) 6855 { 6856 int new_layout; 6857 6858 switch (mddev->layout) { 6859 case ALGORITHM_LEFT_ASYMMETRIC_6: 6860 new_layout = ALGORITHM_LEFT_ASYMMETRIC; 6861 break; 6862 case ALGORITHM_RIGHT_ASYMMETRIC_6: 6863 new_layout = ALGORITHM_RIGHT_ASYMMETRIC; 6864 break; 6865 case ALGORITHM_LEFT_SYMMETRIC_6: 6866 new_layout = ALGORITHM_LEFT_SYMMETRIC; 6867 break; 6868 case ALGORITHM_RIGHT_SYMMETRIC_6: 6869 new_layout = ALGORITHM_RIGHT_SYMMETRIC; 6870 break; 6871 case ALGORITHM_PARITY_0_6: 6872 new_layout = ALGORITHM_PARITY_0; 6873 break; 6874 case ALGORITHM_PARITY_N: 6875 new_layout = ALGORITHM_PARITY_N; 6876 break; 6877 default: 6878 return ERR_PTR(-EINVAL); 6879 } 6880 mddev->new_level = 5; 6881 mddev->new_layout = new_layout; 6882 mddev->delta_disks = -1; 6883 mddev->raid_disks -= 1; 6884 return setup_conf(mddev); 6885 } 6886 6887 6888 static int raid5_check_reshape(struct mddev *mddev) 6889 { 6890 /* For a 2-drive array, the layout and chunk size can be changed 6891 * immediately as not restriping is needed. 6892 * For larger arrays we record the new value - after validation 6893 * to be used by a reshape pass. 6894 */ 6895 struct r5conf *conf = mddev->private; 6896 int new_chunk = mddev->new_chunk_sectors; 6897 6898 if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout)) 6899 return -EINVAL; 6900 if (new_chunk > 0) { 6901 if (!is_power_of_2(new_chunk)) 6902 return -EINVAL; 6903 if (new_chunk < (PAGE_SIZE>>9)) 6904 return -EINVAL; 6905 if (mddev->array_sectors & (new_chunk-1)) 6906 /* not factor of array size */ 6907 return -EINVAL; 6908 } 6909 6910 /* They look valid */ 6911 6912 if (mddev->raid_disks == 2) { 6913 /* can make the change immediately */ 6914 if (mddev->new_layout >= 0) { 6915 conf->algorithm = mddev->new_layout; 6916 mddev->layout = mddev->new_layout; 6917 } 6918 if (new_chunk > 0) { 6919 conf->chunk_sectors = new_chunk ; 6920 mddev->chunk_sectors = new_chunk; 6921 } 6922 set_bit(MD_CHANGE_DEVS, &mddev->flags); 6923 md_wakeup_thread(mddev->thread); 6924 } 6925 return check_reshape(mddev); 6926 } 6927 6928 static int raid6_check_reshape(struct mddev *mddev) 6929 { 6930 int new_chunk = mddev->new_chunk_sectors; 6931 6932 if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout)) 6933 return -EINVAL; 6934 if (new_chunk > 0) { 6935 if (!is_power_of_2(new_chunk)) 6936 return -EINVAL; 6937 if (new_chunk < (PAGE_SIZE >> 9)) 6938 return -EINVAL; 6939 if (mddev->array_sectors & (new_chunk-1)) 6940 /* not factor of array size */ 6941 return -EINVAL; 6942 } 6943 6944 /* They look valid */ 6945 return check_reshape(mddev); 6946 } 6947 6948 static void *raid5_takeover(struct mddev *mddev) 6949 { 6950 /* raid5 can take over: 6951 * raid0 - if there is only one strip zone - make it a raid4 layout 6952 * raid1 - if there are two drives. We need to know the chunk size 6953 * raid4 - trivial - just use a raid4 layout. 
6954 * raid6 - Providing it is a *_6 layout 6955 */ 6956 if (mddev->level == 0) 6957 return raid45_takeover_raid0(mddev, 5); 6958 if (mddev->level == 1) 6959 return raid5_takeover_raid1(mddev); 6960 if (mddev->level == 4) { 6961 mddev->new_layout = ALGORITHM_PARITY_N; 6962 mddev->new_level = 5; 6963 return setup_conf(mddev); 6964 } 6965 if (mddev->level == 6) 6966 return raid5_takeover_raid6(mddev); 6967 6968 return ERR_PTR(-EINVAL); 6969 } 6970 6971 static void *raid4_takeover(struct mddev *mddev) 6972 { 6973 /* raid4 can take over: 6974 * raid0 - if there is only one strip zone 6975 * raid5 - if layout is right 6976 */ 6977 if (mddev->level == 0) 6978 return raid45_takeover_raid0(mddev, 4); 6979 if (mddev->level == 5 && 6980 mddev->layout == ALGORITHM_PARITY_N) { 6981 mddev->new_layout = 0; 6982 mddev->new_level = 4; 6983 return setup_conf(mddev); 6984 } 6985 return ERR_PTR(-EINVAL); 6986 } 6987 6988 static struct md_personality raid5_personality; 6989 6990 static void *raid6_takeover(struct mddev *mddev) 6991 { 6992 /* Currently can only take over a raid5. We map the 6993 * personality to an equivalent raid6 personality 6994 * with the Q block at the end. 6995 */ 6996 int new_layout; 6997 6998 if (mddev->pers != &raid5_personality) 6999 return ERR_PTR(-EINVAL); 7000 if (mddev->degraded > 1) 7001 return ERR_PTR(-EINVAL); 7002 if (mddev->raid_disks > 253) 7003 return ERR_PTR(-EINVAL); 7004 if (mddev->raid_disks < 3) 7005 return ERR_PTR(-EINVAL); 7006 7007 switch (mddev->layout) { 7008 case ALGORITHM_LEFT_ASYMMETRIC: 7009 new_layout = ALGORITHM_LEFT_ASYMMETRIC_6; 7010 break; 7011 case ALGORITHM_RIGHT_ASYMMETRIC: 7012 new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6; 7013 break; 7014 case ALGORITHM_LEFT_SYMMETRIC: 7015 new_layout = ALGORITHM_LEFT_SYMMETRIC_6; 7016 break; 7017 case ALGORITHM_RIGHT_SYMMETRIC: 7018 new_layout = ALGORITHM_RIGHT_SYMMETRIC_6; 7019 break; 7020 case ALGORITHM_PARITY_0: 7021 new_layout = ALGORITHM_PARITY_0_6; 7022 break; 7023 case ALGORITHM_PARITY_N: 7024 new_layout = ALGORITHM_PARITY_N; 7025 break; 7026 default: 7027 return ERR_PTR(-EINVAL); 7028 } 7029 mddev->new_level = 6; 7030 mddev->new_layout = new_layout; 7031 mddev->delta_disks = 1; 7032 mddev->raid_disks += 1; 7033 return setup_conf(mddev); 7034 } 7035 7036 7037 static struct md_personality raid6_personality = 7038 { 7039 .name = "raid6", 7040 .level = 6, 7041 .owner = THIS_MODULE, 7042 .make_request = make_request, 7043 .run = run, 7044 .stop = stop, 7045 .status = status, 7046 .error_handler = error, 7047 .hot_add_disk = raid5_add_disk, 7048 .hot_remove_disk= raid5_remove_disk, 7049 .spare_active = raid5_spare_active, 7050 .sync_request = sync_request, 7051 .resize = raid5_resize, 7052 .size = raid5_size, 7053 .check_reshape = raid6_check_reshape, 7054 .start_reshape = raid5_start_reshape, 7055 .finish_reshape = raid5_finish_reshape, 7056 .quiesce = raid5_quiesce, 7057 .takeover = raid6_takeover, 7058 }; 7059 static struct md_personality raid5_personality = 7060 { 7061 .name = "raid5", 7062 .level = 5, 7063 .owner = THIS_MODULE, 7064 .make_request = make_request, 7065 .run = run, 7066 .stop = stop, 7067 .status = status, 7068 .error_handler = error, 7069 .hot_add_disk = raid5_add_disk, 7070 .hot_remove_disk= raid5_remove_disk, 7071 .spare_active = raid5_spare_active, 7072 .sync_request = sync_request, 7073 .resize = raid5_resize, 7074 .size = raid5_size, 7075 .check_reshape = raid5_check_reshape, 7076 .start_reshape = raid5_start_reshape, 7077 .finish_reshape = raid5_finish_reshape, 7078 .quiesce = 
raid5_quiesce, 7079 .takeover = raid5_takeover, 7080 }; 7081 7082 static struct md_personality raid4_personality = 7083 { 7084 .name = "raid4", 7085 .level = 4, 7086 .owner = THIS_MODULE, 7087 .make_request = make_request, 7088 .run = run, 7089 .stop = stop, 7090 .status = status, 7091 .error_handler = error, 7092 .hot_add_disk = raid5_add_disk, 7093 .hot_remove_disk= raid5_remove_disk, 7094 .spare_active = raid5_spare_active, 7095 .sync_request = sync_request, 7096 .resize = raid5_resize, 7097 .size = raid5_size, 7098 .check_reshape = raid5_check_reshape, 7099 .start_reshape = raid5_start_reshape, 7100 .finish_reshape = raid5_finish_reshape, 7101 .quiesce = raid5_quiesce, 7102 .takeover = raid4_takeover, 7103 }; 7104 7105 static int __init raid5_init(void) 7106 { 7107 raid5_wq = alloc_workqueue("raid5wq", 7108 WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE|WQ_SYSFS, 0); 7109 if (!raid5_wq) 7110 return -ENOMEM; 7111 register_md_personality(&raid6_personality); 7112 register_md_personality(&raid5_personality); 7113 register_md_personality(&raid4_personality); 7114 return 0; 7115 } 7116 7117 static void raid5_exit(void) 7118 { 7119 unregister_md_personality(&raid6_personality); 7120 unregister_md_personality(&raid5_personality); 7121 unregister_md_personality(&raid4_personality); 7122 destroy_workqueue(raid5_wq); 7123 } 7124 7125 module_init(raid5_init); 7126 module_exit(raid5_exit); 7127 MODULE_LICENSE("GPL"); 7128 MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD"); 7129 MODULE_ALIAS("md-personality-4"); /* RAID5 */ 7130 MODULE_ALIAS("md-raid5"); 7131 MODULE_ALIAS("md-raid4"); 7132 MODULE_ALIAS("md-level-5"); 7133 MODULE_ALIAS("md-level-4"); 7134 MODULE_ALIAS("md-personality-8"); /* RAID6 */ 7135 MODULE_ALIAS("md-raid6"); 7136 MODULE_ALIAS("md-level-6"); 7137 7138 /* This used to be two separate modules, they were: */ 7139 MODULE_ALIAS("raid5"); 7140 MODULE_ALIAS("raid6"); 7141