/*
 * raid5.c : Multiple Devices driver for Linux
 *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *	   Copyright (C) 1999, 2000 Ingo Molnar
 *	   Copyright (C) 2002, 2003 H. Peter Anvin
 *
 * RAID-4/5/6 management functions.
 * Thanks to Penguin Computing for making the RAID-6 development possible
 * by donating a test server!
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * BITMAP UNPLUGGING:
 *
 * The sequencing for updating the bitmap reliably is a little
 * subtle (and I got it wrong the first time) so it deserves some
 * explanation.
 *
 * We group bitmap updates into batches.  Each batch has a number.
 * We may write out several batches at once, but that isn't very important.
 * conf->seq_write is the number of the last batch successfully written.
 * conf->seq_flush is the number of the last batch that was closed to
 *    new additions.
 * When we discover that we will need to write to any block in a stripe
 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
 * the number of the batch it will be in. This is seq_flush+1.
 * When we are ready to do a write, if that batch hasn't been written yet,
 * we plug the array and queue the stripe for later.
 * When an unplug happens, we increment seq_flush, thus closing the current
 * batch.
 * When we notice that seq_flush > seq_write, we write out all pending updates
 * to the bitmap, and advance seq_write to where seq_flush was.
 * This may occasionally write a bit out twice, but is sure never to
 * miss any bits.
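 *
 * As a rough concrete illustration of the above: suppose seq_write ==
 * seq_flush == 5.  A new write added in add_stripe_bio() records
 * bm_seq = 6 (seq_flush + 1) in the stripe, which then waits on
 * bitmap_list.  An unplug advances seq_flush to 6, closing the batch;
 * once the pending bitmap updates have been written, seq_write becomes
 * 6, bm_seq - seq_write is no longer positive, and the stripe's writes
 * may proceed.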
 */

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/module.h>
#include <linux/async.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/nodemask.h>
#include <trace/events/block.h>

#include "md.h"
#include "raid5.h"
#include "raid0.h"
#include "bitmap.h"

#define cpu_to_group(cpu) cpu_to_node(cpu)
#define ANY_GROUP NUMA_NO_NODE

static struct workqueue_struct *raid5_wq;
/*
 * Stripe cache
 */

#define NR_STRIPES		256
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
#define	IO_THRESHOLD		1
#define BYPASS_THRESHOLD	1
#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK		(NR_HASH - 1)
#define MAX_STRIPE_BATCH	8

static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
{
	int hash = (sect >> STRIPE_SHIFT) & HASH_MASK;
	return &conf->stripe_hashtbl[hash];
}

static inline int stripe_hash_locks_hash(sector_t sect)
{
	return (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK;
}

static inline void lock_device_hash_lock(struct r5conf *conf, int hash)
{
	spin_lock_irq(conf->hash_locks + hash);
	spin_lock(&conf->device_lock);
}

static inline void unlock_device_hash_lock(struct r5conf *conf, int hash)
{
	spin_unlock(&conf->device_lock);
	spin_unlock_irq(conf->hash_locks + hash);
}

static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
{
	int i;
	local_irq_disable();
	spin_lock(conf->hash_locks);
	for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
		spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks);
	spin_lock(&conf->device_lock);
}

static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
{
	int i;
	spin_unlock(&conf->device_lock);
	for (i = NR_STRIPE_HASH_LOCKS; i; i--)
		spin_unlock(conf->hash_locks + i - 1);
	local_irq_enable();
}

/* bio's attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap.  There may be several bio's per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This function is used to determine the 'next' bio in the list, given the
 * sector of the current stripe+device.
 */
static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
{
	int sectors = bio_sectors(bio);
	if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
		return bio->bi_next;
	else
		return NULL;
}

/*
 * We maintain a biased count of active stripes in the bottom 16 bits of
 * bi_phys_segments, and a count of processed stripes in the upper 16 bits.
 */
static inline int raid5_bi_processed_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	return (atomic_read(segments) >> 16) & 0xffff;
}

static inline int raid5_dec_bi_active_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	return atomic_sub_return(1, segments) & 0xffff;
}

static inline void raid5_inc_bi_active_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	atomic_inc(segments);
}

static inline void raid5_set_bi_processed_stripes(struct bio *bio,
	unsigned int cnt)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	int old, new;

	do {
		old = atomic_read(segments);
		new = (old & 0xffff) | (cnt << 16);
	} while (atomic_cmpxchg(segments, old, new) != old);
}

static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	atomic_set(segments, cnt);
}

/* Find first data disk in a raid6 stripe */
static inline int raid6_d0(struct stripe_head *sh)
{
	if (sh->ddf_layout)
		/* ddf always starts from first device */
		return 0;
	/* md starts just after Q block */
	if (sh->qd_idx == sh->disks - 1)
		return 0;
	else
		return sh->qd_idx + 1;
}
static inline int raid6_next_disk(int disk, int raid_disks)
{
	disk++;
	return (disk < raid_disks) ? disk : 0;
}

/* When walking through the disks in a raid5, starting at raid6_d0,
 * we need to map each disk to a 'slot', where the data disks are slot
 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
 * is raid_disks-1.  This helper does that mapping.
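 *
 * For example, in the md (non-ddf) layout with raid_disks == 5 and
 * qd_idx == 4: raid6_d0() is 0, walking disks 0..2 yields data slots
 * 0..2, disk 3 (pd_idx) maps to slot 3 (raid_disks-2), and disk 4
 * (qd_idx) maps to slot 4 (raid_disks-1).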
 */
static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
			     int *count, int syndrome_disks)
{
	int slot = *count;

	if (sh->ddf_layout)
		(*count)++;
	if (idx == sh->pd_idx)
		return syndrome_disks;
	if (idx == sh->qd_idx)
		return syndrome_disks + 1;
	if (!sh->ddf_layout)
		(*count)++;
	return slot;
}

static void return_io(struct bio *return_bi)
{
	struct bio *bi = return_bi;
	while (bi) {

		return_bi = bi->bi_next;
		bi->bi_next = NULL;
		bi->bi_iter.bi_size = 0;
		trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
					 bi, 0);
		bio_endio(bi, 0);
		bi = return_bi;
	}
}

static void print_raid5_conf (struct r5conf *conf);

static int stripe_operations_active(struct stripe_head *sh)
{
	return sh->check_state || sh->reconstruct_state ||
	       test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
	       test_bit(STRIPE_COMPUTE_RUN, &sh->state);
}

static void raid5_wakeup_stripe_thread(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	struct r5worker_group *group;
	int thread_cnt;
	int i, cpu = sh->cpu;

	if (!cpu_online(cpu)) {
		cpu = cpumask_any(cpu_online_mask);
		sh->cpu = cpu;
	}

	if (list_empty(&sh->lru)) {
		struct r5worker_group *group;
		group = conf->worker_groups + cpu_to_group(cpu);
		list_add_tail(&sh->lru, &group->handle_list);
		group->stripes_cnt++;
		sh->group = group;
	}

	if (conf->worker_cnt_per_group == 0) {
		md_wakeup_thread(conf->mddev->thread);
		return;
	}

	group = conf->worker_groups + cpu_to_group(sh->cpu);

	group->workers[0].working = true;
	/* at least one worker should run to avoid race */
	queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work);

	thread_cnt = group->stripes_cnt / MAX_STRIPE_BATCH - 1;
	/* wakeup more workers */
	for (i = 1; i < conf->worker_cnt_per_group && thread_cnt > 0; i++) {
		if (group->workers[i].working == false) {
			group->workers[i].working = true;
			queue_work_on(sh->cpu, raid5_wq,
				      &group->workers[i].work);
			thread_cnt--;
		}
	}
}

static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
			      struct list_head *temp_inactive_list)
{
	BUG_ON(!list_empty(&sh->lru));
	BUG_ON(atomic_read(&conf->active_stripes)==0);
	if (test_bit(STRIPE_HANDLE, &sh->state)) {
		if (test_bit(STRIPE_DELAYED, &sh->state) &&
		    !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			list_add_tail(&sh->lru, &conf->delayed_list);
		else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
			 sh->bm_seq - conf->seq_write > 0)
			list_add_tail(&sh->lru, &conf->bitmap_list);
		else {
			clear_bit(STRIPE_DELAYED, &sh->state);
			clear_bit(STRIPE_BIT_DELAY, &sh->state);
			if (conf->worker_cnt_per_group == 0) {
				list_add_tail(&sh->lru, &conf->handle_list);
			} else {
				raid5_wakeup_stripe_thread(sh);
				return;
			}
		}
		md_wakeup_thread(conf->mddev->thread);
	} else {
		BUG_ON(stripe_operations_active(sh));
		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			if (atomic_dec_return(&conf->preread_active_stripes)
			    < IO_THRESHOLD)
				md_wakeup_thread(conf->mddev->thread);
		atomic_dec(&conf->active_stripes);
		if (!test_bit(STRIPE_EXPANDING, &sh->state))
			list_add_tail(&sh->lru, temp_inactive_list);
	}
}

static void __release_stripe(struct r5conf *conf, struct stripe_head *sh,
			     struct list_head *temp_inactive_list)
{
	if (atomic_dec_and_test(&sh->count))
		do_release_stripe(conf, sh, temp_inactive_list);
}

/*
 * @hash could be NR_STRIPE_HASH_LOCKS, in which case temp_inactive_list
 * holds one list per hash value.
 *
 * Be careful: Only one task can add/delete stripes from temp_inactive_list
 * at a given time.  Adding stripes only takes the device lock, while
 * deleting stripes only takes the hash lock.
 */
static void release_inactive_stripe_list(struct r5conf *conf,
					 struct list_head *temp_inactive_list,
					 int hash)
{
	int size;
	bool do_wakeup = false;
	unsigned long flags;

	if (hash == NR_STRIPE_HASH_LOCKS) {
		size = NR_STRIPE_HASH_LOCKS;
		hash = NR_STRIPE_HASH_LOCKS - 1;
	} else
		size = 1;
	while (size) {
		struct list_head *list = &temp_inactive_list[size - 1];

		/*
		 * We don't hold any lock here yet, get_active_stripe() might
		 * remove stripes from the list
		 */
		if (!list_empty_careful(list)) {
			spin_lock_irqsave(conf->hash_locks + hash, flags);
			if (list_empty(conf->inactive_list + hash) &&
			    !list_empty(list))
				atomic_dec(&conf->empty_inactive_list_nr);
			list_splice_tail_init(list, conf->inactive_list + hash);
			do_wakeup = true;
			spin_unlock_irqrestore(conf->hash_locks + hash, flags);
		}
		size--;
		hash--;
	}

	if (do_wakeup) {
		wake_up(&conf->wait_for_stripe);
		if (conf->retry_read_aligned)
			md_wakeup_thread(conf->mddev->thread);
	}
}

/* should hold conf->device_lock already */
static int release_stripe_list(struct r5conf *conf,
			       struct list_head *temp_inactive_list)
{
	struct stripe_head *sh;
	int count = 0;
	struct llist_node *head;

	head = llist_del_all(&conf->released_stripes);
	head = llist_reverse_order(head);
	while (head) {
		int hash;

		sh = llist_entry(head, struct stripe_head, release_list);
		head = llist_next(head);
		/* sh could be re-added after STRIPE_ON_RELEASE_LIST is cleared */
		smp_mb();
		clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state);
		/*
		 * Don't worry if the bit is set again here, because in that
		 * case the count is always > 1.  This is true for the
		 * STRIPE_ON_UNPLUG_LIST bit too.
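		 * (Whoever set the bit again necessarily took a reference
		 * of their own first, so the count cannot drop to zero
		 * under the __release_stripe() below.)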
		 */
		hash = sh->hash_lock_index;
		__release_stripe(conf, sh, &temp_inactive_list[hash]);
		count++;
	}

	return count;
}

static void release_stripe(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	unsigned long flags;
	struct list_head list;
	int hash;
	bool wakeup;

	if (unlikely(!conf->mddev->thread) ||
	    test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state))
		goto slow_path;
	wakeup = llist_add(&sh->release_list, &conf->released_stripes);
	if (wakeup)
		md_wakeup_thread(conf->mddev->thread);
	return;
slow_path:
	local_irq_save(flags);
	/* we are ok here whether STRIPE_ON_RELEASE_LIST is set or not */
	if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
		INIT_LIST_HEAD(&list);
		hash = sh->hash_lock_index;
		do_release_stripe(conf, sh, &list);
		spin_unlock(&conf->device_lock);
		release_inactive_stripe_list(conf, &list, hash);
	}
	local_irq_restore(flags);
}

static inline void remove_hash(struct stripe_head *sh)
{
	pr_debug("remove_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_del_init(&sh->hash);
}

static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh)
{
	struct hlist_head *hp = stripe_hash(conf, sh->sector);

	pr_debug("insert_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_add_head(&sh->hash, hp);
}


/* find an idle stripe, make sure it is unhashed, and return it. */
static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash)
{
	struct stripe_head *sh = NULL;
	struct list_head *first;

	if (list_empty(conf->inactive_list + hash))
		goto out;
	first = (conf->inactive_list + hash)->next;
	sh = list_entry(first, struct stripe_head, lru);
	list_del_init(first);
	remove_hash(sh);
	atomic_inc(&conf->active_stripes);
	BUG_ON(hash != sh->hash_lock_index);
	if (list_empty(conf->inactive_list + hash))
		atomic_inc(&conf->empty_inactive_list_nr);
out:
	return sh;
}

static void shrink_buffers(struct stripe_head *sh)
{
	struct page *p;
	int i;
	int num = sh->raid_conf->pool_size;

	for (i = 0; i < num ; i++) {
		p = sh->dev[i].page;
		if (!p)
			continue;
		sh->dev[i].page = NULL;
		put_page(p);
	}
}

static int grow_buffers(struct stripe_head *sh)
{
	int i;
	int num = sh->raid_conf->pool_size;

	for (i = 0; i < num; i++) {
		struct page *page;

		if (!(page = alloc_page(GFP_KERNEL))) {
			return 1;
		}
		sh->dev[i].page = page;
	}
	return 0;
}

static void raid5_build_block(struct stripe_head *sh, int i, int previous);
static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
			   struct stripe_head *sh);

static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
{
	struct r5conf *conf = sh->raid_conf;
	int i, seq;

	BUG_ON(atomic_read(&sh->count) != 0);
	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
	BUG_ON(stripe_operations_active(sh));

	pr_debug("init_stripe called, stripe %llu\n",
		(unsigned long long)sh->sector);

	remove_hash(sh);
retry:
	seq = read_seqcount_begin(&conf->gen_lock);
	sh->generation = conf->generation - previous;
	sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
	sh->sector = sector;
	stripe_set_idx(sector, conf, previous, sh);
	sh->state = 0;

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->toread || dev->read || dev->towrite || dev->written ||
		    test_bit(R5_LOCKED, &dev->flags)) {
			printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
			       (unsigned long long)sh->sector, i, dev->toread,
			       dev->read, dev->towrite, dev->written,
			       test_bit(R5_LOCKED, &dev->flags));
			WARN_ON(1);
		}
		dev->flags = 0;
		raid5_build_block(sh, i, previous);
	}
	if (read_seqcount_retry(&conf->gen_lock, seq))
		goto retry;
	insert_hash(conf, sh);
	sh->cpu = smp_processor_id();
}

static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
					 short generation)
{
	struct stripe_head *sh;

	pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
	hlist_for_each_entry(sh, stripe_hash(conf, sector), hash)
		if (sh->sector == sector && sh->generation == generation)
			return sh;
	pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
	return NULL;
}

/*
 * Need to check if array has failed when deciding whether to:
 *  - start an array
 *  - remove non-faulty devices
 *  - add a spare
 *  - allow a reshape
 * This determination is simple when no reshape is happening.
 * However if there is a reshape, we need to carefully check
 * both the before and after sections.
 * This is because some failed devices may only affect one
 * of the two sections, and some non-in_sync devices may
 * be in_sync in the section most affected by failed devices.
 */
static int calc_degraded(struct r5conf *conf)
{
	int degraded, degraded2;
	int i;

	rcu_read_lock();
	degraded = 0;
	for (i = 0; i < conf->previous_raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = rcu_dereference(conf->disks[i].replacement);
		if (!rdev || test_bit(Faulty, &rdev->flags))
			degraded++;
		else if (test_bit(In_sync, &rdev->flags))
			;
		else
			/* not in-sync or faulty.
			 * If the reshape increases the number of devices,
			 * this is being recovered by the reshape, so
			 * this 'previous' section is not in_sync.
			 * If the number of devices is being reduced however,
			 * the device can only be part of the array if
			 * we are reverting a reshape, so this section will
			 * be in-sync.
			 */
			if (conf->raid_disks >= conf->previous_raid_disks)
				degraded++;
	}
	rcu_read_unlock();
	if (conf->raid_disks == conf->previous_raid_disks)
		return degraded;
	rcu_read_lock();
	degraded2 = 0;
	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = rcu_dereference(conf->disks[i].replacement);
		if (!rdev || test_bit(Faulty, &rdev->flags))
			degraded2++;
		else if (test_bit(In_sync, &rdev->flags))
			;
		else
			/* not in-sync or faulty.
			 * If reshape increases the number of devices, this
			 * section has already been recovered, else it
			 * almost certainly hasn't.
			 */
			if (conf->raid_disks <= conf->previous_raid_disks)
				degraded2++;
	}
	rcu_read_unlock();
	if (degraded2 > degraded)
		return degraded2;
	return degraded;
}

static int has_failed(struct r5conf *conf)
{
	int degraded;

	if (conf->mddev->reshape_position == MaxSector)
		return conf->mddev->degraded > conf->max_degraded;

	degraded = calc_degraded(conf);
	if (degraded > conf->max_degraded)
		return 1;
	return 0;
}

static struct stripe_head *
get_active_stripe(struct r5conf *conf, sector_t sector,
		  int previous, int noblock, int noquiesce)
{
	struct stripe_head *sh;
	int hash = stripe_hash_locks_hash(sector);

	pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);

	spin_lock_irq(conf->hash_locks + hash);

	do {
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->quiesce == 0 || noquiesce,
				    *(conf->hash_locks + hash));
		sh = __find_stripe(conf, sector, conf->generation - previous);
		if (!sh) {
			if (!conf->inactive_blocked)
				sh = get_free_stripe(conf, hash);
			if (noblock && sh == NULL)
				break;
			if (!sh) {
				conf->inactive_blocked = 1;
				wait_event_lock_irq(
					conf->wait_for_stripe,
					!list_empty(conf->inactive_list + hash) &&
					(atomic_read(&conf->active_stripes)
					 < (conf->max_nr_stripes * 3 / 4)
					 || !conf->inactive_blocked),
					*(conf->hash_locks + hash));
				conf->inactive_blocked = 0;
			} else {
				init_stripe(sh, sector, previous);
				atomic_inc(&sh->count);
			}
		} else {
			spin_lock(&conf->device_lock);
			if (atomic_read(&sh->count)) {
				BUG_ON(!list_empty(&sh->lru)
				    && !test_bit(STRIPE_EXPANDING, &sh->state)
				    && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)
					);
			} else {
				if (!test_bit(STRIPE_HANDLE, &sh->state))
					atomic_inc(&conf->active_stripes);
				BUG_ON(list_empty(&sh->lru) &&
				       !test_bit(STRIPE_EXPANDING, &sh->state));
				list_del_init(&sh->lru);
				if (sh->group) {
					sh->group->stripes_cnt--;
					sh->group = NULL;
				}
			}
			atomic_inc(&sh->count);
			spin_unlock(&conf->device_lock);
		}
	} while (sh == NULL);

	spin_unlock_irq(conf->hash_locks + hash);
	return sh;
}

/* Determine if 'data_offset' or 'new_data_offset' should be used
 * in this stripe_head.
 */
static int use_new_offset(struct r5conf *conf, struct stripe_head *sh)
{
	sector_t progress = conf->reshape_progress;
	/* Need a memory barrier to make sure we see the value
	 * of conf->generation, or ->data_offset that was set before
	 * reshape_progress was updated.
	 */
	smp_rmb();
	if (progress == MaxSector)
		return 0;
	if (sh->generation == conf->generation - 1)
		return 0;
	/* We are in a reshape, and this is a new-generation stripe,
	 * so use new_data_offset.
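	 * (Old-generation stripes, generation == conf->generation - 1,
	 * were caught by the test above and keep using data_offset.)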
	 */
	return 1;
}

static void
raid5_end_read_request(struct bio *bi, int error);
static void
raid5_end_write_request(struct bio *bi, int error);

static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
{
	struct r5conf *conf = sh->raid_conf;
	int i, disks = sh->disks;

	might_sleep();

	for (i = disks; i--; ) {
		int rw;
		int replace_only = 0;
		struct bio *bi, *rbi;
		struct md_rdev *rdev, *rrdev = NULL;
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
			if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
				rw = WRITE_FUA;
			else
				rw = WRITE;
			if (test_bit(R5_Discard, &sh->dev[i].flags))
				rw |= REQ_DISCARD;
		} else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
			rw = READ;
		else if (test_and_clear_bit(R5_WantReplace,
					    &sh->dev[i].flags)) {
			rw = WRITE;
			replace_only = 1;
		} else
			continue;
		if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags))
			rw |= REQ_SYNC;

		bi = &sh->dev[i].req;
		rbi = &sh->dev[i].rreq; /* For writing to replacement */

		rcu_read_lock();
		rrdev = rcu_dereference(conf->disks[i].replacement);
		smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (!rdev) {
			rdev = rrdev;
			rrdev = NULL;
		}
		if (rw & WRITE) {
			if (replace_only)
				rdev = NULL;
			if (rdev == rrdev)
				/* We raced and saw duplicates */
				rrdev = NULL;
		} else {
			if (test_bit(R5_ReadRepl, &sh->dev[i].flags) && rrdev)
				rdev = rrdev;
			rrdev = NULL;
		}

		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = NULL;
		if (rdev)
			atomic_inc(&rdev->nr_pending);
		if (rrdev && test_bit(Faulty, &rrdev->flags))
			rrdev = NULL;
		if (rrdev)
			atomic_inc(&rrdev->nr_pending);
		rcu_read_unlock();

		/* We have already checked bad blocks for reads.  Now
		 * need to check for writes.  We never accept write errors
		 * on the replacement, so we don't need to check rrdev.
		 */
		while ((rw & WRITE) && rdev &&
		       test_bit(WriteErrorSeen, &rdev->flags)) {
			sector_t first_bad;
			int bad_sectors;
			int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
					      &first_bad, &bad_sectors);
			if (!bad)
				break;

			if (bad < 0) {
				set_bit(BlockedBadBlocks, &rdev->flags);
				if (!conf->mddev->external &&
				    conf->mddev->flags) {
					/* It is very unlikely, but we might
					 * still need to write out the
					 * bad block log - better give it
					 * a chance */
					md_check_recovery(conf->mddev);
				}
				/*
				 * Because md_wait_for_blocked_rdev
				 * will dec nr_pending, we must
				 * increment it first.
				 */
				atomic_inc(&rdev->nr_pending);
				md_wait_for_blocked_rdev(rdev, conf->mddev);
			} else {
				/* Acknowledged bad block - skip the write */
				rdev_dec_pending(rdev, conf->mddev);
				rdev = NULL;
			}
		}

		if (rdev) {
			if (s->syncing || s->expanding || s->expanded
			    || s->replacing)
				md_sync_acct(rdev->bdev, STRIPE_SECTORS);

			set_bit(STRIPE_IO_STARTED, &sh->state);

			bio_reset(bi);
			bi->bi_bdev = rdev->bdev;
			bi->bi_rw = rw;
			bi->bi_end_io = (rw & WRITE)
				? raid5_end_write_request
				: raid5_end_read_request;
			bi->bi_private = sh;

			pr_debug("%s: for %llu schedule op %ld on disc %d\n",
				__func__, (unsigned long long)sh->sector,
				bi->bi_rw, i);
			atomic_inc(&sh->count);
			if (use_new_offset(conf, sh))
				bi->bi_iter.bi_sector = (sh->sector
						 + rdev->new_data_offset);
			else
				bi->bi_iter.bi_sector = (sh->sector
						 + rdev->data_offset);
			if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
				bi->bi_rw |= REQ_NOMERGE;

			bi->bi_vcnt = 1;
			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
			bi->bi_io_vec[0].bv_offset = 0;
			bi->bi_iter.bi_size = STRIPE_SIZE;
			/*
			 * If this is a discard request, set bi_vcnt 0.  We
			 * don't want to confuse SCSI because SCSI will
			 * replace the payload.
			 */
			if (rw & REQ_DISCARD)
				bi->bi_vcnt = 0;
			if (rrdev)
				set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);

			if (conf->mddev->gendisk)
				trace_block_bio_remap(bdev_get_queue(bi->bi_bdev),
						      bi, disk_devt(conf->mddev->gendisk),
						      sh->dev[i].sector);
			generic_make_request(bi);
		}
		if (rrdev) {
			if (s->syncing || s->expanding || s->expanded
			    || s->replacing)
				md_sync_acct(rrdev->bdev, STRIPE_SECTORS);

			set_bit(STRIPE_IO_STARTED, &sh->state);

			bio_reset(rbi);
			rbi->bi_bdev = rrdev->bdev;
			rbi->bi_rw = rw;
			BUG_ON(!(rw & WRITE));
			rbi->bi_end_io = raid5_end_write_request;
			rbi->bi_private = sh;

			pr_debug("%s: for %llu schedule op %ld on "
				 "replacement disc %d\n",
				__func__, (unsigned long long)sh->sector,
				rbi->bi_rw, i);
			atomic_inc(&sh->count);
			if (use_new_offset(conf, sh))
				rbi->bi_iter.bi_sector = (sh->sector
						  + rrdev->new_data_offset);
			else
				rbi->bi_iter.bi_sector = (sh->sector
						  + rrdev->data_offset);
			rbi->bi_vcnt = 1;
			rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
			rbi->bi_io_vec[0].bv_offset = 0;
			rbi->bi_iter.bi_size = STRIPE_SIZE;
			/*
			 * If this is a discard request, set bi_vcnt 0.  We
			 * don't want to confuse SCSI because SCSI will
			 * replace the payload.
			 */
			if (rw & REQ_DISCARD)
				rbi->bi_vcnt = 0;
			if (conf->mddev->gendisk)
				trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
						      rbi, disk_devt(conf->mddev->gendisk),
						      sh->dev[i].sector);
			generic_make_request(rbi);
		}
		if (!rdev && !rrdev) {
			if (rw & WRITE)
				set_bit(STRIPE_DEGRADED, &sh->state);
			pr_debug("skip op %ld on disc %d for sector %llu\n",
				bi->bi_rw, i, (unsigned long long)sh->sector);
			clear_bit(R5_LOCKED, &sh->dev[i].flags);
			set_bit(STRIPE_HANDLE, &sh->state);
		}
	}
}

static struct dma_async_tx_descriptor *
async_copy_data(int frombio, struct bio *bio, struct page *page,
	sector_t sector, struct dma_async_tx_descriptor *tx)
{
	struct bio_vec bvl;
	struct bvec_iter iter;
	struct page *bio_page;
	int page_offset;
	struct async_submit_ctl submit;
	enum async_tx_flags flags = 0;

	if (bio->bi_iter.bi_sector >= sector)
		page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
	else
		page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;

	if (frombio)
		flags |= ASYNC_TX_FENCE;
	init_async_submit(&submit, flags, tx, NULL, NULL, NULL);

	bio_for_each_segment(bvl, bio, iter) {
		int len = bvl.bv_len;
		int clen;
		int b_offset = 0;

		if (page_offset < 0) {
			b_offset = -page_offset;
			page_offset += b_offset;
			len -= b_offset;
		}

		if (len > 0 && page_offset + len > STRIPE_SIZE)
			clen = STRIPE_SIZE - page_offset;
		else
			clen = len;

		if (clen > 0) {
			b_offset += bvl.bv_offset;
			bio_page = bvl.bv_page;
			if (frombio)
				tx = async_memcpy(page, bio_page, page_offset,
						  b_offset, clen, &submit);
			else
				tx = async_memcpy(bio_page, page, b_offset,
						  page_offset, clen, &submit);
		}
		/* chain the operations */
		submit.depend_tx = tx;

		if (clen < len) /* hit end of page */
			break;
		page_offset += len;
	}

	return tx;
}

static void ops_complete_biofill(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	struct bio *return_bi = NULL;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* clear completed biofills */
	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		/* acknowledge completion of a biofill operation */
		/* and check if we need to reply to a read request,
		 * new R5_Wantfill requests are held off until
		 * !STRIPE_BIOFILL_RUN
		 */
		if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi, *rbi2;

			BUG_ON(!dev->read);
			rbi = dev->read;
			dev->read = NULL;
			while (rbi && rbi->bi_iter.bi_sector <
				dev->sector + STRIPE_SECTORS) {
				rbi2 = r5_next_bio(rbi, dev->sector);
				if (!raid5_dec_bi_active_stripes(rbi)) {
					rbi->bi_next = return_bi;
					return_bi = rbi;
				}
				rbi = rbi2;
			}
		}
	}
	clear_bit(STRIPE_BIOFILL_RUN, &sh->state);

	return_io(return_bi);

	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static void ops_run_biofill(struct stripe_head *sh)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct async_submit_ctl submit;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		if (test_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi;
			spin_lock_irq(&sh->stripe_lock);
			dev->read = rbi = dev->toread;
			dev->toread = NULL;
			spin_unlock_irq(&sh->stripe_lock);
			while (rbi && rbi->bi_iter.bi_sector <
				dev->sector + STRIPE_SECTORS) {
				tx = async_copy_data(0, rbi, dev->page,
					dev->sector, tx);
				rbi = r5_next_bio(rbi, dev->sector);
			}
		}
	}

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
	async_trigger_callback(&submit);
}

static void mark_target_uptodate(struct stripe_head *sh, int target)
{
	struct r5dev *tgt;

	if (target < 0)
		return;

	tgt = &sh->dev[target];
	set_bit(R5_UPTODATE, &tgt->flags);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	clear_bit(R5_Wantcompute, &tgt->flags);
}

static void ops_complete_compute(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* mark the computed target(s) as uptodate */
	mark_target_uptodate(sh, sh->ops.target);
	mark_target_uptodate(sh, sh->ops.target2);

	clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
	if (sh->check_state == check_state_compute_run)
		sh->check_state = check_state_compute_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

/* return a pointer to the address conversion region of the scribble buffer */
static addr_conv_t *to_addr_conv(struct stripe_head *sh,
				 struct raid5_percpu *percpu)
{
	return percpu->scribble + sizeof(struct page *) * (sh->disks + 2);
}

static struct dma_async_tx_descriptor *
ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	int target = sh->ops.target;
	struct r5dev *tgt = &sh->dev[target];
	struct page *xor_dest = tgt->page;
	int count = 0;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	int i;

	pr_debug("%s: stripe %llu block: %d\n",
		__func__, (unsigned long long)sh->sector, target);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));

	for (i = disks; i--; )
		if (i != target)
			xor_srcs[count++] = sh->dev[i].page;

	atomic_inc(&sh->count);

	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
			  ops_complete_compute, sh, to_addr_conv(sh, percpu));
	if (unlikely(count == 1))
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
	else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

	return tx;
}

/* set_syndrome_sources - populate source buffers for gen_syndrome
 * @srcs - (struct page *) array of size sh->disks
 * @sh - stripe_head to parse
 *
 * Populates srcs in proper layout order for the stripe and returns the
 * 'count' of sources to be used in a call to async_gen_syndrome.  The P
 * destination buffer is recorded in srcs[count] and the Q destination
 * is recorded in srcs[count+1].
 */
static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
{
	int disks = sh->disks;
	int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
	int d0_idx = raid6_d0(sh);
	int count;
	int i;

	for (i = 0; i < disks; i++)
		srcs[i] = NULL;

	count = 0;
	i = d0_idx;
	do {
		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

		srcs[slot] = sh->dev[i].page;
		i = raid6_next_disk(i, disks);
	} while (i != d0_idx);

	return syndrome_disks;
}

static struct dma_async_tx_descriptor *
ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	struct page **blocks = percpu->scribble;
	int target;
	int qd_idx = sh->qd_idx;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	struct r5dev *tgt;
	struct page *dest;
	int i;
	int count;

	if (sh->ops.target < 0)
		target = sh->ops.target2;
	else if (sh->ops.target2 < 0)
		target = sh->ops.target;
	else
		/* we should only have one valid target */
		BUG();
	BUG_ON(target < 0);
	pr_debug("%s: stripe %llu block: %d\n",
		__func__, (unsigned long long)sh->sector, target);

	tgt = &sh->dev[target];
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	dest = tgt->page;

	atomic_inc(&sh->count);

	if (target == qd_idx) {
		count = set_syndrome_sources(blocks, sh);
		blocks[count] = NULL; /* regenerating p is not necessary */
		BUG_ON(blocks[count+1] != dest); /* q should already be set */
		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
				  ops_complete_compute, sh,
				  to_addr_conv(sh, percpu));
		tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
	} else {
		/* Compute any data- or p-drive using XOR */
		count = 0;
		for (i = disks; i-- ; ) {
			if (i == target || i == qd_idx)
				continue;
			blocks[count++] = sh->dev[i].page;
		}

		init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
				  NULL, ops_complete_compute, sh,
				  to_addr_conv(sh, percpu));
		tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
	}

	return tx;
}

static struct dma_async_tx_descriptor *
ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int i, count, disks = sh->disks;
	int syndrome_disks = sh->ddf_layout ? disks : disks-2;
	int d0_idx = raid6_d0(sh);
	int faila = -1, failb = -1;
	int target = sh->ops.target;
	int target2 = sh->ops.target2;
	struct r5dev *tgt = &sh->dev[target];
	struct r5dev *tgt2 = &sh->dev[target2];
	struct dma_async_tx_descriptor *tx;
	struct page **blocks = percpu->scribble;
	struct async_submit_ctl submit;

	pr_debug("%s: stripe %llu block1: %d block2: %d\n",
		 __func__, (unsigned long long)sh->sector, target, target2);
	BUG_ON(target < 0 || target2 < 0);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags));

	/* we need to open-code set_syndrome_sources to handle the
	 * slot number conversion for 'faila' and 'failb'
	 */
	for (i = 0; i < disks ; i++)
		blocks[i] = NULL;
	count = 0;
	i = d0_idx;
	do {
		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

		blocks[slot] = sh->dev[i].page;

		if (i == target)
			faila = slot;
		if (i == target2)
			failb = slot;
		i = raid6_next_disk(i, disks);
	} while (i != d0_idx);

	BUG_ON(faila == failb);
	if (failb < faila)
		swap(faila, failb);
	pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
		 __func__, (unsigned long long)sh->sector, faila, failb);

	atomic_inc(&sh->count);

	if (failb == syndrome_disks+1) {
		/* Q disk is one of the missing disks */
		if (faila == syndrome_disks) {
			/* Missing P+Q, just recompute */
			init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
					  ops_complete_compute, sh,
					  to_addr_conv(sh, percpu));
			return async_gen_syndrome(blocks, 0, syndrome_disks+2,
						  STRIPE_SIZE, &submit);
		} else {
			struct page *dest;
			int data_target;
			int qd_idx = sh->qd_idx;

			/* Missing D+Q: recompute D from P, then recompute Q */
			if (target == qd_idx)
				data_target = target2;
			else
				data_target = target;

			count = 0;
			for (i = disks; i-- ; ) {
				if (i == data_target || i == qd_idx)
					continue;
				blocks[count++] = sh->dev[i].page;
			}
			dest = sh->dev[data_target].page;
			init_async_submit(&submit,
					  ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
					  NULL, NULL, NULL,
					  to_addr_conv(sh, percpu));
			tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE,
				       &submit);

			count = set_syndrome_sources(blocks, sh);
			init_async_submit(&submit, ASYNC_TX_FENCE, tx,
					  ops_complete_compute, sh,
					  to_addr_conv(sh, percpu));
			return async_gen_syndrome(blocks, 0, count+2,
						  STRIPE_SIZE, &submit);
		}
	} else {
		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
				  ops_complete_compute, sh,
				  to_addr_conv(sh, percpu));
		if (failb == syndrome_disks) {
			/* We're missing D+P. */
			return async_raid6_datap_recov(syndrome_disks+2,
						       STRIPE_SIZE, faila,
						       blocks, &submit);
		} else {
			/* We're missing D+D.
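			 * Both failed slots hold data blocks, so recover
			 * them together from P and Q.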
			 */
			return async_raid6_2data_recov(syndrome_disks+2,
						       STRIPE_SIZE, faila, failb,
						       blocks, &submit);
		}
	}
}

static void ops_complete_prexor(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);
}

static struct dma_async_tx_descriptor *
ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu,
	       struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	int count = 0, pd_idx = sh->pd_idx, i;
	struct async_submit_ctl submit;

	/* existing parity data subtracted */
	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		/* Only process blocks that are known to be uptodate */
		if (test_bit(R5_Wantdrain, &dev->flags))
			xor_srcs[count++] = dev->page;
	}

	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
			  ops_complete_prexor, sh, to_addr_conv(sh, percpu));
	tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

	return tx;
}

static struct dma_async_tx_descriptor *
ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		struct bio *chosen;

		if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
			struct bio *wbi;

			spin_lock_irq(&sh->stripe_lock);
			chosen = dev->towrite;
			dev->towrite = NULL;
			BUG_ON(dev->written);
			wbi = dev->written = chosen;
			spin_unlock_irq(&sh->stripe_lock);

			while (wbi && wbi->bi_iter.bi_sector <
				dev->sector + STRIPE_SECTORS) {
				if (wbi->bi_rw & REQ_FUA)
					set_bit(R5_WantFUA, &dev->flags);
				if (wbi->bi_rw & REQ_SYNC)
					set_bit(R5_SyncIO, &dev->flags);
				if (wbi->bi_rw & REQ_DISCARD)
					set_bit(R5_Discard, &dev->flags);
				else
					tx = async_copy_data(1, wbi, dev->page,
						dev->sector, tx);
				wbi = r5_next_bio(wbi, dev->sector);
			}
		}
	}

	return tx;
}

static void ops_complete_reconstruct(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	int disks = sh->disks;
	int pd_idx = sh->pd_idx;
	int qd_idx = sh->qd_idx;
	int i;
	bool fua = false, sync = false, discard = false;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		fua |= test_bit(R5_WantFUA, &sh->dev[i].flags);
		sync |= test_bit(R5_SyncIO, &sh->dev[i].flags);
		discard |= test_bit(R5_Discard, &sh->dev[i].flags);
	}

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->written || i == pd_idx || i == qd_idx) {
			if (!discard)
				set_bit(R5_UPTODATE, &dev->flags);
			if (fua)
				set_bit(R5_WantFUA, &dev->flags);
			if (sync)
				set_bit(R5_SyncIO, &dev->flags);
		}
	}

	if (sh->reconstruct_state == reconstruct_state_drain_run)
		sh->reconstruct_state = reconstruct_state_drain_result;
	else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
		sh->reconstruct_state = reconstruct_state_prexor_drain_result;
	else {
		BUG_ON(sh->reconstruct_state != reconstruct_state_run);
		sh->reconstruct_state = reconstruct_state_result;
	}

	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static void
ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
		     struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	struct async_submit_ctl submit;
	int count = 0, pd_idx = sh->pd_idx, i;
	struct page *xor_dest;
	int prexor = 0;
	unsigned long flags;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = 0; i < sh->disks; i++) {
		if (pd_idx == i)
			continue;
		if (!test_bit(R5_Discard, &sh->dev[i].flags))
			break;
	}
	if (i >= sh->disks) {
		atomic_inc(&sh->count);
		set_bit(R5_Discard, &sh->dev[pd_idx].flags);
		ops_complete_reconstruct(sh);
		return;
	}
	/* check if prexor is active which means only process blocks
	 * that are part of a read-modify-write (written)
	 */
	if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
		prexor = 1;
		xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (dev->written)
				xor_srcs[count++] = dev->page;
		}
	} else {
		xor_dest = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (i != pd_idx)
				xor_srcs[count++] = dev->page;
		}
	}

	/* 1/ if we prexor'd then the dest is reused as a source
	 * 2/ if we did not prexor then we are redoing the parity
	 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
	 * for the synchronous xor case
	 */
	flags = ASYNC_TX_ACK |
		(prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);

	atomic_inc(&sh->count);

	init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh,
			  to_addr_conv(sh, percpu));
	if (unlikely(count == 1))
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
	else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
}

static void
ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
		     struct dma_async_tx_descriptor *tx)
{
	struct async_submit_ctl submit;
	struct page **blocks = percpu->scribble;
	int count, i;

	pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);

	for (i = 0; i < sh->disks; i++) {
		if (sh->pd_idx == i || sh->qd_idx == i)
			continue;
		if (!test_bit(R5_Discard, &sh->dev[i].flags))
			break;
	}
	if (i >= sh->disks) {
		atomic_inc(&sh->count);
		set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
		set_bit(R5_Discard, &sh->dev[sh->qd_idx].flags);
		ops_complete_reconstruct(sh);
		return;
	}

	count = set_syndrome_sources(blocks, sh);

	atomic_inc(&sh->count);

	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct,
			  sh, to_addr_conv(sh, percpu));
	async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
}

static void ops_complete_check(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	sh->check_state = check_state_check_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	int pd_idx = sh->pd_idx;
	int qd_idx = sh->qd_idx;
	struct page *xor_dest;
	struct page **xor_srcs = percpu->scribble;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	int count;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	count = 0;
	xor_dest = sh->dev[pd_idx].page;
	xor_srcs[count++] = xor_dest;
	for (i = disks; i--; ) {
		if (i == pd_idx || i == qd_idx)
			continue;
		xor_srcs[count++] = sh->dev[i].page;
	}

	init_async_submit(&submit, 0, NULL, NULL, NULL,
			  to_addr_conv(sh, percpu));
	tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
			   &sh->ops.zero_sum_result, &submit);

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL);
	tx = async_trigger_callback(&submit);
}

static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
{
	struct page **srcs = percpu->scribble;
	struct async_submit_ctl submit;
	int count;

	pr_debug("%s: stripe %llu checkp: %d\n", __func__,
		(unsigned long long)sh->sector, checkp);

	count = set_syndrome_sources(srcs, sh);
	if (!checkp)
		srcs[count] = NULL;

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
			  sh, to_addr_conv(sh, percpu));
	async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE,
			   &sh->ops.zero_sum_result, percpu->spare_page, &submit);
}

static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
{
	int overlap_clear = 0, i, disks = sh->disks;
	struct dma_async_tx_descriptor *tx = NULL;
	struct r5conf *conf = sh->raid_conf;
	int level = conf->level;
	struct raid5_percpu *percpu;
	unsigned long cpu;

	cpu = get_cpu();
	percpu = per_cpu_ptr(conf->percpu, cpu);
	if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
		ops_run_biofill(sh);
		overlap_clear++;
	}

	if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
		if (level < 6)
			tx = ops_run_compute5(sh, percpu);
		else {
			if (sh->ops.target2 < 0 || sh->ops.target < 0)
				tx = ops_run_compute6_1(sh, percpu);
			else
				tx = ops_run_compute6_2(sh, percpu);
		}
		/* terminate the chain if reconstruct is not set to be run */
		if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request))
			async_tx_ack(tx);
	}

	if (test_bit(STRIPE_OP_PREXOR, &ops_request))
		tx = ops_run_prexor(sh, percpu, tx);

	if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
		tx = ops_run_biodrain(sh, tx);
		overlap_clear++;
	}

	if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) {
		if (level < 6)
			ops_run_reconstruct5(sh, percpu, tx);
		else
			ops_run_reconstruct6(sh, percpu, tx);
	}

	if (test_bit(STRIPE_OP_CHECK, &ops_request)) {
		if (sh->check_state == check_state_run)
			ops_run_check_p(sh, percpu);
		else if (sh->check_state == check_state_run_q)
			ops_run_check_pq(sh, percpu, 0);
		else if (sh->check_state == check_state_run_pq)
			ops_run_check_pq(sh, percpu, 1);
		else
			BUG();
	}

	if (overlap_clear)
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (test_and_clear_bit(R5_Overlap, &dev->flags))
				wake_up(&sh->raid_conf->wait_for_overlap);
		}
	put_cpu();
}

static int grow_one_stripe(struct r5conf *conf, int hash)
{
	struct stripe_head *sh;
	sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL);
	if (!sh)
		return 0;

	sh->raid_conf = conf;

	spin_lock_init(&sh->stripe_lock);

	if (grow_buffers(sh)) {
		shrink_buffers(sh);
		kmem_cache_free(conf->slab_cache, sh);
		return 0;
	}
	sh->hash_lock_index = hash;
	/* we just created an active stripe so...
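	 * set its count to one, account for it in active_stripes, and let
	 * release_stripe() below move it onto the inactive list.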
	 */
	atomic_set(&sh->count, 1);
	atomic_inc(&conf->active_stripes);
	INIT_LIST_HEAD(&sh->lru);
	release_stripe(sh);
	return 1;
}

static int grow_stripes(struct r5conf *conf, int num)
{
	struct kmem_cache *sc;
	int devs = max(conf->raid_disks, conf->previous_raid_disks);
	int hash;

	if (conf->mddev->gendisk)
		sprintf(conf->cache_name[0],
			"raid%d-%s", conf->level, mdname(conf->mddev));
	else
		sprintf(conf->cache_name[0],
			"raid%d-%p", conf->level, conf->mddev);
	sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);

	conf->active_name = 0;
	sc = kmem_cache_create(conf->cache_name[conf->active_name],
			       sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return 1;
	conf->slab_cache = sc;
	conf->pool_size = devs;
	hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
	while (num--) {
		if (!grow_one_stripe(conf, hash))
			return 1;
		conf->max_nr_stripes++;
		hash = (hash + 1) % NR_STRIPE_HASH_LOCKS;
	}
	return 0;
}

/**
 * scribble_len - return the required size of the scribble region
 * @num - total number of disks in the array
 *
 * The size must be enough to contain:
 * 1/ a struct page pointer for each device in the array +2
 * 2/ room to convert each entry in (1) to its corresponding dma
 *    (dma_map_page()) or page (page_address()) address.
 *
 * Note: the +2 is for the destination buffers of the ddf/raid6 case where we
 * calculate over all devices (not just the data blocks), using zeros in place
 * of the P and Q blocks.
 */
static size_t scribble_len(int num)
{
	size_t len;

	len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2);

	return len;
}

static int resize_stripes(struct r5conf *conf, int newsize)
{
	/* Make all the stripes able to hold 'newsize' devices.
	 * New slots in each stripe get 'page' set to a new page.
	 *
	 * This happens in stages:
	 * 1/ create a new kmem_cache and allocate the required number of
	 *    stripe_heads.
	 * 2/ gather all the old stripe_heads and transfer the pages across
	 *    to the new stripe_heads.  This will have the side effect of
	 *    freezing the array as once all stripe_heads have been collected,
	 *    no IO will be possible.  Old stripe heads are freed once their
	 *    pages have been transferred over, and the old kmem_cache is
	 *    freed when all stripes are done.
	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
	 *    we simply return a failure status - no need to clean anything up.
	 * 4/ allocate new pages for the new slots in the new stripe_heads.
	 *    If this fails, we don't bother trying to shrink the
	 *    stripe_heads down again, we just leave them as they are.
	 *    As each stripe_head is processed the new one is released into
	 *    active service.
	 *
	 * Once step2 is started, we cannot afford to wait for a write,
	 * so we use GFP_NOIO allocations.
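	 * (An allocation that could enter the I/O path might block on
	 * writeback to this same, now-frozen array.)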
	 */
	struct stripe_head *osh, *nsh;
	LIST_HEAD(newstripes);
	struct disk_info *ndisks;
	unsigned long cpu;
	int err;
	struct kmem_cache *sc;
	int i;
	int hash, cnt;

	if (newsize <= conf->pool_size)
		return 0; /* never bother to shrink */

	err = md_allow_write(conf->mddev);
	if (err)
		return err;

	/* Step 1 */
	sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
			       sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return -ENOMEM;

	for (i = conf->max_nr_stripes; i; i--) {
		nsh = kmem_cache_zalloc(sc, GFP_KERNEL);
		if (!nsh)
			break;

		nsh->raid_conf = conf;
		spin_lock_init(&nsh->stripe_lock);

		list_add(&nsh->lru, &newstripes);
	}
	if (i) {
		/* didn't get enough, give up */
		while (!list_empty(&newstripes)) {
			nsh = list_entry(newstripes.next, struct stripe_head, lru);
			list_del(&nsh->lru);
			kmem_cache_free(sc, nsh);
		}
		kmem_cache_destroy(sc);
		return -ENOMEM;
	}
	/* Step 2 - Must use GFP_NOIO now.
	 * OK, we have enough stripes, start collecting inactive
	 * stripes and copying them over
	 */
	hash = 0;
	cnt = 0;
	list_for_each_entry(nsh, &newstripes, lru) {
		lock_device_hash_lock(conf, hash);
		wait_event_cmd(conf->wait_for_stripe,
				    !list_empty(conf->inactive_list + hash),
				    unlock_device_hash_lock(conf, hash),
				    lock_device_hash_lock(conf, hash));
		osh = get_free_stripe(conf, hash);
		unlock_device_hash_lock(conf, hash);
		atomic_set(&nsh->count, 1);
		for(i=0; i<conf->pool_size; i++)
			nsh->dev[i].page = osh->dev[i].page;
		for( ; i<newsize; i++)
			nsh->dev[i].page = NULL;
		nsh->hash_lock_index = hash;
		kmem_cache_free(conf->slab_cache, osh);
		cnt++;
		if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS +
		    !!((conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS) > hash)) {
			hash++;
			cnt = 0;
		}
	}
	kmem_cache_destroy(conf->slab_cache);

	/* Step 3.
	 * At this point, we are holding all the stripes so the array
	 * is completely stalled, so now is a good time to resize
	 * conf->disks and the scribble region
	 */
	ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
	if (ndisks) {
		for (i=0; i<conf->raid_disks; i++)
			ndisks[i] = conf->disks[i];
		kfree(conf->disks);
		conf->disks = ndisks;
	} else
		err = -ENOMEM;

	get_online_cpus();
	conf->scribble_len = scribble_len(newsize);
	for_each_present_cpu(cpu) {
		struct raid5_percpu *percpu;
		void *scribble;

		percpu = per_cpu_ptr(conf->percpu, cpu);
		scribble = kmalloc(conf->scribble_len, GFP_NOIO);

		if (scribble) {
			kfree(percpu->scribble);
			percpu->scribble = scribble;
		} else {
			err = -ENOMEM;
			break;
		}
	}
	put_online_cpus();

	/* Step 4, return new stripes to service */
	while(!list_empty(&newstripes)) {
		nsh = list_entry(newstripes.next, struct stripe_head, lru);
		list_del_init(&nsh->lru);

		for (i=conf->raid_disks; i < newsize; i++)
			if (nsh->dev[i].page == NULL) {
				struct page *p = alloc_page(GFP_NOIO);
				nsh->dev[i].page = p;
				if (!p)
					err = -ENOMEM;
			}
		release_stripe(nsh);
	}
	/* critical section passed, GFP_NOIO no longer needed */

	conf->slab_cache = sc;
	conf->active_name = 1-conf->active_name;
	conf->pool_size = newsize;
	return err;
}

static int drop_one_stripe(struct r5conf *conf, int hash)
{
	struct stripe_head *sh;

	spin_lock_irq(conf->hash_locks + hash);
	sh = get_free_stripe(conf, hash);
	spin_unlock_irq(conf->hash_locks + hash);
	if (!sh)
		return 0;
	BUG_ON(atomic_read(&sh->count));
	shrink_buffers(sh);
	kmem_cache_free(conf->slab_cache, sh);
	atomic_dec(&conf->active_stripes);
	return 1;
}

static void shrink_stripes(struct r5conf *conf)
{
	int hash;
	for (hash = 0; hash < NR_STRIPE_HASH_LOCKS; hash++)
		while (drop_one_stripe(conf, hash))
			;

	if (conf->slab_cache)
		kmem_cache_destroy(conf->slab_cache);
	conf->slab_cache = NULL;
}

static void raid5_end_read_request(struct bio * bi, int error)
{
	struct stripe_head *sh = bi->bi_private;
	struct r5conf *conf = sh->raid_conf;
	int disks = sh->disks, i;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
	char b[BDEVNAME_SIZE];
	struct md_rdev *rdev = NULL;
	sector_t s;

	for (i=0 ; i<disks; i++)
		if (bi == &sh->dev[i].req)
			break;

	pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return;
	}
	if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
		/* If replacement finished while this request was outstanding,
		 * 'replacement' might be NULL already.
		 * In that case it moved down to 'rdev'.
		 * rdev is not removed until all requests are finished.
		 */
		rdev = conf->disks[i].replacement;
	if (!rdev)
		rdev = conf->disks[i].rdev;

	if (use_new_offset(conf, sh))
		s = sh->sector + rdev->new_data_offset;
	else
		s = sh->sector + rdev->data_offset;
	if (uptodate) {
		set_bit(R5_UPTODATE, &sh->dev[i].flags);
		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
			/* Note that this cannot happen on a
			 * replacement device.  We just fail those on
We just fail those on 1985 * any error 1986 */ 1987 printk_ratelimited( 1988 KERN_INFO 1989 "md/raid:%s: read error corrected" 1990 " (%lu sectors at %llu on %s)\n", 1991 mdname(conf->mddev), STRIPE_SECTORS, 1992 (unsigned long long)s, 1993 bdevname(rdev->bdev, b)); 1994 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); 1995 clear_bit(R5_ReadError, &sh->dev[i].flags); 1996 clear_bit(R5_ReWrite, &sh->dev[i].flags); 1997 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) 1998 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); 1999 2000 if (atomic_read(&rdev->read_errors)) 2001 atomic_set(&rdev->read_errors, 0); 2002 } else { 2003 const char *bdn = bdevname(rdev->bdev, b); 2004 int retry = 0; 2005 int set_bad = 0; 2006 2007 clear_bit(R5_UPTODATE, &sh->dev[i].flags); 2008 atomic_inc(&rdev->read_errors); 2009 if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) 2010 printk_ratelimited( 2011 KERN_WARNING 2012 "md/raid:%s: read error on replacement device " 2013 "(sector %llu on %s).\n", 2014 mdname(conf->mddev), 2015 (unsigned long long)s, 2016 bdn); 2017 else if (conf->mddev->degraded >= conf->max_degraded) { 2018 set_bad = 1; 2019 printk_ratelimited( 2020 KERN_WARNING 2021 "md/raid:%s: read error not correctable " 2022 "(sector %llu on %s).\n", 2023 mdname(conf->mddev), 2024 (unsigned long long)s, 2025 bdn); 2026 } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) { 2027 /* Oh, no!!! */ 2028 set_bad = 1; 2029 printk_ratelimited( 2030 KERN_WARNING 2031 "md/raid:%s: read error NOT corrected!! " 2032 "(sector %llu on %s).\n", 2033 mdname(conf->mddev), 2034 (unsigned long long)s, 2035 bdn); 2036 } else if (atomic_read(&rdev->read_errors) 2037 > conf->max_nr_stripes) 2038 printk(KERN_WARNING 2039 "md/raid:%s: Too many read errors, failing device %s.\n", 2040 mdname(conf->mddev), bdn); 2041 else 2042 retry = 1; 2043 if (set_bad && test_bit(In_sync, &rdev->flags) 2044 && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) 2045 retry = 1; 2046 if (retry) 2047 if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) { 2048 set_bit(R5_ReadError, &sh->dev[i].flags); 2049 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); 2050 } else 2051 set_bit(R5_ReadNoMerge, &sh->dev[i].flags); 2052 else { 2053 clear_bit(R5_ReadError, &sh->dev[i].flags); 2054 clear_bit(R5_ReWrite, &sh->dev[i].flags); 2055 if (!(set_bad 2056 && test_bit(In_sync, &rdev->flags) 2057 && rdev_set_badblocks( 2058 rdev, sh->sector, STRIPE_SECTORS, 0))) 2059 md_error(conf->mddev, rdev); 2060 } 2061 } 2062 rdev_dec_pending(rdev, conf->mddev); 2063 clear_bit(R5_LOCKED, &sh->dev[i].flags); 2064 set_bit(STRIPE_HANDLE, &sh->state); 2065 release_stripe(sh); 2066 } 2067 2068 static void raid5_end_write_request(struct bio *bi, int error) 2069 { 2070 struct stripe_head *sh = bi->bi_private; 2071 struct r5conf *conf = sh->raid_conf; 2072 int disks = sh->disks, i; 2073 struct md_rdev *uninitialized_var(rdev); 2074 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 2075 sector_t first_bad; 2076 int bad_sectors; 2077 int replacement = 0; 2078 2079 for (i = 0 ; i < disks; i++) { 2080 if (bi == &sh->dev[i].req) { 2081 rdev = conf->disks[i].rdev; 2082 break; 2083 } 2084 if (bi == &sh->dev[i].rreq) { 2085 rdev = conf->disks[i].replacement; 2086 if (rdev) 2087 replacement = 1; 2088 else 2089 /* rdev was removed and 'replacement' 2090 * replaced it. rdev is not removed 2091 * until all requests are finished. 
2092 */ 2093 rdev = conf->disks[i].rdev; 2094 break; 2095 } 2096 } 2097 pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n", 2098 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 2099 uptodate); 2100 if (i == disks) { 2101 BUG(); 2102 return; 2103 } 2104 2105 if (replacement) { 2106 if (!uptodate) 2107 md_error(conf->mddev, rdev); 2108 else if (is_badblock(rdev, sh->sector, 2109 STRIPE_SECTORS, 2110 &first_bad, &bad_sectors)) 2111 set_bit(R5_MadeGoodRepl, &sh->dev[i].flags); 2112 } else { 2113 if (!uptodate) { 2114 set_bit(STRIPE_DEGRADED, &sh->state); 2115 set_bit(WriteErrorSeen, &rdev->flags); 2116 set_bit(R5_WriteError, &sh->dev[i].flags); 2117 if (!test_and_set_bit(WantReplacement, &rdev->flags)) 2118 set_bit(MD_RECOVERY_NEEDED, 2119 &rdev->mddev->recovery); 2120 } else if (is_badblock(rdev, sh->sector, 2121 STRIPE_SECTORS, 2122 &first_bad, &bad_sectors)) { 2123 set_bit(R5_MadeGood, &sh->dev[i].flags); 2124 if (test_bit(R5_ReadError, &sh->dev[i].flags)) 2125 /* That was a successful write so make 2126 * sure it looks like we already did 2127 * a re-write. 2128 */ 2129 set_bit(R5_ReWrite, &sh->dev[i].flags); 2130 } 2131 } 2132 rdev_dec_pending(rdev, conf->mddev); 2133 2134 if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags)) 2135 clear_bit(R5_LOCKED, &sh->dev[i].flags); 2136 set_bit(STRIPE_HANDLE, &sh->state); 2137 release_stripe(sh); 2138 } 2139 2140 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous); 2141 2142 static void raid5_build_block(struct stripe_head *sh, int i, int previous) 2143 { 2144 struct r5dev *dev = &sh->dev[i]; 2145 2146 bio_init(&dev->req); 2147 dev->req.bi_io_vec = &dev->vec; 2148 dev->req.bi_vcnt++; 2149 dev->req.bi_max_vecs++; 2150 dev->req.bi_private = sh; 2151 dev->vec.bv_page = dev->page; 2152 2153 bio_init(&dev->rreq); 2154 dev->rreq.bi_io_vec = &dev->rvec; 2155 dev->rreq.bi_vcnt++; 2156 dev->rreq.bi_max_vecs++; 2157 dev->rreq.bi_private = sh; 2158 dev->rvec.bv_page = dev->page; 2159 2160 dev->flags = 0; 2161 dev->sector = compute_blocknr(sh, i, previous); 2162 } 2163 2164 static void error(struct mddev *mddev, struct md_rdev *rdev) 2165 { 2166 char b[BDEVNAME_SIZE]; 2167 struct r5conf *conf = mddev->private; 2168 unsigned long flags; 2169 pr_debug("raid456: error called\n"); 2170 2171 spin_lock_irqsave(&conf->device_lock, flags); 2172 clear_bit(In_sync, &rdev->flags); 2173 mddev->degraded = calc_degraded(conf); 2174 spin_unlock_irqrestore(&conf->device_lock, flags); 2175 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 2176 2177 set_bit(Blocked, &rdev->flags); 2178 set_bit(Faulty, &rdev->flags); 2179 set_bit(MD_CHANGE_DEVS, &mddev->flags); 2180 printk(KERN_ALERT 2181 "md/raid:%s: Disk failure on %s, disabling device.\n" 2182 "md/raid:%s: Operation continuing on %d devices.\n", 2183 mdname(mddev), 2184 bdevname(rdev->bdev, b), 2185 mdname(mddev), 2186 conf->raid_disks - mddev->degraded); 2187 } 2188 2189 /* 2190 * Input: a 'big' sector number, 2191 * Output: index of the data and parity disk, and the sector # in them. 2192 */ 2193 static sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector, 2194 int previous, int *dd_idx, 2195 struct stripe_head *sh) 2196 { 2197 sector_t stripe, stripe2; 2198 sector_t chunk_number; 2199 unsigned int chunk_offset; 2200 int pd_idx, qd_idx; 2201 int ddf_layout = 0; 2202 sector_t new_sector; 2203 int algorithm = previous ? conf->prev_algo 2204 : conf->algorithm; 2205 int sectors_per_chunk = previous ? 
conf->prev_chunk_sectors 2206 : conf->chunk_sectors; 2207 int raid_disks = previous ? conf->previous_raid_disks 2208 : conf->raid_disks; 2209 int data_disks = raid_disks - conf->max_degraded; 2210 2211 /* First compute the information on this sector */ 2212 2213 /* 2214 * Compute the chunk number and the sector offset inside the chunk 2215 */ 2216 chunk_offset = sector_div(r_sector, sectors_per_chunk); 2217 chunk_number = r_sector; 2218 2219 /* 2220 * Compute the stripe number 2221 */ 2222 stripe = chunk_number; 2223 *dd_idx = sector_div(stripe, data_disks); 2224 stripe2 = stripe; 2225 /* 2226 * Select the parity disk based on the user selected algorithm. 2227 */ 2228 pd_idx = qd_idx = -1; 2229 switch(conf->level) { 2230 case 4: 2231 pd_idx = data_disks; 2232 break; 2233 case 5: 2234 switch (algorithm) { 2235 case ALGORITHM_LEFT_ASYMMETRIC: 2236 pd_idx = data_disks - sector_div(stripe2, raid_disks); 2237 if (*dd_idx >= pd_idx) 2238 (*dd_idx)++; 2239 break; 2240 case ALGORITHM_RIGHT_ASYMMETRIC: 2241 pd_idx = sector_div(stripe2, raid_disks); 2242 if (*dd_idx >= pd_idx) 2243 (*dd_idx)++; 2244 break; 2245 case ALGORITHM_LEFT_SYMMETRIC: 2246 pd_idx = data_disks - sector_div(stripe2, raid_disks); 2247 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 2248 break; 2249 case ALGORITHM_RIGHT_SYMMETRIC: 2250 pd_idx = sector_div(stripe2, raid_disks); 2251 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 2252 break; 2253 case ALGORITHM_PARITY_0: 2254 pd_idx = 0; 2255 (*dd_idx)++; 2256 break; 2257 case ALGORITHM_PARITY_N: 2258 pd_idx = data_disks; 2259 break; 2260 default: 2261 BUG(); 2262 } 2263 break; 2264 case 6: 2265 2266 switch (algorithm) { 2267 case ALGORITHM_LEFT_ASYMMETRIC: 2268 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 2269 qd_idx = pd_idx + 1; 2270 if (pd_idx == raid_disks-1) { 2271 (*dd_idx)++; /* Q D D D P */ 2272 qd_idx = 0; 2273 } else if (*dd_idx >= pd_idx) 2274 (*dd_idx) += 2; /* D D P Q D */ 2275 break; 2276 case ALGORITHM_RIGHT_ASYMMETRIC: 2277 pd_idx = sector_div(stripe2, raid_disks); 2278 qd_idx = pd_idx + 1; 2279 if (pd_idx == raid_disks-1) { 2280 (*dd_idx)++; /* Q D D D P */ 2281 qd_idx = 0; 2282 } else if (*dd_idx >= pd_idx) 2283 (*dd_idx) += 2; /* D D P Q D */ 2284 break; 2285 case ALGORITHM_LEFT_SYMMETRIC: 2286 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 2287 qd_idx = (pd_idx + 1) % raid_disks; 2288 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; 2289 break; 2290 case ALGORITHM_RIGHT_SYMMETRIC: 2291 pd_idx = sector_div(stripe2, raid_disks); 2292 qd_idx = (pd_idx + 1) % raid_disks; 2293 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; 2294 break; 2295 2296 case ALGORITHM_PARITY_0: 2297 pd_idx = 0; 2298 qd_idx = 1; 2299 (*dd_idx) += 2; 2300 break; 2301 case ALGORITHM_PARITY_N: 2302 pd_idx = data_disks; 2303 qd_idx = data_disks + 1; 2304 break; 2305 2306 case ALGORITHM_ROTATING_ZERO_RESTART: 2307 /* Exactly the same as RIGHT_ASYMMETRIC, but order 2308 * of blocks for computing Q is different.
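 * As an illustration of the shared RIGHT_ASYMMETRIC arithmetic
 * used below (assumed example values, not code): with
 * raid_disks == 5 and stripe2 == 7, sector_div() yields
 * pd_idx = 7 % 5 = 2 and qd_idx = 3; a logical *dd_idx of 2
 * satisfies *dd_idx >= pd_idx and is shifted by two to
 * physical slot 4, giving the  D D P Q D  layout noted above.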
2309 */ 2310 pd_idx = sector_div(stripe2, raid_disks); 2311 qd_idx = pd_idx + 1; 2312 if (pd_idx == raid_disks-1) { 2313 (*dd_idx)++; /* Q D D D P */ 2314 qd_idx = 0; 2315 } else if (*dd_idx >= pd_idx) 2316 (*dd_idx) += 2; /* D D P Q D */ 2317 ddf_layout = 1; 2318 break; 2319 2320 case ALGORITHM_ROTATING_N_RESTART: 2321 /* Same as left_asymmetric, but first stripe is 2322 * D D D P Q rather than 2323 * Q D D D P 2324 */ 2325 stripe2 += 1; 2326 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 2327 qd_idx = pd_idx + 1; 2328 if (pd_idx == raid_disks-1) { 2329 (*dd_idx)++; /* Q D D D P */ 2330 qd_idx = 0; 2331 } else if (*dd_idx >= pd_idx) 2332 (*dd_idx) += 2; /* D D P Q D */ 2333 ddf_layout = 1; 2334 break; 2335 2336 case ALGORITHM_ROTATING_N_CONTINUE: 2337 /* Same as left_symmetric but Q is before P */ 2338 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 2339 qd_idx = (pd_idx + raid_disks - 1) % raid_disks; 2340 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 2341 ddf_layout = 1; 2342 break; 2343 2344 case ALGORITHM_LEFT_ASYMMETRIC_6: 2345 /* RAID5 left_asymmetric, with Q on last device */ 2346 pd_idx = data_disks - sector_div(stripe2, raid_disks-1); 2347 if (*dd_idx >= pd_idx) 2348 (*dd_idx)++; 2349 qd_idx = raid_disks - 1; 2350 break; 2351 2352 case ALGORITHM_RIGHT_ASYMMETRIC_6: 2353 pd_idx = sector_div(stripe2, raid_disks-1); 2354 if (*dd_idx >= pd_idx) 2355 (*dd_idx)++; 2356 qd_idx = raid_disks - 1; 2357 break; 2358 2359 case ALGORITHM_LEFT_SYMMETRIC_6: 2360 pd_idx = data_disks - sector_div(stripe2, raid_disks-1); 2361 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); 2362 qd_idx = raid_disks - 1; 2363 break; 2364 2365 case ALGORITHM_RIGHT_SYMMETRIC_6: 2366 pd_idx = sector_div(stripe2, raid_disks-1); 2367 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); 2368 qd_idx = raid_disks - 1; 2369 break; 2370 2371 case ALGORITHM_PARITY_0_6: 2372 pd_idx = 0; 2373 (*dd_idx)++; 2374 qd_idx = raid_disks - 1; 2375 break; 2376 2377 default: 2378 BUG(); 2379 } 2380 break; 2381 } 2382 2383 if (sh) { 2384 sh->pd_idx = pd_idx; 2385 sh->qd_idx = qd_idx; 2386 sh->ddf_layout = ddf_layout; 2387 } 2388 /* 2389 * Finally, compute the new sector number 2390 */ 2391 new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset; 2392 return new_sector; 2393 } 2394 2395 2396 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) 2397 { 2398 struct r5conf *conf = sh->raid_conf; 2399 int raid_disks = sh->disks; 2400 int data_disks = raid_disks - conf->max_degraded; 2401 sector_t new_sector = sh->sector, check; 2402 int sectors_per_chunk = previous ? conf->prev_chunk_sectors 2403 : conf->chunk_sectors; 2404 int algorithm = previous ?
conf->prev_algo 2405 : conf->algorithm; 2406 sector_t stripe; 2407 int chunk_offset; 2408 sector_t chunk_number; 2409 int dummy1, dd_idx = i; 2410 sector_t r_sector; 2411 struct stripe_head sh2; 2412 2413 2414 chunk_offset = sector_div(new_sector, sectors_per_chunk); 2415 stripe = new_sector; 2416 2417 if (i == sh->pd_idx) 2418 return 0; 2419 switch(conf->level) { 2420 case 4: break; 2421 case 5: 2422 switch (algorithm) { 2423 case ALGORITHM_LEFT_ASYMMETRIC: 2424 case ALGORITHM_RIGHT_ASYMMETRIC: 2425 if (i > sh->pd_idx) 2426 i--; 2427 break; 2428 case ALGORITHM_LEFT_SYMMETRIC: 2429 case ALGORITHM_RIGHT_SYMMETRIC: 2430 if (i < sh->pd_idx) 2431 i += raid_disks; 2432 i -= (sh->pd_idx + 1); 2433 break; 2434 case ALGORITHM_PARITY_0: 2435 i -= 1; 2436 break; 2437 case ALGORITHM_PARITY_N: 2438 break; 2439 default: 2440 BUG(); 2441 } 2442 break; 2443 case 6: 2444 if (i == sh->qd_idx) 2445 return 0; /* It is the Q disk */ 2446 switch (algorithm) { 2447 case ALGORITHM_LEFT_ASYMMETRIC: 2448 case ALGORITHM_RIGHT_ASYMMETRIC: 2449 case ALGORITHM_ROTATING_ZERO_RESTART: 2450 case ALGORITHM_ROTATING_N_RESTART: 2451 if (sh->pd_idx == raid_disks-1) 2452 i--; /* Q D D D P */ 2453 else if (i > sh->pd_idx) 2454 i -= 2; /* D D P Q D */ 2455 break; 2456 case ALGORITHM_LEFT_SYMMETRIC: 2457 case ALGORITHM_RIGHT_SYMMETRIC: 2458 if (sh->pd_idx == raid_disks-1) 2459 i--; /* Q D D D P */ 2460 else { 2461 /* D D P Q D */ 2462 if (i < sh->pd_idx) 2463 i += raid_disks; 2464 i -= (sh->pd_idx + 2); 2465 } 2466 break; 2467 case ALGORITHM_PARITY_0: 2468 i -= 2; 2469 break; 2470 case ALGORITHM_PARITY_N: 2471 break; 2472 case ALGORITHM_ROTATING_N_CONTINUE: 2473 /* Like left_symmetric, but P is before Q */ 2474 if (sh->pd_idx == 0) 2475 i--; /* P D D D Q */ 2476 else { 2477 /* D D Q P D */ 2478 if (i < sh->pd_idx) 2479 i += raid_disks; 2480 i -= (sh->pd_idx + 1); 2481 } 2482 break; 2483 case ALGORITHM_LEFT_ASYMMETRIC_6: 2484 case ALGORITHM_RIGHT_ASYMMETRIC_6: 2485 if (i > sh->pd_idx) 2486 i--; 2487 break; 2488 case ALGORITHM_LEFT_SYMMETRIC_6: 2489 case ALGORITHM_RIGHT_SYMMETRIC_6: 2490 if (i < sh->pd_idx) 2491 i += data_disks + 1; 2492 i -= (sh->pd_idx + 1); 2493 break; 2494 case ALGORITHM_PARITY_0_6: 2495 i -= 1; 2496 break; 2497 default: 2498 BUG(); 2499 } 2500 break; 2501 } 2502 2503 chunk_number = stripe * data_disks + i; 2504 r_sector = chunk_number * sectors_per_chunk + chunk_offset; 2505 2506 check = raid5_compute_sector(conf, r_sector, 2507 previous, &dummy1, &sh2); 2508 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx 2509 || sh2.qd_idx != sh->qd_idx) { 2510 printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n", 2511 mdname(conf->mddev)); 2512 return 0; 2513 } 2514 return r_sector; 2515 } 2516 2517 2518 static void 2519 schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, 2520 int rcw, int expand) 2521 { 2522 int i, pd_idx = sh->pd_idx, disks = sh->disks; 2523 struct r5conf *conf = sh->raid_conf; 2524 int level = conf->level; 2525 2526 if (rcw) { 2527 2528 for (i = disks; i--; ) { 2529 struct r5dev *dev = &sh->dev[i]; 2530 2531 if (dev->towrite) { 2532 set_bit(R5_LOCKED, &dev->flags); 2533 set_bit(R5_Wantdrain, &dev->flags); 2534 if (!expand) 2535 clear_bit(R5_UPTODATE, &dev->flags); 2536 s->locked++; 2537 } 2538 } 2539 /* if we are not expanding this is a proper write request, and 2540 * there will be bios with new data to be drained into the 2541 * stripe cache 2542 */ 2543 if (!expand) { 2544 if (!s->locked) 2545 /* False alarm, nothing to do */ 2546 return; 
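/* (A rough sketch of the write path scheduled next, for orientation
 * rather than a definitive description: STRIPE_OP_BIODRAIN copies the
 * queued write bios into the stripe cache pages, and
 * STRIPE_OP_RECONSTRUCT then computes new parity over the full
 * stripe; both requests are serviced later by raid_run_ops().)
 */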
2547 sh->reconstruct_state = reconstruct_state_drain_run; 2548 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); 2549 } else 2550 sh->reconstruct_state = reconstruct_state_run; 2551 2552 set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); 2553 2554 if (s->locked + conf->max_degraded == disks) 2555 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) 2556 atomic_inc(&conf->pending_full_writes); 2557 } else { 2558 BUG_ON(level == 6); 2559 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || 2560 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); 2561 2562 for (i = disks; i--; ) { 2563 struct r5dev *dev = &sh->dev[i]; 2564 if (i == pd_idx) 2565 continue; 2566 2567 if (dev->towrite && 2568 (test_bit(R5_UPTODATE, &dev->flags) || 2569 test_bit(R5_Wantcompute, &dev->flags))) { 2570 set_bit(R5_Wantdrain, &dev->flags); 2571 set_bit(R5_LOCKED, &dev->flags); 2572 clear_bit(R5_UPTODATE, &dev->flags); 2573 s->locked++; 2574 } 2575 } 2576 if (!s->locked) 2577 /* False alarm - nothing to do */ 2578 return; 2579 sh->reconstruct_state = reconstruct_state_prexor_drain_run; 2580 set_bit(STRIPE_OP_PREXOR, &s->ops_request); 2581 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); 2582 set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); 2583 } 2584 2585 /* keep the parity disk(s) locked while asynchronous operations 2586 * are in flight 2587 */ 2588 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); 2589 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 2590 s->locked++; 2591 2592 if (level == 6) { 2593 int qd_idx = sh->qd_idx; 2594 struct r5dev *dev = &sh->dev[qd_idx]; 2595 2596 set_bit(R5_LOCKED, &dev->flags); 2597 clear_bit(R5_UPTODATE, &dev->flags); 2598 s->locked++; 2599 } 2600 2601 pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n", 2602 __func__, (unsigned long long)sh->sector, 2603 s->locked, s->ops_request); 2604 } 2605 2606 /* 2607 * Each stripe/dev can have one or more bios attached. 2608 * toread/towrite point to the first in a chain. 2609 * The bi_next chain must be in order. 2610 */ 2611 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite) 2612 { 2613 struct bio **bip; 2614 struct r5conf *conf = sh->raid_conf; 2615 int firstwrite=0; 2616 2617 pr_debug("adding bi b#%llu to stripe s#%llu\n", 2618 (unsigned long long)bi->bi_iter.bi_sector, 2619 (unsigned long long)sh->sector); 2620 2621 /* 2622 * If several bios share a stripe, the bio bi_phys_segments field acts 2623 * as a reference count to avoid races. The count should already have 2624 * been increased before this function is called (for example, in 2625 * make_request()), so another bio sharing this stripe will not free the 2626 * stripe while we work on it. The per-stripe lock protects the bio 2627 * lists themselves.
2628 */ 2629 spin_lock_irq(&sh->stripe_lock); 2630 if (forwrite) { 2631 bip = &sh->dev[dd_idx].towrite; 2632 if (*bip == NULL) 2633 firstwrite = 1; 2634 } else 2635 bip = &sh->dev[dd_idx].toread; 2636 while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) { 2637 if (bio_end_sector(*bip) > bi->bi_iter.bi_sector) 2638 goto overlap; 2639 bip = & (*bip)->bi_next; 2640 } 2641 if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi)) 2642 goto overlap; 2643 2644 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next); 2645 if (*bip) 2646 bi->bi_next = *bip; 2647 *bip = bi; 2648 raid5_inc_bi_active_stripes(bi); 2649 2650 if (forwrite) { 2651 /* check if page is covered */ 2652 sector_t sector = sh->dev[dd_idx].sector; 2653 for (bi=sh->dev[dd_idx].towrite; 2654 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && 2655 bi && bi->bi_iter.bi_sector <= sector; 2656 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { 2657 if (bio_end_sector(bi) >= sector) 2658 sector = bio_end_sector(bi); 2659 } 2660 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) 2661 set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags); 2662 } 2663 2664 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", 2665 (unsigned long long)(*bip)->bi_iter.bi_sector, 2666 (unsigned long long)sh->sector, dd_idx); 2667 spin_unlock_irq(&sh->stripe_lock); 2668 2669 if (conf->mddev->bitmap && firstwrite) { 2670 bitmap_startwrite(conf->mddev->bitmap, sh->sector, 2671 STRIPE_SECTORS, 0); 2672 sh->bm_seq = conf->seq_flush+1; 2673 set_bit(STRIPE_BIT_DELAY, &sh->state); 2674 } 2675 return 1; 2676 2677 overlap: 2678 set_bit(R5_Overlap, &sh->dev[dd_idx].flags); 2679 spin_unlock_irq(&sh->stripe_lock); 2680 return 0; 2681 } 2682 2683 static void end_reshape(struct r5conf *conf); 2684 2685 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, 2686 struct stripe_head *sh) 2687 { 2688 int sectors_per_chunk = 2689 previous ? conf->prev_chunk_sectors : conf->chunk_sectors; 2690 int dd_idx; 2691 int chunk_offset = sector_div(stripe, sectors_per_chunk); 2692 int disks = previous ? 
conf->previous_raid_disks : conf->raid_disks; 2693 2694 raid5_compute_sector(conf, 2695 stripe * (disks - conf->max_degraded) 2696 *sectors_per_chunk + chunk_offset, 2697 previous, 2698 &dd_idx, sh); 2699 } 2700 2701 static void 2702 handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, 2703 struct stripe_head_state *s, int disks, 2704 struct bio **return_bi) 2705 { 2706 int i; 2707 for (i = disks; i--; ) { 2708 struct bio *bi; 2709 int bitmap_end = 0; 2710 2711 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 2712 struct md_rdev *rdev; 2713 rcu_read_lock(); 2714 rdev = rcu_dereference(conf->disks[i].rdev); 2715 if (rdev && test_bit(In_sync, &rdev->flags)) 2716 atomic_inc(&rdev->nr_pending); 2717 else 2718 rdev = NULL; 2719 rcu_read_unlock(); 2720 if (rdev) { 2721 if (!rdev_set_badblocks( 2722 rdev, 2723 sh->sector, 2724 STRIPE_SECTORS, 0)) 2725 md_error(conf->mddev, rdev); 2726 rdev_dec_pending(rdev, conf->mddev); 2727 } 2728 } 2729 spin_lock_irq(&sh->stripe_lock); 2730 /* fail all writes first */ 2731 bi = sh->dev[i].towrite; 2732 sh->dev[i].towrite = NULL; 2733 spin_unlock_irq(&sh->stripe_lock); 2734 if (bi) 2735 bitmap_end = 1; 2736 2737 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 2738 wake_up(&conf->wait_for_overlap); 2739 2740 while (bi && bi->bi_iter.bi_sector < 2741 sh->dev[i].sector + STRIPE_SECTORS) { 2742 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 2743 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2744 if (!raid5_dec_bi_active_stripes(bi)) { 2745 md_write_end(conf->mddev); 2746 bi->bi_next = *return_bi; 2747 *return_bi = bi; 2748 } 2749 bi = nextbi; 2750 } 2751 if (bitmap_end) 2752 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 2753 STRIPE_SECTORS, 0, 0); 2754 bitmap_end = 0; 2755 /* and fail all 'written' */ 2756 bi = sh->dev[i].written; 2757 sh->dev[i].written = NULL; 2758 if (bi) bitmap_end = 1; 2759 while (bi && bi->bi_iter.bi_sector < 2760 sh->dev[i].sector + STRIPE_SECTORS) { 2761 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); 2762 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2763 if (!raid5_dec_bi_active_stripes(bi)) { 2764 md_write_end(conf->mddev); 2765 bi->bi_next = *return_bi; 2766 *return_bi = bi; 2767 } 2768 bi = bi2; 2769 } 2770 2771 /* fail any reads if this device is non-operational and 2772 * the data has not reached the cache yet. 
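 * (Reads whose data did reach the cache keep R5_Wantfill set and will
 * still be completed from the cached copy; only 'toread' bios on a
 * device that is not R5_Insync, or that saw R5_ReadError, are errored
 * here - see the condition just below.)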
2773 */ 2774 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && 2775 (!test_bit(R5_Insync, &sh->dev[i].flags) || 2776 test_bit(R5_ReadError, &sh->dev[i].flags))) { 2777 spin_lock_irq(&sh->stripe_lock); 2778 bi = sh->dev[i].toread; 2779 sh->dev[i].toread = NULL; 2780 spin_unlock_irq(&sh->stripe_lock); 2781 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 2782 wake_up(&conf->wait_for_overlap); 2783 while (bi && bi->bi_iter.bi_sector < 2784 sh->dev[i].sector + STRIPE_SECTORS) { 2785 struct bio *nextbi = 2786 r5_next_bio(bi, sh->dev[i].sector); 2787 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2788 if (!raid5_dec_bi_active_stripes(bi)) { 2789 bi->bi_next = *return_bi; 2790 *return_bi = bi; 2791 } 2792 bi = nextbi; 2793 } 2794 } 2795 if (bitmap_end) 2796 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 2797 STRIPE_SECTORS, 0, 0); 2798 /* If we were in the middle of a write the parity block might 2799 * still be locked - so just clear all R5_LOCKED flags 2800 */ 2801 clear_bit(R5_LOCKED, &sh->dev[i].flags); 2802 } 2803 2804 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 2805 if (atomic_dec_and_test(&conf->pending_full_writes)) 2806 md_wakeup_thread(conf->mddev->thread); 2807 } 2808 2809 static void 2810 handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, 2811 struct stripe_head_state *s) 2812 { 2813 int abort = 0; 2814 int i; 2815 2816 clear_bit(STRIPE_SYNCING, &sh->state); 2817 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) 2818 wake_up(&conf->wait_for_overlap); 2819 s->syncing = 0; 2820 s->replacing = 0; 2821 /* There is nothing more to do for sync/check/repair. 2822 * Don't even need to abort as that is handled elsewhere 2823 * if needed, and not always wanted e.g. if there is a known 2824 * bad block here. 2825 * For recover/replace we need to record a bad block on all 2826 * non-sync devices, or abort the recovery 2827 */ 2828 if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) { 2829 /* During recovery devices cannot be removed, so 2830 * locking and refcounting of rdevs is not needed 2831 */ 2832 for (i = 0; i < conf->raid_disks; i++) { 2833 struct md_rdev *rdev = conf->disks[i].rdev; 2834 if (rdev 2835 && !test_bit(Faulty, &rdev->flags) 2836 && !test_bit(In_sync, &rdev->flags) 2837 && !rdev_set_badblocks(rdev, sh->sector, 2838 STRIPE_SECTORS, 0)) 2839 abort = 1; 2840 rdev = conf->disks[i].replacement; 2841 if (rdev 2842 && !test_bit(Faulty, &rdev->flags) 2843 && !test_bit(In_sync, &rdev->flags) 2844 && !rdev_set_badblocks(rdev, sh->sector, 2845 STRIPE_SECTORS, 0)) 2846 abort = 1; 2847 } 2848 if (abort) 2849 conf->recovery_disabled = 2850 conf->mddev->recovery_disabled; 2851 } 2852 md_done_sync(conf->mddev, STRIPE_SECTORS, !abort); 2853 } 2854 2855 static int want_replace(struct stripe_head *sh, int disk_idx) 2856 { 2857 struct md_rdev *rdev; 2858 int rv = 0; 2859 /* Doing recovery so rcu locking not required */ 2860 rdev = sh->raid_conf->disks[disk_idx].replacement; 2861 if (rdev 2862 && !test_bit(Faulty, &rdev->flags) 2863 && !test_bit(In_sync, &rdev->flags) 2864 && (rdev->recovery_offset <= sh->sector 2865 || rdev->mddev->recovery_cp <= sh->sector)) 2866 rv = 1; 2867 2868 return rv; 2869 } 2870 2871 /* fetch_block - checks the given member device to see if its data needs 2872 * to be read or computed to satisfy a request. 
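 * Typical reasons to fetch a block: a bio is waiting to read it, a
 * partial (non-overwrite) write needs its old contents, a
 * sync/replace pass needs it, or it is required to reconstruct a
 * block on a failed device - see the condition in the function body
 * for the full list.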
2873 * 2874 * Returns 1 when no more member devices need to be checked, otherwise returns 2875 * 0 to tell the loop in handle_stripe_fill to continue 2876 */ 2877 static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, 2878 int disk_idx, int disks) 2879 { 2880 struct r5dev *dev = &sh->dev[disk_idx]; 2881 struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]], 2882 &sh->dev[s->failed_num[1]] }; 2883 2884 /* is the data in this block needed, and can we get it? */ 2885 if (!test_bit(R5_LOCKED, &dev->flags) && 2886 !test_bit(R5_UPTODATE, &dev->flags) && 2887 (dev->toread || 2888 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) || 2889 s->syncing || s->expanding || 2890 (s->replacing && want_replace(sh, disk_idx)) || 2891 (s->failed >= 1 && fdev[0]->toread) || 2892 (s->failed >= 2 && fdev[1]->toread) || 2893 (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite && 2894 !test_bit(R5_OVERWRITE, &fdev[0]->flags)) || 2895 (sh->raid_conf->level == 6 && s->failed && s->to_write))) { 2896 /* we would like to get this block, possibly by computing it, 2897 * otherwise read it if the backing disk is insync 2898 */ 2899 BUG_ON(test_bit(R5_Wantcompute, &dev->flags)); 2900 BUG_ON(test_bit(R5_Wantread, &dev->flags)); 2901 if ((s->uptodate == disks - 1) && 2902 (s->failed && (disk_idx == s->failed_num[0] || 2903 disk_idx == s->failed_num[1]))) { 2904 /* have disk failed, and we're requested to fetch it; 2905 * do compute it 2906 */ 2907 pr_debug("Computing stripe %llu block %d\n", 2908 (unsigned long long)sh->sector, disk_idx); 2909 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 2910 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 2911 set_bit(R5_Wantcompute, &dev->flags); 2912 sh->ops.target = disk_idx; 2913 sh->ops.target2 = -1; /* no 2nd target */ 2914 s->req_compute = 1; 2915 /* Careful: from this point on 'uptodate' is in the eye 2916 * of raid_run_ops which services 'compute' operations 2917 * before writes. R5_Wantcompute flags a block that will 2918 * be R5_UPTODATE by the time it is needed for a 2919 * subsequent operation. 2920 */ 2921 s->uptodate++; 2922 return 1; 2923 } else if (s->uptodate == disks-2 && s->failed >= 2) { 2924 /* Computing 2-failure is *very* expensive; only 2925 * do it if failed >= 2 2926 */ 2927 int other; 2928 for (other = disks; other--; ) { 2929 if (other == disk_idx) 2930 continue; 2931 if (!test_bit(R5_UPTODATE, 2932 &sh->dev[other].flags)) 2933 break; 2934 } 2935 BUG_ON(other < 0); 2936 pr_debug("Computing stripe %llu blocks %d,%d\n", 2937 (unsigned long long)sh->sector, 2938 disk_idx, other); 2939 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 2940 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 2941 set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags); 2942 set_bit(R5_Wantcompute, &sh->dev[other].flags); 2943 sh->ops.target = disk_idx; 2944 sh->ops.target2 = other; 2945 s->uptodate += 2; 2946 s->req_compute = 1; 2947 return 1; 2948 } else if (test_bit(R5_Insync, &dev->flags)) { 2949 set_bit(R5_LOCKED, &dev->flags); 2950 set_bit(R5_Wantread, &dev->flags); 2951 s->locked++; 2952 pr_debug("Reading block %d (sync=%d)\n", 2953 disk_idx, s->syncing); 2954 } 2955 } 2956 2957 return 0; 2958 } 2959 2960 /** 2961 * handle_stripe_fill - read or compute data to satisfy pending requests. 
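 *
 * Called from handle_stripe() when there are pending reads, partial
 * (non-overwrite) writes, a sync/replace pass that still needs data,
 * or an expansion; see the call site in handle_stripe() for the
 * exact set of conditions.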
2962 */ 2963 static void handle_stripe_fill(struct stripe_head *sh, 2964 struct stripe_head_state *s, 2965 int disks) 2966 { 2967 int i; 2968 2969 /* look for blocks to read/compute, skip this if a compute 2970 * is already in flight, or if the stripe contents are in the 2971 * midst of changing due to a write 2972 */ 2973 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && 2974 !sh->reconstruct_state) 2975 for (i = disks; i--; ) 2976 if (fetch_block(sh, s, i, disks)) 2977 break; 2978 set_bit(STRIPE_HANDLE, &sh->state); 2979 } 2980 2981 2982 /* handle_stripe_clean_event 2983 * any written block on an uptodate or failed drive can be returned. 2984 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but 2985 * never LOCKED, so we don't need to test 'failed' directly. 2986 */ 2987 static void handle_stripe_clean_event(struct r5conf *conf, 2988 struct stripe_head *sh, int disks, struct bio **return_bi) 2989 { 2990 int i; 2991 struct r5dev *dev; 2992 int discard_pending = 0; 2993 2994 for (i = disks; i--; ) 2995 if (sh->dev[i].written) { 2996 dev = &sh->dev[i]; 2997 if (!test_bit(R5_LOCKED, &dev->flags) && 2998 (test_bit(R5_UPTODATE, &dev->flags) || 2999 test_bit(R5_Discard, &dev->flags))) { 3000 /* We can return any write requests */ 3001 struct bio *wbi, *wbi2; 3002 pr_debug("Return write for disc %d\n", i); 3003 if (test_and_clear_bit(R5_Discard, &dev->flags)) 3004 clear_bit(R5_UPTODATE, &dev->flags); 3005 wbi = dev->written; 3006 dev->written = NULL; 3007 while (wbi && wbi->bi_iter.bi_sector < 3008 dev->sector + STRIPE_SECTORS) { 3009 wbi2 = r5_next_bio(wbi, dev->sector); 3010 if (!raid5_dec_bi_active_stripes(wbi)) { 3011 md_write_end(conf->mddev); 3012 wbi->bi_next = *return_bi; 3013 *return_bi = wbi; 3014 } 3015 wbi = wbi2; 3016 } 3017 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 3018 STRIPE_SECTORS, 3019 !test_bit(STRIPE_DEGRADED, &sh->state), 3020 0); 3021 } else if (test_bit(R5_Discard, &dev->flags)) 3022 discard_pending = 1; 3023 } 3024 if (!discard_pending && 3025 test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) { 3026 clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); 3027 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); 3028 if (sh->qd_idx >= 0) { 3029 clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); 3030 clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags); 3031 } 3032 /* now that discard is done we can proceed with any sync */ 3033 clear_bit(STRIPE_DISCARD, &sh->state); 3034 /* 3035 * SCSI discard will change some bio fields and the stripe has 3036 * no updated data, so remove it from hash list and the stripe 3037 * will be reinitialized 3038 */ 3039 spin_lock_irq(&conf->device_lock); 3040 remove_hash(sh); 3041 spin_unlock_irq(&conf->device_lock); 3042 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) 3043 set_bit(STRIPE_HANDLE, &sh->state); 3044 3045 } 3046 3047 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 3048 if (atomic_dec_and_test(&conf->pending_full_writes)) 3049 md_wakeup_thread(conf->mddev->thread); 3050 } 3051 3052 static void handle_stripe_dirtying(struct r5conf *conf, 3053 struct stripe_head *sh, 3054 struct stripe_head_state *s, 3055 int disks) 3056 { 3057 int rmw = 0, rcw = 0, i; 3058 sector_t recovery_cp = conf->mddev->recovery_cp; 3059 3060 /* RAID6 requires 'rcw' in current implementation. 3061 * Otherwise, check whether resync is now happening or should start. 3062 * If yes, then the array is dirty (after unclean shutdown or 3063 * initial creation), so parity in some stripes might be inconsistent. 
3064 * In this case, we need to always do reconstruct-write, to ensure 3065 * that in case of drive failure or read-error correction, we 3066 * generate correct data from the parity. 3067 */ 3068 if (conf->max_degraded == 2 || 3069 (recovery_cp < MaxSector && sh->sector >= recovery_cp)) { 3070 /* Calculate the real rcw later - for now make it 3071 * look like rcw is cheaper 3072 */ 3073 rcw = 1; rmw = 2; 3074 pr_debug("force RCW max_degraded=%u, recovery_cp=%llu sh->sector=%llu\n", 3075 conf->max_degraded, (unsigned long long)recovery_cp, 3076 (unsigned long long)sh->sector); 3077 } else for (i = disks; i--; ) { 3078 /* would I have to read this buffer for read_modify_write */ 3079 struct r5dev *dev = &sh->dev[i]; 3080 if ((dev->towrite || i == sh->pd_idx) && 3081 !test_bit(R5_LOCKED, &dev->flags) && 3082 !(test_bit(R5_UPTODATE, &dev->flags) || 3083 test_bit(R5_Wantcompute, &dev->flags))) { 3084 if (test_bit(R5_Insync, &dev->flags)) 3085 rmw++; 3086 else 3087 rmw += 2*disks; /* cannot read it */ 3088 } 3089 /* Would I have to read this buffer for reconstruct_write */ 3090 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx && 3091 !test_bit(R5_LOCKED, &dev->flags) && 3092 !(test_bit(R5_UPTODATE, &dev->flags) || 3093 test_bit(R5_Wantcompute, &dev->flags))) { 3094 if (test_bit(R5_Insync, &dev->flags)) rcw++; 3095 else 3096 rcw += 2*disks; 3097 } 3098 } 3099 pr_debug("for sector %llu, rmw=%d rcw=%d\n", 3100 (unsigned long long)sh->sector, rmw, rcw); 3101 set_bit(STRIPE_HANDLE, &sh->state); 3102 if (rmw < rcw && rmw > 0) { 3103 /* prefer read-modify-write, but need to get some data */ 3104 if (conf->mddev->queue) 3105 blk_add_trace_msg(conf->mddev->queue, 3106 "raid5 rmw %llu %d", 3107 (unsigned long long)sh->sector, rmw); 3108 for (i = disks; i--; ) { 3109 struct r5dev *dev = &sh->dev[i]; 3110 if ((dev->towrite || i == sh->pd_idx) && 3111 !test_bit(R5_LOCKED, &dev->flags) && 3112 !(test_bit(R5_UPTODATE, &dev->flags) || 3113 test_bit(R5_Wantcompute, &dev->flags)) && 3114 test_bit(R5_Insync, &dev->flags)) { 3115 if ( 3116 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 3117 pr_debug("Read_old block " 3118 "%d for r-m-w\n", i); 3119 set_bit(R5_LOCKED, &dev->flags); 3120 set_bit(R5_Wantread, &dev->flags); 3121 s->locked++; 3122 } else { 3123 set_bit(STRIPE_DELAYED, &sh->state); 3124 set_bit(STRIPE_HANDLE, &sh->state); 3125 } 3126 } 3127 } 3128 } 3129 if (rcw <= rmw && rcw > 0) { 3130 /* want reconstruct write, but need to get some data */ 3131 int qread =0; 3132 rcw = 0; 3133 for (i = disks; i--; ) { 3134 struct r5dev *dev = &sh->dev[i]; 3135 if (!test_bit(R5_OVERWRITE, &dev->flags) && 3136 i != sh->pd_idx && i != sh->qd_idx && 3137 !test_bit(R5_LOCKED, &dev->flags) && 3138 !(test_bit(R5_UPTODATE, &dev->flags) || 3139 test_bit(R5_Wantcompute, &dev->flags))) { 3140 rcw++; 3141 if (!test_bit(R5_Insync, &dev->flags)) 3142 continue; /* it's a failed drive */ 3143 if ( 3144 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 3145 pr_debug("Read_old block " 3146 "%d for Reconstruct\n", i); 3147 set_bit(R5_LOCKED, &dev->flags); 3148 set_bit(R5_Wantread, &dev->flags); 3149 s->locked++; 3150 qread++; 3151 } else { 3152 set_bit(STRIPE_DELAYED, &sh->state); 3153 set_bit(STRIPE_HANDLE, &sh->state); 3154 } 3155 } 3156 } 3157 if (rcw && conf->mddev->queue) 3158 blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d", 3159 (unsigned long long)sh->sector, 3160 rcw, qread, test_bit(STRIPE_DELAYED, &sh->state)); 3161 } 3162 /* now if nothing is locked, and if we have enough data, 3163 * we can start a 
write request 3164 */ 3165 /* since handle_stripe can be called at any time we need to handle the 3166 * case where a compute block operation has been submitted and then a 3167 * subsequent call wants to start a write request. raid_run_ops only 3168 * handles the case where compute block and reconstruct are requested 3169 * simultaneously. If this is not the case then new writes need to be 3170 * held off until the compute completes. 3171 */ 3172 if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && 3173 (s->locked == 0 && (rcw == 0 || rmw == 0) && 3174 !test_bit(STRIPE_BIT_DELAY, &sh->state))) 3175 schedule_reconstruction(sh, s, rcw == 0, 0); 3176 } 3177 3178 static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, 3179 struct stripe_head_state *s, int disks) 3180 { 3181 struct r5dev *dev = NULL; 3182 3183 set_bit(STRIPE_HANDLE, &sh->state); 3184 3185 switch (sh->check_state) { 3186 case check_state_idle: 3187 /* start a new check operation if there are no failures */ 3188 if (s->failed == 0) { 3189 BUG_ON(s->uptodate != disks); 3190 sh->check_state = check_state_run; 3191 set_bit(STRIPE_OP_CHECK, &s->ops_request); 3192 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); 3193 s->uptodate--; 3194 break; 3195 } 3196 dev = &sh->dev[s->failed_num[0]]; 3197 /* fall through */ 3198 case check_state_compute_result: 3199 sh->check_state = check_state_idle; 3200 if (!dev) 3201 dev = &sh->dev[sh->pd_idx]; 3202 3203 /* check that a write has not made the stripe insync */ 3204 if (test_bit(STRIPE_INSYNC, &sh->state)) 3205 break; 3206 3207 /* either failed parity check, or recovery is happening */ 3208 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags)); 3209 BUG_ON(s->uptodate != disks); 3210 3211 set_bit(R5_LOCKED, &dev->flags); 3212 s->locked++; 3213 set_bit(R5_Wantwrite, &dev->flags); 3214 3215 clear_bit(STRIPE_DEGRADED, &sh->state); 3216 set_bit(STRIPE_INSYNC, &sh->state); 3217 break; 3218 case check_state_run: 3219 break; /* we will be called again upon completion */ 3220 case check_state_check_result: 3221 sh->check_state = check_state_idle; 3222 3223 /* if a failure occurred during the check operation, leave 3224 * STRIPE_INSYNC not set and let the stripe be handled again 3225 */ 3226 if (s->failed) 3227 break; 3228 3229 /* handle a successful check operation, if parity is correct 3230 * we are done. Otherwise update the mismatch count and repair 3231 * parity if !MD_RECOVERY_CHECK 3232 */ 3233 if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0) 3234 /* parity is correct (on disc, 3235 * not in buffer any more) 3236 */ 3237 set_bit(STRIPE_INSYNC, &sh->state); 3238 else { 3239 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); 3240 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 3241 /* don't try to repair!! 
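 * (a 'check' run, i.e. MD_RECOVERY_CHECK set, only counts the
 * mismatch in resync_mismatches above; a 'repair' run falls through
 * to the compute path below and rewrites the parity)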
*/ 3242 set_bit(STRIPE_INSYNC, &sh->state); 3243 else { 3244 sh->check_state = check_state_compute_run; 3245 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 3246 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 3247 set_bit(R5_Wantcompute, 3248 &sh->dev[sh->pd_idx].flags); 3249 sh->ops.target = sh->pd_idx; 3250 sh->ops.target2 = -1; 3251 s->uptodate++; 3252 } 3253 } 3254 break; 3255 case check_state_compute_run: 3256 break; 3257 default: 3258 printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n", 3259 __func__, sh->check_state, 3260 (unsigned long long) sh->sector); 3261 BUG(); 3262 } 3263 } 3264 3265 3266 static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, 3267 struct stripe_head_state *s, 3268 int disks) 3269 { 3270 int pd_idx = sh->pd_idx; 3271 int qd_idx = sh->qd_idx; 3272 struct r5dev *dev; 3273 3274 set_bit(STRIPE_HANDLE, &sh->state); 3275 3276 BUG_ON(s->failed > 2); 3277 3278 /* Want to check and possibly repair P and Q. 3279 * However there could be one 'failed' device, in which 3280 * case we can only check one of them, possibly using the 3281 * other to generate missing data 3282 */ 3283 3284 switch (sh->check_state) { 3285 case check_state_idle: 3286 /* start a new check operation if there are < 2 failures */ 3287 if (s->failed == s->q_failed) { 3288 /* The only possible failed device holds Q, so it 3289 * makes sense to check P (If anything else were failed, 3290 * we would have used P to recreate it). 3291 */ 3292 sh->check_state = check_state_run; 3293 } 3294 if (!s->q_failed && s->failed < 2) { 3295 /* Q is not failed, and we didn't use it to generate 3296 * anything, so it makes sense to check it 3297 */ 3298 if (sh->check_state == check_state_run) 3299 sh->check_state = check_state_run_pq; 3300 else 3301 sh->check_state = check_state_run_q; 3302 } 3303 3304 /* discard potentially stale zero_sum_result */ 3305 sh->ops.zero_sum_result = 0; 3306 3307 if (sh->check_state == check_state_run) { 3308 /* async_xor_zero_sum destroys the contents of P */ 3309 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 3310 s->uptodate--; 3311 } 3312 if (sh->check_state >= check_state_run && 3313 sh->check_state <= check_state_run_pq) { 3314 /* async_syndrome_zero_sum preserves P and Q, so 3315 * no need to mark them !uptodate here 3316 */ 3317 set_bit(STRIPE_OP_CHECK, &s->ops_request); 3318 break; 3319 } 3320 3321 /* we have 2-disk failure */ 3322 BUG_ON(s->failed != 2); 3323 /* fall through */ 3324 case check_state_compute_result: 3325 sh->check_state = check_state_idle; 3326 3327 /* check that a write has not made the stripe insync */ 3328 if (test_bit(STRIPE_INSYNC, &sh->state)) 3329 break; 3330 3331 /* now write out any block on a failed drive, 3332 * or P or Q if they were recomputed 3333 */ 3334 BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */ 3335 if (s->failed == 2) { 3336 dev = &sh->dev[s->failed_num[1]]; 3337 s->locked++; 3338 set_bit(R5_LOCKED, &dev->flags); 3339 set_bit(R5_Wantwrite, &dev->flags); 3340 } 3341 if (s->failed >= 1) { 3342 dev = &sh->dev[s->failed_num[0]]; 3343 s->locked++; 3344 set_bit(R5_LOCKED, &dev->flags); 3345 set_bit(R5_Wantwrite, &dev->flags); 3346 } 3347 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { 3348 dev = &sh->dev[pd_idx]; 3349 s->locked++; 3350 set_bit(R5_LOCKED, &dev->flags); 3351 set_bit(R5_Wantwrite, &dev->flags); 3352 } 3353 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { 3354 dev = &sh->dev[qd_idx]; 3355 s->locked++; 3356 set_bit(R5_LOCKED, &dev->flags); 3357 set_bit(R5_Wantwrite, &dev->flags); 
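/* (Note: between one and four blocks may now be flagged R5_Wantwrite
 * above - a block for each failed device, plus P and/or Q when
 * zero_sum_result marked them for rewrite.)
 */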
3358 } 3359 clear_bit(STRIPE_DEGRADED, &sh->state); 3360 3361 set_bit(STRIPE_INSYNC, &sh->state); 3362 break; 3363 case check_state_run: 3364 case check_state_run_q: 3365 case check_state_run_pq: 3366 break; /* we will be called again upon completion */ 3367 case check_state_check_result: 3368 sh->check_state = check_state_idle; 3369 3370 /* handle a successful check operation, if parity is correct 3371 * we are done. Otherwise update the mismatch count and repair 3372 * parity if !MD_RECOVERY_CHECK 3373 */ 3374 if (sh->ops.zero_sum_result == 0) { 3375 /* both parities are correct */ 3376 if (!s->failed) 3377 set_bit(STRIPE_INSYNC, &sh->state); 3378 else { 3379 /* in contrast to the raid5 case we can validate 3380 * parity, but still have a failure to write 3381 * back 3382 */ 3383 sh->check_state = check_state_compute_result; 3384 /* Returning at this point means that we may go 3385 * off and bring p and/or q uptodate again so 3386 * we make sure to check zero_sum_result again 3387 * to verify if p or q need writeback 3388 */ 3389 } 3390 } else { 3391 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); 3392 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 3393 /* don't try to repair!! */ 3394 set_bit(STRIPE_INSYNC, &sh->state); 3395 else { 3396 int *target = &sh->ops.target; 3397 3398 sh->ops.target = -1; 3399 sh->ops.target2 = -1; 3400 sh->check_state = check_state_compute_run; 3401 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 3402 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 3403 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { 3404 set_bit(R5_Wantcompute, 3405 &sh->dev[pd_idx].flags); 3406 *target = pd_idx; 3407 target = &sh->ops.target2; 3408 s->uptodate++; 3409 } 3410 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { 3411 set_bit(R5_Wantcompute, 3412 &sh->dev[qd_idx].flags); 3413 *target = qd_idx; 3414 s->uptodate++; 3415 } 3416 } 3417 } 3418 break; 3419 case check_state_compute_run: 3420 break; 3421 default: 3422 printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n", 3423 __func__, sh->check_state, 3424 (unsigned long long) sh->sector); 3425 BUG(); 3426 } 3427 } 3428 3429 static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh) 3430 { 3431 int i; 3432 3433 /* We have read all the blocks in this stripe and now we need to 3434 * copy some of them into a target stripe for expand. 3435 */ 3436 struct dma_async_tx_descriptor *tx = NULL; 3437 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state); 3438 for (i = 0; i < sh->disks; i++) 3439 if (i != sh->pd_idx && i != sh->qd_idx) { 3440 int dd_idx, j; 3441 struct stripe_head *sh2; 3442 struct async_submit_ctl submit; 3443 3444 sector_t bn = compute_blocknr(sh, i, 1); 3445 sector_t s = raid5_compute_sector(conf, bn, 0, 3446 &dd_idx, NULL); 3447 sh2 = get_active_stripe(conf, s, 0, 1, 1); 3448 if (sh2 == NULL) 3449 /* so far only the early blocks of this stripe 3450 * have been requested. 
When later blocks 3451 * get requested, we will try again 3452 */ 3453 continue; 3454 if (!test_bit(STRIPE_EXPANDING, &sh2->state) || 3455 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) { 3456 /* must have already done this block */ 3457 release_stripe(sh2); 3458 continue; 3459 } 3460 3461 /* place all the copies on one channel */ 3462 init_async_submit(&submit, 0, tx, NULL, NULL, NULL); 3463 tx = async_memcpy(sh2->dev[dd_idx].page, 3464 sh->dev[i].page, 0, 0, STRIPE_SIZE, 3465 &submit); 3466 3467 set_bit(R5_Expanded, &sh2->dev[dd_idx].flags); 3468 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags); 3469 for (j = 0; j < conf->raid_disks; j++) 3470 if (j != sh2->pd_idx && 3471 j != sh2->qd_idx && 3472 !test_bit(R5_Expanded, &sh2->dev[j].flags)) 3473 break; 3474 if (j == conf->raid_disks) { 3475 set_bit(STRIPE_EXPAND_READY, &sh2->state); 3476 set_bit(STRIPE_HANDLE, &sh2->state); 3477 } 3478 release_stripe(sh2); 3479 3480 } 3481 /* done submitting copies, wait for them to complete */ 3482 async_tx_quiesce(&tx); 3483 } 3484 3485 /* 3486 * handle_stripe - do things to a stripe. 3487 * 3488 * We lock the stripe by setting STRIPE_ACTIVE and then examine the 3489 * state of various bits to see what needs to be done. 3490 * Possible results: 3491 * return some read requests which now have data 3492 * return some write requests which are safely on storage 3493 * schedule a read on some buffers 3494 * schedule a write of some buffers 3495 * return confirmation of parity correctness 3496 * 3497 */ 3498 3499 static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) 3500 { 3501 struct r5conf *conf = sh->raid_conf; 3502 int disks = sh->disks; 3503 struct r5dev *dev; 3504 int i; 3505 int do_recovery = 0; 3506 3507 memset(s, 0, sizeof(*s)); 3508 3509 s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); 3510 s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); 3511 s->failed_num[0] = -1; 3512 s->failed_num[1] = -1; 3513 3514 /* Now to look around and see what can be done */ 3515 rcu_read_lock(); 3516 for (i=disks; i--; ) { 3517 struct md_rdev *rdev; 3518 sector_t first_bad; 3519 int bad_sectors; 3520 int is_bad = 0; 3521 3522 dev = &sh->dev[i]; 3523 3524 pr_debug("check %d: state 0x%lx read %p write %p written %p\n", 3525 i, dev->flags, 3526 dev->toread, dev->towrite, dev->written); 3527 /* maybe we can reply to a read 3528 * 3529 * new wantfill requests are only permitted while 3530 * ops_complete_biofill is guaranteed to be inactive 3531 */ 3532 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread && 3533 !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) 3534 set_bit(R5_Wantfill, &dev->flags); 3535 3536 /* now count some things */ 3537 if (test_bit(R5_LOCKED, &dev->flags)) 3538 s->locked++; 3539 if (test_bit(R5_UPTODATE, &dev->flags)) 3540 s->uptodate++; 3541 if (test_bit(R5_Wantcompute, &dev->flags)) { 3542 s->compute++; 3543 BUG_ON(s->compute > 2); 3544 } 3545 3546 if (test_bit(R5_Wantfill, &dev->flags)) 3547 s->to_fill++; 3548 else if (dev->toread) 3549 s->to_read++; 3550 if (dev->towrite) { 3551 s->to_write++; 3552 if (!test_bit(R5_OVERWRITE, &dev->flags)) 3553 s->non_overwrite++; 3554 } 3555 if (dev->written) 3556 s->written++; 3557 /* Prefer to use the replacement for reads, but only 3558 * if it is recovered enough and has no bad blocks. 
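 * (Concretely, while the replacement is still being rebuilt: stripes
 * lying wholly below its recovery_offset are read from the
 * replacement; for anything beyond it we fall back to the original
 * rdev and flag the device R5_NeedReplace.)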
3559 */ 3560 rdev = rcu_dereference(conf->disks[i].replacement); 3561 if (rdev && !test_bit(Faulty, &rdev->flags) && 3562 rdev->recovery_offset >= sh->sector + STRIPE_SECTORS && 3563 !is_badblock(rdev, sh->sector, STRIPE_SECTORS, 3564 &first_bad, &bad_sectors)) 3565 set_bit(R5_ReadRepl, &dev->flags); 3566 else { 3567 if (rdev) 3568 set_bit(R5_NeedReplace, &dev->flags); 3569 rdev = rcu_dereference(conf->disks[i].rdev); 3570 clear_bit(R5_ReadRepl, &dev->flags); 3571 } 3572 if (rdev && test_bit(Faulty, &rdev->flags)) 3573 rdev = NULL; 3574 if (rdev) { 3575 is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, 3576 &first_bad, &bad_sectors); 3577 if (s->blocked_rdev == NULL 3578 && (test_bit(Blocked, &rdev->flags) 3579 || is_bad < 0)) { 3580 if (is_bad < 0) 3581 set_bit(BlockedBadBlocks, 3582 &rdev->flags); 3583 s->blocked_rdev = rdev; 3584 atomic_inc(&rdev->nr_pending); 3585 } 3586 } 3587 clear_bit(R5_Insync, &dev->flags); 3588 if (!rdev) 3589 /* Not in-sync */; 3590 else if (is_bad) { 3591 /* also not in-sync */ 3592 if (!test_bit(WriteErrorSeen, &rdev->flags) && 3593 test_bit(R5_UPTODATE, &dev->flags)) { 3594 /* treat as in-sync, but with a read error 3595 * which we can now try to correct 3596 */ 3597 set_bit(R5_Insync, &dev->flags); 3598 set_bit(R5_ReadError, &dev->flags); 3599 } 3600 } else if (test_bit(In_sync, &rdev->flags)) 3601 set_bit(R5_Insync, &dev->flags); 3602 else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) 3603 /* in sync if before recovery_offset */ 3604 set_bit(R5_Insync, &dev->flags); 3605 else if (test_bit(R5_UPTODATE, &dev->flags) && 3606 test_bit(R5_Expanded, &dev->flags)) 3607 /* If we've reshaped into here, we assume it is Insync. 3608 * We will shortly update recovery_offset to make 3609 * it official. 3610 */ 3611 set_bit(R5_Insync, &dev->flags); 3612 3613 if (test_bit(R5_WriteError, &dev->flags)) { 3614 /* This flag does not apply to '.replacement' 3615 * only to .rdev, so make sure to check that*/ 3616 struct md_rdev *rdev2 = rcu_dereference( 3617 conf->disks[i].rdev); 3618 if (rdev2 == rdev) 3619 clear_bit(R5_Insync, &dev->flags); 3620 if (rdev2 && !test_bit(Faulty, &rdev2->flags)) { 3621 s->handle_bad_blocks = 1; 3622 atomic_inc(&rdev2->nr_pending); 3623 } else 3624 clear_bit(R5_WriteError, &dev->flags); 3625 } 3626 if (test_bit(R5_MadeGood, &dev->flags)) { 3627 /* This flag does not apply to '.replacement' 3628 * only to .rdev, so make sure to check that*/ 3629 struct md_rdev *rdev2 = rcu_dereference( 3630 conf->disks[i].rdev); 3631 if (rdev2 && !test_bit(Faulty, &rdev2->flags)) { 3632 s->handle_bad_blocks = 1; 3633 atomic_inc(&rdev2->nr_pending); 3634 } else 3635 clear_bit(R5_MadeGood, &dev->flags); 3636 } 3637 if (test_bit(R5_MadeGoodRepl, &dev->flags)) { 3638 struct md_rdev *rdev2 = rcu_dereference( 3639 conf->disks[i].replacement); 3640 if (rdev2 && !test_bit(Faulty, &rdev2->flags)) { 3641 s->handle_bad_blocks = 1; 3642 atomic_inc(&rdev2->nr_pending); 3643 } else 3644 clear_bit(R5_MadeGoodRepl, &dev->flags); 3645 } 3646 if (!test_bit(R5_Insync, &dev->flags)) { 3647 /* The ReadError flag will just be confusing now */ 3648 clear_bit(R5_ReadError, &dev->flags); 3649 clear_bit(R5_ReWrite, &dev->flags); 3650 } 3651 if (test_bit(R5_ReadError, &dev->flags)) 3652 clear_bit(R5_Insync, &dev->flags); 3653 if (!test_bit(R5_Insync, &dev->flags)) { 3654 if (s->failed < 2) 3655 s->failed_num[s->failed] = i; 3656 s->failed++; 3657 if (rdev && !test_bit(Faulty, &rdev->flags)) 3658 do_recovery = 1; 3659 } 3660 } 3661 if (test_bit(STRIPE_SYNCING, &sh->state)) { 
3662 /* If there is a failed device being replaced, 3663 * we must be recovering. 3664 * else if we are after recovery_cp, we must be syncing 3665 * else if MD_RECOVERY_REQUESTED is set, we also are syncing. 3666 * else we can only be replacing 3667 * sync and recovery both need to read all devices, and so 3668 * use the same flag. 3669 */ 3670 if (do_recovery || 3671 sh->sector >= conf->mddev->recovery_cp || 3672 test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery))) 3673 s->syncing = 1; 3674 else 3675 s->replacing = 1; 3676 } 3677 rcu_read_unlock(); 3678 } 3679 3680 static void handle_stripe(struct stripe_head *sh) 3681 { 3682 struct stripe_head_state s; 3683 struct r5conf *conf = sh->raid_conf; 3684 int i; 3685 int prexor; 3686 int disks = sh->disks; 3687 struct r5dev *pdev, *qdev; 3688 3689 clear_bit(STRIPE_HANDLE, &sh->state); 3690 if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) { 3691 /* already being handled, ensure it gets handled 3692 * again when current action finishes */ 3693 set_bit(STRIPE_HANDLE, &sh->state); 3694 return; 3695 } 3696 3697 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { 3698 spin_lock(&sh->stripe_lock); 3699 /* Cannot process 'sync' concurrently with 'discard' */ 3700 if (!test_bit(STRIPE_DISCARD, &sh->state) && 3701 test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { 3702 set_bit(STRIPE_SYNCING, &sh->state); 3703 clear_bit(STRIPE_INSYNC, &sh->state); 3704 clear_bit(STRIPE_REPLACED, &sh->state); 3705 } 3706 spin_unlock(&sh->stripe_lock); 3707 } 3708 clear_bit(STRIPE_DELAYED, &sh->state); 3709 3710 pr_debug("handling stripe %llu, state=%#lx cnt=%d, " 3711 "pd_idx=%d, qd_idx=%d, check:%d, reconstruct:%d\n", 3712 (unsigned long long)sh->sector, sh->state, 3713 atomic_read(&sh->count), sh->pd_idx, sh->qd_idx, 3714 sh->check_state, sh->reconstruct_state); 3715 3716 analyse_stripe(sh, &s); 3717 3718 if (s.handle_bad_blocks) { 3719 set_bit(STRIPE_HANDLE, &sh->state); 3720 goto finish; 3721 } 3722 3723 if (unlikely(s.blocked_rdev)) { 3724 if (s.syncing || s.expanding || s.expanded || 3725 s.replacing || s.to_write || s.written) { 3726 set_bit(STRIPE_HANDLE, &sh->state); 3727 goto finish; 3728 } 3729 /* There is nothing for the blocked_rdev to block */ 3730 rdev_dec_pending(s.blocked_rdev, conf->mddev); 3731 s.blocked_rdev = NULL; 3732 } 3733 3734 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { 3735 set_bit(STRIPE_OP_BIOFILL, &s.ops_request); 3736 set_bit(STRIPE_BIOFILL_RUN, &sh->state); 3737 } 3738 3739 pr_debug("locked=%d uptodate=%d to_read=%d" 3740 " to_write=%d failed=%d failed_num=%d,%d\n", 3741 s.locked, s.uptodate, s.to_read, s.to_write, s.failed, 3742 s.failed_num[0], s.failed_num[1]); 3743 /* check if the array has lost more than max_degraded devices and, 3744 * if so, some requests might need to be failed.
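 * (max_degraded is 1 for raid4/5 and 2 for raid6, so e.g. a raid5
 * stripe that analyse_stripe() found with s.failed == 2 can no
 * longer be reconstructed and its pending bios are errored below.)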
3745 */ 3746 if (s.failed > conf->max_degraded) { 3747 sh->check_state = 0; 3748 sh->reconstruct_state = 0; 3749 if (s.to_read+s.to_write+s.written) 3750 handle_failed_stripe(conf, sh, &s, disks, &s.return_bi); 3751 if (s.syncing + s.replacing) 3752 handle_failed_sync(conf, sh, &s); 3753 } 3754 3755 /* Now we check to see if any write operations have recently 3756 * completed 3757 */ 3758 prexor = 0; 3759 if (sh->reconstruct_state == reconstruct_state_prexor_drain_result) 3760 prexor = 1; 3761 if (sh->reconstruct_state == reconstruct_state_drain_result || 3762 sh->reconstruct_state == reconstruct_state_prexor_drain_result) { 3763 sh->reconstruct_state = reconstruct_state_idle; 3764 3765 /* All the 'written' buffers and the parity block are ready to 3766 * be written back to disk 3767 */ 3768 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) && 3769 !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)); 3770 BUG_ON(sh->qd_idx >= 0 && 3771 !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags) && 3772 !test_bit(R5_Discard, &sh->dev[sh->qd_idx].flags)); 3773 for (i = disks; i--; ) { 3774 struct r5dev *dev = &sh->dev[i]; 3775 if (test_bit(R5_LOCKED, &dev->flags) && 3776 (i == sh->pd_idx || i == sh->qd_idx || 3777 dev->written)) { 3778 pr_debug("Writing block %d\n", i); 3779 set_bit(R5_Wantwrite, &dev->flags); 3780 if (prexor) 3781 continue; 3782 if (!test_bit(R5_Insync, &dev->flags) || 3783 ((i == sh->pd_idx || i == sh->qd_idx) && 3784 s.failed == 0)) 3785 set_bit(STRIPE_INSYNC, &sh->state); 3786 } 3787 } 3788 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 3789 s.dec_preread_active = 1; 3790 } 3791 3792 /* 3793 * might be able to return some write requests if the parity blocks 3794 * are safe, or on a failed drive 3795 */ 3796 pdev = &sh->dev[sh->pd_idx]; 3797 s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx) 3798 || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx); 3799 qdev = &sh->dev[sh->qd_idx]; 3800 s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx) 3801 || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx) 3802 || conf->level < 6; 3803 3804 if (s.written && 3805 (s.p_failed || ((test_bit(R5_Insync, &pdev->flags) 3806 && !test_bit(R5_LOCKED, &pdev->flags) 3807 && (test_bit(R5_UPTODATE, &pdev->flags) || 3808 test_bit(R5_Discard, &pdev->flags))))) && 3809 (s.q_failed || ((test_bit(R5_Insync, &qdev->flags) 3810 && !test_bit(R5_LOCKED, &qdev->flags) 3811 && (test_bit(R5_UPTODATE, &qdev->flags) || 3812 test_bit(R5_Discard, &qdev->flags)))))) 3813 handle_stripe_clean_event(conf, sh, disks, &s.return_bi); 3814 3815 /* Now we might consider reading some blocks, either to check/generate 3816 * parity, or to satisfy requests 3817 * or to load a block that is being partially written. 3818 */ 3819 if (s.to_read || s.non_overwrite 3820 || (conf->level == 6 && s.to_write && s.failed) 3821 || (s.syncing && (s.uptodate + s.compute < disks)) 3822 || s.replacing 3823 || s.expanding) 3824 handle_stripe_fill(sh, &s, disks); 3825 3826 /* Now to consider new write requests and what else, if anything 3827 * should be read. We do not handle new writes when: 3828 * 1/ A 'write' operation (copy+xor) is already in flight. 3829 * 2/ A 'check' operation is in flight, as it may clobber the parity 3830 * block. 
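 * (2/ is not hypothetical: as noted in handle_parity_checks6 above,
 * async_xor_zero_sum destroys the contents of P while a check runs,
 * so starting a write underneath it could corrupt parity.)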
3831 */ 3832 if (s.to_write && !sh->reconstruct_state && !sh->check_state) 3833 handle_stripe_dirtying(conf, sh, &s, disks); 3834 3835 /* maybe we need to check and possibly fix the parity for this stripe 3836 * Any reads will already have been scheduled, so we just see if enough 3837 * data is available. The parity check is held off while parity 3838 * dependent operations are in flight. 3839 */ 3840 if (sh->check_state || 3841 (s.syncing && s.locked == 0 && 3842 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && 3843 !test_bit(STRIPE_INSYNC, &sh->state))) { 3844 if (conf->level == 6) 3845 handle_parity_checks6(conf, sh, &s, disks); 3846 else 3847 handle_parity_checks5(conf, sh, &s, disks); 3848 } 3849 3850 if ((s.replacing || s.syncing) && s.locked == 0 3851 && !test_bit(STRIPE_COMPUTE_RUN, &sh->state) 3852 && !test_bit(STRIPE_REPLACED, &sh->state)) { 3853 /* Write out to replacement devices where possible */ 3854 for (i = 0; i < conf->raid_disks; i++) 3855 if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) { 3856 WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags)); 3857 set_bit(R5_WantReplace, &sh->dev[i].flags); 3858 set_bit(R5_LOCKED, &sh->dev[i].flags); 3859 s.locked++; 3860 } 3861 if (s.replacing) 3862 set_bit(STRIPE_INSYNC, &sh->state); 3863 set_bit(STRIPE_REPLACED, &sh->state); 3864 } 3865 if ((s.syncing || s.replacing) && s.locked == 0 && 3866 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && 3867 test_bit(STRIPE_INSYNC, &sh->state)) { 3868 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 3869 clear_bit(STRIPE_SYNCING, &sh->state); 3870 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) 3871 wake_up(&conf->wait_for_overlap); 3872 } 3873 3874 /* If the failed drives are just a ReadError, then we might need 3875 * to progress the repair/check process 3876 */ 3877 if (s.failed <= conf->max_degraded && !conf->mddev->ro) 3878 for (i = 0; i < s.failed; i++) { 3879 struct r5dev *dev = &sh->dev[s.failed_num[i]]; 3880 if (test_bit(R5_ReadError, &dev->flags) 3881 && !test_bit(R5_LOCKED, &dev->flags) 3882 && test_bit(R5_UPTODATE, &dev->flags) 3883 ) { 3884 if (!test_bit(R5_ReWrite, &dev->flags)) { 3885 set_bit(R5_Wantwrite, &dev->flags); 3886 set_bit(R5_ReWrite, &dev->flags); 3887 set_bit(R5_LOCKED, &dev->flags); 3888 s.locked++; 3889 } else { 3890 /* let's read it back */ 3891 set_bit(R5_Wantread, &dev->flags); 3892 set_bit(R5_LOCKED, &dev->flags); 3893 s.locked++; 3894 } 3895 } 3896 } 3897 3898 3899 /* Finish reconstruct operations initiated by the expansion process */ 3900 if (sh->reconstruct_state == reconstruct_state_result) { 3901 struct stripe_head *sh_src 3902 = get_active_stripe(conf, sh->sector, 1, 1, 1); 3903 if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) { 3904 /* sh cannot be written until sh_src has been read. 
3905 * so arrange for sh to be delayed a little
3906 */
3907 set_bit(STRIPE_DELAYED, &sh->state);
3908 set_bit(STRIPE_HANDLE, &sh->state);
3909 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
3910 &sh_src->state))
3911 atomic_inc(&conf->preread_active_stripes);
3912 release_stripe(sh_src);
3913 goto finish;
3914 }
3915 if (sh_src)
3916 release_stripe(sh_src);
3917
3918 sh->reconstruct_state = reconstruct_state_idle;
3919 clear_bit(STRIPE_EXPANDING, &sh->state);
3920 for (i = conf->raid_disks; i--; ) {
3921 set_bit(R5_Wantwrite, &sh->dev[i].flags);
3922 set_bit(R5_LOCKED, &sh->dev[i].flags);
3923 s.locked++;
3924 }
3925 }
3926
3927 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
3928 !sh->reconstruct_state) {
3929 /* Need to write out all blocks after computing parity */
3930 sh->disks = conf->raid_disks;
3931 stripe_set_idx(sh->sector, conf, 0, sh);
3932 schedule_reconstruction(sh, &s, 1, 1);
3933 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
3934 clear_bit(STRIPE_EXPAND_READY, &sh->state);
3935 atomic_dec(&conf->reshape_stripes);
3936 wake_up(&conf->wait_for_overlap);
3937 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
3938 }
3939
3940 if (s.expanding && s.locked == 0 &&
3941 !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
3942 handle_stripe_expansion(conf, sh);
3943
3944 finish:
3945 /* wait for this device to become unblocked */
3946 if (unlikely(s.blocked_rdev)) {
3947 if (conf->mddev->external)
3948 md_wait_for_blocked_rdev(s.blocked_rdev,
3949 conf->mddev);
3950 else
3951 /* Internal metadata will immediately
3952 * be written by raid5d, so we don't
3953 * need to wait here.
3954 */
3955 rdev_dec_pending(s.blocked_rdev,
3956 conf->mddev);
3957 }
3958
3959 if (s.handle_bad_blocks)
3960 for (i = disks; i--; ) {
3961 struct md_rdev *rdev;
3962 struct r5dev *dev = &sh->dev[i];
3963 if (test_and_clear_bit(R5_WriteError, &dev->flags)) {
3964 /* We own a safe reference to the rdev */
3965 rdev = conf->disks[i].rdev;
3966 if (!rdev_set_badblocks(rdev, sh->sector,
3967 STRIPE_SECTORS, 0))
3968 md_error(conf->mddev, rdev);
3969 rdev_dec_pending(rdev, conf->mddev);
3970 }
3971 if (test_and_clear_bit(R5_MadeGood, &dev->flags)) {
3972 rdev = conf->disks[i].rdev;
3973 rdev_clear_badblocks(rdev, sh->sector,
3974 STRIPE_SECTORS, 0);
3975 rdev_dec_pending(rdev, conf->mddev);
3976 }
3977 if (test_and_clear_bit(R5_MadeGoodRepl, &dev->flags)) {
3978 rdev = conf->disks[i].replacement;
3979 if (!rdev)
3980 /* rdev has been moved down */
3981 rdev = conf->disks[i].rdev;
3982 rdev_clear_badblocks(rdev, sh->sector,
3983 STRIPE_SECTORS, 0);
3984 rdev_dec_pending(rdev, conf->mddev);
3985 }
3986 }
3987
3988 if (s.ops_request)
3989 raid_run_ops(sh, s.ops_request);
3990
3991 ops_run_io(sh, &s);
3992
3993 if (s.dec_preread_active) {
3994 /* We delay this until after ops_run_io so that if make_request
3995 * is waiting on a flush, it won't continue until the writes
3996 * have actually been submitted.
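 *
 * (Editor's note) In other words, the atomic_dec() below is
 * deliberately sequenced after ops_run_io() above; dropping the count
 * first could let a flush-waiter in make_request() proceed before its
 * dependent writes had been issued to the member devices.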
3997 */ 3998 atomic_dec(&conf->preread_active_stripes); 3999 if (atomic_read(&conf->preread_active_stripes) < 4000 IO_THRESHOLD) 4001 md_wakeup_thread(conf->mddev->thread); 4002 } 4003 4004 return_io(s.return_bi); 4005 4006 clear_bit_unlock(STRIPE_ACTIVE, &sh->state); 4007 } 4008 4009 static void raid5_activate_delayed(struct r5conf *conf) 4010 { 4011 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { 4012 while (!list_empty(&conf->delayed_list)) { 4013 struct list_head *l = conf->delayed_list.next; 4014 struct stripe_head *sh; 4015 sh = list_entry(l, struct stripe_head, lru); 4016 list_del_init(l); 4017 clear_bit(STRIPE_DELAYED, &sh->state); 4018 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 4019 atomic_inc(&conf->preread_active_stripes); 4020 list_add_tail(&sh->lru, &conf->hold_list); 4021 raid5_wakeup_stripe_thread(sh); 4022 } 4023 } 4024 } 4025 4026 static void activate_bit_delay(struct r5conf *conf, 4027 struct list_head *temp_inactive_list) 4028 { 4029 /* device_lock is held */ 4030 struct list_head head; 4031 list_add(&head, &conf->bitmap_list); 4032 list_del_init(&conf->bitmap_list); 4033 while (!list_empty(&head)) { 4034 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); 4035 int hash; 4036 list_del_init(&sh->lru); 4037 atomic_inc(&sh->count); 4038 hash = sh->hash_lock_index; 4039 __release_stripe(conf, sh, &temp_inactive_list[hash]); 4040 } 4041 } 4042 4043 int md_raid5_congested(struct mddev *mddev, int bits) 4044 { 4045 struct r5conf *conf = mddev->private; 4046 4047 /* No difference between reads and writes. Just check 4048 * how busy the stripe_cache is 4049 */ 4050 4051 if (conf->inactive_blocked) 4052 return 1; 4053 if (conf->quiesce) 4054 return 1; 4055 if (atomic_read(&conf->empty_inactive_list_nr)) 4056 return 1; 4057 4058 return 0; 4059 } 4060 EXPORT_SYMBOL_GPL(md_raid5_congested); 4061 4062 static int raid5_congested(void *data, int bits) 4063 { 4064 struct mddev *mddev = data; 4065 4066 return mddev_congested(mddev, bits) || 4067 md_raid5_congested(mddev, bits); 4068 } 4069 4070 /* We want read requests to align with chunks where possible, 4071 * but write requests don't need to. 4072 */ 4073 static int raid5_mergeable_bvec(struct request_queue *q, 4074 struct bvec_merge_data *bvm, 4075 struct bio_vec *biovec) 4076 { 4077 struct mddev *mddev = q->queuedata; 4078 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); 4079 int max; 4080 unsigned int chunk_sectors = mddev->chunk_sectors; 4081 unsigned int bio_sectors = bvm->bi_size >> 9; 4082 4083 if ((bvm->bi_rw & 1) == WRITE) 4084 return biovec->bv_len; /* always allow writes to be mergeable */ 4085 4086 if (mddev->new_chunk_sectors < mddev->chunk_sectors) 4087 chunk_sectors = mddev->new_chunk_sectors; 4088 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; 4089 if (max < 0) max = 0; 4090 if (max <= biovec->bv_len && bio_sectors == 0) 4091 return biovec->bv_len; 4092 else 4093 return max; 4094 } 4095 4096 4097 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) 4098 { 4099 sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev); 4100 unsigned int chunk_sectors = mddev->chunk_sectors; 4101 unsigned int bio_sectors = bio_sectors(bio); 4102 4103 if (mddev->new_chunk_sectors < mddev->chunk_sectors) 4104 chunk_sectors = mddev->new_chunk_sectors; 4105 return chunk_sectors >= 4106 ((sector & (chunk_sectors - 1)) + bio_sectors); 4107 } 4108 4109 /* 4110 * add bio to the retry LIFO ( in O(1) ... 
we are in interrupt )
4111 * later sampled by raid5d.
4112 */
4113 static void add_bio_to_retry(struct bio *bi, struct r5conf *conf)
4114 {
4115 unsigned long flags;
4116
4117 spin_lock_irqsave(&conf->device_lock, flags);
4118
4119 bi->bi_next = conf->retry_read_aligned_list;
4120 conf->retry_read_aligned_list = bi;
4121
4122 spin_unlock_irqrestore(&conf->device_lock, flags);
4123 md_wakeup_thread(conf->mddev->thread);
4124 }
4125
4126
4127 static struct bio *remove_bio_from_retry(struct r5conf *conf)
4128 {
4129 struct bio *bi;
4130
4131 bi = conf->retry_read_aligned;
4132 if (bi) {
4133 conf->retry_read_aligned = NULL;
4134 return bi;
4135 }
4136 bi = conf->retry_read_aligned_list;
4137 if (bi) {
4138 conf->retry_read_aligned_list = bi->bi_next;
4139 bi->bi_next = NULL;
4140 /*
4141 * this sets the active stripe count to 1 and the processed
4142 * stripe count to zero (upper 8 bits)
4143 */
4144 raid5_set_bi_stripes(bi, 1); /* biased count of active stripes */
4145 }
4146
4147 return bi;
4148 }
4149
4150
4151 /*
4152 * raid5_align_endio() checks whether the read succeeded and, if it
4153 * did, calls bio_endio on the original bio (having bio_put the new bio
4154 * first).
4155 * If the read failed, the bio is handed back for a retry.
4156 */
4157 static void raid5_align_endio(struct bio *bi, int error)
4158 {
4159 struct bio* raid_bi = bi->bi_private;
4160 struct mddev *mddev;
4161 struct r5conf *conf;
4162 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
4163 struct md_rdev *rdev;
4164
4165 bio_put(bi);
4166
4167 rdev = (void*)raid_bi->bi_next;
4168 raid_bi->bi_next = NULL;
4169 mddev = rdev->mddev;
4170 conf = mddev->private;
4171
4172 rdev_dec_pending(rdev, conf->mddev);
4173
4174 if (!error && uptodate) {
4175 trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev),
4176 raid_bi, 0);
4177 bio_endio(raid_bi, 0);
4178 if (atomic_dec_and_test(&conf->active_aligned_reads))
4179 wake_up(&conf->wait_for_stripe);
4180 return;
4181 }
4182
4183
4184 pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
4185
4186 add_bio_to_retry(raid_bi, conf);
4187 }
4188
4189 static int bio_fits_rdev(struct bio *bi)
4190 {
4191 struct request_queue *q = bdev_get_queue(bi->bi_bdev);
4192
4193 if (bio_sectors(bi) > queue_max_sectors(q))
4194 return 0;
4195 blk_recount_segments(q, bi);
4196 if (bi->bi_phys_segments > queue_max_segments(q))
4197 return 0;
4198
4199 if (q->merge_bvec_fn)
4200 /* it's too hard to apply the merge_bvec_fn at this stage,
4201 * just give up
4202 */
4203 return 0;
4204
4205 return 1;
4206 }
4207
4208
4209 static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
4210 {
4211 struct r5conf *conf = mddev->private;
4212 int dd_idx;
4213 struct bio* align_bi;
4214 struct md_rdev *rdev;
4215 sector_t end_sector;
4216
4217 if (!in_chunk_boundary(mddev, raid_bio)) {
4218 pr_debug("chunk_aligned_read : non aligned\n");
4219 return 0;
4220 }
4221 /*
4222 * use bio_clone_mddev to make a copy of the bio
4223 */
4224 align_bi = bio_clone_mddev(raid_bio, GFP_NOIO, mddev);
4225 if (!align_bi)
4226 return 0;
4227 /*
4228 * set bi_end_io to a new function, and set bi_private to the
4229 * original bio.
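 *
 * (Editor's note) The target rdev is also smuggled through
 * raid_bio->bi_next below; raid5_align_endio() above casts it back and
 * clears bi_next before the original bio is completed or queued for a
 * retry.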
4230 */ 4231 align_bi->bi_end_io = raid5_align_endio; 4232 align_bi->bi_private = raid_bio; 4233 /* 4234 * compute position 4235 */ 4236 align_bi->bi_iter.bi_sector = 4237 raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, 4238 0, &dd_idx, NULL); 4239 4240 end_sector = bio_end_sector(align_bi); 4241 rcu_read_lock(); 4242 rdev = rcu_dereference(conf->disks[dd_idx].replacement); 4243 if (!rdev || test_bit(Faulty, &rdev->flags) || 4244 rdev->recovery_offset < end_sector) { 4245 rdev = rcu_dereference(conf->disks[dd_idx].rdev); 4246 if (rdev && 4247 (test_bit(Faulty, &rdev->flags) || 4248 !(test_bit(In_sync, &rdev->flags) || 4249 rdev->recovery_offset >= end_sector))) 4250 rdev = NULL; 4251 } 4252 if (rdev) { 4253 sector_t first_bad; 4254 int bad_sectors; 4255 4256 atomic_inc(&rdev->nr_pending); 4257 rcu_read_unlock(); 4258 raid_bio->bi_next = (void*)rdev; 4259 align_bi->bi_bdev = rdev->bdev; 4260 align_bi->bi_flags &= ~(1 << BIO_SEG_VALID); 4261 4262 if (!bio_fits_rdev(align_bi) || 4263 is_badblock(rdev, align_bi->bi_iter.bi_sector, 4264 bio_sectors(align_bi), 4265 &first_bad, &bad_sectors)) { 4266 /* too big in some way, or has a known bad block */ 4267 bio_put(align_bi); 4268 rdev_dec_pending(rdev, mddev); 4269 return 0; 4270 } 4271 4272 /* No reshape active, so we can trust rdev->data_offset */ 4273 align_bi->bi_iter.bi_sector += rdev->data_offset; 4274 4275 spin_lock_irq(&conf->device_lock); 4276 wait_event_lock_irq(conf->wait_for_stripe, 4277 conf->quiesce == 0, 4278 conf->device_lock); 4279 atomic_inc(&conf->active_aligned_reads); 4280 spin_unlock_irq(&conf->device_lock); 4281 4282 if (mddev->gendisk) 4283 trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev), 4284 align_bi, disk_devt(mddev->gendisk), 4285 raid_bio->bi_iter.bi_sector); 4286 generic_make_request(align_bi); 4287 return 1; 4288 } else { 4289 rcu_read_unlock(); 4290 bio_put(align_bi); 4291 return 0; 4292 } 4293 } 4294 4295 /* __get_priority_stripe - get the next stripe to process 4296 * 4297 * Full stripe writes are allowed to pass preread active stripes up until 4298 * the bypass_threshold is exceeded. In general the bypass_count 4299 * increments when the handle_list is handled before the hold_list; however, it 4300 * will not be incremented when STRIPE_IO_STARTED is sampled set signifying a 4301 * stripe with in flight i/o. The bypass_count will be reset when the 4302 * head of the hold_list has changed, i.e. the head was promoted to the 4303 * handle_list. 4304 */ 4305 static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group) 4306 { 4307 struct stripe_head *sh = NULL, *tmp; 4308 struct list_head *handle_list = NULL; 4309 struct r5worker_group *wg = NULL; 4310 4311 if (conf->worker_cnt_per_group == 0) { 4312 handle_list = &conf->handle_list; 4313 } else if (group != ANY_GROUP) { 4314 handle_list = &conf->worker_groups[group].handle_list; 4315 wg = &conf->worker_groups[group]; 4316 } else { 4317 int i; 4318 for (i = 0; i < conf->group_cnt; i++) { 4319 handle_list = &conf->worker_groups[i].handle_list; 4320 wg = &conf->worker_groups[i]; 4321 if (!list_empty(handle_list)) 4322 break; 4323 } 4324 } 4325 4326 pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n", 4327 __func__, 4328 list_empty(handle_list) ? "empty" : "busy", 4329 list_empty(&conf->hold_list) ? 
"empty" : "busy", 4330 atomic_read(&conf->pending_full_writes), conf->bypass_count); 4331 4332 if (!list_empty(handle_list)) { 4333 sh = list_entry(handle_list->next, typeof(*sh), lru); 4334 4335 if (list_empty(&conf->hold_list)) 4336 conf->bypass_count = 0; 4337 else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) { 4338 if (conf->hold_list.next == conf->last_hold) 4339 conf->bypass_count++; 4340 else { 4341 conf->last_hold = conf->hold_list.next; 4342 conf->bypass_count -= conf->bypass_threshold; 4343 if (conf->bypass_count < 0) 4344 conf->bypass_count = 0; 4345 } 4346 } 4347 } else if (!list_empty(&conf->hold_list) && 4348 ((conf->bypass_threshold && 4349 conf->bypass_count > conf->bypass_threshold) || 4350 atomic_read(&conf->pending_full_writes) == 0)) { 4351 4352 list_for_each_entry(tmp, &conf->hold_list, lru) { 4353 if (conf->worker_cnt_per_group == 0 || 4354 group == ANY_GROUP || 4355 !cpu_online(tmp->cpu) || 4356 cpu_to_group(tmp->cpu) == group) { 4357 sh = tmp; 4358 break; 4359 } 4360 } 4361 4362 if (sh) { 4363 conf->bypass_count -= conf->bypass_threshold; 4364 if (conf->bypass_count < 0) 4365 conf->bypass_count = 0; 4366 } 4367 wg = NULL; 4368 } 4369 4370 if (!sh) 4371 return NULL; 4372 4373 if (wg) { 4374 wg->stripes_cnt--; 4375 sh->group = NULL; 4376 } 4377 list_del_init(&sh->lru); 4378 atomic_inc(&sh->count); 4379 BUG_ON(atomic_read(&sh->count) != 1); 4380 return sh; 4381 } 4382 4383 struct raid5_plug_cb { 4384 struct blk_plug_cb cb; 4385 struct list_head list; 4386 struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS]; 4387 }; 4388 4389 static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule) 4390 { 4391 struct raid5_plug_cb *cb = container_of( 4392 blk_cb, struct raid5_plug_cb, cb); 4393 struct stripe_head *sh; 4394 struct mddev *mddev = cb->cb.data; 4395 struct r5conf *conf = mddev->private; 4396 int cnt = 0; 4397 int hash; 4398 4399 if (cb->list.next && !list_empty(&cb->list)) { 4400 spin_lock_irq(&conf->device_lock); 4401 while (!list_empty(&cb->list)) { 4402 sh = list_first_entry(&cb->list, struct stripe_head, lru); 4403 list_del_init(&sh->lru); 4404 /* 4405 * avoid race release_stripe_plug() sees 4406 * STRIPE_ON_UNPLUG_LIST clear but the stripe 4407 * is still in our list 4408 */ 4409 smp_mb__before_clear_bit(); 4410 clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state); 4411 /* 4412 * STRIPE_ON_RELEASE_LIST could be set here. 
In that 4413 * case, the count is always > 1 here 4414 */ 4415 hash = sh->hash_lock_index; 4416 __release_stripe(conf, sh, &cb->temp_inactive_list[hash]); 4417 cnt++; 4418 } 4419 spin_unlock_irq(&conf->device_lock); 4420 } 4421 release_inactive_stripe_list(conf, cb->temp_inactive_list, 4422 NR_STRIPE_HASH_LOCKS); 4423 if (mddev->queue) 4424 trace_block_unplug(mddev->queue, cnt, !from_schedule); 4425 kfree(cb); 4426 } 4427 4428 static void release_stripe_plug(struct mddev *mddev, 4429 struct stripe_head *sh) 4430 { 4431 struct blk_plug_cb *blk_cb = blk_check_plugged( 4432 raid5_unplug, mddev, 4433 sizeof(struct raid5_plug_cb)); 4434 struct raid5_plug_cb *cb; 4435 4436 if (!blk_cb) { 4437 release_stripe(sh); 4438 return; 4439 } 4440 4441 cb = container_of(blk_cb, struct raid5_plug_cb, cb); 4442 4443 if (cb->list.next == NULL) { 4444 int i; 4445 INIT_LIST_HEAD(&cb->list); 4446 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) 4447 INIT_LIST_HEAD(cb->temp_inactive_list + i); 4448 } 4449 4450 if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)) 4451 list_add_tail(&sh->lru, &cb->list); 4452 else 4453 release_stripe(sh); 4454 } 4455 4456 static void make_discard_request(struct mddev *mddev, struct bio *bi) 4457 { 4458 struct r5conf *conf = mddev->private; 4459 sector_t logical_sector, last_sector; 4460 struct stripe_head *sh; 4461 int remaining; 4462 int stripe_sectors; 4463 4464 if (mddev->reshape_position != MaxSector) 4465 /* Skip discard while reshape is happening */ 4466 return; 4467 4468 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1); 4469 last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9); 4470 4471 bi->bi_next = NULL; 4472 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ 4473 4474 stripe_sectors = conf->chunk_sectors * 4475 (conf->raid_disks - conf->max_degraded); 4476 logical_sector = DIV_ROUND_UP_SECTOR_T(logical_sector, 4477 stripe_sectors); 4478 sector_div(last_sector, stripe_sectors); 4479 4480 logical_sector *= conf->chunk_sectors; 4481 last_sector *= conf->chunk_sectors; 4482 4483 for (; logical_sector < last_sector; 4484 logical_sector += STRIPE_SECTORS) { 4485 DEFINE_WAIT(w); 4486 int d; 4487 again: 4488 sh = get_active_stripe(conf, logical_sector, 0, 0, 0); 4489 prepare_to_wait(&conf->wait_for_overlap, &w, 4490 TASK_UNINTERRUPTIBLE); 4491 set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); 4492 if (test_bit(STRIPE_SYNCING, &sh->state)) { 4493 release_stripe(sh); 4494 schedule(); 4495 goto again; 4496 } 4497 clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); 4498 spin_lock_irq(&sh->stripe_lock); 4499 for (d = 0; d < conf->raid_disks; d++) { 4500 if (d == sh->pd_idx || d == sh->qd_idx) 4501 continue; 4502 if (sh->dev[d].towrite || sh->dev[d].toread) { 4503 set_bit(R5_Overlap, &sh->dev[d].flags); 4504 spin_unlock_irq(&sh->stripe_lock); 4505 release_stripe(sh); 4506 schedule(); 4507 goto again; 4508 } 4509 } 4510 set_bit(STRIPE_DISCARD, &sh->state); 4511 finish_wait(&conf->wait_for_overlap, &w); 4512 for (d = 0; d < conf->raid_disks; d++) { 4513 if (d == sh->pd_idx || d == sh->qd_idx) 4514 continue; 4515 sh->dev[d].towrite = bi; 4516 set_bit(R5_OVERWRITE, &sh->dev[d].flags); 4517 raid5_inc_bi_active_stripes(bi); 4518 } 4519 spin_unlock_irq(&sh->stripe_lock); 4520 if (conf->mddev->bitmap) { 4521 for (d = 0; 4522 d < conf->raid_disks - conf->max_degraded; 4523 d++) 4524 bitmap_startwrite(mddev->bitmap, 4525 sh->sector, 4526 STRIPE_SECTORS, 4527 0); 4528 sh->bm_seq = conf->seq_flush + 1; 4529 set_bit(STRIPE_BIT_DELAY, &sh->state); 4530 
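/*
 * (Editor's illustration) The rounding at the top of this
 * function means only whole stripes are discarded: e.g. on a
 * 4+1 RAID-5 with 128-sector chunks, stripe_sectors == 512, so
 * a discard of array sectors 100..1500 is trimmed to the single
 * full stripe covering sectors 512..1023, and the partial
 * stripes at either end are left untouched.
 */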
} 4531 4532 set_bit(STRIPE_HANDLE, &sh->state); 4533 clear_bit(STRIPE_DELAYED, &sh->state); 4534 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 4535 atomic_inc(&conf->preread_active_stripes); 4536 release_stripe_plug(mddev, sh); 4537 } 4538 4539 remaining = raid5_dec_bi_active_stripes(bi); 4540 if (remaining == 0) { 4541 md_write_end(mddev); 4542 bio_endio(bi, 0); 4543 } 4544 } 4545 4546 static void make_request(struct mddev *mddev, struct bio * bi) 4547 { 4548 struct r5conf *conf = mddev->private; 4549 int dd_idx; 4550 sector_t new_sector; 4551 sector_t logical_sector, last_sector; 4552 struct stripe_head *sh; 4553 const int rw = bio_data_dir(bi); 4554 int remaining; 4555 4556 if (unlikely(bi->bi_rw & REQ_FLUSH)) { 4557 md_flush_request(mddev, bi); 4558 return; 4559 } 4560 4561 md_write_start(mddev, bi); 4562 4563 if (rw == READ && 4564 mddev->reshape_position == MaxSector && 4565 chunk_aligned_read(mddev,bi)) 4566 return; 4567 4568 if (unlikely(bi->bi_rw & REQ_DISCARD)) { 4569 make_discard_request(mddev, bi); 4570 return; 4571 } 4572 4573 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1); 4574 last_sector = bio_end_sector(bi); 4575 bi->bi_next = NULL; 4576 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ 4577 4578 for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { 4579 DEFINE_WAIT(w); 4580 int previous; 4581 int seq; 4582 4583 retry: 4584 seq = read_seqcount_begin(&conf->gen_lock); 4585 previous = 0; 4586 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); 4587 if (unlikely(conf->reshape_progress != MaxSector)) { 4588 /* spinlock is needed as reshape_progress may be 4589 * 64bit on a 32bit platform, and so it might be 4590 * possible to see a half-updated value 4591 * Of course reshape_progress could change after 4592 * the lock is dropped, so once we get a reference 4593 * to the stripe that we think it is, we will have 4594 * to check again. 4595 */ 4596 spin_lock_irq(&conf->device_lock); 4597 if (mddev->reshape_backwards 4598 ? logical_sector < conf->reshape_progress 4599 : logical_sector >= conf->reshape_progress) { 4600 previous = 1; 4601 } else { 4602 if (mddev->reshape_backwards 4603 ? logical_sector < conf->reshape_safe 4604 : logical_sector >= conf->reshape_safe) { 4605 spin_unlock_irq(&conf->device_lock); 4606 schedule(); 4607 goto retry; 4608 } 4609 } 4610 spin_unlock_irq(&conf->device_lock); 4611 } 4612 4613 new_sector = raid5_compute_sector(conf, logical_sector, 4614 previous, 4615 &dd_idx, NULL); 4616 pr_debug("raid456: make_request, sector %llu logical %llu\n", 4617 (unsigned long long)new_sector, 4618 (unsigned long long)logical_sector); 4619 4620 sh = get_active_stripe(conf, new_sector, previous, 4621 (bi->bi_rw&RWA_MASK), 0); 4622 if (sh) { 4623 if (unlikely(previous)) { 4624 /* expansion might have moved on while waiting for a 4625 * stripe, so we must do the range check again. 4626 * Expansion could still move past after this 4627 * test, but as we are holding a reference to 4628 * 'sh', we know that if that happens, 4629 * STRIPE_EXPANDING will get set and the expansion 4630 * won't proceed until we finish with the stripe. 4631 */ 4632 int must_retry = 0; 4633 spin_lock_irq(&conf->device_lock); 4634 if (mddev->reshape_backwards 4635 ? 
logical_sector >= conf->reshape_progress 4636 : logical_sector < conf->reshape_progress) 4637 /* mismatch, need to try again */ 4638 must_retry = 1; 4639 spin_unlock_irq(&conf->device_lock); 4640 if (must_retry) { 4641 release_stripe(sh); 4642 schedule(); 4643 goto retry; 4644 } 4645 } 4646 if (read_seqcount_retry(&conf->gen_lock, seq)) { 4647 /* Might have got the wrong stripe_head 4648 * by accident 4649 */ 4650 release_stripe(sh); 4651 goto retry; 4652 } 4653 4654 if (rw == WRITE && 4655 logical_sector >= mddev->suspend_lo && 4656 logical_sector < mddev->suspend_hi) { 4657 release_stripe(sh); 4658 /* As the suspend_* range is controlled by 4659 * userspace, we want an interruptible 4660 * wait. 4661 */ 4662 flush_signals(current); 4663 prepare_to_wait(&conf->wait_for_overlap, 4664 &w, TASK_INTERRUPTIBLE); 4665 if (logical_sector >= mddev->suspend_lo && 4666 logical_sector < mddev->suspend_hi) 4667 schedule(); 4668 goto retry; 4669 } 4670 4671 if (test_bit(STRIPE_EXPANDING, &sh->state) || 4672 !add_stripe_bio(sh, bi, dd_idx, rw)) { 4673 /* Stripe is busy expanding or 4674 * add failed due to overlap. Flush everything 4675 * and wait a while 4676 */ 4677 md_wakeup_thread(mddev->thread); 4678 release_stripe(sh); 4679 schedule(); 4680 goto retry; 4681 } 4682 finish_wait(&conf->wait_for_overlap, &w); 4683 set_bit(STRIPE_HANDLE, &sh->state); 4684 clear_bit(STRIPE_DELAYED, &sh->state); 4685 if ((bi->bi_rw & REQ_SYNC) && 4686 !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 4687 atomic_inc(&conf->preread_active_stripes); 4688 release_stripe_plug(mddev, sh); 4689 } else { 4690 /* cannot get stripe for read-ahead, just give-up */ 4691 clear_bit(BIO_UPTODATE, &bi->bi_flags); 4692 finish_wait(&conf->wait_for_overlap, &w); 4693 break; 4694 } 4695 } 4696 4697 remaining = raid5_dec_bi_active_stripes(bi); 4698 if (remaining == 0) { 4699 4700 if ( rw == WRITE ) 4701 md_write_end(mddev); 4702 4703 trace_block_bio_complete(bdev_get_queue(bi->bi_bdev), 4704 bi, 0); 4705 bio_endio(bi, 0); 4706 } 4707 } 4708 4709 static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks); 4710 4711 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped) 4712 { 4713 /* reshaping is quite different to recovery/resync so it is 4714 * handled quite separately ... here. 4715 * 4716 * On each call to sync_request, we gather one chunk worth of 4717 * destination stripes and flag them as expanding. 4718 * Then we find all the source stripes and request reads. 4719 * As the reads complete, handle_stripe will copy the data 4720 * into the destination stripe and release that stripe. 
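 *
 * (Editor's illustration) Each pass therefore advances
 * conf->reshape_progress by reshape_sectors * new_data_disks array
 * sectors; e.g. with 128-sector chunks and 4 data disks in the new
 * layout, one call relocates 512 sectors' worth of data.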
4721 */ 4722 struct r5conf *conf = mddev->private; 4723 struct stripe_head *sh; 4724 sector_t first_sector, last_sector; 4725 int raid_disks = conf->previous_raid_disks; 4726 int data_disks = raid_disks - conf->max_degraded; 4727 int new_data_disks = conf->raid_disks - conf->max_degraded; 4728 int i; 4729 int dd_idx; 4730 sector_t writepos, readpos, safepos; 4731 sector_t stripe_addr; 4732 int reshape_sectors; 4733 struct list_head stripes; 4734 4735 if (sector_nr == 0) { 4736 /* If restarting in the middle, skip the initial sectors */ 4737 if (mddev->reshape_backwards && 4738 conf->reshape_progress < raid5_size(mddev, 0, 0)) { 4739 sector_nr = raid5_size(mddev, 0, 0) 4740 - conf->reshape_progress; 4741 } else if (!mddev->reshape_backwards && 4742 conf->reshape_progress > 0) 4743 sector_nr = conf->reshape_progress; 4744 sector_div(sector_nr, new_data_disks); 4745 if (sector_nr) { 4746 mddev->curr_resync_completed = sector_nr; 4747 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 4748 *skipped = 1; 4749 return sector_nr; 4750 } 4751 } 4752 4753 /* We need to process a full chunk at a time. 4754 * If old and new chunk sizes differ, we need to process the 4755 * largest of these 4756 */ 4757 if (mddev->new_chunk_sectors > mddev->chunk_sectors) 4758 reshape_sectors = mddev->new_chunk_sectors; 4759 else 4760 reshape_sectors = mddev->chunk_sectors; 4761 4762 /* We update the metadata at least every 10 seconds, or when 4763 * the data about to be copied would over-write the source of 4764 * the data at the front of the range. i.e. one new_stripe 4765 * along from reshape_progress new_maps to after where 4766 * reshape_safe old_maps to 4767 */ 4768 writepos = conf->reshape_progress; 4769 sector_div(writepos, new_data_disks); 4770 readpos = conf->reshape_progress; 4771 sector_div(readpos, data_disks); 4772 safepos = conf->reshape_safe; 4773 sector_div(safepos, data_disks); 4774 if (mddev->reshape_backwards) { 4775 writepos -= min_t(sector_t, reshape_sectors, writepos); 4776 readpos += reshape_sectors; 4777 safepos += reshape_sectors; 4778 } else { 4779 writepos += reshape_sectors; 4780 readpos -= min_t(sector_t, reshape_sectors, readpos); 4781 safepos -= min_t(sector_t, reshape_sectors, safepos); 4782 } 4783 4784 /* Having calculated the 'writepos' possibly use it 4785 * to set 'stripe_addr' which is where we will write to. 4786 */ 4787 if (mddev->reshape_backwards) { 4788 BUG_ON(conf->reshape_progress == 0); 4789 stripe_addr = writepos; 4790 BUG_ON((mddev->dev_sectors & 4791 ~((sector_t)reshape_sectors - 1)) 4792 - reshape_sectors - stripe_addr 4793 != sector_nr); 4794 } else { 4795 BUG_ON(writepos != sector_nr + reshape_sectors); 4796 stripe_addr = sector_nr; 4797 } 4798 4799 /* 'writepos' is the most advanced device address we might write. 4800 * 'readpos' is the least advanced device address we might read. 4801 * 'safepos' is the least address recorded in the metadata as having 4802 * been reshaped. 4803 * If there is a min_offset_diff, these are adjusted either by 4804 * increasing the safepos/readpos if diff is negative, or 4805 * increasing writepos if diff is positive. 4806 * If 'readpos' is then behind 'writepos', there is no way that we can 4807 * ensure safety in the face of a crash - that must be done by userspace 4808 * making a backup of the data. So in that case there is no particular 4809 * rush to update metadata. 
4810 * Otherwise if 'safepos' is behind 'writepos', then we really need to
4811 * update the metadata to advance 'safepos' to match 'readpos' so that
4812 * we can be safe in the event of a crash.
4813 * So we insist on updating metadata if safepos is behind writepos and
4814 * readpos is beyond writepos.
4815 * In any case, update the metadata every 10 seconds.
4816 * Maybe that number should be configurable, but I'm not sure it is
4817 * worth it.... maybe it could be a multiple of safemode_delay???
4818 */
4819 if (conf->min_offset_diff < 0) {
4820 safepos += -conf->min_offset_diff;
4821 readpos += -conf->min_offset_diff;
4822 } else
4823 writepos += conf->min_offset_diff;
4824
4825 if ((mddev->reshape_backwards
4826 ? (safepos > writepos && readpos < writepos)
4827 : (safepos < writepos && readpos > writepos)) ||
4828 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
4829 /* Cannot proceed until we've updated the superblock... */
4830 wait_event(conf->wait_for_overlap,
4831 atomic_read(&conf->reshape_stripes)==0
4832 || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
4833 if (atomic_read(&conf->reshape_stripes) != 0)
4834 return 0;
4835 mddev->reshape_position = conf->reshape_progress;
4836 mddev->curr_resync_completed = sector_nr;
4837 conf->reshape_checkpoint = jiffies;
4838 set_bit(MD_CHANGE_DEVS, &mddev->flags);
4839 md_wakeup_thread(mddev->thread);
4840 wait_event(mddev->sb_wait, mddev->flags == 0 ||
4841 test_bit(MD_RECOVERY_INTR, &mddev->recovery));
4842 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
4843 return 0;
4844 spin_lock_irq(&conf->device_lock);
4845 conf->reshape_safe = mddev->reshape_position;
4846 spin_unlock_irq(&conf->device_lock);
4847 wake_up(&conf->wait_for_overlap);
4848 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4849 }
4850
4851 INIT_LIST_HEAD(&stripes);
4852 for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
4853 int j;
4854 int skipped_disk = 0;
4855 sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
4856 set_bit(STRIPE_EXPANDING, &sh->state);
4857 atomic_inc(&conf->reshape_stripes);
4858 /* If any of this stripe is beyond the end of the old
4859 * array, then we need to zero those blocks
4860 */
4861 for (j=sh->disks; j--;) {
4862 sector_t s;
4863 if (j == sh->pd_idx)
4864 continue;
4865 if (conf->level == 6 &&
4866 j == sh->qd_idx)
4867 continue;
4868 s = compute_blocknr(sh, j, 0);
4869 if (s < raid5_size(mddev, 0, 0)) {
4870 skipped_disk = 1;
4871 continue;
4872 }
4873 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
4874 set_bit(R5_Expanded, &sh->dev[j].flags);
4875 set_bit(R5_UPTODATE, &sh->dev[j].flags);
4876 }
4877 if (!skipped_disk) {
4878 set_bit(STRIPE_EXPAND_READY, &sh->state);
4879 set_bit(STRIPE_HANDLE, &sh->state);
4880 }
4881 list_add(&sh->lru, &stripes);
4882 }
4883 spin_lock_irq(&conf->device_lock);
4884 if (mddev->reshape_backwards)
4885 conf->reshape_progress -= reshape_sectors * new_data_disks;
4886 else
4887 conf->reshape_progress += reshape_sectors * new_data_disks;
4888 spin_unlock_irq(&conf->device_lock);
4889 /* Ok, those stripes are ready. We can start scheduling
4890 * reads on the source stripes.
4891 * The source stripes are determined by mapping the first and last
4892 * block on the destination stripes.
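 *
 * (Editor's note) stripe_addr is a per-device offset, so multiplying by
 * new_data_disks converts it back to an array-logical address, which
 * raid5_compute_sector() below maps through the old geometry (the '1'
 * passed as 'previous') to locate the corresponding source stripes.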
4893 */ 4894 first_sector = 4895 raid5_compute_sector(conf, stripe_addr*(new_data_disks), 4896 1, &dd_idx, NULL); 4897 last_sector = 4898 raid5_compute_sector(conf, ((stripe_addr+reshape_sectors) 4899 * new_data_disks - 1), 4900 1, &dd_idx, NULL); 4901 if (last_sector >= mddev->dev_sectors) 4902 last_sector = mddev->dev_sectors - 1; 4903 while (first_sector <= last_sector) { 4904 sh = get_active_stripe(conf, first_sector, 1, 0, 1); 4905 set_bit(STRIPE_EXPAND_SOURCE, &sh->state); 4906 set_bit(STRIPE_HANDLE, &sh->state); 4907 release_stripe(sh); 4908 first_sector += STRIPE_SECTORS; 4909 } 4910 /* Now that the sources are clearly marked, we can release 4911 * the destination stripes 4912 */ 4913 while (!list_empty(&stripes)) { 4914 sh = list_entry(stripes.next, struct stripe_head, lru); 4915 list_del_init(&sh->lru); 4916 release_stripe(sh); 4917 } 4918 /* If this takes us to the resync_max point where we have to pause, 4919 * then we need to write out the superblock. 4920 */ 4921 sector_nr += reshape_sectors; 4922 if ((sector_nr - mddev->curr_resync_completed) * 2 4923 >= mddev->resync_max - mddev->curr_resync_completed) { 4924 /* Cannot proceed until we've updated the superblock... */ 4925 wait_event(conf->wait_for_overlap, 4926 atomic_read(&conf->reshape_stripes) == 0 4927 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); 4928 if (atomic_read(&conf->reshape_stripes) != 0) 4929 goto ret; 4930 mddev->reshape_position = conf->reshape_progress; 4931 mddev->curr_resync_completed = sector_nr; 4932 conf->reshape_checkpoint = jiffies; 4933 set_bit(MD_CHANGE_DEVS, &mddev->flags); 4934 md_wakeup_thread(mddev->thread); 4935 wait_event(mddev->sb_wait, 4936 !test_bit(MD_CHANGE_DEVS, &mddev->flags) 4937 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); 4938 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 4939 goto ret; 4940 spin_lock_irq(&conf->device_lock); 4941 conf->reshape_safe = mddev->reshape_position; 4942 spin_unlock_irq(&conf->device_lock); 4943 wake_up(&conf->wait_for_overlap); 4944 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 4945 } 4946 ret: 4947 return reshape_sectors; 4948 } 4949 4950 /* FIXME go_faster isn't used */ 4951 static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster) 4952 { 4953 struct r5conf *conf = mddev->private; 4954 struct stripe_head *sh; 4955 sector_t max_sector = mddev->dev_sectors; 4956 sector_t sync_blocks; 4957 int still_degraded = 0; 4958 int i; 4959 4960 if (sector_nr >= max_sector) { 4961 /* just being told to finish up .. 
nothing much to do */
4962
4963 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
4964 end_reshape(conf);
4965 return 0;
4966 }
4967
4968 if (mddev->curr_resync < max_sector) /* aborted */
4969 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
4970 &sync_blocks, 1);
4971 else /* completed sync */
4972 conf->fullsync = 0;
4973 bitmap_close_sync(mddev->bitmap);
4974
4975 return 0;
4976 }
4977
4978 /* Allow raid5_quiesce to complete */
4979 wait_event(conf->wait_for_overlap, conf->quiesce != 2);
4980
4981 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
4982 return reshape_request(mddev, sector_nr, skipped);
4983
4984 /* No need to check resync_max as we never do more than one
4985 * stripe, and as resync_max will always be on a chunk boundary,
4986 * if the check in md_do_sync didn't fire, there is no chance
4987 * of overstepping resync_max here
4988 */
4989
4990 /* if there are too many failed drives and we are trying
4991 * to resync, then assert that we are finished, because there is
4992 * nothing we can do.
4993 */
4994 if (mddev->degraded >= conf->max_degraded &&
4995 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
4996 sector_t rv = mddev->dev_sectors - sector_nr;
4997 *skipped = 1;
4998 return rv;
4999 }
5000 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
5001 !conf->fullsync &&
5002 !bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
5003 sync_blocks >= STRIPE_SECTORS) {
5004 /* we can skip this block, and probably more */
5005 sync_blocks /= STRIPE_SECTORS;
5006 *skipped = 1;
5007 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
5008 }
5009
5010 bitmap_cond_end_sync(mddev->bitmap, sector_nr);
5011
5012 sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
5013 if (sh == NULL) {
5014 sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
5015 /* make sure we don't swamp the stripe cache if someone else
5016 * is trying to get access
5017 */
5018 schedule_timeout_uninterruptible(1);
5019 }
5020 /* Need to check if array will still be degraded after recovery/resync
5021 * We don't need to check the 'failed' flag as when that gets set,
5022 * recovery aborts.
5023 */
5024 for (i = 0; i < conf->raid_disks; i++)
5025 if (conf->disks[i].rdev == NULL)
5026 still_degraded = 1;
5027
5028 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
5029
5030 set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
5031
5032 handle_stripe(sh);
5033 release_stripe(sh);
5034
5035 return STRIPE_SECTORS;
5036 }
5037
5038 static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
5039 {
5040 /* We may not be able to submit a whole bio at once as there
5041 * may not be enough stripe_heads available.
5042 * We cannot pre-allocate enough stripe_heads as we may need
5043 * more than exist in the cache (if we allow ever larger chunks).
5044 * So we do one stripe head at a time and record in the upper bits of
5045 * ->bi_phys_segments how many have been done.
5046 *
5047 * We *know* that this entire raid_bio is in one chunk, so
5048 * it will be only one 'dd_idx' and only need one call to raid5_compute_sector.
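 *
 * (Editor's illustration) "In one chunk" was guaranteed by
 * in_chunk_boundary() when the aligned read was first issued: with
 * chunk_sectors == 128, a bio of 8 sectors at sector 250 would give
 * (250 & 127) + 8 == 130 > 128 and be rejected.  With 4K pages
 * STRIPE_SECTORS is 8, so a 64-sector bio is retried as 8 stripe_heads;
 * if only the first five can be obtained, the processed count is parked
 * at 5 and raid5d resumes from the sixth stripe on a later pass.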
5049 */ 5050 struct stripe_head *sh; 5051 int dd_idx; 5052 sector_t sector, logical_sector, last_sector; 5053 int scnt = 0; 5054 int remaining; 5055 int handled = 0; 5056 5057 logical_sector = raid_bio->bi_iter.bi_sector & 5058 ~((sector_t)STRIPE_SECTORS-1); 5059 sector = raid5_compute_sector(conf, logical_sector, 5060 0, &dd_idx, NULL); 5061 last_sector = bio_end_sector(raid_bio); 5062 5063 for (; logical_sector < last_sector; 5064 logical_sector += STRIPE_SECTORS, 5065 sector += STRIPE_SECTORS, 5066 scnt++) { 5067 5068 if (scnt < raid5_bi_processed_stripes(raid_bio)) 5069 /* already done this stripe */ 5070 continue; 5071 5072 sh = get_active_stripe(conf, sector, 0, 1, 0); 5073 5074 if (!sh) { 5075 /* failed to get a stripe - must wait */ 5076 raid5_set_bi_processed_stripes(raid_bio, scnt); 5077 conf->retry_read_aligned = raid_bio; 5078 return handled; 5079 } 5080 5081 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) { 5082 release_stripe(sh); 5083 raid5_set_bi_processed_stripes(raid_bio, scnt); 5084 conf->retry_read_aligned = raid_bio; 5085 return handled; 5086 } 5087 5088 set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags); 5089 handle_stripe(sh); 5090 release_stripe(sh); 5091 handled++; 5092 } 5093 remaining = raid5_dec_bi_active_stripes(raid_bio); 5094 if (remaining == 0) { 5095 trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev), 5096 raid_bio, 0); 5097 bio_endio(raid_bio, 0); 5098 } 5099 if (atomic_dec_and_test(&conf->active_aligned_reads)) 5100 wake_up(&conf->wait_for_stripe); 5101 return handled; 5102 } 5103 5104 static int handle_active_stripes(struct r5conf *conf, int group, 5105 struct r5worker *worker, 5106 struct list_head *temp_inactive_list) 5107 { 5108 struct stripe_head *batch[MAX_STRIPE_BATCH], *sh; 5109 int i, batch_size = 0, hash; 5110 bool release_inactive = false; 5111 5112 while (batch_size < MAX_STRIPE_BATCH && 5113 (sh = __get_priority_stripe(conf, group)) != NULL) 5114 batch[batch_size++] = sh; 5115 5116 if (batch_size == 0) { 5117 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) 5118 if (!list_empty(temp_inactive_list + i)) 5119 break; 5120 if (i == NR_STRIPE_HASH_LOCKS) 5121 return batch_size; 5122 release_inactive = true; 5123 } 5124 spin_unlock_irq(&conf->device_lock); 5125 5126 release_inactive_stripe_list(conf, temp_inactive_list, 5127 NR_STRIPE_HASH_LOCKS); 5128 5129 if (release_inactive) { 5130 spin_lock_irq(&conf->device_lock); 5131 return 0; 5132 } 5133 5134 for (i = 0; i < batch_size; i++) 5135 handle_stripe(batch[i]); 5136 5137 cond_resched(); 5138 5139 spin_lock_irq(&conf->device_lock); 5140 for (i = 0; i < batch_size; i++) { 5141 hash = batch[i]->hash_lock_index; 5142 __release_stripe(conf, batch[i], &temp_inactive_list[hash]); 5143 } 5144 return batch_size; 5145 } 5146 5147 static void raid5_do_work(struct work_struct *work) 5148 { 5149 struct r5worker *worker = container_of(work, struct r5worker, work); 5150 struct r5worker_group *group = worker->group; 5151 struct r5conf *conf = group->conf; 5152 int group_id = group - conf->worker_groups; 5153 int handled; 5154 struct blk_plug plug; 5155 5156 pr_debug("+++ raid5worker active\n"); 5157 5158 blk_start_plug(&plug); 5159 handled = 0; 5160 spin_lock_irq(&conf->device_lock); 5161 while (1) { 5162 int batch_size, released; 5163 5164 released = release_stripe_list(conf, worker->temp_inactive_list); 5165 5166 batch_size = handle_active_stripes(conf, group_id, worker, 5167 worker->temp_inactive_list); 5168 worker->working = false; 5169 if (!batch_size && !released) 5170 break; 5171 handled += batch_size; 5172 
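/*
 * (Editor's note) handle_active_stripes() caps each batch at
 * MAX_STRIPE_BATCH (8) stripes, so released stripes are re-collected
 * at least once every eight handled stripes.
 */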
} 5173 pr_debug("%d stripes handled\n", handled); 5174 5175 spin_unlock_irq(&conf->device_lock); 5176 blk_finish_plug(&plug); 5177 5178 pr_debug("--- raid5worker inactive\n"); 5179 } 5180 5181 /* 5182 * This is our raid5 kernel thread. 5183 * 5184 * We scan the hash table for stripes which can be handled now. 5185 * During the scan, completed stripes are saved for us by the interrupt 5186 * handler, so that they will not have to wait for our next wakeup. 5187 */ 5188 static void raid5d(struct md_thread *thread) 5189 { 5190 struct mddev *mddev = thread->mddev; 5191 struct r5conf *conf = mddev->private; 5192 int handled; 5193 struct blk_plug plug; 5194 5195 pr_debug("+++ raid5d active\n"); 5196 5197 md_check_recovery(mddev); 5198 5199 blk_start_plug(&plug); 5200 handled = 0; 5201 spin_lock_irq(&conf->device_lock); 5202 while (1) { 5203 struct bio *bio; 5204 int batch_size, released; 5205 5206 released = release_stripe_list(conf, conf->temp_inactive_list); 5207 5208 if ( 5209 !list_empty(&conf->bitmap_list)) { 5210 /* Now is a good time to flush some bitmap updates */ 5211 conf->seq_flush++; 5212 spin_unlock_irq(&conf->device_lock); 5213 bitmap_unplug(mddev->bitmap); 5214 spin_lock_irq(&conf->device_lock); 5215 conf->seq_write = conf->seq_flush; 5216 activate_bit_delay(conf, conf->temp_inactive_list); 5217 } 5218 raid5_activate_delayed(conf); 5219 5220 while ((bio = remove_bio_from_retry(conf))) { 5221 int ok; 5222 spin_unlock_irq(&conf->device_lock); 5223 ok = retry_aligned_read(conf, bio); 5224 spin_lock_irq(&conf->device_lock); 5225 if (!ok) 5226 break; 5227 handled++; 5228 } 5229 5230 batch_size = handle_active_stripes(conf, ANY_GROUP, NULL, 5231 conf->temp_inactive_list); 5232 if (!batch_size && !released) 5233 break; 5234 handled += batch_size; 5235 5236 if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) { 5237 spin_unlock_irq(&conf->device_lock); 5238 md_check_recovery(mddev); 5239 spin_lock_irq(&conf->device_lock); 5240 } 5241 } 5242 pr_debug("%d stripes handled\n", handled); 5243 5244 spin_unlock_irq(&conf->device_lock); 5245 5246 async_tx_issue_pending_all(); 5247 blk_finish_plug(&plug); 5248 5249 pr_debug("--- raid5d inactive\n"); 5250 } 5251 5252 static ssize_t 5253 raid5_show_stripe_cache_size(struct mddev *mddev, char *page) 5254 { 5255 struct r5conf *conf = mddev->private; 5256 if (conf) 5257 return sprintf(page, "%d\n", conf->max_nr_stripes); 5258 else 5259 return 0; 5260 } 5261 5262 int 5263 raid5_set_cache_size(struct mddev *mddev, int size) 5264 { 5265 struct r5conf *conf = mddev->private; 5266 int err; 5267 int hash; 5268 5269 if (size <= 16 || size > 32768) 5270 return -EINVAL; 5271 hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS; 5272 while (size < conf->max_nr_stripes) { 5273 if (drop_one_stripe(conf, hash)) 5274 conf->max_nr_stripes--; 5275 else 5276 break; 5277 hash--; 5278 if (hash < 0) 5279 hash = NR_STRIPE_HASH_LOCKS - 1; 5280 } 5281 err = md_allow_write(mddev); 5282 if (err) 5283 return err; 5284 hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS; 5285 while (size > conf->max_nr_stripes) { 5286 if (grow_one_stripe(conf, hash)) 5287 conf->max_nr_stripes++; 5288 else break; 5289 hash = (hash + 1) % NR_STRIPE_HASH_LOCKS; 5290 } 5291 return 0; 5292 } 5293 EXPORT_SYMBOL(raid5_set_cache_size); 5294 5295 static ssize_t 5296 raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len) 5297 { 5298 struct r5conf *conf = mddev->private; 5299 unsigned long new; 5300 int err; 5301 5302 if (len >= PAGE_SIZE) 5303 return -EINVAL; 5304 if (!conf) 5305 return 
-ENODEV; 5306 5307 if (kstrtoul(page, 10, &new)) 5308 return -EINVAL; 5309 err = raid5_set_cache_size(mddev, new); 5310 if (err) 5311 return err; 5312 return len; 5313 } 5314 5315 static struct md_sysfs_entry 5316 raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR, 5317 raid5_show_stripe_cache_size, 5318 raid5_store_stripe_cache_size); 5319 5320 static ssize_t 5321 raid5_show_preread_threshold(struct mddev *mddev, char *page) 5322 { 5323 struct r5conf *conf = mddev->private; 5324 if (conf) 5325 return sprintf(page, "%d\n", conf->bypass_threshold); 5326 else 5327 return 0; 5328 } 5329 5330 static ssize_t 5331 raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len) 5332 { 5333 struct r5conf *conf = mddev->private; 5334 unsigned long new; 5335 if (len >= PAGE_SIZE) 5336 return -EINVAL; 5337 if (!conf) 5338 return -ENODEV; 5339 5340 if (kstrtoul(page, 10, &new)) 5341 return -EINVAL; 5342 if (new > conf->max_nr_stripes) 5343 return -EINVAL; 5344 conf->bypass_threshold = new; 5345 return len; 5346 } 5347 5348 static struct md_sysfs_entry 5349 raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold, 5350 S_IRUGO | S_IWUSR, 5351 raid5_show_preread_threshold, 5352 raid5_store_preread_threshold); 5353 5354 static ssize_t 5355 stripe_cache_active_show(struct mddev *mddev, char *page) 5356 { 5357 struct r5conf *conf = mddev->private; 5358 if (conf) 5359 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); 5360 else 5361 return 0; 5362 } 5363 5364 static struct md_sysfs_entry 5365 raid5_stripecache_active = __ATTR_RO(stripe_cache_active); 5366 5367 static ssize_t 5368 raid5_show_group_thread_cnt(struct mddev *mddev, char *page) 5369 { 5370 struct r5conf *conf = mddev->private; 5371 if (conf) 5372 return sprintf(page, "%d\n", conf->worker_cnt_per_group); 5373 else 5374 return 0; 5375 } 5376 5377 static int alloc_thread_groups(struct r5conf *conf, int cnt, 5378 int *group_cnt, 5379 int *worker_cnt_per_group, 5380 struct r5worker_group **worker_groups); 5381 static ssize_t 5382 raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) 5383 { 5384 struct r5conf *conf = mddev->private; 5385 unsigned long new; 5386 int err; 5387 struct r5worker_group *new_groups, *old_groups; 5388 int group_cnt, worker_cnt_per_group; 5389 5390 if (len >= PAGE_SIZE) 5391 return -EINVAL; 5392 if (!conf) 5393 return -ENODEV; 5394 5395 if (kstrtoul(page, 10, &new)) 5396 return -EINVAL; 5397 5398 if (new == conf->worker_cnt_per_group) 5399 return len; 5400 5401 mddev_suspend(mddev); 5402 5403 old_groups = conf->worker_groups; 5404 if (old_groups) 5405 flush_workqueue(raid5_wq); 5406 5407 err = alloc_thread_groups(conf, new, 5408 &group_cnt, &worker_cnt_per_group, 5409 &new_groups); 5410 if (!err) { 5411 spin_lock_irq(&conf->device_lock); 5412 conf->group_cnt = group_cnt; 5413 conf->worker_cnt_per_group = worker_cnt_per_group; 5414 conf->worker_groups = new_groups; 5415 spin_unlock_irq(&conf->device_lock); 5416 5417 if (old_groups) 5418 kfree(old_groups[0].workers); 5419 kfree(old_groups); 5420 } 5421 5422 mddev_resume(mddev); 5423 5424 if (err) 5425 return err; 5426 return len; 5427 } 5428 5429 static struct md_sysfs_entry 5430 raid5_group_thread_cnt = __ATTR(group_thread_cnt, S_IRUGO | S_IWUSR, 5431 raid5_show_group_thread_cnt, 5432 raid5_store_group_thread_cnt); 5433 5434 static struct attribute *raid5_attrs[] = { 5435 &raid5_stripecache_size.attr, 5436 &raid5_stripecache_active.attr, 5437 &raid5_preread_bypass_threshold.attr, 5438 
&raid5_group_thread_cnt.attr, 5439 NULL, 5440 }; 5441 static struct attribute_group raid5_attrs_group = { 5442 .name = NULL, 5443 .attrs = raid5_attrs, 5444 }; 5445 5446 static int alloc_thread_groups(struct r5conf *conf, int cnt, 5447 int *group_cnt, 5448 int *worker_cnt_per_group, 5449 struct r5worker_group **worker_groups) 5450 { 5451 int i, j, k; 5452 ssize_t size; 5453 struct r5worker *workers; 5454 5455 *worker_cnt_per_group = cnt; 5456 if (cnt == 0) { 5457 *group_cnt = 0; 5458 *worker_groups = NULL; 5459 return 0; 5460 } 5461 *group_cnt = num_possible_nodes(); 5462 size = sizeof(struct r5worker) * cnt; 5463 workers = kzalloc(size * *group_cnt, GFP_NOIO); 5464 *worker_groups = kzalloc(sizeof(struct r5worker_group) * 5465 *group_cnt, GFP_NOIO); 5466 if (!*worker_groups || !workers) { 5467 kfree(workers); 5468 kfree(*worker_groups); 5469 return -ENOMEM; 5470 } 5471 5472 for (i = 0; i < *group_cnt; i++) { 5473 struct r5worker_group *group; 5474 5475 group = &(*worker_groups)[i]; 5476 INIT_LIST_HEAD(&group->handle_list); 5477 group->conf = conf; 5478 group->workers = workers + i * cnt; 5479 5480 for (j = 0; j < cnt; j++) { 5481 struct r5worker *worker = group->workers + j; 5482 worker->group = group; 5483 INIT_WORK(&worker->work, raid5_do_work); 5484 5485 for (k = 0; k < NR_STRIPE_HASH_LOCKS; k++) 5486 INIT_LIST_HEAD(worker->temp_inactive_list + k); 5487 } 5488 } 5489 5490 return 0; 5491 } 5492 5493 static void free_thread_groups(struct r5conf *conf) 5494 { 5495 if (conf->worker_groups) 5496 kfree(conf->worker_groups[0].workers); 5497 kfree(conf->worker_groups); 5498 conf->worker_groups = NULL; 5499 } 5500 5501 static sector_t 5502 raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks) 5503 { 5504 struct r5conf *conf = mddev->private; 5505 5506 if (!sectors) 5507 sectors = mddev->dev_sectors; 5508 if (!raid_disks) 5509 /* size is defined by the smallest of previous and new size */ 5510 raid_disks = min(conf->raid_disks, conf->previous_raid_disks); 5511 5512 sectors &= ~((sector_t)mddev->chunk_sectors - 1); 5513 sectors &= ~((sector_t)mddev->new_chunk_sectors - 1); 5514 return sectors * (raid_disks - conf->max_degraded); 5515 } 5516 5517 static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) 5518 { 5519 safe_put_page(percpu->spare_page); 5520 kfree(percpu->scribble); 5521 percpu->spare_page = NULL; 5522 percpu->scribble = NULL; 5523 } 5524 5525 static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) 5526 { 5527 if (conf->level == 6 && !percpu->spare_page) 5528 percpu->spare_page = alloc_page(GFP_KERNEL); 5529 if (!percpu->scribble) 5530 percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL); 5531 5532 if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) { 5533 free_scratch_buffer(conf, percpu); 5534 return -ENOMEM; 5535 } 5536 5537 return 0; 5538 } 5539 5540 static void raid5_free_percpu(struct r5conf *conf) 5541 { 5542 unsigned long cpu; 5543 5544 if (!conf->percpu) 5545 return; 5546 5547 #ifdef CONFIG_HOTPLUG_CPU 5548 unregister_cpu_notifier(&conf->cpu_notify); 5549 #endif 5550 5551 get_online_cpus(); 5552 for_each_possible_cpu(cpu) 5553 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); 5554 put_online_cpus(); 5555 5556 free_percpu(conf->percpu); 5557 } 5558 5559 static void free_conf(struct r5conf *conf) 5560 { 5561 free_thread_groups(conf); 5562 shrink_stripes(conf); 5563 raid5_free_percpu(conf); 5564 kfree(conf->disks); 5565 kfree(conf->stripe_hashtbl); 5566 kfree(conf); 5567 } 5568 5569 
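/*
 * (Editor's note) Worked example of the raid5_size() arithmetic above,
 * assuming a hypothetical 5-device RAID-5 array (max_degraded == 1)
 * with 1024-sector chunks and 1000000 sectors per device:
 *
 *	1000000 & ~(1024 - 1)  == 999424	usable sectors per device
 *	999424 * (5 - 1)       == 3997696	array capacity in sectors
 *
 * The helper below is an illustrative sketch of that rounding only; the
 * name is invented for this note, it ignores the old/new chunk-size
 * distinction, and it is not used anywhere in the driver.
 */
static inline sector_t raid5_example_capacity(sector_t dev_sectors,
					      unsigned int chunk_sectors,
					      int raid_disks,
					      int max_degraded)
{
	/* round each device down to a whole number of chunks */
	dev_sectors &= ~((sector_t)chunk_sectors - 1);
	/* parity consumes max_degraded devices' worth of space */
	return dev_sectors * (raid_disks - max_degraded);
}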
#ifdef CONFIG_HOTPLUG_CPU 5570 static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action, 5571 void *hcpu) 5572 { 5573 struct r5conf *conf = container_of(nfb, struct r5conf, cpu_notify); 5574 long cpu = (long)hcpu; 5575 struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu); 5576 5577 switch (action) { 5578 case CPU_UP_PREPARE: 5579 case CPU_UP_PREPARE_FROZEN: 5580 if (alloc_scratch_buffer(conf, percpu)) { 5581 pr_err("%s: failed memory allocation for cpu%ld\n", 5582 __func__, cpu); 5583 return notifier_from_errno(-ENOMEM); 5584 } 5585 break; 5586 case CPU_DEAD: 5587 case CPU_DEAD_FROZEN: 5588 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); 5589 break; 5590 default: 5591 break; 5592 } 5593 return NOTIFY_OK; 5594 } 5595 #endif 5596 5597 static int raid5_alloc_percpu(struct r5conf *conf) 5598 { 5599 unsigned long cpu; 5600 int err = 0; 5601 5602 conf->percpu = alloc_percpu(struct raid5_percpu); 5603 if (!conf->percpu) 5604 return -ENOMEM; 5605 5606 #ifdef CONFIG_HOTPLUG_CPU 5607 conf->cpu_notify.notifier_call = raid456_cpu_notify; 5608 conf->cpu_notify.priority = 0; 5609 err = register_cpu_notifier(&conf->cpu_notify); 5610 if (err) 5611 return err; 5612 #endif 5613 5614 get_online_cpus(); 5615 for_each_present_cpu(cpu) { 5616 err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); 5617 if (err) { 5618 pr_err("%s: failed memory allocation for cpu%ld\n", 5619 __func__, cpu); 5620 break; 5621 } 5622 } 5623 put_online_cpus(); 5624 5625 return err; 5626 } 5627 5628 static struct r5conf *setup_conf(struct mddev *mddev) 5629 { 5630 struct r5conf *conf; 5631 int raid_disk, memory, max_disks; 5632 struct md_rdev *rdev; 5633 struct disk_info *disk; 5634 char pers_name[6]; 5635 int i; 5636 int group_cnt, worker_cnt_per_group; 5637 struct r5worker_group *new_group; 5638 5639 if (mddev->new_level != 5 5640 && mddev->new_level != 4 5641 && mddev->new_level != 6) { 5642 printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n", 5643 mdname(mddev), mddev->new_level); 5644 return ERR_PTR(-EIO); 5645 } 5646 if ((mddev->new_level == 5 5647 && !algorithm_valid_raid5(mddev->new_layout)) || 5648 (mddev->new_level == 6 5649 && !algorithm_valid_raid6(mddev->new_layout))) { 5650 printk(KERN_ERR "md/raid:%s: layout %d not supported\n", 5651 mdname(mddev), mddev->new_layout); 5652 return ERR_PTR(-EIO); 5653 } 5654 if (mddev->new_level == 6 && mddev->raid_disks < 4) { 5655 printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n", 5656 mdname(mddev), mddev->raid_disks); 5657 return ERR_PTR(-EINVAL); 5658 } 5659 5660 if (!mddev->new_chunk_sectors || 5661 (mddev->new_chunk_sectors << 9) % PAGE_SIZE || 5662 !is_power_of_2(mddev->new_chunk_sectors)) { 5663 printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n", 5664 mdname(mddev), mddev->new_chunk_sectors << 9); 5665 return ERR_PTR(-EINVAL); 5666 } 5667 5668 conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL); 5669 if (conf == NULL) 5670 goto abort; 5671 /* Don't enable multi-threading by default*/ 5672 if (!alloc_thread_groups(conf, 0, &group_cnt, &worker_cnt_per_group, 5673 &new_group)) { 5674 conf->group_cnt = group_cnt; 5675 conf->worker_cnt_per_group = worker_cnt_per_group; 5676 conf->worker_groups = new_group; 5677 } else 5678 goto abort; 5679 spin_lock_init(&conf->device_lock); 5680 seqcount_init(&conf->gen_lock); 5681 init_waitqueue_head(&conf->wait_for_stripe); 5682 init_waitqueue_head(&conf->wait_for_overlap); 5683 INIT_LIST_HEAD(&conf->handle_list); 5684 INIT_LIST_HEAD(&conf->hold_list); 
5685 INIT_LIST_HEAD(&conf->delayed_list); 5686 INIT_LIST_HEAD(&conf->bitmap_list); 5687 init_llist_head(&conf->released_stripes); 5688 atomic_set(&conf->active_stripes, 0); 5689 atomic_set(&conf->preread_active_stripes, 0); 5690 atomic_set(&conf->active_aligned_reads, 0); 5691 conf->bypass_threshold = BYPASS_THRESHOLD; 5692 conf->recovery_disabled = mddev->recovery_disabled - 1; 5693 5694 conf->raid_disks = mddev->raid_disks; 5695 if (mddev->reshape_position == MaxSector) 5696 conf->previous_raid_disks = mddev->raid_disks; 5697 else 5698 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; 5699 max_disks = max(conf->raid_disks, conf->previous_raid_disks); 5700 conf->scribble_len = scribble_len(max_disks); 5701 5702 conf->disks = kzalloc(max_disks * sizeof(struct disk_info), 5703 GFP_KERNEL); 5704 if (!conf->disks) 5705 goto abort; 5706 5707 conf->mddev = mddev; 5708 5709 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) 5710 goto abort; 5711 5712 /* We init hash_locks[0] separately so that it can be used 5713 * as the reference lock in the spin_lock_nest_lock() call 5714 * in lock_all_device_hash_locks_irq in order to convince 5715 * lockdep that we know what we are doing. 5716 */ 5717 spin_lock_init(conf->hash_locks); 5718 for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++) 5719 spin_lock_init(conf->hash_locks + i); 5720 5721 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) 5722 INIT_LIST_HEAD(conf->inactive_list + i); 5723 5724 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) 5725 INIT_LIST_HEAD(conf->temp_inactive_list + i); 5726 5727 conf->level = mddev->new_level; 5728 if (raid5_alloc_percpu(conf) != 0) 5729 goto abort; 5730 5731 pr_debug("raid456: run(%s) called.\n", mdname(mddev)); 5732 5733 rdev_for_each(rdev, mddev) { 5734 raid_disk = rdev->raid_disk; 5735 if (raid_disk >= max_disks 5736 || raid_disk < 0) 5737 continue; 5738 disk = conf->disks + raid_disk; 5739 5740 if (test_bit(Replacement, &rdev->flags)) { 5741 if (disk->replacement) 5742 goto abort; 5743 disk->replacement = rdev; 5744 } else { 5745 if (disk->rdev) 5746 goto abort; 5747 disk->rdev = rdev; 5748 } 5749 5750 if (test_bit(In_sync, &rdev->flags)) { 5751 char b[BDEVNAME_SIZE]; 5752 printk(KERN_INFO "md/raid:%s: device %s operational as raid" 5753 " disk %d\n", 5754 mdname(mddev), bdevname(rdev->bdev, b), raid_disk); 5755 } else if (rdev->saved_raid_disk != raid_disk) 5756 /* Cannot rely on bitmap to complete recovery */ 5757 conf->fullsync = 1; 5758 } 5759 5760 conf->chunk_sectors = mddev->new_chunk_sectors; 5761 conf->level = mddev->new_level; 5762 if (conf->level == 6) 5763 conf->max_degraded = 2; 5764 else 5765 conf->max_degraded = 1; 5766 conf->algorithm = mddev->new_layout; 5767 conf->reshape_progress = mddev->reshape_position; 5768 if (conf->reshape_progress != MaxSector) { 5769 conf->prev_chunk_sectors = mddev->chunk_sectors; 5770 conf->prev_algo = mddev->layout; 5771 } 5772 5773 memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + 5774 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; 5775 atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS); 5776 if (grow_stripes(conf, NR_STRIPES)) { 5777 printk(KERN_ERR 5778 "md/raid:%s: couldn't allocate %dkB for buffers\n", 5779 mdname(mddev), memory); 5780 goto abort; 5781 } else 5782 printk(KERN_INFO "md/raid:%s: allocated %dkB\n", 5783 mdname(mddev), memory); 5784 5785 sprintf(pers_name, "raid%d", mddev->new_level); 5786 conf->thread = md_register_thread(raid5d, mddev, pers_name); 5787 if (!conf->thread) { 5788 printk(KERN_ERR 5789
"md/raid:%s: couldn't allocate thread.\n", 5790 mdname(mddev)); 5791 goto abort; 5792 } 5793 5794 return conf; 5795 5796 abort: 5797 if (conf) { 5798 free_conf(conf); 5799 return ERR_PTR(-EIO); 5800 } else 5801 return ERR_PTR(-ENOMEM); 5802 } 5803 5804 5805 static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded) 5806 { 5807 switch (algo) { 5808 case ALGORITHM_PARITY_0: 5809 if (raid_disk < max_degraded) 5810 return 1; 5811 break; 5812 case ALGORITHM_PARITY_N: 5813 if (raid_disk >= raid_disks - max_degraded) 5814 return 1; 5815 break; 5816 case ALGORITHM_PARITY_0_6: 5817 if (raid_disk == 0 || 5818 raid_disk == raid_disks - 1) 5819 return 1; 5820 break; 5821 case ALGORITHM_LEFT_ASYMMETRIC_6: 5822 case ALGORITHM_RIGHT_ASYMMETRIC_6: 5823 case ALGORITHM_LEFT_SYMMETRIC_6: 5824 case ALGORITHM_RIGHT_SYMMETRIC_6: 5825 if (raid_disk == raid_disks - 1) 5826 return 1; 5827 } 5828 return 0; 5829 } 5830 5831 static int run(struct mddev *mddev) 5832 { 5833 struct r5conf *conf; 5834 int working_disks = 0; 5835 int dirty_parity_disks = 0; 5836 struct md_rdev *rdev; 5837 sector_t reshape_offset = 0; 5838 int i; 5839 long long min_offset_diff = 0; 5840 int first = 1; 5841 5842 if (mddev->recovery_cp != MaxSector) 5843 printk(KERN_NOTICE "md/raid:%s: not clean" 5844 " -- starting background reconstruction\n", 5845 mdname(mddev)); 5846 5847 rdev_for_each(rdev, mddev) { 5848 long long diff; 5849 if (rdev->raid_disk < 0) 5850 continue; 5851 diff = (rdev->new_data_offset - rdev->data_offset); 5852 if (first) { 5853 min_offset_diff = diff; 5854 first = 0; 5855 } else if (mddev->reshape_backwards && 5856 diff < min_offset_diff) 5857 min_offset_diff = diff; 5858 else if (!mddev->reshape_backwards && 5859 diff > min_offset_diff) 5860 min_offset_diff = diff; 5861 } 5862 5863 if (mddev->reshape_position != MaxSector) { 5864 /* Check that we can continue the reshape. 5865 * Difficulties arise if the stripe we would write to 5866 * next is at or after the stripe we would read from next. 5867 * For a reshape that changes the number of devices, this 5868 * is only possible for a very short time, and mdadm makes 5869 * sure that time appears to have past before assembling 5870 * the array. So we fail if that time hasn't passed. 5871 * For a reshape that keeps the number of devices the same 5872 * mdadm must be monitoring the reshape can keeping the 5873 * critical areas read-only and backed up. It will start 5874 * the array in read-only mode, so we check for that. 5875 */ 5876 sector_t here_new, here_old; 5877 int old_disks; 5878 int max_degraded = (mddev->level == 6 ? 2 : 1); 5879 5880 if (mddev->new_level != mddev->level) { 5881 printk(KERN_ERR "md/raid:%s: unsupported reshape " 5882 "required - aborting.\n", 5883 mdname(mddev)); 5884 return -EINVAL; 5885 } 5886 old_disks = mddev->raid_disks - mddev->delta_disks; 5887 /* reshape_position must be on a new-stripe boundary, and one 5888 * further up in new geometry must map after here in old 5889 * geometry. 
5890 */ 5891 here_new = mddev->reshape_position; 5892 if (sector_div(here_new, mddev->new_chunk_sectors * 5893 (mddev->raid_disks - max_degraded))) { 5894 printk(KERN_ERR "md/raid:%s: reshape_position not " 5895 "on a stripe boundary\n", mdname(mddev)); 5896 return -EINVAL; 5897 } 5898 reshape_offset = here_new * mddev->new_chunk_sectors; 5899 /* here_new is the stripe we will write to */ 5900 here_old = mddev->reshape_position; 5901 sector_div(here_old, mddev->chunk_sectors * 5902 (old_disks-max_degraded)); 5903 /* here_old is the first stripe that we might need to read 5904 * from */ 5905 if (mddev->delta_disks == 0) { 5906 if ((here_new * mddev->new_chunk_sectors != 5907 here_old * mddev->chunk_sectors)) { 5908 printk(KERN_ERR "md/raid:%s: reshape position is" 5909 " confused - aborting\n", mdname(mddev)); 5910 return -EINVAL; 5911 } 5912 /* We cannot be sure it is safe to start an in-place 5913 * reshape. It is only safe if user-space is monitoring 5914 * and taking constant backups. 5915 * mdadm always starts a situation like this in 5916 * readonly mode so it can take control before 5917 * allowing any writes. So just check for that. 5918 */ 5919 if (abs(min_offset_diff) >= mddev->chunk_sectors && 5920 abs(min_offset_diff) >= mddev->new_chunk_sectors) 5921 /* not really in-place - so OK */; 5922 else if (mddev->ro == 0) { 5923 printk(KERN_ERR "md/raid:%s: in-place reshape " 5924 "must be started in read-only mode " 5925 "- aborting\n", 5926 mdname(mddev)); 5927 return -EINVAL; 5928 } 5929 } else if (mddev->reshape_backwards 5930 ? (here_new * mddev->new_chunk_sectors + min_offset_diff <= 5931 here_old * mddev->chunk_sectors) 5932 : (here_new * mddev->new_chunk_sectors >= 5933 here_old * mddev->chunk_sectors + (-min_offset_diff))) { 5934 /* Reading from the same stripe as writing to - bad */ 5935 printk(KERN_ERR "md/raid:%s: reshape_position too early for " 5936 "auto-recovery - aborting.\n", 5937 mdname(mddev)); 5938 return -EINVAL; 5939 } 5940 printk(KERN_INFO "md/raid:%s: reshape will continue\n", 5941 mdname(mddev)); 5942 /* OK, we should be able to continue; */ 5943 } else { 5944 BUG_ON(mddev->level != mddev->new_level); 5945 BUG_ON(mddev->layout != mddev->new_layout); 5946 BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors); 5947 BUG_ON(mddev->delta_disks != 0); 5948 } 5949 5950 if (mddev->private == NULL) 5951 conf = setup_conf(mddev); 5952 else 5953 conf = mddev->private; 5954 5955 if (IS_ERR(conf)) 5956 return PTR_ERR(conf); 5957 5958 conf->min_offset_diff = min_offset_diff; 5959 mddev->thread = conf->thread; 5960 conf->thread = NULL; 5961 mddev->private = conf; 5962 5963 for (i = 0; i < conf->raid_disks && conf->previous_raid_disks; 5964 i++) { 5965 rdev = conf->disks[i].rdev; 5966 if (!rdev && conf->disks[i].replacement) { 5967 /* The replacement is all we have yet */ 5968 rdev = conf->disks[i].replacement; 5969 conf->disks[i].replacement = NULL; 5970 clear_bit(Replacement, &rdev->flags); 5971 conf->disks[i].rdev = rdev; 5972 } 5973 if (!rdev) 5974 continue; 5975 if (conf->disks[i].replacement && 5976 conf->reshape_progress != MaxSector) { 5977 /* replacements and reshape simply do not mix. */ 5978 printk(KERN_ERR "md: cannot handle concurrent " 5979 "replacement and reshape.\n"); 5980 goto abort; 5981 } 5982 if (test_bit(In_sync, &rdev->flags)) { 5983 working_disks++; 5984 continue; 5985 } 5986 /* This disc is not fully in-sync. 
However if it 5987 * just stored parity (beyond the recovery_offset), 5988 * then we don't need to be concerned about the 5989 * array being dirty. 5990 * When reshape goes 'backwards', we never have 5991 * partially completed devices, so we only need 5992 * to worry about reshape going forwards. 5993 */ 5994 /* Hack because v0.91 doesn't store recovery_offset properly. */ 5995 if (mddev->major_version == 0 && 5996 mddev->minor_version > 90) 5997 rdev->recovery_offset = reshape_offset; 5998 5999 if (rdev->recovery_offset < reshape_offset) { 6000 /* We need to check old and new layout */ 6001 if (!only_parity(rdev->raid_disk, 6002 conf->algorithm, 6003 conf->raid_disks, 6004 conf->max_degraded)) 6005 continue; 6006 } 6007 if (!only_parity(rdev->raid_disk, 6008 conf->prev_algo, 6009 conf->previous_raid_disks, 6010 conf->max_degraded)) 6011 continue; 6012 dirty_parity_disks++; 6013 } 6014 6015 /* 6016 * 0 for a fully functional array, 1 or 2 for a degraded array. 6017 */ 6018 mddev->degraded = calc_degraded(conf); 6019 6020 if (has_failed(conf)) { 6021 printk(KERN_ERR "md/raid:%s: not enough operational devices" 6022 " (%d/%d failed)\n", 6023 mdname(mddev), mddev->degraded, conf->raid_disks); 6024 goto abort; 6025 } 6026 6027 /* device size must be a multiple of chunk size */ 6028 mddev->dev_sectors &= ~(mddev->chunk_sectors - 1); 6029 mddev->resync_max_sectors = mddev->dev_sectors; 6030 6031 if (mddev->degraded > dirty_parity_disks && 6032 mddev->recovery_cp != MaxSector) { 6033 if (mddev->ok_start_degraded) 6034 printk(KERN_WARNING 6035 "md/raid:%s: starting dirty degraded array" 6036 " - data corruption possible.\n", 6037 mdname(mddev)); 6038 else { 6039 printk(KERN_ERR 6040 "md/raid:%s: cannot start dirty degraded array.\n", 6041 mdname(mddev)); 6042 goto abort; 6043 } 6044 } 6045 6046 if (mddev->degraded == 0) 6047 printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d" 6048 " devices, algorithm %d\n", mdname(mddev), conf->level, 6049 mddev->raid_disks-mddev->degraded, mddev->raid_disks, 6050 mddev->new_layout); 6051 else 6052 printk(KERN_ALERT "md/raid:%s: raid level %d active with %d" 6053 " out of %d devices, algorithm %d\n", 6054 mdname(mddev), conf->level, 6055 mddev->raid_disks - mddev->degraded, 6056 mddev->raid_disks, mddev->new_layout); 6057 6058 print_raid5_conf(conf); 6059 6060 if (conf->reshape_progress != MaxSector) { 6061 conf->reshape_safe = conf->reshape_progress; 6062 atomic_set(&conf->reshape_stripes, 0); 6063 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 6064 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 6065 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 6066 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 6067 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 6068 "reshape"); 6069 } 6070 6071 6072 /* Ok, everything is just fine now */ 6073 if (mddev->to_remove == &raid5_attrs_group) 6074 mddev->to_remove = NULL; 6075 else if (mddev->kobj.sd && 6076 sysfs_create_group(&mddev->kobj, &raid5_attrs_group)) 6077 printk(KERN_WARNING 6078 "raid5: failed to create sysfs attributes for %s\n", 6079 mdname(mddev)); 6080 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); 6081 6082 if (mddev->queue) { 6083 int chunk_size; 6084 bool discard_supported = true; 6085 /* read-ahead size must cover two whole stripes, which 6086 * is 2 * (datadisks) * chunksize, where 'datadisks' is 6087 * the number of data devices 6088 */ 6089 int data_disks = conf->previous_raid_disks - conf->max_degraded; 6090 int stripe = data_disks * 6091 ((mddev->chunk_sectors << 9) /
PAGE_SIZE); 6092 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 6093 mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 6094 6095 blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec); 6096 6097 mddev->queue->backing_dev_info.congested_data = mddev; 6098 mddev->queue->backing_dev_info.congested_fn = raid5_congested; 6099 6100 chunk_size = mddev->chunk_sectors << 9; 6101 blk_queue_io_min(mddev->queue, chunk_size); 6102 blk_queue_io_opt(mddev->queue, chunk_size * 6103 (conf->raid_disks - conf->max_degraded)); 6104 mddev->queue->limits.raid_partial_stripes_expensive = 1; 6105 /* 6106 * We can only discard a whole stripe. It doesn't make sense to 6107 * discard the data disks but write the parity disk. 6108 */ 6109 stripe = stripe * PAGE_SIZE; 6110 /* Round up to power of 2, as discard handling 6111 * currently assumes that */ 6112 while ((stripe-1) & stripe) 6113 stripe = (stripe | (stripe-1)) + 1; 6114 mddev->queue->limits.discard_alignment = stripe; 6115 mddev->queue->limits.discard_granularity = stripe; 6116 /* 6117 * the unaligned part of a discard request will be ignored, so we 6118 * can't guarantee discard_zeroes_data 6119 */ 6120 mddev->queue->limits.discard_zeroes_data = 0; 6121 6122 blk_queue_max_write_same_sectors(mddev->queue, 0); 6123 6124 rdev_for_each(rdev, mddev) { 6125 disk_stack_limits(mddev->gendisk, rdev->bdev, 6126 rdev->data_offset << 9); 6127 disk_stack_limits(mddev->gendisk, rdev->bdev, 6128 rdev->new_data_offset << 9); 6129 /* 6130 * discard_zeroes_data is required, otherwise data 6131 * could be lost. Consider a scenario: discard a stripe 6132 * (the stripe could be inconsistent if 6133 * discard_zeroes_data is 0); write one disk of the 6134 * stripe (the stripe could be inconsistent again 6135 * depending on which disks are used to calculate 6136 * parity); the disk breaks; the stripe data of this 6137 * disk is lost. 6138 */ 6139 if (!blk_queue_discard(bdev_get_queue(rdev->bdev)) || 6140 !bdev_get_queue(rdev->bdev)-> 6141 limits.discard_zeroes_data) 6142 discard_supported = false; 6143 } 6144 6145 if (discard_supported && 6146 mddev->queue->limits.max_discard_sectors >= stripe && 6147 mddev->queue->limits.discard_granularity >= stripe) 6148 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, 6149 mddev->queue); 6150 else 6151 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, 6152 mddev->queue); 6153 } 6154 6155 return 0; 6156 abort: 6157 md_unregister_thread(&mddev->thread); 6158 print_raid5_conf(conf); 6159 free_conf(conf); 6160 mddev->private = NULL; 6161 printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev)); 6162 return -EIO; 6163 } 6164 6165 static int stop(struct mddev *mddev) 6166 { 6167 struct r5conf *conf = mddev->private; 6168 6169 md_unregister_thread(&mddev->thread); 6170 if (mddev->queue) 6171 mddev->queue->backing_dev_info.congested_fn = NULL; 6172 free_conf(conf); 6173 mddev->private = NULL; 6174 mddev->to_remove = &raid5_attrs_group; 6175 return 0; 6176 } 6177 6178 static void status(struct seq_file *seq, struct mddev *mddev) 6179 { 6180 struct r5conf *conf = mddev->private; 6181 int i; 6182 6183 seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level, 6184 mddev->chunk_sectors / 2, mddev->layout); 6185 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); 6186 for (i = 0; i < conf->raid_disks; i++) 6187 seq_printf (seq, "%s", 6188 conf->disks[i].rdev && 6189 test_bit(In_sync, &conf->disks[i].rdev->flags) ?
"U" : "_"); 6190 seq_printf (seq, "]"); 6191 } 6192 6193 static void print_raid5_conf (struct r5conf *conf) 6194 { 6195 int i; 6196 struct disk_info *tmp; 6197 6198 printk(KERN_DEBUG "RAID conf printout:\n"); 6199 if (!conf) { 6200 printk("(conf==NULL)\n"); 6201 return; 6202 } 6203 printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level, 6204 conf->raid_disks, 6205 conf->raid_disks - conf->mddev->degraded); 6206 6207 for (i = 0; i < conf->raid_disks; i++) { 6208 char b[BDEVNAME_SIZE]; 6209 tmp = conf->disks + i; 6210 if (tmp->rdev) 6211 printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n", 6212 i, !test_bit(Faulty, &tmp->rdev->flags), 6213 bdevname(tmp->rdev->bdev, b)); 6214 } 6215 } 6216 6217 static int raid5_spare_active(struct mddev *mddev) 6218 { 6219 int i; 6220 struct r5conf *conf = mddev->private; 6221 struct disk_info *tmp; 6222 int count = 0; 6223 unsigned long flags; 6224 6225 for (i = 0; i < conf->raid_disks; i++) { 6226 tmp = conf->disks + i; 6227 if (tmp->replacement 6228 && tmp->replacement->recovery_offset == MaxSector 6229 && !test_bit(Faulty, &tmp->replacement->flags) 6230 && !test_and_set_bit(In_sync, &tmp->replacement->flags)) { 6231 /* Replacement has just become active. */ 6232 if (!tmp->rdev 6233 || !test_and_clear_bit(In_sync, &tmp->rdev->flags)) 6234 count++; 6235 if (tmp->rdev) { 6236 /* Replaced device not technically faulty, 6237 * but we need to be sure it gets removed 6238 * and never re-added. 6239 */ 6240 set_bit(Faulty, &tmp->rdev->flags); 6241 sysfs_notify_dirent_safe( 6242 tmp->rdev->sysfs_state); 6243 } 6244 sysfs_notify_dirent_safe(tmp->replacement->sysfs_state); 6245 } else if (tmp->rdev 6246 && tmp->rdev->recovery_offset == MaxSector 6247 && !test_bit(Faulty, &tmp->rdev->flags) 6248 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { 6249 count++; 6250 sysfs_notify_dirent_safe(tmp->rdev->sysfs_state); 6251 } 6252 } 6253 spin_lock_irqsave(&conf->device_lock, flags); 6254 mddev->degraded = calc_degraded(conf); 6255 spin_unlock_irqrestore(&conf->device_lock, flags); 6256 print_raid5_conf(conf); 6257 return count; 6258 } 6259 6260 static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev) 6261 { 6262 struct r5conf *conf = mddev->private; 6263 int err = 0; 6264 int number = rdev->raid_disk; 6265 struct md_rdev **rdevp; 6266 struct disk_info *p = conf->disks + number; 6267 6268 print_raid5_conf(conf); 6269 if (rdev == p->rdev) 6270 rdevp = &p->rdev; 6271 else if (rdev == p->replacement) 6272 rdevp = &p->replacement; 6273 else 6274 return 0; 6275 6276 if (number >= conf->raid_disks && 6277 conf->reshape_progress == MaxSector) 6278 clear_bit(In_sync, &rdev->flags); 6279 6280 if (test_bit(In_sync, &rdev->flags) || 6281 atomic_read(&rdev->nr_pending)) { 6282 err = -EBUSY; 6283 goto abort; 6284 } 6285 /* Only remove non-faulty devices if recovery 6286 * isn't possible. 
6287 */ 6288 if (!test_bit(Faulty, &rdev->flags) && 6289 mddev->recovery_disabled != conf->recovery_disabled && 6290 !has_failed(conf) && 6291 (!p->replacement || p->replacement == rdev) && 6292 number < conf->raid_disks) { 6293 err = -EBUSY; 6294 goto abort; 6295 } 6296 *rdevp = NULL; 6297 synchronize_rcu(); 6298 if (atomic_read(&rdev->nr_pending)) { 6299 /* lost the race, try later */ 6300 err = -EBUSY; 6301 *rdevp = rdev; 6302 } else if (p->replacement) { 6303 /* We must have just cleared 'rdev' */ 6304 p->rdev = p->replacement; 6305 clear_bit(Replacement, &p->replacement->flags); 6306 smp_mb(); /* Make sure other CPUs may see both as identical 6307 * but will never see neither - if they are careful 6308 */ 6309 p->replacement = NULL; 6310 clear_bit(WantReplacement, &rdev->flags); 6311 } else 6312 /* We might have just removed the Replacement as faulty - 6313 * clear the bit just in case 6314 */ 6315 clear_bit(WantReplacement, &rdev->flags); 6316 abort: 6317 6318 print_raid5_conf(conf); 6319 return err; 6320 } 6321 6322 static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) 6323 { 6324 struct r5conf *conf = mddev->private; 6325 int err = -EEXIST; 6326 int disk; 6327 struct disk_info *p; 6328 int first = 0; 6329 int last = conf->raid_disks - 1; 6330 6331 if (mddev->recovery_disabled == conf->recovery_disabled) 6332 return -EBUSY; 6333 6334 if (rdev->saved_raid_disk < 0 && has_failed(conf)) 6335 /* no point adding a device */ 6336 return -EINVAL; 6337 6338 if (rdev->raid_disk >= 0) 6339 first = last = rdev->raid_disk; 6340 6341 /* 6342 * find the disk ... but prefer rdev->saved_raid_disk 6343 * if possible. 6344 */ 6345 if (rdev->saved_raid_disk >= 0 && 6346 rdev->saved_raid_disk >= first && 6347 conf->disks[rdev->saved_raid_disk].rdev == NULL) 6348 first = rdev->saved_raid_disk; 6349 6350 for (disk = first; disk <= last; disk++) { 6351 p = conf->disks + disk; 6352 if (p->rdev == NULL) { 6353 clear_bit(In_sync, &rdev->flags); 6354 rdev->raid_disk = disk; 6355 err = 0; 6356 if (rdev->saved_raid_disk != disk) 6357 conf->fullsync = 1; 6358 rcu_assign_pointer(p->rdev, rdev); 6359 goto out; 6360 } 6361 } 6362 for (disk = first; disk <= last; disk++) { 6363 p = conf->disks + disk; 6364 if (test_bit(WantReplacement, &p->rdev->flags) && 6365 p->replacement == NULL) { 6366 clear_bit(In_sync, &rdev->flags); 6367 set_bit(Replacement, &rdev->flags); 6368 rdev->raid_disk = disk; 6369 err = 0; 6370 conf->fullsync = 1; 6371 rcu_assign_pointer(p->replacement, rdev); 6372 break; 6373 } 6374 } 6375 out: 6376 print_raid5_conf(conf); 6377 return err; 6378 } 6379 6380 static int raid5_resize(struct mddev *mddev, sector_t sectors) 6381 { 6382 /* no resync is happening, and there is enough space 6383 * on all devices, so we can resize. 6384 * We need to make sure resync covers any new space. 6385 * If the array is shrinking we should possibly wait until 6386 * any io in the removed space completes, but it hardly seems 6387 * worth it.
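* For example (hypothetical numbers): on a 5-device raid5 array with
* 1024-sector chunks, sectors = 10000 is rounded down to 9216 and
* raid5_size() then yields newsize = 9216 * (5 - 1) = 36864 sectors.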
6388 */ 6389 sector_t newsize; 6390 sectors &= ~((sector_t)mddev->chunk_sectors - 1); 6391 newsize = raid5_size(mddev, sectors, mddev->raid_disks); 6392 if (mddev->external_size && 6393 mddev->array_sectors > newsize) 6394 return -EINVAL; 6395 if (mddev->bitmap) { 6396 int ret = bitmap_resize(mddev->bitmap, sectors, 0, 0); 6397 if (ret) 6398 return ret; 6399 } 6400 md_set_array_sectors(mddev, newsize); 6401 set_capacity(mddev->gendisk, mddev->array_sectors); 6402 revalidate_disk(mddev->gendisk); 6403 if (sectors > mddev->dev_sectors && 6404 mddev->recovery_cp > mddev->dev_sectors) { 6405 mddev->recovery_cp = mddev->dev_sectors; 6406 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6407 } 6408 mddev->dev_sectors = sectors; 6409 mddev->resync_max_sectors = sectors; 6410 return 0; 6411 } 6412 6413 static int check_stripe_cache(struct mddev *mddev) 6414 { 6415 /* Can only proceed if there are plenty of stripe_heads. 6416 * We need a minimum of one full stripe, and for sensible progress 6417 * it is best to have about 4 times that. 6418 * If we require 4 times, then the default 256 4K stripe_heads will 6419 * allow for chunk sizes up to 256K, which is probably OK. 6420 * If the chunk size is greater, user-space should request more 6421 * stripe_heads first. 6422 */ 6423 struct r5conf *conf = mddev->private; 6424 if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4 6425 > conf->max_nr_stripes || 6426 ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4 6427 > conf->max_nr_stripes) { 6428 printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes. Needed %lu\n", 6429 mdname(mddev), 6430 ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9) 6431 / STRIPE_SIZE)*4); 6432 return 0; 6433 } 6434 return 1; 6435 } 6436 6437 static int check_reshape(struct mddev *mddev) 6438 { 6439 struct r5conf *conf = mddev->private; 6440 6441 if (mddev->delta_disks == 0 && 6442 mddev->new_layout == mddev->layout && 6443 mddev->new_chunk_sectors == mddev->chunk_sectors) 6444 return 0; /* nothing to do */ 6445 if (has_failed(conf)) 6446 return -EINVAL; 6447 if (mddev->delta_disks < 0 && mddev->reshape_position == MaxSector) { 6448 /* We might be able to shrink, but the devices must 6449 * be made bigger first. 6450 * For raid6, 4 is the minimum number of devices. 6451 * Otherwise 2 is the minimum. 6452 */ 6453 int min = 2; 6454 if (mddev->level == 6) 6455 min = 4; 6456 if (mddev->raid_disks + mddev->delta_disks < min) 6457 return -EINVAL; 6458 } 6459 6460 if (!check_stripe_cache(mddev)) 6461 return -ENOSPC; 6462 6463 return resize_stripes(conf, (conf->previous_raid_disks 6464 + mddev->delta_disks)); 6465 } 6466 6467 static int raid5_start_reshape(struct mddev *mddev) 6468 { 6469 struct r5conf *conf = mddev->private; 6470 struct md_rdev *rdev; 6471 int spares = 0; 6472 unsigned long flags; 6473 6474 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 6475 return -EBUSY; 6476 6477 if (!check_stripe_cache(mddev)) 6478 return -ENOSPC; 6479 6480 if (has_failed(conf)) 6481 return -EINVAL; 6482 6483 rdev_for_each(rdev, mddev) { 6484 if (!test_bit(In_sync, &rdev->flags) 6485 && !test_bit(Faulty, &rdev->flags)) 6486 spares++; 6487 } 6488 6489 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) 6490 /* Not enough devices even to make a degraded array 6491 * of that size 6492 */ 6493 return -EINVAL; 6494 6495 /* Refuse to reduce size of the array. Any reductions in 6496 * array size must be through explicit setting of array_size 6497 * attribute.
6498 */ 6499 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks) 6500 < mddev->array_sectors) { 6501 printk(KERN_ERR "md/raid:%s: array size must be reduced " 6502 "before number of disks\n", mdname(mddev)); 6503 return -EINVAL; 6504 } 6505 6506 atomic_set(&conf->reshape_stripes, 0); 6507 spin_lock_irq(&conf->device_lock); 6508 write_seqcount_begin(&conf->gen_lock); 6509 conf->previous_raid_disks = conf->raid_disks; 6510 conf->raid_disks += mddev->delta_disks; 6511 conf->prev_chunk_sectors = conf->chunk_sectors; 6512 conf->chunk_sectors = mddev->new_chunk_sectors; 6513 conf->prev_algo = conf->algorithm; 6514 conf->algorithm = mddev->new_layout; 6515 conf->generation++; 6516 /* Code that selects data_offset needs to see the generation update 6517 * if reshape_progress has been set - so a memory barrier needed. 6518 */ 6519 smp_mb(); 6520 if (mddev->reshape_backwards) 6521 conf->reshape_progress = raid5_size(mddev, 0, 0); 6522 else 6523 conf->reshape_progress = 0; 6524 conf->reshape_safe = conf->reshape_progress; 6525 write_seqcount_end(&conf->gen_lock); 6526 spin_unlock_irq(&conf->device_lock); 6527 6528 /* Now make sure any requests that proceeded on the assumption 6529 * the reshape wasn't running - like Discard or Read - have 6530 * completed. 6531 */ 6532 mddev_suspend(mddev); 6533 mddev_resume(mddev); 6534 6535 /* Add some new drives, as many as will fit. 6536 * We know there are enough to make the newly sized array work. 6537 * Don't add devices if we are reducing the number of 6538 * devices in the array. This is because it is not possible 6539 * to correctly record the "partially reconstructed" state of 6540 * such devices during the reshape and confusion could result. 6541 */ 6542 if (mddev->delta_disks >= 0) { 6543 rdev_for_each(rdev, mddev) 6544 if (rdev->raid_disk < 0 && 6545 !test_bit(Faulty, &rdev->flags)) { 6546 if (raid5_add_disk(mddev, rdev) == 0) { 6547 if (rdev->raid_disk 6548 >= conf->previous_raid_disks) 6549 set_bit(In_sync, &rdev->flags); 6550 else 6551 rdev->recovery_offset = 0; 6552 6553 if (sysfs_link_rdev(mddev, rdev)) 6554 /* Failure here is OK */; 6555 } 6556 } else if (rdev->raid_disk >= conf->previous_raid_disks 6557 && !test_bit(Faulty, &rdev->flags)) { 6558 /* This is a spare that was manually added */ 6559 set_bit(In_sync, &rdev->flags); 6560 } 6561 6562 /* When a reshape changes the number of devices, 6563 * ->degraded is measured against the larger of the 6564 * pre and post number of devices. 
6565 */ 6566 spin_lock_irqsave(&conf->device_lock, flags); 6567 mddev->degraded = calc_degraded(conf); 6568 spin_unlock_irqrestore(&conf->device_lock, flags); 6569 } 6570 mddev->raid_disks = conf->raid_disks; 6571 mddev->reshape_position = conf->reshape_progress; 6572 set_bit(MD_CHANGE_DEVS, &mddev->flags); 6573 6574 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 6575 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 6576 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 6577 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 6578 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 6579 "reshape"); 6580 if (!mddev->sync_thread) { 6581 mddev->recovery = 0; 6582 spin_lock_irq(&conf->device_lock); 6583 write_seqcount_begin(&conf->gen_lock); 6584 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; 6585 mddev->new_chunk_sectors = 6586 conf->chunk_sectors = conf->prev_chunk_sectors; 6587 mddev->new_layout = conf->algorithm = conf->prev_algo; 6588 rdev_for_each(rdev, mddev) 6589 rdev->new_data_offset = rdev->data_offset; 6590 smp_wmb(); 6591 conf->generation--; 6592 conf->reshape_progress = MaxSector; 6593 mddev->reshape_position = MaxSector; 6594 write_seqcount_end(&conf->gen_lock); 6595 spin_unlock_irq(&conf->device_lock); 6596 return -EAGAIN; 6597 } 6598 conf->reshape_checkpoint = jiffies; 6599 md_wakeup_thread(mddev->sync_thread); 6600 md_new_event(mddev); 6601 return 0; 6602 } 6603 6604 /* This is called from the reshape thread and should make any 6605 * changes needed in 'conf' 6606 */ 6607 static void end_reshape(struct r5conf *conf) 6608 { 6609 6610 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { 6611 struct md_rdev *rdev; 6612 6613 spin_lock_irq(&conf->device_lock); 6614 conf->previous_raid_disks = conf->raid_disks; 6615 rdev_for_each(rdev, conf->mddev) 6616 rdev->data_offset = rdev->new_data_offset; 6617 smp_wmb(); 6618 conf->reshape_progress = MaxSector; 6619 spin_unlock_irq(&conf->device_lock); 6620 wake_up(&conf->wait_for_overlap); 6621 6622 /* read-ahead size must cover two whole stripes, which is 6623 * 2 * (datadisks) * chunksize, where 'datadisks' is the number of data devices 6624 */ 6625 if (conf->mddev->queue) { 6626 int data_disks = conf->raid_disks - conf->max_degraded; 6627 int stripe = data_disks * ((conf->chunk_sectors << 9) 6628 / PAGE_SIZE); 6629 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 6630 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 6631 } 6632 } 6633 } 6634 6635 /* This is called from the raid5d thread with mddev_lock held. 6636 * It makes config changes to the device.
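* When the array grew, that means publishing the new array size; when
* it shrank, it means recomputing ->degraded and clearing In_sync on
* the devices that fell off the end.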
6637 */ 6638 static void raid5_finish_reshape(struct mddev *mddev) 6639 { 6640 struct r5conf *conf = mddev->private; 6641 6642 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 6643 6644 if (mddev->delta_disks > 0) { 6645 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); 6646 set_capacity(mddev->gendisk, mddev->array_sectors); 6647 revalidate_disk(mddev->gendisk); 6648 } else { 6649 int d; 6650 spin_lock_irq(&conf->device_lock); 6651 mddev->degraded = calc_degraded(conf); 6652 spin_unlock_irq(&conf->device_lock); 6653 for (d = conf->raid_disks; 6654 d < conf->raid_disks - mddev->delta_disks; 6655 d++) { 6656 struct md_rdev *rdev = conf->disks[d].rdev; 6657 if (rdev) 6658 clear_bit(In_sync, &rdev->flags); 6659 rdev = conf->disks[d].replacement; 6660 if (rdev) 6661 clear_bit(In_sync, &rdev->flags); 6662 } 6663 } 6664 mddev->layout = conf->algorithm; 6665 mddev->chunk_sectors = conf->chunk_sectors; 6666 mddev->reshape_position = MaxSector; 6667 mddev->delta_disks = 0; 6668 mddev->reshape_backwards = 0; 6669 } 6670 } 6671 6672 static void raid5_quiesce(struct mddev *mddev, int state) 6673 { 6674 struct r5conf *conf = mddev->private; 6675 6676 switch(state) { 6677 case 2: /* resume for a suspend */ 6678 wake_up(&conf->wait_for_overlap); 6679 break; 6680 6681 case 1: /* stop all writes */ 6682 lock_all_device_hash_locks_irq(conf); 6683 /* '2' tells resync/reshape to pause so that all 6684 * active stripes can drain 6685 */ 6686 conf->quiesce = 2; 6687 wait_event_cmd(conf->wait_for_stripe, 6688 atomic_read(&conf->active_stripes) == 0 && 6689 atomic_read(&conf->active_aligned_reads) == 0, 6690 unlock_all_device_hash_locks_irq(conf), 6691 lock_all_device_hash_locks_irq(conf)); 6692 conf->quiesce = 1; 6693 unlock_all_device_hash_locks_irq(conf); 6694 /* allow reshape to continue */ 6695 wake_up(&conf->wait_for_overlap); 6696 break; 6697 6698 case 0: /* re-enable writes */ 6699 lock_all_device_hash_locks_irq(conf); 6700 conf->quiesce = 0; 6701 wake_up(&conf->wait_for_stripe); 6702 wake_up(&conf->wait_for_overlap); 6703 unlock_all_device_hash_locks_irq(conf); 6704 break; 6705 } 6706 } 6707 6708 6709 static void *raid45_takeover_raid0(struct mddev *mddev, int level) 6710 { 6711 struct r0conf *raid0_conf = mddev->private; 6712 sector_t sectors; 6713 6714 /* for raid0 takeover only one zone is supported */ 6715 if (raid0_conf->nr_strip_zones > 1) { 6716 printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n", 6717 mdname(mddev)); 6718 return ERR_PTR(-EINVAL); 6719 } 6720 6721 sectors = raid0_conf->strip_zone[0].zone_end; 6722 sector_div(sectors, raid0_conf->strip_zone[0].nb_dev); 6723 mddev->dev_sectors = sectors; 6724 mddev->new_level = level; 6725 mddev->new_layout = ALGORITHM_PARITY_N; 6726 mddev->new_chunk_sectors = mddev->chunk_sectors; 6727 mddev->raid_disks += 1; 6728 mddev->delta_disks = 1; 6729 /* make sure it will not be marked as dirty */ 6730 mddev->recovery_cp = MaxSector; 6731 6732 return setup_conf(mddev); 6733 } 6734 6735 6736 static void *raid5_takeover_raid1(struct mddev *mddev) 6737 { 6738 int chunksect; 6739 6740 if (mddev->raid_disks != 2 || 6741 mddev->degraded > 1) 6742 return ERR_PTR(-EINVAL); 6743 6744 /* Should check if there are write-behind devices?
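* The code below picks the largest power-of-two chunk size, starting
* at 64K, that exactly divides the array size. For example (with a
* hypothetical size): an array of 1000000 sectors ends up with
* chunksect = 64 sectors (32K), since 1000000 = 2^6 * 15625.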
*/ 6745 6746 chunksect = 64*2; /* 64K by default */ 6747 6748 /* The array must be an exact multiple of chunksize */ 6749 while (chunksect && (mddev->array_sectors & (chunksect-1))) 6750 chunksect >>= 1; 6751 6752 if ((chunksect<<9) < STRIPE_SIZE) 6753 /* array size does not allow a suitable chunk size */ 6754 return ERR_PTR(-EINVAL); 6755 6756 mddev->new_level = 5; 6757 mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC; 6758 mddev->new_chunk_sectors = chunksect; 6759 6760 return setup_conf(mddev); 6761 } 6762 6763 static void *raid5_takeover_raid6(struct mddev *mddev) 6764 { 6765 int new_layout; 6766 6767 switch (mddev->layout) { 6768 case ALGORITHM_LEFT_ASYMMETRIC_6: 6769 new_layout = ALGORITHM_LEFT_ASYMMETRIC; 6770 break; 6771 case ALGORITHM_RIGHT_ASYMMETRIC_6: 6772 new_layout = ALGORITHM_RIGHT_ASYMMETRIC; 6773 break; 6774 case ALGORITHM_LEFT_SYMMETRIC_6: 6775 new_layout = ALGORITHM_LEFT_SYMMETRIC; 6776 break; 6777 case ALGORITHM_RIGHT_SYMMETRIC_6: 6778 new_layout = ALGORITHM_RIGHT_SYMMETRIC; 6779 break; 6780 case ALGORITHM_PARITY_0_6: 6781 new_layout = ALGORITHM_PARITY_0; 6782 break; 6783 case ALGORITHM_PARITY_N: 6784 new_layout = ALGORITHM_PARITY_N; 6785 break; 6786 default: 6787 return ERR_PTR(-EINVAL); 6788 } 6789 mddev->new_level = 5; 6790 mddev->new_layout = new_layout; 6791 mddev->delta_disks = -1; 6792 mddev->raid_disks -= 1; 6793 return setup_conf(mddev); 6794 } 6795 6796 6797 static int raid5_check_reshape(struct mddev *mddev) 6798 { 6799 /* For a 2-drive array, the layout and chunk size can be changed 6800 * immediately as no restriping is needed. 6801 * For larger arrays we record the new value - after validation 6802 * to be used by a reshape pass. 6803 */ 6804 struct r5conf *conf = mddev->private; 6805 int new_chunk = mddev->new_chunk_sectors; 6806 6807 if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout)) 6808 return -EINVAL; 6809 if (new_chunk > 0) { 6810 if (!is_power_of_2(new_chunk)) 6811 return -EINVAL; 6812 if (new_chunk < (PAGE_SIZE>>9)) 6813 return -EINVAL; 6814 if (mddev->array_sectors & (new_chunk-1)) 6815 /* not factor of array size */ 6816 return -EINVAL; 6817 } 6818 6819 /* They look valid */ 6820 6821 if (mddev->raid_disks == 2) { 6822 /* can make the change immediately */ 6823 if (mddev->new_layout >= 0) { 6824 conf->algorithm = mddev->new_layout; 6825 mddev->layout = mddev->new_layout; 6826 } 6827 if (new_chunk > 0) { 6828 conf->chunk_sectors = new_chunk; 6829 mddev->chunk_sectors = new_chunk; 6830 } 6831 set_bit(MD_CHANGE_DEVS, &mddev->flags); 6832 md_wakeup_thread(mddev->thread); 6833 } 6834 return check_reshape(mddev); 6835 } 6836 6837 static int raid6_check_reshape(struct mddev *mddev) 6838 { 6839 int new_chunk = mddev->new_chunk_sectors; 6840 6841 if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout)) 6842 return -EINVAL; 6843 if (new_chunk > 0) { 6844 if (!is_power_of_2(new_chunk)) 6845 return -EINVAL; 6846 if (new_chunk < (PAGE_SIZE >> 9)) 6847 return -EINVAL; 6848 if (mddev->array_sectors & (new_chunk-1)) 6849 /* not factor of array size */ 6850 return -EINVAL; 6851 } 6852 6853 /* They look valid */ 6854 return check_reshape(mddev); 6855 } 6856 6857 static void *raid5_takeover(struct mddev *mddev) 6858 { 6859 /* raid5 can take over: 6860 * raid0 - if there is only one strip zone - make it a raid4 layout 6861 * raid1 - if there are two drives. We need to know the chunk size 6862 * raid4 - trivial - just use a raid4 layout.
6863 * raid6 - Providing it is a *_6 layout 6864 */ 6865 if (mddev->level == 0) 6866 return raid45_takeover_raid0(mddev, 5); 6867 if (mddev->level == 1) 6868 return raid5_takeover_raid1(mddev); 6869 if (mddev->level == 4) { 6870 mddev->new_layout = ALGORITHM_PARITY_N; 6871 mddev->new_level = 5; 6872 return setup_conf(mddev); 6873 } 6874 if (mddev->level == 6) 6875 return raid5_takeover_raid6(mddev); 6876 6877 return ERR_PTR(-EINVAL); 6878 } 6879 6880 static void *raid4_takeover(struct mddev *mddev) 6881 { 6882 /* raid4 can take over: 6883 * raid0 - if there is only one strip zone 6884 * raid5 - if layout is right 6885 */ 6886 if (mddev->level == 0) 6887 return raid45_takeover_raid0(mddev, 4); 6888 if (mddev->level == 5 && 6889 mddev->layout == ALGORITHM_PARITY_N) { 6890 mddev->new_layout = 0; 6891 mddev->new_level = 4; 6892 return setup_conf(mddev); 6893 } 6894 return ERR_PTR(-EINVAL); 6895 } 6896 6897 static struct md_personality raid5_personality; 6898 6899 static void *raid6_takeover(struct mddev *mddev) 6900 { 6901 /* Currently can only take over a raid5. We map the 6902 * personality to an equivalent raid6 personality 6903 * with the Q block at the end. 6904 */ 6905 int new_layout; 6906 6907 if (mddev->pers != &raid5_personality) 6908 return ERR_PTR(-EINVAL); 6909 if (mddev->degraded > 1) 6910 return ERR_PTR(-EINVAL); 6911 if (mddev->raid_disks > 253) 6912 return ERR_PTR(-EINVAL); 6913 if (mddev->raid_disks < 3) 6914 return ERR_PTR(-EINVAL); 6915 6916 switch (mddev->layout) { 6917 case ALGORITHM_LEFT_ASYMMETRIC: 6918 new_layout = ALGORITHM_LEFT_ASYMMETRIC_6; 6919 break; 6920 case ALGORITHM_RIGHT_ASYMMETRIC: 6921 new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6; 6922 break; 6923 case ALGORITHM_LEFT_SYMMETRIC: 6924 new_layout = ALGORITHM_LEFT_SYMMETRIC_6; 6925 break; 6926 case ALGORITHM_RIGHT_SYMMETRIC: 6927 new_layout = ALGORITHM_RIGHT_SYMMETRIC_6; 6928 break; 6929 case ALGORITHM_PARITY_0: 6930 new_layout = ALGORITHM_PARITY_0_6; 6931 break; 6932 case ALGORITHM_PARITY_N: 6933 new_layout = ALGORITHM_PARITY_N; 6934 break; 6935 default: 6936 return ERR_PTR(-EINVAL); 6937 } 6938 mddev->new_level = 6; 6939 mddev->new_layout = new_layout; 6940 mddev->delta_disks = 1; 6941 mddev->raid_disks += 1; 6942 return setup_conf(mddev); 6943 } 6944 6945 6946 static struct md_personality raid6_personality = 6947 { 6948 .name = "raid6", 6949 .level = 6, 6950 .owner = THIS_MODULE, 6951 .make_request = make_request, 6952 .run = run, 6953 .stop = stop, 6954 .status = status, 6955 .error_handler = error, 6956 .hot_add_disk = raid5_add_disk, 6957 .hot_remove_disk= raid5_remove_disk, 6958 .spare_active = raid5_spare_active, 6959 .sync_request = sync_request, 6960 .resize = raid5_resize, 6961 .size = raid5_size, 6962 .check_reshape = raid6_check_reshape, 6963 .start_reshape = raid5_start_reshape, 6964 .finish_reshape = raid5_finish_reshape, 6965 .quiesce = raid5_quiesce, 6966 .takeover = raid6_takeover, 6967 }; 6968 static struct md_personality raid5_personality = 6969 { 6970 .name = "raid5", 6971 .level = 5, 6972 .owner = THIS_MODULE, 6973 .make_request = make_request, 6974 .run = run, 6975 .stop = stop, 6976 .status = status, 6977 .error_handler = error, 6978 .hot_add_disk = raid5_add_disk, 6979 .hot_remove_disk= raid5_remove_disk, 6980 .spare_active = raid5_spare_active, 6981 .sync_request = sync_request, 6982 .resize = raid5_resize, 6983 .size = raid5_size, 6984 .check_reshape = raid5_check_reshape, 6985 .start_reshape = raid5_start_reshape, 6986 .finish_reshape = raid5_finish_reshape, 6987 .quiesce = 
raid5_quiesce, 6988 .takeover = raid5_takeover, 6989 }; 6990 6991 static struct md_personality raid4_personality = 6992 { 6993 .name = "raid4", 6994 .level = 4, 6995 .owner = THIS_MODULE, 6996 .make_request = make_request, 6997 .run = run, 6998 .stop = stop, 6999 .status = status, 7000 .error_handler = error, 7001 .hot_add_disk = raid5_add_disk, 7002 .hot_remove_disk= raid5_remove_disk, 7003 .spare_active = raid5_spare_active, 7004 .sync_request = sync_request, 7005 .resize = raid5_resize, 7006 .size = raid5_size, 7007 .check_reshape = raid5_check_reshape, 7008 .start_reshape = raid5_start_reshape, 7009 .finish_reshape = raid5_finish_reshape, 7010 .quiesce = raid5_quiesce, 7011 .takeover = raid4_takeover, 7012 }; 7013 7014 static int __init raid5_init(void) 7015 { 7016 raid5_wq = alloc_workqueue("raid5wq", 7017 WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE|WQ_SYSFS, 0); 7018 if (!raid5_wq) 7019 return -ENOMEM; 7020 register_md_personality(&raid6_personality); 7021 register_md_personality(&raid5_personality); 7022 register_md_personality(&raid4_personality); 7023 return 0; 7024 } 7025 7026 static void raid5_exit(void) 7027 { 7028 unregister_md_personality(&raid6_personality); 7029 unregister_md_personality(&raid5_personality); 7030 unregister_md_personality(&raid4_personality); 7031 destroy_workqueue(raid5_wq); 7032 } 7033 7034 module_init(raid5_init); 7035 module_exit(raid5_exit); 7036 MODULE_LICENSE("GPL"); 7037 MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD"); 7038 MODULE_ALIAS("md-personality-4"); /* RAID5 */ 7039 MODULE_ALIAS("md-raid5"); 7040 MODULE_ALIAS("md-raid4"); 7041 MODULE_ALIAS("md-level-5"); 7042 MODULE_ALIAS("md-level-4"); 7043 MODULE_ALIAS("md-personality-8"); /* RAID6 */ 7044 MODULE_ALIAS("md-raid6"); 7045 MODULE_ALIAS("md-level-6"); 7046 7047 /* This used to be two separate modules, they were: */ 7048 MODULE_ALIAS("raid5"); 7049 MODULE_ALIAS("raid6"); 7050
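/*
 * Usage note (illustrative; the path assumes an array named md0): the
 * worker groups set up by alloc_thread_groups() are disabled by default
 * and can be enabled at run time through the group_thread_cnt attribute
 * registered via raid5_attrs above, e.g.:
 *
 *	echo 2 > /sys/block/md0/md/group_thread_cnt
 *
 * which allocates two workers per NUMA-node group.
 */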