/*
 * raid5.c : Multiple Devices driver for Linux
 *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *	   Copyright (C) 1999, 2000 Ingo Molnar
 *	   Copyright (C) 2002, 2003 H. Peter Anvin
 *
 * RAID-4/5/6 management functions.
 * Thanks to Penguin Computing for making the RAID-6 development possible
 * by donating a test server!
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * BITMAP UNPLUGGING:
 *
 * The sequencing for updating the bitmap reliably is a little
 * subtle (and I got it wrong the first time) so it deserves some
 * explanation.
 *
 * We group bitmap updates into batches.  Each batch has a number.
 * We may write out several batches at once, but that isn't very important.
 * conf->seq_write is the number of the last batch successfully written.
 * conf->seq_flush is the number of the last batch that was closed to
 *    new additions.
 * When we discover that we will need to write to any block in a stripe
 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
 * the number of the batch it will be in.  This is seq_flush+1.
 * When we are ready to do a write, if that batch hasn't been written yet,
 * we plug the array and queue the stripe for later.
 * When an unplug happens, we increment seq_flush, thus closing the current
 * batch.
 * When we notice that seq_flush > seq_write, we write out all pending updates
 * to the bitmap, and advance seq_write to where seq_flush was.
 * This may occasionally write a bit out twice, but is sure never to
 * miss any bits.
 */

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/module.h>
#include <linux/async.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/nodemask.h>
#include <linux/flex_array.h>
#include <trace/events/block.h>

#include "md.h"
#include "raid5.h"
#include "raid0.h"
#include "bitmap.h"

#define cpu_to_group(cpu) cpu_to_node(cpu)
#define ANY_GROUP NUMA_NO_NODE

static bool devices_handle_discard_safely = false;
module_param(devices_handle_discard_safely, bool, 0644);
MODULE_PARM_DESC(devices_handle_discard_safely,
		 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
static struct workqueue_struct *raid5_wq;
/*
 * Stripe cache
 */

#define NR_STRIPES		256
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
#define IO_THRESHOLD		1
#define BYPASS_THRESHOLD	1
#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK		(NR_HASH - 1)
#define MAX_STRIPE_BATCH	8

static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
{
	int hash = (sect >> STRIPE_SHIFT) & HASH_MASK;
	return &conf->stripe_hashtbl[hash];
}

static inline int stripe_hash_locks_hash(sector_t sect)
{
	return (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK;
}

static inline void lock_device_hash_lock(struct r5conf *conf, int hash)
{
	spin_lock_irq(conf->hash_locks + hash);
	spin_lock(&conf->device_lock);
}

static inline void unlock_device_hash_lock(struct r5conf *conf, int hash)
{
	spin_unlock(&conf->device_lock);
	spin_unlock_irq(conf->hash_locks + hash);
}

static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
{
	int i;
	local_irq_disable();
	spin_lock(conf->hash_locks);
	for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
		spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks);
	spin_lock(&conf->device_lock);
}

static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
{
	int i;
	spin_unlock(&conf->device_lock);
	for (i = NR_STRIPE_HASH_LOCKS; i; i--)
		spin_unlock(conf->hash_locks + i - 1);
	local_irq_enable();
}

/* bio's attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap.  There may be several bio's per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This function is used to determine the 'next' bio in the list, given the
 * sector of the current stripe+device.
 */
static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
{
	int sectors = bio_sectors(bio);
	if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
		return bio->bi_next;
	else
		return NULL;
}

/*
 * We maintain a biased count of active stripes in the bottom 16 bits of
 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
 */
static inline int raid5_bi_processed_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	return (atomic_read(segments) >> 16) & 0xffff;
}

static inline int raid5_dec_bi_active_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	return atomic_sub_return(1, segments) & 0xffff;
}

static inline void raid5_inc_bi_active_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	atomic_inc(segments);
}

static inline void raid5_set_bi_processed_stripes(struct bio *bio,
						  unsigned int cnt)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	int old, new;

	do {
		old = atomic_read(segments);
		new = (old & 0xffff) | (cnt << 16);
	} while (atomic_cmpxchg(segments, old, new) != old);
}

static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	atomic_set(segments, cnt);
}

/* Find first data disk in a raid6 stripe */
static inline int raid6_d0(struct stripe_head *sh)
{
	if (sh->ddf_layout)
		/* ddf always start from first device */
		return 0;
	/* md starts just after Q block */
	if (sh->qd_idx == sh->disks - 1)
		return 0;
	else
		return sh->qd_idx + 1;
}
static inline int raid6_next_disk(int disk, int raid_disks)
{
	disk++;
	return (disk < raid_disks) ? disk : 0;
}

/* When walking through the disks in a raid5, starting at raid6_d0,
 * we need to map each disk to a 'slot', where the data disks are slot
 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
 * is raid_disks-1.  This helper does that mapping.
 */
static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
			     int *count, int syndrome_disks)
{
	int slot = *count;

	if (sh->ddf_layout)
		(*count)++;
	if (idx == sh->pd_idx)
		return syndrome_disks;
	if (idx == sh->qd_idx)
		return syndrome_disks + 1;
	if (!sh->ddf_layout)
		(*count)++;
	return slot;
}

static void return_io(struct bio *return_bi)
{
	struct bio *bi = return_bi;
	while (bi) {

		return_bi = bi->bi_next;
		bi->bi_next = NULL;
		bi->bi_iter.bi_size = 0;
		trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
					 bi, 0);
		bio_endio(bi, 0);
		bi = return_bi;
	}
}

static void print_raid5_conf (struct r5conf *conf);

static int stripe_operations_active(struct stripe_head *sh)
{
	return sh->check_state || sh->reconstruct_state ||
	       test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
	       test_bit(STRIPE_COMPUTE_RUN, &sh->state);
}

static void raid5_wakeup_stripe_thread(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	struct r5worker_group *group;
	int thread_cnt;
	int i, cpu = sh->cpu;

	if (!cpu_online(cpu)) {
		cpu = cpumask_any(cpu_online_mask);
		sh->cpu = cpu;
	}

	if (list_empty(&sh->lru)) {
		struct r5worker_group *group;
		group = conf->worker_groups + cpu_to_group(cpu);
		list_add_tail(&sh->lru, &group->handle_list);
		group->stripes_cnt++;
		sh->group = group;
	}

	if (conf->worker_cnt_per_group == 0) {
		md_wakeup_thread(conf->mddev->thread);
		return;
	}

	group = conf->worker_groups + cpu_to_group(sh->cpu);

	group->workers[0].working = true;
	/* at least one worker should run to avoid race */
	queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work);

	thread_cnt = group->stripes_cnt / MAX_STRIPE_BATCH - 1;
	/* wakeup more workers */
	for (i = 1; i < conf->worker_cnt_per_group && thread_cnt > 0; i++) {
		if (group->workers[i].working == false) {
			group->workers[i].working = true;
			queue_work_on(sh->cpu, raid5_wq,
				      &group->workers[i].work);
			thread_cnt--;
		}
	}
}

static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
			      struct list_head *temp_inactive_list)
{
	BUG_ON(!list_empty(&sh->lru));
	BUG_ON(atomic_read(&conf->active_stripes)==0);
	if (test_bit(STRIPE_HANDLE, &sh->state)) {
		if (test_bit(STRIPE_DELAYED, &sh->state) &&
		    !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			list_add_tail(&sh->lru, &conf->delayed_list);
		else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
			 sh->bm_seq - conf->seq_write > 0)
			list_add_tail(&sh->lru, &conf->bitmap_list);
		else {
			clear_bit(STRIPE_DELAYED, &sh->state);
			clear_bit(STRIPE_BIT_DELAY, &sh->state);
			if (conf->worker_cnt_per_group == 0) {
				list_add_tail(&sh->lru, &conf->handle_list);
			} else {
				raid5_wakeup_stripe_thread(sh);
				return;
			}
		}
		md_wakeup_thread(conf->mddev->thread);
	} else {
		BUG_ON(stripe_operations_active(sh));
		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			if (atomic_dec_return(&conf->preread_active_stripes)
			    < IO_THRESHOLD)
				md_wakeup_thread(conf->mddev->thread);
		atomic_dec(&conf->active_stripes);
		if (!test_bit(STRIPE_EXPANDING, &sh->state))
			list_add_tail(&sh->lru, temp_inactive_list);
	}
}

static void __release_stripe(struct r5conf *conf, struct stripe_head *sh,
			     struct list_head *temp_inactive_list)
{
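	/* Drop one reference; the stripe is passed to do_release_stripe()
	 * only when the last reference goes away.
	 */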
	if (atomic_dec_and_test(&sh->count))
		do_release_stripe(conf, sh, temp_inactive_list);
}

/*
 * If @hash is NR_STRIPE_HASH_LOCKS, then temp_inactive_list is an array of
 * inactive lists, one per hash lock.
 *
 * Be careful: Only one task can add/delete stripes from temp_inactive_list at
 * a given time.  Adding stripes only takes the device lock, while deleting
 * stripes only takes the hash lock.
 */
static void release_inactive_stripe_list(struct r5conf *conf,
					 struct list_head *temp_inactive_list,
					 int hash)
{
	int size;
	bool do_wakeup = false;
	unsigned long flags;

	if (hash == NR_STRIPE_HASH_LOCKS) {
		size = NR_STRIPE_HASH_LOCKS;
		hash = NR_STRIPE_HASH_LOCKS - 1;
	} else
		size = 1;
	while (size) {
		struct list_head *list = &temp_inactive_list[size - 1];

		/*
		 * We don't hold any lock here yet, get_active_stripe() might
		 * remove stripes from the list
		 */
		if (!list_empty_careful(list)) {
			spin_lock_irqsave(conf->hash_locks + hash, flags);
			if (list_empty(conf->inactive_list + hash) &&
			    !list_empty(list))
				atomic_dec(&conf->empty_inactive_list_nr);
			list_splice_tail_init(list, conf->inactive_list + hash);
			do_wakeup = true;
			spin_unlock_irqrestore(conf->hash_locks + hash, flags);
		}
		size--;
		hash--;
	}

	if (do_wakeup) {
		wake_up(&conf->wait_for_stripe);
		if (conf->retry_read_aligned)
			md_wakeup_thread(conf->mddev->thread);
	}
}

/* should hold conf->device_lock already */
static int release_stripe_list(struct r5conf *conf,
			       struct list_head *temp_inactive_list)
{
	struct stripe_head *sh;
	int count = 0;
	struct llist_node *head;

	head = llist_del_all(&conf->released_stripes);
	head = llist_reverse_order(head);
	while (head) {
		int hash;

		sh = llist_entry(head, struct stripe_head, release_list);
		head = llist_next(head);
		/* sh could be re-added after STRIPE_ON_RELEASE_LIST is cleared */
		smp_mb();
		clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state);
		/*
		 * Don't worry if the bit gets set again here, because then
		 * the count is always > 1.  This is true for the
		 * STRIPE_ON_UNPLUG_LIST bit too.
		 */
		hash = sh->hash_lock_index;
		__release_stripe(conf, sh, &temp_inactive_list[hash]);
		count++;
	}

	return count;
}

static void release_stripe(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	unsigned long flags;
	struct list_head list;
	int hash;
	bool wakeup;

	/* Avoid release_list until the last reference.
422 */ 423 if (atomic_add_unless(&sh->count, -1, 1)) 424 return; 425 426 if (unlikely(!conf->mddev->thread) || 427 test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state)) 428 goto slow_path; 429 wakeup = llist_add(&sh->release_list, &conf->released_stripes); 430 if (wakeup) 431 md_wakeup_thread(conf->mddev->thread); 432 return; 433 slow_path: 434 local_irq_save(flags); 435 /* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */ 436 if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) { 437 INIT_LIST_HEAD(&list); 438 hash = sh->hash_lock_index; 439 do_release_stripe(conf, sh, &list); 440 spin_unlock(&conf->device_lock); 441 release_inactive_stripe_list(conf, &list, hash); 442 } 443 local_irq_restore(flags); 444 } 445 446 static inline void remove_hash(struct stripe_head *sh) 447 { 448 pr_debug("remove_hash(), stripe %llu\n", 449 (unsigned long long)sh->sector); 450 451 hlist_del_init(&sh->hash); 452 } 453 454 static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh) 455 { 456 struct hlist_head *hp = stripe_hash(conf, sh->sector); 457 458 pr_debug("insert_hash(), stripe %llu\n", 459 (unsigned long long)sh->sector); 460 461 hlist_add_head(&sh->hash, hp); 462 } 463 464 /* find an idle stripe, make sure it is unhashed, and return it. */ 465 static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash) 466 { 467 struct stripe_head *sh = NULL; 468 struct list_head *first; 469 470 if (list_empty(conf->inactive_list + hash)) 471 goto out; 472 first = (conf->inactive_list + hash)->next; 473 sh = list_entry(first, struct stripe_head, lru); 474 list_del_init(first); 475 remove_hash(sh); 476 atomic_inc(&conf->active_stripes); 477 BUG_ON(hash != sh->hash_lock_index); 478 if (list_empty(conf->inactive_list + hash)) 479 atomic_inc(&conf->empty_inactive_list_nr); 480 out: 481 return sh; 482 } 483 484 static void shrink_buffers(struct stripe_head *sh) 485 { 486 struct page *p; 487 int i; 488 int num = sh->raid_conf->pool_size; 489 490 for (i = 0; i < num ; i++) { 491 WARN_ON(sh->dev[i].page != sh->dev[i].orig_page); 492 p = sh->dev[i].page; 493 if (!p) 494 continue; 495 sh->dev[i].page = NULL; 496 put_page(p); 497 } 498 } 499 500 static int grow_buffers(struct stripe_head *sh, gfp_t gfp) 501 { 502 int i; 503 int num = sh->raid_conf->pool_size; 504 505 for (i = 0; i < num; i++) { 506 struct page *page; 507 508 if (!(page = alloc_page(gfp))) { 509 return 1; 510 } 511 sh->dev[i].page = page; 512 sh->dev[i].orig_page = page; 513 } 514 return 0; 515 } 516 517 static void raid5_build_block(struct stripe_head *sh, int i, int previous); 518 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, 519 struct stripe_head *sh); 520 521 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) 522 { 523 struct r5conf *conf = sh->raid_conf; 524 int i, seq; 525 526 BUG_ON(atomic_read(&sh->count) != 0); 527 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); 528 BUG_ON(stripe_operations_active(sh)); 529 BUG_ON(sh->batch_head); 530 531 pr_debug("init_stripe called, stripe %llu\n", 532 (unsigned long long)sector); 533 retry: 534 seq = read_seqcount_begin(&conf->gen_lock); 535 sh->generation = conf->generation - previous; 536 sh->disks = previous ? 
conf->previous_raid_disks : conf->raid_disks; 537 sh->sector = sector; 538 stripe_set_idx(sector, conf, previous, sh); 539 sh->state = 0; 540 541 for (i = sh->disks; i--; ) { 542 struct r5dev *dev = &sh->dev[i]; 543 544 if (dev->toread || dev->read || dev->towrite || dev->written || 545 test_bit(R5_LOCKED, &dev->flags)) { 546 printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n", 547 (unsigned long long)sh->sector, i, dev->toread, 548 dev->read, dev->towrite, dev->written, 549 test_bit(R5_LOCKED, &dev->flags)); 550 WARN_ON(1); 551 } 552 dev->flags = 0; 553 raid5_build_block(sh, i, previous); 554 } 555 if (read_seqcount_retry(&conf->gen_lock, seq)) 556 goto retry; 557 sh->overwrite_disks = 0; 558 insert_hash(conf, sh); 559 sh->cpu = smp_processor_id(); 560 set_bit(STRIPE_BATCH_READY, &sh->state); 561 } 562 563 static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector, 564 short generation) 565 { 566 struct stripe_head *sh; 567 568 pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector); 569 hlist_for_each_entry(sh, stripe_hash(conf, sector), hash) 570 if (sh->sector == sector && sh->generation == generation) 571 return sh; 572 pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector); 573 return NULL; 574 } 575 576 /* 577 * Need to check if array has failed when deciding whether to: 578 * - start an array 579 * - remove non-faulty devices 580 * - add a spare 581 * - allow a reshape 582 * This determination is simple when no reshape is happening. 583 * However if there is a reshape, we need to carefully check 584 * both the before and after sections. 585 * This is because some failed devices may only affect one 586 * of the two sections, and some non-in_sync devices may 587 * be insync in the section most affected by failed devices. 588 */ 589 static int calc_degraded(struct r5conf *conf) 590 { 591 int degraded, degraded2; 592 int i; 593 594 rcu_read_lock(); 595 degraded = 0; 596 for (i = 0; i < conf->previous_raid_disks; i++) { 597 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); 598 if (rdev && test_bit(Faulty, &rdev->flags)) 599 rdev = rcu_dereference(conf->disks[i].replacement); 600 if (!rdev || test_bit(Faulty, &rdev->flags)) 601 degraded++; 602 else if (test_bit(In_sync, &rdev->flags)) 603 ; 604 else 605 /* not in-sync or faulty. 606 * If the reshape increases the number of devices, 607 * this is being recovered by the reshape, so 608 * this 'previous' section is not in_sync. 609 * If the number of devices is being reduced however, 610 * the device can only be part of the array if 611 * we are reverting a reshape, so this section will 612 * be in-sync. 613 */ 614 if (conf->raid_disks >= conf->previous_raid_disks) 615 degraded++; 616 } 617 rcu_read_unlock(); 618 if (conf->raid_disks == conf->previous_raid_disks) 619 return degraded; 620 rcu_read_lock(); 621 degraded2 = 0; 622 for (i = 0; i < conf->raid_disks; i++) { 623 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); 624 if (rdev && test_bit(Faulty, &rdev->flags)) 625 rdev = rcu_dereference(conf->disks[i].replacement); 626 if (!rdev || test_bit(Faulty, &rdev->flags)) 627 degraded2++; 628 else if (test_bit(In_sync, &rdev->flags)) 629 ; 630 else 631 /* not in-sync or faulty. 632 * If reshape increases the number of devices, this 633 * section has already been recovered, else it 634 * almost certainly hasn't. 
635 */ 636 if (conf->raid_disks <= conf->previous_raid_disks) 637 degraded2++; 638 } 639 rcu_read_unlock(); 640 if (degraded2 > degraded) 641 return degraded2; 642 return degraded; 643 } 644 645 static int has_failed(struct r5conf *conf) 646 { 647 int degraded; 648 649 if (conf->mddev->reshape_position == MaxSector) 650 return conf->mddev->degraded > conf->max_degraded; 651 652 degraded = calc_degraded(conf); 653 if (degraded > conf->max_degraded) 654 return 1; 655 return 0; 656 } 657 658 static struct stripe_head * 659 get_active_stripe(struct r5conf *conf, sector_t sector, 660 int previous, int noblock, int noquiesce) 661 { 662 struct stripe_head *sh; 663 int hash = stripe_hash_locks_hash(sector); 664 665 pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector); 666 667 spin_lock_irq(conf->hash_locks + hash); 668 669 do { 670 wait_event_lock_irq(conf->wait_for_stripe, 671 conf->quiesce == 0 || noquiesce, 672 *(conf->hash_locks + hash)); 673 sh = __find_stripe(conf, sector, conf->generation - previous); 674 if (!sh) { 675 if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) { 676 sh = get_free_stripe(conf, hash); 677 if (!sh && llist_empty(&conf->released_stripes) && 678 !test_bit(R5_DID_ALLOC, &conf->cache_state)) 679 set_bit(R5_ALLOC_MORE, 680 &conf->cache_state); 681 } 682 if (noblock && sh == NULL) 683 break; 684 if (!sh) { 685 set_bit(R5_INACTIVE_BLOCKED, 686 &conf->cache_state); 687 wait_event_lock_irq( 688 conf->wait_for_stripe, 689 !list_empty(conf->inactive_list + hash) && 690 (atomic_read(&conf->active_stripes) 691 < (conf->max_nr_stripes * 3 / 4) 692 || !test_bit(R5_INACTIVE_BLOCKED, 693 &conf->cache_state)), 694 *(conf->hash_locks + hash)); 695 clear_bit(R5_INACTIVE_BLOCKED, 696 &conf->cache_state); 697 } else { 698 init_stripe(sh, sector, previous); 699 atomic_inc(&sh->count); 700 } 701 } else if (!atomic_inc_not_zero(&sh->count)) { 702 spin_lock(&conf->device_lock); 703 if (!atomic_read(&sh->count)) { 704 if (!test_bit(STRIPE_HANDLE, &sh->state)) 705 atomic_inc(&conf->active_stripes); 706 BUG_ON(list_empty(&sh->lru) && 707 !test_bit(STRIPE_EXPANDING, &sh->state)); 708 list_del_init(&sh->lru); 709 if (sh->group) { 710 sh->group->stripes_cnt--; 711 sh->group = NULL; 712 } 713 } 714 atomic_inc(&sh->count); 715 spin_unlock(&conf->device_lock); 716 } 717 } while (sh == NULL); 718 719 spin_unlock_irq(conf->hash_locks + hash); 720 return sh; 721 } 722 723 static bool is_full_stripe_write(struct stripe_head *sh) 724 { 725 BUG_ON(sh->overwrite_disks > (sh->disks - sh->raid_conf->max_degraded)); 726 return sh->overwrite_disks == (sh->disks - sh->raid_conf->max_degraded); 727 } 728 729 static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) 730 { 731 local_irq_disable(); 732 if (sh1 > sh2) { 733 spin_lock(&sh2->stripe_lock); 734 spin_lock_nested(&sh1->stripe_lock, 1); 735 } else { 736 spin_lock(&sh1->stripe_lock); 737 spin_lock_nested(&sh2->stripe_lock, 1); 738 } 739 } 740 741 static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) 742 { 743 spin_unlock(&sh1->stripe_lock); 744 spin_unlock(&sh2->stripe_lock); 745 local_irq_enable(); 746 } 747 748 /* Only freshly new full stripe normal write stripe can be added to a batch list */ 749 static bool stripe_can_batch(struct stripe_head *sh) 750 { 751 return test_bit(STRIPE_BATCH_READY, &sh->state) && 752 is_full_stripe_write(sh); 753 } 754 755 /* we only do back search */ 756 static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh) 757 { 758 struct 
stripe_head *head; 759 sector_t head_sector, tmp_sec; 760 int hash; 761 int dd_idx; 762 763 if (!stripe_can_batch(sh)) 764 return; 765 /* Don't cross chunks, so stripe pd_idx/qd_idx is the same */ 766 tmp_sec = sh->sector; 767 if (!sector_div(tmp_sec, conf->chunk_sectors)) 768 return; 769 head_sector = sh->sector - STRIPE_SECTORS; 770 771 hash = stripe_hash_locks_hash(head_sector); 772 spin_lock_irq(conf->hash_locks + hash); 773 head = __find_stripe(conf, head_sector, conf->generation); 774 if (head && !atomic_inc_not_zero(&head->count)) { 775 spin_lock(&conf->device_lock); 776 if (!atomic_read(&head->count)) { 777 if (!test_bit(STRIPE_HANDLE, &head->state)) 778 atomic_inc(&conf->active_stripes); 779 BUG_ON(list_empty(&head->lru) && 780 !test_bit(STRIPE_EXPANDING, &head->state)); 781 list_del_init(&head->lru); 782 if (head->group) { 783 head->group->stripes_cnt--; 784 head->group = NULL; 785 } 786 } 787 atomic_inc(&head->count); 788 spin_unlock(&conf->device_lock); 789 } 790 spin_unlock_irq(conf->hash_locks + hash); 791 792 if (!head) 793 return; 794 if (!stripe_can_batch(head)) 795 goto out; 796 797 lock_two_stripes(head, sh); 798 /* clear_batch_ready clear the flag */ 799 if (!stripe_can_batch(head) || !stripe_can_batch(sh)) 800 goto unlock_out; 801 802 if (sh->batch_head) 803 goto unlock_out; 804 805 dd_idx = 0; 806 while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) 807 dd_idx++; 808 if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw) 809 goto unlock_out; 810 811 if (head->batch_head) { 812 spin_lock(&head->batch_head->batch_lock); 813 /* This batch list is already running */ 814 if (!stripe_can_batch(head)) { 815 spin_unlock(&head->batch_head->batch_lock); 816 goto unlock_out; 817 } 818 819 /* 820 * at this point, head's BATCH_READY could be cleared, but we 821 * can still add the stripe to batch list 822 */ 823 list_add(&sh->batch_list, &head->batch_list); 824 spin_unlock(&head->batch_head->batch_lock); 825 826 sh->batch_head = head->batch_head; 827 } else { 828 head->batch_head = head; 829 sh->batch_head = head->batch_head; 830 spin_lock(&head->batch_lock); 831 list_add_tail(&sh->batch_list, &head->batch_list); 832 spin_unlock(&head->batch_lock); 833 } 834 835 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 836 if (atomic_dec_return(&conf->preread_active_stripes) 837 < IO_THRESHOLD) 838 md_wakeup_thread(conf->mddev->thread); 839 840 atomic_inc(&sh->count); 841 unlock_out: 842 unlock_two_stripes(head, sh); 843 out: 844 release_stripe(head); 845 } 846 847 /* Determine if 'data_offset' or 'new_data_offset' should be used 848 * in this stripe_head. 849 */ 850 static int use_new_offset(struct r5conf *conf, struct stripe_head *sh) 851 { 852 sector_t progress = conf->reshape_progress; 853 /* Need a memory barrier to make sure we see the value 854 * of conf->generation, or ->data_offset that was set before 855 * reshape_progress was updated. 856 */ 857 smp_rmb(); 858 if (progress == MaxSector) 859 return 0; 860 if (sh->generation == conf->generation - 1) 861 return 0; 862 /* We are in a reshape, and this is a new-generation stripe, 863 * so use new_data_offset. 
864 */ 865 return 1; 866 } 867 868 static void 869 raid5_end_read_request(struct bio *bi, int error); 870 static void 871 raid5_end_write_request(struct bio *bi, int error); 872 873 static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) 874 { 875 struct r5conf *conf = sh->raid_conf; 876 int i, disks = sh->disks; 877 struct stripe_head *head_sh = sh; 878 879 might_sleep(); 880 881 for (i = disks; i--; ) { 882 int rw; 883 int replace_only = 0; 884 struct bio *bi, *rbi; 885 struct md_rdev *rdev, *rrdev = NULL; 886 887 sh = head_sh; 888 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) { 889 if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags)) 890 rw = WRITE_FUA; 891 else 892 rw = WRITE; 893 if (test_bit(R5_Discard, &sh->dev[i].flags)) 894 rw |= REQ_DISCARD; 895 } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) 896 rw = READ; 897 else if (test_and_clear_bit(R5_WantReplace, 898 &sh->dev[i].flags)) { 899 rw = WRITE; 900 replace_only = 1; 901 } else 902 continue; 903 if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags)) 904 rw |= REQ_SYNC; 905 906 again: 907 bi = &sh->dev[i].req; 908 rbi = &sh->dev[i].rreq; /* For writing to replacement */ 909 910 rcu_read_lock(); 911 rrdev = rcu_dereference(conf->disks[i].replacement); 912 smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */ 913 rdev = rcu_dereference(conf->disks[i].rdev); 914 if (!rdev) { 915 rdev = rrdev; 916 rrdev = NULL; 917 } 918 if (rw & WRITE) { 919 if (replace_only) 920 rdev = NULL; 921 if (rdev == rrdev) 922 /* We raced and saw duplicates */ 923 rrdev = NULL; 924 } else { 925 if (test_bit(R5_ReadRepl, &head_sh->dev[i].flags) && rrdev) 926 rdev = rrdev; 927 rrdev = NULL; 928 } 929 930 if (rdev && test_bit(Faulty, &rdev->flags)) 931 rdev = NULL; 932 if (rdev) 933 atomic_inc(&rdev->nr_pending); 934 if (rrdev && test_bit(Faulty, &rrdev->flags)) 935 rrdev = NULL; 936 if (rrdev) 937 atomic_inc(&rrdev->nr_pending); 938 rcu_read_unlock(); 939 940 /* We have already checked bad blocks for reads. Now 941 * need to check for writes. We never accept write errors 942 * on the replacement, so we don't to check rrdev. 943 */ 944 while ((rw & WRITE) && rdev && 945 test_bit(WriteErrorSeen, &rdev->flags)) { 946 sector_t first_bad; 947 int bad_sectors; 948 int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, 949 &first_bad, &bad_sectors); 950 if (!bad) 951 break; 952 953 if (bad < 0) { 954 set_bit(BlockedBadBlocks, &rdev->flags); 955 if (!conf->mddev->external && 956 conf->mddev->flags) { 957 /* It is very unlikely, but we might 958 * still need to write out the 959 * bad block log - better give it 960 * a chance*/ 961 md_check_recovery(conf->mddev); 962 } 963 /* 964 * Because md_wait_for_blocked_rdev 965 * will dec nr_pending, we must 966 * increment it first. 967 */ 968 atomic_inc(&rdev->nr_pending); 969 md_wait_for_blocked_rdev(rdev, conf->mddev); 970 } else { 971 /* Acknowledged bad block - skip the write */ 972 rdev_dec_pending(rdev, conf->mddev); 973 rdev = NULL; 974 } 975 } 976 977 if (rdev) { 978 if (s->syncing || s->expanding || s->expanded 979 || s->replacing) 980 md_sync_acct(rdev->bdev, STRIPE_SECTORS); 981 982 set_bit(STRIPE_IO_STARTED, &sh->state); 983 984 bio_reset(bi); 985 bi->bi_bdev = rdev->bdev; 986 bi->bi_rw = rw; 987 bi->bi_end_io = (rw & WRITE) 988 ? 
raid5_end_write_request 989 : raid5_end_read_request; 990 bi->bi_private = sh; 991 992 pr_debug("%s: for %llu schedule op %ld on disc %d\n", 993 __func__, (unsigned long long)sh->sector, 994 bi->bi_rw, i); 995 atomic_inc(&sh->count); 996 if (sh != head_sh) 997 atomic_inc(&head_sh->count); 998 if (use_new_offset(conf, sh)) 999 bi->bi_iter.bi_sector = (sh->sector 1000 + rdev->new_data_offset); 1001 else 1002 bi->bi_iter.bi_sector = (sh->sector 1003 + rdev->data_offset); 1004 if (test_bit(R5_ReadNoMerge, &head_sh->dev[i].flags)) 1005 bi->bi_rw |= REQ_NOMERGE; 1006 1007 if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) 1008 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); 1009 sh->dev[i].vec.bv_page = sh->dev[i].page; 1010 bi->bi_vcnt = 1; 1011 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 1012 bi->bi_io_vec[0].bv_offset = 0; 1013 bi->bi_iter.bi_size = STRIPE_SIZE; 1014 /* 1015 * If this is discard request, set bi_vcnt 0. We don't 1016 * want to confuse SCSI because SCSI will replace payload 1017 */ 1018 if (rw & REQ_DISCARD) 1019 bi->bi_vcnt = 0; 1020 if (rrdev) 1021 set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); 1022 1023 if (conf->mddev->gendisk) 1024 trace_block_bio_remap(bdev_get_queue(bi->bi_bdev), 1025 bi, disk_devt(conf->mddev->gendisk), 1026 sh->dev[i].sector); 1027 generic_make_request(bi); 1028 } 1029 if (rrdev) { 1030 if (s->syncing || s->expanding || s->expanded 1031 || s->replacing) 1032 md_sync_acct(rrdev->bdev, STRIPE_SECTORS); 1033 1034 set_bit(STRIPE_IO_STARTED, &sh->state); 1035 1036 bio_reset(rbi); 1037 rbi->bi_bdev = rrdev->bdev; 1038 rbi->bi_rw = rw; 1039 BUG_ON(!(rw & WRITE)); 1040 rbi->bi_end_io = raid5_end_write_request; 1041 rbi->bi_private = sh; 1042 1043 pr_debug("%s: for %llu schedule op %ld on " 1044 "replacement disc %d\n", 1045 __func__, (unsigned long long)sh->sector, 1046 rbi->bi_rw, i); 1047 atomic_inc(&sh->count); 1048 if (sh != head_sh) 1049 atomic_inc(&head_sh->count); 1050 if (use_new_offset(conf, sh)) 1051 rbi->bi_iter.bi_sector = (sh->sector 1052 + rrdev->new_data_offset); 1053 else 1054 rbi->bi_iter.bi_sector = (sh->sector 1055 + rrdev->data_offset); 1056 if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) 1057 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); 1058 sh->dev[i].rvec.bv_page = sh->dev[i].page; 1059 rbi->bi_vcnt = 1; 1060 rbi->bi_io_vec[0].bv_len = STRIPE_SIZE; 1061 rbi->bi_io_vec[0].bv_offset = 0; 1062 rbi->bi_iter.bi_size = STRIPE_SIZE; 1063 /* 1064 * If this is discard request, set bi_vcnt 0. 
We don't 1065 * want to confuse SCSI because SCSI will replace payload 1066 */ 1067 if (rw & REQ_DISCARD) 1068 rbi->bi_vcnt = 0; 1069 if (conf->mddev->gendisk) 1070 trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev), 1071 rbi, disk_devt(conf->mddev->gendisk), 1072 sh->dev[i].sector); 1073 generic_make_request(rbi); 1074 } 1075 if (!rdev && !rrdev) { 1076 if (rw & WRITE) 1077 set_bit(STRIPE_DEGRADED, &sh->state); 1078 pr_debug("skip op %ld on disc %d for sector %llu\n", 1079 bi->bi_rw, i, (unsigned long long)sh->sector); 1080 clear_bit(R5_LOCKED, &sh->dev[i].flags); 1081 if (sh->batch_head) 1082 set_bit(STRIPE_BATCH_ERR, 1083 &sh->batch_head->state); 1084 set_bit(STRIPE_HANDLE, &sh->state); 1085 } 1086 1087 if (!head_sh->batch_head) 1088 continue; 1089 sh = list_first_entry(&sh->batch_list, struct stripe_head, 1090 batch_list); 1091 if (sh != head_sh) 1092 goto again; 1093 } 1094 } 1095 1096 static struct dma_async_tx_descriptor * 1097 async_copy_data(int frombio, struct bio *bio, struct page **page, 1098 sector_t sector, struct dma_async_tx_descriptor *tx, 1099 struct stripe_head *sh) 1100 { 1101 struct bio_vec bvl; 1102 struct bvec_iter iter; 1103 struct page *bio_page; 1104 int page_offset; 1105 struct async_submit_ctl submit; 1106 enum async_tx_flags flags = 0; 1107 1108 if (bio->bi_iter.bi_sector >= sector) 1109 page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512; 1110 else 1111 page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512; 1112 1113 if (frombio) 1114 flags |= ASYNC_TX_FENCE; 1115 init_async_submit(&submit, flags, tx, NULL, NULL, NULL); 1116 1117 bio_for_each_segment(bvl, bio, iter) { 1118 int len = bvl.bv_len; 1119 int clen; 1120 int b_offset = 0; 1121 1122 if (page_offset < 0) { 1123 b_offset = -page_offset; 1124 page_offset += b_offset; 1125 len -= b_offset; 1126 } 1127 1128 if (len > 0 && page_offset + len > STRIPE_SIZE) 1129 clen = STRIPE_SIZE - page_offset; 1130 else 1131 clen = len; 1132 1133 if (clen > 0) { 1134 b_offset += bvl.bv_offset; 1135 bio_page = bvl.bv_page; 1136 if (frombio) { 1137 if (sh->raid_conf->skip_copy && 1138 b_offset == 0 && page_offset == 0 && 1139 clen == STRIPE_SIZE) 1140 *page = bio_page; 1141 else 1142 tx = async_memcpy(*page, bio_page, page_offset, 1143 b_offset, clen, &submit); 1144 } else 1145 tx = async_memcpy(bio_page, *page, b_offset, 1146 page_offset, clen, &submit); 1147 } 1148 /* chain the operations */ 1149 submit.depend_tx = tx; 1150 1151 if (clen < len) /* hit end of page */ 1152 break; 1153 page_offset += len; 1154 } 1155 1156 return tx; 1157 } 1158 1159 static void ops_complete_biofill(void *stripe_head_ref) 1160 { 1161 struct stripe_head *sh = stripe_head_ref; 1162 struct bio *return_bi = NULL; 1163 int i; 1164 1165 pr_debug("%s: stripe %llu\n", __func__, 1166 (unsigned long long)sh->sector); 1167 1168 /* clear completed biofills */ 1169 for (i = sh->disks; i--; ) { 1170 struct r5dev *dev = &sh->dev[i]; 1171 1172 /* acknowledge completion of a biofill operation */ 1173 /* and check if we need to reply to a read request, 1174 * new R5_Wantfill requests are held off until 1175 * !STRIPE_BIOFILL_RUN 1176 */ 1177 if (test_and_clear_bit(R5_Wantfill, &dev->flags)) { 1178 struct bio *rbi, *rbi2; 1179 1180 BUG_ON(!dev->read); 1181 rbi = dev->read; 1182 dev->read = NULL; 1183 while (rbi && rbi->bi_iter.bi_sector < 1184 dev->sector + STRIPE_SECTORS) { 1185 rbi2 = r5_next_bio(rbi, dev->sector); 1186 if (!raid5_dec_bi_active_stripes(rbi)) { 1187 rbi->bi_next = return_bi; 1188 return_bi = rbi; 1189 } 1190 rbi = rbi2; 1191 } 
1192 } 1193 } 1194 clear_bit(STRIPE_BIOFILL_RUN, &sh->state); 1195 1196 return_io(return_bi); 1197 1198 set_bit(STRIPE_HANDLE, &sh->state); 1199 release_stripe(sh); 1200 } 1201 1202 static void ops_run_biofill(struct stripe_head *sh) 1203 { 1204 struct dma_async_tx_descriptor *tx = NULL; 1205 struct async_submit_ctl submit; 1206 int i; 1207 1208 BUG_ON(sh->batch_head); 1209 pr_debug("%s: stripe %llu\n", __func__, 1210 (unsigned long long)sh->sector); 1211 1212 for (i = sh->disks; i--; ) { 1213 struct r5dev *dev = &sh->dev[i]; 1214 if (test_bit(R5_Wantfill, &dev->flags)) { 1215 struct bio *rbi; 1216 spin_lock_irq(&sh->stripe_lock); 1217 dev->read = rbi = dev->toread; 1218 dev->toread = NULL; 1219 spin_unlock_irq(&sh->stripe_lock); 1220 while (rbi && rbi->bi_iter.bi_sector < 1221 dev->sector + STRIPE_SECTORS) { 1222 tx = async_copy_data(0, rbi, &dev->page, 1223 dev->sector, tx, sh); 1224 rbi = r5_next_bio(rbi, dev->sector); 1225 } 1226 } 1227 } 1228 1229 atomic_inc(&sh->count); 1230 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL); 1231 async_trigger_callback(&submit); 1232 } 1233 1234 static void mark_target_uptodate(struct stripe_head *sh, int target) 1235 { 1236 struct r5dev *tgt; 1237 1238 if (target < 0) 1239 return; 1240 1241 tgt = &sh->dev[target]; 1242 set_bit(R5_UPTODATE, &tgt->flags); 1243 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 1244 clear_bit(R5_Wantcompute, &tgt->flags); 1245 } 1246 1247 static void ops_complete_compute(void *stripe_head_ref) 1248 { 1249 struct stripe_head *sh = stripe_head_ref; 1250 1251 pr_debug("%s: stripe %llu\n", __func__, 1252 (unsigned long long)sh->sector); 1253 1254 /* mark the computed target(s) as uptodate */ 1255 mark_target_uptodate(sh, sh->ops.target); 1256 mark_target_uptodate(sh, sh->ops.target2); 1257 1258 clear_bit(STRIPE_COMPUTE_RUN, &sh->state); 1259 if (sh->check_state == check_state_compute_run) 1260 sh->check_state = check_state_compute_result; 1261 set_bit(STRIPE_HANDLE, &sh->state); 1262 release_stripe(sh); 1263 } 1264 1265 /* return a pointer to the address conversion region of the scribble buffer */ 1266 static addr_conv_t *to_addr_conv(struct stripe_head *sh, 1267 struct raid5_percpu *percpu, int i) 1268 { 1269 void *addr; 1270 1271 addr = flex_array_get(percpu->scribble, i); 1272 return addr + sizeof(struct page *) * (sh->disks + 2); 1273 } 1274 1275 /* return a pointer to the address conversion region of the scribble buffer */ 1276 static struct page **to_addr_page(struct raid5_percpu *percpu, int i) 1277 { 1278 void *addr; 1279 1280 addr = flex_array_get(percpu->scribble, i); 1281 return addr; 1282 } 1283 1284 static struct dma_async_tx_descriptor * 1285 ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu) 1286 { 1287 int disks = sh->disks; 1288 struct page **xor_srcs = to_addr_page(percpu, 0); 1289 int target = sh->ops.target; 1290 struct r5dev *tgt = &sh->dev[target]; 1291 struct page *xor_dest = tgt->page; 1292 int count = 0; 1293 struct dma_async_tx_descriptor *tx; 1294 struct async_submit_ctl submit; 1295 int i; 1296 1297 BUG_ON(sh->batch_head); 1298 1299 pr_debug("%s: stripe %llu block: %d\n", 1300 __func__, (unsigned long long)sh->sector, target); 1301 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 1302 1303 for (i = disks; i--; ) 1304 if (i != target) 1305 xor_srcs[count++] = sh->dev[i].page; 1306 1307 atomic_inc(&sh->count); 1308 1309 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL, 1310 ops_complete_compute, sh, to_addr_conv(sh, percpu, 0)); 1311 
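	/* With a single source block the XOR result equals that block, so a
	 * plain copy is scheduled instead of an XOR operation.
	 */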
if (unlikely(count == 1)) 1312 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit); 1313 else 1314 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); 1315 1316 return tx; 1317 } 1318 1319 /* set_syndrome_sources - populate source buffers for gen_syndrome 1320 * @srcs - (struct page *) array of size sh->disks 1321 * @sh - stripe_head to parse 1322 * 1323 * Populates srcs in proper layout order for the stripe and returns the 1324 * 'count' of sources to be used in a call to async_gen_syndrome. The P 1325 * destination buffer is recorded in srcs[count] and the Q destination 1326 * is recorded in srcs[count+1]]. 1327 */ 1328 static int set_syndrome_sources(struct page **srcs, 1329 struct stripe_head *sh, 1330 int srctype) 1331 { 1332 int disks = sh->disks; 1333 int syndrome_disks = sh->ddf_layout ? disks : (disks - 2); 1334 int d0_idx = raid6_d0(sh); 1335 int count; 1336 int i; 1337 1338 for (i = 0; i < disks; i++) 1339 srcs[i] = NULL; 1340 1341 count = 0; 1342 i = d0_idx; 1343 do { 1344 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); 1345 struct r5dev *dev = &sh->dev[i]; 1346 1347 if (i == sh->qd_idx || i == sh->pd_idx || 1348 (srctype == SYNDROME_SRC_ALL) || 1349 (srctype == SYNDROME_SRC_WANT_DRAIN && 1350 test_bit(R5_Wantdrain, &dev->flags)) || 1351 (srctype == SYNDROME_SRC_WRITTEN && 1352 dev->written)) 1353 srcs[slot] = sh->dev[i].page; 1354 i = raid6_next_disk(i, disks); 1355 } while (i != d0_idx); 1356 1357 return syndrome_disks; 1358 } 1359 1360 static struct dma_async_tx_descriptor * 1361 ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu) 1362 { 1363 int disks = sh->disks; 1364 struct page **blocks = to_addr_page(percpu, 0); 1365 int target; 1366 int qd_idx = sh->qd_idx; 1367 struct dma_async_tx_descriptor *tx; 1368 struct async_submit_ctl submit; 1369 struct r5dev *tgt; 1370 struct page *dest; 1371 int i; 1372 int count; 1373 1374 BUG_ON(sh->batch_head); 1375 if (sh->ops.target < 0) 1376 target = sh->ops.target2; 1377 else if (sh->ops.target2 < 0) 1378 target = sh->ops.target; 1379 else 1380 /* we should only have one valid target */ 1381 BUG(); 1382 BUG_ON(target < 0); 1383 pr_debug("%s: stripe %llu block: %d\n", 1384 __func__, (unsigned long long)sh->sector, target); 1385 1386 tgt = &sh->dev[target]; 1387 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 1388 dest = tgt->page; 1389 1390 atomic_inc(&sh->count); 1391 1392 if (target == qd_idx) { 1393 count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL); 1394 blocks[count] = NULL; /* regenerating p is not necessary */ 1395 BUG_ON(blocks[count+1] != dest); /* q should already be set */ 1396 init_async_submit(&submit, ASYNC_TX_FENCE, NULL, 1397 ops_complete_compute, sh, 1398 to_addr_conv(sh, percpu, 0)); 1399 tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit); 1400 } else { 1401 /* Compute any data- or p-drive using XOR */ 1402 count = 0; 1403 for (i = disks; i-- ; ) { 1404 if (i == target || i == qd_idx) 1405 continue; 1406 blocks[count++] = sh->dev[i].page; 1407 } 1408 1409 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, 1410 NULL, ops_complete_compute, sh, 1411 to_addr_conv(sh, percpu, 0)); 1412 tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit); 1413 } 1414 1415 return tx; 1416 } 1417 1418 static struct dma_async_tx_descriptor * 1419 ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) 1420 { 1421 int i, count, disks = sh->disks; 1422 int syndrome_disks = sh->ddf_layout ? 
disks : disks-2; 1423 int d0_idx = raid6_d0(sh); 1424 int faila = -1, failb = -1; 1425 int target = sh->ops.target; 1426 int target2 = sh->ops.target2; 1427 struct r5dev *tgt = &sh->dev[target]; 1428 struct r5dev *tgt2 = &sh->dev[target2]; 1429 struct dma_async_tx_descriptor *tx; 1430 struct page **blocks = to_addr_page(percpu, 0); 1431 struct async_submit_ctl submit; 1432 1433 BUG_ON(sh->batch_head); 1434 pr_debug("%s: stripe %llu block1: %d block2: %d\n", 1435 __func__, (unsigned long long)sh->sector, target, target2); 1436 BUG_ON(target < 0 || target2 < 0); 1437 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 1438 BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags)); 1439 1440 /* we need to open-code set_syndrome_sources to handle the 1441 * slot number conversion for 'faila' and 'failb' 1442 */ 1443 for (i = 0; i < disks ; i++) 1444 blocks[i] = NULL; 1445 count = 0; 1446 i = d0_idx; 1447 do { 1448 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); 1449 1450 blocks[slot] = sh->dev[i].page; 1451 1452 if (i == target) 1453 faila = slot; 1454 if (i == target2) 1455 failb = slot; 1456 i = raid6_next_disk(i, disks); 1457 } while (i != d0_idx); 1458 1459 BUG_ON(faila == failb); 1460 if (failb < faila) 1461 swap(faila, failb); 1462 pr_debug("%s: stripe: %llu faila: %d failb: %d\n", 1463 __func__, (unsigned long long)sh->sector, faila, failb); 1464 1465 atomic_inc(&sh->count); 1466 1467 if (failb == syndrome_disks+1) { 1468 /* Q disk is one of the missing disks */ 1469 if (faila == syndrome_disks) { 1470 /* Missing P+Q, just recompute */ 1471 init_async_submit(&submit, ASYNC_TX_FENCE, NULL, 1472 ops_complete_compute, sh, 1473 to_addr_conv(sh, percpu, 0)); 1474 return async_gen_syndrome(blocks, 0, syndrome_disks+2, 1475 STRIPE_SIZE, &submit); 1476 } else { 1477 struct page *dest; 1478 int data_target; 1479 int qd_idx = sh->qd_idx; 1480 1481 /* Missing D+Q: recompute D from P, then recompute Q */ 1482 if (target == qd_idx) 1483 data_target = target2; 1484 else 1485 data_target = target; 1486 1487 count = 0; 1488 for (i = disks; i-- ; ) { 1489 if (i == data_target || i == qd_idx) 1490 continue; 1491 blocks[count++] = sh->dev[i].page; 1492 } 1493 dest = sh->dev[data_target].page; 1494 init_async_submit(&submit, 1495 ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, 1496 NULL, NULL, NULL, 1497 to_addr_conv(sh, percpu, 0)); 1498 tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, 1499 &submit); 1500 1501 count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL); 1502 init_async_submit(&submit, ASYNC_TX_FENCE, tx, 1503 ops_complete_compute, sh, 1504 to_addr_conv(sh, percpu, 0)); 1505 return async_gen_syndrome(blocks, 0, count+2, 1506 STRIPE_SIZE, &submit); 1507 } 1508 } else { 1509 init_async_submit(&submit, ASYNC_TX_FENCE, NULL, 1510 ops_complete_compute, sh, 1511 to_addr_conv(sh, percpu, 0)); 1512 if (failb == syndrome_disks) { 1513 /* We're missing D+P. */ 1514 return async_raid6_datap_recov(syndrome_disks+2, 1515 STRIPE_SIZE, faila, 1516 blocks, &submit); 1517 } else { 1518 /* We're missing D+D. 
*/ 1519 return async_raid6_2data_recov(syndrome_disks+2, 1520 STRIPE_SIZE, faila, failb, 1521 blocks, &submit); 1522 } 1523 } 1524 } 1525 1526 static void ops_complete_prexor(void *stripe_head_ref) 1527 { 1528 struct stripe_head *sh = stripe_head_ref; 1529 1530 pr_debug("%s: stripe %llu\n", __func__, 1531 (unsigned long long)sh->sector); 1532 } 1533 1534 static struct dma_async_tx_descriptor * 1535 ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu, 1536 struct dma_async_tx_descriptor *tx) 1537 { 1538 int disks = sh->disks; 1539 struct page **xor_srcs = to_addr_page(percpu, 0); 1540 int count = 0, pd_idx = sh->pd_idx, i; 1541 struct async_submit_ctl submit; 1542 1543 /* existing parity data subtracted */ 1544 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 1545 1546 BUG_ON(sh->batch_head); 1547 pr_debug("%s: stripe %llu\n", __func__, 1548 (unsigned long long)sh->sector); 1549 1550 for (i = disks; i--; ) { 1551 struct r5dev *dev = &sh->dev[i]; 1552 /* Only process blocks that are known to be uptodate */ 1553 if (test_bit(R5_Wantdrain, &dev->flags)) 1554 xor_srcs[count++] = dev->page; 1555 } 1556 1557 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, 1558 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); 1559 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); 1560 1561 return tx; 1562 } 1563 1564 static struct dma_async_tx_descriptor * 1565 ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu, 1566 struct dma_async_tx_descriptor *tx) 1567 { 1568 struct page **blocks = to_addr_page(percpu, 0); 1569 int count; 1570 struct async_submit_ctl submit; 1571 1572 pr_debug("%s: stripe %llu\n", __func__, 1573 (unsigned long long)sh->sector); 1574 1575 count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_WANT_DRAIN); 1576 1577 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_PQ_XOR_DST, tx, 1578 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); 1579 tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit); 1580 1581 return tx; 1582 } 1583 1584 static struct dma_async_tx_descriptor * 1585 ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) 1586 { 1587 int disks = sh->disks; 1588 int i; 1589 struct stripe_head *head_sh = sh; 1590 1591 pr_debug("%s: stripe %llu\n", __func__, 1592 (unsigned long long)sh->sector); 1593 1594 for (i = disks; i--; ) { 1595 struct r5dev *dev; 1596 struct bio *chosen; 1597 1598 sh = head_sh; 1599 if (test_and_clear_bit(R5_Wantdrain, &head_sh->dev[i].flags)) { 1600 struct bio *wbi; 1601 1602 again: 1603 dev = &sh->dev[i]; 1604 spin_lock_irq(&sh->stripe_lock); 1605 chosen = dev->towrite; 1606 dev->towrite = NULL; 1607 sh->overwrite_disks = 0; 1608 BUG_ON(dev->written); 1609 wbi = dev->written = chosen; 1610 spin_unlock_irq(&sh->stripe_lock); 1611 WARN_ON(dev->page != dev->orig_page); 1612 1613 while (wbi && wbi->bi_iter.bi_sector < 1614 dev->sector + STRIPE_SECTORS) { 1615 if (wbi->bi_rw & REQ_FUA) 1616 set_bit(R5_WantFUA, &dev->flags); 1617 if (wbi->bi_rw & REQ_SYNC) 1618 set_bit(R5_SyncIO, &dev->flags); 1619 if (wbi->bi_rw & REQ_DISCARD) 1620 set_bit(R5_Discard, &dev->flags); 1621 else { 1622 tx = async_copy_data(1, wbi, &dev->page, 1623 dev->sector, tx, sh); 1624 if (dev->page != dev->orig_page) { 1625 set_bit(R5_SkipCopy, &dev->flags); 1626 clear_bit(R5_UPTODATE, &dev->flags); 1627 clear_bit(R5_OVERWRITE, &dev->flags); 1628 } 1629 } 1630 wbi = r5_next_bio(wbi, dev->sector); 1631 } 1632 1633 if (head_sh->batch_head) { 1634 sh = 
list_first_entry(&sh->batch_list, 1635 struct stripe_head, 1636 batch_list); 1637 if (sh == head_sh) 1638 continue; 1639 goto again; 1640 } 1641 } 1642 } 1643 1644 return tx; 1645 } 1646 1647 static void ops_complete_reconstruct(void *stripe_head_ref) 1648 { 1649 struct stripe_head *sh = stripe_head_ref; 1650 int disks = sh->disks; 1651 int pd_idx = sh->pd_idx; 1652 int qd_idx = sh->qd_idx; 1653 int i; 1654 bool fua = false, sync = false, discard = false; 1655 1656 pr_debug("%s: stripe %llu\n", __func__, 1657 (unsigned long long)sh->sector); 1658 1659 for (i = disks; i--; ) { 1660 fua |= test_bit(R5_WantFUA, &sh->dev[i].flags); 1661 sync |= test_bit(R5_SyncIO, &sh->dev[i].flags); 1662 discard |= test_bit(R5_Discard, &sh->dev[i].flags); 1663 } 1664 1665 for (i = disks; i--; ) { 1666 struct r5dev *dev = &sh->dev[i]; 1667 1668 if (dev->written || i == pd_idx || i == qd_idx) { 1669 if (!discard && !test_bit(R5_SkipCopy, &dev->flags)) 1670 set_bit(R5_UPTODATE, &dev->flags); 1671 if (fua) 1672 set_bit(R5_WantFUA, &dev->flags); 1673 if (sync) 1674 set_bit(R5_SyncIO, &dev->flags); 1675 } 1676 } 1677 1678 if (sh->reconstruct_state == reconstruct_state_drain_run) 1679 sh->reconstruct_state = reconstruct_state_drain_result; 1680 else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) 1681 sh->reconstruct_state = reconstruct_state_prexor_drain_result; 1682 else { 1683 BUG_ON(sh->reconstruct_state != reconstruct_state_run); 1684 sh->reconstruct_state = reconstruct_state_result; 1685 } 1686 1687 set_bit(STRIPE_HANDLE, &sh->state); 1688 release_stripe(sh); 1689 } 1690 1691 static void 1692 ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu, 1693 struct dma_async_tx_descriptor *tx) 1694 { 1695 int disks = sh->disks; 1696 struct page **xor_srcs; 1697 struct async_submit_ctl submit; 1698 int count, pd_idx = sh->pd_idx, i; 1699 struct page *xor_dest; 1700 int prexor = 0; 1701 unsigned long flags; 1702 int j = 0; 1703 struct stripe_head *head_sh = sh; 1704 int last_stripe; 1705 1706 pr_debug("%s: stripe %llu\n", __func__, 1707 (unsigned long long)sh->sector); 1708 1709 for (i = 0; i < sh->disks; i++) { 1710 if (pd_idx == i) 1711 continue; 1712 if (!test_bit(R5_Discard, &sh->dev[i].flags)) 1713 break; 1714 } 1715 if (i >= sh->disks) { 1716 atomic_inc(&sh->count); 1717 set_bit(R5_Discard, &sh->dev[pd_idx].flags); 1718 ops_complete_reconstruct(sh); 1719 return; 1720 } 1721 again: 1722 count = 0; 1723 xor_srcs = to_addr_page(percpu, j); 1724 /* check if prexor is active which means only process blocks 1725 * that are part of a read-modify-write (written) 1726 */ 1727 if (head_sh->reconstruct_state == reconstruct_state_prexor_drain_run) { 1728 prexor = 1; 1729 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 1730 for (i = disks; i--; ) { 1731 struct r5dev *dev = &sh->dev[i]; 1732 if (head_sh->dev[i].written) 1733 xor_srcs[count++] = dev->page; 1734 } 1735 } else { 1736 xor_dest = sh->dev[pd_idx].page; 1737 for (i = disks; i--; ) { 1738 struct r5dev *dev = &sh->dev[i]; 1739 if (i != pd_idx) 1740 xor_srcs[count++] = dev->page; 1741 } 1742 } 1743 1744 /* 1/ if we prexor'd then the dest is reused as a source 1745 * 2/ if we did not prexor then we are redoing the parity 1746 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST 1747 * for the synchronous xor case 1748 */ 1749 last_stripe = !head_sh->batch_head || 1750 list_first_entry(&sh->batch_list, 1751 struct stripe_head, batch_list) == head_sh; 1752 if (last_stripe) { 1753 flags = ASYNC_TX_ACK | 1754 (prexor ? 
ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST); 1755 1756 atomic_inc(&head_sh->count); 1757 init_async_submit(&submit, flags, tx, ops_complete_reconstruct, head_sh, 1758 to_addr_conv(sh, percpu, j)); 1759 } else { 1760 flags = prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST; 1761 init_async_submit(&submit, flags, tx, NULL, NULL, 1762 to_addr_conv(sh, percpu, j)); 1763 } 1764 1765 if (unlikely(count == 1)) 1766 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit); 1767 else 1768 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); 1769 if (!last_stripe) { 1770 j++; 1771 sh = list_first_entry(&sh->batch_list, struct stripe_head, 1772 batch_list); 1773 goto again; 1774 } 1775 } 1776 1777 static void 1778 ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu, 1779 struct dma_async_tx_descriptor *tx) 1780 { 1781 struct async_submit_ctl submit; 1782 struct page **blocks; 1783 int count, i, j = 0; 1784 struct stripe_head *head_sh = sh; 1785 int last_stripe; 1786 int synflags; 1787 unsigned long txflags; 1788 1789 pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); 1790 1791 for (i = 0; i < sh->disks; i++) { 1792 if (sh->pd_idx == i || sh->qd_idx == i) 1793 continue; 1794 if (!test_bit(R5_Discard, &sh->dev[i].flags)) 1795 break; 1796 } 1797 if (i >= sh->disks) { 1798 atomic_inc(&sh->count); 1799 set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); 1800 set_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); 1801 ops_complete_reconstruct(sh); 1802 return; 1803 } 1804 1805 again: 1806 blocks = to_addr_page(percpu, j); 1807 1808 if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) { 1809 synflags = SYNDROME_SRC_WRITTEN; 1810 txflags = ASYNC_TX_ACK | ASYNC_TX_PQ_XOR_DST; 1811 } else { 1812 synflags = SYNDROME_SRC_ALL; 1813 txflags = ASYNC_TX_ACK; 1814 } 1815 1816 count = set_syndrome_sources(blocks, sh, synflags); 1817 last_stripe = !head_sh->batch_head || 1818 list_first_entry(&sh->batch_list, 1819 struct stripe_head, batch_list) == head_sh; 1820 1821 if (last_stripe) { 1822 atomic_inc(&head_sh->count); 1823 init_async_submit(&submit, txflags, tx, ops_complete_reconstruct, 1824 head_sh, to_addr_conv(sh, percpu, j)); 1825 } else 1826 init_async_submit(&submit, 0, tx, NULL, NULL, 1827 to_addr_conv(sh, percpu, j)); 1828 async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit); 1829 if (!last_stripe) { 1830 j++; 1831 sh = list_first_entry(&sh->batch_list, struct stripe_head, 1832 batch_list); 1833 goto again; 1834 } 1835 } 1836 1837 static void ops_complete_check(void *stripe_head_ref) 1838 { 1839 struct stripe_head *sh = stripe_head_ref; 1840 1841 pr_debug("%s: stripe %llu\n", __func__, 1842 (unsigned long long)sh->sector); 1843 1844 sh->check_state = check_state_check_result; 1845 set_bit(STRIPE_HANDLE, &sh->state); 1846 release_stripe(sh); 1847 } 1848 1849 static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu) 1850 { 1851 int disks = sh->disks; 1852 int pd_idx = sh->pd_idx; 1853 int qd_idx = sh->qd_idx; 1854 struct page *xor_dest; 1855 struct page **xor_srcs = to_addr_page(percpu, 0); 1856 struct dma_async_tx_descriptor *tx; 1857 struct async_submit_ctl submit; 1858 int count; 1859 int i; 1860 1861 pr_debug("%s: stripe %llu\n", __func__, 1862 (unsigned long long)sh->sector); 1863 1864 BUG_ON(sh->batch_head); 1865 count = 0; 1866 xor_dest = sh->dev[pd_idx].page; 1867 xor_srcs[count++] = xor_dest; 1868 for (i = disks; i--; ) { 1869 if (i == pd_idx || i == qd_idx) 1870 continue; 1871 
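		/* every remaining data block is an XOR source for the parity check */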
xor_srcs[count++] = sh->dev[i].page; 1872 } 1873 1874 init_async_submit(&submit, 0, NULL, NULL, NULL, 1875 to_addr_conv(sh, percpu, 0)); 1876 tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 1877 &sh->ops.zero_sum_result, &submit); 1878 1879 atomic_inc(&sh->count); 1880 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL); 1881 tx = async_trigger_callback(&submit); 1882 } 1883 1884 static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp) 1885 { 1886 struct page **srcs = to_addr_page(percpu, 0); 1887 struct async_submit_ctl submit; 1888 int count; 1889 1890 pr_debug("%s: stripe %llu checkp: %d\n", __func__, 1891 (unsigned long long)sh->sector, checkp); 1892 1893 BUG_ON(sh->batch_head); 1894 count = set_syndrome_sources(srcs, sh, SYNDROME_SRC_ALL); 1895 if (!checkp) 1896 srcs[count] = NULL; 1897 1898 atomic_inc(&sh->count); 1899 init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check, 1900 sh, to_addr_conv(sh, percpu, 0)); 1901 async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE, 1902 &sh->ops.zero_sum_result, percpu->spare_page, &submit); 1903 } 1904 1905 static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) 1906 { 1907 int overlap_clear = 0, i, disks = sh->disks; 1908 struct dma_async_tx_descriptor *tx = NULL; 1909 struct r5conf *conf = sh->raid_conf; 1910 int level = conf->level; 1911 struct raid5_percpu *percpu; 1912 unsigned long cpu; 1913 1914 cpu = get_cpu(); 1915 percpu = per_cpu_ptr(conf->percpu, cpu); 1916 if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) { 1917 ops_run_biofill(sh); 1918 overlap_clear++; 1919 } 1920 1921 if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) { 1922 if (level < 6) 1923 tx = ops_run_compute5(sh, percpu); 1924 else { 1925 if (sh->ops.target2 < 0 || sh->ops.target < 0) 1926 tx = ops_run_compute6_1(sh, percpu); 1927 else 1928 tx = ops_run_compute6_2(sh, percpu); 1929 } 1930 /* terminate the chain if reconstruct is not set to be run */ 1931 if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) 1932 async_tx_ack(tx); 1933 } 1934 1935 if (test_bit(STRIPE_OP_PREXOR, &ops_request)) { 1936 if (level < 6) 1937 tx = ops_run_prexor5(sh, percpu, tx); 1938 else 1939 tx = ops_run_prexor6(sh, percpu, tx); 1940 } 1941 1942 if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) { 1943 tx = ops_run_biodrain(sh, tx); 1944 overlap_clear++; 1945 } 1946 1947 if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) { 1948 if (level < 6) 1949 ops_run_reconstruct5(sh, percpu, tx); 1950 else 1951 ops_run_reconstruct6(sh, percpu, tx); 1952 } 1953 1954 if (test_bit(STRIPE_OP_CHECK, &ops_request)) { 1955 if (sh->check_state == check_state_run) 1956 ops_run_check_p(sh, percpu); 1957 else if (sh->check_state == check_state_run_q) 1958 ops_run_check_pq(sh, percpu, 0); 1959 else if (sh->check_state == check_state_run_pq) 1960 ops_run_check_pq(sh, percpu, 1); 1961 else 1962 BUG(); 1963 } 1964 1965 if (overlap_clear && !sh->batch_head) 1966 for (i = disks; i--; ) { 1967 struct r5dev *dev = &sh->dev[i]; 1968 if (test_and_clear_bit(R5_Overlap, &dev->flags)) 1969 wake_up(&sh->raid_conf->wait_for_overlap); 1970 } 1971 put_cpu(); 1972 } 1973 1974 static int grow_one_stripe(struct r5conf *conf, gfp_t gfp) 1975 { 1976 struct stripe_head *sh; 1977 sh = kmem_cache_zalloc(conf->slab_cache, gfp); 1978 if (!sh) 1979 return 0; 1980 1981 sh->raid_conf = conf; 1982 1983 spin_lock_init(&sh->stripe_lock); 1984 1985 if (grow_buffers(sh, gfp)) { 1986 shrink_buffers(sh); 1987 kmem_cache_free(conf->slab_cache, sh); 1988 
return 0; 1989 } 1990 sh->hash_lock_index = 1991 conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS; 1992 /* we just created an active stripe so... */ 1993 atomic_set(&sh->count, 1); 1994 atomic_inc(&conf->active_stripes); 1995 INIT_LIST_HEAD(&sh->lru); 1996 1997 spin_lock_init(&sh->batch_lock); 1998 INIT_LIST_HEAD(&sh->batch_list); 1999 sh->batch_head = NULL; 2000 release_stripe(sh); 2001 conf->max_nr_stripes++; 2002 return 1; 2003 } 2004 2005 static int grow_stripes(struct r5conf *conf, int num) 2006 { 2007 struct kmem_cache *sc; 2008 int devs = max(conf->raid_disks, conf->previous_raid_disks); 2009 2010 if (conf->mddev->gendisk) 2011 sprintf(conf->cache_name[0], 2012 "raid%d-%s", conf->level, mdname(conf->mddev)); 2013 else 2014 sprintf(conf->cache_name[0], 2015 "raid%d-%p", conf->level, conf->mddev); 2016 sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]); 2017 2018 conf->active_name = 0; 2019 sc = kmem_cache_create(conf->cache_name[conf->active_name], 2020 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev), 2021 0, 0, NULL); 2022 if (!sc) 2023 return 1; 2024 conf->slab_cache = sc; 2025 conf->pool_size = devs; 2026 while (num--) 2027 if (!grow_one_stripe(conf, GFP_KERNEL)) 2028 return 1; 2029 2030 return 0; 2031 } 2032 2033 /** 2034 * scribble_len - return the required size of the scribble region 2035 * @num - total number of disks in the array 2036 * 2037 * The size must be enough to contain: 2038 * 1/ a struct page pointer for each device in the array +2 2039 * 2/ room to convert each entry in (1) to its corresponding dma 2040 * (dma_map_page()) or page (page_address()) address. 2041 * 2042 * Note: the +2 is for the destination buffers of the ddf/raid6 case where we 2043 * calculate over all devices (not just the data blocks), using zeros in place 2044 * of the P and Q blocks. 2045 */ 2046 static struct flex_array *scribble_alloc(int num, int cnt, gfp_t flags) 2047 { 2048 struct flex_array *ret; 2049 size_t len; 2050 2051 len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2); 2052 ret = flex_array_alloc(len, cnt, flags); 2053 if (!ret) 2054 return NULL; 2055 /* always prealloc all elements, so no locking is required */ 2056 if (flex_array_prealloc(ret, 0, cnt, flags)) { 2057 flex_array_free(ret); 2058 return NULL; 2059 } 2060 return ret; 2061 } 2062 2063 static int resize_stripes(struct r5conf *conf, int newsize) 2064 { 2065 /* Make all the stripes able to hold 'newsize' devices. 2066 * New slots in each stripe get 'page' set to a new page. 2067 * 2068 * This happens in stages: 2069 * 1/ create a new kmem_cache and allocate the required number of 2070 * stripe_heads. 2071 * 2/ gather all the old stripe_heads and transfer the pages across 2072 * to the new stripe_heads. This will have the side effect of 2073 * freezing the array as once all stripe_heads have been collected, 2074 * no IO will be possible. Old stripe heads are freed once their 2075 * pages have been transferred over, and the old kmem_cache is 2076 * freed when all stripes are done. 2077 * 3/ reallocate conf->disks to be suitably bigger. If this fails, 2078 * we simply return a failure status - no need to clean anything up. 2079 * 4/ allocate new pages for the new slots in the new stripe_heads. 2080 * If this fails, we don't bother trying to shrink the 2081 * stripe_heads down again, we just leave them as they are. 2082 * As each stripe_head is processed the new one is released into 2083 * active service.
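 * (Editor's illustration, not from the original comment: growing a
 *  4-device array to newsize == 5 allocates max_nr_stripes new
 *  stripe_heads with five r5dev slots each, moves the four existing
 *  pages of each old stripe_head across in step 2, and allocates the
 *  single missing page per stripe in step 4.)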
2084 * 2085 * Once step2 is started, we cannot afford to wait for a write, 2086 * so we use GFP_NOIO allocations. 2087 */ 2088 struct stripe_head *osh, *nsh; 2089 LIST_HEAD(newstripes); 2090 struct disk_info *ndisks; 2091 unsigned long cpu; 2092 int err; 2093 struct kmem_cache *sc; 2094 int i; 2095 int hash, cnt; 2096 2097 if (newsize <= conf->pool_size) 2098 return 0; /* never bother to shrink */ 2099 2100 err = md_allow_write(conf->mddev); 2101 if (err) 2102 return err; 2103 2104 /* Step 1 */ 2105 sc = kmem_cache_create(conf->cache_name[1-conf->active_name], 2106 sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev), 2107 0, 0, NULL); 2108 if (!sc) 2109 return -ENOMEM; 2110 2111 for (i = conf->max_nr_stripes; i; i--) { 2112 nsh = kmem_cache_zalloc(sc, GFP_KERNEL); 2113 if (!nsh) 2114 break; 2115 2116 nsh->raid_conf = conf; 2117 spin_lock_init(&nsh->stripe_lock); 2118 2119 list_add(&nsh->lru, &newstripes); 2120 } 2121 if (i) { 2122 /* didn't get enough, give up */ 2123 while (!list_empty(&newstripes)) { 2124 nsh = list_entry(newstripes.next, struct stripe_head, lru); 2125 list_del(&nsh->lru); 2126 kmem_cache_free(sc, nsh); 2127 } 2128 kmem_cache_destroy(sc); 2129 return -ENOMEM; 2130 } 2131 /* Step 2 - Must use GFP_NOIO now. 2132 * OK, we have enough stripes, start collecting inactive 2133 * stripes and copying them over 2134 */ 2135 hash = 0; 2136 cnt = 0; 2137 list_for_each_entry(nsh, &newstripes, lru) { 2138 lock_device_hash_lock(conf, hash); 2139 wait_event_cmd(conf->wait_for_stripe, 2140 !list_empty(conf->inactive_list + hash), 2141 unlock_device_hash_lock(conf, hash), 2142 lock_device_hash_lock(conf, hash)); 2143 osh = get_free_stripe(conf, hash); 2144 unlock_device_hash_lock(conf, hash); 2145 atomic_set(&nsh->count, 1); 2146 for(i=0; i<conf->pool_size; i++) { 2147 nsh->dev[i].page = osh->dev[i].page; 2148 nsh->dev[i].orig_page = osh->dev[i].page; 2149 } 2150 for( ; i<newsize; i++) 2151 nsh->dev[i].page = NULL; 2152 nsh->hash_lock_index = hash; 2153 kmem_cache_free(conf->slab_cache, osh); 2154 cnt++; 2155 if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS + 2156 !!((conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS) > hash)) { 2157 hash++; 2158 cnt = 0; 2159 } 2160 } 2161 kmem_cache_destroy(conf->slab_cache); 2162 2163 /* Step 3. 
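 * (Steps 3 and 4 below can still fail with -ENOMEM; the error is only
 *  recorded in 'err' and returned at the end, while the stripes that
 *  have already been converted stay in service.)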
2164 * At this point, we are holding all the stripes so the array 2165 * is completely stalled, so now is a good time to resize 2166 * conf->disks and the scribble region 2167 */ 2168 ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO); 2169 if (ndisks) { 2170 for (i=0; i<conf->raid_disks; i++) 2171 ndisks[i] = conf->disks[i]; 2172 kfree(conf->disks); 2173 conf->disks = ndisks; 2174 } else 2175 err = -ENOMEM; 2176 2177 get_online_cpus(); 2178 for_each_present_cpu(cpu) { 2179 struct raid5_percpu *percpu; 2180 struct flex_array *scribble; 2181 2182 percpu = per_cpu_ptr(conf->percpu, cpu); 2183 scribble = scribble_alloc(newsize, conf->chunk_sectors / 2184 STRIPE_SECTORS, GFP_NOIO); 2185 2186 if (scribble) { 2187 flex_array_free(percpu->scribble); 2188 percpu->scribble = scribble; 2189 } else { 2190 err = -ENOMEM; 2191 break; 2192 } 2193 } 2194 put_online_cpus(); 2195 2196 /* Step 4, return new stripes to service */ 2197 while(!list_empty(&newstripes)) { 2198 nsh = list_entry(newstripes.next, struct stripe_head, lru); 2199 list_del_init(&nsh->lru); 2200 2201 for (i=conf->raid_disks; i < newsize; i++) 2202 if (nsh->dev[i].page == NULL) { 2203 struct page *p = alloc_page(GFP_NOIO); 2204 nsh->dev[i].page = p; 2205 nsh->dev[i].orig_page = p; 2206 if (!p) 2207 err = -ENOMEM; 2208 } 2209 release_stripe(nsh); 2210 } 2211 /* critical section pass, GFP_NOIO no longer needed */ 2212 2213 conf->slab_cache = sc; 2214 conf->active_name = 1-conf->active_name; 2215 conf->pool_size = newsize; 2216 return err; 2217 } 2218 2219 static int drop_one_stripe(struct r5conf *conf) 2220 { 2221 struct stripe_head *sh; 2222 int hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS; 2223 2224 spin_lock_irq(conf->hash_locks + hash); 2225 sh = get_free_stripe(conf, hash); 2226 spin_unlock_irq(conf->hash_locks + hash); 2227 if (!sh) 2228 return 0; 2229 BUG_ON(atomic_read(&sh->count)); 2230 shrink_buffers(sh); 2231 kmem_cache_free(conf->slab_cache, sh); 2232 atomic_dec(&conf->active_stripes); 2233 conf->max_nr_stripes--; 2234 return 1; 2235 } 2236 2237 static void shrink_stripes(struct r5conf *conf) 2238 { 2239 while (conf->max_nr_stripes && 2240 drop_one_stripe(conf)) 2241 ; 2242 2243 if (conf->slab_cache) 2244 kmem_cache_destroy(conf->slab_cache); 2245 conf->slab_cache = NULL; 2246 } 2247 2248 static void raid5_end_read_request(struct bio * bi, int error) 2249 { 2250 struct stripe_head *sh = bi->bi_private; 2251 struct r5conf *conf = sh->raid_conf; 2252 int disks = sh->disks, i; 2253 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 2254 char b[BDEVNAME_SIZE]; 2255 struct md_rdev *rdev = NULL; 2256 sector_t s; 2257 2258 for (i=0 ; i<disks; i++) 2259 if (bi == &sh->dev[i].req) 2260 break; 2261 2262 pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n", 2263 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 2264 uptodate); 2265 if (i == disks) { 2266 BUG(); 2267 return; 2268 } 2269 if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) 2270 /* If replacement finished while this request was outstanding, 2271 * 'replacement' might be NULL already. 2272 * In that case it moved down to 'rdev'. 2273 * rdev is not removed until all requests are finished. 
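 * so when 'replacement' reads back as NULL here it is safe to fall
 * back to conf->disks[i].rdev just below.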
2274 */ 2275 rdev = conf->disks[i].replacement; 2276 if (!rdev) 2277 rdev = conf->disks[i].rdev; 2278 2279 if (use_new_offset(conf, sh)) 2280 s = sh->sector + rdev->new_data_offset; 2281 else 2282 s = sh->sector + rdev->data_offset; 2283 if (uptodate) { 2284 set_bit(R5_UPTODATE, &sh->dev[i].flags); 2285 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 2286 /* Note that this cannot happen on a 2287 * replacement device. We just fail those on 2288 * any error 2289 */ 2290 printk_ratelimited( 2291 KERN_INFO 2292 "md/raid:%s: read error corrected" 2293 " (%lu sectors at %llu on %s)\n", 2294 mdname(conf->mddev), STRIPE_SECTORS, 2295 (unsigned long long)s, 2296 bdevname(rdev->bdev, b)); 2297 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); 2298 clear_bit(R5_ReadError, &sh->dev[i].flags); 2299 clear_bit(R5_ReWrite, &sh->dev[i].flags); 2300 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) 2301 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); 2302 2303 if (atomic_read(&rdev->read_errors)) 2304 atomic_set(&rdev->read_errors, 0); 2305 } else { 2306 const char *bdn = bdevname(rdev->bdev, b); 2307 int retry = 0; 2308 int set_bad = 0; 2309 2310 clear_bit(R5_UPTODATE, &sh->dev[i].flags); 2311 atomic_inc(&rdev->read_errors); 2312 if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) 2313 printk_ratelimited( 2314 KERN_WARNING 2315 "md/raid:%s: read error on replacement device " 2316 "(sector %llu on %s).\n", 2317 mdname(conf->mddev), 2318 (unsigned long long)s, 2319 bdn); 2320 else if (conf->mddev->degraded >= conf->max_degraded) { 2321 set_bad = 1; 2322 printk_ratelimited( 2323 KERN_WARNING 2324 "md/raid:%s: read error not correctable " 2325 "(sector %llu on %s).\n", 2326 mdname(conf->mddev), 2327 (unsigned long long)s, 2328 bdn); 2329 } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) { 2330 /* Oh, no!!! */ 2331 set_bad = 1; 2332 printk_ratelimited( 2333 KERN_WARNING 2334 "md/raid:%s: read error NOT corrected!! 
" 2335 "(sector %llu on %s).\n", 2336 mdname(conf->mddev), 2337 (unsigned long long)s, 2338 bdn); 2339 } else if (atomic_read(&rdev->read_errors) 2340 > conf->max_nr_stripes) 2341 printk(KERN_WARNING 2342 "md/raid:%s: Too many read errors, failing device %s.\n", 2343 mdname(conf->mddev), bdn); 2344 else 2345 retry = 1; 2346 if (set_bad && test_bit(In_sync, &rdev->flags) 2347 && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) 2348 retry = 1; 2349 if (retry) 2350 if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) { 2351 set_bit(R5_ReadError, &sh->dev[i].flags); 2352 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); 2353 } else 2354 set_bit(R5_ReadNoMerge, &sh->dev[i].flags); 2355 else { 2356 clear_bit(R5_ReadError, &sh->dev[i].flags); 2357 clear_bit(R5_ReWrite, &sh->dev[i].flags); 2358 if (!(set_bad 2359 && test_bit(In_sync, &rdev->flags) 2360 && rdev_set_badblocks( 2361 rdev, sh->sector, STRIPE_SECTORS, 0))) 2362 md_error(conf->mddev, rdev); 2363 } 2364 } 2365 rdev_dec_pending(rdev, conf->mddev); 2366 clear_bit(R5_LOCKED, &sh->dev[i].flags); 2367 set_bit(STRIPE_HANDLE, &sh->state); 2368 release_stripe(sh); 2369 } 2370 2371 static void raid5_end_write_request(struct bio *bi, int error) 2372 { 2373 struct stripe_head *sh = bi->bi_private; 2374 struct r5conf *conf = sh->raid_conf; 2375 int disks = sh->disks, i; 2376 struct md_rdev *uninitialized_var(rdev); 2377 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 2378 sector_t first_bad; 2379 int bad_sectors; 2380 int replacement = 0; 2381 2382 for (i = 0 ; i < disks; i++) { 2383 if (bi == &sh->dev[i].req) { 2384 rdev = conf->disks[i].rdev; 2385 break; 2386 } 2387 if (bi == &sh->dev[i].rreq) { 2388 rdev = conf->disks[i].replacement; 2389 if (rdev) 2390 replacement = 1; 2391 else 2392 /* rdev was removed and 'replacement' 2393 * replaced it. rdev is not removed 2394 * until all requests are finished. 2395 */ 2396 rdev = conf->disks[i].rdev; 2397 break; 2398 } 2399 } 2400 pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n", 2401 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 2402 uptodate); 2403 if (i == disks) { 2404 BUG(); 2405 return; 2406 } 2407 2408 if (replacement) { 2409 if (!uptodate) 2410 md_error(conf->mddev, rdev); 2411 else if (is_badblock(rdev, sh->sector, 2412 STRIPE_SECTORS, 2413 &first_bad, &bad_sectors)) 2414 set_bit(R5_MadeGoodRepl, &sh->dev[i].flags); 2415 } else { 2416 if (!uptodate) { 2417 set_bit(STRIPE_DEGRADED, &sh->state); 2418 set_bit(WriteErrorSeen, &rdev->flags); 2419 set_bit(R5_WriteError, &sh->dev[i].flags); 2420 if (!test_and_set_bit(WantReplacement, &rdev->flags)) 2421 set_bit(MD_RECOVERY_NEEDED, 2422 &rdev->mddev->recovery); 2423 } else if (is_badblock(rdev, sh->sector, 2424 STRIPE_SECTORS, 2425 &first_bad, &bad_sectors)) { 2426 set_bit(R5_MadeGood, &sh->dev[i].flags); 2427 if (test_bit(R5_ReadError, &sh->dev[i].flags)) 2428 /* That was a successful write so make 2429 * sure it looks like we already did 2430 * a re-write. 
2431 */ 2432 set_bit(R5_ReWrite, &sh->dev[i].flags); 2433 } 2434 } 2435 rdev_dec_pending(rdev, conf->mddev); 2436 2437 if (sh->batch_head && !uptodate) 2438 set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state); 2439 2440 if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags)) 2441 clear_bit(R5_LOCKED, &sh->dev[i].flags); 2442 set_bit(STRIPE_HANDLE, &sh->state); 2443 release_stripe(sh); 2444 2445 if (sh->batch_head && sh != sh->batch_head) 2446 release_stripe(sh->batch_head); 2447 } 2448 2449 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous); 2450 2451 static void raid5_build_block(struct stripe_head *sh, int i, int previous) 2452 { 2453 struct r5dev *dev = &sh->dev[i]; 2454 2455 bio_init(&dev->req); 2456 dev->req.bi_io_vec = &dev->vec; 2457 dev->req.bi_max_vecs = 1; 2458 dev->req.bi_private = sh; 2459 2460 bio_init(&dev->rreq); 2461 dev->rreq.bi_io_vec = &dev->rvec; 2462 dev->rreq.bi_max_vecs = 1; 2463 dev->rreq.bi_private = sh; 2464 2465 dev->flags = 0; 2466 dev->sector = compute_blocknr(sh, i, previous); 2467 } 2468 2469 static void error(struct mddev *mddev, struct md_rdev *rdev) 2470 { 2471 char b[BDEVNAME_SIZE]; 2472 struct r5conf *conf = mddev->private; 2473 unsigned long flags; 2474 pr_debug("raid456: error called\n"); 2475 2476 spin_lock_irqsave(&conf->device_lock, flags); 2477 clear_bit(In_sync, &rdev->flags); 2478 mddev->degraded = calc_degraded(conf); 2479 spin_unlock_irqrestore(&conf->device_lock, flags); 2480 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 2481 2482 set_bit(Blocked, &rdev->flags); 2483 set_bit(Faulty, &rdev->flags); 2484 set_bit(MD_CHANGE_DEVS, &mddev->flags); 2485 printk(KERN_ALERT 2486 "md/raid:%s: Disk failure on %s, disabling device.\n" 2487 "md/raid:%s: Operation continuing on %d devices.\n", 2488 mdname(mddev), 2489 bdevname(rdev->bdev, b), 2490 mdname(mddev), 2491 conf->raid_disks - mddev->degraded); 2492 } 2493 2494 /* 2495 * Input: a 'big' sector number, 2496 * Output: index of the data and parity disk, and the sector # in them. 2497 */ 2498 static sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector, 2499 int previous, int *dd_idx, 2500 struct stripe_head *sh) 2501 { 2502 sector_t stripe, stripe2; 2503 sector_t chunk_number; 2504 unsigned int chunk_offset; 2505 int pd_idx, qd_idx; 2506 int ddf_layout = 0; 2507 sector_t new_sector; 2508 int algorithm = previous ? conf->prev_algo 2509 : conf->algorithm; 2510 int sectors_per_chunk = previous ? conf->prev_chunk_sectors 2511 : conf->chunk_sectors; 2512 int raid_disks = previous ? conf->previous_raid_disks 2513 : conf->raid_disks; 2514 int data_disks = raid_disks - conf->max_degraded; 2515 2516 /* First compute the information on this sector */ 2517 2518 /* 2519 * Compute the chunk number and the sector offset inside the chunk 2520 */ 2521 chunk_offset = sector_div(r_sector, sectors_per_chunk); 2522 chunk_number = r_sector; 2523 2524 /* 2525 * Compute the stripe number 2526 */ 2527 stripe = chunk_number; 2528 *dd_idx = sector_div(stripe, data_disks); 2529 stripe2 = stripe; 2530 /* 2531 * Select the parity disk based on the user selected algorithm. 
2532 */ 2533 pd_idx = qd_idx = -1; 2534 switch(conf->level) { 2535 case 4: 2536 pd_idx = data_disks; 2537 break; 2538 case 5: 2539 switch (algorithm) { 2540 case ALGORITHM_LEFT_ASYMMETRIC: 2541 pd_idx = data_disks - sector_div(stripe2, raid_disks); 2542 if (*dd_idx >= pd_idx) 2543 (*dd_idx)++; 2544 break; 2545 case ALGORITHM_RIGHT_ASYMMETRIC: 2546 pd_idx = sector_div(stripe2, raid_disks); 2547 if (*dd_idx >= pd_idx) 2548 (*dd_idx)++; 2549 break; 2550 case ALGORITHM_LEFT_SYMMETRIC: 2551 pd_idx = data_disks - sector_div(stripe2, raid_disks); 2552 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 2553 break; 2554 case ALGORITHM_RIGHT_SYMMETRIC: 2555 pd_idx = sector_div(stripe2, raid_disks); 2556 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 2557 break; 2558 case ALGORITHM_PARITY_0: 2559 pd_idx = 0; 2560 (*dd_idx)++; 2561 break; 2562 case ALGORITHM_PARITY_N: 2563 pd_idx = data_disks; 2564 break; 2565 default: 2566 BUG(); 2567 } 2568 break; 2569 case 6: 2570 2571 switch (algorithm) { 2572 case ALGORITHM_LEFT_ASYMMETRIC: 2573 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 2574 qd_idx = pd_idx + 1; 2575 if (pd_idx == raid_disks-1) { 2576 (*dd_idx)++; /* Q D D D P */ 2577 qd_idx = 0; 2578 } else if (*dd_idx >= pd_idx) 2579 (*dd_idx) += 2; /* D D P Q D */ 2580 break; 2581 case ALGORITHM_RIGHT_ASYMMETRIC: 2582 pd_idx = sector_div(stripe2, raid_disks); 2583 qd_idx = pd_idx + 1; 2584 if (pd_idx == raid_disks-1) { 2585 (*dd_idx)++; /* Q D D D P */ 2586 qd_idx = 0; 2587 } else if (*dd_idx >= pd_idx) 2588 (*dd_idx) += 2; /* D D P Q D */ 2589 break; 2590 case ALGORITHM_LEFT_SYMMETRIC: 2591 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 2592 qd_idx = (pd_idx + 1) % raid_disks; 2593 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; 2594 break; 2595 case ALGORITHM_RIGHT_SYMMETRIC: 2596 pd_idx = sector_div(stripe2, raid_disks); 2597 qd_idx = (pd_idx + 1) % raid_disks; 2598 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; 2599 break; 2600 2601 case ALGORITHM_PARITY_0: 2602 pd_idx = 0; 2603 qd_idx = 1; 2604 (*dd_idx) += 2; 2605 break; 2606 case ALGORITHM_PARITY_N: 2607 pd_idx = data_disks; 2608 qd_idx = data_disks + 1; 2609 break; 2610 2611 case ALGORITHM_ROTATING_ZERO_RESTART: 2612 /* Exactly the same as RIGHT_ASYMMETRIC, but or 2613 * of blocks for computing Q is different. 
2614 */ 2615 pd_idx = sector_div(stripe2, raid_disks); 2616 qd_idx = pd_idx + 1; 2617 if (pd_idx == raid_disks-1) { 2618 (*dd_idx)++; /* Q D D D P */ 2619 qd_idx = 0; 2620 } else if (*dd_idx >= pd_idx) 2621 (*dd_idx) += 2; /* D D P Q D */ 2622 ddf_layout = 1; 2623 break; 2624 2625 case ALGORITHM_ROTATING_N_RESTART: 2626 /* Same a left_asymmetric, by first stripe is 2627 * D D D P Q rather than 2628 * Q D D D P 2629 */ 2630 stripe2 += 1; 2631 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 2632 qd_idx = pd_idx + 1; 2633 if (pd_idx == raid_disks-1) { 2634 (*dd_idx)++; /* Q D D D P */ 2635 qd_idx = 0; 2636 } else if (*dd_idx >= pd_idx) 2637 (*dd_idx) += 2; /* D D P Q D */ 2638 ddf_layout = 1; 2639 break; 2640 2641 case ALGORITHM_ROTATING_N_CONTINUE: 2642 /* Same as left_symmetric but Q is before P */ 2643 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 2644 qd_idx = (pd_idx + raid_disks - 1) % raid_disks; 2645 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 2646 ddf_layout = 1; 2647 break; 2648 2649 case ALGORITHM_LEFT_ASYMMETRIC_6: 2650 /* RAID5 left_asymmetric, with Q on last device */ 2651 pd_idx = data_disks - sector_div(stripe2, raid_disks-1); 2652 if (*dd_idx >= pd_idx) 2653 (*dd_idx)++; 2654 qd_idx = raid_disks - 1; 2655 break; 2656 2657 case ALGORITHM_RIGHT_ASYMMETRIC_6: 2658 pd_idx = sector_div(stripe2, raid_disks-1); 2659 if (*dd_idx >= pd_idx) 2660 (*dd_idx)++; 2661 qd_idx = raid_disks - 1; 2662 break; 2663 2664 case ALGORITHM_LEFT_SYMMETRIC_6: 2665 pd_idx = data_disks - sector_div(stripe2, raid_disks-1); 2666 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); 2667 qd_idx = raid_disks - 1; 2668 break; 2669 2670 case ALGORITHM_RIGHT_SYMMETRIC_6: 2671 pd_idx = sector_div(stripe2, raid_disks-1); 2672 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); 2673 qd_idx = raid_disks - 1; 2674 break; 2675 2676 case ALGORITHM_PARITY_0_6: 2677 pd_idx = 0; 2678 (*dd_idx)++; 2679 qd_idx = raid_disks - 1; 2680 break; 2681 2682 default: 2683 BUG(); 2684 } 2685 break; 2686 } 2687 2688 if (sh) { 2689 sh->pd_idx = pd_idx; 2690 sh->qd_idx = qd_idx; 2691 sh->ddf_layout = ddf_layout; 2692 } 2693 /* 2694 * Finally, compute the new sector number 2695 */ 2696 new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset; 2697 return new_sector; 2698 } 2699 2700 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) 2701 { 2702 struct r5conf *conf = sh->raid_conf; 2703 int raid_disks = sh->disks; 2704 int data_disks = raid_disks - conf->max_degraded; 2705 sector_t new_sector = sh->sector, check; 2706 int sectors_per_chunk = previous ? conf->prev_chunk_sectors 2707 : conf->chunk_sectors; 2708 int algorithm = previous ? 
conf->prev_algo 2709 : conf->algorithm; 2710 sector_t stripe; 2711 int chunk_offset; 2712 sector_t chunk_number; 2713 int dummy1, dd_idx = i; 2714 sector_t r_sector; 2715 struct stripe_head sh2; 2716 2717 chunk_offset = sector_div(new_sector, sectors_per_chunk); 2718 stripe = new_sector; 2719 2720 if (i == sh->pd_idx) 2721 return 0; 2722 switch(conf->level) { 2723 case 4: break; 2724 case 5: 2725 switch (algorithm) { 2726 case ALGORITHM_LEFT_ASYMMETRIC: 2727 case ALGORITHM_RIGHT_ASYMMETRIC: 2728 if (i > sh->pd_idx) 2729 i--; 2730 break; 2731 case ALGORITHM_LEFT_SYMMETRIC: 2732 case ALGORITHM_RIGHT_SYMMETRIC: 2733 if (i < sh->pd_idx) 2734 i += raid_disks; 2735 i -= (sh->pd_idx + 1); 2736 break; 2737 case ALGORITHM_PARITY_0: 2738 i -= 1; 2739 break; 2740 case ALGORITHM_PARITY_N: 2741 break; 2742 default: 2743 BUG(); 2744 } 2745 break; 2746 case 6: 2747 if (i == sh->qd_idx) 2748 return 0; /* It is the Q disk */ 2749 switch (algorithm) { 2750 case ALGORITHM_LEFT_ASYMMETRIC: 2751 case ALGORITHM_RIGHT_ASYMMETRIC: 2752 case ALGORITHM_ROTATING_ZERO_RESTART: 2753 case ALGORITHM_ROTATING_N_RESTART: 2754 if (sh->pd_idx == raid_disks-1) 2755 i--; /* Q D D D P */ 2756 else if (i > sh->pd_idx) 2757 i -= 2; /* D D P Q D */ 2758 break; 2759 case ALGORITHM_LEFT_SYMMETRIC: 2760 case ALGORITHM_RIGHT_SYMMETRIC: 2761 if (sh->pd_idx == raid_disks-1) 2762 i--; /* Q D D D P */ 2763 else { 2764 /* D D P Q D */ 2765 if (i < sh->pd_idx) 2766 i += raid_disks; 2767 i -= (sh->pd_idx + 2); 2768 } 2769 break; 2770 case ALGORITHM_PARITY_0: 2771 i -= 2; 2772 break; 2773 case ALGORITHM_PARITY_N: 2774 break; 2775 case ALGORITHM_ROTATING_N_CONTINUE: 2776 /* Like left_symmetric, but P is before Q */ 2777 if (sh->pd_idx == 0) 2778 i--; /* P D D D Q */ 2779 else { 2780 /* D D Q P D */ 2781 if (i < sh->pd_idx) 2782 i += raid_disks; 2783 i -= (sh->pd_idx + 1); 2784 } 2785 break; 2786 case ALGORITHM_LEFT_ASYMMETRIC_6: 2787 case ALGORITHM_RIGHT_ASYMMETRIC_6: 2788 if (i > sh->pd_idx) 2789 i--; 2790 break; 2791 case ALGORITHM_LEFT_SYMMETRIC_6: 2792 case ALGORITHM_RIGHT_SYMMETRIC_6: 2793 if (i < sh->pd_idx) 2794 i += data_disks + 1; 2795 i -= (sh->pd_idx + 1); 2796 break; 2797 case ALGORITHM_PARITY_0_6: 2798 i -= 1; 2799 break; 2800 default: 2801 BUG(); 2802 } 2803 break; 2804 } 2805 2806 chunk_number = stripe * data_disks + i; 2807 r_sector = chunk_number * sectors_per_chunk + chunk_offset; 2808 2809 check = raid5_compute_sector(conf, r_sector, 2810 previous, &dummy1, &sh2); 2811 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx 2812 || sh2.qd_idx != sh->qd_idx) { 2813 printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n", 2814 mdname(conf->mddev)); 2815 return 0; 2816 } 2817 return r_sector; 2818 } 2819 2820 static void 2821 schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, 2822 int rcw, int expand) 2823 { 2824 int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx, disks = sh->disks; 2825 struct r5conf *conf = sh->raid_conf; 2826 int level = conf->level; 2827 2828 if (rcw) { 2829 2830 for (i = disks; i--; ) { 2831 struct r5dev *dev = &sh->dev[i]; 2832 2833 if (dev->towrite) { 2834 set_bit(R5_LOCKED, &dev->flags); 2835 set_bit(R5_Wantdrain, &dev->flags); 2836 if (!expand) 2837 clear_bit(R5_UPTODATE, &dev->flags); 2838 s->locked++; 2839 } 2840 } 2841 /* if we are not expanding this is a proper write request, and 2842 * there will be bios with new data to be drained into the 2843 * stripe cache 2844 */ 2845 if (!expand) { 2846 if (!s->locked) 2847 /* False alarm, nothing to do */ 
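 * (no block had a pending write to drain, so there is no reconstruct
 *  work to schedule for this stripe)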
2848 return; 2849 sh->reconstruct_state = reconstruct_state_drain_run; 2850 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); 2851 } else 2852 sh->reconstruct_state = reconstruct_state_run; 2853 2854 set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); 2855 2856 if (s->locked + conf->max_degraded == disks) 2857 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) 2858 atomic_inc(&conf->pending_full_writes); 2859 } else { 2860 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || 2861 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); 2862 BUG_ON(level == 6 && 2863 (!(test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags) || 2864 test_bit(R5_Wantcompute, &sh->dev[qd_idx].flags)))); 2865 2866 for (i = disks; i--; ) { 2867 struct r5dev *dev = &sh->dev[i]; 2868 if (i == pd_idx || i == qd_idx) 2869 continue; 2870 2871 if (dev->towrite && 2872 (test_bit(R5_UPTODATE, &dev->flags) || 2873 test_bit(R5_Wantcompute, &dev->flags))) { 2874 set_bit(R5_Wantdrain, &dev->flags); 2875 set_bit(R5_LOCKED, &dev->flags); 2876 clear_bit(R5_UPTODATE, &dev->flags); 2877 s->locked++; 2878 } 2879 } 2880 if (!s->locked) 2881 /* False alarm - nothing to do */ 2882 return; 2883 sh->reconstruct_state = reconstruct_state_prexor_drain_run; 2884 set_bit(STRIPE_OP_PREXOR, &s->ops_request); 2885 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); 2886 set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); 2887 } 2888 2889 /* keep the parity disk(s) locked while asynchronous operations 2890 * are in flight 2891 */ 2892 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); 2893 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 2894 s->locked++; 2895 2896 if (level == 6) { 2897 int qd_idx = sh->qd_idx; 2898 struct r5dev *dev = &sh->dev[qd_idx]; 2899 2900 set_bit(R5_LOCKED, &dev->flags); 2901 clear_bit(R5_UPTODATE, &dev->flags); 2902 s->locked++; 2903 } 2904 2905 pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n", 2906 __func__, (unsigned long long)sh->sector, 2907 s->locked, s->ops_request); 2908 } 2909 2910 /* 2911 * Each stripe/dev can have one or more bion attached. 2912 * toread/towrite point to the first in a chain. 2913 * The bi_next chain must be in order. 2914 */ 2915 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, 2916 int forwrite, int previous) 2917 { 2918 struct bio **bip; 2919 struct r5conf *conf = sh->raid_conf; 2920 int firstwrite=0; 2921 2922 pr_debug("adding bi b#%llu to stripe s#%llu\n", 2923 (unsigned long long)bi->bi_iter.bi_sector, 2924 (unsigned long long)sh->sector); 2925 2926 /* 2927 * If several bio share a stripe. The bio bi_phys_segments acts as a 2928 * reference count to avoid race. The reference count should already be 2929 * increased before this function is called (for example, in 2930 * make_request()), so other bio sharing this stripe will not free the 2931 * stripe. If a stripe is owned by one stripe, the stripe lock will 2932 * protect it. 
2933 */ 2934 spin_lock_irq(&sh->stripe_lock); 2935 /* Don't allow new IO added to stripes in batch list */ 2936 if (sh->batch_head) 2937 goto overlap; 2938 if (forwrite) { 2939 bip = &sh->dev[dd_idx].towrite; 2940 if (*bip == NULL) 2941 firstwrite = 1; 2942 } else 2943 bip = &sh->dev[dd_idx].toread; 2944 while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) { 2945 if (bio_end_sector(*bip) > bi->bi_iter.bi_sector) 2946 goto overlap; 2947 bip = & (*bip)->bi_next; 2948 } 2949 if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi)) 2950 goto overlap; 2951 2952 if (!forwrite || previous) 2953 clear_bit(STRIPE_BATCH_READY, &sh->state); 2954 2955 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next); 2956 if (*bip) 2957 bi->bi_next = *bip; 2958 *bip = bi; 2959 raid5_inc_bi_active_stripes(bi); 2960 2961 if (forwrite) { 2962 /* check if page is covered */ 2963 sector_t sector = sh->dev[dd_idx].sector; 2964 for (bi=sh->dev[dd_idx].towrite; 2965 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && 2966 bi && bi->bi_iter.bi_sector <= sector; 2967 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { 2968 if (bio_end_sector(bi) >= sector) 2969 sector = bio_end_sector(bi); 2970 } 2971 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) 2972 if (!test_and_set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags)) 2973 sh->overwrite_disks++; 2974 } 2975 2976 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", 2977 (unsigned long long)(*bip)->bi_iter.bi_sector, 2978 (unsigned long long)sh->sector, dd_idx); 2979 spin_unlock_irq(&sh->stripe_lock); 2980 2981 if (conf->mddev->bitmap && firstwrite) { 2982 bitmap_startwrite(conf->mddev->bitmap, sh->sector, 2983 STRIPE_SECTORS, 0); 2984 sh->bm_seq = conf->seq_flush+1; 2985 set_bit(STRIPE_BIT_DELAY, &sh->state); 2986 } 2987 2988 if (stripe_can_batch(sh)) 2989 stripe_add_to_batch_list(conf, sh); 2990 return 1; 2991 2992 overlap: 2993 set_bit(R5_Overlap, &sh->dev[dd_idx].flags); 2994 spin_unlock_irq(&sh->stripe_lock); 2995 return 0; 2996 } 2997 2998 static void end_reshape(struct r5conf *conf); 2999 3000 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, 3001 struct stripe_head *sh) 3002 { 3003 int sectors_per_chunk = 3004 previous ? conf->prev_chunk_sectors : conf->chunk_sectors; 3005 int dd_idx; 3006 int chunk_offset = sector_div(stripe, sectors_per_chunk); 3007 int disks = previous ? 
conf->previous_raid_disks : conf->raid_disks; 3008 3009 raid5_compute_sector(conf, 3010 stripe * (disks - conf->max_degraded) 3011 *sectors_per_chunk + chunk_offset, 3012 previous, 3013 &dd_idx, sh); 3014 } 3015 3016 static void 3017 handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, 3018 struct stripe_head_state *s, int disks, 3019 struct bio **return_bi) 3020 { 3021 int i; 3022 BUG_ON(sh->batch_head); 3023 for (i = disks; i--; ) { 3024 struct bio *bi; 3025 int bitmap_end = 0; 3026 3027 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 3028 struct md_rdev *rdev; 3029 rcu_read_lock(); 3030 rdev = rcu_dereference(conf->disks[i].rdev); 3031 if (rdev && test_bit(In_sync, &rdev->flags)) 3032 atomic_inc(&rdev->nr_pending); 3033 else 3034 rdev = NULL; 3035 rcu_read_unlock(); 3036 if (rdev) { 3037 if (!rdev_set_badblocks( 3038 rdev, 3039 sh->sector, 3040 STRIPE_SECTORS, 0)) 3041 md_error(conf->mddev, rdev); 3042 rdev_dec_pending(rdev, conf->mddev); 3043 } 3044 } 3045 spin_lock_irq(&sh->stripe_lock); 3046 /* fail all writes first */ 3047 bi = sh->dev[i].towrite; 3048 sh->dev[i].towrite = NULL; 3049 sh->overwrite_disks = 0; 3050 spin_unlock_irq(&sh->stripe_lock); 3051 if (bi) 3052 bitmap_end = 1; 3053 3054 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 3055 wake_up(&conf->wait_for_overlap); 3056 3057 while (bi && bi->bi_iter.bi_sector < 3058 sh->dev[i].sector + STRIPE_SECTORS) { 3059 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 3060 clear_bit(BIO_UPTODATE, &bi->bi_flags); 3061 if (!raid5_dec_bi_active_stripes(bi)) { 3062 md_write_end(conf->mddev); 3063 bi->bi_next = *return_bi; 3064 *return_bi = bi; 3065 } 3066 bi = nextbi; 3067 } 3068 if (bitmap_end) 3069 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 3070 STRIPE_SECTORS, 0, 0); 3071 bitmap_end = 0; 3072 /* and fail all 'written' */ 3073 bi = sh->dev[i].written; 3074 sh->dev[i].written = NULL; 3075 if (test_and_clear_bit(R5_SkipCopy, &sh->dev[i].flags)) { 3076 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); 3077 sh->dev[i].page = sh->dev[i].orig_page; 3078 } 3079 3080 if (bi) bitmap_end = 1; 3081 while (bi && bi->bi_iter.bi_sector < 3082 sh->dev[i].sector + STRIPE_SECTORS) { 3083 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); 3084 clear_bit(BIO_UPTODATE, &bi->bi_flags); 3085 if (!raid5_dec_bi_active_stripes(bi)) { 3086 md_write_end(conf->mddev); 3087 bi->bi_next = *return_bi; 3088 *return_bi = bi; 3089 } 3090 bi = bi2; 3091 } 3092 3093 /* fail any reads if this device is non-operational and 3094 * the data has not reached the cache yet. 
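 * (each such bio gets BIO_UPTODATE cleared and is handed back to the
 * caller on the return_bi list below)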
3095 */ 3096 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && 3097 (!test_bit(R5_Insync, &sh->dev[i].flags) || 3098 test_bit(R5_ReadError, &sh->dev[i].flags))) { 3099 spin_lock_irq(&sh->stripe_lock); 3100 bi = sh->dev[i].toread; 3101 sh->dev[i].toread = NULL; 3102 spin_unlock_irq(&sh->stripe_lock); 3103 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 3104 wake_up(&conf->wait_for_overlap); 3105 while (bi && bi->bi_iter.bi_sector < 3106 sh->dev[i].sector + STRIPE_SECTORS) { 3107 struct bio *nextbi = 3108 r5_next_bio(bi, sh->dev[i].sector); 3109 clear_bit(BIO_UPTODATE, &bi->bi_flags); 3110 if (!raid5_dec_bi_active_stripes(bi)) { 3111 bi->bi_next = *return_bi; 3112 *return_bi = bi; 3113 } 3114 bi = nextbi; 3115 } 3116 } 3117 if (bitmap_end) 3118 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 3119 STRIPE_SECTORS, 0, 0); 3120 /* If we were in the middle of a write the parity block might 3121 * still be locked - so just clear all R5_LOCKED flags 3122 */ 3123 clear_bit(R5_LOCKED, &sh->dev[i].flags); 3124 } 3125 3126 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 3127 if (atomic_dec_and_test(&conf->pending_full_writes)) 3128 md_wakeup_thread(conf->mddev->thread); 3129 } 3130 3131 static void 3132 handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, 3133 struct stripe_head_state *s) 3134 { 3135 int abort = 0; 3136 int i; 3137 3138 BUG_ON(sh->batch_head); 3139 clear_bit(STRIPE_SYNCING, &sh->state); 3140 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) 3141 wake_up(&conf->wait_for_overlap); 3142 s->syncing = 0; 3143 s->replacing = 0; 3144 /* There is nothing more to do for sync/check/repair. 3145 * Don't even need to abort as that is handled elsewhere 3146 * if needed, and not always wanted e.g. if there is a known 3147 * bad block here. 3148 * For recover/replace we need to record a bad block on all 3149 * non-sync devices, or abort the recovery 3150 */ 3151 if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) { 3152 /* During recovery devices cannot be removed, so 3153 * locking and refcounting of rdevs is not needed 3154 */ 3155 for (i = 0; i < conf->raid_disks; i++) { 3156 struct md_rdev *rdev = conf->disks[i].rdev; 3157 if (rdev 3158 && !test_bit(Faulty, &rdev->flags) 3159 && !test_bit(In_sync, &rdev->flags) 3160 && !rdev_set_badblocks(rdev, sh->sector, 3161 STRIPE_SECTORS, 0)) 3162 abort = 1; 3163 rdev = conf->disks[i].replacement; 3164 if (rdev 3165 && !test_bit(Faulty, &rdev->flags) 3166 && !test_bit(In_sync, &rdev->flags) 3167 && !rdev_set_badblocks(rdev, sh->sector, 3168 STRIPE_SECTORS, 0)) 3169 abort = 1; 3170 } 3171 if (abort) 3172 conf->recovery_disabled = 3173 conf->mddev->recovery_disabled; 3174 } 3175 md_done_sync(conf->mddev, STRIPE_SECTORS, !abort); 3176 } 3177 3178 static int want_replace(struct stripe_head *sh, int disk_idx) 3179 { 3180 struct md_rdev *rdev; 3181 int rv = 0; 3182 /* Doing recovery so rcu locking not required */ 3183 rdev = sh->raid_conf->disks[disk_idx].replacement; 3184 if (rdev 3185 && !test_bit(Faulty, &rdev->flags) 3186 && !test_bit(In_sync, &rdev->flags) 3187 && (rdev->recovery_offset <= sh->sector 3188 || rdev->mddev->recovery_cp <= sh->sector)) 3189 rv = 1; 3190 3191 return rv; 3192 } 3193 3194 /* fetch_block - checks the given member device to see if its data needs 3195 * to be read or computed to satisfy a request. 
3196 * 3197 * Returns 1 when no more member devices need to be checked, otherwise returns 3198 * 0 to tell the loop in handle_stripe_fill to continue 3199 */ 3200 3201 static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, 3202 int disk_idx, int disks) 3203 { 3204 struct r5dev *dev = &sh->dev[disk_idx]; 3205 struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]], 3206 &sh->dev[s->failed_num[1]] }; 3207 int i; 3208 3209 3210 if (test_bit(R5_LOCKED, &dev->flags) || 3211 test_bit(R5_UPTODATE, &dev->flags)) 3212 /* No point reading this as we already have it or have 3213 * decided to get it. 3214 */ 3215 return 0; 3216 3217 if (dev->toread || 3218 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags))) 3219 /* We need this block to directly satisfy a request */ 3220 return 1; 3221 3222 if (s->syncing || s->expanding || 3223 (s->replacing && want_replace(sh, disk_idx))) 3224 /* When syncing, or expanding we read everything. 3225 * When replacing, we need the replaced block. 3226 */ 3227 return 1; 3228 3229 if ((s->failed >= 1 && fdev[0]->toread) || 3230 (s->failed >= 2 && fdev[1]->toread)) 3231 /* If we want to read from a failed device, then 3232 * we need to actually read every other device. 3233 */ 3234 return 1; 3235 3236 /* Sometimes neither read-modify-write nor reconstruct-write 3237 * cycles can work. In those cases we read every block we 3238 * can. Then the parity-update is certain to have enough to 3239 * work with. 3240 * This can only be a problem when we need to write something, 3241 * and some device has failed. If either of those tests 3242 * fails, we need look no further. 3243 */ 3244 if (!s->failed || !s->to_write) 3245 return 0; 3246 3247 if (test_bit(R5_Insync, &dev->flags) && 3248 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 3249 /* Pre-reads are not permitted until after a short delay 3250 * to gather multiple requests. However if this 3251 * device is not Insync, the block could only be computed 3252 * and there is no need to delay that. 3253 */ 3254 return 0; 3255 3256 for (i = 0; i < s->failed; i++) { 3257 if (fdev[i]->towrite && 3258 !test_bit(R5_UPTODATE, &fdev[i]->flags) && 3259 !test_bit(R5_OVERWRITE, &fdev[i]->flags)) 3260 /* If we have a partial write to a failed 3261 * device, then we will need to reconstruct 3262 * the content of that device, so all other 3263 * devices must be read. 3264 */ 3265 return 1; 3266 } 3267 3268 /* If we are forced to do a reconstruct-write, either because 3269 * the current RAID6 implementation only supports that, 3270 * or because parity cannot be trusted and we are currently 3271 * recovering it, there is extra need to be careful. 3272 * If one of the devices that we would need to read, because 3273 * it is not being overwritten (and maybe not written at all) 3274 * is missing/faulty, then we need to read everything we can. 3275 */ 3276 if (sh->raid_conf->level != 6 && 3277 sh->sector < sh->raid_conf->mddev->recovery_cp) 3278 /* reconstruct-write isn't being forced */ 3279 return 0; 3280 for (i = 0; i < s->failed; i++) { 3281 if (!test_bit(R5_UPTODATE, &fdev[i]->flags) && 3282 !test_bit(R5_OVERWRITE, &fdev[i]->flags)) 3283 return 1; 3284 } 3285 3286 return 0; 3287 } 3288 3289 static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, 3290 int disk_idx, int disks) 3291 { 3292 struct r5dev *dev = &sh->dev[disk_idx]; 3293 3294 /* is the data in this block needed, and can we get it? 
*/ 3295 if (need_this_block(sh, s, disk_idx, disks)) { 3296 /* we would like to get this block, possibly by computing it, 3297 * otherwise read it if the backing disk is insync 3298 */ 3299 BUG_ON(test_bit(R5_Wantcompute, &dev->flags)); 3300 BUG_ON(test_bit(R5_Wantread, &dev->flags)); 3301 if ((s->uptodate == disks - 1) && 3302 (s->failed && (disk_idx == s->failed_num[0] || 3303 disk_idx == s->failed_num[1]))) { 3304 /* have disk failed, and we're requested to fetch it; 3305 * do compute it 3306 */ 3307 pr_debug("Computing stripe %llu block %d\n", 3308 (unsigned long long)sh->sector, disk_idx); 3309 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 3310 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 3311 set_bit(R5_Wantcompute, &dev->flags); 3312 sh->ops.target = disk_idx; 3313 sh->ops.target2 = -1; /* no 2nd target */ 3314 s->req_compute = 1; 3315 /* Careful: from this point on 'uptodate' is in the eye 3316 * of raid_run_ops which services 'compute' operations 3317 * before writes. R5_Wantcompute flags a block that will 3318 * be R5_UPTODATE by the time it is needed for a 3319 * subsequent operation. 3320 */ 3321 s->uptodate++; 3322 return 1; 3323 } else if (s->uptodate == disks-2 && s->failed >= 2) { 3324 /* Computing 2-failure is *very* expensive; only 3325 * do it if failed >= 2 3326 */ 3327 int other; 3328 for (other = disks; other--; ) { 3329 if (other == disk_idx) 3330 continue; 3331 if (!test_bit(R5_UPTODATE, 3332 &sh->dev[other].flags)) 3333 break; 3334 } 3335 BUG_ON(other < 0); 3336 pr_debug("Computing stripe %llu blocks %d,%d\n", 3337 (unsigned long long)sh->sector, 3338 disk_idx, other); 3339 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 3340 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 3341 set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags); 3342 set_bit(R5_Wantcompute, &sh->dev[other].flags); 3343 sh->ops.target = disk_idx; 3344 sh->ops.target2 = other; 3345 s->uptodate += 2; 3346 s->req_compute = 1; 3347 return 1; 3348 } else if (test_bit(R5_Insync, &dev->flags)) { 3349 set_bit(R5_LOCKED, &dev->flags); 3350 set_bit(R5_Wantread, &dev->flags); 3351 s->locked++; 3352 pr_debug("Reading block %d (sync=%d)\n", 3353 disk_idx, s->syncing); 3354 } 3355 } 3356 3357 return 0; 3358 } 3359 3360 /** 3361 * handle_stripe_fill - read or compute data to satisfy pending requests. 3362 */ 3363 static void handle_stripe_fill(struct stripe_head *sh, 3364 struct stripe_head_state *s, 3365 int disks) 3366 { 3367 int i; 3368 3369 BUG_ON(sh->batch_head); 3370 /* look for blocks to read/compute, skip this if a compute 3371 * is already in flight, or if the stripe contents are in the 3372 * midst of changing due to a write 3373 */ 3374 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && 3375 !sh->reconstruct_state) 3376 for (i = disks; i--; ) 3377 if (fetch_block(sh, s, i, disks)) 3378 break; 3379 set_bit(STRIPE_HANDLE, &sh->state); 3380 } 3381 3382 /* handle_stripe_clean_event 3383 * any written block on an uptodate or failed drive can be returned. 3384 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but 3385 * never LOCKED, so we don't need to test 'failed' directly. 
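 * (hence the per-device test below only needs !R5_LOCKED together with
 * R5_UPTODATE, R5_Discard or R5_SkipCopy to return the write)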
3386 */ 3387 static void handle_stripe_clean_event(struct r5conf *conf, 3388 struct stripe_head *sh, int disks, struct bio **return_bi) 3389 { 3390 int i; 3391 struct r5dev *dev; 3392 int discard_pending = 0; 3393 struct stripe_head *head_sh = sh; 3394 bool do_endio = false; 3395 int wakeup_nr = 0; 3396 3397 for (i = disks; i--; ) 3398 if (sh->dev[i].written) { 3399 dev = &sh->dev[i]; 3400 if (!test_bit(R5_LOCKED, &dev->flags) && 3401 (test_bit(R5_UPTODATE, &dev->flags) || 3402 test_bit(R5_Discard, &dev->flags) || 3403 test_bit(R5_SkipCopy, &dev->flags))) { 3404 /* We can return any write requests */ 3405 struct bio *wbi, *wbi2; 3406 pr_debug("Return write for disc %d\n", i); 3407 if (test_and_clear_bit(R5_Discard, &dev->flags)) 3408 clear_bit(R5_UPTODATE, &dev->flags); 3409 if (test_and_clear_bit(R5_SkipCopy, &dev->flags)) { 3410 WARN_ON(test_bit(R5_UPTODATE, &dev->flags)); 3411 } 3412 do_endio = true; 3413 3414 returnbi: 3415 dev->page = dev->orig_page; 3416 wbi = dev->written; 3417 dev->written = NULL; 3418 while (wbi && wbi->bi_iter.bi_sector < 3419 dev->sector + STRIPE_SECTORS) { 3420 wbi2 = r5_next_bio(wbi, dev->sector); 3421 if (!raid5_dec_bi_active_stripes(wbi)) { 3422 md_write_end(conf->mddev); 3423 wbi->bi_next = *return_bi; 3424 *return_bi = wbi; 3425 } 3426 wbi = wbi2; 3427 } 3428 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 3429 STRIPE_SECTORS, 3430 !test_bit(STRIPE_DEGRADED, &sh->state), 3431 0); 3432 if (head_sh->batch_head) { 3433 sh = list_first_entry(&sh->batch_list, 3434 struct stripe_head, 3435 batch_list); 3436 if (sh != head_sh) { 3437 dev = &sh->dev[i]; 3438 goto returnbi; 3439 } 3440 } 3441 sh = head_sh; 3442 dev = &sh->dev[i]; 3443 } else if (test_bit(R5_Discard, &dev->flags)) 3444 discard_pending = 1; 3445 WARN_ON(test_bit(R5_SkipCopy, &dev->flags)); 3446 WARN_ON(dev->page != dev->orig_page); 3447 } 3448 if (!discard_pending && 3449 test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) { 3450 clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); 3451 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); 3452 if (sh->qd_idx >= 0) { 3453 clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); 3454 clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags); 3455 } 3456 /* now that discard is done we can proceed with any sync */ 3457 clear_bit(STRIPE_DISCARD, &sh->state); 3458 /* 3459 * SCSI discard will change some bio fields and the stripe has 3460 * no updated data, so remove it from hash list and the stripe 3461 * will be reinitialized 3462 */ 3463 spin_lock_irq(&conf->device_lock); 3464 unhash: 3465 remove_hash(sh); 3466 if (head_sh->batch_head) { 3467 sh = list_first_entry(&sh->batch_list, 3468 struct stripe_head, batch_list); 3469 if (sh != head_sh) 3470 goto unhash; 3471 } 3472 spin_unlock_irq(&conf->device_lock); 3473 sh = head_sh; 3474 3475 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) 3476 set_bit(STRIPE_HANDLE, &sh->state); 3477 3478 } 3479 3480 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 3481 if (atomic_dec_and_test(&conf->pending_full_writes)) 3482 md_wakeup_thread(conf->mddev->thread); 3483 3484 if (!head_sh->batch_head || !do_endio) 3485 return; 3486 for (i = 0; i < head_sh->disks; i++) { 3487 if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags)) 3488 wakeup_nr++; 3489 } 3490 while (!list_empty(&head_sh->batch_list)) { 3491 int i; 3492 sh = list_first_entry(&head_sh->batch_list, 3493 struct stripe_head, batch_list); 3494 list_del_init(&sh->batch_list); 3495 3496 set_mask_bits(&sh->state, ~STRIPE_EXPAND_SYNC_FLAG, 3497 head_sh->state & ~((1 << 
STRIPE_ACTIVE) | 3498 (1 << STRIPE_PREREAD_ACTIVE) | 3499 STRIPE_EXPAND_SYNC_FLAG)); 3500 sh->check_state = head_sh->check_state; 3501 sh->reconstruct_state = head_sh->reconstruct_state; 3502 for (i = 0; i < sh->disks; i++) { 3503 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 3504 wakeup_nr++; 3505 sh->dev[i].flags = head_sh->dev[i].flags; 3506 } 3507 3508 spin_lock_irq(&sh->stripe_lock); 3509 sh->batch_head = NULL; 3510 spin_unlock_irq(&sh->stripe_lock); 3511 if (sh->state & STRIPE_EXPAND_SYNC_FLAG) 3512 set_bit(STRIPE_HANDLE, &sh->state); 3513 release_stripe(sh); 3514 } 3515 3516 spin_lock_irq(&head_sh->stripe_lock); 3517 head_sh->batch_head = NULL; 3518 spin_unlock_irq(&head_sh->stripe_lock); 3519 wake_up_nr(&conf->wait_for_overlap, wakeup_nr); 3520 if (head_sh->state & STRIPE_EXPAND_SYNC_FLAG) 3521 set_bit(STRIPE_HANDLE, &head_sh->state); 3522 } 3523 3524 static void handle_stripe_dirtying(struct r5conf *conf, 3525 struct stripe_head *sh, 3526 struct stripe_head_state *s, 3527 int disks) 3528 { 3529 int rmw = 0, rcw = 0, i; 3530 sector_t recovery_cp = conf->mddev->recovery_cp; 3531 3532 /* Check whether resync is now happening or should start. 3533 * If yes, then the array is dirty (after unclean shutdown or 3534 * initial creation), so parity in some stripes might be inconsistent. 3535 * In this case, we need to always do reconstruct-write, to ensure 3536 * that in case of drive failure or read-error correction, we 3537 * generate correct data from the parity. 3538 */ 3539 if (conf->rmw_level == PARITY_DISABLE_RMW || 3540 (recovery_cp < MaxSector && sh->sector >= recovery_cp && 3541 s->failed == 0)) { 3542 /* Calculate the real rcw later - for now make it 3543 * look like rcw is cheaper 3544 */ 3545 rcw = 1; rmw = 2; 3546 pr_debug("force RCW rmw_level=%u, recovery_cp=%llu sh->sector=%llu\n", 3547 conf->rmw_level, (unsigned long long)recovery_cp, 3548 (unsigned long long)sh->sector); 3549 } else for (i = disks; i--; ) { 3550 /* would I have to read this buffer for read_modify_write */ 3551 struct r5dev *dev = &sh->dev[i]; 3552 if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) && 3553 !test_bit(R5_LOCKED, &dev->flags) && 3554 !(test_bit(R5_UPTODATE, &dev->flags) || 3555 test_bit(R5_Wantcompute, &dev->flags))) { 3556 if (test_bit(R5_Insync, &dev->flags)) 3557 rmw++; 3558 else 3559 rmw += 2*disks; /* cannot read it */ 3560 } 3561 /* Would I have to read this buffer for reconstruct_write */ 3562 if (!test_bit(R5_OVERWRITE, &dev->flags) && 3563 i != sh->pd_idx && i != sh->qd_idx && 3564 !test_bit(R5_LOCKED, &dev->flags) && 3565 !(test_bit(R5_UPTODATE, &dev->flags) || 3566 test_bit(R5_Wantcompute, &dev->flags))) { 3567 if (test_bit(R5_Insync, &dev->flags)) 3568 rcw++; 3569 else 3570 rcw += 2*disks; 3571 } 3572 } 3573 pr_debug("for sector %llu, rmw=%d rcw=%d\n", 3574 (unsigned long long)sh->sector, rmw, rcw); 3575 set_bit(STRIPE_HANDLE, &sh->state); 3576 if ((rmw < rcw || (rmw == rcw && conf->rmw_level == PARITY_ENABLE_RMW)) && rmw > 0) { 3577 /* prefer read-modify-write, but need to get some data */ 3578 if (conf->mddev->queue) 3579 blk_add_trace_msg(conf->mddev->queue, 3580 "raid5 rmw %llu %d", 3581 (unsigned long long)sh->sector, rmw); 3582 for (i = disks; i--; ) { 3583 struct r5dev *dev = &sh->dev[i]; 3584 if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) && 3585 !test_bit(R5_LOCKED, &dev->flags) && 3586 !(test_bit(R5_UPTODATE, &dev->flags) || 3587 test_bit(R5_Wantcompute, &dev->flags)) && 3588 test_bit(R5_Insync, &dev->flags)) { 3589 if 
(test_bit(STRIPE_PREREAD_ACTIVE, 3590 &sh->state)) { 3591 pr_debug("Read_old block %d for r-m-w\n", 3592 i); 3593 set_bit(R5_LOCKED, &dev->flags); 3594 set_bit(R5_Wantread, &dev->flags); 3595 s->locked++; 3596 } else { 3597 set_bit(STRIPE_DELAYED, &sh->state); 3598 set_bit(STRIPE_HANDLE, &sh->state); 3599 } 3600 } 3601 } 3602 } 3603 if ((rcw < rmw || (rcw == rmw && conf->rmw_level != PARITY_ENABLE_RMW)) && rcw > 0) { 3604 /* want reconstruct write, but need to get some data */ 3605 int qread =0; 3606 rcw = 0; 3607 for (i = disks; i--; ) { 3608 struct r5dev *dev = &sh->dev[i]; 3609 if (!test_bit(R5_OVERWRITE, &dev->flags) && 3610 i != sh->pd_idx && i != sh->qd_idx && 3611 !test_bit(R5_LOCKED, &dev->flags) && 3612 !(test_bit(R5_UPTODATE, &dev->flags) || 3613 test_bit(R5_Wantcompute, &dev->flags))) { 3614 rcw++; 3615 if (test_bit(R5_Insync, &dev->flags) && 3616 test_bit(STRIPE_PREREAD_ACTIVE, 3617 &sh->state)) { 3618 pr_debug("Read_old block " 3619 "%d for Reconstruct\n", i); 3620 set_bit(R5_LOCKED, &dev->flags); 3621 set_bit(R5_Wantread, &dev->flags); 3622 s->locked++; 3623 qread++; 3624 } else { 3625 set_bit(STRIPE_DELAYED, &sh->state); 3626 set_bit(STRIPE_HANDLE, &sh->state); 3627 } 3628 } 3629 } 3630 if (rcw && conf->mddev->queue) 3631 blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d", 3632 (unsigned long long)sh->sector, 3633 rcw, qread, test_bit(STRIPE_DELAYED, &sh->state)); 3634 } 3635 3636 if (rcw > disks && rmw > disks && 3637 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 3638 set_bit(STRIPE_DELAYED, &sh->state); 3639 3640 /* now if nothing is locked, and if we have enough data, 3641 * we can start a write request 3642 */ 3643 /* since handle_stripe can be called at any time we need to handle the 3644 * case where a compute block operation has been submitted and then a 3645 * subsequent call wants to start a write request. raid_run_ops only 3646 * handles the case where compute block and reconstruct are requested 3647 * simultaneously. If this is not the case then new writes need to be 3648 * held off until the compute completes. 
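 * (that is what the s->req_compute || !STRIPE_COMPUTE_RUN test just
 * below enforces before calling schedule_reconstruction())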
3649 */ 3650 if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && 3651 (s->locked == 0 && (rcw == 0 || rmw == 0) && 3652 !test_bit(STRIPE_BIT_DELAY, &sh->state))) 3653 schedule_reconstruction(sh, s, rcw == 0, 0); 3654 } 3655 3656 static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, 3657 struct stripe_head_state *s, int disks) 3658 { 3659 struct r5dev *dev = NULL; 3660 3661 BUG_ON(sh->batch_head); 3662 set_bit(STRIPE_HANDLE, &sh->state); 3663 3664 switch (sh->check_state) { 3665 case check_state_idle: 3666 /* start a new check operation if there are no failures */ 3667 if (s->failed == 0) { 3668 BUG_ON(s->uptodate != disks); 3669 sh->check_state = check_state_run; 3670 set_bit(STRIPE_OP_CHECK, &s->ops_request); 3671 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); 3672 s->uptodate--; 3673 break; 3674 } 3675 dev = &sh->dev[s->failed_num[0]]; 3676 /* fall through */ 3677 case check_state_compute_result: 3678 sh->check_state = check_state_idle; 3679 if (!dev) 3680 dev = &sh->dev[sh->pd_idx]; 3681 3682 /* check that a write has not made the stripe insync */ 3683 if (test_bit(STRIPE_INSYNC, &sh->state)) 3684 break; 3685 3686 /* either failed parity check, or recovery is happening */ 3687 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags)); 3688 BUG_ON(s->uptodate != disks); 3689 3690 set_bit(R5_LOCKED, &dev->flags); 3691 s->locked++; 3692 set_bit(R5_Wantwrite, &dev->flags); 3693 3694 clear_bit(STRIPE_DEGRADED, &sh->state); 3695 set_bit(STRIPE_INSYNC, &sh->state); 3696 break; 3697 case check_state_run: 3698 break; /* we will be called again upon completion */ 3699 case check_state_check_result: 3700 sh->check_state = check_state_idle; 3701 3702 /* if a failure occurred during the check operation, leave 3703 * STRIPE_INSYNC not set and let the stripe be handled again 3704 */ 3705 if (s->failed) 3706 break; 3707 3708 /* handle a successful check operation, if parity is correct 3709 * we are done. Otherwise update the mismatch count and repair 3710 * parity if !MD_RECOVERY_CHECK 3711 */ 3712 if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0) 3713 /* parity is correct (on disc, 3714 * not in buffer any more) 3715 */ 3716 set_bit(STRIPE_INSYNC, &sh->state); 3717 else { 3718 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); 3719 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 3720 /* don't try to repair!! */ 3721 set_bit(STRIPE_INSYNC, &sh->state); 3722 else { 3723 sh->check_state = check_state_compute_run; 3724 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 3725 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 3726 set_bit(R5_Wantcompute, 3727 &sh->dev[sh->pd_idx].flags); 3728 sh->ops.target = sh->pd_idx; 3729 sh->ops.target2 = -1; 3730 s->uptodate++; 3731 } 3732 } 3733 break; 3734 case check_state_compute_run: 3735 break; 3736 default: 3737 printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n", 3738 __func__, sh->check_state, 3739 (unsigned long long) sh->sector); 3740 BUG(); 3741 } 3742 } 3743 3744 static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, 3745 struct stripe_head_state *s, 3746 int disks) 3747 { 3748 int pd_idx = sh->pd_idx; 3749 int qd_idx = sh->qd_idx; 3750 struct r5dev *dev; 3751 3752 BUG_ON(sh->batch_head); 3753 set_bit(STRIPE_HANDLE, &sh->state); 3754 3755 BUG_ON(s->failed > 2); 3756 3757 /* Want to check and possibly repair P and Q. 
3758 * However there could be one 'failed' device, in which 3759 * case we can only check one of them, possibly using the 3760 * other to generate missing data 3761 */ 3762 3763 switch (sh->check_state) { 3764 case check_state_idle: 3765 /* start a new check operation if there are < 2 failures */ 3766 if (s->failed == s->q_failed) { 3767 /* The only possible failed device holds Q, so it 3768 * makes sense to check P (If anything else were failed, 3769 * we would have used P to recreate it). 3770 */ 3771 sh->check_state = check_state_run; 3772 } 3773 if (!s->q_failed && s->failed < 2) { 3774 /* Q is not failed, and we didn't use it to generate 3775 * anything, so it makes sense to check it 3776 */ 3777 if (sh->check_state == check_state_run) 3778 sh->check_state = check_state_run_pq; 3779 else 3780 sh->check_state = check_state_run_q; 3781 } 3782 3783 /* discard potentially stale zero_sum_result */ 3784 sh->ops.zero_sum_result = 0; 3785 3786 if (sh->check_state == check_state_run) { 3787 /* async_xor_zero_sum destroys the contents of P */ 3788 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 3789 s->uptodate--; 3790 } 3791 if (sh->check_state >= check_state_run && 3792 sh->check_state <= check_state_run_pq) { 3793 /* async_syndrome_zero_sum preserves P and Q, so 3794 * no need to mark them !uptodate here 3795 */ 3796 set_bit(STRIPE_OP_CHECK, &s->ops_request); 3797 break; 3798 } 3799 3800 /* we have 2-disk failure */ 3801 BUG_ON(s->failed != 2); 3802 /* fall through */ 3803 case check_state_compute_result: 3804 sh->check_state = check_state_idle; 3805 3806 /* check that a write has not made the stripe insync */ 3807 if (test_bit(STRIPE_INSYNC, &sh->state)) 3808 break; 3809 3810 /* now write out any block on a failed drive, 3811 * or P or Q if they were recomputed 3812 */ 3813 BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */ 3814 if (s->failed == 2) { 3815 dev = &sh->dev[s->failed_num[1]]; 3816 s->locked++; 3817 set_bit(R5_LOCKED, &dev->flags); 3818 set_bit(R5_Wantwrite, &dev->flags); 3819 } 3820 if (s->failed >= 1) { 3821 dev = &sh->dev[s->failed_num[0]]; 3822 s->locked++; 3823 set_bit(R5_LOCKED, &dev->flags); 3824 set_bit(R5_Wantwrite, &dev->flags); 3825 } 3826 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { 3827 dev = &sh->dev[pd_idx]; 3828 s->locked++; 3829 set_bit(R5_LOCKED, &dev->flags); 3830 set_bit(R5_Wantwrite, &dev->flags); 3831 } 3832 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { 3833 dev = &sh->dev[qd_idx]; 3834 s->locked++; 3835 set_bit(R5_LOCKED, &dev->flags); 3836 set_bit(R5_Wantwrite, &dev->flags); 3837 } 3838 clear_bit(STRIPE_DEGRADED, &sh->state); 3839 3840 set_bit(STRIPE_INSYNC, &sh->state); 3841 break; 3842 case check_state_run: 3843 case check_state_run_q: 3844 case check_state_run_pq: 3845 break; /* we will be called again upon completion */ 3846 case check_state_check_result: 3847 sh->check_state = check_state_idle; 3848 3849 /* handle a successful check operation, if parity is correct 3850 * we are done. 
Otherwise update the mismatch count and repair 3851 * parity if !MD_RECOVERY_CHECK 3852 */ 3853 if (sh->ops.zero_sum_result == 0) { 3854 /* both parities are correct */ 3855 if (!s->failed) 3856 set_bit(STRIPE_INSYNC, &sh->state); 3857 else { 3858 /* in contrast to the raid5 case we can validate 3859 * parity, but still have a failure to write 3860 * back 3861 */ 3862 sh->check_state = check_state_compute_result; 3863 /* Returning at this point means that we may go 3864 * off and bring p and/or q uptodate again so 3865 * we make sure to check zero_sum_result again 3866 * to verify if p or q need writeback 3867 */ 3868 } 3869 } else { 3870 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); 3871 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 3872 /* don't try to repair!! */ 3873 set_bit(STRIPE_INSYNC, &sh->state); 3874 else { 3875 int *target = &sh->ops.target; 3876 3877 sh->ops.target = -1; 3878 sh->ops.target2 = -1; 3879 sh->check_state = check_state_compute_run; 3880 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 3881 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 3882 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { 3883 set_bit(R5_Wantcompute, 3884 &sh->dev[pd_idx].flags); 3885 *target = pd_idx; 3886 target = &sh->ops.target2; 3887 s->uptodate++; 3888 } 3889 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { 3890 set_bit(R5_Wantcompute, 3891 &sh->dev[qd_idx].flags); 3892 *target = qd_idx; 3893 s->uptodate++; 3894 } 3895 } 3896 } 3897 break; 3898 case check_state_compute_run: 3899 break; 3900 default: 3901 printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n", 3902 __func__, sh->check_state, 3903 (unsigned long long) sh->sector); 3904 BUG(); 3905 } 3906 } 3907 3908 static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh) 3909 { 3910 int i; 3911 3912 /* We have read all the blocks in this stripe and now we need to 3913 * copy some of them into a target stripe for expand. 3914 */ 3915 struct dma_async_tx_descriptor *tx = NULL; 3916 BUG_ON(sh->batch_head); 3917 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state); 3918 for (i = 0; i < sh->disks; i++) 3919 if (i != sh->pd_idx && i != sh->qd_idx) { 3920 int dd_idx, j; 3921 struct stripe_head *sh2; 3922 struct async_submit_ctl submit; 3923 3924 sector_t bn = compute_blocknr(sh, i, 1); 3925 sector_t s = raid5_compute_sector(conf, bn, 0, 3926 &dd_idx, NULL); 3927 sh2 = get_active_stripe(conf, s, 0, 1, 1); 3928 if (sh2 == NULL) 3929 /* so far only the early blocks of this stripe 3930 * have been requested. 
When later blocks 3931 * get requested, we will try again 3932 */ 3933 continue; 3934 if (!test_bit(STRIPE_EXPANDING, &sh2->state) || 3935 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) { 3936 /* must have already done this block */ 3937 release_stripe(sh2); 3938 continue; 3939 } 3940 3941 /* place all the copies on one channel */ 3942 init_async_submit(&submit, 0, tx, NULL, NULL, NULL); 3943 tx = async_memcpy(sh2->dev[dd_idx].page, 3944 sh->dev[i].page, 0, 0, STRIPE_SIZE, 3945 &submit); 3946 3947 set_bit(R5_Expanded, &sh2->dev[dd_idx].flags); 3948 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags); 3949 for (j = 0; j < conf->raid_disks; j++) 3950 if (j != sh2->pd_idx && 3951 j != sh2->qd_idx && 3952 !test_bit(R5_Expanded, &sh2->dev[j].flags)) 3953 break; 3954 if (j == conf->raid_disks) { 3955 set_bit(STRIPE_EXPAND_READY, &sh2->state); 3956 set_bit(STRIPE_HANDLE, &sh2->state); 3957 } 3958 release_stripe(sh2); 3959 3960 } 3961 /* done submitting copies, wait for them to complete */ 3962 async_tx_quiesce(&tx); 3963 } 3964 3965 /* 3966 * handle_stripe - do things to a stripe. 3967 * 3968 * We lock the stripe by setting STRIPE_ACTIVE and then examine the 3969 * state of various bits to see what needs to be done. 3970 * Possible results: 3971 * return some read requests which now have data 3972 * return some write requests which are safely on storage 3973 * schedule a read on some buffers 3974 * schedule a write of some buffers 3975 * return confirmation of parity correctness 3976 * 3977 */ 3978 3979 static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) 3980 { 3981 struct r5conf *conf = sh->raid_conf; 3982 int disks = sh->disks; 3983 struct r5dev *dev; 3984 int i; 3985 int do_recovery = 0; 3986 3987 memset(s, 0, sizeof(*s)); 3988 3989 s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state) && !sh->batch_head; 3990 s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state) && !sh->batch_head; 3991 s->failed_num[0] = -1; 3992 s->failed_num[1] = -1; 3993 3994 /* Now to look around and see what can be done */ 3995 rcu_read_lock(); 3996 for (i=disks; i--; ) { 3997 struct md_rdev *rdev; 3998 sector_t first_bad; 3999 int bad_sectors; 4000 int is_bad = 0; 4001 4002 dev = &sh->dev[i]; 4003 4004 pr_debug("check %d: state 0x%lx read %p write %p written %p\n", 4005 i, dev->flags, 4006 dev->toread, dev->towrite, dev->written); 4007 /* maybe we can reply to a read 4008 * 4009 * new wantfill requests are only permitted while 4010 * ops_complete_biofill is guaranteed to be inactive 4011 */ 4012 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread && 4013 !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) 4014 set_bit(R5_Wantfill, &dev->flags); 4015 4016 /* now count some things */ 4017 if (test_bit(R5_LOCKED, &dev->flags)) 4018 s->locked++; 4019 if (test_bit(R5_UPTODATE, &dev->flags)) 4020 s->uptodate++; 4021 if (test_bit(R5_Wantcompute, &dev->flags)) { 4022 s->compute++; 4023 BUG_ON(s->compute > 2); 4024 } 4025 4026 if (test_bit(R5_Wantfill, &dev->flags)) 4027 s->to_fill++; 4028 else if (dev->toread) 4029 s->to_read++; 4030 if (dev->towrite) { 4031 s->to_write++; 4032 if (!test_bit(R5_OVERWRITE, &dev->flags)) 4033 s->non_overwrite++; 4034 } 4035 if (dev->written) 4036 s->written++; 4037 /* Prefer to use the replacement for reads, but only 4038 * if it is recovered enough and has no bad blocks. 
4039 */ 4040 rdev = rcu_dereference(conf->disks[i].replacement); 4041 if (rdev && !test_bit(Faulty, &rdev->flags) && 4042 rdev->recovery_offset >= sh->sector + STRIPE_SECTORS && 4043 !is_badblock(rdev, sh->sector, STRIPE_SECTORS, 4044 &first_bad, &bad_sectors)) 4045 set_bit(R5_ReadRepl, &dev->flags); 4046 else { 4047 if (rdev) 4048 set_bit(R5_NeedReplace, &dev->flags); 4049 rdev = rcu_dereference(conf->disks[i].rdev); 4050 clear_bit(R5_ReadRepl, &dev->flags); 4051 } 4052 if (rdev && test_bit(Faulty, &rdev->flags)) 4053 rdev = NULL; 4054 if (rdev) { 4055 is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, 4056 &first_bad, &bad_sectors); 4057 if (s->blocked_rdev == NULL 4058 && (test_bit(Blocked, &rdev->flags) 4059 || is_bad < 0)) { 4060 if (is_bad < 0) 4061 set_bit(BlockedBadBlocks, 4062 &rdev->flags); 4063 s->blocked_rdev = rdev; 4064 atomic_inc(&rdev->nr_pending); 4065 } 4066 } 4067 clear_bit(R5_Insync, &dev->flags); 4068 if (!rdev) 4069 /* Not in-sync */; 4070 else if (is_bad) { 4071 /* also not in-sync */ 4072 if (!test_bit(WriteErrorSeen, &rdev->flags) && 4073 test_bit(R5_UPTODATE, &dev->flags)) { 4074 /* treat as in-sync, but with a read error 4075 * which we can now try to correct 4076 */ 4077 set_bit(R5_Insync, &dev->flags); 4078 set_bit(R5_ReadError, &dev->flags); 4079 } 4080 } else if (test_bit(In_sync, &rdev->flags)) 4081 set_bit(R5_Insync, &dev->flags); 4082 else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) 4083 /* in sync if before recovery_offset */ 4084 set_bit(R5_Insync, &dev->flags); 4085 else if (test_bit(R5_UPTODATE, &dev->flags) && 4086 test_bit(R5_Expanded, &dev->flags)) 4087 /* If we've reshaped into here, we assume it is Insync. 4088 * We will shortly update recovery_offset to make 4089 * it official. 4090 */ 4091 set_bit(R5_Insync, &dev->flags); 4092 4093 if (test_bit(R5_WriteError, &dev->flags)) { 4094 /* This flag does not apply to '.replacement' 4095 * only to .rdev, so make sure to check that*/ 4096 struct md_rdev *rdev2 = rcu_dereference( 4097 conf->disks[i].rdev); 4098 if (rdev2 == rdev) 4099 clear_bit(R5_Insync, &dev->flags); 4100 if (rdev2 && !test_bit(Faulty, &rdev2->flags)) { 4101 s->handle_bad_blocks = 1; 4102 atomic_inc(&rdev2->nr_pending); 4103 } else 4104 clear_bit(R5_WriteError, &dev->flags); 4105 } 4106 if (test_bit(R5_MadeGood, &dev->flags)) { 4107 /* This flag does not apply to '.replacement' 4108 * only to .rdev, so make sure to check that*/ 4109 struct md_rdev *rdev2 = rcu_dereference( 4110 conf->disks[i].rdev); 4111 if (rdev2 && !test_bit(Faulty, &rdev2->flags)) { 4112 s->handle_bad_blocks = 1; 4113 atomic_inc(&rdev2->nr_pending); 4114 } else 4115 clear_bit(R5_MadeGood, &dev->flags); 4116 } 4117 if (test_bit(R5_MadeGoodRepl, &dev->flags)) { 4118 struct md_rdev *rdev2 = rcu_dereference( 4119 conf->disks[i].replacement); 4120 if (rdev2 && !test_bit(Faulty, &rdev2->flags)) { 4121 s->handle_bad_blocks = 1; 4122 atomic_inc(&rdev2->nr_pending); 4123 } else 4124 clear_bit(R5_MadeGoodRepl, &dev->flags); 4125 } 4126 if (!test_bit(R5_Insync, &dev->flags)) { 4127 /* The ReadError flag will just be confusing now */ 4128 clear_bit(R5_ReadError, &dev->flags); 4129 clear_bit(R5_ReWrite, &dev->flags); 4130 } 4131 if (test_bit(R5_ReadError, &dev->flags)) 4132 clear_bit(R5_Insync, &dev->flags); 4133 if (!test_bit(R5_Insync, &dev->flags)) { 4134 if (s->failed < 2) 4135 s->failed_num[s->failed] = i; 4136 s->failed++; 4137 if (rdev && !test_bit(Faulty, &rdev->flags)) 4138 do_recovery = 1; 4139 } 4140 } 4141 if (test_bit(STRIPE_SYNCING, &sh->state)) { 
4142 /* If there is a failed device being replaced, 4143 * we must be recovering. 4144 * else if we are after recovery_cp, we must be syncing 4145 * else if MD_RECOVERY_REQUESTED is set, we also are syncing. 4146 * else we can only be replacing 4147 * sync and recovery both need to read all devices, and so 4148 * use the same flag. 4149 */ 4150 if (do_recovery || 4151 sh->sector >= conf->mddev->recovery_cp || 4152 test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery))) 4153 s->syncing = 1; 4154 else 4155 s->replacing = 1; 4156 } 4157 rcu_read_unlock(); 4158 } 4159 4160 static int clear_batch_ready(struct stripe_head *sh) 4161 { 4162 struct stripe_head *tmp; 4163 if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state)) 4164 return 0; 4165 spin_lock(&sh->stripe_lock); 4166 if (!sh->batch_head) { 4167 spin_unlock(&sh->stripe_lock); 4168 return 0; 4169 } 4170 4171 /* 4172 * this stripe could be added to a batch list before we check 4173 * BATCH_READY, skips it 4174 */ 4175 if (sh->batch_head != sh) { 4176 spin_unlock(&sh->stripe_lock); 4177 return 1; 4178 } 4179 spin_lock(&sh->batch_lock); 4180 list_for_each_entry(tmp, &sh->batch_list, batch_list) 4181 clear_bit(STRIPE_BATCH_READY, &tmp->state); 4182 spin_unlock(&sh->batch_lock); 4183 spin_unlock(&sh->stripe_lock); 4184 4185 /* 4186 * BATCH_READY is cleared, no new stripes can be added. 4187 * batch_list can be accessed without lock 4188 */ 4189 return 0; 4190 } 4191 4192 static void check_break_stripe_batch_list(struct stripe_head *sh) 4193 { 4194 struct stripe_head *head_sh, *next; 4195 int i; 4196 4197 if (!test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state)) 4198 return; 4199 4200 head_sh = sh; 4201 do { 4202 sh = list_first_entry(&sh->batch_list, 4203 struct stripe_head, batch_list); 4204 BUG_ON(sh == head_sh); 4205 } while (!test_bit(STRIPE_DEGRADED, &sh->state)); 4206 4207 while (sh != head_sh) { 4208 next = list_first_entry(&sh->batch_list, 4209 struct stripe_head, batch_list); 4210 list_del_init(&sh->batch_list); 4211 4212 set_mask_bits(&sh->state, ~STRIPE_EXPAND_SYNC_FLAG, 4213 head_sh->state & ~((1 << STRIPE_ACTIVE) | 4214 (1 << STRIPE_PREREAD_ACTIVE) | 4215 (1 << STRIPE_DEGRADED) | 4216 STRIPE_EXPAND_SYNC_FLAG)); 4217 sh->check_state = head_sh->check_state; 4218 sh->reconstruct_state = head_sh->reconstruct_state; 4219 for (i = 0; i < sh->disks; i++) 4220 sh->dev[i].flags = head_sh->dev[i].flags & 4221 (~((1 << R5_WriteError) | (1 << R5_Overlap))); 4222 4223 spin_lock_irq(&sh->stripe_lock); 4224 sh->batch_head = NULL; 4225 spin_unlock_irq(&sh->stripe_lock); 4226 4227 set_bit(STRIPE_HANDLE, &sh->state); 4228 release_stripe(sh); 4229 4230 sh = next; 4231 } 4232 } 4233 4234 static void handle_stripe(struct stripe_head *sh) 4235 { 4236 struct stripe_head_state s; 4237 struct r5conf *conf = sh->raid_conf; 4238 int i; 4239 int prexor; 4240 int disks = sh->disks; 4241 struct r5dev *pdev, *qdev; 4242 4243 clear_bit(STRIPE_HANDLE, &sh->state); 4244 if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) { 4245 /* already being handled, ensure it gets handled 4246 * again when current action finishes */ 4247 set_bit(STRIPE_HANDLE, &sh->state); 4248 return; 4249 } 4250 4251 if (clear_batch_ready(sh) ) { 4252 clear_bit_unlock(STRIPE_ACTIVE, &sh->state); 4253 return; 4254 } 4255 4256 check_break_stripe_batch_list(sh); 4257 4258 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) { 4259 spin_lock(&sh->stripe_lock); 4260 /* Cannot process 'sync' concurrently with 'discard' */ 4261 if (!test_bit(STRIPE_DISCARD, &sh->state) && 4262 
test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { 4263 set_bit(STRIPE_SYNCING, &sh->state); 4264 clear_bit(STRIPE_INSYNC, &sh->state); 4265 clear_bit(STRIPE_REPLACED, &sh->state); 4266 } 4267 spin_unlock(&sh->stripe_lock); 4268 } 4269 clear_bit(STRIPE_DELAYED, &sh->state); 4270 4271 pr_debug("handling stripe %llu, state=%#lx cnt=%d, " 4272 "pd_idx=%d, qd_idx=%d, check:%d, reconstruct:%d\n", 4273 (unsigned long long)sh->sector, sh->state, 4274 atomic_read(&sh->count), sh->pd_idx, sh->qd_idx, 4275 sh->check_state, sh->reconstruct_state); 4276 4277 analyse_stripe(sh, &s); 4278 4279 if (s.handle_bad_blocks) { 4280 set_bit(STRIPE_HANDLE, &sh->state); 4281 goto finish; 4282 } 4283 4284 if (unlikely(s.blocked_rdev)) { 4285 if (s.syncing || s.expanding || s.expanded || 4286 s.replacing || s.to_write || s.written) { 4287 set_bit(STRIPE_HANDLE, &sh->state); 4288 goto finish; 4289 } 4290 /* There is nothing for the blocked_rdev to block */ 4291 rdev_dec_pending(s.blocked_rdev, conf->mddev); 4292 s.blocked_rdev = NULL; 4293 } 4294 4295 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { 4296 set_bit(STRIPE_OP_BIOFILL, &s.ops_request); 4297 set_bit(STRIPE_BIOFILL_RUN, &sh->state); 4298 } 4299 4300 pr_debug("locked=%d uptodate=%d to_read=%d" 4301 " to_write=%d failed=%d failed_num=%d,%d\n", 4302 s.locked, s.uptodate, s.to_read, s.to_write, s.failed, 4303 s.failed_num[0], s.failed_num[1]); 4304 /* check if the array has lost more than max_degraded devices and, 4305 * if so, some requests might need to be failed. 4306 */ 4307 if (s.failed > conf->max_degraded) { 4308 sh->check_state = 0; 4309 sh->reconstruct_state = 0; 4310 if (s.to_read+s.to_write+s.written) 4311 handle_failed_stripe(conf, sh, &s, disks, &s.return_bi); 4312 if (s.syncing + s.replacing) 4313 handle_failed_sync(conf, sh, &s); 4314 } 4315 4316 /* Now we check to see if any write operations have recently 4317 * completed 4318 */ 4319 prexor = 0; 4320 if (sh->reconstruct_state == reconstruct_state_prexor_drain_result) 4321 prexor = 1; 4322 if (sh->reconstruct_state == reconstruct_state_drain_result || 4323 sh->reconstruct_state == reconstruct_state_prexor_drain_result) { 4324 sh->reconstruct_state = reconstruct_state_idle; 4325 4326 /* All the 'written' buffers and the parity block are ready to 4327 * be written back to disk 4328 */ 4329 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) && 4330 !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)); 4331 BUG_ON(sh->qd_idx >= 0 && 4332 !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags) && 4333 !test_bit(R5_Discard, &sh->dev[sh->qd_idx].flags)); 4334 for (i = disks; i--; ) { 4335 struct r5dev *dev = &sh->dev[i]; 4336 if (test_bit(R5_LOCKED, &dev->flags) && 4337 (i == sh->pd_idx || i == sh->qd_idx || 4338 dev->written)) { 4339 pr_debug("Writing block %d\n", i); 4340 set_bit(R5_Wantwrite, &dev->flags); 4341 if (prexor) 4342 continue; 4343 if (s.failed > 1) 4344 continue; 4345 if (!test_bit(R5_Insync, &dev->flags) || 4346 ((i == sh->pd_idx || i == sh->qd_idx) && 4347 s.failed == 0)) 4348 set_bit(STRIPE_INSYNC, &sh->state); 4349 } 4350 } 4351 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 4352 s.dec_preread_active = 1; 4353 } 4354 4355 /* 4356 * might be able to return some write requests if the parity blocks 4357 * are safe, or on a failed drive 4358 */ 4359 pdev = &sh->dev[sh->pd_idx]; 4360 s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx) 4361 || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx); 4362 qdev = &sh->dev[sh->qd_idx]; 4363 s.q_failed =
(s.failed >= 1 && s.failed_num[0] == sh->qd_idx) 4364 || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx) 4365 || conf->level < 6; 4366 4367 if (s.written && 4368 (s.p_failed || ((test_bit(R5_Insync, &pdev->flags) 4369 && !test_bit(R5_LOCKED, &pdev->flags) 4370 && (test_bit(R5_UPTODATE, &pdev->flags) || 4371 test_bit(R5_Discard, &pdev->flags))))) && 4372 (s.q_failed || ((test_bit(R5_Insync, &qdev->flags) 4373 && !test_bit(R5_LOCKED, &qdev->flags) 4374 && (test_bit(R5_UPTODATE, &qdev->flags) || 4375 test_bit(R5_Discard, &qdev->flags)))))) 4376 handle_stripe_clean_event(conf, sh, disks, &s.return_bi); 4377 4378 /* Now we might consider reading some blocks, either to check/generate 4379 * parity, or to satisfy requests 4380 * or to load a block that is being partially written. 4381 */ 4382 if (s.to_read || s.non_overwrite 4383 || (conf->level == 6 && s.to_write && s.failed) 4384 || (s.syncing && (s.uptodate + s.compute < disks)) 4385 || s.replacing 4386 || s.expanding) 4387 handle_stripe_fill(sh, &s, disks); 4388 4389 /* Now to consider new write requests and what else, if anything 4390 * should be read. We do not handle new writes when: 4391 * 1/ A 'write' operation (copy+xor) is already in flight. 4392 * 2/ A 'check' operation is in flight, as it may clobber the parity 4393 * block. 4394 */ 4395 if (s.to_write && !sh->reconstruct_state && !sh->check_state) 4396 handle_stripe_dirtying(conf, sh, &s, disks); 4397 4398 /* maybe we need to check and possibly fix the parity for this stripe 4399 * Any reads will already have been scheduled, so we just see if enough 4400 * data is available. The parity check is held off while parity 4401 * dependent operations are in flight. 4402 */ 4403 if (sh->check_state || 4404 (s.syncing && s.locked == 0 && 4405 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && 4406 !test_bit(STRIPE_INSYNC, &sh->state))) { 4407 if (conf->level == 6) 4408 handle_parity_checks6(conf, sh, &s, disks); 4409 else 4410 handle_parity_checks5(conf, sh, &s, disks); 4411 } 4412 4413 if ((s.replacing || s.syncing) && s.locked == 0 4414 && !test_bit(STRIPE_COMPUTE_RUN, &sh->state) 4415 && !test_bit(STRIPE_REPLACED, &sh->state)) { 4416 /* Write out to replacement devices where possible */ 4417 for (i = 0; i < conf->raid_disks; i++) 4418 if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) { 4419 WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags)); 4420 set_bit(R5_WantReplace, &sh->dev[i].flags); 4421 set_bit(R5_LOCKED, &sh->dev[i].flags); 4422 s.locked++; 4423 } 4424 if (s.replacing) 4425 set_bit(STRIPE_INSYNC, &sh->state); 4426 set_bit(STRIPE_REPLACED, &sh->state); 4427 } 4428 if ((s.syncing || s.replacing) && s.locked == 0 && 4429 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && 4430 test_bit(STRIPE_INSYNC, &sh->state)) { 4431 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 4432 clear_bit(STRIPE_SYNCING, &sh->state); 4433 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) 4434 wake_up(&conf->wait_for_overlap); 4435 } 4436 4437 /* If the failed drives are just a ReadError, then we might need 4438 * to progress the repair/check process 4439 */ 4440 if (s.failed <= conf->max_degraded && !conf->mddev->ro) 4441 for (i = 0; i < s.failed; i++) { 4442 struct r5dev *dev = &sh->dev[s.failed_num[i]]; 4443 if (test_bit(R5_ReadError, &dev->flags) 4444 && !test_bit(R5_LOCKED, &dev->flags) 4445 && test_bit(R5_UPTODATE, &dev->flags) 4446 ) { 4447 if (!test_bit(R5_ReWrite, &dev->flags)) { 4448 set_bit(R5_Wantwrite, &dev->flags); 4449 set_bit(R5_ReWrite, &dev->flags); 4450 set_bit(R5_LOCKED, &dev->flags); 
4451 s.locked++; 4452 } else { 4453 /* let's read it back */ 4454 set_bit(R5_Wantread, &dev->flags); 4455 set_bit(R5_LOCKED, &dev->flags); 4456 s.locked++; 4457 } 4458 } 4459 } 4460 4461 /* Finish reconstruct operations initiated by the expansion process */ 4462 if (sh->reconstruct_state == reconstruct_state_result) { 4463 struct stripe_head *sh_src 4464 = get_active_stripe(conf, sh->sector, 1, 1, 1); 4465 if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) { 4466 /* sh cannot be written until sh_src has been read. 4467 * so arrange for sh to be delayed a little 4468 */ 4469 set_bit(STRIPE_DELAYED, &sh->state); 4470 set_bit(STRIPE_HANDLE, &sh->state); 4471 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, 4472 &sh_src->state)) 4473 atomic_inc(&conf->preread_active_stripes); 4474 release_stripe(sh_src); 4475 goto finish; 4476 } 4477 if (sh_src) 4478 release_stripe(sh_src); 4479 4480 sh->reconstruct_state = reconstruct_state_idle; 4481 clear_bit(STRIPE_EXPANDING, &sh->state); 4482 for (i = conf->raid_disks; i--; ) { 4483 set_bit(R5_Wantwrite, &sh->dev[i].flags); 4484 set_bit(R5_LOCKED, &sh->dev[i].flags); 4485 s.locked++; 4486 } 4487 } 4488 4489 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && 4490 !sh->reconstruct_state) { 4491 /* Need to write out all blocks after computing parity */ 4492 sh->disks = conf->raid_disks; 4493 stripe_set_idx(sh->sector, conf, 0, sh); 4494 schedule_reconstruction(sh, &s, 1, 1); 4495 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { 4496 clear_bit(STRIPE_EXPAND_READY, &sh->state); 4497 atomic_dec(&conf->reshape_stripes); 4498 wake_up(&conf->wait_for_overlap); 4499 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 4500 } 4501 4502 if (s.expanding && s.locked == 0 && 4503 !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) 4504 handle_stripe_expansion(conf, sh); 4505 4506 finish: 4507 /* wait for this device to become unblocked */ 4508 if (unlikely(s.blocked_rdev)) { 4509 if (conf->mddev->external) 4510 md_wait_for_blocked_rdev(s.blocked_rdev, 4511 conf->mddev); 4512 else 4513 /* Internal metadata will immediately 4514 * be written by raid5d, so we don't 4515 * need to wait here. 4516 */ 4517 rdev_dec_pending(s.blocked_rdev, 4518 conf->mddev); 4519 } 4520 4521 if (s.handle_bad_blocks) 4522 for (i = disks; i--; ) { 4523 struct md_rdev *rdev; 4524 struct r5dev *dev = &sh->dev[i]; 4525 if (test_and_clear_bit(R5_WriteError, &dev->flags)) { 4526 /* We own a safe reference to the rdev */ 4527 rdev = conf->disks[i].rdev; 4528 if (!rdev_set_badblocks(rdev, sh->sector, 4529 STRIPE_SECTORS, 0)) 4530 md_error(conf->mddev, rdev); 4531 rdev_dec_pending(rdev, conf->mddev); 4532 } 4533 if (test_and_clear_bit(R5_MadeGood, &dev->flags)) { 4534 rdev = conf->disks[i].rdev; 4535 rdev_clear_badblocks(rdev, sh->sector, 4536 STRIPE_SECTORS, 0); 4537 rdev_dec_pending(rdev, conf->mddev); 4538 } 4539 if (test_and_clear_bit(R5_MadeGoodRepl, &dev->flags)) { 4540 rdev = conf->disks[i].replacement; 4541 if (!rdev) 4542 /* rdev have been moved down */ 4543 rdev = conf->disks[i].rdev; 4544 rdev_clear_badblocks(rdev, sh->sector, 4545 STRIPE_SECTORS, 0); 4546 rdev_dec_pending(rdev, conf->mddev); 4547 } 4548 } 4549 4550 if (s.ops_request) 4551 raid_run_ops(sh, s.ops_request); 4552 4553 ops_run_io(sh, &s); 4554 4555 if (s.dec_preread_active) { 4556 /* We delay this until after ops_run_io so that if make_request 4557 * is waiting on a flush, it won't continue until the writes 4558 * have actually been submitted. 
4559 */ 4560 atomic_dec(&conf->preread_active_stripes); 4561 if (atomic_read(&conf->preread_active_stripes) < 4562 IO_THRESHOLD) 4563 md_wakeup_thread(conf->mddev->thread); 4564 } 4565 4566 return_io(s.return_bi); 4567 4568 clear_bit_unlock(STRIPE_ACTIVE, &sh->state); 4569 } 4570 4571 static void raid5_activate_delayed(struct r5conf *conf) 4572 { 4573 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { 4574 while (!list_empty(&conf->delayed_list)) { 4575 struct list_head *l = conf->delayed_list.next; 4576 struct stripe_head *sh; 4577 sh = list_entry(l, struct stripe_head, lru); 4578 list_del_init(l); 4579 clear_bit(STRIPE_DELAYED, &sh->state); 4580 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 4581 atomic_inc(&conf->preread_active_stripes); 4582 list_add_tail(&sh->lru, &conf->hold_list); 4583 raid5_wakeup_stripe_thread(sh); 4584 } 4585 } 4586 } 4587 4588 static void activate_bit_delay(struct r5conf *conf, 4589 struct list_head *temp_inactive_list) 4590 { 4591 /* device_lock is held */ 4592 struct list_head head; 4593 list_add(&head, &conf->bitmap_list); 4594 list_del_init(&conf->bitmap_list); 4595 while (!list_empty(&head)) { 4596 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); 4597 int hash; 4598 list_del_init(&sh->lru); 4599 atomic_inc(&sh->count); 4600 hash = sh->hash_lock_index; 4601 __release_stripe(conf, sh, &temp_inactive_list[hash]); 4602 } 4603 } 4604 4605 static int raid5_congested(struct mddev *mddev, int bits) 4606 { 4607 struct r5conf *conf = mddev->private; 4608 4609 /* No difference between reads and writes. Just check 4610 * how busy the stripe_cache is 4611 */ 4612 4613 if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) 4614 return 1; 4615 if (conf->quiesce) 4616 return 1; 4617 if (atomic_read(&conf->empty_inactive_list_nr)) 4618 return 1; 4619 4620 return 0; 4621 } 4622 4623 /* We want read requests to align with chunks where possible, 4624 * but write requests don't need to. 4625 */ 4626 static int raid5_mergeable_bvec(struct mddev *mddev, 4627 struct bvec_merge_data *bvm, 4628 struct bio_vec *biovec) 4629 { 4630 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); 4631 int max; 4632 unsigned int chunk_sectors = mddev->chunk_sectors; 4633 unsigned int bio_sectors = bvm->bi_size >> 9; 4634 4635 /* 4636 * always allow writes to be mergeable, read as well if array 4637 * is degraded as we'll go through stripe cache anyway. 4638 */ 4639 if ((bvm->bi_rw & 1) == WRITE || mddev->degraded) 4640 return biovec->bv_len; 4641 4642 if (mddev->new_chunk_sectors < mddev->chunk_sectors) 4643 chunk_sectors = mddev->new_chunk_sectors; 4644 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; 4645 if (max < 0) max = 0; 4646 if (max <= biovec->bv_len && bio_sectors == 0) 4647 return biovec->bv_len; 4648 else 4649 return max; 4650 } 4651 4652 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) 4653 { 4654 sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev); 4655 unsigned int chunk_sectors = mddev->chunk_sectors; 4656 unsigned int bio_sectors = bio_sectors(bio); 4657 4658 if (mddev->new_chunk_sectors < mddev->chunk_sectors) 4659 chunk_sectors = mddev->new_chunk_sectors; 4660 return chunk_sectors >= 4661 ((sector & (chunk_sectors - 1)) + bio_sectors); 4662 } 4663 4664 /* 4665 * add bio to the retry LIFO ( in O(1) ... we are in interrupt ) 4666 * later sampled by raid5d. 
4667 */ 4668 static void add_bio_to_retry(struct bio *bi,struct r5conf *conf) 4669 { 4670 unsigned long flags; 4671 4672 spin_lock_irqsave(&conf->device_lock, flags); 4673 4674 bi->bi_next = conf->retry_read_aligned_list; 4675 conf->retry_read_aligned_list = bi; 4676 4677 spin_unlock_irqrestore(&conf->device_lock, flags); 4678 md_wakeup_thread(conf->mddev->thread); 4679 } 4680 4681 static struct bio *remove_bio_from_retry(struct r5conf *conf) 4682 { 4683 struct bio *bi; 4684 4685 bi = conf->retry_read_aligned; 4686 if (bi) { 4687 conf->retry_read_aligned = NULL; 4688 return bi; 4689 } 4690 bi = conf->retry_read_aligned_list; 4691 if(bi) { 4692 conf->retry_read_aligned_list = bi->bi_next; 4693 bi->bi_next = NULL; 4694 /* 4695 * this sets the active stripe count to 1 and the processed 4696 * stripe count to zero (upper 8 bits) 4697 */ 4698 raid5_set_bi_stripes(bi, 1); /* biased count of active stripes */ 4699 } 4700 4701 return bi; 4702 } 4703 4704 /* 4705 * The "raid5_align_endio" should check if the read succeeded and if it 4706 * did, call bio_endio on the original bio (having bio_put the new bio 4707 * first). 4708 * If the read failed, hand the original bio back for a retry via add_bio_to_retry(). 4709 */ 4710 static void raid5_align_endio(struct bio *bi, int error) 4711 { 4712 struct bio* raid_bi = bi->bi_private; 4713 struct mddev *mddev; 4714 struct r5conf *conf; 4715 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 4716 struct md_rdev *rdev; 4717 4718 bio_put(bi); 4719 4720 rdev = (void*)raid_bi->bi_next; 4721 raid_bi->bi_next = NULL; 4722 mddev = rdev->mddev; 4723 conf = mddev->private; 4724 4725 rdev_dec_pending(rdev, conf->mddev); 4726 4727 if (!error && uptodate) { 4728 trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev), 4729 raid_bi, 0); 4730 bio_endio(raid_bi, 0); 4731 if (atomic_dec_and_test(&conf->active_aligned_reads)) 4732 wake_up(&conf->wait_for_stripe); 4733 return; 4734 } 4735 4736 pr_debug("raid5_align_endio : io error...handing IO for a retry\n"); 4737 4738 add_bio_to_retry(raid_bi, conf); 4739 } 4740 4741 static int bio_fits_rdev(struct bio *bi) 4742 { 4743 struct request_queue *q = bdev_get_queue(bi->bi_bdev); 4744 4745 if (bio_sectors(bi) > queue_max_sectors(q)) 4746 return 0; 4747 blk_recount_segments(q, bi); 4748 if (bi->bi_phys_segments > queue_max_segments(q)) 4749 return 0; 4750 4751 if (q->merge_bvec_fn) 4752 /* it's too hard to apply the merge_bvec_fn at this stage, 4753 * just give up 4754 */ 4755 return 0; 4756 4757 return 1; 4758 } 4759 4760 static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio) 4761 { 4762 struct r5conf *conf = mddev->private; 4763 int dd_idx; 4764 struct bio* align_bi; 4765 struct md_rdev *rdev; 4766 sector_t end_sector; 4767 4768 if (!in_chunk_boundary(mddev, raid_bio)) { 4769 pr_debug("chunk_aligned_read : non aligned\n"); 4770 return 0; 4771 } 4772 /* 4773 * use bio_clone_mddev to make a copy of the bio 4774 */ 4775 align_bi = bio_clone_mddev(raid_bio, GFP_NOIO, mddev); 4776 if (!align_bi) 4777 return 0; 4778 /* 4779 * set bi_end_io to a new function, and set bi_private to the 4780 * original bio.
4781 */ 4782 align_bi->bi_end_io = raid5_align_endio; 4783 align_bi->bi_private = raid_bio; 4784 /* 4785 * compute position 4786 */ 4787 align_bi->bi_iter.bi_sector = 4788 raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, 4789 0, &dd_idx, NULL); 4790 4791 end_sector = bio_end_sector(align_bi); 4792 rcu_read_lock(); 4793 rdev = rcu_dereference(conf->disks[dd_idx].replacement); 4794 if (!rdev || test_bit(Faulty, &rdev->flags) || 4795 rdev->recovery_offset < end_sector) { 4796 rdev = rcu_dereference(conf->disks[dd_idx].rdev); 4797 if (rdev && 4798 (test_bit(Faulty, &rdev->flags) || 4799 !(test_bit(In_sync, &rdev->flags) || 4800 rdev->recovery_offset >= end_sector))) 4801 rdev = NULL; 4802 } 4803 if (rdev) { 4804 sector_t first_bad; 4805 int bad_sectors; 4806 4807 atomic_inc(&rdev->nr_pending); 4808 rcu_read_unlock(); 4809 raid_bio->bi_next = (void*)rdev; 4810 align_bi->bi_bdev = rdev->bdev; 4811 __clear_bit(BIO_SEG_VALID, &align_bi->bi_flags); 4812 4813 if (!bio_fits_rdev(align_bi) || 4814 is_badblock(rdev, align_bi->bi_iter.bi_sector, 4815 bio_sectors(align_bi), 4816 &first_bad, &bad_sectors)) { 4817 /* too big in some way, or has a known bad block */ 4818 bio_put(align_bi); 4819 rdev_dec_pending(rdev, mddev); 4820 return 0; 4821 } 4822 4823 /* No reshape active, so we can trust rdev->data_offset */ 4824 align_bi->bi_iter.bi_sector += rdev->data_offset; 4825 4826 spin_lock_irq(&conf->device_lock); 4827 wait_event_lock_irq(conf->wait_for_stripe, 4828 conf->quiesce == 0, 4829 conf->device_lock); 4830 atomic_inc(&conf->active_aligned_reads); 4831 spin_unlock_irq(&conf->device_lock); 4832 4833 if (mddev->gendisk) 4834 trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev), 4835 align_bi, disk_devt(mddev->gendisk), 4836 raid_bio->bi_iter.bi_sector); 4837 generic_make_request(align_bi); 4838 return 1; 4839 } else { 4840 rcu_read_unlock(); 4841 bio_put(align_bi); 4842 return 0; 4843 } 4844 } 4845 4846 /* __get_priority_stripe - get the next stripe to process 4847 * 4848 * Full stripe writes are allowed to pass preread active stripes up until 4849 * the bypass_threshold is exceeded. In general the bypass_count 4850 * increments when the handle_list is handled before the hold_list; however, it 4851 * will not be incremented when STRIPE_IO_STARTED is sampled set signifying a 4852 * stripe with in flight i/o. The bypass_count will be reset when the 4853 * head of the hold_list has changed, i.e. the head was promoted to the 4854 * handle_list. 4855 */ 4856 static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group) 4857 { 4858 struct stripe_head *sh = NULL, *tmp; 4859 struct list_head *handle_list = NULL; 4860 struct r5worker_group *wg = NULL; 4861 4862 if (conf->worker_cnt_per_group == 0) { 4863 handle_list = &conf->handle_list; 4864 } else if (group != ANY_GROUP) { 4865 handle_list = &conf->worker_groups[group].handle_list; 4866 wg = &conf->worker_groups[group]; 4867 } else { 4868 int i; 4869 for (i = 0; i < conf->group_cnt; i++) { 4870 handle_list = &conf->worker_groups[i].handle_list; 4871 wg = &conf->worker_groups[i]; 4872 if (!list_empty(handle_list)) 4873 break; 4874 } 4875 } 4876 4877 pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n", 4878 __func__, 4879 list_empty(handle_list) ? "empty" : "busy", 4880 list_empty(&conf->hold_list) ? 
"empty" : "busy", 4881 atomic_read(&conf->pending_full_writes), conf->bypass_count); 4882 4883 if (!list_empty(handle_list)) { 4884 sh = list_entry(handle_list->next, typeof(*sh), lru); 4885 4886 if (list_empty(&conf->hold_list)) 4887 conf->bypass_count = 0; 4888 else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) { 4889 if (conf->hold_list.next == conf->last_hold) 4890 conf->bypass_count++; 4891 else { 4892 conf->last_hold = conf->hold_list.next; 4893 conf->bypass_count -= conf->bypass_threshold; 4894 if (conf->bypass_count < 0) 4895 conf->bypass_count = 0; 4896 } 4897 } 4898 } else if (!list_empty(&conf->hold_list) && 4899 ((conf->bypass_threshold && 4900 conf->bypass_count > conf->bypass_threshold) || 4901 atomic_read(&conf->pending_full_writes) == 0)) { 4902 4903 list_for_each_entry(tmp, &conf->hold_list, lru) { 4904 if (conf->worker_cnt_per_group == 0 || 4905 group == ANY_GROUP || 4906 !cpu_online(tmp->cpu) || 4907 cpu_to_group(tmp->cpu) == group) { 4908 sh = tmp; 4909 break; 4910 } 4911 } 4912 4913 if (sh) { 4914 conf->bypass_count -= conf->bypass_threshold; 4915 if (conf->bypass_count < 0) 4916 conf->bypass_count = 0; 4917 } 4918 wg = NULL; 4919 } 4920 4921 if (!sh) 4922 return NULL; 4923 4924 if (wg) { 4925 wg->stripes_cnt--; 4926 sh->group = NULL; 4927 } 4928 list_del_init(&sh->lru); 4929 BUG_ON(atomic_inc_return(&sh->count) != 1); 4930 return sh; 4931 } 4932 4933 struct raid5_plug_cb { 4934 struct blk_plug_cb cb; 4935 struct list_head list; 4936 struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS]; 4937 }; 4938 4939 static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule) 4940 { 4941 struct raid5_plug_cb *cb = container_of( 4942 blk_cb, struct raid5_plug_cb, cb); 4943 struct stripe_head *sh; 4944 struct mddev *mddev = cb->cb.data; 4945 struct r5conf *conf = mddev->private; 4946 int cnt = 0; 4947 int hash; 4948 4949 if (cb->list.next && !list_empty(&cb->list)) { 4950 spin_lock_irq(&conf->device_lock); 4951 while (!list_empty(&cb->list)) { 4952 sh = list_first_entry(&cb->list, struct stripe_head, lru); 4953 list_del_init(&sh->lru); 4954 /* 4955 * avoid race release_stripe_plug() sees 4956 * STRIPE_ON_UNPLUG_LIST clear but the stripe 4957 * is still in our list 4958 */ 4959 smp_mb__before_atomic(); 4960 clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state); 4961 /* 4962 * STRIPE_ON_RELEASE_LIST could be set here. 
In that 4963 * case, the count is always > 1 here 4964 */ 4965 hash = sh->hash_lock_index; 4966 __release_stripe(conf, sh, &cb->temp_inactive_list[hash]); 4967 cnt++; 4968 } 4969 spin_unlock_irq(&conf->device_lock); 4970 } 4971 release_inactive_stripe_list(conf, cb->temp_inactive_list, 4972 NR_STRIPE_HASH_LOCKS); 4973 if (mddev->queue) 4974 trace_block_unplug(mddev->queue, cnt, !from_schedule); 4975 kfree(cb); 4976 } 4977 4978 static void release_stripe_plug(struct mddev *mddev, 4979 struct stripe_head *sh) 4980 { 4981 struct blk_plug_cb *blk_cb = blk_check_plugged( 4982 raid5_unplug, mddev, 4983 sizeof(struct raid5_plug_cb)); 4984 struct raid5_plug_cb *cb; 4985 4986 if (!blk_cb) { 4987 release_stripe(sh); 4988 return; 4989 } 4990 4991 cb = container_of(blk_cb, struct raid5_plug_cb, cb); 4992 4993 if (cb->list.next == NULL) { 4994 int i; 4995 INIT_LIST_HEAD(&cb->list); 4996 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) 4997 INIT_LIST_HEAD(cb->temp_inactive_list + i); 4998 } 4999 5000 if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)) 5001 list_add_tail(&sh->lru, &cb->list); 5002 else 5003 release_stripe(sh); 5004 } 5005 5006 static void make_discard_request(struct mddev *mddev, struct bio *bi) 5007 { 5008 struct r5conf *conf = mddev->private; 5009 sector_t logical_sector, last_sector; 5010 struct stripe_head *sh; 5011 int remaining; 5012 int stripe_sectors; 5013 5014 if (mddev->reshape_position != MaxSector) 5015 /* Skip discard while reshape is happening */ 5016 return; 5017 5018 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1); 5019 last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9); 5020 5021 bi->bi_next = NULL; 5022 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ 5023 5024 stripe_sectors = conf->chunk_sectors * 5025 (conf->raid_disks - conf->max_degraded); 5026 logical_sector = DIV_ROUND_UP_SECTOR_T(logical_sector, 5027 stripe_sectors); 5028 sector_div(last_sector, stripe_sectors); 5029 5030 logical_sector *= conf->chunk_sectors; 5031 last_sector *= conf->chunk_sectors; 5032 5033 for (; logical_sector < last_sector; 5034 logical_sector += STRIPE_SECTORS) { 5035 DEFINE_WAIT(w); 5036 int d; 5037 again: 5038 sh = get_active_stripe(conf, logical_sector, 0, 0, 0); 5039 prepare_to_wait(&conf->wait_for_overlap, &w, 5040 TASK_UNINTERRUPTIBLE); 5041 set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); 5042 if (test_bit(STRIPE_SYNCING, &sh->state)) { 5043 release_stripe(sh); 5044 schedule(); 5045 goto again; 5046 } 5047 clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); 5048 spin_lock_irq(&sh->stripe_lock); 5049 for (d = 0; d < conf->raid_disks; d++) { 5050 if (d == sh->pd_idx || d == sh->qd_idx) 5051 continue; 5052 if (sh->dev[d].towrite || sh->dev[d].toread) { 5053 set_bit(R5_Overlap, &sh->dev[d].flags); 5054 spin_unlock_irq(&sh->stripe_lock); 5055 release_stripe(sh); 5056 schedule(); 5057 goto again; 5058 } 5059 } 5060 set_bit(STRIPE_DISCARD, &sh->state); 5061 finish_wait(&conf->wait_for_overlap, &w); 5062 sh->overwrite_disks = 0; 5063 for (d = 0; d < conf->raid_disks; d++) { 5064 if (d == sh->pd_idx || d == sh->qd_idx) 5065 continue; 5066 sh->dev[d].towrite = bi; 5067 set_bit(R5_OVERWRITE, &sh->dev[d].flags); 5068 raid5_inc_bi_active_stripes(bi); 5069 sh->overwrite_disks++; 5070 } 5071 spin_unlock_irq(&sh->stripe_lock); 5072 if (conf->mddev->bitmap) { 5073 for (d = 0; 5074 d < conf->raid_disks - conf->max_degraded; 5075 d++) 5076 bitmap_startwrite(mddev->bitmap, 5077 sh->sector, 5078 STRIPE_SECTORS, 5079 0); 5080 sh->bm_seq = 
conf->seq_flush + 1; 5081 set_bit(STRIPE_BIT_DELAY, &sh->state); 5082 } 5083 5084 set_bit(STRIPE_HANDLE, &sh->state); 5085 clear_bit(STRIPE_DELAYED, &sh->state); 5086 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 5087 atomic_inc(&conf->preread_active_stripes); 5088 release_stripe_plug(mddev, sh); 5089 } 5090 5091 remaining = raid5_dec_bi_active_stripes(bi); 5092 if (remaining == 0) { 5093 md_write_end(mddev); 5094 bio_endio(bi, 0); 5095 } 5096 } 5097 5098 static void make_request(struct mddev *mddev, struct bio * bi) 5099 { 5100 struct r5conf *conf = mddev->private; 5101 int dd_idx; 5102 sector_t new_sector; 5103 sector_t logical_sector, last_sector; 5104 struct stripe_head *sh; 5105 const int rw = bio_data_dir(bi); 5106 int remaining; 5107 DEFINE_WAIT(w); 5108 bool do_prepare; 5109 5110 if (unlikely(bi->bi_rw & REQ_FLUSH)) { 5111 md_flush_request(mddev, bi); 5112 return; 5113 } 5114 5115 md_write_start(mddev, bi); 5116 5117 /* 5118 * If array is degraded, better not do chunk aligned read because 5119 * later we might have to read it again in order to reconstruct 5120 * data on failed drives. 5121 */ 5122 if (rw == READ && mddev->degraded == 0 && 5123 mddev->reshape_position == MaxSector && 5124 chunk_aligned_read(mddev,bi)) 5125 return; 5126 5127 if (unlikely(bi->bi_rw & REQ_DISCARD)) { 5128 make_discard_request(mddev, bi); 5129 return; 5130 } 5131 5132 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1); 5133 last_sector = bio_end_sector(bi); 5134 bi->bi_next = NULL; 5135 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ 5136 5137 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); 5138 for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { 5139 int previous; 5140 int seq; 5141 5142 do_prepare = false; 5143 retry: 5144 seq = read_seqcount_begin(&conf->gen_lock); 5145 previous = 0; 5146 if (do_prepare) 5147 prepare_to_wait(&conf->wait_for_overlap, &w, 5148 TASK_UNINTERRUPTIBLE); 5149 if (unlikely(conf->reshape_progress != MaxSector)) { 5150 /* spinlock is needed as reshape_progress may be 5151 * 64bit on a 32bit platform, and so it might be 5152 * possible to see a half-updated value 5153 * Of course reshape_progress could change after 5154 * the lock is dropped, so once we get a reference 5155 * to the stripe that we think it is, we will have 5156 * to check again. 5157 */ 5158 spin_lock_irq(&conf->device_lock); 5159 if (mddev->reshape_backwards 5160 ? logical_sector < conf->reshape_progress 5161 : logical_sector >= conf->reshape_progress) { 5162 previous = 1; 5163 } else { 5164 if (mddev->reshape_backwards 5165 ? logical_sector < conf->reshape_safe 5166 : logical_sector >= conf->reshape_safe) { 5167 spin_unlock_irq(&conf->device_lock); 5168 schedule(); 5169 do_prepare = true; 5170 goto retry; 5171 } 5172 } 5173 spin_unlock_irq(&conf->device_lock); 5174 } 5175 5176 new_sector = raid5_compute_sector(conf, logical_sector, 5177 previous, 5178 &dd_idx, NULL); 5179 pr_debug("raid456: make_request, sector %llu logical %llu\n", 5180 (unsigned long long)new_sector, 5181 (unsigned long long)logical_sector); 5182 5183 sh = get_active_stripe(conf, new_sector, previous, 5184 (bi->bi_rw&RWA_MASK), 0); 5185 if (sh) { 5186 if (unlikely(previous)) { 5187 /* expansion might have moved on while waiting for a 5188 * stripe, so we must do the range check again. 
5189 * Expansion could still move past after this 5190 * test, but as we are holding a reference to 5191 * 'sh', we know that if that happens, 5192 * STRIPE_EXPANDING will get set and the expansion 5193 * won't proceed until we finish with the stripe. 5194 */ 5195 int must_retry = 0; 5196 spin_lock_irq(&conf->device_lock); 5197 if (mddev->reshape_backwards 5198 ? logical_sector >= conf->reshape_progress 5199 : logical_sector < conf->reshape_progress) 5200 /* mismatch, need to try again */ 5201 must_retry = 1; 5202 spin_unlock_irq(&conf->device_lock); 5203 if (must_retry) { 5204 release_stripe(sh); 5205 schedule(); 5206 do_prepare = true; 5207 goto retry; 5208 } 5209 } 5210 if (read_seqcount_retry(&conf->gen_lock, seq)) { 5211 /* Might have got the wrong stripe_head 5212 * by accident 5213 */ 5214 release_stripe(sh); 5215 goto retry; 5216 } 5217 5218 if (rw == WRITE && 5219 logical_sector >= mddev->suspend_lo && 5220 logical_sector < mddev->suspend_hi) { 5221 release_stripe(sh); 5222 /* As the suspend_* range is controlled by 5223 * userspace, we want an interruptible 5224 * wait. 5225 */ 5226 flush_signals(current); 5227 prepare_to_wait(&conf->wait_for_overlap, 5228 &w, TASK_INTERRUPTIBLE); 5229 if (logical_sector >= mddev->suspend_lo && 5230 logical_sector < mddev->suspend_hi) { 5231 schedule(); 5232 do_prepare = true; 5233 } 5234 goto retry; 5235 } 5236 5237 if (test_bit(STRIPE_EXPANDING, &sh->state) || 5238 !add_stripe_bio(sh, bi, dd_idx, rw, previous)) { 5239 /* Stripe is busy expanding or 5240 * add failed due to overlap. Flush everything 5241 * and wait a while 5242 */ 5243 md_wakeup_thread(mddev->thread); 5244 release_stripe(sh); 5245 schedule(); 5246 do_prepare = true; 5247 goto retry; 5248 } 5249 set_bit(STRIPE_HANDLE, &sh->state); 5250 clear_bit(STRIPE_DELAYED, &sh->state); 5251 if ((!sh->batch_head || sh == sh->batch_head) && 5252 (bi->bi_rw & REQ_SYNC) && 5253 !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 5254 atomic_inc(&conf->preread_active_stripes); 5255 release_stripe_plug(mddev, sh); 5256 } else { 5257 /* cannot get stripe for read-ahead, just give-up */ 5258 clear_bit(BIO_UPTODATE, &bi->bi_flags); 5259 break; 5260 } 5261 } 5262 finish_wait(&conf->wait_for_overlap, &w); 5263 5264 remaining = raid5_dec_bi_active_stripes(bi); 5265 if (remaining == 0) { 5266 5267 if ( rw == WRITE ) 5268 md_write_end(mddev); 5269 5270 trace_block_bio_complete(bdev_get_queue(bi->bi_bdev), 5271 bi, 0); 5272 bio_endio(bi, 0); 5273 } 5274 } 5275 5276 static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks); 5277 5278 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped) 5279 { 5280 /* reshaping is quite different to recovery/resync so it is 5281 * handled quite separately ... here. 5282 * 5283 * On each call to sync_request, we gather one chunk worth of 5284 * destination stripes and flag them as expanding. 5285 * Then we find all the source stripes and request reads. 5286 * As the reads complete, handle_stripe will copy the data 5287 * into the destination stripe and release that stripe. 
5288 */ 5289 struct r5conf *conf = mddev->private; 5290 struct stripe_head *sh; 5291 sector_t first_sector, last_sector; 5292 int raid_disks = conf->previous_raid_disks; 5293 int data_disks = raid_disks - conf->max_degraded; 5294 int new_data_disks = conf->raid_disks - conf->max_degraded; 5295 int i; 5296 int dd_idx; 5297 sector_t writepos, readpos, safepos; 5298 sector_t stripe_addr; 5299 int reshape_sectors; 5300 struct list_head stripes; 5301 5302 if (sector_nr == 0) { 5303 /* If restarting in the middle, skip the initial sectors */ 5304 if (mddev->reshape_backwards && 5305 conf->reshape_progress < raid5_size(mddev, 0, 0)) { 5306 sector_nr = raid5_size(mddev, 0, 0) 5307 - conf->reshape_progress; 5308 } else if (!mddev->reshape_backwards && 5309 conf->reshape_progress > 0) 5310 sector_nr = conf->reshape_progress; 5311 sector_div(sector_nr, new_data_disks); 5312 if (sector_nr) { 5313 mddev->curr_resync_completed = sector_nr; 5314 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 5315 *skipped = 1; 5316 return sector_nr; 5317 } 5318 } 5319 5320 /* We need to process a full chunk at a time. 5321 * If old and new chunk sizes differ, we need to process the 5322 * largest of these 5323 */ 5324 if (mddev->new_chunk_sectors > mddev->chunk_sectors) 5325 reshape_sectors = mddev->new_chunk_sectors; 5326 else 5327 reshape_sectors = mddev->chunk_sectors; 5328 5329 /* We update the metadata at least every 10 seconds, or when 5330 * the data about to be copied would over-write the source of 5331 * the data at the front of the range. i.e. one new_stripe 5332 * along from reshape_progress new_maps to after where 5333 * reshape_safe old_maps to 5334 */ 5335 writepos = conf->reshape_progress; 5336 sector_div(writepos, new_data_disks); 5337 readpos = conf->reshape_progress; 5338 sector_div(readpos, data_disks); 5339 safepos = conf->reshape_safe; 5340 sector_div(safepos, data_disks); 5341 if (mddev->reshape_backwards) { 5342 writepos -= min_t(sector_t, reshape_sectors, writepos); 5343 readpos += reshape_sectors; 5344 safepos += reshape_sectors; 5345 } else { 5346 writepos += reshape_sectors; 5347 readpos -= min_t(sector_t, reshape_sectors, readpos); 5348 safepos -= min_t(sector_t, reshape_sectors, safepos); 5349 } 5350 5351 /* Having calculated the 'writepos' possibly use it 5352 * to set 'stripe_addr' which is where we will write to. 5353 */ 5354 if (mddev->reshape_backwards) { 5355 BUG_ON(conf->reshape_progress == 0); 5356 stripe_addr = writepos; 5357 BUG_ON((mddev->dev_sectors & 5358 ~((sector_t)reshape_sectors - 1)) 5359 - reshape_sectors - stripe_addr 5360 != sector_nr); 5361 } else { 5362 BUG_ON(writepos != sector_nr + reshape_sectors); 5363 stripe_addr = sector_nr; 5364 } 5365 5366 /* 'writepos' is the most advanced device address we might write. 5367 * 'readpos' is the least advanced device address we might read. 5368 * 'safepos' is the least address recorded in the metadata as having 5369 * been reshaped. 5370 * If there is a min_offset_diff, these are adjusted either by 5371 * increasing the safepos/readpos if diff is negative, or 5372 * increasing writepos if diff is positive. 5373 * If 'readpos' is then behind 'writepos', there is no way that we can 5374 * ensure safety in the face of a crash - that must be done by userspace 5375 * making a backup of the data. So in that case there is no particular 5376 * rush to update metadata. 
5377 * Otherwise if 'safepos' is behind 'writepos', then we really need to 5378 * update the metadata to advance 'safepos' to match 'readpos' so that 5379 * we can be safe in the event of a crash. 5380 * So we insist on updating metadata if safepos is behind writepos and 5381 * readpos is beyond writepos. 5382 * In any case, update the metadata every 10 seconds. 5383 * Maybe that number should be configurable, but I'm not sure it is 5384 * worth it.... maybe it could be a multiple of safemode_delay??? 5385 */ 5386 if (conf->min_offset_diff < 0) { 5387 safepos += -conf->min_offset_diff; 5388 readpos += -conf->min_offset_diff; 5389 } else 5390 writepos += conf->min_offset_diff; 5391 5392 if ((mddev->reshape_backwards 5393 ? (safepos > writepos && readpos < writepos) 5394 : (safepos < writepos && readpos > writepos)) || 5395 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { 5396 /* Cannot proceed until we've updated the superblock... */ 5397 wait_event(conf->wait_for_overlap, 5398 atomic_read(&conf->reshape_stripes)==0 5399 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); 5400 if (atomic_read(&conf->reshape_stripes) != 0) 5401 return 0; 5402 mddev->reshape_position = conf->reshape_progress; 5403 mddev->curr_resync_completed = sector_nr; 5404 conf->reshape_checkpoint = jiffies; 5405 set_bit(MD_CHANGE_DEVS, &mddev->flags); 5406 md_wakeup_thread(mddev->thread); 5407 wait_event(mddev->sb_wait, mddev->flags == 0 || 5408 test_bit(MD_RECOVERY_INTR, &mddev->recovery)); 5409 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 5410 return 0; 5411 spin_lock_irq(&conf->device_lock); 5412 conf->reshape_safe = mddev->reshape_position; 5413 spin_unlock_irq(&conf->device_lock); 5414 wake_up(&conf->wait_for_overlap); 5415 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 5416 } 5417 5418 INIT_LIST_HEAD(&stripes); 5419 for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) { 5420 int j; 5421 int skipped_disk = 0; 5422 sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1); 5423 set_bit(STRIPE_EXPANDING, &sh->state); 5424 atomic_inc(&conf->reshape_stripes); 5425 /* If any of this stripe is beyond the end of the old 5426 * array, then we need to zero those blocks 5427 */ 5428 for (j=sh->disks; j--;) { 5429 sector_t s; 5430 if (j == sh->pd_idx) 5431 continue; 5432 if (conf->level == 6 && 5433 j == sh->qd_idx) 5434 continue; 5435 s = compute_blocknr(sh, j, 0); 5436 if (s < raid5_size(mddev, 0, 0)) { 5437 skipped_disk = 1; 5438 continue; 5439 } 5440 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE); 5441 set_bit(R5_Expanded, &sh->dev[j].flags); 5442 set_bit(R5_UPTODATE, &sh->dev[j].flags); 5443 } 5444 if (!skipped_disk) { 5445 set_bit(STRIPE_EXPAND_READY, &sh->state); 5446 set_bit(STRIPE_HANDLE, &sh->state); 5447 } 5448 list_add(&sh->lru, &stripes); 5449 } 5450 spin_lock_irq(&conf->device_lock); 5451 if (mddev->reshape_backwards) 5452 conf->reshape_progress -= reshape_sectors * new_data_disks; 5453 else 5454 conf->reshape_progress += reshape_sectors * new_data_disks; 5455 spin_unlock_irq(&conf->device_lock); 5456 /* Ok, those stripe are ready. We can start scheduling 5457 * reads on the source stripes. 5458 * The source stripes are determined by mapping the first and last 5459 * block on the destination stripes. 
5460 */ 5461 first_sector = 5462 raid5_compute_sector(conf, stripe_addr*(new_data_disks), 5463 1, &dd_idx, NULL); 5464 last_sector = 5465 raid5_compute_sector(conf, ((stripe_addr+reshape_sectors) 5466 * new_data_disks - 1), 5467 1, &dd_idx, NULL); 5468 if (last_sector >= mddev->dev_sectors) 5469 last_sector = mddev->dev_sectors - 1; 5470 while (first_sector <= last_sector) { 5471 sh = get_active_stripe(conf, first_sector, 1, 0, 1); 5472 set_bit(STRIPE_EXPAND_SOURCE, &sh->state); 5473 set_bit(STRIPE_HANDLE, &sh->state); 5474 release_stripe(sh); 5475 first_sector += STRIPE_SECTORS; 5476 } 5477 /* Now that the sources are clearly marked, we can release 5478 * the destination stripes 5479 */ 5480 while (!list_empty(&stripes)) { 5481 sh = list_entry(stripes.next, struct stripe_head, lru); 5482 list_del_init(&sh->lru); 5483 release_stripe(sh); 5484 } 5485 /* If this takes us to the resync_max point where we have to pause, 5486 * then we need to write out the superblock. 5487 */ 5488 sector_nr += reshape_sectors; 5489 if ((sector_nr - mddev->curr_resync_completed) * 2 5490 >= mddev->resync_max - mddev->curr_resync_completed) { 5491 /* Cannot proceed until we've updated the superblock... */ 5492 wait_event(conf->wait_for_overlap, 5493 atomic_read(&conf->reshape_stripes) == 0 5494 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); 5495 if (atomic_read(&conf->reshape_stripes) != 0) 5496 goto ret; 5497 mddev->reshape_position = conf->reshape_progress; 5498 mddev->curr_resync_completed = sector_nr; 5499 conf->reshape_checkpoint = jiffies; 5500 set_bit(MD_CHANGE_DEVS, &mddev->flags); 5501 md_wakeup_thread(mddev->thread); 5502 wait_event(mddev->sb_wait, 5503 !test_bit(MD_CHANGE_DEVS, &mddev->flags) 5504 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); 5505 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 5506 goto ret; 5507 spin_lock_irq(&conf->device_lock); 5508 conf->reshape_safe = mddev->reshape_position; 5509 spin_unlock_irq(&conf->device_lock); 5510 wake_up(&conf->wait_for_overlap); 5511 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 5512 } 5513 ret: 5514 return reshape_sectors; 5515 } 5516 5517 static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped) 5518 { 5519 struct r5conf *conf = mddev->private; 5520 struct stripe_head *sh; 5521 sector_t max_sector = mddev->dev_sectors; 5522 sector_t sync_blocks; 5523 int still_degraded = 0; 5524 int i; 5525 5526 if (sector_nr >= max_sector) { 5527 /* just being told to finish up .. nothing much to do */ 5528 5529 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { 5530 end_reshape(conf); 5531 return 0; 5532 } 5533 5534 if (mddev->curr_resync < max_sector) /* aborted */ 5535 bitmap_end_sync(mddev->bitmap, mddev->curr_resync, 5536 &sync_blocks, 1); 5537 else /* completed sync */ 5538 conf->fullsync = 0; 5539 bitmap_close_sync(mddev->bitmap); 5540 5541 return 0; 5542 } 5543 5544 /* Allow raid5_quiesce to complete */ 5545 wait_event(conf->wait_for_overlap, conf->quiesce != 2); 5546 5547 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 5548 return reshape_request(mddev, sector_nr, skipped); 5549 5550 /* No need to check resync_max as we never do more than one 5551 * stripe, and as resync_max will always be on a chunk boundary, 5552 * if the check in md_do_sync didn't fire, there is no chance 5553 * of overstepping resync_max here 5554 */ 5555 5556 /* if there is too many failed drives and we are trying 5557 * to resync, then assert that we are finished, because there is 5558 * nothing we can do. 
5559 */ 5560 if (mddev->degraded >= conf->max_degraded && 5561 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 5562 sector_t rv = mddev->dev_sectors - sector_nr; 5563 *skipped = 1; 5564 return rv; 5565 } 5566 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && 5567 !conf->fullsync && 5568 !bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && 5569 sync_blocks >= STRIPE_SECTORS) { 5570 /* we can skip this block, and probably more */ 5571 sync_blocks /= STRIPE_SECTORS; 5572 *skipped = 1; 5573 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */ 5574 } 5575 5576 bitmap_cond_end_sync(mddev->bitmap, sector_nr); 5577 5578 sh = get_active_stripe(conf, sector_nr, 0, 1, 0); 5579 if (sh == NULL) { 5580 sh = get_active_stripe(conf, sector_nr, 0, 0, 0); 5581 /* make sure we don't swamp the stripe cache if someone else 5582 * is trying to get access 5583 */ 5584 schedule_timeout_uninterruptible(1); 5585 } 5586 /* Need to check if array will still be degraded after recovery/resync 5587 * Note in case of > 1 drive failures it's possible we're rebuilding 5588 * one drive while leaving another faulty drive in the array. 5589 */ 5590 rcu_read_lock(); 5591 for (i = 0; i < conf->raid_disks; i++) { 5592 struct md_rdev *rdev = ACCESS_ONCE(conf->disks[i].rdev); 5593 5594 if (rdev == NULL || test_bit(Faulty, &rdev->flags)) 5595 still_degraded = 1; 5596 } 5597 rcu_read_unlock(); 5598 5599 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); 5600 5601 set_bit(STRIPE_SYNC_REQUESTED, &sh->state); 5602 set_bit(STRIPE_HANDLE, &sh->state); 5603 5604 release_stripe(sh); 5605 5606 return STRIPE_SECTORS; 5607 } 5608 5609 static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) 5610 { 5611 /* We may not be able to submit a whole bio at once as there 5612 * may not be enough stripe_heads available. 5613 * We cannot pre-allocate enough stripe_heads as we may need 5614 * more than exist in the cache (if we allow ever larger chunks). 5615 * So we do one stripe head at a time and record via 5616 * raid5_set_bi_processed_stripes() (overloading ->bi_phys_segments) how many have been done. 5617 * 5618 * We *know* that this entire raid_bio is in one chunk, so 5619 * there will be only one 'dd_idx' and we only need one call to raid5_compute_sector.
5620 */ 5621 struct stripe_head *sh; 5622 int dd_idx; 5623 sector_t sector, logical_sector, last_sector; 5624 int scnt = 0; 5625 int remaining; 5626 int handled = 0; 5627 5628 logical_sector = raid_bio->bi_iter.bi_sector & 5629 ~((sector_t)STRIPE_SECTORS-1); 5630 sector = raid5_compute_sector(conf, logical_sector, 5631 0, &dd_idx, NULL); 5632 last_sector = bio_end_sector(raid_bio); 5633 5634 for (; logical_sector < last_sector; 5635 logical_sector += STRIPE_SECTORS, 5636 sector += STRIPE_SECTORS, 5637 scnt++) { 5638 5639 if (scnt < raid5_bi_processed_stripes(raid_bio)) 5640 /* already done this stripe */ 5641 continue; 5642 5643 sh = get_active_stripe(conf, sector, 0, 1, 1); 5644 5645 if (!sh) { 5646 /* failed to get a stripe - must wait */ 5647 raid5_set_bi_processed_stripes(raid_bio, scnt); 5648 conf->retry_read_aligned = raid_bio; 5649 return handled; 5650 } 5651 5652 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) { 5653 release_stripe(sh); 5654 raid5_set_bi_processed_stripes(raid_bio, scnt); 5655 conf->retry_read_aligned = raid_bio; 5656 return handled; 5657 } 5658 5659 set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags); 5660 handle_stripe(sh); 5661 release_stripe(sh); 5662 handled++; 5663 } 5664 remaining = raid5_dec_bi_active_stripes(raid_bio); 5665 if (remaining == 0) { 5666 trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev), 5667 raid_bio, 0); 5668 bio_endio(raid_bio, 0); 5669 } 5670 if (atomic_dec_and_test(&conf->active_aligned_reads)) 5671 wake_up(&conf->wait_for_stripe); 5672 return handled; 5673 } 5674 5675 static int handle_active_stripes(struct r5conf *conf, int group, 5676 struct r5worker *worker, 5677 struct list_head *temp_inactive_list) 5678 { 5679 struct stripe_head *batch[MAX_STRIPE_BATCH], *sh; 5680 int i, batch_size = 0, hash; 5681 bool release_inactive = false; 5682 5683 while (batch_size < MAX_STRIPE_BATCH && 5684 (sh = __get_priority_stripe(conf, group)) != NULL) 5685 batch[batch_size++] = sh; 5686 5687 if (batch_size == 0) { 5688 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) 5689 if (!list_empty(temp_inactive_list + i)) 5690 break; 5691 if (i == NR_STRIPE_HASH_LOCKS) 5692 return batch_size; 5693 release_inactive = true; 5694 } 5695 spin_unlock_irq(&conf->device_lock); 5696 5697 release_inactive_stripe_list(conf, temp_inactive_list, 5698 NR_STRIPE_HASH_LOCKS); 5699 5700 if (release_inactive) { 5701 spin_lock_irq(&conf->device_lock); 5702 return 0; 5703 } 5704 5705 for (i = 0; i < batch_size; i++) 5706 handle_stripe(batch[i]); 5707 5708 cond_resched(); 5709 5710 spin_lock_irq(&conf->device_lock); 5711 for (i = 0; i < batch_size; i++) { 5712 hash = batch[i]->hash_lock_index; 5713 __release_stripe(conf, batch[i], &temp_inactive_list[hash]); 5714 } 5715 return batch_size; 5716 } 5717 5718 static void raid5_do_work(struct work_struct *work) 5719 { 5720 struct r5worker *worker = container_of(work, struct r5worker, work); 5721 struct r5worker_group *group = worker->group; 5722 struct r5conf *conf = group->conf; 5723 int group_id = group - conf->worker_groups; 5724 int handled; 5725 struct blk_plug plug; 5726 5727 pr_debug("+++ raid5worker active\n"); 5728 5729 blk_start_plug(&plug); 5730 handled = 0; 5731 spin_lock_irq(&conf->device_lock); 5732 while (1) { 5733 int batch_size, released; 5734 5735 released = release_stripe_list(conf, worker->temp_inactive_list); 5736 5737 batch_size = handle_active_stripes(conf, group_id, worker, 5738 worker->temp_inactive_list); 5739 worker->working = false; 5740 if (!batch_size && !released) 5741 break; 5742 handled += batch_size; 
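/* Note: unlike raid5d() below, this per-group worker loop only handles stripes; flushing bitmap updates and retrying aligned reads are left to the main raid5d thread. */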
5743 } 5744 pr_debug("%d stripes handled\n", handled); 5745 5746 spin_unlock_irq(&conf->device_lock); 5747 blk_finish_plug(&plug); 5748 5749 pr_debug("--- raid5worker inactive\n"); 5750 } 5751 5752 /* 5753 * This is our raid5 kernel thread. 5754 * 5755 * We scan the hash table for stripes which can be handled now. 5756 * During the scan, completed stripes are saved for us by the interrupt 5757 * handler, so that they will not have to wait for our next wakeup. 5758 */ 5759 static void raid5d(struct md_thread *thread) 5760 { 5761 struct mddev *mddev = thread->mddev; 5762 struct r5conf *conf = mddev->private; 5763 int handled; 5764 struct blk_plug plug; 5765 5766 pr_debug("+++ raid5d active\n"); 5767 5768 md_check_recovery(mddev); 5769 5770 blk_start_plug(&plug); 5771 handled = 0; 5772 spin_lock_irq(&conf->device_lock); 5773 while (1) { 5774 struct bio *bio; 5775 int batch_size, released; 5776 5777 released = release_stripe_list(conf, conf->temp_inactive_list); 5778 if (released) 5779 clear_bit(R5_DID_ALLOC, &conf->cache_state); 5780 5781 if ( 5782 !list_empty(&conf->bitmap_list)) { 5783 /* Now is a good time to flush some bitmap updates */ 5784 conf->seq_flush++; 5785 spin_unlock_irq(&conf->device_lock); 5786 bitmap_unplug(mddev->bitmap); 5787 spin_lock_irq(&conf->device_lock); 5788 conf->seq_write = conf->seq_flush; 5789 activate_bit_delay(conf, conf->temp_inactive_list); 5790 } 5791 raid5_activate_delayed(conf); 5792 5793 while ((bio = remove_bio_from_retry(conf))) { 5794 int ok; 5795 spin_unlock_irq(&conf->device_lock); 5796 ok = retry_aligned_read(conf, bio); 5797 spin_lock_irq(&conf->device_lock); 5798 if (!ok) 5799 break; 5800 handled++; 5801 } 5802 5803 batch_size = handle_active_stripes(conf, ANY_GROUP, NULL, 5804 conf->temp_inactive_list); 5805 if (!batch_size && !released) 5806 break; 5807 handled += batch_size; 5808 5809 if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) { 5810 spin_unlock_irq(&conf->device_lock); 5811 md_check_recovery(mddev); 5812 spin_lock_irq(&conf->device_lock); 5813 } 5814 } 5815 pr_debug("%d stripes handled\n", handled); 5816 5817 spin_unlock_irq(&conf->device_lock); 5818 if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state)) { 5819 grow_one_stripe(conf, __GFP_NOWARN); 5820 /* Set flag even if allocation failed. 
This helps 5821 * slow down allocation requests when mem is short 5822 */ 5823 set_bit(R5_DID_ALLOC, &conf->cache_state); 5824 } 5825 5826 async_tx_issue_pending_all(); 5827 blk_finish_plug(&plug); 5828 5829 pr_debug("--- raid5d inactive\n"); 5830 } 5831 5832 static ssize_t 5833 raid5_show_stripe_cache_size(struct mddev *mddev, char *page) 5834 { 5835 struct r5conf *conf; 5836 int ret = 0; 5837 spin_lock(&mddev->lock); 5838 conf = mddev->private; 5839 if (conf) 5840 ret = sprintf(page, "%d\n", conf->min_nr_stripes); 5841 spin_unlock(&mddev->lock); 5842 return ret; 5843 } 5844 5845 int 5846 raid5_set_cache_size(struct mddev *mddev, int size) 5847 { 5848 struct r5conf *conf = mddev->private; 5849 int err; 5850 5851 if (size <= 16 || size > 32768) 5852 return -EINVAL; 5853 5854 conf->min_nr_stripes = size; 5855 while (size < conf->max_nr_stripes && 5856 drop_one_stripe(conf)) 5857 ; 5858 5859 5860 err = md_allow_write(mddev); 5861 if (err) 5862 return err; 5863 5864 while (size > conf->max_nr_stripes) 5865 if (!grow_one_stripe(conf, GFP_KERNEL)) 5866 break; 5867 5868 return 0; 5869 } 5870 EXPORT_SYMBOL(raid5_set_cache_size); 5871 5872 static ssize_t 5873 raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len) 5874 { 5875 struct r5conf *conf; 5876 unsigned long new; 5877 int err; 5878 5879 if (len >= PAGE_SIZE) 5880 return -EINVAL; 5881 if (kstrtoul(page, 10, &new)) 5882 return -EINVAL; 5883 err = mddev_lock(mddev); 5884 if (err) 5885 return err; 5886 conf = mddev->private; 5887 if (!conf) 5888 err = -ENODEV; 5889 else 5890 err = raid5_set_cache_size(mddev, new); 5891 mddev_unlock(mddev); 5892 5893 return err ?: len; 5894 } 5895 5896 static struct md_sysfs_entry 5897 raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR, 5898 raid5_show_stripe_cache_size, 5899 raid5_store_stripe_cache_size); 5900 5901 static ssize_t 5902 raid5_show_rmw_level(struct mddev *mddev, char *page) 5903 { 5904 struct r5conf *conf = mddev->private; 5905 if (conf) 5906 return sprintf(page, "%d\n", conf->rmw_level); 5907 else 5908 return 0; 5909 } 5910 5911 static ssize_t 5912 raid5_store_rmw_level(struct mddev *mddev, const char *page, size_t len) 5913 { 5914 struct r5conf *conf = mddev->private; 5915 unsigned long new; 5916 5917 if (!conf) 5918 return -ENODEV; 5919 5920 if (len >= PAGE_SIZE) 5921 return -EINVAL; 5922 5923 if (kstrtoul(page, 10, &new)) 5924 return -EINVAL; 5925 5926 if (new != PARITY_DISABLE_RMW && !raid6_call.xor_syndrome) 5927 return -EINVAL; 5928 5929 if (new != PARITY_DISABLE_RMW && 5930 new != PARITY_ENABLE_RMW && 5931 new != PARITY_PREFER_RMW) 5932 return -EINVAL; 5933 5934 conf->rmw_level = new; 5935 return len; 5936 } 5937 5938 static struct md_sysfs_entry 5939 raid5_rmw_level = __ATTR(rmw_level, S_IRUGO | S_IWUSR, 5940 raid5_show_rmw_level, 5941 raid5_store_rmw_level); 5942 5943 5944 static ssize_t 5945 raid5_show_preread_threshold(struct mddev *mddev, char *page) 5946 { 5947 struct r5conf *conf; 5948 int ret = 0; 5949 spin_lock(&mddev->lock); 5950 conf = mddev->private; 5951 if (conf) 5952 ret = sprintf(page, "%d\n", conf->bypass_threshold); 5953 spin_unlock(&mddev->lock); 5954 return ret; 5955 } 5956 5957 static ssize_t 5958 raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len) 5959 { 5960 struct r5conf *conf; 5961 unsigned long new; 5962 int err; 5963 5964 if (len >= PAGE_SIZE) 5965 return -EINVAL; 5966 if (kstrtoul(page, 10, &new)) 5967 return -EINVAL; 5968 5969 err = mddev_lock(mddev); 5970 if (err) 5971 return err; 5972 
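/* conf may have become NULL (for example if the array was stopped) while we slept waiting for mddev_lock, so re-read ->private and fail with -ENODEV if it is gone. */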
conf = mddev->private; 5973 if (!conf) 5974 err = -ENODEV; 5975 else if (new > conf->min_nr_stripes) 5976 err = -EINVAL; 5977 else 5978 conf->bypass_threshold = new; 5979 mddev_unlock(mddev); 5980 return err ?: len; 5981 } 5982 5983 static struct md_sysfs_entry 5984 raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold, 5985 S_IRUGO | S_IWUSR, 5986 raid5_show_preread_threshold, 5987 raid5_store_preread_threshold); 5988 5989 static ssize_t 5990 raid5_show_skip_copy(struct mddev *mddev, char *page) 5991 { 5992 struct r5conf *conf; 5993 int ret = 0; 5994 spin_lock(&mddev->lock); 5995 conf = mddev->private; 5996 if (conf) 5997 ret = sprintf(page, "%d\n", conf->skip_copy); 5998 spin_unlock(&mddev->lock); 5999 return ret; 6000 } 6001 6002 static ssize_t 6003 raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len) 6004 { 6005 struct r5conf *conf; 6006 unsigned long new; 6007 int err; 6008 6009 if (len >= PAGE_SIZE) 6010 return -EINVAL; 6011 if (kstrtoul(page, 10, &new)) 6012 return -EINVAL; 6013 new = !!new; 6014 6015 err = mddev_lock(mddev); 6016 if (err) 6017 return err; 6018 conf = mddev->private; 6019 if (!conf) 6020 err = -ENODEV; 6021 else if (new != conf->skip_copy) { 6022 mddev_suspend(mddev); 6023 conf->skip_copy = new; 6024 if (new) 6025 mddev->queue->backing_dev_info.capabilities |= 6026 BDI_CAP_STABLE_WRITES; 6027 else 6028 mddev->queue->backing_dev_info.capabilities &= 6029 ~BDI_CAP_STABLE_WRITES; 6030 mddev_resume(mddev); 6031 } 6032 mddev_unlock(mddev); 6033 return err ?: len; 6034 } 6035 6036 static struct md_sysfs_entry 6037 raid5_skip_copy = __ATTR(skip_copy, S_IRUGO | S_IWUSR, 6038 raid5_show_skip_copy, 6039 raid5_store_skip_copy); 6040 6041 static ssize_t 6042 stripe_cache_active_show(struct mddev *mddev, char *page) 6043 { 6044 struct r5conf *conf = mddev->private; 6045 if (conf) 6046 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); 6047 else 6048 return 0; 6049 } 6050 6051 static struct md_sysfs_entry 6052 raid5_stripecache_active = __ATTR_RO(stripe_cache_active); 6053 6054 static ssize_t 6055 raid5_show_group_thread_cnt(struct mddev *mddev, char *page) 6056 { 6057 struct r5conf *conf; 6058 int ret = 0; 6059 spin_lock(&mddev->lock); 6060 conf = mddev->private; 6061 if (conf) 6062 ret = sprintf(page, "%d\n", conf->worker_cnt_per_group); 6063 spin_unlock(&mddev->lock); 6064 return ret; 6065 } 6066 6067 static int alloc_thread_groups(struct r5conf *conf, int cnt, 6068 int *group_cnt, 6069 int *worker_cnt_per_group, 6070 struct r5worker_group **worker_groups); 6071 static ssize_t 6072 raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) 6073 { 6074 struct r5conf *conf; 6075 unsigned long new; 6076 int err; 6077 struct r5worker_group *new_groups, *old_groups; 6078 int group_cnt, worker_cnt_per_group; 6079 6080 if (len >= PAGE_SIZE) 6081 return -EINVAL; 6082 if (kstrtoul(page, 10, &new)) 6083 return -EINVAL; 6084 6085 err = mddev_lock(mddev); 6086 if (err) 6087 return err; 6088 conf = mddev->private; 6089 if (!conf) 6090 err = -ENODEV; 6091 else if (new != conf->worker_cnt_per_group) { 6092 mddev_suspend(mddev); 6093 6094 old_groups = conf->worker_groups; 6095 if (old_groups) 6096 flush_workqueue(raid5_wq); 6097 6098 err = alloc_thread_groups(conf, new, 6099 &group_cnt, &worker_cnt_per_group, 6100 &new_groups); 6101 if (!err) { 6102 spin_lock_irq(&conf->device_lock); 6103 conf->group_cnt = group_cnt; 6104 conf->worker_cnt_per_group = worker_cnt_per_group; 6105 conf->worker_groups = new_groups; 6106 
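/* The new worker-group configuration is published while holding device_lock; the old groups are only freed once the lock has been dropped. */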
spin_unlock_irq(&conf->device_lock); 6107 6108 if (old_groups) 6109 kfree(old_groups[0].workers); 6110 kfree(old_groups); 6111 } 6112 mddev_resume(mddev); 6113 } 6114 mddev_unlock(mddev); 6115 6116 return err ?: len; 6117 } 6118 6119 static struct md_sysfs_entry 6120 raid5_group_thread_cnt = __ATTR(group_thread_cnt, S_IRUGO | S_IWUSR, 6121 raid5_show_group_thread_cnt, 6122 raid5_store_group_thread_cnt); 6123 6124 static struct attribute *raid5_attrs[] = { 6125 &raid5_stripecache_size.attr, 6126 &raid5_stripecache_active.attr, 6127 &raid5_preread_bypass_threshold.attr, 6128 &raid5_group_thread_cnt.attr, 6129 &raid5_skip_copy.attr, 6130 &raid5_rmw_level.attr, 6131 NULL, 6132 }; 6133 static struct attribute_group raid5_attrs_group = { 6134 .name = NULL, 6135 .attrs = raid5_attrs, 6136 }; 6137 6138 static int alloc_thread_groups(struct r5conf *conf, int cnt, 6139 int *group_cnt, 6140 int *worker_cnt_per_group, 6141 struct r5worker_group **worker_groups) 6142 { 6143 int i, j, k; 6144 ssize_t size; 6145 struct r5worker *workers; 6146 6147 *worker_cnt_per_group = cnt; 6148 if (cnt == 0) { 6149 *group_cnt = 0; 6150 *worker_groups = NULL; 6151 return 0; 6152 } 6153 *group_cnt = num_possible_nodes(); 6154 size = sizeof(struct r5worker) * cnt; 6155 workers = kzalloc(size * *group_cnt, GFP_NOIO); 6156 *worker_groups = kzalloc(sizeof(struct r5worker_group) * 6157 *group_cnt, GFP_NOIO); 6158 if (!*worker_groups || !workers) { 6159 kfree(workers); 6160 kfree(*worker_groups); 6161 return -ENOMEM; 6162 } 6163 6164 for (i = 0; i < *group_cnt; i++) { 6165 struct r5worker_group *group; 6166 6167 group = &(*worker_groups)[i]; 6168 INIT_LIST_HEAD(&group->handle_list); 6169 group->conf = conf; 6170 group->workers = workers + i * cnt; 6171 6172 for (j = 0; j < cnt; j++) { 6173 struct r5worker *worker = group->workers + j; 6174 worker->group = group; 6175 INIT_WORK(&worker->work, raid5_do_work); 6176 6177 for (k = 0; k < NR_STRIPE_HASH_LOCKS; k++) 6178 INIT_LIST_HEAD(worker->temp_inactive_list + k); 6179 } 6180 } 6181 6182 return 0; 6183 } 6184 6185 static void free_thread_groups(struct r5conf *conf) 6186 { 6187 if (conf->worker_groups) 6188 kfree(conf->worker_groups[0].workers); 6189 kfree(conf->worker_groups); 6190 conf->worker_groups = NULL; 6191 } 6192 6193 static sector_t 6194 raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks) 6195 { 6196 struct r5conf *conf = mddev->private; 6197 6198 if (!sectors) 6199 sectors = mddev->dev_sectors; 6200 if (!raid_disks) 6201 /* size is defined by the smallest of previous and new size */ 6202 raid_disks = min(conf->raid_disks, conf->previous_raid_disks); 6203 6204 sectors &= ~((sector_t)mddev->chunk_sectors - 1); 6205 sectors &= ~((sector_t)mddev->new_chunk_sectors - 1); 6206 return sectors * (raid_disks - conf->max_degraded); 6207 } 6208 6209 static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) 6210 { 6211 safe_put_page(percpu->spare_page); 6212 if (percpu->scribble) 6213 flex_array_free(percpu->scribble); 6214 percpu->spare_page = NULL; 6215 percpu->scribble = NULL; 6216 } 6217 6218 static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) 6219 { 6220 if (conf->level == 6 && !percpu->spare_page) 6221 percpu->spare_page = alloc_page(GFP_KERNEL); 6222 if (!percpu->scribble) 6223 percpu->scribble = scribble_alloc(max(conf->raid_disks, 6224 conf->previous_raid_disks), conf->chunk_sectors / 6225 STRIPE_SECTORS, GFP_KERNEL); 6226 6227 if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) { 6228 
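/* Allocation of the scribble region (or, for RAID-6, the spare page) failed - release whatever was allocated and report -ENOMEM. */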
free_scratch_buffer(conf, percpu); 6229 return -ENOMEM; 6230 } 6231 6232 return 0; 6233 } 6234 6235 static void raid5_free_percpu(struct r5conf *conf) 6236 { 6237 unsigned long cpu; 6238 6239 if (!conf->percpu) 6240 return; 6241 6242 #ifdef CONFIG_HOTPLUG_CPU 6243 unregister_cpu_notifier(&conf->cpu_notify); 6244 #endif 6245 6246 get_online_cpus(); 6247 for_each_possible_cpu(cpu) 6248 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); 6249 put_online_cpus(); 6250 6251 free_percpu(conf->percpu); 6252 } 6253 6254 static void free_conf(struct r5conf *conf) 6255 { 6256 if (conf->shrinker.seeks) 6257 unregister_shrinker(&conf->shrinker); 6258 free_thread_groups(conf); 6259 shrink_stripes(conf); 6260 raid5_free_percpu(conf); 6261 kfree(conf->disks); 6262 kfree(conf->stripe_hashtbl); 6263 kfree(conf); 6264 } 6265 6266 #ifdef CONFIG_HOTPLUG_CPU 6267 static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action, 6268 void *hcpu) 6269 { 6270 struct r5conf *conf = container_of(nfb, struct r5conf, cpu_notify); 6271 long cpu = (long)hcpu; 6272 struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu); 6273 6274 switch (action) { 6275 case CPU_UP_PREPARE: 6276 case CPU_UP_PREPARE_FROZEN: 6277 if (alloc_scratch_buffer(conf, percpu)) { 6278 pr_err("%s: failed memory allocation for cpu%ld\n", 6279 __func__, cpu); 6280 return notifier_from_errno(-ENOMEM); 6281 } 6282 break; 6283 case CPU_DEAD: 6284 case CPU_DEAD_FROZEN: 6285 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); 6286 break; 6287 default: 6288 break; 6289 } 6290 return NOTIFY_OK; 6291 } 6292 #endif 6293 6294 static int raid5_alloc_percpu(struct r5conf *conf) 6295 { 6296 unsigned long cpu; 6297 int err = 0; 6298 6299 conf->percpu = alloc_percpu(struct raid5_percpu); 6300 if (!conf->percpu) 6301 return -ENOMEM; 6302 6303 #ifdef CONFIG_HOTPLUG_CPU 6304 conf->cpu_notify.notifier_call = raid456_cpu_notify; 6305 conf->cpu_notify.priority = 0; 6306 err = register_cpu_notifier(&conf->cpu_notify); 6307 if (err) 6308 return err; 6309 #endif 6310 6311 get_online_cpus(); 6312 for_each_present_cpu(cpu) { 6313 err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); 6314 if (err) { 6315 pr_err("%s: failed memory allocation for cpu%ld\n", 6316 __func__, cpu); 6317 break; 6318 } 6319 } 6320 put_online_cpus(); 6321 6322 return err; 6323 } 6324 6325 static unsigned long raid5_cache_scan(struct shrinker *shrink, 6326 struct shrink_control *sc) 6327 { 6328 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); 6329 int ret = 0; 6330 while (ret < sc->nr_to_scan) { 6331 if (drop_one_stripe(conf) == 0) 6332 return SHRINK_STOP; 6333 ret++; 6334 } 6335 return ret; 6336 } 6337 6338 static unsigned long raid5_cache_count(struct shrinker *shrink, 6339 struct shrink_control *sc) 6340 { 6341 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); 6342 6343 if (conf->max_nr_stripes < conf->min_nr_stripes) 6344 /* unlikely, but not impossible */ 6345 return 0; 6346 return conf->max_nr_stripes - conf->min_nr_stripes; 6347 } 6348 6349 static struct r5conf *setup_conf(struct mddev *mddev) 6350 { 6351 struct r5conf *conf; 6352 int raid_disk, memory, max_disks; 6353 struct md_rdev *rdev; 6354 struct disk_info *disk; 6355 char pers_name[6]; 6356 int i; 6357 int group_cnt, worker_cnt_per_group; 6358 struct r5worker_group *new_group; 6359 6360 if (mddev->new_level != 5 6361 && mddev->new_level != 4 6362 && mddev->new_level != 6) { 6363 printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n", 6364 
mdname(mddev), mddev->new_level); 6365 return ERR_PTR(-EIO); 6366 } 6367 if ((mddev->new_level == 5 6368 && !algorithm_valid_raid5(mddev->new_layout)) || 6369 (mddev->new_level == 6 6370 && !algorithm_valid_raid6(mddev->new_layout))) { 6371 printk(KERN_ERR "md/raid:%s: layout %d not supported\n", 6372 mdname(mddev), mddev->new_layout); 6373 return ERR_PTR(-EIO); 6374 } 6375 if (mddev->new_level == 6 && mddev->raid_disks < 4) { 6376 printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n", 6377 mdname(mddev), mddev->raid_disks); 6378 return ERR_PTR(-EINVAL); 6379 } 6380 6381 if (!mddev->new_chunk_sectors || 6382 (mddev->new_chunk_sectors << 9) % PAGE_SIZE || 6383 !is_power_of_2(mddev->new_chunk_sectors)) { 6384 printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n", 6385 mdname(mddev), mddev->new_chunk_sectors << 9); 6386 return ERR_PTR(-EINVAL); 6387 } 6388 6389 conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL); 6390 if (conf == NULL) 6391 goto abort; 6392 /* Don't enable multi-threading by default */ 6393 if (!alloc_thread_groups(conf, 0, &group_cnt, &worker_cnt_per_group, 6394 &new_group)) { 6395 conf->group_cnt = group_cnt; 6396 conf->worker_cnt_per_group = worker_cnt_per_group; 6397 conf->worker_groups = new_group; 6398 } else 6399 goto abort; 6400 spin_lock_init(&conf->device_lock); 6401 seqcount_init(&conf->gen_lock); 6402 init_waitqueue_head(&conf->wait_for_stripe); 6403 init_waitqueue_head(&conf->wait_for_overlap); 6404 INIT_LIST_HEAD(&conf->handle_list); 6405 INIT_LIST_HEAD(&conf->hold_list); 6406 INIT_LIST_HEAD(&conf->delayed_list); 6407 INIT_LIST_HEAD(&conf->bitmap_list); 6408 init_llist_head(&conf->released_stripes); 6409 atomic_set(&conf->active_stripes, 0); 6410 atomic_set(&conf->preread_active_stripes, 0); 6411 atomic_set(&conf->active_aligned_reads, 0); 6412 conf->bypass_threshold = BYPASS_THRESHOLD; 6413 conf->recovery_disabled = mddev->recovery_disabled - 1; 6414 6415 conf->raid_disks = mddev->raid_disks; 6416 if (mddev->reshape_position == MaxSector) 6417 conf->previous_raid_disks = mddev->raid_disks; 6418 else 6419 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; 6420 max_disks = max(conf->raid_disks, conf->previous_raid_disks); 6421 6422 conf->disks = kzalloc(max_disks * sizeof(struct disk_info), 6423 GFP_KERNEL); 6424 if (!conf->disks) 6425 goto abort; 6426 6427 conf->mddev = mddev; 6428 6429 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) 6430 goto abort; 6431 6432 /* We init hash_locks[0] separately so that it can be used 6433 * as the reference lock in the spin_lock_nest_lock() call 6434 * in lock_all_device_hash_locks_irq in order to convince 6435 * lockdep that we know what we are doing.
6436 */ 6437 spin_lock_init(conf->hash_locks); 6438 for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++) 6439 spin_lock_init(conf->hash_locks + i); 6440 6441 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) 6442 INIT_LIST_HEAD(conf->inactive_list + i); 6443 6444 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) 6445 INIT_LIST_HEAD(conf->temp_inactive_list + i); 6446 6447 conf->level = mddev->new_level; 6448 conf->chunk_sectors = mddev->new_chunk_sectors; 6449 if (raid5_alloc_percpu(conf) != 0) 6450 goto abort; 6451 6452 pr_debug("raid456: run(%s) called.\n", mdname(mddev)); 6453 6454 rdev_for_each(rdev, mddev) { 6455 raid_disk = rdev->raid_disk; 6456 if (raid_disk >= max_disks 6457 || raid_disk < 0) 6458 continue; 6459 disk = conf->disks + raid_disk; 6460 6461 if (test_bit(Replacement, &rdev->flags)) { 6462 if (disk->replacement) 6463 goto abort; 6464 disk->replacement = rdev; 6465 } else { 6466 if (disk->rdev) 6467 goto abort; 6468 disk->rdev = rdev; 6469 } 6470 6471 if (test_bit(In_sync, &rdev->flags)) { 6472 char b[BDEVNAME_SIZE]; 6473 printk(KERN_INFO "md/raid:%s: device %s operational as raid" 6474 " disk %d\n", 6475 mdname(mddev), bdevname(rdev->bdev, b), raid_disk); 6476 } else if (rdev->saved_raid_disk != raid_disk) 6477 /* Cannot rely on bitmap to complete recovery */ 6478 conf->fullsync = 1; 6479 } 6480 6481 conf->level = mddev->new_level; 6482 if (conf->level == 6) { 6483 conf->max_degraded = 2; 6484 if (raid6_call.xor_syndrome) 6485 conf->rmw_level = PARITY_ENABLE_RMW; 6486 else 6487 conf->rmw_level = PARITY_DISABLE_RMW; 6488 } else { 6489 conf->max_degraded = 1; 6490 conf->rmw_level = PARITY_ENABLE_RMW; 6491 } 6492 conf->algorithm = mddev->new_layout; 6493 conf->reshape_progress = mddev->reshape_position; 6494 if (conf->reshape_progress != MaxSector) { 6495 conf->prev_chunk_sectors = mddev->chunk_sectors; 6496 conf->prev_algo = mddev->layout; 6497 } 6498 6499 conf->min_nr_stripes = NR_STRIPES; 6500 memory = conf->min_nr_stripes * (sizeof(struct stripe_head) + 6501 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; 6502 atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS); 6503 if (grow_stripes(conf, conf->min_nr_stripes)) { 6504 printk(KERN_ERR 6505 "md/raid:%s: couldn't allocate %dkB for buffers\n", 6506 mdname(mddev), memory); 6507 goto abort; 6508 } else 6509 printk(KERN_INFO "md/raid:%s: allocated %dkB\n", 6510 mdname(mddev), memory); 6511 /* 6512 * Losing a stripe head costs more than the time to refill it, 6513 * it reduces the queue depth and so can hurt throughput. 6514 * So set it rather large, scaled by number of devices. 
6515 */ 6516 conf->shrinker.seeks = DEFAULT_SEEKS * conf->raid_disks * 4; 6517 conf->shrinker.scan_objects = raid5_cache_scan; 6518 conf->shrinker.count_objects = raid5_cache_count; 6519 conf->shrinker.batch = 128; 6520 conf->shrinker.flags = 0; 6521 register_shrinker(&conf->shrinker); 6522 6523 sprintf(pers_name, "raid%d", mddev->new_level); 6524 conf->thread = md_register_thread(raid5d, mddev, pers_name); 6525 if (!conf->thread) { 6526 printk(KERN_ERR 6527 "md/raid:%s: couldn't allocate thread.\n", 6528 mdname(mddev)); 6529 goto abort; 6530 } 6531 6532 return conf; 6533 6534 abort: 6535 if (conf) { 6536 free_conf(conf); 6537 return ERR_PTR(-EIO); 6538 } else 6539 return ERR_PTR(-ENOMEM); 6540 } 6541 6542 static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded) 6543 { 6544 switch (algo) { 6545 case ALGORITHM_PARITY_0: 6546 if (raid_disk < max_degraded) 6547 return 1; 6548 break; 6549 case ALGORITHM_PARITY_N: 6550 if (raid_disk >= raid_disks - max_degraded) 6551 return 1; 6552 break; 6553 case ALGORITHM_PARITY_0_6: 6554 if (raid_disk == 0 || 6555 raid_disk == raid_disks - 1) 6556 return 1; 6557 break; 6558 case ALGORITHM_LEFT_ASYMMETRIC_6: 6559 case ALGORITHM_RIGHT_ASYMMETRIC_6: 6560 case ALGORITHM_LEFT_SYMMETRIC_6: 6561 case ALGORITHM_RIGHT_SYMMETRIC_6: 6562 if (raid_disk == raid_disks - 1) 6563 return 1; 6564 } 6565 return 0; 6566 } 6567 6568 static int run(struct mddev *mddev) 6569 { 6570 struct r5conf *conf; 6571 int working_disks = 0; 6572 int dirty_parity_disks = 0; 6573 struct md_rdev *rdev; 6574 sector_t reshape_offset = 0; 6575 int i; 6576 long long min_offset_diff = 0; 6577 int first = 1; 6578 6579 if (mddev->recovery_cp != MaxSector) 6580 printk(KERN_NOTICE "md/raid:%s: not clean" 6581 " -- starting background reconstruction\n", 6582 mdname(mddev)); 6583 6584 rdev_for_each(rdev, mddev) { 6585 long long diff; 6586 if (rdev->raid_disk < 0) 6587 continue; 6588 diff = (rdev->new_data_offset - rdev->data_offset); 6589 if (first) { 6590 min_offset_diff = diff; 6591 first = 0; 6592 } else if (mddev->reshape_backwards && 6593 diff < min_offset_diff) 6594 min_offset_diff = diff; 6595 else if (!mddev->reshape_backwards && 6596 diff > min_offset_diff) 6597 min_offset_diff = diff; 6598 } 6599 6600 if (mddev->reshape_position != MaxSector) { 6601 /* Check that we can continue the reshape. 6602 * Difficulties arise if the stripe we would write to 6603 * next is at or after the stripe we would read from next. 6604 * For a reshape that changes the number of devices, this 6605 * is only possible for a very short time, and mdadm makes 6606 * sure that time appears to have passed before assembling 6607 * the array. So we fail if that time hasn't passed. 6608 * For a reshape that keeps the number of devices the same 6609 * mdadm must be monitoring the reshape and keeping the 6610 * critical areas read-only and backed up. It will start 6611 * the array in read-only mode, so we check for that. 6612 */ 6613 sector_t here_new, here_old; 6614 int old_disks; 6615 int max_degraded = (mddev->level == 6 ? 2 : 1); 6616 6617 if (mddev->new_level != mddev->level) { 6618 printk(KERN_ERR "md/raid:%s: unsupported reshape " 6619 "required - aborting.\n", 6620 mdname(mddev)); 6621 return -EINVAL; 6622 } 6623 old_disks = mddev->raid_disks - mddev->delta_disks; 6624 /* reshape_position must be on a new-stripe boundary, and one 6625 * further up in new geometry must map after here in old 6626 * geometry.
6627 */ 6628 here_new = mddev->reshape_position; 6629 if (sector_div(here_new, mddev->new_chunk_sectors * 6630 (mddev->raid_disks - max_degraded))) { 6631 printk(KERN_ERR "md/raid:%s: reshape_position not " 6632 "on a stripe boundary\n", mdname(mddev)); 6633 return -EINVAL; 6634 } 6635 reshape_offset = here_new * mddev->new_chunk_sectors; 6636 /* here_new is the stripe we will write to */ 6637 here_old = mddev->reshape_position; 6638 sector_div(here_old, mddev->chunk_sectors * 6639 (old_disks-max_degraded)); 6640 /* here_old is the first stripe that we might need to read 6641 * from */ 6642 if (mddev->delta_disks == 0) { 6643 if ((here_new * mddev->new_chunk_sectors != 6644 here_old * mddev->chunk_sectors)) { 6645 printk(KERN_ERR "md/raid:%s: reshape position is" 6646 " confused - aborting\n", mdname(mddev)); 6647 return -EINVAL; 6648 } 6649 /* We cannot be sure it is safe to start an in-place 6650 * reshape. It is only safe if user-space is monitoring 6651 * and taking constant backups. 6652 * mdadm always starts a situation like this in 6653 * readonly mode so it can take control before 6654 * allowing any writes. So just check for that. 6655 */ 6656 if (abs(min_offset_diff) >= mddev->chunk_sectors && 6657 abs(min_offset_diff) >= mddev->new_chunk_sectors) 6658 /* not really in-place - so OK */; 6659 else if (mddev->ro == 0) { 6660 printk(KERN_ERR "md/raid:%s: in-place reshape " 6661 "must be started in read-only mode " 6662 "- aborting\n", 6663 mdname(mddev)); 6664 return -EINVAL; 6665 } 6666 } else if (mddev->reshape_backwards 6667 ? (here_new * mddev->new_chunk_sectors + min_offset_diff <= 6668 here_old * mddev->chunk_sectors) 6669 : (here_new * mddev->new_chunk_sectors >= 6670 here_old * mddev->chunk_sectors + (-min_offset_diff))) { 6671 /* Reading from the same stripe as writing to - bad */ 6672 printk(KERN_ERR "md/raid:%s: reshape_position too early for " 6673 "auto-recovery - aborting.\n", 6674 mdname(mddev)); 6675 return -EINVAL; 6676 } 6677 printk(KERN_INFO "md/raid:%s: reshape will continue\n", 6678 mdname(mddev)); 6679 /* OK, we should be able to continue; */ 6680 } else { 6681 BUG_ON(mddev->level != mddev->new_level); 6682 BUG_ON(mddev->layout != mddev->new_layout); 6683 BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors); 6684 BUG_ON(mddev->delta_disks != 0); 6685 } 6686 6687 if (mddev->private == NULL) 6688 conf = setup_conf(mddev); 6689 else 6690 conf = mddev->private; 6691 6692 if (IS_ERR(conf)) 6693 return PTR_ERR(conf); 6694 6695 conf->min_offset_diff = min_offset_diff; 6696 mddev->thread = conf->thread; 6697 conf->thread = NULL; 6698 mddev->private = conf; 6699 6700 for (i = 0; i < conf->raid_disks && conf->previous_raid_disks; 6701 i++) { 6702 rdev = conf->disks[i].rdev; 6703 if (!rdev && conf->disks[i].replacement) { 6704 /* The replacement is all we have yet */ 6705 rdev = conf->disks[i].replacement; 6706 conf->disks[i].replacement = NULL; 6707 clear_bit(Replacement, &rdev->flags); 6708 conf->disks[i].rdev = rdev; 6709 } 6710 if (!rdev) 6711 continue; 6712 if (conf->disks[i].replacement && 6713 conf->reshape_progress != MaxSector) { 6714 /* replacements and reshape simply do not mix. */ 6715 printk(KERN_ERR "md: cannot handle concurrent " 6716 "replacement and reshape.\n"); 6717 goto abort; 6718 } 6719 if (test_bit(In_sync, &rdev->flags)) { 6720 working_disks++; 6721 continue; 6722 } 6723 /* This disc is not fully in-sync. 
However if it 6724 * just stored parity (beyond the recovery_offset), 6725 * then we don't need to be concerned about the 6726 * array being dirty. 6727 * When reshape goes 'backwards', we never have 6728 * partially completed devices, so we only need 6729 * to worry about reshape going forwards. 6730 */ 6731 /* Hack because v0.91 doesn't store recovery_offset properly. */ 6732 if (mddev->major_version == 0 && 6733 mddev->minor_version > 90) 6734 rdev->recovery_offset = reshape_offset; 6735 6736 if (rdev->recovery_offset < reshape_offset) { 6737 /* We need to check old and new layout */ 6738 if (!only_parity(rdev->raid_disk, 6739 conf->algorithm, 6740 conf->raid_disks, 6741 conf->max_degraded)) 6742 continue; 6743 } 6744 if (!only_parity(rdev->raid_disk, 6745 conf->prev_algo, 6746 conf->previous_raid_disks, 6747 conf->max_degraded)) 6748 continue; 6749 dirty_parity_disks++; 6750 } 6751 6752 /* 6753 * 0 for a fully functional array, 1 or 2 for a degraded array. 6754 */ 6755 mddev->degraded = calc_degraded(conf); 6756 6757 if (has_failed(conf)) { 6758 printk(KERN_ERR "md/raid:%s: not enough operational devices" 6759 " (%d/%d failed)\n", 6760 mdname(mddev), mddev->degraded, conf->raid_disks); 6761 goto abort; 6762 } 6763 6764 /* device size must be a multiple of chunk size */ 6765 mddev->dev_sectors &= ~(mddev->chunk_sectors - 1); 6766 mddev->resync_max_sectors = mddev->dev_sectors; 6767 6768 if (mddev->degraded > dirty_parity_disks && 6769 mddev->recovery_cp != MaxSector) { 6770 if (mddev->ok_start_degraded) 6771 printk(KERN_WARNING 6772 "md/raid:%s: starting dirty degraded array" 6773 " - data corruption possible.\n", 6774 mdname(mddev)); 6775 else { 6776 printk(KERN_ERR 6777 "md/raid:%s: cannot start dirty degraded array.\n", 6778 mdname(mddev)); 6779 goto abort; 6780 } 6781 } 6782 6783 if (mddev->degraded == 0) 6784 printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d" 6785 " devices, algorithm %d\n", mdname(mddev), conf->level, 6786 mddev->raid_disks-mddev->degraded, mddev->raid_disks, 6787 mddev->new_layout); 6788 else 6789 printk(KERN_ALERT "md/raid:%s: raid level %d active with %d" 6790 " out of %d devices, algorithm %d\n", 6791 mdname(mddev), conf->level, 6792 mddev->raid_disks - mddev->degraded, 6793 mddev->raid_disks, mddev->new_layout); 6794 6795 print_raid5_conf(conf); 6796 6797 if (conf->reshape_progress != MaxSector) { 6798 conf->reshape_safe = conf->reshape_progress; 6799 atomic_set(&conf->reshape_stripes, 0); 6800 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 6801 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 6802 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 6803 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 6804 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 6805 "reshape"); 6806 } 6807 6808 /* Ok, everything is just fine now */ 6809 if (mddev->to_remove == &raid5_attrs_group) 6810 mddev->to_remove = NULL; 6811 else if (mddev->kobj.sd && 6812 sysfs_create_group(&mddev->kobj, &raid5_attrs_group)) 6813 printk(KERN_WARNING 6814 "raid5: failed to create sysfs attributes for %s\n", 6815 mdname(mddev)); 6816 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); 6817 6818 if (mddev->queue) { 6819 int chunk_size; 6820 bool discard_supported = true; 6821 /* read-ahead size must cover two whole stripes, which 6822 * is 2 * (datadisks) * chunksize, where 'datadisks' is the 6823 * number of data devices 6824 */ 6825 int data_disks = conf->previous_raid_disks - conf->max_degraded; 6826 int stripe = data_disks * 6827 ((mddev->chunk_sectors << 9) / PAGE_SIZE);
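/* For example (assuming 4K pages): a 512K chunk on a 6-device RAID-6 gives data_disks = 4 and stripe = 4 * 128 = 512 pages, so the read-ahead below is raised to at least 1024 pages. */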
6828 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 6829 mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 6830 6831 chunk_size = mddev->chunk_sectors << 9; 6832 blk_queue_io_min(mddev->queue, chunk_size); 6833 blk_queue_io_opt(mddev->queue, chunk_size * 6834 (conf->raid_disks - conf->max_degraded)); 6835 mddev->queue->limits.raid_partial_stripes_expensive = 1; 6836 /* 6837 * We can only discard a whole stripe. It doesn't make sense to 6838 * discard data disk but write parity disk 6839 */ 6840 stripe = stripe * PAGE_SIZE; 6841 /* Round up to power of 2, as discard handling 6842 * currently assumes that */ 6843 while ((stripe-1) & stripe) 6844 stripe = (stripe | (stripe-1)) + 1; 6845 mddev->queue->limits.discard_alignment = stripe; 6846 mddev->queue->limits.discard_granularity = stripe; 6847 /* 6848 * unaligned part of discard request will be ignored, so can't 6849 * guarantee discard_zeroes_data 6850 */ 6851 mddev->queue->limits.discard_zeroes_data = 0; 6852 6853 blk_queue_max_write_same_sectors(mddev->queue, 0); 6854 6855 rdev_for_each(rdev, mddev) { 6856 disk_stack_limits(mddev->gendisk, rdev->bdev, 6857 rdev->data_offset << 9); 6858 disk_stack_limits(mddev->gendisk, rdev->bdev, 6859 rdev->new_data_offset << 9); 6860 /* 6861 * discard_zeroes_data is required, otherwise data 6862 * could be lost. Consider a scenario: discard a stripe 6863 * (the stripe could be inconsistent if 6864 * discard_zeroes_data is 0); write one disk of the 6865 * stripe (the stripe could be inconsistent again 6866 * depending on which disks are used to calculate 6867 * parity); the disk is broken; The stripe data of this 6868 * disk is lost. 6869 */ 6870 if (!blk_queue_discard(bdev_get_queue(rdev->bdev)) || 6871 !bdev_get_queue(rdev->bdev)-> 6872 limits.discard_zeroes_data) 6873 discard_supported = false; 6874 /* Unfortunately, discard_zeroes_data is not currently 6875 * a guarantee - just a hint. So we only allow DISCARD 6876 * if the sysadmin has confirmed that only safe devices 6877 * are in use by setting a module parameter. 
6878 */ 6879 if (!devices_handle_discard_safely) { 6880 if (discard_supported) { 6881 pr_info("md/raid456: discard support disabled due to uncertainty.\n"); 6882 pr_info("Set raid456.devices_handle_discard_safely=Y to override.\n"); 6883 } 6884 discard_supported = false; 6885 } 6886 } 6887 6888 if (discard_supported && 6889 mddev->queue->limits.max_discard_sectors >= stripe && 6890 mddev->queue->limits.discard_granularity >= stripe) 6891 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, 6892 mddev->queue); 6893 else 6894 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, 6895 mddev->queue); 6896 } 6897 6898 return 0; 6899 abort: 6900 md_unregister_thread(&mddev->thread); 6901 print_raid5_conf(conf); 6902 free_conf(conf); 6903 mddev->private = NULL; 6904 printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev)); 6905 return -EIO; 6906 } 6907 6908 static void raid5_free(struct mddev *mddev, void *priv) 6909 { 6910 struct r5conf *conf = priv; 6911 6912 free_conf(conf); 6913 mddev->to_remove = &raid5_attrs_group; 6914 } 6915 6916 static void status(struct seq_file *seq, struct mddev *mddev) 6917 { 6918 struct r5conf *conf = mddev->private; 6919 int i; 6920 6921 seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level, 6922 mddev->chunk_sectors / 2, mddev->layout); 6923 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); 6924 for (i = 0; i < conf->raid_disks; i++) 6925 seq_printf (seq, "%s", 6926 conf->disks[i].rdev && 6927 test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_"); 6928 seq_printf (seq, "]"); 6929 } 6930 6931 static void print_raid5_conf (struct r5conf *conf) 6932 { 6933 int i; 6934 struct disk_info *tmp; 6935 6936 printk(KERN_DEBUG "RAID conf printout:\n"); 6937 if (!conf) { 6938 printk("(conf==NULL)\n"); 6939 return; 6940 } 6941 printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level, 6942 conf->raid_disks, 6943 conf->raid_disks - conf->mddev->degraded); 6944 6945 for (i = 0; i < conf->raid_disks; i++) { 6946 char b[BDEVNAME_SIZE]; 6947 tmp = conf->disks + i; 6948 if (tmp->rdev) 6949 printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n", 6950 i, !test_bit(Faulty, &tmp->rdev->flags), 6951 bdevname(tmp->rdev->bdev, b)); 6952 } 6953 } 6954 6955 static int raid5_spare_active(struct mddev *mddev) 6956 { 6957 int i; 6958 struct r5conf *conf = mddev->private; 6959 struct disk_info *tmp; 6960 int count = 0; 6961 unsigned long flags; 6962 6963 for (i = 0; i < conf->raid_disks; i++) { 6964 tmp = conf->disks + i; 6965 if (tmp->replacement 6966 && tmp->replacement->recovery_offset == MaxSector 6967 && !test_bit(Faulty, &tmp->replacement->flags) 6968 && !test_and_set_bit(In_sync, &tmp->replacement->flags)) { 6969 /* Replacement has just become active. */ 6970 if (!tmp->rdev 6971 || !test_and_clear_bit(In_sync, &tmp->rdev->flags)) 6972 count++; 6973 if (tmp->rdev) { 6974 /* Replaced device not technically faulty, 6975 * but we need to be sure it gets removed 6976 * and never re-added. 
6977 */ 6978 set_bit(Faulty, &tmp->rdev->flags); 6979 sysfs_notify_dirent_safe( 6980 tmp->rdev->sysfs_state); 6981 } 6982 sysfs_notify_dirent_safe(tmp->replacement->sysfs_state); 6983 } else if (tmp->rdev 6984 && tmp->rdev->recovery_offset == MaxSector 6985 && !test_bit(Faulty, &tmp->rdev->flags) 6986 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { 6987 count++; 6988 sysfs_notify_dirent_safe(tmp->rdev->sysfs_state); 6989 } 6990 } 6991 spin_lock_irqsave(&conf->device_lock, flags); 6992 mddev->degraded = calc_degraded(conf); 6993 spin_unlock_irqrestore(&conf->device_lock, flags); 6994 print_raid5_conf(conf); 6995 return count; 6996 } 6997 6998 static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev) 6999 { 7000 struct r5conf *conf = mddev->private; 7001 int err = 0; 7002 int number = rdev->raid_disk; 7003 struct md_rdev **rdevp; 7004 struct disk_info *p = conf->disks + number; 7005 7006 print_raid5_conf(conf); 7007 if (rdev == p->rdev) 7008 rdevp = &p->rdev; 7009 else if (rdev == p->replacement) 7010 rdevp = &p->replacement; 7011 else 7012 return 0; 7013 7014 if (number >= conf->raid_disks && 7015 conf->reshape_progress == MaxSector) 7016 clear_bit(In_sync, &rdev->flags); 7017 7018 if (test_bit(In_sync, &rdev->flags) || 7019 atomic_read(&rdev->nr_pending)) { 7020 err = -EBUSY; 7021 goto abort; 7022 } 7023 /* Only remove non-faulty devices if recovery 7024 * isn't possible. 7025 */ 7026 if (!test_bit(Faulty, &rdev->flags) && 7027 mddev->recovery_disabled != conf->recovery_disabled && 7028 !has_failed(conf) && 7029 (!p->replacement || p->replacement == rdev) && 7030 number < conf->raid_disks) { 7031 err = -EBUSY; 7032 goto abort; 7033 } 7034 *rdevp = NULL; 7035 synchronize_rcu(); 7036 if (atomic_read(&rdev->nr_pending)) { 7037 /* lost the race, try later */ 7038 err = -EBUSY; 7039 *rdevp = rdev; 7040 } else if (p->replacement) { 7041 /* We must have just cleared 'rdev' */ 7042 p->rdev = p->replacement; 7043 clear_bit(Replacement, &p->replacement->flags); 7044 smp_mb(); /* Make sure other CPUs may see both as identical 7045 * but will never see neither - if they are careful 7046 */ 7047 p->replacement = NULL; 7048 clear_bit(WantReplacement, &rdev->flags); 7049 } else 7050 /* We might have just removed the Replacement as faulty- 7051 * clear the bit just in case 7052 */ 7053 clear_bit(WantReplacement, &rdev->flags); 7054 abort: 7055 7056 print_raid5_conf(conf); 7057 return err; 7058 } 7059 7060 static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) 7061 { 7062 struct r5conf *conf = mddev->private; 7063 int err = -EEXIST; 7064 int disk; 7065 struct disk_info *p; 7066 int first = 0; 7067 int last = conf->raid_disks - 1; 7068 7069 if (mddev->recovery_disabled == conf->recovery_disabled) 7070 return -EBUSY; 7071 7072 if (rdev->saved_raid_disk < 0 && has_failed(conf)) 7073 /* no point adding a device */ 7074 return -EINVAL; 7075 7076 if (rdev->raid_disk >= 0) 7077 first = last = rdev->raid_disk; 7078 7079 /* 7080 * find the disk ... but prefer rdev->saved_raid_disk 7081 * if possible. 
7082 */ 7083 if (rdev->saved_raid_disk >= 0 && 7084 rdev->saved_raid_disk >= first && 7085 conf->disks[rdev->saved_raid_disk].rdev == NULL) 7086 first = rdev->saved_raid_disk; 7087 7088 for (disk = first; disk <= last; disk++) { 7089 p = conf->disks + disk; 7090 if (p->rdev == NULL) { 7091 clear_bit(In_sync, &rdev->flags); 7092 rdev->raid_disk = disk; 7093 err = 0; 7094 if (rdev->saved_raid_disk != disk) 7095 conf->fullsync = 1; 7096 rcu_assign_pointer(p->rdev, rdev); 7097 goto out; 7098 } 7099 } 7100 for (disk = first; disk <= last; disk++) { 7101 p = conf->disks + disk; 7102 if (test_bit(WantReplacement, &p->rdev->flags) && 7103 p->replacement == NULL) { 7104 clear_bit(In_sync, &rdev->flags); 7105 set_bit(Replacement, &rdev->flags); 7106 rdev->raid_disk = disk; 7107 err = 0; 7108 conf->fullsync = 1; 7109 rcu_assign_pointer(p->replacement, rdev); 7110 break; 7111 } 7112 } 7113 out: 7114 print_raid5_conf(conf); 7115 return err; 7116 } 7117 7118 static int raid5_resize(struct mddev *mddev, sector_t sectors) 7119 { 7120 /* no resync is happening, and there is enough space 7121 * on all devices, so we can resize. 7122 * We need to make sure resync covers any new space. 7123 * If the array is shrinking we should possibly wait until 7124 * any io in the removed space completes, but it hardly seems 7125 * worth it. 7126 */ 7127 sector_t newsize; 7128 sectors &= ~((sector_t)mddev->chunk_sectors - 1); 7129 newsize = raid5_size(mddev, sectors, mddev->raid_disks); 7130 if (mddev->external_size && 7131 mddev->array_sectors > newsize) 7132 return -EINVAL; 7133 if (mddev->bitmap) { 7134 int ret = bitmap_resize(mddev->bitmap, sectors, 0, 0); 7135 if (ret) 7136 return ret; 7137 } 7138 md_set_array_sectors(mddev, newsize); 7139 set_capacity(mddev->gendisk, mddev->array_sectors); 7140 revalidate_disk(mddev->gendisk); 7141 if (sectors > mddev->dev_sectors && 7142 mddev->recovery_cp > mddev->dev_sectors) { 7143 mddev->recovery_cp = mddev->dev_sectors; 7144 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7145 } 7146 mddev->dev_sectors = sectors; 7147 mddev->resync_max_sectors = sectors; 7148 return 0; 7149 } 7150 7151 static int check_stripe_cache(struct mddev *mddev) 7152 { 7153 /* Can only proceed if there are plenty of stripe_heads. 7154 * We need a minimum of one full stripe,, and for sensible progress 7155 * it is best to have about 4 times that. 7156 * If we require 4 times, then the default 256 4K stripe_heads will 7157 * allow for chunk sizes up to 256K, which is probably OK. 7158 * If the chunk size is greater, user-space should request more 7159 * stripe_heads first. 7160 */ 7161 struct r5conf *conf = mddev->private; 7162 if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4 7163 > conf->min_nr_stripes || 7164 ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4 7165 > conf->min_nr_stripes) { 7166 printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes. Needed %lu\n", 7167 mdname(mddev), 7168 ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9) 7169 / STRIPE_SIZE)*4); 7170 return 0; 7171 } 7172 return 1; 7173 } 7174 7175 static int check_reshape(struct mddev *mddev) 7176 { 7177 struct r5conf *conf = mddev->private; 7178 7179 if (mddev->delta_disks == 0 && 7180 mddev->new_layout == mddev->layout && 7181 mddev->new_chunk_sectors == mddev->chunk_sectors) 7182 return 0; /* nothing to do */ 7183 if (has_failed(conf)) 7184 return -EINVAL; 7185 if (mddev->delta_disks < 0 && mddev->reshape_position == MaxSector) { 7186 /* We might be able to shrink, but the devices must 7187 * be made bigger first. 
7188 * For raid6, 4 is the minimum size. 7189 * Otherwise 2 is the minimum 7190 */ 7191 int min = 2; 7192 if (mddev->level == 6) 7193 min = 4; 7194 if (mddev->raid_disks + mddev->delta_disks < min) 7195 return -EINVAL; 7196 } 7197 7198 if (!check_stripe_cache(mddev)) 7199 return -ENOSPC; 7200 7201 return resize_stripes(conf, (conf->previous_raid_disks 7202 + mddev->delta_disks)); 7203 } 7204 7205 static int raid5_start_reshape(struct mddev *mddev) 7206 { 7207 struct r5conf *conf = mddev->private; 7208 struct md_rdev *rdev; 7209 int spares = 0; 7210 unsigned long flags; 7211 7212 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 7213 return -EBUSY; 7214 7215 if (!check_stripe_cache(mddev)) 7216 return -ENOSPC; 7217 7218 if (has_failed(conf)) 7219 return -EINVAL; 7220 7221 rdev_for_each(rdev, mddev) { 7222 if (!test_bit(In_sync, &rdev->flags) 7223 && !test_bit(Faulty, &rdev->flags)) 7224 spares++; 7225 } 7226 7227 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) 7228 /* Not enough devices even to make a degraded array 7229 * of that size 7230 */ 7231 return -EINVAL; 7232 7233 /* Refuse to reduce size of the array. Any reductions in 7234 * array size must be through explicit setting of array_size 7235 * attribute. 7236 */ 7237 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks) 7238 < mddev->array_sectors) { 7239 printk(KERN_ERR "md/raid:%s: array size must be reduced " 7240 "before number of disks\n", mdname(mddev)); 7241 return -EINVAL; 7242 } 7243 7244 atomic_set(&conf->reshape_stripes, 0); 7245 spin_lock_irq(&conf->device_lock); 7246 write_seqcount_begin(&conf->gen_lock); 7247 conf->previous_raid_disks = conf->raid_disks; 7248 conf->raid_disks += mddev->delta_disks; 7249 conf->prev_chunk_sectors = conf->chunk_sectors; 7250 conf->chunk_sectors = mddev->new_chunk_sectors; 7251 conf->prev_algo = conf->algorithm; 7252 conf->algorithm = mddev->new_layout; 7253 conf->generation++; 7254 /* Code that selects data_offset needs to see the generation update 7255 * if reshape_progress has been set - so a memory barrier needed. 7256 */ 7257 smp_mb(); 7258 if (mddev->reshape_backwards) 7259 conf->reshape_progress = raid5_size(mddev, 0, 0); 7260 else 7261 conf->reshape_progress = 0; 7262 conf->reshape_safe = conf->reshape_progress; 7263 write_seqcount_end(&conf->gen_lock); 7264 spin_unlock_irq(&conf->device_lock); 7265 7266 /* Now make sure any requests that proceeded on the assumption 7267 * the reshape wasn't running - like Discard or Read - have 7268 * completed. 7269 */ 7270 mddev_suspend(mddev); 7271 mddev_resume(mddev); 7272 7273 /* Add some new drives, as many as will fit. 7274 * We know there are enough to make the newly sized array work. 7275 * Don't add devices if we are reducing the number of 7276 * devices in the array. This is because it is not possible 7277 * to correctly record the "partially reconstructed" state of 7278 * such devices during the reshape and confusion could result. 
7279 */ 7280 if (mddev->delta_disks >= 0) { 7281 rdev_for_each(rdev, mddev) 7282 if (rdev->raid_disk < 0 && 7283 !test_bit(Faulty, &rdev->flags)) { 7284 if (raid5_add_disk(mddev, rdev) == 0) { 7285 if (rdev->raid_disk 7286 >= conf->previous_raid_disks) 7287 set_bit(In_sync, &rdev->flags); 7288 else 7289 rdev->recovery_offset = 0; 7290 7291 if (sysfs_link_rdev(mddev, rdev)) 7292 /* Failure here is OK */; 7293 } 7294 } else if (rdev->raid_disk >= conf->previous_raid_disks 7295 && !test_bit(Faulty, &rdev->flags)) { 7296 /* This is a spare that was manually added */ 7297 set_bit(In_sync, &rdev->flags); 7298 } 7299 7300 /* When a reshape changes the number of devices, 7301 * ->degraded is measured against the larger of the 7302 * pre and post number of devices. 7303 */ 7304 spin_lock_irqsave(&conf->device_lock, flags); 7305 mddev->degraded = calc_degraded(conf); 7306 spin_unlock_irqrestore(&conf->device_lock, flags); 7307 } 7308 mddev->raid_disks = conf->raid_disks; 7309 mddev->reshape_position = conf->reshape_progress; 7310 set_bit(MD_CHANGE_DEVS, &mddev->flags); 7311 7312 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 7313 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 7314 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 7315 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 7316 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 7317 "reshape"); 7318 if (!mddev->sync_thread) { 7319 mddev->recovery = 0; 7320 spin_lock_irq(&conf->device_lock); 7321 write_seqcount_begin(&conf->gen_lock); 7322 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; 7323 mddev->new_chunk_sectors = 7324 conf->chunk_sectors = conf->prev_chunk_sectors; 7325 mddev->new_layout = conf->algorithm = conf->prev_algo; 7326 rdev_for_each(rdev, mddev) 7327 rdev->new_data_offset = rdev->data_offset; 7328 smp_wmb(); 7329 conf->generation --; 7330 conf->reshape_progress = MaxSector; 7331 mddev->reshape_position = MaxSector; 7332 write_seqcount_end(&conf->gen_lock); 7333 spin_unlock_irq(&conf->device_lock); 7334 return -EAGAIN; 7335 } 7336 conf->reshape_checkpoint = jiffies; 7337 md_wakeup_thread(mddev->sync_thread); 7338 md_new_event(mddev); 7339 return 0; 7340 } 7341 7342 /* This is called from the reshape thread and should make any 7343 * changes needed in 'conf' 7344 */ 7345 static void end_reshape(struct r5conf *conf) 7346 { 7347 7348 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { 7349 struct md_rdev *rdev; 7350 7351 spin_lock_irq(&conf->device_lock); 7352 conf->previous_raid_disks = conf->raid_disks; 7353 rdev_for_each(rdev, conf->mddev) 7354 rdev->data_offset = rdev->new_data_offset; 7355 smp_wmb(); 7356 conf->reshape_progress = MaxSector; 7357 spin_unlock_irq(&conf->device_lock); 7358 wake_up(&conf->wait_for_overlap); 7359 7360 /* read-ahead size must cover two whole stripes, which is 7361 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices 7362 */ 7363 if (conf->mddev->queue) { 7364 int data_disks = conf->raid_disks - conf->max_degraded; 7365 int stripe = data_disks * ((conf->chunk_sectors << 9) 7366 / PAGE_SIZE); 7367 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 7368 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 7369 } 7370 } 7371 } 7372 7373 /* This is called from the raid5d thread with mddev_lock held. 7374 * It makes config changes to the device. 
7375 */ 7376 static void raid5_finish_reshape(struct mddev *mddev) 7377 { 7378 struct r5conf *conf = mddev->private; 7379 7380 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 7381 7382 if (mddev->delta_disks > 0) { 7383 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); 7384 set_capacity(mddev->gendisk, mddev->array_sectors); 7385 revalidate_disk(mddev->gendisk); 7386 } else { 7387 int d; 7388 spin_lock_irq(&conf->device_lock); 7389 mddev->degraded = calc_degraded(conf); 7390 spin_unlock_irq(&conf->device_lock); 7391 for (d = conf->raid_disks ; 7392 d < conf->raid_disks - mddev->delta_disks; 7393 d++) { 7394 struct md_rdev *rdev = conf->disks[d].rdev; 7395 if (rdev) 7396 clear_bit(In_sync, &rdev->flags); 7397 rdev = conf->disks[d].replacement; 7398 if (rdev) 7399 clear_bit(In_sync, &rdev->flags); 7400 } 7401 } 7402 mddev->layout = conf->algorithm; 7403 mddev->chunk_sectors = conf->chunk_sectors; 7404 mddev->reshape_position = MaxSector; 7405 mddev->delta_disks = 0; 7406 mddev->reshape_backwards = 0; 7407 } 7408 } 7409 7410 static void raid5_quiesce(struct mddev *mddev, int state) 7411 { 7412 struct r5conf *conf = mddev->private; 7413 7414 switch(state) { 7415 case 2: /* resume for a suspend */ 7416 wake_up(&conf->wait_for_overlap); 7417 break; 7418 7419 case 1: /* stop all writes */ 7420 lock_all_device_hash_locks_irq(conf); 7421 /* '2' tells resync/reshape to pause so that all 7422 * active stripes can drain 7423 */ 7424 conf->quiesce = 2; 7425 wait_event_cmd(conf->wait_for_stripe, 7426 atomic_read(&conf->active_stripes) == 0 && 7427 atomic_read(&conf->active_aligned_reads) == 0, 7428 unlock_all_device_hash_locks_irq(conf), 7429 lock_all_device_hash_locks_irq(conf)); 7430 conf->quiesce = 1; 7431 unlock_all_device_hash_locks_irq(conf); 7432 /* allow reshape to continue */ 7433 wake_up(&conf->wait_for_overlap); 7434 break; 7435 7436 case 0: /* re-enable writes */ 7437 lock_all_device_hash_locks_irq(conf); 7438 conf->quiesce = 0; 7439 wake_up(&conf->wait_for_stripe); 7440 wake_up(&conf->wait_for_overlap); 7441 unlock_all_device_hash_locks_irq(conf); 7442 break; 7443 } 7444 } 7445 7446 static void *raid45_takeover_raid0(struct mddev *mddev, int level) 7447 { 7448 struct r0conf *raid0_conf = mddev->private; 7449 sector_t sectors; 7450 7451 /* for raid0 takeover only one zone is supported */ 7452 if (raid0_conf->nr_strip_zones > 1) { 7453 printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n", 7454 mdname(mddev)); 7455 return ERR_PTR(-EINVAL); 7456 } 7457 7458 sectors = raid0_conf->strip_zone[0].zone_end; 7459 sector_div(sectors, raid0_conf->strip_zone[0].nb_dev); 7460 mddev->dev_sectors = sectors; 7461 mddev->new_level = level; 7462 mddev->new_layout = ALGORITHM_PARITY_N; 7463 mddev->new_chunk_sectors = mddev->chunk_sectors; 7464 mddev->raid_disks += 1; 7465 mddev->delta_disks = 1; 7466 /* make sure it will be not marked as dirty */ 7467 mddev->recovery_cp = MaxSector; 7468 7469 return setup_conf(mddev); 7470 } 7471 7472 static void *raid5_takeover_raid1(struct mddev *mddev) 7473 { 7474 int chunksect; 7475 7476 if (mddev->raid_disks != 2 || 7477 mddev->degraded > 1) 7478 return ERR_PTR(-EINVAL); 7479 7480 /* Should check if there are write-behind devices? 
static void *raid5_takeover_raid1(struct mddev *mddev)
{
	int chunksect;

	if (mddev->raid_disks != 2 ||
	    mddev->degraded > 1)
		return ERR_PTR(-EINVAL);

	/* Should check if there are write-behind devices? */

	chunksect = 64*2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect-1)))
		chunksect >>= 1;

	if ((chunksect<<9) < STRIPE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	mddev->new_level = 5;
	mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
	mddev->new_chunk_sectors = chunksect;

	return setup_conf(mddev);
}

static void *raid5_takeover_raid6(struct mddev *mddev)
{
	int new_layout;

	switch (mddev->layout) {
	case ALGORITHM_LEFT_ASYMMETRIC_6:
		new_layout = ALGORITHM_LEFT_ASYMMETRIC;
		break;
	case ALGORITHM_RIGHT_ASYMMETRIC_6:
		new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
		break;
	case ALGORITHM_LEFT_SYMMETRIC_6:
		new_layout = ALGORITHM_LEFT_SYMMETRIC;
		break;
	case ALGORITHM_RIGHT_SYMMETRIC_6:
		new_layout = ALGORITHM_RIGHT_SYMMETRIC;
		break;
	case ALGORITHM_PARITY_0_6:
		new_layout = ALGORITHM_PARITY_0;
		break;
	case ALGORITHM_PARITY_N:
		new_layout = ALGORITHM_PARITY_N;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	mddev->new_level = 5;
	mddev->new_layout = new_layout;
	mddev->delta_disks = -1;
	mddev->raid_disks -= 1;
	return setup_conf(mddev);
}

static int raid5_check_reshape(struct mddev *mddev)
{
	/* For a 2-drive array, the layout and chunk size can be changed
	 * immediately as no restriping is needed.
	 * For larger arrays we record the new value - after validation
	 * to be used by a reshape pass.
	 */
	struct r5conf *conf = mddev->private;
	int new_chunk = mddev->new_chunk_sectors;

	if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
		return -EINVAL;
	if (new_chunk > 0) {
		if (!is_power_of_2(new_chunk))
			return -EINVAL;
		if (new_chunk < (PAGE_SIZE>>9))
			return -EINVAL;
		if (mddev->array_sectors & (new_chunk-1))
			/* not factor of array size */
			return -EINVAL;
	}

	/* They look valid */

	if (mddev->raid_disks == 2) {
		/* can make the change immediately */
		if (mddev->new_layout >= 0) {
			conf->algorithm = mddev->new_layout;
			mddev->layout = mddev->new_layout;
		}
		if (new_chunk > 0) {
			conf->chunk_sectors = new_chunk;
			mddev->chunk_sectors = new_chunk;
		}
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		md_wakeup_thread(mddev->thread);
	}
	return check_reshape(mddev);
}

static int raid6_check_reshape(struct mddev *mddev)
{
	int new_chunk = mddev->new_chunk_sectors;

	if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
		return -EINVAL;
	if (new_chunk > 0) {
		if (!is_power_of_2(new_chunk))
			return -EINVAL;
		if (new_chunk < (PAGE_SIZE >> 9))
			return -EINVAL;
		if (mddev->array_sectors & (new_chunk-1))
			/* not factor of array size */
			return -EINVAL;
	}

	/* They look valid */
	return check_reshape(mddev);
}
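/*
 * Illustrative sketch (not built into the driver): the chunk-size rule that
 * both raid5_check_reshape() and raid6_check_reshape() apply above, pulled
 * out into one hypothetical helper.  'new_chunk' is in 512-byte sectors.
 */
#if 0
static bool new_chunk_is_usable(struct mddev *mddev, int new_chunk)
{
	if (!is_power_of_2(new_chunk))
		return false;			/* must be a power of two */
	if (new_chunk < (PAGE_SIZE >> 9))
		return false;			/* at least one page per chunk */
	if (mddev->array_sectors & (new_chunk - 1))
		return false;			/* must evenly divide the array size */
	return true;
}
#endif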
static void *raid5_takeover(struct mddev *mddev)
{
	/* raid5 can take over:
	 *  raid0 - if there is only one strip zone - make it a raid4 layout
	 *  raid1 - if there are two drives.  We need to know the chunk size
	 *  raid4 - trivial - just use a raid4 layout.
	 *  raid6 - Providing it is a *_6 layout
	 */
	if (mddev->level == 0)
		return raid45_takeover_raid0(mddev, 5);
	if (mddev->level == 1)
		return raid5_takeover_raid1(mddev);
	if (mddev->level == 4) {
		mddev->new_layout = ALGORITHM_PARITY_N;
		mddev->new_level = 5;
		return setup_conf(mddev);
	}
	if (mddev->level == 6)
		return raid5_takeover_raid6(mddev);

	return ERR_PTR(-EINVAL);
}

static void *raid4_takeover(struct mddev *mddev)
{
	/* raid4 can take over:
	 *  raid0 - if there is only one strip zone
	 *  raid5 - if layout is right
	 */
	if (mddev->level == 0)
		return raid45_takeover_raid0(mddev, 4);
	if (mddev->level == 5 &&
	    mddev->layout == ALGORITHM_PARITY_N) {
		mddev->new_layout = 0;
		mddev->new_level = 4;
		return setup_conf(mddev);
	}
	return ERR_PTR(-EINVAL);
}

static struct md_personality raid5_personality;

static void *raid6_takeover(struct mddev *mddev)
{
	/* Currently can only take over a raid5.  We map the
	 * personality to an equivalent raid6 personality
	 * with the Q block at the end.
	 */
	int new_layout;

	if (mddev->pers != &raid5_personality)
		return ERR_PTR(-EINVAL);
	if (mddev->degraded > 1)
		return ERR_PTR(-EINVAL);
	if (mddev->raid_disks > 253)
		return ERR_PTR(-EINVAL);
	if (mddev->raid_disks < 3)
		return ERR_PTR(-EINVAL);

	switch (mddev->layout) {
	case ALGORITHM_LEFT_ASYMMETRIC:
		new_layout = ALGORITHM_LEFT_ASYMMETRIC_6;
		break;
	case ALGORITHM_RIGHT_ASYMMETRIC:
		new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
		break;
	case ALGORITHM_LEFT_SYMMETRIC:
		new_layout = ALGORITHM_LEFT_SYMMETRIC_6;
		break;
	case ALGORITHM_RIGHT_SYMMETRIC:
		new_layout = ALGORITHM_RIGHT_SYMMETRIC_6;
		break;
	case ALGORITHM_PARITY_0:
		new_layout = ALGORITHM_PARITY_0_6;
		break;
	case ALGORITHM_PARITY_N:
		new_layout = ALGORITHM_PARITY_N;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	mddev->new_level = 6;
	mddev->new_layout = new_layout;
	mddev->delta_disks = 1;
	mddev->raid_disks += 1;
	return setup_conf(mddev);
}
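/*
 * Illustrative sketch (not built into the driver): how a caller is expected
 * to consume the ERR_PTR()-style returns used by all of the takeover
 * functions above.  The helper name and surrounding context are made up;
 * in the real kernel this dance is performed by the md core when the array
 * level is changed through sysfs.
 */
#if 0
static int try_takeover(struct mddev *mddev, struct md_personality *new_pers)
{
	void *priv = new_pers->takeover(mddev);

	if (IS_ERR(priv))
		return PTR_ERR(priv);	/* e.g. -EINVAL: takeover not possible */

	/* on success 'priv' is the new configuration (here an r5conf),
	 * which the md core eventually installs as mddev->private */
	mddev->private = priv;
	return 0;
}
#endif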
static struct md_personality raid6_personality =
{
	.name		= "raid6",
	.level		= 6,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.free		= raid5_free,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk = raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid6_check_reshape,
	.start_reshape	= raid5_start_reshape,
	.finish_reshape	= raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid6_takeover,
	.congested	= raid5_congested,
	.mergeable_bvec	= raid5_mergeable_bvec,
};

static struct md_personality raid5_personality =
{
	.name		= "raid5",
	.level		= 5,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.free		= raid5_free,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk = raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid5_check_reshape,
	.start_reshape	= raid5_start_reshape,
	.finish_reshape	= raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid5_takeover,
	.congested	= raid5_congested,
	.mergeable_bvec	= raid5_mergeable_bvec,
};

static struct md_personality raid4_personality =
{
	.name		= "raid4",
	.level		= 4,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.free		= raid5_free,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk = raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid5_check_reshape,
	.start_reshape	= raid5_start_reshape,
	.finish_reshape	= raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid4_takeover,
	.congested	= raid5_congested,
	.mergeable_bvec	= raid5_mergeable_bvec,
};

static int __init raid5_init(void)
{
	raid5_wq = alloc_workqueue("raid5wq",
		WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE|WQ_SYSFS, 0);
	if (!raid5_wq)
		return -ENOMEM;
	register_md_personality(&raid6_personality);
	register_md_personality(&raid5_personality);
	register_md_personality(&raid4_personality);
	return 0;
}

static void raid5_exit(void)
{
	unregister_md_personality(&raid6_personality);
	unregister_md_personality(&raid5_personality);
	unregister_md_personality(&raid4_personality);
	destroy_workqueue(raid5_wq);
}

module_init(raid5_init);
module_exit(raid5_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD");
MODULE_ALIAS("md-personality-4"); /* RAID5 */
MODULE_ALIAS("md-raid5");
MODULE_ALIAS("md-raid4");
MODULE_ALIAS("md-level-5");
MODULE_ALIAS("md-level-4");
MODULE_ALIAS("md-personality-8"); /* RAID6 */
MODULE_ALIAS("md-raid6");
MODULE_ALIAS("md-level-6");

/* This used to be two separate modules, they were: */
MODULE_ALIAS("raid5");
MODULE_ALIAS("raid6");