1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * raid5.c : Multiple Devices driver for Linux 4 * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman 5 * Copyright (C) 1999, 2000 Ingo Molnar 6 * Copyright (C) 2002, 2003 H. Peter Anvin 7 * 8 * RAID-4/5/6 management functions. 9 * Thanks to Penguin Computing for making the RAID-6 development possible 10 * by donating a test server! 11 */ 12 13 /* 14 * BITMAP UNPLUGGING: 15 * 16 * The sequencing for updating the bitmap reliably is a little 17 * subtle (and I got it wrong the first time) so it deserves some 18 * explanation. 19 * 20 * We group bitmap updates into batches. Each batch has a number. 21 * We may write out several batches at once, but that isn't very important. 22 * conf->seq_write is the number of the last batch successfully written. 23 * conf->seq_flush is the number of the last batch that was closed to 24 * new additions. 25 * When we discover that we will need to write to any block in a stripe 26 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq 27 * the number of the batch it will be in. This is seq_flush+1. 28 * When we are ready to do a write, if that batch hasn't been written yet, 29 * we plug the array and queue the stripe for later. 30 * When an unplug happens, we increment bm_flush, thus closing the current 31 * batch. 32 * When we notice that bm_flush > bm_write, we write out all pending updates 33 * to the bitmap, and advance bm_write to where bm_flush was. 34 * This may occasionally write a bit out twice, but is sure never to 35 * miss any bits. 36 */ 37 38 #include <linux/blkdev.h> 39 #include <linux/kthread.h> 40 #include <linux/raid/pq.h> 41 #include <linux/async_tx.h> 42 #include <linux/module.h> 43 #include <linux/async.h> 44 #include <linux/seq_file.h> 45 #include <linux/cpu.h> 46 #include <linux/slab.h> 47 #include <linux/ratelimit.h> 48 #include <linux/nodemask.h> 49 50 #include <trace/events/block.h> 51 #include <linux/list_sort.h> 52 53 #include "md.h" 54 #include "raid5.h" 55 #include "raid0.h" 56 #include "md-bitmap.h" 57 #include "raid5-log.h" 58 59 #define UNSUPPORTED_MDDEV_FLAGS (1L << MD_FAILFAST_SUPPORTED) 60 61 #define cpu_to_group(cpu) cpu_to_node(cpu) 62 #define ANY_GROUP NUMA_NO_NODE 63 64 static bool devices_handle_discard_safely = false; 65 module_param(devices_handle_discard_safely, bool, 0644); 66 MODULE_PARM_DESC(devices_handle_discard_safely, 67 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions"); 68 static struct workqueue_struct *raid5_wq; 69 70 static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect) 71 { 72 int hash = (sect >> RAID5_STRIPE_SHIFT(conf)) & HASH_MASK; 73 return &conf->stripe_hashtbl[hash]; 74 } 75 76 static inline int stripe_hash_locks_hash(struct r5conf *conf, sector_t sect) 77 { 78 return (sect >> RAID5_STRIPE_SHIFT(conf)) & STRIPE_HASH_LOCKS_MASK; 79 } 80 81 static inline void lock_device_hash_lock(struct r5conf *conf, int hash) 82 __acquires(&conf->device_lock) 83 { 84 spin_lock_irq(conf->hash_locks + hash); 85 spin_lock(&conf->device_lock); 86 } 87 88 static inline void unlock_device_hash_lock(struct r5conf *conf, int hash) 89 __releases(&conf->device_lock) 90 { 91 spin_unlock(&conf->device_lock); 92 spin_unlock_irq(conf->hash_locks + hash); 93 } 94 95 static inline void lock_all_device_hash_locks_irq(struct r5conf *conf) 96 __acquires(&conf->device_lock) 97 { 98 int i; 99 spin_lock_irq(conf->hash_locks); 100 for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++) 101 
spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks); 102 spin_lock(&conf->device_lock); 103 } 104 105 static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf) 106 __releases(&conf->device_lock) 107 { 108 int i; 109 spin_unlock(&conf->device_lock); 110 for (i = NR_STRIPE_HASH_LOCKS - 1; i; i--) 111 spin_unlock(conf->hash_locks + i); 112 spin_unlock_irq(conf->hash_locks); 113 } 114 115 /* Find first data disk in a raid6 stripe */ 116 static inline int raid6_d0(struct stripe_head *sh) 117 { 118 if (sh->ddf_layout) 119 /* ddf always start from first device */ 120 return 0; 121 /* md starts just after Q block */ 122 if (sh->qd_idx == sh->disks - 1) 123 return 0; 124 else 125 return sh->qd_idx + 1; 126 } 127 static inline int raid6_next_disk(int disk, int raid_disks) 128 { 129 disk++; 130 return (disk < raid_disks) ? disk : 0; 131 } 132 133 /* When walking through the disks in a raid5, starting at raid6_d0, 134 * We need to map each disk to a 'slot', where the data disks are slot 135 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk 136 * is raid_disks-1. This help does that mapping. 137 */ 138 static int raid6_idx_to_slot(int idx, struct stripe_head *sh, 139 int *count, int syndrome_disks) 140 { 141 int slot = *count; 142 143 if (sh->ddf_layout) 144 (*count)++; 145 if (idx == sh->pd_idx) 146 return syndrome_disks; 147 if (idx == sh->qd_idx) 148 return syndrome_disks + 1; 149 if (!sh->ddf_layout) 150 (*count)++; 151 return slot; 152 } 153 154 static void print_raid5_conf (struct r5conf *conf); 155 156 static int stripe_operations_active(struct stripe_head *sh) 157 { 158 return sh->check_state || sh->reconstruct_state || 159 test_bit(STRIPE_BIOFILL_RUN, &sh->state) || 160 test_bit(STRIPE_COMPUTE_RUN, &sh->state); 161 } 162 163 static bool stripe_is_lowprio(struct stripe_head *sh) 164 { 165 return (test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state) || 166 test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) && 167 !test_bit(STRIPE_R5C_CACHING, &sh->state); 168 } 169 170 static void raid5_wakeup_stripe_thread(struct stripe_head *sh) 171 __must_hold(&sh->raid_conf->device_lock) 172 { 173 struct r5conf *conf = sh->raid_conf; 174 struct r5worker_group *group; 175 int thread_cnt; 176 int i, cpu = sh->cpu; 177 178 if (!cpu_online(cpu)) { 179 cpu = cpumask_any(cpu_online_mask); 180 sh->cpu = cpu; 181 } 182 183 if (list_empty(&sh->lru)) { 184 struct r5worker_group *group; 185 group = conf->worker_groups + cpu_to_group(cpu); 186 if (stripe_is_lowprio(sh)) 187 list_add_tail(&sh->lru, &group->loprio_list); 188 else 189 list_add_tail(&sh->lru, &group->handle_list); 190 group->stripes_cnt++; 191 sh->group = group; 192 } 193 194 if (conf->worker_cnt_per_group == 0) { 195 md_wakeup_thread(conf->mddev->thread); 196 return; 197 } 198 199 group = conf->worker_groups + cpu_to_group(sh->cpu); 200 201 group->workers[0].working = true; 202 /* at least one worker should run to avoid race */ 203 queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work); 204 205 thread_cnt = group->stripes_cnt / MAX_STRIPE_BATCH - 1; 206 /* wakeup more workers */ 207 for (i = 1; i < conf->worker_cnt_per_group && thread_cnt > 0; i++) { 208 if (group->workers[i].working == false) { 209 group->workers[i].working = true; 210 queue_work_on(sh->cpu, raid5_wq, 211 &group->workers[i].work); 212 thread_cnt--; 213 } 214 } 215 } 216 217 static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh, 218 struct list_head *temp_inactive_list) 219 __must_hold(&conf->device_lock) 220 { 221 int i; 222 int 
injournal = 0; /* number of date pages with R5_InJournal */ 223 224 BUG_ON(!list_empty(&sh->lru)); 225 BUG_ON(atomic_read(&conf->active_stripes)==0); 226 227 if (r5c_is_writeback(conf->log)) 228 for (i = sh->disks; i--; ) 229 if (test_bit(R5_InJournal, &sh->dev[i].flags)) 230 injournal++; 231 /* 232 * In the following cases, the stripe cannot be released to cached 233 * lists. Therefore, we make the stripe write out and set 234 * STRIPE_HANDLE: 235 * 1. when quiesce in r5c write back; 236 * 2. when resync is requested fot the stripe. 237 */ 238 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) || 239 (conf->quiesce && r5c_is_writeback(conf->log) && 240 !test_bit(STRIPE_HANDLE, &sh->state) && injournal != 0)) { 241 if (test_bit(STRIPE_R5C_CACHING, &sh->state)) 242 r5c_make_stripe_write_out(sh); 243 set_bit(STRIPE_HANDLE, &sh->state); 244 } 245 246 if (test_bit(STRIPE_HANDLE, &sh->state)) { 247 if (test_bit(STRIPE_DELAYED, &sh->state) && 248 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 249 list_add_tail(&sh->lru, &conf->delayed_list); 250 else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && 251 sh->bm_seq - conf->seq_write > 0) 252 list_add_tail(&sh->lru, &conf->bitmap_list); 253 else { 254 clear_bit(STRIPE_DELAYED, &sh->state); 255 clear_bit(STRIPE_BIT_DELAY, &sh->state); 256 if (conf->worker_cnt_per_group == 0) { 257 if (stripe_is_lowprio(sh)) 258 list_add_tail(&sh->lru, 259 &conf->loprio_list); 260 else 261 list_add_tail(&sh->lru, 262 &conf->handle_list); 263 } else { 264 raid5_wakeup_stripe_thread(sh); 265 return; 266 } 267 } 268 md_wakeup_thread(conf->mddev->thread); 269 } else { 270 BUG_ON(stripe_operations_active(sh)); 271 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 272 if (atomic_dec_return(&conf->preread_active_stripes) 273 < IO_THRESHOLD) 274 md_wakeup_thread(conf->mddev->thread); 275 atomic_dec(&conf->active_stripes); 276 if (!test_bit(STRIPE_EXPANDING, &sh->state)) { 277 if (!r5c_is_writeback(conf->log)) 278 list_add_tail(&sh->lru, temp_inactive_list); 279 else { 280 WARN_ON(test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags)); 281 if (injournal == 0) 282 list_add_tail(&sh->lru, temp_inactive_list); 283 else if (injournal == conf->raid_disks - conf->max_degraded) { 284 /* full stripe */ 285 if (!test_and_set_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) 286 atomic_inc(&conf->r5c_cached_full_stripes); 287 if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) 288 atomic_dec(&conf->r5c_cached_partial_stripes); 289 list_add_tail(&sh->lru, &conf->r5c_full_stripe_list); 290 r5c_check_cached_full_stripe(conf); 291 } else 292 /* 293 * STRIPE_R5C_PARTIAL_STRIPE is set in 294 * r5c_try_caching_write(). No need to 295 * set it again. 296 */ 297 list_add_tail(&sh->lru, &conf->r5c_partial_stripe_list); 298 } 299 } 300 } 301 } 302 303 static void __release_stripe(struct r5conf *conf, struct stripe_head *sh, 304 struct list_head *temp_inactive_list) 305 __must_hold(&conf->device_lock) 306 { 307 if (atomic_dec_and_test(&sh->count)) 308 do_release_stripe(conf, sh, temp_inactive_list); 309 } 310 311 /* 312 * @hash could be NR_STRIPE_HASH_LOCKS, then we have a list of inactive_list 313 * 314 * Be careful: Only one task can add/delete stripes from temp_inactive_list at 315 * given time. Adding stripes only takes device lock, while deleting stripes 316 * only takes hash lock. 
317 */ 318 static void release_inactive_stripe_list(struct r5conf *conf, 319 struct list_head *temp_inactive_list, 320 int hash) 321 { 322 int size; 323 bool do_wakeup = false; 324 unsigned long flags; 325 326 if (hash == NR_STRIPE_HASH_LOCKS) { 327 size = NR_STRIPE_HASH_LOCKS; 328 hash = NR_STRIPE_HASH_LOCKS - 1; 329 } else 330 size = 1; 331 while (size) { 332 struct list_head *list = &temp_inactive_list[size - 1]; 333 334 /* 335 * We don't hold any lock here yet, raid5_get_active_stripe() might 336 * remove stripes from the list 337 */ 338 if (!list_empty_careful(list)) { 339 spin_lock_irqsave(conf->hash_locks + hash, flags); 340 if (list_empty(conf->inactive_list + hash) && 341 !list_empty(list)) 342 atomic_dec(&conf->empty_inactive_list_nr); 343 list_splice_tail_init(list, conf->inactive_list + hash); 344 do_wakeup = true; 345 spin_unlock_irqrestore(conf->hash_locks + hash, flags); 346 } 347 size--; 348 hash--; 349 } 350 351 if (do_wakeup) { 352 wake_up(&conf->wait_for_stripe); 353 if (atomic_read(&conf->active_stripes) == 0) 354 wake_up(&conf->wait_for_quiescent); 355 if (conf->retry_read_aligned) 356 md_wakeup_thread(conf->mddev->thread); 357 } 358 } 359 360 static int release_stripe_list(struct r5conf *conf, 361 struct list_head *temp_inactive_list) 362 __must_hold(&conf->device_lock) 363 { 364 struct stripe_head *sh, *t; 365 int count = 0; 366 struct llist_node *head; 367 368 head = llist_del_all(&conf->released_stripes); 369 head = llist_reverse_order(head); 370 llist_for_each_entry_safe(sh, t, head, release_list) { 371 int hash; 372 373 /* sh could be readded after STRIPE_ON_RELEASE_LIST is cleard */ 374 smp_mb(); 375 clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state); 376 /* 377 * Don't worry the bit is set here, because if the bit is set 378 * again, the count is always > 1. This is true for 379 * STRIPE_ON_UNPLUG_LIST bit too. 380 */ 381 hash = sh->hash_lock_index; 382 __release_stripe(conf, sh, &temp_inactive_list[hash]); 383 count++; 384 } 385 386 return count; 387 } 388 389 void raid5_release_stripe(struct stripe_head *sh) 390 { 391 struct r5conf *conf = sh->raid_conf; 392 unsigned long flags; 393 struct list_head list; 394 int hash; 395 bool wakeup; 396 397 /* Avoid release_list until the last reference. 398 */ 399 if (atomic_add_unless(&sh->count, -1, 1)) 400 return; 401 402 if (unlikely(!conf->mddev->thread) || 403 test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state)) 404 goto slow_path; 405 wakeup = llist_add(&sh->release_list, &conf->released_stripes); 406 if (wakeup) 407 md_wakeup_thread(conf->mddev->thread); 408 return; 409 slow_path: 410 /* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */ 411 if (atomic_dec_and_lock_irqsave(&sh->count, &conf->device_lock, flags)) { 412 INIT_LIST_HEAD(&list); 413 hash = sh->hash_lock_index; 414 do_release_stripe(conf, sh, &list); 415 spin_unlock_irqrestore(&conf->device_lock, flags); 416 release_inactive_stripe_list(conf, &list, hash); 417 } 418 } 419 420 static inline void remove_hash(struct stripe_head *sh) 421 { 422 pr_debug("remove_hash(), stripe %llu\n", 423 (unsigned long long)sh->sector); 424 425 hlist_del_init(&sh->hash); 426 } 427 428 static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh) 429 { 430 struct hlist_head *hp = stripe_hash(conf, sh->sector); 431 432 pr_debug("insert_hash(), stripe %llu\n", 433 (unsigned long long)sh->sector); 434 435 hlist_add_head(&sh->hash, hp); 436 } 437 438 /* find an idle stripe, make sure it is unhashed, and return it. 
*/ 439 static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash) 440 { 441 struct stripe_head *sh = NULL; 442 struct list_head *first; 443 444 if (list_empty(conf->inactive_list + hash)) 445 goto out; 446 first = (conf->inactive_list + hash)->next; 447 sh = list_entry(first, struct stripe_head, lru); 448 list_del_init(first); 449 remove_hash(sh); 450 atomic_inc(&conf->active_stripes); 451 BUG_ON(hash != sh->hash_lock_index); 452 if (list_empty(conf->inactive_list + hash)) 453 atomic_inc(&conf->empty_inactive_list_nr); 454 out: 455 return sh; 456 } 457 458 #if PAGE_SIZE != DEFAULT_STRIPE_SIZE 459 static void free_stripe_pages(struct stripe_head *sh) 460 { 461 int i; 462 struct page *p; 463 464 /* Have not allocate page pool */ 465 if (!sh->pages) 466 return; 467 468 for (i = 0; i < sh->nr_pages; i++) { 469 p = sh->pages[i]; 470 if (p) 471 put_page(p); 472 sh->pages[i] = NULL; 473 } 474 } 475 476 static int alloc_stripe_pages(struct stripe_head *sh, gfp_t gfp) 477 { 478 int i; 479 struct page *p; 480 481 for (i = 0; i < sh->nr_pages; i++) { 482 /* The page have allocated. */ 483 if (sh->pages[i]) 484 continue; 485 486 p = alloc_page(gfp); 487 if (!p) { 488 free_stripe_pages(sh); 489 return -ENOMEM; 490 } 491 sh->pages[i] = p; 492 } 493 return 0; 494 } 495 496 static int 497 init_stripe_shared_pages(struct stripe_head *sh, struct r5conf *conf, int disks) 498 { 499 int nr_pages, cnt; 500 501 if (sh->pages) 502 return 0; 503 504 /* Each of the sh->dev[i] need one conf->stripe_size */ 505 cnt = PAGE_SIZE / conf->stripe_size; 506 nr_pages = (disks + cnt - 1) / cnt; 507 508 sh->pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); 509 if (!sh->pages) 510 return -ENOMEM; 511 sh->nr_pages = nr_pages; 512 sh->stripes_per_page = cnt; 513 return 0; 514 } 515 #endif 516 517 static void shrink_buffers(struct stripe_head *sh) 518 { 519 int i; 520 int num = sh->raid_conf->pool_size; 521 522 #if PAGE_SIZE == DEFAULT_STRIPE_SIZE 523 for (i = 0; i < num ; i++) { 524 struct page *p; 525 526 WARN_ON(sh->dev[i].page != sh->dev[i].orig_page); 527 p = sh->dev[i].page; 528 if (!p) 529 continue; 530 sh->dev[i].page = NULL; 531 put_page(p); 532 } 533 #else 534 for (i = 0; i < num; i++) 535 sh->dev[i].page = NULL; 536 free_stripe_pages(sh); /* Free pages */ 537 #endif 538 } 539 540 static int grow_buffers(struct stripe_head *sh, gfp_t gfp) 541 { 542 int i; 543 int num = sh->raid_conf->pool_size; 544 545 #if PAGE_SIZE == DEFAULT_STRIPE_SIZE 546 for (i = 0; i < num; i++) { 547 struct page *page; 548 549 if (!(page = alloc_page(gfp))) { 550 return 1; 551 } 552 sh->dev[i].page = page; 553 sh->dev[i].orig_page = page; 554 sh->dev[i].offset = 0; 555 } 556 #else 557 if (alloc_stripe_pages(sh, gfp)) 558 return -ENOMEM; 559 560 for (i = 0; i < num; i++) { 561 sh->dev[i].page = raid5_get_dev_page(sh, i); 562 sh->dev[i].orig_page = sh->dev[i].page; 563 sh->dev[i].offset = raid5_get_page_offset(sh, i); 564 } 565 #endif 566 return 0; 567 } 568 569 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, 570 struct stripe_head *sh); 571 572 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) 573 { 574 struct r5conf *conf = sh->raid_conf; 575 int i, seq; 576 577 BUG_ON(atomic_read(&sh->count) != 0); 578 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); 579 BUG_ON(stripe_operations_active(sh)); 580 BUG_ON(sh->batch_head); 581 582 pr_debug("init_stripe called, stripe %llu\n", 583 (unsigned long long)sector); 584 retry: 585 seq = read_seqcount_begin(&conf->gen_lock); 
586 sh->generation = conf->generation - previous; 587 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks; 588 sh->sector = sector; 589 stripe_set_idx(sector, conf, previous, sh); 590 sh->state = 0; 591 592 for (i = sh->disks; i--; ) { 593 struct r5dev *dev = &sh->dev[i]; 594 595 if (dev->toread || dev->read || dev->towrite || dev->written || 596 test_bit(R5_LOCKED, &dev->flags)) { 597 pr_err("sector=%llx i=%d %p %p %p %p %d\n", 598 (unsigned long long)sh->sector, i, dev->toread, 599 dev->read, dev->towrite, dev->written, 600 test_bit(R5_LOCKED, &dev->flags)); 601 WARN_ON(1); 602 } 603 dev->flags = 0; 604 dev->sector = raid5_compute_blocknr(sh, i, previous); 605 } 606 if (read_seqcount_retry(&conf->gen_lock, seq)) 607 goto retry; 608 sh->overwrite_disks = 0; 609 insert_hash(conf, sh); 610 sh->cpu = smp_processor_id(); 611 set_bit(STRIPE_BATCH_READY, &sh->state); 612 } 613 614 static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector, 615 short generation) 616 { 617 struct stripe_head *sh; 618 619 pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector); 620 hlist_for_each_entry(sh, stripe_hash(conf, sector), hash) 621 if (sh->sector == sector && sh->generation == generation) 622 return sh; 623 pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector); 624 return NULL; 625 } 626 627 /* 628 * Need to check if array has failed when deciding whether to: 629 * - start an array 630 * - remove non-faulty devices 631 * - add a spare 632 * - allow a reshape 633 * This determination is simple when no reshape is happening. 634 * However if there is a reshape, we need to carefully check 635 * both the before and after sections. 636 * This is because some failed devices may only affect one 637 * of the two sections, and some non-in_sync devices may 638 * be insync in the section most affected by failed devices. 639 * 640 * Most calls to this function hold &conf->device_lock. Calls 641 * in raid5_run() do not require the lock as no other threads 642 * have been started yet. 643 */ 644 int raid5_calc_degraded(struct r5conf *conf) 645 { 646 int degraded, degraded2; 647 int i; 648 649 rcu_read_lock(); 650 degraded = 0; 651 for (i = 0; i < conf->previous_raid_disks; i++) { 652 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); 653 if (rdev && test_bit(Faulty, &rdev->flags)) 654 rdev = rcu_dereference(conf->disks[i].replacement); 655 if (!rdev || test_bit(Faulty, &rdev->flags)) 656 degraded++; 657 else if (test_bit(In_sync, &rdev->flags)) 658 ; 659 else 660 /* not in-sync or faulty. 661 * If the reshape increases the number of devices, 662 * this is being recovered by the reshape, so 663 * this 'previous' section is not in_sync. 664 * If the number of devices is being reduced however, 665 * the device can only be part of the array if 666 * we are reverting a reshape, so this section will 667 * be in-sync. 668 */ 669 if (conf->raid_disks >= conf->previous_raid_disks) 670 degraded++; 671 } 672 rcu_read_unlock(); 673 if (conf->raid_disks == conf->previous_raid_disks) 674 return degraded; 675 rcu_read_lock(); 676 degraded2 = 0; 677 for (i = 0; i < conf->raid_disks; i++) { 678 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); 679 if (rdev && test_bit(Faulty, &rdev->flags)) 680 rdev = rcu_dereference(conf->disks[i].replacement); 681 if (!rdev || test_bit(Faulty, &rdev->flags)) 682 degraded2++; 683 else if (test_bit(In_sync, &rdev->flags)) 684 ; 685 else 686 /* not in-sync or faulty. 
687 * If reshape increases the number of devices, this 688 * section has already been recovered, else it 689 * almost certainly hasn't. 690 */ 691 if (conf->raid_disks <= conf->previous_raid_disks) 692 degraded2++; 693 } 694 rcu_read_unlock(); 695 if (degraded2 > degraded) 696 return degraded2; 697 return degraded; 698 } 699 700 static bool has_failed(struct r5conf *conf) 701 { 702 int degraded = conf->mddev->degraded; 703 704 if (test_bit(MD_BROKEN, &conf->mddev->flags)) 705 return true; 706 707 if (conf->mddev->reshape_position != MaxSector) 708 degraded = raid5_calc_degraded(conf); 709 710 return degraded > conf->max_degraded; 711 } 712 713 struct stripe_head * 714 raid5_get_active_stripe(struct r5conf *conf, sector_t sector, 715 int previous, int noblock, int noquiesce) 716 { 717 struct stripe_head *sh; 718 int hash = stripe_hash_locks_hash(conf, sector); 719 int inc_empty_inactive_list_flag; 720 721 pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector); 722 723 spin_lock_irq(conf->hash_locks + hash); 724 725 do { 726 wait_event_lock_irq(conf->wait_for_quiescent, 727 conf->quiesce == 0 || noquiesce, 728 *(conf->hash_locks + hash)); 729 sh = __find_stripe(conf, sector, conf->generation - previous); 730 if (!sh) { 731 if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) { 732 sh = get_free_stripe(conf, hash); 733 if (!sh && !test_bit(R5_DID_ALLOC, 734 &conf->cache_state)) 735 set_bit(R5_ALLOC_MORE, 736 &conf->cache_state); 737 } 738 if (noblock && sh == NULL) 739 break; 740 741 r5c_check_stripe_cache_usage(conf); 742 if (!sh) { 743 set_bit(R5_INACTIVE_BLOCKED, 744 &conf->cache_state); 745 r5l_wake_reclaim(conf->log, 0); 746 wait_event_lock_irq( 747 conf->wait_for_stripe, 748 !list_empty(conf->inactive_list + hash) && 749 (atomic_read(&conf->active_stripes) 750 < (conf->max_nr_stripes * 3 / 4) 751 || !test_bit(R5_INACTIVE_BLOCKED, 752 &conf->cache_state)), 753 *(conf->hash_locks + hash)); 754 clear_bit(R5_INACTIVE_BLOCKED, 755 &conf->cache_state); 756 } else { 757 init_stripe(sh, sector, previous); 758 atomic_inc(&sh->count); 759 } 760 } else if (!atomic_inc_not_zero(&sh->count)) { 761 spin_lock(&conf->device_lock); 762 if (!atomic_read(&sh->count)) { 763 if (!test_bit(STRIPE_HANDLE, &sh->state)) 764 atomic_inc(&conf->active_stripes); 765 BUG_ON(list_empty(&sh->lru) && 766 !test_bit(STRIPE_EXPANDING, &sh->state)); 767 inc_empty_inactive_list_flag = 0; 768 if (!list_empty(conf->inactive_list + hash)) 769 inc_empty_inactive_list_flag = 1; 770 list_del_init(&sh->lru); 771 if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag) 772 atomic_inc(&conf->empty_inactive_list_nr); 773 if (sh->group) { 774 sh->group->stripes_cnt--; 775 sh->group = NULL; 776 } 777 } 778 atomic_inc(&sh->count); 779 spin_unlock(&conf->device_lock); 780 } 781 } while (sh == NULL); 782 783 spin_unlock_irq(conf->hash_locks + hash); 784 return sh; 785 } 786 787 static bool is_full_stripe_write(struct stripe_head *sh) 788 { 789 BUG_ON(sh->overwrite_disks > (sh->disks - sh->raid_conf->max_degraded)); 790 return sh->overwrite_disks == (sh->disks - sh->raid_conf->max_degraded); 791 } 792 793 static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) 794 __acquires(&sh1->stripe_lock) 795 __acquires(&sh2->stripe_lock) 796 { 797 if (sh1 > sh2) { 798 spin_lock_irq(&sh2->stripe_lock); 799 spin_lock_nested(&sh1->stripe_lock, 1); 800 } else { 801 spin_lock_irq(&sh1->stripe_lock); 802 spin_lock_nested(&sh2->stripe_lock, 1); 803 } 804 } 805 806 static void unlock_two_stripes(struct 
stripe_head *sh1, struct stripe_head *sh2) 807 __releases(&sh1->stripe_lock) 808 __releases(&sh2->stripe_lock) 809 { 810 spin_unlock(&sh1->stripe_lock); 811 spin_unlock_irq(&sh2->stripe_lock); 812 } 813 814 /* Only freshly new full stripe normal write stripe can be added to a batch list */ 815 static bool stripe_can_batch(struct stripe_head *sh) 816 { 817 struct r5conf *conf = sh->raid_conf; 818 819 if (raid5_has_log(conf) || raid5_has_ppl(conf)) 820 return false; 821 return test_bit(STRIPE_BATCH_READY, &sh->state) && 822 !test_bit(STRIPE_BITMAP_PENDING, &sh->state) && 823 is_full_stripe_write(sh); 824 } 825 826 /* we only do back search */ 827 static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh) 828 { 829 struct stripe_head *head; 830 sector_t head_sector, tmp_sec; 831 int hash; 832 int dd_idx; 833 int inc_empty_inactive_list_flag; 834 835 /* Don't cross chunks, so stripe pd_idx/qd_idx is the same */ 836 tmp_sec = sh->sector; 837 if (!sector_div(tmp_sec, conf->chunk_sectors)) 838 return; 839 head_sector = sh->sector - RAID5_STRIPE_SECTORS(conf); 840 841 hash = stripe_hash_locks_hash(conf, head_sector); 842 spin_lock_irq(conf->hash_locks + hash); 843 head = __find_stripe(conf, head_sector, conf->generation); 844 if (head && !atomic_inc_not_zero(&head->count)) { 845 spin_lock(&conf->device_lock); 846 if (!atomic_read(&head->count)) { 847 if (!test_bit(STRIPE_HANDLE, &head->state)) 848 atomic_inc(&conf->active_stripes); 849 BUG_ON(list_empty(&head->lru) && 850 !test_bit(STRIPE_EXPANDING, &head->state)); 851 inc_empty_inactive_list_flag = 0; 852 if (!list_empty(conf->inactive_list + hash)) 853 inc_empty_inactive_list_flag = 1; 854 list_del_init(&head->lru); 855 if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag) 856 atomic_inc(&conf->empty_inactive_list_nr); 857 if (head->group) { 858 head->group->stripes_cnt--; 859 head->group = NULL; 860 } 861 } 862 atomic_inc(&head->count); 863 spin_unlock(&conf->device_lock); 864 } 865 spin_unlock_irq(conf->hash_locks + hash); 866 867 if (!head) 868 return; 869 if (!stripe_can_batch(head)) 870 goto out; 871 872 lock_two_stripes(head, sh); 873 /* clear_batch_ready clear the flag */ 874 if (!stripe_can_batch(head) || !stripe_can_batch(sh)) 875 goto unlock_out; 876 877 if (sh->batch_head) 878 goto unlock_out; 879 880 dd_idx = 0; 881 while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) 882 dd_idx++; 883 if (head->dev[dd_idx].towrite->bi_opf != sh->dev[dd_idx].towrite->bi_opf || 884 bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite)) 885 goto unlock_out; 886 887 if (head->batch_head) { 888 spin_lock(&head->batch_head->batch_lock); 889 /* This batch list is already running */ 890 if (!stripe_can_batch(head)) { 891 spin_unlock(&head->batch_head->batch_lock); 892 goto unlock_out; 893 } 894 /* 895 * We must assign batch_head of this stripe within the 896 * batch_lock, otherwise clear_batch_ready of batch head 897 * stripe could clear BATCH_READY bit of this stripe and 898 * this stripe->batch_head doesn't get assigned, which 899 * could confuse clear_batch_ready for this stripe 900 */ 901 sh->batch_head = head->batch_head; 902 903 /* 904 * at this point, head's BATCH_READY could be cleared, but we 905 * can still add the stripe to batch list 906 */ 907 list_add(&sh->batch_list, &head->batch_list); 908 spin_unlock(&head->batch_head->batch_lock); 909 } else { 910 head->batch_head = head; 911 sh->batch_head = head->batch_head; 912 spin_lock(&head->batch_lock); 913 list_add_tail(&sh->batch_list, 
&head->batch_list); 914 spin_unlock(&head->batch_lock); 915 } 916 917 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 918 if (atomic_dec_return(&conf->preread_active_stripes) 919 < IO_THRESHOLD) 920 md_wakeup_thread(conf->mddev->thread); 921 922 if (test_and_clear_bit(STRIPE_BIT_DELAY, &sh->state)) { 923 int seq = sh->bm_seq; 924 if (test_bit(STRIPE_BIT_DELAY, &sh->batch_head->state) && 925 sh->batch_head->bm_seq > seq) 926 seq = sh->batch_head->bm_seq; 927 set_bit(STRIPE_BIT_DELAY, &sh->batch_head->state); 928 sh->batch_head->bm_seq = seq; 929 } 930 931 atomic_inc(&sh->count); 932 unlock_out: 933 unlock_two_stripes(head, sh); 934 out: 935 raid5_release_stripe(head); 936 } 937 938 /* Determine if 'data_offset' or 'new_data_offset' should be used 939 * in this stripe_head. 940 */ 941 static int use_new_offset(struct r5conf *conf, struct stripe_head *sh) 942 { 943 sector_t progress = conf->reshape_progress; 944 /* Need a memory barrier to make sure we see the value 945 * of conf->generation, or ->data_offset that was set before 946 * reshape_progress was updated. 947 */ 948 smp_rmb(); 949 if (progress == MaxSector) 950 return 0; 951 if (sh->generation == conf->generation - 1) 952 return 0; 953 /* We are in a reshape, and this is a new-generation stripe, 954 * so use new_data_offset. 955 */ 956 return 1; 957 } 958 959 static void dispatch_bio_list(struct bio_list *tmp) 960 { 961 struct bio *bio; 962 963 while ((bio = bio_list_pop(tmp))) 964 submit_bio_noacct(bio); 965 } 966 967 static int cmp_stripe(void *priv, const struct list_head *a, 968 const struct list_head *b) 969 { 970 const struct r5pending_data *da = list_entry(a, 971 struct r5pending_data, sibling); 972 const struct r5pending_data *db = list_entry(b, 973 struct r5pending_data, sibling); 974 if (da->sector > db->sector) 975 return 1; 976 if (da->sector < db->sector) 977 return -1; 978 return 0; 979 } 980 981 static void dispatch_defer_bios(struct r5conf *conf, int target, 982 struct bio_list *list) 983 { 984 struct r5pending_data *data; 985 struct list_head *first, *next = NULL; 986 int cnt = 0; 987 988 if (conf->pending_data_cnt == 0) 989 return; 990 991 list_sort(NULL, &conf->pending_list, cmp_stripe); 992 993 first = conf->pending_list.next; 994 995 /* temporarily move the head */ 996 if (conf->next_pending_data) 997 list_move_tail(&conf->pending_list, 998 &conf->next_pending_data->sibling); 999 1000 while (!list_empty(&conf->pending_list)) { 1001 data = list_first_entry(&conf->pending_list, 1002 struct r5pending_data, sibling); 1003 if (&data->sibling == first) 1004 first = data->sibling.next; 1005 next = data->sibling.next; 1006 1007 bio_list_merge(list, &data->bios); 1008 list_move(&data->sibling, &conf->free_list); 1009 cnt++; 1010 if (cnt >= target) 1011 break; 1012 } 1013 conf->pending_data_cnt -= cnt; 1014 BUG_ON(conf->pending_data_cnt < 0 || cnt < target); 1015 1016 if (next != &conf->pending_list) 1017 conf->next_pending_data = list_entry(next, 1018 struct r5pending_data, sibling); 1019 else 1020 conf->next_pending_data = NULL; 1021 /* list isn't empty */ 1022 if (first != &conf->pending_list) 1023 list_move_tail(&conf->pending_list, first); 1024 } 1025 1026 static void flush_deferred_bios(struct r5conf *conf) 1027 { 1028 struct bio_list tmp = BIO_EMPTY_LIST; 1029 1030 if (conf->pending_data_cnt == 0) 1031 return; 1032 1033 spin_lock(&conf->pending_bios_lock); 1034 dispatch_defer_bios(conf, conf->pending_data_cnt, &tmp); 1035 BUG_ON(conf->pending_data_cnt != 0); 1036 spin_unlock(&conf->pending_bios_lock); 
1037 1038 dispatch_bio_list(&tmp); 1039 } 1040 1041 static void defer_issue_bios(struct r5conf *conf, sector_t sector, 1042 struct bio_list *bios) 1043 { 1044 struct bio_list tmp = BIO_EMPTY_LIST; 1045 struct r5pending_data *ent; 1046 1047 spin_lock(&conf->pending_bios_lock); 1048 ent = list_first_entry(&conf->free_list, struct r5pending_data, 1049 sibling); 1050 list_move_tail(&ent->sibling, &conf->pending_list); 1051 ent->sector = sector; 1052 bio_list_init(&ent->bios); 1053 bio_list_merge(&ent->bios, bios); 1054 conf->pending_data_cnt++; 1055 if (conf->pending_data_cnt >= PENDING_IO_MAX) 1056 dispatch_defer_bios(conf, PENDING_IO_ONE_FLUSH, &tmp); 1057 1058 spin_unlock(&conf->pending_bios_lock); 1059 1060 dispatch_bio_list(&tmp); 1061 } 1062 1063 static void 1064 raid5_end_read_request(struct bio *bi); 1065 static void 1066 raid5_end_write_request(struct bio *bi); 1067 1068 static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) 1069 { 1070 struct r5conf *conf = sh->raid_conf; 1071 int i, disks = sh->disks; 1072 struct stripe_head *head_sh = sh; 1073 struct bio_list pending_bios = BIO_EMPTY_LIST; 1074 struct r5dev *dev; 1075 bool should_defer; 1076 1077 might_sleep(); 1078 1079 if (log_stripe(sh, s) == 0) 1080 return; 1081 1082 should_defer = conf->batch_bio_dispatch && conf->group_cnt; 1083 1084 for (i = disks; i--; ) { 1085 enum req_op op; 1086 blk_opf_t op_flags = 0; 1087 int replace_only = 0; 1088 struct bio *bi, *rbi; 1089 struct md_rdev *rdev, *rrdev = NULL; 1090 1091 sh = head_sh; 1092 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) { 1093 op = REQ_OP_WRITE; 1094 if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags)) 1095 op_flags = REQ_FUA; 1096 if (test_bit(R5_Discard, &sh->dev[i].flags)) 1097 op = REQ_OP_DISCARD; 1098 } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) 1099 op = REQ_OP_READ; 1100 else if (test_and_clear_bit(R5_WantReplace, 1101 &sh->dev[i].flags)) { 1102 op = REQ_OP_WRITE; 1103 replace_only = 1; 1104 } else 1105 continue; 1106 if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags)) 1107 op_flags |= REQ_SYNC; 1108 1109 again: 1110 dev = &sh->dev[i]; 1111 bi = &dev->req; 1112 rbi = &dev->rreq; /* For writing to replacement */ 1113 1114 rcu_read_lock(); 1115 rrdev = rcu_dereference(conf->disks[i].replacement); 1116 smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */ 1117 rdev = rcu_dereference(conf->disks[i].rdev); 1118 if (!rdev) { 1119 rdev = rrdev; 1120 rrdev = NULL; 1121 } 1122 if (op_is_write(op)) { 1123 if (replace_only) 1124 rdev = NULL; 1125 if (rdev == rrdev) 1126 /* We raced and saw duplicates */ 1127 rrdev = NULL; 1128 } else { 1129 if (test_bit(R5_ReadRepl, &head_sh->dev[i].flags) && rrdev) 1130 rdev = rrdev; 1131 rrdev = NULL; 1132 } 1133 1134 if (rdev && test_bit(Faulty, &rdev->flags)) 1135 rdev = NULL; 1136 if (rdev) 1137 atomic_inc(&rdev->nr_pending); 1138 if (rrdev && test_bit(Faulty, &rrdev->flags)) 1139 rrdev = NULL; 1140 if (rrdev) 1141 atomic_inc(&rrdev->nr_pending); 1142 rcu_read_unlock(); 1143 1144 /* We have already checked bad blocks for reads. Now 1145 * need to check for writes. We never accept write errors 1146 * on the replacement, so we don't to check rrdev. 
1147 */ 1148 while (op_is_write(op) && rdev && 1149 test_bit(WriteErrorSeen, &rdev->flags)) { 1150 sector_t first_bad; 1151 int bad_sectors; 1152 int bad = is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf), 1153 &first_bad, &bad_sectors); 1154 if (!bad) 1155 break; 1156 1157 if (bad < 0) { 1158 set_bit(BlockedBadBlocks, &rdev->flags); 1159 if (!conf->mddev->external && 1160 conf->mddev->sb_flags) { 1161 /* It is very unlikely, but we might 1162 * still need to write out the 1163 * bad block log - better give it 1164 * a chance*/ 1165 md_check_recovery(conf->mddev); 1166 } 1167 /* 1168 * Because md_wait_for_blocked_rdev 1169 * will dec nr_pending, we must 1170 * increment it first. 1171 */ 1172 atomic_inc(&rdev->nr_pending); 1173 md_wait_for_blocked_rdev(rdev, conf->mddev); 1174 } else { 1175 /* Acknowledged bad block - skip the write */ 1176 rdev_dec_pending(rdev, conf->mddev); 1177 rdev = NULL; 1178 } 1179 } 1180 1181 if (rdev) { 1182 if (s->syncing || s->expanding || s->expanded 1183 || s->replacing) 1184 md_sync_acct(rdev->bdev, RAID5_STRIPE_SECTORS(conf)); 1185 1186 set_bit(STRIPE_IO_STARTED, &sh->state); 1187 1188 bio_init(bi, rdev->bdev, &dev->vec, 1, op | op_flags); 1189 bi->bi_end_io = op_is_write(op) 1190 ? raid5_end_write_request 1191 : raid5_end_read_request; 1192 bi->bi_private = sh; 1193 1194 pr_debug("%s: for %llu schedule op %d on disc %d\n", 1195 __func__, (unsigned long long)sh->sector, 1196 bi->bi_opf, i); 1197 atomic_inc(&sh->count); 1198 if (sh != head_sh) 1199 atomic_inc(&head_sh->count); 1200 if (use_new_offset(conf, sh)) 1201 bi->bi_iter.bi_sector = (sh->sector 1202 + rdev->new_data_offset); 1203 else 1204 bi->bi_iter.bi_sector = (sh->sector 1205 + rdev->data_offset); 1206 if (test_bit(R5_ReadNoMerge, &head_sh->dev[i].flags)) 1207 bi->bi_opf |= REQ_NOMERGE; 1208 1209 if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) 1210 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); 1211 1212 if (!op_is_write(op) && 1213 test_bit(R5_InJournal, &sh->dev[i].flags)) 1214 /* 1215 * issuing read for a page in journal, this 1216 * must be preparing for prexor in rmw; read 1217 * the data into orig_page 1218 */ 1219 sh->dev[i].vec.bv_page = sh->dev[i].orig_page; 1220 else 1221 sh->dev[i].vec.bv_page = sh->dev[i].page; 1222 bi->bi_vcnt = 1; 1223 bi->bi_io_vec[0].bv_len = RAID5_STRIPE_SIZE(conf); 1224 bi->bi_io_vec[0].bv_offset = sh->dev[i].offset; 1225 bi->bi_iter.bi_size = RAID5_STRIPE_SIZE(conf); 1226 /* 1227 * If this is discard request, set bi_vcnt 0. 
We don't 1228 * want to confuse SCSI because SCSI will replace payload 1229 */ 1230 if (op == REQ_OP_DISCARD) 1231 bi->bi_vcnt = 0; 1232 if (rrdev) 1233 set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); 1234 1235 if (conf->mddev->gendisk) 1236 trace_block_bio_remap(bi, 1237 disk_devt(conf->mddev->gendisk), 1238 sh->dev[i].sector); 1239 if (should_defer && op_is_write(op)) 1240 bio_list_add(&pending_bios, bi); 1241 else 1242 submit_bio_noacct(bi); 1243 } 1244 if (rrdev) { 1245 if (s->syncing || s->expanding || s->expanded 1246 || s->replacing) 1247 md_sync_acct(rrdev->bdev, RAID5_STRIPE_SECTORS(conf)); 1248 1249 set_bit(STRIPE_IO_STARTED, &sh->state); 1250 1251 bio_init(rbi, rrdev->bdev, &dev->rvec, 1, op | op_flags); 1252 BUG_ON(!op_is_write(op)); 1253 rbi->bi_end_io = raid5_end_write_request; 1254 rbi->bi_private = sh; 1255 1256 pr_debug("%s: for %llu schedule op %d on " 1257 "replacement disc %d\n", 1258 __func__, (unsigned long long)sh->sector, 1259 rbi->bi_opf, i); 1260 atomic_inc(&sh->count); 1261 if (sh != head_sh) 1262 atomic_inc(&head_sh->count); 1263 if (use_new_offset(conf, sh)) 1264 rbi->bi_iter.bi_sector = (sh->sector 1265 + rrdev->new_data_offset); 1266 else 1267 rbi->bi_iter.bi_sector = (sh->sector 1268 + rrdev->data_offset); 1269 if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) 1270 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); 1271 sh->dev[i].rvec.bv_page = sh->dev[i].page; 1272 rbi->bi_vcnt = 1; 1273 rbi->bi_io_vec[0].bv_len = RAID5_STRIPE_SIZE(conf); 1274 rbi->bi_io_vec[0].bv_offset = sh->dev[i].offset; 1275 rbi->bi_iter.bi_size = RAID5_STRIPE_SIZE(conf); 1276 /* 1277 * If this is discard request, set bi_vcnt 0. We don't 1278 * want to confuse SCSI because SCSI will replace payload 1279 */ 1280 if (op == REQ_OP_DISCARD) 1281 rbi->bi_vcnt = 0; 1282 if (conf->mddev->gendisk) 1283 trace_block_bio_remap(rbi, 1284 disk_devt(conf->mddev->gendisk), 1285 sh->dev[i].sector); 1286 if (should_defer && op_is_write(op)) 1287 bio_list_add(&pending_bios, rbi); 1288 else 1289 submit_bio_noacct(rbi); 1290 } 1291 if (!rdev && !rrdev) { 1292 if (op_is_write(op)) 1293 set_bit(STRIPE_DEGRADED, &sh->state); 1294 pr_debug("skip op %d on disc %d for sector %llu\n", 1295 bi->bi_opf, i, (unsigned long long)sh->sector); 1296 clear_bit(R5_LOCKED, &sh->dev[i].flags); 1297 set_bit(STRIPE_HANDLE, &sh->state); 1298 } 1299 1300 if (!head_sh->batch_head) 1301 continue; 1302 sh = list_first_entry(&sh->batch_list, struct stripe_head, 1303 batch_list); 1304 if (sh != head_sh) 1305 goto again; 1306 } 1307 1308 if (should_defer && !bio_list_empty(&pending_bios)) 1309 defer_issue_bios(conf, head_sh->sector, &pending_bios); 1310 } 1311 1312 static struct dma_async_tx_descriptor * 1313 async_copy_data(int frombio, struct bio *bio, struct page **page, 1314 unsigned int poff, sector_t sector, struct dma_async_tx_descriptor *tx, 1315 struct stripe_head *sh, int no_skipcopy) 1316 { 1317 struct bio_vec bvl; 1318 struct bvec_iter iter; 1319 struct page *bio_page; 1320 int page_offset; 1321 struct async_submit_ctl submit; 1322 enum async_tx_flags flags = 0; 1323 struct r5conf *conf = sh->raid_conf; 1324 1325 if (bio->bi_iter.bi_sector >= sector) 1326 page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512; 1327 else 1328 page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512; 1329 1330 if (frombio) 1331 flags |= ASYNC_TX_FENCE; 1332 init_async_submit(&submit, flags, tx, NULL, NULL, NULL); 1333 1334 bio_for_each_segment(bvl, bio, iter) { 1335 int len = bvl.bv_len; 1336 int clen; 1337 int b_offset = 0; 1338 
1339 if (page_offset < 0) { 1340 b_offset = -page_offset; 1341 page_offset += b_offset; 1342 len -= b_offset; 1343 } 1344 1345 if (len > 0 && page_offset + len > RAID5_STRIPE_SIZE(conf)) 1346 clen = RAID5_STRIPE_SIZE(conf) - page_offset; 1347 else 1348 clen = len; 1349 1350 if (clen > 0) { 1351 b_offset += bvl.bv_offset; 1352 bio_page = bvl.bv_page; 1353 if (frombio) { 1354 if (conf->skip_copy && 1355 b_offset == 0 && page_offset == 0 && 1356 clen == RAID5_STRIPE_SIZE(conf) && 1357 !no_skipcopy) 1358 *page = bio_page; 1359 else 1360 tx = async_memcpy(*page, bio_page, page_offset + poff, 1361 b_offset, clen, &submit); 1362 } else 1363 tx = async_memcpy(bio_page, *page, b_offset, 1364 page_offset + poff, clen, &submit); 1365 } 1366 /* chain the operations */ 1367 submit.depend_tx = tx; 1368 1369 if (clen < len) /* hit end of page */ 1370 break; 1371 page_offset += len; 1372 } 1373 1374 return tx; 1375 } 1376 1377 static void ops_complete_biofill(void *stripe_head_ref) 1378 { 1379 struct stripe_head *sh = stripe_head_ref; 1380 int i; 1381 struct r5conf *conf = sh->raid_conf; 1382 1383 pr_debug("%s: stripe %llu\n", __func__, 1384 (unsigned long long)sh->sector); 1385 1386 /* clear completed biofills */ 1387 for (i = sh->disks; i--; ) { 1388 struct r5dev *dev = &sh->dev[i]; 1389 1390 /* acknowledge completion of a biofill operation */ 1391 /* and check if we need to reply to a read request, 1392 * new R5_Wantfill requests are held off until 1393 * !STRIPE_BIOFILL_RUN 1394 */ 1395 if (test_and_clear_bit(R5_Wantfill, &dev->flags)) { 1396 struct bio *rbi, *rbi2; 1397 1398 BUG_ON(!dev->read); 1399 rbi = dev->read; 1400 dev->read = NULL; 1401 while (rbi && rbi->bi_iter.bi_sector < 1402 dev->sector + RAID5_STRIPE_SECTORS(conf)) { 1403 rbi2 = r5_next_bio(conf, rbi, dev->sector); 1404 bio_endio(rbi); 1405 rbi = rbi2; 1406 } 1407 } 1408 } 1409 clear_bit(STRIPE_BIOFILL_RUN, &sh->state); 1410 1411 set_bit(STRIPE_HANDLE, &sh->state); 1412 raid5_release_stripe(sh); 1413 } 1414 1415 static void ops_run_biofill(struct stripe_head *sh) 1416 { 1417 struct dma_async_tx_descriptor *tx = NULL; 1418 struct async_submit_ctl submit; 1419 int i; 1420 struct r5conf *conf = sh->raid_conf; 1421 1422 BUG_ON(sh->batch_head); 1423 pr_debug("%s: stripe %llu\n", __func__, 1424 (unsigned long long)sh->sector); 1425 1426 for (i = sh->disks; i--; ) { 1427 struct r5dev *dev = &sh->dev[i]; 1428 if (test_bit(R5_Wantfill, &dev->flags)) { 1429 struct bio *rbi; 1430 spin_lock_irq(&sh->stripe_lock); 1431 dev->read = rbi = dev->toread; 1432 dev->toread = NULL; 1433 spin_unlock_irq(&sh->stripe_lock); 1434 while (rbi && rbi->bi_iter.bi_sector < 1435 dev->sector + RAID5_STRIPE_SECTORS(conf)) { 1436 tx = async_copy_data(0, rbi, &dev->page, 1437 dev->offset, 1438 dev->sector, tx, sh, 0); 1439 rbi = r5_next_bio(conf, rbi, dev->sector); 1440 } 1441 } 1442 } 1443 1444 atomic_inc(&sh->count); 1445 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL); 1446 async_trigger_callback(&submit); 1447 } 1448 1449 static void mark_target_uptodate(struct stripe_head *sh, int target) 1450 { 1451 struct r5dev *tgt; 1452 1453 if (target < 0) 1454 return; 1455 1456 tgt = &sh->dev[target]; 1457 set_bit(R5_UPTODATE, &tgt->flags); 1458 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 1459 clear_bit(R5_Wantcompute, &tgt->flags); 1460 } 1461 1462 static void ops_complete_compute(void *stripe_head_ref) 1463 { 1464 struct stripe_head *sh = stripe_head_ref; 1465 1466 pr_debug("%s: stripe %llu\n", __func__, 1467 (unsigned long long)sh->sector); 
1468 1469 /* mark the computed target(s) as uptodate */ 1470 mark_target_uptodate(sh, sh->ops.target); 1471 mark_target_uptodate(sh, sh->ops.target2); 1472 1473 clear_bit(STRIPE_COMPUTE_RUN, &sh->state); 1474 if (sh->check_state == check_state_compute_run) 1475 sh->check_state = check_state_compute_result; 1476 set_bit(STRIPE_HANDLE, &sh->state); 1477 raid5_release_stripe(sh); 1478 } 1479 1480 /* return a pointer to the address conversion region of the scribble buffer */ 1481 static struct page **to_addr_page(struct raid5_percpu *percpu, int i) 1482 { 1483 return percpu->scribble + i * percpu->scribble_obj_size; 1484 } 1485 1486 /* return a pointer to the address conversion region of the scribble buffer */ 1487 static addr_conv_t *to_addr_conv(struct stripe_head *sh, 1488 struct raid5_percpu *percpu, int i) 1489 { 1490 return (void *) (to_addr_page(percpu, i) + sh->disks + 2); 1491 } 1492 1493 /* 1494 * Return a pointer to record offset address. 1495 */ 1496 static unsigned int * 1497 to_addr_offs(struct stripe_head *sh, struct raid5_percpu *percpu) 1498 { 1499 return (unsigned int *) (to_addr_conv(sh, percpu, 0) + sh->disks + 2); 1500 } 1501 1502 static struct dma_async_tx_descriptor * 1503 ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu) 1504 { 1505 int disks = sh->disks; 1506 struct page **xor_srcs = to_addr_page(percpu, 0); 1507 unsigned int *off_srcs = to_addr_offs(sh, percpu); 1508 int target = sh->ops.target; 1509 struct r5dev *tgt = &sh->dev[target]; 1510 struct page *xor_dest = tgt->page; 1511 unsigned int off_dest = tgt->offset; 1512 int count = 0; 1513 struct dma_async_tx_descriptor *tx; 1514 struct async_submit_ctl submit; 1515 int i; 1516 1517 BUG_ON(sh->batch_head); 1518 1519 pr_debug("%s: stripe %llu block: %d\n", 1520 __func__, (unsigned long long)sh->sector, target); 1521 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 1522 1523 for (i = disks; i--; ) { 1524 if (i != target) { 1525 off_srcs[count] = sh->dev[i].offset; 1526 xor_srcs[count++] = sh->dev[i].page; 1527 } 1528 } 1529 1530 atomic_inc(&sh->count); 1531 1532 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL, 1533 ops_complete_compute, sh, to_addr_conv(sh, percpu, 0)); 1534 if (unlikely(count == 1)) 1535 tx = async_memcpy(xor_dest, xor_srcs[0], off_dest, off_srcs[0], 1536 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); 1537 else 1538 tx = async_xor_offs(xor_dest, off_dest, xor_srcs, off_srcs, count, 1539 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); 1540 1541 return tx; 1542 } 1543 1544 /* set_syndrome_sources - populate source buffers for gen_syndrome 1545 * @srcs - (struct page *) array of size sh->disks 1546 * @offs - (unsigned int) array of offset for each page 1547 * @sh - stripe_head to parse 1548 * 1549 * Populates srcs in proper layout order for the stripe and returns the 1550 * 'count' of sources to be used in a call to async_gen_syndrome. The P 1551 * destination buffer is recorded in srcs[count] and the Q destination 1552 * is recorded in srcs[count+1]]. 1553 */ 1554 static int set_syndrome_sources(struct page **srcs, 1555 unsigned int *offs, 1556 struct stripe_head *sh, 1557 int srctype) 1558 { 1559 int disks = sh->disks; 1560 int syndrome_disks = sh->ddf_layout ? 
disks : (disks - 2); 1561 int d0_idx = raid6_d0(sh); 1562 int count; 1563 int i; 1564 1565 for (i = 0; i < disks; i++) 1566 srcs[i] = NULL; 1567 1568 count = 0; 1569 i = d0_idx; 1570 do { 1571 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); 1572 struct r5dev *dev = &sh->dev[i]; 1573 1574 if (i == sh->qd_idx || i == sh->pd_idx || 1575 (srctype == SYNDROME_SRC_ALL) || 1576 (srctype == SYNDROME_SRC_WANT_DRAIN && 1577 (test_bit(R5_Wantdrain, &dev->flags) || 1578 test_bit(R5_InJournal, &dev->flags))) || 1579 (srctype == SYNDROME_SRC_WRITTEN && 1580 (dev->written || 1581 test_bit(R5_InJournal, &dev->flags)))) { 1582 if (test_bit(R5_InJournal, &dev->flags)) 1583 srcs[slot] = sh->dev[i].orig_page; 1584 else 1585 srcs[slot] = sh->dev[i].page; 1586 /* 1587 * For R5_InJournal, PAGE_SIZE must be 4KB and will 1588 * not shared page. In that case, dev[i].offset 1589 * is 0. 1590 */ 1591 offs[slot] = sh->dev[i].offset; 1592 } 1593 i = raid6_next_disk(i, disks); 1594 } while (i != d0_idx); 1595 1596 return syndrome_disks; 1597 } 1598 1599 static struct dma_async_tx_descriptor * 1600 ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu) 1601 { 1602 int disks = sh->disks; 1603 struct page **blocks = to_addr_page(percpu, 0); 1604 unsigned int *offs = to_addr_offs(sh, percpu); 1605 int target; 1606 int qd_idx = sh->qd_idx; 1607 struct dma_async_tx_descriptor *tx; 1608 struct async_submit_ctl submit; 1609 struct r5dev *tgt; 1610 struct page *dest; 1611 unsigned int dest_off; 1612 int i; 1613 int count; 1614 1615 BUG_ON(sh->batch_head); 1616 if (sh->ops.target < 0) 1617 target = sh->ops.target2; 1618 else if (sh->ops.target2 < 0) 1619 target = sh->ops.target; 1620 else 1621 /* we should only have one valid target */ 1622 BUG(); 1623 BUG_ON(target < 0); 1624 pr_debug("%s: stripe %llu block: %d\n", 1625 __func__, (unsigned long long)sh->sector, target); 1626 1627 tgt = &sh->dev[target]; 1628 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 1629 dest = tgt->page; 1630 dest_off = tgt->offset; 1631 1632 atomic_inc(&sh->count); 1633 1634 if (target == qd_idx) { 1635 count = set_syndrome_sources(blocks, offs, sh, SYNDROME_SRC_ALL); 1636 blocks[count] = NULL; /* regenerating p is not necessary */ 1637 BUG_ON(blocks[count+1] != dest); /* q should already be set */ 1638 init_async_submit(&submit, ASYNC_TX_FENCE, NULL, 1639 ops_complete_compute, sh, 1640 to_addr_conv(sh, percpu, 0)); 1641 tx = async_gen_syndrome(blocks, offs, count+2, 1642 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); 1643 } else { 1644 /* Compute any data- or p-drive using XOR */ 1645 count = 0; 1646 for (i = disks; i-- ; ) { 1647 if (i == target || i == qd_idx) 1648 continue; 1649 offs[count] = sh->dev[i].offset; 1650 blocks[count++] = sh->dev[i].page; 1651 } 1652 1653 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, 1654 NULL, ops_complete_compute, sh, 1655 to_addr_conv(sh, percpu, 0)); 1656 tx = async_xor_offs(dest, dest_off, blocks, offs, count, 1657 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); 1658 } 1659 1660 return tx; 1661 } 1662 1663 static struct dma_async_tx_descriptor * 1664 ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) 1665 { 1666 int i, count, disks = sh->disks; 1667 int syndrome_disks = sh->ddf_layout ? 
disks : disks-2; 1668 int d0_idx = raid6_d0(sh); 1669 int faila = -1, failb = -1; 1670 int target = sh->ops.target; 1671 int target2 = sh->ops.target2; 1672 struct r5dev *tgt = &sh->dev[target]; 1673 struct r5dev *tgt2 = &sh->dev[target2]; 1674 struct dma_async_tx_descriptor *tx; 1675 struct page **blocks = to_addr_page(percpu, 0); 1676 unsigned int *offs = to_addr_offs(sh, percpu); 1677 struct async_submit_ctl submit; 1678 1679 BUG_ON(sh->batch_head); 1680 pr_debug("%s: stripe %llu block1: %d block2: %d\n", 1681 __func__, (unsigned long long)sh->sector, target, target2); 1682 BUG_ON(target < 0 || target2 < 0); 1683 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 1684 BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags)); 1685 1686 /* we need to open-code set_syndrome_sources to handle the 1687 * slot number conversion for 'faila' and 'failb' 1688 */ 1689 for (i = 0; i < disks ; i++) { 1690 offs[i] = 0; 1691 blocks[i] = NULL; 1692 } 1693 count = 0; 1694 i = d0_idx; 1695 do { 1696 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); 1697 1698 offs[slot] = sh->dev[i].offset; 1699 blocks[slot] = sh->dev[i].page; 1700 1701 if (i == target) 1702 faila = slot; 1703 if (i == target2) 1704 failb = slot; 1705 i = raid6_next_disk(i, disks); 1706 } while (i != d0_idx); 1707 1708 BUG_ON(faila == failb); 1709 if (failb < faila) 1710 swap(faila, failb); 1711 pr_debug("%s: stripe: %llu faila: %d failb: %d\n", 1712 __func__, (unsigned long long)sh->sector, faila, failb); 1713 1714 atomic_inc(&sh->count); 1715 1716 if (failb == syndrome_disks+1) { 1717 /* Q disk is one of the missing disks */ 1718 if (faila == syndrome_disks) { 1719 /* Missing P+Q, just recompute */ 1720 init_async_submit(&submit, ASYNC_TX_FENCE, NULL, 1721 ops_complete_compute, sh, 1722 to_addr_conv(sh, percpu, 0)); 1723 return async_gen_syndrome(blocks, offs, syndrome_disks+2, 1724 RAID5_STRIPE_SIZE(sh->raid_conf), 1725 &submit); 1726 } else { 1727 struct page *dest; 1728 unsigned int dest_off; 1729 int data_target; 1730 int qd_idx = sh->qd_idx; 1731 1732 /* Missing D+Q: recompute D from P, then recompute Q */ 1733 if (target == qd_idx) 1734 data_target = target2; 1735 else 1736 data_target = target; 1737 1738 count = 0; 1739 for (i = disks; i-- ; ) { 1740 if (i == data_target || i == qd_idx) 1741 continue; 1742 offs[count] = sh->dev[i].offset; 1743 blocks[count++] = sh->dev[i].page; 1744 } 1745 dest = sh->dev[data_target].page; 1746 dest_off = sh->dev[data_target].offset; 1747 init_async_submit(&submit, 1748 ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, 1749 NULL, NULL, NULL, 1750 to_addr_conv(sh, percpu, 0)); 1751 tx = async_xor_offs(dest, dest_off, blocks, offs, count, 1752 RAID5_STRIPE_SIZE(sh->raid_conf), 1753 &submit); 1754 1755 count = set_syndrome_sources(blocks, offs, sh, SYNDROME_SRC_ALL); 1756 init_async_submit(&submit, ASYNC_TX_FENCE, tx, 1757 ops_complete_compute, sh, 1758 to_addr_conv(sh, percpu, 0)); 1759 return async_gen_syndrome(blocks, offs, count+2, 1760 RAID5_STRIPE_SIZE(sh->raid_conf), 1761 &submit); 1762 } 1763 } else { 1764 init_async_submit(&submit, ASYNC_TX_FENCE, NULL, 1765 ops_complete_compute, sh, 1766 to_addr_conv(sh, percpu, 0)); 1767 if (failb == syndrome_disks) { 1768 /* We're missing D+P. */ 1769 return async_raid6_datap_recov(syndrome_disks+2, 1770 RAID5_STRIPE_SIZE(sh->raid_conf), 1771 faila, 1772 blocks, offs, &submit); 1773 } else { 1774 /* We're missing D+D. 
*/ 1775 return async_raid6_2data_recov(syndrome_disks+2, 1776 RAID5_STRIPE_SIZE(sh->raid_conf), 1777 faila, failb, 1778 blocks, offs, &submit); 1779 } 1780 } 1781 } 1782 1783 static void ops_complete_prexor(void *stripe_head_ref) 1784 { 1785 struct stripe_head *sh = stripe_head_ref; 1786 1787 pr_debug("%s: stripe %llu\n", __func__, 1788 (unsigned long long)sh->sector); 1789 1790 if (r5c_is_writeback(sh->raid_conf->log)) 1791 /* 1792 * raid5-cache write back uses orig_page during prexor. 1793 * After prexor, it is time to free orig_page 1794 */ 1795 r5c_release_extra_page(sh); 1796 } 1797 1798 static struct dma_async_tx_descriptor * 1799 ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu, 1800 struct dma_async_tx_descriptor *tx) 1801 { 1802 int disks = sh->disks; 1803 struct page **xor_srcs = to_addr_page(percpu, 0); 1804 unsigned int *off_srcs = to_addr_offs(sh, percpu); 1805 int count = 0, pd_idx = sh->pd_idx, i; 1806 struct async_submit_ctl submit; 1807 1808 /* existing parity data subtracted */ 1809 unsigned int off_dest = off_srcs[count] = sh->dev[pd_idx].offset; 1810 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 1811 1812 BUG_ON(sh->batch_head); 1813 pr_debug("%s: stripe %llu\n", __func__, 1814 (unsigned long long)sh->sector); 1815 1816 for (i = disks; i--; ) { 1817 struct r5dev *dev = &sh->dev[i]; 1818 /* Only process blocks that are known to be uptodate */ 1819 if (test_bit(R5_InJournal, &dev->flags)) { 1820 /* 1821 * For this case, PAGE_SIZE must be equal to 4KB and 1822 * page offset is zero. 1823 */ 1824 off_srcs[count] = dev->offset; 1825 xor_srcs[count++] = dev->orig_page; 1826 } else if (test_bit(R5_Wantdrain, &dev->flags)) { 1827 off_srcs[count] = dev->offset; 1828 xor_srcs[count++] = dev->page; 1829 } 1830 } 1831 1832 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, 1833 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); 1834 tx = async_xor_offs(xor_dest, off_dest, xor_srcs, off_srcs, count, 1835 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); 1836 1837 return tx; 1838 } 1839 1840 static struct dma_async_tx_descriptor * 1841 ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu, 1842 struct dma_async_tx_descriptor *tx) 1843 { 1844 struct page **blocks = to_addr_page(percpu, 0); 1845 unsigned int *offs = to_addr_offs(sh, percpu); 1846 int count; 1847 struct async_submit_ctl submit; 1848 1849 pr_debug("%s: stripe %llu\n", __func__, 1850 (unsigned long long)sh->sector); 1851 1852 count = set_syndrome_sources(blocks, offs, sh, SYNDROME_SRC_WANT_DRAIN); 1853 1854 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_PQ_XOR_DST, tx, 1855 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); 1856 tx = async_gen_syndrome(blocks, offs, count+2, 1857 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); 1858 1859 return tx; 1860 } 1861 1862 static struct dma_async_tx_descriptor * 1863 ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) 1864 { 1865 struct r5conf *conf = sh->raid_conf; 1866 int disks = sh->disks; 1867 int i; 1868 struct stripe_head *head_sh = sh; 1869 1870 pr_debug("%s: stripe %llu\n", __func__, 1871 (unsigned long long)sh->sector); 1872 1873 for (i = disks; i--; ) { 1874 struct r5dev *dev; 1875 struct bio *chosen; 1876 1877 sh = head_sh; 1878 if (test_and_clear_bit(R5_Wantdrain, &head_sh->dev[i].flags)) { 1879 struct bio *wbi; 1880 1881 again: 1882 dev = &sh->dev[i]; 1883 /* 1884 * clear R5_InJournal, so when rewriting a page in 1885 * journal, it is not skipped by r5l_log_stripe() 1886 
*/ 1887 clear_bit(R5_InJournal, &dev->flags); 1888 spin_lock_irq(&sh->stripe_lock); 1889 chosen = dev->towrite; 1890 dev->towrite = NULL; 1891 sh->overwrite_disks = 0; 1892 BUG_ON(dev->written); 1893 wbi = dev->written = chosen; 1894 spin_unlock_irq(&sh->stripe_lock); 1895 WARN_ON(dev->page != dev->orig_page); 1896 1897 while (wbi && wbi->bi_iter.bi_sector < 1898 dev->sector + RAID5_STRIPE_SECTORS(conf)) { 1899 if (wbi->bi_opf & REQ_FUA) 1900 set_bit(R5_WantFUA, &dev->flags); 1901 if (wbi->bi_opf & REQ_SYNC) 1902 set_bit(R5_SyncIO, &dev->flags); 1903 if (bio_op(wbi) == REQ_OP_DISCARD) 1904 set_bit(R5_Discard, &dev->flags); 1905 else { 1906 tx = async_copy_data(1, wbi, &dev->page, 1907 dev->offset, 1908 dev->sector, tx, sh, 1909 r5c_is_writeback(conf->log)); 1910 if (dev->page != dev->orig_page && 1911 !r5c_is_writeback(conf->log)) { 1912 set_bit(R5_SkipCopy, &dev->flags); 1913 clear_bit(R5_UPTODATE, &dev->flags); 1914 clear_bit(R5_OVERWRITE, &dev->flags); 1915 } 1916 } 1917 wbi = r5_next_bio(conf, wbi, dev->sector); 1918 } 1919 1920 if (head_sh->batch_head) { 1921 sh = list_first_entry(&sh->batch_list, 1922 struct stripe_head, 1923 batch_list); 1924 if (sh == head_sh) 1925 continue; 1926 goto again; 1927 } 1928 } 1929 } 1930 1931 return tx; 1932 } 1933 1934 static void ops_complete_reconstruct(void *stripe_head_ref) 1935 { 1936 struct stripe_head *sh = stripe_head_ref; 1937 int disks = sh->disks; 1938 int pd_idx = sh->pd_idx; 1939 int qd_idx = sh->qd_idx; 1940 int i; 1941 bool fua = false, sync = false, discard = false; 1942 1943 pr_debug("%s: stripe %llu\n", __func__, 1944 (unsigned long long)sh->sector); 1945 1946 for (i = disks; i--; ) { 1947 fua |= test_bit(R5_WantFUA, &sh->dev[i].flags); 1948 sync |= test_bit(R5_SyncIO, &sh->dev[i].flags); 1949 discard |= test_bit(R5_Discard, &sh->dev[i].flags); 1950 } 1951 1952 for (i = disks; i--; ) { 1953 struct r5dev *dev = &sh->dev[i]; 1954 1955 if (dev->written || i == pd_idx || i == qd_idx) { 1956 if (!discard && !test_bit(R5_SkipCopy, &dev->flags)) { 1957 set_bit(R5_UPTODATE, &dev->flags); 1958 if (test_bit(STRIPE_EXPAND_READY, &sh->state)) 1959 set_bit(R5_Expanded, &dev->flags); 1960 } 1961 if (fua) 1962 set_bit(R5_WantFUA, &dev->flags); 1963 if (sync) 1964 set_bit(R5_SyncIO, &dev->flags); 1965 } 1966 } 1967 1968 if (sh->reconstruct_state == reconstruct_state_drain_run) 1969 sh->reconstruct_state = reconstruct_state_drain_result; 1970 else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) 1971 sh->reconstruct_state = reconstruct_state_prexor_drain_result; 1972 else { 1973 BUG_ON(sh->reconstruct_state != reconstruct_state_run); 1974 sh->reconstruct_state = reconstruct_state_result; 1975 } 1976 1977 set_bit(STRIPE_HANDLE, &sh->state); 1978 raid5_release_stripe(sh); 1979 } 1980 1981 static void 1982 ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu, 1983 struct dma_async_tx_descriptor *tx) 1984 { 1985 int disks = sh->disks; 1986 struct page **xor_srcs; 1987 unsigned int *off_srcs; 1988 struct async_submit_ctl submit; 1989 int count, pd_idx = sh->pd_idx, i; 1990 struct page *xor_dest; 1991 unsigned int off_dest; 1992 int prexor = 0; 1993 unsigned long flags; 1994 int j = 0; 1995 struct stripe_head *head_sh = sh; 1996 int last_stripe; 1997 1998 pr_debug("%s: stripe %llu\n", __func__, 1999 (unsigned long long)sh->sector); 2000 2001 for (i = 0; i < sh->disks; i++) { 2002 if (pd_idx == i) 2003 continue; 2004 if (!test_bit(R5_Discard, &sh->dev[i].flags)) 2005 break; 2006 } 2007 if (i >= sh->disks) { 2008 
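		/*
		 * All data blocks of this stripe are being discarded, so
		 * there is nothing to XOR: mark the parity block as
		 * discarded too and complete the reconstruct immediately.
		 */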
atomic_inc(&sh->count); 2009 set_bit(R5_Discard, &sh->dev[pd_idx].flags); 2010 ops_complete_reconstruct(sh); 2011 return; 2012 } 2013 again: 2014 count = 0; 2015 xor_srcs = to_addr_page(percpu, j); 2016 off_srcs = to_addr_offs(sh, percpu); 2017 /* check if prexor is active which means only process blocks 2018 * that are part of a read-modify-write (written) 2019 */ 2020 if (head_sh->reconstruct_state == reconstruct_state_prexor_drain_run) { 2021 prexor = 1; 2022 off_dest = off_srcs[count] = sh->dev[pd_idx].offset; 2023 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 2024 for (i = disks; i--; ) { 2025 struct r5dev *dev = &sh->dev[i]; 2026 if (head_sh->dev[i].written || 2027 test_bit(R5_InJournal, &head_sh->dev[i].flags)) { 2028 off_srcs[count] = dev->offset; 2029 xor_srcs[count++] = dev->page; 2030 } 2031 } 2032 } else { 2033 xor_dest = sh->dev[pd_idx].page; 2034 off_dest = sh->dev[pd_idx].offset; 2035 for (i = disks; i--; ) { 2036 struct r5dev *dev = &sh->dev[i]; 2037 if (i != pd_idx) { 2038 off_srcs[count] = dev->offset; 2039 xor_srcs[count++] = dev->page; 2040 } 2041 } 2042 } 2043 2044 /* 1/ if we prexor'd then the dest is reused as a source 2045 * 2/ if we did not prexor then we are redoing the parity 2046 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST 2047 * for the synchronous xor case 2048 */ 2049 last_stripe = !head_sh->batch_head || 2050 list_first_entry(&sh->batch_list, 2051 struct stripe_head, batch_list) == head_sh; 2052 if (last_stripe) { 2053 flags = ASYNC_TX_ACK | 2054 (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST); 2055 2056 atomic_inc(&head_sh->count); 2057 init_async_submit(&submit, flags, tx, ops_complete_reconstruct, head_sh, 2058 to_addr_conv(sh, percpu, j)); 2059 } else { 2060 flags = prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST; 2061 init_async_submit(&submit, flags, tx, NULL, NULL, 2062 to_addr_conv(sh, percpu, j)); 2063 } 2064 2065 if (unlikely(count == 1)) 2066 tx = async_memcpy(xor_dest, xor_srcs[0], off_dest, off_srcs[0], 2067 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); 2068 else 2069 tx = async_xor_offs(xor_dest, off_dest, xor_srcs, off_srcs, count, 2070 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); 2071 if (!last_stripe) { 2072 j++; 2073 sh = list_first_entry(&sh->batch_list, struct stripe_head, 2074 batch_list); 2075 goto again; 2076 } 2077 } 2078 2079 static void 2080 ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu, 2081 struct dma_async_tx_descriptor *tx) 2082 { 2083 struct async_submit_ctl submit; 2084 struct page **blocks; 2085 unsigned int *offs; 2086 int count, i, j = 0; 2087 struct stripe_head *head_sh = sh; 2088 int last_stripe; 2089 int synflags; 2090 unsigned long txflags; 2091 2092 pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); 2093 2094 for (i = 0; i < sh->disks; i++) { 2095 if (sh->pd_idx == i || sh->qd_idx == i) 2096 continue; 2097 if (!test_bit(R5_Discard, &sh->dev[i].flags)) 2098 break; 2099 } 2100 if (i >= sh->disks) { 2101 atomic_inc(&sh->count); 2102 set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); 2103 set_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); 2104 ops_complete_reconstruct(sh); 2105 return; 2106 } 2107 2108 again: 2109 blocks = to_addr_page(percpu, j); 2110 offs = to_addr_offs(sh, percpu); 2111 2112 if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) { 2113 synflags = SYNDROME_SRC_WRITTEN; 2114 txflags = ASYNC_TX_ACK | ASYNC_TX_PQ_XOR_DST; 2115 } else { 2116 synflags = SYNDROME_SRC_ALL; 2117 txflags = ASYNC_TX_ACK; 2118 } 2119 2120 count = 
set_syndrome_sources(blocks, offs, sh, synflags); 2121 last_stripe = !head_sh->batch_head || 2122 list_first_entry(&sh->batch_list, 2123 struct stripe_head, batch_list) == head_sh; 2124 2125 if (last_stripe) { 2126 atomic_inc(&head_sh->count); 2127 init_async_submit(&submit, txflags, tx, ops_complete_reconstruct, 2128 head_sh, to_addr_conv(sh, percpu, j)); 2129 } else 2130 init_async_submit(&submit, 0, tx, NULL, NULL, 2131 to_addr_conv(sh, percpu, j)); 2132 tx = async_gen_syndrome(blocks, offs, count+2, 2133 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); 2134 if (!last_stripe) { 2135 j++; 2136 sh = list_first_entry(&sh->batch_list, struct stripe_head, 2137 batch_list); 2138 goto again; 2139 } 2140 } 2141 2142 static void ops_complete_check(void *stripe_head_ref) 2143 { 2144 struct stripe_head *sh = stripe_head_ref; 2145 2146 pr_debug("%s: stripe %llu\n", __func__, 2147 (unsigned long long)sh->sector); 2148 2149 sh->check_state = check_state_check_result; 2150 set_bit(STRIPE_HANDLE, &sh->state); 2151 raid5_release_stripe(sh); 2152 } 2153 2154 static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu) 2155 { 2156 int disks = sh->disks; 2157 int pd_idx = sh->pd_idx; 2158 int qd_idx = sh->qd_idx; 2159 struct page *xor_dest; 2160 unsigned int off_dest; 2161 struct page **xor_srcs = to_addr_page(percpu, 0); 2162 unsigned int *off_srcs = to_addr_offs(sh, percpu); 2163 struct dma_async_tx_descriptor *tx; 2164 struct async_submit_ctl submit; 2165 int count; 2166 int i; 2167 2168 pr_debug("%s: stripe %llu\n", __func__, 2169 (unsigned long long)sh->sector); 2170 2171 BUG_ON(sh->batch_head); 2172 count = 0; 2173 xor_dest = sh->dev[pd_idx].page; 2174 off_dest = sh->dev[pd_idx].offset; 2175 off_srcs[count] = off_dest; 2176 xor_srcs[count++] = xor_dest; 2177 for (i = disks; i--; ) { 2178 if (i == pd_idx || i == qd_idx) 2179 continue; 2180 off_srcs[count] = sh->dev[i].offset; 2181 xor_srcs[count++] = sh->dev[i].page; 2182 } 2183 2184 init_async_submit(&submit, 0, NULL, NULL, NULL, 2185 to_addr_conv(sh, percpu, 0)); 2186 tx = async_xor_val_offs(xor_dest, off_dest, xor_srcs, off_srcs, count, 2187 RAID5_STRIPE_SIZE(sh->raid_conf), 2188 &sh->ops.zero_sum_result, &submit); 2189 2190 atomic_inc(&sh->count); 2191 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL); 2192 tx = async_trigger_callback(&submit); 2193 } 2194 2195 static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp) 2196 { 2197 struct page **srcs = to_addr_page(percpu, 0); 2198 unsigned int *offs = to_addr_offs(sh, percpu); 2199 struct async_submit_ctl submit; 2200 int count; 2201 2202 pr_debug("%s: stripe %llu checkp: %d\n", __func__, 2203 (unsigned long long)sh->sector, checkp); 2204 2205 BUG_ON(sh->batch_head); 2206 count = set_syndrome_sources(srcs, offs, sh, SYNDROME_SRC_ALL); 2207 if (!checkp) 2208 srcs[count] = NULL; 2209 2210 atomic_inc(&sh->count); 2211 init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check, 2212 sh, to_addr_conv(sh, percpu, 0)); 2213 async_syndrome_val(srcs, offs, count+2, 2214 RAID5_STRIPE_SIZE(sh->raid_conf), 2215 &sh->ops.zero_sum_result, percpu->spare_page, 0, &submit); 2216 } 2217 2218 static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) 2219 { 2220 int overlap_clear = 0, i, disks = sh->disks; 2221 struct dma_async_tx_descriptor *tx = NULL; 2222 struct r5conf *conf = sh->raid_conf; 2223 int level = conf->level; 2224 struct raid5_percpu *percpu; 2225 2226 local_lock(&conf->percpu->lock); 2227 percpu = 
this_cpu_ptr(conf->percpu); 2228 if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) { 2229 ops_run_biofill(sh); 2230 overlap_clear++; 2231 } 2232 2233 if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) { 2234 if (level < 6) 2235 tx = ops_run_compute5(sh, percpu); 2236 else { 2237 if (sh->ops.target2 < 0 || sh->ops.target < 0) 2238 tx = ops_run_compute6_1(sh, percpu); 2239 else 2240 tx = ops_run_compute6_2(sh, percpu); 2241 } 2242 /* terminate the chain if reconstruct is not set to be run */ 2243 if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) 2244 async_tx_ack(tx); 2245 } 2246 2247 if (test_bit(STRIPE_OP_PREXOR, &ops_request)) { 2248 if (level < 6) 2249 tx = ops_run_prexor5(sh, percpu, tx); 2250 else 2251 tx = ops_run_prexor6(sh, percpu, tx); 2252 } 2253 2254 if (test_bit(STRIPE_OP_PARTIAL_PARITY, &ops_request)) 2255 tx = ops_run_partial_parity(sh, percpu, tx); 2256 2257 if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) { 2258 tx = ops_run_biodrain(sh, tx); 2259 overlap_clear++; 2260 } 2261 2262 if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) { 2263 if (level < 6) 2264 ops_run_reconstruct5(sh, percpu, tx); 2265 else 2266 ops_run_reconstruct6(sh, percpu, tx); 2267 } 2268 2269 if (test_bit(STRIPE_OP_CHECK, &ops_request)) { 2270 if (sh->check_state == check_state_run) 2271 ops_run_check_p(sh, percpu); 2272 else if (sh->check_state == check_state_run_q) 2273 ops_run_check_pq(sh, percpu, 0); 2274 else if (sh->check_state == check_state_run_pq) 2275 ops_run_check_pq(sh, percpu, 1); 2276 else 2277 BUG(); 2278 } 2279 2280 if (overlap_clear && !sh->batch_head) { 2281 for (i = disks; i--; ) { 2282 struct r5dev *dev = &sh->dev[i]; 2283 if (test_and_clear_bit(R5_Overlap, &dev->flags)) 2284 wake_up(&sh->raid_conf->wait_for_overlap); 2285 } 2286 } 2287 local_unlock(&conf->percpu->lock); 2288 } 2289 2290 static void free_stripe(struct kmem_cache *sc, struct stripe_head *sh) 2291 { 2292 #if PAGE_SIZE != DEFAULT_STRIPE_SIZE 2293 kfree(sh->pages); 2294 #endif 2295 if (sh->ppl_page) 2296 __free_page(sh->ppl_page); 2297 kmem_cache_free(sc, sh); 2298 } 2299 2300 static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp, 2301 int disks, struct r5conf *conf) 2302 { 2303 struct stripe_head *sh; 2304 2305 sh = kmem_cache_zalloc(sc, gfp); 2306 if (sh) { 2307 spin_lock_init(&sh->stripe_lock); 2308 spin_lock_init(&sh->batch_lock); 2309 INIT_LIST_HEAD(&sh->batch_list); 2310 INIT_LIST_HEAD(&sh->lru); 2311 INIT_LIST_HEAD(&sh->r5c); 2312 INIT_LIST_HEAD(&sh->log_list); 2313 atomic_set(&sh->count, 1); 2314 sh->raid_conf = conf; 2315 sh->log_start = MaxSector; 2316 2317 if (raid5_has_ppl(conf)) { 2318 sh->ppl_page = alloc_page(gfp); 2319 if (!sh->ppl_page) { 2320 free_stripe(sc, sh); 2321 return NULL; 2322 } 2323 } 2324 #if PAGE_SIZE != DEFAULT_STRIPE_SIZE 2325 if (init_stripe_shared_pages(sh, conf, disks)) { 2326 free_stripe(sc, sh); 2327 return NULL; 2328 } 2329 #endif 2330 } 2331 return sh; 2332 } 2333 static int grow_one_stripe(struct r5conf *conf, gfp_t gfp) 2334 { 2335 struct stripe_head *sh; 2336 2337 sh = alloc_stripe(conf->slab_cache, gfp, conf->pool_size, conf); 2338 if (!sh) 2339 return 0; 2340 2341 if (grow_buffers(sh, gfp)) { 2342 shrink_buffers(sh); 2343 free_stripe(conf->slab_cache, sh); 2344 return 0; 2345 } 2346 sh->hash_lock_index = 2347 conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS; 2348 /* we just created an active stripe so... 
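	 * account for it here; the raid5_release_stripe() below drops the
	 * initial reference and parks the stripe on the inactive list,
	 * decrementing active_stripes again.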
*/ 2349 atomic_inc(&conf->active_stripes); 2350 2351 raid5_release_stripe(sh); 2352 conf->max_nr_stripes++; 2353 return 1; 2354 } 2355 2356 static int grow_stripes(struct r5conf *conf, int num) 2357 { 2358 struct kmem_cache *sc; 2359 size_t namelen = sizeof(conf->cache_name[0]); 2360 int devs = max(conf->raid_disks, conf->previous_raid_disks); 2361 2362 if (conf->mddev->gendisk) 2363 snprintf(conf->cache_name[0], namelen, 2364 "raid%d-%s", conf->level, mdname(conf->mddev)); 2365 else 2366 snprintf(conf->cache_name[0], namelen, 2367 "raid%d-%p", conf->level, conf->mddev); 2368 snprintf(conf->cache_name[1], namelen, "%.27s-alt", conf->cache_name[0]); 2369 2370 conf->active_name = 0; 2371 sc = kmem_cache_create(conf->cache_name[conf->active_name], 2372 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev), 2373 0, 0, NULL); 2374 if (!sc) 2375 return 1; 2376 conf->slab_cache = sc; 2377 conf->pool_size = devs; 2378 while (num--) 2379 if (!grow_one_stripe(conf, GFP_KERNEL)) 2380 return 1; 2381 2382 return 0; 2383 } 2384 2385 /** 2386 * scribble_alloc - allocate percpu scribble buffer for required size 2387 * of the scribble region 2388 * @percpu: from for_each_present_cpu() of the caller 2389 * @num: total number of disks in the array 2390 * @cnt: scribble objs count for required size of the scribble region 2391 * 2392 * The scribble buffer size must be enough to contain: 2393 * 1/ a struct page pointer for each device in the array +2 2394 * 2/ room to convert each entry in (1) to its corresponding dma 2395 * (dma_map_page()) or page (page_address()) address. 2396 * 2397 * Note: the +2 is for the destination buffers of the ddf/raid6 case where we 2398 * calculate over all devices (not just the data blocks), using zeros in place 2399 * of the P and Q blocks. 2400 */ 2401 static int scribble_alloc(struct raid5_percpu *percpu, 2402 int num, int cnt) 2403 { 2404 size_t obj_size = 2405 sizeof(struct page *) * (num + 2) + 2406 sizeof(addr_conv_t) * (num + 2) + 2407 sizeof(unsigned int) * (num + 2); 2408 void *scribble; 2409 2410 /* 2411 * If here is in raid array suspend context, it is in memalloc noio 2412 * context as well, there is no potential recursive memory reclaim 2413 * I/Os with the GFP_KERNEL flag. 2414 */ 2415 scribble = kvmalloc_array(cnt, obj_size, GFP_KERNEL); 2416 if (!scribble) 2417 return -ENOMEM; 2418 2419 kvfree(percpu->scribble); 2420 2421 percpu->scribble = scribble; 2422 percpu->scribble_obj_size = obj_size; 2423 return 0; 2424 } 2425 2426 static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors) 2427 { 2428 unsigned long cpu; 2429 int err = 0; 2430 2431 /* 2432 * Never shrink. And mddev_suspend() could deadlock if this is called 2433 * from raid5d. 
	 * In that case, scribble_disks and scribble_sectors
	 * should be equal to new_disks and new_sectors.
	 */
	if (conf->scribble_disks >= new_disks &&
	    conf->scribble_sectors >= new_sectors)
		return 0;
	mddev_suspend(conf->mddev);
	cpus_read_lock();

	for_each_present_cpu(cpu) {
		struct raid5_percpu *percpu;

		percpu = per_cpu_ptr(conf->percpu, cpu);
		err = scribble_alloc(percpu, new_disks,
				     new_sectors / RAID5_STRIPE_SECTORS(conf));
		if (err)
			break;
	}

	cpus_read_unlock();
	mddev_resume(conf->mddev);
	if (!err) {
		conf->scribble_disks = new_disks;
		conf->scribble_sectors = new_sectors;
	}
	return err;
}

static int resize_stripes(struct r5conf *conf, int newsize)
{
	/* Make all the stripes able to hold 'newsize' devices.
	 * New slots in each stripe get 'page' set to a new page.
	 *
	 * This happens in stages:
	 * 1/ create a new kmem_cache and allocate the required number of
	 *    stripe_heads.
	 * 2/ gather all the old stripe_heads and transfer the pages across
	 *    to the new stripe_heads.  This will have the side effect of
	 *    freezing the array as once all stripe_heads have been collected,
	 *    no IO will be possible.  Old stripe heads are freed once their
	 *    pages have been transferred over, and the old kmem_cache is
	 *    freed when all stripes are done.
	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
	 *    we simply return a failure status - no need to clean anything up.
	 * 4/ allocate new pages for the new slots in the new stripe_heads.
	 *    If this fails, we don't bother trying to shrink the
	 *    stripe_heads down again, we just leave them as they are.
	 *    As each stripe_head is processed the new one is released into
	 *    active service.
	 *
	 * Once step 2 is started, we cannot afford to wait for a write,
	 * so we use GFP_NOIO allocations.
	 */
	struct stripe_head *osh, *nsh;
	LIST_HEAD(newstripes);
	struct disk_info *ndisks;
	int err = 0;
	struct kmem_cache *sc;
	int i;
	int hash, cnt;

	md_allow_write(conf->mddev);

	/* Step 1 */
	sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
			       sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return -ENOMEM;

	/* Need to ensure auto-resizing doesn't interfere */
	mutex_lock(&conf->cache_size_mutex);

	for (i = conf->max_nr_stripes; i; i--) {
		nsh = alloc_stripe(sc, GFP_KERNEL, newsize, conf);
		if (!nsh)
			break;

		list_add(&nsh->lru, &newstripes);
	}
	if (i) {
		/* didn't get enough, give up */
		while (!list_empty(&newstripes)) {
			nsh = list_entry(newstripes.next, struct stripe_head, lru);
			list_del(&nsh->lru);
			free_stripe(sc, nsh);
		}
		kmem_cache_destroy(sc);
		mutex_unlock(&conf->cache_size_mutex);
		return -ENOMEM;
	}
	/* Step 2 - Must use GFP_NOIO now.
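	 * (from this point inactive stripes are being held captive, so an
	 * allocation that had to wait for a write could deadlock against
	 * writeback aimed at this very array).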
2525 * OK, we have enough stripes, start collecting inactive 2526 * stripes and copying them over 2527 */ 2528 hash = 0; 2529 cnt = 0; 2530 list_for_each_entry(nsh, &newstripes, lru) { 2531 lock_device_hash_lock(conf, hash); 2532 wait_event_cmd(conf->wait_for_stripe, 2533 !list_empty(conf->inactive_list + hash), 2534 unlock_device_hash_lock(conf, hash), 2535 lock_device_hash_lock(conf, hash)); 2536 osh = get_free_stripe(conf, hash); 2537 unlock_device_hash_lock(conf, hash); 2538 2539 #if PAGE_SIZE != DEFAULT_STRIPE_SIZE 2540 for (i = 0; i < osh->nr_pages; i++) { 2541 nsh->pages[i] = osh->pages[i]; 2542 osh->pages[i] = NULL; 2543 } 2544 #endif 2545 for(i=0; i<conf->pool_size; i++) { 2546 nsh->dev[i].page = osh->dev[i].page; 2547 nsh->dev[i].orig_page = osh->dev[i].page; 2548 nsh->dev[i].offset = osh->dev[i].offset; 2549 } 2550 nsh->hash_lock_index = hash; 2551 free_stripe(conf->slab_cache, osh); 2552 cnt++; 2553 if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS + 2554 !!((conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS) > hash)) { 2555 hash++; 2556 cnt = 0; 2557 } 2558 } 2559 kmem_cache_destroy(conf->slab_cache); 2560 2561 /* Step 3. 2562 * At this point, we are holding all the stripes so the array 2563 * is completely stalled, so now is a good time to resize 2564 * conf->disks and the scribble region 2565 */ 2566 ndisks = kcalloc(newsize, sizeof(struct disk_info), GFP_NOIO); 2567 if (ndisks) { 2568 for (i = 0; i < conf->pool_size; i++) 2569 ndisks[i] = conf->disks[i]; 2570 2571 for (i = conf->pool_size; i < newsize; i++) { 2572 ndisks[i].extra_page = alloc_page(GFP_NOIO); 2573 if (!ndisks[i].extra_page) 2574 err = -ENOMEM; 2575 } 2576 2577 if (err) { 2578 for (i = conf->pool_size; i < newsize; i++) 2579 if (ndisks[i].extra_page) 2580 put_page(ndisks[i].extra_page); 2581 kfree(ndisks); 2582 } else { 2583 kfree(conf->disks); 2584 conf->disks = ndisks; 2585 } 2586 } else 2587 err = -ENOMEM; 2588 2589 conf->slab_cache = sc; 2590 conf->active_name = 1-conf->active_name; 2591 2592 /* Step 4, return new stripes to service */ 2593 while(!list_empty(&newstripes)) { 2594 nsh = list_entry(newstripes.next, struct stripe_head, lru); 2595 list_del_init(&nsh->lru); 2596 2597 #if PAGE_SIZE != DEFAULT_STRIPE_SIZE 2598 for (i = 0; i < nsh->nr_pages; i++) { 2599 if (nsh->pages[i]) 2600 continue; 2601 nsh->pages[i] = alloc_page(GFP_NOIO); 2602 if (!nsh->pages[i]) 2603 err = -ENOMEM; 2604 } 2605 2606 for (i = conf->raid_disks; i < newsize; i++) { 2607 if (nsh->dev[i].page) 2608 continue; 2609 nsh->dev[i].page = raid5_get_dev_page(nsh, i); 2610 nsh->dev[i].orig_page = nsh->dev[i].page; 2611 nsh->dev[i].offset = raid5_get_page_offset(nsh, i); 2612 } 2613 #else 2614 for (i=conf->raid_disks; i < newsize; i++) 2615 if (nsh->dev[i].page == NULL) { 2616 struct page *p = alloc_page(GFP_NOIO); 2617 nsh->dev[i].page = p; 2618 nsh->dev[i].orig_page = p; 2619 nsh->dev[i].offset = 0; 2620 if (!p) 2621 err = -ENOMEM; 2622 } 2623 #endif 2624 raid5_release_stripe(nsh); 2625 } 2626 /* critical section pass, GFP_NOIO no longer needed */ 2627 2628 if (!err) 2629 conf->pool_size = newsize; 2630 mutex_unlock(&conf->cache_size_mutex); 2631 2632 return err; 2633 } 2634 2635 static int drop_one_stripe(struct r5conf *conf) 2636 { 2637 struct stripe_head *sh; 2638 int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK; 2639 2640 spin_lock_irq(conf->hash_locks + hash); 2641 sh = get_free_stripe(conf, hash); 2642 spin_unlock_irq(conf->hash_locks + hash); 2643 if (!sh) 2644 return 0; 2645 BUG_ON(atomic_read(&sh->count)); 2646 
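	/*
	 * The stripe just taken off the inactive list has no users left;
	 * free its pages and return the stripe_head to the slab cache.
	 */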
shrink_buffers(sh); 2647 free_stripe(conf->slab_cache, sh); 2648 atomic_dec(&conf->active_stripes); 2649 conf->max_nr_stripes--; 2650 return 1; 2651 } 2652 2653 static void shrink_stripes(struct r5conf *conf) 2654 { 2655 while (conf->max_nr_stripes && 2656 drop_one_stripe(conf)) 2657 ; 2658 2659 kmem_cache_destroy(conf->slab_cache); 2660 conf->slab_cache = NULL; 2661 } 2662 2663 /* 2664 * This helper wraps rcu_dereference_protected() and can be used when 2665 * it is known that the nr_pending of the rdev is elevated. 2666 */ 2667 static struct md_rdev *rdev_pend_deref(struct md_rdev __rcu *rdev) 2668 { 2669 return rcu_dereference_protected(rdev, 2670 atomic_read(&rcu_access_pointer(rdev)->nr_pending)); 2671 } 2672 2673 /* 2674 * This helper wraps rcu_dereference_protected() and should be used 2675 * when it is known that the mddev_lock() is held. This is safe 2676 * seeing raid5_remove_disk() has the same lock held. 2677 */ 2678 static struct md_rdev *rdev_mdlock_deref(struct mddev *mddev, 2679 struct md_rdev __rcu *rdev) 2680 { 2681 return rcu_dereference_protected(rdev, 2682 lockdep_is_held(&mddev->reconfig_mutex)); 2683 } 2684 2685 static void raid5_end_read_request(struct bio * bi) 2686 { 2687 struct stripe_head *sh = bi->bi_private; 2688 struct r5conf *conf = sh->raid_conf; 2689 int disks = sh->disks, i; 2690 struct md_rdev *rdev = NULL; 2691 sector_t s; 2692 2693 for (i=0 ; i<disks; i++) 2694 if (bi == &sh->dev[i].req) 2695 break; 2696 2697 pr_debug("end_read_request %llu/%d, count: %d, error %d.\n", 2698 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 2699 bi->bi_status); 2700 if (i == disks) { 2701 BUG(); 2702 return; 2703 } 2704 if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) 2705 /* If replacement finished while this request was outstanding, 2706 * 'replacement' might be NULL already. 2707 * In that case it moved down to 'rdev'. 2708 * rdev is not removed until all requests are finished. 2709 */ 2710 rdev = rdev_pend_deref(conf->disks[i].replacement); 2711 if (!rdev) 2712 rdev = rdev_pend_deref(conf->disks[i].rdev); 2713 2714 if (use_new_offset(conf, sh)) 2715 s = sh->sector + rdev->new_data_offset; 2716 else 2717 s = sh->sector + rdev->data_offset; 2718 if (!bi->bi_status) { 2719 set_bit(R5_UPTODATE, &sh->dev[i].flags); 2720 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 2721 /* Note that this cannot happen on a 2722 * replacement device. 
We just fail those on 2723 * any error 2724 */ 2725 pr_info_ratelimited( 2726 "md/raid:%s: read error corrected (%lu sectors at %llu on %pg)\n", 2727 mdname(conf->mddev), RAID5_STRIPE_SECTORS(conf), 2728 (unsigned long long)s, 2729 rdev->bdev); 2730 atomic_add(RAID5_STRIPE_SECTORS(conf), &rdev->corrected_errors); 2731 clear_bit(R5_ReadError, &sh->dev[i].flags); 2732 clear_bit(R5_ReWrite, &sh->dev[i].flags); 2733 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) 2734 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); 2735 2736 if (test_bit(R5_InJournal, &sh->dev[i].flags)) 2737 /* 2738 * end read for a page in journal, this 2739 * must be preparing for prexor in rmw 2740 */ 2741 set_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags); 2742 2743 if (atomic_read(&rdev->read_errors)) 2744 atomic_set(&rdev->read_errors, 0); 2745 } else { 2746 int retry = 0; 2747 int set_bad = 0; 2748 2749 clear_bit(R5_UPTODATE, &sh->dev[i].flags); 2750 if (!(bi->bi_status == BLK_STS_PROTECTION)) 2751 atomic_inc(&rdev->read_errors); 2752 if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) 2753 pr_warn_ratelimited( 2754 "md/raid:%s: read error on replacement device (sector %llu on %pg).\n", 2755 mdname(conf->mddev), 2756 (unsigned long long)s, 2757 rdev->bdev); 2758 else if (conf->mddev->degraded >= conf->max_degraded) { 2759 set_bad = 1; 2760 pr_warn_ratelimited( 2761 "md/raid:%s: read error not correctable (sector %llu on %pg).\n", 2762 mdname(conf->mddev), 2763 (unsigned long long)s, 2764 rdev->bdev); 2765 } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) { 2766 /* Oh, no!!! */ 2767 set_bad = 1; 2768 pr_warn_ratelimited( 2769 "md/raid:%s: read error NOT corrected!! (sector %llu on %pg).\n", 2770 mdname(conf->mddev), 2771 (unsigned long long)s, 2772 rdev->bdev); 2773 } else if (atomic_read(&rdev->read_errors) 2774 > conf->max_nr_stripes) { 2775 if (!test_bit(Faulty, &rdev->flags)) { 2776 pr_warn("md/raid:%s: %d read_errors > %d stripes\n", 2777 mdname(conf->mddev), 2778 atomic_read(&rdev->read_errors), 2779 conf->max_nr_stripes); 2780 pr_warn("md/raid:%s: Too many read errors, failing device %pg.\n", 2781 mdname(conf->mddev), rdev->bdev); 2782 } 2783 } else 2784 retry = 1; 2785 if (set_bad && test_bit(In_sync, &rdev->flags) 2786 && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) 2787 retry = 1; 2788 if (retry) 2789 if (sh->qd_idx >= 0 && sh->pd_idx == i) 2790 set_bit(R5_ReadError, &sh->dev[i].flags); 2791 else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) { 2792 set_bit(R5_ReadError, &sh->dev[i].flags); 2793 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); 2794 } else 2795 set_bit(R5_ReadNoMerge, &sh->dev[i].flags); 2796 else { 2797 clear_bit(R5_ReadError, &sh->dev[i].flags); 2798 clear_bit(R5_ReWrite, &sh->dev[i].flags); 2799 if (!(set_bad 2800 && test_bit(In_sync, &rdev->flags) 2801 && rdev_set_badblocks( 2802 rdev, sh->sector, RAID5_STRIPE_SECTORS(conf), 0))) 2803 md_error(conf->mddev, rdev); 2804 } 2805 } 2806 rdev_dec_pending(rdev, conf->mddev); 2807 bio_uninit(bi); 2808 clear_bit(R5_LOCKED, &sh->dev[i].flags); 2809 set_bit(STRIPE_HANDLE, &sh->state); 2810 raid5_release_stripe(sh); 2811 } 2812 2813 static void raid5_end_write_request(struct bio *bi) 2814 { 2815 struct stripe_head *sh = bi->bi_private; 2816 struct r5conf *conf = sh->raid_conf; 2817 int disks = sh->disks, i; 2818 struct md_rdev *rdev; 2819 sector_t first_bad; 2820 int bad_sectors; 2821 int replacement = 0; 2822 2823 for (i = 0 ; i < disks; i++) { 2824 if (bi == &sh->dev[i].req) { 2825 rdev = rdev_pend_deref(conf->disks[i].rdev); 2826 break; 2827 } 2828 
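		/* not the normal request for this device: see whether the
		 * bio was issued to the replacement instead
		 */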
if (bi == &sh->dev[i].rreq) { 2829 rdev = rdev_pend_deref(conf->disks[i].replacement); 2830 if (rdev) 2831 replacement = 1; 2832 else 2833 /* rdev was removed and 'replacement' 2834 * replaced it. rdev is not removed 2835 * until all requests are finished. 2836 */ 2837 rdev = rdev_pend_deref(conf->disks[i].rdev); 2838 break; 2839 } 2840 } 2841 pr_debug("end_write_request %llu/%d, count %d, error: %d.\n", 2842 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 2843 bi->bi_status); 2844 if (i == disks) { 2845 BUG(); 2846 return; 2847 } 2848 2849 if (replacement) { 2850 if (bi->bi_status) 2851 md_error(conf->mddev, rdev); 2852 else if (is_badblock(rdev, sh->sector, 2853 RAID5_STRIPE_SECTORS(conf), 2854 &first_bad, &bad_sectors)) 2855 set_bit(R5_MadeGoodRepl, &sh->dev[i].flags); 2856 } else { 2857 if (bi->bi_status) { 2858 set_bit(STRIPE_DEGRADED, &sh->state); 2859 set_bit(WriteErrorSeen, &rdev->flags); 2860 set_bit(R5_WriteError, &sh->dev[i].flags); 2861 if (!test_and_set_bit(WantReplacement, &rdev->flags)) 2862 set_bit(MD_RECOVERY_NEEDED, 2863 &rdev->mddev->recovery); 2864 } else if (is_badblock(rdev, sh->sector, 2865 RAID5_STRIPE_SECTORS(conf), 2866 &first_bad, &bad_sectors)) { 2867 set_bit(R5_MadeGood, &sh->dev[i].flags); 2868 if (test_bit(R5_ReadError, &sh->dev[i].flags)) 2869 /* That was a successful write so make 2870 * sure it looks like we already did 2871 * a re-write. 2872 */ 2873 set_bit(R5_ReWrite, &sh->dev[i].flags); 2874 } 2875 } 2876 rdev_dec_pending(rdev, conf->mddev); 2877 2878 if (sh->batch_head && bi->bi_status && !replacement) 2879 set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state); 2880 2881 bio_uninit(bi); 2882 if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags)) 2883 clear_bit(R5_LOCKED, &sh->dev[i].flags); 2884 set_bit(STRIPE_HANDLE, &sh->state); 2885 raid5_release_stripe(sh); 2886 2887 if (sh->batch_head && sh != sh->batch_head) 2888 raid5_release_stripe(sh->batch_head); 2889 } 2890 2891 static void raid5_error(struct mddev *mddev, struct md_rdev *rdev) 2892 { 2893 struct r5conf *conf = mddev->private; 2894 unsigned long flags; 2895 pr_debug("raid456: error called\n"); 2896 2897 pr_crit("md/raid:%s: Disk failure on %pg, disabling device.\n", 2898 mdname(mddev), rdev->bdev); 2899 2900 spin_lock_irqsave(&conf->device_lock, flags); 2901 set_bit(Faulty, &rdev->flags); 2902 clear_bit(In_sync, &rdev->flags); 2903 mddev->degraded = raid5_calc_degraded(conf); 2904 2905 if (has_failed(conf)) { 2906 set_bit(MD_BROKEN, &conf->mddev->flags); 2907 conf->recovery_disabled = mddev->recovery_disabled; 2908 2909 pr_crit("md/raid:%s: Cannot continue operation (%d/%d failed).\n", 2910 mdname(mddev), mddev->degraded, conf->raid_disks); 2911 } else { 2912 pr_crit("md/raid:%s: Operation continuing on %d devices.\n", 2913 mdname(mddev), conf->raid_disks - mddev->degraded); 2914 } 2915 2916 spin_unlock_irqrestore(&conf->device_lock, flags); 2917 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 2918 2919 set_bit(Blocked, &rdev->flags); 2920 set_mask_bits(&mddev->sb_flags, 0, 2921 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING)); 2922 r5c_update_on_rdev_error(mddev, rdev); 2923 } 2924 2925 /* 2926 * Input: a 'big' sector number, 2927 * Output: index of the data and parity disk, and the sector # in them. 
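 *
 * For example (illustrative numbers only): with 4 devices, 128-sector
 * chunks and ALGORITHM_LEFT_SYMMETRIC, logical sector 1000 gives
 * chunk_offset = 1000 % 128 = 104 and data chunk 7, i.e. dd_idx 1 of
 * stripe 2.  Left-symmetric puts the parity of stripe 2 on device 1 and
 * numbers the data disks starting after it, so the block lands on
 * device 3 at sector 2 * 128 + 104 = 360 (before adding data_offset).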
2928 */ 2929 sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector, 2930 int previous, int *dd_idx, 2931 struct stripe_head *sh) 2932 { 2933 sector_t stripe, stripe2; 2934 sector_t chunk_number; 2935 unsigned int chunk_offset; 2936 int pd_idx, qd_idx; 2937 int ddf_layout = 0; 2938 sector_t new_sector; 2939 int algorithm = previous ? conf->prev_algo 2940 : conf->algorithm; 2941 int sectors_per_chunk = previous ? conf->prev_chunk_sectors 2942 : conf->chunk_sectors; 2943 int raid_disks = previous ? conf->previous_raid_disks 2944 : conf->raid_disks; 2945 int data_disks = raid_disks - conf->max_degraded; 2946 2947 /* First compute the information on this sector */ 2948 2949 /* 2950 * Compute the chunk number and the sector offset inside the chunk 2951 */ 2952 chunk_offset = sector_div(r_sector, sectors_per_chunk); 2953 chunk_number = r_sector; 2954 2955 /* 2956 * Compute the stripe number 2957 */ 2958 stripe = chunk_number; 2959 *dd_idx = sector_div(stripe, data_disks); 2960 stripe2 = stripe; 2961 /* 2962 * Select the parity disk based on the user selected algorithm. 2963 */ 2964 pd_idx = qd_idx = -1; 2965 switch(conf->level) { 2966 case 4: 2967 pd_idx = data_disks; 2968 break; 2969 case 5: 2970 switch (algorithm) { 2971 case ALGORITHM_LEFT_ASYMMETRIC: 2972 pd_idx = data_disks - sector_div(stripe2, raid_disks); 2973 if (*dd_idx >= pd_idx) 2974 (*dd_idx)++; 2975 break; 2976 case ALGORITHM_RIGHT_ASYMMETRIC: 2977 pd_idx = sector_div(stripe2, raid_disks); 2978 if (*dd_idx >= pd_idx) 2979 (*dd_idx)++; 2980 break; 2981 case ALGORITHM_LEFT_SYMMETRIC: 2982 pd_idx = data_disks - sector_div(stripe2, raid_disks); 2983 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 2984 break; 2985 case ALGORITHM_RIGHT_SYMMETRIC: 2986 pd_idx = sector_div(stripe2, raid_disks); 2987 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 2988 break; 2989 case ALGORITHM_PARITY_0: 2990 pd_idx = 0; 2991 (*dd_idx)++; 2992 break; 2993 case ALGORITHM_PARITY_N: 2994 pd_idx = data_disks; 2995 break; 2996 default: 2997 BUG(); 2998 } 2999 break; 3000 case 6: 3001 3002 switch (algorithm) { 3003 case ALGORITHM_LEFT_ASYMMETRIC: 3004 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 3005 qd_idx = pd_idx + 1; 3006 if (pd_idx == raid_disks-1) { 3007 (*dd_idx)++; /* Q D D D P */ 3008 qd_idx = 0; 3009 } else if (*dd_idx >= pd_idx) 3010 (*dd_idx) += 2; /* D D P Q D */ 3011 break; 3012 case ALGORITHM_RIGHT_ASYMMETRIC: 3013 pd_idx = sector_div(stripe2, raid_disks); 3014 qd_idx = pd_idx + 1; 3015 if (pd_idx == raid_disks-1) { 3016 (*dd_idx)++; /* Q D D D P */ 3017 qd_idx = 0; 3018 } else if (*dd_idx >= pd_idx) 3019 (*dd_idx) += 2; /* D D P Q D */ 3020 break; 3021 case ALGORITHM_LEFT_SYMMETRIC: 3022 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 3023 qd_idx = (pd_idx + 1) % raid_disks; 3024 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; 3025 break; 3026 case ALGORITHM_RIGHT_SYMMETRIC: 3027 pd_idx = sector_div(stripe2, raid_disks); 3028 qd_idx = (pd_idx + 1) % raid_disks; 3029 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; 3030 break; 3031 3032 case ALGORITHM_PARITY_0: 3033 pd_idx = 0; 3034 qd_idx = 1; 3035 (*dd_idx) += 2; 3036 break; 3037 case ALGORITHM_PARITY_N: 3038 pd_idx = data_disks; 3039 qd_idx = data_disks + 1; 3040 break; 3041 3042 case ALGORITHM_ROTATING_ZERO_RESTART: 3043 /* Exactly the same as RIGHT_ASYMMETRIC, but or 3044 * of blocks for computing Q is different. 
3045 */ 3046 pd_idx = sector_div(stripe2, raid_disks); 3047 qd_idx = pd_idx + 1; 3048 if (pd_idx == raid_disks-1) { 3049 (*dd_idx)++; /* Q D D D P */ 3050 qd_idx = 0; 3051 } else if (*dd_idx >= pd_idx) 3052 (*dd_idx) += 2; /* D D P Q D */ 3053 ddf_layout = 1; 3054 break; 3055 3056 case ALGORITHM_ROTATING_N_RESTART: 3057 /* Same a left_asymmetric, by first stripe is 3058 * D D D P Q rather than 3059 * Q D D D P 3060 */ 3061 stripe2 += 1; 3062 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 3063 qd_idx = pd_idx + 1; 3064 if (pd_idx == raid_disks-1) { 3065 (*dd_idx)++; /* Q D D D P */ 3066 qd_idx = 0; 3067 } else if (*dd_idx >= pd_idx) 3068 (*dd_idx) += 2; /* D D P Q D */ 3069 ddf_layout = 1; 3070 break; 3071 3072 case ALGORITHM_ROTATING_N_CONTINUE: 3073 /* Same as left_symmetric but Q is before P */ 3074 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 3075 qd_idx = (pd_idx + raid_disks - 1) % raid_disks; 3076 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 3077 ddf_layout = 1; 3078 break; 3079 3080 case ALGORITHM_LEFT_ASYMMETRIC_6: 3081 /* RAID5 left_asymmetric, with Q on last device */ 3082 pd_idx = data_disks - sector_div(stripe2, raid_disks-1); 3083 if (*dd_idx >= pd_idx) 3084 (*dd_idx)++; 3085 qd_idx = raid_disks - 1; 3086 break; 3087 3088 case ALGORITHM_RIGHT_ASYMMETRIC_6: 3089 pd_idx = sector_div(stripe2, raid_disks-1); 3090 if (*dd_idx >= pd_idx) 3091 (*dd_idx)++; 3092 qd_idx = raid_disks - 1; 3093 break; 3094 3095 case ALGORITHM_LEFT_SYMMETRIC_6: 3096 pd_idx = data_disks - sector_div(stripe2, raid_disks-1); 3097 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); 3098 qd_idx = raid_disks - 1; 3099 break; 3100 3101 case ALGORITHM_RIGHT_SYMMETRIC_6: 3102 pd_idx = sector_div(stripe2, raid_disks-1); 3103 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); 3104 qd_idx = raid_disks - 1; 3105 break; 3106 3107 case ALGORITHM_PARITY_0_6: 3108 pd_idx = 0; 3109 (*dd_idx)++; 3110 qd_idx = raid_disks - 1; 3111 break; 3112 3113 default: 3114 BUG(); 3115 } 3116 break; 3117 } 3118 3119 if (sh) { 3120 sh->pd_idx = pd_idx; 3121 sh->qd_idx = qd_idx; 3122 sh->ddf_layout = ddf_layout; 3123 } 3124 /* 3125 * Finally, compute the new sector number 3126 */ 3127 new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset; 3128 return new_sector; 3129 } 3130 3131 sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous) 3132 { 3133 struct r5conf *conf = sh->raid_conf; 3134 int raid_disks = sh->disks; 3135 int data_disks = raid_disks - conf->max_degraded; 3136 sector_t new_sector = sh->sector, check; 3137 int sectors_per_chunk = previous ? conf->prev_chunk_sectors 3138 : conf->chunk_sectors; 3139 int algorithm = previous ? 
conf->prev_algo 3140 : conf->algorithm; 3141 sector_t stripe; 3142 int chunk_offset; 3143 sector_t chunk_number; 3144 int dummy1, dd_idx = i; 3145 sector_t r_sector; 3146 struct stripe_head sh2; 3147 3148 chunk_offset = sector_div(new_sector, sectors_per_chunk); 3149 stripe = new_sector; 3150 3151 if (i == sh->pd_idx) 3152 return 0; 3153 switch(conf->level) { 3154 case 4: break; 3155 case 5: 3156 switch (algorithm) { 3157 case ALGORITHM_LEFT_ASYMMETRIC: 3158 case ALGORITHM_RIGHT_ASYMMETRIC: 3159 if (i > sh->pd_idx) 3160 i--; 3161 break; 3162 case ALGORITHM_LEFT_SYMMETRIC: 3163 case ALGORITHM_RIGHT_SYMMETRIC: 3164 if (i < sh->pd_idx) 3165 i += raid_disks; 3166 i -= (sh->pd_idx + 1); 3167 break; 3168 case ALGORITHM_PARITY_0: 3169 i -= 1; 3170 break; 3171 case ALGORITHM_PARITY_N: 3172 break; 3173 default: 3174 BUG(); 3175 } 3176 break; 3177 case 6: 3178 if (i == sh->qd_idx) 3179 return 0; /* It is the Q disk */ 3180 switch (algorithm) { 3181 case ALGORITHM_LEFT_ASYMMETRIC: 3182 case ALGORITHM_RIGHT_ASYMMETRIC: 3183 case ALGORITHM_ROTATING_ZERO_RESTART: 3184 case ALGORITHM_ROTATING_N_RESTART: 3185 if (sh->pd_idx == raid_disks-1) 3186 i--; /* Q D D D P */ 3187 else if (i > sh->pd_idx) 3188 i -= 2; /* D D P Q D */ 3189 break; 3190 case ALGORITHM_LEFT_SYMMETRIC: 3191 case ALGORITHM_RIGHT_SYMMETRIC: 3192 if (sh->pd_idx == raid_disks-1) 3193 i--; /* Q D D D P */ 3194 else { 3195 /* D D P Q D */ 3196 if (i < sh->pd_idx) 3197 i += raid_disks; 3198 i -= (sh->pd_idx + 2); 3199 } 3200 break; 3201 case ALGORITHM_PARITY_0: 3202 i -= 2; 3203 break; 3204 case ALGORITHM_PARITY_N: 3205 break; 3206 case ALGORITHM_ROTATING_N_CONTINUE: 3207 /* Like left_symmetric, but P is before Q */ 3208 if (sh->pd_idx == 0) 3209 i--; /* P D D D Q */ 3210 else { 3211 /* D D Q P D */ 3212 if (i < sh->pd_idx) 3213 i += raid_disks; 3214 i -= (sh->pd_idx + 1); 3215 } 3216 break; 3217 case ALGORITHM_LEFT_ASYMMETRIC_6: 3218 case ALGORITHM_RIGHT_ASYMMETRIC_6: 3219 if (i > sh->pd_idx) 3220 i--; 3221 break; 3222 case ALGORITHM_LEFT_SYMMETRIC_6: 3223 case ALGORITHM_RIGHT_SYMMETRIC_6: 3224 if (i < sh->pd_idx) 3225 i += data_disks + 1; 3226 i -= (sh->pd_idx + 1); 3227 break; 3228 case ALGORITHM_PARITY_0_6: 3229 i -= 1; 3230 break; 3231 default: 3232 BUG(); 3233 } 3234 break; 3235 } 3236 3237 chunk_number = stripe * data_disks + i; 3238 r_sector = chunk_number * sectors_per_chunk + chunk_offset; 3239 3240 check = raid5_compute_sector(conf, r_sector, 3241 previous, &dummy1, &sh2); 3242 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx 3243 || sh2.qd_idx != sh->qd_idx) { 3244 pr_warn("md/raid:%s: compute_blocknr: map not correct\n", 3245 mdname(conf->mddev)); 3246 return 0; 3247 } 3248 return r_sector; 3249 } 3250 3251 /* 3252 * There are cases where we want handle_stripe_dirtying() and 3253 * schedule_reconstruction() to delay towrite to some dev of a stripe. 3254 * 3255 * This function checks whether we want to delay the towrite. Specifically, 3256 * we delay the towrite when: 3257 * 3258 * 1. degraded stripe has a non-overwrite to the missing dev, AND this 3259 * stripe has data in journal (for other devices). 3260 * 3261 * In this case, when reading data for the non-overwrite dev, it is 3262 * necessary to handle complex rmw of write back cache (prexor with 3263 * orig_page, and xor with page). To keep read path simple, we would 3264 * like to flush data in journal to RAID disks first, so complex rmw 3265 * is handled in the write patch (handle_stripe_dirtying). 3266 * 3267 * 2. 
when journal space is critical (R5C_LOG_CRITICAL=1) 3268 * 3269 * It is important to be able to flush all stripes in raid5-cache. 3270 * Therefore, we need reserve some space on the journal device for 3271 * these flushes. If flush operation includes pending writes to the 3272 * stripe, we need to reserve (conf->raid_disk + 1) pages per stripe 3273 * for the flush out. If we exclude these pending writes from flush 3274 * operation, we only need (conf->max_degraded + 1) pages per stripe. 3275 * Therefore, excluding pending writes in these cases enables more 3276 * efficient use of the journal device. 3277 * 3278 * Note: To make sure the stripe makes progress, we only delay 3279 * towrite for stripes with data already in journal (injournal > 0). 3280 * When LOG_CRITICAL, stripes with injournal == 0 will be sent to 3281 * no_space_stripes list. 3282 * 3283 * 3. during journal failure 3284 * In journal failure, we try to flush all cached data to raid disks 3285 * based on data in stripe cache. The array is read-only to upper 3286 * layers, so we would skip all pending writes. 3287 * 3288 */ 3289 static inline bool delay_towrite(struct r5conf *conf, 3290 struct r5dev *dev, 3291 struct stripe_head_state *s) 3292 { 3293 /* case 1 above */ 3294 if (!test_bit(R5_OVERWRITE, &dev->flags) && 3295 !test_bit(R5_Insync, &dev->flags) && s->injournal) 3296 return true; 3297 /* case 2 above */ 3298 if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) && 3299 s->injournal > 0) 3300 return true; 3301 /* case 3 above */ 3302 if (s->log_failed && s->injournal) 3303 return true; 3304 return false; 3305 } 3306 3307 static void 3308 schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, 3309 int rcw, int expand) 3310 { 3311 int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx, disks = sh->disks; 3312 struct r5conf *conf = sh->raid_conf; 3313 int level = conf->level; 3314 3315 if (rcw) { 3316 /* 3317 * In some cases, handle_stripe_dirtying initially decided to 3318 * run rmw and allocates extra page for prexor. However, rcw is 3319 * cheaper later on. We need to free the extra page now, 3320 * because we won't be able to do that in ops_complete_prexor(). 
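		 * (once we switch to rcw, no prexor operation is scheduled
		 * at all, so that completion callback never runs for this
		 * stripe).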
3321 */ 3322 r5c_release_extra_page(sh); 3323 3324 for (i = disks; i--; ) { 3325 struct r5dev *dev = &sh->dev[i]; 3326 3327 if (dev->towrite && !delay_towrite(conf, dev, s)) { 3328 set_bit(R5_LOCKED, &dev->flags); 3329 set_bit(R5_Wantdrain, &dev->flags); 3330 if (!expand) 3331 clear_bit(R5_UPTODATE, &dev->flags); 3332 s->locked++; 3333 } else if (test_bit(R5_InJournal, &dev->flags)) { 3334 set_bit(R5_LOCKED, &dev->flags); 3335 s->locked++; 3336 } 3337 } 3338 /* if we are not expanding this is a proper write request, and 3339 * there will be bios with new data to be drained into the 3340 * stripe cache 3341 */ 3342 if (!expand) { 3343 if (!s->locked) 3344 /* False alarm, nothing to do */ 3345 return; 3346 sh->reconstruct_state = reconstruct_state_drain_run; 3347 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); 3348 } else 3349 sh->reconstruct_state = reconstruct_state_run; 3350 3351 set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); 3352 3353 if (s->locked + conf->max_degraded == disks) 3354 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) 3355 atomic_inc(&conf->pending_full_writes); 3356 } else { 3357 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || 3358 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); 3359 BUG_ON(level == 6 && 3360 (!(test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags) || 3361 test_bit(R5_Wantcompute, &sh->dev[qd_idx].flags)))); 3362 3363 for (i = disks; i--; ) { 3364 struct r5dev *dev = &sh->dev[i]; 3365 if (i == pd_idx || i == qd_idx) 3366 continue; 3367 3368 if (dev->towrite && 3369 (test_bit(R5_UPTODATE, &dev->flags) || 3370 test_bit(R5_Wantcompute, &dev->flags))) { 3371 set_bit(R5_Wantdrain, &dev->flags); 3372 set_bit(R5_LOCKED, &dev->flags); 3373 clear_bit(R5_UPTODATE, &dev->flags); 3374 s->locked++; 3375 } else if (test_bit(R5_InJournal, &dev->flags)) { 3376 set_bit(R5_LOCKED, &dev->flags); 3377 s->locked++; 3378 } 3379 } 3380 if (!s->locked) 3381 /* False alarm - nothing to do */ 3382 return; 3383 sh->reconstruct_state = reconstruct_state_prexor_drain_run; 3384 set_bit(STRIPE_OP_PREXOR, &s->ops_request); 3385 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); 3386 set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); 3387 } 3388 3389 /* keep the parity disk(s) locked while asynchronous operations 3390 * are in flight 3391 */ 3392 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); 3393 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 3394 s->locked++; 3395 3396 if (level == 6) { 3397 int qd_idx = sh->qd_idx; 3398 struct r5dev *dev = &sh->dev[qd_idx]; 3399 3400 set_bit(R5_LOCKED, &dev->flags); 3401 clear_bit(R5_UPTODATE, &dev->flags); 3402 s->locked++; 3403 } 3404 3405 if (raid5_has_ppl(sh->raid_conf) && sh->ppl_page && 3406 test_bit(STRIPE_OP_BIODRAIN, &s->ops_request) && 3407 !test_bit(STRIPE_FULL_WRITE, &sh->state) && 3408 test_bit(R5_Insync, &sh->dev[pd_idx].flags)) 3409 set_bit(STRIPE_OP_PARTIAL_PARITY, &s->ops_request); 3410 3411 pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n", 3412 __func__, (unsigned long long)sh->sector, 3413 s->locked, s->ops_request); 3414 } 3415 3416 /* 3417 * Each stripe/dev can have one or more bion attached. 3418 * toread/towrite point to the first in a chain. 3419 * The bi_next chain must be in order. 
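 * For example (illustrative): two 4-sector writes covering the first and
 * second half of an 8-sector stripe unit are chained in sector order;
 * the coverage walk in add_stripe_bio() then sees the unit fully written
 * and sets R5_OVERWRITE for that device.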
3420 */ 3421 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, 3422 int forwrite, int previous) 3423 { 3424 struct bio **bip; 3425 struct r5conf *conf = sh->raid_conf; 3426 int firstwrite=0; 3427 3428 pr_debug("adding bi b#%llu to stripe s#%llu\n", 3429 (unsigned long long)bi->bi_iter.bi_sector, 3430 (unsigned long long)sh->sector); 3431 3432 spin_lock_irq(&sh->stripe_lock); 3433 /* Don't allow new IO added to stripes in batch list */ 3434 if (sh->batch_head) 3435 goto overlap; 3436 if (forwrite) { 3437 bip = &sh->dev[dd_idx].towrite; 3438 if (*bip == NULL) 3439 firstwrite = 1; 3440 } else 3441 bip = &sh->dev[dd_idx].toread; 3442 while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) { 3443 if (bio_end_sector(*bip) > bi->bi_iter.bi_sector) 3444 goto overlap; 3445 bip = & (*bip)->bi_next; 3446 } 3447 if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi)) 3448 goto overlap; 3449 3450 if (forwrite && raid5_has_ppl(conf)) { 3451 /* 3452 * With PPL only writes to consecutive data chunks within a 3453 * stripe are allowed because for a single stripe_head we can 3454 * only have one PPL entry at a time, which describes one data 3455 * range. Not really an overlap, but wait_for_overlap can be 3456 * used to handle this. 3457 */ 3458 sector_t sector; 3459 sector_t first = 0; 3460 sector_t last = 0; 3461 int count = 0; 3462 int i; 3463 3464 for (i = 0; i < sh->disks; i++) { 3465 if (i != sh->pd_idx && 3466 (i == dd_idx || sh->dev[i].towrite)) { 3467 sector = sh->dev[i].sector; 3468 if (count == 0 || sector < first) 3469 first = sector; 3470 if (sector > last) 3471 last = sector; 3472 count++; 3473 } 3474 } 3475 3476 if (first + conf->chunk_sectors * (count - 1) != last) 3477 goto overlap; 3478 } 3479 3480 if (!forwrite || previous) 3481 clear_bit(STRIPE_BATCH_READY, &sh->state); 3482 3483 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next); 3484 if (*bip) 3485 bi->bi_next = *bip; 3486 *bip = bi; 3487 bio_inc_remaining(bi); 3488 md_write_inc(conf->mddev, bi); 3489 3490 if (forwrite) { 3491 /* check if page is covered */ 3492 sector_t sector = sh->dev[dd_idx].sector; 3493 for (bi=sh->dev[dd_idx].towrite; 3494 sector < sh->dev[dd_idx].sector + RAID5_STRIPE_SECTORS(conf) && 3495 bi && bi->bi_iter.bi_sector <= sector; 3496 bi = r5_next_bio(conf, bi, sh->dev[dd_idx].sector)) { 3497 if (bio_end_sector(bi) >= sector) 3498 sector = bio_end_sector(bi); 3499 } 3500 if (sector >= sh->dev[dd_idx].sector + RAID5_STRIPE_SECTORS(conf)) 3501 if (!test_and_set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags)) 3502 sh->overwrite_disks++; 3503 } 3504 3505 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", 3506 (unsigned long long)(*bip)->bi_iter.bi_sector, 3507 (unsigned long long)sh->sector, dd_idx); 3508 3509 if (conf->mddev->bitmap && firstwrite) { 3510 /* Cannot hold spinlock over bitmap_startwrite, 3511 * but must ensure this isn't added to a batch until 3512 * we have added to the bitmap and set bm_seq. 3513 * So set STRIPE_BITMAP_PENDING to prevent 3514 * batching. 3515 * If multiple add_stripe_bio() calls race here they 3516 * much all set STRIPE_BITMAP_PENDING. So only the first one 3517 * to complete "bitmap_startwrite" gets to set 3518 * STRIPE_BIT_DELAY. This is important as once a stripe 3519 * is added to a batch, STRIPE_BIT_DELAY cannot be changed 3520 * any more. 
3521 */ 3522 set_bit(STRIPE_BITMAP_PENDING, &sh->state); 3523 spin_unlock_irq(&sh->stripe_lock); 3524 md_bitmap_startwrite(conf->mddev->bitmap, sh->sector, 3525 RAID5_STRIPE_SECTORS(conf), 0); 3526 spin_lock_irq(&sh->stripe_lock); 3527 clear_bit(STRIPE_BITMAP_PENDING, &sh->state); 3528 if (!sh->batch_head) { 3529 sh->bm_seq = conf->seq_flush+1; 3530 set_bit(STRIPE_BIT_DELAY, &sh->state); 3531 } 3532 } 3533 spin_unlock_irq(&sh->stripe_lock); 3534 3535 if (stripe_can_batch(sh)) 3536 stripe_add_to_batch_list(conf, sh); 3537 return 1; 3538 3539 overlap: 3540 set_bit(R5_Overlap, &sh->dev[dd_idx].flags); 3541 spin_unlock_irq(&sh->stripe_lock); 3542 return 0; 3543 } 3544 3545 static void end_reshape(struct r5conf *conf); 3546 3547 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, 3548 struct stripe_head *sh) 3549 { 3550 int sectors_per_chunk = 3551 previous ? conf->prev_chunk_sectors : conf->chunk_sectors; 3552 int dd_idx; 3553 int chunk_offset = sector_div(stripe, sectors_per_chunk); 3554 int disks = previous ? conf->previous_raid_disks : conf->raid_disks; 3555 3556 raid5_compute_sector(conf, 3557 stripe * (disks - conf->max_degraded) 3558 *sectors_per_chunk + chunk_offset, 3559 previous, 3560 &dd_idx, sh); 3561 } 3562 3563 static void 3564 handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, 3565 struct stripe_head_state *s, int disks) 3566 { 3567 int i; 3568 BUG_ON(sh->batch_head); 3569 for (i = disks; i--; ) { 3570 struct bio *bi; 3571 int bitmap_end = 0; 3572 3573 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 3574 struct md_rdev *rdev; 3575 rcu_read_lock(); 3576 rdev = rcu_dereference(conf->disks[i].rdev); 3577 if (rdev && test_bit(In_sync, &rdev->flags) && 3578 !test_bit(Faulty, &rdev->flags)) 3579 atomic_inc(&rdev->nr_pending); 3580 else 3581 rdev = NULL; 3582 rcu_read_unlock(); 3583 if (rdev) { 3584 if (!rdev_set_badblocks( 3585 rdev, 3586 sh->sector, 3587 RAID5_STRIPE_SECTORS(conf), 0)) 3588 md_error(conf->mddev, rdev); 3589 rdev_dec_pending(rdev, conf->mddev); 3590 } 3591 } 3592 spin_lock_irq(&sh->stripe_lock); 3593 /* fail all writes first */ 3594 bi = sh->dev[i].towrite; 3595 sh->dev[i].towrite = NULL; 3596 sh->overwrite_disks = 0; 3597 spin_unlock_irq(&sh->stripe_lock); 3598 if (bi) 3599 bitmap_end = 1; 3600 3601 log_stripe_write_finished(sh); 3602 3603 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 3604 wake_up(&conf->wait_for_overlap); 3605 3606 while (bi && bi->bi_iter.bi_sector < 3607 sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) { 3608 struct bio *nextbi = r5_next_bio(conf, bi, sh->dev[i].sector); 3609 3610 md_write_end(conf->mddev); 3611 bio_io_error(bi); 3612 bi = nextbi; 3613 } 3614 if (bitmap_end) 3615 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, 3616 RAID5_STRIPE_SECTORS(conf), 0, 0); 3617 bitmap_end = 0; 3618 /* and fail all 'written' */ 3619 bi = sh->dev[i].written; 3620 sh->dev[i].written = NULL; 3621 if (test_and_clear_bit(R5_SkipCopy, &sh->dev[i].flags)) { 3622 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); 3623 sh->dev[i].page = sh->dev[i].orig_page; 3624 } 3625 3626 if (bi) bitmap_end = 1; 3627 while (bi && bi->bi_iter.bi_sector < 3628 sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) { 3629 struct bio *bi2 = r5_next_bio(conf, bi, sh->dev[i].sector); 3630 3631 md_write_end(conf->mddev); 3632 bio_io_error(bi); 3633 bi = bi2; 3634 } 3635 3636 /* fail any reads if this device is non-operational and 3637 * the data has not reached the cache yet. 
3638 */ 3639 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && 3640 s->failed > conf->max_degraded && 3641 (!test_bit(R5_Insync, &sh->dev[i].flags) || 3642 test_bit(R5_ReadError, &sh->dev[i].flags))) { 3643 spin_lock_irq(&sh->stripe_lock); 3644 bi = sh->dev[i].toread; 3645 sh->dev[i].toread = NULL; 3646 spin_unlock_irq(&sh->stripe_lock); 3647 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 3648 wake_up(&conf->wait_for_overlap); 3649 if (bi) 3650 s->to_read--; 3651 while (bi && bi->bi_iter.bi_sector < 3652 sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) { 3653 struct bio *nextbi = 3654 r5_next_bio(conf, bi, sh->dev[i].sector); 3655 3656 bio_io_error(bi); 3657 bi = nextbi; 3658 } 3659 } 3660 if (bitmap_end) 3661 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, 3662 RAID5_STRIPE_SECTORS(conf), 0, 0); 3663 /* If we were in the middle of a write the parity block might 3664 * still be locked - so just clear all R5_LOCKED flags 3665 */ 3666 clear_bit(R5_LOCKED, &sh->dev[i].flags); 3667 } 3668 s->to_write = 0; 3669 s->written = 0; 3670 3671 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 3672 if (atomic_dec_and_test(&conf->pending_full_writes)) 3673 md_wakeup_thread(conf->mddev->thread); 3674 } 3675 3676 static void 3677 handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, 3678 struct stripe_head_state *s) 3679 { 3680 int abort = 0; 3681 int i; 3682 3683 BUG_ON(sh->batch_head); 3684 clear_bit(STRIPE_SYNCING, &sh->state); 3685 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) 3686 wake_up(&conf->wait_for_overlap); 3687 s->syncing = 0; 3688 s->replacing = 0; 3689 /* There is nothing more to do for sync/check/repair. 3690 * Don't even need to abort as that is handled elsewhere 3691 * if needed, and not always wanted e.g. if there is a known 3692 * bad block here. 
	 * For recover/replace we need to record a bad block on all
	 * non-sync devices, or abort the recovery
	 */
	if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) {
		/* During recovery devices cannot be removed, so
		 * locking and refcounting of rdevs is not needed
		 */
		rcu_read_lock();
		for (i = 0; i < conf->raid_disks; i++) {
			struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
			if (rdev
			    && !test_bit(Faulty, &rdev->flags)
			    && !test_bit(In_sync, &rdev->flags)
			    && !rdev_set_badblocks(rdev, sh->sector,
						   RAID5_STRIPE_SECTORS(conf), 0))
				abort = 1;
			rdev = rcu_dereference(conf->disks[i].replacement);
			if (rdev
			    && !test_bit(Faulty, &rdev->flags)
			    && !test_bit(In_sync, &rdev->flags)
			    && !rdev_set_badblocks(rdev, sh->sector,
						   RAID5_STRIPE_SECTORS(conf), 0))
				abort = 1;
		}
		rcu_read_unlock();
		if (abort)
			conf->recovery_disabled =
				conf->mddev->recovery_disabled;
	}
	md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), !abort);
}

static int want_replace(struct stripe_head *sh, int disk_idx)
{
	struct md_rdev *rdev;
	int rv = 0;

	rcu_read_lock();
	rdev = rcu_dereference(sh->raid_conf->disks[disk_idx].replacement);
	if (rdev
	    && !test_bit(Faulty, &rdev->flags)
	    && !test_bit(In_sync, &rdev->flags)
	    && (rdev->recovery_offset <= sh->sector
		|| rdev->mddev->recovery_cp <= sh->sector))
		rv = 1;
	rcu_read_unlock();
	return rv;
}

static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
			   int disk_idx, int disks)
{
	struct r5dev *dev = &sh->dev[disk_idx];
	struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]],
				  &sh->dev[s->failed_num[1]] };
	int i;
	bool force_rcw = (sh->raid_conf->rmw_level == PARITY_DISABLE_RMW);

	if (test_bit(R5_LOCKED, &dev->flags) ||
	    test_bit(R5_UPTODATE, &dev->flags))
		/* No point reading this as we already have it or have
		 * decided to get it.
		 */
		return 0;

	if (dev->toread ||
	    (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)))
		/* We need this block to directly satisfy a request */
		return 1;

	if (s->syncing || s->expanding ||
	    (s->replacing && want_replace(sh, disk_idx)))
		/* When syncing or expanding, we read everything.
		 * When replacing, we need the replaced block.
		 */
		return 1;

	if ((s->failed >= 1 && fdev[0]->toread) ||
	    (s->failed >= 2 && fdev[1]->toread))
		/* If we want to read from a failed device, then
		 * we need to actually read every other device.
		 */
		return 1;

	/* Sometimes neither read-modify-write nor reconstruct-write
	 * cycles can work.  In those cases we read every block we
	 * can.  Then the parity-update is certain to have enough to
	 * work with.
	 * This can only be a problem when we need to write something,
	 * and some device has failed.  If either of those tests
	 * fails, we need look no further.
	 */
	if (!s->failed || !s->to_write)
		return 0;

	if (test_bit(R5_Insync, &dev->flags) &&
	    !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
		/* Pre-reads are not permitted until after a short delay
		 * to gather multiple requests.  However if this
		 * device is not Insync, the block can only be computed
		 * and there is no need to delay that.
3795 */ 3796 return 0; 3797 3798 for (i = 0; i < s->failed && i < 2; i++) { 3799 if (fdev[i]->towrite && 3800 !test_bit(R5_UPTODATE, &fdev[i]->flags) && 3801 !test_bit(R5_OVERWRITE, &fdev[i]->flags)) 3802 /* If we have a partial write to a failed 3803 * device, then we will need to reconstruct 3804 * the content of that device, so all other 3805 * devices must be read. 3806 */ 3807 return 1; 3808 3809 if (s->failed >= 2 && 3810 (fdev[i]->towrite || 3811 s->failed_num[i] == sh->pd_idx || 3812 s->failed_num[i] == sh->qd_idx) && 3813 !test_bit(R5_UPTODATE, &fdev[i]->flags)) 3814 /* In max degraded raid6, If the failed disk is P, Q, 3815 * or we want to read the failed disk, we need to do 3816 * reconstruct-write. 3817 */ 3818 force_rcw = true; 3819 } 3820 3821 /* If we are forced to do a reconstruct-write, because parity 3822 * cannot be trusted and we are currently recovering it, there 3823 * is extra need to be careful. 3824 * If one of the devices that we would need to read, because 3825 * it is not being overwritten (and maybe not written at all) 3826 * is missing/faulty, then we need to read everything we can. 3827 */ 3828 if (!force_rcw && 3829 sh->sector < sh->raid_conf->mddev->recovery_cp) 3830 /* reconstruct-write isn't being forced */ 3831 return 0; 3832 for (i = 0; i < s->failed && i < 2; i++) { 3833 if (s->failed_num[i] != sh->pd_idx && 3834 s->failed_num[i] != sh->qd_idx && 3835 !test_bit(R5_UPTODATE, &fdev[i]->flags) && 3836 !test_bit(R5_OVERWRITE, &fdev[i]->flags)) 3837 return 1; 3838 } 3839 3840 return 0; 3841 } 3842 3843 /* fetch_block - checks the given member device to see if its data needs 3844 * to be read or computed to satisfy a request. 3845 * 3846 * Returns 1 when no more member devices need to be checked, otherwise returns 3847 * 0 to tell the loop in handle_stripe_fill to continue 3848 */ 3849 static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, 3850 int disk_idx, int disks) 3851 { 3852 struct r5dev *dev = &sh->dev[disk_idx]; 3853 3854 /* is the data in this block needed, and can we get it? */ 3855 if (need_this_block(sh, s, disk_idx, disks)) { 3856 /* we would like to get this block, possibly by computing it, 3857 * otherwise read it if the backing disk is insync 3858 */ 3859 BUG_ON(test_bit(R5_Wantcompute, &dev->flags)); 3860 BUG_ON(test_bit(R5_Wantread, &dev->flags)); 3861 BUG_ON(sh->batch_head); 3862 3863 /* 3864 * In the raid6 case if the only non-uptodate disk is P 3865 * then we already trusted P to compute the other failed 3866 * drives. It is safe to compute rather than re-read P. 3867 * In other cases we only compute blocks from failed 3868 * devices, otherwise check/repair might fail to detect 3869 * a real inconsistency. 3870 */ 3871 3872 if ((s->uptodate == disks - 1) && 3873 ((sh->qd_idx >= 0 && sh->pd_idx == disk_idx) || 3874 (s->failed && (disk_idx == s->failed_num[0] || 3875 disk_idx == s->failed_num[1])))) { 3876 /* have disk failed, and we're requested to fetch it; 3877 * do compute it 3878 */ 3879 pr_debug("Computing stripe %llu block %d\n", 3880 (unsigned long long)sh->sector, disk_idx); 3881 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 3882 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 3883 set_bit(R5_Wantcompute, &dev->flags); 3884 sh->ops.target = disk_idx; 3885 sh->ops.target2 = -1; /* no 2nd target */ 3886 s->req_compute = 1; 3887 /* Careful: from this point on 'uptodate' is in the eye 3888 * of raid_run_ops which services 'compute' operations 3889 * before writes. 
R5_Wantcompute flags a block that will 3890 * be R5_UPTODATE by the time it is needed for a 3891 * subsequent operation. 3892 */ 3893 s->uptodate++; 3894 return 1; 3895 } else if (s->uptodate == disks-2 && s->failed >= 2) { 3896 /* Computing 2-failure is *very* expensive; only 3897 * do it if failed >= 2 3898 */ 3899 int other; 3900 for (other = disks; other--; ) { 3901 if (other == disk_idx) 3902 continue; 3903 if (!test_bit(R5_UPTODATE, 3904 &sh->dev[other].flags)) 3905 break; 3906 } 3907 BUG_ON(other < 0); 3908 pr_debug("Computing stripe %llu blocks %d,%d\n", 3909 (unsigned long long)sh->sector, 3910 disk_idx, other); 3911 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 3912 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 3913 set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags); 3914 set_bit(R5_Wantcompute, &sh->dev[other].flags); 3915 sh->ops.target = disk_idx; 3916 sh->ops.target2 = other; 3917 s->uptodate += 2; 3918 s->req_compute = 1; 3919 return 1; 3920 } else if (test_bit(R5_Insync, &dev->flags)) { 3921 set_bit(R5_LOCKED, &dev->flags); 3922 set_bit(R5_Wantread, &dev->flags); 3923 s->locked++; 3924 pr_debug("Reading block %d (sync=%d)\n", 3925 disk_idx, s->syncing); 3926 } 3927 } 3928 3929 return 0; 3930 } 3931 3932 /* 3933 * handle_stripe_fill - read or compute data to satisfy pending requests. 3934 */ 3935 static void handle_stripe_fill(struct stripe_head *sh, 3936 struct stripe_head_state *s, 3937 int disks) 3938 { 3939 int i; 3940 3941 /* look for blocks to read/compute, skip this if a compute 3942 * is already in flight, or if the stripe contents are in the 3943 * midst of changing due to a write 3944 */ 3945 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && 3946 !sh->reconstruct_state) { 3947 3948 /* 3949 * For degraded stripe with data in journal, do not handle 3950 * read requests yet, instead, flush the stripe to raid 3951 * disks first, this avoids handling complex rmw of write 3952 * back cache (prexor with orig_page, and then xor with 3953 * page) in the read path 3954 */ 3955 if (s->injournal && s->failed) { 3956 if (test_bit(STRIPE_R5C_CACHING, &sh->state)) 3957 r5c_make_stripe_write_out(sh); 3958 goto out; 3959 } 3960 3961 for (i = disks; i--; ) 3962 if (fetch_block(sh, s, i, disks)) 3963 break; 3964 } 3965 out: 3966 set_bit(STRIPE_HANDLE, &sh->state); 3967 } 3968 3969 static void break_stripe_batch_list(struct stripe_head *head_sh, 3970 unsigned long handle_flags); 3971 /* handle_stripe_clean_event 3972 * any written block on an uptodate or failed drive can be returned. 3973 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but 3974 * never LOCKED, so we don't need to test 'failed' directly. 
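 * For a batched stripe, the loop below walks batch_list so that the
 * written bios of every member stripe are completed together with the head.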
3975 */ 3976 static void handle_stripe_clean_event(struct r5conf *conf, 3977 struct stripe_head *sh, int disks) 3978 { 3979 int i; 3980 struct r5dev *dev; 3981 int discard_pending = 0; 3982 struct stripe_head *head_sh = sh; 3983 bool do_endio = false; 3984 3985 for (i = disks; i--; ) 3986 if (sh->dev[i].written) { 3987 dev = &sh->dev[i]; 3988 if (!test_bit(R5_LOCKED, &dev->flags) && 3989 (test_bit(R5_UPTODATE, &dev->flags) || 3990 test_bit(R5_Discard, &dev->flags) || 3991 test_bit(R5_SkipCopy, &dev->flags))) { 3992 /* We can return any write requests */ 3993 struct bio *wbi, *wbi2; 3994 pr_debug("Return write for disc %d\n", i); 3995 if (test_and_clear_bit(R5_Discard, &dev->flags)) 3996 clear_bit(R5_UPTODATE, &dev->flags); 3997 if (test_and_clear_bit(R5_SkipCopy, &dev->flags)) { 3998 WARN_ON(test_bit(R5_UPTODATE, &dev->flags)); 3999 } 4000 do_endio = true; 4001 4002 returnbi: 4003 dev->page = dev->orig_page; 4004 wbi = dev->written; 4005 dev->written = NULL; 4006 while (wbi && wbi->bi_iter.bi_sector < 4007 dev->sector + RAID5_STRIPE_SECTORS(conf)) { 4008 wbi2 = r5_next_bio(conf, wbi, dev->sector); 4009 md_write_end(conf->mddev); 4010 bio_endio(wbi); 4011 wbi = wbi2; 4012 } 4013 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, 4014 RAID5_STRIPE_SECTORS(conf), 4015 !test_bit(STRIPE_DEGRADED, &sh->state), 4016 0); 4017 if (head_sh->batch_head) { 4018 sh = list_first_entry(&sh->batch_list, 4019 struct stripe_head, 4020 batch_list); 4021 if (sh != head_sh) { 4022 dev = &sh->dev[i]; 4023 goto returnbi; 4024 } 4025 } 4026 sh = head_sh; 4027 dev = &sh->dev[i]; 4028 } else if (test_bit(R5_Discard, &dev->flags)) 4029 discard_pending = 1; 4030 } 4031 4032 log_stripe_write_finished(sh); 4033 4034 if (!discard_pending && 4035 test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) { 4036 int hash; 4037 clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); 4038 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); 4039 if (sh->qd_idx >= 0) { 4040 clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); 4041 clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags); 4042 } 4043 /* now that discard is done we can proceed with any sync */ 4044 clear_bit(STRIPE_DISCARD, &sh->state); 4045 /* 4046 * SCSI discard will change some bio fields and the stripe has 4047 * no updated data, so remove it from hash list and the stripe 4048 * will be reinitialized 4049 */ 4050 unhash: 4051 hash = sh->hash_lock_index; 4052 spin_lock_irq(conf->hash_locks + hash); 4053 remove_hash(sh); 4054 spin_unlock_irq(conf->hash_locks + hash); 4055 if (head_sh->batch_head) { 4056 sh = list_first_entry(&sh->batch_list, 4057 struct stripe_head, batch_list); 4058 if (sh != head_sh) 4059 goto unhash; 4060 } 4061 sh = head_sh; 4062 4063 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) 4064 set_bit(STRIPE_HANDLE, &sh->state); 4065 4066 } 4067 4068 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 4069 if (atomic_dec_and_test(&conf->pending_full_writes)) 4070 md_wakeup_thread(conf->mddev->thread); 4071 4072 if (head_sh->batch_head && do_endio) 4073 break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS); 4074 } 4075 4076 /* 4077 * For RMW in write back cache, we need extra page in prexor to store the 4078 * old data. This page is stored in dev->orig_page. 4079 * 4080 * This function checks whether we have data for prexor. 
The exact logic 4081 * is: 4082 * R5_UPTODATE && (!R5_InJournal || R5_OrigPageUPTDODATE) 4083 */ 4084 static inline bool uptodate_for_rmw(struct r5dev *dev) 4085 { 4086 return (test_bit(R5_UPTODATE, &dev->flags)) && 4087 (!test_bit(R5_InJournal, &dev->flags) || 4088 test_bit(R5_OrigPageUPTDODATE, &dev->flags)); 4089 } 4090 4091 static int handle_stripe_dirtying(struct r5conf *conf, 4092 struct stripe_head *sh, 4093 struct stripe_head_state *s, 4094 int disks) 4095 { 4096 int rmw = 0, rcw = 0, i; 4097 sector_t recovery_cp = conf->mddev->recovery_cp; 4098 4099 /* Check whether resync is now happening or should start. 4100 * If yes, then the array is dirty (after unclean shutdown or 4101 * initial creation), so parity in some stripes might be inconsistent. 4102 * In this case, we need to always do reconstruct-write, to ensure 4103 * that in case of drive failure or read-error correction, we 4104 * generate correct data from the parity. 4105 */ 4106 if (conf->rmw_level == PARITY_DISABLE_RMW || 4107 (recovery_cp < MaxSector && sh->sector >= recovery_cp && 4108 s->failed == 0)) { 4109 /* Calculate the real rcw later - for now make it 4110 * look like rcw is cheaper 4111 */ 4112 rcw = 1; rmw = 2; 4113 pr_debug("force RCW rmw_level=%u, recovery_cp=%llu sh->sector=%llu\n", 4114 conf->rmw_level, (unsigned long long)recovery_cp, 4115 (unsigned long long)sh->sector); 4116 } else for (i = disks; i--; ) { 4117 /* would I have to read this buffer for read_modify_write */ 4118 struct r5dev *dev = &sh->dev[i]; 4119 if (((dev->towrite && !delay_towrite(conf, dev, s)) || 4120 i == sh->pd_idx || i == sh->qd_idx || 4121 test_bit(R5_InJournal, &dev->flags)) && 4122 !test_bit(R5_LOCKED, &dev->flags) && 4123 !(uptodate_for_rmw(dev) || 4124 test_bit(R5_Wantcompute, &dev->flags))) { 4125 if (test_bit(R5_Insync, &dev->flags)) 4126 rmw++; 4127 else 4128 rmw += 2*disks; /* cannot read it */ 4129 } 4130 /* Would I have to read this buffer for reconstruct_write */ 4131 if (!test_bit(R5_OVERWRITE, &dev->flags) && 4132 i != sh->pd_idx && i != sh->qd_idx && 4133 !test_bit(R5_LOCKED, &dev->flags) && 4134 !(test_bit(R5_UPTODATE, &dev->flags) || 4135 test_bit(R5_Wantcompute, &dev->flags))) { 4136 if (test_bit(R5_Insync, &dev->flags)) 4137 rcw++; 4138 else 4139 rcw += 2*disks; 4140 } 4141 } 4142 4143 pr_debug("for sector %llu state 0x%lx, rmw=%d rcw=%d\n", 4144 (unsigned long long)sh->sector, sh->state, rmw, rcw); 4145 set_bit(STRIPE_HANDLE, &sh->state); 4146 if ((rmw < rcw || (rmw == rcw && conf->rmw_level == PARITY_PREFER_RMW)) && rmw > 0) { 4147 /* prefer read-modify-write, but need to get some data */ 4148 if (conf->mddev->queue) 4149 blk_add_trace_msg(conf->mddev->queue, 4150 "raid5 rmw %llu %d", 4151 (unsigned long long)sh->sector, rmw); 4152 for (i = disks; i--; ) { 4153 struct r5dev *dev = &sh->dev[i]; 4154 if (test_bit(R5_InJournal, &dev->flags) && 4155 dev->page == dev->orig_page && 4156 !test_bit(R5_LOCKED, &sh->dev[sh->pd_idx].flags)) { 4157 /* alloc page for prexor */ 4158 struct page *p = alloc_page(GFP_NOIO); 4159 4160 if (p) { 4161 dev->orig_page = p; 4162 continue; 4163 } 4164 4165 /* 4166 * alloc_page() failed, try use 4167 * disk_info->extra_page 4168 */ 4169 if (!test_and_set_bit(R5C_EXTRA_PAGE_IN_USE, 4170 &conf->cache_state)) { 4171 r5c_use_extra_page(sh); 4172 break; 4173 } 4174 4175 /* extra_page in use, add to delayed_list */ 4176 set_bit(STRIPE_DELAYED, &sh->state); 4177 s->waiting_extra_page = 1; 4178 return -EAGAIN; 4179 } 4180 } 4181 4182 for (i = disks; i--; ) { 4183 struct r5dev *dev = 
&sh->dev[i]; 4184 if (((dev->towrite && !delay_towrite(conf, dev, s)) || 4185 i == sh->pd_idx || i == sh->qd_idx || 4186 test_bit(R5_InJournal, &dev->flags)) && 4187 !test_bit(R5_LOCKED, &dev->flags) && 4188 !(uptodate_for_rmw(dev) || 4189 test_bit(R5_Wantcompute, &dev->flags)) && 4190 test_bit(R5_Insync, &dev->flags)) { 4191 if (test_bit(STRIPE_PREREAD_ACTIVE, 4192 &sh->state)) { 4193 pr_debug("Read_old block %d for r-m-w\n", 4194 i); 4195 set_bit(R5_LOCKED, &dev->flags); 4196 set_bit(R5_Wantread, &dev->flags); 4197 s->locked++; 4198 } else 4199 set_bit(STRIPE_DELAYED, &sh->state); 4200 } 4201 } 4202 } 4203 if ((rcw < rmw || (rcw == rmw && conf->rmw_level != PARITY_PREFER_RMW)) && rcw > 0) { 4204 /* want reconstruct write, but need to get some data */ 4205 int qread =0; 4206 rcw = 0; 4207 for (i = disks; i--; ) { 4208 struct r5dev *dev = &sh->dev[i]; 4209 if (!test_bit(R5_OVERWRITE, &dev->flags) && 4210 i != sh->pd_idx && i != sh->qd_idx && 4211 !test_bit(R5_LOCKED, &dev->flags) && 4212 !(test_bit(R5_UPTODATE, &dev->flags) || 4213 test_bit(R5_Wantcompute, &dev->flags))) { 4214 rcw++; 4215 if (test_bit(R5_Insync, &dev->flags) && 4216 test_bit(STRIPE_PREREAD_ACTIVE, 4217 &sh->state)) { 4218 pr_debug("Read_old block " 4219 "%d for Reconstruct\n", i); 4220 set_bit(R5_LOCKED, &dev->flags); 4221 set_bit(R5_Wantread, &dev->flags); 4222 s->locked++; 4223 qread++; 4224 } else 4225 set_bit(STRIPE_DELAYED, &sh->state); 4226 } 4227 } 4228 if (rcw && conf->mddev->queue) 4229 blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d", 4230 (unsigned long long)sh->sector, 4231 rcw, qread, test_bit(STRIPE_DELAYED, &sh->state)); 4232 } 4233 4234 if (rcw > disks && rmw > disks && 4235 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 4236 set_bit(STRIPE_DELAYED, &sh->state); 4237 4238 /* now if nothing is locked, and if we have enough data, 4239 * we can start a write request 4240 */ 4241 /* since handle_stripe can be called at any time we need to handle the 4242 * case where a compute block operation has been submitted and then a 4243 * subsequent call wants to start a write request. raid_run_ops only 4244 * handles the case where compute block and reconstruct are requested 4245 * simultaneously. If this is not the case then new writes need to be 4246 * held off until the compute completes. 
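 * (In the test below this means: either a compute was requested in this
 * pass or none is currently running, nothing is locked, one of rmw/rcw
 * needs no further reads, and the stripe is not waiting on a bitmap write.)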
4247 */ 4248 if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && 4249 (s->locked == 0 && (rcw == 0 || rmw == 0) && 4250 !test_bit(STRIPE_BIT_DELAY, &sh->state))) 4251 schedule_reconstruction(sh, s, rcw == 0, 0); 4252 return 0; 4253 } 4254 4255 static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, 4256 struct stripe_head_state *s, int disks) 4257 { 4258 struct r5dev *dev = NULL; 4259 4260 BUG_ON(sh->batch_head); 4261 set_bit(STRIPE_HANDLE, &sh->state); 4262 4263 switch (sh->check_state) { 4264 case check_state_idle: 4265 /* start a new check operation if there are no failures */ 4266 if (s->failed == 0) { 4267 BUG_ON(s->uptodate != disks); 4268 sh->check_state = check_state_run; 4269 set_bit(STRIPE_OP_CHECK, &s->ops_request); 4270 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); 4271 s->uptodate--; 4272 break; 4273 } 4274 dev = &sh->dev[s->failed_num[0]]; 4275 fallthrough; 4276 case check_state_compute_result: 4277 sh->check_state = check_state_idle; 4278 if (!dev) 4279 dev = &sh->dev[sh->pd_idx]; 4280 4281 /* check that a write has not made the stripe insync */ 4282 if (test_bit(STRIPE_INSYNC, &sh->state)) 4283 break; 4284 4285 /* either failed parity check, or recovery is happening */ 4286 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags)); 4287 BUG_ON(s->uptodate != disks); 4288 4289 set_bit(R5_LOCKED, &dev->flags); 4290 s->locked++; 4291 set_bit(R5_Wantwrite, &dev->flags); 4292 4293 clear_bit(STRIPE_DEGRADED, &sh->state); 4294 set_bit(STRIPE_INSYNC, &sh->state); 4295 break; 4296 case check_state_run: 4297 break; /* we will be called again upon completion */ 4298 case check_state_check_result: 4299 sh->check_state = check_state_idle; 4300 4301 /* if a failure occurred during the check operation, leave 4302 * STRIPE_INSYNC not set and let the stripe be handled again 4303 */ 4304 if (s->failed) 4305 break; 4306 4307 /* handle a successful check operation, if parity is correct 4308 * we are done. Otherwise update the mismatch count and repair 4309 * parity if !MD_RECOVERY_CHECK 4310 */ 4311 if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0) 4312 /* parity is correct (on disc, 4313 * not in buffer any more) 4314 */ 4315 set_bit(STRIPE_INSYNC, &sh->state); 4316 else { 4317 atomic64_add(RAID5_STRIPE_SECTORS(conf), &conf->mddev->resync_mismatches); 4318 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) { 4319 /* don't try to repair!! 
*/ 4320 set_bit(STRIPE_INSYNC, &sh->state); 4321 pr_warn_ratelimited("%s: mismatch sector in range " 4322 "%llu-%llu\n", mdname(conf->mddev), 4323 (unsigned long long) sh->sector, 4324 (unsigned long long) sh->sector + 4325 RAID5_STRIPE_SECTORS(conf)); 4326 } else { 4327 sh->check_state = check_state_compute_run; 4328 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 4329 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 4330 set_bit(R5_Wantcompute, 4331 &sh->dev[sh->pd_idx].flags); 4332 sh->ops.target = sh->pd_idx; 4333 sh->ops.target2 = -1; 4334 s->uptodate++; 4335 } 4336 } 4337 break; 4338 case check_state_compute_run: 4339 break; 4340 default: 4341 pr_err("%s: unknown check_state: %d sector: %llu\n", 4342 __func__, sh->check_state, 4343 (unsigned long long) sh->sector); 4344 BUG(); 4345 } 4346 } 4347 4348 static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, 4349 struct stripe_head_state *s, 4350 int disks) 4351 { 4352 int pd_idx = sh->pd_idx; 4353 int qd_idx = sh->qd_idx; 4354 struct r5dev *dev; 4355 4356 BUG_ON(sh->batch_head); 4357 set_bit(STRIPE_HANDLE, &sh->state); 4358 4359 BUG_ON(s->failed > 2); 4360 4361 /* Want to check and possibly repair P and Q. 4362 * However there could be one 'failed' device, in which 4363 * case we can only check one of them, possibly using the 4364 * other to generate missing data 4365 */ 4366 4367 switch (sh->check_state) { 4368 case check_state_idle: 4369 /* start a new check operation if there are < 2 failures */ 4370 if (s->failed == s->q_failed) { 4371 /* The only possible failed device holds Q, so it 4372 * makes sense to check P (If anything else were failed, 4373 * we would have used P to recreate it). 4374 */ 4375 sh->check_state = check_state_run; 4376 } 4377 if (!s->q_failed && s->failed < 2) { 4378 /* Q is not failed, and we didn't use it to generate 4379 * anything, so it makes sense to check it 4380 */ 4381 if (sh->check_state == check_state_run) 4382 sh->check_state = check_state_run_pq; 4383 else 4384 sh->check_state = check_state_run_q; 4385 } 4386 4387 /* discard potentially stale zero_sum_result */ 4388 sh->ops.zero_sum_result = 0; 4389 4390 if (sh->check_state == check_state_run) { 4391 /* async_xor_zero_sum destroys the contents of P */ 4392 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 4393 s->uptodate--; 4394 } 4395 if (sh->check_state >= check_state_run && 4396 sh->check_state <= check_state_run_pq) { 4397 /* async_syndrome_zero_sum preserves P and Q, so 4398 * no need to mark them !uptodate here 4399 */ 4400 set_bit(STRIPE_OP_CHECK, &s->ops_request); 4401 break; 4402 } 4403 4404 /* we have 2-disk failure */ 4405 BUG_ON(s->failed != 2); 4406 fallthrough; 4407 case check_state_compute_result: 4408 sh->check_state = check_state_idle; 4409 4410 /* check that a write has not made the stripe insync */ 4411 if (test_bit(STRIPE_INSYNC, &sh->state)) 4412 break; 4413 4414 /* now write out any block on a failed drive, 4415 * or P or Q if they were recomputed 4416 */ 4417 dev = NULL; 4418 if (s->failed == 2) { 4419 dev = &sh->dev[s->failed_num[1]]; 4420 s->locked++; 4421 set_bit(R5_LOCKED, &dev->flags); 4422 set_bit(R5_Wantwrite, &dev->flags); 4423 } 4424 if (s->failed >= 1) { 4425 dev = &sh->dev[s->failed_num[0]]; 4426 s->locked++; 4427 set_bit(R5_LOCKED, &dev->flags); 4428 set_bit(R5_Wantwrite, &dev->flags); 4429 } 4430 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { 4431 dev = &sh->dev[pd_idx]; 4432 s->locked++; 4433 set_bit(R5_LOCKED, &dev->flags); 4434 set_bit(R5_Wantwrite, &dev->flags); 4435 } 4436 if 
(sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { 4437 dev = &sh->dev[qd_idx]; 4438 s->locked++; 4439 set_bit(R5_LOCKED, &dev->flags); 4440 set_bit(R5_Wantwrite, &dev->flags); 4441 } 4442 if (WARN_ONCE(dev && !test_bit(R5_UPTODATE, &dev->flags), 4443 "%s: disk%td not up to date\n", 4444 mdname(conf->mddev), 4445 dev - (struct r5dev *) &sh->dev)) { 4446 clear_bit(R5_LOCKED, &dev->flags); 4447 clear_bit(R5_Wantwrite, &dev->flags); 4448 s->locked--; 4449 } 4450 clear_bit(STRIPE_DEGRADED, &sh->state); 4451 4452 set_bit(STRIPE_INSYNC, &sh->state); 4453 break; 4454 case check_state_run: 4455 case check_state_run_q: 4456 case check_state_run_pq: 4457 break; /* we will be called again upon completion */ 4458 case check_state_check_result: 4459 sh->check_state = check_state_idle; 4460 4461 /* handle a successful check operation, if parity is correct 4462 * we are done. Otherwise update the mismatch count and repair 4463 * parity if !MD_RECOVERY_CHECK 4464 */ 4465 if (sh->ops.zero_sum_result == 0) { 4466 /* both parities are correct */ 4467 if (!s->failed) 4468 set_bit(STRIPE_INSYNC, &sh->state); 4469 else { 4470 /* in contrast to the raid5 case we can validate 4471 * parity, but still have a failure to write 4472 * back 4473 */ 4474 sh->check_state = check_state_compute_result; 4475 /* Returning at this point means that we may go 4476 * off and bring p and/or q uptodate again so 4477 * we make sure to check zero_sum_result again 4478 * to verify if p or q need writeback 4479 */ 4480 } 4481 } else { 4482 atomic64_add(RAID5_STRIPE_SECTORS(conf), &conf->mddev->resync_mismatches); 4483 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) { 4484 /* don't try to repair!! */ 4485 set_bit(STRIPE_INSYNC, &sh->state); 4486 pr_warn_ratelimited("%s: mismatch sector in range " 4487 "%llu-%llu\n", mdname(conf->mddev), 4488 (unsigned long long) sh->sector, 4489 (unsigned long long) sh->sector + 4490 RAID5_STRIPE_SECTORS(conf)); 4491 } else { 4492 int *target = &sh->ops.target; 4493 4494 sh->ops.target = -1; 4495 sh->ops.target2 = -1; 4496 sh->check_state = check_state_compute_run; 4497 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 4498 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 4499 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { 4500 set_bit(R5_Wantcompute, 4501 &sh->dev[pd_idx].flags); 4502 *target = pd_idx; 4503 target = &sh->ops.target2; 4504 s->uptodate++; 4505 } 4506 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { 4507 set_bit(R5_Wantcompute, 4508 &sh->dev[qd_idx].flags); 4509 *target = qd_idx; 4510 s->uptodate++; 4511 } 4512 } 4513 } 4514 break; 4515 case check_state_compute_run: 4516 break; 4517 default: 4518 pr_warn("%s: unknown check_state: %d sector: %llu\n", 4519 __func__, sh->check_state, 4520 (unsigned long long) sh->sector); 4521 BUG(); 4522 } 4523 } 4524 4525 static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh) 4526 { 4527 int i; 4528 4529 /* We have read all the blocks in this stripe and now we need to 4530 * copy some of them into a target stripe for expand. 
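 * Each such block is copied with async_memcpy() into the matching device
 * of the destination stripe, which is then marked R5_Expanded and R5_UPTODATE.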
4531 */ 4532 struct dma_async_tx_descriptor *tx = NULL; 4533 BUG_ON(sh->batch_head); 4534 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state); 4535 for (i = 0; i < sh->disks; i++) 4536 if (i != sh->pd_idx && i != sh->qd_idx) { 4537 int dd_idx, j; 4538 struct stripe_head *sh2; 4539 struct async_submit_ctl submit; 4540 4541 sector_t bn = raid5_compute_blocknr(sh, i, 1); 4542 sector_t s = raid5_compute_sector(conf, bn, 0, 4543 &dd_idx, NULL); 4544 sh2 = raid5_get_active_stripe(conf, s, 0, 1, 1); 4545 if (sh2 == NULL) 4546 /* so far only the early blocks of this stripe 4547 * have been requested. When later blocks 4548 * get requested, we will try again 4549 */ 4550 continue; 4551 if (!test_bit(STRIPE_EXPANDING, &sh2->state) || 4552 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) { 4553 /* must have already done this block */ 4554 raid5_release_stripe(sh2); 4555 continue; 4556 } 4557 4558 /* place all the copies on one channel */ 4559 init_async_submit(&submit, 0, tx, NULL, NULL, NULL); 4560 tx = async_memcpy(sh2->dev[dd_idx].page, 4561 sh->dev[i].page, sh2->dev[dd_idx].offset, 4562 sh->dev[i].offset, RAID5_STRIPE_SIZE(conf), 4563 &submit); 4564 4565 set_bit(R5_Expanded, &sh2->dev[dd_idx].flags); 4566 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags); 4567 for (j = 0; j < conf->raid_disks; j++) 4568 if (j != sh2->pd_idx && 4569 j != sh2->qd_idx && 4570 !test_bit(R5_Expanded, &sh2->dev[j].flags)) 4571 break; 4572 if (j == conf->raid_disks) { 4573 set_bit(STRIPE_EXPAND_READY, &sh2->state); 4574 set_bit(STRIPE_HANDLE, &sh2->state); 4575 } 4576 raid5_release_stripe(sh2); 4577 4578 } 4579 /* done submitting copies, wait for them to complete */ 4580 async_tx_quiesce(&tx); 4581 } 4582 4583 /* 4584 * handle_stripe - do things to a stripe. 4585 * 4586 * We lock the stripe by setting STRIPE_ACTIVE and then examine the 4587 * state of various bits to see what needs to be done. 
4588 * Possible results: 4589 * return some read requests which now have data 4590 * return some write requests which are safely on storage 4591 * schedule a read on some buffers 4592 * schedule a write of some buffers 4593 * return confirmation of parity correctness 4594 * 4595 */ 4596 4597 static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) 4598 { 4599 struct r5conf *conf = sh->raid_conf; 4600 int disks = sh->disks; 4601 struct r5dev *dev; 4602 int i; 4603 int do_recovery = 0; 4604 4605 memset(s, 0, sizeof(*s)); 4606 4607 s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state) && !sh->batch_head; 4608 s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state) && !sh->batch_head; 4609 s->failed_num[0] = -1; 4610 s->failed_num[1] = -1; 4611 s->log_failed = r5l_log_disk_error(conf); 4612 4613 /* Now to look around and see what can be done */ 4614 rcu_read_lock(); 4615 for (i=disks; i--; ) { 4616 struct md_rdev *rdev; 4617 sector_t first_bad; 4618 int bad_sectors; 4619 int is_bad = 0; 4620 4621 dev = &sh->dev[i]; 4622 4623 pr_debug("check %d: state 0x%lx read %p write %p written %p\n", 4624 i, dev->flags, 4625 dev->toread, dev->towrite, dev->written); 4626 /* maybe we can reply to a read 4627 * 4628 * new wantfill requests are only permitted while 4629 * ops_complete_biofill is guaranteed to be inactive 4630 */ 4631 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread && 4632 !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) 4633 set_bit(R5_Wantfill, &dev->flags); 4634 4635 /* now count some things */ 4636 if (test_bit(R5_LOCKED, &dev->flags)) 4637 s->locked++; 4638 if (test_bit(R5_UPTODATE, &dev->flags)) 4639 s->uptodate++; 4640 if (test_bit(R5_Wantcompute, &dev->flags)) { 4641 s->compute++; 4642 BUG_ON(s->compute > 2); 4643 } 4644 4645 if (test_bit(R5_Wantfill, &dev->flags)) 4646 s->to_fill++; 4647 else if (dev->toread) 4648 s->to_read++; 4649 if (dev->towrite) { 4650 s->to_write++; 4651 if (!test_bit(R5_OVERWRITE, &dev->flags)) 4652 s->non_overwrite++; 4653 } 4654 if (dev->written) 4655 s->written++; 4656 /* Prefer to use the replacement for reads, but only 4657 * if it is recovered enough and has no bad blocks. 
4658 */ 4659 rdev = rcu_dereference(conf->disks[i].replacement); 4660 if (rdev && !test_bit(Faulty, &rdev->flags) && 4661 rdev->recovery_offset >= sh->sector + RAID5_STRIPE_SECTORS(conf) && 4662 !is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf), 4663 &first_bad, &bad_sectors)) 4664 set_bit(R5_ReadRepl, &dev->flags); 4665 else { 4666 if (rdev && !test_bit(Faulty, &rdev->flags)) 4667 set_bit(R5_NeedReplace, &dev->flags); 4668 else 4669 clear_bit(R5_NeedReplace, &dev->flags); 4670 rdev = rcu_dereference(conf->disks[i].rdev); 4671 clear_bit(R5_ReadRepl, &dev->flags); 4672 } 4673 if (rdev && test_bit(Faulty, &rdev->flags)) 4674 rdev = NULL; 4675 if (rdev) { 4676 is_bad = is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf), 4677 &first_bad, &bad_sectors); 4678 if (s->blocked_rdev == NULL 4679 && (test_bit(Blocked, &rdev->flags) 4680 || is_bad < 0)) { 4681 if (is_bad < 0) 4682 set_bit(BlockedBadBlocks, 4683 &rdev->flags); 4684 s->blocked_rdev = rdev; 4685 atomic_inc(&rdev->nr_pending); 4686 } 4687 } 4688 clear_bit(R5_Insync, &dev->flags); 4689 if (!rdev) 4690 /* Not in-sync */; 4691 else if (is_bad) { 4692 /* also not in-sync */ 4693 if (!test_bit(WriteErrorSeen, &rdev->flags) && 4694 test_bit(R5_UPTODATE, &dev->flags)) { 4695 /* treat as in-sync, but with a read error 4696 * which we can now try to correct 4697 */ 4698 set_bit(R5_Insync, &dev->flags); 4699 set_bit(R5_ReadError, &dev->flags); 4700 } 4701 } else if (test_bit(In_sync, &rdev->flags)) 4702 set_bit(R5_Insync, &dev->flags); 4703 else if (sh->sector + RAID5_STRIPE_SECTORS(conf) <= rdev->recovery_offset) 4704 /* in sync if before recovery_offset */ 4705 set_bit(R5_Insync, &dev->flags); 4706 else if (test_bit(R5_UPTODATE, &dev->flags) && 4707 test_bit(R5_Expanded, &dev->flags)) 4708 /* If we've reshaped into here, we assume it is Insync. 4709 * We will shortly update recovery_offset to make 4710 * it official. 
4711 */ 4712 set_bit(R5_Insync, &dev->flags); 4713 4714 if (test_bit(R5_WriteError, &dev->flags)) { 4715 /* This flag does not apply to '.replacement' 4716 * only to .rdev, so make sure to check that*/ 4717 struct md_rdev *rdev2 = rcu_dereference( 4718 conf->disks[i].rdev); 4719 if (rdev2 == rdev) 4720 clear_bit(R5_Insync, &dev->flags); 4721 if (rdev2 && !test_bit(Faulty, &rdev2->flags)) { 4722 s->handle_bad_blocks = 1; 4723 atomic_inc(&rdev2->nr_pending); 4724 } else 4725 clear_bit(R5_WriteError, &dev->flags); 4726 } 4727 if (test_bit(R5_MadeGood, &dev->flags)) { 4728 /* This flag does not apply to '.replacement' 4729 * only to .rdev, so make sure to check that*/ 4730 struct md_rdev *rdev2 = rcu_dereference( 4731 conf->disks[i].rdev); 4732 if (rdev2 && !test_bit(Faulty, &rdev2->flags)) { 4733 s->handle_bad_blocks = 1; 4734 atomic_inc(&rdev2->nr_pending); 4735 } else 4736 clear_bit(R5_MadeGood, &dev->flags); 4737 } 4738 if (test_bit(R5_MadeGoodRepl, &dev->flags)) { 4739 struct md_rdev *rdev2 = rcu_dereference( 4740 conf->disks[i].replacement); 4741 if (rdev2 && !test_bit(Faulty, &rdev2->flags)) { 4742 s->handle_bad_blocks = 1; 4743 atomic_inc(&rdev2->nr_pending); 4744 } else 4745 clear_bit(R5_MadeGoodRepl, &dev->flags); 4746 } 4747 if (!test_bit(R5_Insync, &dev->flags)) { 4748 /* The ReadError flag will just be confusing now */ 4749 clear_bit(R5_ReadError, &dev->flags); 4750 clear_bit(R5_ReWrite, &dev->flags); 4751 } 4752 if (test_bit(R5_ReadError, &dev->flags)) 4753 clear_bit(R5_Insync, &dev->flags); 4754 if (!test_bit(R5_Insync, &dev->flags)) { 4755 if (s->failed < 2) 4756 s->failed_num[s->failed] = i; 4757 s->failed++; 4758 if (rdev && !test_bit(Faulty, &rdev->flags)) 4759 do_recovery = 1; 4760 else if (!rdev) { 4761 rdev = rcu_dereference( 4762 conf->disks[i].replacement); 4763 if (rdev && !test_bit(Faulty, &rdev->flags)) 4764 do_recovery = 1; 4765 } 4766 } 4767 4768 if (test_bit(R5_InJournal, &dev->flags)) 4769 s->injournal++; 4770 if (test_bit(R5_InJournal, &dev->flags) && dev->written) 4771 s->just_cached++; 4772 } 4773 if (test_bit(STRIPE_SYNCING, &sh->state)) { 4774 /* If there is a failed device being replaced, 4775 * we must be recovering. 4776 * else if we are after recovery_cp, we must be syncing 4777 * else if MD_RECOVERY_REQUESTED is set, we also are syncing. 4778 * else we can only be replacing 4779 * sync and recovery both need to read all devices, and so 4780 * use the same flag. 4781 */ 4782 if (do_recovery || 4783 sh->sector >= conf->mddev->recovery_cp || 4784 test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery))) 4785 s->syncing = 1; 4786 else 4787 s->replacing = 1; 4788 } 4789 rcu_read_unlock(); 4790 } 4791 4792 /* 4793 * Return '1' if this is a member of batch, or '0' if it is a lone stripe or 4794 * a head which can now be handled. 
4795 */ 4796 static int clear_batch_ready(struct stripe_head *sh) 4797 { 4798 struct stripe_head *tmp; 4799 if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state)) 4800 return (sh->batch_head && sh->batch_head != sh); 4801 spin_lock(&sh->stripe_lock); 4802 if (!sh->batch_head) { 4803 spin_unlock(&sh->stripe_lock); 4804 return 0; 4805 } 4806 4807 /* 4808 * this stripe could be added to a batch list before we check 4809 * BATCH_READY, skips it 4810 */ 4811 if (sh->batch_head != sh) { 4812 spin_unlock(&sh->stripe_lock); 4813 return 1; 4814 } 4815 spin_lock(&sh->batch_lock); 4816 list_for_each_entry(tmp, &sh->batch_list, batch_list) 4817 clear_bit(STRIPE_BATCH_READY, &tmp->state); 4818 spin_unlock(&sh->batch_lock); 4819 spin_unlock(&sh->stripe_lock); 4820 4821 /* 4822 * BATCH_READY is cleared, no new stripes can be added. 4823 * batch_list can be accessed without lock 4824 */ 4825 return 0; 4826 } 4827 4828 static void break_stripe_batch_list(struct stripe_head *head_sh, 4829 unsigned long handle_flags) 4830 { 4831 struct stripe_head *sh, *next; 4832 int i; 4833 int do_wakeup = 0; 4834 4835 list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) { 4836 4837 list_del_init(&sh->batch_list); 4838 4839 WARN_ONCE(sh->state & ((1 << STRIPE_ACTIVE) | 4840 (1 << STRIPE_SYNCING) | 4841 (1 << STRIPE_REPLACED) | 4842 (1 << STRIPE_DELAYED) | 4843 (1 << STRIPE_BIT_DELAY) | 4844 (1 << STRIPE_FULL_WRITE) | 4845 (1 << STRIPE_BIOFILL_RUN) | 4846 (1 << STRIPE_COMPUTE_RUN) | 4847 (1 << STRIPE_DISCARD) | 4848 (1 << STRIPE_BATCH_READY) | 4849 (1 << STRIPE_BATCH_ERR) | 4850 (1 << STRIPE_BITMAP_PENDING)), 4851 "stripe state: %lx\n", sh->state); 4852 WARN_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) | 4853 (1 << STRIPE_REPLACED)), 4854 "head stripe state: %lx\n", head_sh->state); 4855 4856 set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS | 4857 (1 << STRIPE_PREREAD_ACTIVE) | 4858 (1 << STRIPE_DEGRADED) | 4859 (1 << STRIPE_ON_UNPLUG_LIST)), 4860 head_sh->state & (1 << STRIPE_INSYNC)); 4861 4862 sh->check_state = head_sh->check_state; 4863 sh->reconstruct_state = head_sh->reconstruct_state; 4864 spin_lock_irq(&sh->stripe_lock); 4865 sh->batch_head = NULL; 4866 spin_unlock_irq(&sh->stripe_lock); 4867 for (i = 0; i < sh->disks; i++) { 4868 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 4869 do_wakeup = 1; 4870 sh->dev[i].flags = head_sh->dev[i].flags & 4871 (~((1 << R5_WriteError) | (1 << R5_Overlap))); 4872 } 4873 if (handle_flags == 0 || 4874 sh->state & handle_flags) 4875 set_bit(STRIPE_HANDLE, &sh->state); 4876 raid5_release_stripe(sh); 4877 } 4878 spin_lock_irq(&head_sh->stripe_lock); 4879 head_sh->batch_head = NULL; 4880 spin_unlock_irq(&head_sh->stripe_lock); 4881 for (i = 0; i < head_sh->disks; i++) 4882 if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags)) 4883 do_wakeup = 1; 4884 if (head_sh->state & handle_flags) 4885 set_bit(STRIPE_HANDLE, &head_sh->state); 4886 4887 if (do_wakeup) 4888 wake_up(&head_sh->raid_conf->wait_for_overlap); 4889 } 4890 4891 static void handle_stripe(struct stripe_head *sh) 4892 { 4893 struct stripe_head_state s; 4894 struct r5conf *conf = sh->raid_conf; 4895 int i; 4896 int prexor; 4897 int disks = sh->disks; 4898 struct r5dev *pdev, *qdev; 4899 4900 clear_bit(STRIPE_HANDLE, &sh->state); 4901 4902 /* 4903 * handle_stripe should not continue handle the batched stripe, only 4904 * the head of batch list or lone stripe can continue. Otherwise we 4905 * could see break_stripe_batch_list warns about the STRIPE_ACTIVE 4906 * is set for the batched stripe. 
4907 */ 4908 if (clear_batch_ready(sh)) 4909 return; 4910 4911 if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) { 4912 /* already being handled, ensure it gets handled 4913 * again when current action finishes */ 4914 set_bit(STRIPE_HANDLE, &sh->state); 4915 return; 4916 } 4917 4918 if (test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state)) 4919 break_stripe_batch_list(sh, 0); 4920 4921 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) { 4922 spin_lock(&sh->stripe_lock); 4923 /* 4924 * Cannot process 'sync' concurrently with 'discard'. 4925 * Flush data in r5cache before 'sync'. 4926 */ 4927 if (!test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) && 4928 !test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state) && 4929 !test_bit(STRIPE_DISCARD, &sh->state) && 4930 test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { 4931 set_bit(STRIPE_SYNCING, &sh->state); 4932 clear_bit(STRIPE_INSYNC, &sh->state); 4933 clear_bit(STRIPE_REPLACED, &sh->state); 4934 } 4935 spin_unlock(&sh->stripe_lock); 4936 } 4937 clear_bit(STRIPE_DELAYED, &sh->state); 4938 4939 pr_debug("handling stripe %llu, state=%#lx cnt=%d, " 4940 "pd_idx=%d, qd_idx=%d, check:%d, reconstruct:%d\n", 4941 (unsigned long long)sh->sector, sh->state, 4942 atomic_read(&sh->count), sh->pd_idx, sh->qd_idx, 4943 sh->check_state, sh->reconstruct_state); 4944 4945 analyse_stripe(sh, &s); 4946 4947 if (test_bit(STRIPE_LOG_TRAPPED, &sh->state)) 4948 goto finish; 4949 4950 if (s.handle_bad_blocks || 4951 test_bit(MD_SB_CHANGE_PENDING, &conf->mddev->sb_flags)) { 4952 set_bit(STRIPE_HANDLE, &sh->state); 4953 goto finish; 4954 } 4955 4956 if (unlikely(s.blocked_rdev)) { 4957 if (s.syncing || s.expanding || s.expanded || 4958 s.replacing || s.to_write || s.written) { 4959 set_bit(STRIPE_HANDLE, &sh->state); 4960 goto finish; 4961 } 4962 /* There is nothing for the blocked_rdev to block */ 4963 rdev_dec_pending(s.blocked_rdev, conf->mddev); 4964 s.blocked_rdev = NULL; 4965 } 4966 4967 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { 4968 set_bit(STRIPE_OP_BIOFILL, &s.ops_request); 4969 set_bit(STRIPE_BIOFILL_RUN, &sh->state); 4970 } 4971 4972 pr_debug("locked=%d uptodate=%d to_read=%d" 4973 " to_write=%d failed=%d failed_num=%d,%d\n", 4974 s.locked, s.uptodate, s.to_read, s.to_write, s.failed, 4975 s.failed_num[0], s.failed_num[1]); 4976 /* 4977 * check if the array has lost more than max_degraded devices and, 4978 * if so, some requests might need to be failed.
4979 * 4980 * When journal device failed (log_failed), we will only process 4981 * the stripe if there is data need write to raid disks 4982 */ 4983 if (s.failed > conf->max_degraded || 4984 (s.log_failed && s.injournal == 0)) { 4985 sh->check_state = 0; 4986 sh->reconstruct_state = 0; 4987 break_stripe_batch_list(sh, 0); 4988 if (s.to_read+s.to_write+s.written) 4989 handle_failed_stripe(conf, sh, &s, disks); 4990 if (s.syncing + s.replacing) 4991 handle_failed_sync(conf, sh, &s); 4992 } 4993 4994 /* Now we check to see if any write operations have recently 4995 * completed 4996 */ 4997 prexor = 0; 4998 if (sh->reconstruct_state == reconstruct_state_prexor_drain_result) 4999 prexor = 1; 5000 if (sh->reconstruct_state == reconstruct_state_drain_result || 5001 sh->reconstruct_state == reconstruct_state_prexor_drain_result) { 5002 sh->reconstruct_state = reconstruct_state_idle; 5003 5004 /* All the 'written' buffers and the parity block are ready to 5005 * be written back to disk 5006 */ 5007 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) && 5008 !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)); 5009 BUG_ON(sh->qd_idx >= 0 && 5010 !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags) && 5011 !test_bit(R5_Discard, &sh->dev[sh->qd_idx].flags)); 5012 for (i = disks; i--; ) { 5013 struct r5dev *dev = &sh->dev[i]; 5014 if (test_bit(R5_LOCKED, &dev->flags) && 5015 (i == sh->pd_idx || i == sh->qd_idx || 5016 dev->written || test_bit(R5_InJournal, 5017 &dev->flags))) { 5018 pr_debug("Writing block %d\n", i); 5019 set_bit(R5_Wantwrite, &dev->flags); 5020 if (prexor) 5021 continue; 5022 if (s.failed > 1) 5023 continue; 5024 if (!test_bit(R5_Insync, &dev->flags) || 5025 ((i == sh->pd_idx || i == sh->qd_idx) && 5026 s.failed == 0)) 5027 set_bit(STRIPE_INSYNC, &sh->state); 5028 } 5029 } 5030 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 5031 s.dec_preread_active = 1; 5032 } 5033 5034 /* 5035 * might be able to return some write requests if the parity blocks 5036 * are safe, or on a failed drive 5037 */ 5038 pdev = &sh->dev[sh->pd_idx]; 5039 s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx) 5040 || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx); 5041 qdev = &sh->dev[sh->qd_idx]; 5042 s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx) 5043 || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx) 5044 || conf->level < 6; 5045 5046 if (s.written && 5047 (s.p_failed || ((test_bit(R5_Insync, &pdev->flags) 5048 && !test_bit(R5_LOCKED, &pdev->flags) 5049 && (test_bit(R5_UPTODATE, &pdev->flags) || 5050 test_bit(R5_Discard, &pdev->flags))))) && 5051 (s.q_failed || ((test_bit(R5_Insync, &qdev->flags) 5052 && !test_bit(R5_LOCKED, &qdev->flags) 5053 && (test_bit(R5_UPTODATE, &qdev->flags) || 5054 test_bit(R5_Discard, &qdev->flags)))))) 5055 handle_stripe_clean_event(conf, sh, disks); 5056 5057 if (s.just_cached) 5058 r5c_handle_cached_data_endio(conf, sh, disks); 5059 log_stripe_write_finished(sh); 5060 5061 /* Now we might consider reading some blocks, either to check/generate 5062 * parity, or to satisfy requests 5063 * or to load a block that is being partially written. 5064 */ 5065 if (s.to_read || s.non_overwrite 5066 || (s.to_write && s.failed) 5067 || (s.syncing && (s.uptodate + s.compute < disks)) 5068 || s.replacing 5069 || s.expanding) 5070 handle_stripe_fill(sh, &s, disks); 5071 5072 /* 5073 * When the stripe finishes full journal write cycle (write to journal 5074 * and raid disk), this is the clean up procedure so it is ready for 5075 * next operation. 
5076 */ 5077 r5c_finish_stripe_write_out(conf, sh, &s); 5078 5079 /* 5080 * Now to consider new write requests, cache write back and what else, 5081 * if anything should be read. We do not handle new writes when: 5082 * 1/ A 'write' operation (copy+xor) is already in flight. 5083 * 2/ A 'check' operation is in flight, as it may clobber the parity 5084 * block. 5085 * 3/ A r5c cache log write is in flight. 5086 */ 5087 5088 if (!sh->reconstruct_state && !sh->check_state && !sh->log_io) { 5089 if (!r5c_is_writeback(conf->log)) { 5090 if (s.to_write) 5091 handle_stripe_dirtying(conf, sh, &s, disks); 5092 } else { /* write back cache */ 5093 int ret = 0; 5094 5095 /* First, try handle writes in caching phase */ 5096 if (s.to_write) 5097 ret = r5c_try_caching_write(conf, sh, &s, 5098 disks); 5099 /* 5100 * If caching phase failed: ret == -EAGAIN 5101 * OR 5102 * stripe under reclaim: !caching && injournal 5103 * 5104 * fall back to handle_stripe_dirtying() 5105 */ 5106 if (ret == -EAGAIN || 5107 /* stripe under reclaim: !caching && injournal */ 5108 (!test_bit(STRIPE_R5C_CACHING, &sh->state) && 5109 s.injournal > 0)) { 5110 ret = handle_stripe_dirtying(conf, sh, &s, 5111 disks); 5112 if (ret == -EAGAIN) 5113 goto finish; 5114 } 5115 } 5116 } 5117 5118 /* maybe we need to check and possibly fix the parity for this stripe 5119 * Any reads will already have been scheduled, so we just see if enough 5120 * data is available. The parity check is held off while parity 5121 * dependent operations are in flight. 5122 */ 5123 if (sh->check_state || 5124 (s.syncing && s.locked == 0 && 5125 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && 5126 !test_bit(STRIPE_INSYNC, &sh->state))) { 5127 if (conf->level == 6) 5128 handle_parity_checks6(conf, sh, &s, disks); 5129 else 5130 handle_parity_checks5(conf, sh, &s, disks); 5131 } 5132 5133 if ((s.replacing || s.syncing) && s.locked == 0 5134 && !test_bit(STRIPE_COMPUTE_RUN, &sh->state) 5135 && !test_bit(STRIPE_REPLACED, &sh->state)) { 5136 /* Write out to replacement devices where possible */ 5137 for (i = 0; i < conf->raid_disks; i++) 5138 if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) { 5139 WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags)); 5140 set_bit(R5_WantReplace, &sh->dev[i].flags); 5141 set_bit(R5_LOCKED, &sh->dev[i].flags); 5142 s.locked++; 5143 } 5144 if (s.replacing) 5145 set_bit(STRIPE_INSYNC, &sh->state); 5146 set_bit(STRIPE_REPLACED, &sh->state); 5147 } 5148 if ((s.syncing || s.replacing) && s.locked == 0 && 5149 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && 5150 test_bit(STRIPE_INSYNC, &sh->state)) { 5151 md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), 1); 5152 clear_bit(STRIPE_SYNCING, &sh->state); 5153 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) 5154 wake_up(&conf->wait_for_overlap); 5155 } 5156 5157 /* If the failed drives are just a ReadError, then we might need 5158 * to progress the repair/check process 5159 */ 5160 if (s.failed <= conf->max_degraded && !conf->mddev->ro) 5161 for (i = 0; i < s.failed; i++) { 5162 struct r5dev *dev = &sh->dev[s.failed_num[i]]; 5163 if (test_bit(R5_ReadError, &dev->flags) 5164 && !test_bit(R5_LOCKED, &dev->flags) 5165 && test_bit(R5_UPTODATE, &dev->flags) 5166 ) { 5167 if (!test_bit(R5_ReWrite, &dev->flags)) { 5168 set_bit(R5_Wantwrite, &dev->flags); 5169 set_bit(R5_ReWrite, &dev->flags); 5170 } else 5171 /* let's read it back */ 5172 set_bit(R5_Wantread, &dev->flags); 5173 set_bit(R5_LOCKED, &dev->flags); 5174 s.locked++; 5175 } 5176 } 5177 5178 /* Finish reconstruct operations 
initiated by the expansion process */ 5179 if (sh->reconstruct_state == reconstruct_state_result) { 5180 struct stripe_head *sh_src 5181 = raid5_get_active_stripe(conf, sh->sector, 1, 1, 1); 5182 if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) { 5183 /* sh cannot be written until sh_src has been read. 5184 * so arrange for sh to be delayed a little 5185 */ 5186 set_bit(STRIPE_DELAYED, &sh->state); 5187 set_bit(STRIPE_HANDLE, &sh->state); 5188 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, 5189 &sh_src->state)) 5190 atomic_inc(&conf->preread_active_stripes); 5191 raid5_release_stripe(sh_src); 5192 goto finish; 5193 } 5194 if (sh_src) 5195 raid5_release_stripe(sh_src); 5196 5197 sh->reconstruct_state = reconstruct_state_idle; 5198 clear_bit(STRIPE_EXPANDING, &sh->state); 5199 for (i = conf->raid_disks; i--; ) { 5200 set_bit(R5_Wantwrite, &sh->dev[i].flags); 5201 set_bit(R5_LOCKED, &sh->dev[i].flags); 5202 s.locked++; 5203 } 5204 } 5205 5206 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && 5207 !sh->reconstruct_state) { 5208 /* Need to write out all blocks after computing parity */ 5209 sh->disks = conf->raid_disks; 5210 stripe_set_idx(sh->sector, conf, 0, sh); 5211 schedule_reconstruction(sh, &s, 1, 1); 5212 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { 5213 clear_bit(STRIPE_EXPAND_READY, &sh->state); 5214 atomic_dec(&conf->reshape_stripes); 5215 wake_up(&conf->wait_for_overlap); 5216 md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), 1); 5217 } 5218 5219 if (s.expanding && s.locked == 0 && 5220 !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) 5221 handle_stripe_expansion(conf, sh); 5222 5223 finish: 5224 /* wait for this device to become unblocked */ 5225 if (unlikely(s.blocked_rdev)) { 5226 if (conf->mddev->external) 5227 md_wait_for_blocked_rdev(s.blocked_rdev, 5228 conf->mddev); 5229 else 5230 /* Internal metadata will immediately 5231 * be written by raid5d, so we don't 5232 * need to wait here. 5233 */ 5234 rdev_dec_pending(s.blocked_rdev, 5235 conf->mddev); 5236 } 5237 5238 if (s.handle_bad_blocks) 5239 for (i = disks; i--; ) { 5240 struct md_rdev *rdev; 5241 struct r5dev *dev = &sh->dev[i]; 5242 if (test_and_clear_bit(R5_WriteError, &dev->flags)) { 5243 /* We own a safe reference to the rdev */ 5244 rdev = rdev_pend_deref(conf->disks[i].rdev); 5245 if (!rdev_set_badblocks(rdev, sh->sector, 5246 RAID5_STRIPE_SECTORS(conf), 0)) 5247 md_error(conf->mddev, rdev); 5248 rdev_dec_pending(rdev, conf->mddev); 5249 } 5250 if (test_and_clear_bit(R5_MadeGood, &dev->flags)) { 5251 rdev = rdev_pend_deref(conf->disks[i].rdev); 5252 rdev_clear_badblocks(rdev, sh->sector, 5253 RAID5_STRIPE_SECTORS(conf), 0); 5254 rdev_dec_pending(rdev, conf->mddev); 5255 } 5256 if (test_and_clear_bit(R5_MadeGoodRepl, &dev->flags)) { 5257 rdev = rdev_pend_deref(conf->disks[i].replacement); 5258 if (!rdev) 5259 /* rdev have been moved down */ 5260 rdev = rdev_pend_deref(conf->disks[i].rdev); 5261 rdev_clear_badblocks(rdev, sh->sector, 5262 RAID5_STRIPE_SECTORS(conf), 0); 5263 rdev_dec_pending(rdev, conf->mddev); 5264 } 5265 } 5266 5267 if (s.ops_request) 5268 raid_run_ops(sh, s.ops_request); 5269 5270 ops_run_io(sh, &s); 5271 5272 if (s.dec_preread_active) { 5273 /* We delay this until after ops_run_io so that if make_request 5274 * is waiting on a flush, it won't continue until the writes 5275 * have actually been submitted. 
5276 */ 5277 atomic_dec(&conf->preread_active_stripes); 5278 if (atomic_read(&conf->preread_active_stripes) < 5279 IO_THRESHOLD) 5280 md_wakeup_thread(conf->mddev->thread); 5281 } 5282 5283 clear_bit_unlock(STRIPE_ACTIVE, &sh->state); 5284 } 5285 5286 static void raid5_activate_delayed(struct r5conf *conf) 5287 __must_hold(&conf->device_lock) 5288 { 5289 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { 5290 while (!list_empty(&conf->delayed_list)) { 5291 struct list_head *l = conf->delayed_list.next; 5292 struct stripe_head *sh; 5293 sh = list_entry(l, struct stripe_head, lru); 5294 list_del_init(l); 5295 clear_bit(STRIPE_DELAYED, &sh->state); 5296 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 5297 atomic_inc(&conf->preread_active_stripes); 5298 list_add_tail(&sh->lru, &conf->hold_list); 5299 raid5_wakeup_stripe_thread(sh); 5300 } 5301 } 5302 } 5303 5304 static void activate_bit_delay(struct r5conf *conf, 5305 struct list_head *temp_inactive_list) 5306 __must_hold(&conf->device_lock) 5307 { 5308 struct list_head head; 5309 list_add(&head, &conf->bitmap_list); 5310 list_del_init(&conf->bitmap_list); 5311 while (!list_empty(&head)) { 5312 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); 5313 int hash; 5314 list_del_init(&sh->lru); 5315 atomic_inc(&sh->count); 5316 hash = sh->hash_lock_index; 5317 __release_stripe(conf, sh, &temp_inactive_list[hash]); 5318 } 5319 } 5320 5321 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) 5322 { 5323 struct r5conf *conf = mddev->private; 5324 sector_t sector = bio->bi_iter.bi_sector; 5325 unsigned int chunk_sectors; 5326 unsigned int bio_sectors = bio_sectors(bio); 5327 5328 chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors); 5329 return chunk_sectors >= 5330 ((sector & (chunk_sectors - 1)) + bio_sectors); 5331 } 5332 5333 /* 5334 * add bio to the retry LIFO ( in O(1) ... we are in interrupt ) 5335 * later sampled by raid5d. 5336 */ 5337 static void add_bio_to_retry(struct bio *bi,struct r5conf *conf) 5338 { 5339 unsigned long flags; 5340 5341 spin_lock_irqsave(&conf->device_lock, flags); 5342 5343 bi->bi_next = conf->retry_read_aligned_list; 5344 conf->retry_read_aligned_list = bi; 5345 5346 spin_unlock_irqrestore(&conf->device_lock, flags); 5347 md_wakeup_thread(conf->mddev->thread); 5348 } 5349 5350 static struct bio *remove_bio_from_retry(struct r5conf *conf, 5351 unsigned int *offset) 5352 { 5353 struct bio *bi; 5354 5355 bi = conf->retry_read_aligned; 5356 if (bi) { 5357 *offset = conf->retry_read_offset; 5358 conf->retry_read_aligned = NULL; 5359 return bi; 5360 } 5361 bi = conf->retry_read_aligned_list; 5362 if(bi) { 5363 conf->retry_read_aligned_list = bi->bi_next; 5364 bi->bi_next = NULL; 5365 *offset = 0; 5366 } 5367 5368 return bi; 5369 } 5370 5371 /* 5372 * The "raid5_align_endio" should check if the read succeeded and if it 5373 * did, call bio_endio on the original bio (having bio_put the new bio 5374 * first). 5375 * If the read failed.. 
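 * we put the original bio back on the retry list (add_bio_to_retry()) so
 * that raid5d can resubmit it through the normal stripe path.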
5376 */ 5377 static void raid5_align_endio(struct bio *bi) 5378 { 5379 struct md_io_acct *md_io_acct = bi->bi_private; 5380 struct bio *raid_bi = md_io_acct->orig_bio; 5381 struct mddev *mddev; 5382 struct r5conf *conf; 5383 struct md_rdev *rdev; 5384 blk_status_t error = bi->bi_status; 5385 unsigned long start_time = md_io_acct->start_time; 5386 5387 bio_put(bi); 5388 5389 rdev = (void*)raid_bi->bi_next; 5390 raid_bi->bi_next = NULL; 5391 mddev = rdev->mddev; 5392 conf = mddev->private; 5393 5394 rdev_dec_pending(rdev, conf->mddev); 5395 5396 if (!error) { 5397 if (blk_queue_io_stat(raid_bi->bi_bdev->bd_disk->queue)) 5398 bio_end_io_acct(raid_bi, start_time); 5399 bio_endio(raid_bi); 5400 if (atomic_dec_and_test(&conf->active_aligned_reads)) 5401 wake_up(&conf->wait_for_quiescent); 5402 return; 5403 } 5404 5405 pr_debug("raid5_align_endio : io error...handing IO for a retry\n"); 5406 5407 add_bio_to_retry(raid_bi, conf); 5408 } 5409 5410 static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio) 5411 { 5412 struct r5conf *conf = mddev->private; 5413 struct bio *align_bio; 5414 struct md_rdev *rdev; 5415 sector_t sector, end_sector, first_bad; 5416 int bad_sectors, dd_idx; 5417 struct md_io_acct *md_io_acct; 5418 bool did_inc; 5419 5420 if (!in_chunk_boundary(mddev, raid_bio)) { 5421 pr_debug("%s: non aligned\n", __func__); 5422 return 0; 5423 } 5424 5425 sector = raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, 0, 5426 &dd_idx, NULL); 5427 end_sector = bio_end_sector(raid_bio); 5428 5429 rcu_read_lock(); 5430 if (r5c_big_stripe_cached(conf, sector)) 5431 goto out_rcu_unlock; 5432 5433 rdev = rcu_dereference(conf->disks[dd_idx].replacement); 5434 if (!rdev || test_bit(Faulty, &rdev->flags) || 5435 rdev->recovery_offset < end_sector) { 5436 rdev = rcu_dereference(conf->disks[dd_idx].rdev); 5437 if (!rdev) 5438 goto out_rcu_unlock; 5439 if (test_bit(Faulty, &rdev->flags) || 5440 !(test_bit(In_sync, &rdev->flags) || 5441 rdev->recovery_offset >= end_sector)) 5442 goto out_rcu_unlock; 5443 } 5444 5445 atomic_inc(&rdev->nr_pending); 5446 rcu_read_unlock(); 5447 5448 if (is_badblock(rdev, sector, bio_sectors(raid_bio), &first_bad, 5449 &bad_sectors)) { 5450 bio_put(raid_bio); 5451 rdev_dec_pending(rdev, mddev); 5452 return 0; 5453 } 5454 5455 align_bio = bio_alloc_clone(rdev->bdev, raid_bio, GFP_NOIO, 5456 &mddev->io_acct_set); 5457 md_io_acct = container_of(align_bio, struct md_io_acct, bio_clone); 5458 raid_bio->bi_next = (void *)rdev; 5459 if (blk_queue_io_stat(raid_bio->bi_bdev->bd_disk->queue)) 5460 md_io_acct->start_time = bio_start_io_acct(raid_bio); 5461 md_io_acct->orig_bio = raid_bio; 5462 5463 align_bio->bi_end_io = raid5_align_endio; 5464 align_bio->bi_private = md_io_acct; 5465 align_bio->bi_iter.bi_sector = sector; 5466 5467 /* No reshape active, so we can trust rdev->data_offset */ 5468 align_bio->bi_iter.bi_sector += rdev->data_offset; 5469 5470 did_inc = false; 5471 if (conf->quiesce == 0) { 5472 atomic_inc(&conf->active_aligned_reads); 5473 did_inc = true; 5474 } 5475 /* need a memory barrier to detect the race with raid5_quiesce() */ 5476 if (!did_inc || smp_load_acquire(&conf->quiesce) != 0) { 5477 /* quiesce is in progress, so we need to undo io activation and wait 5478 * for it to finish 5479 */ 5480 if (did_inc && atomic_dec_and_test(&conf->active_aligned_reads)) 5481 wake_up(&conf->wait_for_quiescent); 5482 spin_lock_irq(&conf->device_lock); 5483 wait_event_lock_irq(conf->wait_for_quiescent, conf->quiesce == 0, 5484 conf->device_lock); 5485 
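/* Quiesce has finished; take our active_aligned_reads reference while still holding device_lock. */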
atomic_inc(&conf->active_aligned_reads); 5486 spin_unlock_irq(&conf->device_lock); 5487 } 5488 5489 if (mddev->gendisk) 5490 trace_block_bio_remap(align_bio, disk_devt(mddev->gendisk), 5491 raid_bio->bi_iter.bi_sector); 5492 submit_bio_noacct(align_bio); 5493 return 1; 5494 5495 out_rcu_unlock: 5496 rcu_read_unlock(); 5497 return 0; 5498 } 5499 5500 static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio) 5501 { 5502 struct bio *split; 5503 sector_t sector = raid_bio->bi_iter.bi_sector; 5504 unsigned chunk_sects = mddev->chunk_sectors; 5505 unsigned sectors = chunk_sects - (sector & (chunk_sects-1)); 5506 5507 if (sectors < bio_sectors(raid_bio)) { 5508 struct r5conf *conf = mddev->private; 5509 split = bio_split(raid_bio, sectors, GFP_NOIO, &conf->bio_split); 5510 bio_chain(split, raid_bio); 5511 submit_bio_noacct(raid_bio); 5512 raid_bio = split; 5513 } 5514 5515 if (!raid5_read_one_chunk(mddev, raid_bio)) 5516 return raid_bio; 5517 5518 return NULL; 5519 } 5520 5521 /* __get_priority_stripe - get the next stripe to process 5522 * 5523 * Full stripe writes are allowed to pass preread active stripes up until 5524 * the bypass_threshold is exceeded. In general the bypass_count 5525 * increments when the handle_list is handled before the hold_list; however, it 5526 * will not be incremented when STRIPE_IO_STARTED is sampled set signifying a 5527 * stripe with in flight i/o. The bypass_count will be reset when the 5528 * head of the hold_list has changed, i.e. the head was promoted to the 5529 * handle_list. 5530 */ 5531 static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group) 5532 __must_hold(&conf->device_lock) 5533 { 5534 struct stripe_head *sh, *tmp; 5535 struct list_head *handle_list = NULL; 5536 struct r5worker_group *wg; 5537 bool second_try = !r5c_is_writeback(conf->log) && 5538 !r5l_log_disk_error(conf); 5539 bool try_loprio = test_bit(R5C_LOG_TIGHT, &conf->cache_state) || 5540 r5l_log_disk_error(conf); 5541 5542 again: 5543 wg = NULL; 5544 sh = NULL; 5545 if (conf->worker_cnt_per_group == 0) { 5546 handle_list = try_loprio ? &conf->loprio_list : 5547 &conf->handle_list; 5548 } else if (group != ANY_GROUP) { 5549 handle_list = try_loprio ? &conf->worker_groups[group].loprio_list : 5550 &conf->worker_groups[group].handle_list; 5551 wg = &conf->worker_groups[group]; 5552 } else { 5553 int i; 5554 for (i = 0; i < conf->group_cnt; i++) { 5555 handle_list = try_loprio ? &conf->worker_groups[i].loprio_list : 5556 &conf->worker_groups[i].handle_list; 5557 wg = &conf->worker_groups[i]; 5558 if (!list_empty(handle_list)) 5559 break; 5560 } 5561 } 5562 5563 pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n", 5564 __func__, 5565 list_empty(handle_list) ? "empty" : "busy", 5566 list_empty(&conf->hold_list) ? 
"empty" : "busy", 5567 atomic_read(&conf->pending_full_writes), conf->bypass_count); 5568 5569 if (!list_empty(handle_list)) { 5570 sh = list_entry(handle_list->next, typeof(*sh), lru); 5571 5572 if (list_empty(&conf->hold_list)) 5573 conf->bypass_count = 0; 5574 else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) { 5575 if (conf->hold_list.next == conf->last_hold) 5576 conf->bypass_count++; 5577 else { 5578 conf->last_hold = conf->hold_list.next; 5579 conf->bypass_count -= conf->bypass_threshold; 5580 if (conf->bypass_count < 0) 5581 conf->bypass_count = 0; 5582 } 5583 } 5584 } else if (!list_empty(&conf->hold_list) && 5585 ((conf->bypass_threshold && 5586 conf->bypass_count > conf->bypass_threshold) || 5587 atomic_read(&conf->pending_full_writes) == 0)) { 5588 5589 list_for_each_entry(tmp, &conf->hold_list, lru) { 5590 if (conf->worker_cnt_per_group == 0 || 5591 group == ANY_GROUP || 5592 !cpu_online(tmp->cpu) || 5593 cpu_to_group(tmp->cpu) == group) { 5594 sh = tmp; 5595 break; 5596 } 5597 } 5598 5599 if (sh) { 5600 conf->bypass_count -= conf->bypass_threshold; 5601 if (conf->bypass_count < 0) 5602 conf->bypass_count = 0; 5603 } 5604 wg = NULL; 5605 } 5606 5607 if (!sh) { 5608 if (second_try) 5609 return NULL; 5610 second_try = true; 5611 try_loprio = !try_loprio; 5612 goto again; 5613 } 5614 5615 if (wg) { 5616 wg->stripes_cnt--; 5617 sh->group = NULL; 5618 } 5619 list_del_init(&sh->lru); 5620 BUG_ON(atomic_inc_return(&sh->count) != 1); 5621 return sh; 5622 } 5623 5624 struct raid5_plug_cb { 5625 struct blk_plug_cb cb; 5626 struct list_head list; 5627 struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS]; 5628 }; 5629 5630 static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule) 5631 { 5632 struct raid5_plug_cb *cb = container_of( 5633 blk_cb, struct raid5_plug_cb, cb); 5634 struct stripe_head *sh; 5635 struct mddev *mddev = cb->cb.data; 5636 struct r5conf *conf = mddev->private; 5637 int cnt = 0; 5638 int hash; 5639 5640 if (cb->list.next && !list_empty(&cb->list)) { 5641 spin_lock_irq(&conf->device_lock); 5642 while (!list_empty(&cb->list)) { 5643 sh = list_first_entry(&cb->list, struct stripe_head, lru); 5644 list_del_init(&sh->lru); 5645 /* 5646 * avoid race release_stripe_plug() sees 5647 * STRIPE_ON_UNPLUG_LIST clear but the stripe 5648 * is still in our list 5649 */ 5650 smp_mb__before_atomic(); 5651 clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state); 5652 /* 5653 * STRIPE_ON_RELEASE_LIST could be set here. 
In that 5654 * case, the count is always > 1 here 5655 */ 5656 hash = sh->hash_lock_index; 5657 __release_stripe(conf, sh, &cb->temp_inactive_list[hash]); 5658 cnt++; 5659 } 5660 spin_unlock_irq(&conf->device_lock); 5661 } 5662 release_inactive_stripe_list(conf, cb->temp_inactive_list, 5663 NR_STRIPE_HASH_LOCKS); 5664 if (mddev->queue) 5665 trace_block_unplug(mddev->queue, cnt, !from_schedule); 5666 kfree(cb); 5667 } 5668 5669 static void release_stripe_plug(struct mddev *mddev, 5670 struct stripe_head *sh) 5671 { 5672 struct blk_plug_cb *blk_cb = blk_check_plugged( 5673 raid5_unplug, mddev, 5674 sizeof(struct raid5_plug_cb)); 5675 struct raid5_plug_cb *cb; 5676 5677 if (!blk_cb) { 5678 raid5_release_stripe(sh); 5679 return; 5680 } 5681 5682 cb = container_of(blk_cb, struct raid5_plug_cb, cb); 5683 5684 if (cb->list.next == NULL) { 5685 int i; 5686 INIT_LIST_HEAD(&cb->list); 5687 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) 5688 INIT_LIST_HEAD(cb->temp_inactive_list + i); 5689 } 5690 5691 if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)) 5692 list_add_tail(&sh->lru, &cb->list); 5693 else 5694 raid5_release_stripe(sh); 5695 } 5696 5697 static void make_discard_request(struct mddev *mddev, struct bio *bi) 5698 { 5699 struct r5conf *conf = mddev->private; 5700 sector_t logical_sector, last_sector; 5701 struct stripe_head *sh; 5702 int stripe_sectors; 5703 5704 /* We need to handle this when io_uring supports discard/trim */ 5705 if (WARN_ON_ONCE(bi->bi_opf & REQ_NOWAIT)) 5706 return; 5707 5708 if (mddev->reshape_position != MaxSector) 5709 /* Skip discard while reshape is happening */ 5710 return; 5711 5712 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1); 5713 last_sector = bio_end_sector(bi); 5714 5715 bi->bi_next = NULL; 5716 5717 stripe_sectors = conf->chunk_sectors * 5718 (conf->raid_disks - conf->max_degraded); 5719 logical_sector = DIV_ROUND_UP_SECTOR_T(logical_sector, 5720 stripe_sectors); 5721 sector_div(last_sector, stripe_sectors); 5722 5723 logical_sector *= conf->chunk_sectors; 5724 last_sector *= conf->chunk_sectors; 5725 5726 for (; logical_sector < last_sector; 5727 logical_sector += RAID5_STRIPE_SECTORS(conf)) { 5728 DEFINE_WAIT(w); 5729 int d; 5730 again: 5731 sh = raid5_get_active_stripe(conf, logical_sector, 0, 0, 0); 5732 prepare_to_wait(&conf->wait_for_overlap, &w, 5733 TASK_UNINTERRUPTIBLE); 5734 set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); 5735 if (test_bit(STRIPE_SYNCING, &sh->state)) { 5736 raid5_release_stripe(sh); 5737 schedule(); 5738 goto again; 5739 } 5740 clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); 5741 spin_lock_irq(&sh->stripe_lock); 5742 for (d = 0; d < conf->raid_disks; d++) { 5743 if (d == sh->pd_idx || d == sh->qd_idx) 5744 continue; 5745 if (sh->dev[d].towrite || sh->dev[d].toread) { 5746 set_bit(R5_Overlap, &sh->dev[d].flags); 5747 spin_unlock_irq(&sh->stripe_lock); 5748 raid5_release_stripe(sh); 5749 schedule(); 5750 goto again; 5751 } 5752 } 5753 set_bit(STRIPE_DISCARD, &sh->state); 5754 finish_wait(&conf->wait_for_overlap, &w); 5755 sh->overwrite_disks = 0; 5756 for (d = 0; d < conf->raid_disks; d++) { 5757 if (d == sh->pd_idx || d == sh->qd_idx) 5758 continue; 5759 sh->dev[d].towrite = bi; 5760 set_bit(R5_OVERWRITE, &sh->dev[d].flags); 5761 bio_inc_remaining(bi); 5762 md_write_inc(mddev, bi); 5763 sh->overwrite_disks++; 5764 } 5765 spin_unlock_irq(&sh->stripe_lock); 5766 if (conf->mddev->bitmap) { 5767 for (d = 0; 5768 d < conf->raid_disks - conf->max_degraded; 5769 d++) 5770 
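/* account one in-flight bitmap write per data disk in this stripe */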
md_bitmap_startwrite(mddev->bitmap, 5771 sh->sector, 5772 RAID5_STRIPE_SECTORS(conf), 5773 0); 5774 sh->bm_seq = conf->seq_flush + 1; 5775 set_bit(STRIPE_BIT_DELAY, &sh->state); 5776 } 5777 5778 set_bit(STRIPE_HANDLE, &sh->state); 5779 clear_bit(STRIPE_DELAYED, &sh->state); 5780 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 5781 atomic_inc(&conf->preread_active_stripes); 5782 release_stripe_plug(mddev, sh); 5783 } 5784 5785 bio_endio(bi); 5786 } 5787 5788 static bool raid5_make_request(struct mddev *mddev, struct bio * bi) 5789 { 5790 struct r5conf *conf = mddev->private; 5791 int dd_idx; 5792 sector_t new_sector; 5793 sector_t logical_sector, last_sector; 5794 struct stripe_head *sh; 5795 const int rw = bio_data_dir(bi); 5796 DEFINE_WAIT(w); 5797 bool do_prepare; 5798 bool do_flush = false; 5799 5800 if (unlikely(bi->bi_opf & REQ_PREFLUSH)) { 5801 int ret = log_handle_flush_request(conf, bi); 5802 5803 if (ret == 0) 5804 return true; 5805 if (ret == -ENODEV) { 5806 if (md_flush_request(mddev, bi)) 5807 return true; 5808 } 5809 /* ret == -EAGAIN, fallback */ 5810 /* 5811 * if r5l_handle_flush_request() didn't clear REQ_PREFLUSH, 5812 * we need to flush journal device 5813 */ 5814 do_flush = bi->bi_opf & REQ_PREFLUSH; 5815 } 5816 5817 if (!md_write_start(mddev, bi)) 5818 return false; 5819 /* 5820 * If array is degraded, better not do chunk aligned read because 5821 * later we might have to read it again in order to reconstruct 5822 * data on failed drives. 5823 */ 5824 if (rw == READ && mddev->degraded == 0 && 5825 mddev->reshape_position == MaxSector) { 5826 bi = chunk_aligned_read(mddev, bi); 5827 if (!bi) 5828 return true; 5829 } 5830 5831 if (unlikely(bio_op(bi) == REQ_OP_DISCARD)) { 5832 make_discard_request(mddev, bi); 5833 md_write_end(mddev); 5834 return true; 5835 } 5836 5837 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1); 5838 last_sector = bio_end_sector(bi); 5839 bi->bi_next = NULL; 5840 5841 /* Bail out if conflicts with reshape and REQ_NOWAIT is set */ 5842 if ((bi->bi_opf & REQ_NOWAIT) && 5843 (conf->reshape_progress != MaxSector) && 5844 (mddev->reshape_backwards 5845 ? (logical_sector > conf->reshape_progress && logical_sector <= conf->reshape_safe) 5846 : (logical_sector >= conf->reshape_safe && logical_sector < conf->reshape_progress))) { 5847 bio_wouldblock_error(bi); 5848 if (rw == WRITE) 5849 md_write_end(mddev); 5850 return true; 5851 } 5852 md_account_bio(mddev, &bi); 5853 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); 5854 for (; logical_sector < last_sector; logical_sector += RAID5_STRIPE_SECTORS(conf)) { 5855 int previous; 5856 int seq; 5857 5858 do_prepare = false; 5859 retry: 5860 seq = read_seqcount_begin(&conf->gen_lock); 5861 previous = 0; 5862 if (do_prepare) 5863 prepare_to_wait(&conf->wait_for_overlap, &w, 5864 TASK_UNINTERRUPTIBLE); 5865 if (unlikely(conf->reshape_progress != MaxSector)) { 5866 /* spinlock is needed as reshape_progress may be 5867 * 64bit on a 32bit platform, and so it might be 5868 * possible to see a half-updated value 5869 * Of course reshape_progress could change after 5870 * the lock is dropped, so once we get a reference 5871 * to the stripe that we think it is, we will have 5872 * to check again. 5873 */ 5874 spin_lock_irq(&conf->device_lock); 5875 if (mddev->reshape_backwards 5876 ? logical_sector < conf->reshape_progress 5877 : logical_sector >= conf->reshape_progress) { 5878 previous = 1; 5879 } else { 5880 if (mddev->reshape_backwards 5881 ? 
logical_sector < conf->reshape_safe 5882 : logical_sector >= conf->reshape_safe) { 5883 spin_unlock_irq(&conf->device_lock); 5884 schedule(); 5885 do_prepare = true; 5886 goto retry; 5887 } 5888 } 5889 spin_unlock_irq(&conf->device_lock); 5890 } 5891 5892 new_sector = raid5_compute_sector(conf, logical_sector, 5893 previous, 5894 &dd_idx, NULL); 5895 pr_debug("raid456: raid5_make_request, sector %llu logical %llu\n", 5896 (unsigned long long)new_sector, 5897 (unsigned long long)logical_sector); 5898 5899 sh = raid5_get_active_stripe(conf, new_sector, previous, 5900 (bi->bi_opf & REQ_RAHEAD), 0); 5901 if (sh) { 5902 if (unlikely(previous)) { 5903 /* expansion might have moved on while waiting for a 5904 * stripe, so we must do the range check again. 5905 * Expansion could still move past after this 5906 * test, but as we are holding a reference to 5907 * 'sh', we know that if that happens, 5908 * STRIPE_EXPANDING will get set and the expansion 5909 * won't proceed until we finish with the stripe. 5910 */ 5911 int must_retry = 0; 5912 spin_lock_irq(&conf->device_lock); 5913 if (mddev->reshape_backwards 5914 ? logical_sector >= conf->reshape_progress 5915 : logical_sector < conf->reshape_progress) 5916 /* mismatch, need to try again */ 5917 must_retry = 1; 5918 spin_unlock_irq(&conf->device_lock); 5919 if (must_retry) { 5920 raid5_release_stripe(sh); 5921 schedule(); 5922 do_prepare = true; 5923 goto retry; 5924 } 5925 } 5926 if (read_seqcount_retry(&conf->gen_lock, seq)) { 5927 /* Might have got the wrong stripe_head 5928 * by accident 5929 */ 5930 raid5_release_stripe(sh); 5931 goto retry; 5932 } 5933 5934 if (test_bit(STRIPE_EXPANDING, &sh->state) || 5935 !add_stripe_bio(sh, bi, dd_idx, rw, previous)) { 5936 /* Stripe is busy expanding or 5937 * add failed due to overlap. Flush everything 5938 * and wait a while 5939 */ 5940 md_wakeup_thread(mddev->thread); 5941 raid5_release_stripe(sh); 5942 schedule(); 5943 do_prepare = true; 5944 goto retry; 5945 } 5946 if (do_flush) { 5947 set_bit(STRIPE_R5C_PREFLUSH, &sh->state); 5948 /* we only need flush for one stripe */ 5949 do_flush = false; 5950 } 5951 5952 set_bit(STRIPE_HANDLE, &sh->state); 5953 clear_bit(STRIPE_DELAYED, &sh->state); 5954 if ((!sh->batch_head || sh == sh->batch_head) && 5955 (bi->bi_opf & REQ_SYNC) && 5956 !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 5957 atomic_inc(&conf->preread_active_stripes); 5958 release_stripe_plug(mddev, sh); 5959 } else { 5960 /* cannot get stripe for read-ahead, just give-up */ 5961 bi->bi_status = BLK_STS_IOERR; 5962 break; 5963 } 5964 } 5965 finish_wait(&conf->wait_for_overlap, &w); 5966 5967 if (rw == WRITE) 5968 md_write_end(mddev); 5969 bio_endio(bi); 5970 return true; 5971 } 5972 5973 static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks); 5974 5975 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped) 5976 { 5977 /* reshaping is quite different to recovery/resync so it is 5978 * handled quite separately ... here. 5979 * 5980 * On each call to sync_request, we gather one chunk worth of 5981 * destination stripes and flag them as expanding. 5982 * Then we find all the source stripes and request reads. 5983 * As the reads complete, handle_stripe will copy the data 5984 * into the destination stripe and release that stripe. 
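 *
 * Progress is checkpointed in the superblock whenever the data about
 * to be copied would overwrite blocks that have not yet been read
 * (see the writepos/readpos/safepos calculation below), and at least
 * every 10 seconds.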
5985 */ 5986 struct r5conf *conf = mddev->private; 5987 struct stripe_head *sh; 5988 struct md_rdev *rdev; 5989 sector_t first_sector, last_sector; 5990 int raid_disks = conf->previous_raid_disks; 5991 int data_disks = raid_disks - conf->max_degraded; 5992 int new_data_disks = conf->raid_disks - conf->max_degraded; 5993 int i; 5994 int dd_idx; 5995 sector_t writepos, readpos, safepos; 5996 sector_t stripe_addr; 5997 int reshape_sectors; 5998 struct list_head stripes; 5999 sector_t retn; 6000 6001 if (sector_nr == 0) { 6002 /* If restarting in the middle, skip the initial sectors */ 6003 if (mddev->reshape_backwards && 6004 conf->reshape_progress < raid5_size(mddev, 0, 0)) { 6005 sector_nr = raid5_size(mddev, 0, 0) 6006 - conf->reshape_progress; 6007 } else if (mddev->reshape_backwards && 6008 conf->reshape_progress == MaxSector) { 6009 /* shouldn't happen, but just in case, finish up.*/ 6010 sector_nr = MaxSector; 6011 } else if (!mddev->reshape_backwards && 6012 conf->reshape_progress > 0) 6013 sector_nr = conf->reshape_progress; 6014 sector_div(sector_nr, new_data_disks); 6015 if (sector_nr) { 6016 mddev->curr_resync_completed = sector_nr; 6017 sysfs_notify_dirent_safe(mddev->sysfs_completed); 6018 *skipped = 1; 6019 retn = sector_nr; 6020 goto finish; 6021 } 6022 } 6023 6024 /* We need to process a full chunk at a time. 6025 * If old and new chunk sizes differ, we need to process the 6026 * largest of these 6027 */ 6028 6029 reshape_sectors = max(conf->chunk_sectors, conf->prev_chunk_sectors); 6030 6031 /* We update the metadata at least every 10 seconds, or when 6032 * the data about to be copied would over-write the source of 6033 * the data at the front of the range. i.e. one new_stripe 6034 * along from reshape_progress new_maps to after where 6035 * reshape_safe old_maps to 6036 */ 6037 writepos = conf->reshape_progress; 6038 sector_div(writepos, new_data_disks); 6039 readpos = conf->reshape_progress; 6040 sector_div(readpos, data_disks); 6041 safepos = conf->reshape_safe; 6042 sector_div(safepos, data_disks); 6043 if (mddev->reshape_backwards) { 6044 BUG_ON(writepos < reshape_sectors); 6045 writepos -= reshape_sectors; 6046 readpos += reshape_sectors; 6047 safepos += reshape_sectors; 6048 } else { 6049 writepos += reshape_sectors; 6050 /* readpos and safepos are worst-case calculations. 6051 * A negative number is overly pessimistic, and causes 6052 * obvious problems for unsigned storage. So clip to 0. 6053 */ 6054 readpos -= min_t(sector_t, reshape_sectors, readpos); 6055 safepos -= min_t(sector_t, reshape_sectors, safepos); 6056 } 6057 6058 /* Having calculated the 'writepos' possibly use it 6059 * to set 'stripe_addr' which is where we will write to. 6060 */ 6061 if (mddev->reshape_backwards) { 6062 BUG_ON(conf->reshape_progress == 0); 6063 stripe_addr = writepos; 6064 BUG_ON((mddev->dev_sectors & 6065 ~((sector_t)reshape_sectors - 1)) 6066 - reshape_sectors - stripe_addr 6067 != sector_nr); 6068 } else { 6069 BUG_ON(writepos != sector_nr + reshape_sectors); 6070 stripe_addr = sector_nr; 6071 } 6072 6073 /* 'writepos' is the most advanced device address we might write. 6074 * 'readpos' is the least advanced device address we might read. 6075 * 'safepos' is the least address recorded in the metadata as having 6076 * been reshaped. 6077 * If there is a min_offset_diff, these are adjusted either by 6078 * increasing the safepos/readpos if diff is negative, or 6079 * increasing writepos if diff is positive. 
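 * (writepos, readpos and safepos are all per-device sector addresses
 * here, not array addresses; hence the sector_div() by the data-disk
 * counts above.)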
6080 * If 'readpos' is then behind 'writepos', there is no way that we can 6081 * ensure safety in the face of a crash - that must be done by userspace 6082 * making a backup of the data. So in that case there is no particular 6083 * rush to update metadata. 6084 * Otherwise if 'safepos' is behind 'writepos', then we really need to 6085 * update the metadata to advance 'safepos' to match 'readpos' so that 6086 * we can be safe in the event of a crash. 6087 * So we insist on updating metadata if safepos is behind writepos and 6088 * readpos is beyond writepos. 6089 * In any case, update the metadata every 10 seconds. 6090 * Maybe that number should be configurable, but I'm not sure it is 6091 * worth it.... maybe it could be a multiple of safemode_delay??? 6092 */ 6093 if (conf->min_offset_diff < 0) { 6094 safepos += -conf->min_offset_diff; 6095 readpos += -conf->min_offset_diff; 6096 } else 6097 writepos += conf->min_offset_diff; 6098 6099 if ((mddev->reshape_backwards 6100 ? (safepos > writepos && readpos < writepos) 6101 : (safepos < writepos && readpos > writepos)) || 6102 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { 6103 /* Cannot proceed until we've updated the superblock... */ 6104 wait_event(conf->wait_for_overlap, 6105 atomic_read(&conf->reshape_stripes)==0 6106 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); 6107 if (atomic_read(&conf->reshape_stripes) != 0) 6108 return 0; 6109 mddev->reshape_position = conf->reshape_progress; 6110 mddev->curr_resync_completed = sector_nr; 6111 if (!mddev->reshape_backwards) 6112 /* Can update recovery_offset */ 6113 rdev_for_each(rdev, mddev) 6114 if (rdev->raid_disk >= 0 && 6115 !test_bit(Journal, &rdev->flags) && 6116 !test_bit(In_sync, &rdev->flags) && 6117 rdev->recovery_offset < sector_nr) 6118 rdev->recovery_offset = sector_nr; 6119 6120 conf->reshape_checkpoint = jiffies; 6121 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 6122 md_wakeup_thread(mddev->thread); 6123 wait_event(mddev->sb_wait, mddev->sb_flags == 0 || 6124 test_bit(MD_RECOVERY_INTR, &mddev->recovery)); 6125 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 6126 return 0; 6127 spin_lock_irq(&conf->device_lock); 6128 conf->reshape_safe = mddev->reshape_position; 6129 spin_unlock_irq(&conf->device_lock); 6130 wake_up(&conf->wait_for_overlap); 6131 sysfs_notify_dirent_safe(mddev->sysfs_completed); 6132 } 6133 6134 INIT_LIST_HEAD(&stripes); 6135 for (i = 0; i < reshape_sectors; i += RAID5_STRIPE_SECTORS(conf)) { 6136 int j; 6137 int skipped_disk = 0; 6138 sh = raid5_get_active_stripe(conf, stripe_addr+i, 0, 0, 1); 6139 set_bit(STRIPE_EXPANDING, &sh->state); 6140 atomic_inc(&conf->reshape_stripes); 6141 /* If any of this stripe is beyond the end of the old 6142 * array, then we need to zero those blocks 6143 */ 6144 for (j=sh->disks; j--;) { 6145 sector_t s; 6146 if (j == sh->pd_idx) 6147 continue; 6148 if (conf->level == 6 && 6149 j == sh->qd_idx) 6150 continue; 6151 s = raid5_compute_blocknr(sh, j, 0); 6152 if (s < raid5_size(mddev, 0, 0)) { 6153 skipped_disk = 1; 6154 continue; 6155 } 6156 memset(page_address(sh->dev[j].page), 0, RAID5_STRIPE_SIZE(conf)); 6157 set_bit(R5_Expanded, &sh->dev[j].flags); 6158 set_bit(R5_UPTODATE, &sh->dev[j].flags); 6159 } 6160 if (!skipped_disk) { 6161 set_bit(STRIPE_EXPAND_READY, &sh->state); 6162 set_bit(STRIPE_HANDLE, &sh->state); 6163 } 6164 list_add(&sh->lru, &stripes); 6165 } 6166 spin_lock_irq(&conf->device_lock); 6167 if (mddev->reshape_backwards) 6168 conf->reshape_progress -= reshape_sectors * new_data_disks; 6169 else 6170 
conf->reshape_progress += reshape_sectors * new_data_disks; 6171 spin_unlock_irq(&conf->device_lock); 6172 /* Ok, those stripe are ready. We can start scheduling 6173 * reads on the source stripes. 6174 * The source stripes are determined by mapping the first and last 6175 * block on the destination stripes. 6176 */ 6177 first_sector = 6178 raid5_compute_sector(conf, stripe_addr*(new_data_disks), 6179 1, &dd_idx, NULL); 6180 last_sector = 6181 raid5_compute_sector(conf, ((stripe_addr+reshape_sectors) 6182 * new_data_disks - 1), 6183 1, &dd_idx, NULL); 6184 if (last_sector >= mddev->dev_sectors) 6185 last_sector = mddev->dev_sectors - 1; 6186 while (first_sector <= last_sector) { 6187 sh = raid5_get_active_stripe(conf, first_sector, 1, 0, 1); 6188 set_bit(STRIPE_EXPAND_SOURCE, &sh->state); 6189 set_bit(STRIPE_HANDLE, &sh->state); 6190 raid5_release_stripe(sh); 6191 first_sector += RAID5_STRIPE_SECTORS(conf); 6192 } 6193 /* Now that the sources are clearly marked, we can release 6194 * the destination stripes 6195 */ 6196 while (!list_empty(&stripes)) { 6197 sh = list_entry(stripes.next, struct stripe_head, lru); 6198 list_del_init(&sh->lru); 6199 raid5_release_stripe(sh); 6200 } 6201 /* If this takes us to the resync_max point where we have to pause, 6202 * then we need to write out the superblock. 6203 */ 6204 sector_nr += reshape_sectors; 6205 retn = reshape_sectors; 6206 finish: 6207 if (mddev->curr_resync_completed > mddev->resync_max || 6208 (sector_nr - mddev->curr_resync_completed) * 2 6209 >= mddev->resync_max - mddev->curr_resync_completed) { 6210 /* Cannot proceed until we've updated the superblock... */ 6211 wait_event(conf->wait_for_overlap, 6212 atomic_read(&conf->reshape_stripes) == 0 6213 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); 6214 if (atomic_read(&conf->reshape_stripes) != 0) 6215 goto ret; 6216 mddev->reshape_position = conf->reshape_progress; 6217 mddev->curr_resync_completed = sector_nr; 6218 if (!mddev->reshape_backwards) 6219 /* Can update recovery_offset */ 6220 rdev_for_each(rdev, mddev) 6221 if (rdev->raid_disk >= 0 && 6222 !test_bit(Journal, &rdev->flags) && 6223 !test_bit(In_sync, &rdev->flags) && 6224 rdev->recovery_offset < sector_nr) 6225 rdev->recovery_offset = sector_nr; 6226 conf->reshape_checkpoint = jiffies; 6227 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 6228 md_wakeup_thread(mddev->thread); 6229 wait_event(mddev->sb_wait, 6230 !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) 6231 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); 6232 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 6233 goto ret; 6234 spin_lock_irq(&conf->device_lock); 6235 conf->reshape_safe = mddev->reshape_position; 6236 spin_unlock_irq(&conf->device_lock); 6237 wake_up(&conf->wait_for_overlap); 6238 sysfs_notify_dirent_safe(mddev->sysfs_completed); 6239 } 6240 ret: 6241 return retn; 6242 } 6243 6244 static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_nr, 6245 int *skipped) 6246 { 6247 struct r5conf *conf = mddev->private; 6248 struct stripe_head *sh; 6249 sector_t max_sector = mddev->dev_sectors; 6250 sector_t sync_blocks; 6251 int still_degraded = 0; 6252 int i; 6253 6254 if (sector_nr >= max_sector) { 6255 /* just being told to finish up .. 
nothing much to do */ 6256 6257 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { 6258 end_reshape(conf); 6259 return 0; 6260 } 6261 6262 if (mddev->curr_resync < max_sector) /* aborted */ 6263 md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync, 6264 &sync_blocks, 1); 6265 else /* completed sync */ 6266 conf->fullsync = 0; 6267 md_bitmap_close_sync(mddev->bitmap); 6268 6269 return 0; 6270 } 6271 6272 /* Allow raid5_quiesce to complete */ 6273 wait_event(conf->wait_for_overlap, conf->quiesce != 2); 6274 6275 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 6276 return reshape_request(mddev, sector_nr, skipped); 6277 6278 /* No need to check resync_max as we never do more than one 6279 * stripe, and as resync_max will always be on a chunk boundary, 6280 * if the check in md_do_sync didn't fire, there is no chance 6281 * of overstepping resync_max here 6282 */ 6283 6284 /* if there are too many failed drives and we are trying 6285 * to resync, then assert that we are finished, because there is 6286 * nothing we can do. 6287 */ 6288 if (mddev->degraded >= conf->max_degraded && 6289 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 6290 sector_t rv = mddev->dev_sectors - sector_nr; 6291 *skipped = 1; 6292 return rv; 6293 } 6294 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && 6295 !conf->fullsync && 6296 !md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && 6297 sync_blocks >= RAID5_STRIPE_SECTORS(conf)) { 6298 /* we can skip this block, and probably more */ 6299 do_div(sync_blocks, RAID5_STRIPE_SECTORS(conf)); 6300 *skipped = 1; 6301 /* keep things rounded to whole stripes */ 6302 return sync_blocks * RAID5_STRIPE_SECTORS(conf); 6303 } 6304 6305 md_bitmap_cond_end_sync(mddev->bitmap, sector_nr, false); 6306 6307 sh = raid5_get_active_stripe(conf, sector_nr, 0, 1, 0); 6308 if (sh == NULL) { 6309 sh = raid5_get_active_stripe(conf, sector_nr, 0, 0, 0); 6310 /* make sure we don't swamp the stripe cache if someone else 6311 * is trying to get access 6312 */ 6313 schedule_timeout_uninterruptible(1); 6314 } 6315 /* Need to check if array will still be degraded after recovery/resync 6316 * Note in case of > 1 drive failures it's possible we're rebuilding 6317 * one drive while leaving another faulty drive in array. 6318 */ 6319 rcu_read_lock(); 6320 for (i = 0; i < conf->raid_disks; i++) { 6321 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); 6322 6323 if (rdev == NULL || test_bit(Faulty, &rdev->flags)) 6324 still_degraded = 1; 6325 } 6326 rcu_read_unlock(); 6327 6328 md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); 6329 6330 set_bit(STRIPE_SYNC_REQUESTED, &sh->state); 6331 set_bit(STRIPE_HANDLE, &sh->state); 6332 6333 raid5_release_stripe(sh); 6334 6335 return RAID5_STRIPE_SECTORS(conf); 6336 } 6337 6338 static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio, 6339 unsigned int offset) 6340 { 6341 /* We may not be able to submit a whole bio at once as there 6342 * may not be enough stripe_heads available. 6343 * We cannot pre-allocate enough stripe_heads as we may need 6344 * more than exist in the cache (if we allow very large chunks). 6345 * So we do one stripe head at a time and record in 6346 * conf->retry_read_offset how many have been done. 6347 * 6348 * We *know* that this entire raid_bio is in one chunk, so 6349 * it will be only one 'dd_idx' and only need one call to raid5_compute_sector. 
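 *
 * raid5d() feeds the saved offset back in on the next attempt (see
 * remove_bio_from_retry()), so we resume from the first stripe that
 * has not been handled yet instead of re-issuing the whole bio.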
6350 */ 6351 struct stripe_head *sh; 6352 int dd_idx; 6353 sector_t sector, logical_sector, last_sector; 6354 int scnt = 0; 6355 int handled = 0; 6356 6357 logical_sector = raid_bio->bi_iter.bi_sector & 6358 ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1); 6359 sector = raid5_compute_sector(conf, logical_sector, 6360 0, &dd_idx, NULL); 6361 last_sector = bio_end_sector(raid_bio); 6362 6363 for (; logical_sector < last_sector; 6364 logical_sector += RAID5_STRIPE_SECTORS(conf), 6365 sector += RAID5_STRIPE_SECTORS(conf), 6366 scnt++) { 6367 6368 if (scnt < offset) 6369 /* already done this stripe */ 6370 continue; 6371 6372 sh = raid5_get_active_stripe(conf, sector, 0, 1, 1); 6373 6374 if (!sh) { 6375 /* failed to get a stripe - must wait */ 6376 conf->retry_read_aligned = raid_bio; 6377 conf->retry_read_offset = scnt; 6378 return handled; 6379 } 6380 6381 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) { 6382 raid5_release_stripe(sh); 6383 conf->retry_read_aligned = raid_bio; 6384 conf->retry_read_offset = scnt; 6385 return handled; 6386 } 6387 6388 set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags); 6389 handle_stripe(sh); 6390 raid5_release_stripe(sh); 6391 handled++; 6392 } 6393 6394 bio_endio(raid_bio); 6395 6396 if (atomic_dec_and_test(&conf->active_aligned_reads)) 6397 wake_up(&conf->wait_for_quiescent); 6398 return handled; 6399 } 6400 6401 static int handle_active_stripes(struct r5conf *conf, int group, 6402 struct r5worker *worker, 6403 struct list_head *temp_inactive_list) 6404 __must_hold(&conf->device_lock) 6405 { 6406 struct stripe_head *batch[MAX_STRIPE_BATCH], *sh; 6407 int i, batch_size = 0, hash; 6408 bool release_inactive = false; 6409 6410 while (batch_size < MAX_STRIPE_BATCH && 6411 (sh = __get_priority_stripe(conf, group)) != NULL) 6412 batch[batch_size++] = sh; 6413 6414 if (batch_size == 0) { 6415 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) 6416 if (!list_empty(temp_inactive_list + i)) 6417 break; 6418 if (i == NR_STRIPE_HASH_LOCKS) { 6419 spin_unlock_irq(&conf->device_lock); 6420 log_flush_stripe_to_raid(conf); 6421 spin_lock_irq(&conf->device_lock); 6422 return batch_size; 6423 } 6424 release_inactive = true; 6425 } 6426 spin_unlock_irq(&conf->device_lock); 6427 6428 release_inactive_stripe_list(conf, temp_inactive_list, 6429 NR_STRIPE_HASH_LOCKS); 6430 6431 r5l_flush_stripe_to_raid(conf->log); 6432 if (release_inactive) { 6433 spin_lock_irq(&conf->device_lock); 6434 return 0; 6435 } 6436 6437 for (i = 0; i < batch_size; i++) 6438 handle_stripe(batch[i]); 6439 log_write_stripe_run(conf); 6440 6441 cond_resched(); 6442 6443 spin_lock_irq(&conf->device_lock); 6444 for (i = 0; i < batch_size; i++) { 6445 hash = batch[i]->hash_lock_index; 6446 __release_stripe(conf, batch[i], &temp_inactive_list[hash]); 6447 } 6448 return batch_size; 6449 } 6450 6451 static void raid5_do_work(struct work_struct *work) 6452 { 6453 struct r5worker *worker = container_of(work, struct r5worker, work); 6454 struct r5worker_group *group = worker->group; 6455 struct r5conf *conf = group->conf; 6456 struct mddev *mddev = conf->mddev; 6457 int group_id = group - conf->worker_groups; 6458 int handled; 6459 struct blk_plug plug; 6460 6461 pr_debug("+++ raid5worker active\n"); 6462 6463 blk_start_plug(&plug); 6464 handled = 0; 6465 spin_lock_irq(&conf->device_lock); 6466 while (1) { 6467 int batch_size, released; 6468 6469 released = release_stripe_list(conf, worker->temp_inactive_list); 6470 6471 batch_size = handle_active_stripes(conf, group_id, worker, 6472 worker->temp_inactive_list); 6473 
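/*
 * Mark this worker as no longer busy; raid5_wakeup_stripe_thread()
 * looks at ->working when deciding whether extra workers need to be
 * kicked for newly queued stripes.
 */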
worker->working = false; 6474 if (!batch_size && !released) 6475 break; 6476 handled += batch_size; 6477 wait_event_lock_irq(mddev->sb_wait, 6478 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags), 6479 conf->device_lock); 6480 } 6481 pr_debug("%d stripes handled\n", handled); 6482 6483 spin_unlock_irq(&conf->device_lock); 6484 6485 flush_deferred_bios(conf); 6486 6487 r5l_flush_stripe_to_raid(conf->log); 6488 6489 async_tx_issue_pending_all(); 6490 blk_finish_plug(&plug); 6491 6492 pr_debug("--- raid5worker inactive\n"); 6493 } 6494 6495 /* 6496 * This is our raid5 kernel thread. 6497 * 6498 * We scan the hash table for stripes which can be handled now. 6499 * During the scan, completed stripes are saved for us by the interrupt 6500 * handler, so that they will not have to wait for our next wakeup. 6501 */ 6502 static void raid5d(struct md_thread *thread) 6503 { 6504 struct mddev *mddev = thread->mddev; 6505 struct r5conf *conf = mddev->private; 6506 int handled; 6507 struct blk_plug plug; 6508 6509 pr_debug("+++ raid5d active\n"); 6510 6511 md_check_recovery(mddev); 6512 6513 blk_start_plug(&plug); 6514 handled = 0; 6515 spin_lock_irq(&conf->device_lock); 6516 while (1) { 6517 struct bio *bio; 6518 int batch_size, released; 6519 unsigned int offset; 6520 6521 released = release_stripe_list(conf, conf->temp_inactive_list); 6522 if (released) 6523 clear_bit(R5_DID_ALLOC, &conf->cache_state); 6524 6525 if ( 6526 !list_empty(&conf->bitmap_list)) { 6527 /* Now is a good time to flush some bitmap updates */ 6528 conf->seq_flush++; 6529 spin_unlock_irq(&conf->device_lock); 6530 md_bitmap_unplug(mddev->bitmap); 6531 spin_lock_irq(&conf->device_lock); 6532 conf->seq_write = conf->seq_flush; 6533 activate_bit_delay(conf, conf->temp_inactive_list); 6534 } 6535 raid5_activate_delayed(conf); 6536 6537 while ((bio = remove_bio_from_retry(conf, &offset))) { 6538 int ok; 6539 spin_unlock_irq(&conf->device_lock); 6540 ok = retry_aligned_read(conf, bio, offset); 6541 spin_lock_irq(&conf->device_lock); 6542 if (!ok) 6543 break; 6544 handled++; 6545 } 6546 6547 batch_size = handle_active_stripes(conf, ANY_GROUP, NULL, 6548 conf->temp_inactive_list); 6549 if (!batch_size && !released) 6550 break; 6551 handled += batch_size; 6552 6553 if (mddev->sb_flags & ~(1 << MD_SB_CHANGE_PENDING)) { 6554 spin_unlock_irq(&conf->device_lock); 6555 md_check_recovery(mddev); 6556 spin_lock_irq(&conf->device_lock); 6557 } 6558 } 6559 pr_debug("%d stripes handled\n", handled); 6560 6561 spin_unlock_irq(&conf->device_lock); 6562 if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) && 6563 mutex_trylock(&conf->cache_size_mutex)) { 6564 grow_one_stripe(conf, __GFP_NOWARN); 6565 /* Set flag even if allocation failed. 
This helps 6566 * slow down allocation requests when mem is short 6567 */ 6568 set_bit(R5_DID_ALLOC, &conf->cache_state); 6569 mutex_unlock(&conf->cache_size_mutex); 6570 } 6571 6572 flush_deferred_bios(conf); 6573 6574 r5l_flush_stripe_to_raid(conf->log); 6575 6576 async_tx_issue_pending_all(); 6577 blk_finish_plug(&plug); 6578 6579 pr_debug("--- raid5d inactive\n"); 6580 } 6581 6582 static ssize_t 6583 raid5_show_stripe_cache_size(struct mddev *mddev, char *page) 6584 { 6585 struct r5conf *conf; 6586 int ret = 0; 6587 spin_lock(&mddev->lock); 6588 conf = mddev->private; 6589 if (conf) 6590 ret = sprintf(page, "%d\n", conf->min_nr_stripes); 6591 spin_unlock(&mddev->lock); 6592 return ret; 6593 } 6594 6595 int 6596 raid5_set_cache_size(struct mddev *mddev, int size) 6597 { 6598 int result = 0; 6599 struct r5conf *conf = mddev->private; 6600 6601 if (size <= 16 || size > 32768) 6602 return -EINVAL; 6603 6604 conf->min_nr_stripes = size; 6605 mutex_lock(&conf->cache_size_mutex); 6606 while (size < conf->max_nr_stripes && 6607 drop_one_stripe(conf)) 6608 ; 6609 mutex_unlock(&conf->cache_size_mutex); 6610 6611 md_allow_write(mddev); 6612 6613 mutex_lock(&conf->cache_size_mutex); 6614 while (size > conf->max_nr_stripes) 6615 if (!grow_one_stripe(conf, GFP_KERNEL)) { 6616 conf->min_nr_stripes = conf->max_nr_stripes; 6617 result = -ENOMEM; 6618 break; 6619 } 6620 mutex_unlock(&conf->cache_size_mutex); 6621 6622 return result; 6623 } 6624 EXPORT_SYMBOL(raid5_set_cache_size); 6625 6626 static ssize_t 6627 raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len) 6628 { 6629 struct r5conf *conf; 6630 unsigned long new; 6631 int err; 6632 6633 if (len >= PAGE_SIZE) 6634 return -EINVAL; 6635 if (kstrtoul(page, 10, &new)) 6636 return -EINVAL; 6637 err = mddev_lock(mddev); 6638 if (err) 6639 return err; 6640 conf = mddev->private; 6641 if (!conf) 6642 err = -ENODEV; 6643 else 6644 err = raid5_set_cache_size(mddev, new); 6645 mddev_unlock(mddev); 6646 6647 return err ?: len; 6648 } 6649 6650 static struct md_sysfs_entry 6651 raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR, 6652 raid5_show_stripe_cache_size, 6653 raid5_store_stripe_cache_size); 6654 6655 static ssize_t 6656 raid5_show_rmw_level(struct mddev *mddev, char *page) 6657 { 6658 struct r5conf *conf = mddev->private; 6659 if (conf) 6660 return sprintf(page, "%d\n", conf->rmw_level); 6661 else 6662 return 0; 6663 } 6664 6665 static ssize_t 6666 raid5_store_rmw_level(struct mddev *mddev, const char *page, size_t len) 6667 { 6668 struct r5conf *conf = mddev->private; 6669 unsigned long new; 6670 6671 if (!conf) 6672 return -ENODEV; 6673 6674 if (len >= PAGE_SIZE) 6675 return -EINVAL; 6676 6677 if (kstrtoul(page, 10, &new)) 6678 return -EINVAL; 6679 6680 if (new != PARITY_DISABLE_RMW && !raid6_call.xor_syndrome) 6681 return -EINVAL; 6682 6683 if (new != PARITY_DISABLE_RMW && 6684 new != PARITY_ENABLE_RMW && 6685 new != PARITY_PREFER_RMW) 6686 return -EINVAL; 6687 6688 conf->rmw_level = new; 6689 return len; 6690 } 6691 6692 static struct md_sysfs_entry 6693 raid5_rmw_level = __ATTR(rmw_level, S_IRUGO | S_IWUSR, 6694 raid5_show_rmw_level, 6695 raid5_store_rmw_level); 6696 6697 static ssize_t 6698 raid5_show_stripe_size(struct mddev *mddev, char *page) 6699 { 6700 struct r5conf *conf; 6701 int ret = 0; 6702 6703 spin_lock(&mddev->lock); 6704 conf = mddev->private; 6705 if (conf) 6706 ret = sprintf(page, "%lu\n", RAID5_STRIPE_SIZE(conf)); 6707 spin_unlock(&mddev->lock); 6708 return ret; 6709 } 6710 6711 #if 
PAGE_SIZE != DEFAULT_STRIPE_SIZE 6712 static ssize_t 6713 raid5_store_stripe_size(struct mddev *mddev, const char *page, size_t len) 6714 { 6715 struct r5conf *conf; 6716 unsigned long new; 6717 int err; 6718 int size; 6719 6720 if (len >= PAGE_SIZE) 6721 return -EINVAL; 6722 if (kstrtoul(page, 10, &new)) 6723 return -EINVAL; 6724 6725 /* 6726 * The value should not be bigger than PAGE_SIZE. It requires to 6727 * be multiple of DEFAULT_STRIPE_SIZE and the value should be power 6728 * of two. 6729 */ 6730 if (new % DEFAULT_STRIPE_SIZE != 0 || 6731 new > PAGE_SIZE || new == 0 || 6732 new != roundup_pow_of_two(new)) 6733 return -EINVAL; 6734 6735 err = mddev_lock(mddev); 6736 if (err) 6737 return err; 6738 6739 conf = mddev->private; 6740 if (!conf) { 6741 err = -ENODEV; 6742 goto out_unlock; 6743 } 6744 6745 if (new == conf->stripe_size) 6746 goto out_unlock; 6747 6748 pr_debug("md/raid: change stripe_size from %lu to %lu\n", 6749 conf->stripe_size, new); 6750 6751 if (mddev->sync_thread || 6752 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 6753 mddev->reshape_position != MaxSector || 6754 mddev->sysfs_active) { 6755 err = -EBUSY; 6756 goto out_unlock; 6757 } 6758 6759 mddev_suspend(mddev); 6760 mutex_lock(&conf->cache_size_mutex); 6761 size = conf->max_nr_stripes; 6762 6763 shrink_stripes(conf); 6764 6765 conf->stripe_size = new; 6766 conf->stripe_shift = ilog2(new) - 9; 6767 conf->stripe_sectors = new >> 9; 6768 if (grow_stripes(conf, size)) { 6769 pr_warn("md/raid:%s: couldn't allocate buffers\n", 6770 mdname(mddev)); 6771 err = -ENOMEM; 6772 } 6773 mutex_unlock(&conf->cache_size_mutex); 6774 mddev_resume(mddev); 6775 6776 out_unlock: 6777 mddev_unlock(mddev); 6778 return err ?: len; 6779 } 6780 6781 static struct md_sysfs_entry 6782 raid5_stripe_size = __ATTR(stripe_size, 0644, 6783 raid5_show_stripe_size, 6784 raid5_store_stripe_size); 6785 #else 6786 static struct md_sysfs_entry 6787 raid5_stripe_size = __ATTR(stripe_size, 0444, 6788 raid5_show_stripe_size, 6789 NULL); 6790 #endif 6791 6792 static ssize_t 6793 raid5_show_preread_threshold(struct mddev *mddev, char *page) 6794 { 6795 struct r5conf *conf; 6796 int ret = 0; 6797 spin_lock(&mddev->lock); 6798 conf = mddev->private; 6799 if (conf) 6800 ret = sprintf(page, "%d\n", conf->bypass_threshold); 6801 spin_unlock(&mddev->lock); 6802 return ret; 6803 } 6804 6805 static ssize_t 6806 raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len) 6807 { 6808 struct r5conf *conf; 6809 unsigned long new; 6810 int err; 6811 6812 if (len >= PAGE_SIZE) 6813 return -EINVAL; 6814 if (kstrtoul(page, 10, &new)) 6815 return -EINVAL; 6816 6817 err = mddev_lock(mddev); 6818 if (err) 6819 return err; 6820 conf = mddev->private; 6821 if (!conf) 6822 err = -ENODEV; 6823 else if (new > conf->min_nr_stripes) 6824 err = -EINVAL; 6825 else 6826 conf->bypass_threshold = new; 6827 mddev_unlock(mddev); 6828 return err ?: len; 6829 } 6830 6831 static struct md_sysfs_entry 6832 raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold, 6833 S_IRUGO | S_IWUSR, 6834 raid5_show_preread_threshold, 6835 raid5_store_preread_threshold); 6836 6837 static ssize_t 6838 raid5_show_skip_copy(struct mddev *mddev, char *page) 6839 { 6840 struct r5conf *conf; 6841 int ret = 0; 6842 spin_lock(&mddev->lock); 6843 conf = mddev->private; 6844 if (conf) 6845 ret = sprintf(page, "%d\n", conf->skip_copy); 6846 spin_unlock(&mddev->lock); 6847 return ret; 6848 } 6849 6850 static ssize_t 6851 raid5_store_skip_copy(struct mddev *mddev, const char *page, 
size_t len) 6852 { 6853 struct r5conf *conf; 6854 unsigned long new; 6855 int err; 6856 6857 if (len >= PAGE_SIZE) 6858 return -EINVAL; 6859 if (kstrtoul(page, 10, &new)) 6860 return -EINVAL; 6861 new = !!new; 6862 6863 err = mddev_lock(mddev); 6864 if (err) 6865 return err; 6866 conf = mddev->private; 6867 if (!conf) 6868 err = -ENODEV; 6869 else if (new != conf->skip_copy) { 6870 struct request_queue *q = mddev->queue; 6871 6872 mddev_suspend(mddev); 6873 conf->skip_copy = new; 6874 if (new) 6875 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q); 6876 else 6877 blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q); 6878 mddev_resume(mddev); 6879 } 6880 mddev_unlock(mddev); 6881 return err ?: len; 6882 } 6883 6884 static struct md_sysfs_entry 6885 raid5_skip_copy = __ATTR(skip_copy, S_IRUGO | S_IWUSR, 6886 raid5_show_skip_copy, 6887 raid5_store_skip_copy); 6888 6889 static ssize_t 6890 stripe_cache_active_show(struct mddev *mddev, char *page) 6891 { 6892 struct r5conf *conf = mddev->private; 6893 if (conf) 6894 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); 6895 else 6896 return 0; 6897 } 6898 6899 static struct md_sysfs_entry 6900 raid5_stripecache_active = __ATTR_RO(stripe_cache_active); 6901 6902 static ssize_t 6903 raid5_show_group_thread_cnt(struct mddev *mddev, char *page) 6904 { 6905 struct r5conf *conf; 6906 int ret = 0; 6907 spin_lock(&mddev->lock); 6908 conf = mddev->private; 6909 if (conf) 6910 ret = sprintf(page, "%d\n", conf->worker_cnt_per_group); 6911 spin_unlock(&mddev->lock); 6912 return ret; 6913 } 6914 6915 static int alloc_thread_groups(struct r5conf *conf, int cnt, 6916 int *group_cnt, 6917 struct r5worker_group **worker_groups); 6918 static ssize_t 6919 raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) 6920 { 6921 struct r5conf *conf; 6922 unsigned int new; 6923 int err; 6924 struct r5worker_group *new_groups, *old_groups; 6925 int group_cnt; 6926 6927 if (len >= PAGE_SIZE) 6928 return -EINVAL; 6929 if (kstrtouint(page, 10, &new)) 6930 return -EINVAL; 6931 /* 8192 should be big enough */ 6932 if (new > 8192) 6933 return -EINVAL; 6934 6935 err = mddev_lock(mddev); 6936 if (err) 6937 return err; 6938 conf = mddev->private; 6939 if (!conf) 6940 err = -ENODEV; 6941 else if (new != conf->worker_cnt_per_group) { 6942 mddev_suspend(mddev); 6943 6944 old_groups = conf->worker_groups; 6945 if (old_groups) 6946 flush_workqueue(raid5_wq); 6947 6948 err = alloc_thread_groups(conf, new, &group_cnt, &new_groups); 6949 if (!err) { 6950 spin_lock_irq(&conf->device_lock); 6951 conf->group_cnt = group_cnt; 6952 conf->worker_cnt_per_group = new; 6953 conf->worker_groups = new_groups; 6954 spin_unlock_irq(&conf->device_lock); 6955 6956 if (old_groups) 6957 kfree(old_groups[0].workers); 6958 kfree(old_groups); 6959 } 6960 mddev_resume(mddev); 6961 } 6962 mddev_unlock(mddev); 6963 6964 return err ?: len; 6965 } 6966 6967 static struct md_sysfs_entry 6968 raid5_group_thread_cnt = __ATTR(group_thread_cnt, S_IRUGO | S_IWUSR, 6969 raid5_show_group_thread_cnt, 6970 raid5_store_group_thread_cnt); 6971 6972 static struct attribute *raid5_attrs[] = { 6973 &raid5_stripecache_size.attr, 6974 &raid5_stripecache_active.attr, 6975 &raid5_preread_bypass_threshold.attr, 6976 &raid5_group_thread_cnt.attr, 6977 &raid5_skip_copy.attr, 6978 &raid5_rmw_level.attr, 6979 &raid5_stripe_size.attr, 6980 &r5c_journal_mode.attr, 6981 &ppl_write_hint.attr, 6982 NULL, 6983 }; 6984 static const struct attribute_group raid5_attrs_group = { 6985 .name = NULL, 6986 .attrs = 
raid5_attrs, 6987 }; 6988 6989 static int alloc_thread_groups(struct r5conf *conf, int cnt, int *group_cnt, 6990 struct r5worker_group **worker_groups) 6991 { 6992 int i, j, k; 6993 ssize_t size; 6994 struct r5worker *workers; 6995 6996 if (cnt == 0) { 6997 *group_cnt = 0; 6998 *worker_groups = NULL; 6999 return 0; 7000 } 7001 *group_cnt = num_possible_nodes(); 7002 size = sizeof(struct r5worker) * cnt; 7003 workers = kcalloc(size, *group_cnt, GFP_NOIO); 7004 *worker_groups = kcalloc(*group_cnt, sizeof(struct r5worker_group), 7005 GFP_NOIO); 7006 if (!*worker_groups || !workers) { 7007 kfree(workers); 7008 kfree(*worker_groups); 7009 return -ENOMEM; 7010 } 7011 7012 for (i = 0; i < *group_cnt; i++) { 7013 struct r5worker_group *group; 7014 7015 group = &(*worker_groups)[i]; 7016 INIT_LIST_HEAD(&group->handle_list); 7017 INIT_LIST_HEAD(&group->loprio_list); 7018 group->conf = conf; 7019 group->workers = workers + i * cnt; 7020 7021 for (j = 0; j < cnt; j++) { 7022 struct r5worker *worker = group->workers + j; 7023 worker->group = group; 7024 INIT_WORK(&worker->work, raid5_do_work); 7025 7026 for (k = 0; k < NR_STRIPE_HASH_LOCKS; k++) 7027 INIT_LIST_HEAD(worker->temp_inactive_list + k); 7028 } 7029 } 7030 7031 return 0; 7032 } 7033 7034 static void free_thread_groups(struct r5conf *conf) 7035 { 7036 if (conf->worker_groups) 7037 kfree(conf->worker_groups[0].workers); 7038 kfree(conf->worker_groups); 7039 conf->worker_groups = NULL; 7040 } 7041 7042 static sector_t 7043 raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks) 7044 { 7045 struct r5conf *conf = mddev->private; 7046 7047 if (!sectors) 7048 sectors = mddev->dev_sectors; 7049 if (!raid_disks) 7050 /* size is defined by the smallest of previous and new size */ 7051 raid_disks = min(conf->raid_disks, conf->previous_raid_disks); 7052 7053 sectors &= ~((sector_t)conf->chunk_sectors - 1); 7054 sectors &= ~((sector_t)conf->prev_chunk_sectors - 1); 7055 return sectors * (raid_disks - conf->max_degraded); 7056 } 7057 7058 static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) 7059 { 7060 safe_put_page(percpu->spare_page); 7061 percpu->spare_page = NULL; 7062 kvfree(percpu->scribble); 7063 percpu->scribble = NULL; 7064 } 7065 7066 static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) 7067 { 7068 if (conf->level == 6 && !percpu->spare_page) { 7069 percpu->spare_page = alloc_page(GFP_KERNEL); 7070 if (!percpu->spare_page) 7071 return -ENOMEM; 7072 } 7073 7074 if (scribble_alloc(percpu, 7075 max(conf->raid_disks, 7076 conf->previous_raid_disks), 7077 max(conf->chunk_sectors, 7078 conf->prev_chunk_sectors) 7079 / RAID5_STRIPE_SECTORS(conf))) { 7080 free_scratch_buffer(conf, percpu); 7081 return -ENOMEM; 7082 } 7083 7084 local_lock_init(&percpu->lock); 7085 return 0; 7086 } 7087 7088 static int raid456_cpu_dead(unsigned int cpu, struct hlist_node *node) 7089 { 7090 struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node); 7091 7092 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); 7093 return 0; 7094 } 7095 7096 static void raid5_free_percpu(struct r5conf *conf) 7097 { 7098 if (!conf->percpu) 7099 return; 7100 7101 cpuhp_state_remove_instance(CPUHP_MD_RAID5_PREPARE, &conf->node); 7102 free_percpu(conf->percpu); 7103 } 7104 7105 static void free_conf(struct r5conf *conf) 7106 { 7107 int i; 7108 7109 log_exit(conf); 7110 7111 unregister_shrinker(&conf->shrinker); 7112 free_thread_groups(conf); 7113 shrink_stripes(conf); 7114 raid5_free_percpu(conf); 7115 for (i = 0; 
i < conf->pool_size; i++) 7116 if (conf->disks[i].extra_page) 7117 put_page(conf->disks[i].extra_page); 7118 kfree(conf->disks); 7119 bioset_exit(&conf->bio_split); 7120 kfree(conf->stripe_hashtbl); 7121 kfree(conf->pending_data); 7122 kfree(conf); 7123 } 7124 7125 static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node) 7126 { 7127 struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node); 7128 struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu); 7129 7130 if (alloc_scratch_buffer(conf, percpu)) { 7131 pr_warn("%s: failed memory allocation for cpu%u\n", 7132 __func__, cpu); 7133 return -ENOMEM; 7134 } 7135 return 0; 7136 } 7137 7138 static int raid5_alloc_percpu(struct r5conf *conf) 7139 { 7140 int err = 0; 7141 7142 conf->percpu = alloc_percpu(struct raid5_percpu); 7143 if (!conf->percpu) 7144 return -ENOMEM; 7145 7146 err = cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node); 7147 if (!err) { 7148 conf->scribble_disks = max(conf->raid_disks, 7149 conf->previous_raid_disks); 7150 conf->scribble_sectors = max(conf->chunk_sectors, 7151 conf->prev_chunk_sectors); 7152 } 7153 return err; 7154 } 7155 7156 static unsigned long raid5_cache_scan(struct shrinker *shrink, 7157 struct shrink_control *sc) 7158 { 7159 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); 7160 unsigned long ret = SHRINK_STOP; 7161 7162 if (mutex_trylock(&conf->cache_size_mutex)) { 7163 ret= 0; 7164 while (ret < sc->nr_to_scan && 7165 conf->max_nr_stripes > conf->min_nr_stripes) { 7166 if (drop_one_stripe(conf) == 0) { 7167 ret = SHRINK_STOP; 7168 break; 7169 } 7170 ret++; 7171 } 7172 mutex_unlock(&conf->cache_size_mutex); 7173 } 7174 return ret; 7175 } 7176 7177 static unsigned long raid5_cache_count(struct shrinker *shrink, 7178 struct shrink_control *sc) 7179 { 7180 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); 7181 7182 if (conf->max_nr_stripes < conf->min_nr_stripes) 7183 /* unlikely, but not impossible */ 7184 return 0; 7185 return conf->max_nr_stripes - conf->min_nr_stripes; 7186 } 7187 7188 static struct r5conf *setup_conf(struct mddev *mddev) 7189 { 7190 struct r5conf *conf; 7191 int raid_disk, memory, max_disks; 7192 struct md_rdev *rdev; 7193 struct disk_info *disk; 7194 char pers_name[6]; 7195 int i; 7196 int group_cnt; 7197 struct r5worker_group *new_group; 7198 int ret = -ENOMEM; 7199 7200 if (mddev->new_level != 5 7201 && mddev->new_level != 4 7202 && mddev->new_level != 6) { 7203 pr_warn("md/raid:%s: raid level not set to 4/5/6 (%d)\n", 7204 mdname(mddev), mddev->new_level); 7205 return ERR_PTR(-EIO); 7206 } 7207 if ((mddev->new_level == 5 7208 && !algorithm_valid_raid5(mddev->new_layout)) || 7209 (mddev->new_level == 6 7210 && !algorithm_valid_raid6(mddev->new_layout))) { 7211 pr_warn("md/raid:%s: layout %d not supported\n", 7212 mdname(mddev), mddev->new_layout); 7213 return ERR_PTR(-EIO); 7214 } 7215 if (mddev->new_level == 6 && mddev->raid_disks < 4) { 7216 pr_warn("md/raid:%s: not enough configured devices (%d, minimum 4)\n", 7217 mdname(mddev), mddev->raid_disks); 7218 return ERR_PTR(-EINVAL); 7219 } 7220 7221 if (!mddev->new_chunk_sectors || 7222 (mddev->new_chunk_sectors << 9) % PAGE_SIZE || 7223 !is_power_of_2(mddev->new_chunk_sectors)) { 7224 pr_warn("md/raid:%s: invalid chunk size %d\n", 7225 mdname(mddev), mddev->new_chunk_sectors << 9); 7226 return ERR_PTR(-EINVAL); 7227 } 7228 7229 conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL); 7230 if (conf == NULL) 7231 goto abort; 7232 7233 #if PAGE_SIZE != 
DEFAULT_STRIPE_SIZE 7234 conf->stripe_size = DEFAULT_STRIPE_SIZE; 7235 conf->stripe_shift = ilog2(DEFAULT_STRIPE_SIZE) - 9; 7236 conf->stripe_sectors = DEFAULT_STRIPE_SIZE >> 9; 7237 #endif 7238 INIT_LIST_HEAD(&conf->free_list); 7239 INIT_LIST_HEAD(&conf->pending_list); 7240 conf->pending_data = kcalloc(PENDING_IO_MAX, 7241 sizeof(struct r5pending_data), 7242 GFP_KERNEL); 7243 if (!conf->pending_data) 7244 goto abort; 7245 for (i = 0; i < PENDING_IO_MAX; i++) 7246 list_add(&conf->pending_data[i].sibling, &conf->free_list); 7247 /* Don't enable multi-threading by default*/ 7248 if (!alloc_thread_groups(conf, 0, &group_cnt, &new_group)) { 7249 conf->group_cnt = group_cnt; 7250 conf->worker_cnt_per_group = 0; 7251 conf->worker_groups = new_group; 7252 } else 7253 goto abort; 7254 spin_lock_init(&conf->device_lock); 7255 seqcount_spinlock_init(&conf->gen_lock, &conf->device_lock); 7256 mutex_init(&conf->cache_size_mutex); 7257 7258 init_waitqueue_head(&conf->wait_for_quiescent); 7259 init_waitqueue_head(&conf->wait_for_stripe); 7260 init_waitqueue_head(&conf->wait_for_overlap); 7261 INIT_LIST_HEAD(&conf->handle_list); 7262 INIT_LIST_HEAD(&conf->loprio_list); 7263 INIT_LIST_HEAD(&conf->hold_list); 7264 INIT_LIST_HEAD(&conf->delayed_list); 7265 INIT_LIST_HEAD(&conf->bitmap_list); 7266 init_llist_head(&conf->released_stripes); 7267 atomic_set(&conf->active_stripes, 0); 7268 atomic_set(&conf->preread_active_stripes, 0); 7269 atomic_set(&conf->active_aligned_reads, 0); 7270 spin_lock_init(&conf->pending_bios_lock); 7271 conf->batch_bio_dispatch = true; 7272 rdev_for_each(rdev, mddev) { 7273 if (test_bit(Journal, &rdev->flags)) 7274 continue; 7275 if (bdev_nonrot(rdev->bdev)) { 7276 conf->batch_bio_dispatch = false; 7277 break; 7278 } 7279 } 7280 7281 conf->bypass_threshold = BYPASS_THRESHOLD; 7282 conf->recovery_disabled = mddev->recovery_disabled - 1; 7283 7284 conf->raid_disks = mddev->raid_disks; 7285 if (mddev->reshape_position == MaxSector) 7286 conf->previous_raid_disks = mddev->raid_disks; 7287 else 7288 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; 7289 max_disks = max(conf->raid_disks, conf->previous_raid_disks); 7290 7291 conf->disks = kcalloc(max_disks, sizeof(struct disk_info), 7292 GFP_KERNEL); 7293 7294 if (!conf->disks) 7295 goto abort; 7296 7297 for (i = 0; i < max_disks; i++) { 7298 conf->disks[i].extra_page = alloc_page(GFP_KERNEL); 7299 if (!conf->disks[i].extra_page) 7300 goto abort; 7301 } 7302 7303 ret = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0); 7304 if (ret) 7305 goto abort; 7306 conf->mddev = mddev; 7307 7308 ret = -ENOMEM; 7309 conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL); 7310 if (!conf->stripe_hashtbl) 7311 goto abort; 7312 7313 /* We init hash_locks[0] separately to that it can be used 7314 * as the reference lock in the spin_lock_nest_lock() call 7315 * in lock_all_device_hash_locks_irq in order to convince 7316 * lockdep that we know what we are doing. 
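 * (The hash locks initialised in the loop below all share a single
 * lockdep class, so acquiring every one of them at once would
 * otherwise be reported as recursive locking.)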
7317 */ 7318 spin_lock_init(conf->hash_locks); 7319 for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++) 7320 spin_lock_init(conf->hash_locks + i); 7321 7322 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) 7323 INIT_LIST_HEAD(conf->inactive_list + i); 7324 7325 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) 7326 INIT_LIST_HEAD(conf->temp_inactive_list + i); 7327 7328 atomic_set(&conf->r5c_cached_full_stripes, 0); 7329 INIT_LIST_HEAD(&conf->r5c_full_stripe_list); 7330 atomic_set(&conf->r5c_cached_partial_stripes, 0); 7331 INIT_LIST_HEAD(&conf->r5c_partial_stripe_list); 7332 atomic_set(&conf->r5c_flushing_full_stripes, 0); 7333 atomic_set(&conf->r5c_flushing_partial_stripes, 0); 7334 7335 conf->level = mddev->new_level; 7336 conf->chunk_sectors = mddev->new_chunk_sectors; 7337 ret = raid5_alloc_percpu(conf); 7338 if (ret) 7339 goto abort; 7340 7341 pr_debug("raid456: run(%s) called.\n", mdname(mddev)); 7342 7343 ret = -EIO; 7344 rdev_for_each(rdev, mddev) { 7345 raid_disk = rdev->raid_disk; 7346 if (raid_disk >= max_disks 7347 || raid_disk < 0 || test_bit(Journal, &rdev->flags)) 7348 continue; 7349 disk = conf->disks + raid_disk; 7350 7351 if (test_bit(Replacement, &rdev->flags)) { 7352 if (disk->replacement) 7353 goto abort; 7354 RCU_INIT_POINTER(disk->replacement, rdev); 7355 } else { 7356 if (disk->rdev) 7357 goto abort; 7358 RCU_INIT_POINTER(disk->rdev, rdev); 7359 } 7360 7361 if (test_bit(In_sync, &rdev->flags)) { 7362 pr_info("md/raid:%s: device %pg operational as raid disk %d\n", 7363 mdname(mddev), rdev->bdev, raid_disk); 7364 } else if (rdev->saved_raid_disk != raid_disk) 7365 /* Cannot rely on bitmap to complete recovery */ 7366 conf->fullsync = 1; 7367 } 7368 7369 conf->level = mddev->new_level; 7370 if (conf->level == 6) { 7371 conf->max_degraded = 2; 7372 if (raid6_call.xor_syndrome) 7373 conf->rmw_level = PARITY_ENABLE_RMW; 7374 else 7375 conf->rmw_level = PARITY_DISABLE_RMW; 7376 } else { 7377 conf->max_degraded = 1; 7378 conf->rmw_level = PARITY_ENABLE_RMW; 7379 } 7380 conf->algorithm = mddev->new_layout; 7381 conf->reshape_progress = mddev->reshape_position; 7382 if (conf->reshape_progress != MaxSector) { 7383 conf->prev_chunk_sectors = mddev->chunk_sectors; 7384 conf->prev_algo = mddev->layout; 7385 } else { 7386 conf->prev_chunk_sectors = conf->chunk_sectors; 7387 conf->prev_algo = conf->algorithm; 7388 } 7389 7390 conf->min_nr_stripes = NR_STRIPES; 7391 if (mddev->reshape_position != MaxSector) { 7392 int stripes = max_t(int, 7393 ((mddev->chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4, 7394 ((mddev->new_chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4); 7395 conf->min_nr_stripes = max(NR_STRIPES, stripes); 7396 if (conf->min_nr_stripes != NR_STRIPES) 7397 pr_info("md/raid:%s: force stripe size %d for reshape\n", 7398 mdname(mddev), conf->min_nr_stripes); 7399 } 7400 memory = conf->min_nr_stripes * (sizeof(struct stripe_head) + 7401 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; 7402 atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS); 7403 if (grow_stripes(conf, conf->min_nr_stripes)) { 7404 pr_warn("md/raid:%s: couldn't allocate %dkB for buffers\n", 7405 mdname(mddev), memory); 7406 ret = -ENOMEM; 7407 goto abort; 7408 } else 7409 pr_debug("md/raid:%s: allocated %dkB\n", mdname(mddev), memory); 7410 /* 7411 * Losing a stripe head costs more than the time to refill it, 7412 * it reduces the queue depth and so can hurt throughput. 7413 * So set it rather large, scaled by number of devices. 
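 * A larger ->seeks value makes the shrinker scan, and therefore
 * reclaim, proportionally fewer stripe heads on each pass.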
7414 */ 7415 conf->shrinker.seeks = DEFAULT_SEEKS * conf->raid_disks * 4; 7416 conf->shrinker.scan_objects = raid5_cache_scan; 7417 conf->shrinker.count_objects = raid5_cache_count; 7418 conf->shrinker.batch = 128; 7419 conf->shrinker.flags = 0; 7420 ret = register_shrinker(&conf->shrinker); 7421 if (ret) { 7422 pr_warn("md/raid:%s: couldn't register shrinker.\n", 7423 mdname(mddev)); 7424 goto abort; 7425 } 7426 7427 sprintf(pers_name, "raid%d", mddev->new_level); 7428 conf->thread = md_register_thread(raid5d, mddev, pers_name); 7429 if (!conf->thread) { 7430 pr_warn("md/raid:%s: couldn't allocate thread.\n", 7431 mdname(mddev)); 7432 ret = -ENOMEM; 7433 goto abort; 7434 } 7435 7436 return conf; 7437 7438 abort: 7439 if (conf) 7440 free_conf(conf); 7441 return ERR_PTR(ret); 7442 } 7443 7444 static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded) 7445 { 7446 switch (algo) { 7447 case ALGORITHM_PARITY_0: 7448 if (raid_disk < max_degraded) 7449 return 1; 7450 break; 7451 case ALGORITHM_PARITY_N: 7452 if (raid_disk >= raid_disks - max_degraded) 7453 return 1; 7454 break; 7455 case ALGORITHM_PARITY_0_6: 7456 if (raid_disk == 0 || 7457 raid_disk == raid_disks - 1) 7458 return 1; 7459 break; 7460 case ALGORITHM_LEFT_ASYMMETRIC_6: 7461 case ALGORITHM_RIGHT_ASYMMETRIC_6: 7462 case ALGORITHM_LEFT_SYMMETRIC_6: 7463 case ALGORITHM_RIGHT_SYMMETRIC_6: 7464 if (raid_disk == raid_disks - 1) 7465 return 1; 7466 } 7467 return 0; 7468 } 7469 7470 static void raid5_set_io_opt(struct r5conf *conf) 7471 { 7472 blk_queue_io_opt(conf->mddev->queue, (conf->chunk_sectors << 9) * 7473 (conf->raid_disks - conf->max_degraded)); 7474 } 7475 7476 static int raid5_run(struct mddev *mddev) 7477 { 7478 struct r5conf *conf; 7479 int working_disks = 0; 7480 int dirty_parity_disks = 0; 7481 struct md_rdev *rdev; 7482 struct md_rdev *journal_dev = NULL; 7483 sector_t reshape_offset = 0; 7484 int i, ret = 0; 7485 long long min_offset_diff = 0; 7486 int first = 1; 7487 7488 if (acct_bioset_init(mddev)) { 7489 pr_err("md/raid456:%s: alloc acct bioset failed.\n", mdname(mddev)); 7490 return -ENOMEM; 7491 } 7492 7493 if (mddev_init_writes_pending(mddev) < 0) { 7494 ret = -ENOMEM; 7495 goto exit_acct_set; 7496 } 7497 7498 if (mddev->recovery_cp != MaxSector) 7499 pr_notice("md/raid:%s: not clean -- starting background reconstruction\n", 7500 mdname(mddev)); 7501 7502 rdev_for_each(rdev, mddev) { 7503 long long diff; 7504 7505 if (test_bit(Journal, &rdev->flags)) { 7506 journal_dev = rdev; 7507 continue; 7508 } 7509 if (rdev->raid_disk < 0) 7510 continue; 7511 diff = (rdev->new_data_offset - rdev->data_offset); 7512 if (first) { 7513 min_offset_diff = diff; 7514 first = 0; 7515 } else if (mddev->reshape_backwards && 7516 diff < min_offset_diff) 7517 min_offset_diff = diff; 7518 else if (!mddev->reshape_backwards && 7519 diff > min_offset_diff) 7520 min_offset_diff = diff; 7521 } 7522 7523 if ((test_bit(MD_HAS_JOURNAL, &mddev->flags) || journal_dev) && 7524 (mddev->bitmap_info.offset || mddev->bitmap_info.file)) { 7525 pr_notice("md/raid:%s: array cannot have both journal and bitmap\n", 7526 mdname(mddev)); 7527 ret = -EINVAL; 7528 goto exit_acct_set; 7529 } 7530 7531 if (mddev->reshape_position != MaxSector) { 7532 /* Check that we can continue the reshape. 7533 * Difficulties arise if the stripe we would write to 7534 * next is at or after the stripe we would read from next. 
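* (If that were allowed, the reshape could overwrite stripes that have not been relocated yet.)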
7535 * For a reshape that changes the number of devices, this 7536 * is only possible for a very short time, and mdadm makes 7537 * sure that time appears to have passed before assembling 7538 * the array. So we fail if that time hasn't passed. 7539 * For a reshape that keeps the number of devices the same 7540 * mdadm must be monitoring the reshape and keeping the 7541 * critical areas read-only and backed up. It will start 7542 * the array in read-only mode, so we check for that. 7543 */ 7544 sector_t here_new, here_old; 7545 int old_disks; 7546 int max_degraded = (mddev->level == 6 ? 2 : 1); 7547 int chunk_sectors; 7548 int new_data_disks; 7549 7550 if (journal_dev) { 7551 pr_warn("md/raid:%s: don't support reshape with journal - aborting.\n", 7552 mdname(mddev)); 7553 ret = -EINVAL; 7554 goto exit_acct_set; 7555 } 7556 7557 if (mddev->new_level != mddev->level) { 7558 pr_warn("md/raid:%s: unsupported reshape required - aborting.\n", 7559 mdname(mddev)); 7560 ret = -EINVAL; 7561 goto exit_acct_set; 7562 } 7563 old_disks = mddev->raid_disks - mddev->delta_disks; 7564 /* reshape_position must be on a new-stripe boundary, and one 7565 * further up in new geometry must map after here in old 7566 * geometry. 7567 * If the chunk sizes are different, then as we perform reshape 7568 * in units of the largest of the two, reshape_position needs 7569 * to be a multiple of the largest chunk size times new data disks. 7570 */ 7571 here_new = mddev->reshape_position; 7572 chunk_sectors = max(mddev->chunk_sectors, mddev->new_chunk_sectors); 7573 new_data_disks = mddev->raid_disks - max_degraded; 7574 if (sector_div(here_new, chunk_sectors * new_data_disks)) { 7575 pr_warn("md/raid:%s: reshape_position not on a stripe boundary\n", 7576 mdname(mddev)); 7577 ret = -EINVAL; 7578 goto exit_acct_set; 7579 } 7580 reshape_offset = here_new * chunk_sectors; 7581 /* here_new is the stripe we will write to */ 7582 here_old = mddev->reshape_position; 7583 sector_div(here_old, chunk_sectors * (old_disks-max_degraded)); 7584 /* here_old is the first stripe that we might need to read 7585 * from */ 7586 if (mddev->delta_disks == 0) { 7587 /* We cannot be sure it is safe to start an in-place 7588 * reshape. It is only safe if user-space is monitoring 7589 * and taking constant backups. 7590 * mdadm always starts a situation like this in 7591 * readonly mode so it can take control before 7592 * allowing any writes. So just check for that. 7593 */ 7594 if (abs(min_offset_diff) >= mddev->chunk_sectors && 7595 abs(min_offset_diff) >= mddev->new_chunk_sectors) 7596 /* not really in-place - so OK */; 7597 else if (mddev->ro == 0) { 7598 pr_warn("md/raid:%s: in-place reshape must be started in read-only mode - aborting\n", 7599 mdname(mddev)); 7600 ret = -EINVAL; 7601 goto exit_acct_set; 7602 } 7603 } else if (mddev->reshape_backwards 7604 ?
(here_new * chunk_sectors + min_offset_diff <= 7605 here_old * chunk_sectors) 7606 : (here_new * chunk_sectors >= 7607 here_old * chunk_sectors + (-min_offset_diff))) { 7608 /* Reading from the same stripe as writing to - bad */ 7609 pr_warn("md/raid:%s: reshape_position too early for auto-recovery - aborting.\n", 7610 mdname(mddev)); 7611 ret = -EINVAL; 7612 goto exit_acct_set; 7613 } 7614 pr_debug("md/raid:%s: reshape will continue\n", mdname(mddev)); 7615 /* OK, we should be able to continue; */ 7616 } else { 7617 BUG_ON(mddev->level != mddev->new_level); 7618 BUG_ON(mddev->layout != mddev->new_layout); 7619 BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors); 7620 BUG_ON(mddev->delta_disks != 0); 7621 } 7622 7623 if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && 7624 test_bit(MD_HAS_PPL, &mddev->flags)) { 7625 pr_warn("md/raid:%s: using journal device and PPL not allowed - disabling PPL\n", 7626 mdname(mddev)); 7627 clear_bit(MD_HAS_PPL, &mddev->flags); 7628 clear_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags); 7629 } 7630 7631 if (mddev->private == NULL) 7632 conf = setup_conf(mddev); 7633 else 7634 conf = mddev->private; 7635 7636 if (IS_ERR(conf)) { 7637 ret = PTR_ERR(conf); 7638 goto exit_acct_set; 7639 } 7640 7641 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) { 7642 if (!journal_dev) { 7643 pr_warn("md/raid:%s: journal disk is missing, force array readonly\n", 7644 mdname(mddev)); 7645 mddev->ro = 1; 7646 set_disk_ro(mddev->gendisk, 1); 7647 } else if (mddev->recovery_cp == MaxSector) 7648 set_bit(MD_JOURNAL_CLEAN, &mddev->flags); 7649 } 7650 7651 conf->min_offset_diff = min_offset_diff; 7652 mddev->thread = conf->thread; 7653 conf->thread = NULL; 7654 mddev->private = conf; 7655 7656 for (i = 0; i < conf->raid_disks && conf->previous_raid_disks; 7657 i++) { 7658 rdev = rdev_mdlock_deref(mddev, conf->disks[i].rdev); 7659 if (!rdev && conf->disks[i].replacement) { 7660 /* The replacement is all we have yet */ 7661 rdev = rdev_mdlock_deref(mddev, 7662 conf->disks[i].replacement); 7663 conf->disks[i].replacement = NULL; 7664 clear_bit(Replacement, &rdev->flags); 7665 rcu_assign_pointer(conf->disks[i].rdev, rdev); 7666 } 7667 if (!rdev) 7668 continue; 7669 if (rcu_access_pointer(conf->disks[i].replacement) && 7670 conf->reshape_progress != MaxSector) { 7671 /* replacements and reshape simply do not mix. */ 7672 pr_warn("md: cannot handle concurrent replacement and reshape.\n"); 7673 goto abort; 7674 } 7675 if (test_bit(In_sync, &rdev->flags)) { 7676 working_disks++; 7677 continue; 7678 } 7679 /* This disc is not fully in-sync. However, if it 7680 * just stored parity (beyond the recovery_offset), 7681 * then we don't need to be concerned about the 7682 * array being dirty. 7683 * When reshape goes 'backwards', we never have 7684 * partially completed devices, so we only need 7685 * to worry about reshape going forwards. 7686 */ 7687 /* Hack because v0.91 doesn't store recovery_offset properly. */ 7688 if (mddev->major_version == 0 && 7689 mddev->minor_version > 90) 7690 rdev->recovery_offset = reshape_offset; 7691 7692 if (rdev->recovery_offset < reshape_offset) { 7693 /* We need to check old and new layout */ 7694 if (!only_parity(rdev->raid_disk, 7695 conf->algorithm, 7696 conf->raid_disks, 7697 conf->max_degraded)) 7698 continue; 7699 } 7700 if (!only_parity(rdev->raid_disk, 7701 conf->prev_algo, 7702 conf->previous_raid_disks, 7703 conf->max_degraded)) 7704 continue; 7705 dirty_parity_disks++; 7706 } 7707 7708 /* 7709 * 0 for a fully functional array, 1 or 2 for a degraded array.
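* (raid4/5 can operate with at most one missing device and raid6 with at most two, which is where the 1 or 2 comes from.)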
7710 */ 7711 mddev->degraded = raid5_calc_degraded(conf); 7712 7713 if (has_failed(conf)) { 7714 pr_crit("md/raid:%s: not enough operational devices (%d/%d failed)\n", 7715 mdname(mddev), mddev->degraded, conf->raid_disks); 7716 goto abort; 7717 } 7718 7719 /* device size must be a multiple of chunk size */ 7720 mddev->dev_sectors &= ~((sector_t)mddev->chunk_sectors - 1); 7721 mddev->resync_max_sectors = mddev->dev_sectors; 7722 7723 if (mddev->degraded > dirty_parity_disks && 7724 mddev->recovery_cp != MaxSector) { 7725 if (test_bit(MD_HAS_PPL, &mddev->flags)) 7726 pr_crit("md/raid:%s: starting dirty degraded array with PPL.\n", 7727 mdname(mddev)); 7728 else if (mddev->ok_start_degraded) 7729 pr_crit("md/raid:%s: starting dirty degraded array - data corruption possible.\n", 7730 mdname(mddev)); 7731 else { 7732 pr_crit("md/raid:%s: cannot start dirty degraded array.\n", 7733 mdname(mddev)); 7734 goto abort; 7735 } 7736 } 7737 7738 pr_info("md/raid:%s: raid level %d active with %d out of %d devices, algorithm %d\n", 7739 mdname(mddev), conf->level, 7740 mddev->raid_disks-mddev->degraded, mddev->raid_disks, 7741 mddev->new_layout); 7742 7743 print_raid5_conf(conf); 7744 7745 if (conf->reshape_progress != MaxSector) { 7746 conf->reshape_safe = conf->reshape_progress; 7747 atomic_set(&conf->reshape_stripes, 0); 7748 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 7749 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 7750 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 7751 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 7752 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 7753 "reshape"); 7754 if (!mddev->sync_thread) 7755 goto abort; 7756 } 7757 7758 /* Ok, everything is just fine now */ 7759 if (mddev->to_remove == &raid5_attrs_group) 7760 mddev->to_remove = NULL; 7761 else if (mddev->kobj.sd && 7762 sysfs_create_group(&mddev->kobj, &raid5_attrs_group)) 7763 pr_warn("raid5: failed to create sysfs attributes for %s\n", 7764 mdname(mddev)); 7765 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); 7766 7767 if (mddev->queue) { 7768 int chunk_size; 7769 /* read-ahead size must cover two whole stripes, which 7770 * is 2 * (datadisks) * chunksize, where 'datadisks' is the 7771 * number of raid devices minus the number of parity devices 7772 */ 7773 int data_disks = conf->previous_raid_disks - conf->max_degraded; 7774 int stripe = data_disks * 7775 ((mddev->chunk_sectors << 9) / PAGE_SIZE); 7776 7777 chunk_size = mddev->chunk_sectors << 9; 7778 blk_queue_io_min(mddev->queue, chunk_size); 7779 raid5_set_io_opt(conf); 7780 mddev->queue->limits.raid_partial_stripes_expensive = 1; 7781 /* 7782 * We can only discard a whole stripe. It doesn't make sense to 7783 * discard a data disk but write the parity disk 7784 */ 7785 stripe = stripe * PAGE_SIZE; 7786 stripe = roundup_pow_of_two(stripe); 7787 mddev->queue->limits.discard_granularity = stripe; 7788 7789 blk_queue_max_write_zeroes_sectors(mddev->queue, 0); 7790 7791 rdev_for_each(rdev, mddev) { 7792 disk_stack_limits(mddev->gendisk, rdev->bdev, 7793 rdev->data_offset << 9); 7794 disk_stack_limits(mddev->gendisk, rdev->bdev, 7795 rdev->new_data_offset << 9); 7796 } 7797 7798 /* 7799 * zeroing is required, otherwise data 7800 * could be lost. Consider a scenario: discard a stripe 7801 * (the stripe could be inconsistent if 7802 * discard_zeroes_data is 0); write one disk of the 7803 * stripe (the stripe could be inconsistent again 7804 * depending on which disks are used to calculate 7805 * parity); the disk is broken; the stripe data of this 7806 * disk is lost.
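* In short, an unreliable discard followed by a partial-stripe write can leave parity that no longer matches the data, so a later device failure would reconstruct garbage for the lost disk.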
7807 * 7808 * We only allow DISCARD if the sysadmin has confirmed that 7809 * only safe devices are in use by setting a module parameter. 7810 * A better idea might be to turn DISCARD into WRITE_ZEROES 7811 * requests, as that is required to be safe. 7812 */ 7813 if (!devices_handle_discard_safely || 7814 mddev->queue->limits.max_discard_sectors < (stripe >> 9) || 7815 mddev->queue->limits.discard_granularity < stripe) 7816 blk_queue_max_discard_sectors(mddev->queue, 0); 7817 7818 blk_queue_max_hw_sectors(mddev->queue, UINT_MAX); 7819 } 7820 7821 if (log_init(conf, journal_dev, raid5_has_ppl(conf))) 7822 goto abort; 7823 7824 return 0; 7825 abort: 7826 md_unregister_thread(&mddev->thread); 7827 print_raid5_conf(conf); 7828 free_conf(conf); 7829 mddev->private = NULL; 7830 pr_warn("md/raid:%s: failed to run raid set.\n", mdname(mddev)); 7831 ret = -EIO; 7832 exit_acct_set: 7833 acct_bioset_exit(mddev); 7834 return ret; 7835 } 7836 7837 static void raid5_free(struct mddev *mddev, void *priv) 7838 { 7839 struct r5conf *conf = priv; 7840 7841 free_conf(conf); 7842 acct_bioset_exit(mddev); 7843 mddev->to_remove = &raid5_attrs_group; 7844 } 7845 7846 static void raid5_status(struct seq_file *seq, struct mddev *mddev) 7847 { 7848 struct r5conf *conf = mddev->private; 7849 int i; 7850 7851 seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level, 7852 conf->chunk_sectors / 2, mddev->layout); 7853 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); 7854 rcu_read_lock(); 7855 for (i = 0; i < conf->raid_disks; i++) { 7856 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); 7857 seq_printf (seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_"); 7858 } 7859 rcu_read_unlock(); 7860 seq_printf (seq, "]"); 7861 } 7862 7863 static void print_raid5_conf (struct r5conf *conf) 7864 { 7865 struct md_rdev *rdev; 7866 int i; 7867 7868 pr_debug("RAID conf printout:\n"); 7869 if (!conf) { 7870 pr_debug("(conf==NULL)\n"); 7871 return; 7872 } 7873 pr_debug(" --- level:%d rd:%d wd:%d\n", conf->level, 7874 conf->raid_disks, 7875 conf->raid_disks - conf->mddev->degraded); 7876 7877 rcu_read_lock(); 7878 for (i = 0; i < conf->raid_disks; i++) { 7879 rdev = rcu_dereference(conf->disks[i].rdev); 7880 if (rdev) 7881 pr_debug(" disk %d, o:%d, dev:%pg\n", 7882 i, !test_bit(Faulty, &rdev->flags), 7883 rdev->bdev); 7884 } 7885 rcu_read_unlock(); 7886 } 7887 7888 static int raid5_spare_active(struct mddev *mddev) 7889 { 7890 int i; 7891 struct r5conf *conf = mddev->private; 7892 struct md_rdev *rdev, *replacement; 7893 int count = 0; 7894 unsigned long flags; 7895 7896 for (i = 0; i < conf->raid_disks; i++) { 7897 rdev = rdev_mdlock_deref(mddev, conf->disks[i].rdev); 7898 replacement = rdev_mdlock_deref(mddev, 7899 conf->disks[i].replacement); 7900 if (replacement 7901 && replacement->recovery_offset == MaxSector 7902 && !test_bit(Faulty, &replacement->flags) 7903 && !test_and_set_bit(In_sync, &replacement->flags)) { 7904 /* Replacement has just become active. */ 7905 if (!rdev 7906 || !test_and_clear_bit(In_sync, &rdev->flags)) 7907 count++; 7908 if (rdev) { 7909 /* Replaced device not technically faulty, 7910 * but we need to be sure it gets removed 7911 * and never re-added. 
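* (Marking it Faulty makes it go through the normal device removal path instead of ever being treated as an in-sync member again.)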
7912 */ 7913 set_bit(Faulty, &rdev->flags); 7914 sysfs_notify_dirent_safe( 7915 rdev->sysfs_state); 7916 } 7917 sysfs_notify_dirent_safe(replacement->sysfs_state); 7918 } else if (rdev 7919 && rdev->recovery_offset == MaxSector 7920 && !test_bit(Faulty, &rdev->flags) 7921 && !test_and_set_bit(In_sync, &rdev->flags)) { 7922 count++; 7923 sysfs_notify_dirent_safe(rdev->sysfs_state); 7924 } 7925 } 7926 spin_lock_irqsave(&conf->device_lock, flags); 7927 mddev->degraded = raid5_calc_degraded(conf); 7928 spin_unlock_irqrestore(&conf->device_lock, flags); 7929 print_raid5_conf(conf); 7930 return count; 7931 } 7932 7933 static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev) 7934 { 7935 struct r5conf *conf = mddev->private; 7936 int err = 0; 7937 int number = rdev->raid_disk; 7938 struct md_rdev __rcu **rdevp; 7939 struct disk_info *p; 7940 struct md_rdev *tmp; 7941 7942 print_raid5_conf(conf); 7943 if (test_bit(Journal, &rdev->flags) && conf->log) { 7944 /* 7945 * we can't wait for pending writes here, as this is called in 7946 * raid5d; waiting would deadlock. 7947 * neilb: there is no locking about new writes here, 7948 * so this cannot be safe. 7949 */ 7950 if (atomic_read(&conf->active_stripes) || 7951 atomic_read(&conf->r5c_cached_full_stripes) || 7952 atomic_read(&conf->r5c_cached_partial_stripes)) { 7953 return -EBUSY; 7954 } 7955 log_exit(conf); 7956 return 0; 7957 } 7958 if (unlikely(number >= conf->pool_size)) 7959 return 0; 7960 p = conf->disks + number; 7961 if (rdev == rcu_access_pointer(p->rdev)) 7962 rdevp = &p->rdev; 7963 else if (rdev == rcu_access_pointer(p->replacement)) 7964 rdevp = &p->replacement; 7965 else 7966 return 0; 7967 7968 if (number >= conf->raid_disks && 7969 conf->reshape_progress == MaxSector) 7970 clear_bit(In_sync, &rdev->flags); 7971 7972 if (test_bit(In_sync, &rdev->flags) || 7973 atomic_read(&rdev->nr_pending)) { 7974 err = -EBUSY; 7975 goto abort; 7976 } 7977 /* Only remove non-faulty devices if recovery 7978 * isn't possible.
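* (In practice a working device is only allowed to go when recovery is already known to be impossible, the array has failed anyway, a separate replacement is taking over the slot, or the slot lies beyond the active raid_disks range.)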
7979 */ 7980 if (!test_bit(Faulty, &rdev->flags) && 7981 mddev->recovery_disabled != conf->recovery_disabled && 7982 !has_failed(conf) && 7983 (!rcu_access_pointer(p->replacement) || 7984 rcu_access_pointer(p->replacement) == rdev) && 7985 number < conf->raid_disks) { 7986 err = -EBUSY; 7987 goto abort; 7988 } 7989 *rdevp = NULL; 7990 if (!test_bit(RemoveSynchronized, &rdev->flags)) { 7991 lockdep_assert_held(&mddev->reconfig_mutex); 7992 synchronize_rcu(); 7993 if (atomic_read(&rdev->nr_pending)) { 7994 /* lost the race, try later */ 7995 err = -EBUSY; 7996 rcu_assign_pointer(*rdevp, rdev); 7997 } 7998 } 7999 if (!err) { 8000 err = log_modify(conf, rdev, false); 8001 if (err) 8002 goto abort; 8003 } 8004 8005 tmp = rcu_access_pointer(p->replacement); 8006 if (tmp) { 8007 /* We must have just cleared 'rdev' */ 8008 rcu_assign_pointer(p->rdev, tmp); 8009 clear_bit(Replacement, &tmp->flags); 8010 smp_mb(); /* Make sure other CPUs may see both as identical 8011 * but will never see neither - if they are careful 8012 */ 8013 rcu_assign_pointer(p->replacement, NULL); 8014 8015 if (!err) 8016 err = log_modify(conf, tmp, true); 8017 } 8018 8019 clear_bit(WantReplacement, &rdev->flags); 8020 abort: 8021 8022 print_raid5_conf(conf); 8023 return err; 8024 } 8025 8026 static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) 8027 { 8028 struct r5conf *conf = mddev->private; 8029 int ret, err = -EEXIST; 8030 int disk; 8031 struct disk_info *p; 8032 struct md_rdev *tmp; 8033 int first = 0; 8034 int last = conf->raid_disks - 1; 8035 8036 if (test_bit(Journal, &rdev->flags)) { 8037 if (conf->log) 8038 return -EBUSY; 8039 8040 rdev->raid_disk = 0; 8041 /* 8042 * The array is in readonly mode if journal is missing, so no 8043 * write requests running. We should be safe 8044 */ 8045 ret = log_init(conf, rdev, false); 8046 if (ret) 8047 return ret; 8048 8049 ret = r5l_start(conf->log); 8050 if (ret) 8051 return ret; 8052 8053 return 0; 8054 } 8055 if (mddev->recovery_disabled == conf->recovery_disabled) 8056 return -EBUSY; 8057 8058 if (rdev->saved_raid_disk < 0 && has_failed(conf)) 8059 /* no point adding a device */ 8060 return -EINVAL; 8061 8062 if (rdev->raid_disk >= 0) 8063 first = last = rdev->raid_disk; 8064 8065 /* 8066 * find the disk ... but prefer rdev->saved_raid_disk 8067 * if possible. 
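* (saved_raid_disk records the slot the device occupied before it was removed; re-adding it there lets a bitmap-based recovery resync only the regions that changed in the meantime.)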
8068 */ 8069 if (rdev->saved_raid_disk >= 0 && 8070 rdev->saved_raid_disk >= first && 8071 rdev->saved_raid_disk <= last && 8072 conf->disks[rdev->saved_raid_disk].rdev == NULL) 8073 first = rdev->saved_raid_disk; 8074 8075 for (disk = first; disk <= last; disk++) { 8076 p = conf->disks + disk; 8077 if (p->rdev == NULL) { 8078 clear_bit(In_sync, &rdev->flags); 8079 rdev->raid_disk = disk; 8080 if (rdev->saved_raid_disk != disk) 8081 conf->fullsync = 1; 8082 rcu_assign_pointer(p->rdev, rdev); 8083 8084 err = log_modify(conf, rdev, true); 8085 8086 goto out; 8087 } 8088 } 8089 for (disk = first; disk <= last; disk++) { 8090 p = conf->disks + disk; 8091 tmp = rdev_mdlock_deref(mddev, p->rdev); 8092 if (test_bit(WantReplacement, &tmp->flags) && 8093 p->replacement == NULL) { 8094 clear_bit(In_sync, &rdev->flags); 8095 set_bit(Replacement, &rdev->flags); 8096 rdev->raid_disk = disk; 8097 err = 0; 8098 conf->fullsync = 1; 8099 rcu_assign_pointer(p->replacement, rdev); 8100 break; 8101 } 8102 } 8103 out: 8104 print_raid5_conf(conf); 8105 return err; 8106 } 8107 8108 static int raid5_resize(struct mddev *mddev, sector_t sectors) 8109 { 8110 /* no resync is happening, and there is enough space 8111 * on all devices, so we can resize. 8112 * We need to make sure resync covers any new space. 8113 * If the array is shrinking we should possibly wait until 8114 * any io in the removed space completes, but it hardly seems 8115 * worth it. 8116 */ 8117 sector_t newsize; 8118 struct r5conf *conf = mddev->private; 8119 8120 if (raid5_has_log(conf) || raid5_has_ppl(conf)) 8121 return -EINVAL; 8122 sectors &= ~((sector_t)conf->chunk_sectors - 1); 8123 newsize = raid5_size(mddev, sectors, mddev->raid_disks); 8124 if (mddev->external_size && 8125 mddev->array_sectors > newsize) 8126 return -EINVAL; 8127 if (mddev->bitmap) { 8128 int ret = md_bitmap_resize(mddev->bitmap, sectors, 0, 0); 8129 if (ret) 8130 return ret; 8131 } 8132 md_set_array_sectors(mddev, newsize); 8133 if (sectors > mddev->dev_sectors && 8134 mddev->recovery_cp > mddev->dev_sectors) { 8135 mddev->recovery_cp = mddev->dev_sectors; 8136 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 8137 } 8138 mddev->dev_sectors = sectors; 8139 mddev->resync_max_sectors = sectors; 8140 return 0; 8141 } 8142 8143 static int check_stripe_cache(struct mddev *mddev) 8144 { 8145 /* Can only proceed if there are plenty of stripe_heads. 8146 * We need a minimum of one full stripe,, and for sensible progress 8147 * it is best to have about 4 times that. 8148 * If we require 4 times, then the default 256 4K stripe_heads will 8149 * allow for chunk sizes up to 256K, which is probably OK. 8150 * If the chunk size is greater, user-space should request more 8151 * stripe_heads first. 8152 */ 8153 struct r5conf *conf = mddev->private; 8154 if (((mddev->chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4 8155 > conf->min_nr_stripes || 8156 ((mddev->new_chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4 8157 > conf->min_nr_stripes) { 8158 pr_warn("md/raid:%s: reshape: not enough stripes. 
Needed %lu\n", 8159 mdname(mddev), 8160 ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9) 8161 / RAID5_STRIPE_SIZE(conf))*4); 8162 return 0; 8163 } 8164 return 1; 8165 } 8166 8167 static int check_reshape(struct mddev *mddev) 8168 { 8169 struct r5conf *conf = mddev->private; 8170 8171 if (raid5_has_log(conf) || raid5_has_ppl(conf)) 8172 return -EINVAL; 8173 if (mddev->delta_disks == 0 && 8174 mddev->new_layout == mddev->layout && 8175 mddev->new_chunk_sectors == mddev->chunk_sectors) 8176 return 0; /* nothing to do */ 8177 if (has_failed(conf)) 8178 return -EINVAL; 8179 if (mddev->delta_disks < 0 && mddev->reshape_position == MaxSector) { 8180 /* We might be able to shrink, but the devices must 8181 * be made bigger first. 8182 * For raid6, 4 is the minimum size. 8183 * Otherwise 2 is the minimum 8184 */ 8185 int min = 2; 8186 if (mddev->level == 6) 8187 min = 4; 8188 if (mddev->raid_disks + mddev->delta_disks < min) 8189 return -EINVAL; 8190 } 8191 8192 if (!check_stripe_cache(mddev)) 8193 return -ENOSPC; 8194 8195 if (mddev->new_chunk_sectors > mddev->chunk_sectors || 8196 mddev->delta_disks > 0) 8197 if (resize_chunks(conf, 8198 conf->previous_raid_disks 8199 + max(0, mddev->delta_disks), 8200 max(mddev->new_chunk_sectors, 8201 mddev->chunk_sectors) 8202 ) < 0) 8203 return -ENOMEM; 8204 8205 if (conf->previous_raid_disks + mddev->delta_disks <= conf->pool_size) 8206 return 0; /* never bother to shrink */ 8207 return resize_stripes(conf, (conf->previous_raid_disks 8208 + mddev->delta_disks)); 8209 } 8210 8211 static int raid5_start_reshape(struct mddev *mddev) 8212 { 8213 struct r5conf *conf = mddev->private; 8214 struct md_rdev *rdev; 8215 int spares = 0; 8216 unsigned long flags; 8217 8218 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 8219 return -EBUSY; 8220 8221 if (!check_stripe_cache(mddev)) 8222 return -ENOSPC; 8223 8224 if (has_failed(conf)) 8225 return -EINVAL; 8226 8227 rdev_for_each(rdev, mddev) { 8228 if (!test_bit(In_sync, &rdev->flags) 8229 && !test_bit(Faulty, &rdev->flags)) 8230 spares++; 8231 } 8232 8233 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) 8234 /* Not enough devices even to make a degraded array 8235 * of that size 8236 */ 8237 return -EINVAL; 8238 8239 /* Refuse to reduce size of the array. Any reductions in 8240 * array size must be through explicit setting of array_size 8241 * attribute. 8242 */ 8243 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks) 8244 < mddev->array_sectors) { 8245 pr_warn("md/raid:%s: array size must be reduced before number of disks\n", 8246 mdname(mddev)); 8247 return -EINVAL; 8248 } 8249 8250 atomic_set(&conf->reshape_stripes, 0); 8251 spin_lock_irq(&conf->device_lock); 8252 write_seqcount_begin(&conf->gen_lock); 8253 conf->previous_raid_disks = conf->raid_disks; 8254 conf->raid_disks += mddev->delta_disks; 8255 conf->prev_chunk_sectors = conf->chunk_sectors; 8256 conf->chunk_sectors = mddev->new_chunk_sectors; 8257 conf->prev_algo = conf->algorithm; 8258 conf->algorithm = mddev->new_layout; 8259 conf->generation++; 8260 /* Code that selects data_offset needs to see the generation update 8261 * if reshape_progress has been set - so a memory barrier needed. 
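* (generation was incremented just above; the smp_mb() that follows orders that store ahead of the reshape_progress update below.)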
8262 */ 8263 smp_mb(); 8264 if (mddev->reshape_backwards) 8265 conf->reshape_progress = raid5_size(mddev, 0, 0); 8266 else 8267 conf->reshape_progress = 0; 8268 conf->reshape_safe = conf->reshape_progress; 8269 write_seqcount_end(&conf->gen_lock); 8270 spin_unlock_irq(&conf->device_lock); 8271 8272 /* Now make sure any requests that proceeded on the assumption 8273 * the reshape wasn't running - like Discard or Read - have 8274 * completed. 8275 */ 8276 mddev_suspend(mddev); 8277 mddev_resume(mddev); 8278 8279 /* Add some new drives, as many as will fit. 8280 * We know there are enough to make the newly sized array work. 8281 * Don't add devices if we are reducing the number of 8282 * devices in the array. This is because it is not possible 8283 * to correctly record the "partially reconstructed" state of 8284 * such devices during the reshape and confusion could result. 8285 */ 8286 if (mddev->delta_disks >= 0) { 8287 rdev_for_each(rdev, mddev) 8288 if (rdev->raid_disk < 0 && 8289 !test_bit(Faulty, &rdev->flags)) { 8290 if (raid5_add_disk(mddev, rdev) == 0) { 8291 if (rdev->raid_disk 8292 >= conf->previous_raid_disks) 8293 set_bit(In_sync, &rdev->flags); 8294 else 8295 rdev->recovery_offset = 0; 8296 8297 /* Failure here is OK */ 8298 sysfs_link_rdev(mddev, rdev); 8299 } 8300 } else if (rdev->raid_disk >= conf->previous_raid_disks 8301 && !test_bit(Faulty, &rdev->flags)) { 8302 /* This is a spare that was manually added */ 8303 set_bit(In_sync, &rdev->flags); 8304 } 8305 8306 /* When a reshape changes the number of devices, 8307 * ->degraded is measured against the larger of the 8308 * pre and post number of devices. 8309 */ 8310 spin_lock_irqsave(&conf->device_lock, flags); 8311 mddev->degraded = raid5_calc_degraded(conf); 8312 spin_unlock_irqrestore(&conf->device_lock, flags); 8313 } 8314 mddev->raid_disks = conf->raid_disks; 8315 mddev->reshape_position = conf->reshape_progress; 8316 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 8317 8318 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 8319 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 8320 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); 8321 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 8322 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 8323 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 8324 "reshape"); 8325 if (!mddev->sync_thread) { 8326 mddev->recovery = 0; 8327 spin_lock_irq(&conf->device_lock); 8328 write_seqcount_begin(&conf->gen_lock); 8329 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; 8330 mddev->new_chunk_sectors = 8331 conf->chunk_sectors = conf->prev_chunk_sectors; 8332 mddev->new_layout = conf->algorithm = conf->prev_algo; 8333 rdev_for_each(rdev, mddev) 8334 rdev->new_data_offset = rdev->data_offset; 8335 smp_wmb(); 8336 conf->generation --; 8337 conf->reshape_progress = MaxSector; 8338 mddev->reshape_position = MaxSector; 8339 write_seqcount_end(&conf->gen_lock); 8340 spin_unlock_irq(&conf->device_lock); 8341 return -EAGAIN; 8342 } 8343 conf->reshape_checkpoint = jiffies; 8344 md_wakeup_thread(mddev->sync_thread); 8345 md_new_event(); 8346 return 0; 8347 } 8348 8349 /* This is called from the reshape thread and should make any 8350 * changes needed in 'conf' 8351 */ 8352 static void end_reshape(struct r5conf *conf) 8353 { 8354 8355 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { 8356 struct md_rdev *rdev; 8357 8358 spin_lock_irq(&conf->device_lock); 8359 conf->previous_raid_disks = conf->raid_disks; 8360 md_finish_reshape(conf->mddev); 8361 smp_wmb(); 8362 
conf->reshape_progress = MaxSector; 8363 conf->mddev->reshape_position = MaxSector; 8364 rdev_for_each(rdev, conf->mddev) 8365 if (rdev->raid_disk >= 0 && 8366 !test_bit(Journal, &rdev->flags) && 8367 !test_bit(In_sync, &rdev->flags)) 8368 rdev->recovery_offset = MaxSector; 8369 spin_unlock_irq(&conf->device_lock); 8370 wake_up(&conf->wait_for_overlap); 8371 8372 if (conf->mddev->queue) 8373 raid5_set_io_opt(conf); 8374 } 8375 } 8376 8377 /* This is called from the raid5d thread with mddev_lock held. 8378 * It makes config changes to the device. 8379 */ 8380 static void raid5_finish_reshape(struct mddev *mddev) 8381 { 8382 struct r5conf *conf = mddev->private; 8383 struct md_rdev *rdev; 8384 8385 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 8386 8387 if (mddev->delta_disks <= 0) { 8388 int d; 8389 spin_lock_irq(&conf->device_lock); 8390 mddev->degraded = raid5_calc_degraded(conf); 8391 spin_unlock_irq(&conf->device_lock); 8392 for (d = conf->raid_disks ; 8393 d < conf->raid_disks - mddev->delta_disks; 8394 d++) { 8395 rdev = rdev_mdlock_deref(mddev, 8396 conf->disks[d].rdev); 8397 if (rdev) 8398 clear_bit(In_sync, &rdev->flags); 8399 rdev = rdev_mdlock_deref(mddev, 8400 conf->disks[d].replacement); 8401 if (rdev) 8402 clear_bit(In_sync, &rdev->flags); 8403 } 8404 } 8405 mddev->layout = conf->algorithm; 8406 mddev->chunk_sectors = conf->chunk_sectors; 8407 mddev->reshape_position = MaxSector; 8408 mddev->delta_disks = 0; 8409 mddev->reshape_backwards = 0; 8410 } 8411 } 8412 8413 static void raid5_quiesce(struct mddev *mddev, int quiesce) 8414 { 8415 struct r5conf *conf = mddev->private; 8416 8417 if (quiesce) { 8418 /* stop all writes */ 8419 lock_all_device_hash_locks_irq(conf); 8420 /* '2' tells resync/reshape to pause so that all 8421 * active stripes can drain 8422 */ 8423 r5c_flush_cache(conf, INT_MAX); 8424 /* need a memory barrier to make sure read_one_chunk() sees 8425 * quiesce started and reverts to slow (locked) path. 
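* (The smp_store_release() below provides that ordering on the writer side: the stores issued above are visible before quiesce reads as 2.)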
8426 */ 8427 smp_store_release(&conf->quiesce, 2); 8428 wait_event_cmd(conf->wait_for_quiescent, 8429 atomic_read(&conf->active_stripes) == 0 && 8430 atomic_read(&conf->active_aligned_reads) == 0, 8431 unlock_all_device_hash_locks_irq(conf), 8432 lock_all_device_hash_locks_irq(conf)); 8433 conf->quiesce = 1; 8434 unlock_all_device_hash_locks_irq(conf); 8435 /* allow reshape to continue */ 8436 wake_up(&conf->wait_for_overlap); 8437 } else { 8438 /* re-enable writes */ 8439 lock_all_device_hash_locks_irq(conf); 8440 conf->quiesce = 0; 8441 wake_up(&conf->wait_for_quiescent); 8442 wake_up(&conf->wait_for_overlap); 8443 unlock_all_device_hash_locks_irq(conf); 8444 } 8445 log_quiesce(conf, quiesce); 8446 } 8447 8448 static void *raid45_takeover_raid0(struct mddev *mddev, int level) 8449 { 8450 struct r0conf *raid0_conf = mddev->private; 8451 sector_t sectors; 8452 8453 /* for raid0 takeover only one zone is supported */ 8454 if (raid0_conf->nr_strip_zones > 1) { 8455 pr_warn("md/raid:%s: cannot takeover raid0 with more than one zone.\n", 8456 mdname(mddev)); 8457 return ERR_PTR(-EINVAL); 8458 } 8459 8460 sectors = raid0_conf->strip_zone[0].zone_end; 8461 sector_div(sectors, raid0_conf->strip_zone[0].nb_dev); 8462 mddev->dev_sectors = sectors; 8463 mddev->new_level = level; 8464 mddev->new_layout = ALGORITHM_PARITY_N; 8465 mddev->new_chunk_sectors = mddev->chunk_sectors; 8466 mddev->raid_disks += 1; 8467 mddev->delta_disks = 1; 8468 /* make sure it will be not marked as dirty */ 8469 mddev->recovery_cp = MaxSector; 8470 8471 return setup_conf(mddev); 8472 } 8473 8474 static void *raid5_takeover_raid1(struct mddev *mddev) 8475 { 8476 int chunksect; 8477 void *ret; 8478 8479 if (mddev->raid_disks != 2 || 8480 mddev->degraded > 1) 8481 return ERR_PTR(-EINVAL); 8482 8483 /* Should check if there are write-behind devices? 
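* (write-behind is a raid1-only feature tied to write-mostly members, so any such setting would stop meaning anything after the conversion to raid5.)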
*/ 8484 8485 chunksect = 64*2; /* 64K by default */ 8486 8487 /* The array must be an exact multiple of chunksize */ 8488 while (chunksect && (mddev->array_sectors & (chunksect-1))) 8489 chunksect >>= 1; 8490 8491 if ((chunksect<<9) < RAID5_STRIPE_SIZE((struct r5conf *)mddev->private)) 8492 /* array size does not allow a suitable chunk size */ 8493 return ERR_PTR(-EINVAL); 8494 8495 mddev->new_level = 5; 8496 mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC; 8497 mddev->new_chunk_sectors = chunksect; 8498 8499 ret = setup_conf(mddev); 8500 if (!IS_ERR(ret)) 8501 mddev_clear_unsupported_flags(mddev, 8502 UNSUPPORTED_MDDEV_FLAGS); 8503 return ret; 8504 } 8505 8506 static void *raid5_takeover_raid6(struct mddev *mddev) 8507 { 8508 int new_layout; 8509 8510 switch (mddev->layout) { 8511 case ALGORITHM_LEFT_ASYMMETRIC_6: 8512 new_layout = ALGORITHM_LEFT_ASYMMETRIC; 8513 break; 8514 case ALGORITHM_RIGHT_ASYMMETRIC_6: 8515 new_layout = ALGORITHM_RIGHT_ASYMMETRIC; 8516 break; 8517 case ALGORITHM_LEFT_SYMMETRIC_6: 8518 new_layout = ALGORITHM_LEFT_SYMMETRIC; 8519 break; 8520 case ALGORITHM_RIGHT_SYMMETRIC_6: 8521 new_layout = ALGORITHM_RIGHT_SYMMETRIC; 8522 break; 8523 case ALGORITHM_PARITY_0_6: 8524 new_layout = ALGORITHM_PARITY_0; 8525 break; 8526 case ALGORITHM_PARITY_N: 8527 new_layout = ALGORITHM_PARITY_N; 8528 break; 8529 default: 8530 return ERR_PTR(-EINVAL); 8531 } 8532 mddev->new_level = 5; 8533 mddev->new_layout = new_layout; 8534 mddev->delta_disks = -1; 8535 mddev->raid_disks -= 1; 8536 return setup_conf(mddev); 8537 } 8538 8539 static int raid5_check_reshape(struct mddev *mddev) 8540 { 8541 /* For a 2-drive array, the layout and chunk size can be changed 8542 * immediately as not restriping is needed. 8543 * For larger arrays we record the new value - after validation 8544 * to be used by a reshape pass. 8545 */ 8546 struct r5conf *conf = mddev->private; 8547 int new_chunk = mddev->new_chunk_sectors; 8548 8549 if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout)) 8550 return -EINVAL; 8551 if (new_chunk > 0) { 8552 if (!is_power_of_2(new_chunk)) 8553 return -EINVAL; 8554 if (new_chunk < (PAGE_SIZE>>9)) 8555 return -EINVAL; 8556 if (mddev->array_sectors & (new_chunk-1)) 8557 /* not factor of array size */ 8558 return -EINVAL; 8559 } 8560 8561 /* They look valid */ 8562 8563 if (mddev->raid_disks == 2) { 8564 /* can make the change immediately */ 8565 if (mddev->new_layout >= 0) { 8566 conf->algorithm = mddev->new_layout; 8567 mddev->layout = mddev->new_layout; 8568 } 8569 if (new_chunk > 0) { 8570 conf->chunk_sectors = new_chunk ; 8571 mddev->chunk_sectors = new_chunk; 8572 } 8573 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 8574 md_wakeup_thread(mddev->thread); 8575 } 8576 return check_reshape(mddev); 8577 } 8578 8579 static int raid6_check_reshape(struct mddev *mddev) 8580 { 8581 int new_chunk = mddev->new_chunk_sectors; 8582 8583 if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout)) 8584 return -EINVAL; 8585 if (new_chunk > 0) { 8586 if (!is_power_of_2(new_chunk)) 8587 return -EINVAL; 8588 if (new_chunk < (PAGE_SIZE >> 9)) 8589 return -EINVAL; 8590 if (mddev->array_sectors & (new_chunk-1)) 8591 /* not factor of array size */ 8592 return -EINVAL; 8593 } 8594 8595 /* They look valid */ 8596 return check_reshape(mddev); 8597 } 8598 8599 static void *raid5_takeover(struct mddev *mddev) 8600 { 8601 /* raid5 can take over: 8602 * raid0 - if there is only one strip zone - make it a raid4 layout 8603 * raid1 - if there are two drives. 
We need to know the chunk size 8604 * raid4 - trivial - just use a raid4 layout. 8605 * raid6 - Providing it is a *_6 layout 8606 */ 8607 if (mddev->level == 0) 8608 return raid45_takeover_raid0(mddev, 5); 8609 if (mddev->level == 1) 8610 return raid5_takeover_raid1(mddev); 8611 if (mddev->level == 4) { 8612 mddev->new_layout = ALGORITHM_PARITY_N; 8613 mddev->new_level = 5; 8614 return setup_conf(mddev); 8615 } 8616 if (mddev->level == 6) 8617 return raid5_takeover_raid6(mddev); 8618 8619 return ERR_PTR(-EINVAL); 8620 } 8621 8622 static void *raid4_takeover(struct mddev *mddev) 8623 { 8624 /* raid4 can take over: 8625 * raid0 - if there is only one strip zone 8626 * raid5 - if layout is right 8627 */ 8628 if (mddev->level == 0) 8629 return raid45_takeover_raid0(mddev, 4); 8630 if (mddev->level == 5 && 8631 mddev->layout == ALGORITHM_PARITY_N) { 8632 mddev->new_layout = 0; 8633 mddev->new_level = 4; 8634 return setup_conf(mddev); 8635 } 8636 return ERR_PTR(-EINVAL); 8637 } 8638 8639 static struct md_personality raid5_personality; 8640 8641 static void *raid6_takeover(struct mddev *mddev) 8642 { 8643 /* Currently can only take over a raid5. We map the 8644 * personality to an equivalent raid6 personality 8645 * with the Q block at the end. 8646 */ 8647 int new_layout; 8648 8649 if (mddev->pers != &raid5_personality) 8650 return ERR_PTR(-EINVAL); 8651 if (mddev->degraded > 1) 8652 return ERR_PTR(-EINVAL); 8653 if (mddev->raid_disks > 253) 8654 return ERR_PTR(-EINVAL); 8655 if (mddev->raid_disks < 3) 8656 return ERR_PTR(-EINVAL); 8657 8658 switch (mddev->layout) { 8659 case ALGORITHM_LEFT_ASYMMETRIC: 8660 new_layout = ALGORITHM_LEFT_ASYMMETRIC_6; 8661 break; 8662 case ALGORITHM_RIGHT_ASYMMETRIC: 8663 new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6; 8664 break; 8665 case ALGORITHM_LEFT_SYMMETRIC: 8666 new_layout = ALGORITHM_LEFT_SYMMETRIC_6; 8667 break; 8668 case ALGORITHM_RIGHT_SYMMETRIC: 8669 new_layout = ALGORITHM_RIGHT_SYMMETRIC_6; 8670 break; 8671 case ALGORITHM_PARITY_0: 8672 new_layout = ALGORITHM_PARITY_0_6; 8673 break; 8674 case ALGORITHM_PARITY_N: 8675 new_layout = ALGORITHM_PARITY_N; 8676 break; 8677 default: 8678 return ERR_PTR(-EINVAL); 8679 } 8680 mddev->new_level = 6; 8681 mddev->new_layout = new_layout; 8682 mddev->delta_disks = 1; 8683 mddev->raid_disks += 1; 8684 return setup_conf(mddev); 8685 } 8686 8687 static int raid5_change_consistency_policy(struct mddev *mddev, const char *buf) 8688 { 8689 struct r5conf *conf; 8690 int err; 8691 8692 err = mddev_lock(mddev); 8693 if (err) 8694 return err; 8695 conf = mddev->private; 8696 if (!conf) { 8697 mddev_unlock(mddev); 8698 return -ENODEV; 8699 } 8700 8701 if (strncmp(buf, "ppl", 3) == 0) { 8702 /* ppl only works with RAID 5 */ 8703 if (!raid5_has_ppl(conf) && conf->level == 5) { 8704 err = log_init(conf, NULL, true); 8705 if (!err) { 8706 err = resize_stripes(conf, conf->pool_size); 8707 if (err) { 8708 mddev_suspend(mddev); 8709 log_exit(conf); 8710 mddev_resume(mddev); 8711 } 8712 } 8713 } else 8714 err = -EINVAL; 8715 } else if (strncmp(buf, "resync", 6) == 0) { 8716 if (raid5_has_ppl(conf)) { 8717 mddev_suspend(mddev); 8718 log_exit(conf); 8719 mddev_resume(mddev); 8720 err = resize_stripes(conf, conf->pool_size); 8721 } else if (test_bit(MD_HAS_JOURNAL, &conf->mddev->flags) && 8722 r5l_log_disk_error(conf)) { 8723 bool journal_dev_exists = false; 8724 struct md_rdev *rdev; 8725 8726 rdev_for_each(rdev, mddev) 8727 if (test_bit(Journal, &rdev->flags)) { 8728 journal_dev_exists = true; 8729 break; 8730 } 8731 8732 if 
(!journal_dev_exists) { 8733 mddev_suspend(mddev); 8734 clear_bit(MD_HAS_JOURNAL, &mddev->flags); 8735 mddev_resume(mddev); 8736 } else /* need remove journal device first */ 8737 err = -EBUSY; 8738 } else 8739 err = -EINVAL; 8740 } else { 8741 err = -EINVAL; 8742 } 8743 8744 if (!err) 8745 md_update_sb(mddev, 1); 8746 8747 mddev_unlock(mddev); 8748 8749 return err; 8750 } 8751 8752 static int raid5_start(struct mddev *mddev) 8753 { 8754 struct r5conf *conf = mddev->private; 8755 8756 return r5l_start(conf->log); 8757 } 8758 8759 static struct md_personality raid6_personality = 8760 { 8761 .name = "raid6", 8762 .level = 6, 8763 .owner = THIS_MODULE, 8764 .make_request = raid5_make_request, 8765 .run = raid5_run, 8766 .start = raid5_start, 8767 .free = raid5_free, 8768 .status = raid5_status, 8769 .error_handler = raid5_error, 8770 .hot_add_disk = raid5_add_disk, 8771 .hot_remove_disk= raid5_remove_disk, 8772 .spare_active = raid5_spare_active, 8773 .sync_request = raid5_sync_request, 8774 .resize = raid5_resize, 8775 .size = raid5_size, 8776 .check_reshape = raid6_check_reshape, 8777 .start_reshape = raid5_start_reshape, 8778 .finish_reshape = raid5_finish_reshape, 8779 .quiesce = raid5_quiesce, 8780 .takeover = raid6_takeover, 8781 .change_consistency_policy = raid5_change_consistency_policy, 8782 }; 8783 static struct md_personality raid5_personality = 8784 { 8785 .name = "raid5", 8786 .level = 5, 8787 .owner = THIS_MODULE, 8788 .make_request = raid5_make_request, 8789 .run = raid5_run, 8790 .start = raid5_start, 8791 .free = raid5_free, 8792 .status = raid5_status, 8793 .error_handler = raid5_error, 8794 .hot_add_disk = raid5_add_disk, 8795 .hot_remove_disk= raid5_remove_disk, 8796 .spare_active = raid5_spare_active, 8797 .sync_request = raid5_sync_request, 8798 .resize = raid5_resize, 8799 .size = raid5_size, 8800 .check_reshape = raid5_check_reshape, 8801 .start_reshape = raid5_start_reshape, 8802 .finish_reshape = raid5_finish_reshape, 8803 .quiesce = raid5_quiesce, 8804 .takeover = raid5_takeover, 8805 .change_consistency_policy = raid5_change_consistency_policy, 8806 }; 8807 8808 static struct md_personality raid4_personality = 8809 { 8810 .name = "raid4", 8811 .level = 4, 8812 .owner = THIS_MODULE, 8813 .make_request = raid5_make_request, 8814 .run = raid5_run, 8815 .start = raid5_start, 8816 .free = raid5_free, 8817 .status = raid5_status, 8818 .error_handler = raid5_error, 8819 .hot_add_disk = raid5_add_disk, 8820 .hot_remove_disk= raid5_remove_disk, 8821 .spare_active = raid5_spare_active, 8822 .sync_request = raid5_sync_request, 8823 .resize = raid5_resize, 8824 .size = raid5_size, 8825 .check_reshape = raid5_check_reshape, 8826 .start_reshape = raid5_start_reshape, 8827 .finish_reshape = raid5_finish_reshape, 8828 .quiesce = raid5_quiesce, 8829 .takeover = raid4_takeover, 8830 .change_consistency_policy = raid5_change_consistency_policy, 8831 }; 8832 8833 static int __init raid5_init(void) 8834 { 8835 int ret; 8836 8837 raid5_wq = alloc_workqueue("raid5wq", 8838 WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE|WQ_SYSFS, 0); 8839 if (!raid5_wq) 8840 return -ENOMEM; 8841 8842 ret = cpuhp_setup_state_multi(CPUHP_MD_RAID5_PREPARE, 8843 "md/raid5:prepare", 8844 raid456_cpu_up_prepare, 8845 raid456_cpu_dead); 8846 if (ret) { 8847 destroy_workqueue(raid5_wq); 8848 return ret; 8849 } 8850 register_md_personality(&raid6_personality); 8851 register_md_personality(&raid5_personality); 8852 register_md_personality(&raid4_personality); 8853 return 0; 8854 } 8855 8856 static void 
raid5_exit(void) 8857 { 8858 unregister_md_personality(&raid6_personality); 8859 unregister_md_personality(&raid5_personality); 8860 unregister_md_personality(&raid4_personality); 8861 cpuhp_remove_multi_state(CPUHP_MD_RAID5_PREPARE); 8862 destroy_workqueue(raid5_wq); 8863 } 8864 8865 module_init(raid5_init); 8866 module_exit(raid5_exit); 8867 MODULE_LICENSE("GPL"); 8868 MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD"); 8869 MODULE_ALIAS("md-personality-4"); /* RAID5 */ 8870 MODULE_ALIAS("md-raid5"); 8871 MODULE_ALIAS("md-raid4"); 8872 MODULE_ALIAS("md-level-5"); 8873 MODULE_ALIAS("md-level-4"); 8874 MODULE_ALIAS("md-personality-8"); /* RAID6 */ 8875 MODULE_ALIAS("md-raid6"); 8876 MODULE_ALIAS("md-level-6"); 8877 8878 /* This used to be two separate modules, they were: */ 8879 MODULE_ALIAS("raid5"); 8880 MODULE_ALIAS("raid6"); 8881