// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * raid5.c : Multiple Devices driver for Linux
 * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 * Copyright (C) 1999, 2000 Ingo Molnar
 * Copyright (C) 2002, 2003 H. Peter Anvin
 *
 * RAID-4/5/6 management functions.
 * Thanks to Penguin Computing for making the RAID-6 development possible
 * by donating a test server!
 */

/*
 * BITMAP UNPLUGGING:
 *
 * The sequencing for updating the bitmap reliably is a little
 * subtle (and I got it wrong the first time) so it deserves some
 * explanation.
 *
 * We group bitmap updates into batches.  Each batch has a number.
 * We may write out several batches at once, but that isn't very important.
 * conf->seq_write is the number of the last batch successfully written.
 * conf->seq_flush is the number of the last batch that was closed to
 * new additions.
 * When we discover that we will need to write to any block in a stripe
 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
 * the number of the batch it will be in. This is seq_flush+1.
 * When we are ready to do a write, if that batch hasn't been written yet,
 * we plug the array and queue the stripe for later.
 * When an unplug happens, we increment seq_flush, thus closing the current
 * batch.
 * When we notice that seq_flush > seq_write, we write out all pending updates
 * to the bitmap, and advance seq_write to where seq_flush was.
 * This may occasionally write a bit out twice, but is sure never to
 * miss any bits.
 */

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/module.h>
#include <linux/async.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/nodemask.h>

#include <trace/events/block.h>
#include <linux/list_sort.h>

#include "md.h"
#include "raid5.h"
#include "raid0.h"
#include "md-bitmap.h"
#include "raid5-log.h"

#define UNSUPPORTED_MDDEV_FLAGS	(1L << MD_FAILFAST_SUPPORTED)

#define cpu_to_group(cpu) cpu_to_node(cpu)
#define ANY_GROUP NUMA_NO_NODE

static bool devices_handle_discard_safely = false;
module_param(devices_handle_discard_safely, bool, 0644);
MODULE_PARM_DESC(devices_handle_discard_safely,
		 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
static struct workqueue_struct *raid5_wq;

static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
{
	int hash = (sect >> STRIPE_SHIFT) & HASH_MASK;
	return &conf->stripe_hashtbl[hash];
}

static inline int stripe_hash_locks_hash(sector_t sect)
{
	return (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK;
}

static inline void lock_device_hash_lock(struct r5conf *conf, int hash)
{
	spin_lock_irq(conf->hash_locks + hash);
	spin_lock(&conf->device_lock);
}

static inline void unlock_device_hash_lock(struct r5conf *conf, int hash)
{
	spin_unlock(&conf->device_lock);
	spin_unlock_irq(conf->hash_locks + hash);
}

static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
{
	int i;
	spin_lock_irq(conf->hash_locks);
	for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
		spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks);
	spin_lock(&conf->device_lock);
}
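
/*
 * Lock ordering: a stripe's sector selects one of NR_STRIPE_HASH_LOCKS hash
 * lock classes (see stripe_hash_locks_hash()), and conf->device_lock always
 * nests inside the hash lock(s); the helpers above take a single hash lock,
 * or all of them, before acquiring device_lock.
 */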

static inline void
unlock_all_device_hash_locks_irq(struct r5conf *conf)
{
	int i;
	spin_unlock(&conf->device_lock);
	for (i = NR_STRIPE_HASH_LOCKS - 1; i; i--)
		spin_unlock(conf->hash_locks + i);
	spin_unlock_irq(conf->hash_locks);
}

/* Find first data disk in a raid6 stripe */
static inline int raid6_d0(struct stripe_head *sh)
{
	if (sh->ddf_layout)
		/* ddf always starts from first device */
		return 0;
	/* md starts just after Q block */
	if (sh->qd_idx == sh->disks - 1)
		return 0;
	else
		return sh->qd_idx + 1;
}
static inline int raid6_next_disk(int disk, int raid_disks)
{
	disk++;
	return (disk < raid_disks) ? disk : 0;
}

/* When walking through the disks in a raid5, starting at raid6_d0,
 * we need to map each disk to a 'slot', where the data disks are slot
 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
 * is raid_disks-1.  This helper does that mapping.
 */
static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
			     int *count, int syndrome_disks)
{
	int slot = *count;

	if (sh->ddf_layout)
		(*count)++;
	if (idx == sh->pd_idx)
		return syndrome_disks;
	if (idx == sh->qd_idx)
		return syndrome_disks + 1;
	if (!sh->ddf_layout)
		(*count)++;
	return slot;
}

static void print_raid5_conf(struct r5conf *conf);

static int stripe_operations_active(struct stripe_head *sh)
{
	return sh->check_state || sh->reconstruct_state ||
	       test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
	       test_bit(STRIPE_COMPUTE_RUN, &sh->state);
}

static bool stripe_is_lowprio(struct stripe_head *sh)
{
	return (test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state) ||
		test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) &&
	       !test_bit(STRIPE_R5C_CACHING, &sh->state);
}

static void raid5_wakeup_stripe_thread(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	struct r5worker_group *group;
	int thread_cnt;
	int i, cpu = sh->cpu;

	if (!cpu_online(cpu)) {
		cpu = cpumask_any(cpu_online_mask);
		sh->cpu = cpu;
	}

	if (list_empty(&sh->lru)) {
		struct r5worker_group *group;
		group = conf->worker_groups + cpu_to_group(cpu);
		if (stripe_is_lowprio(sh))
			list_add_tail(&sh->lru, &group->loprio_list);
		else
			list_add_tail(&sh->lru, &group->handle_list);
		group->stripes_cnt++;
		sh->group = group;
	}

	if (conf->worker_cnt_per_group == 0) {
		md_wakeup_thread(conf->mddev->thread);
		return;
	}

	group = conf->worker_groups + cpu_to_group(sh->cpu);

	group->workers[0].working = true;
	/* at least one worker should run to avoid race */
	queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work);

	thread_cnt = group->stripes_cnt / MAX_STRIPE_BATCH - 1;
	/* wakeup more workers */
	for (i = 1; i < conf->worker_cnt_per_group && thread_cnt > 0; i++) {
		if (group->workers[i].working == false) {
			group->workers[i].working = true;
			queue_work_on(sh->cpu, raid5_wq,
				      &group->workers[i].work);
			thread_cnt--;
		}
	}
}

static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
			      struct list_head *temp_inactive_list)
{
	int i;
	int injournal = 0;	/* number of data pages with R5_InJournal */

	BUG_ON(!list_empty(&sh->lru));
	BUG_ON(atomic_read(&conf->active_stripes)==0);

	if (r5c_is_writeback(conf->log))
		for (i = sh->disks; i--; )
			if (test_bit(R5_InJournal, &sh->dev[i].flags))
				injournal++;
	/*
	 * In the following cases, the stripe cannot be released to cached
	 * lists. Therefore, we make the stripe write out and set
	 * STRIPE_HANDLE:
	 * 1. when the array is quiesced in r5c write-back mode;
	 * 2. when resync is requested for the stripe.
	 */
	if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) ||
	    (conf->quiesce && r5c_is_writeback(conf->log) &&
	     !test_bit(STRIPE_HANDLE, &sh->state) && injournal != 0)) {
		if (test_bit(STRIPE_R5C_CACHING, &sh->state))
			r5c_make_stripe_write_out(sh);
		set_bit(STRIPE_HANDLE, &sh->state);
	}

	if (test_bit(STRIPE_HANDLE, &sh->state)) {
		if (test_bit(STRIPE_DELAYED, &sh->state) &&
		    !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			list_add_tail(&sh->lru, &conf->delayed_list);
		else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
			 sh->bm_seq - conf->seq_write > 0)
			list_add_tail(&sh->lru, &conf->bitmap_list);
		else {
			clear_bit(STRIPE_DELAYED, &sh->state);
			clear_bit(STRIPE_BIT_DELAY, &sh->state);
			if (conf->worker_cnt_per_group == 0) {
				if (stripe_is_lowprio(sh))
					list_add_tail(&sh->lru,
						      &conf->loprio_list);
				else
					list_add_tail(&sh->lru,
						      &conf->handle_list);
			} else {
				raid5_wakeup_stripe_thread(sh);
				return;
			}
		}
		md_wakeup_thread(conf->mddev->thread);
	} else {
		BUG_ON(stripe_operations_active(sh));
		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			if (atomic_dec_return(&conf->preread_active_stripes)
			    < IO_THRESHOLD)
				md_wakeup_thread(conf->mddev->thread);
		atomic_dec(&conf->active_stripes);
		if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
			if (!r5c_is_writeback(conf->log))
				list_add_tail(&sh->lru, temp_inactive_list);
			else {
				WARN_ON(test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags));
				if (injournal == 0)
					list_add_tail(&sh->lru, temp_inactive_list);
				else if (injournal == conf->raid_disks - conf->max_degraded) {
					/* full stripe */
					if (!test_and_set_bit(STRIPE_R5C_FULL_STRIPE, &sh->state))
						atomic_inc(&conf->r5c_cached_full_stripes);
					if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state))
						atomic_dec(&conf->r5c_cached_partial_stripes);
					list_add_tail(&sh->lru, &conf->r5c_full_stripe_list);
					r5c_check_cached_full_stripe(conf);
				} else
					/*
					 * STRIPE_R5C_PARTIAL_STRIPE is set in
					 * r5c_try_caching_write(). No need to
					 * set it again.
					 */
					list_add_tail(&sh->lru, &conf->r5c_partial_stripe_list);
			}
		}
	}
}

static void __release_stripe(struct r5conf *conf, struct stripe_head *sh,
			     struct list_head *temp_inactive_list)
{
	if (atomic_dec_and_test(&sh->count))
		do_release_stripe(conf, sh, temp_inactive_list);
}

/*
 * @hash could be NR_STRIPE_HASH_LOCKS, in which case temp_inactive_list is an
 * array with one list per hash lock.
 *
 * Be careful: Only one task can add/delete stripes from temp_inactive_list at
 * a given time. Adding stripes only takes the device lock, while deleting
 * stripes only takes the hash lock.
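 *
 * For example, release_stripe_list() (called with conf->device_lock held)
 * moves stripes onto temp_inactive_list[hash], and
 * release_inactive_stripe_list() below then splices each of those lists onto
 * conf->inactive_list + hash under the corresponding hash lock.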
 */
static void release_inactive_stripe_list(struct r5conf *conf,
					 struct list_head *temp_inactive_list,
					 int hash)
{
	int size;
	bool do_wakeup = false;
	unsigned long flags;

	if (hash == NR_STRIPE_HASH_LOCKS) {
		size = NR_STRIPE_HASH_LOCKS;
		hash = NR_STRIPE_HASH_LOCKS - 1;
	} else
		size = 1;
	while (size) {
		struct list_head *list = &temp_inactive_list[size - 1];

		/*
		 * We don't hold any lock here yet, raid5_get_active_stripe() might
		 * remove stripes from the list
		 */
		if (!list_empty_careful(list)) {
			spin_lock_irqsave(conf->hash_locks + hash, flags);
			if (list_empty(conf->inactive_list + hash) &&
			    !list_empty(list))
				atomic_dec(&conf->empty_inactive_list_nr);
			list_splice_tail_init(list, conf->inactive_list + hash);
			do_wakeup = true;
			spin_unlock_irqrestore(conf->hash_locks + hash, flags);
		}
		size--;
		hash--;
	}

	if (do_wakeup) {
		wake_up(&conf->wait_for_stripe);
		if (atomic_read(&conf->active_stripes) == 0)
			wake_up(&conf->wait_for_quiescent);
		if (conf->retry_read_aligned)
			md_wakeup_thread(conf->mddev->thread);
	}
}

/* should hold conf->device_lock already */
static int release_stripe_list(struct r5conf *conf,
			       struct list_head *temp_inactive_list)
{
	struct stripe_head *sh, *t;
	int count = 0;
	struct llist_node *head;

	head = llist_del_all(&conf->released_stripes);
	head = llist_reverse_order(head);
	llist_for_each_entry_safe(sh, t, head, release_list) {
		int hash;

		/* sh could be re-added after STRIPE_ON_RELEASE_LIST is cleared */
		smp_mb();
		clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state);
		/*
		 * Don't worry if the bit gets set again here, because in that
		 * case the count is always > 1. This is true for the
		 * STRIPE_ON_UNPLUG_LIST bit too.
		 */
		hash = sh->hash_lock_index;
		__release_stripe(conf, sh, &temp_inactive_list[hash]);
		count++;
	}

	return count;
}

void raid5_release_stripe(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	unsigned long flags;
	struct list_head list;
	int hash;
	bool wakeup;

	/* Avoid release_list until the last reference. */
	if (atomic_add_unless(&sh->count, -1, 1))
		return;

	if (unlikely(!conf->mddev->thread) ||
	    test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state))
		goto slow_path;
	wakeup = llist_add(&sh->release_list, &conf->released_stripes);
	if (wakeup)
		md_wakeup_thread(conf->mddev->thread);
	return;
slow_path:
	/* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */
	if (atomic_dec_and_lock_irqsave(&sh->count, &conf->device_lock, flags)) {
		INIT_LIST_HEAD(&list);
		hash = sh->hash_lock_index;
		do_release_stripe(conf, sh, &list);
		spin_unlock_irqrestore(&conf->device_lock, flags);
		release_inactive_stripe_list(conf, &list, hash);
	}
}

static inline void remove_hash(struct stripe_head *sh)
{
	pr_debug("remove_hash(), stripe %llu\n",
		 (unsigned long long)sh->sector);

	hlist_del_init(&sh->hash);
}

static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh)
{
	struct hlist_head *hp = stripe_hash(conf, sh->sector);

	pr_debug("insert_hash(), stripe %llu\n",
		 (unsigned long long)sh->sector);

	hlist_add_head(&sh->hash, hp);
}

/* find an idle stripe, make sure it is unhashed, and return it.
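 * Called under the corresponding hash lock (see raid5_get_active_stripe()):
 * the stripe is unlinked from conf->inactive_list + hash, dropped from the
 * stripe hash table, and accounted in conf->active_stripes.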
*/ 432 static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash) 433 { 434 struct stripe_head *sh = NULL; 435 struct list_head *first; 436 437 if (list_empty(conf->inactive_list + hash)) 438 goto out; 439 first = (conf->inactive_list + hash)->next; 440 sh = list_entry(first, struct stripe_head, lru); 441 list_del_init(first); 442 remove_hash(sh); 443 atomic_inc(&conf->active_stripes); 444 BUG_ON(hash != sh->hash_lock_index); 445 if (list_empty(conf->inactive_list + hash)) 446 atomic_inc(&conf->empty_inactive_list_nr); 447 out: 448 return sh; 449 } 450 451 static void shrink_buffers(struct stripe_head *sh) 452 { 453 struct page *p; 454 int i; 455 int num = sh->raid_conf->pool_size; 456 457 for (i = 0; i < num ; i++) { 458 WARN_ON(sh->dev[i].page != sh->dev[i].orig_page); 459 p = sh->dev[i].page; 460 if (!p) 461 continue; 462 sh->dev[i].page = NULL; 463 put_page(p); 464 } 465 } 466 467 static int grow_buffers(struct stripe_head *sh, gfp_t gfp) 468 { 469 int i; 470 int num = sh->raid_conf->pool_size; 471 472 for (i = 0; i < num; i++) { 473 struct page *page; 474 475 if (!(page = alloc_page(gfp))) { 476 return 1; 477 } 478 sh->dev[i].page = page; 479 sh->dev[i].orig_page = page; 480 } 481 482 return 0; 483 } 484 485 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, 486 struct stripe_head *sh); 487 488 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) 489 { 490 struct r5conf *conf = sh->raid_conf; 491 int i, seq; 492 493 BUG_ON(atomic_read(&sh->count) != 0); 494 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); 495 BUG_ON(stripe_operations_active(sh)); 496 BUG_ON(sh->batch_head); 497 498 pr_debug("init_stripe called, stripe %llu\n", 499 (unsigned long long)sector); 500 retry: 501 seq = read_seqcount_begin(&conf->gen_lock); 502 sh->generation = conf->generation - previous; 503 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks; 504 sh->sector = sector; 505 stripe_set_idx(sector, conf, previous, sh); 506 sh->state = 0; 507 508 for (i = sh->disks; i--; ) { 509 struct r5dev *dev = &sh->dev[i]; 510 511 if (dev->toread || dev->read || dev->towrite || dev->written || 512 test_bit(R5_LOCKED, &dev->flags)) { 513 pr_err("sector=%llx i=%d %p %p %p %p %d\n", 514 (unsigned long long)sh->sector, i, dev->toread, 515 dev->read, dev->towrite, dev->written, 516 test_bit(R5_LOCKED, &dev->flags)); 517 WARN_ON(1); 518 } 519 dev->flags = 0; 520 dev->sector = raid5_compute_blocknr(sh, i, previous); 521 } 522 if (read_seqcount_retry(&conf->gen_lock, seq)) 523 goto retry; 524 sh->overwrite_disks = 0; 525 insert_hash(conf, sh); 526 sh->cpu = smp_processor_id(); 527 set_bit(STRIPE_BATCH_READY, &sh->state); 528 } 529 530 static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector, 531 short generation) 532 { 533 struct stripe_head *sh; 534 535 pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector); 536 hlist_for_each_entry(sh, stripe_hash(conf, sector), hash) 537 if (sh->sector == sector && sh->generation == generation) 538 return sh; 539 pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector); 540 return NULL; 541 } 542 543 /* 544 * Need to check if array has failed when deciding whether to: 545 * - start an array 546 * - remove non-faulty devices 547 * - add a spare 548 * - allow a reshape 549 * This determination is simple when no reshape is happening. 550 * However if there is a reshape, we need to carefully check 551 * both the before and after sections. 
552 * This is because some failed devices may only affect one 553 * of the two sections, and some non-in_sync devices may 554 * be insync in the section most affected by failed devices. 555 */ 556 int raid5_calc_degraded(struct r5conf *conf) 557 { 558 int degraded, degraded2; 559 int i; 560 561 rcu_read_lock(); 562 degraded = 0; 563 for (i = 0; i < conf->previous_raid_disks; i++) { 564 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); 565 if (rdev && test_bit(Faulty, &rdev->flags)) 566 rdev = rcu_dereference(conf->disks[i].replacement); 567 if (!rdev || test_bit(Faulty, &rdev->flags)) 568 degraded++; 569 else if (test_bit(In_sync, &rdev->flags)) 570 ; 571 else 572 /* not in-sync or faulty. 573 * If the reshape increases the number of devices, 574 * this is being recovered by the reshape, so 575 * this 'previous' section is not in_sync. 576 * If the number of devices is being reduced however, 577 * the device can only be part of the array if 578 * we are reverting a reshape, so this section will 579 * be in-sync. 580 */ 581 if (conf->raid_disks >= conf->previous_raid_disks) 582 degraded++; 583 } 584 rcu_read_unlock(); 585 if (conf->raid_disks == conf->previous_raid_disks) 586 return degraded; 587 rcu_read_lock(); 588 degraded2 = 0; 589 for (i = 0; i < conf->raid_disks; i++) { 590 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); 591 if (rdev && test_bit(Faulty, &rdev->flags)) 592 rdev = rcu_dereference(conf->disks[i].replacement); 593 if (!rdev || test_bit(Faulty, &rdev->flags)) 594 degraded2++; 595 else if (test_bit(In_sync, &rdev->flags)) 596 ; 597 else 598 /* not in-sync or faulty. 599 * If reshape increases the number of devices, this 600 * section has already been recovered, else it 601 * almost certainly hasn't. 602 */ 603 if (conf->raid_disks <= conf->previous_raid_disks) 604 degraded2++; 605 } 606 rcu_read_unlock(); 607 if (degraded2 > degraded) 608 return degraded2; 609 return degraded; 610 } 611 612 static int has_failed(struct r5conf *conf) 613 { 614 int degraded; 615 616 if (conf->mddev->reshape_position == MaxSector) 617 return conf->mddev->degraded > conf->max_degraded; 618 619 degraded = raid5_calc_degraded(conf); 620 if (degraded > conf->max_degraded) 621 return 1; 622 return 0; 623 } 624 625 struct stripe_head * 626 raid5_get_active_stripe(struct r5conf *conf, sector_t sector, 627 int previous, int noblock, int noquiesce) 628 { 629 struct stripe_head *sh; 630 int hash = stripe_hash_locks_hash(sector); 631 int inc_empty_inactive_list_flag; 632 633 pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector); 634 635 spin_lock_irq(conf->hash_locks + hash); 636 637 do { 638 wait_event_lock_irq(conf->wait_for_quiescent, 639 conf->quiesce == 0 || noquiesce, 640 *(conf->hash_locks + hash)); 641 sh = __find_stripe(conf, sector, conf->generation - previous); 642 if (!sh) { 643 if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) { 644 sh = get_free_stripe(conf, hash); 645 if (!sh && !test_bit(R5_DID_ALLOC, 646 &conf->cache_state)) 647 set_bit(R5_ALLOC_MORE, 648 &conf->cache_state); 649 } 650 if (noblock && sh == NULL) 651 break; 652 653 r5c_check_stripe_cache_usage(conf); 654 if (!sh) { 655 set_bit(R5_INACTIVE_BLOCKED, 656 &conf->cache_state); 657 r5l_wake_reclaim(conf->log, 0); 658 wait_event_lock_irq( 659 conf->wait_for_stripe, 660 !list_empty(conf->inactive_list + hash) && 661 (atomic_read(&conf->active_stripes) 662 < (conf->max_nr_stripes * 3 / 4) 663 || !test_bit(R5_INACTIVE_BLOCKED, 664 &conf->cache_state)), 665 *(conf->hash_locks + hash)); 666 
clear_bit(R5_INACTIVE_BLOCKED, 667 &conf->cache_state); 668 } else { 669 init_stripe(sh, sector, previous); 670 atomic_inc(&sh->count); 671 } 672 } else if (!atomic_inc_not_zero(&sh->count)) { 673 spin_lock(&conf->device_lock); 674 if (!atomic_read(&sh->count)) { 675 if (!test_bit(STRIPE_HANDLE, &sh->state)) 676 atomic_inc(&conf->active_stripes); 677 BUG_ON(list_empty(&sh->lru) && 678 !test_bit(STRIPE_EXPANDING, &sh->state)); 679 inc_empty_inactive_list_flag = 0; 680 if (!list_empty(conf->inactive_list + hash)) 681 inc_empty_inactive_list_flag = 1; 682 list_del_init(&sh->lru); 683 if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag) 684 atomic_inc(&conf->empty_inactive_list_nr); 685 if (sh->group) { 686 sh->group->stripes_cnt--; 687 sh->group = NULL; 688 } 689 } 690 atomic_inc(&sh->count); 691 spin_unlock(&conf->device_lock); 692 } 693 } while (sh == NULL); 694 695 spin_unlock_irq(conf->hash_locks + hash); 696 return sh; 697 } 698 699 static bool is_full_stripe_write(struct stripe_head *sh) 700 { 701 BUG_ON(sh->overwrite_disks > (sh->disks - sh->raid_conf->max_degraded)); 702 return sh->overwrite_disks == (sh->disks - sh->raid_conf->max_degraded); 703 } 704 705 static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) 706 __acquires(&sh1->stripe_lock) 707 __acquires(&sh2->stripe_lock) 708 { 709 if (sh1 > sh2) { 710 spin_lock_irq(&sh2->stripe_lock); 711 spin_lock_nested(&sh1->stripe_lock, 1); 712 } else { 713 spin_lock_irq(&sh1->stripe_lock); 714 spin_lock_nested(&sh2->stripe_lock, 1); 715 } 716 } 717 718 static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) 719 __releases(&sh1->stripe_lock) 720 __releases(&sh2->stripe_lock) 721 { 722 spin_unlock(&sh1->stripe_lock); 723 spin_unlock_irq(&sh2->stripe_lock); 724 } 725 726 /* Only freshly new full stripe normal write stripe can be added to a batch list */ 727 static bool stripe_can_batch(struct stripe_head *sh) 728 { 729 struct r5conf *conf = sh->raid_conf; 730 731 if (raid5_has_log(conf) || raid5_has_ppl(conf)) 732 return false; 733 return test_bit(STRIPE_BATCH_READY, &sh->state) && 734 !test_bit(STRIPE_BITMAP_PENDING, &sh->state) && 735 is_full_stripe_write(sh); 736 } 737 738 /* we only do back search */ 739 static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh) 740 { 741 struct stripe_head *head; 742 sector_t head_sector, tmp_sec; 743 int hash; 744 int dd_idx; 745 int inc_empty_inactive_list_flag; 746 747 /* Don't cross chunks, so stripe pd_idx/qd_idx is the same */ 748 tmp_sec = sh->sector; 749 if (!sector_div(tmp_sec, conf->chunk_sectors)) 750 return; 751 head_sector = sh->sector - STRIPE_SECTORS; 752 753 hash = stripe_hash_locks_hash(head_sector); 754 spin_lock_irq(conf->hash_locks + hash); 755 head = __find_stripe(conf, head_sector, conf->generation); 756 if (head && !atomic_inc_not_zero(&head->count)) { 757 spin_lock(&conf->device_lock); 758 if (!atomic_read(&head->count)) { 759 if (!test_bit(STRIPE_HANDLE, &head->state)) 760 atomic_inc(&conf->active_stripes); 761 BUG_ON(list_empty(&head->lru) && 762 !test_bit(STRIPE_EXPANDING, &head->state)); 763 inc_empty_inactive_list_flag = 0; 764 if (!list_empty(conf->inactive_list + hash)) 765 inc_empty_inactive_list_flag = 1; 766 list_del_init(&head->lru); 767 if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag) 768 atomic_inc(&conf->empty_inactive_list_nr); 769 if (head->group) { 770 head->group->stripes_cnt--; 771 head->group = NULL; 772 } 773 } 774 atomic_inc(&head->count); 
775 spin_unlock(&conf->device_lock); 776 } 777 spin_unlock_irq(conf->hash_locks + hash); 778 779 if (!head) 780 return; 781 if (!stripe_can_batch(head)) 782 goto out; 783 784 lock_two_stripes(head, sh); 785 /* clear_batch_ready clear the flag */ 786 if (!stripe_can_batch(head) || !stripe_can_batch(sh)) 787 goto unlock_out; 788 789 if (sh->batch_head) 790 goto unlock_out; 791 792 dd_idx = 0; 793 while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) 794 dd_idx++; 795 if (head->dev[dd_idx].towrite->bi_opf != sh->dev[dd_idx].towrite->bi_opf || 796 bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite)) 797 goto unlock_out; 798 799 if (head->batch_head) { 800 spin_lock(&head->batch_head->batch_lock); 801 /* This batch list is already running */ 802 if (!stripe_can_batch(head)) { 803 spin_unlock(&head->batch_head->batch_lock); 804 goto unlock_out; 805 } 806 /* 807 * We must assign batch_head of this stripe within the 808 * batch_lock, otherwise clear_batch_ready of batch head 809 * stripe could clear BATCH_READY bit of this stripe and 810 * this stripe->batch_head doesn't get assigned, which 811 * could confuse clear_batch_ready for this stripe 812 */ 813 sh->batch_head = head->batch_head; 814 815 /* 816 * at this point, head's BATCH_READY could be cleared, but we 817 * can still add the stripe to batch list 818 */ 819 list_add(&sh->batch_list, &head->batch_list); 820 spin_unlock(&head->batch_head->batch_lock); 821 } else { 822 head->batch_head = head; 823 sh->batch_head = head->batch_head; 824 spin_lock(&head->batch_lock); 825 list_add_tail(&sh->batch_list, &head->batch_list); 826 spin_unlock(&head->batch_lock); 827 } 828 829 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 830 if (atomic_dec_return(&conf->preread_active_stripes) 831 < IO_THRESHOLD) 832 md_wakeup_thread(conf->mddev->thread); 833 834 if (test_and_clear_bit(STRIPE_BIT_DELAY, &sh->state)) { 835 int seq = sh->bm_seq; 836 if (test_bit(STRIPE_BIT_DELAY, &sh->batch_head->state) && 837 sh->batch_head->bm_seq > seq) 838 seq = sh->batch_head->bm_seq; 839 set_bit(STRIPE_BIT_DELAY, &sh->batch_head->state); 840 sh->batch_head->bm_seq = seq; 841 } 842 843 atomic_inc(&sh->count); 844 unlock_out: 845 unlock_two_stripes(head, sh); 846 out: 847 raid5_release_stripe(head); 848 } 849 850 /* Determine if 'data_offset' or 'new_data_offset' should be used 851 * in this stripe_head. 852 */ 853 static int use_new_offset(struct r5conf *conf, struct stripe_head *sh) 854 { 855 sector_t progress = conf->reshape_progress; 856 /* Need a memory barrier to make sure we see the value 857 * of conf->generation, or ->data_offset that was set before 858 * reshape_progress was updated. 859 */ 860 smp_rmb(); 861 if (progress == MaxSector) 862 return 0; 863 if (sh->generation == conf->generation - 1) 864 return 0; 865 /* We are in a reshape, and this is a new-generation stripe, 866 * so use new_data_offset. 
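 * (init_stripe() sets sh->generation to conf->generation - previous, so a
 * stripe set up for the pre-reshape geometry compares equal to
 * conf->generation - 1 above and keeps using the old data_offset.)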
867 */ 868 return 1; 869 } 870 871 static void dispatch_bio_list(struct bio_list *tmp) 872 { 873 struct bio *bio; 874 875 while ((bio = bio_list_pop(tmp))) 876 generic_make_request(bio); 877 } 878 879 static int cmp_stripe(void *priv, struct list_head *a, struct list_head *b) 880 { 881 const struct r5pending_data *da = list_entry(a, 882 struct r5pending_data, sibling); 883 const struct r5pending_data *db = list_entry(b, 884 struct r5pending_data, sibling); 885 if (da->sector > db->sector) 886 return 1; 887 if (da->sector < db->sector) 888 return -1; 889 return 0; 890 } 891 892 static void dispatch_defer_bios(struct r5conf *conf, int target, 893 struct bio_list *list) 894 { 895 struct r5pending_data *data; 896 struct list_head *first, *next = NULL; 897 int cnt = 0; 898 899 if (conf->pending_data_cnt == 0) 900 return; 901 902 list_sort(NULL, &conf->pending_list, cmp_stripe); 903 904 first = conf->pending_list.next; 905 906 /* temporarily move the head */ 907 if (conf->next_pending_data) 908 list_move_tail(&conf->pending_list, 909 &conf->next_pending_data->sibling); 910 911 while (!list_empty(&conf->pending_list)) { 912 data = list_first_entry(&conf->pending_list, 913 struct r5pending_data, sibling); 914 if (&data->sibling == first) 915 first = data->sibling.next; 916 next = data->sibling.next; 917 918 bio_list_merge(list, &data->bios); 919 list_move(&data->sibling, &conf->free_list); 920 cnt++; 921 if (cnt >= target) 922 break; 923 } 924 conf->pending_data_cnt -= cnt; 925 BUG_ON(conf->pending_data_cnt < 0 || cnt < target); 926 927 if (next != &conf->pending_list) 928 conf->next_pending_data = list_entry(next, 929 struct r5pending_data, sibling); 930 else 931 conf->next_pending_data = NULL; 932 /* list isn't empty */ 933 if (first != &conf->pending_list) 934 list_move_tail(&conf->pending_list, first); 935 } 936 937 static void flush_deferred_bios(struct r5conf *conf) 938 { 939 struct bio_list tmp = BIO_EMPTY_LIST; 940 941 if (conf->pending_data_cnt == 0) 942 return; 943 944 spin_lock(&conf->pending_bios_lock); 945 dispatch_defer_bios(conf, conf->pending_data_cnt, &tmp); 946 BUG_ON(conf->pending_data_cnt != 0); 947 spin_unlock(&conf->pending_bios_lock); 948 949 dispatch_bio_list(&tmp); 950 } 951 952 static void defer_issue_bios(struct r5conf *conf, sector_t sector, 953 struct bio_list *bios) 954 { 955 struct bio_list tmp = BIO_EMPTY_LIST; 956 struct r5pending_data *ent; 957 958 spin_lock(&conf->pending_bios_lock); 959 ent = list_first_entry(&conf->free_list, struct r5pending_data, 960 sibling); 961 list_move_tail(&ent->sibling, &conf->pending_list); 962 ent->sector = sector; 963 bio_list_init(&ent->bios); 964 bio_list_merge(&ent->bios, bios); 965 conf->pending_data_cnt++; 966 if (conf->pending_data_cnt >= PENDING_IO_MAX) 967 dispatch_defer_bios(conf, PENDING_IO_ONE_FLUSH, &tmp); 968 969 spin_unlock(&conf->pending_bios_lock); 970 971 dispatch_bio_list(&tmp); 972 } 973 974 static void 975 raid5_end_read_request(struct bio *bi); 976 static void 977 raid5_end_write_request(struct bio *bi); 978 979 static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) 980 { 981 struct r5conf *conf = sh->raid_conf; 982 int i, disks = sh->disks; 983 struct stripe_head *head_sh = sh; 984 struct bio_list pending_bios = BIO_EMPTY_LIST; 985 bool should_defer; 986 987 might_sleep(); 988 989 if (log_stripe(sh, s) == 0) 990 return; 991 992 should_defer = conf->batch_bio_dispatch && conf->group_cnt; 993 994 for (i = disks; i--; ) { 995 int op, op_flags = 0; 996 int replace_only = 0; 997 struct bio *bi, 
*rbi; 998 struct md_rdev *rdev, *rrdev = NULL; 999 1000 sh = head_sh; 1001 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) { 1002 op = REQ_OP_WRITE; 1003 if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags)) 1004 op_flags = REQ_FUA; 1005 if (test_bit(R5_Discard, &sh->dev[i].flags)) 1006 op = REQ_OP_DISCARD; 1007 } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) 1008 op = REQ_OP_READ; 1009 else if (test_and_clear_bit(R5_WantReplace, 1010 &sh->dev[i].flags)) { 1011 op = REQ_OP_WRITE; 1012 replace_only = 1; 1013 } else 1014 continue; 1015 if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags)) 1016 op_flags |= REQ_SYNC; 1017 1018 again: 1019 bi = &sh->dev[i].req; 1020 rbi = &sh->dev[i].rreq; /* For writing to replacement */ 1021 1022 rcu_read_lock(); 1023 rrdev = rcu_dereference(conf->disks[i].replacement); 1024 smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */ 1025 rdev = rcu_dereference(conf->disks[i].rdev); 1026 if (!rdev) { 1027 rdev = rrdev; 1028 rrdev = NULL; 1029 } 1030 if (op_is_write(op)) { 1031 if (replace_only) 1032 rdev = NULL; 1033 if (rdev == rrdev) 1034 /* We raced and saw duplicates */ 1035 rrdev = NULL; 1036 } else { 1037 if (test_bit(R5_ReadRepl, &head_sh->dev[i].flags) && rrdev) 1038 rdev = rrdev; 1039 rrdev = NULL; 1040 } 1041 1042 if (rdev && test_bit(Faulty, &rdev->flags)) 1043 rdev = NULL; 1044 if (rdev) 1045 atomic_inc(&rdev->nr_pending); 1046 if (rrdev && test_bit(Faulty, &rrdev->flags)) 1047 rrdev = NULL; 1048 if (rrdev) 1049 atomic_inc(&rrdev->nr_pending); 1050 rcu_read_unlock(); 1051 1052 /* We have already checked bad blocks for reads. Now 1053 * need to check for writes. We never accept write errors 1054 * on the replacement, so we don't to check rrdev. 1055 */ 1056 while (op_is_write(op) && rdev && 1057 test_bit(WriteErrorSeen, &rdev->flags)) { 1058 sector_t first_bad; 1059 int bad_sectors; 1060 int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, 1061 &first_bad, &bad_sectors); 1062 if (!bad) 1063 break; 1064 1065 if (bad < 0) { 1066 set_bit(BlockedBadBlocks, &rdev->flags); 1067 if (!conf->mddev->external && 1068 conf->mddev->sb_flags) { 1069 /* It is very unlikely, but we might 1070 * still need to write out the 1071 * bad block log - better give it 1072 * a chance*/ 1073 md_check_recovery(conf->mddev); 1074 } 1075 /* 1076 * Because md_wait_for_blocked_rdev 1077 * will dec nr_pending, we must 1078 * increment it first. 1079 */ 1080 atomic_inc(&rdev->nr_pending); 1081 md_wait_for_blocked_rdev(rdev, conf->mddev); 1082 } else { 1083 /* Acknowledged bad block - skip the write */ 1084 rdev_dec_pending(rdev, conf->mddev); 1085 rdev = NULL; 1086 } 1087 } 1088 1089 if (rdev) { 1090 if (s->syncing || s->expanding || s->expanded 1091 || s->replacing) 1092 md_sync_acct(rdev->bdev, STRIPE_SECTORS); 1093 1094 set_bit(STRIPE_IO_STARTED, &sh->state); 1095 1096 bio_set_dev(bi, rdev->bdev); 1097 bio_set_op_attrs(bi, op, op_flags); 1098 bi->bi_end_io = op_is_write(op) 1099 ? 
raid5_end_write_request 1100 : raid5_end_read_request; 1101 bi->bi_private = sh; 1102 1103 pr_debug("%s: for %llu schedule op %d on disc %d\n", 1104 __func__, (unsigned long long)sh->sector, 1105 bi->bi_opf, i); 1106 atomic_inc(&sh->count); 1107 if (sh != head_sh) 1108 atomic_inc(&head_sh->count); 1109 if (use_new_offset(conf, sh)) 1110 bi->bi_iter.bi_sector = (sh->sector 1111 + rdev->new_data_offset); 1112 else 1113 bi->bi_iter.bi_sector = (sh->sector 1114 + rdev->data_offset); 1115 if (test_bit(R5_ReadNoMerge, &head_sh->dev[i].flags)) 1116 bi->bi_opf |= REQ_NOMERGE; 1117 1118 if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) 1119 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); 1120 1121 if (!op_is_write(op) && 1122 test_bit(R5_InJournal, &sh->dev[i].flags)) 1123 /* 1124 * issuing read for a page in journal, this 1125 * must be preparing for prexor in rmw; read 1126 * the data into orig_page 1127 */ 1128 sh->dev[i].vec.bv_page = sh->dev[i].orig_page; 1129 else 1130 sh->dev[i].vec.bv_page = sh->dev[i].page; 1131 bi->bi_vcnt = 1; 1132 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 1133 bi->bi_io_vec[0].bv_offset = 0; 1134 bi->bi_iter.bi_size = STRIPE_SIZE; 1135 bi->bi_write_hint = sh->dev[i].write_hint; 1136 if (!rrdev) 1137 sh->dev[i].write_hint = RWH_WRITE_LIFE_NOT_SET; 1138 /* 1139 * If this is discard request, set bi_vcnt 0. We don't 1140 * want to confuse SCSI because SCSI will replace payload 1141 */ 1142 if (op == REQ_OP_DISCARD) 1143 bi->bi_vcnt = 0; 1144 if (rrdev) 1145 set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); 1146 1147 if (conf->mddev->gendisk) 1148 trace_block_bio_remap(bi->bi_disk->queue, 1149 bi, disk_devt(conf->mddev->gendisk), 1150 sh->dev[i].sector); 1151 if (should_defer && op_is_write(op)) 1152 bio_list_add(&pending_bios, bi); 1153 else 1154 generic_make_request(bi); 1155 } 1156 if (rrdev) { 1157 if (s->syncing || s->expanding || s->expanded 1158 || s->replacing) 1159 md_sync_acct(rrdev->bdev, STRIPE_SECTORS); 1160 1161 set_bit(STRIPE_IO_STARTED, &sh->state); 1162 1163 bio_set_dev(rbi, rrdev->bdev); 1164 bio_set_op_attrs(rbi, op, op_flags); 1165 BUG_ON(!op_is_write(op)); 1166 rbi->bi_end_io = raid5_end_write_request; 1167 rbi->bi_private = sh; 1168 1169 pr_debug("%s: for %llu schedule op %d on " 1170 "replacement disc %d\n", 1171 __func__, (unsigned long long)sh->sector, 1172 rbi->bi_opf, i); 1173 atomic_inc(&sh->count); 1174 if (sh != head_sh) 1175 atomic_inc(&head_sh->count); 1176 if (use_new_offset(conf, sh)) 1177 rbi->bi_iter.bi_sector = (sh->sector 1178 + rrdev->new_data_offset); 1179 else 1180 rbi->bi_iter.bi_sector = (sh->sector 1181 + rrdev->data_offset); 1182 if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) 1183 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); 1184 sh->dev[i].rvec.bv_page = sh->dev[i].page; 1185 rbi->bi_vcnt = 1; 1186 rbi->bi_io_vec[0].bv_len = STRIPE_SIZE; 1187 rbi->bi_io_vec[0].bv_offset = 0; 1188 rbi->bi_iter.bi_size = STRIPE_SIZE; 1189 rbi->bi_write_hint = sh->dev[i].write_hint; 1190 sh->dev[i].write_hint = RWH_WRITE_LIFE_NOT_SET; 1191 /* 1192 * If this is discard request, set bi_vcnt 0. 
We don't 1193 * want to confuse SCSI because SCSI will replace payload 1194 */ 1195 if (op == REQ_OP_DISCARD) 1196 rbi->bi_vcnt = 0; 1197 if (conf->mddev->gendisk) 1198 trace_block_bio_remap(rbi->bi_disk->queue, 1199 rbi, disk_devt(conf->mddev->gendisk), 1200 sh->dev[i].sector); 1201 if (should_defer && op_is_write(op)) 1202 bio_list_add(&pending_bios, rbi); 1203 else 1204 generic_make_request(rbi); 1205 } 1206 if (!rdev && !rrdev) { 1207 if (op_is_write(op)) 1208 set_bit(STRIPE_DEGRADED, &sh->state); 1209 pr_debug("skip op %d on disc %d for sector %llu\n", 1210 bi->bi_opf, i, (unsigned long long)sh->sector); 1211 clear_bit(R5_LOCKED, &sh->dev[i].flags); 1212 set_bit(STRIPE_HANDLE, &sh->state); 1213 } 1214 1215 if (!head_sh->batch_head) 1216 continue; 1217 sh = list_first_entry(&sh->batch_list, struct stripe_head, 1218 batch_list); 1219 if (sh != head_sh) 1220 goto again; 1221 } 1222 1223 if (should_defer && !bio_list_empty(&pending_bios)) 1224 defer_issue_bios(conf, head_sh->sector, &pending_bios); 1225 } 1226 1227 static struct dma_async_tx_descriptor * 1228 async_copy_data(int frombio, struct bio *bio, struct page **page, 1229 sector_t sector, struct dma_async_tx_descriptor *tx, 1230 struct stripe_head *sh, int no_skipcopy) 1231 { 1232 struct bio_vec bvl; 1233 struct bvec_iter iter; 1234 struct page *bio_page; 1235 int page_offset; 1236 struct async_submit_ctl submit; 1237 enum async_tx_flags flags = 0; 1238 1239 if (bio->bi_iter.bi_sector >= sector) 1240 page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512; 1241 else 1242 page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512; 1243 1244 if (frombio) 1245 flags |= ASYNC_TX_FENCE; 1246 init_async_submit(&submit, flags, tx, NULL, NULL, NULL); 1247 1248 bio_for_each_segment(bvl, bio, iter) { 1249 int len = bvl.bv_len; 1250 int clen; 1251 int b_offset = 0; 1252 1253 if (page_offset < 0) { 1254 b_offset = -page_offset; 1255 page_offset += b_offset; 1256 len -= b_offset; 1257 } 1258 1259 if (len > 0 && page_offset + len > STRIPE_SIZE) 1260 clen = STRIPE_SIZE - page_offset; 1261 else 1262 clen = len; 1263 1264 if (clen > 0) { 1265 b_offset += bvl.bv_offset; 1266 bio_page = bvl.bv_page; 1267 if (frombio) { 1268 if (sh->raid_conf->skip_copy && 1269 b_offset == 0 && page_offset == 0 && 1270 clen == STRIPE_SIZE && 1271 !no_skipcopy) 1272 *page = bio_page; 1273 else 1274 tx = async_memcpy(*page, bio_page, page_offset, 1275 b_offset, clen, &submit); 1276 } else 1277 tx = async_memcpy(bio_page, *page, b_offset, 1278 page_offset, clen, &submit); 1279 } 1280 /* chain the operations */ 1281 submit.depend_tx = tx; 1282 1283 if (clen < len) /* hit end of page */ 1284 break; 1285 page_offset += len; 1286 } 1287 1288 return tx; 1289 } 1290 1291 static void ops_complete_biofill(void *stripe_head_ref) 1292 { 1293 struct stripe_head *sh = stripe_head_ref; 1294 int i; 1295 1296 pr_debug("%s: stripe %llu\n", __func__, 1297 (unsigned long long)sh->sector); 1298 1299 /* clear completed biofills */ 1300 for (i = sh->disks; i--; ) { 1301 struct r5dev *dev = &sh->dev[i]; 1302 1303 /* acknowledge completion of a biofill operation */ 1304 /* and check if we need to reply to a read request, 1305 * new R5_Wantfill requests are held off until 1306 * !STRIPE_BIOFILL_RUN 1307 */ 1308 if (test_and_clear_bit(R5_Wantfill, &dev->flags)) { 1309 struct bio *rbi, *rbi2; 1310 1311 BUG_ON(!dev->read); 1312 rbi = dev->read; 1313 dev->read = NULL; 1314 while (rbi && rbi->bi_iter.bi_sector < 1315 dev->sector + STRIPE_SECTORS) { 1316 rbi2 = r5_next_bio(rbi, 
dev->sector); 1317 bio_endio(rbi); 1318 rbi = rbi2; 1319 } 1320 } 1321 } 1322 clear_bit(STRIPE_BIOFILL_RUN, &sh->state); 1323 1324 set_bit(STRIPE_HANDLE, &sh->state); 1325 raid5_release_stripe(sh); 1326 } 1327 1328 static void ops_run_biofill(struct stripe_head *sh) 1329 { 1330 struct dma_async_tx_descriptor *tx = NULL; 1331 struct async_submit_ctl submit; 1332 int i; 1333 1334 BUG_ON(sh->batch_head); 1335 pr_debug("%s: stripe %llu\n", __func__, 1336 (unsigned long long)sh->sector); 1337 1338 for (i = sh->disks; i--; ) { 1339 struct r5dev *dev = &sh->dev[i]; 1340 if (test_bit(R5_Wantfill, &dev->flags)) { 1341 struct bio *rbi; 1342 spin_lock_irq(&sh->stripe_lock); 1343 dev->read = rbi = dev->toread; 1344 dev->toread = NULL; 1345 spin_unlock_irq(&sh->stripe_lock); 1346 while (rbi && rbi->bi_iter.bi_sector < 1347 dev->sector + STRIPE_SECTORS) { 1348 tx = async_copy_data(0, rbi, &dev->page, 1349 dev->sector, tx, sh, 0); 1350 rbi = r5_next_bio(rbi, dev->sector); 1351 } 1352 } 1353 } 1354 1355 atomic_inc(&sh->count); 1356 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL); 1357 async_trigger_callback(&submit); 1358 } 1359 1360 static void mark_target_uptodate(struct stripe_head *sh, int target) 1361 { 1362 struct r5dev *tgt; 1363 1364 if (target < 0) 1365 return; 1366 1367 tgt = &sh->dev[target]; 1368 set_bit(R5_UPTODATE, &tgt->flags); 1369 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 1370 clear_bit(R5_Wantcompute, &tgt->flags); 1371 } 1372 1373 static void ops_complete_compute(void *stripe_head_ref) 1374 { 1375 struct stripe_head *sh = stripe_head_ref; 1376 1377 pr_debug("%s: stripe %llu\n", __func__, 1378 (unsigned long long)sh->sector); 1379 1380 /* mark the computed target(s) as uptodate */ 1381 mark_target_uptodate(sh, sh->ops.target); 1382 mark_target_uptodate(sh, sh->ops.target2); 1383 1384 clear_bit(STRIPE_COMPUTE_RUN, &sh->state); 1385 if (sh->check_state == check_state_compute_run) 1386 sh->check_state = check_state_compute_result; 1387 set_bit(STRIPE_HANDLE, &sh->state); 1388 raid5_release_stripe(sh); 1389 } 1390 1391 /* return a pointer to the address conversion region of the scribble buffer */ 1392 static struct page **to_addr_page(struct raid5_percpu *percpu, int i) 1393 { 1394 return percpu->scribble + i * percpu->scribble_obj_size; 1395 } 1396 1397 /* return a pointer to the address conversion region of the scribble buffer */ 1398 static addr_conv_t *to_addr_conv(struct stripe_head *sh, 1399 struct raid5_percpu *percpu, int i) 1400 { 1401 return (void *) (to_addr_page(percpu, i) + sh->disks + 2); 1402 } 1403 1404 static struct dma_async_tx_descriptor * 1405 ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu) 1406 { 1407 int disks = sh->disks; 1408 struct page **xor_srcs = to_addr_page(percpu, 0); 1409 int target = sh->ops.target; 1410 struct r5dev *tgt = &sh->dev[target]; 1411 struct page *xor_dest = tgt->page; 1412 int count = 0; 1413 struct dma_async_tx_descriptor *tx; 1414 struct async_submit_ctl submit; 1415 int i; 1416 1417 BUG_ON(sh->batch_head); 1418 1419 pr_debug("%s: stripe %llu block: %d\n", 1420 __func__, (unsigned long long)sh->sector, target); 1421 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 1422 1423 for (i = disks; i--; ) 1424 if (i != target) 1425 xor_srcs[count++] = sh->dev[i].page; 1426 1427 atomic_inc(&sh->count); 1428 1429 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL, 1430 ops_complete_compute, sh, to_addr_conv(sh, percpu, 0)); 1431 if (unlikely(count == 1)) 1432 tx = 
async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit); 1433 else 1434 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); 1435 1436 return tx; 1437 } 1438 1439 /* set_syndrome_sources - populate source buffers for gen_syndrome 1440 * @srcs - (struct page *) array of size sh->disks 1441 * @sh - stripe_head to parse 1442 * 1443 * Populates srcs in proper layout order for the stripe and returns the 1444 * 'count' of sources to be used in a call to async_gen_syndrome. The P 1445 * destination buffer is recorded in srcs[count] and the Q destination 1446 * is recorded in srcs[count+1]]. 1447 */ 1448 static int set_syndrome_sources(struct page **srcs, 1449 struct stripe_head *sh, 1450 int srctype) 1451 { 1452 int disks = sh->disks; 1453 int syndrome_disks = sh->ddf_layout ? disks : (disks - 2); 1454 int d0_idx = raid6_d0(sh); 1455 int count; 1456 int i; 1457 1458 for (i = 0; i < disks; i++) 1459 srcs[i] = NULL; 1460 1461 count = 0; 1462 i = d0_idx; 1463 do { 1464 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); 1465 struct r5dev *dev = &sh->dev[i]; 1466 1467 if (i == sh->qd_idx || i == sh->pd_idx || 1468 (srctype == SYNDROME_SRC_ALL) || 1469 (srctype == SYNDROME_SRC_WANT_DRAIN && 1470 (test_bit(R5_Wantdrain, &dev->flags) || 1471 test_bit(R5_InJournal, &dev->flags))) || 1472 (srctype == SYNDROME_SRC_WRITTEN && 1473 (dev->written || 1474 test_bit(R5_InJournal, &dev->flags)))) { 1475 if (test_bit(R5_InJournal, &dev->flags)) 1476 srcs[slot] = sh->dev[i].orig_page; 1477 else 1478 srcs[slot] = sh->dev[i].page; 1479 } 1480 i = raid6_next_disk(i, disks); 1481 } while (i != d0_idx); 1482 1483 return syndrome_disks; 1484 } 1485 1486 static struct dma_async_tx_descriptor * 1487 ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu) 1488 { 1489 int disks = sh->disks; 1490 struct page **blocks = to_addr_page(percpu, 0); 1491 int target; 1492 int qd_idx = sh->qd_idx; 1493 struct dma_async_tx_descriptor *tx; 1494 struct async_submit_ctl submit; 1495 struct r5dev *tgt; 1496 struct page *dest; 1497 int i; 1498 int count; 1499 1500 BUG_ON(sh->batch_head); 1501 if (sh->ops.target < 0) 1502 target = sh->ops.target2; 1503 else if (sh->ops.target2 < 0) 1504 target = sh->ops.target; 1505 else 1506 /* we should only have one valid target */ 1507 BUG(); 1508 BUG_ON(target < 0); 1509 pr_debug("%s: stripe %llu block: %d\n", 1510 __func__, (unsigned long long)sh->sector, target); 1511 1512 tgt = &sh->dev[target]; 1513 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 1514 dest = tgt->page; 1515 1516 atomic_inc(&sh->count); 1517 1518 if (target == qd_idx) { 1519 count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL); 1520 blocks[count] = NULL; /* regenerating p is not necessary */ 1521 BUG_ON(blocks[count+1] != dest); /* q should already be set */ 1522 init_async_submit(&submit, ASYNC_TX_FENCE, NULL, 1523 ops_complete_compute, sh, 1524 to_addr_conv(sh, percpu, 0)); 1525 tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit); 1526 } else { 1527 /* Compute any data- or p-drive using XOR */ 1528 count = 0; 1529 for (i = disks; i-- ; ) { 1530 if (i == target || i == qd_idx) 1531 continue; 1532 blocks[count++] = sh->dev[i].page; 1533 } 1534 1535 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, 1536 NULL, ops_complete_compute, sh, 1537 to_addr_conv(sh, percpu, 0)); 1538 tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit); 1539 } 1540 1541 return tx; 1542 } 1543 1544 static struct dma_async_tx_descriptor * 1545 ops_run_compute6_2(struct 
stripe_head *sh, struct raid5_percpu *percpu) 1546 { 1547 int i, count, disks = sh->disks; 1548 int syndrome_disks = sh->ddf_layout ? disks : disks-2; 1549 int d0_idx = raid6_d0(sh); 1550 int faila = -1, failb = -1; 1551 int target = sh->ops.target; 1552 int target2 = sh->ops.target2; 1553 struct r5dev *tgt = &sh->dev[target]; 1554 struct r5dev *tgt2 = &sh->dev[target2]; 1555 struct dma_async_tx_descriptor *tx; 1556 struct page **blocks = to_addr_page(percpu, 0); 1557 struct async_submit_ctl submit; 1558 1559 BUG_ON(sh->batch_head); 1560 pr_debug("%s: stripe %llu block1: %d block2: %d\n", 1561 __func__, (unsigned long long)sh->sector, target, target2); 1562 BUG_ON(target < 0 || target2 < 0); 1563 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 1564 BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags)); 1565 1566 /* we need to open-code set_syndrome_sources to handle the 1567 * slot number conversion for 'faila' and 'failb' 1568 */ 1569 for (i = 0; i < disks ; i++) 1570 blocks[i] = NULL; 1571 count = 0; 1572 i = d0_idx; 1573 do { 1574 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); 1575 1576 blocks[slot] = sh->dev[i].page; 1577 1578 if (i == target) 1579 faila = slot; 1580 if (i == target2) 1581 failb = slot; 1582 i = raid6_next_disk(i, disks); 1583 } while (i != d0_idx); 1584 1585 BUG_ON(faila == failb); 1586 if (failb < faila) 1587 swap(faila, failb); 1588 pr_debug("%s: stripe: %llu faila: %d failb: %d\n", 1589 __func__, (unsigned long long)sh->sector, faila, failb); 1590 1591 atomic_inc(&sh->count); 1592 1593 if (failb == syndrome_disks+1) { 1594 /* Q disk is one of the missing disks */ 1595 if (faila == syndrome_disks) { 1596 /* Missing P+Q, just recompute */ 1597 init_async_submit(&submit, ASYNC_TX_FENCE, NULL, 1598 ops_complete_compute, sh, 1599 to_addr_conv(sh, percpu, 0)); 1600 return async_gen_syndrome(blocks, 0, syndrome_disks+2, 1601 STRIPE_SIZE, &submit); 1602 } else { 1603 struct page *dest; 1604 int data_target; 1605 int qd_idx = sh->qd_idx; 1606 1607 /* Missing D+Q: recompute D from P, then recompute Q */ 1608 if (target == qd_idx) 1609 data_target = target2; 1610 else 1611 data_target = target; 1612 1613 count = 0; 1614 for (i = disks; i-- ; ) { 1615 if (i == data_target || i == qd_idx) 1616 continue; 1617 blocks[count++] = sh->dev[i].page; 1618 } 1619 dest = sh->dev[data_target].page; 1620 init_async_submit(&submit, 1621 ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, 1622 NULL, NULL, NULL, 1623 to_addr_conv(sh, percpu, 0)); 1624 tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, 1625 &submit); 1626 1627 count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL); 1628 init_async_submit(&submit, ASYNC_TX_FENCE, tx, 1629 ops_complete_compute, sh, 1630 to_addr_conv(sh, percpu, 0)); 1631 return async_gen_syndrome(blocks, 0, count+2, 1632 STRIPE_SIZE, &submit); 1633 } 1634 } else { 1635 init_async_submit(&submit, ASYNC_TX_FENCE, NULL, 1636 ops_complete_compute, sh, 1637 to_addr_conv(sh, percpu, 0)); 1638 if (failb == syndrome_disks) { 1639 /* We're missing D+P. */ 1640 return async_raid6_datap_recov(syndrome_disks+2, 1641 STRIPE_SIZE, faila, 1642 blocks, &submit); 1643 } else { 1644 /* We're missing D+D. 
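 * Note that faila/failb are slot numbers in the syndrome-ordered blocks[]
 * array built above via raid6_idx_to_slot(), not raw device indices; that is
 * why set_syndrome_sources() could not be used directly here.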
*/ 1645 return async_raid6_2data_recov(syndrome_disks+2, 1646 STRIPE_SIZE, faila, failb, 1647 blocks, &submit); 1648 } 1649 } 1650 } 1651 1652 static void ops_complete_prexor(void *stripe_head_ref) 1653 { 1654 struct stripe_head *sh = stripe_head_ref; 1655 1656 pr_debug("%s: stripe %llu\n", __func__, 1657 (unsigned long long)sh->sector); 1658 1659 if (r5c_is_writeback(sh->raid_conf->log)) 1660 /* 1661 * raid5-cache write back uses orig_page during prexor. 1662 * After prexor, it is time to free orig_page 1663 */ 1664 r5c_release_extra_page(sh); 1665 } 1666 1667 static struct dma_async_tx_descriptor * 1668 ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu, 1669 struct dma_async_tx_descriptor *tx) 1670 { 1671 int disks = sh->disks; 1672 struct page **xor_srcs = to_addr_page(percpu, 0); 1673 int count = 0, pd_idx = sh->pd_idx, i; 1674 struct async_submit_ctl submit; 1675 1676 /* existing parity data subtracted */ 1677 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 1678 1679 BUG_ON(sh->batch_head); 1680 pr_debug("%s: stripe %llu\n", __func__, 1681 (unsigned long long)sh->sector); 1682 1683 for (i = disks; i--; ) { 1684 struct r5dev *dev = &sh->dev[i]; 1685 /* Only process blocks that are known to be uptodate */ 1686 if (test_bit(R5_InJournal, &dev->flags)) 1687 xor_srcs[count++] = dev->orig_page; 1688 else if (test_bit(R5_Wantdrain, &dev->flags)) 1689 xor_srcs[count++] = dev->page; 1690 } 1691 1692 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, 1693 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); 1694 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); 1695 1696 return tx; 1697 } 1698 1699 static struct dma_async_tx_descriptor * 1700 ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu, 1701 struct dma_async_tx_descriptor *tx) 1702 { 1703 struct page **blocks = to_addr_page(percpu, 0); 1704 int count; 1705 struct async_submit_ctl submit; 1706 1707 pr_debug("%s: stripe %llu\n", __func__, 1708 (unsigned long long)sh->sector); 1709 1710 count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_WANT_DRAIN); 1711 1712 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_PQ_XOR_DST, tx, 1713 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); 1714 tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit); 1715 1716 return tx; 1717 } 1718 1719 static struct dma_async_tx_descriptor * 1720 ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) 1721 { 1722 struct r5conf *conf = sh->raid_conf; 1723 int disks = sh->disks; 1724 int i; 1725 struct stripe_head *head_sh = sh; 1726 1727 pr_debug("%s: stripe %llu\n", __func__, 1728 (unsigned long long)sh->sector); 1729 1730 for (i = disks; i--; ) { 1731 struct r5dev *dev; 1732 struct bio *chosen; 1733 1734 sh = head_sh; 1735 if (test_and_clear_bit(R5_Wantdrain, &head_sh->dev[i].flags)) { 1736 struct bio *wbi; 1737 1738 again: 1739 dev = &sh->dev[i]; 1740 /* 1741 * clear R5_InJournal, so when rewriting a page in 1742 * journal, it is not skipped by r5l_log_stripe() 1743 */ 1744 clear_bit(R5_InJournal, &dev->flags); 1745 spin_lock_irq(&sh->stripe_lock); 1746 chosen = dev->towrite; 1747 dev->towrite = NULL; 1748 sh->overwrite_disks = 0; 1749 BUG_ON(dev->written); 1750 wbi = dev->written = chosen; 1751 spin_unlock_irq(&sh->stripe_lock); 1752 WARN_ON(dev->page != dev->orig_page); 1753 1754 while (wbi && wbi->bi_iter.bi_sector < 1755 dev->sector + STRIPE_SECTORS) { 1756 if (wbi->bi_opf & REQ_FUA) 1757 set_bit(R5_WantFUA, &dev->flags); 1758 if 
(wbi->bi_opf & REQ_SYNC) 1759 set_bit(R5_SyncIO, &dev->flags); 1760 if (bio_op(wbi) == REQ_OP_DISCARD) 1761 set_bit(R5_Discard, &dev->flags); 1762 else { 1763 tx = async_copy_data(1, wbi, &dev->page, 1764 dev->sector, tx, sh, 1765 r5c_is_writeback(conf->log)); 1766 if (dev->page != dev->orig_page && 1767 !r5c_is_writeback(conf->log)) { 1768 set_bit(R5_SkipCopy, &dev->flags); 1769 clear_bit(R5_UPTODATE, &dev->flags); 1770 clear_bit(R5_OVERWRITE, &dev->flags); 1771 } 1772 } 1773 wbi = r5_next_bio(wbi, dev->sector); 1774 } 1775 1776 if (head_sh->batch_head) { 1777 sh = list_first_entry(&sh->batch_list, 1778 struct stripe_head, 1779 batch_list); 1780 if (sh == head_sh) 1781 continue; 1782 goto again; 1783 } 1784 } 1785 } 1786 1787 return tx; 1788 } 1789 1790 static void ops_complete_reconstruct(void *stripe_head_ref) 1791 { 1792 struct stripe_head *sh = stripe_head_ref; 1793 int disks = sh->disks; 1794 int pd_idx = sh->pd_idx; 1795 int qd_idx = sh->qd_idx; 1796 int i; 1797 bool fua = false, sync = false, discard = false; 1798 1799 pr_debug("%s: stripe %llu\n", __func__, 1800 (unsigned long long)sh->sector); 1801 1802 for (i = disks; i--; ) { 1803 fua |= test_bit(R5_WantFUA, &sh->dev[i].flags); 1804 sync |= test_bit(R5_SyncIO, &sh->dev[i].flags); 1805 discard |= test_bit(R5_Discard, &sh->dev[i].flags); 1806 } 1807 1808 for (i = disks; i--; ) { 1809 struct r5dev *dev = &sh->dev[i]; 1810 1811 if (dev->written || i == pd_idx || i == qd_idx) { 1812 if (!discard && !test_bit(R5_SkipCopy, &dev->flags)) { 1813 set_bit(R5_UPTODATE, &dev->flags); 1814 if (test_bit(STRIPE_EXPAND_READY, &sh->state)) 1815 set_bit(R5_Expanded, &dev->flags); 1816 } 1817 if (fua) 1818 set_bit(R5_WantFUA, &dev->flags); 1819 if (sync) 1820 set_bit(R5_SyncIO, &dev->flags); 1821 } 1822 } 1823 1824 if (sh->reconstruct_state == reconstruct_state_drain_run) 1825 sh->reconstruct_state = reconstruct_state_drain_result; 1826 else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) 1827 sh->reconstruct_state = reconstruct_state_prexor_drain_result; 1828 else { 1829 BUG_ON(sh->reconstruct_state != reconstruct_state_run); 1830 sh->reconstruct_state = reconstruct_state_result; 1831 } 1832 1833 set_bit(STRIPE_HANDLE, &sh->state); 1834 raid5_release_stripe(sh); 1835 } 1836 1837 static void 1838 ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu, 1839 struct dma_async_tx_descriptor *tx) 1840 { 1841 int disks = sh->disks; 1842 struct page **xor_srcs; 1843 struct async_submit_ctl submit; 1844 int count, pd_idx = sh->pd_idx, i; 1845 struct page *xor_dest; 1846 int prexor = 0; 1847 unsigned long flags; 1848 int j = 0; 1849 struct stripe_head *head_sh = sh; 1850 int last_stripe; 1851 1852 pr_debug("%s: stripe %llu\n", __func__, 1853 (unsigned long long)sh->sector); 1854 1855 for (i = 0; i < sh->disks; i++) { 1856 if (pd_idx == i) 1857 continue; 1858 if (!test_bit(R5_Discard, &sh->dev[i].flags)) 1859 break; 1860 } 1861 if (i >= sh->disks) { 1862 atomic_inc(&sh->count); 1863 set_bit(R5_Discard, &sh->dev[pd_idx].flags); 1864 ops_complete_reconstruct(sh); 1865 return; 1866 } 1867 again: 1868 count = 0; 1869 xor_srcs = to_addr_page(percpu, j); 1870 /* check if prexor is active which means only process blocks 1871 * that are part of a read-modify-write (written) 1872 */ 1873 if (head_sh->reconstruct_state == reconstruct_state_prexor_drain_run) { 1874 prexor = 1; 1875 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 1876 for (i = disks; i--; ) { 1877 struct r5dev *dev = &sh->dev[i]; 1878 if (head_sh->dev[i].written 
|| 1879 test_bit(R5_InJournal, &head_sh->dev[i].flags)) 1880 xor_srcs[count++] = dev->page; 1881 } 1882 } else { 1883 xor_dest = sh->dev[pd_idx].page; 1884 for (i = disks; i--; ) { 1885 struct r5dev *dev = &sh->dev[i]; 1886 if (i != pd_idx) 1887 xor_srcs[count++] = dev->page; 1888 } 1889 } 1890 1891 /* 1/ if we prexor'd then the dest is reused as a source 1892 * 2/ if we did not prexor then we are redoing the parity 1893 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST 1894 * for the synchronous xor case 1895 */ 1896 last_stripe = !head_sh->batch_head || 1897 list_first_entry(&sh->batch_list, 1898 struct stripe_head, batch_list) == head_sh; 1899 if (last_stripe) { 1900 flags = ASYNC_TX_ACK | 1901 (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST); 1902 1903 atomic_inc(&head_sh->count); 1904 init_async_submit(&submit, flags, tx, ops_complete_reconstruct, head_sh, 1905 to_addr_conv(sh, percpu, j)); 1906 } else { 1907 flags = prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST; 1908 init_async_submit(&submit, flags, tx, NULL, NULL, 1909 to_addr_conv(sh, percpu, j)); 1910 } 1911 1912 if (unlikely(count == 1)) 1913 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit); 1914 else 1915 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); 1916 if (!last_stripe) { 1917 j++; 1918 sh = list_first_entry(&sh->batch_list, struct stripe_head, 1919 batch_list); 1920 goto again; 1921 } 1922 } 1923 1924 static void 1925 ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu, 1926 struct dma_async_tx_descriptor *tx) 1927 { 1928 struct async_submit_ctl submit; 1929 struct page **blocks; 1930 int count, i, j = 0; 1931 struct stripe_head *head_sh = sh; 1932 int last_stripe; 1933 int synflags; 1934 unsigned long txflags; 1935 1936 pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); 1937 1938 for (i = 0; i < sh->disks; i++) { 1939 if (sh->pd_idx == i || sh->qd_idx == i) 1940 continue; 1941 if (!test_bit(R5_Discard, &sh->dev[i].flags)) 1942 break; 1943 } 1944 if (i >= sh->disks) { 1945 atomic_inc(&sh->count); 1946 set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); 1947 set_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); 1948 ops_complete_reconstruct(sh); 1949 return; 1950 } 1951 1952 again: 1953 blocks = to_addr_page(percpu, j); 1954 1955 if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) { 1956 synflags = SYNDROME_SRC_WRITTEN; 1957 txflags = ASYNC_TX_ACK | ASYNC_TX_PQ_XOR_DST; 1958 } else { 1959 synflags = SYNDROME_SRC_ALL; 1960 txflags = ASYNC_TX_ACK; 1961 } 1962 1963 count = set_syndrome_sources(blocks, sh, synflags); 1964 last_stripe = !head_sh->batch_head || 1965 list_first_entry(&sh->batch_list, 1966 struct stripe_head, batch_list) == head_sh; 1967 1968 if (last_stripe) { 1969 atomic_inc(&head_sh->count); 1970 init_async_submit(&submit, txflags, tx, ops_complete_reconstruct, 1971 head_sh, to_addr_conv(sh, percpu, j)); 1972 } else 1973 init_async_submit(&submit, 0, tx, NULL, NULL, 1974 to_addr_conv(sh, percpu, j)); 1975 tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit); 1976 if (!last_stripe) { 1977 j++; 1978 sh = list_first_entry(&sh->batch_list, struct stripe_head, 1979 batch_list); 1980 goto again; 1981 } 1982 } 1983 1984 static void ops_complete_check(void *stripe_head_ref) 1985 { 1986 struct stripe_head *sh = stripe_head_ref; 1987 1988 pr_debug("%s: stripe %llu\n", __func__, 1989 (unsigned long long)sh->sector); 1990 1991 sh->check_state = check_state_check_result; 1992 set_bit(STRIPE_HANDLE, 
&sh->state); 1993 raid5_release_stripe(sh); 1994 } 1995 1996 static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu) 1997 { 1998 int disks = sh->disks; 1999 int pd_idx = sh->pd_idx; 2000 int qd_idx = sh->qd_idx; 2001 struct page *xor_dest; 2002 struct page **xor_srcs = to_addr_page(percpu, 0); 2003 struct dma_async_tx_descriptor *tx; 2004 struct async_submit_ctl submit; 2005 int count; 2006 int i; 2007 2008 pr_debug("%s: stripe %llu\n", __func__, 2009 (unsigned long long)sh->sector); 2010 2011 BUG_ON(sh->batch_head); 2012 count = 0; 2013 xor_dest = sh->dev[pd_idx].page; 2014 xor_srcs[count++] = xor_dest; 2015 for (i = disks; i--; ) { 2016 if (i == pd_idx || i == qd_idx) 2017 continue; 2018 xor_srcs[count++] = sh->dev[i].page; 2019 } 2020 2021 init_async_submit(&submit, 0, NULL, NULL, NULL, 2022 to_addr_conv(sh, percpu, 0)); 2023 tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 2024 &sh->ops.zero_sum_result, &submit); 2025 2026 atomic_inc(&sh->count); 2027 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL); 2028 tx = async_trigger_callback(&submit); 2029 } 2030 2031 static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp) 2032 { 2033 struct page **srcs = to_addr_page(percpu, 0); 2034 struct async_submit_ctl submit; 2035 int count; 2036 2037 pr_debug("%s: stripe %llu checkp: %d\n", __func__, 2038 (unsigned long long)sh->sector, checkp); 2039 2040 BUG_ON(sh->batch_head); 2041 count = set_syndrome_sources(srcs, sh, SYNDROME_SRC_ALL); 2042 if (!checkp) 2043 srcs[count] = NULL; 2044 2045 atomic_inc(&sh->count); 2046 init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check, 2047 sh, to_addr_conv(sh, percpu, 0)); 2048 async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE, 2049 &sh->ops.zero_sum_result, percpu->spare_page, &submit); 2050 } 2051 2052 static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) 2053 { 2054 int overlap_clear = 0, i, disks = sh->disks; 2055 struct dma_async_tx_descriptor *tx = NULL; 2056 struct r5conf *conf = sh->raid_conf; 2057 int level = conf->level; 2058 struct raid5_percpu *percpu; 2059 unsigned long cpu; 2060 2061 cpu = get_cpu(); 2062 percpu = per_cpu_ptr(conf->percpu, cpu); 2063 if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) { 2064 ops_run_biofill(sh); 2065 overlap_clear++; 2066 } 2067 2068 if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) { 2069 if (level < 6) 2070 tx = ops_run_compute5(sh, percpu); 2071 else { 2072 if (sh->ops.target2 < 0 || sh->ops.target < 0) 2073 tx = ops_run_compute6_1(sh, percpu); 2074 else 2075 tx = ops_run_compute6_2(sh, percpu); 2076 } 2077 /* terminate the chain if reconstruct is not set to be run */ 2078 if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) 2079 async_tx_ack(tx); 2080 } 2081 2082 if (test_bit(STRIPE_OP_PREXOR, &ops_request)) { 2083 if (level < 6) 2084 tx = ops_run_prexor5(sh, percpu, tx); 2085 else 2086 tx = ops_run_prexor6(sh, percpu, tx); 2087 } 2088 2089 if (test_bit(STRIPE_OP_PARTIAL_PARITY, &ops_request)) 2090 tx = ops_run_partial_parity(sh, percpu, tx); 2091 2092 if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) { 2093 tx = ops_run_biodrain(sh, tx); 2094 overlap_clear++; 2095 } 2096 2097 if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) { 2098 if (level < 6) 2099 ops_run_reconstruct5(sh, percpu, tx); 2100 else 2101 ops_run_reconstruct6(sh, percpu, tx); 2102 } 2103 2104 if (test_bit(STRIPE_OP_CHECK, &ops_request)) { 2105 if (sh->check_state == check_state_run) 2106 ops_run_check_p(sh, 
percpu); 2107 else if (sh->check_state == check_state_run_q) 2108 ops_run_check_pq(sh, percpu, 0); 2109 else if (sh->check_state == check_state_run_pq) 2110 ops_run_check_pq(sh, percpu, 1); 2111 else 2112 BUG(); 2113 } 2114 2115 if (overlap_clear && !sh->batch_head) 2116 for (i = disks; i--; ) { 2117 struct r5dev *dev = &sh->dev[i]; 2118 if (test_and_clear_bit(R5_Overlap, &dev->flags)) 2119 wake_up(&sh->raid_conf->wait_for_overlap); 2120 } 2121 put_cpu(); 2122 } 2123 2124 static void free_stripe(struct kmem_cache *sc, struct stripe_head *sh) 2125 { 2126 if (sh->ppl_page) 2127 __free_page(sh->ppl_page); 2128 kmem_cache_free(sc, sh); 2129 } 2130 2131 static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp, 2132 int disks, struct r5conf *conf) 2133 { 2134 struct stripe_head *sh; 2135 int i; 2136 2137 sh = kmem_cache_zalloc(sc, gfp); 2138 if (sh) { 2139 spin_lock_init(&sh->stripe_lock); 2140 spin_lock_init(&sh->batch_lock); 2141 INIT_LIST_HEAD(&sh->batch_list); 2142 INIT_LIST_HEAD(&sh->lru); 2143 INIT_LIST_HEAD(&sh->r5c); 2144 INIT_LIST_HEAD(&sh->log_list); 2145 atomic_set(&sh->count, 1); 2146 sh->raid_conf = conf; 2147 sh->log_start = MaxSector; 2148 for (i = 0; i < disks; i++) { 2149 struct r5dev *dev = &sh->dev[i]; 2150 2151 bio_init(&dev->req, &dev->vec, 1); 2152 bio_init(&dev->rreq, &dev->rvec, 1); 2153 } 2154 2155 if (raid5_has_ppl(conf)) { 2156 sh->ppl_page = alloc_page(gfp); 2157 if (!sh->ppl_page) { 2158 free_stripe(sc, sh); 2159 sh = NULL; 2160 } 2161 } 2162 } 2163 return sh; 2164 } 2165 static int grow_one_stripe(struct r5conf *conf, gfp_t gfp) 2166 { 2167 struct stripe_head *sh; 2168 2169 sh = alloc_stripe(conf->slab_cache, gfp, conf->pool_size, conf); 2170 if (!sh) 2171 return 0; 2172 2173 if (grow_buffers(sh, gfp)) { 2174 shrink_buffers(sh); 2175 free_stripe(conf->slab_cache, sh); 2176 return 0; 2177 } 2178 sh->hash_lock_index = 2179 conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS; 2180 /* we just created an active stripe so... */ 2181 atomic_inc(&conf->active_stripes); 2182 2183 raid5_release_stripe(sh); 2184 conf->max_nr_stripes++; 2185 return 1; 2186 } 2187 2188 static int grow_stripes(struct r5conf *conf, int num) 2189 { 2190 struct kmem_cache *sc; 2191 size_t namelen = sizeof(conf->cache_name[0]); 2192 int devs = max(conf->raid_disks, conf->previous_raid_disks); 2193 2194 if (conf->mddev->gendisk) 2195 snprintf(conf->cache_name[0], namelen, 2196 "raid%d-%s", conf->level, mdname(conf->mddev)); 2197 else 2198 snprintf(conf->cache_name[0], namelen, 2199 "raid%d-%p", conf->level, conf->mddev); 2200 snprintf(conf->cache_name[1], namelen, "%.27s-alt", conf->cache_name[0]); 2201 2202 conf->active_name = 0; 2203 sc = kmem_cache_create(conf->cache_name[conf->active_name], 2204 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev), 2205 0, 0, NULL); 2206 if (!sc) 2207 return 1; 2208 conf->slab_cache = sc; 2209 conf->pool_size = devs; 2210 while (num--) 2211 if (!grow_one_stripe(conf, GFP_KERNEL)) 2212 return 1; 2213 2214 return 0; 2215 } 2216 2217 /** 2218 * scribble_len - return the required size of the scribble region 2219 * @num - total number of disks in the array 2220 * 2221 * The size must be enough to contain: 2222 * 1/ a struct page pointer for each device in the array +2 2223 * 2/ room to convert each entry in (1) to its corresponding dma 2224 * (dma_map_page()) or page (page_address()) address. 
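 *
 * A worked sizing example (illustrative; it assumes 8-byte pointers and an
 * 8-byte addr_conv_t, which depends on the build): for a 6-device array,
 * scribble_alloc() below computes, per scribble object,
 *
 *	obj_size = (6 + 2) * sizeof(struct page *)
 *	         + (6 + 2) * sizeof(addr_conv_t)  = 64 + 64 = 128 bytes
 *
 * and allocates cnt such objects, where resize_chunks() passes
 * cnt = new_sectors / STRIPE_SECTORS, i.e. one object per stripe in a chunk.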
2225 * 2226 * Note: the +2 is for the destination buffers of the ddf/raid6 case where we 2227 * calculate over all devices (not just the data blocks), using zeros in place 2228 * of the P and Q blocks. 2229 */ 2230 static int scribble_alloc(struct raid5_percpu *percpu, 2231 int num, int cnt, gfp_t flags) 2232 { 2233 size_t obj_size = 2234 sizeof(struct page *) * (num+2) + 2235 sizeof(addr_conv_t) * (num+2); 2236 void *scribble; 2237 2238 scribble = kvmalloc_array(cnt, obj_size, flags); 2239 if (!scribble) 2240 return -ENOMEM; 2241 2242 kvfree(percpu->scribble); 2243 2244 percpu->scribble = scribble; 2245 percpu->scribble_obj_size = obj_size; 2246 return 0; 2247 } 2248 2249 static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors) 2250 { 2251 unsigned long cpu; 2252 int err = 0; 2253 2254 /* 2255 * Never shrink. And mddev_suspend() could deadlock if this is called 2256 * from raid5d. In that case, scribble_disks and scribble_sectors 2257 * should equal to new_disks and new_sectors 2258 */ 2259 if (conf->scribble_disks >= new_disks && 2260 conf->scribble_sectors >= new_sectors) 2261 return 0; 2262 mddev_suspend(conf->mddev); 2263 get_online_cpus(); 2264 2265 for_each_present_cpu(cpu) { 2266 struct raid5_percpu *percpu; 2267 2268 percpu = per_cpu_ptr(conf->percpu, cpu); 2269 err = scribble_alloc(percpu, new_disks, 2270 new_sectors / STRIPE_SECTORS, 2271 GFP_NOIO); 2272 if (err) 2273 break; 2274 } 2275 2276 put_online_cpus(); 2277 mddev_resume(conf->mddev); 2278 if (!err) { 2279 conf->scribble_disks = new_disks; 2280 conf->scribble_sectors = new_sectors; 2281 } 2282 return err; 2283 } 2284 2285 static int resize_stripes(struct r5conf *conf, int newsize) 2286 { 2287 /* Make all the stripes able to hold 'newsize' devices. 2288 * New slots in each stripe get 'page' set to a new page. 2289 * 2290 * This happens in stages: 2291 * 1/ create a new kmem_cache and allocate the required number of 2292 * stripe_heads. 2293 * 2/ gather all the old stripe_heads and transfer the pages across 2294 * to the new stripe_heads. This will have the side effect of 2295 * freezing the array as once all stripe_heads have been collected, 2296 * no IO will be possible. Old stripe heads are freed once their 2297 * pages have been transferred over, and the old kmem_cache is 2298 * freed when all stripes are done. 2299 * 3/ reallocate conf->disks to be suitable bigger. If this fails, 2300 * we simple return a failure status - no need to clean anything up. 2301 * 4/ allocate new pages for the new slots in the new stripe_heads. 2302 * If this fails, we don't bother trying the shrink the 2303 * stripe_heads down again, we just leave them as they are. 2304 * As each stripe_head is processed the new one is released into 2305 * active service. 2306 * 2307 * Once step2 is started, we cannot afford to wait for a write, 2308 * so we use GFP_NOIO allocations. 
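 *
 * Distribution sketch for step 2 (counts invented): with
 * max_nr_stripes == 260 and NR_STRIPE_HASH_LOCKS == 8, the per-hash quota
 * used below is 260 / 8 == 32, plus one extra stripe for hashes 0..3
 * because 260 % 8 == 4, so hashes 0-3 take 33 stripes each and hashes 4-7
 * take 32, spreading the new stripe_heads evenly over the hash locks.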
2309 */ 2310 struct stripe_head *osh, *nsh; 2311 LIST_HEAD(newstripes); 2312 struct disk_info *ndisks; 2313 int err = 0; 2314 struct kmem_cache *sc; 2315 int i; 2316 int hash, cnt; 2317 2318 md_allow_write(conf->mddev); 2319 2320 /* Step 1 */ 2321 sc = kmem_cache_create(conf->cache_name[1-conf->active_name], 2322 sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev), 2323 0, 0, NULL); 2324 if (!sc) 2325 return -ENOMEM; 2326 2327 /* Need to ensure auto-resizing doesn't interfere */ 2328 mutex_lock(&conf->cache_size_mutex); 2329 2330 for (i = conf->max_nr_stripes; i; i--) { 2331 nsh = alloc_stripe(sc, GFP_KERNEL, newsize, conf); 2332 if (!nsh) 2333 break; 2334 2335 list_add(&nsh->lru, &newstripes); 2336 } 2337 if (i) { 2338 /* didn't get enough, give up */ 2339 while (!list_empty(&newstripes)) { 2340 nsh = list_entry(newstripes.next, struct stripe_head, lru); 2341 list_del(&nsh->lru); 2342 free_stripe(sc, nsh); 2343 } 2344 kmem_cache_destroy(sc); 2345 mutex_unlock(&conf->cache_size_mutex); 2346 return -ENOMEM; 2347 } 2348 /* Step 2 - Must use GFP_NOIO now. 2349 * OK, we have enough stripes, start collecting inactive 2350 * stripes and copying them over 2351 */ 2352 hash = 0; 2353 cnt = 0; 2354 list_for_each_entry(nsh, &newstripes, lru) { 2355 lock_device_hash_lock(conf, hash); 2356 wait_event_cmd(conf->wait_for_stripe, 2357 !list_empty(conf->inactive_list + hash), 2358 unlock_device_hash_lock(conf, hash), 2359 lock_device_hash_lock(conf, hash)); 2360 osh = get_free_stripe(conf, hash); 2361 unlock_device_hash_lock(conf, hash); 2362 2363 for(i=0; i<conf->pool_size; i++) { 2364 nsh->dev[i].page = osh->dev[i].page; 2365 nsh->dev[i].orig_page = osh->dev[i].page; 2366 } 2367 nsh->hash_lock_index = hash; 2368 free_stripe(conf->slab_cache, osh); 2369 cnt++; 2370 if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS + 2371 !!((conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS) > hash)) { 2372 hash++; 2373 cnt = 0; 2374 } 2375 } 2376 kmem_cache_destroy(conf->slab_cache); 2377 2378 /* Step 3. 
2379 * At this point, we are holding all the stripes so the array 2380 * is completely stalled, so now is a good time to resize 2381 * conf->disks and the scribble region 2382 */ 2383 ndisks = kcalloc(newsize, sizeof(struct disk_info), GFP_NOIO); 2384 if (ndisks) { 2385 for (i = 0; i < conf->pool_size; i++) 2386 ndisks[i] = conf->disks[i]; 2387 2388 for (i = conf->pool_size; i < newsize; i++) { 2389 ndisks[i].extra_page = alloc_page(GFP_NOIO); 2390 if (!ndisks[i].extra_page) 2391 err = -ENOMEM; 2392 } 2393 2394 if (err) { 2395 for (i = conf->pool_size; i < newsize; i++) 2396 if (ndisks[i].extra_page) 2397 put_page(ndisks[i].extra_page); 2398 kfree(ndisks); 2399 } else { 2400 kfree(conf->disks); 2401 conf->disks = ndisks; 2402 } 2403 } else 2404 err = -ENOMEM; 2405 2406 mutex_unlock(&conf->cache_size_mutex); 2407 2408 conf->slab_cache = sc; 2409 conf->active_name = 1-conf->active_name; 2410 2411 /* Step 4, return new stripes to service */ 2412 while(!list_empty(&newstripes)) { 2413 nsh = list_entry(newstripes.next, struct stripe_head, lru); 2414 list_del_init(&nsh->lru); 2415 2416 for (i=conf->raid_disks; i < newsize; i++) 2417 if (nsh->dev[i].page == NULL) { 2418 struct page *p = alloc_page(GFP_NOIO); 2419 nsh->dev[i].page = p; 2420 nsh->dev[i].orig_page = p; 2421 if (!p) 2422 err = -ENOMEM; 2423 } 2424 raid5_release_stripe(nsh); 2425 } 2426 /* critical section pass, GFP_NOIO no longer needed */ 2427 2428 if (!err) 2429 conf->pool_size = newsize; 2430 return err; 2431 } 2432 2433 static int drop_one_stripe(struct r5conf *conf) 2434 { 2435 struct stripe_head *sh; 2436 int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK; 2437 2438 spin_lock_irq(conf->hash_locks + hash); 2439 sh = get_free_stripe(conf, hash); 2440 spin_unlock_irq(conf->hash_locks + hash); 2441 if (!sh) 2442 return 0; 2443 BUG_ON(atomic_read(&sh->count)); 2444 shrink_buffers(sh); 2445 free_stripe(conf->slab_cache, sh); 2446 atomic_dec(&conf->active_stripes); 2447 conf->max_nr_stripes--; 2448 return 1; 2449 } 2450 2451 static void shrink_stripes(struct r5conf *conf) 2452 { 2453 while (conf->max_nr_stripes && 2454 drop_one_stripe(conf)) 2455 ; 2456 2457 kmem_cache_destroy(conf->slab_cache); 2458 conf->slab_cache = NULL; 2459 } 2460 2461 static void raid5_end_read_request(struct bio * bi) 2462 { 2463 struct stripe_head *sh = bi->bi_private; 2464 struct r5conf *conf = sh->raid_conf; 2465 int disks = sh->disks, i; 2466 char b[BDEVNAME_SIZE]; 2467 struct md_rdev *rdev = NULL; 2468 sector_t s; 2469 2470 for (i=0 ; i<disks; i++) 2471 if (bi == &sh->dev[i].req) 2472 break; 2473 2474 pr_debug("end_read_request %llu/%d, count: %d, error %d.\n", 2475 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 2476 bi->bi_status); 2477 if (i == disks) { 2478 bio_reset(bi); 2479 BUG(); 2480 return; 2481 } 2482 if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) 2483 /* If replacement finished while this request was outstanding, 2484 * 'replacement' might be NULL already. 2485 * In that case it moved down to 'rdev'. 2486 * rdev is not removed until all requests are finished. 2487 */ 2488 rdev = conf->disks[i].replacement; 2489 if (!rdev) 2490 rdev = conf->disks[i].rdev; 2491 2492 if (use_new_offset(conf, sh)) 2493 s = sh->sector + rdev->new_data_offset; 2494 else 2495 s = sh->sector + rdev->data_offset; 2496 if (!bi->bi_status) { 2497 set_bit(R5_UPTODATE, &sh->dev[i].flags); 2498 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 2499 /* Note that this cannot happen on a 2500 * replacement device. 
We just fail those on 2501 * any error 2502 */ 2503 pr_info_ratelimited( 2504 "md/raid:%s: read error corrected (%lu sectors at %llu on %s)\n", 2505 mdname(conf->mddev), STRIPE_SECTORS, 2506 (unsigned long long)s, 2507 bdevname(rdev->bdev, b)); 2508 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); 2509 clear_bit(R5_ReadError, &sh->dev[i].flags); 2510 clear_bit(R5_ReWrite, &sh->dev[i].flags); 2511 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) 2512 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); 2513 2514 if (test_bit(R5_InJournal, &sh->dev[i].flags)) 2515 /* 2516 * end read for a page in journal, this 2517 * must be preparing for prexor in rmw 2518 */ 2519 set_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags); 2520 2521 if (atomic_read(&rdev->read_errors)) 2522 atomic_set(&rdev->read_errors, 0); 2523 } else { 2524 const char *bdn = bdevname(rdev->bdev, b); 2525 int retry = 0; 2526 int set_bad = 0; 2527 2528 clear_bit(R5_UPTODATE, &sh->dev[i].flags); 2529 if (!(bi->bi_status == BLK_STS_PROTECTION)) 2530 atomic_inc(&rdev->read_errors); 2531 if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) 2532 pr_warn_ratelimited( 2533 "md/raid:%s: read error on replacement device (sector %llu on %s).\n", 2534 mdname(conf->mddev), 2535 (unsigned long long)s, 2536 bdn); 2537 else if (conf->mddev->degraded >= conf->max_degraded) { 2538 set_bad = 1; 2539 pr_warn_ratelimited( 2540 "md/raid:%s: read error not correctable (sector %llu on %s).\n", 2541 mdname(conf->mddev), 2542 (unsigned long long)s, 2543 bdn); 2544 } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) { 2545 /* Oh, no!!! */ 2546 set_bad = 1; 2547 pr_warn_ratelimited( 2548 "md/raid:%s: read error NOT corrected!! (sector %llu on %s).\n", 2549 mdname(conf->mddev), 2550 (unsigned long long)s, 2551 bdn); 2552 } else if (atomic_read(&rdev->read_errors) 2553 > conf->max_nr_stripes) { 2554 if (!test_bit(Faulty, &rdev->flags)) { 2555 pr_warn("md/raid:%s: %d read_errors > %d stripes\n", 2556 mdname(conf->mddev), 2557 atomic_read(&rdev->read_errors), 2558 conf->max_nr_stripes); 2559 pr_warn("md/raid:%s: Too many read errors, failing device %s.\n", 2560 mdname(conf->mddev), bdn); 2561 } 2562 } else 2563 retry = 1; 2564 if (set_bad && test_bit(In_sync, &rdev->flags) 2565 && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) 2566 retry = 1; 2567 if (retry) 2568 if (sh->qd_idx >= 0 && sh->pd_idx == i) 2569 set_bit(R5_ReadError, &sh->dev[i].flags); 2570 else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) { 2571 set_bit(R5_ReadError, &sh->dev[i].flags); 2572 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); 2573 } else 2574 set_bit(R5_ReadNoMerge, &sh->dev[i].flags); 2575 else { 2576 clear_bit(R5_ReadError, &sh->dev[i].flags); 2577 clear_bit(R5_ReWrite, &sh->dev[i].flags); 2578 if (!(set_bad 2579 && test_bit(In_sync, &rdev->flags) 2580 && rdev_set_badblocks( 2581 rdev, sh->sector, STRIPE_SECTORS, 0))) 2582 md_error(conf->mddev, rdev); 2583 } 2584 } 2585 rdev_dec_pending(rdev, conf->mddev); 2586 bio_reset(bi); 2587 clear_bit(R5_LOCKED, &sh->dev[i].flags); 2588 set_bit(STRIPE_HANDLE, &sh->state); 2589 raid5_release_stripe(sh); 2590 } 2591 2592 static void raid5_end_write_request(struct bio *bi) 2593 { 2594 struct stripe_head *sh = bi->bi_private; 2595 struct r5conf *conf = sh->raid_conf; 2596 int disks = sh->disks, i; 2597 struct md_rdev *uninitialized_var(rdev); 2598 sector_t first_bad; 2599 int bad_sectors; 2600 int replacement = 0; 2601 2602 for (i = 0 ; i < disks; i++) { 2603 if (bi == &sh->dev[i].req) { 2604 rdev = conf->disks[i].rdev; 2605 break; 2606 } 2607 if (bi 
== &sh->dev[i].rreq) { 2608 rdev = conf->disks[i].replacement; 2609 if (rdev) 2610 replacement = 1; 2611 else 2612 /* rdev was removed and 'replacement' 2613 * replaced it. rdev is not removed 2614 * until all requests are finished. 2615 */ 2616 rdev = conf->disks[i].rdev; 2617 break; 2618 } 2619 } 2620 pr_debug("end_write_request %llu/%d, count %d, error: %d.\n", 2621 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 2622 bi->bi_status); 2623 if (i == disks) { 2624 bio_reset(bi); 2625 BUG(); 2626 return; 2627 } 2628 2629 if (replacement) { 2630 if (bi->bi_status) 2631 md_error(conf->mddev, rdev); 2632 else if (is_badblock(rdev, sh->sector, 2633 STRIPE_SECTORS, 2634 &first_bad, &bad_sectors)) 2635 set_bit(R5_MadeGoodRepl, &sh->dev[i].flags); 2636 } else { 2637 if (bi->bi_status) { 2638 set_bit(STRIPE_DEGRADED, &sh->state); 2639 set_bit(WriteErrorSeen, &rdev->flags); 2640 set_bit(R5_WriteError, &sh->dev[i].flags); 2641 if (!test_and_set_bit(WantReplacement, &rdev->flags)) 2642 set_bit(MD_RECOVERY_NEEDED, 2643 &rdev->mddev->recovery); 2644 } else if (is_badblock(rdev, sh->sector, 2645 STRIPE_SECTORS, 2646 &first_bad, &bad_sectors)) { 2647 set_bit(R5_MadeGood, &sh->dev[i].flags); 2648 if (test_bit(R5_ReadError, &sh->dev[i].flags)) 2649 /* That was a successful write so make 2650 * sure it looks like we already did 2651 * a re-write. 2652 */ 2653 set_bit(R5_ReWrite, &sh->dev[i].flags); 2654 } 2655 } 2656 rdev_dec_pending(rdev, conf->mddev); 2657 2658 if (sh->batch_head && bi->bi_status && !replacement) 2659 set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state); 2660 2661 bio_reset(bi); 2662 if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags)) 2663 clear_bit(R5_LOCKED, &sh->dev[i].flags); 2664 set_bit(STRIPE_HANDLE, &sh->state); 2665 raid5_release_stripe(sh); 2666 2667 if (sh->batch_head && sh != sh->batch_head) 2668 raid5_release_stripe(sh->batch_head); 2669 } 2670 2671 static void raid5_error(struct mddev *mddev, struct md_rdev *rdev) 2672 { 2673 char b[BDEVNAME_SIZE]; 2674 struct r5conf *conf = mddev->private; 2675 unsigned long flags; 2676 pr_debug("raid456: error called\n"); 2677 2678 spin_lock_irqsave(&conf->device_lock, flags); 2679 2680 if (test_bit(In_sync, &rdev->flags) && 2681 mddev->degraded == conf->max_degraded) { 2682 /* 2683 * Don't allow to achieve failed state 2684 * Don't try to recover this device 2685 */ 2686 conf->recovery_disabled = mddev->recovery_disabled; 2687 spin_unlock_irqrestore(&conf->device_lock, flags); 2688 return; 2689 } 2690 2691 set_bit(Faulty, &rdev->flags); 2692 clear_bit(In_sync, &rdev->flags); 2693 mddev->degraded = raid5_calc_degraded(conf); 2694 spin_unlock_irqrestore(&conf->device_lock, flags); 2695 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 2696 2697 set_bit(Blocked, &rdev->flags); 2698 set_mask_bits(&mddev->sb_flags, 0, 2699 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING)); 2700 pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n" 2701 "md/raid:%s: Operation continuing on %d devices.\n", 2702 mdname(mddev), 2703 bdevname(rdev->bdev, b), 2704 mdname(mddev), 2705 conf->raid_disks - mddev->degraded); 2706 r5c_update_on_rdev_error(mddev, rdev); 2707 } 2708 2709 /* 2710 * Input: a 'big' sector number, 2711 * Output: index of the data and parity disk, and the sector # in them. 
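 *
 * A worked example (all numbers illustrative): 4 drives, RAID5,
 * 64KiB chunks (sectors_per_chunk == 128), ALGORITHM_LEFT_SYMMETRIC,
 * r_sector == 1000:
 *
 *	chunk_offset = 1000 % 128 = 104,  chunk_number = 1000 / 128 = 7
 *	*dd_idx      = 7 % 3 = 1,         stripe       = 7 / 3 = 2
 *	pd_idx       = data_disks - (stripe % raid_disks) = 3 - 2 = 1
 *	*dd_idx      = (pd_idx + 1 + *dd_idx) % raid_disks = 3
 *	new_sector   = stripe * sectors_per_chunk + chunk_offset = 360
 *
 * i.e. the data block lives on device 3 at sector 360, and parity for
 * that stripe sits on device 1.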
2712 */ 2713 sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector, 2714 int previous, int *dd_idx, 2715 struct stripe_head *sh) 2716 { 2717 sector_t stripe, stripe2; 2718 sector_t chunk_number; 2719 unsigned int chunk_offset; 2720 int pd_idx, qd_idx; 2721 int ddf_layout = 0; 2722 sector_t new_sector; 2723 int algorithm = previous ? conf->prev_algo 2724 : conf->algorithm; 2725 int sectors_per_chunk = previous ? conf->prev_chunk_sectors 2726 : conf->chunk_sectors; 2727 int raid_disks = previous ? conf->previous_raid_disks 2728 : conf->raid_disks; 2729 int data_disks = raid_disks - conf->max_degraded; 2730 2731 /* First compute the information on this sector */ 2732 2733 /* 2734 * Compute the chunk number and the sector offset inside the chunk 2735 */ 2736 chunk_offset = sector_div(r_sector, sectors_per_chunk); 2737 chunk_number = r_sector; 2738 2739 /* 2740 * Compute the stripe number 2741 */ 2742 stripe = chunk_number; 2743 *dd_idx = sector_div(stripe, data_disks); 2744 stripe2 = stripe; 2745 /* 2746 * Select the parity disk based on the user selected algorithm. 2747 */ 2748 pd_idx = qd_idx = -1; 2749 switch(conf->level) { 2750 case 4: 2751 pd_idx = data_disks; 2752 break; 2753 case 5: 2754 switch (algorithm) { 2755 case ALGORITHM_LEFT_ASYMMETRIC: 2756 pd_idx = data_disks - sector_div(stripe2, raid_disks); 2757 if (*dd_idx >= pd_idx) 2758 (*dd_idx)++; 2759 break; 2760 case ALGORITHM_RIGHT_ASYMMETRIC: 2761 pd_idx = sector_div(stripe2, raid_disks); 2762 if (*dd_idx >= pd_idx) 2763 (*dd_idx)++; 2764 break; 2765 case ALGORITHM_LEFT_SYMMETRIC: 2766 pd_idx = data_disks - sector_div(stripe2, raid_disks); 2767 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 2768 break; 2769 case ALGORITHM_RIGHT_SYMMETRIC: 2770 pd_idx = sector_div(stripe2, raid_disks); 2771 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 2772 break; 2773 case ALGORITHM_PARITY_0: 2774 pd_idx = 0; 2775 (*dd_idx)++; 2776 break; 2777 case ALGORITHM_PARITY_N: 2778 pd_idx = data_disks; 2779 break; 2780 default: 2781 BUG(); 2782 } 2783 break; 2784 case 6: 2785 2786 switch (algorithm) { 2787 case ALGORITHM_LEFT_ASYMMETRIC: 2788 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 2789 qd_idx = pd_idx + 1; 2790 if (pd_idx == raid_disks-1) { 2791 (*dd_idx)++; /* Q D D D P */ 2792 qd_idx = 0; 2793 } else if (*dd_idx >= pd_idx) 2794 (*dd_idx) += 2; /* D D P Q D */ 2795 break; 2796 case ALGORITHM_RIGHT_ASYMMETRIC: 2797 pd_idx = sector_div(stripe2, raid_disks); 2798 qd_idx = pd_idx + 1; 2799 if (pd_idx == raid_disks-1) { 2800 (*dd_idx)++; /* Q D D D P */ 2801 qd_idx = 0; 2802 } else if (*dd_idx >= pd_idx) 2803 (*dd_idx) += 2; /* D D P Q D */ 2804 break; 2805 case ALGORITHM_LEFT_SYMMETRIC: 2806 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 2807 qd_idx = (pd_idx + 1) % raid_disks; 2808 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; 2809 break; 2810 case ALGORITHM_RIGHT_SYMMETRIC: 2811 pd_idx = sector_div(stripe2, raid_disks); 2812 qd_idx = (pd_idx + 1) % raid_disks; 2813 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; 2814 break; 2815 2816 case ALGORITHM_PARITY_0: 2817 pd_idx = 0; 2818 qd_idx = 1; 2819 (*dd_idx) += 2; 2820 break; 2821 case ALGORITHM_PARITY_N: 2822 pd_idx = data_disks; 2823 qd_idx = data_disks + 1; 2824 break; 2825 2826 case ALGORITHM_ROTATING_ZERO_RESTART: 2827 /* Exactly the same as RIGHT_ASYMMETRIC, but or 2828 * of blocks for computing Q is different. 
2829 */ 2830 pd_idx = sector_div(stripe2, raid_disks); 2831 qd_idx = pd_idx + 1; 2832 if (pd_idx == raid_disks-1) { 2833 (*dd_idx)++; /* Q D D D P */ 2834 qd_idx = 0; 2835 } else if (*dd_idx >= pd_idx) 2836 (*dd_idx) += 2; /* D D P Q D */ 2837 ddf_layout = 1; 2838 break; 2839 2840 case ALGORITHM_ROTATING_N_RESTART: 2841 /* Same a left_asymmetric, by first stripe is 2842 * D D D P Q rather than 2843 * Q D D D P 2844 */ 2845 stripe2 += 1; 2846 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 2847 qd_idx = pd_idx + 1; 2848 if (pd_idx == raid_disks-1) { 2849 (*dd_idx)++; /* Q D D D P */ 2850 qd_idx = 0; 2851 } else if (*dd_idx >= pd_idx) 2852 (*dd_idx) += 2; /* D D P Q D */ 2853 ddf_layout = 1; 2854 break; 2855 2856 case ALGORITHM_ROTATING_N_CONTINUE: 2857 /* Same as left_symmetric but Q is before P */ 2858 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 2859 qd_idx = (pd_idx + raid_disks - 1) % raid_disks; 2860 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 2861 ddf_layout = 1; 2862 break; 2863 2864 case ALGORITHM_LEFT_ASYMMETRIC_6: 2865 /* RAID5 left_asymmetric, with Q on last device */ 2866 pd_idx = data_disks - sector_div(stripe2, raid_disks-1); 2867 if (*dd_idx >= pd_idx) 2868 (*dd_idx)++; 2869 qd_idx = raid_disks - 1; 2870 break; 2871 2872 case ALGORITHM_RIGHT_ASYMMETRIC_6: 2873 pd_idx = sector_div(stripe2, raid_disks-1); 2874 if (*dd_idx >= pd_idx) 2875 (*dd_idx)++; 2876 qd_idx = raid_disks - 1; 2877 break; 2878 2879 case ALGORITHM_LEFT_SYMMETRIC_6: 2880 pd_idx = data_disks - sector_div(stripe2, raid_disks-1); 2881 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); 2882 qd_idx = raid_disks - 1; 2883 break; 2884 2885 case ALGORITHM_RIGHT_SYMMETRIC_6: 2886 pd_idx = sector_div(stripe2, raid_disks-1); 2887 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); 2888 qd_idx = raid_disks - 1; 2889 break; 2890 2891 case ALGORITHM_PARITY_0_6: 2892 pd_idx = 0; 2893 (*dd_idx)++; 2894 qd_idx = raid_disks - 1; 2895 break; 2896 2897 default: 2898 BUG(); 2899 } 2900 break; 2901 } 2902 2903 if (sh) { 2904 sh->pd_idx = pd_idx; 2905 sh->qd_idx = qd_idx; 2906 sh->ddf_layout = ddf_layout; 2907 } 2908 /* 2909 * Finally, compute the new sector number 2910 */ 2911 new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset; 2912 return new_sector; 2913 } 2914 2915 sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous) 2916 { 2917 struct r5conf *conf = sh->raid_conf; 2918 int raid_disks = sh->disks; 2919 int data_disks = raid_disks - conf->max_degraded; 2920 sector_t new_sector = sh->sector, check; 2921 int sectors_per_chunk = previous ? conf->prev_chunk_sectors 2922 : conf->chunk_sectors; 2923 int algorithm = previous ? 
conf->prev_algo 2924 : conf->algorithm; 2925 sector_t stripe; 2926 int chunk_offset; 2927 sector_t chunk_number; 2928 int dummy1, dd_idx = i; 2929 sector_t r_sector; 2930 struct stripe_head sh2; 2931 2932 chunk_offset = sector_div(new_sector, sectors_per_chunk); 2933 stripe = new_sector; 2934 2935 if (i == sh->pd_idx) 2936 return 0; 2937 switch(conf->level) { 2938 case 4: break; 2939 case 5: 2940 switch (algorithm) { 2941 case ALGORITHM_LEFT_ASYMMETRIC: 2942 case ALGORITHM_RIGHT_ASYMMETRIC: 2943 if (i > sh->pd_idx) 2944 i--; 2945 break; 2946 case ALGORITHM_LEFT_SYMMETRIC: 2947 case ALGORITHM_RIGHT_SYMMETRIC: 2948 if (i < sh->pd_idx) 2949 i += raid_disks; 2950 i -= (sh->pd_idx + 1); 2951 break; 2952 case ALGORITHM_PARITY_0: 2953 i -= 1; 2954 break; 2955 case ALGORITHM_PARITY_N: 2956 break; 2957 default: 2958 BUG(); 2959 } 2960 break; 2961 case 6: 2962 if (i == sh->qd_idx) 2963 return 0; /* It is the Q disk */ 2964 switch (algorithm) { 2965 case ALGORITHM_LEFT_ASYMMETRIC: 2966 case ALGORITHM_RIGHT_ASYMMETRIC: 2967 case ALGORITHM_ROTATING_ZERO_RESTART: 2968 case ALGORITHM_ROTATING_N_RESTART: 2969 if (sh->pd_idx == raid_disks-1) 2970 i--; /* Q D D D P */ 2971 else if (i > sh->pd_idx) 2972 i -= 2; /* D D P Q D */ 2973 break; 2974 case ALGORITHM_LEFT_SYMMETRIC: 2975 case ALGORITHM_RIGHT_SYMMETRIC: 2976 if (sh->pd_idx == raid_disks-1) 2977 i--; /* Q D D D P */ 2978 else { 2979 /* D D P Q D */ 2980 if (i < sh->pd_idx) 2981 i += raid_disks; 2982 i -= (sh->pd_idx + 2); 2983 } 2984 break; 2985 case ALGORITHM_PARITY_0: 2986 i -= 2; 2987 break; 2988 case ALGORITHM_PARITY_N: 2989 break; 2990 case ALGORITHM_ROTATING_N_CONTINUE: 2991 /* Like left_symmetric, but P is before Q */ 2992 if (sh->pd_idx == 0) 2993 i--; /* P D D D Q */ 2994 else { 2995 /* D D Q P D */ 2996 if (i < sh->pd_idx) 2997 i += raid_disks; 2998 i -= (sh->pd_idx + 1); 2999 } 3000 break; 3001 case ALGORITHM_LEFT_ASYMMETRIC_6: 3002 case ALGORITHM_RIGHT_ASYMMETRIC_6: 3003 if (i > sh->pd_idx) 3004 i--; 3005 break; 3006 case ALGORITHM_LEFT_SYMMETRIC_6: 3007 case ALGORITHM_RIGHT_SYMMETRIC_6: 3008 if (i < sh->pd_idx) 3009 i += data_disks + 1; 3010 i -= (sh->pd_idx + 1); 3011 break; 3012 case ALGORITHM_PARITY_0_6: 3013 i -= 1; 3014 break; 3015 default: 3016 BUG(); 3017 } 3018 break; 3019 } 3020 3021 chunk_number = stripe * data_disks + i; 3022 r_sector = chunk_number * sectors_per_chunk + chunk_offset; 3023 3024 check = raid5_compute_sector(conf, r_sector, 3025 previous, &dummy1, &sh2); 3026 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx 3027 || sh2.qd_idx != sh->qd_idx) { 3028 pr_warn("md/raid:%s: compute_blocknr: map not correct\n", 3029 mdname(conf->mddev)); 3030 return 0; 3031 } 3032 return r_sector; 3033 } 3034 3035 /* 3036 * There are cases where we want handle_stripe_dirtying() and 3037 * schedule_reconstruction() to delay towrite to some dev of a stripe. 3038 * 3039 * This function checks whether we want to delay the towrite. Specifically, 3040 * we delay the towrite when: 3041 * 3042 * 1. degraded stripe has a non-overwrite to the missing dev, AND this 3043 * stripe has data in journal (for other devices). 3044 * 3045 * In this case, when reading data for the non-overwrite dev, it is 3046 * necessary to handle complex rmw of write back cache (prexor with 3047 * orig_page, and xor with page). To keep read path simple, we would 3048 * like to flush data in journal to RAID disks first, so complex rmw 3049 * is handled in the write patch (handle_stripe_dirtying). 3050 * 3051 * 2. 
when journal space is critical (R5C_LOG_CRITICAL=1) 3052 3053 * It is important to be able to flush all stripes in raid5-cache. 3054 * Therefore, we need to reserve some space on the journal device for 3055 * these flushes. If the flush operation includes pending writes to the 3056 * stripe, we need to reserve (conf->raid_disk + 1) pages per stripe 3057 * for the flush out. If we exclude these pending writes from the flush 3058 * operation, we only need (conf->max_degraded + 1) pages per stripe. 3059 * Therefore, excluding pending writes in these cases enables more 3060 * efficient use of the journal device. 3061 * 3062 * Note: To make sure the stripe makes progress, we only delay 3063 * towrite for stripes with data already in journal (injournal > 0). 3064 * When LOG_CRITICAL, stripes with injournal == 0 will be sent to 3065 * no_space_stripes list. 3066 * 3067 * 3. during journal failure 3068 * During journal failure, we try to flush all cached data to raid disks 3069 * based on data in stripe cache. The array is read-only to upper 3070 * layers, so we would skip all pending writes. 3071 * 3072 */ 3073 static inline bool delay_towrite(struct r5conf *conf, 3074 struct r5dev *dev, 3075 struct stripe_head_state *s) 3076 { 3077 /* case 1 above */ 3078 if (!test_bit(R5_OVERWRITE, &dev->flags) && 3079 !test_bit(R5_Insync, &dev->flags) && s->injournal) 3080 return true; 3081 /* case 2 above */ 3082 if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) && 3083 s->injournal > 0) 3084 return true; 3085 /* case 3 above */ 3086 if (s->log_failed && s->injournal) 3087 return true; 3088 return false; 3089 } 3090 3091 static void 3092 schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, 3093 int rcw, int expand) 3094 { 3095 int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx, disks = sh->disks; 3096 struct r5conf *conf = sh->raid_conf; 3097 int level = conf->level; 3098 3099 if (rcw) { 3100 /* 3101 * In some cases, handle_stripe_dirtying initially decided to 3102 * run rmw and allocated an extra page for prexor. However, rcw is 3103 * cheaper later on. We need to free the extra page now, 3104 * because we won't be able to do that in ops_complete_prexor().
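 *
 * Rough illustration of why the choice can flip (numbers invented): on a
 * 5-drive RAID5, a write that by now covers 3 of the 4 data blocks needs
 * only the single untouched block read in for rcw, but 3 old data blocks
 * plus the old parity (4 reads) for rmw, so handle_stripe_dirtying() ends
 * up scheduling rcw even though rmw looked cheaper when the first bio
 * arrived.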
3105 */ 3106 r5c_release_extra_page(sh); 3107 3108 for (i = disks; i--; ) { 3109 struct r5dev *dev = &sh->dev[i]; 3110 3111 if (dev->towrite && !delay_towrite(conf, dev, s)) { 3112 set_bit(R5_LOCKED, &dev->flags); 3113 set_bit(R5_Wantdrain, &dev->flags); 3114 if (!expand) 3115 clear_bit(R5_UPTODATE, &dev->flags); 3116 s->locked++; 3117 } else if (test_bit(R5_InJournal, &dev->flags)) { 3118 set_bit(R5_LOCKED, &dev->flags); 3119 s->locked++; 3120 } 3121 } 3122 /* if we are not expanding this is a proper write request, and 3123 * there will be bios with new data to be drained into the 3124 * stripe cache 3125 */ 3126 if (!expand) { 3127 if (!s->locked) 3128 /* False alarm, nothing to do */ 3129 return; 3130 sh->reconstruct_state = reconstruct_state_drain_run; 3131 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); 3132 } else 3133 sh->reconstruct_state = reconstruct_state_run; 3134 3135 set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); 3136 3137 if (s->locked + conf->max_degraded == disks) 3138 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) 3139 atomic_inc(&conf->pending_full_writes); 3140 } else { 3141 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || 3142 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); 3143 BUG_ON(level == 6 && 3144 (!(test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags) || 3145 test_bit(R5_Wantcompute, &sh->dev[qd_idx].flags)))); 3146 3147 for (i = disks; i--; ) { 3148 struct r5dev *dev = &sh->dev[i]; 3149 if (i == pd_idx || i == qd_idx) 3150 continue; 3151 3152 if (dev->towrite && 3153 (test_bit(R5_UPTODATE, &dev->flags) || 3154 test_bit(R5_Wantcompute, &dev->flags))) { 3155 set_bit(R5_Wantdrain, &dev->flags); 3156 set_bit(R5_LOCKED, &dev->flags); 3157 clear_bit(R5_UPTODATE, &dev->flags); 3158 s->locked++; 3159 } else if (test_bit(R5_InJournal, &dev->flags)) { 3160 set_bit(R5_LOCKED, &dev->flags); 3161 s->locked++; 3162 } 3163 } 3164 if (!s->locked) 3165 /* False alarm - nothing to do */ 3166 return; 3167 sh->reconstruct_state = reconstruct_state_prexor_drain_run; 3168 set_bit(STRIPE_OP_PREXOR, &s->ops_request); 3169 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); 3170 set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); 3171 } 3172 3173 /* keep the parity disk(s) locked while asynchronous operations 3174 * are in flight 3175 */ 3176 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); 3177 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 3178 s->locked++; 3179 3180 if (level == 6) { 3181 int qd_idx = sh->qd_idx; 3182 struct r5dev *dev = &sh->dev[qd_idx]; 3183 3184 set_bit(R5_LOCKED, &dev->flags); 3185 clear_bit(R5_UPTODATE, &dev->flags); 3186 s->locked++; 3187 } 3188 3189 if (raid5_has_ppl(sh->raid_conf) && sh->ppl_page && 3190 test_bit(STRIPE_OP_BIODRAIN, &s->ops_request) && 3191 !test_bit(STRIPE_FULL_WRITE, &sh->state) && 3192 test_bit(R5_Insync, &sh->dev[pd_idx].flags)) 3193 set_bit(STRIPE_OP_PARTIAL_PARITY, &s->ops_request); 3194 3195 pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n", 3196 __func__, (unsigned long long)sh->sector, 3197 s->locked, s->ops_request); 3198 } 3199 3200 /* 3201 * Each stripe/dev can have one or more bion attached. 3202 * toread/towrite point to the first in a chain. 3203 * The bi_next chain must be in order. 
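 *
 * Sketch (sector numbers invented, STRIPE_SECTORS == 8 assumed): with
 * dev->sector == 0 the write chain might look like
 *
 *	towrite -> bio[sectors 0..3] -> bio[sectors 4..7] -> NULL
 *
 * sorted by bi_sector. A new bio covering sectors 2..5 overlaps the first
 * entry, so add_stripe_bio() sets R5_Overlap and returns 0; the submitter
 * then waits on wait_for_overlap and retries.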
3204 */ 3205 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, 3206 int forwrite, int previous) 3207 { 3208 struct bio **bip; 3209 struct r5conf *conf = sh->raid_conf; 3210 int firstwrite=0; 3211 3212 pr_debug("adding bi b#%llu to stripe s#%llu\n", 3213 (unsigned long long)bi->bi_iter.bi_sector, 3214 (unsigned long long)sh->sector); 3215 3216 spin_lock_irq(&sh->stripe_lock); 3217 sh->dev[dd_idx].write_hint = bi->bi_write_hint; 3218 /* Don't allow new IO added to stripes in batch list */ 3219 if (sh->batch_head) 3220 goto overlap; 3221 if (forwrite) { 3222 bip = &sh->dev[dd_idx].towrite; 3223 if (*bip == NULL) 3224 firstwrite = 1; 3225 } else 3226 bip = &sh->dev[dd_idx].toread; 3227 while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) { 3228 if (bio_end_sector(*bip) > bi->bi_iter.bi_sector) 3229 goto overlap; 3230 bip = & (*bip)->bi_next; 3231 } 3232 if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi)) 3233 goto overlap; 3234 3235 if (forwrite && raid5_has_ppl(conf)) { 3236 /* 3237 * With PPL only writes to consecutive data chunks within a 3238 * stripe are allowed because for a single stripe_head we can 3239 * only have one PPL entry at a time, which describes one data 3240 * range. Not really an overlap, but wait_for_overlap can be 3241 * used to handle this. 3242 */ 3243 sector_t sector; 3244 sector_t first = 0; 3245 sector_t last = 0; 3246 int count = 0; 3247 int i; 3248 3249 for (i = 0; i < sh->disks; i++) { 3250 if (i != sh->pd_idx && 3251 (i == dd_idx || sh->dev[i].towrite)) { 3252 sector = sh->dev[i].sector; 3253 if (count == 0 || sector < first) 3254 first = sector; 3255 if (sector > last) 3256 last = sector; 3257 count++; 3258 } 3259 } 3260 3261 if (first + conf->chunk_sectors * (count - 1) != last) 3262 goto overlap; 3263 } 3264 3265 if (!forwrite || previous) 3266 clear_bit(STRIPE_BATCH_READY, &sh->state); 3267 3268 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next); 3269 if (*bip) 3270 bi->bi_next = *bip; 3271 *bip = bi; 3272 bio_inc_remaining(bi); 3273 md_write_inc(conf->mddev, bi); 3274 3275 if (forwrite) { 3276 /* check if page is covered */ 3277 sector_t sector = sh->dev[dd_idx].sector; 3278 for (bi=sh->dev[dd_idx].towrite; 3279 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && 3280 bi && bi->bi_iter.bi_sector <= sector; 3281 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { 3282 if (bio_end_sector(bi) >= sector) 3283 sector = bio_end_sector(bi); 3284 } 3285 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) 3286 if (!test_and_set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags)) 3287 sh->overwrite_disks++; 3288 } 3289 3290 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", 3291 (unsigned long long)(*bip)->bi_iter.bi_sector, 3292 (unsigned long long)sh->sector, dd_idx); 3293 3294 if (conf->mddev->bitmap && firstwrite) { 3295 /* Cannot hold spinlock over bitmap_startwrite, 3296 * but must ensure this isn't added to a batch until 3297 * we have added to the bitmap and set bm_seq. 3298 * So set STRIPE_BITMAP_PENDING to prevent 3299 * batching. 3300 * If multiple add_stripe_bio() calls race here they 3301 * much all set STRIPE_BITMAP_PENDING. So only the first one 3302 * to complete "bitmap_startwrite" gets to set 3303 * STRIPE_BIT_DELAY. This is important as once a stripe 3304 * is added to a batch, STRIPE_BIT_DELAY cannot be changed 3305 * any more. 
3306 */ 3307 set_bit(STRIPE_BITMAP_PENDING, &sh->state); 3308 spin_unlock_irq(&sh->stripe_lock); 3309 md_bitmap_startwrite(conf->mddev->bitmap, sh->sector, 3310 STRIPE_SECTORS, 0); 3311 spin_lock_irq(&sh->stripe_lock); 3312 clear_bit(STRIPE_BITMAP_PENDING, &sh->state); 3313 if (!sh->batch_head) { 3314 sh->bm_seq = conf->seq_flush+1; 3315 set_bit(STRIPE_BIT_DELAY, &sh->state); 3316 } 3317 } 3318 spin_unlock_irq(&sh->stripe_lock); 3319 3320 if (stripe_can_batch(sh)) 3321 stripe_add_to_batch_list(conf, sh); 3322 return 1; 3323 3324 overlap: 3325 set_bit(R5_Overlap, &sh->dev[dd_idx].flags); 3326 spin_unlock_irq(&sh->stripe_lock); 3327 return 0; 3328 } 3329 3330 static void end_reshape(struct r5conf *conf); 3331 3332 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, 3333 struct stripe_head *sh) 3334 { 3335 int sectors_per_chunk = 3336 previous ? conf->prev_chunk_sectors : conf->chunk_sectors; 3337 int dd_idx; 3338 int chunk_offset = sector_div(stripe, sectors_per_chunk); 3339 int disks = previous ? conf->previous_raid_disks : conf->raid_disks; 3340 3341 raid5_compute_sector(conf, 3342 stripe * (disks - conf->max_degraded) 3343 *sectors_per_chunk + chunk_offset, 3344 previous, 3345 &dd_idx, sh); 3346 } 3347 3348 static void 3349 handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, 3350 struct stripe_head_state *s, int disks) 3351 { 3352 int i; 3353 BUG_ON(sh->batch_head); 3354 for (i = disks; i--; ) { 3355 struct bio *bi; 3356 int bitmap_end = 0; 3357 3358 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 3359 struct md_rdev *rdev; 3360 rcu_read_lock(); 3361 rdev = rcu_dereference(conf->disks[i].rdev); 3362 if (rdev && test_bit(In_sync, &rdev->flags) && 3363 !test_bit(Faulty, &rdev->flags)) 3364 atomic_inc(&rdev->nr_pending); 3365 else 3366 rdev = NULL; 3367 rcu_read_unlock(); 3368 if (rdev) { 3369 if (!rdev_set_badblocks( 3370 rdev, 3371 sh->sector, 3372 STRIPE_SECTORS, 0)) 3373 md_error(conf->mddev, rdev); 3374 rdev_dec_pending(rdev, conf->mddev); 3375 } 3376 } 3377 spin_lock_irq(&sh->stripe_lock); 3378 /* fail all writes first */ 3379 bi = sh->dev[i].towrite; 3380 sh->dev[i].towrite = NULL; 3381 sh->overwrite_disks = 0; 3382 spin_unlock_irq(&sh->stripe_lock); 3383 if (bi) 3384 bitmap_end = 1; 3385 3386 log_stripe_write_finished(sh); 3387 3388 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 3389 wake_up(&conf->wait_for_overlap); 3390 3391 while (bi && bi->bi_iter.bi_sector < 3392 sh->dev[i].sector + STRIPE_SECTORS) { 3393 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 3394 3395 md_write_end(conf->mddev); 3396 bio_io_error(bi); 3397 bi = nextbi; 3398 } 3399 if (bitmap_end) 3400 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, 3401 STRIPE_SECTORS, 0, 0); 3402 bitmap_end = 0; 3403 /* and fail all 'written' */ 3404 bi = sh->dev[i].written; 3405 sh->dev[i].written = NULL; 3406 if (test_and_clear_bit(R5_SkipCopy, &sh->dev[i].flags)) { 3407 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); 3408 sh->dev[i].page = sh->dev[i].orig_page; 3409 } 3410 3411 if (bi) bitmap_end = 1; 3412 while (bi && bi->bi_iter.bi_sector < 3413 sh->dev[i].sector + STRIPE_SECTORS) { 3414 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); 3415 3416 md_write_end(conf->mddev); 3417 bio_io_error(bi); 3418 bi = bi2; 3419 } 3420 3421 /* fail any reads if this device is non-operational and 3422 * the data has not reached the cache yet. 
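 *
 * (In other words: toread bios are only completed with an error below when
 * the stripe has lost more devices than the parity can cover; otherwise
 * they remain attached to the stripe and are satisfied later through the
 * normal read/compute path.)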
3423 */ 3424 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && 3425 s->failed > conf->max_degraded && 3426 (!test_bit(R5_Insync, &sh->dev[i].flags) || 3427 test_bit(R5_ReadError, &sh->dev[i].flags))) { 3428 spin_lock_irq(&sh->stripe_lock); 3429 bi = sh->dev[i].toread; 3430 sh->dev[i].toread = NULL; 3431 spin_unlock_irq(&sh->stripe_lock); 3432 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 3433 wake_up(&conf->wait_for_overlap); 3434 if (bi) 3435 s->to_read--; 3436 while (bi && bi->bi_iter.bi_sector < 3437 sh->dev[i].sector + STRIPE_SECTORS) { 3438 struct bio *nextbi = 3439 r5_next_bio(bi, sh->dev[i].sector); 3440 3441 bio_io_error(bi); 3442 bi = nextbi; 3443 } 3444 } 3445 if (bitmap_end) 3446 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, 3447 STRIPE_SECTORS, 0, 0); 3448 /* If we were in the middle of a write the parity block might 3449 * still be locked - so just clear all R5_LOCKED flags 3450 */ 3451 clear_bit(R5_LOCKED, &sh->dev[i].flags); 3452 } 3453 s->to_write = 0; 3454 s->written = 0; 3455 3456 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 3457 if (atomic_dec_and_test(&conf->pending_full_writes)) 3458 md_wakeup_thread(conf->mddev->thread); 3459 } 3460 3461 static void 3462 handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, 3463 struct stripe_head_state *s) 3464 { 3465 int abort = 0; 3466 int i; 3467 3468 BUG_ON(sh->batch_head); 3469 clear_bit(STRIPE_SYNCING, &sh->state); 3470 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) 3471 wake_up(&conf->wait_for_overlap); 3472 s->syncing = 0; 3473 s->replacing = 0; 3474 /* There is nothing more to do for sync/check/repair. 3475 * Don't even need to abort as that is handled elsewhere 3476 * if needed, and not always wanted e.g. if there is a known 3477 * bad block here. 
3478 * For recover/replace we need to record a bad block on all 3479 * non-sync devices, or abort the recovery 3480 */ 3481 if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) { 3482 /* During recovery devices cannot be removed, so 3483 * locking and refcounting of rdevs is not needed 3484 */ 3485 rcu_read_lock(); 3486 for (i = 0; i < conf->raid_disks; i++) { 3487 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); 3488 if (rdev 3489 && !test_bit(Faulty, &rdev->flags) 3490 && !test_bit(In_sync, &rdev->flags) 3491 && !rdev_set_badblocks(rdev, sh->sector, 3492 STRIPE_SECTORS, 0)) 3493 abort = 1; 3494 rdev = rcu_dereference(conf->disks[i].replacement); 3495 if (rdev 3496 && !test_bit(Faulty, &rdev->flags) 3497 && !test_bit(In_sync, &rdev->flags) 3498 && !rdev_set_badblocks(rdev, sh->sector, 3499 STRIPE_SECTORS, 0)) 3500 abort = 1; 3501 } 3502 rcu_read_unlock(); 3503 if (abort) 3504 conf->recovery_disabled = 3505 conf->mddev->recovery_disabled; 3506 } 3507 md_done_sync(conf->mddev, STRIPE_SECTORS, !abort); 3508 } 3509 3510 static int want_replace(struct stripe_head *sh, int disk_idx) 3511 { 3512 struct md_rdev *rdev; 3513 int rv = 0; 3514 3515 rcu_read_lock(); 3516 rdev = rcu_dereference(sh->raid_conf->disks[disk_idx].replacement); 3517 if (rdev 3518 && !test_bit(Faulty, &rdev->flags) 3519 && !test_bit(In_sync, &rdev->flags) 3520 && (rdev->recovery_offset <= sh->sector 3521 || rdev->mddev->recovery_cp <= sh->sector)) 3522 rv = 1; 3523 rcu_read_unlock(); 3524 return rv; 3525 } 3526 3527 static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, 3528 int disk_idx, int disks) 3529 { 3530 struct r5dev *dev = &sh->dev[disk_idx]; 3531 struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]], 3532 &sh->dev[s->failed_num[1]] }; 3533 int i; 3534 3535 3536 if (test_bit(R5_LOCKED, &dev->flags) || 3537 test_bit(R5_UPTODATE, &dev->flags)) 3538 /* No point reading this as we already have it or have 3539 * decided to get it. 3540 */ 3541 return 0; 3542 3543 if (dev->toread || 3544 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags))) 3545 /* We need this block to directly satisfy a request */ 3546 return 1; 3547 3548 if (s->syncing || s->expanding || 3549 (s->replacing && want_replace(sh, disk_idx))) 3550 /* When syncing, or expanding we read everything. 3551 * When replacing, we need the replaced block. 3552 */ 3553 return 1; 3554 3555 if ((s->failed >= 1 && fdev[0]->toread) || 3556 (s->failed >= 2 && fdev[1]->toread)) 3557 /* If we want to read from a failed device, then 3558 * we need to actually read every other device. 3559 */ 3560 return 1; 3561 3562 /* Sometimes neither read-modify-write nor reconstruct-write 3563 * cycles can work. In those cases we read every block we 3564 * can. Then the parity-update is certain to have enough to 3565 * work with. 3566 * This can only be a problem when we need to write something, 3567 * and some device has failed. If either of those tests 3568 * fail we need look no further. 3569 */ 3570 if (!s->failed || !s->to_write) 3571 return 0; 3572 3573 if (test_bit(R5_Insync, &dev->flags) && 3574 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 3575 /* Pre-reads at not permitted until after short delay 3576 * to gather multiple requests. However if this 3577 * device is no Insync, the block could only be computed 3578 * and there is no need to delay that. 
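 *
 * (Illustrative: a burst of small writes to the same stripe gets a chance
 * to accumulate while the stripe sits on the delayed list, so by the time
 * STRIPE_PREREAD_ACTIVE is set, fewer - or no - old blocks may need to be
 * read back in.)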
3579 */ 3580 return 0; 3581 3582 for (i = 0; i < s->failed && i < 2; i++) { 3583 if (fdev[i]->towrite && 3584 !test_bit(R5_UPTODATE, &fdev[i]->flags) && 3585 !test_bit(R5_OVERWRITE, &fdev[i]->flags)) 3586 /* If we have a partial write to a failed 3587 * device, then we will need to reconstruct 3588 * the content of that device, so all other 3589 * devices must be read. 3590 */ 3591 return 1; 3592 } 3593 3594 /* If we are forced to do a reconstruct-write, either because 3595 * the current RAID6 implementation only supports that, or 3596 * because parity cannot be trusted and we are currently 3597 * recovering it, there is extra need to be careful. 3598 * If one of the devices that we would need to read, because 3599 * it is not being overwritten (and maybe not written at all) 3600 * is missing/faulty, then we need to read everything we can. 3601 */ 3602 if (sh->raid_conf->level != 6 && 3603 sh->sector < sh->raid_conf->mddev->recovery_cp) 3604 /* reconstruct-write isn't being forced */ 3605 return 0; 3606 for (i = 0; i < s->failed && i < 2; i++) { 3607 if (s->failed_num[i] != sh->pd_idx && 3608 s->failed_num[i] != sh->qd_idx && 3609 !test_bit(R5_UPTODATE, &fdev[i]->flags) && 3610 !test_bit(R5_OVERWRITE, &fdev[i]->flags)) 3611 return 1; 3612 } 3613 3614 return 0; 3615 } 3616 3617 /* fetch_block - checks the given member device to see if its data needs 3618 * to be read or computed to satisfy a request. 3619 * 3620 * Returns 1 when no more member devices need to be checked, otherwise returns 3621 * 0 to tell the loop in handle_stripe_fill to continue 3622 */ 3623 static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, 3624 int disk_idx, int disks) 3625 { 3626 struct r5dev *dev = &sh->dev[disk_idx]; 3627 3628 /* is the data in this block needed, and can we get it? */ 3629 if (need_this_block(sh, s, disk_idx, disks)) { 3630 /* we would like to get this block, possibly by computing it, 3631 * otherwise read it if the backing disk is insync 3632 */ 3633 BUG_ON(test_bit(R5_Wantcompute, &dev->flags)); 3634 BUG_ON(test_bit(R5_Wantread, &dev->flags)); 3635 BUG_ON(sh->batch_head); 3636 3637 /* 3638 * In the raid6 case if the only non-uptodate disk is P 3639 * then we already trusted P to compute the other failed 3640 * drives. It is safe to compute rather than re-read P. 3641 * In other cases we only compute blocks from failed 3642 * devices, otherwise check/repair might fail to detect 3643 * a real inconsistency. 3644 */ 3645 3646 if ((s->uptodate == disks - 1) && 3647 ((sh->qd_idx >= 0 && sh->pd_idx == disk_idx) || 3648 (s->failed && (disk_idx == s->failed_num[0] || 3649 disk_idx == s->failed_num[1])))) { 3650 /* have disk failed, and we're requested to fetch it; 3651 * do compute it 3652 */ 3653 pr_debug("Computing stripe %llu block %d\n", 3654 (unsigned long long)sh->sector, disk_idx); 3655 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 3656 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 3657 set_bit(R5_Wantcompute, &dev->flags); 3658 sh->ops.target = disk_idx; 3659 sh->ops.target2 = -1; /* no 2nd target */ 3660 s->req_compute = 1; 3661 /* Careful: from this point on 'uptodate' is in the eye 3662 * of raid_run_ops which services 'compute' operations 3663 * before writes. R5_Wantcompute flags a block that will 3664 * be R5_UPTODATE by the time it is needed for a 3665 * subsequent operation. 
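 *
 * For instance (illustrative): on a 5-device stripe with one failed data
 * block and the other four already R5_UPTODATE, s->uptodate is bumped from
 * 4 to 5 right here, even though the missing block only materializes once
 * the scheduled async compute has actually run.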
3666 */ 3667 s->uptodate++; 3668 return 1; 3669 } else if (s->uptodate == disks-2 && s->failed >= 2) { 3670 /* Computing 2-failure is *very* expensive; only 3671 * do it if failed >= 2 3672 */ 3673 int other; 3674 for (other = disks; other--; ) { 3675 if (other == disk_idx) 3676 continue; 3677 if (!test_bit(R5_UPTODATE, 3678 &sh->dev[other].flags)) 3679 break; 3680 } 3681 BUG_ON(other < 0); 3682 pr_debug("Computing stripe %llu blocks %d,%d\n", 3683 (unsigned long long)sh->sector, 3684 disk_idx, other); 3685 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 3686 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 3687 set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags); 3688 set_bit(R5_Wantcompute, &sh->dev[other].flags); 3689 sh->ops.target = disk_idx; 3690 sh->ops.target2 = other; 3691 s->uptodate += 2; 3692 s->req_compute = 1; 3693 return 1; 3694 } else if (test_bit(R5_Insync, &dev->flags)) { 3695 set_bit(R5_LOCKED, &dev->flags); 3696 set_bit(R5_Wantread, &dev->flags); 3697 s->locked++; 3698 pr_debug("Reading block %d (sync=%d)\n", 3699 disk_idx, s->syncing); 3700 } 3701 } 3702 3703 return 0; 3704 } 3705 3706 /** 3707 * handle_stripe_fill - read or compute data to satisfy pending requests. 3708 */ 3709 static void handle_stripe_fill(struct stripe_head *sh, 3710 struct stripe_head_state *s, 3711 int disks) 3712 { 3713 int i; 3714 3715 /* look for blocks to read/compute, skip this if a compute 3716 * is already in flight, or if the stripe contents are in the 3717 * midst of changing due to a write 3718 */ 3719 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && 3720 !sh->reconstruct_state) { 3721 3722 /* 3723 * For degraded stripe with data in journal, do not handle 3724 * read requests yet, instead, flush the stripe to raid 3725 * disks first, this avoids handling complex rmw of write 3726 * back cache (prexor with orig_page, and then xor with 3727 * page) in the read path 3728 */ 3729 if (s->injournal && s->failed) { 3730 if (test_bit(STRIPE_R5C_CACHING, &sh->state)) 3731 r5c_make_stripe_write_out(sh); 3732 goto out; 3733 } 3734 3735 for (i = disks; i--; ) 3736 if (fetch_block(sh, s, i, disks)) 3737 break; 3738 } 3739 out: 3740 set_bit(STRIPE_HANDLE, &sh->state); 3741 } 3742 3743 static void break_stripe_batch_list(struct stripe_head *head_sh, 3744 unsigned long handle_flags); 3745 /* handle_stripe_clean_event 3746 * any written block on an uptodate or failed drive can be returned. 3747 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but 3748 * never LOCKED, so we don't need to test 'failed' directly. 
3749 */ 3750 static void handle_stripe_clean_event(struct r5conf *conf, 3751 struct stripe_head *sh, int disks) 3752 { 3753 int i; 3754 struct r5dev *dev; 3755 int discard_pending = 0; 3756 struct stripe_head *head_sh = sh; 3757 bool do_endio = false; 3758 3759 for (i = disks; i--; ) 3760 if (sh->dev[i].written) { 3761 dev = &sh->dev[i]; 3762 if (!test_bit(R5_LOCKED, &dev->flags) && 3763 (test_bit(R5_UPTODATE, &dev->flags) || 3764 test_bit(R5_Discard, &dev->flags) || 3765 test_bit(R5_SkipCopy, &dev->flags))) { 3766 /* We can return any write requests */ 3767 struct bio *wbi, *wbi2; 3768 pr_debug("Return write for disc %d\n", i); 3769 if (test_and_clear_bit(R5_Discard, &dev->flags)) 3770 clear_bit(R5_UPTODATE, &dev->flags); 3771 if (test_and_clear_bit(R5_SkipCopy, &dev->flags)) { 3772 WARN_ON(test_bit(R5_UPTODATE, &dev->flags)); 3773 } 3774 do_endio = true; 3775 3776 returnbi: 3777 dev->page = dev->orig_page; 3778 wbi = dev->written; 3779 dev->written = NULL; 3780 while (wbi && wbi->bi_iter.bi_sector < 3781 dev->sector + STRIPE_SECTORS) { 3782 wbi2 = r5_next_bio(wbi, dev->sector); 3783 md_write_end(conf->mddev); 3784 bio_endio(wbi); 3785 wbi = wbi2; 3786 } 3787 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, 3788 STRIPE_SECTORS, 3789 !test_bit(STRIPE_DEGRADED, &sh->state), 3790 0); 3791 if (head_sh->batch_head) { 3792 sh = list_first_entry(&sh->batch_list, 3793 struct stripe_head, 3794 batch_list); 3795 if (sh != head_sh) { 3796 dev = &sh->dev[i]; 3797 goto returnbi; 3798 } 3799 } 3800 sh = head_sh; 3801 dev = &sh->dev[i]; 3802 } else if (test_bit(R5_Discard, &dev->flags)) 3803 discard_pending = 1; 3804 } 3805 3806 log_stripe_write_finished(sh); 3807 3808 if (!discard_pending && 3809 test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) { 3810 int hash; 3811 clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); 3812 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); 3813 if (sh->qd_idx >= 0) { 3814 clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); 3815 clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags); 3816 } 3817 /* now that discard is done we can proceed with any sync */ 3818 clear_bit(STRIPE_DISCARD, &sh->state); 3819 /* 3820 * SCSI discard will change some bio fields and the stripe has 3821 * no updated data, so remove it from hash list and the stripe 3822 * will be reinitialized 3823 */ 3824 unhash: 3825 hash = sh->hash_lock_index; 3826 spin_lock_irq(conf->hash_locks + hash); 3827 remove_hash(sh); 3828 spin_unlock_irq(conf->hash_locks + hash); 3829 if (head_sh->batch_head) { 3830 sh = list_first_entry(&sh->batch_list, 3831 struct stripe_head, batch_list); 3832 if (sh != head_sh) 3833 goto unhash; 3834 } 3835 sh = head_sh; 3836 3837 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) 3838 set_bit(STRIPE_HANDLE, &sh->state); 3839 3840 } 3841 3842 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 3843 if (atomic_dec_and_test(&conf->pending_full_writes)) 3844 md_wakeup_thread(conf->mddev->thread); 3845 3846 if (head_sh->batch_head && do_endio) 3847 break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS); 3848 } 3849 3850 /* 3851 * For RMW in write back cache, we need extra page in prexor to store the 3852 * old data. This page is stored in dev->orig_page. 3853 * 3854 * This function checks whether we have data for prexor. 
The exact logic 3855 * is: 3856 * R5_UPTODATE && (!R5_InJournal || R5_OrigPageUPTDODATE) 3857 */ 3858 static inline bool uptodate_for_rmw(struct r5dev *dev) 3859 { 3860 return (test_bit(R5_UPTODATE, &dev->flags)) && 3861 (!test_bit(R5_InJournal, &dev->flags) || 3862 test_bit(R5_OrigPageUPTDODATE, &dev->flags)); 3863 } 3864 3865 static int handle_stripe_dirtying(struct r5conf *conf, 3866 struct stripe_head *sh, 3867 struct stripe_head_state *s, 3868 int disks) 3869 { 3870 int rmw = 0, rcw = 0, i; 3871 sector_t recovery_cp = conf->mddev->recovery_cp; 3872 3873 /* Check whether resync is now happening or should start. 3874 * If yes, then the array is dirty (after unclean shutdown or 3875 * initial creation), so parity in some stripes might be inconsistent. 3876 * In this case, we need to always do reconstruct-write, to ensure 3877 * that in case of drive failure or read-error correction, we 3878 * generate correct data from the parity. 3879 */ 3880 if (conf->rmw_level == PARITY_DISABLE_RMW || 3881 (recovery_cp < MaxSector && sh->sector >= recovery_cp && 3882 s->failed == 0)) { 3883 /* Calculate the real rcw later - for now make it 3884 * look like rcw is cheaper 3885 */ 3886 rcw = 1; rmw = 2; 3887 pr_debug("force RCW rmw_level=%u, recovery_cp=%llu sh->sector=%llu\n", 3888 conf->rmw_level, (unsigned long long)recovery_cp, 3889 (unsigned long long)sh->sector); 3890 } else for (i = disks; i--; ) { 3891 /* would I have to read this buffer for read_modify_write */ 3892 struct r5dev *dev = &sh->dev[i]; 3893 if (((dev->towrite && !delay_towrite(conf, dev, s)) || 3894 i == sh->pd_idx || i == sh->qd_idx || 3895 test_bit(R5_InJournal, &dev->flags)) && 3896 !test_bit(R5_LOCKED, &dev->flags) && 3897 !(uptodate_for_rmw(dev) || 3898 test_bit(R5_Wantcompute, &dev->flags))) { 3899 if (test_bit(R5_Insync, &dev->flags)) 3900 rmw++; 3901 else 3902 rmw += 2*disks; /* cannot read it */ 3903 } 3904 /* Would I have to read this buffer for reconstruct_write */ 3905 if (!test_bit(R5_OVERWRITE, &dev->flags) && 3906 i != sh->pd_idx && i != sh->qd_idx && 3907 !test_bit(R5_LOCKED, &dev->flags) && 3908 !(test_bit(R5_UPTODATE, &dev->flags) || 3909 test_bit(R5_Wantcompute, &dev->flags))) { 3910 if (test_bit(R5_Insync, &dev->flags)) 3911 rcw++; 3912 else 3913 rcw += 2*disks; 3914 } 3915 } 3916 3917 pr_debug("for sector %llu state 0x%lx, rmw=%d rcw=%d\n", 3918 (unsigned long long)sh->sector, sh->state, rmw, rcw); 3919 set_bit(STRIPE_HANDLE, &sh->state); 3920 if ((rmw < rcw || (rmw == rcw && conf->rmw_level == PARITY_PREFER_RMW)) && rmw > 0) { 3921 /* prefer read-modify-write, but need to get some data */ 3922 if (conf->mddev->queue) 3923 blk_add_trace_msg(conf->mddev->queue, 3924 "raid5 rmw %llu %d", 3925 (unsigned long long)sh->sector, rmw); 3926 for (i = disks; i--; ) { 3927 struct r5dev *dev = &sh->dev[i]; 3928 if (test_bit(R5_InJournal, &dev->flags) && 3929 dev->page == dev->orig_page && 3930 !test_bit(R5_LOCKED, &sh->dev[sh->pd_idx].flags)) { 3931 /* alloc page for prexor */ 3932 struct page *p = alloc_page(GFP_NOIO); 3933 3934 if (p) { 3935 dev->orig_page = p; 3936 continue; 3937 } 3938 3939 /* 3940 * alloc_page() failed, try use 3941 * disk_info->extra_page 3942 */ 3943 if (!test_and_set_bit(R5C_EXTRA_PAGE_IN_USE, 3944 &conf->cache_state)) { 3945 r5c_use_extra_page(sh); 3946 break; 3947 } 3948 3949 /* extra_page in use, add to delayed_list */ 3950 set_bit(STRIPE_DELAYED, &sh->state); 3951 s->waiting_extra_page = 1; 3952 return -EAGAIN; 3953 } 3954 } 3955 3956 for (i = disks; i--; ) { 3957 struct r5dev *dev = 
&sh->dev[i]; 3958 if (((dev->towrite && !delay_towrite(conf, dev, s)) || 3959 i == sh->pd_idx || i == sh->qd_idx || 3960 test_bit(R5_InJournal, &dev->flags)) && 3961 !test_bit(R5_LOCKED, &dev->flags) && 3962 !(uptodate_for_rmw(dev) || 3963 test_bit(R5_Wantcompute, &dev->flags)) && 3964 test_bit(R5_Insync, &dev->flags)) { 3965 if (test_bit(STRIPE_PREREAD_ACTIVE, 3966 &sh->state)) { 3967 pr_debug("Read_old block %d for r-m-w\n", 3968 i); 3969 set_bit(R5_LOCKED, &dev->flags); 3970 set_bit(R5_Wantread, &dev->flags); 3971 s->locked++; 3972 } else { 3973 set_bit(STRIPE_DELAYED, &sh->state); 3974 set_bit(STRIPE_HANDLE, &sh->state); 3975 } 3976 } 3977 } 3978 } 3979 if ((rcw < rmw || (rcw == rmw && conf->rmw_level != PARITY_PREFER_RMW)) && rcw > 0) { 3980 /* want reconstruct write, but need to get some data */ 3981 int qread =0; 3982 rcw = 0; 3983 for (i = disks; i--; ) { 3984 struct r5dev *dev = &sh->dev[i]; 3985 if (!test_bit(R5_OVERWRITE, &dev->flags) && 3986 i != sh->pd_idx && i != sh->qd_idx && 3987 !test_bit(R5_LOCKED, &dev->flags) && 3988 !(test_bit(R5_UPTODATE, &dev->flags) || 3989 test_bit(R5_Wantcompute, &dev->flags))) { 3990 rcw++; 3991 if (test_bit(R5_Insync, &dev->flags) && 3992 test_bit(STRIPE_PREREAD_ACTIVE, 3993 &sh->state)) { 3994 pr_debug("Read_old block " 3995 "%d for Reconstruct\n", i); 3996 set_bit(R5_LOCKED, &dev->flags); 3997 set_bit(R5_Wantread, &dev->flags); 3998 s->locked++; 3999 qread++; 4000 } else { 4001 set_bit(STRIPE_DELAYED, &sh->state); 4002 set_bit(STRIPE_HANDLE, &sh->state); 4003 } 4004 } 4005 } 4006 if (rcw && conf->mddev->queue) 4007 blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d", 4008 (unsigned long long)sh->sector, 4009 rcw, qread, test_bit(STRIPE_DELAYED, &sh->state)); 4010 } 4011 4012 if (rcw > disks && rmw > disks && 4013 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 4014 set_bit(STRIPE_DELAYED, &sh->state); 4015 4016 /* now if nothing is locked, and if we have enough data, 4017 * we can start a write request 4018 */ 4019 /* since handle_stripe can be called at any time we need to handle the 4020 * case where a compute block operation has been submitted and then a 4021 * subsequent call wants to start a write request. raid_run_ops only 4022 * handles the case where compute block and reconstruct are requested 4023 * simultaneously. If this is not the case then new writes need to be 4024 * held off until the compute completes. 
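 * Hence the test below: schedule the write only if the compute was
 * requested in this very pass (s->req_compute) or no compute is
 * currently running (STRIPE_COMPUTE_RUN clear).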
4025 */ 4026 if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && 4027 (s->locked == 0 && (rcw == 0 || rmw == 0) && 4028 !test_bit(STRIPE_BIT_DELAY, &sh->state))) 4029 schedule_reconstruction(sh, s, rcw == 0, 0); 4030 return 0; 4031 } 4032 4033 static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, 4034 struct stripe_head_state *s, int disks) 4035 { 4036 struct r5dev *dev = NULL; 4037 4038 BUG_ON(sh->batch_head); 4039 set_bit(STRIPE_HANDLE, &sh->state); 4040 4041 switch (sh->check_state) { 4042 case check_state_idle: 4043 /* start a new check operation if there are no failures */ 4044 if (s->failed == 0) { 4045 BUG_ON(s->uptodate != disks); 4046 sh->check_state = check_state_run; 4047 set_bit(STRIPE_OP_CHECK, &s->ops_request); 4048 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); 4049 s->uptodate--; 4050 break; 4051 } 4052 dev = &sh->dev[s->failed_num[0]]; 4053 /* fall through */ 4054 case check_state_compute_result: 4055 sh->check_state = check_state_idle; 4056 if (!dev) 4057 dev = &sh->dev[sh->pd_idx]; 4058 4059 /* check that a write has not made the stripe insync */ 4060 if (test_bit(STRIPE_INSYNC, &sh->state)) 4061 break; 4062 4063 /* either failed parity check, or recovery is happening */ 4064 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags)); 4065 BUG_ON(s->uptodate != disks); 4066 4067 set_bit(R5_LOCKED, &dev->flags); 4068 s->locked++; 4069 set_bit(R5_Wantwrite, &dev->flags); 4070 4071 clear_bit(STRIPE_DEGRADED, &sh->state); 4072 set_bit(STRIPE_INSYNC, &sh->state); 4073 break; 4074 case check_state_run: 4075 break; /* we will be called again upon completion */ 4076 case check_state_check_result: 4077 sh->check_state = check_state_idle; 4078 4079 /* if a failure occurred during the check operation, leave 4080 * STRIPE_INSYNC not set and let the stripe be handled again 4081 */ 4082 if (s->failed) 4083 break; 4084 4085 /* handle a successful check operation, if parity is correct 4086 * we are done. Otherwise update the mismatch count and repair 4087 * parity if !MD_RECOVERY_CHECK 4088 */ 4089 if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0) 4090 /* parity is correct (on disc, 4091 * not in buffer any more) 4092 */ 4093 set_bit(STRIPE_INSYNC, &sh->state); 4094 else { 4095 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); 4096 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) { 4097 /* don't try to repair!! 
*/ 4098 set_bit(STRIPE_INSYNC, &sh->state); 4099 pr_warn_ratelimited("%s: mismatch sector in range " 4100 "%llu-%llu\n", mdname(conf->mddev), 4101 (unsigned long long) sh->sector, 4102 (unsigned long long) sh->sector + 4103 STRIPE_SECTORS); 4104 } else { 4105 sh->check_state = check_state_compute_run; 4106 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 4107 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 4108 set_bit(R5_Wantcompute, 4109 &sh->dev[sh->pd_idx].flags); 4110 sh->ops.target = sh->pd_idx; 4111 sh->ops.target2 = -1; 4112 s->uptodate++; 4113 } 4114 } 4115 break; 4116 case check_state_compute_run: 4117 break; 4118 default: 4119 pr_err("%s: unknown check_state: %d sector: %llu\n", 4120 __func__, sh->check_state, 4121 (unsigned long long) sh->sector); 4122 BUG(); 4123 } 4124 } 4125 4126 static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, 4127 struct stripe_head_state *s, 4128 int disks) 4129 { 4130 int pd_idx = sh->pd_idx; 4131 int qd_idx = sh->qd_idx; 4132 struct r5dev *dev; 4133 4134 BUG_ON(sh->batch_head); 4135 set_bit(STRIPE_HANDLE, &sh->state); 4136 4137 BUG_ON(s->failed > 2); 4138 4139 /* Want to check and possibly repair P and Q. 4140 * However there could be one 'failed' device, in which 4141 * case we can only check one of them, possibly using the 4142 * other to generate missing data 4143 */ 4144 4145 switch (sh->check_state) { 4146 case check_state_idle: 4147 /* start a new check operation if there are < 2 failures */ 4148 if (s->failed == s->q_failed) { 4149 /* The only possible failed device holds Q, so it 4150 * makes sense to check P (If anything else were failed, 4151 * we would have used P to recreate it). 4152 */ 4153 sh->check_state = check_state_run; 4154 } 4155 if (!s->q_failed && s->failed < 2) { 4156 /* Q is not failed, and we didn't use it to generate 4157 * anything, so it makes sense to check it 4158 */ 4159 if (sh->check_state == check_state_run) 4160 sh->check_state = check_state_run_pq; 4161 else 4162 sh->check_state = check_state_run_q; 4163 } 4164 4165 /* discard potentially stale zero_sum_result */ 4166 sh->ops.zero_sum_result = 0; 4167 4168 if (sh->check_state == check_state_run) { 4169 /* async_xor_zero_sum destroys the contents of P */ 4170 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 4171 s->uptodate--; 4172 } 4173 if (sh->check_state >= check_state_run && 4174 sh->check_state <= check_state_run_pq) { 4175 /* async_syndrome_zero_sum preserves P and Q, so 4176 * no need to mark them !uptodate here 4177 */ 4178 set_bit(STRIPE_OP_CHECK, &s->ops_request); 4179 break; 4180 } 4181 4182 /* we have 2-disk failure */ 4183 BUG_ON(s->failed != 2); 4184 /* fall through */ 4185 case check_state_compute_result: 4186 sh->check_state = check_state_idle; 4187 4188 /* check that a write has not made the stripe insync */ 4189 if (test_bit(STRIPE_INSYNC, &sh->state)) 4190 break; 4191 4192 /* now write out any block on a failed drive, 4193 * or P or Q if they were recomputed 4194 */ 4195 dev = NULL; 4196 if (s->failed == 2) { 4197 dev = &sh->dev[s->failed_num[1]]; 4198 s->locked++; 4199 set_bit(R5_LOCKED, &dev->flags); 4200 set_bit(R5_Wantwrite, &dev->flags); 4201 } 4202 if (s->failed >= 1) { 4203 dev = &sh->dev[s->failed_num[0]]; 4204 s->locked++; 4205 set_bit(R5_LOCKED, &dev->flags); 4206 set_bit(R5_Wantwrite, &dev->flags); 4207 } 4208 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { 4209 dev = &sh->dev[pd_idx]; 4210 s->locked++; 4211 set_bit(R5_LOCKED, &dev->flags); 4212 set_bit(R5_Wantwrite, &dev->flags); 4213 } 4214 if 
(sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { 4215 dev = &sh->dev[qd_idx]; 4216 s->locked++; 4217 set_bit(R5_LOCKED, &dev->flags); 4218 set_bit(R5_Wantwrite, &dev->flags); 4219 } 4220 if (WARN_ONCE(dev && !test_bit(R5_UPTODATE, &dev->flags), 4221 "%s: disk%td not up to date\n", 4222 mdname(conf->mddev), 4223 dev - (struct r5dev *) &sh->dev)) { 4224 clear_bit(R5_LOCKED, &dev->flags); 4225 clear_bit(R5_Wantwrite, &dev->flags); 4226 s->locked--; 4227 } 4228 clear_bit(STRIPE_DEGRADED, &sh->state); 4229 4230 set_bit(STRIPE_INSYNC, &sh->state); 4231 break; 4232 case check_state_run: 4233 case check_state_run_q: 4234 case check_state_run_pq: 4235 break; /* we will be called again upon completion */ 4236 case check_state_check_result: 4237 sh->check_state = check_state_idle; 4238 4239 /* handle a successful check operation, if parity is correct 4240 * we are done. Otherwise update the mismatch count and repair 4241 * parity if !MD_RECOVERY_CHECK 4242 */ 4243 if (sh->ops.zero_sum_result == 0) { 4244 /* both parities are correct */ 4245 if (!s->failed) 4246 set_bit(STRIPE_INSYNC, &sh->state); 4247 else { 4248 /* in contrast to the raid5 case we can validate 4249 * parity, but still have a failure to write 4250 * back 4251 */ 4252 sh->check_state = check_state_compute_result; 4253 /* Returning at this point means that we may go 4254 * off and bring p and/or q uptodate again so 4255 * we make sure to check zero_sum_result again 4256 * to verify if p or q need writeback 4257 */ 4258 } 4259 } else { 4260 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); 4261 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) { 4262 /* don't try to repair!! */ 4263 set_bit(STRIPE_INSYNC, &sh->state); 4264 pr_warn_ratelimited("%s: mismatch sector in range " 4265 "%llu-%llu\n", mdname(conf->mddev), 4266 (unsigned long long) sh->sector, 4267 (unsigned long long) sh->sector + 4268 STRIPE_SECTORS); 4269 } else { 4270 int *target = &sh->ops.target; 4271 4272 sh->ops.target = -1; 4273 sh->ops.target2 = -1; 4274 sh->check_state = check_state_compute_run; 4275 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 4276 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 4277 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { 4278 set_bit(R5_Wantcompute, 4279 &sh->dev[pd_idx].flags); 4280 *target = pd_idx; 4281 target = &sh->ops.target2; 4282 s->uptodate++; 4283 } 4284 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { 4285 set_bit(R5_Wantcompute, 4286 &sh->dev[qd_idx].flags); 4287 *target = qd_idx; 4288 s->uptodate++; 4289 } 4290 } 4291 } 4292 break; 4293 case check_state_compute_run: 4294 break; 4295 default: 4296 pr_warn("%s: unknown check_state: %d sector: %llu\n", 4297 __func__, sh->check_state, 4298 (unsigned long long) sh->sector); 4299 BUG(); 4300 } 4301 } 4302 4303 static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh) 4304 { 4305 int i; 4306 4307 /* We have read all the blocks in this stripe and now we need to 4308 * copy some of them into a target stripe for expand. 
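 * For each data block we work out where it lands in the new layout
 * (raid5_compute_blocknr + raid5_compute_sector), take a reference on
 * that target stripe and async_memcpy the page across, marking the
 * destination R5_Expanded and R5_UPTODATE.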
4309 */ 4310 struct dma_async_tx_descriptor *tx = NULL; 4311 BUG_ON(sh->batch_head); 4312 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state); 4313 for (i = 0; i < sh->disks; i++) 4314 if (i != sh->pd_idx && i != sh->qd_idx) { 4315 int dd_idx, j; 4316 struct stripe_head *sh2; 4317 struct async_submit_ctl submit; 4318 4319 sector_t bn = raid5_compute_blocknr(sh, i, 1); 4320 sector_t s = raid5_compute_sector(conf, bn, 0, 4321 &dd_idx, NULL); 4322 sh2 = raid5_get_active_stripe(conf, s, 0, 1, 1); 4323 if (sh2 == NULL) 4324 /* so far only the early blocks of this stripe 4325 * have been requested. When later blocks 4326 * get requested, we will try again 4327 */ 4328 continue; 4329 if (!test_bit(STRIPE_EXPANDING, &sh2->state) || 4330 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) { 4331 /* must have already done this block */ 4332 raid5_release_stripe(sh2); 4333 continue; 4334 } 4335 4336 /* place all the copies on one channel */ 4337 init_async_submit(&submit, 0, tx, NULL, NULL, NULL); 4338 tx = async_memcpy(sh2->dev[dd_idx].page, 4339 sh->dev[i].page, 0, 0, STRIPE_SIZE, 4340 &submit); 4341 4342 set_bit(R5_Expanded, &sh2->dev[dd_idx].flags); 4343 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags); 4344 for (j = 0; j < conf->raid_disks; j++) 4345 if (j != sh2->pd_idx && 4346 j != sh2->qd_idx && 4347 !test_bit(R5_Expanded, &sh2->dev[j].flags)) 4348 break; 4349 if (j == conf->raid_disks) { 4350 set_bit(STRIPE_EXPAND_READY, &sh2->state); 4351 set_bit(STRIPE_HANDLE, &sh2->state); 4352 } 4353 raid5_release_stripe(sh2); 4354 4355 } 4356 /* done submitting copies, wait for them to complete */ 4357 async_tx_quiesce(&tx); 4358 } 4359 4360 /* 4361 * handle_stripe - do things to a stripe. 4362 * 4363 * We lock the stripe by setting STRIPE_ACTIVE and then examine the 4364 * state of various bits to see what needs to be done. 
4365 * Possible results: 4366 * return some read requests which now have data 4367 * return some write requests which are safely on storage 4368 * schedule a read on some buffers 4369 * schedule a write of some buffers 4370 * return confirmation of parity correctness 4371 * 4372 */ 4373 4374 static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) 4375 { 4376 struct r5conf *conf = sh->raid_conf; 4377 int disks = sh->disks; 4378 struct r5dev *dev; 4379 int i; 4380 int do_recovery = 0; 4381 4382 memset(s, 0, sizeof(*s)); 4383 4384 s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state) && !sh->batch_head; 4385 s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state) && !sh->batch_head; 4386 s->failed_num[0] = -1; 4387 s->failed_num[1] = -1; 4388 s->log_failed = r5l_log_disk_error(conf); 4389 4390 /* Now to look around and see what can be done */ 4391 rcu_read_lock(); 4392 for (i=disks; i--; ) { 4393 struct md_rdev *rdev; 4394 sector_t first_bad; 4395 int bad_sectors; 4396 int is_bad = 0; 4397 4398 dev = &sh->dev[i]; 4399 4400 pr_debug("check %d: state 0x%lx read %p write %p written %p\n", 4401 i, dev->flags, 4402 dev->toread, dev->towrite, dev->written); 4403 /* maybe we can reply to a read 4404 * 4405 * new wantfill requests are only permitted while 4406 * ops_complete_biofill is guaranteed to be inactive 4407 */ 4408 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread && 4409 !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) 4410 set_bit(R5_Wantfill, &dev->flags); 4411 4412 /* now count some things */ 4413 if (test_bit(R5_LOCKED, &dev->flags)) 4414 s->locked++; 4415 if (test_bit(R5_UPTODATE, &dev->flags)) 4416 s->uptodate++; 4417 if (test_bit(R5_Wantcompute, &dev->flags)) { 4418 s->compute++; 4419 BUG_ON(s->compute > 2); 4420 } 4421 4422 if (test_bit(R5_Wantfill, &dev->flags)) 4423 s->to_fill++; 4424 else if (dev->toread) 4425 s->to_read++; 4426 if (dev->towrite) { 4427 s->to_write++; 4428 if (!test_bit(R5_OVERWRITE, &dev->flags)) 4429 s->non_overwrite++; 4430 } 4431 if (dev->written) 4432 s->written++; 4433 /* Prefer to use the replacement for reads, but only 4434 * if it is recovered enough and has no bad blocks. 
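 * R5_ReadRepl is set only when the replacement is present, not Faulty,
 * recovered past this stripe and free of bad blocks here; otherwise we
 * fall back to the main rdev (and may flag R5_NeedReplace).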
4435 */ 4436 rdev = rcu_dereference(conf->disks[i].replacement); 4437 if (rdev && !test_bit(Faulty, &rdev->flags) && 4438 rdev->recovery_offset >= sh->sector + STRIPE_SECTORS && 4439 !is_badblock(rdev, sh->sector, STRIPE_SECTORS, 4440 &first_bad, &bad_sectors)) 4441 set_bit(R5_ReadRepl, &dev->flags); 4442 else { 4443 if (rdev && !test_bit(Faulty, &rdev->flags)) 4444 set_bit(R5_NeedReplace, &dev->flags); 4445 else 4446 clear_bit(R5_NeedReplace, &dev->flags); 4447 rdev = rcu_dereference(conf->disks[i].rdev); 4448 clear_bit(R5_ReadRepl, &dev->flags); 4449 } 4450 if (rdev && test_bit(Faulty, &rdev->flags)) 4451 rdev = NULL; 4452 if (rdev) { 4453 is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, 4454 &first_bad, &bad_sectors); 4455 if (s->blocked_rdev == NULL 4456 && (test_bit(Blocked, &rdev->flags) 4457 || is_bad < 0)) { 4458 if (is_bad < 0) 4459 set_bit(BlockedBadBlocks, 4460 &rdev->flags); 4461 s->blocked_rdev = rdev; 4462 atomic_inc(&rdev->nr_pending); 4463 } 4464 } 4465 clear_bit(R5_Insync, &dev->flags); 4466 if (!rdev) 4467 /* Not in-sync */; 4468 else if (is_bad) { 4469 /* also not in-sync */ 4470 if (!test_bit(WriteErrorSeen, &rdev->flags) && 4471 test_bit(R5_UPTODATE, &dev->flags)) { 4472 /* treat as in-sync, but with a read error 4473 * which we can now try to correct 4474 */ 4475 set_bit(R5_Insync, &dev->flags); 4476 set_bit(R5_ReadError, &dev->flags); 4477 } 4478 } else if (test_bit(In_sync, &rdev->flags)) 4479 set_bit(R5_Insync, &dev->flags); 4480 else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) 4481 /* in sync if before recovery_offset */ 4482 set_bit(R5_Insync, &dev->flags); 4483 else if (test_bit(R5_UPTODATE, &dev->flags) && 4484 test_bit(R5_Expanded, &dev->flags)) 4485 /* If we've reshaped into here, we assume it is Insync. 4486 * We will shortly update recovery_offset to make 4487 * it official. 
4488 */ 4489 set_bit(R5_Insync, &dev->flags); 4490 4491 if (test_bit(R5_WriteError, &dev->flags)) { 4492 /* This flag does not apply to '.replacement' 4493 * only to .rdev, so make sure to check that*/ 4494 struct md_rdev *rdev2 = rcu_dereference( 4495 conf->disks[i].rdev); 4496 if (rdev2 == rdev) 4497 clear_bit(R5_Insync, &dev->flags); 4498 if (rdev2 && !test_bit(Faulty, &rdev2->flags)) { 4499 s->handle_bad_blocks = 1; 4500 atomic_inc(&rdev2->nr_pending); 4501 } else 4502 clear_bit(R5_WriteError, &dev->flags); 4503 } 4504 if (test_bit(R5_MadeGood, &dev->flags)) { 4505 /* This flag does not apply to '.replacement' 4506 * only to .rdev, so make sure to check that*/ 4507 struct md_rdev *rdev2 = rcu_dereference( 4508 conf->disks[i].rdev); 4509 if (rdev2 && !test_bit(Faulty, &rdev2->flags)) { 4510 s->handle_bad_blocks = 1; 4511 atomic_inc(&rdev2->nr_pending); 4512 } else 4513 clear_bit(R5_MadeGood, &dev->flags); 4514 } 4515 if (test_bit(R5_MadeGoodRepl, &dev->flags)) { 4516 struct md_rdev *rdev2 = rcu_dereference( 4517 conf->disks[i].replacement); 4518 if (rdev2 && !test_bit(Faulty, &rdev2->flags)) { 4519 s->handle_bad_blocks = 1; 4520 atomic_inc(&rdev2->nr_pending); 4521 } else 4522 clear_bit(R5_MadeGoodRepl, &dev->flags); 4523 } 4524 if (!test_bit(R5_Insync, &dev->flags)) { 4525 /* The ReadError flag will just be confusing now */ 4526 clear_bit(R5_ReadError, &dev->flags); 4527 clear_bit(R5_ReWrite, &dev->flags); 4528 } 4529 if (test_bit(R5_ReadError, &dev->flags)) 4530 clear_bit(R5_Insync, &dev->flags); 4531 if (!test_bit(R5_Insync, &dev->flags)) { 4532 if (s->failed < 2) 4533 s->failed_num[s->failed] = i; 4534 s->failed++; 4535 if (rdev && !test_bit(Faulty, &rdev->flags)) 4536 do_recovery = 1; 4537 else if (!rdev) { 4538 rdev = rcu_dereference( 4539 conf->disks[i].replacement); 4540 if (rdev && !test_bit(Faulty, &rdev->flags)) 4541 do_recovery = 1; 4542 } 4543 } 4544 4545 if (test_bit(R5_InJournal, &dev->flags)) 4546 s->injournal++; 4547 if (test_bit(R5_InJournal, &dev->flags) && dev->written) 4548 s->just_cached++; 4549 } 4550 if (test_bit(STRIPE_SYNCING, &sh->state)) { 4551 /* If there is a failed device being replaced, 4552 * we must be recovering. 4553 * else if we are after recovery_cp, we must be syncing 4554 * else if MD_RECOVERY_REQUESTED is set, we also are syncing. 4555 * else we can only be replacing 4556 * sync and recovery both need to read all devices, and so 4557 * use the same flag. 4558 */ 4559 if (do_recovery || 4560 sh->sector >= conf->mddev->recovery_cp || 4561 test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery))) 4562 s->syncing = 1; 4563 else 4564 s->replacing = 1; 4565 } 4566 rcu_read_unlock(); 4567 } 4568 4569 static int clear_batch_ready(struct stripe_head *sh) 4570 { 4571 /* Return '1' if this is a member of batch, or 4572 * '0' if it is a lone stripe or a head which can now be 4573 * handled. 
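 * A '1' return makes handle_stripe() drop the stripe again; the member
 * will be processed when its batch head is handled.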
4574 */ 4575 struct stripe_head *tmp; 4576 if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state)) 4577 return (sh->batch_head && sh->batch_head != sh); 4578 spin_lock(&sh->stripe_lock); 4579 if (!sh->batch_head) { 4580 spin_unlock(&sh->stripe_lock); 4581 return 0; 4582 } 4583 4584 /* 4585 * this stripe could be added to a batch list before we check 4586 * BATCH_READY, skips it 4587 */ 4588 if (sh->batch_head != sh) { 4589 spin_unlock(&sh->stripe_lock); 4590 return 1; 4591 } 4592 spin_lock(&sh->batch_lock); 4593 list_for_each_entry(tmp, &sh->batch_list, batch_list) 4594 clear_bit(STRIPE_BATCH_READY, &tmp->state); 4595 spin_unlock(&sh->batch_lock); 4596 spin_unlock(&sh->stripe_lock); 4597 4598 /* 4599 * BATCH_READY is cleared, no new stripes can be added. 4600 * batch_list can be accessed without lock 4601 */ 4602 return 0; 4603 } 4604 4605 static void break_stripe_batch_list(struct stripe_head *head_sh, 4606 unsigned long handle_flags) 4607 { 4608 struct stripe_head *sh, *next; 4609 int i; 4610 int do_wakeup = 0; 4611 4612 list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) { 4613 4614 list_del_init(&sh->batch_list); 4615 4616 WARN_ONCE(sh->state & ((1 << STRIPE_ACTIVE) | 4617 (1 << STRIPE_SYNCING) | 4618 (1 << STRIPE_REPLACED) | 4619 (1 << STRIPE_DELAYED) | 4620 (1 << STRIPE_BIT_DELAY) | 4621 (1 << STRIPE_FULL_WRITE) | 4622 (1 << STRIPE_BIOFILL_RUN) | 4623 (1 << STRIPE_COMPUTE_RUN) | 4624 (1 << STRIPE_DISCARD) | 4625 (1 << STRIPE_BATCH_READY) | 4626 (1 << STRIPE_BATCH_ERR) | 4627 (1 << STRIPE_BITMAP_PENDING)), 4628 "stripe state: %lx\n", sh->state); 4629 WARN_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) | 4630 (1 << STRIPE_REPLACED)), 4631 "head stripe state: %lx\n", head_sh->state); 4632 4633 set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS | 4634 (1 << STRIPE_PREREAD_ACTIVE) | 4635 (1 << STRIPE_DEGRADED) | 4636 (1 << STRIPE_ON_UNPLUG_LIST)), 4637 head_sh->state & (1 << STRIPE_INSYNC)); 4638 4639 sh->check_state = head_sh->check_state; 4640 sh->reconstruct_state = head_sh->reconstruct_state; 4641 spin_lock_irq(&sh->stripe_lock); 4642 sh->batch_head = NULL; 4643 spin_unlock_irq(&sh->stripe_lock); 4644 for (i = 0; i < sh->disks; i++) { 4645 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 4646 do_wakeup = 1; 4647 sh->dev[i].flags = head_sh->dev[i].flags & 4648 (~((1 << R5_WriteError) | (1 << R5_Overlap))); 4649 } 4650 if (handle_flags == 0 || 4651 sh->state & handle_flags) 4652 set_bit(STRIPE_HANDLE, &sh->state); 4653 raid5_release_stripe(sh); 4654 } 4655 spin_lock_irq(&head_sh->stripe_lock); 4656 head_sh->batch_head = NULL; 4657 spin_unlock_irq(&head_sh->stripe_lock); 4658 for (i = 0; i < head_sh->disks; i++) 4659 if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags)) 4660 do_wakeup = 1; 4661 if (head_sh->state & handle_flags) 4662 set_bit(STRIPE_HANDLE, &head_sh->state); 4663 4664 if (do_wakeup) 4665 wake_up(&head_sh->raid_conf->wait_for_overlap); 4666 } 4667 4668 static void handle_stripe(struct stripe_head *sh) 4669 { 4670 struct stripe_head_state s; 4671 struct r5conf *conf = sh->raid_conf; 4672 int i; 4673 int prexor; 4674 int disks = sh->disks; 4675 struct r5dev *pdev, *qdev; 4676 4677 clear_bit(STRIPE_HANDLE, &sh->state); 4678 if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) { 4679 /* already being handled, ensure it gets handled 4680 * again when current action finishes */ 4681 set_bit(STRIPE_HANDLE, &sh->state); 4682 return; 4683 } 4684 4685 if (clear_batch_ready(sh) ) { 4686 clear_bit_unlock(STRIPE_ACTIVE, &sh->state); 4687 return; 4688 } 
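/* If the batch hit an error, dissolve it here so that every member stripe gets handled (and retried) individually. */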
4689 4690 if (test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state)) 4691 break_stripe_batch_list(sh, 0); 4692 4693 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) { 4694 spin_lock(&sh->stripe_lock); 4695 /* 4696 * Cannot process 'sync' concurrently with 'discard'. 4697 * Flush data in r5cache before 'sync'. 4698 */ 4699 if (!test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) && 4700 !test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state) && 4701 !test_bit(STRIPE_DISCARD, &sh->state) && 4702 test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { 4703 set_bit(STRIPE_SYNCING, &sh->state); 4704 clear_bit(STRIPE_INSYNC, &sh->state); 4705 clear_bit(STRIPE_REPLACED, &sh->state); 4706 } 4707 spin_unlock(&sh->stripe_lock); 4708 } 4709 clear_bit(STRIPE_DELAYED, &sh->state); 4710 4711 pr_debug("handling stripe %llu, state=%#lx cnt=%d, " 4712 "pd_idx=%d, qd_idx=%d, check:%d, reconstruct:%d\n", 4713 (unsigned long long)sh->sector, sh->state, 4714 atomic_read(&sh->count), sh->pd_idx, sh->qd_idx, 4715 sh->check_state, sh->reconstruct_state); 4716 4717 analyse_stripe(sh, &s); 4718 4719 if (test_bit(STRIPE_LOG_TRAPPED, &sh->state)) 4720 goto finish; 4721 4722 if (s.handle_bad_blocks || 4723 test_bit(MD_SB_CHANGE_PENDING, &conf->mddev->sb_flags)) { 4724 set_bit(STRIPE_HANDLE, &sh->state); 4725 goto finish; 4726 } 4727 4728 if (unlikely(s.blocked_rdev)) { 4729 if (s.syncing || s.expanding || s.expanded || 4730 s.replacing || s.to_write || s.written) { 4731 set_bit(STRIPE_HANDLE, &sh->state); 4732 goto finish; 4733 } 4734 /* There is nothing for the blocked_rdev to block */ 4735 rdev_dec_pending(s.blocked_rdev, conf->mddev); 4736 s.blocked_rdev = NULL; 4737 } 4738 4739 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { 4740 set_bit(STRIPE_OP_BIOFILL, &s.ops_request); 4741 set_bit(STRIPE_BIOFILL_RUN, &sh->state); 4742 } 4743 4744 pr_debug("locked=%d uptodate=%d to_read=%d" 4745 " to_write=%d failed=%d failed_num=%d,%d\n", 4746 s.locked, s.uptodate, s.to_read, s.to_write, s.failed, 4747 s.failed_num[0], s.failed_num[1]); 4748 /* 4749 * check if the array has lost more than max_degraded devices and, 4750 * if so, some requests might need to be failed.
4751 * 4752 * When journal device failed (log_failed), we will only process 4753 * the stripe if there is data need write to raid disks 4754 */ 4755 if (s.failed > conf->max_degraded || 4756 (s.log_failed && s.injournal == 0)) { 4757 sh->check_state = 0; 4758 sh->reconstruct_state = 0; 4759 break_stripe_batch_list(sh, 0); 4760 if (s.to_read+s.to_write+s.written) 4761 handle_failed_stripe(conf, sh, &s, disks); 4762 if (s.syncing + s.replacing) 4763 handle_failed_sync(conf, sh, &s); 4764 } 4765 4766 /* Now we check to see if any write operations have recently 4767 * completed 4768 */ 4769 prexor = 0; 4770 if (sh->reconstruct_state == reconstruct_state_prexor_drain_result) 4771 prexor = 1; 4772 if (sh->reconstruct_state == reconstruct_state_drain_result || 4773 sh->reconstruct_state == reconstruct_state_prexor_drain_result) { 4774 sh->reconstruct_state = reconstruct_state_idle; 4775 4776 /* All the 'written' buffers and the parity block are ready to 4777 * be written back to disk 4778 */ 4779 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) && 4780 !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)); 4781 BUG_ON(sh->qd_idx >= 0 && 4782 !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags) && 4783 !test_bit(R5_Discard, &sh->dev[sh->qd_idx].flags)); 4784 for (i = disks; i--; ) { 4785 struct r5dev *dev = &sh->dev[i]; 4786 if (test_bit(R5_LOCKED, &dev->flags) && 4787 (i == sh->pd_idx || i == sh->qd_idx || 4788 dev->written || test_bit(R5_InJournal, 4789 &dev->flags))) { 4790 pr_debug("Writing block %d\n", i); 4791 set_bit(R5_Wantwrite, &dev->flags); 4792 if (prexor) 4793 continue; 4794 if (s.failed > 1) 4795 continue; 4796 if (!test_bit(R5_Insync, &dev->flags) || 4797 ((i == sh->pd_idx || i == sh->qd_idx) && 4798 s.failed == 0)) 4799 set_bit(STRIPE_INSYNC, &sh->state); 4800 } 4801 } 4802 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 4803 s.dec_preread_active = 1; 4804 } 4805 4806 /* 4807 * might be able to return some write requests if the parity blocks 4808 * are safe, or on a failed drive 4809 */ 4810 pdev = &sh->dev[sh->pd_idx]; 4811 s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx) 4812 || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx); 4813 qdev = &sh->dev[sh->qd_idx]; 4814 s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx) 4815 || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx) 4816 || conf->level < 6; 4817 4818 if (s.written && 4819 (s.p_failed || ((test_bit(R5_Insync, &pdev->flags) 4820 && !test_bit(R5_LOCKED, &pdev->flags) 4821 && (test_bit(R5_UPTODATE, &pdev->flags) || 4822 test_bit(R5_Discard, &pdev->flags))))) && 4823 (s.q_failed || ((test_bit(R5_Insync, &qdev->flags) 4824 && !test_bit(R5_LOCKED, &qdev->flags) 4825 && (test_bit(R5_UPTODATE, &qdev->flags) || 4826 test_bit(R5_Discard, &qdev->flags)))))) 4827 handle_stripe_clean_event(conf, sh, disks); 4828 4829 if (s.just_cached) 4830 r5c_handle_cached_data_endio(conf, sh, disks); 4831 log_stripe_write_finished(sh); 4832 4833 /* Now we might consider reading some blocks, either to check/generate 4834 * parity, or to satisfy requests 4835 * or to load a block that is being partially written. 
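 * Non-overwrite writes are what force the rmw/rcw pre-reads; the
 * syncing, replacing and expanding cases just need the relevant blocks
 * in memory before they can proceed.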
4836 */ 4837 if (s.to_read || s.non_overwrite 4838 || (conf->level == 6 && s.to_write && s.failed) 4839 || (s.syncing && (s.uptodate + s.compute < disks)) 4840 || s.replacing 4841 || s.expanding) 4842 handle_stripe_fill(sh, &s, disks); 4843 4844 /* 4845 * When the stripe finishes full journal write cycle (write to journal 4846 * and raid disk), this is the clean up procedure so it is ready for 4847 * next operation. 4848 */ 4849 r5c_finish_stripe_write_out(conf, sh, &s); 4850 4851 /* 4852 * Now to consider new write requests, cache write back and what else, 4853 * if anything should be read. We do not handle new writes when: 4854 * 1/ A 'write' operation (copy+xor) is already in flight. 4855 * 2/ A 'check' operation is in flight, as it may clobber the parity 4856 * block. 4857 * 3/ A r5c cache log write is in flight. 4858 */ 4859 4860 if (!sh->reconstruct_state && !sh->check_state && !sh->log_io) { 4861 if (!r5c_is_writeback(conf->log)) { 4862 if (s.to_write) 4863 handle_stripe_dirtying(conf, sh, &s, disks); 4864 } else { /* write back cache */ 4865 int ret = 0; 4866 4867 /* First, try handle writes in caching phase */ 4868 if (s.to_write) 4869 ret = r5c_try_caching_write(conf, sh, &s, 4870 disks); 4871 /* 4872 * If caching phase failed: ret == -EAGAIN 4873 * OR 4874 * stripe under reclaim: !caching && injournal 4875 * 4876 * fall back to handle_stripe_dirtying() 4877 */ 4878 if (ret == -EAGAIN || 4879 /* stripe under reclaim: !caching && injournal */ 4880 (!test_bit(STRIPE_R5C_CACHING, &sh->state) && 4881 s.injournal > 0)) { 4882 ret = handle_stripe_dirtying(conf, sh, &s, 4883 disks); 4884 if (ret == -EAGAIN) 4885 goto finish; 4886 } 4887 } 4888 } 4889 4890 /* maybe we need to check and possibly fix the parity for this stripe 4891 * Any reads will already have been scheduled, so we just see if enough 4892 * data is available. The parity check is held off while parity 4893 * dependent operations are in flight. 
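 * Concretely: for a syncing stripe we only enter the check code when
 * s.locked is zero and STRIPE_COMPUTE_RUN is clear.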
4894 */ 4895 if (sh->check_state || 4896 (s.syncing && s.locked == 0 && 4897 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && 4898 !test_bit(STRIPE_INSYNC, &sh->state))) { 4899 if (conf->level == 6) 4900 handle_parity_checks6(conf, sh, &s, disks); 4901 else 4902 handle_parity_checks5(conf, sh, &s, disks); 4903 } 4904 4905 if ((s.replacing || s.syncing) && s.locked == 0 4906 && !test_bit(STRIPE_COMPUTE_RUN, &sh->state) 4907 && !test_bit(STRIPE_REPLACED, &sh->state)) { 4908 /* Write out to replacement devices where possible */ 4909 for (i = 0; i < conf->raid_disks; i++) 4910 if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) { 4911 WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags)); 4912 set_bit(R5_WantReplace, &sh->dev[i].flags); 4913 set_bit(R5_LOCKED, &sh->dev[i].flags); 4914 s.locked++; 4915 } 4916 if (s.replacing) 4917 set_bit(STRIPE_INSYNC, &sh->state); 4918 set_bit(STRIPE_REPLACED, &sh->state); 4919 } 4920 if ((s.syncing || s.replacing) && s.locked == 0 && 4921 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && 4922 test_bit(STRIPE_INSYNC, &sh->state)) { 4923 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 4924 clear_bit(STRIPE_SYNCING, &sh->state); 4925 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) 4926 wake_up(&conf->wait_for_overlap); 4927 } 4928 4929 /* If the failed drives are just a ReadError, then we might need 4930 * to progress the repair/check process 4931 */ 4932 if (s.failed <= conf->max_degraded && !conf->mddev->ro) 4933 for (i = 0; i < s.failed; i++) { 4934 struct r5dev *dev = &sh->dev[s.failed_num[i]]; 4935 if (test_bit(R5_ReadError, &dev->flags) 4936 && !test_bit(R5_LOCKED, &dev->flags) 4937 && test_bit(R5_UPTODATE, &dev->flags) 4938 ) { 4939 if (!test_bit(R5_ReWrite, &dev->flags)) { 4940 set_bit(R5_Wantwrite, &dev->flags); 4941 set_bit(R5_ReWrite, &dev->flags); 4942 set_bit(R5_LOCKED, &dev->flags); 4943 s.locked++; 4944 } else { 4945 /* let's read it back */ 4946 set_bit(R5_Wantread, &dev->flags); 4947 set_bit(R5_LOCKED, &dev->flags); 4948 s.locked++; 4949 } 4950 } 4951 } 4952 4953 /* Finish reconstruct operations initiated by the expansion process */ 4954 if (sh->reconstruct_state == reconstruct_state_result) { 4955 struct stripe_head *sh_src 4956 = raid5_get_active_stripe(conf, sh->sector, 1, 1, 1); 4957 if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) { 4958 /* sh cannot be written until sh_src has been read. 
4959 * so arrange for sh to be delayed a little 4960 */ 4961 set_bit(STRIPE_DELAYED, &sh->state); 4962 set_bit(STRIPE_HANDLE, &sh->state); 4963 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, 4964 &sh_src->state)) 4965 atomic_inc(&conf->preread_active_stripes); 4966 raid5_release_stripe(sh_src); 4967 goto finish; 4968 } 4969 if (sh_src) 4970 raid5_release_stripe(sh_src); 4971 4972 sh->reconstruct_state = reconstruct_state_idle; 4973 clear_bit(STRIPE_EXPANDING, &sh->state); 4974 for (i = conf->raid_disks; i--; ) { 4975 set_bit(R5_Wantwrite, &sh->dev[i].flags); 4976 set_bit(R5_LOCKED, &sh->dev[i].flags); 4977 s.locked++; 4978 } 4979 } 4980 4981 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && 4982 !sh->reconstruct_state) { 4983 /* Need to write out all blocks after computing parity */ 4984 sh->disks = conf->raid_disks; 4985 stripe_set_idx(sh->sector, conf, 0, sh); 4986 schedule_reconstruction(sh, &s, 1, 1); 4987 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { 4988 clear_bit(STRIPE_EXPAND_READY, &sh->state); 4989 atomic_dec(&conf->reshape_stripes); 4990 wake_up(&conf->wait_for_overlap); 4991 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 4992 } 4993 4994 if (s.expanding && s.locked == 0 && 4995 !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) 4996 handle_stripe_expansion(conf, sh); 4997 4998 finish: 4999 /* wait for this device to become unblocked */ 5000 if (unlikely(s.blocked_rdev)) { 5001 if (conf->mddev->external) 5002 md_wait_for_blocked_rdev(s.blocked_rdev, 5003 conf->mddev); 5004 else 5005 /* Internal metadata will immediately 5006 * be written by raid5d, so we don't 5007 * need to wait here. 5008 */ 5009 rdev_dec_pending(s.blocked_rdev, 5010 conf->mddev); 5011 } 5012 5013 if (s.handle_bad_blocks) 5014 for (i = disks; i--; ) { 5015 struct md_rdev *rdev; 5016 struct r5dev *dev = &sh->dev[i]; 5017 if (test_and_clear_bit(R5_WriteError, &dev->flags)) { 5018 /* We own a safe reference to the rdev */ 5019 rdev = conf->disks[i].rdev; 5020 if (!rdev_set_badblocks(rdev, sh->sector, 5021 STRIPE_SECTORS, 0)) 5022 md_error(conf->mddev, rdev); 5023 rdev_dec_pending(rdev, conf->mddev); 5024 } 5025 if (test_and_clear_bit(R5_MadeGood, &dev->flags)) { 5026 rdev = conf->disks[i].rdev; 5027 rdev_clear_badblocks(rdev, sh->sector, 5028 STRIPE_SECTORS, 0); 5029 rdev_dec_pending(rdev, conf->mddev); 5030 } 5031 if (test_and_clear_bit(R5_MadeGoodRepl, &dev->flags)) { 5032 rdev = conf->disks[i].replacement; 5033 if (!rdev) 5034 /* rdev have been moved down */ 5035 rdev = conf->disks[i].rdev; 5036 rdev_clear_badblocks(rdev, sh->sector, 5037 STRIPE_SECTORS, 0); 5038 rdev_dec_pending(rdev, conf->mddev); 5039 } 5040 } 5041 5042 if (s.ops_request) 5043 raid_run_ops(sh, s.ops_request); 5044 5045 ops_run_io(sh, &s); 5046 5047 if (s.dec_preread_active) { 5048 /* We delay this until after ops_run_io so that if make_request 5049 * is waiting on a flush, it won't continue until the writes 5050 * have actually been submitted. 
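 * Dropping below IO_THRESHOLD also wakes the raid5d thread so that
 * delayed stripes can be activated again.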
5051 */ 5052 atomic_dec(&conf->preread_active_stripes); 5053 if (atomic_read(&conf->preread_active_stripes) < 5054 IO_THRESHOLD) 5055 md_wakeup_thread(conf->mddev->thread); 5056 } 5057 5058 clear_bit_unlock(STRIPE_ACTIVE, &sh->state); 5059 } 5060 5061 static void raid5_activate_delayed(struct r5conf *conf) 5062 { 5063 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { 5064 while (!list_empty(&conf->delayed_list)) { 5065 struct list_head *l = conf->delayed_list.next; 5066 struct stripe_head *sh; 5067 sh = list_entry(l, struct stripe_head, lru); 5068 list_del_init(l); 5069 clear_bit(STRIPE_DELAYED, &sh->state); 5070 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 5071 atomic_inc(&conf->preread_active_stripes); 5072 list_add_tail(&sh->lru, &conf->hold_list); 5073 raid5_wakeup_stripe_thread(sh); 5074 } 5075 } 5076 } 5077 5078 static void activate_bit_delay(struct r5conf *conf, 5079 struct list_head *temp_inactive_list) 5080 { 5081 /* device_lock is held */ 5082 struct list_head head; 5083 list_add(&head, &conf->bitmap_list); 5084 list_del_init(&conf->bitmap_list); 5085 while (!list_empty(&head)) { 5086 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); 5087 int hash; 5088 list_del_init(&sh->lru); 5089 atomic_inc(&sh->count); 5090 hash = sh->hash_lock_index; 5091 __release_stripe(conf, sh, &temp_inactive_list[hash]); 5092 } 5093 } 5094 5095 static int raid5_congested(struct mddev *mddev, int bits) 5096 { 5097 struct r5conf *conf = mddev->private; 5098 5099 /* No difference between reads and writes. Just check 5100 * how busy the stripe_cache is 5101 */ 5102 5103 if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) 5104 return 1; 5105 5106 /* Also checks whether there is pressure on r5cache log space */ 5107 if (test_bit(R5C_LOG_TIGHT, &conf->cache_state)) 5108 return 1; 5109 if (conf->quiesce) 5110 return 1; 5111 if (atomic_read(&conf->empty_inactive_list_nr)) 5112 return 1; 5113 5114 return 0; 5115 } 5116 5117 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) 5118 { 5119 struct r5conf *conf = mddev->private; 5120 sector_t sector = bio->bi_iter.bi_sector; 5121 unsigned int chunk_sectors; 5122 unsigned int bio_sectors = bio_sectors(bio); 5123 5124 WARN_ON_ONCE(bio->bi_partno); 5125 5126 chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors); 5127 return chunk_sectors >= 5128 ((sector & (chunk_sectors - 1)) + bio_sectors); 5129 } 5130 5131 /* 5132 * add bio to the retry LIFO ( in O(1) ... we are in interrupt ) 5133 * later sampled by raid5d. 
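 * The LIFO is just a bi_next chain headed by
 * conf->retry_read_aligned_list and is manipulated under device_lock.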
5134 */ 5135 static void add_bio_to_retry(struct bio *bi,struct r5conf *conf) 5136 { 5137 unsigned long flags; 5138 5139 spin_lock_irqsave(&conf->device_lock, flags); 5140 5141 bi->bi_next = conf->retry_read_aligned_list; 5142 conf->retry_read_aligned_list = bi; 5143 5144 spin_unlock_irqrestore(&conf->device_lock, flags); 5145 md_wakeup_thread(conf->mddev->thread); 5146 } 5147 5148 static struct bio *remove_bio_from_retry(struct r5conf *conf, 5149 unsigned int *offset) 5150 { 5151 struct bio *bi; 5152 5153 bi = conf->retry_read_aligned; 5154 if (bi) { 5155 *offset = conf->retry_read_offset; 5156 conf->retry_read_aligned = NULL; 5157 return bi; 5158 } 5159 bi = conf->retry_read_aligned_list; 5160 if(bi) { 5161 conf->retry_read_aligned_list = bi->bi_next; 5162 bi->bi_next = NULL; 5163 *offset = 0; 5164 } 5165 5166 return bi; 5167 } 5168 5169 /* 5170 * The "raid5_align_endio" should check if the read succeeded and if it 5171 * did, call bio_endio on the original bio (having bio_put the new bio 5172 * first). 5173 * If the read failed.. 5174 */ 5175 static void raid5_align_endio(struct bio *bi) 5176 { 5177 struct bio* raid_bi = bi->bi_private; 5178 struct mddev *mddev; 5179 struct r5conf *conf; 5180 struct md_rdev *rdev; 5181 blk_status_t error = bi->bi_status; 5182 5183 bio_put(bi); 5184 5185 rdev = (void*)raid_bi->bi_next; 5186 raid_bi->bi_next = NULL; 5187 mddev = rdev->mddev; 5188 conf = mddev->private; 5189 5190 rdev_dec_pending(rdev, conf->mddev); 5191 5192 if (!error) { 5193 bio_endio(raid_bi); 5194 if (atomic_dec_and_test(&conf->active_aligned_reads)) 5195 wake_up(&conf->wait_for_quiescent); 5196 return; 5197 } 5198 5199 pr_debug("raid5_align_endio : io error...handing IO for a retry\n"); 5200 5201 add_bio_to_retry(raid_bi, conf); 5202 } 5203 5204 static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio) 5205 { 5206 struct r5conf *conf = mddev->private; 5207 int dd_idx; 5208 struct bio* align_bi; 5209 struct md_rdev *rdev; 5210 sector_t end_sector; 5211 5212 if (!in_chunk_boundary(mddev, raid_bio)) { 5213 pr_debug("%s: non aligned\n", __func__); 5214 return 0; 5215 } 5216 /* 5217 * use bio_clone_fast to make a copy of the bio 5218 */ 5219 align_bi = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->bio_set); 5220 if (!align_bi) 5221 return 0; 5222 /* 5223 * set bi_end_io to a new function, and set bi_private to the 5224 * original bio. 
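 * The chosen rdev is passed to raid5_align_endio by stashing it in
 * raid_bio->bi_next; the completion handler clears it again.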
5225 */ 5226 align_bi->bi_end_io = raid5_align_endio; 5227 align_bi->bi_private = raid_bio; 5228 /* 5229 * compute position 5230 */ 5231 align_bi->bi_iter.bi_sector = 5232 raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, 5233 0, &dd_idx, NULL); 5234 5235 end_sector = bio_end_sector(align_bi); 5236 rcu_read_lock(); 5237 rdev = rcu_dereference(conf->disks[dd_idx].replacement); 5238 if (!rdev || test_bit(Faulty, &rdev->flags) || 5239 rdev->recovery_offset < end_sector) { 5240 rdev = rcu_dereference(conf->disks[dd_idx].rdev); 5241 if (rdev && 5242 (test_bit(Faulty, &rdev->flags) || 5243 !(test_bit(In_sync, &rdev->flags) || 5244 rdev->recovery_offset >= end_sector))) 5245 rdev = NULL; 5246 } 5247 5248 if (r5c_big_stripe_cached(conf, align_bi->bi_iter.bi_sector)) { 5249 rcu_read_unlock(); 5250 bio_put(align_bi); 5251 return 0; 5252 } 5253 5254 if (rdev) { 5255 sector_t first_bad; 5256 int bad_sectors; 5257 5258 atomic_inc(&rdev->nr_pending); 5259 rcu_read_unlock(); 5260 raid_bio->bi_next = (void*)rdev; 5261 bio_set_dev(align_bi, rdev->bdev); 5262 5263 if (is_badblock(rdev, align_bi->bi_iter.bi_sector, 5264 bio_sectors(align_bi), 5265 &first_bad, &bad_sectors)) { 5266 bio_put(align_bi); 5267 rdev_dec_pending(rdev, mddev); 5268 return 0; 5269 } 5270 5271 /* No reshape active, so we can trust rdev->data_offset */ 5272 align_bi->bi_iter.bi_sector += rdev->data_offset; 5273 5274 spin_lock_irq(&conf->device_lock); 5275 wait_event_lock_irq(conf->wait_for_quiescent, 5276 conf->quiesce == 0, 5277 conf->device_lock); 5278 atomic_inc(&conf->active_aligned_reads); 5279 spin_unlock_irq(&conf->device_lock); 5280 5281 if (mddev->gendisk) 5282 trace_block_bio_remap(align_bi->bi_disk->queue, 5283 align_bi, disk_devt(mddev->gendisk), 5284 raid_bio->bi_iter.bi_sector); 5285 generic_make_request(align_bi); 5286 return 1; 5287 } else { 5288 rcu_read_unlock(); 5289 bio_put(align_bi); 5290 return 0; 5291 } 5292 } 5293 5294 static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio) 5295 { 5296 struct bio *split; 5297 sector_t sector = raid_bio->bi_iter.bi_sector; 5298 unsigned chunk_sects = mddev->chunk_sectors; 5299 unsigned sectors = chunk_sects - (sector & (chunk_sects-1)); 5300 5301 if (sectors < bio_sectors(raid_bio)) { 5302 struct r5conf *conf = mddev->private; 5303 split = bio_split(raid_bio, sectors, GFP_NOIO, &conf->bio_split); 5304 bio_chain(split, raid_bio); 5305 generic_make_request(raid_bio); 5306 raid_bio = split; 5307 } 5308 5309 if (!raid5_read_one_chunk(mddev, raid_bio)) 5310 return raid_bio; 5311 5312 return NULL; 5313 } 5314 5315 /* __get_priority_stripe - get the next stripe to process 5316 * 5317 * Full stripe writes are allowed to pass preread active stripes up until 5318 * the bypass_threshold is exceeded. In general the bypass_count 5319 * increments when the handle_list is handled before the hold_list; however, it 5320 * will not be incremented when STRIPE_IO_STARTED is sampled set signifying a 5321 * stripe with in flight i/o. The bypass_count will be reset when the 5322 * head of the hold_list has changed, i.e. the head was promoted to the 5323 * handle_list. 
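 * Illustration (made-up numbers): with bypass_threshold == 1, each pass
 * that picks a not-yet-started stripe from the handle_list while the same
 * stripe still heads the hold_list bumps bypass_count; once bypass_count
 * exceeds 1 and the handle_list is empty, the held full-stripe write is
 * taken even though pending_full_writes is non-zero.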
5324 */ 5325 static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group) 5326 { 5327 struct stripe_head *sh, *tmp; 5328 struct list_head *handle_list = NULL; 5329 struct r5worker_group *wg; 5330 bool second_try = !r5c_is_writeback(conf->log) && 5331 !r5l_log_disk_error(conf); 5332 bool try_loprio = test_bit(R5C_LOG_TIGHT, &conf->cache_state) || 5333 r5l_log_disk_error(conf); 5334 5335 again: 5336 wg = NULL; 5337 sh = NULL; 5338 if (conf->worker_cnt_per_group == 0) { 5339 handle_list = try_loprio ? &conf->loprio_list : 5340 &conf->handle_list; 5341 } else if (group != ANY_GROUP) { 5342 handle_list = try_loprio ? &conf->worker_groups[group].loprio_list : 5343 &conf->worker_groups[group].handle_list; 5344 wg = &conf->worker_groups[group]; 5345 } else { 5346 int i; 5347 for (i = 0; i < conf->group_cnt; i++) { 5348 handle_list = try_loprio ? &conf->worker_groups[i].loprio_list : 5349 &conf->worker_groups[i].handle_list; 5350 wg = &conf->worker_groups[i]; 5351 if (!list_empty(handle_list)) 5352 break; 5353 } 5354 } 5355 5356 pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n", 5357 __func__, 5358 list_empty(handle_list) ? "empty" : "busy", 5359 list_empty(&conf->hold_list) ? "empty" : "busy", 5360 atomic_read(&conf->pending_full_writes), conf->bypass_count); 5361 5362 if (!list_empty(handle_list)) { 5363 sh = list_entry(handle_list->next, typeof(*sh), lru); 5364 5365 if (list_empty(&conf->hold_list)) 5366 conf->bypass_count = 0; 5367 else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) { 5368 if (conf->hold_list.next == conf->last_hold) 5369 conf->bypass_count++; 5370 else { 5371 conf->last_hold = conf->hold_list.next; 5372 conf->bypass_count -= conf->bypass_threshold; 5373 if (conf->bypass_count < 0) 5374 conf->bypass_count = 0; 5375 } 5376 } 5377 } else if (!list_empty(&conf->hold_list) && 5378 ((conf->bypass_threshold && 5379 conf->bypass_count > conf->bypass_threshold) || 5380 atomic_read(&conf->pending_full_writes) == 0)) { 5381 5382 list_for_each_entry(tmp, &conf->hold_list, lru) { 5383 if (conf->worker_cnt_per_group == 0 || 5384 group == ANY_GROUP || 5385 !cpu_online(tmp->cpu) || 5386 cpu_to_group(tmp->cpu) == group) { 5387 sh = tmp; 5388 break; 5389 } 5390 } 5391 5392 if (sh) { 5393 conf->bypass_count -= conf->bypass_threshold; 5394 if (conf->bypass_count < 0) 5395 conf->bypass_count = 0; 5396 } 5397 wg = NULL; 5398 } 5399 5400 if (!sh) { 5401 if (second_try) 5402 return NULL; 5403 second_try = true; 5404 try_loprio = !try_loprio; 5405 goto again; 5406 } 5407 5408 if (wg) { 5409 wg->stripes_cnt--; 5410 sh->group = NULL; 5411 } 5412 list_del_init(&sh->lru); 5413 BUG_ON(atomic_inc_return(&sh->count) != 1); 5414 return sh; 5415 } 5416 5417 struct raid5_plug_cb { 5418 struct blk_plug_cb cb; 5419 struct list_head list; 5420 struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS]; 5421 }; 5422 5423 static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule) 5424 { 5425 struct raid5_plug_cb *cb = container_of( 5426 blk_cb, struct raid5_plug_cb, cb); 5427 struct stripe_head *sh; 5428 struct mddev *mddev = cb->cb.data; 5429 struct r5conf *conf = mddev->private; 5430 int cnt = 0; 5431 int hash; 5432 5433 if (cb->list.next && !list_empty(&cb->list)) { 5434 spin_lock_irq(&conf->device_lock); 5435 while (!list_empty(&cb->list)) { 5436 sh = list_first_entry(&cb->list, struct stripe_head, lru); 5437 list_del_init(&sh->lru); 5438 /* 5439 * avoid race release_stripe_plug() sees 5440 * STRIPE_ON_UNPLUG_LIST clear but the stripe 5441 * is still in our 
list 5442 */ 5443 smp_mb__before_atomic(); 5444 clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state); 5445 /* 5446 * STRIPE_ON_RELEASE_LIST could be set here. In that 5447 * case, the count is always > 1 here 5448 */ 5449 hash = sh->hash_lock_index; 5450 __release_stripe(conf, sh, &cb->temp_inactive_list[hash]); 5451 cnt++; 5452 } 5453 spin_unlock_irq(&conf->device_lock); 5454 } 5455 release_inactive_stripe_list(conf, cb->temp_inactive_list, 5456 NR_STRIPE_HASH_LOCKS); 5457 if (mddev->queue) 5458 trace_block_unplug(mddev->queue, cnt, !from_schedule); 5459 kfree(cb); 5460 } 5461 5462 static void release_stripe_plug(struct mddev *mddev, 5463 struct stripe_head *sh) 5464 { 5465 struct blk_plug_cb *blk_cb = blk_check_plugged( 5466 raid5_unplug, mddev, 5467 sizeof(struct raid5_plug_cb)); 5468 struct raid5_plug_cb *cb; 5469 5470 if (!blk_cb) { 5471 raid5_release_stripe(sh); 5472 return; 5473 } 5474 5475 cb = container_of(blk_cb, struct raid5_plug_cb, cb); 5476 5477 if (cb->list.next == NULL) { 5478 int i; 5479 INIT_LIST_HEAD(&cb->list); 5480 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) 5481 INIT_LIST_HEAD(cb->temp_inactive_list + i); 5482 } 5483 5484 if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)) 5485 list_add_tail(&sh->lru, &cb->list); 5486 else 5487 raid5_release_stripe(sh); 5488 } 5489 5490 static void make_discard_request(struct mddev *mddev, struct bio *bi) 5491 { 5492 struct r5conf *conf = mddev->private; 5493 sector_t logical_sector, last_sector; 5494 struct stripe_head *sh; 5495 int stripe_sectors; 5496 5497 if (mddev->reshape_position != MaxSector) 5498 /* Skip discard while reshape is happening */ 5499 return; 5500 5501 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1); 5502 last_sector = bio_end_sector(bi); 5503 5504 bi->bi_next = NULL; 5505 5506 stripe_sectors = conf->chunk_sectors * 5507 (conf->raid_disks - conf->max_degraded); 5508 logical_sector = DIV_ROUND_UP_SECTOR_T(logical_sector, 5509 stripe_sectors); 5510 sector_div(last_sector, stripe_sectors); 5511 5512 logical_sector *= conf->chunk_sectors; 5513 last_sector *= conf->chunk_sectors; 5514 5515 for (; logical_sector < last_sector; 5516 logical_sector += STRIPE_SECTORS) { 5517 DEFINE_WAIT(w); 5518 int d; 5519 again: 5520 sh = raid5_get_active_stripe(conf, logical_sector, 0, 0, 0); 5521 prepare_to_wait(&conf->wait_for_overlap, &w, 5522 TASK_UNINTERRUPTIBLE); 5523 set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); 5524 if (test_bit(STRIPE_SYNCING, &sh->state)) { 5525 raid5_release_stripe(sh); 5526 schedule(); 5527 goto again; 5528 } 5529 clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); 5530 spin_lock_irq(&sh->stripe_lock); 5531 for (d = 0; d < conf->raid_disks; d++) { 5532 if (d == sh->pd_idx || d == sh->qd_idx) 5533 continue; 5534 if (sh->dev[d].towrite || sh->dev[d].toread) { 5535 set_bit(R5_Overlap, &sh->dev[d].flags); 5536 spin_unlock_irq(&sh->stripe_lock); 5537 raid5_release_stripe(sh); 5538 schedule(); 5539 goto again; 5540 } 5541 } 5542 set_bit(STRIPE_DISCARD, &sh->state); 5543 finish_wait(&conf->wait_for_overlap, &w); 5544 sh->overwrite_disks = 0; 5545 for (d = 0; d < conf->raid_disks; d++) { 5546 if (d == sh->pd_idx || d == sh->qd_idx) 5547 continue; 5548 sh->dev[d].towrite = bi; 5549 set_bit(R5_OVERWRITE, &sh->dev[d].flags); 5550 bio_inc_remaining(bi); 5551 md_write_inc(mddev, bi); 5552 sh->overwrite_disks++; 5553 } 5554 spin_unlock_irq(&sh->stripe_lock); 5555 if (conf->mddev->bitmap) { 5556 for (d = 0; 5557 d < conf->raid_disks - conf->max_degraded; 5558 d++) 5559 
md_bitmap_startwrite(mddev->bitmap, 5560 sh->sector, 5561 STRIPE_SECTORS, 5562 0); 5563 sh->bm_seq = conf->seq_flush + 1; 5564 set_bit(STRIPE_BIT_DELAY, &sh->state); 5565 } 5566 5567 set_bit(STRIPE_HANDLE, &sh->state); 5568 clear_bit(STRIPE_DELAYED, &sh->state); 5569 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 5570 atomic_inc(&conf->preread_active_stripes); 5571 release_stripe_plug(mddev, sh); 5572 } 5573 5574 bio_endio(bi); 5575 } 5576 5577 static bool raid5_make_request(struct mddev *mddev, struct bio * bi) 5578 { 5579 struct r5conf *conf = mddev->private; 5580 int dd_idx; 5581 sector_t new_sector; 5582 sector_t logical_sector, last_sector; 5583 struct stripe_head *sh; 5584 const int rw = bio_data_dir(bi); 5585 DEFINE_WAIT(w); 5586 bool do_prepare; 5587 bool do_flush = false; 5588 5589 if (unlikely(bi->bi_opf & REQ_PREFLUSH)) { 5590 int ret = log_handle_flush_request(conf, bi); 5591 5592 if (ret == 0) 5593 return true; 5594 if (ret == -ENODEV) { 5595 if (md_flush_request(mddev, bi)) 5596 return true; 5597 } 5598 /* ret == -EAGAIN, fallback */ 5599 /* 5600 * if r5l_handle_flush_request() didn't clear REQ_PREFLUSH, 5601 * we need to flush journal device 5602 */ 5603 do_flush = bi->bi_opf & REQ_PREFLUSH; 5604 } 5605 5606 if (!md_write_start(mddev, bi)) 5607 return false; 5608 /* 5609 * If array is degraded, better not do chunk aligned read because 5610 * later we might have to read it again in order to reconstruct 5611 * data on failed drives. 5612 */ 5613 if (rw == READ && mddev->degraded == 0 && 5614 mddev->reshape_position == MaxSector) { 5615 bi = chunk_aligned_read(mddev, bi); 5616 if (!bi) 5617 return true; 5618 } 5619 5620 if (unlikely(bio_op(bi) == REQ_OP_DISCARD)) { 5621 make_discard_request(mddev, bi); 5622 md_write_end(mddev); 5623 return true; 5624 } 5625 5626 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1); 5627 last_sector = bio_end_sector(bi); 5628 bi->bi_next = NULL; 5629 5630 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); 5631 for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { 5632 int previous; 5633 int seq; 5634 5635 do_prepare = false; 5636 retry: 5637 seq = read_seqcount_begin(&conf->gen_lock); 5638 previous = 0; 5639 if (do_prepare) 5640 prepare_to_wait(&conf->wait_for_overlap, &w, 5641 TASK_UNINTERRUPTIBLE); 5642 if (unlikely(conf->reshape_progress != MaxSector)) { 5643 /* spinlock is needed as reshape_progress may be 5644 * 64bit on a 32bit platform, and so it might be 5645 * possible to see a half-updated value 5646 * Of course reshape_progress could change after 5647 * the lock is dropped, so once we get a reference 5648 * to the stripe that we think it is, we will have 5649 * to check again. 5650 */ 5651 spin_lock_irq(&conf->device_lock); 5652 if (mddev->reshape_backwards 5653 ? logical_sector < conf->reshape_progress 5654 : logical_sector >= conf->reshape_progress) { 5655 previous = 1; 5656 } else { 5657 if (mddev->reshape_backwards 5658 ? 
logical_sector < conf->reshape_safe 5659 : logical_sector >= conf->reshape_safe) { 5660 spin_unlock_irq(&conf->device_lock); 5661 schedule(); 5662 do_prepare = true; 5663 goto retry; 5664 } 5665 } 5666 spin_unlock_irq(&conf->device_lock); 5667 } 5668 5669 new_sector = raid5_compute_sector(conf, logical_sector, 5670 previous, 5671 &dd_idx, NULL); 5672 pr_debug("raid456: raid5_make_request, sector %llu logical %llu\n", 5673 (unsigned long long)new_sector, 5674 (unsigned long long)logical_sector); 5675 5676 sh = raid5_get_active_stripe(conf, new_sector, previous, 5677 (bi->bi_opf & REQ_RAHEAD), 0); 5678 if (sh) { 5679 if (unlikely(previous)) { 5680 /* expansion might have moved on while waiting for a 5681 * stripe, so we must do the range check again. 5682 * Expansion could still move past after this 5683 * test, but as we are holding a reference to 5684 * 'sh', we know that if that happens, 5685 * STRIPE_EXPANDING will get set and the expansion 5686 * won't proceed until we finish with the stripe. 5687 */ 5688 int must_retry = 0; 5689 spin_lock_irq(&conf->device_lock); 5690 if (mddev->reshape_backwards 5691 ? logical_sector >= conf->reshape_progress 5692 : logical_sector < conf->reshape_progress) 5693 /* mismatch, need to try again */ 5694 must_retry = 1; 5695 spin_unlock_irq(&conf->device_lock); 5696 if (must_retry) { 5697 raid5_release_stripe(sh); 5698 schedule(); 5699 do_prepare = true; 5700 goto retry; 5701 } 5702 } 5703 if (read_seqcount_retry(&conf->gen_lock, seq)) { 5704 /* Might have got the wrong stripe_head 5705 * by accident 5706 */ 5707 raid5_release_stripe(sh); 5708 goto retry; 5709 } 5710 5711 if (test_bit(STRIPE_EXPANDING, &sh->state) || 5712 !add_stripe_bio(sh, bi, dd_idx, rw, previous)) { 5713 /* Stripe is busy expanding or 5714 * add failed due to overlap. Flush everything 5715 * and wait a while 5716 */ 5717 md_wakeup_thread(mddev->thread); 5718 raid5_release_stripe(sh); 5719 schedule(); 5720 do_prepare = true; 5721 goto retry; 5722 } 5723 if (do_flush) { 5724 set_bit(STRIPE_R5C_PREFLUSH, &sh->state); 5725 /* we only need flush for one stripe */ 5726 do_flush = false; 5727 } 5728 5729 if (!sh->batch_head || sh == sh->batch_head) 5730 set_bit(STRIPE_HANDLE, &sh->state); 5731 clear_bit(STRIPE_DELAYED, &sh->state); 5732 if ((!sh->batch_head || sh == sh->batch_head) && 5733 (bi->bi_opf & REQ_SYNC) && 5734 !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 5735 atomic_inc(&conf->preread_active_stripes); 5736 release_stripe_plug(mddev, sh); 5737 } else { 5738 /* cannot get stripe for read-ahead, just give-up */ 5739 bi->bi_status = BLK_STS_IOERR; 5740 break; 5741 } 5742 } 5743 finish_wait(&conf->wait_for_overlap, &w); 5744 5745 if (rw == WRITE) 5746 md_write_end(mddev); 5747 bio_endio(bi); 5748 return true; 5749 } 5750 5751 static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks); 5752 5753 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped) 5754 { 5755 /* reshaping is quite different to recovery/resync so it is 5756 * handled quite separately ... here. 5757 * 5758 * On each call to sync_request, we gather one chunk worth of 5759 * destination stripes and flag them as expanding. 5760 * Then we find all the source stripes and request reads. 5761 * As the reads complete, handle_stripe will copy the data 5762 * into the destination stripe and release that stripe. 
5763 */ 5764 struct r5conf *conf = mddev->private; 5765 struct stripe_head *sh; 5766 struct md_rdev *rdev; 5767 sector_t first_sector, last_sector; 5768 int raid_disks = conf->previous_raid_disks; 5769 int data_disks = raid_disks - conf->max_degraded; 5770 int new_data_disks = conf->raid_disks - conf->max_degraded; 5771 int i; 5772 int dd_idx; 5773 sector_t writepos, readpos, safepos; 5774 sector_t stripe_addr; 5775 int reshape_sectors; 5776 struct list_head stripes; 5777 sector_t retn; 5778 5779 if (sector_nr == 0) { 5780 /* If restarting in the middle, skip the initial sectors */ 5781 if (mddev->reshape_backwards && 5782 conf->reshape_progress < raid5_size(mddev, 0, 0)) { 5783 sector_nr = raid5_size(mddev, 0, 0) 5784 - conf->reshape_progress; 5785 } else if (mddev->reshape_backwards && 5786 conf->reshape_progress == MaxSector) { 5787 /* shouldn't happen, but just in case, finish up.*/ 5788 sector_nr = MaxSector; 5789 } else if (!mddev->reshape_backwards && 5790 conf->reshape_progress > 0) 5791 sector_nr = conf->reshape_progress; 5792 sector_div(sector_nr, new_data_disks); 5793 if (sector_nr) { 5794 mddev->curr_resync_completed = sector_nr; 5795 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 5796 *skipped = 1; 5797 retn = sector_nr; 5798 goto finish; 5799 } 5800 } 5801 5802 /* We need to process a full chunk at a time. 5803 * If old and new chunk sizes differ, we need to process the 5804 * largest of these 5805 */ 5806 5807 reshape_sectors = max(conf->chunk_sectors, conf->prev_chunk_sectors); 5808 5809 /* We update the metadata at least every 10 seconds, or when 5810 * the data about to be copied would over-write the source of 5811 * the data at the front of the range. i.e. one new_stripe 5812 * along from reshape_progress new_maps to after where 5813 * reshape_safe old_maps to 5814 */ 5815 writepos = conf->reshape_progress; 5816 sector_div(writepos, new_data_disks); 5817 readpos = conf->reshape_progress; 5818 sector_div(readpos, data_disks); 5819 safepos = conf->reshape_safe; 5820 sector_div(safepos, data_disks); 5821 if (mddev->reshape_backwards) { 5822 BUG_ON(writepos < reshape_sectors); 5823 writepos -= reshape_sectors; 5824 readpos += reshape_sectors; 5825 safepos += reshape_sectors; 5826 } else { 5827 writepos += reshape_sectors; 5828 /* readpos and safepos are worst-case calculations. 5829 * A negative number is overly pessimistic, and causes 5830 * obvious problems for unsigned storage. So clip to 0. 5831 */ 5832 readpos -= min_t(sector_t, reshape_sectors, readpos); 5833 safepos -= min_t(sector_t, reshape_sectors, safepos); 5834 } 5835 5836 /* Having calculated the 'writepos' possibly use it 5837 * to set 'stripe_addr' which is where we will write to. 5838 */ 5839 if (mddev->reshape_backwards) { 5840 BUG_ON(conf->reshape_progress == 0); 5841 stripe_addr = writepos; 5842 BUG_ON((mddev->dev_sectors & 5843 ~((sector_t)reshape_sectors - 1)) 5844 - reshape_sectors - stripe_addr 5845 != sector_nr); 5846 } else { 5847 BUG_ON(writepos != sector_nr + reshape_sectors); 5848 stripe_addr = sector_nr; 5849 } 5850 5851 /* 'writepos' is the most advanced device address we might write. 5852 * 'readpos' is the least advanced device address we might read. 5853 * 'safepos' is the least address recorded in the metadata as having 5854 * been reshaped. 5855 * If there is a min_offset_diff, these are adjusted either by 5856 * increasing the safepos/readpos if diff is negative, or 5857 * increasing writepos if diff is positive. 
5858 * If 'readpos' is then behind 'writepos', there is no way that we can 5859 * ensure safety in the face of a crash - that must be done by userspace 5860 * making a backup of the data. So in that case there is no particular 5861 * rush to update metadata. 5862 * Otherwise if 'safepos' is behind 'writepos', then we really need to 5863 * update the metadata to advance 'safepos' to match 'readpos' so that 5864 * we can be safe in the event of a crash. 5865 * So we insist on updating metadata if safepos is behind writepos and 5866 * readpos is beyond writepos. 5867 * In any case, update the metadata every 10 seconds. 5868 * Maybe that number should be configurable, but I'm not sure it is 5869 * worth it.... maybe it could be a multiple of safemode_delay??? 5870 */ 5871 if (conf->min_offset_diff < 0) { 5872 safepos += -conf->min_offset_diff; 5873 readpos += -conf->min_offset_diff; 5874 } else 5875 writepos += conf->min_offset_diff; 5876 5877 if ((mddev->reshape_backwards 5878 ? (safepos > writepos && readpos < writepos) 5879 : (safepos < writepos && readpos > writepos)) || 5880 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { 5881 /* Cannot proceed until we've updated the superblock... */ 5882 wait_event(conf->wait_for_overlap, 5883 atomic_read(&conf->reshape_stripes)==0 5884 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); 5885 if (atomic_read(&conf->reshape_stripes) != 0) 5886 return 0; 5887 mddev->reshape_position = conf->reshape_progress; 5888 mddev->curr_resync_completed = sector_nr; 5889 if (!mddev->reshape_backwards) 5890 /* Can update recovery_offset */ 5891 rdev_for_each(rdev, mddev) 5892 if (rdev->raid_disk >= 0 && 5893 !test_bit(Journal, &rdev->flags) && 5894 !test_bit(In_sync, &rdev->flags) && 5895 rdev->recovery_offset < sector_nr) 5896 rdev->recovery_offset = sector_nr; 5897 5898 conf->reshape_checkpoint = jiffies; 5899 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 5900 md_wakeup_thread(mddev->thread); 5901 wait_event(mddev->sb_wait, mddev->sb_flags == 0 || 5902 test_bit(MD_RECOVERY_INTR, &mddev->recovery)); 5903 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 5904 return 0; 5905 spin_lock_irq(&conf->device_lock); 5906 conf->reshape_safe = mddev->reshape_position; 5907 spin_unlock_irq(&conf->device_lock); 5908 wake_up(&conf->wait_for_overlap); 5909 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 5910 } 5911 5912 INIT_LIST_HEAD(&stripes); 5913 for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) { 5914 int j; 5915 int skipped_disk = 0; 5916 sh = raid5_get_active_stripe(conf, stripe_addr+i, 0, 0, 1); 5917 set_bit(STRIPE_EXPANDING, &sh->state); 5918 atomic_inc(&conf->reshape_stripes); 5919 /* If any of this stripe is beyond the end of the old 5920 * array, then we need to zero those blocks 5921 */ 5922 for (j=sh->disks; j--;) { 5923 sector_t s; 5924 if (j == sh->pd_idx) 5925 continue; 5926 if (conf->level == 6 && 5927 j == sh->qd_idx) 5928 continue; 5929 s = raid5_compute_blocknr(sh, j, 0); 5930 if (s < raid5_size(mddev, 0, 0)) { 5931 skipped_disk = 1; 5932 continue; 5933 } 5934 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE); 5935 set_bit(R5_Expanded, &sh->dev[j].flags); 5936 set_bit(R5_UPTODATE, &sh->dev[j].flags); 5937 } 5938 if (!skipped_disk) { 5939 set_bit(STRIPE_EXPAND_READY, &sh->state); 5940 set_bit(STRIPE_HANDLE, &sh->state); 5941 } 5942 list_add(&sh->lru, &stripes); 5943 } 5944 spin_lock_irq(&conf->device_lock); 5945 if (mddev->reshape_backwards) 5946 conf->reshape_progress -= reshape_sectors * new_data_disks; 5947 else 5948 conf->reshape_progress 
+= reshape_sectors * new_data_disks; 5949 spin_unlock_irq(&conf->device_lock); 5950 /* Ok, those stripes are ready. We can start scheduling 5951 * reads on the source stripes. 5952 * The source stripes are determined by mapping the first and last 5953 * block on the destination stripes. 5954 */ 5955 first_sector = 5956 raid5_compute_sector(conf, stripe_addr*(new_data_disks), 5957 1, &dd_idx, NULL); 5958 last_sector = 5959 raid5_compute_sector(conf, ((stripe_addr+reshape_sectors) 5960 * new_data_disks - 1), 5961 1, &dd_idx, NULL); 5962 if (last_sector >= mddev->dev_sectors) 5963 last_sector = mddev->dev_sectors - 1; 5964 while (first_sector <= last_sector) { 5965 sh = raid5_get_active_stripe(conf, first_sector, 1, 0, 1); 5966 set_bit(STRIPE_EXPAND_SOURCE, &sh->state); 5967 set_bit(STRIPE_HANDLE, &sh->state); 5968 raid5_release_stripe(sh); 5969 first_sector += STRIPE_SECTORS; 5970 } 5971 /* Now that the sources are clearly marked, we can release 5972 * the destination stripes 5973 */ 5974 while (!list_empty(&stripes)) { 5975 sh = list_entry(stripes.next, struct stripe_head, lru); 5976 list_del_init(&sh->lru); 5977 raid5_release_stripe(sh); 5978 } 5979 /* If this takes us to the resync_max point where we have to pause, 5980 * then we need to write out the superblock. 5981 */ 5982 sector_nr += reshape_sectors; 5983 retn = reshape_sectors; 5984 finish: 5985 if (mddev->curr_resync_completed > mddev->resync_max || 5986 (sector_nr - mddev->curr_resync_completed) * 2 5987 >= mddev->resync_max - mddev->curr_resync_completed) { 5988 /* Cannot proceed until we've updated the superblock... */ 5989 wait_event(conf->wait_for_overlap, 5990 atomic_read(&conf->reshape_stripes) == 0 5991 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); 5992 if (atomic_read(&conf->reshape_stripes) != 0) 5993 goto ret; 5994 mddev->reshape_position = conf->reshape_progress; 5995 mddev->curr_resync_completed = sector_nr; 5996 if (!mddev->reshape_backwards) 5997 /* Can update recovery_offset */ 5998 rdev_for_each(rdev, mddev) 5999 if (rdev->raid_disk >= 0 && 6000 !test_bit(Journal, &rdev->flags) && 6001 !test_bit(In_sync, &rdev->flags) && 6002 rdev->recovery_offset < sector_nr) 6003 rdev->recovery_offset = sector_nr; 6004 conf->reshape_checkpoint = jiffies; 6005 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 6006 md_wakeup_thread(mddev->thread); 6007 wait_event(mddev->sb_wait, 6008 !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) 6009 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); 6010 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 6011 goto ret; 6012 spin_lock_irq(&conf->device_lock); 6013 conf->reshape_safe = mddev->reshape_position; 6014 spin_unlock_irq(&conf->device_lock); 6015 wake_up(&conf->wait_for_overlap); 6016 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 6017 } 6018 ret: 6019 return retn; 6020 } 6021
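/*
 * Illustrative worked example for the checkpoint logic in reshape_request()
 * above (the numbers are invented for the example, not taken from any real
 * array): growing a 4-disk RAID5 to 5 disks with 512KiB chunks gives
 * data_disks == 3, new_data_disks == 4 and reshape_sectors == 1024.
 * With reshape_progress == 49152, reshape_safe == 40960 and
 * min_offset_diff == 0 on a forwards reshape:
 *
 *	writepos = 49152 / 4 + 1024 = 13312
 *	readpos  = 49152 / 3 - 1024 = 15360
 *	safepos  = 40960 / 3 - 1024 = 12629
 *
 * safepos < writepos and readpos > writepos, so the superblock is written
 * out and reshape_safe is advanced to reshape_position before any more
 * destination stripes are gathered.
 */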
6022 static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_nr, 6023 int *skipped) 6024 { 6025 struct r5conf *conf = mddev->private; 6026 struct stripe_head *sh; 6027 sector_t max_sector = mddev->dev_sectors; 6028 sector_t sync_blocks; 6029 int still_degraded = 0; 6030 int i; 6031 6032 if (sector_nr >= max_sector) { 6033 /* just being told to finish up .. nothing much to do */ 6034 6035 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { 6036 end_reshape(conf); 6037 return 0; 6038 } 6039 6040 if (mddev->curr_resync < max_sector) /* aborted */ 6041 md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync, 6042 &sync_blocks, 1); 6043 else /* completed sync */ 6044 conf->fullsync = 0; 6045 md_bitmap_close_sync(mddev->bitmap); 6046 6047 return 0; 6048 } 6049 6050 /* Allow raid5_quiesce to complete */ 6051 wait_event(conf->wait_for_overlap, conf->quiesce != 2); 6052 6053 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 6054 return reshape_request(mddev, sector_nr, skipped); 6055 6056 /* No need to check resync_max as we never do more than one 6057 * stripe, and as resync_max will always be on a chunk boundary, 6058 * if the check in md_do_sync didn't fire, there is no chance 6059 * of overstepping resync_max here 6060 */ 6061 6062 /* if there are too many failed drives and we are trying 6063 * to resync, then assert that we are finished, because there is 6064 * nothing we can do. 6065 */ 6066 if (mddev->degraded >= conf->max_degraded && 6067 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 6068 sector_t rv = mddev->dev_sectors - sector_nr; 6069 *skipped = 1; 6070 return rv; 6071 } 6072 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && 6073 !conf->fullsync && 6074 !md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && 6075 sync_blocks >= STRIPE_SECTORS) { 6076 /* we can skip this block, and probably more */ 6077 sync_blocks /= STRIPE_SECTORS; 6078 *skipped = 1; 6079 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */ 6080 } 6081 6082 md_bitmap_cond_end_sync(mddev->bitmap, sector_nr, false); 6083 6084 sh = raid5_get_active_stripe(conf, sector_nr, 0, 1, 0); 6085 if (sh == NULL) { 6086 sh = raid5_get_active_stripe(conf, sector_nr, 0, 0, 0); 6087 /* make sure we don't swamp the stripe cache if someone else 6088 * is trying to get access 6089 */ 6090 schedule_timeout_uninterruptible(1); 6091 } 6092 /* Need to check if array will still be degraded after recovery/resync 6093 * Note in case of > 1 drive failures it's possible we're rebuilding 6094 * one drive while leaving another faulty drive in the array. 6095 */ 6096 rcu_read_lock(); 6097 for (i = 0; i < conf->raid_disks; i++) { 6098 struct md_rdev *rdev = READ_ONCE(conf->disks[i].rdev); 6099 6100 if (rdev == NULL || test_bit(Faulty, &rdev->flags)) 6101 still_degraded = 1; 6102 } 6103 rcu_read_unlock(); 6104 6105 md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); 6106 6107 set_bit(STRIPE_SYNC_REQUESTED, &sh->state); 6108 set_bit(STRIPE_HANDLE, &sh->state); 6109 6110 raid5_release_stripe(sh); 6111 6112 return STRIPE_SECTORS; 6113 } 6114 6115 static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio, 6116 unsigned int offset) 6117 { 6118 /* We may not be able to submit a whole bio at once as there 6119 * may not be enough stripe_heads available. 6120 * We cannot pre-allocate enough stripe_heads as we may need 6121 * more than exist in the cache (if we allow ever larger chunks). 6122 * So we do one stripe head at a time and record in 6123 * ->bi_hw_segments how many have been done. 6124 * 6125 * We *know* that this entire raid_bio is in one chunk, so 6126 * there will be only one 'dd_idx' and we only need one call to raid5_compute_sector. 
6127 */ 6128 struct stripe_head *sh; 6129 int dd_idx; 6130 sector_t sector, logical_sector, last_sector; 6131 int scnt = 0; 6132 int handled = 0; 6133 6134 logical_sector = raid_bio->bi_iter.bi_sector & 6135 ~((sector_t)STRIPE_SECTORS-1); 6136 sector = raid5_compute_sector(conf, logical_sector, 6137 0, &dd_idx, NULL); 6138 last_sector = bio_end_sector(raid_bio); 6139 6140 for (; logical_sector < last_sector; 6141 logical_sector += STRIPE_SECTORS, 6142 sector += STRIPE_SECTORS, 6143 scnt++) { 6144 6145 if (scnt < offset) 6146 /* already done this stripe */ 6147 continue; 6148 6149 sh = raid5_get_active_stripe(conf, sector, 0, 1, 1); 6150 6151 if (!sh) { 6152 /* failed to get a stripe - must wait */ 6153 conf->retry_read_aligned = raid_bio; 6154 conf->retry_read_offset = scnt; 6155 return handled; 6156 } 6157 6158 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) { 6159 raid5_release_stripe(sh); 6160 conf->retry_read_aligned = raid_bio; 6161 conf->retry_read_offset = scnt; 6162 return handled; 6163 } 6164 6165 set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags); 6166 handle_stripe(sh); 6167 raid5_release_stripe(sh); 6168 handled++; 6169 } 6170 6171 bio_endio(raid_bio); 6172 6173 if (atomic_dec_and_test(&conf->active_aligned_reads)) 6174 wake_up(&conf->wait_for_quiescent); 6175 return handled; 6176 } 6177 6178 static int handle_active_stripes(struct r5conf *conf, int group, 6179 struct r5worker *worker, 6180 struct list_head *temp_inactive_list) 6181 __releases(&conf->device_lock) 6182 __acquires(&conf->device_lock) 6183 { 6184 struct stripe_head *batch[MAX_STRIPE_BATCH], *sh; 6185 int i, batch_size = 0, hash; 6186 bool release_inactive = false; 6187 6188 while (batch_size < MAX_STRIPE_BATCH && 6189 (sh = __get_priority_stripe(conf, group)) != NULL) 6190 batch[batch_size++] = sh; 6191 6192 if (batch_size == 0) { 6193 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) 6194 if (!list_empty(temp_inactive_list + i)) 6195 break; 6196 if (i == NR_STRIPE_HASH_LOCKS) { 6197 spin_unlock_irq(&conf->device_lock); 6198 log_flush_stripe_to_raid(conf); 6199 spin_lock_irq(&conf->device_lock); 6200 return batch_size; 6201 } 6202 release_inactive = true; 6203 } 6204 spin_unlock_irq(&conf->device_lock); 6205 6206 release_inactive_stripe_list(conf, temp_inactive_list, 6207 NR_STRIPE_HASH_LOCKS); 6208 6209 r5l_flush_stripe_to_raid(conf->log); 6210 if (release_inactive) { 6211 spin_lock_irq(&conf->device_lock); 6212 return 0; 6213 } 6214 6215 for (i = 0; i < batch_size; i++) 6216 handle_stripe(batch[i]); 6217 log_write_stripe_run(conf); 6218 6219 cond_resched(); 6220 6221 spin_lock_irq(&conf->device_lock); 6222 for (i = 0; i < batch_size; i++) { 6223 hash = batch[i]->hash_lock_index; 6224 __release_stripe(conf, batch[i], &temp_inactive_list[hash]); 6225 } 6226 return batch_size; 6227 } 6228 6229 static void raid5_do_work(struct work_struct *work) 6230 { 6231 struct r5worker *worker = container_of(work, struct r5worker, work); 6232 struct r5worker_group *group = worker->group; 6233 struct r5conf *conf = group->conf; 6234 struct mddev *mddev = conf->mddev; 6235 int group_id = group - conf->worker_groups; 6236 int handled; 6237 struct blk_plug plug; 6238 6239 pr_debug("+++ raid5worker active\n"); 6240 6241 blk_start_plug(&plug); 6242 handled = 0; 6243 spin_lock_irq(&conf->device_lock); 6244 while (1) { 6245 int batch_size, released; 6246 6247 released = release_stripe_list(conf, worker->temp_inactive_list); 6248 6249 batch_size = handle_active_stripes(conf, group_id, worker, 6250 worker->temp_inactive_list); 6251 worker->working 
= false; 6252 if (!batch_size && !released) 6253 break; 6254 handled += batch_size; 6255 wait_event_lock_irq(mddev->sb_wait, 6256 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags), 6257 conf->device_lock); 6258 } 6259 pr_debug("%d stripes handled\n", handled); 6260 6261 spin_unlock_irq(&conf->device_lock); 6262 6263 flush_deferred_bios(conf); 6264 6265 r5l_flush_stripe_to_raid(conf->log); 6266 6267 async_tx_issue_pending_all(); 6268 blk_finish_plug(&plug); 6269 6270 pr_debug("--- raid5worker inactive\n"); 6271 } 6272 6273 /* 6274 * This is our raid5 kernel thread. 6275 * 6276 * We scan the hash table for stripes which can be handled now. 6277 * During the scan, completed stripes are saved for us by the interrupt 6278 * handler, so that they will not have to wait for our next wakeup. 6279 */ 6280 static void raid5d(struct md_thread *thread) 6281 { 6282 struct mddev *mddev = thread->mddev; 6283 struct r5conf *conf = mddev->private; 6284 int handled; 6285 struct blk_plug plug; 6286 6287 pr_debug("+++ raid5d active\n"); 6288 6289 md_check_recovery(mddev); 6290 6291 blk_start_plug(&plug); 6292 handled = 0; 6293 spin_lock_irq(&conf->device_lock); 6294 while (1) { 6295 struct bio *bio; 6296 int batch_size, released; 6297 unsigned int offset; 6298 6299 released = release_stripe_list(conf, conf->temp_inactive_list); 6300 if (released) 6301 clear_bit(R5_DID_ALLOC, &conf->cache_state); 6302 6303 if ( 6304 !list_empty(&conf->bitmap_list)) { 6305 /* Now is a good time to flush some bitmap updates */ 6306 conf->seq_flush++; 6307 spin_unlock_irq(&conf->device_lock); 6308 md_bitmap_unplug(mddev->bitmap); 6309 spin_lock_irq(&conf->device_lock); 6310 conf->seq_write = conf->seq_flush; 6311 activate_bit_delay(conf, conf->temp_inactive_list); 6312 } 6313 raid5_activate_delayed(conf); 6314 6315 while ((bio = remove_bio_from_retry(conf, &offset))) { 6316 int ok; 6317 spin_unlock_irq(&conf->device_lock); 6318 ok = retry_aligned_read(conf, bio, offset); 6319 spin_lock_irq(&conf->device_lock); 6320 if (!ok) 6321 break; 6322 handled++; 6323 } 6324 6325 batch_size = handle_active_stripes(conf, ANY_GROUP, NULL, 6326 conf->temp_inactive_list); 6327 if (!batch_size && !released) 6328 break; 6329 handled += batch_size; 6330 6331 if (mddev->sb_flags & ~(1 << MD_SB_CHANGE_PENDING)) { 6332 spin_unlock_irq(&conf->device_lock); 6333 md_check_recovery(mddev); 6334 spin_lock_irq(&conf->device_lock); 6335 } 6336 } 6337 pr_debug("%d stripes handled\n", handled); 6338 6339 spin_unlock_irq(&conf->device_lock); 6340 if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) && 6341 mutex_trylock(&conf->cache_size_mutex)) { 6342 grow_one_stripe(conf, __GFP_NOWARN); 6343 /* Set flag even if allocation failed. 
This helps 6344 * slow down allocation requests when mem is short 6345 */ 6346 set_bit(R5_DID_ALLOC, &conf->cache_state); 6347 mutex_unlock(&conf->cache_size_mutex); 6348 } 6349 6350 flush_deferred_bios(conf); 6351 6352 r5l_flush_stripe_to_raid(conf->log); 6353 6354 async_tx_issue_pending_all(); 6355 blk_finish_plug(&plug); 6356 6357 pr_debug("--- raid5d inactive\n"); 6358 } 6359 6360 static ssize_t 6361 raid5_show_stripe_cache_size(struct mddev *mddev, char *page) 6362 { 6363 struct r5conf *conf; 6364 int ret = 0; 6365 spin_lock(&mddev->lock); 6366 conf = mddev->private; 6367 if (conf) 6368 ret = sprintf(page, "%d\n", conf->min_nr_stripes); 6369 spin_unlock(&mddev->lock); 6370 return ret; 6371 } 6372 6373 int 6374 raid5_set_cache_size(struct mddev *mddev, int size) 6375 { 6376 int result = 0; 6377 struct r5conf *conf = mddev->private; 6378 6379 if (size <= 16 || size > 32768) 6380 return -EINVAL; 6381 6382 conf->min_nr_stripes = size; 6383 mutex_lock(&conf->cache_size_mutex); 6384 while (size < conf->max_nr_stripes && 6385 drop_one_stripe(conf)) 6386 ; 6387 mutex_unlock(&conf->cache_size_mutex); 6388 6389 md_allow_write(mddev); 6390 6391 mutex_lock(&conf->cache_size_mutex); 6392 while (size > conf->max_nr_stripes) 6393 if (!grow_one_stripe(conf, GFP_KERNEL)) { 6394 conf->min_nr_stripes = conf->max_nr_stripes; 6395 result = -ENOMEM; 6396 break; 6397 } 6398 mutex_unlock(&conf->cache_size_mutex); 6399 6400 return result; 6401 } 6402 EXPORT_SYMBOL(raid5_set_cache_size); 6403 6404 static ssize_t 6405 raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len) 6406 { 6407 struct r5conf *conf; 6408 unsigned long new; 6409 int err; 6410 6411 if (len >= PAGE_SIZE) 6412 return -EINVAL; 6413 if (kstrtoul(page, 10, &new)) 6414 return -EINVAL; 6415 err = mddev_lock(mddev); 6416 if (err) 6417 return err; 6418 conf = mddev->private; 6419 if (!conf) 6420 err = -ENODEV; 6421 else 6422 err = raid5_set_cache_size(mddev, new); 6423 mddev_unlock(mddev); 6424 6425 return err ?: len; 6426 } 6427 6428 static struct md_sysfs_entry 6429 raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR, 6430 raid5_show_stripe_cache_size, 6431 raid5_store_stripe_cache_size); 6432 6433 static ssize_t 6434 raid5_show_rmw_level(struct mddev *mddev, char *page) 6435 { 6436 struct r5conf *conf = mddev->private; 6437 if (conf) 6438 return sprintf(page, "%d\n", conf->rmw_level); 6439 else 6440 return 0; 6441 } 6442 6443 static ssize_t 6444 raid5_store_rmw_level(struct mddev *mddev, const char *page, size_t len) 6445 { 6446 struct r5conf *conf = mddev->private; 6447 unsigned long new; 6448 6449 if (!conf) 6450 return -ENODEV; 6451 6452 if (len >= PAGE_SIZE) 6453 return -EINVAL; 6454 6455 if (kstrtoul(page, 10, &new)) 6456 return -EINVAL; 6457 6458 if (new != PARITY_DISABLE_RMW && !raid6_call.xor_syndrome) 6459 return -EINVAL; 6460 6461 if (new != PARITY_DISABLE_RMW && 6462 new != PARITY_ENABLE_RMW && 6463 new != PARITY_PREFER_RMW) 6464 return -EINVAL; 6465 6466 conf->rmw_level = new; 6467 return len; 6468 } 6469 6470 static struct md_sysfs_entry 6471 raid5_rmw_level = __ATTR(rmw_level, S_IRUGO | S_IWUSR, 6472 raid5_show_rmw_level, 6473 raid5_store_rmw_level); 6474 6475 6476 static ssize_t 6477 raid5_show_preread_threshold(struct mddev *mddev, char *page) 6478 { 6479 struct r5conf *conf; 6480 int ret = 0; 6481 spin_lock(&mddev->lock); 6482 conf = mddev->private; 6483 if (conf) 6484 ret = sprintf(page, "%d\n", conf->bypass_threshold); 6485 spin_unlock(&mddev->lock); 6486 return ret; 6487 } 6488 6489 
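/*
 * Illustrative sketch only (not part of the driver): the attributes defined
 * in this file appear under /sys/block/<md device>/md/.  Assuming an array
 * named md0, a userspace tool built with <fcntl.h> and <unistd.h> could grow
 * the stripe cache roughly like this; raid5_set_cache_size() above accepts
 * values from 17 to 32768:
 *
 *	int fd = open("/sys/block/md0/md/stripe_cache_size", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "4096\n", 5);	// raises conf->min_nr_stripes and grows the cache
 *		close(fd);
 *	}
 *
 * preread_bypass_threshold is written the same way, but its store method
 * below rejects values larger than the current stripe_cache_size.
 */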
static ssize_t 6490 raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len) 6491 { 6492 struct r5conf *conf; 6493 unsigned long new; 6494 int err; 6495 6496 if (len >= PAGE_SIZE) 6497 return -EINVAL; 6498 if (kstrtoul(page, 10, &new)) 6499 return -EINVAL; 6500 6501 err = mddev_lock(mddev); 6502 if (err) 6503 return err; 6504 conf = mddev->private; 6505 if (!conf) 6506 err = -ENODEV; 6507 else if (new > conf->min_nr_stripes) 6508 err = -EINVAL; 6509 else 6510 conf->bypass_threshold = new; 6511 mddev_unlock(mddev); 6512 return err ?: len; 6513 } 6514 6515 static struct md_sysfs_entry 6516 raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold, 6517 S_IRUGO | S_IWUSR, 6518 raid5_show_preread_threshold, 6519 raid5_store_preread_threshold); 6520 6521 static ssize_t 6522 raid5_show_skip_copy(struct mddev *mddev, char *page) 6523 { 6524 struct r5conf *conf; 6525 int ret = 0; 6526 spin_lock(&mddev->lock); 6527 conf = mddev->private; 6528 if (conf) 6529 ret = sprintf(page, "%d\n", conf->skip_copy); 6530 spin_unlock(&mddev->lock); 6531 return ret; 6532 } 6533 6534 static ssize_t 6535 raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len) 6536 { 6537 struct r5conf *conf; 6538 unsigned long new; 6539 int err; 6540 6541 if (len >= PAGE_SIZE) 6542 return -EINVAL; 6543 if (kstrtoul(page, 10, &new)) 6544 return -EINVAL; 6545 new = !!new; 6546 6547 err = mddev_lock(mddev); 6548 if (err) 6549 return err; 6550 conf = mddev->private; 6551 if (!conf) 6552 err = -ENODEV; 6553 else if (new != conf->skip_copy) { 6554 mddev_suspend(mddev); 6555 conf->skip_copy = new; 6556 if (new) 6557 mddev->queue->backing_dev_info->capabilities |= 6558 BDI_CAP_STABLE_WRITES; 6559 else 6560 mddev->queue->backing_dev_info->capabilities &= 6561 ~BDI_CAP_STABLE_WRITES; 6562 mddev_resume(mddev); 6563 } 6564 mddev_unlock(mddev); 6565 return err ?: len; 6566 } 6567 6568 static struct md_sysfs_entry 6569 raid5_skip_copy = __ATTR(skip_copy, S_IRUGO | S_IWUSR, 6570 raid5_show_skip_copy, 6571 raid5_store_skip_copy); 6572 6573 static ssize_t 6574 stripe_cache_active_show(struct mddev *mddev, char *page) 6575 { 6576 struct r5conf *conf = mddev->private; 6577 if (conf) 6578 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); 6579 else 6580 return 0; 6581 } 6582 6583 static struct md_sysfs_entry 6584 raid5_stripecache_active = __ATTR_RO(stripe_cache_active); 6585 6586 static ssize_t 6587 raid5_show_group_thread_cnt(struct mddev *mddev, char *page) 6588 { 6589 struct r5conf *conf; 6590 int ret = 0; 6591 spin_lock(&mddev->lock); 6592 conf = mddev->private; 6593 if (conf) 6594 ret = sprintf(page, "%d\n", conf->worker_cnt_per_group); 6595 spin_unlock(&mddev->lock); 6596 return ret; 6597 } 6598 6599 static int alloc_thread_groups(struct r5conf *conf, int cnt, 6600 int *group_cnt, 6601 struct r5worker_group **worker_groups); 6602 static ssize_t 6603 raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) 6604 { 6605 struct r5conf *conf; 6606 unsigned int new; 6607 int err; 6608 struct r5worker_group *new_groups, *old_groups; 6609 int group_cnt; 6610 6611 if (len >= PAGE_SIZE) 6612 return -EINVAL; 6613 if (kstrtouint(page, 10, &new)) 6614 return -EINVAL; 6615 /* 8192 should be big enough */ 6616 if (new > 8192) 6617 return -EINVAL; 6618 6619 err = mddev_lock(mddev); 6620 if (err) 6621 return err; 6622 conf = mddev->private; 6623 if (!conf) 6624 err = -ENODEV; 6625 else if (new != conf->worker_cnt_per_group) { 6626 mddev_suspend(mddev); 6627 6628 old_groups = 
conf->worker_groups; 6629 if (old_groups) 6630 flush_workqueue(raid5_wq); 6631 6632 err = alloc_thread_groups(conf, new, &group_cnt, &new_groups); 6633 if (!err) { 6634 spin_lock_irq(&conf->device_lock); 6635 conf->group_cnt = group_cnt; 6636 conf->worker_cnt_per_group = new; 6637 conf->worker_groups = new_groups; 6638 spin_unlock_irq(&conf->device_lock); 6639 6640 if (old_groups) 6641 kfree(old_groups[0].workers); 6642 kfree(old_groups); 6643 } 6644 mddev_resume(mddev); 6645 } 6646 mddev_unlock(mddev); 6647 6648 return err ?: len; 6649 } 6650 6651 static struct md_sysfs_entry 6652 raid5_group_thread_cnt = __ATTR(group_thread_cnt, S_IRUGO | S_IWUSR, 6653 raid5_show_group_thread_cnt, 6654 raid5_store_group_thread_cnt); 6655 6656 static struct attribute *raid5_attrs[] = { 6657 &raid5_stripecache_size.attr, 6658 &raid5_stripecache_active.attr, 6659 &raid5_preread_bypass_threshold.attr, 6660 &raid5_group_thread_cnt.attr, 6661 &raid5_skip_copy.attr, 6662 &raid5_rmw_level.attr, 6663 &r5c_journal_mode.attr, 6664 &ppl_write_hint.attr, 6665 NULL, 6666 }; 6667 static struct attribute_group raid5_attrs_group = { 6668 .name = NULL, 6669 .attrs = raid5_attrs, 6670 }; 6671 6672 static int alloc_thread_groups(struct r5conf *conf, int cnt, int *group_cnt, 6673 struct r5worker_group **worker_groups) 6674 { 6675 int i, j, k; 6676 ssize_t size; 6677 struct r5worker *workers; 6678 6679 if (cnt == 0) { 6680 *group_cnt = 0; 6681 *worker_groups = NULL; 6682 return 0; 6683 } 6684 *group_cnt = num_possible_nodes(); 6685 size = sizeof(struct r5worker) * cnt; 6686 workers = kcalloc(size, *group_cnt, GFP_NOIO); 6687 *worker_groups = kcalloc(*group_cnt, sizeof(struct r5worker_group), 6688 GFP_NOIO); 6689 if (!*worker_groups || !workers) { 6690 kfree(workers); 6691 kfree(*worker_groups); 6692 return -ENOMEM; 6693 } 6694 6695 for (i = 0; i < *group_cnt; i++) { 6696 struct r5worker_group *group; 6697 6698 group = &(*worker_groups)[i]; 6699 INIT_LIST_HEAD(&group->handle_list); 6700 INIT_LIST_HEAD(&group->loprio_list); 6701 group->conf = conf; 6702 group->workers = workers + i * cnt; 6703 6704 for (j = 0; j < cnt; j++) { 6705 struct r5worker *worker = group->workers + j; 6706 worker->group = group; 6707 INIT_WORK(&worker->work, raid5_do_work); 6708 6709 for (k = 0; k < NR_STRIPE_HASH_LOCKS; k++) 6710 INIT_LIST_HEAD(worker->temp_inactive_list + k); 6711 } 6712 } 6713 6714 return 0; 6715 } 6716 6717 static void free_thread_groups(struct r5conf *conf) 6718 { 6719 if (conf->worker_groups) 6720 kfree(conf->worker_groups[0].workers); 6721 kfree(conf->worker_groups); 6722 conf->worker_groups = NULL; 6723 } 6724 6725 static sector_t 6726 raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks) 6727 { 6728 struct r5conf *conf = mddev->private; 6729 6730 if (!sectors) 6731 sectors = mddev->dev_sectors; 6732 if (!raid_disks) 6733 /* size is defined by the smallest of previous and new size */ 6734 raid_disks = min(conf->raid_disks, conf->previous_raid_disks); 6735 6736 sectors &= ~((sector_t)conf->chunk_sectors - 1); 6737 sectors &= ~((sector_t)conf->prev_chunk_sectors - 1); 6738 return sectors * (raid_disks - conf->max_degraded); 6739 } 6740 6741 static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) 6742 { 6743 safe_put_page(percpu->spare_page); 6744 percpu->spare_page = NULL; 6745 kvfree(percpu->scribble); 6746 percpu->scribble = NULL; 6747 } 6748 6749 static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) 6750 { 6751 if (conf->level == 6 && !percpu->spare_page) { 6752 
percpu->spare_page = alloc_page(GFP_KERNEL); 6753 if (!percpu->spare_page) 6754 return -ENOMEM; 6755 } 6756 6757 if (scribble_alloc(percpu, 6758 max(conf->raid_disks, 6759 conf->previous_raid_disks), 6760 max(conf->chunk_sectors, 6761 conf->prev_chunk_sectors) 6762 / STRIPE_SECTORS, 6763 GFP_KERNEL)) { 6764 free_scratch_buffer(conf, percpu); 6765 return -ENOMEM; 6766 } 6767 6768 return 0; 6769 } 6770 6771 static int raid456_cpu_dead(unsigned int cpu, struct hlist_node *node) 6772 { 6773 struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node); 6774 6775 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); 6776 return 0; 6777 } 6778 6779 static void raid5_free_percpu(struct r5conf *conf) 6780 { 6781 if (!conf->percpu) 6782 return; 6783 6784 cpuhp_state_remove_instance(CPUHP_MD_RAID5_PREPARE, &conf->node); 6785 free_percpu(conf->percpu); 6786 } 6787 6788 static void free_conf(struct r5conf *conf) 6789 { 6790 int i; 6791 6792 log_exit(conf); 6793 6794 unregister_shrinker(&conf->shrinker); 6795 free_thread_groups(conf); 6796 shrink_stripes(conf); 6797 raid5_free_percpu(conf); 6798 for (i = 0; i < conf->pool_size; i++) 6799 if (conf->disks[i].extra_page) 6800 put_page(conf->disks[i].extra_page); 6801 kfree(conf->disks); 6802 bioset_exit(&conf->bio_split); 6803 kfree(conf->stripe_hashtbl); 6804 kfree(conf->pending_data); 6805 kfree(conf); 6806 } 6807 6808 static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node) 6809 { 6810 struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node); 6811 struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu); 6812 6813 if (alloc_scratch_buffer(conf, percpu)) { 6814 pr_warn("%s: failed memory allocation for cpu%u\n", 6815 __func__, cpu); 6816 return -ENOMEM; 6817 } 6818 return 0; 6819 } 6820 6821 static int raid5_alloc_percpu(struct r5conf *conf) 6822 { 6823 int err = 0; 6824 6825 conf->percpu = alloc_percpu(struct raid5_percpu); 6826 if (!conf->percpu) 6827 return -ENOMEM; 6828 6829 err = cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node); 6830 if (!err) { 6831 conf->scribble_disks = max(conf->raid_disks, 6832 conf->previous_raid_disks); 6833 conf->scribble_sectors = max(conf->chunk_sectors, 6834 conf->prev_chunk_sectors); 6835 } 6836 return err; 6837 } 6838 6839 static unsigned long raid5_cache_scan(struct shrinker *shrink, 6840 struct shrink_control *sc) 6841 { 6842 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); 6843 unsigned long ret = SHRINK_STOP; 6844 6845 if (mutex_trylock(&conf->cache_size_mutex)) { 6846 ret= 0; 6847 while (ret < sc->nr_to_scan && 6848 conf->max_nr_stripes > conf->min_nr_stripes) { 6849 if (drop_one_stripe(conf) == 0) { 6850 ret = SHRINK_STOP; 6851 break; 6852 } 6853 ret++; 6854 } 6855 mutex_unlock(&conf->cache_size_mutex); 6856 } 6857 return ret; 6858 } 6859 6860 static unsigned long raid5_cache_count(struct shrinker *shrink, 6861 struct shrink_control *sc) 6862 { 6863 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); 6864 6865 if (conf->max_nr_stripes < conf->min_nr_stripes) 6866 /* unlikely, but not impossible */ 6867 return 0; 6868 return conf->max_nr_stripes - conf->min_nr_stripes; 6869 } 6870 6871 static struct r5conf *setup_conf(struct mddev *mddev) 6872 { 6873 struct r5conf *conf; 6874 int raid_disk, memory, max_disks; 6875 struct md_rdev *rdev; 6876 struct disk_info *disk; 6877 char pers_name[6]; 6878 int i; 6879 int group_cnt; 6880 struct r5worker_group *new_group; 6881 int ret; 6882 6883 if (mddev->new_level != 5 6884 && 
mddev->new_level != 4 6885 && mddev->new_level != 6) { 6886 pr_warn("md/raid:%s: raid level not set to 4/5/6 (%d)\n", 6887 mdname(mddev), mddev->new_level); 6888 return ERR_PTR(-EIO); 6889 } 6890 if ((mddev->new_level == 5 6891 && !algorithm_valid_raid5(mddev->new_layout)) || 6892 (mddev->new_level == 6 6893 && !algorithm_valid_raid6(mddev->new_layout))) { 6894 pr_warn("md/raid:%s: layout %d not supported\n", 6895 mdname(mddev), mddev->new_layout); 6896 return ERR_PTR(-EIO); 6897 } 6898 if (mddev->new_level == 6 && mddev->raid_disks < 4) { 6899 pr_warn("md/raid:%s: not enough configured devices (%d, minimum 4)\n", 6900 mdname(mddev), mddev->raid_disks); 6901 return ERR_PTR(-EINVAL); 6902 } 6903 6904 if (!mddev->new_chunk_sectors || 6905 (mddev->new_chunk_sectors << 9) % PAGE_SIZE || 6906 !is_power_of_2(mddev->new_chunk_sectors)) { 6907 pr_warn("md/raid:%s: invalid chunk size %d\n", 6908 mdname(mddev), mddev->new_chunk_sectors << 9); 6909 return ERR_PTR(-EINVAL); 6910 } 6911 6912 conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL); 6913 if (conf == NULL) 6914 goto abort; 6915 INIT_LIST_HEAD(&conf->free_list); 6916 INIT_LIST_HEAD(&conf->pending_list); 6917 conf->pending_data = kcalloc(PENDING_IO_MAX, 6918 sizeof(struct r5pending_data), 6919 GFP_KERNEL); 6920 if (!conf->pending_data) 6921 goto abort; 6922 for (i = 0; i < PENDING_IO_MAX; i++) 6923 list_add(&conf->pending_data[i].sibling, &conf->free_list); 6924 /* Don't enable multi-threading by default*/ 6925 if (!alloc_thread_groups(conf, 0, &group_cnt, &new_group)) { 6926 conf->group_cnt = group_cnt; 6927 conf->worker_cnt_per_group = 0; 6928 conf->worker_groups = new_group; 6929 } else 6930 goto abort; 6931 spin_lock_init(&conf->device_lock); 6932 seqcount_init(&conf->gen_lock); 6933 mutex_init(&conf->cache_size_mutex); 6934 init_waitqueue_head(&conf->wait_for_quiescent); 6935 init_waitqueue_head(&conf->wait_for_stripe); 6936 init_waitqueue_head(&conf->wait_for_overlap); 6937 INIT_LIST_HEAD(&conf->handle_list); 6938 INIT_LIST_HEAD(&conf->loprio_list); 6939 INIT_LIST_HEAD(&conf->hold_list); 6940 INIT_LIST_HEAD(&conf->delayed_list); 6941 INIT_LIST_HEAD(&conf->bitmap_list); 6942 init_llist_head(&conf->released_stripes); 6943 atomic_set(&conf->active_stripes, 0); 6944 atomic_set(&conf->preread_active_stripes, 0); 6945 atomic_set(&conf->active_aligned_reads, 0); 6946 spin_lock_init(&conf->pending_bios_lock); 6947 conf->batch_bio_dispatch = true; 6948 rdev_for_each(rdev, mddev) { 6949 if (test_bit(Journal, &rdev->flags)) 6950 continue; 6951 if (blk_queue_nonrot(bdev_get_queue(rdev->bdev))) { 6952 conf->batch_bio_dispatch = false; 6953 break; 6954 } 6955 } 6956 6957 conf->bypass_threshold = BYPASS_THRESHOLD; 6958 conf->recovery_disabled = mddev->recovery_disabled - 1; 6959 6960 conf->raid_disks = mddev->raid_disks; 6961 if (mddev->reshape_position == MaxSector) 6962 conf->previous_raid_disks = mddev->raid_disks; 6963 else 6964 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; 6965 max_disks = max(conf->raid_disks, conf->previous_raid_disks); 6966 6967 conf->disks = kcalloc(max_disks, sizeof(struct disk_info), 6968 GFP_KERNEL); 6969 6970 if (!conf->disks) 6971 goto abort; 6972 6973 for (i = 0; i < max_disks; i++) { 6974 conf->disks[i].extra_page = alloc_page(GFP_KERNEL); 6975 if (!conf->disks[i].extra_page) 6976 goto abort; 6977 } 6978 6979 ret = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0); 6980 if (ret) 6981 goto abort; 6982 conf->mddev = mddev; 6983 6984 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) 
6985 goto abort; 6986 6987 /* We init hash_locks[0] separately to that it can be used 6988 * as the reference lock in the spin_lock_nest_lock() call 6989 * in lock_all_device_hash_locks_irq in order to convince 6990 * lockdep that we know what we are doing. 6991 */ 6992 spin_lock_init(conf->hash_locks); 6993 for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++) 6994 spin_lock_init(conf->hash_locks + i); 6995 6996 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) 6997 INIT_LIST_HEAD(conf->inactive_list + i); 6998 6999 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) 7000 INIT_LIST_HEAD(conf->temp_inactive_list + i); 7001 7002 atomic_set(&conf->r5c_cached_full_stripes, 0); 7003 INIT_LIST_HEAD(&conf->r5c_full_stripe_list); 7004 atomic_set(&conf->r5c_cached_partial_stripes, 0); 7005 INIT_LIST_HEAD(&conf->r5c_partial_stripe_list); 7006 atomic_set(&conf->r5c_flushing_full_stripes, 0); 7007 atomic_set(&conf->r5c_flushing_partial_stripes, 0); 7008 7009 conf->level = mddev->new_level; 7010 conf->chunk_sectors = mddev->new_chunk_sectors; 7011 if (raid5_alloc_percpu(conf) != 0) 7012 goto abort; 7013 7014 pr_debug("raid456: run(%s) called.\n", mdname(mddev)); 7015 7016 rdev_for_each(rdev, mddev) { 7017 raid_disk = rdev->raid_disk; 7018 if (raid_disk >= max_disks 7019 || raid_disk < 0 || test_bit(Journal, &rdev->flags)) 7020 continue; 7021 disk = conf->disks + raid_disk; 7022 7023 if (test_bit(Replacement, &rdev->flags)) { 7024 if (disk->replacement) 7025 goto abort; 7026 disk->replacement = rdev; 7027 } else { 7028 if (disk->rdev) 7029 goto abort; 7030 disk->rdev = rdev; 7031 } 7032 7033 if (test_bit(In_sync, &rdev->flags)) { 7034 char b[BDEVNAME_SIZE]; 7035 pr_info("md/raid:%s: device %s operational as raid disk %d\n", 7036 mdname(mddev), bdevname(rdev->bdev, b), raid_disk); 7037 } else if (rdev->saved_raid_disk != raid_disk) 7038 /* Cannot rely on bitmap to complete recovery */ 7039 conf->fullsync = 1; 7040 } 7041 7042 conf->level = mddev->new_level; 7043 if (conf->level == 6) { 7044 conf->max_degraded = 2; 7045 if (raid6_call.xor_syndrome) 7046 conf->rmw_level = PARITY_ENABLE_RMW; 7047 else 7048 conf->rmw_level = PARITY_DISABLE_RMW; 7049 } else { 7050 conf->max_degraded = 1; 7051 conf->rmw_level = PARITY_ENABLE_RMW; 7052 } 7053 conf->algorithm = mddev->new_layout; 7054 conf->reshape_progress = mddev->reshape_position; 7055 if (conf->reshape_progress != MaxSector) { 7056 conf->prev_chunk_sectors = mddev->chunk_sectors; 7057 conf->prev_algo = mddev->layout; 7058 } else { 7059 conf->prev_chunk_sectors = conf->chunk_sectors; 7060 conf->prev_algo = conf->algorithm; 7061 } 7062 7063 conf->min_nr_stripes = NR_STRIPES; 7064 if (mddev->reshape_position != MaxSector) { 7065 int stripes = max_t(int, 7066 ((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4, 7067 ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4); 7068 conf->min_nr_stripes = max(NR_STRIPES, stripes); 7069 if (conf->min_nr_stripes != NR_STRIPES) 7070 pr_info("md/raid:%s: force stripe size %d for reshape\n", 7071 mdname(mddev), conf->min_nr_stripes); 7072 } 7073 memory = conf->min_nr_stripes * (sizeof(struct stripe_head) + 7074 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; 7075 atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS); 7076 if (grow_stripes(conf, conf->min_nr_stripes)) { 7077 pr_warn("md/raid:%s: couldn't allocate %dkB for buffers\n", 7078 mdname(mddev), memory); 7079 goto abort; 7080 } else 7081 pr_debug("md/raid:%s: allocated %dkB\n", mdname(mddev), memory); 7082 /* 7083 * Losing a stripe head costs more than the time to refill it, 7084 
* it reduces the queue depth and so can hurt throughput. 7085 * So set it rather large, scaled by number of devices. 7086 */ 7087 conf->shrinker.seeks = DEFAULT_SEEKS * conf->raid_disks * 4; 7088 conf->shrinker.scan_objects = raid5_cache_scan; 7089 conf->shrinker.count_objects = raid5_cache_count; 7090 conf->shrinker.batch = 128; 7091 conf->shrinker.flags = 0; 7092 if (register_shrinker(&conf->shrinker)) { 7093 pr_warn("md/raid:%s: couldn't register shrinker.\n", 7094 mdname(mddev)); 7095 goto abort; 7096 } 7097 7098 sprintf(pers_name, "raid%d", mddev->new_level); 7099 conf->thread = md_register_thread(raid5d, mddev, pers_name); 7100 if (!conf->thread) { 7101 pr_warn("md/raid:%s: couldn't allocate thread.\n", 7102 mdname(mddev)); 7103 goto abort; 7104 } 7105 7106 return conf; 7107 7108 abort: 7109 if (conf) { 7110 free_conf(conf); 7111 return ERR_PTR(-EIO); 7112 } else 7113 return ERR_PTR(-ENOMEM); 7114 } 7115 7116 static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded) 7117 { 7118 switch (algo) { 7119 case ALGORITHM_PARITY_0: 7120 if (raid_disk < max_degraded) 7121 return 1; 7122 break; 7123 case ALGORITHM_PARITY_N: 7124 if (raid_disk >= raid_disks - max_degraded) 7125 return 1; 7126 break; 7127 case ALGORITHM_PARITY_0_6: 7128 if (raid_disk == 0 || 7129 raid_disk == raid_disks - 1) 7130 return 1; 7131 break; 7132 case ALGORITHM_LEFT_ASYMMETRIC_6: 7133 case ALGORITHM_RIGHT_ASYMMETRIC_6: 7134 case ALGORITHM_LEFT_SYMMETRIC_6: 7135 case ALGORITHM_RIGHT_SYMMETRIC_6: 7136 if (raid_disk == raid_disks - 1) 7137 return 1; 7138 } 7139 return 0; 7140 } 7141 7142 static int raid5_run(struct mddev *mddev) 7143 { 7144 struct r5conf *conf; 7145 int working_disks = 0; 7146 int dirty_parity_disks = 0; 7147 struct md_rdev *rdev; 7148 struct md_rdev *journal_dev = NULL; 7149 sector_t reshape_offset = 0; 7150 int i; 7151 long long min_offset_diff = 0; 7152 int first = 1; 7153 7154 if (mddev_init_writes_pending(mddev) < 0) 7155 return -ENOMEM; 7156 7157 if (mddev->recovery_cp != MaxSector) 7158 pr_notice("md/raid:%s: not clean -- starting background reconstruction\n", 7159 mdname(mddev)); 7160 7161 rdev_for_each(rdev, mddev) { 7162 long long diff; 7163 7164 if (test_bit(Journal, &rdev->flags)) { 7165 journal_dev = rdev; 7166 continue; 7167 } 7168 if (rdev->raid_disk < 0) 7169 continue; 7170 diff = (rdev->new_data_offset - rdev->data_offset); 7171 if (first) { 7172 min_offset_diff = diff; 7173 first = 0; 7174 } else if (mddev->reshape_backwards && 7175 diff < min_offset_diff) 7176 min_offset_diff = diff; 7177 else if (!mddev->reshape_backwards && 7178 diff > min_offset_diff) 7179 min_offset_diff = diff; 7180 } 7181 7182 if ((test_bit(MD_HAS_JOURNAL, &mddev->flags) || journal_dev) && 7183 (mddev->bitmap_info.offset || mddev->bitmap_info.file)) { 7184 pr_notice("md/raid:%s: array cannot have both journal and bitmap\n", 7185 mdname(mddev)); 7186 return -EINVAL; 7187 } 7188 7189 if (mddev->reshape_position != MaxSector) { 7190 /* Check that we can continue the reshape. 7191 * Difficulties arise if the stripe we would write to 7192 * next is at or after the stripe we would read from next. 7193 * For a reshape that changes the number of devices, this 7194 * is only possible for a very short time, and mdadm makes 7195 * sure that time appears to have past before assembling 7196 * the array. So we fail if that time hasn't passed. 
7197 * For a reshape that keeps the number of devices the same 7198 * mdadm must be monitoring the reshape and keeping the 7199 * critical areas read-only and backed up. It will start 7200 * the array in read-only mode, so we check for that. 7201 */ 7202 sector_t here_new, here_old; 7203 int old_disks; 7204 int max_degraded = (mddev->level == 6 ? 2 : 1); 7205 int chunk_sectors; 7206 int new_data_disks; 7207 7208 if (journal_dev) { 7209 pr_warn("md/raid:%s: don't support reshape with journal - aborting.\n", 7210 mdname(mddev)); 7211 return -EINVAL; 7212 } 7213 7214 if (mddev->new_level != mddev->level) { 7215 pr_warn("md/raid:%s: unsupported reshape required - aborting.\n", 7216 mdname(mddev)); 7217 return -EINVAL; 7218 } 7219 old_disks = mddev->raid_disks - mddev->delta_disks; 7220 /* reshape_position must be on a new-stripe boundary, and one 7221 * further up in new geometry must map after here in old 7222 * geometry. 7223 * If the chunk sizes are different, then as we perform reshape 7224 * in units of the largest of the two, reshape_position needs 7225 * to be a multiple of the largest chunk size times new data disks. 7226 */ 7227 here_new = mddev->reshape_position; 7228 chunk_sectors = max(mddev->chunk_sectors, mddev->new_chunk_sectors); 7229 new_data_disks = mddev->raid_disks - max_degraded; 7230 if (sector_div(here_new, chunk_sectors * new_data_disks)) { 7231 pr_warn("md/raid:%s: reshape_position not on a stripe boundary\n", 7232 mdname(mddev)); 7233 return -EINVAL; 7234 } 7235 reshape_offset = here_new * chunk_sectors; 7236 /* here_new is the stripe we will write to */
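	/*
	 * Worked example (illustrative values only): growing a 4-disk RAID5
	 * to 5 disks with 512KiB chunks gives chunk_sectors == 1024,
	 * max_degraded == 1 and new_data_disks == 4, so reshape_position
	 * must be a multiple of 1024 * 4 == 4096 sectors.  A value of 40960
	 * passes the check above with here_new == 10 (the 10th stripe of the
	 * new layout) and reshape_offset == 10240; here_old, computed below
	 * against the 3 old data disks, works out to 40960 / 3072 == 13.
	 */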
7237 here_old = mddev->reshape_position; 7238 sector_div(here_old, chunk_sectors * (old_disks-max_degraded)); 7239 /* here_old is the first stripe that we might need to read 7240 * from */ 7241 if (mddev->delta_disks == 0) { 7242 /* We cannot be sure it is safe to start an in-place 7243 * reshape. It is only safe if user-space is monitoring 7244 * and taking constant backups. 7245 * mdadm always starts a situation like this in 7246 * readonly mode so it can take control before 7247 * allowing any writes. So just check for that. 7248 */ 7249 if (abs(min_offset_diff) >= mddev->chunk_sectors && 7250 abs(min_offset_diff) >= mddev->new_chunk_sectors) 7251 /* not really in-place - so OK */; 7252 else if (mddev->ro == 0) { 7253 pr_warn("md/raid:%s: in-place reshape must be started in read-only mode - aborting\n", 7254 mdname(mddev)); 7255 return -EINVAL; 7256 } 7257 } else if (mddev->reshape_backwards 7258 ? (here_new * chunk_sectors + min_offset_diff <= 7259 here_old * chunk_sectors) 7260 : (here_new * chunk_sectors >= 7261 here_old * chunk_sectors + (-min_offset_diff))) { 7262 /* Reading from the same stripe as writing to - bad */ 7263 pr_warn("md/raid:%s: reshape_position too early for auto-recovery - aborting.\n", 7264 mdname(mddev)); 7265 return -EINVAL; 7266 } 7267 pr_debug("md/raid:%s: reshape will continue\n", mdname(mddev)); 7268 /* OK, we should be able to continue; */ 7269 } else { 7270 BUG_ON(mddev->level != mddev->new_level); 7271 BUG_ON(mddev->layout != mddev->new_layout); 7272 BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors); 7273 BUG_ON(mddev->delta_disks != 0); 7274 } 7275 7276 if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && 7277 test_bit(MD_HAS_PPL, &mddev->flags)) { 7278 pr_warn("md/raid:%s: using journal device and PPL not allowed - disabling PPL\n", 7279 mdname(mddev)); 7280 clear_bit(MD_HAS_PPL, &mddev->flags); 7281 clear_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags); 7282 } 7283 7284 if (mddev->private == NULL) 7285 conf = setup_conf(mddev); 7286 else 7287 conf = mddev->private; 7288 7289 if (IS_ERR(conf)) 7290 return PTR_ERR(conf); 7291 7292 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) { 7293 if (!journal_dev) { 7294 pr_warn("md/raid:%s: journal disk is missing, force array readonly\n", 7295 mdname(mddev)); 7296 mddev->ro = 1; 7297 set_disk_ro(mddev->gendisk, 1); 7298 } else if (mddev->recovery_cp == MaxSector) 7299 set_bit(MD_JOURNAL_CLEAN, &mddev->flags); 7300 } 7301 7302 conf->min_offset_diff = min_offset_diff; 7303 mddev->thread = conf->thread; 7304 conf->thread = NULL; 7305 mddev->private = conf; 7306 7307 for (i = 0; i < conf->raid_disks && conf->previous_raid_disks; 7308 i++) { 7309 rdev = conf->disks[i].rdev; 7310 if (!rdev && conf->disks[i].replacement) { 7311 /* The replacement is all we have yet */ 7312 rdev = conf->disks[i].replacement; 7313 conf->disks[i].replacement = NULL; 7314 clear_bit(Replacement, &rdev->flags); 7315 conf->disks[i].rdev = rdev; 7316 } 7317 if (!rdev) 7318 continue; 7319 if (conf->disks[i].replacement && 7320 conf->reshape_progress != MaxSector) { 7321 /* replacements and reshape simply do not mix. */ 7322 pr_warn("md: cannot handle concurrent replacement and reshape.\n"); 7323 goto abort; 7324 } 7325 if (test_bit(In_sync, &rdev->flags)) { 7326 working_disks++; 7327 continue; 7328 } 7329 /* This disc is not fully in-sync. However if it 7330 * just stored parity (beyond the recovery_offset), 7331 * then we don't need to be concerned about the 7332 * array being dirty. 7333 * When reshape goes 'backwards', we never have 7334 * partially completed devices, so we only need 7335 * to worry about reshape going forwards. 7336 */ 7337 /* Hack because v0.91 doesn't store recovery_offset properly. */ 7338 if (mddev->major_version == 0 && 7339 mddev->minor_version > 90) 7340 rdev->recovery_offset = reshape_offset; 7341 7342 if (rdev->recovery_offset < reshape_offset) { 7343 /* We need to check old and new layout */ 7344 if (!only_parity(rdev->raid_disk, 7345 conf->algorithm, 7346 conf->raid_disks, 7347 conf->max_degraded)) 7348 continue; 7349 } 7350 if (!only_parity(rdev->raid_disk, 7351 conf->prev_algo, 7352 conf->previous_raid_disks, 7353 conf->max_degraded)) 7354 continue; 7355 dirty_parity_disks++; 7356 } 7357 7358 /* 7359 * 0 for a fully functional array, 1 or 2 for a degraded array. 
7360 */ 7361 mddev->degraded = raid5_calc_degraded(conf); 7362 7363 if (has_failed(conf)) { 7364 pr_crit("md/raid:%s: not enough operational devices (%d/%d failed)\n", 7365 mdname(mddev), mddev->degraded, conf->raid_disks); 7366 goto abort; 7367 } 7368 7369 /* device size must be a multiple of chunk size */ 7370 mddev->dev_sectors &= ~(mddev->chunk_sectors - 1); 7371 mddev->resync_max_sectors = mddev->dev_sectors; 7372 7373 if (mddev->degraded > dirty_parity_disks && 7374 mddev->recovery_cp != MaxSector) { 7375 if (test_bit(MD_HAS_PPL, &mddev->flags)) 7376 pr_crit("md/raid:%s: starting dirty degraded array with PPL.\n", 7377 mdname(mddev)); 7378 else if (mddev->ok_start_degraded) 7379 pr_crit("md/raid:%s: starting dirty degraded array - data corruption possible.\n", 7380 mdname(mddev)); 7381 else { 7382 pr_crit("md/raid:%s: cannot start dirty degraded array.\n", 7383 mdname(mddev)); 7384 goto abort; 7385 } 7386 } 7387 7388 pr_info("md/raid:%s: raid level %d active with %d out of %d devices, algorithm %d\n", 7389 mdname(mddev), conf->level, 7390 mddev->raid_disks-mddev->degraded, mddev->raid_disks, 7391 mddev->new_layout); 7392 7393 print_raid5_conf(conf); 7394 7395 if (conf->reshape_progress != MaxSector) { 7396 conf->reshape_safe = conf->reshape_progress; 7397 atomic_set(&conf->reshape_stripes, 0); 7398 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 7399 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 7400 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 7401 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 7402 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 7403 "reshape"); 7404 if (!mddev->sync_thread) 7405 goto abort; 7406 } 7407 7408 /* Ok, everything is just fine now */ 7409 if (mddev->to_remove == &raid5_attrs_group) 7410 mddev->to_remove = NULL; 7411 else if (mddev->kobj.sd && 7412 sysfs_create_group(&mddev->kobj, &raid5_attrs_group)) 7413 pr_warn("raid5: failed to create sysfs attributes for %s\n", 7414 mdname(mddev)); 7415 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); 7416 7417 if (mddev->queue) { 7418 int chunk_size; 7419 /* read-ahead size must cover two whole stripes, which 7420 * is 2 * (datadisks) * chunksize where 'n' is the 7421 * number of raid devices 7422 */ 7423 int data_disks = conf->previous_raid_disks - conf->max_degraded; 7424 int stripe = data_disks * 7425 ((mddev->chunk_sectors << 9) / PAGE_SIZE); 7426 if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe) 7427 mddev->queue->backing_dev_info->ra_pages = 2 * stripe; 7428 7429 chunk_size = mddev->chunk_sectors << 9; 7430 blk_queue_io_min(mddev->queue, chunk_size); 7431 blk_queue_io_opt(mddev->queue, chunk_size * 7432 (conf->raid_disks - conf->max_degraded)); 7433 mddev->queue->limits.raid_partial_stripes_expensive = 1; 7434 /* 7435 * We can only discard a whole stripe. 
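* (A stripe here is data_disks * chunk_size bytes, rounded up below to a
* power of two; e.g. 3 data disks with 512K chunks give 1536K, which is
* rounded up to 2M for discard_alignment/granularity.)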
It doesn't make sense to 7436 * discard data disk but write parity disk 7437 */ 7438 stripe = stripe * PAGE_SIZE; 7439 /* Round up to power of 2, as discard handling 7440 * currently assumes that */ 7441 while ((stripe-1) & stripe) 7442 stripe = (stripe | (stripe-1)) + 1; 7443 mddev->queue->limits.discard_alignment = stripe; 7444 mddev->queue->limits.discard_granularity = stripe; 7445 7446 blk_queue_max_write_same_sectors(mddev->queue, 0); 7447 blk_queue_max_write_zeroes_sectors(mddev->queue, 0); 7448 7449 rdev_for_each(rdev, mddev) { 7450 disk_stack_limits(mddev->gendisk, rdev->bdev, 7451 rdev->data_offset << 9); 7452 disk_stack_limits(mddev->gendisk, rdev->bdev, 7453 rdev->new_data_offset << 9); 7454 } 7455 7456 /* 7457 * zeroing is required, otherwise data 7458 * could be lost. Consider a scenario: discard a stripe 7459 * (the stripe could be inconsistent if 7460 * discard_zeroes_data is 0); write one disk of the 7461 * stripe (the stripe could be inconsistent again 7462 * depending on which disks are used to calculate 7463 * parity); the disk is broken; The stripe data of this 7464 * disk is lost. 7465 * 7466 * We only allow DISCARD if the sysadmin has confirmed that 7467 * only safe devices are in use by setting a module parameter. 7468 * A better idea might be to turn DISCARD into WRITE_ZEROES 7469 * requests, as that is required to be safe. 7470 */ 7471 if (devices_handle_discard_safely && 7472 mddev->queue->limits.max_discard_sectors >= (stripe >> 9) && 7473 mddev->queue->limits.discard_granularity >= stripe) 7474 blk_queue_flag_set(QUEUE_FLAG_DISCARD, 7475 mddev->queue); 7476 else 7477 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, 7478 mddev->queue); 7479 7480 blk_queue_max_hw_sectors(mddev->queue, UINT_MAX); 7481 } 7482 7483 if (log_init(conf, journal_dev, raid5_has_ppl(conf))) 7484 goto abort; 7485 7486 return 0; 7487 abort: 7488 md_unregister_thread(&mddev->thread); 7489 print_raid5_conf(conf); 7490 free_conf(conf); 7491 mddev->private = NULL; 7492 pr_warn("md/raid:%s: failed to run raid set.\n", mdname(mddev)); 7493 return -EIO; 7494 } 7495 7496 static void raid5_free(struct mddev *mddev, void *priv) 7497 { 7498 struct r5conf *conf = priv; 7499 7500 free_conf(conf); 7501 mddev->to_remove = &raid5_attrs_group; 7502 } 7503 7504 static void raid5_status(struct seq_file *seq, struct mddev *mddev) 7505 { 7506 struct r5conf *conf = mddev->private; 7507 int i; 7508 7509 seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level, 7510 conf->chunk_sectors / 2, mddev->layout); 7511 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); 7512 rcu_read_lock(); 7513 for (i = 0; i < conf->raid_disks; i++) { 7514 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); 7515 seq_printf (seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? 
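/* "U" = device present and in-sync, "_" = missing or not in sync */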
"U" : "_"); 7516 } 7517 rcu_read_unlock(); 7518 seq_printf (seq, "]"); 7519 } 7520 7521 static void print_raid5_conf (struct r5conf *conf) 7522 { 7523 int i; 7524 struct disk_info *tmp; 7525 7526 pr_debug("RAID conf printout:\n"); 7527 if (!conf) { 7528 pr_debug("(conf==NULL)\n"); 7529 return; 7530 } 7531 pr_debug(" --- level:%d rd:%d wd:%d\n", conf->level, 7532 conf->raid_disks, 7533 conf->raid_disks - conf->mddev->degraded); 7534 7535 for (i = 0; i < conf->raid_disks; i++) { 7536 char b[BDEVNAME_SIZE]; 7537 tmp = conf->disks + i; 7538 if (tmp->rdev) 7539 pr_debug(" disk %d, o:%d, dev:%s\n", 7540 i, !test_bit(Faulty, &tmp->rdev->flags), 7541 bdevname(tmp->rdev->bdev, b)); 7542 } 7543 } 7544 7545 static int raid5_spare_active(struct mddev *mddev) 7546 { 7547 int i; 7548 struct r5conf *conf = mddev->private; 7549 struct disk_info *tmp; 7550 int count = 0; 7551 unsigned long flags; 7552 7553 for (i = 0; i < conf->raid_disks; i++) { 7554 tmp = conf->disks + i; 7555 if (tmp->replacement 7556 && tmp->replacement->recovery_offset == MaxSector 7557 && !test_bit(Faulty, &tmp->replacement->flags) 7558 && !test_and_set_bit(In_sync, &tmp->replacement->flags)) { 7559 /* Replacement has just become active. */ 7560 if (!tmp->rdev 7561 || !test_and_clear_bit(In_sync, &tmp->rdev->flags)) 7562 count++; 7563 if (tmp->rdev) { 7564 /* Replaced device not technically faulty, 7565 * but we need to be sure it gets removed 7566 * and never re-added. 7567 */ 7568 set_bit(Faulty, &tmp->rdev->flags); 7569 sysfs_notify_dirent_safe( 7570 tmp->rdev->sysfs_state); 7571 } 7572 sysfs_notify_dirent_safe(tmp->replacement->sysfs_state); 7573 } else if (tmp->rdev 7574 && tmp->rdev->recovery_offset == MaxSector 7575 && !test_bit(Faulty, &tmp->rdev->flags) 7576 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { 7577 count++; 7578 sysfs_notify_dirent_safe(tmp->rdev->sysfs_state); 7579 } 7580 } 7581 spin_lock_irqsave(&conf->device_lock, flags); 7582 mddev->degraded = raid5_calc_degraded(conf); 7583 spin_unlock_irqrestore(&conf->device_lock, flags); 7584 print_raid5_conf(conf); 7585 return count; 7586 } 7587 7588 static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev) 7589 { 7590 struct r5conf *conf = mddev->private; 7591 int err = 0; 7592 int number = rdev->raid_disk; 7593 struct md_rdev **rdevp; 7594 struct disk_info *p = conf->disks + number; 7595 7596 print_raid5_conf(conf); 7597 if (test_bit(Journal, &rdev->flags) && conf->log) { 7598 /* 7599 * we can't wait pending write here, as this is called in 7600 * raid5d, wait will deadlock. 7601 * neilb: there is no locking about new writes here, 7602 * so this cannot be safe. 7603 */ 7604 if (atomic_read(&conf->active_stripes) || 7605 atomic_read(&conf->r5c_cached_full_stripes) || 7606 atomic_read(&conf->r5c_cached_partial_stripes)) { 7607 return -EBUSY; 7608 } 7609 log_exit(conf); 7610 return 0; 7611 } 7612 if (rdev == p->rdev) 7613 rdevp = &p->rdev; 7614 else if (rdev == p->replacement) 7615 rdevp = &p->replacement; 7616 else 7617 return 0; 7618 7619 if (number >= conf->raid_disks && 7620 conf->reshape_progress == MaxSector) 7621 clear_bit(In_sync, &rdev->flags); 7622 7623 if (test_bit(In_sync, &rdev->flags) || 7624 atomic_read(&rdev->nr_pending)) { 7625 err = -EBUSY; 7626 goto abort; 7627 } 7628 /* Only remove non-faulty devices if recovery 7629 * isn't possible. 
7630 */ 7631 if (!test_bit(Faulty, &rdev->flags) && 7632 mddev->recovery_disabled != conf->recovery_disabled && 7633 !has_failed(conf) && 7634 (!p->replacement || p->replacement == rdev) && 7635 number < conf->raid_disks) { 7636 err = -EBUSY; 7637 goto abort; 7638 } 7639 *rdevp = NULL; 7640 if (!test_bit(RemoveSynchronized, &rdev->flags)) { 7641 synchronize_rcu(); 7642 if (atomic_read(&rdev->nr_pending)) { 7643 /* lost the race, try later */ 7644 err = -EBUSY; 7645 *rdevp = rdev; 7646 } 7647 } 7648 if (!err) { 7649 err = log_modify(conf, rdev, false); 7650 if (err) 7651 goto abort; 7652 } 7653 if (p->replacement) { 7654 /* We must have just cleared 'rdev' */ 7655 p->rdev = p->replacement; 7656 clear_bit(Replacement, &p->replacement->flags); 7657 smp_mb(); /* Make sure other CPUs may see both as identical 7658 * but will never see neither - if they are careful 7659 */ 7660 p->replacement = NULL; 7661 7662 if (!err) 7663 err = log_modify(conf, p->rdev, true); 7664 } 7665 7666 clear_bit(WantReplacement, &rdev->flags); 7667 abort: 7668 7669 print_raid5_conf(conf); 7670 return err; 7671 } 7672 7673 static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) 7674 { 7675 struct r5conf *conf = mddev->private; 7676 int ret, err = -EEXIST; 7677 int disk; 7678 struct disk_info *p; 7679 int first = 0; 7680 int last = conf->raid_disks - 1; 7681 7682 if (test_bit(Journal, &rdev->flags)) { 7683 if (conf->log) 7684 return -EBUSY; 7685 7686 rdev->raid_disk = 0; 7687 /* 7688 * The array is in readonly mode if journal is missing, so no 7689 * write requests running. We should be safe 7690 */ 7691 ret = log_init(conf, rdev, false); 7692 if (ret) 7693 return ret; 7694 7695 ret = r5l_start(conf->log); 7696 if (ret) 7697 return ret; 7698 7699 return 0; 7700 } 7701 if (mddev->recovery_disabled == conf->recovery_disabled) 7702 return -EBUSY; 7703 7704 if (rdev->saved_raid_disk < 0 && has_failed(conf)) 7705 /* no point adding a device */ 7706 return -EINVAL; 7707 7708 if (rdev->raid_disk >= 0) 7709 first = last = rdev->raid_disk; 7710 7711 /* 7712 * find the disk ... but prefer rdev->saved_raid_disk 7713 * if possible. 7714 */ 7715 if (rdev->saved_raid_disk >= 0 && 7716 rdev->saved_raid_disk >= first && 7717 conf->disks[rdev->saved_raid_disk].rdev == NULL) 7718 first = rdev->saved_raid_disk; 7719 7720 for (disk = first; disk <= last; disk++) { 7721 p = conf->disks + disk; 7722 if (p->rdev == NULL) { 7723 clear_bit(In_sync, &rdev->flags); 7724 rdev->raid_disk = disk; 7725 if (rdev->saved_raid_disk != disk) 7726 conf->fullsync = 1; 7727 rcu_assign_pointer(p->rdev, rdev); 7728 7729 err = log_modify(conf, rdev, true); 7730 7731 goto out; 7732 } 7733 } 7734 for (disk = first; disk <= last; disk++) { 7735 p = conf->disks + disk; 7736 if (test_bit(WantReplacement, &p->rdev->flags) && 7737 p->replacement == NULL) { 7738 clear_bit(In_sync, &rdev->flags); 7739 set_bit(Replacement, &rdev->flags); 7740 rdev->raid_disk = disk; 7741 err = 0; 7742 conf->fullsync = 1; 7743 rcu_assign_pointer(p->replacement, rdev); 7744 break; 7745 } 7746 } 7747 out: 7748 print_raid5_conf(conf); 7749 return err; 7750 } 7751 7752 static int raid5_resize(struct mddev *mddev, sector_t sectors) 7753 { 7754 /* no resync is happening, and there is enough space 7755 * on all devices, so we can resize. 7756 * We need to make sure resync covers any new space. 7757 * If the array is shrinking we should possibly wait until 7758 * any io in the removed space completes, but it hardly seems 7759 * worth it. 
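* The requested size is first rounded down to a whole number of chunks;
* with 512-sector (256K) chunks, for example, a request of 1000000
* sectors per device is trimmed to 999936.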
7760 */
7761 sector_t newsize;
7762 struct r5conf *conf = mddev->private;
7763 
7764 if (raid5_has_log(conf) || raid5_has_ppl(conf))
7765 return -EINVAL;
7766 sectors &= ~((sector_t)conf->chunk_sectors - 1);
7767 newsize = raid5_size(mddev, sectors, mddev->raid_disks);
7768 if (mddev->external_size &&
7769 mddev->array_sectors > newsize)
7770 return -EINVAL;
7771 if (mddev->bitmap) {
7772 int ret = md_bitmap_resize(mddev->bitmap, sectors, 0, 0);
7773 if (ret)
7774 return ret;
7775 }
7776 md_set_array_sectors(mddev, newsize);
7777 if (sectors > mddev->dev_sectors &&
7778 mddev->recovery_cp > mddev->dev_sectors) {
7779 mddev->recovery_cp = mddev->dev_sectors;
7780 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7781 }
7782 mddev->dev_sectors = sectors;
7783 mddev->resync_max_sectors = sectors;
7784 return 0;
7785 }
7786 
7787 static int check_stripe_cache(struct mddev *mddev)
7788 {
7789 /* Can only proceed if there are plenty of stripe_heads.
7790 * We need a minimum of one full stripe, and for sensible progress
7791 * it is best to have about 4 times that.
7792 * If we require 4 times, then the default 256 4K stripe_heads will
7793 * allow for chunk sizes up to 256K, which is probably OK.
7794 * If the chunk size is greater, user-space should request more
7795 * stripe_heads first.
7796 */
7797 struct r5conf *conf = mddev->private;
7798 if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
7799 > conf->min_nr_stripes ||
7800 ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
7801 > conf->min_nr_stripes) {
7802 pr_warn("md/raid:%s: reshape: not enough stripes. Needed %lu\n",
7803 mdname(mddev),
7804 ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
7805 / STRIPE_SIZE)*4);
7806 return 0;
7807 }
7808 return 1;
7809 }
7810 
7811 static int check_reshape(struct mddev *mddev)
7812 {
7813 struct r5conf *conf = mddev->private;
7814 
7815 if (raid5_has_log(conf) || raid5_has_ppl(conf))
7816 return -EINVAL;
7817 if (mddev->delta_disks == 0 &&
7818 mddev->new_layout == mddev->layout &&
7819 mddev->new_chunk_sectors == mddev->chunk_sectors)
7820 return 0; /* nothing to do */
7821 if (has_failed(conf))
7822 return -EINVAL;
7823 if (mddev->delta_disks < 0 && mddev->reshape_position == MaxSector) {
7824 /* We might be able to shrink, but the devices must
7825 * be made bigger first.
7826 * For raid6, 4 is the minimum size.
7827 * Otherwise 2 is the minimum 7828 */ 7829 int min = 2; 7830 if (mddev->level == 6) 7831 min = 4; 7832 if (mddev->raid_disks + mddev->delta_disks < min) 7833 return -EINVAL; 7834 } 7835 7836 if (!check_stripe_cache(mddev)) 7837 return -ENOSPC; 7838 7839 if (mddev->new_chunk_sectors > mddev->chunk_sectors || 7840 mddev->delta_disks > 0) 7841 if (resize_chunks(conf, 7842 conf->previous_raid_disks 7843 + max(0, mddev->delta_disks), 7844 max(mddev->new_chunk_sectors, 7845 mddev->chunk_sectors) 7846 ) < 0) 7847 return -ENOMEM; 7848 7849 if (conf->previous_raid_disks + mddev->delta_disks <= conf->pool_size) 7850 return 0; /* never bother to shrink */ 7851 return resize_stripes(conf, (conf->previous_raid_disks 7852 + mddev->delta_disks)); 7853 } 7854 7855 static int raid5_start_reshape(struct mddev *mddev) 7856 { 7857 struct r5conf *conf = mddev->private; 7858 struct md_rdev *rdev; 7859 int spares = 0; 7860 unsigned long flags; 7861 7862 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 7863 return -EBUSY; 7864 7865 if (!check_stripe_cache(mddev)) 7866 return -ENOSPC; 7867 7868 if (has_failed(conf)) 7869 return -EINVAL; 7870 7871 rdev_for_each(rdev, mddev) { 7872 if (!test_bit(In_sync, &rdev->flags) 7873 && !test_bit(Faulty, &rdev->flags)) 7874 spares++; 7875 } 7876 7877 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) 7878 /* Not enough devices even to make a degraded array 7879 * of that size 7880 */ 7881 return -EINVAL; 7882 7883 /* Refuse to reduce size of the array. Any reductions in 7884 * array size must be through explicit setting of array_size 7885 * attribute. 7886 */ 7887 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks) 7888 < mddev->array_sectors) { 7889 pr_warn("md/raid:%s: array size must be reduced before number of disks\n", 7890 mdname(mddev)); 7891 return -EINVAL; 7892 } 7893 7894 atomic_set(&conf->reshape_stripes, 0); 7895 spin_lock_irq(&conf->device_lock); 7896 write_seqcount_begin(&conf->gen_lock); 7897 conf->previous_raid_disks = conf->raid_disks; 7898 conf->raid_disks += mddev->delta_disks; 7899 conf->prev_chunk_sectors = conf->chunk_sectors; 7900 conf->chunk_sectors = mddev->new_chunk_sectors; 7901 conf->prev_algo = conf->algorithm; 7902 conf->algorithm = mddev->new_layout; 7903 conf->generation++; 7904 /* Code that selects data_offset needs to see the generation update 7905 * if reshape_progress has been set - so a memory barrier needed. 7906 */ 7907 smp_mb(); 7908 if (mddev->reshape_backwards) 7909 conf->reshape_progress = raid5_size(mddev, 0, 0); 7910 else 7911 conf->reshape_progress = 0; 7912 conf->reshape_safe = conf->reshape_progress; 7913 write_seqcount_end(&conf->gen_lock); 7914 spin_unlock_irq(&conf->device_lock); 7915 7916 /* Now make sure any requests that proceeded on the assumption 7917 * the reshape wasn't running - like Discard or Read - have 7918 * completed. 7919 */ 7920 mddev_suspend(mddev); 7921 mddev_resume(mddev); 7922 7923 /* Add some new drives, as many as will fit. 7924 * We know there are enough to make the newly sized array work. 7925 * Don't add devices if we are reducing the number of 7926 * devices in the array. This is because it is not possible 7927 * to correctly record the "partially reconstructed" state of 7928 * such devices during the reshape and confusion could result. 
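* Spares promoted here start with recovery_offset 0 so the reshape
* rebuilds them as it progresses; slots at or beyond the old disk count
* can be marked In_sync immediately because no old data ever lived there.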
7929 */ 7930 if (mddev->delta_disks >= 0) { 7931 rdev_for_each(rdev, mddev) 7932 if (rdev->raid_disk < 0 && 7933 !test_bit(Faulty, &rdev->flags)) { 7934 if (raid5_add_disk(mddev, rdev) == 0) { 7935 if (rdev->raid_disk 7936 >= conf->previous_raid_disks) 7937 set_bit(In_sync, &rdev->flags); 7938 else 7939 rdev->recovery_offset = 0; 7940 7941 if (sysfs_link_rdev(mddev, rdev)) 7942 /* Failure here is OK */; 7943 } 7944 } else if (rdev->raid_disk >= conf->previous_raid_disks 7945 && !test_bit(Faulty, &rdev->flags)) { 7946 /* This is a spare that was manually added */ 7947 set_bit(In_sync, &rdev->flags); 7948 } 7949 7950 /* When a reshape changes the number of devices, 7951 * ->degraded is measured against the larger of the 7952 * pre and post number of devices. 7953 */ 7954 spin_lock_irqsave(&conf->device_lock, flags); 7955 mddev->degraded = raid5_calc_degraded(conf); 7956 spin_unlock_irqrestore(&conf->device_lock, flags); 7957 } 7958 mddev->raid_disks = conf->raid_disks; 7959 mddev->reshape_position = conf->reshape_progress; 7960 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 7961 7962 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 7963 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 7964 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); 7965 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 7966 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 7967 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 7968 "reshape"); 7969 if (!mddev->sync_thread) { 7970 mddev->recovery = 0; 7971 spin_lock_irq(&conf->device_lock); 7972 write_seqcount_begin(&conf->gen_lock); 7973 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; 7974 mddev->new_chunk_sectors = 7975 conf->chunk_sectors = conf->prev_chunk_sectors; 7976 mddev->new_layout = conf->algorithm = conf->prev_algo; 7977 rdev_for_each(rdev, mddev) 7978 rdev->new_data_offset = rdev->data_offset; 7979 smp_wmb(); 7980 conf->generation --; 7981 conf->reshape_progress = MaxSector; 7982 mddev->reshape_position = MaxSector; 7983 write_seqcount_end(&conf->gen_lock); 7984 spin_unlock_irq(&conf->device_lock); 7985 return -EAGAIN; 7986 } 7987 conf->reshape_checkpoint = jiffies; 7988 md_wakeup_thread(mddev->sync_thread); 7989 md_new_event(mddev); 7990 return 0; 7991 } 7992 7993 /* This is called from the reshape thread and should make any 7994 * changes needed in 'conf' 7995 */ 7996 static void end_reshape(struct r5conf *conf) 7997 { 7998 7999 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { 8000 struct md_rdev *rdev; 8001 8002 spin_lock_irq(&conf->device_lock); 8003 conf->previous_raid_disks = conf->raid_disks; 8004 md_finish_reshape(conf->mddev); 8005 smp_wmb(); 8006 conf->reshape_progress = MaxSector; 8007 conf->mddev->reshape_position = MaxSector; 8008 rdev_for_each(rdev, conf->mddev) 8009 if (rdev->raid_disk >= 0 && 8010 !test_bit(Journal, &rdev->flags) && 8011 !test_bit(In_sync, &rdev->flags)) 8012 rdev->recovery_offset = MaxSector; 8013 spin_unlock_irq(&conf->device_lock); 8014 wake_up(&conf->wait_for_overlap); 8015 8016 /* read-ahead size must cover two whole stripes, which is 8017 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices 8018 */ 8019 if (conf->mddev->queue) { 8020 int data_disks = conf->raid_disks - conf->max_degraded; 8021 int stripe = data_disks * ((conf->chunk_sectors << 9) 8022 / PAGE_SIZE); 8023 if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe) 8024 conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe; 8025 } 8026 } 8027 } 8028 8029 /* This is called from the raid5d thread with 
mddev_lock held. 8030 * It makes config changes to the device. 8031 */ 8032 static void raid5_finish_reshape(struct mddev *mddev) 8033 { 8034 struct r5conf *conf = mddev->private; 8035 8036 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 8037 8038 if (mddev->delta_disks <= 0) { 8039 int d; 8040 spin_lock_irq(&conf->device_lock); 8041 mddev->degraded = raid5_calc_degraded(conf); 8042 spin_unlock_irq(&conf->device_lock); 8043 for (d = conf->raid_disks ; 8044 d < conf->raid_disks - mddev->delta_disks; 8045 d++) { 8046 struct md_rdev *rdev = conf->disks[d].rdev; 8047 if (rdev) 8048 clear_bit(In_sync, &rdev->flags); 8049 rdev = conf->disks[d].replacement; 8050 if (rdev) 8051 clear_bit(In_sync, &rdev->flags); 8052 } 8053 } 8054 mddev->layout = conf->algorithm; 8055 mddev->chunk_sectors = conf->chunk_sectors; 8056 mddev->reshape_position = MaxSector; 8057 mddev->delta_disks = 0; 8058 mddev->reshape_backwards = 0; 8059 } 8060 } 8061 8062 static void raid5_quiesce(struct mddev *mddev, int quiesce) 8063 { 8064 struct r5conf *conf = mddev->private; 8065 8066 if (quiesce) { 8067 /* stop all writes */ 8068 lock_all_device_hash_locks_irq(conf); 8069 /* '2' tells resync/reshape to pause so that all 8070 * active stripes can drain 8071 */ 8072 r5c_flush_cache(conf, INT_MAX); 8073 conf->quiesce = 2; 8074 wait_event_cmd(conf->wait_for_quiescent, 8075 atomic_read(&conf->active_stripes) == 0 && 8076 atomic_read(&conf->active_aligned_reads) == 0, 8077 unlock_all_device_hash_locks_irq(conf), 8078 lock_all_device_hash_locks_irq(conf)); 8079 conf->quiesce = 1; 8080 unlock_all_device_hash_locks_irq(conf); 8081 /* allow reshape to continue */ 8082 wake_up(&conf->wait_for_overlap); 8083 } else { 8084 /* re-enable writes */ 8085 lock_all_device_hash_locks_irq(conf); 8086 conf->quiesce = 0; 8087 wake_up(&conf->wait_for_quiescent); 8088 wake_up(&conf->wait_for_overlap); 8089 unlock_all_device_hash_locks_irq(conf); 8090 } 8091 log_quiesce(conf, quiesce); 8092 } 8093 8094 static void *raid45_takeover_raid0(struct mddev *mddev, int level) 8095 { 8096 struct r0conf *raid0_conf = mddev->private; 8097 sector_t sectors; 8098 8099 /* for raid0 takeover only one zone is supported */ 8100 if (raid0_conf->nr_strip_zones > 1) { 8101 pr_warn("md/raid:%s: cannot takeover raid0 with more than one zone.\n", 8102 mdname(mddev)); 8103 return ERR_PTR(-EINVAL); 8104 } 8105 8106 sectors = raid0_conf->strip_zone[0].zone_end; 8107 sector_div(sectors, raid0_conf->strip_zone[0].nb_dev); 8108 mddev->dev_sectors = sectors; 8109 mddev->new_level = level; 8110 mddev->new_layout = ALGORITHM_PARITY_N; 8111 mddev->new_chunk_sectors = mddev->chunk_sectors; 8112 mddev->raid_disks += 1; 8113 mddev->delta_disks = 1; 8114 /* make sure it will be not marked as dirty */ 8115 mddev->recovery_cp = MaxSector; 8116 8117 return setup_conf(mddev); 8118 } 8119 8120 static void *raid5_takeover_raid1(struct mddev *mddev) 8121 { 8122 int chunksect; 8123 void *ret; 8124 8125 if (mddev->raid_disks != 2 || 8126 mddev->degraded > 1) 8127 return ERR_PTR(-EINVAL); 8128 8129 /* Should check if there are write-behind devices? 
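* The conversion below starts from a 64K chunk and halves it until it
* exactly divides the array size; if even STRIPE_SIZE does not fit,
* the takeover is refused.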
*/ 8130 8131 chunksect = 64*2; /* 64K by default */ 8132 8133 /* The array must be an exact multiple of chunksize */ 8134 while (chunksect && (mddev->array_sectors & (chunksect-1))) 8135 chunksect >>= 1; 8136 8137 if ((chunksect<<9) < STRIPE_SIZE) 8138 /* array size does not allow a suitable chunk size */ 8139 return ERR_PTR(-EINVAL); 8140 8141 mddev->new_level = 5; 8142 mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC; 8143 mddev->new_chunk_sectors = chunksect; 8144 8145 ret = setup_conf(mddev); 8146 if (!IS_ERR(ret)) 8147 mddev_clear_unsupported_flags(mddev, 8148 UNSUPPORTED_MDDEV_FLAGS); 8149 return ret; 8150 } 8151 8152 static void *raid5_takeover_raid6(struct mddev *mddev) 8153 { 8154 int new_layout; 8155 8156 switch (mddev->layout) { 8157 case ALGORITHM_LEFT_ASYMMETRIC_6: 8158 new_layout = ALGORITHM_LEFT_ASYMMETRIC; 8159 break; 8160 case ALGORITHM_RIGHT_ASYMMETRIC_6: 8161 new_layout = ALGORITHM_RIGHT_ASYMMETRIC; 8162 break; 8163 case ALGORITHM_LEFT_SYMMETRIC_6: 8164 new_layout = ALGORITHM_LEFT_SYMMETRIC; 8165 break; 8166 case ALGORITHM_RIGHT_SYMMETRIC_6: 8167 new_layout = ALGORITHM_RIGHT_SYMMETRIC; 8168 break; 8169 case ALGORITHM_PARITY_0_6: 8170 new_layout = ALGORITHM_PARITY_0; 8171 break; 8172 case ALGORITHM_PARITY_N: 8173 new_layout = ALGORITHM_PARITY_N; 8174 break; 8175 default: 8176 return ERR_PTR(-EINVAL); 8177 } 8178 mddev->new_level = 5; 8179 mddev->new_layout = new_layout; 8180 mddev->delta_disks = -1; 8181 mddev->raid_disks -= 1; 8182 return setup_conf(mddev); 8183 } 8184 8185 static int raid5_check_reshape(struct mddev *mddev) 8186 { 8187 /* For a 2-drive array, the layout and chunk size can be changed 8188 * immediately as not restriping is needed. 8189 * For larger arrays we record the new value - after validation 8190 * to be used by a reshape pass. 8191 */ 8192 struct r5conf *conf = mddev->private; 8193 int new_chunk = mddev->new_chunk_sectors; 8194 8195 if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout)) 8196 return -EINVAL; 8197 if (new_chunk > 0) { 8198 if (!is_power_of_2(new_chunk)) 8199 return -EINVAL; 8200 if (new_chunk < (PAGE_SIZE>>9)) 8201 return -EINVAL; 8202 if (mddev->array_sectors & (new_chunk-1)) 8203 /* not factor of array size */ 8204 return -EINVAL; 8205 } 8206 8207 /* They look valid */ 8208 8209 if (mddev->raid_disks == 2) { 8210 /* can make the change immediately */ 8211 if (mddev->new_layout >= 0) { 8212 conf->algorithm = mddev->new_layout; 8213 mddev->layout = mddev->new_layout; 8214 } 8215 if (new_chunk > 0) { 8216 conf->chunk_sectors = new_chunk ; 8217 mddev->chunk_sectors = new_chunk; 8218 } 8219 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 8220 md_wakeup_thread(mddev->thread); 8221 } 8222 return check_reshape(mddev); 8223 } 8224 8225 static int raid6_check_reshape(struct mddev *mddev) 8226 { 8227 int new_chunk = mddev->new_chunk_sectors; 8228 8229 if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout)) 8230 return -EINVAL; 8231 if (new_chunk > 0) { 8232 if (!is_power_of_2(new_chunk)) 8233 return -EINVAL; 8234 if (new_chunk < (PAGE_SIZE >> 9)) 8235 return -EINVAL; 8236 if (mddev->array_sectors & (new_chunk-1)) 8237 /* not factor of array size */ 8238 return -EINVAL; 8239 } 8240 8241 /* They look valid */ 8242 return check_reshape(mddev); 8243 } 8244 8245 static void *raid5_takeover(struct mddev *mddev) 8246 { 8247 /* raid5 can take over: 8248 * raid0 - if there is only one strip zone - make it a raid4 layout 8249 * raid1 - if there are two drives. 
We need to know the chunk size 8250 * raid4 - trivial - just use a raid4 layout. 8251 * raid6 - Providing it is a *_6 layout 8252 */ 8253 if (mddev->level == 0) 8254 return raid45_takeover_raid0(mddev, 5); 8255 if (mddev->level == 1) 8256 return raid5_takeover_raid1(mddev); 8257 if (mddev->level == 4) { 8258 mddev->new_layout = ALGORITHM_PARITY_N; 8259 mddev->new_level = 5; 8260 return setup_conf(mddev); 8261 } 8262 if (mddev->level == 6) 8263 return raid5_takeover_raid6(mddev); 8264 8265 return ERR_PTR(-EINVAL); 8266 } 8267 8268 static void *raid4_takeover(struct mddev *mddev) 8269 { 8270 /* raid4 can take over: 8271 * raid0 - if there is only one strip zone 8272 * raid5 - if layout is right 8273 */ 8274 if (mddev->level == 0) 8275 return raid45_takeover_raid0(mddev, 4); 8276 if (mddev->level == 5 && 8277 mddev->layout == ALGORITHM_PARITY_N) { 8278 mddev->new_layout = 0; 8279 mddev->new_level = 4; 8280 return setup_conf(mddev); 8281 } 8282 return ERR_PTR(-EINVAL); 8283 } 8284 8285 static struct md_personality raid5_personality; 8286 8287 static void *raid6_takeover(struct mddev *mddev) 8288 { 8289 /* Currently can only take over a raid5. We map the 8290 * personality to an equivalent raid6 personality 8291 * with the Q block at the end. 8292 */ 8293 int new_layout; 8294 8295 if (mddev->pers != &raid5_personality) 8296 return ERR_PTR(-EINVAL); 8297 if (mddev->degraded > 1) 8298 return ERR_PTR(-EINVAL); 8299 if (mddev->raid_disks > 253) 8300 return ERR_PTR(-EINVAL); 8301 if (mddev->raid_disks < 3) 8302 return ERR_PTR(-EINVAL); 8303 8304 switch (mddev->layout) { 8305 case ALGORITHM_LEFT_ASYMMETRIC: 8306 new_layout = ALGORITHM_LEFT_ASYMMETRIC_6; 8307 break; 8308 case ALGORITHM_RIGHT_ASYMMETRIC: 8309 new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6; 8310 break; 8311 case ALGORITHM_LEFT_SYMMETRIC: 8312 new_layout = ALGORITHM_LEFT_SYMMETRIC_6; 8313 break; 8314 case ALGORITHM_RIGHT_SYMMETRIC: 8315 new_layout = ALGORITHM_RIGHT_SYMMETRIC_6; 8316 break; 8317 case ALGORITHM_PARITY_0: 8318 new_layout = ALGORITHM_PARITY_0_6; 8319 break; 8320 case ALGORITHM_PARITY_N: 8321 new_layout = ALGORITHM_PARITY_N; 8322 break; 8323 default: 8324 return ERR_PTR(-EINVAL); 8325 } 8326 mddev->new_level = 6; 8327 mddev->new_layout = new_layout; 8328 mddev->delta_disks = 1; 8329 mddev->raid_disks += 1; 8330 return setup_conf(mddev); 8331 } 8332 8333 static int raid5_change_consistency_policy(struct mddev *mddev, const char *buf) 8334 { 8335 struct r5conf *conf; 8336 int err; 8337 8338 err = mddev_lock(mddev); 8339 if (err) 8340 return err; 8341 conf = mddev->private; 8342 if (!conf) { 8343 mddev_unlock(mddev); 8344 return -ENODEV; 8345 } 8346 8347 if (strncmp(buf, "ppl", 3) == 0) { 8348 /* ppl only works with RAID 5 */ 8349 if (!raid5_has_ppl(conf) && conf->level == 5) { 8350 err = log_init(conf, NULL, true); 8351 if (!err) { 8352 err = resize_stripes(conf, conf->pool_size); 8353 if (err) 8354 log_exit(conf); 8355 } 8356 } else 8357 err = -EINVAL; 8358 } else if (strncmp(buf, "resync", 6) == 0) { 8359 if (raid5_has_ppl(conf)) { 8360 mddev_suspend(mddev); 8361 log_exit(conf); 8362 mddev_resume(mddev); 8363 err = resize_stripes(conf, conf->pool_size); 8364 } else if (test_bit(MD_HAS_JOURNAL, &conf->mddev->flags) && 8365 r5l_log_disk_error(conf)) { 8366 bool journal_dev_exists = false; 8367 struct md_rdev *rdev; 8368 8369 rdev_for_each(rdev, mddev) 8370 if (test_bit(Journal, &rdev->flags)) { 8371 journal_dev_exists = true; 8372 break; 8373 } 8374 8375 if (!journal_dev_exists) { 8376 mddev_suspend(mddev); 8377 
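/* The array is suspended while the journal flag is dropped so that no
 * in-flight request can observe a half-switched consistency policy.
 */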
clear_bit(MD_HAS_JOURNAL, &mddev->flags); 8378 mddev_resume(mddev); 8379 } else /* need remove journal device first */ 8380 err = -EBUSY; 8381 } else 8382 err = -EINVAL; 8383 } else { 8384 err = -EINVAL; 8385 } 8386 8387 if (!err) 8388 md_update_sb(mddev, 1); 8389 8390 mddev_unlock(mddev); 8391 8392 return err; 8393 } 8394 8395 static int raid5_start(struct mddev *mddev) 8396 { 8397 struct r5conf *conf = mddev->private; 8398 8399 return r5l_start(conf->log); 8400 } 8401 8402 static struct md_personality raid6_personality = 8403 { 8404 .name = "raid6", 8405 .level = 6, 8406 .owner = THIS_MODULE, 8407 .make_request = raid5_make_request, 8408 .run = raid5_run, 8409 .start = raid5_start, 8410 .free = raid5_free, 8411 .status = raid5_status, 8412 .error_handler = raid5_error, 8413 .hot_add_disk = raid5_add_disk, 8414 .hot_remove_disk= raid5_remove_disk, 8415 .spare_active = raid5_spare_active, 8416 .sync_request = raid5_sync_request, 8417 .resize = raid5_resize, 8418 .size = raid5_size, 8419 .check_reshape = raid6_check_reshape, 8420 .start_reshape = raid5_start_reshape, 8421 .finish_reshape = raid5_finish_reshape, 8422 .quiesce = raid5_quiesce, 8423 .takeover = raid6_takeover, 8424 .congested = raid5_congested, 8425 .change_consistency_policy = raid5_change_consistency_policy, 8426 }; 8427 static struct md_personality raid5_personality = 8428 { 8429 .name = "raid5", 8430 .level = 5, 8431 .owner = THIS_MODULE, 8432 .make_request = raid5_make_request, 8433 .run = raid5_run, 8434 .start = raid5_start, 8435 .free = raid5_free, 8436 .status = raid5_status, 8437 .error_handler = raid5_error, 8438 .hot_add_disk = raid5_add_disk, 8439 .hot_remove_disk= raid5_remove_disk, 8440 .spare_active = raid5_spare_active, 8441 .sync_request = raid5_sync_request, 8442 .resize = raid5_resize, 8443 .size = raid5_size, 8444 .check_reshape = raid5_check_reshape, 8445 .start_reshape = raid5_start_reshape, 8446 .finish_reshape = raid5_finish_reshape, 8447 .quiesce = raid5_quiesce, 8448 .takeover = raid5_takeover, 8449 .congested = raid5_congested, 8450 .change_consistency_policy = raid5_change_consistency_policy, 8451 }; 8452 8453 static struct md_personality raid4_personality = 8454 { 8455 .name = "raid4", 8456 .level = 4, 8457 .owner = THIS_MODULE, 8458 .make_request = raid5_make_request, 8459 .run = raid5_run, 8460 .start = raid5_start, 8461 .free = raid5_free, 8462 .status = raid5_status, 8463 .error_handler = raid5_error, 8464 .hot_add_disk = raid5_add_disk, 8465 .hot_remove_disk= raid5_remove_disk, 8466 .spare_active = raid5_spare_active, 8467 .sync_request = raid5_sync_request, 8468 .resize = raid5_resize, 8469 .size = raid5_size, 8470 .check_reshape = raid5_check_reshape, 8471 .start_reshape = raid5_start_reshape, 8472 .finish_reshape = raid5_finish_reshape, 8473 .quiesce = raid5_quiesce, 8474 .takeover = raid4_takeover, 8475 .congested = raid5_congested, 8476 .change_consistency_policy = raid5_change_consistency_policy, 8477 }; 8478 8479 static int __init raid5_init(void) 8480 { 8481 int ret; 8482 8483 raid5_wq = alloc_workqueue("raid5wq", 8484 WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE|WQ_SYSFS, 0); 8485 if (!raid5_wq) 8486 return -ENOMEM; 8487 8488 ret = cpuhp_setup_state_multi(CPUHP_MD_RAID5_PREPARE, 8489 "md/raid5:prepare", 8490 raid456_cpu_up_prepare, 8491 raid456_cpu_dead); 8492 if (ret) { 8493 destroy_workqueue(raid5_wq); 8494 return ret; 8495 } 8496 register_md_personality(&raid6_personality); 8497 register_md_personality(&raid5_personality); 8498 register_md_personality(&raid4_personality); 8499 
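/* The raid4, raid5 and raid6 personalities share the same request
 * handling paths; they differ essentially in layout validation and
 * takeover rules.
 */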
return 0; 8500 } 8501 8502 static void raid5_exit(void) 8503 { 8504 unregister_md_personality(&raid6_personality); 8505 unregister_md_personality(&raid5_personality); 8506 unregister_md_personality(&raid4_personality); 8507 cpuhp_remove_multi_state(CPUHP_MD_RAID5_PREPARE); 8508 destroy_workqueue(raid5_wq); 8509 } 8510 8511 module_init(raid5_init); 8512 module_exit(raid5_exit); 8513 MODULE_LICENSE("GPL"); 8514 MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD"); 8515 MODULE_ALIAS("md-personality-4"); /* RAID5 */ 8516 MODULE_ALIAS("md-raid5"); 8517 MODULE_ALIAS("md-raid4"); 8518 MODULE_ALIAS("md-level-5"); 8519 MODULE_ALIAS("md-level-4"); 8520 MODULE_ALIAS("md-personality-8"); /* RAID6 */ 8521 MODULE_ALIAS("md-raid6"); 8522 MODULE_ALIAS("md-level-6"); 8523 8524 /* This used to be two separate modules, they were: */ 8525 MODULE_ALIAS("raid5"); 8526 MODULE_ALIAS("raid6"); 8527