/*
 * raid5.c : Multiple Devices driver for Linux
 *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *	   Copyright (C) 1999, 2000 Ingo Molnar
 *	   Copyright (C) 2002, 2003 H. Peter Anvin
 *
 * RAID-4/5/6 management functions.
 * Thanks to Penguin Computing for making the RAID-6 development possible
 * by donating a test server!
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * BITMAP UNPLUGGING:
 *
 * The sequencing for updating the bitmap reliably is a little
 * subtle (and I got it wrong the first time) so it deserves some
 * explanation.
 *
 * We group bitmap updates into batches.  Each batch has a number.
 * We may write out several batches at once, but that isn't very important.
 * conf->seq_write is the number of the last batch successfully written.
 * conf->seq_flush is the number of the last batch that was closed to
 *    new additions.
 * When we discover that we will need to write to any block in a stripe
 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
 * the number of the batch it will be in. This is seq_flush+1.
 * When we are ready to do a write, if that batch hasn't been written yet,
 *   we plug the array and queue the stripe for later.
 * When an unplug happens, we increment seq_flush, thus closing the current
 * batch.
 * When we notice that seq_flush > seq_write, we write out all pending updates
 * to the bitmap, and advance seq_write to where seq_flush was.
 * This may occasionally write a bit out twice, but is sure never to
 * miss any bits.
 */
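/*
 * Illustration only, not driver code (fenced off with #if 0 so it cannot
 * affect a build): the batch sequencing described above, reduced to a
 * minimal userspace sketch.  The names seq_flush, seq_write and bm_seq
 * mirror the fields discussed in the comment; everything prefixed "toy_"
 * is invented for this example.
 */
#if 0
#include <stdio.h>

struct toy_conf {
	int seq_flush;	/* last batch closed to new additions */
	int seq_write;	/* last batch known to be safely written */
};

struct toy_stripe {
	int bm_seq;	/* batch this stripe's bitmap update belongs to */
};

/* add_stripe_bio step: the update joins the currently open batch. */
static void toy_record_update(struct toy_conf *c, struct toy_stripe *sh)
{
	sh->bm_seq = c->seq_flush + 1;
}

/* unplug step: close the current batch ... */
static void toy_unplug(struct toy_conf *c)
{
	c->seq_flush++;
}

/* ... and later write out everything up to the closed batch. */
static void toy_flush_bitmap(struct toy_conf *c)
{
	if (c->seq_flush > c->seq_write)
		c->seq_write = c->seq_flush;
}

/* Mirrors the "sh->bm_seq - conf->seq_write > 0" test used when a stripe
 * is released: the stripe may proceed only once its batch is on disk. */
static int toy_stripe_may_write(struct toy_conf *c, struct toy_stripe *sh)
{
	return sh->bm_seq - c->seq_write <= 0;
}

int main(void)
{
	struct toy_conf c = { 0, 0 };
	struct toy_stripe sh;

	toy_record_update(&c, &sh);
	printf("before unplug: may write? %d\n", toy_stripe_may_write(&c, &sh));
	toy_unplug(&c);
	toy_flush_bitmap(&c);
	printf("after flush:   may write? %d\n", toy_stripe_may_write(&c, &sh));
	return 0;
}
#endif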
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/module.h>
#include <linux/async.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/nodemask.h>
#include <linux/flex_array.h>
#include <linux/sched/signal.h>

#include <trace/events/block.h>
#include <linux/list_sort.h>

#include "md.h"
#include "raid5.h"
#include "raid0.h"
#include "bitmap.h"
#include "raid5-log.h"

#define UNSUPPORTED_MDDEV_FLAGS	(1L << MD_FAILFAST_SUPPORTED)

#define cpu_to_group(cpu) cpu_to_node(cpu)
#define ANY_GROUP NUMA_NO_NODE

static bool devices_handle_discard_safely = false;
module_param(devices_handle_discard_safely, bool, 0644);
MODULE_PARM_DESC(devices_handle_discard_safely,
		 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
static struct workqueue_struct *raid5_wq;

static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
{
	int hash = (sect >> STRIPE_SHIFT) & HASH_MASK;
	return &conf->stripe_hashtbl[hash];
}

static inline int stripe_hash_locks_hash(sector_t sect)
{
	return (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK;
}

static inline void lock_device_hash_lock(struct r5conf *conf, int hash)
{
	spin_lock_irq(conf->hash_locks + hash);
	spin_lock(&conf->device_lock);
}

static inline void unlock_device_hash_lock(struct r5conf *conf, int hash)
{
	spin_unlock(&conf->device_lock);
	spin_unlock_irq(conf->hash_locks + hash);
}

static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
{
	int i;
	spin_lock_irq(conf->hash_locks);
	for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
		spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks);
	spin_lock(&conf->device_lock);
}

static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
{
	int i;
	spin_unlock(&conf->device_lock);
	for (i = NR_STRIPE_HASH_LOCKS - 1; i; i--)
		spin_unlock(conf->hash_locks + i);
	spin_unlock_irq(conf->hash_locks);
}

/* Find first data disk in a raid6 stripe */
static inline int raid6_d0(struct stripe_head *sh)
{
	if (sh->ddf_layout)
		/* ddf always start from first device */
		return 0;
	/* md starts just after Q block */
	if (sh->qd_idx == sh->disks - 1)
		return 0;
	else
		return sh->qd_idx + 1;
}

static inline int raid6_next_disk(int disk, int raid_disks)
{
	disk++;
	return (disk < raid_disks) ? disk : 0;
}
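/*
 * Illustration only, not driver code: the syndrome-order walk that
 * raid6_d0()/raid6_next_disk() produce, shown as a standalone userspace
 * sketch.  The 6-device geometry (pd_idx = 4, qd_idx = 5) is a made-up
 * example, and only the non-DDF md layout is modelled.
 */
#if 0
#include <stdio.h>

/* Standalone copies of the two tiny helpers above, specialised to the
 * non-DDF md layout: the data walk starts just after the Q block. */
static int toy_raid6_d0(int qd_idx, int disks)
{
	return (qd_idx == disks - 1) ? 0 : qd_idx + 1;
}

static int toy_raid6_next_disk(int disk, int raid_disks)
{
	disk++;
	return (disk < raid_disks) ? disk : 0;
}

int main(void)
{
	int disks = 6, pd_idx = 4, qd_idx = 5;	/* hypothetical stripe geometry */
	int i = toy_raid6_d0(qd_idx, disks);

	/* Visits every device exactly once, wrapping after the last one. */
	do {
		printf("device %d%s\n", i,
		       i == pd_idx ? " (P)" : i == qd_idx ? " (Q)" : "");
		i = toy_raid6_next_disk(i, disks);
	} while (i != toy_raid6_d0(qd_idx, disks));
	return 0;
}
#endif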
/* When walking through the disks in a raid5, starting at raid6_d0,
 * we need to map each disk to a 'slot', where the data disks are slot
 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
 * is raid_disks-1.  This helper does that mapping.
 */
static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
			     int *count, int syndrome_disks)
{
	int slot = *count;

	if (sh->ddf_layout)
		(*count)++;
	if (idx == sh->pd_idx)
		return syndrome_disks;
	if (idx == sh->qd_idx)
		return syndrome_disks + 1;
	if (!sh->ddf_layout)
		(*count)++;
	return slot;
}

static void print_raid5_conf (struct r5conf *conf);

static int stripe_operations_active(struct stripe_head *sh)
{
	return sh->check_state || sh->reconstruct_state ||
	       test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
	       test_bit(STRIPE_COMPUTE_RUN, &sh->state);
}

static bool stripe_is_lowprio(struct stripe_head *sh)
{
	return (test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state) ||
		test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) &&
	       !test_bit(STRIPE_R5C_CACHING, &sh->state);
}

static void raid5_wakeup_stripe_thread(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	struct r5worker_group *group;
	int thread_cnt;
	int i, cpu = sh->cpu;

	if (!cpu_online(cpu)) {
		cpu = cpumask_any(cpu_online_mask);
		sh->cpu = cpu;
	}

	if (list_empty(&sh->lru)) {
		struct r5worker_group *group;
		group = conf->worker_groups + cpu_to_group(cpu);
		if (stripe_is_lowprio(sh))
			list_add_tail(&sh->lru, &group->loprio_list);
		else
			list_add_tail(&sh->lru, &group->handle_list);
		group->stripes_cnt++;
		sh->group = group;
	}

	if (conf->worker_cnt_per_group == 0) {
		md_wakeup_thread(conf->mddev->thread);
		return;
	}

	group = conf->worker_groups + cpu_to_group(sh->cpu);

	group->workers[0].working = true;
	/* at least one worker should run to avoid race */
	queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work);

	thread_cnt = group->stripes_cnt / MAX_STRIPE_BATCH - 1;
	/* wakeup more workers */
	for (i = 1; i < conf->worker_cnt_per_group && thread_cnt > 0; i++) {
		if (group->workers[i].working == false) {
			group->workers[i].working = true;
			queue_work_on(sh->cpu, raid5_wq,
				      &group->workers[i].work);
			thread_cnt--;
		}
	}
}
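/*
 * Illustration only, not driver code: the wake-up heuristic above kicks
 * one worker unconditionally and then roughly one extra worker per
 * MAX_STRIPE_BATCH queued stripes, bounded by the group size.  A sketch
 * of just that arithmetic; TOY_MAX_STRIPE_BATCH = 8 and the group size
 * of 4 are assumed values for the example.
 */
#if 0
#include <stdio.h>

#define TOY_MAX_STRIPE_BATCH	8	/* assumed batch size per worker */

/* How many *extra* workers (beyond the always-woken first one) would be
 * kicked for a given backlog and per-group worker count. */
static int toy_extra_workers(int stripes_cnt, int worker_cnt_per_group)
{
	int thread_cnt = stripes_cnt / TOY_MAX_STRIPE_BATCH - 1;
	int extra = 0, i;

	for (i = 1; i < worker_cnt_per_group && thread_cnt > 0; i++) {
		extra++;		/* worker i transitions to 'working' */
		thread_cnt--;
	}
	return extra;
}

int main(void)
{
	int backlog;

	for (backlog = 0; backlog <= 40; backlog += 8)
		printf("%2d queued stripes -> 1 + %d workers\n",
		       backlog, toy_extra_workers(backlog, 4));
	return 0;
}
#endif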
static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
			      struct list_head *temp_inactive_list)
{
	int i;
	int injournal = 0;	/* number of data pages with R5_InJournal */

	BUG_ON(!list_empty(&sh->lru));
	BUG_ON(atomic_read(&conf->active_stripes)==0);

	if (r5c_is_writeback(conf->log))
		for (i = sh->disks; i--; )
			if (test_bit(R5_InJournal, &sh->dev[i].flags))
				injournal++;
	/*
	 * In the following cases, the stripe cannot be released to cached
	 * lists. Therefore, we make the stripe write out and set
	 * STRIPE_HANDLE:
	 *   1. when quiescing in r5c write back;
	 *   2. when resync is requested for the stripe.
	 */
	if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) ||
	    (conf->quiesce && r5c_is_writeback(conf->log) &&
	     !test_bit(STRIPE_HANDLE, &sh->state) && injournal != 0)) {
		if (test_bit(STRIPE_R5C_CACHING, &sh->state))
			r5c_make_stripe_write_out(sh);
		set_bit(STRIPE_HANDLE, &sh->state);
	}

	if (test_bit(STRIPE_HANDLE, &sh->state)) {
		if (test_bit(STRIPE_DELAYED, &sh->state) &&
		    !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			list_add_tail(&sh->lru, &conf->delayed_list);
		else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
			 sh->bm_seq - conf->seq_write > 0)
			list_add_tail(&sh->lru, &conf->bitmap_list);
		else {
			clear_bit(STRIPE_DELAYED, &sh->state);
			clear_bit(STRIPE_BIT_DELAY, &sh->state);
			if (conf->worker_cnt_per_group == 0) {
				if (stripe_is_lowprio(sh))
					list_add_tail(&sh->lru,
							&conf->loprio_list);
				else
					list_add_tail(&sh->lru,
							&conf->handle_list);
			} else {
				raid5_wakeup_stripe_thread(sh);
				return;
			}
		}
		md_wakeup_thread(conf->mddev->thread);
	} else {
		BUG_ON(stripe_operations_active(sh));
		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			if (atomic_dec_return(&conf->preread_active_stripes)
			    < IO_THRESHOLD)
				md_wakeup_thread(conf->mddev->thread);
		atomic_dec(&conf->active_stripes);
		if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
			if (!r5c_is_writeback(conf->log))
				list_add_tail(&sh->lru, temp_inactive_list);
			else {
				WARN_ON(test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags));
				if (injournal == 0)
					list_add_tail(&sh->lru, temp_inactive_list);
				else if (injournal == conf->raid_disks - conf->max_degraded) {
					/* full stripe */
					if (!test_and_set_bit(STRIPE_R5C_FULL_STRIPE, &sh->state))
						atomic_inc(&conf->r5c_cached_full_stripes);
					if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state))
						atomic_dec(&conf->r5c_cached_partial_stripes);
					list_add_tail(&sh->lru, &conf->r5c_full_stripe_list);
					r5c_check_cached_full_stripe(conf);
				} else
					/*
					 * STRIPE_R5C_PARTIAL_STRIPE is set in
					 * r5c_try_caching_write(). No need to
					 * set it again.
					 */
					list_add_tail(&sh->lru, &conf->r5c_partial_stripe_list);
			}
		}
	}
}

static void __release_stripe(struct r5conf *conf, struct stripe_head *sh,
			     struct list_head *temp_inactive_list)
{
	if (atomic_dec_and_test(&sh->count))
		do_release_stripe(conf, sh, temp_inactive_list);
}
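/*
 * Illustration only, not driver code: with a write-back journal,
 * do_release_stripe() files a released stripe by how many of its data
 * pages still live only in the journal - none, all, or some.  A compact
 * standalone restatement of that decision; the "toy_" names are invented
 * for this example.
 */
#if 0
#include <stdio.h>

enum toy_r5c_list { TOY_INACTIVE, TOY_FULL_STRIPE, TOY_PARTIAL_STRIPE };

/* Mirror of the injournal classification above; data_disks corresponds
 * to conf->raid_disks - conf->max_degraded. */
static enum toy_r5c_list toy_classify(int injournal, int data_disks)
{
	if (injournal == 0)
		return TOY_INACTIVE;
	if (injournal == data_disks)
		return TOY_FULL_STRIPE;
	return TOY_PARTIAL_STRIPE;
}

int main(void)
{
	static const char *name[] = { "inactive", "full-stripe", "partial" };
	int data_disks = 4;	/* e.g. a 6-disk RAID-6: 6 - 2 */
	int injournal;

	for (injournal = 0; injournal <= data_disks; injournal++)
		printf("injournal=%d -> %s list\n",
		       injournal, name[toy_classify(injournal, data_disks)]);
	return 0;
}
#endif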
/*
 * @hash may be NR_STRIPE_HASH_LOCKS, in which case temp_inactive_list is
 * an array with one inactive list per hash lock.
 *
 * Be careful: only one task can add/delete stripes from temp_inactive_list
 * at a given time. Adding stripes only takes the device lock, while deleting
 * stripes only takes the hash lock.
 */
static void release_inactive_stripe_list(struct r5conf *conf,
					 struct list_head *temp_inactive_list,
					 int hash)
{
	int size;
	bool do_wakeup = false;
	unsigned long flags;

	if (hash == NR_STRIPE_HASH_LOCKS) {
		size = NR_STRIPE_HASH_LOCKS;
		hash = NR_STRIPE_HASH_LOCKS - 1;
	} else
		size = 1;
	while (size) {
		struct list_head *list = &temp_inactive_list[size - 1];

		/*
		 * We don't hold any lock here yet, raid5_get_active_stripe() might
		 * remove stripes from the list
		 */
		if (!list_empty_careful(list)) {
			spin_lock_irqsave(conf->hash_locks + hash, flags);
			if (list_empty(conf->inactive_list + hash) &&
			    !list_empty(list))
				atomic_dec(&conf->empty_inactive_list_nr);
			list_splice_tail_init(list, conf->inactive_list + hash);
			do_wakeup = true;
			spin_unlock_irqrestore(conf->hash_locks + hash, flags);
		}
		size--;
		hash--;
	}

	if (do_wakeup) {
		wake_up(&conf->wait_for_stripe);
		if (atomic_read(&conf->active_stripes) == 0)
			wake_up(&conf->wait_for_quiescent);
		if (conf->retry_read_aligned)
			md_wakeup_thread(conf->mddev->thread);
	}
}

/* should hold conf->device_lock already */
static int release_stripe_list(struct r5conf *conf,
			       struct list_head *temp_inactive_list)
{
	struct stripe_head *sh, *t;
	int count = 0;
	struct llist_node *head;

	head = llist_del_all(&conf->released_stripes);
	head = llist_reverse_order(head);
	llist_for_each_entry_safe(sh, t, head, release_list) {
		int hash;

		/* sh could be re-added after STRIPE_ON_RELEASE_LIST is cleared */
		smp_mb();
		clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state);
		/*
		 * Don't worry if the bit is set here, because if it is set
		 * again, the count is always > 1. This is true for the
		 * STRIPE_ON_UNPLUG_LIST bit too.
		 */
		hash = sh->hash_lock_index;
		__release_stripe(conf, sh, &temp_inactive_list[hash]);
		count++;
	}

	return count;
}

void raid5_release_stripe(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	unsigned long flags;
	struct list_head list;
	int hash;
	bool wakeup;

	/* Avoid release_list until the last reference. */
	if (atomic_add_unless(&sh->count, -1, 1))
		return;

	if (unlikely(!conf->mddev->thread) ||
	    test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state))
		goto slow_path;
	wakeup = llist_add(&sh->release_list, &conf->released_stripes);
	if (wakeup)
		md_wakeup_thread(conf->mddev->thread);
	return;
slow_path:
	local_irq_save(flags);
	/* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */
	if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
		INIT_LIST_HEAD(&list);
		hash = sh->hash_lock_index;
		do_release_stripe(conf, sh, &list);
		spin_unlock(&conf->device_lock);
		release_inactive_stripe_list(conf, &list, hash);
	}
	local_irq_restore(flags);
}

static inline void remove_hash(struct stripe_head *sh)
{
	pr_debug("remove_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_del_init(&sh->hash);
}

static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh)
{
	struct hlist_head *hp = stripe_hash(conf, sh->sector);

	pr_debug("insert_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_add_head(&sh->hash, hp);
}

/* find an idle stripe, make sure it is unhashed, and return it.
*/ 444 static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash) 445 { 446 struct stripe_head *sh = NULL; 447 struct list_head *first; 448 449 if (list_empty(conf->inactive_list + hash)) 450 goto out; 451 first = (conf->inactive_list + hash)->next; 452 sh = list_entry(first, struct stripe_head, lru); 453 list_del_init(first); 454 remove_hash(sh); 455 atomic_inc(&conf->active_stripes); 456 BUG_ON(hash != sh->hash_lock_index); 457 if (list_empty(conf->inactive_list + hash)) 458 atomic_inc(&conf->empty_inactive_list_nr); 459 out: 460 return sh; 461 } 462 463 static void shrink_buffers(struct stripe_head *sh) 464 { 465 struct page *p; 466 int i; 467 int num = sh->raid_conf->pool_size; 468 469 for (i = 0; i < num ; i++) { 470 WARN_ON(sh->dev[i].page != sh->dev[i].orig_page); 471 p = sh->dev[i].page; 472 if (!p) 473 continue; 474 sh->dev[i].page = NULL; 475 put_page(p); 476 } 477 } 478 479 static int grow_buffers(struct stripe_head *sh, gfp_t gfp) 480 { 481 int i; 482 int num = sh->raid_conf->pool_size; 483 484 for (i = 0; i < num; i++) { 485 struct page *page; 486 487 if (!(page = alloc_page(gfp))) { 488 return 1; 489 } 490 sh->dev[i].page = page; 491 sh->dev[i].orig_page = page; 492 } 493 494 return 0; 495 } 496 497 static void raid5_build_block(struct stripe_head *sh, int i, int previous); 498 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, 499 struct stripe_head *sh); 500 501 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) 502 { 503 struct r5conf *conf = sh->raid_conf; 504 int i, seq; 505 506 BUG_ON(atomic_read(&sh->count) != 0); 507 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); 508 BUG_ON(stripe_operations_active(sh)); 509 BUG_ON(sh->batch_head); 510 511 pr_debug("init_stripe called, stripe %llu\n", 512 (unsigned long long)sector); 513 retry: 514 seq = read_seqcount_begin(&conf->gen_lock); 515 sh->generation = conf->generation - previous; 516 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks; 517 sh->sector = sector; 518 stripe_set_idx(sector, conf, previous, sh); 519 sh->state = 0; 520 521 for (i = sh->disks; i--; ) { 522 struct r5dev *dev = &sh->dev[i]; 523 524 if (dev->toread || dev->read || dev->towrite || dev->written || 525 test_bit(R5_LOCKED, &dev->flags)) { 526 pr_err("sector=%llx i=%d %p %p %p %p %d\n", 527 (unsigned long long)sh->sector, i, dev->toread, 528 dev->read, dev->towrite, dev->written, 529 test_bit(R5_LOCKED, &dev->flags)); 530 WARN_ON(1); 531 } 532 dev->flags = 0; 533 raid5_build_block(sh, i, previous); 534 } 535 if (read_seqcount_retry(&conf->gen_lock, seq)) 536 goto retry; 537 sh->overwrite_disks = 0; 538 insert_hash(conf, sh); 539 sh->cpu = smp_processor_id(); 540 set_bit(STRIPE_BATCH_READY, &sh->state); 541 } 542 543 static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector, 544 short generation) 545 { 546 struct stripe_head *sh; 547 548 pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector); 549 hlist_for_each_entry(sh, stripe_hash(conf, sector), hash) 550 if (sh->sector == sector && sh->generation == generation) 551 return sh; 552 pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector); 553 return NULL; 554 } 555 556 /* 557 * Need to check if array has failed when deciding whether to: 558 * - start an array 559 * - remove non-faulty devices 560 * - add a spare 561 * - allow a reshape 562 * This determination is simple when no reshape is happening. 
563 * However if there is a reshape, we need to carefully check 564 * both the before and after sections. 565 * This is because some failed devices may only affect one 566 * of the two sections, and some non-in_sync devices may 567 * be insync in the section most affected by failed devices. 568 */ 569 int raid5_calc_degraded(struct r5conf *conf) 570 { 571 int degraded, degraded2; 572 int i; 573 574 rcu_read_lock(); 575 degraded = 0; 576 for (i = 0; i < conf->previous_raid_disks; i++) { 577 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); 578 if (rdev && test_bit(Faulty, &rdev->flags)) 579 rdev = rcu_dereference(conf->disks[i].replacement); 580 if (!rdev || test_bit(Faulty, &rdev->flags)) 581 degraded++; 582 else if (test_bit(In_sync, &rdev->flags)) 583 ; 584 else 585 /* not in-sync or faulty. 586 * If the reshape increases the number of devices, 587 * this is being recovered by the reshape, so 588 * this 'previous' section is not in_sync. 589 * If the number of devices is being reduced however, 590 * the device can only be part of the array if 591 * we are reverting a reshape, so this section will 592 * be in-sync. 593 */ 594 if (conf->raid_disks >= conf->previous_raid_disks) 595 degraded++; 596 } 597 rcu_read_unlock(); 598 if (conf->raid_disks == conf->previous_raid_disks) 599 return degraded; 600 rcu_read_lock(); 601 degraded2 = 0; 602 for (i = 0; i < conf->raid_disks; i++) { 603 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); 604 if (rdev && test_bit(Faulty, &rdev->flags)) 605 rdev = rcu_dereference(conf->disks[i].replacement); 606 if (!rdev || test_bit(Faulty, &rdev->flags)) 607 degraded2++; 608 else if (test_bit(In_sync, &rdev->flags)) 609 ; 610 else 611 /* not in-sync or faulty. 612 * If reshape increases the number of devices, this 613 * section has already been recovered, else it 614 * almost certainly hasn't. 
615 */ 616 if (conf->raid_disks <= conf->previous_raid_disks) 617 degraded2++; 618 } 619 rcu_read_unlock(); 620 if (degraded2 > degraded) 621 return degraded2; 622 return degraded; 623 } 624 625 static int has_failed(struct r5conf *conf) 626 { 627 int degraded; 628 629 if (conf->mddev->reshape_position == MaxSector) 630 return conf->mddev->degraded > conf->max_degraded; 631 632 degraded = raid5_calc_degraded(conf); 633 if (degraded > conf->max_degraded) 634 return 1; 635 return 0; 636 } 637 638 struct stripe_head * 639 raid5_get_active_stripe(struct r5conf *conf, sector_t sector, 640 int previous, int noblock, int noquiesce) 641 { 642 struct stripe_head *sh; 643 int hash = stripe_hash_locks_hash(sector); 644 int inc_empty_inactive_list_flag; 645 646 pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector); 647 648 spin_lock_irq(conf->hash_locks + hash); 649 650 do { 651 wait_event_lock_irq(conf->wait_for_quiescent, 652 conf->quiesce == 0 || noquiesce, 653 *(conf->hash_locks + hash)); 654 sh = __find_stripe(conf, sector, conf->generation - previous); 655 if (!sh) { 656 if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) { 657 sh = get_free_stripe(conf, hash); 658 if (!sh && !test_bit(R5_DID_ALLOC, 659 &conf->cache_state)) 660 set_bit(R5_ALLOC_MORE, 661 &conf->cache_state); 662 } 663 if (noblock && sh == NULL) 664 break; 665 666 r5c_check_stripe_cache_usage(conf); 667 if (!sh) { 668 set_bit(R5_INACTIVE_BLOCKED, 669 &conf->cache_state); 670 r5l_wake_reclaim(conf->log, 0); 671 wait_event_lock_irq( 672 conf->wait_for_stripe, 673 !list_empty(conf->inactive_list + hash) && 674 (atomic_read(&conf->active_stripes) 675 < (conf->max_nr_stripes * 3 / 4) 676 || !test_bit(R5_INACTIVE_BLOCKED, 677 &conf->cache_state)), 678 *(conf->hash_locks + hash)); 679 clear_bit(R5_INACTIVE_BLOCKED, 680 &conf->cache_state); 681 } else { 682 init_stripe(sh, sector, previous); 683 atomic_inc(&sh->count); 684 } 685 } else if (!atomic_inc_not_zero(&sh->count)) { 686 spin_lock(&conf->device_lock); 687 if (!atomic_read(&sh->count)) { 688 if (!test_bit(STRIPE_HANDLE, &sh->state)) 689 atomic_inc(&conf->active_stripes); 690 BUG_ON(list_empty(&sh->lru) && 691 !test_bit(STRIPE_EXPANDING, &sh->state)); 692 inc_empty_inactive_list_flag = 0; 693 if (!list_empty(conf->inactive_list + hash)) 694 inc_empty_inactive_list_flag = 1; 695 list_del_init(&sh->lru); 696 if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag) 697 atomic_inc(&conf->empty_inactive_list_nr); 698 if (sh->group) { 699 sh->group->stripes_cnt--; 700 sh->group = NULL; 701 } 702 } 703 atomic_inc(&sh->count); 704 spin_unlock(&conf->device_lock); 705 } 706 } while (sh == NULL); 707 708 spin_unlock_irq(conf->hash_locks + hash); 709 return sh; 710 } 711 712 static bool is_full_stripe_write(struct stripe_head *sh) 713 { 714 BUG_ON(sh->overwrite_disks > (sh->disks - sh->raid_conf->max_degraded)); 715 return sh->overwrite_disks == (sh->disks - sh->raid_conf->max_degraded); 716 } 717 718 static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) 719 { 720 if (sh1 > sh2) { 721 spin_lock_irq(&sh2->stripe_lock); 722 spin_lock_nested(&sh1->stripe_lock, 1); 723 } else { 724 spin_lock_irq(&sh1->stripe_lock); 725 spin_lock_nested(&sh2->stripe_lock, 1); 726 } 727 } 728 729 static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) 730 { 731 spin_unlock(&sh1->stripe_lock); 732 spin_unlock_irq(&sh2->stripe_lock); 733 } 734 735 /* Only freshly new full stripe normal write stripe can be added to a batch list */ 736 
static bool stripe_can_batch(struct stripe_head *sh) 737 { 738 struct r5conf *conf = sh->raid_conf; 739 740 if (conf->log || raid5_has_ppl(conf)) 741 return false; 742 return test_bit(STRIPE_BATCH_READY, &sh->state) && 743 !test_bit(STRIPE_BITMAP_PENDING, &sh->state) && 744 is_full_stripe_write(sh); 745 } 746 747 /* we only do back search */ 748 static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh) 749 { 750 struct stripe_head *head; 751 sector_t head_sector, tmp_sec; 752 int hash; 753 int dd_idx; 754 int inc_empty_inactive_list_flag; 755 756 /* Don't cross chunks, so stripe pd_idx/qd_idx is the same */ 757 tmp_sec = sh->sector; 758 if (!sector_div(tmp_sec, conf->chunk_sectors)) 759 return; 760 head_sector = sh->sector - STRIPE_SECTORS; 761 762 hash = stripe_hash_locks_hash(head_sector); 763 spin_lock_irq(conf->hash_locks + hash); 764 head = __find_stripe(conf, head_sector, conf->generation); 765 if (head && !atomic_inc_not_zero(&head->count)) { 766 spin_lock(&conf->device_lock); 767 if (!atomic_read(&head->count)) { 768 if (!test_bit(STRIPE_HANDLE, &head->state)) 769 atomic_inc(&conf->active_stripes); 770 BUG_ON(list_empty(&head->lru) && 771 !test_bit(STRIPE_EXPANDING, &head->state)); 772 inc_empty_inactive_list_flag = 0; 773 if (!list_empty(conf->inactive_list + hash)) 774 inc_empty_inactive_list_flag = 1; 775 list_del_init(&head->lru); 776 if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag) 777 atomic_inc(&conf->empty_inactive_list_nr); 778 if (head->group) { 779 head->group->stripes_cnt--; 780 head->group = NULL; 781 } 782 } 783 atomic_inc(&head->count); 784 spin_unlock(&conf->device_lock); 785 } 786 spin_unlock_irq(conf->hash_locks + hash); 787 788 if (!head) 789 return; 790 if (!stripe_can_batch(head)) 791 goto out; 792 793 lock_two_stripes(head, sh); 794 /* clear_batch_ready clear the flag */ 795 if (!stripe_can_batch(head) || !stripe_can_batch(sh)) 796 goto unlock_out; 797 798 if (sh->batch_head) 799 goto unlock_out; 800 801 dd_idx = 0; 802 while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) 803 dd_idx++; 804 if (head->dev[dd_idx].towrite->bi_opf != sh->dev[dd_idx].towrite->bi_opf || 805 bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite)) 806 goto unlock_out; 807 808 if (head->batch_head) { 809 spin_lock(&head->batch_head->batch_lock); 810 /* This batch list is already running */ 811 if (!stripe_can_batch(head)) { 812 spin_unlock(&head->batch_head->batch_lock); 813 goto unlock_out; 814 } 815 816 /* 817 * at this point, head's BATCH_READY could be cleared, but we 818 * can still add the stripe to batch list 819 */ 820 list_add(&sh->batch_list, &head->batch_list); 821 spin_unlock(&head->batch_head->batch_lock); 822 823 sh->batch_head = head->batch_head; 824 } else { 825 head->batch_head = head; 826 sh->batch_head = head->batch_head; 827 spin_lock(&head->batch_lock); 828 list_add_tail(&sh->batch_list, &head->batch_list); 829 spin_unlock(&head->batch_lock); 830 } 831 832 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 833 if (atomic_dec_return(&conf->preread_active_stripes) 834 < IO_THRESHOLD) 835 md_wakeup_thread(conf->mddev->thread); 836 837 if (test_and_clear_bit(STRIPE_BIT_DELAY, &sh->state)) { 838 int seq = sh->bm_seq; 839 if (test_bit(STRIPE_BIT_DELAY, &sh->batch_head->state) && 840 sh->batch_head->bm_seq > seq) 841 seq = sh->batch_head->bm_seq; 842 set_bit(STRIPE_BIT_DELAY, &sh->batch_head->state); 843 sh->batch_head->bm_seq = seq; 844 } 845 846 atomic_inc(&sh->count); 847 unlock_out: 848 
unlock_two_stripes(head, sh); 849 out: 850 raid5_release_stripe(head); 851 } 852 853 /* Determine if 'data_offset' or 'new_data_offset' should be used 854 * in this stripe_head. 855 */ 856 static int use_new_offset(struct r5conf *conf, struct stripe_head *sh) 857 { 858 sector_t progress = conf->reshape_progress; 859 /* Need a memory barrier to make sure we see the value 860 * of conf->generation, or ->data_offset that was set before 861 * reshape_progress was updated. 862 */ 863 smp_rmb(); 864 if (progress == MaxSector) 865 return 0; 866 if (sh->generation == conf->generation - 1) 867 return 0; 868 /* We are in a reshape, and this is a new-generation stripe, 869 * so use new_data_offset. 870 */ 871 return 1; 872 } 873 874 static void dispatch_bio_list(struct bio_list *tmp) 875 { 876 struct bio *bio; 877 878 while ((bio = bio_list_pop(tmp))) 879 generic_make_request(bio); 880 } 881 882 static int cmp_stripe(void *priv, struct list_head *a, struct list_head *b) 883 { 884 const struct r5pending_data *da = list_entry(a, 885 struct r5pending_data, sibling); 886 const struct r5pending_data *db = list_entry(b, 887 struct r5pending_data, sibling); 888 if (da->sector > db->sector) 889 return 1; 890 if (da->sector < db->sector) 891 return -1; 892 return 0; 893 } 894 895 static void dispatch_defer_bios(struct r5conf *conf, int target, 896 struct bio_list *list) 897 { 898 struct r5pending_data *data; 899 struct list_head *first, *next = NULL; 900 int cnt = 0; 901 902 if (conf->pending_data_cnt == 0) 903 return; 904 905 list_sort(NULL, &conf->pending_list, cmp_stripe); 906 907 first = conf->pending_list.next; 908 909 /* temporarily move the head */ 910 if (conf->next_pending_data) 911 list_move_tail(&conf->pending_list, 912 &conf->next_pending_data->sibling); 913 914 while (!list_empty(&conf->pending_list)) { 915 data = list_first_entry(&conf->pending_list, 916 struct r5pending_data, sibling); 917 if (&data->sibling == first) 918 first = data->sibling.next; 919 next = data->sibling.next; 920 921 bio_list_merge(list, &data->bios); 922 list_move(&data->sibling, &conf->free_list); 923 cnt++; 924 if (cnt >= target) 925 break; 926 } 927 conf->pending_data_cnt -= cnt; 928 BUG_ON(conf->pending_data_cnt < 0 || cnt < target); 929 930 if (next != &conf->pending_list) 931 conf->next_pending_data = list_entry(next, 932 struct r5pending_data, sibling); 933 else 934 conf->next_pending_data = NULL; 935 /* list isn't empty */ 936 if (first != &conf->pending_list) 937 list_move_tail(&conf->pending_list, first); 938 } 939 940 static void flush_deferred_bios(struct r5conf *conf) 941 { 942 struct bio_list tmp = BIO_EMPTY_LIST; 943 944 if (conf->pending_data_cnt == 0) 945 return; 946 947 spin_lock(&conf->pending_bios_lock); 948 dispatch_defer_bios(conf, conf->pending_data_cnt, &tmp); 949 BUG_ON(conf->pending_data_cnt != 0); 950 spin_unlock(&conf->pending_bios_lock); 951 952 dispatch_bio_list(&tmp); 953 } 954 955 static void defer_issue_bios(struct r5conf *conf, sector_t sector, 956 struct bio_list *bios) 957 { 958 struct bio_list tmp = BIO_EMPTY_LIST; 959 struct r5pending_data *ent; 960 961 spin_lock(&conf->pending_bios_lock); 962 ent = list_first_entry(&conf->free_list, struct r5pending_data, 963 sibling); 964 list_move_tail(&ent->sibling, &conf->pending_list); 965 ent->sector = sector; 966 bio_list_init(&ent->bios); 967 bio_list_merge(&ent->bios, bios); 968 conf->pending_data_cnt++; 969 if (conf->pending_data_cnt >= PENDING_IO_MAX) 970 dispatch_defer_bios(conf, PENDING_IO_ONE_FLUSH, &tmp); 971 972 
	spin_unlock(&conf->pending_bios_lock);

	dispatch_bio_list(&tmp);
}

static void
raid5_end_read_request(struct bio *bi);
static void
raid5_end_write_request(struct bio *bi);

static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
{
	struct r5conf *conf = sh->raid_conf;
	int i, disks = sh->disks;
	struct stripe_head *head_sh = sh;
	struct bio_list pending_bios = BIO_EMPTY_LIST;
	bool should_defer;

	might_sleep();

	if (log_stripe(sh, s) == 0)
		return;

	should_defer = conf->batch_bio_dispatch && conf->group_cnt;

	for (i = disks; i--; ) {
		int op, op_flags = 0;
		int replace_only = 0;
		struct bio *bi, *rbi;
		struct md_rdev *rdev, *rrdev = NULL;

		sh = head_sh;
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
			op = REQ_OP_WRITE;
			if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
				op_flags = REQ_FUA;
			if (test_bit(R5_Discard, &sh->dev[i].flags))
				op = REQ_OP_DISCARD;
		} else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
			op = REQ_OP_READ;
		else if (test_and_clear_bit(R5_WantReplace,
					    &sh->dev[i].flags)) {
			op = REQ_OP_WRITE;
			replace_only = 1;
		} else
			continue;
		if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags))
			op_flags |= REQ_SYNC;

again:
		bi = &sh->dev[i].req;
		rbi = &sh->dev[i].rreq; /* For writing to replacement */

		rcu_read_lock();
		rrdev = rcu_dereference(conf->disks[i].replacement);
		smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (!rdev) {
			rdev = rrdev;
			rrdev = NULL;
		}
		if (op_is_write(op)) {
			if (replace_only)
				rdev = NULL;
			if (rdev == rrdev)
				/* We raced and saw duplicates */
				rrdev = NULL;
		} else {
			if (test_bit(R5_ReadRepl, &head_sh->dev[i].flags) && rrdev)
				rdev = rrdev;
			rrdev = NULL;
		}

		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = NULL;
		if (rdev)
			atomic_inc(&rdev->nr_pending);
		if (rrdev && test_bit(Faulty, &rrdev->flags))
			rrdev = NULL;
		if (rrdev)
			atomic_inc(&rrdev->nr_pending);
		rcu_read_unlock();

		/* We have already checked bad blocks for reads. Now
		 * need to check for writes. We never accept write errors
		 * on the replacement, so we don't need to check rrdev.
		 */
		while (op_is_write(op) && rdev &&
		       test_bit(WriteErrorSeen, &rdev->flags)) {
			sector_t first_bad;
			int bad_sectors;
			int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
					      &first_bad, &bad_sectors);
			if (!bad)
				break;

			if (bad < 0) {
				set_bit(BlockedBadBlocks, &rdev->flags);
				if (!conf->mddev->external &&
				    conf->mddev->sb_flags) {
					/* It is very unlikely, but we might
					 * still need to write out the
					 * bad block log - better give it
					 * a chance */
					md_check_recovery(conf->mddev);
				}
				/*
				 * Because md_wait_for_blocked_rdev
				 * will dec nr_pending, we must
				 * increment it first.
1082 */ 1083 atomic_inc(&rdev->nr_pending); 1084 md_wait_for_blocked_rdev(rdev, conf->mddev); 1085 } else { 1086 /* Acknowledged bad block - skip the write */ 1087 rdev_dec_pending(rdev, conf->mddev); 1088 rdev = NULL; 1089 } 1090 } 1091 1092 if (rdev) { 1093 if (s->syncing || s->expanding || s->expanded 1094 || s->replacing) 1095 md_sync_acct(rdev->bdev, STRIPE_SECTORS); 1096 1097 set_bit(STRIPE_IO_STARTED, &sh->state); 1098 1099 bi->bi_bdev = rdev->bdev; 1100 bio_set_op_attrs(bi, op, op_flags); 1101 bi->bi_end_io = op_is_write(op) 1102 ? raid5_end_write_request 1103 : raid5_end_read_request; 1104 bi->bi_private = sh; 1105 1106 pr_debug("%s: for %llu schedule op %d on disc %d\n", 1107 __func__, (unsigned long long)sh->sector, 1108 bi->bi_opf, i); 1109 atomic_inc(&sh->count); 1110 if (sh != head_sh) 1111 atomic_inc(&head_sh->count); 1112 if (use_new_offset(conf, sh)) 1113 bi->bi_iter.bi_sector = (sh->sector 1114 + rdev->new_data_offset); 1115 else 1116 bi->bi_iter.bi_sector = (sh->sector 1117 + rdev->data_offset); 1118 if (test_bit(R5_ReadNoMerge, &head_sh->dev[i].flags)) 1119 bi->bi_opf |= REQ_NOMERGE; 1120 1121 if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) 1122 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); 1123 1124 if (!op_is_write(op) && 1125 test_bit(R5_InJournal, &sh->dev[i].flags)) 1126 /* 1127 * issuing read for a page in journal, this 1128 * must be preparing for prexor in rmw; read 1129 * the data into orig_page 1130 */ 1131 sh->dev[i].vec.bv_page = sh->dev[i].orig_page; 1132 else 1133 sh->dev[i].vec.bv_page = sh->dev[i].page; 1134 bi->bi_vcnt = 1; 1135 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 1136 bi->bi_io_vec[0].bv_offset = 0; 1137 bi->bi_iter.bi_size = STRIPE_SIZE; 1138 /* 1139 * If this is discard request, set bi_vcnt 0. We don't 1140 * want to confuse SCSI because SCSI will replace payload 1141 */ 1142 if (op == REQ_OP_DISCARD) 1143 bi->bi_vcnt = 0; 1144 if (rrdev) 1145 set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); 1146 1147 if (conf->mddev->gendisk) 1148 trace_block_bio_remap(bdev_get_queue(bi->bi_bdev), 1149 bi, disk_devt(conf->mddev->gendisk), 1150 sh->dev[i].sector); 1151 if (should_defer && op_is_write(op)) 1152 bio_list_add(&pending_bios, bi); 1153 else 1154 generic_make_request(bi); 1155 } 1156 if (rrdev) { 1157 if (s->syncing || s->expanding || s->expanded 1158 || s->replacing) 1159 md_sync_acct(rrdev->bdev, STRIPE_SECTORS); 1160 1161 set_bit(STRIPE_IO_STARTED, &sh->state); 1162 1163 rbi->bi_bdev = rrdev->bdev; 1164 bio_set_op_attrs(rbi, op, op_flags); 1165 BUG_ON(!op_is_write(op)); 1166 rbi->bi_end_io = raid5_end_write_request; 1167 rbi->bi_private = sh; 1168 1169 pr_debug("%s: for %llu schedule op %d on " 1170 "replacement disc %d\n", 1171 __func__, (unsigned long long)sh->sector, 1172 rbi->bi_opf, i); 1173 atomic_inc(&sh->count); 1174 if (sh != head_sh) 1175 atomic_inc(&head_sh->count); 1176 if (use_new_offset(conf, sh)) 1177 rbi->bi_iter.bi_sector = (sh->sector 1178 + rrdev->new_data_offset); 1179 else 1180 rbi->bi_iter.bi_sector = (sh->sector 1181 + rrdev->data_offset); 1182 if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) 1183 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); 1184 sh->dev[i].rvec.bv_page = sh->dev[i].page; 1185 rbi->bi_vcnt = 1; 1186 rbi->bi_io_vec[0].bv_len = STRIPE_SIZE; 1187 rbi->bi_io_vec[0].bv_offset = 0; 1188 rbi->bi_iter.bi_size = STRIPE_SIZE; 1189 /* 1190 * If this is discard request, set bi_vcnt 0. 
We don't 1191 * want to confuse SCSI because SCSI will replace payload 1192 */ 1193 if (op == REQ_OP_DISCARD) 1194 rbi->bi_vcnt = 0; 1195 if (conf->mddev->gendisk) 1196 trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev), 1197 rbi, disk_devt(conf->mddev->gendisk), 1198 sh->dev[i].sector); 1199 if (should_defer && op_is_write(op)) 1200 bio_list_add(&pending_bios, rbi); 1201 else 1202 generic_make_request(rbi); 1203 } 1204 if (!rdev && !rrdev) { 1205 if (op_is_write(op)) 1206 set_bit(STRIPE_DEGRADED, &sh->state); 1207 pr_debug("skip op %d on disc %d for sector %llu\n", 1208 bi->bi_opf, i, (unsigned long long)sh->sector); 1209 clear_bit(R5_LOCKED, &sh->dev[i].flags); 1210 set_bit(STRIPE_HANDLE, &sh->state); 1211 } 1212 1213 if (!head_sh->batch_head) 1214 continue; 1215 sh = list_first_entry(&sh->batch_list, struct stripe_head, 1216 batch_list); 1217 if (sh != head_sh) 1218 goto again; 1219 } 1220 1221 if (should_defer && !bio_list_empty(&pending_bios)) 1222 defer_issue_bios(conf, head_sh->sector, &pending_bios); 1223 } 1224 1225 static struct dma_async_tx_descriptor * 1226 async_copy_data(int frombio, struct bio *bio, struct page **page, 1227 sector_t sector, struct dma_async_tx_descriptor *tx, 1228 struct stripe_head *sh, int no_skipcopy) 1229 { 1230 struct bio_vec bvl; 1231 struct bvec_iter iter; 1232 struct page *bio_page; 1233 int page_offset; 1234 struct async_submit_ctl submit; 1235 enum async_tx_flags flags = 0; 1236 1237 if (bio->bi_iter.bi_sector >= sector) 1238 page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512; 1239 else 1240 page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512; 1241 1242 if (frombio) 1243 flags |= ASYNC_TX_FENCE; 1244 init_async_submit(&submit, flags, tx, NULL, NULL, NULL); 1245 1246 bio_for_each_segment(bvl, bio, iter) { 1247 int len = bvl.bv_len; 1248 int clen; 1249 int b_offset = 0; 1250 1251 if (page_offset < 0) { 1252 b_offset = -page_offset; 1253 page_offset += b_offset; 1254 len -= b_offset; 1255 } 1256 1257 if (len > 0 && page_offset + len > STRIPE_SIZE) 1258 clen = STRIPE_SIZE - page_offset; 1259 else 1260 clen = len; 1261 1262 if (clen > 0) { 1263 b_offset += bvl.bv_offset; 1264 bio_page = bvl.bv_page; 1265 if (frombio) { 1266 if (sh->raid_conf->skip_copy && 1267 b_offset == 0 && page_offset == 0 && 1268 clen == STRIPE_SIZE && 1269 !no_skipcopy) 1270 *page = bio_page; 1271 else 1272 tx = async_memcpy(*page, bio_page, page_offset, 1273 b_offset, clen, &submit); 1274 } else 1275 tx = async_memcpy(bio_page, *page, b_offset, 1276 page_offset, clen, &submit); 1277 } 1278 /* chain the operations */ 1279 submit.depend_tx = tx; 1280 1281 if (clen < len) /* hit end of page */ 1282 break; 1283 page_offset += len; 1284 } 1285 1286 return tx; 1287 } 1288 1289 static void ops_complete_biofill(void *stripe_head_ref) 1290 { 1291 struct stripe_head *sh = stripe_head_ref; 1292 int i; 1293 1294 pr_debug("%s: stripe %llu\n", __func__, 1295 (unsigned long long)sh->sector); 1296 1297 /* clear completed biofills */ 1298 for (i = sh->disks; i--; ) { 1299 struct r5dev *dev = &sh->dev[i]; 1300 1301 /* acknowledge completion of a biofill operation */ 1302 /* and check if we need to reply to a read request, 1303 * new R5_Wantfill requests are held off until 1304 * !STRIPE_BIOFILL_RUN 1305 */ 1306 if (test_and_clear_bit(R5_Wantfill, &dev->flags)) { 1307 struct bio *rbi, *rbi2; 1308 1309 BUG_ON(!dev->read); 1310 rbi = dev->read; 1311 dev->read = NULL; 1312 while (rbi && rbi->bi_iter.bi_sector < 1313 dev->sector + STRIPE_SECTORS) { 1314 rbi2 = r5_next_bio(rbi, 
dev->sector); 1315 bio_endio(rbi); 1316 rbi = rbi2; 1317 } 1318 } 1319 } 1320 clear_bit(STRIPE_BIOFILL_RUN, &sh->state); 1321 1322 set_bit(STRIPE_HANDLE, &sh->state); 1323 raid5_release_stripe(sh); 1324 } 1325 1326 static void ops_run_biofill(struct stripe_head *sh) 1327 { 1328 struct dma_async_tx_descriptor *tx = NULL; 1329 struct async_submit_ctl submit; 1330 int i; 1331 1332 BUG_ON(sh->batch_head); 1333 pr_debug("%s: stripe %llu\n", __func__, 1334 (unsigned long long)sh->sector); 1335 1336 for (i = sh->disks; i--; ) { 1337 struct r5dev *dev = &sh->dev[i]; 1338 if (test_bit(R5_Wantfill, &dev->flags)) { 1339 struct bio *rbi; 1340 spin_lock_irq(&sh->stripe_lock); 1341 dev->read = rbi = dev->toread; 1342 dev->toread = NULL; 1343 spin_unlock_irq(&sh->stripe_lock); 1344 while (rbi && rbi->bi_iter.bi_sector < 1345 dev->sector + STRIPE_SECTORS) { 1346 tx = async_copy_data(0, rbi, &dev->page, 1347 dev->sector, tx, sh, 0); 1348 rbi = r5_next_bio(rbi, dev->sector); 1349 } 1350 } 1351 } 1352 1353 atomic_inc(&sh->count); 1354 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL); 1355 async_trigger_callback(&submit); 1356 } 1357 1358 static void mark_target_uptodate(struct stripe_head *sh, int target) 1359 { 1360 struct r5dev *tgt; 1361 1362 if (target < 0) 1363 return; 1364 1365 tgt = &sh->dev[target]; 1366 set_bit(R5_UPTODATE, &tgt->flags); 1367 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 1368 clear_bit(R5_Wantcompute, &tgt->flags); 1369 } 1370 1371 static void ops_complete_compute(void *stripe_head_ref) 1372 { 1373 struct stripe_head *sh = stripe_head_ref; 1374 1375 pr_debug("%s: stripe %llu\n", __func__, 1376 (unsigned long long)sh->sector); 1377 1378 /* mark the computed target(s) as uptodate */ 1379 mark_target_uptodate(sh, sh->ops.target); 1380 mark_target_uptodate(sh, sh->ops.target2); 1381 1382 clear_bit(STRIPE_COMPUTE_RUN, &sh->state); 1383 if (sh->check_state == check_state_compute_run) 1384 sh->check_state = check_state_compute_result; 1385 set_bit(STRIPE_HANDLE, &sh->state); 1386 raid5_release_stripe(sh); 1387 } 1388 1389 /* return a pointer to the address conversion region of the scribble buffer */ 1390 static addr_conv_t *to_addr_conv(struct stripe_head *sh, 1391 struct raid5_percpu *percpu, int i) 1392 { 1393 void *addr; 1394 1395 addr = flex_array_get(percpu->scribble, i); 1396 return addr + sizeof(struct page *) * (sh->disks + 2); 1397 } 1398 1399 /* return a pointer to the address conversion region of the scribble buffer */ 1400 static struct page **to_addr_page(struct raid5_percpu *percpu, int i) 1401 { 1402 void *addr; 1403 1404 addr = flex_array_get(percpu->scribble, i); 1405 return addr; 1406 } 1407 1408 static struct dma_async_tx_descriptor * 1409 ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu) 1410 { 1411 int disks = sh->disks; 1412 struct page **xor_srcs = to_addr_page(percpu, 0); 1413 int target = sh->ops.target; 1414 struct r5dev *tgt = &sh->dev[target]; 1415 struct page *xor_dest = tgt->page; 1416 int count = 0; 1417 struct dma_async_tx_descriptor *tx; 1418 struct async_submit_ctl submit; 1419 int i; 1420 1421 BUG_ON(sh->batch_head); 1422 1423 pr_debug("%s: stripe %llu block: %d\n", 1424 __func__, (unsigned long long)sh->sector, target); 1425 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 1426 1427 for (i = disks; i--; ) 1428 if (i != target) 1429 xor_srcs[count++] = sh->dev[i].page; 1430 1431 atomic_inc(&sh->count); 1432 1433 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL, 1434 
ops_complete_compute, sh, to_addr_conv(sh, percpu, 0)); 1435 if (unlikely(count == 1)) 1436 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit); 1437 else 1438 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); 1439 1440 return tx; 1441 } 1442 1443 /* set_syndrome_sources - populate source buffers for gen_syndrome 1444 * @srcs - (struct page *) array of size sh->disks 1445 * @sh - stripe_head to parse 1446 * 1447 * Populates srcs in proper layout order for the stripe and returns the 1448 * 'count' of sources to be used in a call to async_gen_syndrome. The P 1449 * destination buffer is recorded in srcs[count] and the Q destination 1450 * is recorded in srcs[count+1]]. 1451 */ 1452 static int set_syndrome_sources(struct page **srcs, 1453 struct stripe_head *sh, 1454 int srctype) 1455 { 1456 int disks = sh->disks; 1457 int syndrome_disks = sh->ddf_layout ? disks : (disks - 2); 1458 int d0_idx = raid6_d0(sh); 1459 int count; 1460 int i; 1461 1462 for (i = 0; i < disks; i++) 1463 srcs[i] = NULL; 1464 1465 count = 0; 1466 i = d0_idx; 1467 do { 1468 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); 1469 struct r5dev *dev = &sh->dev[i]; 1470 1471 if (i == sh->qd_idx || i == sh->pd_idx || 1472 (srctype == SYNDROME_SRC_ALL) || 1473 (srctype == SYNDROME_SRC_WANT_DRAIN && 1474 (test_bit(R5_Wantdrain, &dev->flags) || 1475 test_bit(R5_InJournal, &dev->flags))) || 1476 (srctype == SYNDROME_SRC_WRITTEN && 1477 (dev->written || 1478 test_bit(R5_InJournal, &dev->flags)))) { 1479 if (test_bit(R5_InJournal, &dev->flags)) 1480 srcs[slot] = sh->dev[i].orig_page; 1481 else 1482 srcs[slot] = sh->dev[i].page; 1483 } 1484 i = raid6_next_disk(i, disks); 1485 } while (i != d0_idx); 1486 1487 return syndrome_disks; 1488 } 1489 1490 static struct dma_async_tx_descriptor * 1491 ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu) 1492 { 1493 int disks = sh->disks; 1494 struct page **blocks = to_addr_page(percpu, 0); 1495 int target; 1496 int qd_idx = sh->qd_idx; 1497 struct dma_async_tx_descriptor *tx; 1498 struct async_submit_ctl submit; 1499 struct r5dev *tgt; 1500 struct page *dest; 1501 int i; 1502 int count; 1503 1504 BUG_ON(sh->batch_head); 1505 if (sh->ops.target < 0) 1506 target = sh->ops.target2; 1507 else if (sh->ops.target2 < 0) 1508 target = sh->ops.target; 1509 else 1510 /* we should only have one valid target */ 1511 BUG(); 1512 BUG_ON(target < 0); 1513 pr_debug("%s: stripe %llu block: %d\n", 1514 __func__, (unsigned long long)sh->sector, target); 1515 1516 tgt = &sh->dev[target]; 1517 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 1518 dest = tgt->page; 1519 1520 atomic_inc(&sh->count); 1521 1522 if (target == qd_idx) { 1523 count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL); 1524 blocks[count] = NULL; /* regenerating p is not necessary */ 1525 BUG_ON(blocks[count+1] != dest); /* q should already be set */ 1526 init_async_submit(&submit, ASYNC_TX_FENCE, NULL, 1527 ops_complete_compute, sh, 1528 to_addr_conv(sh, percpu, 0)); 1529 tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit); 1530 } else { 1531 /* Compute any data- or p-drive using XOR */ 1532 count = 0; 1533 for (i = disks; i-- ; ) { 1534 if (i == target || i == qd_idx) 1535 continue; 1536 blocks[count++] = sh->dev[i].page; 1537 } 1538 1539 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, 1540 NULL, ops_complete_compute, sh, 1541 to_addr_conv(sh, percpu, 0)); 1542 tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit); 1543 } 1544 1545 
return tx; 1546 } 1547 1548 static struct dma_async_tx_descriptor * 1549 ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) 1550 { 1551 int i, count, disks = sh->disks; 1552 int syndrome_disks = sh->ddf_layout ? disks : disks-2; 1553 int d0_idx = raid6_d0(sh); 1554 int faila = -1, failb = -1; 1555 int target = sh->ops.target; 1556 int target2 = sh->ops.target2; 1557 struct r5dev *tgt = &sh->dev[target]; 1558 struct r5dev *tgt2 = &sh->dev[target2]; 1559 struct dma_async_tx_descriptor *tx; 1560 struct page **blocks = to_addr_page(percpu, 0); 1561 struct async_submit_ctl submit; 1562 1563 BUG_ON(sh->batch_head); 1564 pr_debug("%s: stripe %llu block1: %d block2: %d\n", 1565 __func__, (unsigned long long)sh->sector, target, target2); 1566 BUG_ON(target < 0 || target2 < 0); 1567 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 1568 BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags)); 1569 1570 /* we need to open-code set_syndrome_sources to handle the 1571 * slot number conversion for 'faila' and 'failb' 1572 */ 1573 for (i = 0; i < disks ; i++) 1574 blocks[i] = NULL; 1575 count = 0; 1576 i = d0_idx; 1577 do { 1578 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); 1579 1580 blocks[slot] = sh->dev[i].page; 1581 1582 if (i == target) 1583 faila = slot; 1584 if (i == target2) 1585 failb = slot; 1586 i = raid6_next_disk(i, disks); 1587 } while (i != d0_idx); 1588 1589 BUG_ON(faila == failb); 1590 if (failb < faila) 1591 swap(faila, failb); 1592 pr_debug("%s: stripe: %llu faila: %d failb: %d\n", 1593 __func__, (unsigned long long)sh->sector, faila, failb); 1594 1595 atomic_inc(&sh->count); 1596 1597 if (failb == syndrome_disks+1) { 1598 /* Q disk is one of the missing disks */ 1599 if (faila == syndrome_disks) { 1600 /* Missing P+Q, just recompute */ 1601 init_async_submit(&submit, ASYNC_TX_FENCE, NULL, 1602 ops_complete_compute, sh, 1603 to_addr_conv(sh, percpu, 0)); 1604 return async_gen_syndrome(blocks, 0, syndrome_disks+2, 1605 STRIPE_SIZE, &submit); 1606 } else { 1607 struct page *dest; 1608 int data_target; 1609 int qd_idx = sh->qd_idx; 1610 1611 /* Missing D+Q: recompute D from P, then recompute Q */ 1612 if (target == qd_idx) 1613 data_target = target2; 1614 else 1615 data_target = target; 1616 1617 count = 0; 1618 for (i = disks; i-- ; ) { 1619 if (i == data_target || i == qd_idx) 1620 continue; 1621 blocks[count++] = sh->dev[i].page; 1622 } 1623 dest = sh->dev[data_target].page; 1624 init_async_submit(&submit, 1625 ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, 1626 NULL, NULL, NULL, 1627 to_addr_conv(sh, percpu, 0)); 1628 tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, 1629 &submit); 1630 1631 count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL); 1632 init_async_submit(&submit, ASYNC_TX_FENCE, tx, 1633 ops_complete_compute, sh, 1634 to_addr_conv(sh, percpu, 0)); 1635 return async_gen_syndrome(blocks, 0, count+2, 1636 STRIPE_SIZE, &submit); 1637 } 1638 } else { 1639 init_async_submit(&submit, ASYNC_TX_FENCE, NULL, 1640 ops_complete_compute, sh, 1641 to_addr_conv(sh, percpu, 0)); 1642 if (failb == syndrome_disks) { 1643 /* We're missing D+P. */ 1644 return async_raid6_datap_recov(syndrome_disks+2, 1645 STRIPE_SIZE, faila, 1646 blocks, &submit); 1647 } else { 1648 /* We're missing D+D. 
*/ 1649 return async_raid6_2data_recov(syndrome_disks+2, 1650 STRIPE_SIZE, faila, failb, 1651 blocks, &submit); 1652 } 1653 } 1654 } 1655 1656 static void ops_complete_prexor(void *stripe_head_ref) 1657 { 1658 struct stripe_head *sh = stripe_head_ref; 1659 1660 pr_debug("%s: stripe %llu\n", __func__, 1661 (unsigned long long)sh->sector); 1662 1663 if (r5c_is_writeback(sh->raid_conf->log)) 1664 /* 1665 * raid5-cache write back uses orig_page during prexor. 1666 * After prexor, it is time to free orig_page 1667 */ 1668 r5c_release_extra_page(sh); 1669 } 1670 1671 static struct dma_async_tx_descriptor * 1672 ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu, 1673 struct dma_async_tx_descriptor *tx) 1674 { 1675 int disks = sh->disks; 1676 struct page **xor_srcs = to_addr_page(percpu, 0); 1677 int count = 0, pd_idx = sh->pd_idx, i; 1678 struct async_submit_ctl submit; 1679 1680 /* existing parity data subtracted */ 1681 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 1682 1683 BUG_ON(sh->batch_head); 1684 pr_debug("%s: stripe %llu\n", __func__, 1685 (unsigned long long)sh->sector); 1686 1687 for (i = disks; i--; ) { 1688 struct r5dev *dev = &sh->dev[i]; 1689 /* Only process blocks that are known to be uptodate */ 1690 if (test_bit(R5_InJournal, &dev->flags)) 1691 xor_srcs[count++] = dev->orig_page; 1692 else if (test_bit(R5_Wantdrain, &dev->flags)) 1693 xor_srcs[count++] = dev->page; 1694 } 1695 1696 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, 1697 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); 1698 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); 1699 1700 return tx; 1701 } 1702 1703 static struct dma_async_tx_descriptor * 1704 ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu, 1705 struct dma_async_tx_descriptor *tx) 1706 { 1707 struct page **blocks = to_addr_page(percpu, 0); 1708 int count; 1709 struct async_submit_ctl submit; 1710 1711 pr_debug("%s: stripe %llu\n", __func__, 1712 (unsigned long long)sh->sector); 1713 1714 count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_WANT_DRAIN); 1715 1716 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_PQ_XOR_DST, tx, 1717 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); 1718 tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit); 1719 1720 return tx; 1721 } 1722 1723 static struct dma_async_tx_descriptor * 1724 ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) 1725 { 1726 struct r5conf *conf = sh->raid_conf; 1727 int disks = sh->disks; 1728 int i; 1729 struct stripe_head *head_sh = sh; 1730 1731 pr_debug("%s: stripe %llu\n", __func__, 1732 (unsigned long long)sh->sector); 1733 1734 for (i = disks; i--; ) { 1735 struct r5dev *dev; 1736 struct bio *chosen; 1737 1738 sh = head_sh; 1739 if (test_and_clear_bit(R5_Wantdrain, &head_sh->dev[i].flags)) { 1740 struct bio *wbi; 1741 1742 again: 1743 dev = &sh->dev[i]; 1744 /* 1745 * clear R5_InJournal, so when rewriting a page in 1746 * journal, it is not skipped by r5l_log_stripe() 1747 */ 1748 clear_bit(R5_InJournal, &dev->flags); 1749 spin_lock_irq(&sh->stripe_lock); 1750 chosen = dev->towrite; 1751 dev->towrite = NULL; 1752 sh->overwrite_disks = 0; 1753 BUG_ON(dev->written); 1754 wbi = dev->written = chosen; 1755 spin_unlock_irq(&sh->stripe_lock); 1756 WARN_ON(dev->page != dev->orig_page); 1757 1758 while (wbi && wbi->bi_iter.bi_sector < 1759 dev->sector + STRIPE_SECTORS) { 1760 if (wbi->bi_opf & REQ_FUA) 1761 set_bit(R5_WantFUA, &dev->flags); 1762 if 
(wbi->bi_opf & REQ_SYNC) 1763 set_bit(R5_SyncIO, &dev->flags); 1764 if (bio_op(wbi) == REQ_OP_DISCARD) 1765 set_bit(R5_Discard, &dev->flags); 1766 else { 1767 tx = async_copy_data(1, wbi, &dev->page, 1768 dev->sector, tx, sh, 1769 r5c_is_writeback(conf->log)); 1770 if (dev->page != dev->orig_page && 1771 !r5c_is_writeback(conf->log)) { 1772 set_bit(R5_SkipCopy, &dev->flags); 1773 clear_bit(R5_UPTODATE, &dev->flags); 1774 clear_bit(R5_OVERWRITE, &dev->flags); 1775 } 1776 } 1777 wbi = r5_next_bio(wbi, dev->sector); 1778 } 1779 1780 if (head_sh->batch_head) { 1781 sh = list_first_entry(&sh->batch_list, 1782 struct stripe_head, 1783 batch_list); 1784 if (sh == head_sh) 1785 continue; 1786 goto again; 1787 } 1788 } 1789 } 1790 1791 return tx; 1792 } 1793 1794 static void ops_complete_reconstruct(void *stripe_head_ref) 1795 { 1796 struct stripe_head *sh = stripe_head_ref; 1797 int disks = sh->disks; 1798 int pd_idx = sh->pd_idx; 1799 int qd_idx = sh->qd_idx; 1800 int i; 1801 bool fua = false, sync = false, discard = false; 1802 1803 pr_debug("%s: stripe %llu\n", __func__, 1804 (unsigned long long)sh->sector); 1805 1806 for (i = disks; i--; ) { 1807 fua |= test_bit(R5_WantFUA, &sh->dev[i].flags); 1808 sync |= test_bit(R5_SyncIO, &sh->dev[i].flags); 1809 discard |= test_bit(R5_Discard, &sh->dev[i].flags); 1810 } 1811 1812 for (i = disks; i--; ) { 1813 struct r5dev *dev = &sh->dev[i]; 1814 1815 if (dev->written || i == pd_idx || i == qd_idx) { 1816 if (!discard && !test_bit(R5_SkipCopy, &dev->flags)) 1817 set_bit(R5_UPTODATE, &dev->flags); 1818 if (fua) 1819 set_bit(R5_WantFUA, &dev->flags); 1820 if (sync) 1821 set_bit(R5_SyncIO, &dev->flags); 1822 } 1823 } 1824 1825 if (sh->reconstruct_state == reconstruct_state_drain_run) 1826 sh->reconstruct_state = reconstruct_state_drain_result; 1827 else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) 1828 sh->reconstruct_state = reconstruct_state_prexor_drain_result; 1829 else { 1830 BUG_ON(sh->reconstruct_state != reconstruct_state_run); 1831 sh->reconstruct_state = reconstruct_state_result; 1832 } 1833 1834 set_bit(STRIPE_HANDLE, &sh->state); 1835 raid5_release_stripe(sh); 1836 } 1837 1838 static void 1839 ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu, 1840 struct dma_async_tx_descriptor *tx) 1841 { 1842 int disks = sh->disks; 1843 struct page **xor_srcs; 1844 struct async_submit_ctl submit; 1845 int count, pd_idx = sh->pd_idx, i; 1846 struct page *xor_dest; 1847 int prexor = 0; 1848 unsigned long flags; 1849 int j = 0; 1850 struct stripe_head *head_sh = sh; 1851 int last_stripe; 1852 1853 pr_debug("%s: stripe %llu\n", __func__, 1854 (unsigned long long)sh->sector); 1855 1856 for (i = 0; i < sh->disks; i++) { 1857 if (pd_idx == i) 1858 continue; 1859 if (!test_bit(R5_Discard, &sh->dev[i].flags)) 1860 break; 1861 } 1862 if (i >= sh->disks) { 1863 atomic_inc(&sh->count); 1864 set_bit(R5_Discard, &sh->dev[pd_idx].flags); 1865 ops_complete_reconstruct(sh); 1866 return; 1867 } 1868 again: 1869 count = 0; 1870 xor_srcs = to_addr_page(percpu, j); 1871 /* check if prexor is active which means only process blocks 1872 * that are part of a read-modify-write (written) 1873 */ 1874 if (head_sh->reconstruct_state == reconstruct_state_prexor_drain_run) { 1875 prexor = 1; 1876 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 1877 for (i = disks; i--; ) { 1878 struct r5dev *dev = &sh->dev[i]; 1879 if (head_sh->dev[i].written || 1880 test_bit(R5_InJournal, &head_sh->dev[i].flags)) 1881 xor_srcs[count++] = dev->page; 1882 } 
1883 } else { 1884 xor_dest = sh->dev[pd_idx].page; 1885 for (i = disks; i--; ) { 1886 struct r5dev *dev = &sh->dev[i]; 1887 if (i != pd_idx) 1888 xor_srcs[count++] = dev->page; 1889 } 1890 } 1891 1892 /* 1/ if we prexor'd then the dest is reused as a source 1893 * 2/ if we did not prexor then we are redoing the parity 1894 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST 1895 * for the synchronous xor case 1896 */ 1897 last_stripe = !head_sh->batch_head || 1898 list_first_entry(&sh->batch_list, 1899 struct stripe_head, batch_list) == head_sh; 1900 if (last_stripe) { 1901 flags = ASYNC_TX_ACK | 1902 (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST); 1903 1904 atomic_inc(&head_sh->count); 1905 init_async_submit(&submit, flags, tx, ops_complete_reconstruct, head_sh, 1906 to_addr_conv(sh, percpu, j)); 1907 } else { 1908 flags = prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST; 1909 init_async_submit(&submit, flags, tx, NULL, NULL, 1910 to_addr_conv(sh, percpu, j)); 1911 } 1912 1913 if (unlikely(count == 1)) 1914 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit); 1915 else 1916 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); 1917 if (!last_stripe) { 1918 j++; 1919 sh = list_first_entry(&sh->batch_list, struct stripe_head, 1920 batch_list); 1921 goto again; 1922 } 1923 } 1924 1925 static void 1926 ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu, 1927 struct dma_async_tx_descriptor *tx) 1928 { 1929 struct async_submit_ctl submit; 1930 struct page **blocks; 1931 int count, i, j = 0; 1932 struct stripe_head *head_sh = sh; 1933 int last_stripe; 1934 int synflags; 1935 unsigned long txflags; 1936 1937 pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); 1938 1939 for (i = 0; i < sh->disks; i++) { 1940 if (sh->pd_idx == i || sh->qd_idx == i) 1941 continue; 1942 if (!test_bit(R5_Discard, &sh->dev[i].flags)) 1943 break; 1944 } 1945 if (i >= sh->disks) { 1946 atomic_inc(&sh->count); 1947 set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); 1948 set_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); 1949 ops_complete_reconstruct(sh); 1950 return; 1951 } 1952 1953 again: 1954 blocks = to_addr_page(percpu, j); 1955 1956 if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) { 1957 synflags = SYNDROME_SRC_WRITTEN; 1958 txflags = ASYNC_TX_ACK | ASYNC_TX_PQ_XOR_DST; 1959 } else { 1960 synflags = SYNDROME_SRC_ALL; 1961 txflags = ASYNC_TX_ACK; 1962 } 1963 1964 count = set_syndrome_sources(blocks, sh, synflags); 1965 last_stripe = !head_sh->batch_head || 1966 list_first_entry(&sh->batch_list, 1967 struct stripe_head, batch_list) == head_sh; 1968 1969 if (last_stripe) { 1970 atomic_inc(&head_sh->count); 1971 init_async_submit(&submit, txflags, tx, ops_complete_reconstruct, 1972 head_sh, to_addr_conv(sh, percpu, j)); 1973 } else 1974 init_async_submit(&submit, 0, tx, NULL, NULL, 1975 to_addr_conv(sh, percpu, j)); 1976 tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit); 1977 if (!last_stripe) { 1978 j++; 1979 sh = list_first_entry(&sh->batch_list, struct stripe_head, 1980 batch_list); 1981 goto again; 1982 } 1983 } 1984 1985 static void ops_complete_check(void *stripe_head_ref) 1986 { 1987 struct stripe_head *sh = stripe_head_ref; 1988 1989 pr_debug("%s: stripe %llu\n", __func__, 1990 (unsigned long long)sh->sector); 1991 1992 sh->check_state = check_state_check_result; 1993 set_bit(STRIPE_HANDLE, &sh->state); 1994 raid5_release_stripe(sh); 1995 } 1996 1997 static void ops_run_check_p(struct 
stripe_head *sh, struct raid5_percpu *percpu) 1998 { 1999 int disks = sh->disks; 2000 int pd_idx = sh->pd_idx; 2001 int qd_idx = sh->qd_idx; 2002 struct page *xor_dest; 2003 struct page **xor_srcs = to_addr_page(percpu, 0); 2004 struct dma_async_tx_descriptor *tx; 2005 struct async_submit_ctl submit; 2006 int count; 2007 int i; 2008 2009 pr_debug("%s: stripe %llu\n", __func__, 2010 (unsigned long long)sh->sector); 2011 2012 BUG_ON(sh->batch_head); 2013 count = 0; 2014 xor_dest = sh->dev[pd_idx].page; 2015 xor_srcs[count++] = xor_dest; 2016 for (i = disks; i--; ) { 2017 if (i == pd_idx || i == qd_idx) 2018 continue; 2019 xor_srcs[count++] = sh->dev[i].page; 2020 } 2021 2022 init_async_submit(&submit, 0, NULL, NULL, NULL, 2023 to_addr_conv(sh, percpu, 0)); 2024 tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 2025 &sh->ops.zero_sum_result, &submit); 2026 2027 atomic_inc(&sh->count); 2028 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL); 2029 tx = async_trigger_callback(&submit); 2030 } 2031 2032 static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp) 2033 { 2034 struct page **srcs = to_addr_page(percpu, 0); 2035 struct async_submit_ctl submit; 2036 int count; 2037 2038 pr_debug("%s: stripe %llu checkp: %d\n", __func__, 2039 (unsigned long long)sh->sector, checkp); 2040 2041 BUG_ON(sh->batch_head); 2042 count = set_syndrome_sources(srcs, sh, SYNDROME_SRC_ALL); 2043 if (!checkp) 2044 srcs[count] = NULL; 2045 2046 atomic_inc(&sh->count); 2047 init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check, 2048 sh, to_addr_conv(sh, percpu, 0)); 2049 async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE, 2050 &sh->ops.zero_sum_result, percpu->spare_page, &submit); 2051 } 2052 2053 static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) 2054 { 2055 int overlap_clear = 0, i, disks = sh->disks; 2056 struct dma_async_tx_descriptor *tx = NULL; 2057 struct r5conf *conf = sh->raid_conf; 2058 int level = conf->level; 2059 struct raid5_percpu *percpu; 2060 unsigned long cpu; 2061 2062 cpu = get_cpu(); 2063 percpu = per_cpu_ptr(conf->percpu, cpu); 2064 if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) { 2065 ops_run_biofill(sh); 2066 overlap_clear++; 2067 } 2068 2069 if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) { 2070 if (level < 6) 2071 tx = ops_run_compute5(sh, percpu); 2072 else { 2073 if (sh->ops.target2 < 0 || sh->ops.target < 0) 2074 tx = ops_run_compute6_1(sh, percpu); 2075 else 2076 tx = ops_run_compute6_2(sh, percpu); 2077 } 2078 /* terminate the chain if reconstruct is not set to be run */ 2079 if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) 2080 async_tx_ack(tx); 2081 } 2082 2083 if (test_bit(STRIPE_OP_PREXOR, &ops_request)) { 2084 if (level < 6) 2085 tx = ops_run_prexor5(sh, percpu, tx); 2086 else 2087 tx = ops_run_prexor6(sh, percpu, tx); 2088 } 2089 2090 if (test_bit(STRIPE_OP_PARTIAL_PARITY, &ops_request)) 2091 tx = ops_run_partial_parity(sh, percpu, tx); 2092 2093 if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) { 2094 tx = ops_run_biodrain(sh, tx); 2095 overlap_clear++; 2096 } 2097 2098 if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) { 2099 if (level < 6) 2100 ops_run_reconstruct5(sh, percpu, tx); 2101 else 2102 ops_run_reconstruct6(sh, percpu, tx); 2103 } 2104 2105 if (test_bit(STRIPE_OP_CHECK, &ops_request)) { 2106 if (sh->check_state == check_state_run) 2107 ops_run_check_p(sh, percpu); 2108 else if (sh->check_state == check_state_run_q) 2109 ops_run_check_pq(sh, percpu, 0); 
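/*
 * The third argument (checkp) selects how much of the syndrome is
 * validated: 0 here drops P from the comparison (ops_run_check_pq()
 * NULLs out the P slot in its source list), so presumably only Q is
 * checked, while the run_pq case below passes 1 to validate both P
 * and Q.
 */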
2110 else if (sh->check_state == check_state_run_pq) 2111 ops_run_check_pq(sh, percpu, 1); 2112 else 2113 BUG(); 2114 } 2115 2116 if (overlap_clear && !sh->batch_head) 2117 for (i = disks; i--; ) { 2118 struct r5dev *dev = &sh->dev[i]; 2119 if (test_and_clear_bit(R5_Overlap, &dev->flags)) 2120 wake_up(&sh->raid_conf->wait_for_overlap); 2121 } 2122 put_cpu(); 2123 } 2124 2125 static void free_stripe(struct kmem_cache *sc, struct stripe_head *sh) 2126 { 2127 if (sh->ppl_page) 2128 __free_page(sh->ppl_page); 2129 kmem_cache_free(sc, sh); 2130 } 2131 2132 static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp, 2133 int disks, struct r5conf *conf) 2134 { 2135 struct stripe_head *sh; 2136 int i; 2137 2138 sh = kmem_cache_zalloc(sc, gfp); 2139 if (sh) { 2140 spin_lock_init(&sh->stripe_lock); 2141 spin_lock_init(&sh->batch_lock); 2142 INIT_LIST_HEAD(&sh->batch_list); 2143 INIT_LIST_HEAD(&sh->lru); 2144 INIT_LIST_HEAD(&sh->r5c); 2145 INIT_LIST_HEAD(&sh->log_list); 2146 atomic_set(&sh->count, 1); 2147 sh->raid_conf = conf; 2148 sh->log_start = MaxSector; 2149 for (i = 0; i < disks; i++) { 2150 struct r5dev *dev = &sh->dev[i]; 2151 2152 bio_init(&dev->req, &dev->vec, 1); 2153 bio_init(&dev->rreq, &dev->rvec, 1); 2154 } 2155 2156 if (raid5_has_ppl(conf)) { 2157 sh->ppl_page = alloc_page(gfp); 2158 if (!sh->ppl_page) { 2159 free_stripe(sc, sh); 2160 sh = NULL; 2161 } 2162 } 2163 } 2164 return sh; 2165 } 2166 static int grow_one_stripe(struct r5conf *conf, gfp_t gfp) 2167 { 2168 struct stripe_head *sh; 2169 2170 sh = alloc_stripe(conf->slab_cache, gfp, conf->pool_size, conf); 2171 if (!sh) 2172 return 0; 2173 2174 if (grow_buffers(sh, gfp)) { 2175 shrink_buffers(sh); 2176 free_stripe(conf->slab_cache, sh); 2177 return 0; 2178 } 2179 sh->hash_lock_index = 2180 conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS; 2181 /* we just created an active stripe so... */ 2182 atomic_inc(&conf->active_stripes); 2183 2184 raid5_release_stripe(sh); 2185 conf->max_nr_stripes++; 2186 return 1; 2187 } 2188 2189 static int grow_stripes(struct r5conf *conf, int num) 2190 { 2191 struct kmem_cache *sc; 2192 int devs = max(conf->raid_disks, conf->previous_raid_disks); 2193 2194 if (conf->mddev->gendisk) 2195 sprintf(conf->cache_name[0], 2196 "raid%d-%s", conf->level, mdname(conf->mddev)); 2197 else 2198 sprintf(conf->cache_name[0], 2199 "raid%d-%p", conf->level, conf->mddev); 2200 sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]); 2201 2202 conf->active_name = 0; 2203 sc = kmem_cache_create(conf->cache_name[conf->active_name], 2204 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev), 2205 0, 0, NULL); 2206 if (!sc) 2207 return 1; 2208 conf->slab_cache = sc; 2209 conf->pool_size = devs; 2210 while (num--) 2211 if (!grow_one_stripe(conf, GFP_KERNEL)) 2212 return 1; 2213 2214 return 0; 2215 } 2216 2217 /** 2218 * scribble_len - return the required size of the scribble region 2219 * @num - total number of disks in the array 2220 * 2221 * The size must be enough to contain: 2222 * 1/ a struct page pointer for each device in the array +2 2223 * 2/ room to convert each entry in (1) to its corresponding dma 2224 * (dma_map_page()) or page (page_address()) address. 2225 * 2226 * Note: the +2 is for the destination buffers of the ddf/raid6 case where we 2227 * calculate over all devices (not just the data blocks), using zeros in place 2228 * of the P and Q blocks. 
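 *
 * A rough worked example (assuming both a struct page pointer and an
 * addr_conv_t are 8 bytes, as on typical 64-bit builds): for a 6-device
 * array each scribble element is (6+2)*8 + (6+2)*8 = 128 bytes, and
 * scribble_alloc() below preallocates 'cnt' such elements up front so
 * the per-cpu region can then be used without locking.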
2229 */ 2230 static struct flex_array *scribble_alloc(int num, int cnt, gfp_t flags) 2231 { 2232 struct flex_array *ret; 2233 size_t len; 2234 2235 len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2); 2236 ret = flex_array_alloc(len, cnt, flags); 2237 if (!ret) 2238 return NULL; 2239 /* always prealloc all elements, so no locking is required */ 2240 if (flex_array_prealloc(ret, 0, cnt, flags)) { 2241 flex_array_free(ret); 2242 return NULL; 2243 } 2244 return ret; 2245 } 2246 2247 static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors) 2248 { 2249 unsigned long cpu; 2250 int err = 0; 2251 2252 /* 2253 * Never shrink. And mddev_suspend() could deadlock if this is called 2254 * from raid5d. In that case, scribble_disks and scribble_sectors 2255 * should be equal to new_disks and new_sectors 2256 */ 2257 if (conf->scribble_disks >= new_disks && 2258 conf->scribble_sectors >= new_sectors) 2259 return 0; 2260 mddev_suspend(conf->mddev); 2261 get_online_cpus(); 2262 for_each_present_cpu(cpu) { 2263 struct raid5_percpu *percpu; 2264 struct flex_array *scribble; 2265 2266 percpu = per_cpu_ptr(conf->percpu, cpu); 2267 scribble = scribble_alloc(new_disks, 2268 new_sectors / STRIPE_SECTORS, 2269 GFP_NOIO); 2270 2271 if (scribble) { 2272 flex_array_free(percpu->scribble); 2273 percpu->scribble = scribble; 2274 } else { 2275 err = -ENOMEM; 2276 break; 2277 } 2278 } 2279 put_online_cpus(); 2280 mddev_resume(conf->mddev); 2281 if (!err) { 2282 conf->scribble_disks = new_disks; 2283 conf->scribble_sectors = new_sectors; 2284 } 2285 return err; 2286 } 2287 2288 static int resize_stripes(struct r5conf *conf, int newsize) 2289 { 2290 /* Make all the stripes able to hold 'newsize' devices. 2291 * New slots in each stripe get 'page' set to a new page. 2292 * 2293 * This happens in stages: 2294 * 1/ create a new kmem_cache and allocate the required number of 2295 * stripe_heads. 2296 * 2/ gather all the old stripe_heads and transfer the pages across 2297 * to the new stripe_heads. This will have the side effect of 2298 * freezing the array as once all stripe_heads have been collected, 2299 * no IO will be possible. Old stripe heads are freed once their 2300 * pages have been transferred over, and the old kmem_cache is 2301 * freed when all stripes are done. 2302 * 3/ reallocate conf->disks to be suitably bigger. If this fails, 2303 * we simply return a failure status - no need to clean anything up. 2304 * 4/ allocate new pages for the new slots in the new stripe_heads. 2305 * If this fails, we don't bother trying to shrink the 2306 * stripe_heads down again, we just leave them as they are. 2307 * As each stripe_head is processed the new one is released into 2308 * active service. 2309 * 2310 * Once step 2 is started, we cannot afford to wait for a write, 2311 * so we use GFP_NOIO allocations.
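 *
 * A small worked example of how step 2 spreads stripes over the hash
 * locks (assuming NR_STRIPE_HASH_LOCKS is 8): with max_nr_stripes == 260
 * the copy loop gives hash indices 0..3 a quota of 260/8 + 1 = 33
 * stripes each and indices 4..7 a quota of 32, advancing 'hash' and
 * resetting 'cnt' whenever the quota is reached.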
2312 */ 2313 struct stripe_head *osh, *nsh; 2314 LIST_HEAD(newstripes); 2315 struct disk_info *ndisks; 2316 int err = 0; 2317 struct kmem_cache *sc; 2318 int i; 2319 int hash, cnt; 2320 2321 md_allow_write(conf->mddev); 2322 2323 /* Step 1 */ 2324 sc = kmem_cache_create(conf->cache_name[1-conf->active_name], 2325 sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev), 2326 0, 0, NULL); 2327 if (!sc) 2328 return -ENOMEM; 2329 2330 /* Need to ensure auto-resizing doesn't interfere */ 2331 mutex_lock(&conf->cache_size_mutex); 2332 2333 for (i = conf->max_nr_stripes; i; i--) { 2334 nsh = alloc_stripe(sc, GFP_KERNEL, newsize, conf); 2335 if (!nsh) 2336 break; 2337 2338 list_add(&nsh->lru, &newstripes); 2339 } 2340 if (i) { 2341 /* didn't get enough, give up */ 2342 while (!list_empty(&newstripes)) { 2343 nsh = list_entry(newstripes.next, struct stripe_head, lru); 2344 list_del(&nsh->lru); 2345 free_stripe(sc, nsh); 2346 } 2347 kmem_cache_destroy(sc); 2348 mutex_unlock(&conf->cache_size_mutex); 2349 return -ENOMEM; 2350 } 2351 /* Step 2 - Must use GFP_NOIO now. 2352 * OK, we have enough stripes, start collecting inactive 2353 * stripes and copying them over 2354 */ 2355 hash = 0; 2356 cnt = 0; 2357 list_for_each_entry(nsh, &newstripes, lru) { 2358 lock_device_hash_lock(conf, hash); 2359 wait_event_cmd(conf->wait_for_stripe, 2360 !list_empty(conf->inactive_list + hash), 2361 unlock_device_hash_lock(conf, hash), 2362 lock_device_hash_lock(conf, hash)); 2363 osh = get_free_stripe(conf, hash); 2364 unlock_device_hash_lock(conf, hash); 2365 2366 for(i=0; i<conf->pool_size; i++) { 2367 nsh->dev[i].page = osh->dev[i].page; 2368 nsh->dev[i].orig_page = osh->dev[i].page; 2369 } 2370 nsh->hash_lock_index = hash; 2371 free_stripe(conf->slab_cache, osh); 2372 cnt++; 2373 if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS + 2374 !!((conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS) > hash)) { 2375 hash++; 2376 cnt = 0; 2377 } 2378 } 2379 kmem_cache_destroy(conf->slab_cache); 2380 2381 /* Step 3. 
2382 * At this point, we are holding all the stripes so the array 2383 * is completely stalled, so now is a good time to resize 2384 * conf->disks and the scribble region 2385 */ 2386 ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO); 2387 if (ndisks) { 2388 for (i = 0; i < conf->pool_size; i++) 2389 ndisks[i] = conf->disks[i]; 2390 2391 for (i = conf->pool_size; i < newsize; i++) { 2392 ndisks[i].extra_page = alloc_page(GFP_NOIO); 2393 if (!ndisks[i].extra_page) 2394 err = -ENOMEM; 2395 } 2396 2397 if (err) { 2398 for (i = conf->pool_size; i < newsize; i++) 2399 if (ndisks[i].extra_page) 2400 put_page(ndisks[i].extra_page); 2401 kfree(ndisks); 2402 } else { 2403 kfree(conf->disks); 2404 conf->disks = ndisks; 2405 } 2406 } else 2407 err = -ENOMEM; 2408 2409 mutex_unlock(&conf->cache_size_mutex); 2410 2411 conf->slab_cache = sc; 2412 conf->active_name = 1-conf->active_name; 2413 2414 /* Step 4, return new stripes to service */ 2415 while(!list_empty(&newstripes)) { 2416 nsh = list_entry(newstripes.next, struct stripe_head, lru); 2417 list_del_init(&nsh->lru); 2418 2419 for (i=conf->raid_disks; i < newsize; i++) 2420 if (nsh->dev[i].page == NULL) { 2421 struct page *p = alloc_page(GFP_NOIO); 2422 nsh->dev[i].page = p; 2423 nsh->dev[i].orig_page = p; 2424 if (!p) 2425 err = -ENOMEM; 2426 } 2427 raid5_release_stripe(nsh); 2428 } 2429 /* critical section pass, GFP_NOIO no longer needed */ 2430 2431 if (!err) 2432 conf->pool_size = newsize; 2433 return err; 2434 } 2435 2436 static int drop_one_stripe(struct r5conf *conf) 2437 { 2438 struct stripe_head *sh; 2439 int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK; 2440 2441 spin_lock_irq(conf->hash_locks + hash); 2442 sh = get_free_stripe(conf, hash); 2443 spin_unlock_irq(conf->hash_locks + hash); 2444 if (!sh) 2445 return 0; 2446 BUG_ON(atomic_read(&sh->count)); 2447 shrink_buffers(sh); 2448 free_stripe(conf->slab_cache, sh); 2449 atomic_dec(&conf->active_stripes); 2450 conf->max_nr_stripes--; 2451 return 1; 2452 } 2453 2454 static void shrink_stripes(struct r5conf *conf) 2455 { 2456 while (conf->max_nr_stripes && 2457 drop_one_stripe(conf)) 2458 ; 2459 2460 kmem_cache_destroy(conf->slab_cache); 2461 conf->slab_cache = NULL; 2462 } 2463 2464 static void raid5_end_read_request(struct bio * bi) 2465 { 2466 struct stripe_head *sh = bi->bi_private; 2467 struct r5conf *conf = sh->raid_conf; 2468 int disks = sh->disks, i; 2469 char b[BDEVNAME_SIZE]; 2470 struct md_rdev *rdev = NULL; 2471 sector_t s; 2472 2473 for (i=0 ; i<disks; i++) 2474 if (bi == &sh->dev[i].req) 2475 break; 2476 2477 pr_debug("end_read_request %llu/%d, count: %d, error %d.\n", 2478 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 2479 bi->bi_error); 2480 if (i == disks) { 2481 bio_reset(bi); 2482 BUG(); 2483 return; 2484 } 2485 if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) 2486 /* If replacement finished while this request was outstanding, 2487 * 'replacement' might be NULL already. 2488 * In that case it moved down to 'rdev'. 2489 * rdev is not removed until all requests are finished. 2490 */ 2491 rdev = conf->disks[i].replacement; 2492 if (!rdev) 2493 rdev = conf->disks[i].rdev; 2494 2495 if (use_new_offset(conf, sh)) 2496 s = sh->sector + rdev->new_data_offset; 2497 else 2498 s = sh->sector + rdev->data_offset; 2499 if (!bi->bi_error) { 2500 set_bit(R5_UPTODATE, &sh->dev[i].flags); 2501 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 2502 /* Note that this cannot happen on a 2503 * replacement device. 
We just fail those on 2504 * any error 2505 */ 2506 pr_info_ratelimited( 2507 "md/raid:%s: read error corrected (%lu sectors at %llu on %s)\n", 2508 mdname(conf->mddev), STRIPE_SECTORS, 2509 (unsigned long long)s, 2510 bdevname(rdev->bdev, b)); 2511 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); 2512 clear_bit(R5_ReadError, &sh->dev[i].flags); 2513 clear_bit(R5_ReWrite, &sh->dev[i].flags); 2514 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) 2515 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); 2516 2517 if (test_bit(R5_InJournal, &sh->dev[i].flags)) 2518 /* 2519 * end read for a page in journal, this 2520 * must be preparing for prexor in rmw 2521 */ 2522 set_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags); 2523 2524 if (atomic_read(&rdev->read_errors)) 2525 atomic_set(&rdev->read_errors, 0); 2526 } else { 2527 const char *bdn = bdevname(rdev->bdev, b); 2528 int retry = 0; 2529 int set_bad = 0; 2530 2531 clear_bit(R5_UPTODATE, &sh->dev[i].flags); 2532 atomic_inc(&rdev->read_errors); 2533 if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) 2534 pr_warn_ratelimited( 2535 "md/raid:%s: read error on replacement device (sector %llu on %s).\n", 2536 mdname(conf->mddev), 2537 (unsigned long long)s, 2538 bdn); 2539 else if (conf->mddev->degraded >= conf->max_degraded) { 2540 set_bad = 1; 2541 pr_warn_ratelimited( 2542 "md/raid:%s: read error not correctable (sector %llu on %s).\n", 2543 mdname(conf->mddev), 2544 (unsigned long long)s, 2545 bdn); 2546 } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) { 2547 /* Oh, no!!! */ 2548 set_bad = 1; 2549 pr_warn_ratelimited( 2550 "md/raid:%s: read error NOT corrected!! (sector %llu on %s).\n", 2551 mdname(conf->mddev), 2552 (unsigned long long)s, 2553 bdn); 2554 } else if (atomic_read(&rdev->read_errors) 2555 > conf->max_nr_stripes) 2556 pr_warn("md/raid:%s: Too many read errors, failing device %s.\n", 2557 mdname(conf->mddev), bdn); 2558 else 2559 retry = 1; 2560 if (set_bad && test_bit(In_sync, &rdev->flags) 2561 && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) 2562 retry = 1; 2563 if (retry) 2564 if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) { 2565 set_bit(R5_ReadError, &sh->dev[i].flags); 2566 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); 2567 } else 2568 set_bit(R5_ReadNoMerge, &sh->dev[i].flags); 2569 else { 2570 clear_bit(R5_ReadError, &sh->dev[i].flags); 2571 clear_bit(R5_ReWrite, &sh->dev[i].flags); 2572 if (!(set_bad 2573 && test_bit(In_sync, &rdev->flags) 2574 && rdev_set_badblocks( 2575 rdev, sh->sector, STRIPE_SECTORS, 0))) 2576 md_error(conf->mddev, rdev); 2577 } 2578 } 2579 rdev_dec_pending(rdev, conf->mddev); 2580 bio_reset(bi); 2581 clear_bit(R5_LOCKED, &sh->dev[i].flags); 2582 set_bit(STRIPE_HANDLE, &sh->state); 2583 raid5_release_stripe(sh); 2584 } 2585 2586 static void raid5_end_write_request(struct bio *bi) 2587 { 2588 struct stripe_head *sh = bi->bi_private; 2589 struct r5conf *conf = sh->raid_conf; 2590 int disks = sh->disks, i; 2591 struct md_rdev *uninitialized_var(rdev); 2592 sector_t first_bad; 2593 int bad_sectors; 2594 int replacement = 0; 2595 2596 for (i = 0 ; i < disks; i++) { 2597 if (bi == &sh->dev[i].req) { 2598 rdev = conf->disks[i].rdev; 2599 break; 2600 } 2601 if (bi == &sh->dev[i].rreq) { 2602 rdev = conf->disks[i].replacement; 2603 if (rdev) 2604 replacement = 1; 2605 else 2606 /* rdev was removed and 'replacement' 2607 * replaced it. rdev is not removed 2608 * until all requests are finished. 
2609 */ 2610 rdev = conf->disks[i].rdev; 2611 break; 2612 } 2613 } 2614 pr_debug("end_write_request %llu/%d, count %d, error: %d.\n", 2615 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 2616 bi->bi_error); 2617 if (i == disks) { 2618 bio_reset(bi); 2619 BUG(); 2620 return; 2621 } 2622 2623 if (replacement) { 2624 if (bi->bi_error) 2625 md_error(conf->mddev, rdev); 2626 else if (is_badblock(rdev, sh->sector, 2627 STRIPE_SECTORS, 2628 &first_bad, &bad_sectors)) 2629 set_bit(R5_MadeGoodRepl, &sh->dev[i].flags); 2630 } else { 2631 if (bi->bi_error) { 2632 set_bit(STRIPE_DEGRADED, &sh->state); 2633 set_bit(WriteErrorSeen, &rdev->flags); 2634 set_bit(R5_WriteError, &sh->dev[i].flags); 2635 if (!test_and_set_bit(WantReplacement, &rdev->flags)) 2636 set_bit(MD_RECOVERY_NEEDED, 2637 &rdev->mddev->recovery); 2638 } else if (is_badblock(rdev, sh->sector, 2639 STRIPE_SECTORS, 2640 &first_bad, &bad_sectors)) { 2641 set_bit(R5_MadeGood, &sh->dev[i].flags); 2642 if (test_bit(R5_ReadError, &sh->dev[i].flags)) 2643 /* That was a successful write so make 2644 * sure it looks like we already did 2645 * a re-write. 2646 */ 2647 set_bit(R5_ReWrite, &sh->dev[i].flags); 2648 } 2649 } 2650 rdev_dec_pending(rdev, conf->mddev); 2651 2652 if (sh->batch_head && bi->bi_error && !replacement) 2653 set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state); 2654 2655 bio_reset(bi); 2656 if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags)) 2657 clear_bit(R5_LOCKED, &sh->dev[i].flags); 2658 set_bit(STRIPE_HANDLE, &sh->state); 2659 raid5_release_stripe(sh); 2660 2661 if (sh->batch_head && sh != sh->batch_head) 2662 raid5_release_stripe(sh->batch_head); 2663 } 2664 2665 static void raid5_build_block(struct stripe_head *sh, int i, int previous) 2666 { 2667 struct r5dev *dev = &sh->dev[i]; 2668 2669 dev->flags = 0; 2670 dev->sector = raid5_compute_blocknr(sh, i, previous); 2671 } 2672 2673 static void raid5_error(struct mddev *mddev, struct md_rdev *rdev) 2674 { 2675 char b[BDEVNAME_SIZE]; 2676 struct r5conf *conf = mddev->private; 2677 unsigned long flags; 2678 pr_debug("raid456: error called\n"); 2679 2680 spin_lock_irqsave(&conf->device_lock, flags); 2681 clear_bit(In_sync, &rdev->flags); 2682 mddev->degraded = raid5_calc_degraded(conf); 2683 spin_unlock_irqrestore(&conf->device_lock, flags); 2684 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 2685 2686 set_bit(Blocked, &rdev->flags); 2687 set_bit(Faulty, &rdev->flags); 2688 set_mask_bits(&mddev->sb_flags, 0, 2689 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING)); 2690 pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n" 2691 "md/raid:%s: Operation continuing on %d devices.\n", 2692 mdname(mddev), 2693 bdevname(rdev->bdev, b), 2694 mdname(mddev), 2695 conf->raid_disks - mddev->degraded); 2696 r5c_update_on_rdev_error(mddev, rdev); 2697 } 2698 2699 /* 2700 * Input: a 'big' sector number, 2701 * Output: index of the data and parity disk, and the sector # in them. 2702 */ 2703 sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector, 2704 int previous, int *dd_idx, 2705 struct stripe_head *sh) 2706 { 2707 sector_t stripe, stripe2; 2708 sector_t chunk_number; 2709 unsigned int chunk_offset; 2710 int pd_idx, qd_idx; 2711 int ddf_layout = 0; 2712 sector_t new_sector; 2713 int algorithm = previous ? conf->prev_algo 2714 : conf->algorithm; 2715 int sectors_per_chunk = previous ? conf->prev_chunk_sectors 2716 : conf->chunk_sectors; 2717 int raid_disks = previous ? 
conf->previous_raid_disks 2718 : conf->raid_disks; 2719 int data_disks = raid_disks - conf->max_degraded; 2720 2721 /* First compute the information on this sector */ 2722 2723 /* 2724 * Compute the chunk number and the sector offset inside the chunk 2725 */ 2726 chunk_offset = sector_div(r_sector, sectors_per_chunk); 2727 chunk_number = r_sector; 2728 2729 /* 2730 * Compute the stripe number 2731 */ 2732 stripe = chunk_number; 2733 *dd_idx = sector_div(stripe, data_disks); 2734 stripe2 = stripe; 2735 /* 2736 * Select the parity disk based on the user selected algorithm. 2737 */ 2738 pd_idx = qd_idx = -1; 2739 switch(conf->level) { 2740 case 4: 2741 pd_idx = data_disks; 2742 break; 2743 case 5: 2744 switch (algorithm) { 2745 case ALGORITHM_LEFT_ASYMMETRIC: 2746 pd_idx = data_disks - sector_div(stripe2, raid_disks); 2747 if (*dd_idx >= pd_idx) 2748 (*dd_idx)++; 2749 break; 2750 case ALGORITHM_RIGHT_ASYMMETRIC: 2751 pd_idx = sector_div(stripe2, raid_disks); 2752 if (*dd_idx >= pd_idx) 2753 (*dd_idx)++; 2754 break; 2755 case ALGORITHM_LEFT_SYMMETRIC: 2756 pd_idx = data_disks - sector_div(stripe2, raid_disks); 2757 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 2758 break; 2759 case ALGORITHM_RIGHT_SYMMETRIC: 2760 pd_idx = sector_div(stripe2, raid_disks); 2761 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 2762 break; 2763 case ALGORITHM_PARITY_0: 2764 pd_idx = 0; 2765 (*dd_idx)++; 2766 break; 2767 case ALGORITHM_PARITY_N: 2768 pd_idx = data_disks; 2769 break; 2770 default: 2771 BUG(); 2772 } 2773 break; 2774 case 6: 2775 2776 switch (algorithm) { 2777 case ALGORITHM_LEFT_ASYMMETRIC: 2778 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 2779 qd_idx = pd_idx + 1; 2780 if (pd_idx == raid_disks-1) { 2781 (*dd_idx)++; /* Q D D D P */ 2782 qd_idx = 0; 2783 } else if (*dd_idx >= pd_idx) 2784 (*dd_idx) += 2; /* D D P Q D */ 2785 break; 2786 case ALGORITHM_RIGHT_ASYMMETRIC: 2787 pd_idx = sector_div(stripe2, raid_disks); 2788 qd_idx = pd_idx + 1; 2789 if (pd_idx == raid_disks-1) { 2790 (*dd_idx)++; /* Q D D D P */ 2791 qd_idx = 0; 2792 } else if (*dd_idx >= pd_idx) 2793 (*dd_idx) += 2; /* D D P Q D */ 2794 break; 2795 case ALGORITHM_LEFT_SYMMETRIC: 2796 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 2797 qd_idx = (pd_idx + 1) % raid_disks; 2798 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; 2799 break; 2800 case ALGORITHM_RIGHT_SYMMETRIC: 2801 pd_idx = sector_div(stripe2, raid_disks); 2802 qd_idx = (pd_idx + 1) % raid_disks; 2803 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; 2804 break; 2805 2806 case ALGORITHM_PARITY_0: 2807 pd_idx = 0; 2808 qd_idx = 1; 2809 (*dd_idx) += 2; 2810 break; 2811 case ALGORITHM_PARITY_N: 2812 pd_idx = data_disks; 2813 qd_idx = data_disks + 1; 2814 break; 2815 2816 case ALGORITHM_ROTATING_ZERO_RESTART: 2817 /* Exactly the same as RIGHT_ASYMMETRIC, but or 2818 * of blocks for computing Q is different. 
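 * As an illustration (5 devices, stripes numbered from 0), the
 * placement itself is the usual RIGHT_ASYMMETRIC rotation:
 *	stripe 0: P Q D D D
 *	stripe 1: D P Q D D
 *	stripe 2: D D P Q D
 *	stripe 3: D D D P Q
 *	stripe 4: Q D D D P
 * only the ddf_layout flag set below differs, selecting the DDF device
 * order when the Q syndrome is later generated.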
2819 */ 2820 pd_idx = sector_div(stripe2, raid_disks); 2821 qd_idx = pd_idx + 1; 2822 if (pd_idx == raid_disks-1) { 2823 (*dd_idx)++; /* Q D D D P */ 2824 qd_idx = 0; 2825 } else if (*dd_idx >= pd_idx) 2826 (*dd_idx) += 2; /* D D P Q D */ 2827 ddf_layout = 1; 2828 break; 2829 2830 case ALGORITHM_ROTATING_N_RESTART: 2831 /* Same a left_asymmetric, by first stripe is 2832 * D D D P Q rather than 2833 * Q D D D P 2834 */ 2835 stripe2 += 1; 2836 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 2837 qd_idx = pd_idx + 1; 2838 if (pd_idx == raid_disks-1) { 2839 (*dd_idx)++; /* Q D D D P */ 2840 qd_idx = 0; 2841 } else if (*dd_idx >= pd_idx) 2842 (*dd_idx) += 2; /* D D P Q D */ 2843 ddf_layout = 1; 2844 break; 2845 2846 case ALGORITHM_ROTATING_N_CONTINUE: 2847 /* Same as left_symmetric but Q is before P */ 2848 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 2849 qd_idx = (pd_idx + raid_disks - 1) % raid_disks; 2850 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 2851 ddf_layout = 1; 2852 break; 2853 2854 case ALGORITHM_LEFT_ASYMMETRIC_6: 2855 /* RAID5 left_asymmetric, with Q on last device */ 2856 pd_idx = data_disks - sector_div(stripe2, raid_disks-1); 2857 if (*dd_idx >= pd_idx) 2858 (*dd_idx)++; 2859 qd_idx = raid_disks - 1; 2860 break; 2861 2862 case ALGORITHM_RIGHT_ASYMMETRIC_6: 2863 pd_idx = sector_div(stripe2, raid_disks-1); 2864 if (*dd_idx >= pd_idx) 2865 (*dd_idx)++; 2866 qd_idx = raid_disks - 1; 2867 break; 2868 2869 case ALGORITHM_LEFT_SYMMETRIC_6: 2870 pd_idx = data_disks - sector_div(stripe2, raid_disks-1); 2871 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); 2872 qd_idx = raid_disks - 1; 2873 break; 2874 2875 case ALGORITHM_RIGHT_SYMMETRIC_6: 2876 pd_idx = sector_div(stripe2, raid_disks-1); 2877 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); 2878 qd_idx = raid_disks - 1; 2879 break; 2880 2881 case ALGORITHM_PARITY_0_6: 2882 pd_idx = 0; 2883 (*dd_idx)++; 2884 qd_idx = raid_disks - 1; 2885 break; 2886 2887 default: 2888 BUG(); 2889 } 2890 break; 2891 } 2892 2893 if (sh) { 2894 sh->pd_idx = pd_idx; 2895 sh->qd_idx = qd_idx; 2896 sh->ddf_layout = ddf_layout; 2897 } 2898 /* 2899 * Finally, compute the new sector number 2900 */ 2901 new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset; 2902 return new_sector; 2903 } 2904 2905 sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous) 2906 { 2907 struct r5conf *conf = sh->raid_conf; 2908 int raid_disks = sh->disks; 2909 int data_disks = raid_disks - conf->max_degraded; 2910 sector_t new_sector = sh->sector, check; 2911 int sectors_per_chunk = previous ? conf->prev_chunk_sectors 2912 : conf->chunk_sectors; 2913 int algorithm = previous ? 
conf->prev_algo 2914 : conf->algorithm; 2915 sector_t stripe; 2916 int chunk_offset; 2917 sector_t chunk_number; 2918 int dummy1, dd_idx = i; 2919 sector_t r_sector; 2920 struct stripe_head sh2; 2921 2922 chunk_offset = sector_div(new_sector, sectors_per_chunk); 2923 stripe = new_sector; 2924 2925 if (i == sh->pd_idx) 2926 return 0; 2927 switch(conf->level) { 2928 case 4: break; 2929 case 5: 2930 switch (algorithm) { 2931 case ALGORITHM_LEFT_ASYMMETRIC: 2932 case ALGORITHM_RIGHT_ASYMMETRIC: 2933 if (i > sh->pd_idx) 2934 i--; 2935 break; 2936 case ALGORITHM_LEFT_SYMMETRIC: 2937 case ALGORITHM_RIGHT_SYMMETRIC: 2938 if (i < sh->pd_idx) 2939 i += raid_disks; 2940 i -= (sh->pd_idx + 1); 2941 break; 2942 case ALGORITHM_PARITY_0: 2943 i -= 1; 2944 break; 2945 case ALGORITHM_PARITY_N: 2946 break; 2947 default: 2948 BUG(); 2949 } 2950 break; 2951 case 6: 2952 if (i == sh->qd_idx) 2953 return 0; /* It is the Q disk */ 2954 switch (algorithm) { 2955 case ALGORITHM_LEFT_ASYMMETRIC: 2956 case ALGORITHM_RIGHT_ASYMMETRIC: 2957 case ALGORITHM_ROTATING_ZERO_RESTART: 2958 case ALGORITHM_ROTATING_N_RESTART: 2959 if (sh->pd_idx == raid_disks-1) 2960 i--; /* Q D D D P */ 2961 else if (i > sh->pd_idx) 2962 i -= 2; /* D D P Q D */ 2963 break; 2964 case ALGORITHM_LEFT_SYMMETRIC: 2965 case ALGORITHM_RIGHT_SYMMETRIC: 2966 if (sh->pd_idx == raid_disks-1) 2967 i--; /* Q D D D P */ 2968 else { 2969 /* D D P Q D */ 2970 if (i < sh->pd_idx) 2971 i += raid_disks; 2972 i -= (sh->pd_idx + 2); 2973 } 2974 break; 2975 case ALGORITHM_PARITY_0: 2976 i -= 2; 2977 break; 2978 case ALGORITHM_PARITY_N: 2979 break; 2980 case ALGORITHM_ROTATING_N_CONTINUE: 2981 /* Like left_symmetric, but P is before Q */ 2982 if (sh->pd_idx == 0) 2983 i--; /* P D D D Q */ 2984 else { 2985 /* D D Q P D */ 2986 if (i < sh->pd_idx) 2987 i += raid_disks; 2988 i -= (sh->pd_idx + 1); 2989 } 2990 break; 2991 case ALGORITHM_LEFT_ASYMMETRIC_6: 2992 case ALGORITHM_RIGHT_ASYMMETRIC_6: 2993 if (i > sh->pd_idx) 2994 i--; 2995 break; 2996 case ALGORITHM_LEFT_SYMMETRIC_6: 2997 case ALGORITHM_RIGHT_SYMMETRIC_6: 2998 if (i < sh->pd_idx) 2999 i += data_disks + 1; 3000 i -= (sh->pd_idx + 1); 3001 break; 3002 case ALGORITHM_PARITY_0_6: 3003 i -= 1; 3004 break; 3005 default: 3006 BUG(); 3007 } 3008 break; 3009 } 3010 3011 chunk_number = stripe * data_disks + i; 3012 r_sector = chunk_number * sectors_per_chunk + chunk_offset; 3013 3014 check = raid5_compute_sector(conf, r_sector, 3015 previous, &dummy1, &sh2); 3016 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx 3017 || sh2.qd_idx != sh->qd_idx) { 3018 pr_warn("md/raid:%s: compute_blocknr: map not correct\n", 3019 mdname(conf->mddev)); 3020 return 0; 3021 } 3022 return r_sector; 3023 } 3024 3025 /* 3026 * There are cases where we want handle_stripe_dirtying() and 3027 * schedule_reconstruction() to delay towrite to some dev of a stripe. 3028 * 3029 * This function checks whether we want to delay the towrite. Specifically, 3030 * we delay the towrite when: 3031 * 3032 * 1. degraded stripe has a non-overwrite to the missing dev, AND this 3033 * stripe has data in journal (for other devices). 3034 * 3035 * In this case, when reading data for the non-overwrite dev, it is 3036 * necessary to handle complex rmw of write back cache (prexor with 3037 * orig_page, and xor with page). To keep read path simple, we would 3038 * like to flush data in journal to RAID disks first, so complex rmw 3039 * is handled in the write patch (handle_stripe_dirtying). 3040 * 3041 * 2. 
when journal space is critical (R5C_LOG_CRITICAL=1) 3042 * 3043 * It is important to be able to flush all stripes in raid5-cache. 3044 * Therefore, we need to reserve some space on the journal device for 3045 * these flushes. If the flush operation includes pending writes to the 3046 * stripe, we need to reserve (conf->raid_disks + 1) pages per stripe 3047 * for the flush out. If we exclude these pending writes from the flush 3048 * operation, we only need (conf->max_degraded + 1) pages per stripe. 3049 * Therefore, excluding pending writes in these cases enables more 3050 * efficient use of the journal device. 3051 * 3052 * Note: To make sure the stripe makes progress, we only delay 3053 * towrite for stripes with data already in journal (injournal > 0). 3054 * When R5C_LOG_CRITICAL is set, stripes with injournal == 0 will be sent to 3055 * the no_space_stripes list. 3056 * 3057 * 3. during journal failure 3058 * On journal failure, we try to flush all cached data to raid disks 3059 * based on data in the stripe cache. The array is read-only to upper 3060 * layers, so we would skip all pending writes. 3061 * 3062 */ 3063 static inline bool delay_towrite(struct r5conf *conf, 3064 struct r5dev *dev, 3065 struct stripe_head_state *s) 3066 { 3067 /* case 1 above */ 3068 if (!test_bit(R5_OVERWRITE, &dev->flags) && 3069 !test_bit(R5_Insync, &dev->flags) && s->injournal) 3070 return true; 3071 /* case 2 above */ 3072 if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) && 3073 s->injournal > 0) 3074 return true; 3075 /* case 3 above */ 3076 if (s->log_failed && s->injournal) 3077 return true; 3078 return false; 3079 } 3080 3081 static void 3082 schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, 3083 int rcw, int expand) 3084 { 3085 int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx, disks = sh->disks; 3086 struct r5conf *conf = sh->raid_conf; 3087 int level = conf->level; 3088 3089 if (rcw) { 3090 /* 3091 * In some cases, handle_stripe_dirtying initially decides to 3092 * run rmw and allocates an extra page for prexor. However, rcw 3093 * turns out to be cheaper later on. We need to free the extra page now, 3094 * because we won't be able to do that in ops_complete_prexor().
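 * (For example, if every data block of the stripe ends up fully
 * overwritten, reconstruct-write needs no reads at all, whereas the
 * rmw plan would still have read the old data and parity first.)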
3095 */ 3096 r5c_release_extra_page(sh); 3097 3098 for (i = disks; i--; ) { 3099 struct r5dev *dev = &sh->dev[i]; 3100 3101 if (dev->towrite && !delay_towrite(conf, dev, s)) { 3102 set_bit(R5_LOCKED, &dev->flags); 3103 set_bit(R5_Wantdrain, &dev->flags); 3104 if (!expand) 3105 clear_bit(R5_UPTODATE, &dev->flags); 3106 s->locked++; 3107 } else if (test_bit(R5_InJournal, &dev->flags)) { 3108 set_bit(R5_LOCKED, &dev->flags); 3109 s->locked++; 3110 } 3111 } 3112 /* if we are not expanding this is a proper write request, and 3113 * there will be bios with new data to be drained into the 3114 * stripe cache 3115 */ 3116 if (!expand) { 3117 if (!s->locked) 3118 /* False alarm, nothing to do */ 3119 return; 3120 sh->reconstruct_state = reconstruct_state_drain_run; 3121 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); 3122 } else 3123 sh->reconstruct_state = reconstruct_state_run; 3124 3125 set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); 3126 3127 if (s->locked + conf->max_degraded == disks) 3128 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) 3129 atomic_inc(&conf->pending_full_writes); 3130 } else { 3131 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || 3132 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); 3133 BUG_ON(level == 6 && 3134 (!(test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags) || 3135 test_bit(R5_Wantcompute, &sh->dev[qd_idx].flags)))); 3136 3137 for (i = disks; i--; ) { 3138 struct r5dev *dev = &sh->dev[i]; 3139 if (i == pd_idx || i == qd_idx) 3140 continue; 3141 3142 if (dev->towrite && 3143 (test_bit(R5_UPTODATE, &dev->flags) || 3144 test_bit(R5_Wantcompute, &dev->flags))) { 3145 set_bit(R5_Wantdrain, &dev->flags); 3146 set_bit(R5_LOCKED, &dev->flags); 3147 clear_bit(R5_UPTODATE, &dev->flags); 3148 s->locked++; 3149 } else if (test_bit(R5_InJournal, &dev->flags)) { 3150 set_bit(R5_LOCKED, &dev->flags); 3151 s->locked++; 3152 } 3153 } 3154 if (!s->locked) 3155 /* False alarm - nothing to do */ 3156 return; 3157 sh->reconstruct_state = reconstruct_state_prexor_drain_run; 3158 set_bit(STRIPE_OP_PREXOR, &s->ops_request); 3159 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); 3160 set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); 3161 } 3162 3163 /* keep the parity disk(s) locked while asynchronous operations 3164 * are in flight 3165 */ 3166 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); 3167 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 3168 s->locked++; 3169 3170 if (level == 6) { 3171 int qd_idx = sh->qd_idx; 3172 struct r5dev *dev = &sh->dev[qd_idx]; 3173 3174 set_bit(R5_LOCKED, &dev->flags); 3175 clear_bit(R5_UPTODATE, &dev->flags); 3176 s->locked++; 3177 } 3178 3179 if (raid5_has_ppl(sh->raid_conf) && sh->ppl_page && 3180 test_bit(STRIPE_OP_BIODRAIN, &s->ops_request) && 3181 !test_bit(STRIPE_FULL_WRITE, &sh->state) && 3182 test_bit(R5_Insync, &sh->dev[pd_idx].flags)) 3183 set_bit(STRIPE_OP_PARTIAL_PARITY, &s->ops_request); 3184 3185 pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n", 3186 __func__, (unsigned long long)sh->sector, 3187 s->locked, s->ops_request); 3188 } 3189 3190 /* 3191 * Each stripe/dev can have one or more bion attached. 3192 * toread/towrite point to the first in a chain. 3193 * The bi_next chain must be in order. 
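 *
 * Hypothetical picture of one dev's write chain (the sector numbers
 * are purely illustrative):
 *
 *	dev->towrite -> bio[16..23] -> bio[24..31] -> NULL
 *
 * add_stripe_bio() below keeps the list sorted by bi_iter.bi_sector and
 * refuses bios that would overlap an existing entry.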
3194 */ 3195 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, 3196 int forwrite, int previous) 3197 { 3198 struct bio **bip; 3199 struct r5conf *conf = sh->raid_conf; 3200 int firstwrite=0; 3201 3202 pr_debug("adding bi b#%llu to stripe s#%llu\n", 3203 (unsigned long long)bi->bi_iter.bi_sector, 3204 (unsigned long long)sh->sector); 3205 3206 spin_lock_irq(&sh->stripe_lock); 3207 /* Don't allow new IO added to stripes in batch list */ 3208 if (sh->batch_head) 3209 goto overlap; 3210 if (forwrite) { 3211 bip = &sh->dev[dd_idx].towrite; 3212 if (*bip == NULL) 3213 firstwrite = 1; 3214 } else 3215 bip = &sh->dev[dd_idx].toread; 3216 while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) { 3217 if (bio_end_sector(*bip) > bi->bi_iter.bi_sector) 3218 goto overlap; 3219 bip = & (*bip)->bi_next; 3220 } 3221 if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi)) 3222 goto overlap; 3223 3224 if (forwrite && raid5_has_ppl(conf)) { 3225 /* 3226 * With PPL only writes to consecutive data chunks within a 3227 * stripe are allowed because for a single stripe_head we can 3228 * only have one PPL entry at a time, which describes one data 3229 * range. Not really an overlap, but wait_for_overlap can be 3230 * used to handle this. 3231 */ 3232 sector_t sector; 3233 sector_t first = 0; 3234 sector_t last = 0; 3235 int count = 0; 3236 int i; 3237 3238 for (i = 0; i < sh->disks; i++) { 3239 if (i != sh->pd_idx && 3240 (i == dd_idx || sh->dev[i].towrite)) { 3241 sector = sh->dev[i].sector; 3242 if (count == 0 || sector < first) 3243 first = sector; 3244 if (sector > last) 3245 last = sector; 3246 count++; 3247 } 3248 } 3249 3250 if (first + conf->chunk_sectors * (count - 1) != last) 3251 goto overlap; 3252 } 3253 3254 if (!forwrite || previous) 3255 clear_bit(STRIPE_BATCH_READY, &sh->state); 3256 3257 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next); 3258 if (*bip) 3259 bi->bi_next = *bip; 3260 *bip = bi; 3261 bio_inc_remaining(bi); 3262 md_write_inc(conf->mddev, bi); 3263 3264 if (forwrite) { 3265 /* check if page is covered */ 3266 sector_t sector = sh->dev[dd_idx].sector; 3267 for (bi=sh->dev[dd_idx].towrite; 3268 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && 3269 bi && bi->bi_iter.bi_sector <= sector; 3270 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { 3271 if (bio_end_sector(bi) >= sector) 3272 sector = bio_end_sector(bi); 3273 } 3274 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) 3275 if (!test_and_set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags)) 3276 sh->overwrite_disks++; 3277 } 3278 3279 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", 3280 (unsigned long long)(*bip)->bi_iter.bi_sector, 3281 (unsigned long long)sh->sector, dd_idx); 3282 3283 if (conf->mddev->bitmap && firstwrite) { 3284 /* Cannot hold spinlock over bitmap_startwrite, 3285 * but must ensure this isn't added to a batch until 3286 * we have added to the bitmap and set bm_seq. 3287 * So set STRIPE_BITMAP_PENDING to prevent 3288 * batching. 3289 * If multiple add_stripe_bio() calls race here they 3290 * much all set STRIPE_BITMAP_PENDING. So only the first one 3291 * to complete "bitmap_startwrite" gets to set 3292 * STRIPE_BIT_DELAY. This is important as once a stripe 3293 * is added to a batch, STRIPE_BIT_DELAY cannot be changed 3294 * any more. 
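 * (Concretely, the sequence just below is: set STRIPE_BITMAP_PENDING,
 * drop stripe_lock, call bitmap_startwrite(), retake the lock, clear
 * the flag, and only for a stripe that is still un-batched set bm_seq
 * and STRIPE_BIT_DELAY.)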
3295 */ 3296 set_bit(STRIPE_BITMAP_PENDING, &sh->state); 3297 spin_unlock_irq(&sh->stripe_lock); 3298 bitmap_startwrite(conf->mddev->bitmap, sh->sector, 3299 STRIPE_SECTORS, 0); 3300 spin_lock_irq(&sh->stripe_lock); 3301 clear_bit(STRIPE_BITMAP_PENDING, &sh->state); 3302 if (!sh->batch_head) { 3303 sh->bm_seq = conf->seq_flush+1; 3304 set_bit(STRIPE_BIT_DELAY, &sh->state); 3305 } 3306 } 3307 spin_unlock_irq(&sh->stripe_lock); 3308 3309 if (stripe_can_batch(sh)) 3310 stripe_add_to_batch_list(conf, sh); 3311 return 1; 3312 3313 overlap: 3314 set_bit(R5_Overlap, &sh->dev[dd_idx].flags); 3315 spin_unlock_irq(&sh->stripe_lock); 3316 return 0; 3317 } 3318 3319 static void end_reshape(struct r5conf *conf); 3320 3321 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, 3322 struct stripe_head *sh) 3323 { 3324 int sectors_per_chunk = 3325 previous ? conf->prev_chunk_sectors : conf->chunk_sectors; 3326 int dd_idx; 3327 int chunk_offset = sector_div(stripe, sectors_per_chunk); 3328 int disks = previous ? conf->previous_raid_disks : conf->raid_disks; 3329 3330 raid5_compute_sector(conf, 3331 stripe * (disks - conf->max_degraded) 3332 *sectors_per_chunk + chunk_offset, 3333 previous, 3334 &dd_idx, sh); 3335 } 3336 3337 static void 3338 handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, 3339 struct stripe_head_state *s, int disks) 3340 { 3341 int i; 3342 BUG_ON(sh->batch_head); 3343 for (i = disks; i--; ) { 3344 struct bio *bi; 3345 int bitmap_end = 0; 3346 3347 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 3348 struct md_rdev *rdev; 3349 rcu_read_lock(); 3350 rdev = rcu_dereference(conf->disks[i].rdev); 3351 if (rdev && test_bit(In_sync, &rdev->flags) && 3352 !test_bit(Faulty, &rdev->flags)) 3353 atomic_inc(&rdev->nr_pending); 3354 else 3355 rdev = NULL; 3356 rcu_read_unlock(); 3357 if (rdev) { 3358 if (!rdev_set_badblocks( 3359 rdev, 3360 sh->sector, 3361 STRIPE_SECTORS, 0)) 3362 md_error(conf->mddev, rdev); 3363 rdev_dec_pending(rdev, conf->mddev); 3364 } 3365 } 3366 spin_lock_irq(&sh->stripe_lock); 3367 /* fail all writes first */ 3368 bi = sh->dev[i].towrite; 3369 sh->dev[i].towrite = NULL; 3370 sh->overwrite_disks = 0; 3371 spin_unlock_irq(&sh->stripe_lock); 3372 if (bi) 3373 bitmap_end = 1; 3374 3375 log_stripe_write_finished(sh); 3376 3377 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 3378 wake_up(&conf->wait_for_overlap); 3379 3380 while (bi && bi->bi_iter.bi_sector < 3381 sh->dev[i].sector + STRIPE_SECTORS) { 3382 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 3383 3384 bi->bi_error = -EIO; 3385 md_write_end(conf->mddev); 3386 bio_endio(bi); 3387 bi = nextbi; 3388 } 3389 if (bitmap_end) 3390 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 3391 STRIPE_SECTORS, 0, 0); 3392 bitmap_end = 0; 3393 /* and fail all 'written' */ 3394 bi = sh->dev[i].written; 3395 sh->dev[i].written = NULL; 3396 if (test_and_clear_bit(R5_SkipCopy, &sh->dev[i].flags)) { 3397 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); 3398 sh->dev[i].page = sh->dev[i].orig_page; 3399 } 3400 3401 if (bi) bitmap_end = 1; 3402 while (bi && bi->bi_iter.bi_sector < 3403 sh->dev[i].sector + STRIPE_SECTORS) { 3404 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); 3405 3406 bi->bi_error = -EIO; 3407 md_write_end(conf->mddev); 3408 bio_endio(bi); 3409 bi = bi2; 3410 } 3411 3412 /* fail any reads if this device is non-operational and 3413 * the data has not reached the cache yet. 
3414 */ 3415 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && 3416 s->failed > conf->max_degraded && 3417 (!test_bit(R5_Insync, &sh->dev[i].flags) || 3418 test_bit(R5_ReadError, &sh->dev[i].flags))) { 3419 spin_lock_irq(&sh->stripe_lock); 3420 bi = sh->dev[i].toread; 3421 sh->dev[i].toread = NULL; 3422 spin_unlock_irq(&sh->stripe_lock); 3423 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 3424 wake_up(&conf->wait_for_overlap); 3425 if (bi) 3426 s->to_read--; 3427 while (bi && bi->bi_iter.bi_sector < 3428 sh->dev[i].sector + STRIPE_SECTORS) { 3429 struct bio *nextbi = 3430 r5_next_bio(bi, sh->dev[i].sector); 3431 3432 bi->bi_error = -EIO; 3433 bio_endio(bi); 3434 bi = nextbi; 3435 } 3436 } 3437 if (bitmap_end) 3438 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 3439 STRIPE_SECTORS, 0, 0); 3440 /* If we were in the middle of a write the parity block might 3441 * still be locked - so just clear all R5_LOCKED flags 3442 */ 3443 clear_bit(R5_LOCKED, &sh->dev[i].flags); 3444 } 3445 s->to_write = 0; 3446 s->written = 0; 3447 3448 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 3449 if (atomic_dec_and_test(&conf->pending_full_writes)) 3450 md_wakeup_thread(conf->mddev->thread); 3451 } 3452 3453 static void 3454 handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, 3455 struct stripe_head_state *s) 3456 { 3457 int abort = 0; 3458 int i; 3459 3460 BUG_ON(sh->batch_head); 3461 clear_bit(STRIPE_SYNCING, &sh->state); 3462 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) 3463 wake_up(&conf->wait_for_overlap); 3464 s->syncing = 0; 3465 s->replacing = 0; 3466 /* There is nothing more to do for sync/check/repair. 3467 * Don't even need to abort as that is handled elsewhere 3468 * if needed, and not always wanted e.g. if there is a known 3469 * bad block here. 
3470 * For recover/replace we need to record a bad block on all 3471 * non-sync devices, or abort the recovery 3472 */ 3473 if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) { 3474 /* During recovery devices cannot be removed, so 3475 * locking and refcounting of rdevs is not needed 3476 */ 3477 rcu_read_lock(); 3478 for (i = 0; i < conf->raid_disks; i++) { 3479 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); 3480 if (rdev 3481 && !test_bit(Faulty, &rdev->flags) 3482 && !test_bit(In_sync, &rdev->flags) 3483 && !rdev_set_badblocks(rdev, sh->sector, 3484 STRIPE_SECTORS, 0)) 3485 abort = 1; 3486 rdev = rcu_dereference(conf->disks[i].replacement); 3487 if (rdev 3488 && !test_bit(Faulty, &rdev->flags) 3489 && !test_bit(In_sync, &rdev->flags) 3490 && !rdev_set_badblocks(rdev, sh->sector, 3491 STRIPE_SECTORS, 0)) 3492 abort = 1; 3493 } 3494 rcu_read_unlock(); 3495 if (abort) 3496 conf->recovery_disabled = 3497 conf->mddev->recovery_disabled; 3498 } 3499 md_done_sync(conf->mddev, STRIPE_SECTORS, !abort); 3500 } 3501 3502 static int want_replace(struct stripe_head *sh, int disk_idx) 3503 { 3504 struct md_rdev *rdev; 3505 int rv = 0; 3506 3507 rcu_read_lock(); 3508 rdev = rcu_dereference(sh->raid_conf->disks[disk_idx].replacement); 3509 if (rdev 3510 && !test_bit(Faulty, &rdev->flags) 3511 && !test_bit(In_sync, &rdev->flags) 3512 && (rdev->recovery_offset <= sh->sector 3513 || rdev->mddev->recovery_cp <= sh->sector)) 3514 rv = 1; 3515 rcu_read_unlock(); 3516 return rv; 3517 } 3518 3519 static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, 3520 int disk_idx, int disks) 3521 { 3522 struct r5dev *dev = &sh->dev[disk_idx]; 3523 struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]], 3524 &sh->dev[s->failed_num[1]] }; 3525 int i; 3526 3527 3528 if (test_bit(R5_LOCKED, &dev->flags) || 3529 test_bit(R5_UPTODATE, &dev->flags)) 3530 /* No point reading this as we already have it or have 3531 * decided to get it. 3532 */ 3533 return 0; 3534 3535 if (dev->toread || 3536 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags))) 3537 /* We need this block to directly satisfy a request */ 3538 return 1; 3539 3540 if (s->syncing || s->expanding || 3541 (s->replacing && want_replace(sh, disk_idx))) 3542 /* When syncing, or expanding we read everything. 3543 * When replacing, we need the replaced block. 3544 */ 3545 return 1; 3546 3547 if ((s->failed >= 1 && fdev[0]->toread) || 3548 (s->failed >= 2 && fdev[1]->toread)) 3549 /* If we want to read from a failed device, then 3550 * we need to actually read every other device. 3551 */ 3552 return 1; 3553 3554 /* Sometimes neither read-modify-write nor reconstruct-write 3555 * cycles can work. In those cases we read every block we 3556 * can. Then the parity-update is certain to have enough to 3557 * work with. 3558 * This can only be a problem when we need to write something, 3559 * and some device has failed. If either of those tests 3560 * fail we need look no further. 3561 */ 3562 if (!s->failed || !s->to_write) 3563 return 0; 3564 3565 if (test_bit(R5_Insync, &dev->flags) && 3566 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 3567 /* Pre-reads at not permitted until after short delay 3568 * to gather multiple requests. However if this 3569 * device is no Insync, the block could only be computed 3570 * and there is no need to delay that. 
3571 */ 3572 return 0; 3573 3574 for (i = 0; i < s->failed && i < 2; i++) { 3575 if (fdev[i]->towrite && 3576 !test_bit(R5_UPTODATE, &fdev[i]->flags) && 3577 !test_bit(R5_OVERWRITE, &fdev[i]->flags)) 3578 /* If we have a partial write to a failed 3579 * device, then we will need to reconstruct 3580 * the content of that device, so all other 3581 * devices must be read. 3582 */ 3583 return 1; 3584 } 3585 3586 /* If we are forced to do a reconstruct-write, either because 3587 * the current RAID6 implementation only supports that, or 3588 * because parity cannot be trusted and we are currently 3589 * recovering it, there is extra need to be careful. 3590 * If one of the devices that we would need to read, because 3591 * it is not being overwritten (and maybe not written at all) 3592 * is missing/faulty, then we need to read everything we can. 3593 */ 3594 if (sh->raid_conf->level != 6 && 3595 sh->sector < sh->raid_conf->mddev->recovery_cp) 3596 /* reconstruct-write isn't being forced */ 3597 return 0; 3598 for (i = 0; i < s->failed && i < 2; i++) { 3599 if (s->failed_num[i] != sh->pd_idx && 3600 s->failed_num[i] != sh->qd_idx && 3601 !test_bit(R5_UPTODATE, &fdev[i]->flags) && 3602 !test_bit(R5_OVERWRITE, &fdev[i]->flags)) 3603 return 1; 3604 } 3605 3606 return 0; 3607 } 3608 3609 /* fetch_block - checks the given member device to see if its data needs 3610 * to be read or computed to satisfy a request. 3611 * 3612 * Returns 1 when no more member devices need to be checked, otherwise returns 3613 * 0 to tell the loop in handle_stripe_fill to continue 3614 */ 3615 static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, 3616 int disk_idx, int disks) 3617 { 3618 struct r5dev *dev = &sh->dev[disk_idx]; 3619 3620 /* is the data in this block needed, and can we get it? */ 3621 if (need_this_block(sh, s, disk_idx, disks)) { 3622 /* we would like to get this block, possibly by computing it, 3623 * otherwise read it if the backing disk is insync 3624 */ 3625 BUG_ON(test_bit(R5_Wantcompute, &dev->flags)); 3626 BUG_ON(test_bit(R5_Wantread, &dev->flags)); 3627 BUG_ON(sh->batch_head); 3628 3629 /* 3630 * In the raid6 case if the only non-uptodate disk is P 3631 * then we already trusted P to compute the other failed 3632 * drives. It is safe to compute rather than re-read P. 3633 * In other cases we only compute blocks from failed 3634 * devices, otherwise check/repair might fail to detect 3635 * a real inconsistency. 3636 */ 3637 3638 if ((s->uptodate == disks - 1) && 3639 ((sh->qd_idx >= 0 && sh->pd_idx == disk_idx) || 3640 (s->failed && (disk_idx == s->failed_num[0] || 3641 disk_idx == s->failed_num[1])))) { 3642 /* have disk failed, and we're requested to fetch it; 3643 * do compute it 3644 */ 3645 pr_debug("Computing stripe %llu block %d\n", 3646 (unsigned long long)sh->sector, disk_idx); 3647 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 3648 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 3649 set_bit(R5_Wantcompute, &dev->flags); 3650 sh->ops.target = disk_idx; 3651 sh->ops.target2 = -1; /* no 2nd target */ 3652 s->req_compute = 1; 3653 /* Careful: from this point on 'uptodate' is in the eye 3654 * of raid_run_ops which services 'compute' operations 3655 * before writes. R5_Wantcompute flags a block that will 3656 * be R5_UPTODATE by the time it is needed for a 3657 * subsequent operation. 
3658 */ 3659 s->uptodate++; 3660 return 1; 3661 } else if (s->uptodate == disks-2 && s->failed >= 2) { 3662 /* Computing 2-failure is *very* expensive; only 3663 * do it if failed >= 2 3664 */ 3665 int other; 3666 for (other = disks; other--; ) { 3667 if (other == disk_idx) 3668 continue; 3669 if (!test_bit(R5_UPTODATE, 3670 &sh->dev[other].flags)) 3671 break; 3672 } 3673 BUG_ON(other < 0); 3674 pr_debug("Computing stripe %llu blocks %d,%d\n", 3675 (unsigned long long)sh->sector, 3676 disk_idx, other); 3677 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 3678 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 3679 set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags); 3680 set_bit(R5_Wantcompute, &sh->dev[other].flags); 3681 sh->ops.target = disk_idx; 3682 sh->ops.target2 = other; 3683 s->uptodate += 2; 3684 s->req_compute = 1; 3685 return 1; 3686 } else if (test_bit(R5_Insync, &dev->flags)) { 3687 set_bit(R5_LOCKED, &dev->flags); 3688 set_bit(R5_Wantread, &dev->flags); 3689 s->locked++; 3690 pr_debug("Reading block %d (sync=%d)\n", 3691 disk_idx, s->syncing); 3692 } 3693 } 3694 3695 return 0; 3696 } 3697 3698 /** 3699 * handle_stripe_fill - read or compute data to satisfy pending requests. 3700 */ 3701 static void handle_stripe_fill(struct stripe_head *sh, 3702 struct stripe_head_state *s, 3703 int disks) 3704 { 3705 int i; 3706 3707 /* look for blocks to read/compute, skip this if a compute 3708 * is already in flight, or if the stripe contents are in the 3709 * midst of changing due to a write 3710 */ 3711 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && 3712 !sh->reconstruct_state) { 3713 3714 /* 3715 * For degraded stripe with data in journal, do not handle 3716 * read requests yet, instead, flush the stripe to raid 3717 * disks first, this avoids handling complex rmw of write 3718 * back cache (prexor with orig_page, and then xor with 3719 * page) in the read path 3720 */ 3721 if (s->injournal && s->failed) { 3722 if (test_bit(STRIPE_R5C_CACHING, &sh->state)) 3723 r5c_make_stripe_write_out(sh); 3724 goto out; 3725 } 3726 3727 for (i = disks; i--; ) 3728 if (fetch_block(sh, s, i, disks)) 3729 break; 3730 } 3731 out: 3732 set_bit(STRIPE_HANDLE, &sh->state); 3733 } 3734 3735 static void break_stripe_batch_list(struct stripe_head *head_sh, 3736 unsigned long handle_flags); 3737 /* handle_stripe_clean_event 3738 * any written block on an uptodate or failed drive can be returned. 3739 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but 3740 * never LOCKED, so we don't need to test 'failed' directly. 
3741 */ 3742 static void handle_stripe_clean_event(struct r5conf *conf, 3743 struct stripe_head *sh, int disks) 3744 { 3745 int i; 3746 struct r5dev *dev; 3747 int discard_pending = 0; 3748 struct stripe_head *head_sh = sh; 3749 bool do_endio = false; 3750 3751 for (i = disks; i--; ) 3752 if (sh->dev[i].written) { 3753 dev = &sh->dev[i]; 3754 if (!test_bit(R5_LOCKED, &dev->flags) && 3755 (test_bit(R5_UPTODATE, &dev->flags) || 3756 test_bit(R5_Discard, &dev->flags) || 3757 test_bit(R5_SkipCopy, &dev->flags))) { 3758 /* We can return any write requests */ 3759 struct bio *wbi, *wbi2; 3760 pr_debug("Return write for disc %d\n", i); 3761 if (test_and_clear_bit(R5_Discard, &dev->flags)) 3762 clear_bit(R5_UPTODATE, &dev->flags); 3763 if (test_and_clear_bit(R5_SkipCopy, &dev->flags)) { 3764 WARN_ON(test_bit(R5_UPTODATE, &dev->flags)); 3765 } 3766 do_endio = true; 3767 3768 returnbi: 3769 dev->page = dev->orig_page; 3770 wbi = dev->written; 3771 dev->written = NULL; 3772 while (wbi && wbi->bi_iter.bi_sector < 3773 dev->sector + STRIPE_SECTORS) { 3774 wbi2 = r5_next_bio(wbi, dev->sector); 3775 md_write_end(conf->mddev); 3776 bio_endio(wbi); 3777 wbi = wbi2; 3778 } 3779 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 3780 STRIPE_SECTORS, 3781 !test_bit(STRIPE_DEGRADED, &sh->state), 3782 0); 3783 if (head_sh->batch_head) { 3784 sh = list_first_entry(&sh->batch_list, 3785 struct stripe_head, 3786 batch_list); 3787 if (sh != head_sh) { 3788 dev = &sh->dev[i]; 3789 goto returnbi; 3790 } 3791 } 3792 sh = head_sh; 3793 dev = &sh->dev[i]; 3794 } else if (test_bit(R5_Discard, &dev->flags)) 3795 discard_pending = 1; 3796 } 3797 3798 log_stripe_write_finished(sh); 3799 3800 if (!discard_pending && 3801 test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) { 3802 int hash; 3803 clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); 3804 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); 3805 if (sh->qd_idx >= 0) { 3806 clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); 3807 clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags); 3808 } 3809 /* now that discard is done we can proceed with any sync */ 3810 clear_bit(STRIPE_DISCARD, &sh->state); 3811 /* 3812 * SCSI discard will change some bio fields and the stripe has 3813 * no updated data, so remove it from hash list and the stripe 3814 * will be reinitialized 3815 */ 3816 unhash: 3817 hash = sh->hash_lock_index; 3818 spin_lock_irq(conf->hash_locks + hash); 3819 remove_hash(sh); 3820 spin_unlock_irq(conf->hash_locks + hash); 3821 if (head_sh->batch_head) { 3822 sh = list_first_entry(&sh->batch_list, 3823 struct stripe_head, batch_list); 3824 if (sh != head_sh) 3825 goto unhash; 3826 } 3827 sh = head_sh; 3828 3829 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) 3830 set_bit(STRIPE_HANDLE, &sh->state); 3831 3832 } 3833 3834 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 3835 if (atomic_dec_and_test(&conf->pending_full_writes)) 3836 md_wakeup_thread(conf->mddev->thread); 3837 3838 if (head_sh->batch_head && do_endio) 3839 break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS); 3840 } 3841 3842 /* 3843 * For RMW in write back cache, we need extra page in prexor to store the 3844 * old data. This page is stored in dev->orig_page. 3845 * 3846 * This function checks whether we have data for prexor. 
The exact logic 3847 * is: 3848 * R5_UPTODATE && (!R5_InJournal || R5_OrigPageUPTDODATE) 3849 */ 3850 static inline bool uptodate_for_rmw(struct r5dev *dev) 3851 { 3852 return (test_bit(R5_UPTODATE, &dev->flags)) && 3853 (!test_bit(R5_InJournal, &dev->flags) || 3854 test_bit(R5_OrigPageUPTDODATE, &dev->flags)); 3855 } 3856 3857 static int handle_stripe_dirtying(struct r5conf *conf, 3858 struct stripe_head *sh, 3859 struct stripe_head_state *s, 3860 int disks) 3861 { 3862 int rmw = 0, rcw = 0, i; 3863 sector_t recovery_cp = conf->mddev->recovery_cp; 3864 3865 /* Check whether resync is now happening or should start. 3866 * If yes, then the array is dirty (after unclean shutdown or 3867 * initial creation), so parity in some stripes might be inconsistent. 3868 * In this case, we need to always do reconstruct-write, to ensure 3869 * that in case of drive failure or read-error correction, we 3870 * generate correct data from the parity. 3871 */ 3872 if (conf->rmw_level == PARITY_DISABLE_RMW || 3873 (recovery_cp < MaxSector && sh->sector >= recovery_cp && 3874 s->failed == 0)) { 3875 /* Calculate the real rcw later - for now make it 3876 * look like rcw is cheaper 3877 */ 3878 rcw = 1; rmw = 2; 3879 pr_debug("force RCW rmw_level=%u, recovery_cp=%llu sh->sector=%llu\n", 3880 conf->rmw_level, (unsigned long long)recovery_cp, 3881 (unsigned long long)sh->sector); 3882 } else for (i = disks; i--; ) { 3883 /* would I have to read this buffer for read_modify_write */ 3884 struct r5dev *dev = &sh->dev[i]; 3885 if (((dev->towrite && !delay_towrite(conf, dev, s)) || 3886 i == sh->pd_idx || i == sh->qd_idx || 3887 test_bit(R5_InJournal, &dev->flags)) && 3888 !test_bit(R5_LOCKED, &dev->flags) && 3889 !(uptodate_for_rmw(dev) || 3890 test_bit(R5_Wantcompute, &dev->flags))) { 3891 if (test_bit(R5_Insync, &dev->flags)) 3892 rmw++; 3893 else 3894 rmw += 2*disks; /* cannot read it */ 3895 } 3896 /* Would I have to read this buffer for reconstruct_write */ 3897 if (!test_bit(R5_OVERWRITE, &dev->flags) && 3898 i != sh->pd_idx && i != sh->qd_idx && 3899 !test_bit(R5_LOCKED, &dev->flags) && 3900 !(test_bit(R5_UPTODATE, &dev->flags) || 3901 test_bit(R5_Wantcompute, &dev->flags))) { 3902 if (test_bit(R5_Insync, &dev->flags)) 3903 rcw++; 3904 else 3905 rcw += 2*disks; 3906 } 3907 } 3908 3909 pr_debug("for sector %llu state 0x%lx, rmw=%d rcw=%d\n", 3910 (unsigned long long)sh->sector, sh->state, rmw, rcw); 3911 set_bit(STRIPE_HANDLE, &sh->state); 3912 if ((rmw < rcw || (rmw == rcw && conf->rmw_level == PARITY_PREFER_RMW)) && rmw > 0) { 3913 /* prefer read-modify-write, but need to get some data */ 3914 if (conf->mddev->queue) 3915 blk_add_trace_msg(conf->mddev->queue, 3916 "raid5 rmw %llu %d", 3917 (unsigned long long)sh->sector, rmw); 3918 for (i = disks; i--; ) { 3919 struct r5dev *dev = &sh->dev[i]; 3920 if (test_bit(R5_InJournal, &dev->flags) && 3921 dev->page == dev->orig_page && 3922 !test_bit(R5_LOCKED, &sh->dev[sh->pd_idx].flags)) { 3923 /* alloc page for prexor */ 3924 struct page *p = alloc_page(GFP_NOIO); 3925 3926 if (p) { 3927 dev->orig_page = p; 3928 continue; 3929 } 3930 3931 /* 3932 * alloc_page() failed, try use 3933 * disk_info->extra_page 3934 */ 3935 if (!test_and_set_bit(R5C_EXTRA_PAGE_IN_USE, 3936 &conf->cache_state)) { 3937 r5c_use_extra_page(sh); 3938 break; 3939 } 3940 3941 /* extra_page in use, add to delayed_list */ 3942 set_bit(STRIPE_DELAYED, &sh->state); 3943 s->waiting_extra_page = 1; 3944 return -EAGAIN; 3945 } 3946 } 3947 3948 for (i = disks; i--; ) { 3949 struct r5dev *dev = 
&sh->dev[i]; 3950 if (((dev->towrite && !delay_towrite(conf, dev, s)) || 3951 i == sh->pd_idx || i == sh->qd_idx || 3952 test_bit(R5_InJournal, &dev->flags)) && 3953 !test_bit(R5_LOCKED, &dev->flags) && 3954 !(uptodate_for_rmw(dev) || 3955 test_bit(R5_Wantcompute, &dev->flags)) && 3956 test_bit(R5_Insync, &dev->flags)) { 3957 if (test_bit(STRIPE_PREREAD_ACTIVE, 3958 &sh->state)) { 3959 pr_debug("Read_old block %d for r-m-w\n", 3960 i); 3961 set_bit(R5_LOCKED, &dev->flags); 3962 set_bit(R5_Wantread, &dev->flags); 3963 s->locked++; 3964 } else { 3965 set_bit(STRIPE_DELAYED, &sh->state); 3966 set_bit(STRIPE_HANDLE, &sh->state); 3967 } 3968 } 3969 } 3970 } 3971 if ((rcw < rmw || (rcw == rmw && conf->rmw_level != PARITY_PREFER_RMW)) && rcw > 0) { 3972 /* want reconstruct write, but need to get some data */ 3973 int qread =0; 3974 rcw = 0; 3975 for (i = disks; i--; ) { 3976 struct r5dev *dev = &sh->dev[i]; 3977 if (!test_bit(R5_OVERWRITE, &dev->flags) && 3978 i != sh->pd_idx && i != sh->qd_idx && 3979 !test_bit(R5_LOCKED, &dev->flags) && 3980 !(test_bit(R5_UPTODATE, &dev->flags) || 3981 test_bit(R5_Wantcompute, &dev->flags))) { 3982 rcw++; 3983 if (test_bit(R5_Insync, &dev->flags) && 3984 test_bit(STRIPE_PREREAD_ACTIVE, 3985 &sh->state)) { 3986 pr_debug("Read_old block " 3987 "%d for Reconstruct\n", i); 3988 set_bit(R5_LOCKED, &dev->flags); 3989 set_bit(R5_Wantread, &dev->flags); 3990 s->locked++; 3991 qread++; 3992 } else { 3993 set_bit(STRIPE_DELAYED, &sh->state); 3994 set_bit(STRIPE_HANDLE, &sh->state); 3995 } 3996 } 3997 } 3998 if (rcw && conf->mddev->queue) 3999 blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d", 4000 (unsigned long long)sh->sector, 4001 rcw, qread, test_bit(STRIPE_DELAYED, &sh->state)); 4002 } 4003 4004 if (rcw > disks && rmw > disks && 4005 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 4006 set_bit(STRIPE_DELAYED, &sh->state); 4007 4008 /* now if nothing is locked, and if we have enough data, 4009 * we can start a write request 4010 */ 4011 /* since handle_stripe can be called at any time we need to handle the 4012 * case where a compute block operation has been submitted and then a 4013 * subsequent call wants to start a write request. raid_run_ops only 4014 * handles the case where compute block and reconstruct are requested 4015 * simultaneously. If this is not the case then new writes need to be 4016 * held off until the compute completes. 
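 * Concretely: schedule_reconstruction() below only fires when this
 * pass queued the compute itself (s->req_compute) or no compute is in
 * flight (STRIPE_COMPUTE_RUN clear), nothing is locked, one of the
 * rmw/rcw plans needs no further reads (rcw == 0 || rmw == 0), and the
 * stripe is not waiting on a bitmap update (STRIPE_BIT_DELAY clear).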
4017 */ 4018 if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && 4019 (s->locked == 0 && (rcw == 0 || rmw == 0) && 4020 !test_bit(STRIPE_BIT_DELAY, &sh->state))) 4021 schedule_reconstruction(sh, s, rcw == 0, 0); 4022 return 0; 4023 } 4024 4025 static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, 4026 struct stripe_head_state *s, int disks) 4027 { 4028 struct r5dev *dev = NULL; 4029 4030 BUG_ON(sh->batch_head); 4031 set_bit(STRIPE_HANDLE, &sh->state); 4032 4033 switch (sh->check_state) { 4034 case check_state_idle: 4035 /* start a new check operation if there are no failures */ 4036 if (s->failed == 0) { 4037 BUG_ON(s->uptodate != disks); 4038 sh->check_state = check_state_run; 4039 set_bit(STRIPE_OP_CHECK, &s->ops_request); 4040 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); 4041 s->uptodate--; 4042 break; 4043 } 4044 dev = &sh->dev[s->failed_num[0]]; 4045 /* fall through */ 4046 case check_state_compute_result: 4047 sh->check_state = check_state_idle; 4048 if (!dev) 4049 dev = &sh->dev[sh->pd_idx]; 4050 4051 /* check that a write has not made the stripe insync */ 4052 if (test_bit(STRIPE_INSYNC, &sh->state)) 4053 break; 4054 4055 /* either failed parity check, or recovery is happening */ 4056 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags)); 4057 BUG_ON(s->uptodate != disks); 4058 4059 set_bit(R5_LOCKED, &dev->flags); 4060 s->locked++; 4061 set_bit(R5_Wantwrite, &dev->flags); 4062 4063 clear_bit(STRIPE_DEGRADED, &sh->state); 4064 set_bit(STRIPE_INSYNC, &sh->state); 4065 break; 4066 case check_state_run: 4067 break; /* we will be called again upon completion */ 4068 case check_state_check_result: 4069 sh->check_state = check_state_idle; 4070 4071 /* if a failure occurred during the check operation, leave 4072 * STRIPE_INSYNC not set and let the stripe be handled again 4073 */ 4074 if (s->failed) 4075 break; 4076 4077 /* handle a successful check operation, if parity is correct 4078 * we are done. Otherwise update the mismatch count and repair 4079 * parity if !MD_RECOVERY_CHECK 4080 */ 4081 if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0) 4082 /* parity is correct (on disc, 4083 * not in buffer any more) 4084 */ 4085 set_bit(STRIPE_INSYNC, &sh->state); 4086 else { 4087 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); 4088 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 4089 /* don't try to repair!! */ 4090 set_bit(STRIPE_INSYNC, &sh->state); 4091 else { 4092 sh->check_state = check_state_compute_run; 4093 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 4094 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 4095 set_bit(R5_Wantcompute, 4096 &sh->dev[sh->pd_idx].flags); 4097 sh->ops.target = sh->pd_idx; 4098 sh->ops.target2 = -1; 4099 s->uptodate++; 4100 } 4101 } 4102 break; 4103 case check_state_compute_run: 4104 break; 4105 default: 4106 pr_err("%s: unknown check_state: %d sector: %llu\n", 4107 __func__, sh->check_state, 4108 (unsigned long long) sh->sector); 4109 BUG(); 4110 } 4111 } 4112 4113 static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, 4114 struct stripe_head_state *s, 4115 int disks) 4116 { 4117 int pd_idx = sh->pd_idx; 4118 int qd_idx = sh->qd_idx; 4119 struct r5dev *dev; 4120 4121 BUG_ON(sh->batch_head); 4122 set_bit(STRIPE_HANDLE, &sh->state); 4123 4124 BUG_ON(s->failed > 2); 4125 4126 /* Want to check and possibly repair P and Q. 
4127 * However there could be one 'failed' device, in which 4128 * case we can only check one of them, possibly using the 4129 * other to generate missing data 4130 */ 4131 4132 switch (sh->check_state) { 4133 case check_state_idle: 4134 /* start a new check operation if there are < 2 failures */ 4135 if (s->failed == s->q_failed) { 4136 /* The only possible failed device holds Q, so it 4137 * makes sense to check P (If anything else were failed, 4138 * we would have used P to recreate it). 4139 */ 4140 sh->check_state = check_state_run; 4141 } 4142 if (!s->q_failed && s->failed < 2) { 4143 /* Q is not failed, and we didn't use it to generate 4144 * anything, so it makes sense to check it 4145 */ 4146 if (sh->check_state == check_state_run) 4147 sh->check_state = check_state_run_pq; 4148 else 4149 sh->check_state = check_state_run_q; 4150 } 4151 4152 /* discard potentially stale zero_sum_result */ 4153 sh->ops.zero_sum_result = 0; 4154 4155 if (sh->check_state == check_state_run) { 4156 /* async_xor_zero_sum destroys the contents of P */ 4157 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 4158 s->uptodate--; 4159 } 4160 if (sh->check_state >= check_state_run && 4161 sh->check_state <= check_state_run_pq) { 4162 /* async_syndrome_zero_sum preserves P and Q, so 4163 * no need to mark them !uptodate here 4164 */ 4165 set_bit(STRIPE_OP_CHECK, &s->ops_request); 4166 break; 4167 } 4168 4169 /* we have 2-disk failure */ 4170 BUG_ON(s->failed != 2); 4171 /* fall through */ 4172 case check_state_compute_result: 4173 sh->check_state = check_state_idle; 4174 4175 /* check that a write has not made the stripe insync */ 4176 if (test_bit(STRIPE_INSYNC, &sh->state)) 4177 break; 4178 4179 /* now write out any block on a failed drive, 4180 * or P or Q if they were recomputed 4181 */ 4182 BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */ 4183 if (s->failed == 2) { 4184 dev = &sh->dev[s->failed_num[1]]; 4185 s->locked++; 4186 set_bit(R5_LOCKED, &dev->flags); 4187 set_bit(R5_Wantwrite, &dev->flags); 4188 } 4189 if (s->failed >= 1) { 4190 dev = &sh->dev[s->failed_num[0]]; 4191 s->locked++; 4192 set_bit(R5_LOCKED, &dev->flags); 4193 set_bit(R5_Wantwrite, &dev->flags); 4194 } 4195 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { 4196 dev = &sh->dev[pd_idx]; 4197 s->locked++; 4198 set_bit(R5_LOCKED, &dev->flags); 4199 set_bit(R5_Wantwrite, &dev->flags); 4200 } 4201 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { 4202 dev = &sh->dev[qd_idx]; 4203 s->locked++; 4204 set_bit(R5_LOCKED, &dev->flags); 4205 set_bit(R5_Wantwrite, &dev->flags); 4206 } 4207 clear_bit(STRIPE_DEGRADED, &sh->state); 4208 4209 set_bit(STRIPE_INSYNC, &sh->state); 4210 break; 4211 case check_state_run: 4212 case check_state_run_q: 4213 case check_state_run_pq: 4214 break; /* we will be called again upon completion */ 4215 case check_state_check_result: 4216 sh->check_state = check_state_idle; 4217 4218 /* handle a successful check operation, if parity is correct 4219 * we are done. 
Otherwise update the mismatch count and repair 4220 * parity if !MD_RECOVERY_CHECK 4221 */ 4222 if (sh->ops.zero_sum_result == 0) { 4223 /* both parities are correct */ 4224 if (!s->failed) 4225 set_bit(STRIPE_INSYNC, &sh->state); 4226 else { 4227 /* in contrast to the raid5 case we can validate 4228 * parity, but still have a failure to write 4229 * back 4230 */ 4231 sh->check_state = check_state_compute_result; 4232 /* Returning at this point means that we may go 4233 * off and bring p and/or q uptodate again so 4234 * we make sure to check zero_sum_result again 4235 * to verify if p or q need writeback 4236 */ 4237 } 4238 } else { 4239 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); 4240 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 4241 /* don't try to repair!! */ 4242 set_bit(STRIPE_INSYNC, &sh->state); 4243 else { 4244 int *target = &sh->ops.target; 4245 4246 sh->ops.target = -1; 4247 sh->ops.target2 = -1; 4248 sh->check_state = check_state_compute_run; 4249 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 4250 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 4251 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { 4252 set_bit(R5_Wantcompute, 4253 &sh->dev[pd_idx].flags); 4254 *target = pd_idx; 4255 target = &sh->ops.target2; 4256 s->uptodate++; 4257 } 4258 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { 4259 set_bit(R5_Wantcompute, 4260 &sh->dev[qd_idx].flags); 4261 *target = qd_idx; 4262 s->uptodate++; 4263 } 4264 } 4265 } 4266 break; 4267 case check_state_compute_run: 4268 break; 4269 default: 4270 pr_warn("%s: unknown check_state: %d sector: %llu\n", 4271 __func__, sh->check_state, 4272 (unsigned long long) sh->sector); 4273 BUG(); 4274 } 4275 } 4276 4277 static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh) 4278 { 4279 int i; 4280 4281 /* We have read all the blocks in this stripe and now we need to 4282 * copy some of them into a target stripe for expand. 4283 */ 4284 struct dma_async_tx_descriptor *tx = NULL; 4285 BUG_ON(sh->batch_head); 4286 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state); 4287 for (i = 0; i < sh->disks; i++) 4288 if (i != sh->pd_idx && i != sh->qd_idx) { 4289 int dd_idx, j; 4290 struct stripe_head *sh2; 4291 struct async_submit_ctl submit; 4292 4293 sector_t bn = raid5_compute_blocknr(sh, i, 1); 4294 sector_t s = raid5_compute_sector(conf, bn, 0, 4295 &dd_idx, NULL); 4296 sh2 = raid5_get_active_stripe(conf, s, 0, 1, 1); 4297 if (sh2 == NULL) 4298 /* so far only the early blocks of this stripe 4299 * have been requested. 
When later blocks 4300 * get requested, we will try again 4301 */ 4302 continue; 4303 if (!test_bit(STRIPE_EXPANDING, &sh2->state) || 4304 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) { 4305 /* must have already done this block */ 4306 raid5_release_stripe(sh2); 4307 continue; 4308 } 4309 4310 /* place all the copies on one channel */ 4311 init_async_submit(&submit, 0, tx, NULL, NULL, NULL); 4312 tx = async_memcpy(sh2->dev[dd_idx].page, 4313 sh->dev[i].page, 0, 0, STRIPE_SIZE, 4314 &submit); 4315 4316 set_bit(R5_Expanded, &sh2->dev[dd_idx].flags); 4317 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags); 4318 for (j = 0; j < conf->raid_disks; j++) 4319 if (j != sh2->pd_idx && 4320 j != sh2->qd_idx && 4321 !test_bit(R5_Expanded, &sh2->dev[j].flags)) 4322 break; 4323 if (j == conf->raid_disks) { 4324 set_bit(STRIPE_EXPAND_READY, &sh2->state); 4325 set_bit(STRIPE_HANDLE, &sh2->state); 4326 } 4327 raid5_release_stripe(sh2); 4328 4329 } 4330 /* done submitting copies, wait for them to complete */ 4331 async_tx_quiesce(&tx); 4332 } 4333 4334 /* 4335 * handle_stripe - do things to a stripe. 4336 * 4337 * We lock the stripe by setting STRIPE_ACTIVE and then examine the 4338 * state of various bits to see what needs to be done. 4339 * Possible results: 4340 * return some read requests which now have data 4341 * return some write requests which are safely on storage 4342 * schedule a read on some buffers 4343 * schedule a write of some buffers 4344 * return confirmation of parity correctness 4345 * 4346 */ 4347 4348 static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) 4349 { 4350 struct r5conf *conf = sh->raid_conf; 4351 int disks = sh->disks; 4352 struct r5dev *dev; 4353 int i; 4354 int do_recovery = 0; 4355 4356 memset(s, 0, sizeof(*s)); 4357 4358 s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state) && !sh->batch_head; 4359 s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state) && !sh->batch_head; 4360 s->failed_num[0] = -1; 4361 s->failed_num[1] = -1; 4362 s->log_failed = r5l_log_disk_error(conf); 4363 4364 /* Now to look around and see what can be done */ 4365 rcu_read_lock(); 4366 for (i=disks; i--; ) { 4367 struct md_rdev *rdev; 4368 sector_t first_bad; 4369 int bad_sectors; 4370 int is_bad = 0; 4371 4372 dev = &sh->dev[i]; 4373 4374 pr_debug("check %d: state 0x%lx read %p write %p written %p\n", 4375 i, dev->flags, 4376 dev->toread, dev->towrite, dev->written); 4377 /* maybe we can reply to a read 4378 * 4379 * new wantfill requests are only permitted while 4380 * ops_complete_biofill is guaranteed to be inactive 4381 */ 4382 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread && 4383 !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) 4384 set_bit(R5_Wantfill, &dev->flags); 4385 4386 /* now count some things */ 4387 if (test_bit(R5_LOCKED, &dev->flags)) 4388 s->locked++; 4389 if (test_bit(R5_UPTODATE, &dev->flags)) 4390 s->uptodate++; 4391 if (test_bit(R5_Wantcompute, &dev->flags)) { 4392 s->compute++; 4393 BUG_ON(s->compute > 2); 4394 } 4395 4396 if (test_bit(R5_Wantfill, &dev->flags)) 4397 s->to_fill++; 4398 else if (dev->toread) 4399 s->to_read++; 4400 if (dev->towrite) { 4401 s->to_write++; 4402 if (!test_bit(R5_OVERWRITE, &dev->flags)) 4403 s->non_overwrite++; 4404 } 4405 if (dev->written) 4406 s->written++; 4407 /* Prefer to use the replacement for reads, but only 4408 * if it is recovered enough and has no bad blocks. 
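 * "Recovered enough" here means the replacement's recovery_offset
 * already covers the whole stripe (sh->sector + STRIPE_SECTORS) and
 * is_badblock() reports the range clean; otherwise we fall back to the
 * original rdev and, if a working replacement exists, flag the device
 * R5_NeedReplace so the data gets copied onto it later.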
4409 */ 4410 rdev = rcu_dereference(conf->disks[i].replacement); 4411 if (rdev && !test_bit(Faulty, &rdev->flags) && 4412 rdev->recovery_offset >= sh->sector + STRIPE_SECTORS && 4413 !is_badblock(rdev, sh->sector, STRIPE_SECTORS, 4414 &first_bad, &bad_sectors)) 4415 set_bit(R5_ReadRepl, &dev->flags); 4416 else { 4417 if (rdev && !test_bit(Faulty, &rdev->flags)) 4418 set_bit(R5_NeedReplace, &dev->flags); 4419 else 4420 clear_bit(R5_NeedReplace, &dev->flags); 4421 rdev = rcu_dereference(conf->disks[i].rdev); 4422 clear_bit(R5_ReadRepl, &dev->flags); 4423 } 4424 if (rdev && test_bit(Faulty, &rdev->flags)) 4425 rdev = NULL; 4426 if (rdev) { 4427 is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, 4428 &first_bad, &bad_sectors); 4429 if (s->blocked_rdev == NULL 4430 && (test_bit(Blocked, &rdev->flags) 4431 || is_bad < 0)) { 4432 if (is_bad < 0) 4433 set_bit(BlockedBadBlocks, 4434 &rdev->flags); 4435 s->blocked_rdev = rdev; 4436 atomic_inc(&rdev->nr_pending); 4437 } 4438 } 4439 clear_bit(R5_Insync, &dev->flags); 4440 if (!rdev) 4441 /* Not in-sync */; 4442 else if (is_bad) { 4443 /* also not in-sync */ 4444 if (!test_bit(WriteErrorSeen, &rdev->flags) && 4445 test_bit(R5_UPTODATE, &dev->flags)) { 4446 /* treat as in-sync, but with a read error 4447 * which we can now try to correct 4448 */ 4449 set_bit(R5_Insync, &dev->flags); 4450 set_bit(R5_ReadError, &dev->flags); 4451 } 4452 } else if (test_bit(In_sync, &rdev->flags)) 4453 set_bit(R5_Insync, &dev->flags); 4454 else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) 4455 /* in sync if before recovery_offset */ 4456 set_bit(R5_Insync, &dev->flags); 4457 else if (test_bit(R5_UPTODATE, &dev->flags) && 4458 test_bit(R5_Expanded, &dev->flags)) 4459 /* If we've reshaped into here, we assume it is Insync. 4460 * We will shortly update recovery_offset to make 4461 * it official. 
4462 */ 4463 set_bit(R5_Insync, &dev->flags); 4464 4465 if (test_bit(R5_WriteError, &dev->flags)) { 4466 /* This flag does not apply to '.replacement' 4467 * only to .rdev, so make sure to check that*/ 4468 struct md_rdev *rdev2 = rcu_dereference( 4469 conf->disks[i].rdev); 4470 if (rdev2 == rdev) 4471 clear_bit(R5_Insync, &dev->flags); 4472 if (rdev2 && !test_bit(Faulty, &rdev2->flags)) { 4473 s->handle_bad_blocks = 1; 4474 atomic_inc(&rdev2->nr_pending); 4475 } else 4476 clear_bit(R5_WriteError, &dev->flags); 4477 } 4478 if (test_bit(R5_MadeGood, &dev->flags)) { 4479 /* This flag does not apply to '.replacement' 4480 * only to .rdev, so make sure to check that*/ 4481 struct md_rdev *rdev2 = rcu_dereference( 4482 conf->disks[i].rdev); 4483 if (rdev2 && !test_bit(Faulty, &rdev2->flags)) { 4484 s->handle_bad_blocks = 1; 4485 atomic_inc(&rdev2->nr_pending); 4486 } else 4487 clear_bit(R5_MadeGood, &dev->flags); 4488 } 4489 if (test_bit(R5_MadeGoodRepl, &dev->flags)) { 4490 struct md_rdev *rdev2 = rcu_dereference( 4491 conf->disks[i].replacement); 4492 if (rdev2 && !test_bit(Faulty, &rdev2->flags)) { 4493 s->handle_bad_blocks = 1; 4494 atomic_inc(&rdev2->nr_pending); 4495 } else 4496 clear_bit(R5_MadeGoodRepl, &dev->flags); 4497 } 4498 if (!test_bit(R5_Insync, &dev->flags)) { 4499 /* The ReadError flag will just be confusing now */ 4500 clear_bit(R5_ReadError, &dev->flags); 4501 clear_bit(R5_ReWrite, &dev->flags); 4502 } 4503 if (test_bit(R5_ReadError, &dev->flags)) 4504 clear_bit(R5_Insync, &dev->flags); 4505 if (!test_bit(R5_Insync, &dev->flags)) { 4506 if (s->failed < 2) 4507 s->failed_num[s->failed] = i; 4508 s->failed++; 4509 if (rdev && !test_bit(Faulty, &rdev->flags)) 4510 do_recovery = 1; 4511 } 4512 4513 if (test_bit(R5_InJournal, &dev->flags)) 4514 s->injournal++; 4515 if (test_bit(R5_InJournal, &dev->flags) && dev->written) 4516 s->just_cached++; 4517 } 4518 if (test_bit(STRIPE_SYNCING, &sh->state)) { 4519 /* If there is a failed device being replaced, 4520 * we must be recovering. 4521 * else if we are after recovery_cp, we must be syncing 4522 * else if MD_RECOVERY_REQUESTED is set, we also are syncing. 4523 * else we can only be replacing 4524 * sync and recovery both need to read all devices, and so 4525 * use the same flag. 4526 */ 4527 if (do_recovery || 4528 sh->sector >= conf->mddev->recovery_cp || 4529 test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery))) 4530 s->syncing = 1; 4531 else 4532 s->replacing = 1; 4533 } 4534 rcu_read_unlock(); 4535 } 4536 4537 static int clear_batch_ready(struct stripe_head *sh) 4538 { 4539 /* Return '1' if this is a member of batch, or 4540 * '0' if it is a lone stripe or a head which can now be 4541 * handled. 4542 */ 4543 struct stripe_head *tmp; 4544 if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state)) 4545 return (sh->batch_head && sh->batch_head != sh); 4546 spin_lock(&sh->stripe_lock); 4547 if (!sh->batch_head) { 4548 spin_unlock(&sh->stripe_lock); 4549 return 0; 4550 } 4551 4552 /* 4553 * this stripe could be added to a batch list before we check 4554 * BATCH_READY, skips it 4555 */ 4556 if (sh->batch_head != sh) { 4557 spin_unlock(&sh->stripe_lock); 4558 return 1; 4559 } 4560 spin_lock(&sh->batch_lock); 4561 list_for_each_entry(tmp, &sh->batch_list, batch_list) 4562 clear_bit(STRIPE_BATCH_READY, &tmp->state); 4563 spin_unlock(&sh->batch_lock); 4564 spin_unlock(&sh->stripe_lock); 4565 4566 /* 4567 * BATCH_READY is cleared, no new stripes can be added. 
4568 * batch_list can be accessed without lock 4569 */ 4570 return 0; 4571 } 4572 4573 static void break_stripe_batch_list(struct stripe_head *head_sh, 4574 unsigned long handle_flags) 4575 { 4576 struct stripe_head *sh, *next; 4577 int i; 4578 int do_wakeup = 0; 4579 4580 list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) { 4581 4582 list_del_init(&sh->batch_list); 4583 4584 WARN_ONCE(sh->state & ((1 << STRIPE_ACTIVE) | 4585 (1 << STRIPE_SYNCING) | 4586 (1 << STRIPE_REPLACED) | 4587 (1 << STRIPE_DELAYED) | 4588 (1 << STRIPE_BIT_DELAY) | 4589 (1 << STRIPE_FULL_WRITE) | 4590 (1 << STRIPE_BIOFILL_RUN) | 4591 (1 << STRIPE_COMPUTE_RUN) | 4592 (1 << STRIPE_OPS_REQ_PENDING) | 4593 (1 << STRIPE_DISCARD) | 4594 (1 << STRIPE_BATCH_READY) | 4595 (1 << STRIPE_BATCH_ERR) | 4596 (1 << STRIPE_BITMAP_PENDING)), 4597 "stripe state: %lx\n", sh->state); 4598 WARN_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) | 4599 (1 << STRIPE_REPLACED)), 4600 "head stripe state: %lx\n", head_sh->state); 4601 4602 set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS | 4603 (1 << STRIPE_PREREAD_ACTIVE) | 4604 (1 << STRIPE_DEGRADED)), 4605 head_sh->state & (1 << STRIPE_INSYNC)); 4606 4607 sh->check_state = head_sh->check_state; 4608 sh->reconstruct_state = head_sh->reconstruct_state; 4609 for (i = 0; i < sh->disks; i++) { 4610 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 4611 do_wakeup = 1; 4612 sh->dev[i].flags = head_sh->dev[i].flags & 4613 (~((1 << R5_WriteError) | (1 << R5_Overlap))); 4614 } 4615 spin_lock_irq(&sh->stripe_lock); 4616 sh->batch_head = NULL; 4617 spin_unlock_irq(&sh->stripe_lock); 4618 if (handle_flags == 0 || 4619 sh->state & handle_flags) 4620 set_bit(STRIPE_HANDLE, &sh->state); 4621 raid5_release_stripe(sh); 4622 } 4623 spin_lock_irq(&head_sh->stripe_lock); 4624 head_sh->batch_head = NULL; 4625 spin_unlock_irq(&head_sh->stripe_lock); 4626 for (i = 0; i < head_sh->disks; i++) 4627 if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags)) 4628 do_wakeup = 1; 4629 if (head_sh->state & handle_flags) 4630 set_bit(STRIPE_HANDLE, &head_sh->state); 4631 4632 if (do_wakeup) 4633 wake_up(&head_sh->raid_conf->wait_for_overlap); 4634 } 4635 4636 static void handle_stripe(struct stripe_head *sh) 4637 { 4638 struct stripe_head_state s; 4639 struct r5conf *conf = sh->raid_conf; 4640 int i; 4641 int prexor; 4642 int disks = sh->disks; 4643 struct r5dev *pdev, *qdev; 4644 4645 clear_bit(STRIPE_HANDLE, &sh->state); 4646 if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) { 4647 /* already being handled, ensure it gets handled 4648 * again when current action finishes */ 4649 set_bit(STRIPE_HANDLE, &sh->state); 4650 return; 4651 } 4652 4653 if (clear_batch_ready(sh) ) { 4654 clear_bit_unlock(STRIPE_ACTIVE, &sh->state); 4655 return; 4656 } 4657 4658 if (test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state)) 4659 break_stripe_batch_list(sh, 0); 4660 4661 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) { 4662 spin_lock(&sh->stripe_lock); 4663 /* 4664 * Cannot process 'sync' concurrently with 'discard'. 4665 * Flush data in r5cache before 'sync'. 
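 * The test below therefore only turns STRIPE_SYNC_REQUESTED into
 * STRIPE_SYNCING once the stripe holds no write-back-cached data
 * (neither an r5c partial nor a full stripe) and no discard is in
 * progress; until then the sync request simply stays pending.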
4666 */ 4667 if (!test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) && 4668 !test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state) && 4669 !test_bit(STRIPE_DISCARD, &sh->state) && 4670 test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { 4671 set_bit(STRIPE_SYNCING, &sh->state); 4672 clear_bit(STRIPE_INSYNC, &sh->state); 4673 clear_bit(STRIPE_REPLACED, &sh->state); 4674 } 4675 spin_unlock(&sh->stripe_lock); 4676 } 4677 clear_bit(STRIPE_DELAYED, &sh->state); 4678 4679 pr_debug("handling stripe %llu, state=%#lx cnt=%d, " 4680 "pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n", 4681 (unsigned long long)sh->sector, sh->state, 4682 atomic_read(&sh->count), sh->pd_idx, sh->qd_idx, 4683 sh->check_state, sh->reconstruct_state); 4684 4685 analyse_stripe(sh, &s); 4686 4687 if (test_bit(STRIPE_LOG_TRAPPED, &sh->state)) 4688 goto finish; 4689 4690 if (s.handle_bad_blocks || 4691 test_bit(MD_SB_CHANGE_PENDING, &conf->mddev->sb_flags)) { 4692 set_bit(STRIPE_HANDLE, &sh->state); 4693 goto finish; 4694 } 4695 4696 if (unlikely(s.blocked_rdev)) { 4697 if (s.syncing || s.expanding || s.expanded || 4698 s.replacing || s.to_write || s.written) { 4699 set_bit(STRIPE_HANDLE, &sh->state); 4700 goto finish; 4701 } 4702 /* There is nothing for the blocked_rdev to block */ 4703 rdev_dec_pending(s.blocked_rdev, conf->mddev); 4704 s.blocked_rdev = NULL; 4705 } 4706 4707 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { 4708 set_bit(STRIPE_OP_BIOFILL, &s.ops_request); 4709 set_bit(STRIPE_BIOFILL_RUN, &sh->state); 4710 } 4711 4712 pr_debug("locked=%d uptodate=%d to_read=%d" 4713 " to_write=%d failed=%d failed_num=%d,%d\n", 4714 s.locked, s.uptodate, s.to_read, s.to_write, s.failed, 4715 s.failed_num[0], s.failed_num[1]); 4716 /* 4717 * check if the array has lost more than max_degraded devices and, 4718 * if so, some requests might need to be failed. 
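 * (max_degraded is 1 for raid4/5 and 2 for raid6, so e.g. a raid6
 * stripe with three unusable members can no longer be reconstructed
 * and any bios queued against it have to be completed with an error.)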
4719 * 4720 * When journal device failed (log_failed), we will only process 4721 * the stripe if there is data need write to raid disks 4722 */ 4723 if (s.failed > conf->max_degraded || 4724 (s.log_failed && s.injournal == 0)) { 4725 sh->check_state = 0; 4726 sh->reconstruct_state = 0; 4727 break_stripe_batch_list(sh, 0); 4728 if (s.to_read+s.to_write+s.written) 4729 handle_failed_stripe(conf, sh, &s, disks); 4730 if (s.syncing + s.replacing) 4731 handle_failed_sync(conf, sh, &s); 4732 } 4733 4734 /* Now we check to see if any write operations have recently 4735 * completed 4736 */ 4737 prexor = 0; 4738 if (sh->reconstruct_state == reconstruct_state_prexor_drain_result) 4739 prexor = 1; 4740 if (sh->reconstruct_state == reconstruct_state_drain_result || 4741 sh->reconstruct_state == reconstruct_state_prexor_drain_result) { 4742 sh->reconstruct_state = reconstruct_state_idle; 4743 4744 /* All the 'written' buffers and the parity block are ready to 4745 * be written back to disk 4746 */ 4747 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) && 4748 !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)); 4749 BUG_ON(sh->qd_idx >= 0 && 4750 !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags) && 4751 !test_bit(R5_Discard, &sh->dev[sh->qd_idx].flags)); 4752 for (i = disks; i--; ) { 4753 struct r5dev *dev = &sh->dev[i]; 4754 if (test_bit(R5_LOCKED, &dev->flags) && 4755 (i == sh->pd_idx || i == sh->qd_idx || 4756 dev->written || test_bit(R5_InJournal, 4757 &dev->flags))) { 4758 pr_debug("Writing block %d\n", i); 4759 set_bit(R5_Wantwrite, &dev->flags); 4760 if (prexor) 4761 continue; 4762 if (s.failed > 1) 4763 continue; 4764 if (!test_bit(R5_Insync, &dev->flags) || 4765 ((i == sh->pd_idx || i == sh->qd_idx) && 4766 s.failed == 0)) 4767 set_bit(STRIPE_INSYNC, &sh->state); 4768 } 4769 } 4770 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 4771 s.dec_preread_active = 1; 4772 } 4773 4774 /* 4775 * might be able to return some write requests if the parity blocks 4776 * are safe, or on a failed drive 4777 */ 4778 pdev = &sh->dev[sh->pd_idx]; 4779 s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx) 4780 || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx); 4781 qdev = &sh->dev[sh->qd_idx]; 4782 s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx) 4783 || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx) 4784 || conf->level < 6; 4785 4786 if (s.written && 4787 (s.p_failed || ((test_bit(R5_Insync, &pdev->flags) 4788 && !test_bit(R5_LOCKED, &pdev->flags) 4789 && (test_bit(R5_UPTODATE, &pdev->flags) || 4790 test_bit(R5_Discard, &pdev->flags))))) && 4791 (s.q_failed || ((test_bit(R5_Insync, &qdev->flags) 4792 && !test_bit(R5_LOCKED, &qdev->flags) 4793 && (test_bit(R5_UPTODATE, &qdev->flags) || 4794 test_bit(R5_Discard, &qdev->flags)))))) 4795 handle_stripe_clean_event(conf, sh, disks); 4796 4797 if (s.just_cached) 4798 r5c_handle_cached_data_endio(conf, sh, disks); 4799 log_stripe_write_finished(sh); 4800 4801 /* Now we might consider reading some blocks, either to check/generate 4802 * parity, or to satisfy requests 4803 * or to load a block that is being partially written. 
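 * The condition below lists those cases explicitly: pending reads,
 * partial (non-overwrite) writes, a raid6 write while degraded, a
 * sync/resync that does not yet have every block up to date or being
 * computed, a replacement copy, or an ongoing expansion.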
4804 */ 4805 if (s.to_read || s.non_overwrite 4806 || (conf->level == 6 && s.to_write && s.failed) 4807 || (s.syncing && (s.uptodate + s.compute < disks)) 4808 || s.replacing 4809 || s.expanding) 4810 handle_stripe_fill(sh, &s, disks); 4811 4812 /* 4813 * When the stripe finishes full journal write cycle (write to journal 4814 * and raid disk), this is the clean up procedure so it is ready for 4815 * next operation. 4816 */ 4817 r5c_finish_stripe_write_out(conf, sh, &s); 4818 4819 /* 4820 * Now to consider new write requests, cache write back and what else, 4821 * if anything should be read. We do not handle new writes when: 4822 * 1/ A 'write' operation (copy+xor) is already in flight. 4823 * 2/ A 'check' operation is in flight, as it may clobber the parity 4824 * block. 4825 * 3/ A r5c cache log write is in flight. 4826 */ 4827 4828 if (!sh->reconstruct_state && !sh->check_state && !sh->log_io) { 4829 if (!r5c_is_writeback(conf->log)) { 4830 if (s.to_write) 4831 handle_stripe_dirtying(conf, sh, &s, disks); 4832 } else { /* write back cache */ 4833 int ret = 0; 4834 4835 /* First, try handle writes in caching phase */ 4836 if (s.to_write) 4837 ret = r5c_try_caching_write(conf, sh, &s, 4838 disks); 4839 /* 4840 * If caching phase failed: ret == -EAGAIN 4841 * OR 4842 * stripe under reclaim: !caching && injournal 4843 * 4844 * fall back to handle_stripe_dirtying() 4845 */ 4846 if (ret == -EAGAIN || 4847 /* stripe under reclaim: !caching && injournal */ 4848 (!test_bit(STRIPE_R5C_CACHING, &sh->state) && 4849 s.injournal > 0)) { 4850 ret = handle_stripe_dirtying(conf, sh, &s, 4851 disks); 4852 if (ret == -EAGAIN) 4853 goto finish; 4854 } 4855 } 4856 } 4857 4858 /* maybe we need to check and possibly fix the parity for this stripe 4859 * Any reads will already have been scheduled, so we just see if enough 4860 * data is available. The parity check is held off while parity 4861 * dependent operations are in flight. 
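 * In practice the check path is entered either to step an already
 * started check through its state machine (sh->check_state != 0) or to
 * begin a new one: the stripe is being synced, nothing is locked, no
 * compute is running, and the stripe is not already marked in-sync.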
4862 */ 4863 if (sh->check_state || 4864 (s.syncing && s.locked == 0 && 4865 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && 4866 !test_bit(STRIPE_INSYNC, &sh->state))) { 4867 if (conf->level == 6) 4868 handle_parity_checks6(conf, sh, &s, disks); 4869 else 4870 handle_parity_checks5(conf, sh, &s, disks); 4871 } 4872 4873 if ((s.replacing || s.syncing) && s.locked == 0 4874 && !test_bit(STRIPE_COMPUTE_RUN, &sh->state) 4875 && !test_bit(STRIPE_REPLACED, &sh->state)) { 4876 /* Write out to replacement devices where possible */ 4877 for (i = 0; i < conf->raid_disks; i++) 4878 if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) { 4879 WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags)); 4880 set_bit(R5_WantReplace, &sh->dev[i].flags); 4881 set_bit(R5_LOCKED, &sh->dev[i].flags); 4882 s.locked++; 4883 } 4884 if (s.replacing) 4885 set_bit(STRIPE_INSYNC, &sh->state); 4886 set_bit(STRIPE_REPLACED, &sh->state); 4887 } 4888 if ((s.syncing || s.replacing) && s.locked == 0 && 4889 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && 4890 test_bit(STRIPE_INSYNC, &sh->state)) { 4891 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 4892 clear_bit(STRIPE_SYNCING, &sh->state); 4893 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) 4894 wake_up(&conf->wait_for_overlap); 4895 } 4896 4897 /* If the failed drives are just a ReadError, then we might need 4898 * to progress the repair/check process 4899 */ 4900 if (s.failed <= conf->max_degraded && !conf->mddev->ro) 4901 for (i = 0; i < s.failed; i++) { 4902 struct r5dev *dev = &sh->dev[s.failed_num[i]]; 4903 if (test_bit(R5_ReadError, &dev->flags) 4904 && !test_bit(R5_LOCKED, &dev->flags) 4905 && test_bit(R5_UPTODATE, &dev->flags) 4906 ) { 4907 if (!test_bit(R5_ReWrite, &dev->flags)) { 4908 set_bit(R5_Wantwrite, &dev->flags); 4909 set_bit(R5_ReWrite, &dev->flags); 4910 set_bit(R5_LOCKED, &dev->flags); 4911 s.locked++; 4912 } else { 4913 /* let's read it back */ 4914 set_bit(R5_Wantread, &dev->flags); 4915 set_bit(R5_LOCKED, &dev->flags); 4916 s.locked++; 4917 } 4918 } 4919 } 4920 4921 /* Finish reconstruct operations initiated by the expansion process */ 4922 if (sh->reconstruct_state == reconstruct_state_result) { 4923 struct stripe_head *sh_src 4924 = raid5_get_active_stripe(conf, sh->sector, 1, 1, 1); 4925 if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) { 4926 /* sh cannot be written until sh_src has been read. 
4927 * so arrange for sh to be delayed a little 4928 */ 4929 set_bit(STRIPE_DELAYED, &sh->state); 4930 set_bit(STRIPE_HANDLE, &sh->state); 4931 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, 4932 &sh_src->state)) 4933 atomic_inc(&conf->preread_active_stripes); 4934 raid5_release_stripe(sh_src); 4935 goto finish; 4936 } 4937 if (sh_src) 4938 raid5_release_stripe(sh_src); 4939 4940 sh->reconstruct_state = reconstruct_state_idle; 4941 clear_bit(STRIPE_EXPANDING, &sh->state); 4942 for (i = conf->raid_disks; i--; ) { 4943 set_bit(R5_Wantwrite, &sh->dev[i].flags); 4944 set_bit(R5_LOCKED, &sh->dev[i].flags); 4945 s.locked++; 4946 } 4947 } 4948 4949 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && 4950 !sh->reconstruct_state) { 4951 /* Need to write out all blocks after computing parity */ 4952 sh->disks = conf->raid_disks; 4953 stripe_set_idx(sh->sector, conf, 0, sh); 4954 schedule_reconstruction(sh, &s, 1, 1); 4955 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { 4956 clear_bit(STRIPE_EXPAND_READY, &sh->state); 4957 atomic_dec(&conf->reshape_stripes); 4958 wake_up(&conf->wait_for_overlap); 4959 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 4960 } 4961 4962 if (s.expanding && s.locked == 0 && 4963 !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) 4964 handle_stripe_expansion(conf, sh); 4965 4966 finish: 4967 /* wait for this device to become unblocked */ 4968 if (unlikely(s.blocked_rdev)) { 4969 if (conf->mddev->external) 4970 md_wait_for_blocked_rdev(s.blocked_rdev, 4971 conf->mddev); 4972 else 4973 /* Internal metadata will immediately 4974 * be written by raid5d, so we don't 4975 * need to wait here. 4976 */ 4977 rdev_dec_pending(s.blocked_rdev, 4978 conf->mddev); 4979 } 4980 4981 if (s.handle_bad_blocks) 4982 for (i = disks; i--; ) { 4983 struct md_rdev *rdev; 4984 struct r5dev *dev = &sh->dev[i]; 4985 if (test_and_clear_bit(R5_WriteError, &dev->flags)) { 4986 /* We own a safe reference to the rdev */ 4987 rdev = conf->disks[i].rdev; 4988 if (!rdev_set_badblocks(rdev, sh->sector, 4989 STRIPE_SECTORS, 0)) 4990 md_error(conf->mddev, rdev); 4991 rdev_dec_pending(rdev, conf->mddev); 4992 } 4993 if (test_and_clear_bit(R5_MadeGood, &dev->flags)) { 4994 rdev = conf->disks[i].rdev; 4995 rdev_clear_badblocks(rdev, sh->sector, 4996 STRIPE_SECTORS, 0); 4997 rdev_dec_pending(rdev, conf->mddev); 4998 } 4999 if (test_and_clear_bit(R5_MadeGoodRepl, &dev->flags)) { 5000 rdev = conf->disks[i].replacement; 5001 if (!rdev) 5002 /* rdev have been moved down */ 5003 rdev = conf->disks[i].rdev; 5004 rdev_clear_badblocks(rdev, sh->sector, 5005 STRIPE_SECTORS, 0); 5006 rdev_dec_pending(rdev, conf->mddev); 5007 } 5008 } 5009 5010 if (s.ops_request) 5011 raid_run_ops(sh, s.ops_request); 5012 5013 ops_run_io(sh, &s); 5014 5015 if (s.dec_preread_active) { 5016 /* We delay this until after ops_run_io so that if make_request 5017 * is waiting on a flush, it won't continue until the writes 5018 * have actually been submitted. 
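 * Dropping preread_active_stripes below IO_THRESHOLD also wakes the md
 * thread, so delayed stripes can be promoted to the hold_list (see
 * raid5_activate_delayed() below).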
5019 */ 5020 atomic_dec(&conf->preread_active_stripes); 5021 if (atomic_read(&conf->preread_active_stripes) < 5022 IO_THRESHOLD) 5023 md_wakeup_thread(conf->mddev->thread); 5024 } 5025 5026 clear_bit_unlock(STRIPE_ACTIVE, &sh->state); 5027 } 5028 5029 static void raid5_activate_delayed(struct r5conf *conf) 5030 { 5031 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { 5032 while (!list_empty(&conf->delayed_list)) { 5033 struct list_head *l = conf->delayed_list.next; 5034 struct stripe_head *sh; 5035 sh = list_entry(l, struct stripe_head, lru); 5036 list_del_init(l); 5037 clear_bit(STRIPE_DELAYED, &sh->state); 5038 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 5039 atomic_inc(&conf->preread_active_stripes); 5040 list_add_tail(&sh->lru, &conf->hold_list); 5041 raid5_wakeup_stripe_thread(sh); 5042 } 5043 } 5044 } 5045 5046 static void activate_bit_delay(struct r5conf *conf, 5047 struct list_head *temp_inactive_list) 5048 { 5049 /* device_lock is held */ 5050 struct list_head head; 5051 list_add(&head, &conf->bitmap_list); 5052 list_del_init(&conf->bitmap_list); 5053 while (!list_empty(&head)) { 5054 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); 5055 int hash; 5056 list_del_init(&sh->lru); 5057 atomic_inc(&sh->count); 5058 hash = sh->hash_lock_index; 5059 __release_stripe(conf, sh, &temp_inactive_list[hash]); 5060 } 5061 } 5062 5063 static int raid5_congested(struct mddev *mddev, int bits) 5064 { 5065 struct r5conf *conf = mddev->private; 5066 5067 /* No difference between reads and writes. Just check 5068 * how busy the stripe_cache is 5069 */ 5070 5071 if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) 5072 return 1; 5073 5074 /* Also checks whether there is pressure on r5cache log space */ 5075 if (test_bit(R5C_LOG_TIGHT, &conf->cache_state)) 5076 return 1; 5077 if (conf->quiesce) 5078 return 1; 5079 if (atomic_read(&conf->empty_inactive_list_nr)) 5080 return 1; 5081 5082 return 0; 5083 } 5084 5085 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) 5086 { 5087 struct r5conf *conf = mddev->private; 5088 sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev); 5089 unsigned int chunk_sectors; 5090 unsigned int bio_sectors = bio_sectors(bio); 5091 5092 chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors); 5093 return chunk_sectors >= 5094 ((sector & (chunk_sectors - 1)) + bio_sectors); 5095 } 5096 5097 /* 5098 * add bio to the retry LIFO ( in O(1) ... we are in interrupt ) 5099 * later sampled by raid5d. 
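 * Failed aligned reads are pushed onto the head of
 * conf->retry_read_aligned_list under device_lock; raid5d later pops
 * them one at a time via remove_bio_from_retry(), which also reports
 * the offset at which a partially retried bio should resume.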
5100 */ 5101 static void add_bio_to_retry(struct bio *bi,struct r5conf *conf) 5102 { 5103 unsigned long flags; 5104 5105 spin_lock_irqsave(&conf->device_lock, flags); 5106 5107 bi->bi_next = conf->retry_read_aligned_list; 5108 conf->retry_read_aligned_list = bi; 5109 5110 spin_unlock_irqrestore(&conf->device_lock, flags); 5111 md_wakeup_thread(conf->mddev->thread); 5112 } 5113 5114 static struct bio *remove_bio_from_retry(struct r5conf *conf, 5115 unsigned int *offset) 5116 { 5117 struct bio *bi; 5118 5119 bi = conf->retry_read_aligned; 5120 if (bi) { 5121 *offset = conf->retry_read_offset; 5122 conf->retry_read_aligned = NULL; 5123 return bi; 5124 } 5125 bi = conf->retry_read_aligned_list; 5126 if(bi) { 5127 conf->retry_read_aligned_list = bi->bi_next; 5128 bi->bi_next = NULL; 5129 *offset = 0; 5130 } 5131 5132 return bi; 5133 } 5134 5135 /* 5136 * The "raid5_align_endio" should check if the read succeeded and if it 5137 * did, call bio_endio on the original bio (having bio_put the new bio 5138 * first). 5139 * If the read failed.. 5140 */ 5141 static void raid5_align_endio(struct bio *bi) 5142 { 5143 struct bio* raid_bi = bi->bi_private; 5144 struct mddev *mddev; 5145 struct r5conf *conf; 5146 struct md_rdev *rdev; 5147 int error = bi->bi_error; 5148 5149 bio_put(bi); 5150 5151 rdev = (void*)raid_bi->bi_next; 5152 raid_bi->bi_next = NULL; 5153 mddev = rdev->mddev; 5154 conf = mddev->private; 5155 5156 rdev_dec_pending(rdev, conf->mddev); 5157 5158 if (!error) { 5159 bio_endio(raid_bi); 5160 if (atomic_dec_and_test(&conf->active_aligned_reads)) 5161 wake_up(&conf->wait_for_quiescent); 5162 return; 5163 } 5164 5165 pr_debug("raid5_align_endio : io error...handing IO for a retry\n"); 5166 5167 add_bio_to_retry(raid_bi, conf); 5168 } 5169 5170 static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio) 5171 { 5172 struct r5conf *conf = mddev->private; 5173 int dd_idx; 5174 struct bio* align_bi; 5175 struct md_rdev *rdev; 5176 sector_t end_sector; 5177 5178 if (!in_chunk_boundary(mddev, raid_bio)) { 5179 pr_debug("%s: non aligned\n", __func__); 5180 return 0; 5181 } 5182 /* 5183 * use bio_clone_fast to make a copy of the bio 5184 */ 5185 align_bi = bio_clone_fast(raid_bio, GFP_NOIO, mddev->bio_set); 5186 if (!align_bi) 5187 return 0; 5188 /* 5189 * set bi_end_io to a new function, and set bi_private to the 5190 * original bio. 
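 * The rdev the clone is finally sent to is stashed in the original
 * bio's bi_next (see below), so that raid5_align_endio() can retrieve
 * it and drop the nr_pending reference taken here.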
5191 */ 5192 align_bi->bi_end_io = raid5_align_endio; 5193 align_bi->bi_private = raid_bio; 5194 /* 5195 * compute position 5196 */ 5197 align_bi->bi_iter.bi_sector = 5198 raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, 5199 0, &dd_idx, NULL); 5200 5201 end_sector = bio_end_sector(align_bi); 5202 rcu_read_lock(); 5203 rdev = rcu_dereference(conf->disks[dd_idx].replacement); 5204 if (!rdev || test_bit(Faulty, &rdev->flags) || 5205 rdev->recovery_offset < end_sector) { 5206 rdev = rcu_dereference(conf->disks[dd_idx].rdev); 5207 if (rdev && 5208 (test_bit(Faulty, &rdev->flags) || 5209 !(test_bit(In_sync, &rdev->flags) || 5210 rdev->recovery_offset >= end_sector))) 5211 rdev = NULL; 5212 } 5213 5214 if (r5c_big_stripe_cached(conf, align_bi->bi_iter.bi_sector)) { 5215 rcu_read_unlock(); 5216 bio_put(align_bi); 5217 return 0; 5218 } 5219 5220 if (rdev) { 5221 sector_t first_bad; 5222 int bad_sectors; 5223 5224 atomic_inc(&rdev->nr_pending); 5225 rcu_read_unlock(); 5226 raid_bio->bi_next = (void*)rdev; 5227 align_bi->bi_bdev = rdev->bdev; 5228 bio_clear_flag(align_bi, BIO_SEG_VALID); 5229 5230 if (is_badblock(rdev, align_bi->bi_iter.bi_sector, 5231 bio_sectors(align_bi), 5232 &first_bad, &bad_sectors)) { 5233 bio_put(align_bi); 5234 rdev_dec_pending(rdev, mddev); 5235 return 0; 5236 } 5237 5238 /* No reshape active, so we can trust rdev->data_offset */ 5239 align_bi->bi_iter.bi_sector += rdev->data_offset; 5240 5241 spin_lock_irq(&conf->device_lock); 5242 wait_event_lock_irq(conf->wait_for_quiescent, 5243 conf->quiesce == 0, 5244 conf->device_lock); 5245 atomic_inc(&conf->active_aligned_reads); 5246 spin_unlock_irq(&conf->device_lock); 5247 5248 if (mddev->gendisk) 5249 trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev), 5250 align_bi, disk_devt(mddev->gendisk), 5251 raid_bio->bi_iter.bi_sector); 5252 generic_make_request(align_bi); 5253 return 1; 5254 } else { 5255 rcu_read_unlock(); 5256 bio_put(align_bi); 5257 return 0; 5258 } 5259 } 5260 5261 static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio) 5262 { 5263 struct bio *split; 5264 sector_t sector = raid_bio->bi_iter.bi_sector; 5265 unsigned chunk_sects = mddev->chunk_sectors; 5266 unsigned sectors = chunk_sects - (sector & (chunk_sects-1)); 5267 5268 if (sectors < bio_sectors(raid_bio)) { 5269 struct r5conf *conf = mddev->private; 5270 split = bio_split(raid_bio, sectors, GFP_NOIO, conf->bio_split); 5271 bio_chain(split, raid_bio); 5272 generic_make_request(raid_bio); 5273 raid_bio = split; 5274 } 5275 5276 if (!raid5_read_one_chunk(mddev, raid_bio)) 5277 return raid_bio; 5278 5279 return NULL; 5280 } 5281 5282 /* __get_priority_stripe - get the next stripe to process 5283 * 5284 * Full stripe writes are allowed to pass preread active stripes up until 5285 * the bypass_threshold is exceeded. In general the bypass_count 5286 * increments when the handle_list is handled before the hold_list; however, it 5287 * will not be incremented when STRIPE_IO_STARTED is sampled set signifying a 5288 * stripe with in flight i/o. The bypass_count will be reset when the 5289 * head of the hold_list has changed, i.e. the head was promoted to the 5290 * handle_list. 
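 * As a rough sketch of the accounting: each time a stripe without
 * STRIPE_IO_STARTED is taken from handle_list while the same preread
 * stripe still heads hold_list, bypass_count is incremented; once it
 * exceeds bypass_threshold (or there are no pending full-stripe writes
 * at all) the head of hold_list is serviced instead and bypass_count
 * is charged back by bypass_threshold.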
5291 */ 5292 static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group) 5293 { 5294 struct stripe_head *sh, *tmp; 5295 struct list_head *handle_list = NULL; 5296 struct r5worker_group *wg; 5297 bool second_try = !r5c_is_writeback(conf->log) && 5298 !r5l_log_disk_error(conf); 5299 bool try_loprio = test_bit(R5C_LOG_TIGHT, &conf->cache_state) || 5300 r5l_log_disk_error(conf); 5301 5302 again: 5303 wg = NULL; 5304 sh = NULL; 5305 if (conf->worker_cnt_per_group == 0) { 5306 handle_list = try_loprio ? &conf->loprio_list : 5307 &conf->handle_list; 5308 } else if (group != ANY_GROUP) { 5309 handle_list = try_loprio ? &conf->worker_groups[group].loprio_list : 5310 &conf->worker_groups[group].handle_list; 5311 wg = &conf->worker_groups[group]; 5312 } else { 5313 int i; 5314 for (i = 0; i < conf->group_cnt; i++) { 5315 handle_list = try_loprio ? &conf->worker_groups[i].loprio_list : 5316 &conf->worker_groups[i].handle_list; 5317 wg = &conf->worker_groups[i]; 5318 if (!list_empty(handle_list)) 5319 break; 5320 } 5321 } 5322 5323 pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n", 5324 __func__, 5325 list_empty(handle_list) ? "empty" : "busy", 5326 list_empty(&conf->hold_list) ? "empty" : "busy", 5327 atomic_read(&conf->pending_full_writes), conf->bypass_count); 5328 5329 if (!list_empty(handle_list)) { 5330 sh = list_entry(handle_list->next, typeof(*sh), lru); 5331 5332 if (list_empty(&conf->hold_list)) 5333 conf->bypass_count = 0; 5334 else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) { 5335 if (conf->hold_list.next == conf->last_hold) 5336 conf->bypass_count++; 5337 else { 5338 conf->last_hold = conf->hold_list.next; 5339 conf->bypass_count -= conf->bypass_threshold; 5340 if (conf->bypass_count < 0) 5341 conf->bypass_count = 0; 5342 } 5343 } 5344 } else if (!list_empty(&conf->hold_list) && 5345 ((conf->bypass_threshold && 5346 conf->bypass_count > conf->bypass_threshold) || 5347 atomic_read(&conf->pending_full_writes) == 0)) { 5348 5349 list_for_each_entry(tmp, &conf->hold_list, lru) { 5350 if (conf->worker_cnt_per_group == 0 || 5351 group == ANY_GROUP || 5352 !cpu_online(tmp->cpu) || 5353 cpu_to_group(tmp->cpu) == group) { 5354 sh = tmp; 5355 break; 5356 } 5357 } 5358 5359 if (sh) { 5360 conf->bypass_count -= conf->bypass_threshold; 5361 if (conf->bypass_count < 0) 5362 conf->bypass_count = 0; 5363 } 5364 wg = NULL; 5365 } 5366 5367 if (!sh) { 5368 if (second_try) 5369 return NULL; 5370 second_try = true; 5371 try_loprio = !try_loprio; 5372 goto again; 5373 } 5374 5375 if (wg) { 5376 wg->stripes_cnt--; 5377 sh->group = NULL; 5378 } 5379 list_del_init(&sh->lru); 5380 BUG_ON(atomic_inc_return(&sh->count) != 1); 5381 return sh; 5382 } 5383 5384 struct raid5_plug_cb { 5385 struct blk_plug_cb cb; 5386 struct list_head list; 5387 struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS]; 5388 }; 5389 5390 static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule) 5391 { 5392 struct raid5_plug_cb *cb = container_of( 5393 blk_cb, struct raid5_plug_cb, cb); 5394 struct stripe_head *sh; 5395 struct mddev *mddev = cb->cb.data; 5396 struct r5conf *conf = mddev->private; 5397 int cnt = 0; 5398 int hash; 5399 5400 if (cb->list.next && !list_empty(&cb->list)) { 5401 spin_lock_irq(&conf->device_lock); 5402 while (!list_empty(&cb->list)) { 5403 sh = list_first_entry(&cb->list, struct stripe_head, lru); 5404 list_del_init(&sh->lru); 5405 /* 5406 * avoid race release_stripe_plug() sees 5407 * STRIPE_ON_UNPLUG_LIST clear but the stripe 5408 * is still in our 
list 5409 */ 5410 smp_mb__before_atomic(); 5411 clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state); 5412 /* 5413 * STRIPE_ON_RELEASE_LIST could be set here. In that 5414 * case, the count is always > 1 here 5415 */ 5416 hash = sh->hash_lock_index; 5417 __release_stripe(conf, sh, &cb->temp_inactive_list[hash]); 5418 cnt++; 5419 } 5420 spin_unlock_irq(&conf->device_lock); 5421 } 5422 release_inactive_stripe_list(conf, cb->temp_inactive_list, 5423 NR_STRIPE_HASH_LOCKS); 5424 if (mddev->queue) 5425 trace_block_unplug(mddev->queue, cnt, !from_schedule); 5426 kfree(cb); 5427 } 5428 5429 static void release_stripe_plug(struct mddev *mddev, 5430 struct stripe_head *sh) 5431 { 5432 struct blk_plug_cb *blk_cb = blk_check_plugged( 5433 raid5_unplug, mddev, 5434 sizeof(struct raid5_plug_cb)); 5435 struct raid5_plug_cb *cb; 5436 5437 if (!blk_cb) { 5438 raid5_release_stripe(sh); 5439 return; 5440 } 5441 5442 cb = container_of(blk_cb, struct raid5_plug_cb, cb); 5443 5444 if (cb->list.next == NULL) { 5445 int i; 5446 INIT_LIST_HEAD(&cb->list); 5447 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) 5448 INIT_LIST_HEAD(cb->temp_inactive_list + i); 5449 } 5450 5451 if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)) 5452 list_add_tail(&sh->lru, &cb->list); 5453 else 5454 raid5_release_stripe(sh); 5455 } 5456 5457 static void make_discard_request(struct mddev *mddev, struct bio *bi) 5458 { 5459 struct r5conf *conf = mddev->private; 5460 sector_t logical_sector, last_sector; 5461 struct stripe_head *sh; 5462 int stripe_sectors; 5463 5464 if (mddev->reshape_position != MaxSector) 5465 /* Skip discard while reshape is happening */ 5466 return; 5467 5468 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1); 5469 last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9); 5470 5471 bi->bi_next = NULL; 5472 md_write_start(mddev, bi); 5473 5474 stripe_sectors = conf->chunk_sectors * 5475 (conf->raid_disks - conf->max_degraded); 5476 logical_sector = DIV_ROUND_UP_SECTOR_T(logical_sector, 5477 stripe_sectors); 5478 sector_div(last_sector, stripe_sectors); 5479 5480 logical_sector *= conf->chunk_sectors; 5481 last_sector *= conf->chunk_sectors; 5482 5483 for (; logical_sector < last_sector; 5484 logical_sector += STRIPE_SECTORS) { 5485 DEFINE_WAIT(w); 5486 int d; 5487 again: 5488 sh = raid5_get_active_stripe(conf, logical_sector, 0, 0, 0); 5489 prepare_to_wait(&conf->wait_for_overlap, &w, 5490 TASK_UNINTERRUPTIBLE); 5491 set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); 5492 if (test_bit(STRIPE_SYNCING, &sh->state)) { 5493 raid5_release_stripe(sh); 5494 schedule(); 5495 goto again; 5496 } 5497 clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); 5498 spin_lock_irq(&sh->stripe_lock); 5499 for (d = 0; d < conf->raid_disks; d++) { 5500 if (d == sh->pd_idx || d == sh->qd_idx) 5501 continue; 5502 if (sh->dev[d].towrite || sh->dev[d].toread) { 5503 set_bit(R5_Overlap, &sh->dev[d].flags); 5504 spin_unlock_irq(&sh->stripe_lock); 5505 raid5_release_stripe(sh); 5506 schedule(); 5507 goto again; 5508 } 5509 } 5510 set_bit(STRIPE_DISCARD, &sh->state); 5511 finish_wait(&conf->wait_for_overlap, &w); 5512 sh->overwrite_disks = 0; 5513 for (d = 0; d < conf->raid_disks; d++) { 5514 if (d == sh->pd_idx || d == sh->qd_idx) 5515 continue; 5516 sh->dev[d].towrite = bi; 5517 set_bit(R5_OVERWRITE, &sh->dev[d].flags); 5518 bio_inc_remaining(bi); 5519 md_write_inc(mddev, bi); 5520 sh->overwrite_disks++; 5521 } 5522 spin_unlock_irq(&sh->stripe_lock); 5523 if (conf->mddev->bitmap) { 5524 for (d = 0; 5525 d < conf->raid_disks - 
conf->max_degraded; 5526 d++) 5527 bitmap_startwrite(mddev->bitmap, 5528 sh->sector, 5529 STRIPE_SECTORS, 5530 0); 5531 sh->bm_seq = conf->seq_flush + 1; 5532 set_bit(STRIPE_BIT_DELAY, &sh->state); 5533 } 5534 5535 set_bit(STRIPE_HANDLE, &sh->state); 5536 clear_bit(STRIPE_DELAYED, &sh->state); 5537 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 5538 atomic_inc(&conf->preread_active_stripes); 5539 release_stripe_plug(mddev, sh); 5540 } 5541 5542 md_write_end(mddev); 5543 bio_endio(bi); 5544 } 5545 5546 static void raid5_make_request(struct mddev *mddev, struct bio * bi) 5547 { 5548 struct r5conf *conf = mddev->private; 5549 int dd_idx; 5550 sector_t new_sector; 5551 sector_t logical_sector, last_sector; 5552 struct stripe_head *sh; 5553 const int rw = bio_data_dir(bi); 5554 DEFINE_WAIT(w); 5555 bool do_prepare; 5556 bool do_flush = false; 5557 5558 if (unlikely(bi->bi_opf & REQ_PREFLUSH)) { 5559 int ret = r5l_handle_flush_request(conf->log, bi); 5560 5561 if (ret == 0) 5562 return; 5563 if (ret == -ENODEV) { 5564 md_flush_request(mddev, bi); 5565 return; 5566 } 5567 /* ret == -EAGAIN, fallback */ 5568 /* 5569 * if r5l_handle_flush_request() didn't clear REQ_PREFLUSH, 5570 * we need to flush journal device 5571 */ 5572 do_flush = bi->bi_opf & REQ_PREFLUSH; 5573 } 5574 5575 /* 5576 * If array is degraded, better not do chunk aligned read because 5577 * later we might have to read it again in order to reconstruct 5578 * data on failed drives. 5579 */ 5580 if (rw == READ && mddev->degraded == 0 && 5581 mddev->reshape_position == MaxSector) { 5582 bi = chunk_aligned_read(mddev, bi); 5583 if (!bi) 5584 return; 5585 } 5586 5587 if (unlikely(bio_op(bi) == REQ_OP_DISCARD)) { 5588 make_discard_request(mddev, bi); 5589 return; 5590 } 5591 5592 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1); 5593 last_sector = bio_end_sector(bi); 5594 bi->bi_next = NULL; 5595 md_write_start(mddev, bi); 5596 5597 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); 5598 for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { 5599 int previous; 5600 int seq; 5601 5602 do_prepare = false; 5603 retry: 5604 seq = read_seqcount_begin(&conf->gen_lock); 5605 previous = 0; 5606 if (do_prepare) 5607 prepare_to_wait(&conf->wait_for_overlap, &w, 5608 TASK_UNINTERRUPTIBLE); 5609 if (unlikely(conf->reshape_progress != MaxSector)) { 5610 /* spinlock is needed as reshape_progress may be 5611 * 64bit on a 32bit platform, and so it might be 5612 * possible to see a half-updated value 5613 * Of course reshape_progress could change after 5614 * the lock is dropped, so once we get a reference 5615 * to the stripe that we think it is, we will have 5616 * to check again. 5617 */ 5618 spin_lock_irq(&conf->device_lock); 5619 if (mddev->reshape_backwards 5620 ? logical_sector < conf->reshape_progress 5621 : logical_sector >= conf->reshape_progress) { 5622 previous = 1; 5623 } else { 5624 if (mddev->reshape_backwards 5625 ? 
logical_sector < conf->reshape_safe 5626 : logical_sector >= conf->reshape_safe) { 5627 spin_unlock_irq(&conf->device_lock); 5628 schedule(); 5629 do_prepare = true; 5630 goto retry; 5631 } 5632 } 5633 spin_unlock_irq(&conf->device_lock); 5634 } 5635 5636 new_sector = raid5_compute_sector(conf, logical_sector, 5637 previous, 5638 &dd_idx, NULL); 5639 pr_debug("raid456: raid5_make_request, sector %llu logical %llu\n", 5640 (unsigned long long)new_sector, 5641 (unsigned long long)logical_sector); 5642 5643 sh = raid5_get_active_stripe(conf, new_sector, previous, 5644 (bi->bi_opf & REQ_RAHEAD), 0); 5645 if (sh) { 5646 if (unlikely(previous)) { 5647 /* expansion might have moved on while waiting for a 5648 * stripe, so we must do the range check again. 5649 * Expansion could still move past after this 5650 * test, but as we are holding a reference to 5651 * 'sh', we know that if that happens, 5652 * STRIPE_EXPANDING will get set and the expansion 5653 * won't proceed until we finish with the stripe. 5654 */ 5655 int must_retry = 0; 5656 spin_lock_irq(&conf->device_lock); 5657 if (mddev->reshape_backwards 5658 ? logical_sector >= conf->reshape_progress 5659 : logical_sector < conf->reshape_progress) 5660 /* mismatch, need to try again */ 5661 must_retry = 1; 5662 spin_unlock_irq(&conf->device_lock); 5663 if (must_retry) { 5664 raid5_release_stripe(sh); 5665 schedule(); 5666 do_prepare = true; 5667 goto retry; 5668 } 5669 } 5670 if (read_seqcount_retry(&conf->gen_lock, seq)) { 5671 /* Might have got the wrong stripe_head 5672 * by accident 5673 */ 5674 raid5_release_stripe(sh); 5675 goto retry; 5676 } 5677 5678 if (rw == WRITE && 5679 logical_sector >= mddev->suspend_lo && 5680 logical_sector < mddev->suspend_hi) { 5681 raid5_release_stripe(sh); 5682 /* As the suspend_* range is controlled by 5683 * userspace, we want an interruptible 5684 * wait. 5685 */ 5686 flush_signals(current); 5687 prepare_to_wait(&conf->wait_for_overlap, 5688 &w, TASK_INTERRUPTIBLE); 5689 if (logical_sector >= mddev->suspend_lo && 5690 logical_sector < mddev->suspend_hi) { 5691 schedule(); 5692 do_prepare = true; 5693 } 5694 goto retry; 5695 } 5696 5697 if (test_bit(STRIPE_EXPANDING, &sh->state) || 5698 !add_stripe_bio(sh, bi, dd_idx, rw, previous)) { 5699 /* Stripe is busy expanding or 5700 * add failed due to overlap. Flush everything 5701 * and wait a while 5702 */ 5703 md_wakeup_thread(mddev->thread); 5704 raid5_release_stripe(sh); 5705 schedule(); 5706 do_prepare = true; 5707 goto retry; 5708 } 5709 if (do_flush) { 5710 set_bit(STRIPE_R5C_PREFLUSH, &sh->state); 5711 /* we only need flush for one stripe */ 5712 do_flush = false; 5713 } 5714 5715 set_bit(STRIPE_HANDLE, &sh->state); 5716 clear_bit(STRIPE_DELAYED, &sh->state); 5717 if ((!sh->batch_head || sh == sh->batch_head) && 5718 (bi->bi_opf & REQ_SYNC) && 5719 !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 5720 atomic_inc(&conf->preread_active_stripes); 5721 release_stripe_plug(mddev, sh); 5722 } else { 5723 /* cannot get stripe for read-ahead, just give-up */ 5724 bi->bi_error = -EIO; 5725 break; 5726 } 5727 } 5728 finish_wait(&conf->wait_for_overlap, &w); 5729 5730 if (rw == WRITE) 5731 md_write_end(mddev); 5732 bio_endio(bi); 5733 } 5734 5735 static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks); 5736 5737 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped) 5738 { 5739 /* reshaping is quite different to recovery/resync so it is 5740 * handled quite separately ... here. 
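 * ('Reshape' here means migrating the array to a new geometry - a
 * different number of data disks, a different chunk size or a different
 * layout - while the array stays online.)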
5741 * 5742 * On each call to sync_request, we gather one chunk worth of 5743 * destination stripes and flag them as expanding. 5744 * Then we find all the source stripes and request reads. 5745 * As the reads complete, handle_stripe will copy the data 5746 * into the destination stripe and release that stripe. 5747 */ 5748 struct r5conf *conf = mddev->private; 5749 struct stripe_head *sh; 5750 sector_t first_sector, last_sector; 5751 int raid_disks = conf->previous_raid_disks; 5752 int data_disks = raid_disks - conf->max_degraded; 5753 int new_data_disks = conf->raid_disks - conf->max_degraded; 5754 int i; 5755 int dd_idx; 5756 sector_t writepos, readpos, safepos; 5757 sector_t stripe_addr; 5758 int reshape_sectors; 5759 struct list_head stripes; 5760 sector_t retn; 5761 5762 if (sector_nr == 0) { 5763 /* If restarting in the middle, skip the initial sectors */ 5764 if (mddev->reshape_backwards && 5765 conf->reshape_progress < raid5_size(mddev, 0, 0)) { 5766 sector_nr = raid5_size(mddev, 0, 0) 5767 - conf->reshape_progress; 5768 } else if (mddev->reshape_backwards && 5769 conf->reshape_progress == MaxSector) { 5770 /* shouldn't happen, but just in case, finish up.*/ 5771 sector_nr = MaxSector; 5772 } else if (!mddev->reshape_backwards && 5773 conf->reshape_progress > 0) 5774 sector_nr = conf->reshape_progress; 5775 sector_div(sector_nr, new_data_disks); 5776 if (sector_nr) { 5777 mddev->curr_resync_completed = sector_nr; 5778 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 5779 *skipped = 1; 5780 retn = sector_nr; 5781 goto finish; 5782 } 5783 } 5784 5785 /* We need to process a full chunk at a time. 5786 * If old and new chunk sizes differ, we need to process the 5787 * largest of these 5788 */ 5789 5790 reshape_sectors = max(conf->chunk_sectors, conf->prev_chunk_sectors); 5791 5792 /* We update the metadata at least every 10 seconds, or when 5793 * the data about to be copied would over-write the source of 5794 * the data at the front of the range. i.e. one new_stripe 5795 * along from reshape_progress new_maps to after where 5796 * reshape_safe old_maps to 5797 */ 5798 writepos = conf->reshape_progress; 5799 sector_div(writepos, new_data_disks); 5800 readpos = conf->reshape_progress; 5801 sector_div(readpos, data_disks); 5802 safepos = conf->reshape_safe; 5803 sector_div(safepos, data_disks); 5804 if (mddev->reshape_backwards) { 5805 BUG_ON(writepos < reshape_sectors); 5806 writepos -= reshape_sectors; 5807 readpos += reshape_sectors; 5808 safepos += reshape_sectors; 5809 } else { 5810 writepos += reshape_sectors; 5811 /* readpos and safepos are worst-case calculations. 5812 * A negative number is overly pessimistic, and causes 5813 * obvious problems for unsigned storage. So clip to 0. 5814 */ 5815 readpos -= min_t(sector_t, reshape_sectors, readpos); 5816 safepos -= min_t(sector_t, reshape_sectors, safepos); 5817 } 5818 5819 /* Having calculated the 'writepos' possibly use it 5820 * to set 'stripe_addr' which is where we will write to. 5821 */ 5822 if (mddev->reshape_backwards) { 5823 BUG_ON(conf->reshape_progress == 0); 5824 stripe_addr = writepos; 5825 BUG_ON((mddev->dev_sectors & 5826 ~((sector_t)reshape_sectors - 1)) 5827 - reshape_sectors - stripe_addr 5828 != sector_nr); 5829 } else { 5830 BUG_ON(writepos != sector_nr + reshape_sectors); 5831 stripe_addr = sector_nr; 5832 } 5833 5834 /* 'writepos' is the most advanced device address we might write. 5835 * 'readpos' is the least advanced device address we might read. 
5836 * 'safepos' is the least address recorded in the metadata as having 5837 * been reshaped. 5838 * If there is a min_offset_diff, these are adjusted either by 5839 * increasing the safepos/readpos if diff is negative, or 5840 * increasing writepos if diff is positive. 5841 * If 'readpos' is then behind 'writepos', there is no way that we can 5842 * ensure safety in the face of a crash - that must be done by userspace 5843 * making a backup of the data. So in that case there is no particular 5844 * rush to update metadata. 5845 * Otherwise if 'safepos' is behind 'writepos', then we really need to 5846 * update the metadata to advance 'safepos' to match 'readpos' so that 5847 * we can be safe in the event of a crash. 5848 * So we insist on updating metadata if safepos is behind writepos and 5849 * readpos is beyond writepos. 5850 * In any case, update the metadata every 10 seconds. 5851 * Maybe that number should be configurable, but I'm not sure it is 5852 * worth it.... maybe it could be a multiple of safemode_delay??? 5853 */ 5854 if (conf->min_offset_diff < 0) { 5855 safepos += -conf->min_offset_diff; 5856 readpos += -conf->min_offset_diff; 5857 } else 5858 writepos += conf->min_offset_diff; 5859 5860 if ((mddev->reshape_backwards 5861 ? (safepos > writepos && readpos < writepos) 5862 : (safepos < writepos && readpos > writepos)) || 5863 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { 5864 /* Cannot proceed until we've updated the superblock... */ 5865 wait_event(conf->wait_for_overlap, 5866 atomic_read(&conf->reshape_stripes)==0 5867 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); 5868 if (atomic_read(&conf->reshape_stripes) != 0) 5869 return 0; 5870 mddev->reshape_position = conf->reshape_progress; 5871 mddev->curr_resync_completed = sector_nr; 5872 conf->reshape_checkpoint = jiffies; 5873 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 5874 md_wakeup_thread(mddev->thread); 5875 wait_event(mddev->sb_wait, mddev->sb_flags == 0 || 5876 test_bit(MD_RECOVERY_INTR, &mddev->recovery)); 5877 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 5878 return 0; 5879 spin_lock_irq(&conf->device_lock); 5880 conf->reshape_safe = mddev->reshape_position; 5881 spin_unlock_irq(&conf->device_lock); 5882 wake_up(&conf->wait_for_overlap); 5883 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 5884 } 5885 5886 INIT_LIST_HEAD(&stripes); 5887 for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) { 5888 int j; 5889 int skipped_disk = 0; 5890 sh = raid5_get_active_stripe(conf, stripe_addr+i, 0, 0, 1); 5891 set_bit(STRIPE_EXPANDING, &sh->state); 5892 atomic_inc(&conf->reshape_stripes); 5893 /* If any of this stripe is beyond the end of the old 5894 * array, then we need to zero those blocks 5895 */ 5896 for (j=sh->disks; j--;) { 5897 sector_t s; 5898 if (j == sh->pd_idx) 5899 continue; 5900 if (conf->level == 6 && 5901 j == sh->qd_idx) 5902 continue; 5903 s = raid5_compute_blocknr(sh, j, 0); 5904 if (s < raid5_size(mddev, 0, 0)) { 5905 skipped_disk = 1; 5906 continue; 5907 } 5908 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE); 5909 set_bit(R5_Expanded, &sh->dev[j].flags); 5910 set_bit(R5_UPTODATE, &sh->dev[j].flags); 5911 } 5912 if (!skipped_disk) { 5913 set_bit(STRIPE_EXPAND_READY, &sh->state); 5914 set_bit(STRIPE_HANDLE, &sh->state); 5915 } 5916 list_add(&sh->lru, &stripes); 5917 } 5918 spin_lock_irq(&conf->device_lock); 5919 if (mddev->reshape_backwards) 5920 conf->reshape_progress -= reshape_sectors * new_data_disks; 5921 else 5922 conf->reshape_progress += reshape_sectors * 
new_data_disks; 5923 spin_unlock_irq(&conf->device_lock); 5924 /* Ok, those stripe are ready. We can start scheduling 5925 * reads on the source stripes. 5926 * The source stripes are determined by mapping the first and last 5927 * block on the destination stripes. 5928 */ 5929 first_sector = 5930 raid5_compute_sector(conf, stripe_addr*(new_data_disks), 5931 1, &dd_idx, NULL); 5932 last_sector = 5933 raid5_compute_sector(conf, ((stripe_addr+reshape_sectors) 5934 * new_data_disks - 1), 5935 1, &dd_idx, NULL); 5936 if (last_sector >= mddev->dev_sectors) 5937 last_sector = mddev->dev_sectors - 1; 5938 while (first_sector <= last_sector) { 5939 sh = raid5_get_active_stripe(conf, first_sector, 1, 0, 1); 5940 set_bit(STRIPE_EXPAND_SOURCE, &sh->state); 5941 set_bit(STRIPE_HANDLE, &sh->state); 5942 raid5_release_stripe(sh); 5943 first_sector += STRIPE_SECTORS; 5944 } 5945 /* Now that the sources are clearly marked, we can release 5946 * the destination stripes 5947 */ 5948 while (!list_empty(&stripes)) { 5949 sh = list_entry(stripes.next, struct stripe_head, lru); 5950 list_del_init(&sh->lru); 5951 raid5_release_stripe(sh); 5952 } 5953 /* If this takes us to the resync_max point where we have to pause, 5954 * then we need to write out the superblock. 5955 */ 5956 sector_nr += reshape_sectors; 5957 retn = reshape_sectors; 5958 finish: 5959 if (mddev->curr_resync_completed > mddev->resync_max || 5960 (sector_nr - mddev->curr_resync_completed) * 2 5961 >= mddev->resync_max - mddev->curr_resync_completed) { 5962 /* Cannot proceed until we've updated the superblock... */ 5963 wait_event(conf->wait_for_overlap, 5964 atomic_read(&conf->reshape_stripes) == 0 5965 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); 5966 if (atomic_read(&conf->reshape_stripes) != 0) 5967 goto ret; 5968 mddev->reshape_position = conf->reshape_progress; 5969 mddev->curr_resync_completed = sector_nr; 5970 conf->reshape_checkpoint = jiffies; 5971 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 5972 md_wakeup_thread(mddev->thread); 5973 wait_event(mddev->sb_wait, 5974 !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) 5975 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); 5976 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 5977 goto ret; 5978 spin_lock_irq(&conf->device_lock); 5979 conf->reshape_safe = mddev->reshape_position; 5980 spin_unlock_irq(&conf->device_lock); 5981 wake_up(&conf->wait_for_overlap); 5982 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 5983 } 5984 ret: 5985 return retn; 5986 } 5987 5988 static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_nr, 5989 int *skipped) 5990 { 5991 struct r5conf *conf = mddev->private; 5992 struct stripe_head *sh; 5993 sector_t max_sector = mddev->dev_sectors; 5994 sector_t sync_blocks; 5995 int still_degraded = 0; 5996 int i; 5997 5998 if (sector_nr >= max_sector) { 5999 /* just being told to finish up .. 
nothing much to do */ 6000 6001 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { 6002 end_reshape(conf); 6003 return 0; 6004 } 6005 6006 if (mddev->curr_resync < max_sector) /* aborted */ 6007 bitmap_end_sync(mddev->bitmap, mddev->curr_resync, 6008 &sync_blocks, 1); 6009 else /* completed sync */ 6010 conf->fullsync = 0; 6011 bitmap_close_sync(mddev->bitmap); 6012 6013 return 0; 6014 } 6015 6016 /* Allow raid5_quiesce to complete */ 6017 wait_event(conf->wait_for_overlap, conf->quiesce != 2); 6018 6019 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 6020 return reshape_request(mddev, sector_nr, skipped); 6021 6022 /* No need to check resync_max as we never do more than one 6023 * stripe, and as resync_max will always be on a chunk boundary, 6024 * if the check in md_do_sync didn't fire, there is no chance 6025 * of overstepping resync_max here 6026 */ 6027 6028 /* if there are too many failed drives and we are trying 6029 * to resync, then assert that we are finished, because there is 6030 * nothing we can do. 6031 */ 6032 if (mddev->degraded >= conf->max_degraded && 6033 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 6034 sector_t rv = mddev->dev_sectors - sector_nr; 6035 *skipped = 1; 6036 return rv; 6037 } 6038 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && 6039 !conf->fullsync && 6040 !bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && 6041 sync_blocks >= STRIPE_SECTORS) { 6042 /* we can skip this block, and probably more */ 6043 sync_blocks /= STRIPE_SECTORS; 6044 *skipped = 1; 6045 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */ 6046 } 6047 6048 bitmap_cond_end_sync(mddev->bitmap, sector_nr, false); 6049 6050 sh = raid5_get_active_stripe(conf, sector_nr, 0, 1, 0); 6051 if (sh == NULL) { 6052 sh = raid5_get_active_stripe(conf, sector_nr, 0, 0, 0); 6053 /* make sure we don't swamp the stripe cache if someone else 6054 * is trying to get access 6055 */ 6056 schedule_timeout_uninterruptible(1); 6057 } 6058 /* Need to check if array will still be degraded after recovery/resync 6059 * Note in case of > 1 drive failures it's possible we're rebuilding 6060 * one drive while leaving another faulty drive in the array. 6061 */ 6062 rcu_read_lock(); 6063 for (i = 0; i < conf->raid_disks; i++) { 6064 struct md_rdev *rdev = ACCESS_ONCE(conf->disks[i].rdev); 6065 6066 if (rdev == NULL || test_bit(Faulty, &rdev->flags)) 6067 still_degraded = 1; 6068 } 6069 rcu_read_unlock(); 6070 6071 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); 6072 6073 set_bit(STRIPE_SYNC_REQUESTED, &sh->state); 6074 set_bit(STRIPE_HANDLE, &sh->state); 6075 6076 raid5_release_stripe(sh); 6077 6078 return STRIPE_SECTORS; 6079 } 6080 6081 static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio, 6082 unsigned int offset) 6083 { 6084 /* We may not be able to submit a whole bio at once as there 6085 * may not be enough stripe_heads available. 6086 * We cannot pre-allocate enough stripe_heads as we may need 6087 * more than exist in the cache (if we allow ever larger chunks). 6088 * So we do one stripe head at a time and record in 6089 * conf->retry_read_offset how many have been done. 6090 * 6091 * We *know* that this entire raid_bio is in one chunk, so 6092 * there will be only one 'dd_idx' and we only need one call to raid5_compute_sector. 
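 *
 * For example, assuming 4KiB pages (so STRIPE_SECTORS is 8): a 64KiB
 * chunk-aligned read is split into 16 stripe-sized pieces; if only the
 * first 10 stripe_heads can be obtained, retry_read_offset is set to 10
 * and the next retry resumes from the 11th piece rather than starting over.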
6093 */ 6094 struct stripe_head *sh; 6095 int dd_idx; 6096 sector_t sector, logical_sector, last_sector; 6097 int scnt = 0; 6098 int handled = 0; 6099 6100 logical_sector = raid_bio->bi_iter.bi_sector & 6101 ~((sector_t)STRIPE_SECTORS-1); 6102 sector = raid5_compute_sector(conf, logical_sector, 6103 0, &dd_idx, NULL); 6104 last_sector = bio_end_sector(raid_bio); 6105 6106 for (; logical_sector < last_sector; 6107 logical_sector += STRIPE_SECTORS, 6108 sector += STRIPE_SECTORS, 6109 scnt++) { 6110 6111 if (scnt < offset) 6112 /* already done this stripe */ 6113 continue; 6114 6115 sh = raid5_get_active_stripe(conf, sector, 0, 1, 1); 6116 6117 if (!sh) { 6118 /* failed to get a stripe - must wait */ 6119 conf->retry_read_aligned = raid_bio; 6120 conf->retry_read_offset = scnt; 6121 return handled; 6122 } 6123 6124 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) { 6125 raid5_release_stripe(sh); 6126 conf->retry_read_aligned = raid_bio; 6127 conf->retry_read_offset = scnt; 6128 return handled; 6129 } 6130 6131 set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags); 6132 handle_stripe(sh); 6133 raid5_release_stripe(sh); 6134 handled++; 6135 } 6136 6137 bio_endio(raid_bio); 6138 6139 if (atomic_dec_and_test(&conf->active_aligned_reads)) 6140 wake_up(&conf->wait_for_quiescent); 6141 return handled; 6142 } 6143 6144 static int handle_active_stripes(struct r5conf *conf, int group, 6145 struct r5worker *worker, 6146 struct list_head *temp_inactive_list) 6147 { 6148 struct stripe_head *batch[MAX_STRIPE_BATCH], *sh; 6149 int i, batch_size = 0, hash; 6150 bool release_inactive = false; 6151 6152 while (batch_size < MAX_STRIPE_BATCH && 6153 (sh = __get_priority_stripe(conf, group)) != NULL) 6154 batch[batch_size++] = sh; 6155 6156 if (batch_size == 0) { 6157 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) 6158 if (!list_empty(temp_inactive_list + i)) 6159 break; 6160 if (i == NR_STRIPE_HASH_LOCKS) { 6161 spin_unlock_irq(&conf->device_lock); 6162 r5l_flush_stripe_to_raid(conf->log); 6163 spin_lock_irq(&conf->device_lock); 6164 return batch_size; 6165 } 6166 release_inactive = true; 6167 } 6168 spin_unlock_irq(&conf->device_lock); 6169 6170 release_inactive_stripe_list(conf, temp_inactive_list, 6171 NR_STRIPE_HASH_LOCKS); 6172 6173 r5l_flush_stripe_to_raid(conf->log); 6174 if (release_inactive) { 6175 spin_lock_irq(&conf->device_lock); 6176 return 0; 6177 } 6178 6179 for (i = 0; i < batch_size; i++) 6180 handle_stripe(batch[i]); 6181 log_write_stripe_run(conf); 6182 6183 cond_resched(); 6184 6185 spin_lock_irq(&conf->device_lock); 6186 for (i = 0; i < batch_size; i++) { 6187 hash = batch[i]->hash_lock_index; 6188 __release_stripe(conf, batch[i], &temp_inactive_list[hash]); 6189 } 6190 return batch_size; 6191 } 6192 6193 static void raid5_do_work(struct work_struct *work) 6194 { 6195 struct r5worker *worker = container_of(work, struct r5worker, work); 6196 struct r5worker_group *group = worker->group; 6197 struct r5conf *conf = group->conf; 6198 struct mddev *mddev = conf->mddev; 6199 int group_id = group - conf->worker_groups; 6200 int handled; 6201 struct blk_plug plug; 6202 6203 pr_debug("+++ raid5worker active\n"); 6204 6205 blk_start_plug(&plug); 6206 handled = 0; 6207 spin_lock_irq(&conf->device_lock); 6208 while (1) { 6209 int batch_size, released; 6210 6211 released = release_stripe_list(conf, worker->temp_inactive_list); 6212 6213 batch_size = handle_active_stripes(conf, group_id, worker, 6214 worker->temp_inactive_list); 6215 worker->working = false; 6216 if (!batch_size && !released) 6217 break; 6218 
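		/* Account for this batch, then wait for any pending
		 * superblock update to be written before handling more stripes.
		 */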
handled += batch_size; 6219 wait_event_lock_irq(mddev->sb_wait, 6220 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags), 6221 conf->device_lock); 6222 } 6223 pr_debug("%d stripes handled\n", handled); 6224 6225 spin_unlock_irq(&conf->device_lock); 6226 blk_finish_plug(&plug); 6227 6228 pr_debug("--- raid5worker inactive\n"); 6229 } 6230 6231 /* 6232 * This is our raid5 kernel thread. 6233 * 6234 * We scan the hash table for stripes which can be handled now. 6235 * During the scan, completed stripes are saved for us by the interrupt 6236 * handler, so that they will not have to wait for our next wakeup. 6237 */ 6238 static void raid5d(struct md_thread *thread) 6239 { 6240 struct mddev *mddev = thread->mddev; 6241 struct r5conf *conf = mddev->private; 6242 int handled; 6243 struct blk_plug plug; 6244 6245 pr_debug("+++ raid5d active\n"); 6246 6247 md_check_recovery(mddev); 6248 6249 blk_start_plug(&plug); 6250 handled = 0; 6251 spin_lock_irq(&conf->device_lock); 6252 while (1) { 6253 struct bio *bio; 6254 int batch_size, released; 6255 unsigned int offset; 6256 6257 released = release_stripe_list(conf, conf->temp_inactive_list); 6258 if (released) 6259 clear_bit(R5_DID_ALLOC, &conf->cache_state); 6260 6261 if ( 6262 !list_empty(&conf->bitmap_list)) { 6263 /* Now is a good time to flush some bitmap updates */ 6264 conf->seq_flush++; 6265 spin_unlock_irq(&conf->device_lock); 6266 bitmap_unplug(mddev->bitmap); 6267 spin_lock_irq(&conf->device_lock); 6268 conf->seq_write = conf->seq_flush; 6269 activate_bit_delay(conf, conf->temp_inactive_list); 6270 } 6271 raid5_activate_delayed(conf); 6272 6273 while ((bio = remove_bio_from_retry(conf, &offset))) { 6274 int ok; 6275 spin_unlock_irq(&conf->device_lock); 6276 ok = retry_aligned_read(conf, bio, offset); 6277 spin_lock_irq(&conf->device_lock); 6278 if (!ok) 6279 break; 6280 handled++; 6281 } 6282 6283 batch_size = handle_active_stripes(conf, ANY_GROUP, NULL, 6284 conf->temp_inactive_list); 6285 if (!batch_size && !released) 6286 break; 6287 handled += batch_size; 6288 6289 if (mddev->sb_flags & ~(1 << MD_SB_CHANGE_PENDING)) { 6290 spin_unlock_irq(&conf->device_lock); 6291 md_check_recovery(mddev); 6292 spin_lock_irq(&conf->device_lock); 6293 } 6294 } 6295 pr_debug("%d stripes handled\n", handled); 6296 6297 spin_unlock_irq(&conf->device_lock); 6298 if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) && 6299 mutex_trylock(&conf->cache_size_mutex)) { 6300 grow_one_stripe(conf, __GFP_NOWARN); 6301 /* Set flag even if allocation failed. 
This helps 6302 * slow down allocation requests when mem is short 6303 */ 6304 set_bit(R5_DID_ALLOC, &conf->cache_state); 6305 mutex_unlock(&conf->cache_size_mutex); 6306 } 6307 6308 flush_deferred_bios(conf); 6309 6310 r5l_flush_stripe_to_raid(conf->log); 6311 6312 async_tx_issue_pending_all(); 6313 blk_finish_plug(&plug); 6314 6315 pr_debug("--- raid5d inactive\n"); 6316 } 6317 6318 static ssize_t 6319 raid5_show_stripe_cache_size(struct mddev *mddev, char *page) 6320 { 6321 struct r5conf *conf; 6322 int ret = 0; 6323 spin_lock(&mddev->lock); 6324 conf = mddev->private; 6325 if (conf) 6326 ret = sprintf(page, "%d\n", conf->min_nr_stripes); 6327 spin_unlock(&mddev->lock); 6328 return ret; 6329 } 6330 6331 int 6332 raid5_set_cache_size(struct mddev *mddev, int size) 6333 { 6334 struct r5conf *conf = mddev->private; 6335 6336 if (size <= 16 || size > 32768) 6337 return -EINVAL; 6338 6339 conf->min_nr_stripes = size; 6340 mutex_lock(&conf->cache_size_mutex); 6341 while (size < conf->max_nr_stripes && 6342 drop_one_stripe(conf)) 6343 ; 6344 mutex_unlock(&conf->cache_size_mutex); 6345 6346 md_allow_write(mddev); 6347 6348 mutex_lock(&conf->cache_size_mutex); 6349 while (size > conf->max_nr_stripes) 6350 if (!grow_one_stripe(conf, GFP_KERNEL)) 6351 break; 6352 mutex_unlock(&conf->cache_size_mutex); 6353 6354 return 0; 6355 } 6356 EXPORT_SYMBOL(raid5_set_cache_size); 6357 6358 static ssize_t 6359 raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len) 6360 { 6361 struct r5conf *conf; 6362 unsigned long new; 6363 int err; 6364 6365 if (len >= PAGE_SIZE) 6366 return -EINVAL; 6367 if (kstrtoul(page, 10, &new)) 6368 return -EINVAL; 6369 err = mddev_lock(mddev); 6370 if (err) 6371 return err; 6372 conf = mddev->private; 6373 if (!conf) 6374 err = -ENODEV; 6375 else 6376 err = raid5_set_cache_size(mddev, new); 6377 mddev_unlock(mddev); 6378 6379 return err ?: len; 6380 } 6381 6382 static struct md_sysfs_entry 6383 raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR, 6384 raid5_show_stripe_cache_size, 6385 raid5_store_stripe_cache_size); 6386 6387 static ssize_t 6388 raid5_show_rmw_level(struct mddev *mddev, char *page) 6389 { 6390 struct r5conf *conf = mddev->private; 6391 if (conf) 6392 return sprintf(page, "%d\n", conf->rmw_level); 6393 else 6394 return 0; 6395 } 6396 6397 static ssize_t 6398 raid5_store_rmw_level(struct mddev *mddev, const char *page, size_t len) 6399 { 6400 struct r5conf *conf = mddev->private; 6401 unsigned long new; 6402 6403 if (!conf) 6404 return -ENODEV; 6405 6406 if (len >= PAGE_SIZE) 6407 return -EINVAL; 6408 6409 if (kstrtoul(page, 10, &new)) 6410 return -EINVAL; 6411 6412 if (new != PARITY_DISABLE_RMW && !raid6_call.xor_syndrome) 6413 return -EINVAL; 6414 6415 if (new != PARITY_DISABLE_RMW && 6416 new != PARITY_ENABLE_RMW && 6417 new != PARITY_PREFER_RMW) 6418 return -EINVAL; 6419 6420 conf->rmw_level = new; 6421 return len; 6422 } 6423 6424 static struct md_sysfs_entry 6425 raid5_rmw_level = __ATTR(rmw_level, S_IRUGO | S_IWUSR, 6426 raid5_show_rmw_level, 6427 raid5_store_rmw_level); 6428 6429 6430 static ssize_t 6431 raid5_show_preread_threshold(struct mddev *mddev, char *page) 6432 { 6433 struct r5conf *conf; 6434 int ret = 0; 6435 spin_lock(&mddev->lock); 6436 conf = mddev->private; 6437 if (conf) 6438 ret = sprintf(page, "%d\n", conf->bypass_threshold); 6439 spin_unlock(&mddev->lock); 6440 return ret; 6441 } 6442 6443 static ssize_t 6444 raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len) 6445 { 
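	/* Parse a decimal value and, under the mddev lock, update
	 * conf->bypass_threshold; values larger than the current
	 * stripe_cache_size (min_nr_stripes) are rejected.
	 */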
6446 struct r5conf *conf; 6447 unsigned long new; 6448 int err; 6449 6450 if (len >= PAGE_SIZE) 6451 return -EINVAL; 6452 if (kstrtoul(page, 10, &new)) 6453 return -EINVAL; 6454 6455 err = mddev_lock(mddev); 6456 if (err) 6457 return err; 6458 conf = mddev->private; 6459 if (!conf) 6460 err = -ENODEV; 6461 else if (new > conf->min_nr_stripes) 6462 err = -EINVAL; 6463 else 6464 conf->bypass_threshold = new; 6465 mddev_unlock(mddev); 6466 return err ?: len; 6467 } 6468 6469 static struct md_sysfs_entry 6470 raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold, 6471 S_IRUGO | S_IWUSR, 6472 raid5_show_preread_threshold, 6473 raid5_store_preread_threshold); 6474 6475 static ssize_t 6476 raid5_show_skip_copy(struct mddev *mddev, char *page) 6477 { 6478 struct r5conf *conf; 6479 int ret = 0; 6480 spin_lock(&mddev->lock); 6481 conf = mddev->private; 6482 if (conf) 6483 ret = sprintf(page, "%d\n", conf->skip_copy); 6484 spin_unlock(&mddev->lock); 6485 return ret; 6486 } 6487 6488 static ssize_t 6489 raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len) 6490 { 6491 struct r5conf *conf; 6492 unsigned long new; 6493 int err; 6494 6495 if (len >= PAGE_SIZE) 6496 return -EINVAL; 6497 if (kstrtoul(page, 10, &new)) 6498 return -EINVAL; 6499 new = !!new; 6500 6501 err = mddev_lock(mddev); 6502 if (err) 6503 return err; 6504 conf = mddev->private; 6505 if (!conf) 6506 err = -ENODEV; 6507 else if (new != conf->skip_copy) { 6508 mddev_suspend(mddev); 6509 conf->skip_copy = new; 6510 if (new) 6511 mddev->queue->backing_dev_info->capabilities |= 6512 BDI_CAP_STABLE_WRITES; 6513 else 6514 mddev->queue->backing_dev_info->capabilities &= 6515 ~BDI_CAP_STABLE_WRITES; 6516 mddev_resume(mddev); 6517 } 6518 mddev_unlock(mddev); 6519 return err ?: len; 6520 } 6521 6522 static struct md_sysfs_entry 6523 raid5_skip_copy = __ATTR(skip_copy, S_IRUGO | S_IWUSR, 6524 raid5_show_skip_copy, 6525 raid5_store_skip_copy); 6526 6527 static ssize_t 6528 stripe_cache_active_show(struct mddev *mddev, char *page) 6529 { 6530 struct r5conf *conf = mddev->private; 6531 if (conf) 6532 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); 6533 else 6534 return 0; 6535 } 6536 6537 static struct md_sysfs_entry 6538 raid5_stripecache_active = __ATTR_RO(stripe_cache_active); 6539 6540 static ssize_t 6541 raid5_show_group_thread_cnt(struct mddev *mddev, char *page) 6542 { 6543 struct r5conf *conf; 6544 int ret = 0; 6545 spin_lock(&mddev->lock); 6546 conf = mddev->private; 6547 if (conf) 6548 ret = sprintf(page, "%d\n", conf->worker_cnt_per_group); 6549 spin_unlock(&mddev->lock); 6550 return ret; 6551 } 6552 6553 static int alloc_thread_groups(struct r5conf *conf, int cnt, 6554 int *group_cnt, 6555 int *worker_cnt_per_group, 6556 struct r5worker_group **worker_groups); 6557 static ssize_t 6558 raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) 6559 { 6560 struct r5conf *conf; 6561 unsigned long new; 6562 int err; 6563 struct r5worker_group *new_groups, *old_groups; 6564 int group_cnt, worker_cnt_per_group; 6565 6566 if (len >= PAGE_SIZE) 6567 return -EINVAL; 6568 if (kstrtoul(page, 10, &new)) 6569 return -EINVAL; 6570 6571 err = mddev_lock(mddev); 6572 if (err) 6573 return err; 6574 conf = mddev->private; 6575 if (!conf) 6576 err = -ENODEV; 6577 else if (new != conf->worker_cnt_per_group) { 6578 mddev_suspend(mddev); 6579 6580 old_groups = conf->worker_groups; 6581 if (old_groups) 6582 flush_workqueue(raid5_wq); 6583 6584 err = alloc_thread_groups(conf, new, 6585 
&group_cnt, &worker_cnt_per_group, 6586 &new_groups); 6587 if (!err) { 6588 spin_lock_irq(&conf->device_lock); 6589 conf->group_cnt = group_cnt; 6590 conf->worker_cnt_per_group = worker_cnt_per_group; 6591 conf->worker_groups = new_groups; 6592 spin_unlock_irq(&conf->device_lock); 6593 6594 if (old_groups) 6595 kfree(old_groups[0].workers); 6596 kfree(old_groups); 6597 } 6598 mddev_resume(mddev); 6599 } 6600 mddev_unlock(mddev); 6601 6602 return err ?: len; 6603 } 6604 6605 static struct md_sysfs_entry 6606 raid5_group_thread_cnt = __ATTR(group_thread_cnt, S_IRUGO | S_IWUSR, 6607 raid5_show_group_thread_cnt, 6608 raid5_store_group_thread_cnt); 6609 6610 static struct attribute *raid5_attrs[] = { 6611 &raid5_stripecache_size.attr, 6612 &raid5_stripecache_active.attr, 6613 &raid5_preread_bypass_threshold.attr, 6614 &raid5_group_thread_cnt.attr, 6615 &raid5_skip_copy.attr, 6616 &raid5_rmw_level.attr, 6617 &r5c_journal_mode.attr, 6618 NULL, 6619 }; 6620 static struct attribute_group raid5_attrs_group = { 6621 .name = NULL, 6622 .attrs = raid5_attrs, 6623 }; 6624 6625 static int alloc_thread_groups(struct r5conf *conf, int cnt, 6626 int *group_cnt, 6627 int *worker_cnt_per_group, 6628 struct r5worker_group **worker_groups) 6629 { 6630 int i, j, k; 6631 ssize_t size; 6632 struct r5worker *workers; 6633 6634 *worker_cnt_per_group = cnt; 6635 if (cnt == 0) { 6636 *group_cnt = 0; 6637 *worker_groups = NULL; 6638 return 0; 6639 } 6640 *group_cnt = num_possible_nodes(); 6641 size = sizeof(struct r5worker) * cnt; 6642 workers = kzalloc(size * *group_cnt, GFP_NOIO); 6643 *worker_groups = kzalloc(sizeof(struct r5worker_group) * 6644 *group_cnt, GFP_NOIO); 6645 if (!*worker_groups || !workers) { 6646 kfree(workers); 6647 kfree(*worker_groups); 6648 return -ENOMEM; 6649 } 6650 6651 for (i = 0; i < *group_cnt; i++) { 6652 struct r5worker_group *group; 6653 6654 group = &(*worker_groups)[i]; 6655 INIT_LIST_HEAD(&group->handle_list); 6656 INIT_LIST_HEAD(&group->loprio_list); 6657 group->conf = conf; 6658 group->workers = workers + i * cnt; 6659 6660 for (j = 0; j < cnt; j++) { 6661 struct r5worker *worker = group->workers + j; 6662 worker->group = group; 6663 INIT_WORK(&worker->work, raid5_do_work); 6664 6665 for (k = 0; k < NR_STRIPE_HASH_LOCKS; k++) 6666 INIT_LIST_HEAD(worker->temp_inactive_list + k); 6667 } 6668 } 6669 6670 return 0; 6671 } 6672 6673 static void free_thread_groups(struct r5conf *conf) 6674 { 6675 if (conf->worker_groups) 6676 kfree(conf->worker_groups[0].workers); 6677 kfree(conf->worker_groups); 6678 conf->worker_groups = NULL; 6679 } 6680 6681 static sector_t 6682 raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks) 6683 { 6684 struct r5conf *conf = mddev->private; 6685 6686 if (!sectors) 6687 sectors = mddev->dev_sectors; 6688 if (!raid_disks) 6689 /* size is defined by the smallest of previous and new size */ 6690 raid_disks = min(conf->raid_disks, conf->previous_raid_disks); 6691 6692 sectors &= ~((sector_t)conf->chunk_sectors - 1); 6693 sectors &= ~((sector_t)conf->prev_chunk_sectors - 1); 6694 return sectors * (raid_disks - conf->max_degraded); 6695 } 6696 6697 static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) 6698 { 6699 safe_put_page(percpu->spare_page); 6700 if (percpu->scribble) 6701 flex_array_free(percpu->scribble); 6702 percpu->spare_page = NULL; 6703 percpu->scribble = NULL; 6704 } 6705 6706 static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) 6707 { 6708 if (conf->level == 6 && !percpu->spare_page) 
6709 percpu->spare_page = alloc_page(GFP_KERNEL); 6710 if (!percpu->scribble) 6711 percpu->scribble = scribble_alloc(max(conf->raid_disks, 6712 conf->previous_raid_disks), 6713 max(conf->chunk_sectors, 6714 conf->prev_chunk_sectors) 6715 / STRIPE_SECTORS, 6716 GFP_KERNEL); 6717 6718 if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) { 6719 free_scratch_buffer(conf, percpu); 6720 return -ENOMEM; 6721 } 6722 6723 return 0; 6724 } 6725 6726 static int raid456_cpu_dead(unsigned int cpu, struct hlist_node *node) 6727 { 6728 struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node); 6729 6730 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); 6731 return 0; 6732 } 6733 6734 static void raid5_free_percpu(struct r5conf *conf) 6735 { 6736 if (!conf->percpu) 6737 return; 6738 6739 cpuhp_state_remove_instance(CPUHP_MD_RAID5_PREPARE, &conf->node); 6740 free_percpu(conf->percpu); 6741 } 6742 6743 static void free_conf(struct r5conf *conf) 6744 { 6745 int i; 6746 6747 log_exit(conf); 6748 6749 if (conf->shrinker.nr_deferred) 6750 unregister_shrinker(&conf->shrinker); 6751 6752 free_thread_groups(conf); 6753 shrink_stripes(conf); 6754 raid5_free_percpu(conf); 6755 for (i = 0; i < conf->pool_size; i++) 6756 if (conf->disks[i].extra_page) 6757 put_page(conf->disks[i].extra_page); 6758 kfree(conf->disks); 6759 if (conf->bio_split) 6760 bioset_free(conf->bio_split); 6761 kfree(conf->stripe_hashtbl); 6762 kfree(conf->pending_data); 6763 kfree(conf); 6764 } 6765 6766 static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node) 6767 { 6768 struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node); 6769 struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu); 6770 6771 if (alloc_scratch_buffer(conf, percpu)) { 6772 pr_warn("%s: failed memory allocation for cpu%u\n", 6773 __func__, cpu); 6774 return -ENOMEM; 6775 } 6776 return 0; 6777 } 6778 6779 static int raid5_alloc_percpu(struct r5conf *conf) 6780 { 6781 int err = 0; 6782 6783 conf->percpu = alloc_percpu(struct raid5_percpu); 6784 if (!conf->percpu) 6785 return -ENOMEM; 6786 6787 err = cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node); 6788 if (!err) { 6789 conf->scribble_disks = max(conf->raid_disks, 6790 conf->previous_raid_disks); 6791 conf->scribble_sectors = max(conf->chunk_sectors, 6792 conf->prev_chunk_sectors); 6793 } 6794 return err; 6795 } 6796 6797 static unsigned long raid5_cache_scan(struct shrinker *shrink, 6798 struct shrink_control *sc) 6799 { 6800 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); 6801 unsigned long ret = SHRINK_STOP; 6802 6803 if (mutex_trylock(&conf->cache_size_mutex)) { 6804 ret= 0; 6805 while (ret < sc->nr_to_scan && 6806 conf->max_nr_stripes > conf->min_nr_stripes) { 6807 if (drop_one_stripe(conf) == 0) { 6808 ret = SHRINK_STOP; 6809 break; 6810 } 6811 ret++; 6812 } 6813 mutex_unlock(&conf->cache_size_mutex); 6814 } 6815 return ret; 6816 } 6817 6818 static unsigned long raid5_cache_count(struct shrinker *shrink, 6819 struct shrink_control *sc) 6820 { 6821 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); 6822 6823 if (conf->max_nr_stripes < conf->min_nr_stripes) 6824 /* unlikely, but not impossible */ 6825 return 0; 6826 return conf->max_nr_stripes - conf->min_nr_stripes; 6827 } 6828 6829 static struct r5conf *setup_conf(struct mddev *mddev) 6830 { 6831 struct r5conf *conf; 6832 int raid_disk, memory, max_disks; 6833 struct md_rdev *rdev; 6834 struct disk_info *disk; 6835 char pers_name[6]; 6836 int i; 6837 int 
group_cnt, worker_cnt_per_group; 6838 struct r5worker_group *new_group; 6839 6840 if (mddev->new_level != 5 6841 && mddev->new_level != 4 6842 && mddev->new_level != 6) { 6843 pr_warn("md/raid:%s: raid level not set to 4/5/6 (%d)\n", 6844 mdname(mddev), mddev->new_level); 6845 return ERR_PTR(-EIO); 6846 } 6847 if ((mddev->new_level == 5 6848 && !algorithm_valid_raid5(mddev->new_layout)) || 6849 (mddev->new_level == 6 6850 && !algorithm_valid_raid6(mddev->new_layout))) { 6851 pr_warn("md/raid:%s: layout %d not supported\n", 6852 mdname(mddev), mddev->new_layout); 6853 return ERR_PTR(-EIO); 6854 } 6855 if (mddev->new_level == 6 && mddev->raid_disks < 4) { 6856 pr_warn("md/raid:%s: not enough configured devices (%d, minimum 4)\n", 6857 mdname(mddev), mddev->raid_disks); 6858 return ERR_PTR(-EINVAL); 6859 } 6860 6861 if (!mddev->new_chunk_sectors || 6862 (mddev->new_chunk_sectors << 9) % PAGE_SIZE || 6863 !is_power_of_2(mddev->new_chunk_sectors)) { 6864 pr_warn("md/raid:%s: invalid chunk size %d\n", 6865 mdname(mddev), mddev->new_chunk_sectors << 9); 6866 return ERR_PTR(-EINVAL); 6867 } 6868 6869 conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL); 6870 if (conf == NULL) 6871 goto abort; 6872 INIT_LIST_HEAD(&conf->free_list); 6873 INIT_LIST_HEAD(&conf->pending_list); 6874 conf->pending_data = kzalloc(sizeof(struct r5pending_data) * 6875 PENDING_IO_MAX, GFP_KERNEL); 6876 if (!conf->pending_data) 6877 goto abort; 6878 for (i = 0; i < PENDING_IO_MAX; i++) 6879 list_add(&conf->pending_data[i].sibling, &conf->free_list); 6880 /* Don't enable multi-threading by default*/ 6881 if (!alloc_thread_groups(conf, 0, &group_cnt, &worker_cnt_per_group, 6882 &new_group)) { 6883 conf->group_cnt = group_cnt; 6884 conf->worker_cnt_per_group = worker_cnt_per_group; 6885 conf->worker_groups = new_group; 6886 } else 6887 goto abort; 6888 spin_lock_init(&conf->device_lock); 6889 seqcount_init(&conf->gen_lock); 6890 mutex_init(&conf->cache_size_mutex); 6891 init_waitqueue_head(&conf->wait_for_quiescent); 6892 init_waitqueue_head(&conf->wait_for_stripe); 6893 init_waitqueue_head(&conf->wait_for_overlap); 6894 INIT_LIST_HEAD(&conf->handle_list); 6895 INIT_LIST_HEAD(&conf->loprio_list); 6896 INIT_LIST_HEAD(&conf->hold_list); 6897 INIT_LIST_HEAD(&conf->delayed_list); 6898 INIT_LIST_HEAD(&conf->bitmap_list); 6899 init_llist_head(&conf->released_stripes); 6900 atomic_set(&conf->active_stripes, 0); 6901 atomic_set(&conf->preread_active_stripes, 0); 6902 atomic_set(&conf->active_aligned_reads, 0); 6903 spin_lock_init(&conf->pending_bios_lock); 6904 conf->batch_bio_dispatch = true; 6905 rdev_for_each(rdev, mddev) { 6906 if (test_bit(Journal, &rdev->flags)) 6907 continue; 6908 if (blk_queue_nonrot(bdev_get_queue(rdev->bdev))) { 6909 conf->batch_bio_dispatch = false; 6910 break; 6911 } 6912 } 6913 6914 conf->bypass_threshold = BYPASS_THRESHOLD; 6915 conf->recovery_disabled = mddev->recovery_disabled - 1; 6916 6917 conf->raid_disks = mddev->raid_disks; 6918 if (mddev->reshape_position == MaxSector) 6919 conf->previous_raid_disks = mddev->raid_disks; 6920 else 6921 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; 6922 max_disks = max(conf->raid_disks, conf->previous_raid_disks); 6923 6924 conf->disks = kzalloc(max_disks * sizeof(struct disk_info), 6925 GFP_KERNEL); 6926 6927 if (!conf->disks) 6928 goto abort; 6929 6930 for (i = 0; i < max_disks; i++) { 6931 conf->disks[i].extra_page = alloc_page(GFP_KERNEL); 6932 if (!conf->disks[i].extra_page) 6933 goto abort; 6934 } 6935 6936 conf->bio_split = 
bioset_create(BIO_POOL_SIZE, 0); 6937 if (!conf->bio_split) 6938 goto abort; 6939 conf->mddev = mddev; 6940 6941 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) 6942 goto abort; 6943 6944 /* We init hash_locks[0] separately to that it can be used 6945 * as the reference lock in the spin_lock_nest_lock() call 6946 * in lock_all_device_hash_locks_irq in order to convince 6947 * lockdep that we know what we are doing. 6948 */ 6949 spin_lock_init(conf->hash_locks); 6950 for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++) 6951 spin_lock_init(conf->hash_locks + i); 6952 6953 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) 6954 INIT_LIST_HEAD(conf->inactive_list + i); 6955 6956 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) 6957 INIT_LIST_HEAD(conf->temp_inactive_list + i); 6958 6959 atomic_set(&conf->r5c_cached_full_stripes, 0); 6960 INIT_LIST_HEAD(&conf->r5c_full_stripe_list); 6961 atomic_set(&conf->r5c_cached_partial_stripes, 0); 6962 INIT_LIST_HEAD(&conf->r5c_partial_stripe_list); 6963 atomic_set(&conf->r5c_flushing_full_stripes, 0); 6964 atomic_set(&conf->r5c_flushing_partial_stripes, 0); 6965 6966 conf->level = mddev->new_level; 6967 conf->chunk_sectors = mddev->new_chunk_sectors; 6968 if (raid5_alloc_percpu(conf) != 0) 6969 goto abort; 6970 6971 pr_debug("raid456: run(%s) called.\n", mdname(mddev)); 6972 6973 rdev_for_each(rdev, mddev) { 6974 raid_disk = rdev->raid_disk; 6975 if (raid_disk >= max_disks 6976 || raid_disk < 0 || test_bit(Journal, &rdev->flags)) 6977 continue; 6978 disk = conf->disks + raid_disk; 6979 6980 if (test_bit(Replacement, &rdev->flags)) { 6981 if (disk->replacement) 6982 goto abort; 6983 disk->replacement = rdev; 6984 } else { 6985 if (disk->rdev) 6986 goto abort; 6987 disk->rdev = rdev; 6988 } 6989 6990 if (test_bit(In_sync, &rdev->flags)) { 6991 char b[BDEVNAME_SIZE]; 6992 pr_info("md/raid:%s: device %s operational as raid disk %d\n", 6993 mdname(mddev), bdevname(rdev->bdev, b), raid_disk); 6994 } else if (rdev->saved_raid_disk != raid_disk) 6995 /* Cannot rely on bitmap to complete recovery */ 6996 conf->fullsync = 1; 6997 } 6998 6999 conf->level = mddev->new_level; 7000 if (conf->level == 6) { 7001 conf->max_degraded = 2; 7002 if (raid6_call.xor_syndrome) 7003 conf->rmw_level = PARITY_ENABLE_RMW; 7004 else 7005 conf->rmw_level = PARITY_DISABLE_RMW; 7006 } else { 7007 conf->max_degraded = 1; 7008 conf->rmw_level = PARITY_ENABLE_RMW; 7009 } 7010 conf->algorithm = mddev->new_layout; 7011 conf->reshape_progress = mddev->reshape_position; 7012 if (conf->reshape_progress != MaxSector) { 7013 conf->prev_chunk_sectors = mddev->chunk_sectors; 7014 conf->prev_algo = mddev->layout; 7015 } else { 7016 conf->prev_chunk_sectors = conf->chunk_sectors; 7017 conf->prev_algo = conf->algorithm; 7018 } 7019 7020 conf->min_nr_stripes = NR_STRIPES; 7021 if (mddev->reshape_position != MaxSector) { 7022 int stripes = max_t(int, 7023 ((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4, 7024 ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4); 7025 conf->min_nr_stripes = max(NR_STRIPES, stripes); 7026 if (conf->min_nr_stripes != NR_STRIPES) 7027 pr_info("md/raid:%s: force stripe size %d for reshape\n", 7028 mdname(mddev), conf->min_nr_stripes); 7029 } 7030 memory = conf->min_nr_stripes * (sizeof(struct stripe_head) + 7031 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; 7032 atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS); 7033 if (grow_stripes(conf, conf->min_nr_stripes)) { 7034 pr_warn("md/raid:%s: couldn't allocate %dkB for buffers\n", 7035 mdname(mddev), 
memory); 7036 goto abort; 7037 } else 7038 pr_debug("md/raid:%s: allocated %dkB\n", mdname(mddev), memory); 7039 /* 7040 * Losing a stripe head costs more than the time to refill it, 7041 * it reduces the queue depth and so can hurt throughput. 7042 * So set it rather large, scaled by number of devices. 7043 */ 7044 conf->shrinker.seeks = DEFAULT_SEEKS * conf->raid_disks * 4; 7045 conf->shrinker.scan_objects = raid5_cache_scan; 7046 conf->shrinker.count_objects = raid5_cache_count; 7047 conf->shrinker.batch = 128; 7048 conf->shrinker.flags = 0; 7049 if (register_shrinker(&conf->shrinker)) { 7050 pr_warn("md/raid:%s: couldn't register shrinker.\n", 7051 mdname(mddev)); 7052 goto abort; 7053 } 7054 7055 sprintf(pers_name, "raid%d", mddev->new_level); 7056 conf->thread = md_register_thread(raid5d, mddev, pers_name); 7057 if (!conf->thread) { 7058 pr_warn("md/raid:%s: couldn't allocate thread.\n", 7059 mdname(mddev)); 7060 goto abort; 7061 } 7062 7063 return conf; 7064 7065 abort: 7066 if (conf) { 7067 free_conf(conf); 7068 return ERR_PTR(-EIO); 7069 } else 7070 return ERR_PTR(-ENOMEM); 7071 } 7072 7073 static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded) 7074 { 7075 switch (algo) { 7076 case ALGORITHM_PARITY_0: 7077 if (raid_disk < max_degraded) 7078 return 1; 7079 break; 7080 case ALGORITHM_PARITY_N: 7081 if (raid_disk >= raid_disks - max_degraded) 7082 return 1; 7083 break; 7084 case ALGORITHM_PARITY_0_6: 7085 if (raid_disk == 0 || 7086 raid_disk == raid_disks - 1) 7087 return 1; 7088 break; 7089 case ALGORITHM_LEFT_ASYMMETRIC_6: 7090 case ALGORITHM_RIGHT_ASYMMETRIC_6: 7091 case ALGORITHM_LEFT_SYMMETRIC_6: 7092 case ALGORITHM_RIGHT_SYMMETRIC_6: 7093 if (raid_disk == raid_disks - 1) 7094 return 1; 7095 } 7096 return 0; 7097 } 7098 7099 static int raid5_run(struct mddev *mddev) 7100 { 7101 struct r5conf *conf; 7102 int working_disks = 0; 7103 int dirty_parity_disks = 0; 7104 struct md_rdev *rdev; 7105 struct md_rdev *journal_dev = NULL; 7106 sector_t reshape_offset = 0; 7107 int i; 7108 long long min_offset_diff = 0; 7109 int first = 1; 7110 7111 if (mddev->recovery_cp != MaxSector) 7112 pr_notice("md/raid:%s: not clean -- starting background reconstruction\n", 7113 mdname(mddev)); 7114 7115 rdev_for_each(rdev, mddev) { 7116 long long diff; 7117 7118 if (test_bit(Journal, &rdev->flags)) { 7119 journal_dev = rdev; 7120 continue; 7121 } 7122 if (rdev->raid_disk < 0) 7123 continue; 7124 diff = (rdev->new_data_offset - rdev->data_offset); 7125 if (first) { 7126 min_offset_diff = diff; 7127 first = 0; 7128 } else if (mddev->reshape_backwards && 7129 diff < min_offset_diff) 7130 min_offset_diff = diff; 7131 else if (!mddev->reshape_backwards && 7132 diff > min_offset_diff) 7133 min_offset_diff = diff; 7134 } 7135 7136 if (mddev->reshape_position != MaxSector) { 7137 /* Check that we can continue the reshape. 7138 * Difficulties arise if the stripe we would write to 7139 * next is at or after the stripe we would read from next. 7140 * For a reshape that changes the number of devices, this 7141 * is only possible for a very short time, and mdadm makes 7142 * sure that time appears to have passed before assembling 7143 * the array. So we fail if that time hasn't passed. 7144 * For a reshape that keeps the number of devices the same 7145 * mdadm must be monitoring the reshape and keeping the 7146 * critical areas read-only and backed up. It will start 7147 * the array in read-only mode, so we check for that. 
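 *
 * For example, growing a 4-disk raid5 (3 data disks) to 5 disks
 * (4 data disks): the device address being written is roughly
 * reshape_progress / 4 while the address still to be read is
 * reshape_progress / 3, so the write point only overlaps the read
 * point during the first few stripes of the reshape.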
7148 */ 7149 sector_t here_new, here_old; 7150 int old_disks; 7151 int max_degraded = (mddev->level == 6 ? 2 : 1); 7152 int chunk_sectors; 7153 int new_data_disks; 7154 7155 if (journal_dev) { 7156 pr_warn("md/raid:%s: don't support reshape with journal - aborting.\n", 7157 mdname(mddev)); 7158 return -EINVAL; 7159 } 7160 7161 if (mddev->new_level != mddev->level) { 7162 pr_warn("md/raid:%s: unsupported reshape required - aborting.\n", 7163 mdname(mddev)); 7164 return -EINVAL; 7165 } 7166 old_disks = mddev->raid_disks - mddev->delta_disks; 7167 /* reshape_position must be on a new-stripe boundary, and one 7168 * further up in new geometry must map after here in old 7169 * geometry. 7170 * If the chunk sizes are different, then as we perform reshape 7171 * in units of the largest of the two, reshape_position needs to 7172 * be a multiple of the largest chunk size times new data disks. 7173 */ 7174 here_new = mddev->reshape_position; 7175 chunk_sectors = max(mddev->chunk_sectors, mddev->new_chunk_sectors); 7176 new_data_disks = mddev->raid_disks - max_degraded; 7177 if (sector_div(here_new, chunk_sectors * new_data_disks)) { 7178 pr_warn("md/raid:%s: reshape_position not on a stripe boundary\n", 7179 mdname(mddev)); 7180 return -EINVAL; 7181 } 7182 reshape_offset = here_new * chunk_sectors; 7183 /* here_new is the stripe we will write to */ 7184 here_old = mddev->reshape_position; 7185 sector_div(here_old, chunk_sectors * (old_disks-max_degraded)); 7186 /* here_old is the first stripe that we might need to read 7187 * from */ 7188 if (mddev->delta_disks == 0) { 7189 /* We cannot be sure it is safe to start an in-place 7190 * reshape. It is only safe if user-space is monitoring 7191 * and taking constant backups. 7192 * mdadm always starts a situation like this in 7193 * readonly mode so it can take control before 7194 * allowing any writes. So just check for that. 7195 */ 7196 if (abs(min_offset_diff) >= mddev->chunk_sectors && 7197 abs(min_offset_diff) >= mddev->new_chunk_sectors) 7198 /* not really in-place - so OK */; 7199 else if (mddev->ro == 0) { 7200 pr_warn("md/raid:%s: in-place reshape must be started in read-only mode - aborting\n", 7201 mdname(mddev)); 7202 return -EINVAL; 7203 } 7204 } else if (mddev->reshape_backwards 7205 ? 
(here_new * chunk_sectors + min_offset_diff <= 7206 here_old * chunk_sectors) 7207 : (here_new * chunk_sectors >= 7208 here_old * chunk_sectors + (-min_offset_diff))) { 7209 /* Reading from the same stripe as writing to - bad */ 7210 pr_warn("md/raid:%s: reshape_position too early for auto-recovery - aborting.\n", 7211 mdname(mddev)); 7212 return -EINVAL; 7213 } 7214 pr_debug("md/raid:%s: reshape will continue\n", mdname(mddev)); 7215 /* OK, we should be able to continue; */ 7216 } else { 7217 BUG_ON(mddev->level != mddev->new_level); 7218 BUG_ON(mddev->layout != mddev->new_layout); 7219 BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors); 7220 BUG_ON(mddev->delta_disks != 0); 7221 } 7222 7223 if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && 7224 test_bit(MD_HAS_PPL, &mddev->flags)) { 7225 pr_warn("md/raid:%s: using journal device and PPL not allowed - disabling PPL\n", 7226 mdname(mddev)); 7227 clear_bit(MD_HAS_PPL, &mddev->flags); 7228 } 7229 7230 if (mddev->private == NULL) 7231 conf = setup_conf(mddev); 7232 else 7233 conf = mddev->private; 7234 7235 if (IS_ERR(conf)) 7236 return PTR_ERR(conf); 7237 7238 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) { 7239 if (!journal_dev) { 7240 pr_warn("md/raid:%s: journal disk is missing, force array readonly\n", 7241 mdname(mddev)); 7242 mddev->ro = 1; 7243 set_disk_ro(mddev->gendisk, 1); 7244 } else if (mddev->recovery_cp == MaxSector) 7245 set_bit(MD_JOURNAL_CLEAN, &mddev->flags); 7246 } 7247 7248 conf->min_offset_diff = min_offset_diff; 7249 mddev->thread = conf->thread; 7250 conf->thread = NULL; 7251 mddev->private = conf; 7252 7253 for (i = 0; i < conf->raid_disks && conf->previous_raid_disks; 7254 i++) { 7255 rdev = conf->disks[i].rdev; 7256 if (!rdev && conf->disks[i].replacement) { 7257 /* The replacement is all we have yet */ 7258 rdev = conf->disks[i].replacement; 7259 conf->disks[i].replacement = NULL; 7260 clear_bit(Replacement, &rdev->flags); 7261 conf->disks[i].rdev = rdev; 7262 } 7263 if (!rdev) 7264 continue; 7265 if (conf->disks[i].replacement && 7266 conf->reshape_progress != MaxSector) { 7267 /* replacements and reshape simply do not mix. */ 7268 pr_warn("md: cannot handle concurrent replacement and reshape.\n"); 7269 goto abort; 7270 } 7271 if (test_bit(In_sync, &rdev->flags)) { 7272 working_disks++; 7273 continue; 7274 } 7275 /* This disc is not fully in-sync. However if it 7276 * just stored parity (beyond the recovery_offset), 7277 * then we don't need to be concerned about the 7278 * array being dirty. 7279 * When reshape goes 'backwards', we never have 7280 * partially completed devices, so we only need 7281 * to worry about reshape going forwards. 7282 */ 7283 /* Hack because v0.91 doesn't store recovery_offset properly. */ 7284 if (mddev->major_version == 0 && 7285 mddev->minor_version > 90) 7286 rdev->recovery_offset = reshape_offset; 7287 7288 if (rdev->recovery_offset < reshape_offset) { 7289 /* We need to check old and new layout */ 7290 if (!only_parity(rdev->raid_disk, 7291 conf->algorithm, 7292 conf->raid_disks, 7293 conf->max_degraded)) 7294 continue; 7295 } 7296 if (!only_parity(rdev->raid_disk, 7297 conf->prev_algo, 7298 conf->previous_raid_disks, 7299 conf->max_degraded)) 7300 continue; 7301 dirty_parity_disks++; 7302 } 7303 7304 /* 7305 * 0 for a fully functional array, 1 or 2 for a degraded array. 
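 * (2 is only possible for raid6, where max_degraded is 2; a raid4/5
 * array that has lost two devices has failed.)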
7306 */ 7307 mddev->degraded = raid5_calc_degraded(conf); 7308 7309 if (has_failed(conf)) { 7310 pr_crit("md/raid:%s: not enough operational devices (%d/%d failed)\n", 7311 mdname(mddev), mddev->degraded, conf->raid_disks); 7312 goto abort; 7313 } 7314 7315 /* device size must be a multiple of chunk size */ 7316 mddev->dev_sectors &= ~(mddev->chunk_sectors - 1); 7317 mddev->resync_max_sectors = mddev->dev_sectors; 7318 7319 if (mddev->degraded > dirty_parity_disks && 7320 mddev->recovery_cp != MaxSector) { 7321 if (test_bit(MD_HAS_PPL, &mddev->flags)) 7322 pr_crit("md/raid:%s: starting dirty degraded array with PPL.\n", 7323 mdname(mddev)); 7324 else if (mddev->ok_start_degraded) 7325 pr_crit("md/raid:%s: starting dirty degraded array - data corruption possible.\n", 7326 mdname(mddev)); 7327 else { 7328 pr_crit("md/raid:%s: cannot start dirty degraded array.\n", 7329 mdname(mddev)); 7330 goto abort; 7331 } 7332 } 7333 7334 pr_info("md/raid:%s: raid level %d active with %d out of %d devices, algorithm %d\n", 7335 mdname(mddev), conf->level, 7336 mddev->raid_disks-mddev->degraded, mddev->raid_disks, 7337 mddev->new_layout); 7338 7339 print_raid5_conf(conf); 7340 7341 if (conf->reshape_progress != MaxSector) { 7342 conf->reshape_safe = conf->reshape_progress; 7343 atomic_set(&conf->reshape_stripes, 0); 7344 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 7345 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 7346 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 7347 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 7348 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 7349 "reshape"); 7350 } 7351 7352 /* Ok, everything is just fine now */ 7353 if (mddev->to_remove == &raid5_attrs_group) 7354 mddev->to_remove = NULL; 7355 else if (mddev->kobj.sd && 7356 sysfs_create_group(&mddev->kobj, &raid5_attrs_group)) 7357 pr_warn("raid5: failed to create sysfs attributes for %s\n", 7358 mdname(mddev)); 7359 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); 7360 7361 if (mddev->queue) { 7362 int chunk_size; 7363 /* read-ahead size must cover two whole stripes, which 7364 * is 2 * (datadisks) * chunksize where 'n' is the 7365 * number of raid devices 7366 */ 7367 int data_disks = conf->previous_raid_disks - conf->max_degraded; 7368 int stripe = data_disks * 7369 ((mddev->chunk_sectors << 9) / PAGE_SIZE); 7370 if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe) 7371 mddev->queue->backing_dev_info->ra_pages = 2 * stripe; 7372 7373 chunk_size = mddev->chunk_sectors << 9; 7374 blk_queue_io_min(mddev->queue, chunk_size); 7375 blk_queue_io_opt(mddev->queue, chunk_size * 7376 (conf->raid_disks - conf->max_degraded)); 7377 mddev->queue->limits.raid_partial_stripes_expensive = 1; 7378 /* 7379 * We can only discard a whole stripe. 
It doesn't make sense to 7380 * discard data disk but write parity disk 7381 */ 7382 stripe = stripe * PAGE_SIZE; 7383 /* Round up to power of 2, as discard handling 7384 * currently assumes that */ 7385 while ((stripe-1) & stripe) 7386 stripe = (stripe | (stripe-1)) + 1; 7387 mddev->queue->limits.discard_alignment = stripe; 7388 mddev->queue->limits.discard_granularity = stripe; 7389 7390 blk_queue_max_write_same_sectors(mddev->queue, 0); 7391 blk_queue_max_write_zeroes_sectors(mddev->queue, 0); 7392 7393 rdev_for_each(rdev, mddev) { 7394 disk_stack_limits(mddev->gendisk, rdev->bdev, 7395 rdev->data_offset << 9); 7396 disk_stack_limits(mddev->gendisk, rdev->bdev, 7397 rdev->new_data_offset << 9); 7398 } 7399 7400 /* 7401 * zeroing is required, otherwise data 7402 * could be lost. Consider a scenario: discard a stripe 7403 * (the stripe could be inconsistent if 7404 * discard_zeroes_data is 0); write one disk of the 7405 * stripe (the stripe could be inconsistent again 7406 * depending on which disks are used to calculate 7407 * parity); the disk is broken; The stripe data of this 7408 * disk is lost. 7409 * 7410 * We only allow DISCARD if the sysadmin has confirmed that 7411 * only safe devices are in use by setting a module parameter. 7412 * A better idea might be to turn DISCARD into WRITE_ZEROES 7413 * requests, as that is required to be safe. 7414 */ 7415 if (devices_handle_discard_safely && 7416 mddev->queue->limits.max_discard_sectors >= (stripe >> 9) && 7417 mddev->queue->limits.discard_granularity >= stripe) 7418 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, 7419 mddev->queue); 7420 else 7421 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, 7422 mddev->queue); 7423 7424 blk_queue_max_hw_sectors(mddev->queue, UINT_MAX); 7425 } 7426 7427 if (log_init(conf, journal_dev, raid5_has_ppl(conf))) 7428 goto abort; 7429 7430 return 0; 7431 abort: 7432 md_unregister_thread(&mddev->thread); 7433 print_raid5_conf(conf); 7434 free_conf(conf); 7435 mddev->private = NULL; 7436 pr_warn("md/raid:%s: failed to run raid set.\n", mdname(mddev)); 7437 return -EIO; 7438 } 7439 7440 static void raid5_free(struct mddev *mddev, void *priv) 7441 { 7442 struct r5conf *conf = priv; 7443 7444 free_conf(conf); 7445 mddev->to_remove = &raid5_attrs_group; 7446 } 7447 7448 static void raid5_status(struct seq_file *seq, struct mddev *mddev) 7449 { 7450 struct r5conf *conf = mddev->private; 7451 int i; 7452 7453 seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level, 7454 conf->chunk_sectors / 2, mddev->layout); 7455 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); 7456 rcu_read_lock(); 7457 for (i = 0; i < conf->raid_disks; i++) { 7458 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); 7459 seq_printf (seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? 
"U" : "_"); 7460 } 7461 rcu_read_unlock(); 7462 seq_printf (seq, "]"); 7463 } 7464 7465 static void print_raid5_conf (struct r5conf *conf) 7466 { 7467 int i; 7468 struct disk_info *tmp; 7469 7470 pr_debug("RAID conf printout:\n"); 7471 if (!conf) { 7472 pr_debug("(conf==NULL)\n"); 7473 return; 7474 } 7475 pr_debug(" --- level:%d rd:%d wd:%d\n", conf->level, 7476 conf->raid_disks, 7477 conf->raid_disks - conf->mddev->degraded); 7478 7479 for (i = 0; i < conf->raid_disks; i++) { 7480 char b[BDEVNAME_SIZE]; 7481 tmp = conf->disks + i; 7482 if (tmp->rdev) 7483 pr_debug(" disk %d, o:%d, dev:%s\n", 7484 i, !test_bit(Faulty, &tmp->rdev->flags), 7485 bdevname(tmp->rdev->bdev, b)); 7486 } 7487 } 7488 7489 static int raid5_spare_active(struct mddev *mddev) 7490 { 7491 int i; 7492 struct r5conf *conf = mddev->private; 7493 struct disk_info *tmp; 7494 int count = 0; 7495 unsigned long flags; 7496 7497 for (i = 0; i < conf->raid_disks; i++) { 7498 tmp = conf->disks + i; 7499 if (tmp->replacement 7500 && tmp->replacement->recovery_offset == MaxSector 7501 && !test_bit(Faulty, &tmp->replacement->flags) 7502 && !test_and_set_bit(In_sync, &tmp->replacement->flags)) { 7503 /* Replacement has just become active. */ 7504 if (!tmp->rdev 7505 || !test_and_clear_bit(In_sync, &tmp->rdev->flags)) 7506 count++; 7507 if (tmp->rdev) { 7508 /* Replaced device not technically faulty, 7509 * but we need to be sure it gets removed 7510 * and never re-added. 7511 */ 7512 set_bit(Faulty, &tmp->rdev->flags); 7513 sysfs_notify_dirent_safe( 7514 tmp->rdev->sysfs_state); 7515 } 7516 sysfs_notify_dirent_safe(tmp->replacement->sysfs_state); 7517 } else if (tmp->rdev 7518 && tmp->rdev->recovery_offset == MaxSector 7519 && !test_bit(Faulty, &tmp->rdev->flags) 7520 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { 7521 count++; 7522 sysfs_notify_dirent_safe(tmp->rdev->sysfs_state); 7523 } 7524 } 7525 spin_lock_irqsave(&conf->device_lock, flags); 7526 mddev->degraded = raid5_calc_degraded(conf); 7527 spin_unlock_irqrestore(&conf->device_lock, flags); 7528 print_raid5_conf(conf); 7529 return count; 7530 } 7531 7532 static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev) 7533 { 7534 struct r5conf *conf = mddev->private; 7535 int err = 0; 7536 int number = rdev->raid_disk; 7537 struct md_rdev **rdevp; 7538 struct disk_info *p = conf->disks + number; 7539 7540 print_raid5_conf(conf); 7541 if (test_bit(Journal, &rdev->flags) && conf->log) { 7542 /* 7543 * we can't wait pending write here, as this is called in 7544 * raid5d, wait will deadlock. 7545 * neilb: there is no locking about new writes here, 7546 * so this cannot be safe. 7547 */ 7548 if (atomic_read(&conf->active_stripes) || 7549 atomic_read(&conf->r5c_cached_full_stripes) || 7550 atomic_read(&conf->r5c_cached_partial_stripes)) { 7551 return -EBUSY; 7552 } 7553 log_exit(conf); 7554 return 0; 7555 } 7556 if (rdev == p->rdev) 7557 rdevp = &p->rdev; 7558 else if (rdev == p->replacement) 7559 rdevp = &p->replacement; 7560 else 7561 return 0; 7562 7563 if (number >= conf->raid_disks && 7564 conf->reshape_progress == MaxSector) 7565 clear_bit(In_sync, &rdev->flags); 7566 7567 if (test_bit(In_sync, &rdev->flags) || 7568 atomic_read(&rdev->nr_pending)) { 7569 err = -EBUSY; 7570 goto abort; 7571 } 7572 /* Only remove non-faulty devices if recovery 7573 * isn't possible. 
7574 */ 7575 if (!test_bit(Faulty, &rdev->flags) && 7576 mddev->recovery_disabled != conf->recovery_disabled && 7577 !has_failed(conf) && 7578 (!p->replacement || p->replacement == rdev) && 7579 number < conf->raid_disks) { 7580 err = -EBUSY; 7581 goto abort; 7582 } 7583 *rdevp = NULL; 7584 if (!test_bit(RemoveSynchronized, &rdev->flags)) { 7585 synchronize_rcu(); 7586 if (atomic_read(&rdev->nr_pending)) { 7587 /* lost the race, try later */ 7588 err = -EBUSY; 7589 *rdevp = rdev; 7590 } 7591 } 7592 if (!err) { 7593 err = log_modify(conf, rdev, false); 7594 if (err) 7595 goto abort; 7596 } 7597 if (p->replacement) { 7598 /* We must have just cleared 'rdev' */ 7599 p->rdev = p->replacement; 7600 clear_bit(Replacement, &p->replacement->flags); 7601 smp_mb(); /* Make sure other CPUs may see both as identical 7602 * but will never see neither - if they are careful 7603 */ 7604 p->replacement = NULL; 7605 7606 if (!err) 7607 err = log_modify(conf, p->rdev, true); 7608 } 7609 7610 clear_bit(WantReplacement, &rdev->flags); 7611 abort: 7612 7613 print_raid5_conf(conf); 7614 return err; 7615 } 7616 7617 static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) 7618 { 7619 struct r5conf *conf = mddev->private; 7620 int err = -EEXIST; 7621 int disk; 7622 struct disk_info *p; 7623 int first = 0; 7624 int last = conf->raid_disks - 1; 7625 7626 if (test_bit(Journal, &rdev->flags)) { 7627 if (conf->log) 7628 return -EBUSY; 7629 7630 rdev->raid_disk = 0; 7631 /* 7632 * The array is in readonly mode if journal is missing, so no 7633 * write requests running. We should be safe 7634 */ 7635 log_init(conf, rdev, false); 7636 return 0; 7637 } 7638 if (mddev->recovery_disabled == conf->recovery_disabled) 7639 return -EBUSY; 7640 7641 if (rdev->saved_raid_disk < 0 && has_failed(conf)) 7642 /* no point adding a device */ 7643 return -EINVAL; 7644 7645 if (rdev->raid_disk >= 0) 7646 first = last = rdev->raid_disk; 7647 7648 /* 7649 * find the disk ... but prefer rdev->saved_raid_disk 7650 * if possible. 7651 */ 7652 if (rdev->saved_raid_disk >= 0 && 7653 rdev->saved_raid_disk >= first && 7654 conf->disks[rdev->saved_raid_disk].rdev == NULL) 7655 first = rdev->saved_raid_disk; 7656 7657 for (disk = first; disk <= last; disk++) { 7658 p = conf->disks + disk; 7659 if (p->rdev == NULL) { 7660 clear_bit(In_sync, &rdev->flags); 7661 rdev->raid_disk = disk; 7662 if (rdev->saved_raid_disk != disk) 7663 conf->fullsync = 1; 7664 rcu_assign_pointer(p->rdev, rdev); 7665 7666 err = log_modify(conf, rdev, true); 7667 7668 goto out; 7669 } 7670 } 7671 for (disk = first; disk <= last; disk++) { 7672 p = conf->disks + disk; 7673 if (test_bit(WantReplacement, &p->rdev->flags) && 7674 p->replacement == NULL) { 7675 clear_bit(In_sync, &rdev->flags); 7676 set_bit(Replacement, &rdev->flags); 7677 rdev->raid_disk = disk; 7678 err = 0; 7679 conf->fullsync = 1; 7680 rcu_assign_pointer(p->replacement, rdev); 7681 break; 7682 } 7683 } 7684 out: 7685 print_raid5_conf(conf); 7686 return err; 7687 } 7688 7689 static int raid5_resize(struct mddev *mddev, sector_t sectors) 7690 { 7691 /* no resync is happening, and there is enough space 7692 * on all devices, so we can resize. 7693 * We need to make sure resync covers any new space. 7694 * If the array is shrinking we should possibly wait until 7695 * any io in the removed space completes, but it hardly seems 7696 * worth it. 
7697 */ 7698 sector_t newsize; 7699 struct r5conf *conf = mddev->private; 7700 7701 if (conf->log || raid5_has_ppl(conf)) 7702 return -EINVAL; 7703 sectors &= ~((sector_t)conf->chunk_sectors - 1); 7704 newsize = raid5_size(mddev, sectors, mddev->raid_disks); 7705 if (mddev->external_size && 7706 mddev->array_sectors > newsize) 7707 return -EINVAL; 7708 if (mddev->bitmap) { 7709 int ret = bitmap_resize(mddev->bitmap, sectors, 0, 0); 7710 if (ret) 7711 return ret; 7712 } 7713 md_set_array_sectors(mddev, newsize); 7714 if (sectors > mddev->dev_sectors && 7715 mddev->recovery_cp > mddev->dev_sectors) { 7716 mddev->recovery_cp = mddev->dev_sectors; 7717 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7718 } 7719 mddev->dev_sectors = sectors; 7720 mddev->resync_max_sectors = sectors; 7721 return 0; 7722 } 7723 7724 static int check_stripe_cache(struct mddev *mddev) 7725 { 7726 /* Can only proceed if there are plenty of stripe_heads. 7727 * We need a minimum of one full stripe,, and for sensible progress 7728 * it is best to have about 4 times that. 7729 * If we require 4 times, then the default 256 4K stripe_heads will 7730 * allow for chunk sizes up to 256K, which is probably OK. 7731 * If the chunk size is greater, user-space should request more 7732 * stripe_heads first. 7733 */ 7734 struct r5conf *conf = mddev->private; 7735 if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4 7736 > conf->min_nr_stripes || 7737 ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4 7738 > conf->min_nr_stripes) { 7739 pr_warn("md/raid:%s: reshape: not enough stripes. Needed %lu\n", 7740 mdname(mddev), 7741 ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9) 7742 / STRIPE_SIZE)*4); 7743 return 0; 7744 } 7745 return 1; 7746 } 7747 7748 static int check_reshape(struct mddev *mddev) 7749 { 7750 struct r5conf *conf = mddev->private; 7751 7752 if (conf->log || raid5_has_ppl(conf)) 7753 return -EINVAL; 7754 if (mddev->delta_disks == 0 && 7755 mddev->new_layout == mddev->layout && 7756 mddev->new_chunk_sectors == mddev->chunk_sectors) 7757 return 0; /* nothing to do */ 7758 if (has_failed(conf)) 7759 return -EINVAL; 7760 if (mddev->delta_disks < 0 && mddev->reshape_position == MaxSector) { 7761 /* We might be able to shrink, but the devices must 7762 * be made bigger first. 7763 * For raid6, 4 is the minimum size. 
7764 * Otherwise 2 is the minimum 7765 */ 7766 int min = 2; 7767 if (mddev->level == 6) 7768 min = 4; 7769 if (mddev->raid_disks + mddev->delta_disks < min) 7770 return -EINVAL; 7771 } 7772 7773 if (!check_stripe_cache(mddev)) 7774 return -ENOSPC; 7775 7776 if (mddev->new_chunk_sectors > mddev->chunk_sectors || 7777 mddev->delta_disks > 0) 7778 if (resize_chunks(conf, 7779 conf->previous_raid_disks 7780 + max(0, mddev->delta_disks), 7781 max(mddev->new_chunk_sectors, 7782 mddev->chunk_sectors) 7783 ) < 0) 7784 return -ENOMEM; 7785 7786 if (conf->previous_raid_disks + mddev->delta_disks <= conf->pool_size) 7787 return 0; /* never bother to shrink */ 7788 return resize_stripes(conf, (conf->previous_raid_disks 7789 + mddev->delta_disks)); 7790 } 7791 7792 static int raid5_start_reshape(struct mddev *mddev) 7793 { 7794 struct r5conf *conf = mddev->private; 7795 struct md_rdev *rdev; 7796 int spares = 0; 7797 unsigned long flags; 7798 7799 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 7800 return -EBUSY; 7801 7802 if (!check_stripe_cache(mddev)) 7803 return -ENOSPC; 7804 7805 if (has_failed(conf)) 7806 return -EINVAL; 7807 7808 rdev_for_each(rdev, mddev) { 7809 if (!test_bit(In_sync, &rdev->flags) 7810 && !test_bit(Faulty, &rdev->flags)) 7811 spares++; 7812 } 7813 7814 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) 7815 /* Not enough devices even to make a degraded array 7816 * of that size 7817 */ 7818 return -EINVAL; 7819 7820 /* Refuse to reduce size of the array. Any reductions in 7821 * array size must be through explicit setting of array_size 7822 * attribute. 7823 */ 7824 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks) 7825 < mddev->array_sectors) { 7826 pr_warn("md/raid:%s: array size must be reduced before number of disks\n", 7827 mdname(mddev)); 7828 return -EINVAL; 7829 } 7830 7831 atomic_set(&conf->reshape_stripes, 0); 7832 spin_lock_irq(&conf->device_lock); 7833 write_seqcount_begin(&conf->gen_lock); 7834 conf->previous_raid_disks = conf->raid_disks; 7835 conf->raid_disks += mddev->delta_disks; 7836 conf->prev_chunk_sectors = conf->chunk_sectors; 7837 conf->chunk_sectors = mddev->new_chunk_sectors; 7838 conf->prev_algo = conf->algorithm; 7839 conf->algorithm = mddev->new_layout; 7840 conf->generation++; 7841 /* Code that selects data_offset needs to see the generation update 7842 * if reshape_progress has been set - so a memory barrier needed. 7843 */ 7844 smp_mb(); 7845 if (mddev->reshape_backwards) 7846 conf->reshape_progress = raid5_size(mddev, 0, 0); 7847 else 7848 conf->reshape_progress = 0; 7849 conf->reshape_safe = conf->reshape_progress; 7850 write_seqcount_end(&conf->gen_lock); 7851 spin_unlock_irq(&conf->device_lock); 7852 7853 /* Now make sure any requests that proceeded on the assumption 7854 * the reshape wasn't running - like Discard or Read - have 7855 * completed. 7856 */ 7857 mddev_suspend(mddev); 7858 mddev_resume(mddev); 7859 7860 /* Add some new drives, as many as will fit. 7861 * We know there are enough to make the newly sized array work. 7862 * Don't add devices if we are reducing the number of 7863 * devices in the array. This is because it is not possible 7864 * to correctly record the "partially reconstructed" state of 7865 * such devices during the reshape and confusion could result. 
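 * (In the loop below, a device placed at or beyond previous_raid_disks
 * is marked In_sync straight away: its contents will be written by the
 * reshape itself, so there is nothing old to recover.)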
7866 */ 7867 if (mddev->delta_disks >= 0) { 7868 rdev_for_each(rdev, mddev) 7869 if (rdev->raid_disk < 0 && 7870 !test_bit(Faulty, &rdev->flags)) { 7871 if (raid5_add_disk(mddev, rdev) == 0) { 7872 if (rdev->raid_disk 7873 >= conf->previous_raid_disks) 7874 set_bit(In_sync, &rdev->flags); 7875 else 7876 rdev->recovery_offset = 0; 7877 7878 if (sysfs_link_rdev(mddev, rdev)) 7879 /* Failure here is OK */; 7880 } 7881 } else if (rdev->raid_disk >= conf->previous_raid_disks 7882 && !test_bit(Faulty, &rdev->flags)) { 7883 /* This is a spare that was manually added */ 7884 set_bit(In_sync, &rdev->flags); 7885 } 7886 7887 /* When a reshape changes the number of devices, 7888 * ->degraded is measured against the larger of the 7889 * pre and post number of devices. 7890 */ 7891 spin_lock_irqsave(&conf->device_lock, flags); 7892 mddev->degraded = raid5_calc_degraded(conf); 7893 spin_unlock_irqrestore(&conf->device_lock, flags); 7894 } 7895 mddev->raid_disks = conf->raid_disks; 7896 mddev->reshape_position = conf->reshape_progress; 7897 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 7898 7899 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 7900 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 7901 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); 7902 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 7903 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 7904 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 7905 "reshape"); 7906 if (!mddev->sync_thread) { 7907 mddev->recovery = 0; 7908 spin_lock_irq(&conf->device_lock); 7909 write_seqcount_begin(&conf->gen_lock); 7910 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; 7911 mddev->new_chunk_sectors = 7912 conf->chunk_sectors = conf->prev_chunk_sectors; 7913 mddev->new_layout = conf->algorithm = conf->prev_algo; 7914 rdev_for_each(rdev, mddev) 7915 rdev->new_data_offset = rdev->data_offset; 7916 smp_wmb(); 7917 conf->generation --; 7918 conf->reshape_progress = MaxSector; 7919 mddev->reshape_position = MaxSector; 7920 write_seqcount_end(&conf->gen_lock); 7921 spin_unlock_irq(&conf->device_lock); 7922 return -EAGAIN; 7923 } 7924 conf->reshape_checkpoint = jiffies; 7925 md_wakeup_thread(mddev->sync_thread); 7926 md_new_event(mddev); 7927 return 0; 7928 } 7929 7930 /* This is called from the reshape thread and should make any 7931 * changes needed in 'conf' 7932 */ 7933 static void end_reshape(struct r5conf *conf) 7934 { 7935 7936 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { 7937 struct md_rdev *rdev; 7938 7939 spin_lock_irq(&conf->device_lock); 7940 conf->previous_raid_disks = conf->raid_disks; 7941 rdev_for_each(rdev, conf->mddev) 7942 rdev->data_offset = rdev->new_data_offset; 7943 smp_wmb(); 7944 conf->reshape_progress = MaxSector; 7945 conf->mddev->reshape_position = MaxSector; 7946 spin_unlock_irq(&conf->device_lock); 7947 wake_up(&conf->wait_for_overlap); 7948 7949 /* read-ahead size must cover two whole stripes, which is 7950 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices 7951 */ 7952 if (conf->mddev->queue) { 7953 int data_disks = conf->raid_disks - conf->max_degraded; 7954 int stripe = data_disks * ((conf->chunk_sectors << 9) 7955 / PAGE_SIZE); 7956 if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe) 7957 conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe; 7958 } 7959 } 7960 } 7961 7962 /* This is called from the raid5d thread with mddev_lock held. 7963 * It makes config changes to the device. 
7964 */ 7965 static void raid5_finish_reshape(struct mddev *mddev) 7966 { 7967 struct r5conf *conf = mddev->private; 7968 7969 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 7970 7971 if (mddev->delta_disks > 0) { 7972 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); 7973 if (mddev->queue) { 7974 set_capacity(mddev->gendisk, mddev->array_sectors); 7975 revalidate_disk(mddev->gendisk); 7976 } 7977 } else { 7978 int d; 7979 spin_lock_irq(&conf->device_lock); 7980 mddev->degraded = raid5_calc_degraded(conf); 7981 spin_unlock_irq(&conf->device_lock); 7982 for (d = conf->raid_disks ; 7983 d < conf->raid_disks - mddev->delta_disks; 7984 d++) { 7985 struct md_rdev *rdev = conf->disks[d].rdev; 7986 if (rdev) 7987 clear_bit(In_sync, &rdev->flags); 7988 rdev = conf->disks[d].replacement; 7989 if (rdev) 7990 clear_bit(In_sync, &rdev->flags); 7991 } 7992 } 7993 mddev->layout = conf->algorithm; 7994 mddev->chunk_sectors = conf->chunk_sectors; 7995 mddev->reshape_position = MaxSector; 7996 mddev->delta_disks = 0; 7997 mddev->reshape_backwards = 0; 7998 } 7999 } 8000 8001 static void raid5_quiesce(struct mddev *mddev, int state) 8002 { 8003 struct r5conf *conf = mddev->private; 8004 8005 switch(state) { 8006 case 2: /* resume for a suspend */ 8007 wake_up(&conf->wait_for_overlap); 8008 break; 8009 8010 case 1: /* stop all writes */ 8011 lock_all_device_hash_locks_irq(conf); 8012 /* '2' tells resync/reshape to pause so that all 8013 * active stripes can drain 8014 */ 8015 r5c_flush_cache(conf, INT_MAX); 8016 conf->quiesce = 2; 8017 wait_event_cmd(conf->wait_for_quiescent, 8018 atomic_read(&conf->active_stripes) == 0 && 8019 atomic_read(&conf->active_aligned_reads) == 0, 8020 unlock_all_device_hash_locks_irq(conf), 8021 lock_all_device_hash_locks_irq(conf)); 8022 conf->quiesce = 1; 8023 unlock_all_device_hash_locks_irq(conf); 8024 /* allow reshape to continue */ 8025 wake_up(&conf->wait_for_overlap); 8026 break; 8027 8028 case 0: /* re-enable writes */ 8029 lock_all_device_hash_locks_irq(conf); 8030 conf->quiesce = 0; 8031 wake_up(&conf->wait_for_quiescent); 8032 wake_up(&conf->wait_for_overlap); 8033 unlock_all_device_hash_locks_irq(conf); 8034 break; 8035 } 8036 r5l_quiesce(conf->log, state); 8037 } 8038 8039 static void *raid45_takeover_raid0(struct mddev *mddev, int level) 8040 { 8041 struct r0conf *raid0_conf = mddev->private; 8042 sector_t sectors; 8043 8044 /* for raid0 takeover only one zone is supported */ 8045 if (raid0_conf->nr_strip_zones > 1) { 8046 pr_warn("md/raid:%s: cannot takeover raid0 with more than one zone.\n", 8047 mdname(mddev)); 8048 return ERR_PTR(-EINVAL); 8049 } 8050 8051 sectors = raid0_conf->strip_zone[0].zone_end; 8052 sector_div(sectors, raid0_conf->strip_zone[0].nb_dev); 8053 mddev->dev_sectors = sectors; 8054 mddev->new_level = level; 8055 mddev->new_layout = ALGORITHM_PARITY_N; 8056 mddev->new_chunk_sectors = mddev->chunk_sectors; 8057 mddev->raid_disks += 1; 8058 mddev->delta_disks = 1; 8059 /* make sure it will be not marked as dirty */ 8060 mddev->recovery_cp = MaxSector; 8061 8062 return setup_conf(mddev); 8063 } 8064 8065 static void *raid5_takeover_raid1(struct mddev *mddev) 8066 { 8067 int chunksect; 8068 void *ret; 8069 8070 if (mddev->raid_disks != 2 || 8071 mddev->degraded > 1) 8072 return ERR_PTR(-EINVAL); 8073 8074 /* Should check if there are write-behind devices? 
*/ 8075 8076 chunksect = 64*2; /* 64K by default */ 8077 8078 /* The array must be an exact multiple of chunksize */ 8079 while (chunksect && (mddev->array_sectors & (chunksect-1))) 8080 chunksect >>= 1; 8081 8082 if ((chunksect<<9) < STRIPE_SIZE) 8083 /* array size does not allow a suitable chunk size */ 8084 return ERR_PTR(-EINVAL); 8085 8086 mddev->new_level = 5; 8087 mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC; 8088 mddev->new_chunk_sectors = chunksect; 8089 8090 ret = setup_conf(mddev); 8091 if (!IS_ERR(ret)) 8092 mddev_clear_unsupported_flags(mddev, 8093 UNSUPPORTED_MDDEV_FLAGS); 8094 return ret; 8095 } 8096 8097 static void *raid5_takeover_raid6(struct mddev *mddev) 8098 { 8099 int new_layout; 8100 8101 switch (mddev->layout) { 8102 case ALGORITHM_LEFT_ASYMMETRIC_6: 8103 new_layout = ALGORITHM_LEFT_ASYMMETRIC; 8104 break; 8105 case ALGORITHM_RIGHT_ASYMMETRIC_6: 8106 new_layout = ALGORITHM_RIGHT_ASYMMETRIC; 8107 break; 8108 case ALGORITHM_LEFT_SYMMETRIC_6: 8109 new_layout = ALGORITHM_LEFT_SYMMETRIC; 8110 break; 8111 case ALGORITHM_RIGHT_SYMMETRIC_6: 8112 new_layout = ALGORITHM_RIGHT_SYMMETRIC; 8113 break; 8114 case ALGORITHM_PARITY_0_6: 8115 new_layout = ALGORITHM_PARITY_0; 8116 break; 8117 case ALGORITHM_PARITY_N: 8118 new_layout = ALGORITHM_PARITY_N; 8119 break; 8120 default: 8121 return ERR_PTR(-EINVAL); 8122 } 8123 mddev->new_level = 5; 8124 mddev->new_layout = new_layout; 8125 mddev->delta_disks = -1; 8126 mddev->raid_disks -= 1; 8127 return setup_conf(mddev); 8128 } 8129 8130 static int raid5_check_reshape(struct mddev *mddev) 8131 { 8132 /* For a 2-drive array, the layout and chunk size can be changed 8133 * immediately as not restriping is needed. 8134 * For larger arrays we record the new value - after validation 8135 * to be used by a reshape pass. 8136 */ 8137 struct r5conf *conf = mddev->private; 8138 int new_chunk = mddev->new_chunk_sectors; 8139 8140 if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout)) 8141 return -EINVAL; 8142 if (new_chunk > 0) { 8143 if (!is_power_of_2(new_chunk)) 8144 return -EINVAL; 8145 if (new_chunk < (PAGE_SIZE>>9)) 8146 return -EINVAL; 8147 if (mddev->array_sectors & (new_chunk-1)) 8148 /* not factor of array size */ 8149 return -EINVAL; 8150 } 8151 8152 /* They look valid */ 8153 8154 if (mddev->raid_disks == 2) { 8155 /* can make the change immediately */ 8156 if (mddev->new_layout >= 0) { 8157 conf->algorithm = mddev->new_layout; 8158 mddev->layout = mddev->new_layout; 8159 } 8160 if (new_chunk > 0) { 8161 conf->chunk_sectors = new_chunk ; 8162 mddev->chunk_sectors = new_chunk; 8163 } 8164 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 8165 md_wakeup_thread(mddev->thread); 8166 } 8167 return check_reshape(mddev); 8168 } 8169 8170 static int raid6_check_reshape(struct mddev *mddev) 8171 { 8172 int new_chunk = mddev->new_chunk_sectors; 8173 8174 if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout)) 8175 return -EINVAL; 8176 if (new_chunk > 0) { 8177 if (!is_power_of_2(new_chunk)) 8178 return -EINVAL; 8179 if (new_chunk < (PAGE_SIZE >> 9)) 8180 return -EINVAL; 8181 if (mddev->array_sectors & (new_chunk-1)) 8182 /* not factor of array size */ 8183 return -EINVAL; 8184 } 8185 8186 /* They look valid */ 8187 return check_reshape(mddev); 8188 } 8189 8190 static void *raid5_takeover(struct mddev *mddev) 8191 { 8192 /* raid5 can take over: 8193 * raid0 - if there is only one strip zone - make it a raid4 layout 8194 * raid1 - if there are two drives. 
We need to know the chunk size 8195 * raid4 - trivial - just use a raid4 layout. 8196 * raid6 - Providing it is a *_6 layout 8197 */ 8198 if (mddev->level == 0) 8199 return raid45_takeover_raid0(mddev, 5); 8200 if (mddev->level == 1) 8201 return raid5_takeover_raid1(mddev); 8202 if (mddev->level == 4) { 8203 mddev->new_layout = ALGORITHM_PARITY_N; 8204 mddev->new_level = 5; 8205 return setup_conf(mddev); 8206 } 8207 if (mddev->level == 6) 8208 return raid5_takeover_raid6(mddev); 8209 8210 return ERR_PTR(-EINVAL); 8211 } 8212 8213 static void *raid4_takeover(struct mddev *mddev) 8214 { 8215 /* raid4 can take over: 8216 * raid0 - if there is only one strip zone 8217 * raid5 - if layout is right 8218 */ 8219 if (mddev->level == 0) 8220 return raid45_takeover_raid0(mddev, 4); 8221 if (mddev->level == 5 && 8222 mddev->layout == ALGORITHM_PARITY_N) { 8223 mddev->new_layout = 0; 8224 mddev->new_level = 4; 8225 return setup_conf(mddev); 8226 } 8227 return ERR_PTR(-EINVAL); 8228 } 8229 8230 static struct md_personality raid5_personality; 8231 8232 static void *raid6_takeover(struct mddev *mddev) 8233 { 8234 /* Currently can only take over a raid5. We map the 8235 * personality to an equivalent raid6 personality 8236 * with the Q block at the end. 8237 */ 8238 int new_layout; 8239 8240 if (mddev->pers != &raid5_personality) 8241 return ERR_PTR(-EINVAL); 8242 if (mddev->degraded > 1) 8243 return ERR_PTR(-EINVAL); 8244 if (mddev->raid_disks > 253) 8245 return ERR_PTR(-EINVAL); 8246 if (mddev->raid_disks < 3) 8247 return ERR_PTR(-EINVAL); 8248 8249 switch (mddev->layout) { 8250 case ALGORITHM_LEFT_ASYMMETRIC: 8251 new_layout = ALGORITHM_LEFT_ASYMMETRIC_6; 8252 break; 8253 case ALGORITHM_RIGHT_ASYMMETRIC: 8254 new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6; 8255 break; 8256 case ALGORITHM_LEFT_SYMMETRIC: 8257 new_layout = ALGORITHM_LEFT_SYMMETRIC_6; 8258 break; 8259 case ALGORITHM_RIGHT_SYMMETRIC: 8260 new_layout = ALGORITHM_RIGHT_SYMMETRIC_6; 8261 break; 8262 case ALGORITHM_PARITY_0: 8263 new_layout = ALGORITHM_PARITY_0_6; 8264 break; 8265 case ALGORITHM_PARITY_N: 8266 new_layout = ALGORITHM_PARITY_N; 8267 break; 8268 default: 8269 return ERR_PTR(-EINVAL); 8270 } 8271 mddev->new_level = 6; 8272 mddev->new_layout = new_layout; 8273 mddev->delta_disks = 1; 8274 mddev->raid_disks += 1; 8275 return setup_conf(mddev); 8276 } 8277 8278 static int raid5_change_consistency_policy(struct mddev *mddev, const char *buf) 8279 { 8280 struct r5conf *conf; 8281 int err; 8282 8283 err = mddev_lock(mddev); 8284 if (err) 8285 return err; 8286 conf = mddev->private; 8287 if (!conf) { 8288 mddev_unlock(mddev); 8289 return -ENODEV; 8290 } 8291 8292 if (strncmp(buf, "ppl", 3) == 0) { 8293 /* ppl only works with RAID 5 */ 8294 if (!raid5_has_ppl(conf) && conf->level == 5) { 8295 err = log_init(conf, NULL, true); 8296 if (!err) { 8297 err = resize_stripes(conf, conf->pool_size); 8298 if (err) 8299 log_exit(conf); 8300 } 8301 } else 8302 err = -EINVAL; 8303 } else if (strncmp(buf, "resync", 6) == 0) { 8304 if (raid5_has_ppl(conf)) { 8305 mddev_suspend(mddev); 8306 log_exit(conf); 8307 mddev_resume(mddev); 8308 err = resize_stripes(conf, conf->pool_size); 8309 } else if (test_bit(MD_HAS_JOURNAL, &conf->mddev->flags) && 8310 r5l_log_disk_error(conf)) { 8311 bool journal_dev_exists = false; 8312 struct md_rdev *rdev; 8313 8314 rdev_for_each(rdev, mddev) 8315 if (test_bit(Journal, &rdev->flags)) { 8316 journal_dev_exists = true; 8317 break; 8318 } 8319 8320 if (!journal_dev_exists) { 8321 mddev_suspend(mddev); 8322 
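				/*
				 * No journal rdev is left in the array, so the
				 * flag can simply be dropped; the suspend/resume
				 * pair around it keeps I/O from racing with the
				 * change (descriptive note).
				 */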
clear_bit(MD_HAS_JOURNAL, &mddev->flags); 8323 mddev_resume(mddev); 8324 } else /* need remove journal device first */ 8325 err = -EBUSY; 8326 } else 8327 err = -EINVAL; 8328 } else { 8329 err = -EINVAL; 8330 } 8331 8332 if (!err) 8333 md_update_sb(mddev, 1); 8334 8335 mddev_unlock(mddev); 8336 8337 return err; 8338 } 8339 8340 static struct md_personality raid6_personality = 8341 { 8342 .name = "raid6", 8343 .level = 6, 8344 .owner = THIS_MODULE, 8345 .make_request = raid5_make_request, 8346 .run = raid5_run, 8347 .free = raid5_free, 8348 .status = raid5_status, 8349 .error_handler = raid5_error, 8350 .hot_add_disk = raid5_add_disk, 8351 .hot_remove_disk= raid5_remove_disk, 8352 .spare_active = raid5_spare_active, 8353 .sync_request = raid5_sync_request, 8354 .resize = raid5_resize, 8355 .size = raid5_size, 8356 .check_reshape = raid6_check_reshape, 8357 .start_reshape = raid5_start_reshape, 8358 .finish_reshape = raid5_finish_reshape, 8359 .quiesce = raid5_quiesce, 8360 .takeover = raid6_takeover, 8361 .congested = raid5_congested, 8362 .change_consistency_policy = raid5_change_consistency_policy, 8363 }; 8364 static struct md_personality raid5_personality = 8365 { 8366 .name = "raid5", 8367 .level = 5, 8368 .owner = THIS_MODULE, 8369 .make_request = raid5_make_request, 8370 .run = raid5_run, 8371 .free = raid5_free, 8372 .status = raid5_status, 8373 .error_handler = raid5_error, 8374 .hot_add_disk = raid5_add_disk, 8375 .hot_remove_disk= raid5_remove_disk, 8376 .spare_active = raid5_spare_active, 8377 .sync_request = raid5_sync_request, 8378 .resize = raid5_resize, 8379 .size = raid5_size, 8380 .check_reshape = raid5_check_reshape, 8381 .start_reshape = raid5_start_reshape, 8382 .finish_reshape = raid5_finish_reshape, 8383 .quiesce = raid5_quiesce, 8384 .takeover = raid5_takeover, 8385 .congested = raid5_congested, 8386 .change_consistency_policy = raid5_change_consistency_policy, 8387 }; 8388 8389 static struct md_personality raid4_personality = 8390 { 8391 .name = "raid4", 8392 .level = 4, 8393 .owner = THIS_MODULE, 8394 .make_request = raid5_make_request, 8395 .run = raid5_run, 8396 .free = raid5_free, 8397 .status = raid5_status, 8398 .error_handler = raid5_error, 8399 .hot_add_disk = raid5_add_disk, 8400 .hot_remove_disk= raid5_remove_disk, 8401 .spare_active = raid5_spare_active, 8402 .sync_request = raid5_sync_request, 8403 .resize = raid5_resize, 8404 .size = raid5_size, 8405 .check_reshape = raid5_check_reshape, 8406 .start_reshape = raid5_start_reshape, 8407 .finish_reshape = raid5_finish_reshape, 8408 .quiesce = raid5_quiesce, 8409 .takeover = raid4_takeover, 8410 .congested = raid5_congested, 8411 .change_consistency_policy = raid5_change_consistency_policy, 8412 }; 8413 8414 static int __init raid5_init(void) 8415 { 8416 int ret; 8417 8418 raid5_wq = alloc_workqueue("raid5wq", 8419 WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE|WQ_SYSFS, 0); 8420 if (!raid5_wq) 8421 return -ENOMEM; 8422 8423 ret = cpuhp_setup_state_multi(CPUHP_MD_RAID5_PREPARE, 8424 "md/raid5:prepare", 8425 raid456_cpu_up_prepare, 8426 raid456_cpu_dead); 8427 if (ret) { 8428 destroy_workqueue(raid5_wq); 8429 return ret; 8430 } 8431 register_md_personality(&raid6_personality); 8432 register_md_personality(&raid5_personality); 8433 register_md_personality(&raid4_personality); 8434 return 0; 8435 } 8436 8437 static void raid5_exit(void) 8438 { 8439 unregister_md_personality(&raid6_personality); 8440 unregister_md_personality(&raid5_personality); 8441 unregister_md_personality(&raid4_personality); 8442 
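	/*
	 * Tear down in the reverse order of raid5_init(): personalities
	 * first, then the CPU hotplug state, and finally the raid5_wq
	 * workqueue.
	 */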
cpuhp_remove_multi_state(CPUHP_MD_RAID5_PREPARE); 8443 destroy_workqueue(raid5_wq); 8444 } 8445 8446 module_init(raid5_init); 8447 module_exit(raid5_exit); 8448 MODULE_LICENSE("GPL"); 8449 MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD"); 8450 MODULE_ALIAS("md-personality-4"); /* RAID5 */ 8451 MODULE_ALIAS("md-raid5"); 8452 MODULE_ALIAS("md-raid4"); 8453 MODULE_ALIAS("md-level-5"); 8454 MODULE_ALIAS("md-level-4"); 8455 MODULE_ALIAS("md-personality-8"); /* RAID6 */ 8456 MODULE_ALIAS("md-raid6"); 8457 MODULE_ALIAS("md-level-6"); 8458 8459 /* This used to be two separate modules, they were: */ 8460 MODULE_ALIAS("raid5"); 8461 MODULE_ALIAS("raid6"); 8462