/*
 * raid5.c : Multiple Devices driver for Linux
 *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *	   Copyright (C) 1999, 2000 Ingo Molnar
 *	   Copyright (C) 2002, 2003 H. Peter Anvin
 *
 * RAID-4/5/6 management functions.
 * Thanks to Penguin Computing for making the RAID-6 development possible
 * by donating a test server!
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * BITMAP UNPLUGGING:
 *
 * The sequencing for updating the bitmap reliably is a little
 * subtle (and I got it wrong the first time) so it deserves some
 * explanation.
 *
 * We group bitmap updates into batches.  Each batch has a number.
 * We may write out several batches at once, but that isn't very important.
 * conf->seq_write is the number of the last batch successfully written.
 * conf->seq_flush is the number of the last batch that was closed to
 *    new additions.
 * When we discover that we will need to write to any block in a stripe
 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
 * the number of the batch it will be in. This is seq_flush+1.
 * When we are ready to do a write, if that batch hasn't been written yet,
 *    we plug the array and queue the stripe for later.
 * When an unplug happens, we increment seq_flush, thus closing the current
 *    batch.
 * When we notice that seq_flush > seq_write, we write out all pending updates
 * to the bitmap, and advance seq_write to where seq_flush was.
 * This may occasionally write a bit out twice, but is sure never to
 * miss any bits.
 */
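/*
 * An illustrative walk-through of the batching above (the numbers are
 * made up): suppose seq_write == seq_flush == 5.  A new write dirties
 * the in-memory bitmap and records bm_seq = 6 (seq_flush + 1) in its
 * stripe.  Batch 6 has not been written yet, so the stripe waits on
 * bitmap_list.  The unplug bumps seq_flush to 6, closing the batch;
 * when raid5d sees seq_flush(6) > seq_write(5) it writes the dirty
 * bitmap pages out and sets seq_write = 6, after which the queued
 * stripe's writes may proceed.
 */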
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/module.h>
#include <linux/async.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/nodemask.h>
#include <linux/flex_array.h>
#include <trace/events/block.h>

#include "md.h"
#include "raid5.h"
#include "raid0.h"
#include "bitmap.h"

#define UNSUPPORTED_MDDEV_FLAGS	(1L << MD_FAILFAST_SUPPORTED)

#define cpu_to_group(cpu) cpu_to_node(cpu)
#define ANY_GROUP NUMA_NO_NODE

static bool devices_handle_discard_safely = false;
module_param(devices_handle_discard_safely, bool, 0644);
MODULE_PARM_DESC(devices_handle_discard_safely,
		 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
static struct workqueue_struct *raid5_wq;

static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
{
	int hash = (sect >> STRIPE_SHIFT) & HASH_MASK;

	return &conf->stripe_hashtbl[hash];
}

static inline int stripe_hash_locks_hash(sector_t sect)
{
	return (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK;
}

static inline void lock_device_hash_lock(struct r5conf *conf, int hash)
{
	spin_lock_irq(conf->hash_locks + hash);
	spin_lock(&conf->device_lock);
}

static inline void unlock_device_hash_lock(struct r5conf *conf, int hash)
{
	spin_unlock(&conf->device_lock);
	spin_unlock_irq(conf->hash_locks + hash);
}

static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
{
	int i;

	local_irq_disable();
	spin_lock(conf->hash_locks);
	for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
		spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks);
	spin_lock(&conf->device_lock);
}

static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
{
	int i;

	spin_unlock(&conf->device_lock);
	for (i = NR_STRIPE_HASH_LOCKS; i; i--)
		spin_unlock(conf->hash_locks + i - 1);
	local_irq_enable();
}

/* Find first data disk in a raid6 stripe */
static inline int raid6_d0(struct stripe_head *sh)
{
	if (sh->ddf_layout)
		/* ddf always starts from first device */
		return 0;
	/* md starts just after Q block */
	if (sh->qd_idx == sh->disks - 1)
		return 0;
	else
		return sh->qd_idx + 1;
}
static inline int raid6_next_disk(int disk, int raid_disks)
{
	disk++;
	return (disk < raid_disks) ? disk : 0;
}

/* When walking through the disks in a raid5, starting at raid6_d0,
 * we need to map each disk to a 'slot', where the data disks are slot
 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
 * is raid_disks-1.  This helper does that mapping.
 */
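/*
 * Worked example (hypothetical 5-device md-layout RAID-6 stripe with
 * pd_idx == 3 and qd_idx == 4): raid6_d0() returns 0, and walking disks
 * 0,1,2,3,4 through raid6_idx_to_slot() yields data slots 0,1,2, then
 * syndrome_disks (3) for P and syndrome_disks+1 (4) for Q.  In the ddf
 * layout the running count is advanced even on P/Q, which keeps the
 * data slots aligned with the ddf on-disk ordering.
 */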
static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
			     int *count, int syndrome_disks)
{
	int slot = *count;

	if (sh->ddf_layout)
		(*count)++;
	if (idx == sh->pd_idx)
		return syndrome_disks;
	if (idx == sh->qd_idx)
		return syndrome_disks + 1;
	if (!sh->ddf_layout)
		(*count)++;
	return slot;
}

static void return_io(struct bio_list *return_bi)
{
	struct bio *bi;

	while ((bi = bio_list_pop(return_bi)) != NULL) {
		bi->bi_iter.bi_size = 0;
		trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
					 bi, 0);
		bio_endio(bi);
	}
}

static void print_raid5_conf(struct r5conf *conf);

static int stripe_operations_active(struct stripe_head *sh)
{
	return sh->check_state || sh->reconstruct_state ||
	       test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
	       test_bit(STRIPE_COMPUTE_RUN, &sh->state);
}

static void raid5_wakeup_stripe_thread(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	struct r5worker_group *group;
	int thread_cnt;
	int i, cpu = sh->cpu;

	if (!cpu_online(cpu)) {
		cpu = cpumask_any(cpu_online_mask);
		sh->cpu = cpu;
	}

	if (list_empty(&sh->lru)) {
		struct r5worker_group *group;

		group = conf->worker_groups + cpu_to_group(cpu);
		list_add_tail(&sh->lru, &group->handle_list);
		group->stripes_cnt++;
		sh->group = group;
	}

	if (conf->worker_cnt_per_group == 0) {
		md_wakeup_thread(conf->mddev->thread);
		return;
	}

	group = conf->worker_groups + cpu_to_group(sh->cpu);

	group->workers[0].working = true;
	/* at least one worker should run to avoid race */
	queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work);

	thread_cnt = group->stripes_cnt / MAX_STRIPE_BATCH - 1;
	/* wakeup more workers */
	for (i = 1; i < conf->worker_cnt_per_group && thread_cnt > 0; i++) {
		if (group->workers[i].working == false) {
			group->workers[i].working = true;
			queue_work_on(sh->cpu, raid5_wq,
				      &group->workers[i].work);
			thread_cnt--;
		}
	}
}
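/*
 * do_release_stripe() runs when a stripe's last reference is dropped:
 * if STRIPE_HANDLE is set the stripe goes back onto a work list
 * (delayed_list, bitmap_list, handle_list or a worker group); otherwise
 * it becomes inactive, landing either on temp_inactive_list or, when
 * r5c write-back still holds some of its data in the journal, on one of
 * the r5c partial/full stripe lists.
 */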
static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
			      struct list_head *temp_inactive_list)
{
	int i;
	int injournal = 0;	/* number of data pages with R5_InJournal */

	BUG_ON(!list_empty(&sh->lru));
	BUG_ON(atomic_read(&conf->active_stripes) == 0);

	if (r5c_is_writeback(conf->log))
		for (i = sh->disks; i--; )
			if (test_bit(R5_InJournal, &sh->dev[i].flags))
				injournal++;
	/*
	 * When quiescing with r5c write back, set STRIPE_HANDLE for stripes
	 * with data in the journal, so they are not released to the cached
	 * lists
	 */
	if (conf->quiesce && r5c_is_writeback(conf->log) &&
	    !test_bit(STRIPE_HANDLE, &sh->state) && injournal != 0) {
		if (test_bit(STRIPE_R5C_CACHING, &sh->state))
			r5c_make_stripe_write_out(sh);
		set_bit(STRIPE_HANDLE, &sh->state);
	}

	if (test_bit(STRIPE_HANDLE, &sh->state)) {
		if (test_bit(STRIPE_DELAYED, &sh->state) &&
		    !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			list_add_tail(&sh->lru, &conf->delayed_list);
		else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
			 sh->bm_seq - conf->seq_write > 0)
			list_add_tail(&sh->lru, &conf->bitmap_list);
		else {
			clear_bit(STRIPE_DELAYED, &sh->state);
			clear_bit(STRIPE_BIT_DELAY, &sh->state);
			if (conf->worker_cnt_per_group == 0) {
				list_add_tail(&sh->lru, &conf->handle_list);
			} else {
				raid5_wakeup_stripe_thread(sh);
				return;
			}
		}
		md_wakeup_thread(conf->mddev->thread);
	} else {
		BUG_ON(stripe_operations_active(sh));
		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			if (atomic_dec_return(&conf->preread_active_stripes)
			    < IO_THRESHOLD)
				md_wakeup_thread(conf->mddev->thread);
		atomic_dec(&conf->active_stripes);
		if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
			if (!r5c_is_writeback(conf->log))
				list_add_tail(&sh->lru, temp_inactive_list);
			else {
				WARN_ON(test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags));
				if (injournal == 0)
					list_add_tail(&sh->lru, temp_inactive_list);
				else if (injournal == conf->raid_disks - conf->max_degraded) {
					/* full stripe */
					if (!test_and_set_bit(STRIPE_R5C_FULL_STRIPE, &sh->state))
						atomic_inc(&conf->r5c_cached_full_stripes);
					if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state))
						atomic_dec(&conf->r5c_cached_partial_stripes);
					list_add_tail(&sh->lru, &conf->r5c_full_stripe_list);
					r5c_check_cached_full_stripe(conf);
				} else
					/*
					 * STRIPE_R5C_PARTIAL_STRIPE is set in
					 * r5c_try_caching_write(). No need to
					 * set it again.
					 */
					list_add_tail(&sh->lru, &conf->r5c_partial_stripe_list);
			}
		}
	}
}

static void __release_stripe(struct r5conf *conf, struct stripe_head *sh,
			     struct list_head *temp_inactive_list)
{
	if (atomic_dec_and_test(&sh->count))
		do_release_stripe(conf, sh, temp_inactive_list);
}

/*
 * @hash could be NR_STRIPE_HASH_LOCKS, then we have a list of inactive_list
 *
 * Be careful: Only one task can add/delete stripes from temp_inactive_list at
 * a given time. Adding stripes only takes the device lock, while deleting
 * stripes only takes the hash lock.
 */
static void release_inactive_stripe_list(struct r5conf *conf,
					 struct list_head *temp_inactive_list,
					 int hash)
{
	int size;
	bool do_wakeup = false;
	unsigned long flags;

	if (hash == NR_STRIPE_HASH_LOCKS) {
		size = NR_STRIPE_HASH_LOCKS;
		hash = NR_STRIPE_HASH_LOCKS - 1;
	} else
		size = 1;
	while (size) {
		struct list_head *list = &temp_inactive_list[size - 1];

		/*
		 * We don't hold any lock here yet, raid5_get_active_stripe() might
		 * remove stripes from the list
		 */
		if (!list_empty_careful(list)) {
			spin_lock_irqsave(conf->hash_locks + hash, flags);
			if (list_empty(conf->inactive_list + hash) &&
			    !list_empty(list))
				atomic_dec(&conf->empty_inactive_list_nr);
			list_splice_tail_init(list, conf->inactive_list + hash);
			do_wakeup = true;
			spin_unlock_irqrestore(conf->hash_locks + hash, flags);
		}
		size--;
		hash--;
	}

	if (do_wakeup) {
		wake_up(&conf->wait_for_stripe);
		if (atomic_read(&conf->active_stripes) == 0)
			wake_up(&conf->wait_for_quiescent);
		if (conf->retry_read_aligned)
			md_wakeup_thread(conf->mddev->thread);
	}
}

/* should hold conf->device_lock already */
static int release_stripe_list(struct r5conf *conf,
			       struct list_head *temp_inactive_list)
{
	struct stripe_head *sh, *t;
	int count = 0;
	struct llist_node *head;

	head = llist_del_all(&conf->released_stripes);
	head = llist_reverse_order(head);
	llist_for_each_entry_safe(sh, t, head, release_list) {
		int hash;

		/* sh could be re-added after STRIPE_ON_RELEASE_LIST is cleared */
		smp_mb();
		clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state);
		/*
		 * Don't worry if the bit is set here, because if it is set
		 * again, the count is always > 1. The same is true for the
		 * STRIPE_ON_UNPLUG_LIST bit.
		 */
		hash = sh->hash_lock_index;
		__release_stripe(conf, sh, &temp_inactive_list[hash]);
		count++;
	}

	return count;
}

void raid5_release_stripe(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	unsigned long flags;
	struct list_head list;
	int hash;
	bool wakeup;

	/* Avoid release_list until the last reference.
	 */
	if (atomic_add_unless(&sh->count, -1, 1))
		return;

	if (unlikely(!conf->mddev->thread) ||
	    test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state))
		goto slow_path;
	wakeup = llist_add(&sh->release_list, &conf->released_stripes);
	if (wakeup)
		md_wakeup_thread(conf->mddev->thread);
	return;
slow_path:
	local_irq_save(flags);
	/* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */
	if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
		INIT_LIST_HEAD(&list);
		hash = sh->hash_lock_index;
		do_release_stripe(conf, sh, &list);
		spin_unlock(&conf->device_lock);
		release_inactive_stripe_list(conf, &list, hash);
	}
	local_irq_restore(flags);
}

static inline void remove_hash(struct stripe_head *sh)
{
	pr_debug("remove_hash(), stripe %llu\n",
		 (unsigned long long)sh->sector);

	hlist_del_init(&sh->hash);
}

static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh)
{
	struct hlist_head *hp = stripe_hash(conf, sh->sector);

	pr_debug("insert_hash(), stripe %llu\n",
		 (unsigned long long)sh->sector);

	hlist_add_head(&sh->hash, hp);
}

/* find an idle stripe, make sure it is unhashed, and return it. */
static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash)
{
	struct stripe_head *sh = NULL;
	struct list_head *first;

	if (list_empty(conf->inactive_list + hash))
		goto out;
	first = (conf->inactive_list + hash)->next;
	sh = list_entry(first, struct stripe_head, lru);
	list_del_init(first);
	remove_hash(sh);
	atomic_inc(&conf->active_stripes);
	BUG_ON(hash != sh->hash_lock_index);
	if (list_empty(conf->inactive_list + hash))
		atomic_inc(&conf->empty_inactive_list_nr);
out:
	return sh;
}

static void shrink_buffers(struct stripe_head *sh)
{
	struct page *p;
	int i;
	int num = sh->raid_conf->pool_size;

	for (i = 0; i < num; i++) {
		WARN_ON(sh->dev[i].page != sh->dev[i].orig_page);
		p = sh->dev[i].page;
		if (!p)
			continue;
		sh->dev[i].page = NULL;
		put_page(p);
	}
}

static int grow_buffers(struct stripe_head *sh, gfp_t gfp)
{
	int i;
	int num = sh->raid_conf->pool_size;

	for (i = 0; i < num; i++) {
		struct page *page;

		if (!(page = alloc_page(gfp))) {
			return 1;
		}
		sh->dev[i].page = page;
		sh->dev[i].orig_page = page;
	}
	return 0;
}

static void raid5_build_block(struct stripe_head *sh, int i, int previous);
static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
			   struct stripe_head *sh);

static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
{
	struct r5conf *conf = sh->raid_conf;
	int i, seq;

	BUG_ON(atomic_read(&sh->count) != 0);
	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
	BUG_ON(stripe_operations_active(sh));
	BUG_ON(sh->batch_head);

	pr_debug("init_stripe called, stripe %llu\n",
		 (unsigned long long)sector);
retry:
	seq = read_seqcount_begin(&conf->gen_lock);
	sh->generation = conf->generation - previous;
	sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
	sh->sector = sector;
	stripe_set_idx(sector, conf, previous, sh);
	sh->state = 0;

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->toread || dev->read || dev->towrite || dev->written ||
		    test_bit(R5_LOCKED, &dev->flags)) {
			pr_err("sector=%llx i=%d %p %p %p %p %d\n",
			       (unsigned long long)sh->sector, i, dev->toread,
			       dev->read, dev->towrite, dev->written,
			       test_bit(R5_LOCKED, &dev->flags));
			WARN_ON(1);
		}
		dev->flags = 0;
		raid5_build_block(sh, i, previous);
	}
	if (read_seqcount_retry(&conf->gen_lock, seq))
		goto retry;
	sh->overwrite_disks = 0;
	insert_hash(conf, sh);
	sh->cpu = smp_processor_id();
	set_bit(STRIPE_BATCH_READY, &sh->state);
}

static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
					 short generation)
{
	struct stripe_head *sh;

	pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
	hlist_for_each_entry(sh, stripe_hash(conf, sector), hash)
		if (sh->sector == sector && sh->generation == generation)
			return sh;
	pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
	return NULL;
}

/*
 * Need to check if array has failed when deciding whether to:
 *  - start an array
 *  - remove non-faulty devices
 *  - add a spare
 *  - allow a reshape
 * This determination is simple when no reshape is happening.
 * However if there is a reshape, we need to carefully check
 * both the before and after sections.
 * This is because some failed devices may only affect one
 * of the two sections, and some non-in_sync devices may
 * be in_sync in the section most affected by failed devices.
 */
int raid5_calc_degraded(struct r5conf *conf)
{
	int degraded, degraded2;
	int i;

	rcu_read_lock();
	degraded = 0;
	for (i = 0; i < conf->previous_raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);

		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = rcu_dereference(conf->disks[i].replacement);
		if (!rdev || test_bit(Faulty, &rdev->flags))
			degraded++;
		else if (test_bit(In_sync, &rdev->flags))
			;
		else
			/* not in-sync or faulty.
			 * If the reshape increases the number of devices,
			 * this is being recovered by the reshape, so
			 * this 'previous' section is not in_sync.
			 * If the number of devices is being reduced however,
			 * the device can only be part of the array if
			 * we are reverting a reshape, so this section will
			 * be in-sync.
			 */
			if (conf->raid_disks >= conf->previous_raid_disks)
				degraded++;
	}
	rcu_read_unlock();
	if (conf->raid_disks == conf->previous_raid_disks)
		return degraded;
	rcu_read_lock();
	degraded2 = 0;
	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);

		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = rcu_dereference(conf->disks[i].replacement);
		if (!rdev || test_bit(Faulty, &rdev->flags))
			degraded2++;
		else if (test_bit(In_sync, &rdev->flags))
			;
		else
			/* not in-sync or faulty.
			 * If reshape increases the number of devices, this
			 * section has already been recovered, else it
			 * almost certainly hasn't.
			 */
			if (conf->raid_disks <= conf->previous_raid_disks)
				degraded2++;
	}
	rcu_read_unlock();
	if (degraded2 > degraded)
		return degraded2;
	return degraded;
}

static int has_failed(struct r5conf *conf)
{
	int degraded;

	if (conf->mddev->reshape_position == MaxSector)
		return conf->mddev->degraded > conf->max_degraded;

	degraded = raid5_calc_degraded(conf);
	if (degraded > conf->max_degraded)
		return 1;
	return 0;
}

struct stripe_head *
raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
			int previous, int noblock, int noquiesce)
{
	struct stripe_head *sh;
	int hash = stripe_hash_locks_hash(sector);
	int inc_empty_inactive_list_flag;

	pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);

	spin_lock_irq(conf->hash_locks + hash);

	do {
		wait_event_lock_irq(conf->wait_for_quiescent,
				    conf->quiesce == 0 || noquiesce,
				    *(conf->hash_locks + hash));
		sh = __find_stripe(conf, sector, conf->generation - previous);
		if (!sh) {
			if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) {
				sh = get_free_stripe(conf, hash);
				if (!sh && !test_bit(R5_DID_ALLOC,
						     &conf->cache_state))
					set_bit(R5_ALLOC_MORE,
						&conf->cache_state);
			}
			if (noblock && sh == NULL)
				break;

			r5c_check_stripe_cache_usage(conf);
			if (!sh) {
				set_bit(R5_INACTIVE_BLOCKED,
					&conf->cache_state);
				r5l_wake_reclaim(conf->log, 0);
				wait_event_lock_irq(
					conf->wait_for_stripe,
					!list_empty(conf->inactive_list + hash) &&
					(atomic_read(&conf->active_stripes)
					 < (conf->max_nr_stripes * 3 / 4)
					 || !test_bit(R5_INACTIVE_BLOCKED,
						      &conf->cache_state)),
					*(conf->hash_locks + hash));
				clear_bit(R5_INACTIVE_BLOCKED,
					  &conf->cache_state);
			} else {
				init_stripe(sh, sector, previous);
				atomic_inc(&sh->count);
			}
		} else if (!atomic_inc_not_zero(&sh->count)) {
			spin_lock(&conf->device_lock);
			if (!atomic_read(&sh->count)) {
				if (!test_bit(STRIPE_HANDLE, &sh->state))
					atomic_inc(&conf->active_stripes);
				BUG_ON(list_empty(&sh->lru) &&
				       !test_bit(STRIPE_EXPANDING, &sh->state));
				inc_empty_inactive_list_flag = 0;
				if (!list_empty(conf->inactive_list + hash))
					inc_empty_inactive_list_flag = 1;
				list_del_init(&sh->lru);
				if (list_empty(conf->inactive_list + hash) &&
				    inc_empty_inactive_list_flag)
					atomic_inc(&conf->empty_inactive_list_nr);
				if (sh->group) {
					sh->group->stripes_cnt--;
					sh->group = NULL;
				}
			}
			atomic_inc(&sh->count);
			spin_unlock(&conf->device_lock);
		}
	} while (sh == NULL);

	spin_unlock_irq(conf->hash_locks + hash);
	return sh;
}

static bool is_full_stripe_write(struct stripe_head *sh)
{
	BUG_ON(sh->overwrite_disks > (sh->disks - sh->raid_conf->max_degraded));
	return sh->overwrite_disks == (sh->disks - sh->raid_conf->max_degraded);
}

static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
{
	local_irq_disable();
	if (sh1 > sh2) {
		spin_lock(&sh2->stripe_lock);
		spin_lock_nested(&sh1->stripe_lock, 1);
	} else {
		spin_lock(&sh1->stripe_lock);
		spin_lock_nested(&sh2->stripe_lock, 1);
	}
}

static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
{
	spin_unlock(&sh1->stripe_lock);
	spin_unlock(&sh2->stripe_lock);
	local_irq_enable();
}
/* Only a fresh full-stripe normal write can be added to a batch list */
static bool stripe_can_batch(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;

	if (conf->log)
		return false;
	return test_bit(STRIPE_BATCH_READY, &sh->state) &&
	       !test_bit(STRIPE_BITMAP_PENDING, &sh->state) &&
	       is_full_stripe_write(sh);
}
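/*
 * Write batching: consecutive full-stripe writes within one chunk can
 * be chained onto a "head" stripe through sh->batch_list and handled as
 * a single unit.  The search below only looks backwards, at the stripe
 * STRIPE_SECTORS before this one.
 */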
/* we only do back search */
static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh)
{
	struct stripe_head *head;
	sector_t head_sector, tmp_sec;
	int hash;
	int dd_idx;
	int inc_empty_inactive_list_flag;

	/* Don't cross chunks, so the stripe's pd_idx/qd_idx stay the same */
	tmp_sec = sh->sector;
	if (!sector_div(tmp_sec, conf->chunk_sectors))
		return;
	head_sector = sh->sector - STRIPE_SECTORS;

	hash = stripe_hash_locks_hash(head_sector);
	spin_lock_irq(conf->hash_locks + hash);
	head = __find_stripe(conf, head_sector, conf->generation);
	if (head && !atomic_inc_not_zero(&head->count)) {
		spin_lock(&conf->device_lock);
		if (!atomic_read(&head->count)) {
			if (!test_bit(STRIPE_HANDLE, &head->state))
				atomic_inc(&conf->active_stripes);
			BUG_ON(list_empty(&head->lru) &&
			       !test_bit(STRIPE_EXPANDING, &head->state));
			inc_empty_inactive_list_flag = 0;
			if (!list_empty(conf->inactive_list + hash))
				inc_empty_inactive_list_flag = 1;
			list_del_init(&head->lru);
			if (list_empty(conf->inactive_list + hash) &&
			    inc_empty_inactive_list_flag)
				atomic_inc(&conf->empty_inactive_list_nr);
			if (head->group) {
				head->group->stripes_cnt--;
				head->group = NULL;
			}
		}
		atomic_inc(&head->count);
		spin_unlock(&conf->device_lock);
	}
	spin_unlock_irq(conf->hash_locks + hash);

	if (!head)
		return;
	if (!stripe_can_batch(head))
		goto out;

	lock_two_stripes(head, sh);
	/* clear_batch_ready() clears the flag */
	if (!stripe_can_batch(head) || !stripe_can_batch(sh))
		goto unlock_out;

	if (sh->batch_head)
		goto unlock_out;

	dd_idx = 0;
	while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
		dd_idx++;
	if (head->dev[dd_idx].towrite->bi_opf != sh->dev[dd_idx].towrite->bi_opf ||
	    bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite))
		goto unlock_out;

	if (head->batch_head) {
		spin_lock(&head->batch_head->batch_lock);
		/* This batch list is already running */
		if (!stripe_can_batch(head)) {
			spin_unlock(&head->batch_head->batch_lock);
			goto unlock_out;
		}

		/*
		 * at this point, head's BATCH_READY could be cleared, but we
		 * can still add the stripe to batch list
		 */
		list_add(&sh->batch_list, &head->batch_list);
		spin_unlock(&head->batch_head->batch_lock);

		sh->batch_head = head->batch_head;
	} else {
		head->batch_head = head;
		sh->batch_head = head->batch_head;
		spin_lock(&head->batch_lock);
		list_add_tail(&sh->batch_list, &head->batch_list);
		spin_unlock(&head->batch_lock);
	}

	if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
		if (atomic_dec_return(&conf->preread_active_stripes)
		    < IO_THRESHOLD)
			md_wakeup_thread(conf->mddev->thread);

	if (test_and_clear_bit(STRIPE_BIT_DELAY, &sh->state)) {
		int seq = sh->bm_seq;

		if (test_bit(STRIPE_BIT_DELAY, &sh->batch_head->state) &&
		    sh->batch_head->bm_seq > seq)
			seq = sh->batch_head->bm_seq;
		set_bit(STRIPE_BIT_DELAY, &sh->batch_head->state);
		sh->batch_head->bm_seq = seq;
	}

	atomic_inc(&sh->count);
unlock_out:
	unlock_two_stripes(head, sh);
out:
	raid5_release_stripe(head);
}

/* Determine if 'data_offset' or 'new_data_offset' should be used
 * in this stripe_head.
 */
static int use_new_offset(struct r5conf *conf, struct stripe_head *sh)
{
	sector_t progress = conf->reshape_progress;
	/* Need a memory barrier to make sure we see the value
	 * of conf->generation, or ->data_offset that was set before
	 * reshape_progress was updated.
	 */
	smp_rmb();
	if (progress == MaxSector)
		return 0;
	if (sh->generation == conf->generation - 1)
		return 0;
	/* We are in a reshape, and this is a new-generation stripe,
	 * so use new_data_offset.
	 */
	return 1;
}

static void flush_deferred_bios(struct r5conf *conf)
{
	struct bio_list tmp;
	struct bio *bio;

	if (!conf->batch_bio_dispatch || !conf->group_cnt)
		return;

	bio_list_init(&tmp);
	spin_lock(&conf->pending_bios_lock);
	bio_list_merge(&tmp, &conf->pending_bios);
	bio_list_init(&conf->pending_bios);
	spin_unlock(&conf->pending_bios_lock);

	while ((bio = bio_list_pop(&tmp)))
		generic_make_request(bio);
}

static void defer_bio_issue(struct r5conf *conf, struct bio *bio)
{
	/*
	 * changing group_cnt will drain all bios, so this is safe
	 *
	 * A read generally means a read-modify-write, which usually means a
	 * random write, so we don't delay it
	 */
	if (!conf->batch_bio_dispatch || !conf->group_cnt ||
	    bio_op(bio) == REQ_OP_READ) {
		generic_make_request(bio);
		return;
	}
	spin_lock(&conf->pending_bios_lock);
	bio_list_add(&conf->pending_bios, bio);
	spin_unlock(&conf->pending_bios_lock);
	md_wakeup_thread(conf->mddev->thread);
}

static void
raid5_end_read_request(struct bio *bi);
static void
raid5_end_write_request(struct bio *bi);
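/*
 * ops_run_io() walks every device of the stripe (and, for a batch,
 * every stripe on the batch list) and issues the I/O that the state
 * machine requested via R5_Wantwrite/R5_Wantread/R5_WantReplace,
 * choosing between the rdev and its replacement under RCU and
 * honouring any recorded bad blocks before a write is sent.
 */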
static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
{
	struct r5conf *conf = sh->raid_conf;
	int i, disks = sh->disks;
	struct stripe_head *head_sh = sh;

	might_sleep();

	if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
		/* writing out phase */
		if (s->waiting_extra_page)
			return;
		if (r5l_write_stripe(conf->log, sh) == 0)
			return;
	} else { /* caching phase */
		if (test_bit(STRIPE_LOG_TRAPPED, &sh->state)) {
			r5c_cache_data(conf->log, sh, s);
			return;
		}
	}

	for (i = disks; i--; ) {
		int op, op_flags = 0;
		int replace_only = 0;
		struct bio *bi, *rbi;
		struct md_rdev *rdev, *rrdev = NULL;

		sh = head_sh;
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
			op = REQ_OP_WRITE;
			if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
				op_flags = REQ_FUA;
			if (test_bit(R5_Discard, &sh->dev[i].flags))
				op = REQ_OP_DISCARD;
		} else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
			op = REQ_OP_READ;
		else if (test_and_clear_bit(R5_WantReplace,
					    &sh->dev[i].flags)) {
			op = REQ_OP_WRITE;
			replace_only = 1;
		} else
			continue;
		if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags))
			op_flags |= REQ_SYNC;

again:
		bi = &sh->dev[i].req;
		rbi = &sh->dev[i].rreq; /* For writing to replacement */

		rcu_read_lock();
		rrdev = rcu_dereference(conf->disks[i].replacement);
		smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (!rdev) {
			rdev = rrdev;
			rrdev = NULL;
		}
		if (op_is_write(op)) {
			if (replace_only)
				rdev = NULL;
			if (rdev == rrdev)
				/* We raced and saw duplicates */
				rrdev = NULL;
		} else {
			if (test_bit(R5_ReadRepl, &head_sh->dev[i].flags) && rrdev)
				rdev = rrdev;
			rrdev = NULL;
		}

		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = NULL;
		if (rdev)
			atomic_inc(&rdev->nr_pending);
		if (rrdev && test_bit(Faulty, &rrdev->flags))
			rrdev = NULL;
		if (rrdev)
			atomic_inc(&rrdev->nr_pending);
		rcu_read_unlock();

		/* We have already checked bad blocks for reads.  Now
		 * need to check for writes.  We never accept write errors
		 * on the replacement, so we don't need to check rrdev.
		 */
		while (op_is_write(op) && rdev &&
		       test_bit(WriteErrorSeen, &rdev->flags)) {
			sector_t first_bad;
			int bad_sectors;
			int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
					      &first_bad, &bad_sectors);
			if (!bad)
				break;

			if (bad < 0) {
				set_bit(BlockedBadBlocks, &rdev->flags);
				if (!conf->mddev->external &&
				    conf->mddev->sb_flags) {
					/* It is very unlikely, but we might
					 * still need to write out the
					 * bad block log - better give it
					 * a chance*/
					md_check_recovery(conf->mddev);
				}
				/*
				 * Because md_wait_for_blocked_rdev
				 * will dec nr_pending, we must
				 * increment it first.
				 */
				atomic_inc(&rdev->nr_pending);
				md_wait_for_blocked_rdev(rdev, conf->mddev);
			} else {
				/* Acknowledged bad block - skip the write */
				rdev_dec_pending(rdev, conf->mddev);
				rdev = NULL;
			}
		}

		if (rdev) {
			if (s->syncing || s->expanding || s->expanded
			    || s->replacing)
				md_sync_acct(rdev->bdev, STRIPE_SECTORS);

			set_bit(STRIPE_IO_STARTED, &sh->state);

			bi->bi_bdev = rdev->bdev;
			bio_set_op_attrs(bi, op, op_flags);
			bi->bi_end_io = op_is_write(op)
				? raid5_end_write_request
				: raid5_end_read_request;
			bi->bi_private = sh;

			pr_debug("%s: for %llu schedule op %d on disc %d\n",
				 __func__, (unsigned long long)sh->sector,
				 bi->bi_opf, i);
			atomic_inc(&sh->count);
			if (sh != head_sh)
				atomic_inc(&head_sh->count);
			if (use_new_offset(conf, sh))
				bi->bi_iter.bi_sector = (sh->sector
						 + rdev->new_data_offset);
			else
				bi->bi_iter.bi_sector = (sh->sector
						 + rdev->data_offset);
			if (test_bit(R5_ReadNoMerge, &head_sh->dev[i].flags))
				bi->bi_opf |= REQ_NOMERGE;

			if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
				WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));

			if (!op_is_write(op) &&
			    test_bit(R5_InJournal, &sh->dev[i].flags))
				/*
				 * issuing read for a page in journal, this
				 * must be preparing for prexor in rmw; read
				 * the data into orig_page
				 */
				sh->dev[i].vec.bv_page = sh->dev[i].orig_page;
			else
				sh->dev[i].vec.bv_page = sh->dev[i].page;
			bi->bi_vcnt = 1;
			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
			bi->bi_io_vec[0].bv_offset = 0;
			bi->bi_iter.bi_size = STRIPE_SIZE;
			/*
			 * If this is a discard request, set bi_vcnt to 0. We
			 * don't want to confuse SCSI because SCSI will replace
			 * the payload
			 */
			if (op == REQ_OP_DISCARD)
				bi->bi_vcnt = 0;
			if (rrdev)
				set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);

			if (conf->mddev->gendisk)
				trace_block_bio_remap(bdev_get_queue(bi->bi_bdev),
						      bi, disk_devt(conf->mddev->gendisk),
						      sh->dev[i].sector);
			defer_bio_issue(conf, bi);
		}
		if (rrdev) {
			if (s->syncing || s->expanding || s->expanded
			    || s->replacing)
				md_sync_acct(rrdev->bdev, STRIPE_SECTORS);

			set_bit(STRIPE_IO_STARTED, &sh->state);

			rbi->bi_bdev = rrdev->bdev;
			bio_set_op_attrs(rbi, op, op_flags);
			BUG_ON(!op_is_write(op));
			rbi->bi_end_io = raid5_end_write_request;
			rbi->bi_private = sh;

			pr_debug("%s: for %llu schedule op %d on "
				 "replacement disc %d\n",
				 __func__, (unsigned long long)sh->sector,
				 rbi->bi_opf, i);
			atomic_inc(&sh->count);
			if (sh != head_sh)
				atomic_inc(&head_sh->count);
			if (use_new_offset(conf, sh))
				rbi->bi_iter.bi_sector = (sh->sector
						  + rrdev->new_data_offset);
			else
				rbi->bi_iter.bi_sector = (sh->sector
						  + rrdev->data_offset);
			if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
				WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
			sh->dev[i].rvec.bv_page = sh->dev[i].page;
			rbi->bi_vcnt = 1;
			rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
			rbi->bi_io_vec[0].bv_offset = 0;
			rbi->bi_iter.bi_size = STRIPE_SIZE;
			/*
			 * If this is a discard request, set bi_vcnt to 0. We
			 * don't want to confuse SCSI because SCSI will replace
			 * the payload
			 */
			if (op == REQ_OP_DISCARD)
				rbi->bi_vcnt = 0;
			if (conf->mddev->gendisk)
				trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
						      rbi, disk_devt(conf->mddev->gendisk),
						      sh->dev[i].sector);
			defer_bio_issue(conf, rbi);
		}
		if (!rdev && !rrdev) {
			if (op_is_write(op))
				set_bit(STRIPE_DEGRADED, &sh->state);
			pr_debug("skip op %d on disc %d for sector %llu\n",
				 bi->bi_opf, i, (unsigned long long)sh->sector);
			clear_bit(R5_LOCKED, &sh->dev[i].flags);
			set_bit(STRIPE_HANDLE, &sh->state);
		}

		if (!head_sh->batch_head)
			continue;
		sh = list_first_entry(&sh->batch_list, struct stripe_head,
				      batch_list);
		if (sh != head_sh)
			goto again;
	}
}
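/*
 * async_copy_data() copies between a bio and a stripe cache page,
 * clipping each bio_vec segment to the STRIPE_SIZE window that starts
 * at 'sector'.  When skip_copy is enabled, a bio that fully overwrites
 * the page may donate its own page instead (see R5_SkipCopy), avoiding
 * the memcpy altogether.
 */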
static struct dma_async_tx_descriptor *
async_copy_data(int frombio, struct bio *bio, struct page **page,
		sector_t sector, struct dma_async_tx_descriptor *tx,
		struct stripe_head *sh, int no_skipcopy)
{
	struct bio_vec bvl;
	struct bvec_iter iter;
	struct page *bio_page;
	int page_offset;
	struct async_submit_ctl submit;
	enum async_tx_flags flags = 0;

	if (bio->bi_iter.bi_sector >= sector)
		page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
	else
		page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;

	if (frombio)
		flags |= ASYNC_TX_FENCE;
	init_async_submit(&submit, flags, tx, NULL, NULL, NULL);

	bio_for_each_segment(bvl, bio, iter) {
		int len = bvl.bv_len;
		int clen;
		int b_offset = 0;

		if (page_offset < 0) {
			b_offset = -page_offset;
			page_offset += b_offset;
			len -= b_offset;
		}

		if (len > 0 && page_offset + len > STRIPE_SIZE)
			clen = STRIPE_SIZE - page_offset;
		else
			clen = len;

		if (clen > 0) {
			b_offset += bvl.bv_offset;
			bio_page = bvl.bv_page;
			if (frombio) {
				if (sh->raid_conf->skip_copy &&
				    b_offset == 0 && page_offset == 0 &&
				    clen == STRIPE_SIZE &&
				    !no_skipcopy)
					*page = bio_page;
				else
					tx = async_memcpy(*page, bio_page, page_offset,
							  b_offset, clen, &submit);
			} else
				tx = async_memcpy(bio_page, *page, b_offset,
						  page_offset, clen, &submit);
		}
		/* chain the operations */
		submit.depend_tx = tx;

		if (clen < len) /* hit end of page */
			break;
		page_offset += len;
	}

	return tx;
}
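/*
 * ops_run_biofill() and ops_complete_biofill() form a pair: the first
 * queues async copies from the stripe cache into the pending read bios
 * ("toread" becomes "read"), the second ends those bios once the copies
 * have completed.  New R5_Wantfill requests are held off while
 * STRIPE_BIOFILL_RUN is set.
 */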
static void ops_complete_biofill(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	struct bio_list return_bi = BIO_EMPTY_LIST;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		 (unsigned long long)sh->sector);

	/* clear completed biofills */
	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		/* acknowledge completion of a biofill operation */
		/* and check if we need to reply to a read request,
		 * new R5_Wantfill requests are held off until
		 * !STRIPE_BIOFILL_RUN
		 */
		if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi, *rbi2;

			BUG_ON(!dev->read);
			rbi = dev->read;
			dev->read = NULL;
			while (rbi && rbi->bi_iter.bi_sector <
			       dev->sector + STRIPE_SECTORS) {
				rbi2 = r5_next_bio(rbi, dev->sector);
				if (!raid5_dec_bi_active_stripes(rbi))
					bio_list_add(&return_bi, rbi);
				rbi = rbi2;
			}
		}
	}
	clear_bit(STRIPE_BIOFILL_RUN, &sh->state);

	return_io(&return_bi);

	set_bit(STRIPE_HANDLE, &sh->state);
	raid5_release_stripe(sh);
}

static void ops_run_biofill(struct stripe_head *sh)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct async_submit_ctl submit;
	int i;

	BUG_ON(sh->batch_head);
	pr_debug("%s: stripe %llu\n", __func__,
		 (unsigned long long)sh->sector);

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (test_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi;

			spin_lock_irq(&sh->stripe_lock);
			dev->read = rbi = dev->toread;
			dev->toread = NULL;
			spin_unlock_irq(&sh->stripe_lock);
			while (rbi && rbi->bi_iter.bi_sector <
			       dev->sector + STRIPE_SECTORS) {
				tx = async_copy_data(0, rbi, &dev->page,
						     dev->sector, tx, sh, 0);
				rbi = r5_next_bio(rbi, dev->sector);
			}
		}
	}

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
	async_trigger_callback(&submit);
}

static void mark_target_uptodate(struct stripe_head *sh, int target)
{
	struct r5dev *tgt;

	if (target < 0)
		return;

	tgt = &sh->dev[target];
	set_bit(R5_UPTODATE, &tgt->flags);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	clear_bit(R5_Wantcompute, &tgt->flags);
}

static void ops_complete_compute(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		 (unsigned long long)sh->sector);

	/* mark the computed target(s) as uptodate */
	mark_target_uptodate(sh, sh->ops.target);
	mark_target_uptodate(sh, sh->ops.target2);

	clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
	if (sh->check_state == check_state_compute_run)
		sh->check_state = check_state_compute_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	raid5_release_stripe(sh);
}
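/*
 * Layout of one per-cpu scribble element (see scribble_alloc() below):
 * an array of disks+2 struct page pointers followed by disks+2
 * addr_conv_t slots, so to_addr_conv() is simply to_addr_page()
 * advanced past the page-pointer block.
 */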
/* return a pointer to the address conversion region of the scribble buffer */
static addr_conv_t *to_addr_conv(struct stripe_head *sh,
				 struct raid5_percpu *percpu, int i)
{
	void *addr;

	addr = flex_array_get(percpu->scribble, i);
	return addr + sizeof(struct page *) * (sh->disks + 2);
}

/* return a pointer to the page array of the scribble buffer */
static struct page **to_addr_page(struct raid5_percpu *percpu, int i)
{
	void *addr;

	addr = flex_array_get(percpu->scribble, i);
	return addr;
}
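/*
 * ops_run_compute5() rebuilds a single missing block on RAID-4/5 by
 * XOR-ing every other block into the target page.  The RAID-6 variants
 * below recover one (ops_run_compute6_1) or two (ops_run_compute6_2)
 * missing blocks with the P/Q syndrome helpers instead.
 */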
static struct dma_async_tx_descriptor *
ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	struct page **xor_srcs = to_addr_page(percpu, 0);
	int target = sh->ops.target;
	struct r5dev *tgt = &sh->dev[target];
	struct page *xor_dest = tgt->page;
	int count = 0;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	int i;

	BUG_ON(sh->batch_head);

	pr_debug("%s: stripe %llu block: %d\n",
		 __func__, (unsigned long long)sh->sector, target);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));

	for (i = disks; i--; )
		if (i != target)
			xor_srcs[count++] = sh->dev[i].page;

	atomic_inc(&sh->count);

	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
			  ops_complete_compute, sh, to_addr_conv(sh, percpu, 0));
	if (unlikely(count == 1))
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
	else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

	return tx;
}

/* set_syndrome_sources - populate source buffers for gen_syndrome
 * @srcs - (struct page *) array of size sh->disks
 * @sh - stripe_head to parse
 *
 * Populates srcs in proper layout order for the stripe and returns the
 * 'count' of sources to be used in a call to async_gen_syndrome. The P
 * destination buffer is recorded in srcs[count] and the Q destination
 * is recorded in srcs[count+1].
 */
static int set_syndrome_sources(struct page **srcs,
				struct stripe_head *sh,
				int srctype)
{
	int disks = sh->disks;
	int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
	int d0_idx = raid6_d0(sh);
	int count;
	int i;

	for (i = 0; i < disks; i++)
		srcs[i] = NULL;

	count = 0;
	i = d0_idx;
	do {
		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
		struct r5dev *dev = &sh->dev[i];

		if (i == sh->qd_idx || i == sh->pd_idx ||
		    (srctype == SYNDROME_SRC_ALL) ||
		    (srctype == SYNDROME_SRC_WANT_DRAIN &&
		     (test_bit(R5_Wantdrain, &dev->flags) ||
		      test_bit(R5_InJournal, &dev->flags))) ||
		    (srctype == SYNDROME_SRC_WRITTEN &&
		     dev->written)) {
			if (test_bit(R5_InJournal, &dev->flags))
				srcs[slot] = sh->dev[i].orig_page;
			else
				srcs[slot] = sh->dev[i].page;
		}
		i = raid6_next_disk(i, disks);
	} while (i != d0_idx);

	return syndrome_disks;
}

static struct dma_async_tx_descriptor *
ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	struct page **blocks = to_addr_page(percpu, 0);
	int target;
	int qd_idx = sh->qd_idx;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	struct r5dev *tgt;
	struct page *dest;
	int i;
	int count;

	BUG_ON(sh->batch_head);
	if (sh->ops.target < 0)
		target = sh->ops.target2;
	else if (sh->ops.target2 < 0)
		target = sh->ops.target;
	else
		/* we should only have one valid target */
		BUG();
	BUG_ON(target < 0);
	pr_debug("%s: stripe %llu block: %d\n",
		 __func__, (unsigned long long)sh->sector, target);

	tgt = &sh->dev[target];
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	dest = tgt->page;

	atomic_inc(&sh->count);

	if (target == qd_idx) {
		count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL);
		blocks[count] = NULL; /* regenerating p is not necessary */
		BUG_ON(blocks[count+1] != dest); /* q should already be set */
		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
				  ops_complete_compute, sh,
				  to_addr_conv(sh, percpu, 0));
		tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
	} else {
		/* Compute any data- or p-drive using XOR */
		count = 0;
		for (i = disks; i--; ) {
			if (i == target || i == qd_idx)
				continue;
			blocks[count++] = sh->dev[i].page;
		}

		init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
				  NULL, ops_complete_compute, sh,
				  to_addr_conv(sh, percpu, 0));
		tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
	}

	return tx;
}
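/*
 * ops_run_compute6_2() distinguishes the recovery cases by which two
 * blocks are missing: P+Q is plain syndrome regeneration, D+Q rebuilds
 * the data block by XOR and then regenerates Q, D+P uses
 * async_raid6_datap_recov(), and D+D uses async_raid6_2data_recov().
 */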
static struct dma_async_tx_descriptor *
ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int i, count, disks = sh->disks;
	int syndrome_disks = sh->ddf_layout ? disks : disks-2;
	int d0_idx = raid6_d0(sh);
	int faila = -1, failb = -1;
	int target = sh->ops.target;
	int target2 = sh->ops.target2;
	struct r5dev *tgt = &sh->dev[target];
	struct r5dev *tgt2 = &sh->dev[target2];
	struct dma_async_tx_descriptor *tx;
	struct page **blocks = to_addr_page(percpu, 0);
	struct async_submit_ctl submit;

	BUG_ON(sh->batch_head);
	pr_debug("%s: stripe %llu block1: %d block2: %d\n",
		 __func__, (unsigned long long)sh->sector, target, target2);
	BUG_ON(target < 0 || target2 < 0);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags));

	/* we need to open-code set_syndrome_sources to handle the
	 * slot number conversion for 'faila' and 'failb'
	 */
	for (i = 0; i < disks ; i++)
		blocks[i] = NULL;
	count = 0;
	i = d0_idx;
	do {
		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

		blocks[slot] = sh->dev[i].page;

		if (i == target)
			faila = slot;
		if (i == target2)
			failb = slot;
		i = raid6_next_disk(i, disks);
	} while (i != d0_idx);

	BUG_ON(faila == failb);
	if (failb < faila)
		swap(faila, failb);
	pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
		 __func__, (unsigned long long)sh->sector, faila, failb);

	atomic_inc(&sh->count);

	if (failb == syndrome_disks+1) {
		/* Q disk is one of the missing disks */
		if (faila == syndrome_disks) {
			/* Missing P+Q, just recompute */
			init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
					  ops_complete_compute, sh,
					  to_addr_conv(sh, percpu, 0));
			return async_gen_syndrome(blocks, 0, syndrome_disks+2,
						  STRIPE_SIZE, &submit);
		} else {
			struct page *dest;
			int data_target;
			int qd_idx = sh->qd_idx;

			/* Missing D+Q: recompute D from P, then recompute Q */
			if (target == qd_idx)
				data_target = target2;
			else
				data_target = target;

			count = 0;
			for (i = disks; i--; ) {
				if (i == data_target || i == qd_idx)
					continue;
				blocks[count++] = sh->dev[i].page;
			}
			dest = sh->dev[data_target].page;
			init_async_submit(&submit,
					  ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
					  NULL, NULL, NULL,
					  to_addr_conv(sh, percpu, 0));
			tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE,
				       &submit);

			count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL);
			init_async_submit(&submit, ASYNC_TX_FENCE, tx,
					  ops_complete_compute, sh,
					  to_addr_conv(sh, percpu, 0));
			return async_gen_syndrome(blocks, 0, count+2,
						  STRIPE_SIZE, &submit);
		}
	} else {
		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
				  ops_complete_compute, sh,
				  to_addr_conv(sh, percpu, 0));
		if (failb == syndrome_disks) {
			/* We're missing D+P. */
			return async_raid6_datap_recov(syndrome_disks+2,
						       STRIPE_SIZE, faila,
						       blocks, &submit);
		} else {
			/* We're missing D+D. */
			return async_raid6_2data_recov(syndrome_disks+2,
						       STRIPE_SIZE, faila, failb,
						       blocks, &submit);
		}
	}
}

static void ops_complete_prexor(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		 (unsigned long long)sh->sector);

	if (r5c_is_writeback(sh->raid_conf->log))
		/*
		 * raid5-cache write back uses orig_page during prexor.
		 * After prexor, it is time to free orig_page
		 */
		r5c_release_extra_page(sh);
}

static struct dma_async_tx_descriptor *
ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu,
		struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	struct page **xor_srcs = to_addr_page(percpu, 0);
	int count = 0, pd_idx = sh->pd_idx, i;
	struct async_submit_ctl submit;

	/* existing parity data subtracted */
	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;

	BUG_ON(sh->batch_head);
	pr_debug("%s: stripe %llu\n", __func__,
		 (unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		/* Only process blocks that are known to be uptodate */
		if (test_bit(R5_InJournal, &dev->flags))
			xor_srcs[count++] = dev->orig_page;
		else if (test_bit(R5_Wantdrain, &dev->flags))
			xor_srcs[count++] = dev->page;
	}

	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
			  ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0));
	tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

	return tx;
}

static struct dma_async_tx_descriptor *
ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu,
		struct dma_async_tx_descriptor *tx)
{
	struct page **blocks = to_addr_page(percpu, 0);
	int count;
	struct async_submit_ctl submit;

	pr_debug("%s: stripe %llu\n", __func__,
		 (unsigned long long)sh->sector);

	count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_WANT_DRAIN);

	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_PQ_XOR_DST, tx,
			  ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0));
	tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);

	return tx;
}

static struct dma_async_tx_descriptor *
ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
	struct r5conf *conf = sh->raid_conf;
	int disks = sh->disks;
	int i;
	struct stripe_head *head_sh = sh;

	pr_debug("%s: stripe %llu\n", __func__,
		 (unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev;
		struct bio *chosen;

		sh = head_sh;
		if (test_and_clear_bit(R5_Wantdrain, &head_sh->dev[i].flags)) {
			struct bio *wbi;

again:
			dev = &sh->dev[i];
			/*
			 * clear R5_InJournal, so when rewriting a page in
			 * journal, it is not skipped by r5l_log_stripe()
			 */
			clear_bit(R5_InJournal, &dev->flags);
			spin_lock_irq(&sh->stripe_lock);
			chosen = dev->towrite;
			dev->towrite = NULL;
			sh->overwrite_disks = 0;
			BUG_ON(dev->written);
			wbi = dev->written = chosen;
			spin_unlock_irq(&sh->stripe_lock);
			WARN_ON(dev->page != dev->orig_page);

			while (wbi && wbi->bi_iter.bi_sector <
			       dev->sector + STRIPE_SECTORS) {
				if (wbi->bi_opf & REQ_FUA)
					set_bit(R5_WantFUA, &dev->flags);
				if (wbi->bi_opf & REQ_SYNC)
					set_bit(R5_SyncIO, &dev->flags);
				if (bio_op(wbi) == REQ_OP_DISCARD)
					set_bit(R5_Discard, &dev->flags);
				else {
					tx = async_copy_data(1, wbi, &dev->page,
							     dev->sector, tx, sh,
							     r5c_is_writeback(conf->log));
					if (dev->page != dev->orig_page &&
					    !r5c_is_writeback(conf->log)) {
						set_bit(R5_SkipCopy, &dev->flags);
						clear_bit(R5_UPTODATE, &dev->flags);
						clear_bit(R5_OVERWRITE, &dev->flags);
					}
				}
				wbi = r5_next_bio(wbi, dev->sector);
			}

			if (head_sh->batch_head) {
				sh = list_first_entry(&sh->batch_list,
						      struct stripe_head,
						      batch_list);
				if (sh == head_sh)
					continue;
				goto again;
			}
		}
	}

	return tx;
}

static void ops_complete_reconstruct(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	int disks = sh->disks;
	int pd_idx = sh->pd_idx;
	int qd_idx = sh->qd_idx;
	int i;
	bool fua = false, sync = false, discard = false;

	pr_debug("%s: stripe %llu\n", __func__,
		 (unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		fua |= test_bit(R5_WantFUA, &sh->dev[i].flags);
		sync |= test_bit(R5_SyncIO, &sh->dev[i].flags);
		discard |= test_bit(R5_Discard, &sh->dev[i].flags);
	}

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->written || i == pd_idx || i == qd_idx) {
			if (!discard && !test_bit(R5_SkipCopy, &dev->flags))
				set_bit(R5_UPTODATE, &dev->flags);
			if (fua)
				set_bit(R5_WantFUA, &dev->flags);
			if (sync)
				set_bit(R5_SyncIO, &dev->flags);
		}
	}

	if (sh->reconstruct_state == reconstruct_state_drain_run)
		sh->reconstruct_state = reconstruct_state_drain_result;
	else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
		sh->reconstruct_state = reconstruct_state_prexor_drain_result;
	else {
		BUG_ON(sh->reconstruct_state != reconstruct_state_run);
		sh->reconstruct_state = reconstruct_state_result;
	}

	set_bit(STRIPE_HANDLE, &sh->state);
	raid5_release_stripe(sh);
}

static void
ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
		     struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	struct page **xor_srcs;
	struct async_submit_ctl submit;
	int count, pd_idx = sh->pd_idx, i;
	struct page *xor_dest;
	int prexor = 0;
	unsigned long flags;
	int j = 0;
	struct stripe_head *head_sh = sh;
	int last_stripe;

	pr_debug("%s: stripe %llu\n", __func__,
		 (unsigned long long)sh->sector);

	for (i = 0; i < sh->disks; i++) {
		if (pd_idx == i)
			continue;
		if (!test_bit(R5_Discard, &sh->dev[i].flags))
			break;
	}
	if (i >= sh->disks) {
		atomic_inc(&sh->count);
		set_bit(R5_Discard, &sh->dev[pd_idx].flags);
		ops_complete_reconstruct(sh);
		return;
	}
again:
	count = 0;
	xor_srcs = to_addr_page(percpu, j);
	/* check if prexor is active which means only process blocks
	 * that are part of a read-modify-write (written)
	 */
	if (head_sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
		prexor = 1;
		xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];

			if (head_sh->dev[i].written ||
			    test_bit(R5_InJournal, &head_sh->dev[i].flags))
				xor_srcs[count++] = dev->page;
		}
	} else {
		xor_dest = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];

			if (i != pd_idx)
				xor_srcs[count++] = dev->page;
		}
	}

	/* 1/ if we prexor'd then the dest is reused as a source
	 * 2/ if we did not prexor then we are redoing the parity
	 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
	 * for the synchronous xor case
	 */
	last_stripe = !head_sh->batch_head ||
		list_first_entry(&sh->batch_list,
				 struct stripe_head, batch_list) == head_sh;
	if (last_stripe) {
		flags = ASYNC_TX_ACK |
			(prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);

		atomic_inc(&head_sh->count);
		init_async_submit(&submit, flags, tx, ops_complete_reconstruct, head_sh,
				  to_addr_conv(sh, percpu, j));
	} else {
		flags = prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST;
		init_async_submit(&submit, flags, tx, NULL, NULL,
				  to_addr_conv(sh, percpu, j));
	}

	if (unlikely(count == 1))
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
	else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
	if (!last_stripe) {
		j++;
		sh = list_first_entry(&sh->batch_list, struct stripe_head,
				      batch_list);
		goto again;
	}
}

static void
ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
		     struct dma_async_tx_descriptor *tx)
{
	struct async_submit_ctl submit;
	struct page **blocks;
	int count, i, j = 0;
	struct stripe_head *head_sh = sh;
	int last_stripe;
	int synflags;
	unsigned long txflags;

	pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);

	for (i = 0; i < sh->disks; i++) {
		if (sh->pd_idx == i || sh->qd_idx == i)
			continue;
		if (!test_bit(R5_Discard, &sh->dev[i].flags))
			break;
	}
	if (i >= sh->disks) {
		atomic_inc(&sh->count);
		set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
		set_bit(R5_Discard, &sh->dev[sh->qd_idx].flags);
		ops_complete_reconstruct(sh);
		return;
	}

again:
	blocks = to_addr_page(percpu, j);

	if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
		synflags = SYNDROME_SRC_WRITTEN;
		txflags = ASYNC_TX_ACK | ASYNC_TX_PQ_XOR_DST;
	} else {
		synflags = SYNDROME_SRC_ALL;
		txflags = ASYNC_TX_ACK;
	}

	count = set_syndrome_sources(blocks, sh, synflags);
	last_stripe = !head_sh->batch_head ||
		list_first_entry(&sh->batch_list,
				 struct stripe_head, batch_list) == head_sh;

	if (last_stripe) {
		atomic_inc(&head_sh->count);
		init_async_submit(&submit, txflags, tx, ops_complete_reconstruct,
				  head_sh, to_addr_conv(sh, percpu, j));
	} else
		init_async_submit(&submit, 0, tx, NULL, NULL,
				  to_addr_conv(sh, percpu, j));
	tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
	if (!last_stripe) {
		j++;
		sh = list_first_entry(&sh->batch_list, struct stripe_head,
				      batch_list);
		goto again;
	}
}

static void ops_complete_check(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		 (unsigned long long)sh->sector);

	sh->check_state = check_state_check_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	raid5_release_stripe(sh);
}
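/*
 * The check paths only validate redundancy: ops_run_check_p() XOR-sums
 * the data blocks against P, while ops_run_check_pq() re-runs the
 * syndrome over the sources and compares the result with P and/or Q
 * ('checkp' selects whether P is included).  Either way the outcome
 * lands in sh->ops.zero_sum_result.
 */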
xor_dest = sh->dev[pd_idx].page; 1939 xor_srcs[count++] = xor_dest; 1940 for (i = disks; i--; ) { 1941 if (i == pd_idx || i == qd_idx) 1942 continue; 1943 xor_srcs[count++] = sh->dev[i].page; 1944 } 1945 1946 init_async_submit(&submit, 0, NULL, NULL, NULL, 1947 to_addr_conv(sh, percpu, 0)); 1948 tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 1949 &sh->ops.zero_sum_result, &submit); 1950 1951 atomic_inc(&sh->count); 1952 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL); 1953 tx = async_trigger_callback(&submit); 1954 } 1955 1956 static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp) 1957 { 1958 struct page **srcs = to_addr_page(percpu, 0); 1959 struct async_submit_ctl submit; 1960 int count; 1961 1962 pr_debug("%s: stripe %llu checkp: %d\n", __func__, 1963 (unsigned long long)sh->sector, checkp); 1964 1965 BUG_ON(sh->batch_head); 1966 count = set_syndrome_sources(srcs, sh, SYNDROME_SRC_ALL); 1967 if (!checkp) 1968 srcs[count] = NULL; 1969 1970 atomic_inc(&sh->count); 1971 init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check, 1972 sh, to_addr_conv(sh, percpu, 0)); 1973 async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE, 1974 &sh->ops.zero_sum_result, percpu->spare_page, &submit); 1975 } 1976 1977 static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) 1978 { 1979 int overlap_clear = 0, i, disks = sh->disks; 1980 struct dma_async_tx_descriptor *tx = NULL; 1981 struct r5conf *conf = sh->raid_conf; 1982 int level = conf->level; 1983 struct raid5_percpu *percpu; 1984 unsigned long cpu; 1985 1986 cpu = get_cpu(); 1987 percpu = per_cpu_ptr(conf->percpu, cpu); 1988 if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) { 1989 ops_run_biofill(sh); 1990 overlap_clear++; 1991 } 1992 1993 if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) { 1994 if (level < 6) 1995 tx = ops_run_compute5(sh, percpu); 1996 else { 1997 if (sh->ops.target2 < 0 || sh->ops.target < 0) 1998 tx = ops_run_compute6_1(sh, percpu); 1999 else 2000 tx = ops_run_compute6_2(sh, percpu); 2001 } 2002 /* terminate the chain if reconstruct is not set to be run */ 2003 if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) 2004 async_tx_ack(tx); 2005 } 2006 2007 if (test_bit(STRIPE_OP_PREXOR, &ops_request)) { 2008 if (level < 6) 2009 tx = ops_run_prexor5(sh, percpu, tx); 2010 else 2011 tx = ops_run_prexor6(sh, percpu, tx); 2012 } 2013 2014 if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) { 2015 tx = ops_run_biodrain(sh, tx); 2016 overlap_clear++; 2017 } 2018 2019 if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) { 2020 if (level < 6) 2021 ops_run_reconstruct5(sh, percpu, tx); 2022 else 2023 ops_run_reconstruct6(sh, percpu, tx); 2024 } 2025 2026 if (test_bit(STRIPE_OP_CHECK, &ops_request)) { 2027 if (sh->check_state == check_state_run) 2028 ops_run_check_p(sh, percpu); 2029 else if (sh->check_state == check_state_run_q) 2030 ops_run_check_pq(sh, percpu, 0); 2031 else if (sh->check_state == check_state_run_pq) 2032 ops_run_check_pq(sh, percpu, 1); 2033 else 2034 BUG(); 2035 } 2036 2037 if (overlap_clear && !sh->batch_head) 2038 for (i = disks; i--; ) { 2039 struct r5dev *dev = &sh->dev[i]; 2040 if (test_and_clear_bit(R5_Overlap, &dev->flags)) 2041 wake_up(&sh->raid_conf->wait_for_overlap); 2042 } 2043 put_cpu(); 2044 } 2045 2046 static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp, 2047 int disks) 2048 { 2049 struct stripe_head *sh; 2050 int i; 2051 2052 sh = kmem_cache_zalloc(sc, gfp); 2053 if (sh) { 2054 
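/*
 * Fresh allocation: set up the stripe's locks and lists and take the
 * initial reference (count = 1) before the per-device request bios
 * are initialised with their single-entry bio_vecs below.
 */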
spin_lock_init(&sh->stripe_lock); 2055 spin_lock_init(&sh->batch_lock); 2056 INIT_LIST_HEAD(&sh->batch_list); 2057 INIT_LIST_HEAD(&sh->lru); 2058 INIT_LIST_HEAD(&sh->r5c); 2059 INIT_LIST_HEAD(&sh->log_list); 2060 atomic_set(&sh->count, 1); 2061 sh->log_start = MaxSector; 2062 for (i = 0; i < disks; i++) { 2063 struct r5dev *dev = &sh->dev[i]; 2064 2065 bio_init(&dev->req, &dev->vec, 1); 2066 bio_init(&dev->rreq, &dev->rvec, 1); 2067 } 2068 } 2069 return sh; 2070 } 2071 static int grow_one_stripe(struct r5conf *conf, gfp_t gfp) 2072 { 2073 struct stripe_head *sh; 2074 2075 sh = alloc_stripe(conf->slab_cache, gfp, conf->pool_size); 2076 if (!sh) 2077 return 0; 2078 2079 sh->raid_conf = conf; 2080 2081 if (grow_buffers(sh, gfp)) { 2082 shrink_buffers(sh); 2083 kmem_cache_free(conf->slab_cache, sh); 2084 return 0; 2085 } 2086 sh->hash_lock_index = 2087 conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS; 2088 /* we just created an active stripe so... */ 2089 atomic_inc(&conf->active_stripes); 2090 2091 raid5_release_stripe(sh); 2092 conf->max_nr_stripes++; 2093 return 1; 2094 } 2095 2096 static int grow_stripes(struct r5conf *conf, int num) 2097 { 2098 struct kmem_cache *sc; 2099 int devs = max(conf->raid_disks, conf->previous_raid_disks); 2100 2101 if (conf->mddev->gendisk) 2102 sprintf(conf->cache_name[0], 2103 "raid%d-%s", conf->level, mdname(conf->mddev)); 2104 else 2105 sprintf(conf->cache_name[0], 2106 "raid%d-%p", conf->level, conf->mddev); 2107 sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]); 2108 2109 conf->active_name = 0; 2110 sc = kmem_cache_create(conf->cache_name[conf->active_name], 2111 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev), 2112 0, 0, NULL); 2113 if (!sc) 2114 return 1; 2115 conf->slab_cache = sc; 2116 conf->pool_size = devs; 2117 while (num--) 2118 if (!grow_one_stripe(conf, GFP_KERNEL)) 2119 return 1; 2120 2121 return 0; 2122 } 2123 2124 /** 2125 * scribble_len - return the required size of the scribble region 2126 * @num - total number of disks in the array 2127 * 2128 * The size must be enough to contain: 2129 * 1/ a struct page pointer for each device in the array +2 2130 * 2/ room to convert each entry in (1) to its corresponding dma 2131 * (dma_map_page()) or page (page_address()) address. 2132 * 2133 * Note: the +2 is for the destination buffers of the ddf/raid6 case where we 2134 * calculate over all devices (not just the data blocks), using zeros in place 2135 * of the P and Q blocks. 2136 */ 2137 static struct flex_array *scribble_alloc(int num, int cnt, gfp_t flags) 2138 { 2139 struct flex_array *ret; 2140 size_t len; 2141 2142 len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2); 2143 ret = flex_array_alloc(len, cnt, flags); 2144 if (!ret) 2145 return NULL; 2146 /* always prealloc all elements, so no locking is required */ 2147 if (flex_array_prealloc(ret, 0, cnt, flags)) { 2148 flex_array_free(ret); 2149 return NULL; 2150 } 2151 return ret; 2152 } 2153 2154 static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors) 2155 { 2156 unsigned long cpu; 2157 int err = 0; 2158 2159 /* 2160 * Never shrink. And mddev_suspend() could deadlock if this is called 2161 * from raid5d. 
In that case, scribble_disks and scribble_sectors 2162 * should be equal to new_disks and new_sectors 2163 */ 2164 if (conf->scribble_disks >= new_disks && 2165 conf->scribble_sectors >= new_sectors) 2166 return 0; 2167 mddev_suspend(conf->mddev); 2168 get_online_cpus(); 2169 for_each_present_cpu(cpu) { 2170 struct raid5_percpu *percpu; 2171 struct flex_array *scribble; 2172 2173 percpu = per_cpu_ptr(conf->percpu, cpu); 2174 scribble = scribble_alloc(new_disks, 2175 new_sectors / STRIPE_SECTORS, 2176 GFP_NOIO); 2177 2178 if (scribble) { 2179 flex_array_free(percpu->scribble); 2180 percpu->scribble = scribble; 2181 } else { 2182 err = -ENOMEM; 2183 break; 2184 } 2185 } 2186 put_online_cpus(); 2187 mddev_resume(conf->mddev); 2188 if (!err) { 2189 conf->scribble_disks = new_disks; 2190 conf->scribble_sectors = new_sectors; 2191 } 2192 return err; 2193 } 2194 2195 static int resize_stripes(struct r5conf *conf, int newsize) 2196 { 2197 /* Make all the stripes able to hold 'newsize' devices. 2198 * New slots in each stripe get 'page' set to a new page. 2199 * 2200 * This happens in stages: 2201 * 1/ create a new kmem_cache and allocate the required number of 2202 * stripe_heads. 2203 * 2/ gather all the old stripe_heads and transfer the pages across 2204 * to the new stripe_heads. This will have the side effect of 2205 * freezing the array as once all stripe_heads have been collected, 2206 * no IO will be possible. Old stripe heads are freed once their 2207 * pages have been transferred over, and the old kmem_cache is 2208 * freed when all stripes are done. 2209 * 3/ reallocate conf->disks to be suitably bigger. If this fails, 2210 * we simply return a failure status - no need to clean anything up. 2211 * 4/ allocate new pages for the new slots in the new stripe_heads. 2212 * If this fails, we don't bother trying to shrink the 2213 * stripe_heads down again, we just leave them as they are. 2214 * As each stripe_head is processed the new one is released into 2215 * active service. 2216 * 2217 * Once step2 is started, we cannot afford to wait for a write, 2218 * so we use GFP_NOIO allocations. 2219 */ 2220 struct stripe_head *osh, *nsh; 2221 LIST_HEAD(newstripes); 2222 struct disk_info *ndisks; 2223 int err; 2224 struct kmem_cache *sc; 2225 int i; 2226 int hash, cnt; 2227 2228 if (newsize <= conf->pool_size) 2229 return 0; /* never bother to shrink */ 2230 2231 err = md_allow_write(conf->mddev); 2232 if (err) 2233 return err; 2234 2235 /* Step 1 */ 2236 sc = kmem_cache_create(conf->cache_name[1-conf->active_name], 2237 sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev), 2238 0, 0, NULL); 2239 if (!sc) 2240 return -ENOMEM; 2241 2242 /* Need to ensure auto-resizing doesn't interfere */ 2243 mutex_lock(&conf->cache_size_mutex); 2244 2245 for (i = conf->max_nr_stripes; i; i--) { 2246 nsh = alloc_stripe(sc, GFP_KERNEL, newsize); 2247 if (!nsh) 2248 break; 2249 2250 nsh->raid_conf = conf; 2251 list_add(&nsh->lru, &newstripes); 2252 } 2253 if (i) { 2254 /* didn't get enough, give up */ 2255 while (!list_empty(&newstripes)) { 2256 nsh = list_entry(newstripes.next, struct stripe_head, lru); 2257 list_del(&nsh->lru); 2258 kmem_cache_free(sc, nsh); 2259 } 2260 kmem_cache_destroy(sc); 2261 mutex_unlock(&conf->cache_size_mutex); 2262 return -ENOMEM; 2263 } 2264 /* Step 2 - Must use GFP_NOIO now.
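 * (A GFP_KERNEL allocation here could block on writeback to this
 * array, which is about to stall while the stripes are collected,
 * and so deadlock - hence GFP_NOIO for everything that follows.)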
2265 * OK, we have enough stripes, start collecting inactive 2266 * stripes and copying them over 2267 */ 2268 hash = 0; 2269 cnt = 0; 2270 list_for_each_entry(nsh, &newstripes, lru) { 2271 lock_device_hash_lock(conf, hash); 2272 wait_event_cmd(conf->wait_for_stripe, 2273 !list_empty(conf->inactive_list + hash), 2274 unlock_device_hash_lock(conf, hash), 2275 lock_device_hash_lock(conf, hash)); 2276 osh = get_free_stripe(conf, hash); 2277 unlock_device_hash_lock(conf, hash); 2278 2279 for (i = 0; i < conf->pool_size; i++) { 2280 nsh->dev[i].page = osh->dev[i].page; 2281 nsh->dev[i].orig_page = osh->dev[i].page; 2282 } 2283 nsh->hash_lock_index = hash; 2284 kmem_cache_free(conf->slab_cache, osh); 2285 cnt++; 2286 if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS + 2287 !!((conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS) > hash)) { 2288 hash++; 2289 cnt = 0; 2290 } 2291 } 2292 kmem_cache_destroy(conf->slab_cache); 2293 2294 /* Step 3. 2295 * At this point, we are holding all the stripes so the array 2296 * is completely stalled, so now is a good time to resize 2297 * conf->disks and the scribble region 2298 */ 2299 ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO); 2300 if (ndisks) { 2301 for (i = 0; i < conf->pool_size; i++) 2302 ndisks[i] = conf->disks[i]; 2303 2304 for (i = conf->pool_size; i < newsize; i++) { 2305 ndisks[i].extra_page = alloc_page(GFP_NOIO); 2306 if (!ndisks[i].extra_page) 2307 err = -ENOMEM; 2308 } 2309 2310 if (err) { 2311 for (i = conf->pool_size; i < newsize; i++) 2312 if (ndisks[i].extra_page) 2313 put_page(ndisks[i].extra_page); 2314 kfree(ndisks); 2315 } else { 2316 kfree(conf->disks); 2317 conf->disks = ndisks; 2318 } 2319 } else 2320 err = -ENOMEM; 2321 2322 mutex_unlock(&conf->cache_size_mutex); 2323 /* Step 4, return new stripes to service */ 2324 while (!list_empty(&newstripes)) { 2325 nsh = list_entry(newstripes.next, struct stripe_head, lru); 2326 list_del_init(&nsh->lru); 2327 2328 for (i = conf->raid_disks; i < newsize; i++) 2329 if (nsh->dev[i].page == NULL) { 2330 struct page *p = alloc_page(GFP_NOIO); 2331 nsh->dev[i].page = p; 2332 nsh->dev[i].orig_page = p; 2333 if (!p) 2334 err = -ENOMEM; 2335 } 2336 raid5_release_stripe(nsh); 2337 } 2338 /* critical section passed, GFP_NOIO no longer needed */ 2339 2340 conf->slab_cache = sc; 2341 conf->active_name = 1-conf->active_name; 2342 if (!err) 2343 conf->pool_size = newsize; 2344 return err; 2345 } 2346 2347 static int drop_one_stripe(struct r5conf *conf) 2348 { 2349 struct stripe_head *sh; 2350 int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK; 2351 2352 spin_lock_irq(conf->hash_locks + hash); 2353 sh = get_free_stripe(conf, hash); 2354 spin_unlock_irq(conf->hash_locks + hash); 2355 if (!sh) 2356 return 0; 2357 BUG_ON(atomic_read(&sh->count)); 2358 shrink_buffers(sh); 2359 kmem_cache_free(conf->slab_cache, sh); 2360 atomic_dec(&conf->active_stripes); 2361 conf->max_nr_stripes--; 2362 return 1; 2363 } 2364 2365 static void shrink_stripes(struct r5conf *conf) 2366 { 2367 while (conf->max_nr_stripes && 2368 drop_one_stripe(conf)) 2369 ; 2370 2371 kmem_cache_destroy(conf->slab_cache); 2372 conf->slab_cache = NULL; 2373 } 2374 2375 static void raid5_end_read_request(struct bio *bi) 2376 { 2377 struct stripe_head *sh = bi->bi_private; 2378 struct r5conf *conf = sh->raid_conf; 2379 int disks = sh->disks, i; 2380 char b[BDEVNAME_SIZE]; 2381 struct md_rdev *rdev = NULL; 2382 sector_t s; 2383 2384 for (i = 0; i < disks; i++) 2385 if (bi == &sh->dev[i].req) 2386 break; 2387 2388
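/*
 * 'i' now indexes the member device whose read has completed;
 * i == disks means the bio matched no device, which would be a
 * driver bug and is trapped below.
 */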
pr_debug("end_read_request %llu/%d, count: %d, error %d.\n", 2389 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 2390 bi->bi_error); 2391 if (i == disks) { 2392 bio_reset(bi); 2393 BUG(); 2394 return; 2395 } 2396 if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) 2397 /* If replacement finished while this request was outstanding, 2398 * 'replacement' might be NULL already. 2399 * In that case it moved down to 'rdev'. 2400 * rdev is not removed until all requests are finished. 2401 */ 2402 rdev = conf->disks[i].replacement; 2403 if (!rdev) 2404 rdev = conf->disks[i].rdev; 2405 2406 if (use_new_offset(conf, sh)) 2407 s = sh->sector + rdev->new_data_offset; 2408 else 2409 s = sh->sector + rdev->data_offset; 2410 if (!bi->bi_error) { 2411 set_bit(R5_UPTODATE, &sh->dev[i].flags); 2412 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 2413 /* Note that this cannot happen on a 2414 * replacement device. We just fail those on 2415 * any error 2416 */ 2417 pr_info_ratelimited( 2418 "md/raid:%s: read error corrected (%lu sectors at %llu on %s)\n", 2419 mdname(conf->mddev), STRIPE_SECTORS, 2420 (unsigned long long)s, 2421 bdevname(rdev->bdev, b)); 2422 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); 2423 clear_bit(R5_ReadError, &sh->dev[i].flags); 2424 clear_bit(R5_ReWrite, &sh->dev[i].flags); 2425 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) 2426 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); 2427 2428 if (test_bit(R5_InJournal, &sh->dev[i].flags)) 2429 /* 2430 * end read for a page in journal, this 2431 * must be preparing for prexor in rmw 2432 */ 2433 set_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags); 2434 2435 if (atomic_read(&rdev->read_errors)) 2436 atomic_set(&rdev->read_errors, 0); 2437 } else { 2438 const char *bdn = bdevname(rdev->bdev, b); 2439 int retry = 0; 2440 int set_bad = 0; 2441 2442 clear_bit(R5_UPTODATE, &sh->dev[i].flags); 2443 atomic_inc(&rdev->read_errors); 2444 if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) 2445 pr_warn_ratelimited( 2446 "md/raid:%s: read error on replacement device (sector %llu on %s).\n", 2447 mdname(conf->mddev), 2448 (unsigned long long)s, 2449 bdn); 2450 else if (conf->mddev->degraded >= conf->max_degraded) { 2451 set_bad = 1; 2452 pr_warn_ratelimited( 2453 "md/raid:%s: read error not correctable (sector %llu on %s).\n", 2454 mdname(conf->mddev), 2455 (unsigned long long)s, 2456 bdn); 2457 } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) { 2458 /* Oh, no!!! */ 2459 set_bad = 1; 2460 pr_warn_ratelimited( 2461 "md/raid:%s: read error NOT corrected!! 
(sector %llu on %s).\n", 2462 mdname(conf->mddev), 2463 (unsigned long long)s, 2464 bdn); 2465 } else if (atomic_read(&rdev->read_errors) 2466 > conf->max_nr_stripes) 2467 pr_warn("md/raid:%s: Too many read errors, failing device %s.\n", 2468 mdname(conf->mddev), bdn); 2469 else 2470 retry = 1; 2471 if (set_bad && test_bit(In_sync, &rdev->flags) 2472 && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) 2473 retry = 1; 2474 if (retry) 2475 if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) { 2476 set_bit(R5_ReadError, &sh->dev[i].flags); 2477 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); 2478 } else 2479 set_bit(R5_ReadNoMerge, &sh->dev[i].flags); 2480 else { 2481 clear_bit(R5_ReadError, &sh->dev[i].flags); 2482 clear_bit(R5_ReWrite, &sh->dev[i].flags); 2483 if (!(set_bad 2484 && test_bit(In_sync, &rdev->flags) 2485 && rdev_set_badblocks( 2486 rdev, sh->sector, STRIPE_SECTORS, 0))) 2487 md_error(conf->mddev, rdev); 2488 } 2489 } 2490 rdev_dec_pending(rdev, conf->mddev); 2491 bio_reset(bi); 2492 clear_bit(R5_LOCKED, &sh->dev[i].flags); 2493 set_bit(STRIPE_HANDLE, &sh->state); 2494 raid5_release_stripe(sh); 2495 } 2496 2497 static void raid5_end_write_request(struct bio *bi) 2498 { 2499 struct stripe_head *sh = bi->bi_private; 2500 struct r5conf *conf = sh->raid_conf; 2501 int disks = sh->disks, i; 2502 struct md_rdev *uninitialized_var(rdev); 2503 sector_t first_bad; 2504 int bad_sectors; 2505 int replacement = 0; 2506 2507 for (i = 0 ; i < disks; i++) { 2508 if (bi == &sh->dev[i].req) { 2509 rdev = conf->disks[i].rdev; 2510 break; 2511 } 2512 if (bi == &sh->dev[i].rreq) { 2513 rdev = conf->disks[i].replacement; 2514 if (rdev) 2515 replacement = 1; 2516 else 2517 /* rdev was removed and 'replacement' 2518 * replaced it. rdev is not removed 2519 * until all requests are finished. 2520 */ 2521 rdev = conf->disks[i].rdev; 2522 break; 2523 } 2524 } 2525 pr_debug("end_write_request %llu/%d, count %d, error: %d.\n", 2526 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 2527 bi->bi_error); 2528 if (i == disks) { 2529 bio_reset(bi); 2530 BUG(); 2531 return; 2532 } 2533 2534 if (replacement) { 2535 if (bi->bi_error) 2536 md_error(conf->mddev, rdev); 2537 else if (is_badblock(rdev, sh->sector, 2538 STRIPE_SECTORS, 2539 &first_bad, &bad_sectors)) 2540 set_bit(R5_MadeGoodRepl, &sh->dev[i].flags); 2541 } else { 2542 if (bi->bi_error) { 2543 set_bit(STRIPE_DEGRADED, &sh->state); 2544 set_bit(WriteErrorSeen, &rdev->flags); 2545 set_bit(R5_WriteError, &sh->dev[i].flags); 2546 if (!test_and_set_bit(WantReplacement, &rdev->flags)) 2547 set_bit(MD_RECOVERY_NEEDED, 2548 &rdev->mddev->recovery); 2549 } else if (is_badblock(rdev, sh->sector, 2550 STRIPE_SECTORS, 2551 &first_bad, &bad_sectors)) { 2552 set_bit(R5_MadeGood, &sh->dev[i].flags); 2553 if (test_bit(R5_ReadError, &sh->dev[i].flags)) 2554 /* That was a successful write so make 2555 * sure it looks like we already did 2556 * a re-write. 
2557 */ 2558 set_bit(R5_ReWrite, &sh->dev[i].flags); 2559 } 2560 } 2561 rdev_dec_pending(rdev, conf->mddev); 2562 2563 if (sh->batch_head && bi->bi_error && !replacement) 2564 set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state); 2565 2566 bio_reset(bi); 2567 if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags)) 2568 clear_bit(R5_LOCKED, &sh->dev[i].flags); 2569 set_bit(STRIPE_HANDLE, &sh->state); 2570 raid5_release_stripe(sh); 2571 2572 if (sh->batch_head && sh != sh->batch_head) 2573 raid5_release_stripe(sh->batch_head); 2574 } 2575 2576 static void raid5_build_block(struct stripe_head *sh, int i, int previous) 2577 { 2578 struct r5dev *dev = &sh->dev[i]; 2579 2580 dev->flags = 0; 2581 dev->sector = raid5_compute_blocknr(sh, i, previous); 2582 } 2583 2584 static void raid5_error(struct mddev *mddev, struct md_rdev *rdev) 2585 { 2586 char b[BDEVNAME_SIZE]; 2587 struct r5conf *conf = mddev->private; 2588 unsigned long flags; 2589 pr_debug("raid456: error called\n"); 2590 2591 spin_lock_irqsave(&conf->device_lock, flags); 2592 clear_bit(In_sync, &rdev->flags); 2593 mddev->degraded = raid5_calc_degraded(conf); 2594 spin_unlock_irqrestore(&conf->device_lock, flags); 2595 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 2596 2597 set_bit(Blocked, &rdev->flags); 2598 set_bit(Faulty, &rdev->flags); 2599 set_mask_bits(&mddev->sb_flags, 0, 2600 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING)); 2601 pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n" 2602 "md/raid:%s: Operation continuing on %d devices.\n", 2603 mdname(mddev), 2604 bdevname(rdev->bdev, b), 2605 mdname(mddev), 2606 conf->raid_disks - mddev->degraded); 2607 r5c_update_on_rdev_error(mddev); 2608 } 2609 2610 /* 2611 * Input: a 'big' sector number, 2612 * Output: index of the data and parity disk, and the sector # in them. 2613 */ 2614 sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector, 2615 int previous, int *dd_idx, 2616 struct stripe_head *sh) 2617 { 2618 sector_t stripe, stripe2; 2619 sector_t chunk_number; 2620 unsigned int chunk_offset; 2621 int pd_idx, qd_idx; 2622 int ddf_layout = 0; 2623 sector_t new_sector; 2624 int algorithm = previous ? conf->prev_algo 2625 : conf->algorithm; 2626 int sectors_per_chunk = previous ? conf->prev_chunk_sectors 2627 : conf->chunk_sectors; 2628 int raid_disks = previous ? conf->previous_raid_disks 2629 : conf->raid_disks; 2630 int data_disks = raid_disks - conf->max_degraded; 2631 2632 /* First compute the information on this sector */ 2633 2634 /* 2635 * Compute the chunk number and the sector offset inside the chunk 2636 */ 2637 chunk_offset = sector_div(r_sector, sectors_per_chunk); 2638 chunk_number = r_sector; 2639 2640 /* 2641 * Compute the stripe number 2642 */ 2643 stripe = chunk_number; 2644 *dd_idx = sector_div(stripe, data_disks); 2645 stripe2 = stripe; 2646 /* 2647 * Select the parity disk based on the user selected algorithm. 
2648 */ 2649 pd_idx = qd_idx = -1; 2650 switch(conf->level) { 2651 case 4: 2652 pd_idx = data_disks; 2653 break; 2654 case 5: 2655 switch (algorithm) { 2656 case ALGORITHM_LEFT_ASYMMETRIC: 2657 pd_idx = data_disks - sector_div(stripe2, raid_disks); 2658 if (*dd_idx >= pd_idx) 2659 (*dd_idx)++; 2660 break; 2661 case ALGORITHM_RIGHT_ASYMMETRIC: 2662 pd_idx = sector_div(stripe2, raid_disks); 2663 if (*dd_idx >= pd_idx) 2664 (*dd_idx)++; 2665 break; 2666 case ALGORITHM_LEFT_SYMMETRIC: 2667 pd_idx = data_disks - sector_div(stripe2, raid_disks); 2668 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 2669 break; 2670 case ALGORITHM_RIGHT_SYMMETRIC: 2671 pd_idx = sector_div(stripe2, raid_disks); 2672 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 2673 break; 2674 case ALGORITHM_PARITY_0: 2675 pd_idx = 0; 2676 (*dd_idx)++; 2677 break; 2678 case ALGORITHM_PARITY_N: 2679 pd_idx = data_disks; 2680 break; 2681 default: 2682 BUG(); 2683 } 2684 break; 2685 case 6: 2686 2687 switch (algorithm) { 2688 case ALGORITHM_LEFT_ASYMMETRIC: 2689 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 2690 qd_idx = pd_idx + 1; 2691 if (pd_idx == raid_disks-1) { 2692 (*dd_idx)++; /* Q D D D P */ 2693 qd_idx = 0; 2694 } else if (*dd_idx >= pd_idx) 2695 (*dd_idx) += 2; /* D D P Q D */ 2696 break; 2697 case ALGORITHM_RIGHT_ASYMMETRIC: 2698 pd_idx = sector_div(stripe2, raid_disks); 2699 qd_idx = pd_idx + 1; 2700 if (pd_idx == raid_disks-1) { 2701 (*dd_idx)++; /* Q D D D P */ 2702 qd_idx = 0; 2703 } else if (*dd_idx >= pd_idx) 2704 (*dd_idx) += 2; /* D D P Q D */ 2705 break; 2706 case ALGORITHM_LEFT_SYMMETRIC: 2707 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 2708 qd_idx = (pd_idx + 1) % raid_disks; 2709 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; 2710 break; 2711 case ALGORITHM_RIGHT_SYMMETRIC: 2712 pd_idx = sector_div(stripe2, raid_disks); 2713 qd_idx = (pd_idx + 1) % raid_disks; 2714 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; 2715 break; 2716 2717 case ALGORITHM_PARITY_0: 2718 pd_idx = 0; 2719 qd_idx = 1; 2720 (*dd_idx) += 2; 2721 break; 2722 case ALGORITHM_PARITY_N: 2723 pd_idx = data_disks; 2724 qd_idx = data_disks + 1; 2725 break; 2726 2727 case ALGORITHM_ROTATING_ZERO_RESTART: 2728 /* Exactly the same as RIGHT_ASYMMETRIC, but or 2729 * of blocks for computing Q is different. 
2730 */ 2731 pd_idx = sector_div(stripe2, raid_disks); 2732 qd_idx = pd_idx + 1; 2733 if (pd_idx == raid_disks-1) { 2734 (*dd_idx)++; /* Q D D D P */ 2735 qd_idx = 0; 2736 } else if (*dd_idx >= pd_idx) 2737 (*dd_idx) += 2; /* D D P Q D */ 2738 ddf_layout = 1; 2739 break; 2740 2741 case ALGORITHM_ROTATING_N_RESTART: 2742 /* Same a left_asymmetric, by first stripe is 2743 * D D D P Q rather than 2744 * Q D D D P 2745 */ 2746 stripe2 += 1; 2747 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 2748 qd_idx = pd_idx + 1; 2749 if (pd_idx == raid_disks-1) { 2750 (*dd_idx)++; /* Q D D D P */ 2751 qd_idx = 0; 2752 } else if (*dd_idx >= pd_idx) 2753 (*dd_idx) += 2; /* D D P Q D */ 2754 ddf_layout = 1; 2755 break; 2756 2757 case ALGORITHM_ROTATING_N_CONTINUE: 2758 /* Same as left_symmetric but Q is before P */ 2759 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 2760 qd_idx = (pd_idx + raid_disks - 1) % raid_disks; 2761 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 2762 ddf_layout = 1; 2763 break; 2764 2765 case ALGORITHM_LEFT_ASYMMETRIC_6: 2766 /* RAID5 left_asymmetric, with Q on last device */ 2767 pd_idx = data_disks - sector_div(stripe2, raid_disks-1); 2768 if (*dd_idx >= pd_idx) 2769 (*dd_idx)++; 2770 qd_idx = raid_disks - 1; 2771 break; 2772 2773 case ALGORITHM_RIGHT_ASYMMETRIC_6: 2774 pd_idx = sector_div(stripe2, raid_disks-1); 2775 if (*dd_idx >= pd_idx) 2776 (*dd_idx)++; 2777 qd_idx = raid_disks - 1; 2778 break; 2779 2780 case ALGORITHM_LEFT_SYMMETRIC_6: 2781 pd_idx = data_disks - sector_div(stripe2, raid_disks-1); 2782 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); 2783 qd_idx = raid_disks - 1; 2784 break; 2785 2786 case ALGORITHM_RIGHT_SYMMETRIC_6: 2787 pd_idx = sector_div(stripe2, raid_disks-1); 2788 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); 2789 qd_idx = raid_disks - 1; 2790 break; 2791 2792 case ALGORITHM_PARITY_0_6: 2793 pd_idx = 0; 2794 (*dd_idx)++; 2795 qd_idx = raid_disks - 1; 2796 break; 2797 2798 default: 2799 BUG(); 2800 } 2801 break; 2802 } 2803 2804 if (sh) { 2805 sh->pd_idx = pd_idx; 2806 sh->qd_idx = qd_idx; 2807 sh->ddf_layout = ddf_layout; 2808 } 2809 /* 2810 * Finally, compute the new sector number 2811 */ 2812 new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset; 2813 return new_sector; 2814 } 2815 2816 sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous) 2817 { 2818 struct r5conf *conf = sh->raid_conf; 2819 int raid_disks = sh->disks; 2820 int data_disks = raid_disks - conf->max_degraded; 2821 sector_t new_sector = sh->sector, check; 2822 int sectors_per_chunk = previous ? conf->prev_chunk_sectors 2823 : conf->chunk_sectors; 2824 int algorithm = previous ? 
conf->prev_algo 2825 : conf->algorithm; 2826 sector_t stripe; 2827 int chunk_offset; 2828 sector_t chunk_number; 2829 int dummy1, dd_idx = i; 2830 sector_t r_sector; 2831 struct stripe_head sh2; 2832 2833 chunk_offset = sector_div(new_sector, sectors_per_chunk); 2834 stripe = new_sector; 2835 2836 if (i == sh->pd_idx) 2837 return 0; 2838 switch(conf->level) { 2839 case 4: break; 2840 case 5: 2841 switch (algorithm) { 2842 case ALGORITHM_LEFT_ASYMMETRIC: 2843 case ALGORITHM_RIGHT_ASYMMETRIC: 2844 if (i > sh->pd_idx) 2845 i--; 2846 break; 2847 case ALGORITHM_LEFT_SYMMETRIC: 2848 case ALGORITHM_RIGHT_SYMMETRIC: 2849 if (i < sh->pd_idx) 2850 i += raid_disks; 2851 i -= (sh->pd_idx + 1); 2852 break; 2853 case ALGORITHM_PARITY_0: 2854 i -= 1; 2855 break; 2856 case ALGORITHM_PARITY_N: 2857 break; 2858 default: 2859 BUG(); 2860 } 2861 break; 2862 case 6: 2863 if (i == sh->qd_idx) 2864 return 0; /* It is the Q disk */ 2865 switch (algorithm) { 2866 case ALGORITHM_LEFT_ASYMMETRIC: 2867 case ALGORITHM_RIGHT_ASYMMETRIC: 2868 case ALGORITHM_ROTATING_ZERO_RESTART: 2869 case ALGORITHM_ROTATING_N_RESTART: 2870 if (sh->pd_idx == raid_disks-1) 2871 i--; /* Q D D D P */ 2872 else if (i > sh->pd_idx) 2873 i -= 2; /* D D P Q D */ 2874 break; 2875 case ALGORITHM_LEFT_SYMMETRIC: 2876 case ALGORITHM_RIGHT_SYMMETRIC: 2877 if (sh->pd_idx == raid_disks-1) 2878 i--; /* Q D D D P */ 2879 else { 2880 /* D D P Q D */ 2881 if (i < sh->pd_idx) 2882 i += raid_disks; 2883 i -= (sh->pd_idx + 2); 2884 } 2885 break; 2886 case ALGORITHM_PARITY_0: 2887 i -= 2; 2888 break; 2889 case ALGORITHM_PARITY_N: 2890 break; 2891 case ALGORITHM_ROTATING_N_CONTINUE: 2892 /* Like left_symmetric, but P is before Q */ 2893 if (sh->pd_idx == 0) 2894 i--; /* P D D D Q */ 2895 else { 2896 /* D D Q P D */ 2897 if (i < sh->pd_idx) 2898 i += raid_disks; 2899 i -= (sh->pd_idx + 1); 2900 } 2901 break; 2902 case ALGORITHM_LEFT_ASYMMETRIC_6: 2903 case ALGORITHM_RIGHT_ASYMMETRIC_6: 2904 if (i > sh->pd_idx) 2905 i--; 2906 break; 2907 case ALGORITHM_LEFT_SYMMETRIC_6: 2908 case ALGORITHM_RIGHT_SYMMETRIC_6: 2909 if (i < sh->pd_idx) 2910 i += data_disks + 1; 2911 i -= (sh->pd_idx + 1); 2912 break; 2913 case ALGORITHM_PARITY_0_6: 2914 i -= 1; 2915 break; 2916 default: 2917 BUG(); 2918 } 2919 break; 2920 } 2921 2922 chunk_number = stripe * data_disks + i; 2923 r_sector = chunk_number * sectors_per_chunk + chunk_offset; 2924 2925 check = raid5_compute_sector(conf, r_sector, 2926 previous, &dummy1, &sh2); 2927 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx 2928 || sh2.qd_idx != sh->qd_idx) { 2929 pr_warn("md/raid:%s: compute_blocknr: map not correct\n", 2930 mdname(conf->mddev)); 2931 return 0; 2932 } 2933 return r_sector; 2934 } 2935 2936 /* 2937 * There are cases where we want handle_stripe_dirtying() and 2938 * schedule_reconstruction() to delay towrite to some dev of a stripe. 2939 * 2940 * This function checks whether we want to delay the towrite. Specifically, 2941 * we delay the towrite when: 2942 * 2943 * 1. degraded stripe has a non-overwrite to the missing dev, AND this 2944 * stripe has data in journal (for other devices). 2945 * 2946 * In this case, when reading data for the non-overwrite dev, it is 2947 * necessary to handle complex rmw of write back cache (prexor with 2948 * orig_page, and xor with page). To keep read path simple, we would 2949 * like to flush data in journal to RAID disks first, so complex rmw 2950 * is handled in the write path (handle_stripe_dirtying). 2951 * 2952 * 2.
when journal space is critical (R5C_LOG_CRITICAL=1) 2953 2954 * It is important to be able to flush all stripes in raid5-cache. 2955 * Therefore, we need to reserve some space on the journal device for 2956 * these flushes. If the flush operation includes pending writes to the 2957 * stripe, we need to reserve (conf->raid_disks + 1) pages per stripe 2958 * for the flush out. If we exclude these pending writes from the flush 2959 * operation, we only need (conf->max_degraded + 1) pages per stripe. 2960 * Therefore, excluding pending writes in these cases enables more 2961 * efficient use of the journal device. 2962 * 2963 * Note: To make sure the stripe makes progress, we only delay 2964 * towrite for stripes with data already in journal (injournal > 0). 2965 * When LOG_CRITICAL, stripes with injournal == 0 will be sent to 2966 * no_space_stripes list. 2967 * 2968 */ 2969 static inline bool delay_towrite(struct r5conf *conf, 2970 struct r5dev *dev, 2971 struct stripe_head_state *s) 2972 { 2973 /* case 1 above */ 2974 if (!test_bit(R5_OVERWRITE, &dev->flags) && 2975 !test_bit(R5_Insync, &dev->flags) && s->injournal) 2976 return true; 2977 /* case 2 above */ 2978 if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) && 2979 s->injournal > 0) 2980 return true; 2981 return false; 2982 } 2983 2984 static void 2985 schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, 2986 int rcw, int expand) 2987 { 2988 int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx, disks = sh->disks; 2989 struct r5conf *conf = sh->raid_conf; 2990 int level = conf->level; 2991 2992 if (rcw) { 2993 /* 2994 * In some cases, handle_stripe_dirtying initially decides to 2995 * run rmw and allocates an extra page for prexor. However, rcw 2996 * turns out to be cheaper later on. We need to free the extra page now, 2997 * because we won't be able to do that in ops_complete_prexor().
2998 */ 2999 r5c_release_extra_page(sh); 3000 3001 for (i = disks; i--; ) { 3002 struct r5dev *dev = &sh->dev[i]; 3003 3004 if (dev->towrite && !delay_towrite(conf, dev, s)) { 3005 set_bit(R5_LOCKED, &dev->flags); 3006 set_bit(R5_Wantdrain, &dev->flags); 3007 if (!expand) 3008 clear_bit(R5_UPTODATE, &dev->flags); 3009 s->locked++; 3010 } else if (test_bit(R5_InJournal, &dev->flags)) { 3011 set_bit(R5_LOCKED, &dev->flags); 3012 s->locked++; 3013 } 3014 } 3015 /* if we are not expanding this is a proper write request, and 3016 * there will be bios with new data to be drained into the 3017 * stripe cache 3018 */ 3019 if (!expand) { 3020 if (!s->locked) 3021 /* False alarm, nothing to do */ 3022 return; 3023 sh->reconstruct_state = reconstruct_state_drain_run; 3024 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); 3025 } else 3026 sh->reconstruct_state = reconstruct_state_run; 3027 3028 set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); 3029 3030 if (s->locked + conf->max_degraded == disks) 3031 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) 3032 atomic_inc(&conf->pending_full_writes); 3033 } else { 3034 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || 3035 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); 3036 BUG_ON(level == 6 && 3037 (!(test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags) || 3038 test_bit(R5_Wantcompute, &sh->dev[qd_idx].flags)))); 3039 3040 for (i = disks; i--; ) { 3041 struct r5dev *dev = &sh->dev[i]; 3042 if (i == pd_idx || i == qd_idx) 3043 continue; 3044 3045 if (dev->towrite && 3046 (test_bit(R5_UPTODATE, &dev->flags) || 3047 test_bit(R5_Wantcompute, &dev->flags))) { 3048 set_bit(R5_Wantdrain, &dev->flags); 3049 set_bit(R5_LOCKED, &dev->flags); 3050 clear_bit(R5_UPTODATE, &dev->flags); 3051 s->locked++; 3052 } else if (test_bit(R5_InJournal, &dev->flags)) { 3053 set_bit(R5_LOCKED, &dev->flags); 3054 s->locked++; 3055 } 3056 } 3057 if (!s->locked) 3058 /* False alarm - nothing to do */ 3059 return; 3060 sh->reconstruct_state = reconstruct_state_prexor_drain_run; 3061 set_bit(STRIPE_OP_PREXOR, &s->ops_request); 3062 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); 3063 set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); 3064 } 3065 3066 /* keep the parity disk(s) locked while asynchronous operations 3067 * are in flight 3068 */ 3069 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); 3070 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 3071 s->locked++; 3072 3073 if (level == 6) { 3074 int qd_idx = sh->qd_idx; 3075 struct r5dev *dev = &sh->dev[qd_idx]; 3076 3077 set_bit(R5_LOCKED, &dev->flags); 3078 clear_bit(R5_UPTODATE, &dev->flags); 3079 s->locked++; 3080 } 3081 3082 pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n", 3083 __func__, (unsigned long long)sh->sector, 3084 s->locked, s->ops_request); 3085 } 3086 3087 /* 3088 * Each stripe/dev can have one or more bion attached. 3089 * toread/towrite point to the first in a chain. 3090 * The bi_next chain must be in order. 3091 */ 3092 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, 3093 int forwrite, int previous) 3094 { 3095 struct bio **bip; 3096 struct r5conf *conf = sh->raid_conf; 3097 int firstwrite=0; 3098 3099 pr_debug("adding bi b#%llu to stripe s#%llu\n", 3100 (unsigned long long)bi->bi_iter.bi_sector, 3101 (unsigned long long)sh->sector); 3102 3103 /* 3104 * If several bio share a stripe. The bio bi_phys_segments acts as a 3105 * reference count to avoid race. 
The reference count should already be 3106 * increased before this function is called (for example, in 3107 * raid5_make_request()), so other bios sharing this stripe will not free the 3108 * stripe. If a stripe is owned by one bio, the stripe lock will 3109 * protect it. 3110 */ 3111 spin_lock_irq(&sh->stripe_lock); 3112 /* Don't allow new IO added to stripes in batch list */ 3113 if (sh->batch_head) 3114 goto overlap; 3115 if (forwrite) { 3116 bip = &sh->dev[dd_idx].towrite; 3117 if (*bip == NULL) 3118 firstwrite = 1; 3119 } else 3120 bip = &sh->dev[dd_idx].toread; 3121 while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) { 3122 if (bio_end_sector(*bip) > bi->bi_iter.bi_sector) 3123 goto overlap; 3124 bip = &(*bip)->bi_next; 3125 } 3126 if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi)) 3127 goto overlap; 3128 3129 if (!forwrite || previous) 3130 clear_bit(STRIPE_BATCH_READY, &sh->state); 3131 3132 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next); 3133 if (*bip) 3134 bi->bi_next = *bip; 3135 *bip = bi; 3136 raid5_inc_bi_active_stripes(bi); 3137 3138 if (forwrite) { 3139 /* check if page is covered */ 3140 sector_t sector = sh->dev[dd_idx].sector; 3141 for (bi = sh->dev[dd_idx].towrite; 3142 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && 3143 bi && bi->bi_iter.bi_sector <= sector; 3144 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { 3145 if (bio_end_sector(bi) >= sector) 3146 sector = bio_end_sector(bi); 3147 } 3148 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) 3149 if (!test_and_set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags)) 3150 sh->overwrite_disks++; 3151 } 3152 3153 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", 3154 (unsigned long long)(*bip)->bi_iter.bi_sector, 3155 (unsigned long long)sh->sector, dd_idx); 3156 3157 if (conf->mddev->bitmap && firstwrite) { 3158 /* Cannot hold spinlock over bitmap_startwrite, 3159 * but must ensure this isn't added to a batch until 3160 * we have added to the bitmap and set bm_seq. 3161 * So set STRIPE_BITMAP_PENDING to prevent 3162 * batching. 3163 * If multiple add_stripe_bio() calls race here they 3164 * must all set STRIPE_BITMAP_PENDING. So only the first one 3165 * to complete "bitmap_startwrite" gets to set 3166 * STRIPE_BIT_DELAY. This is important as once a stripe 3167 * is added to a batch, STRIPE_BIT_DELAY cannot be changed 3168 * any more. 3169 */ 3170 set_bit(STRIPE_BITMAP_PENDING, &sh->state); 3171 spin_unlock_irq(&sh->stripe_lock); 3172 bitmap_startwrite(conf->mddev->bitmap, sh->sector, 3173 STRIPE_SECTORS, 0); 3174 spin_lock_irq(&sh->stripe_lock); 3175 clear_bit(STRIPE_BITMAP_PENDING, &sh->state); 3176 if (!sh->batch_head) { 3177 sh->bm_seq = conf->seq_flush+1; 3178 set_bit(STRIPE_BIT_DELAY, &sh->state); 3179 } 3180 } 3181 spin_unlock_irq(&sh->stripe_lock); 3182 3183 if (stripe_can_batch(sh)) 3184 stripe_add_to_batch_list(conf, sh); 3185 return 1; 3186 3187 overlap: 3188 set_bit(R5_Overlap, &sh->dev[dd_idx].flags); 3189 spin_unlock_irq(&sh->stripe_lock); 3190 return 0; 3191 } 3192 3193 static void end_reshape(struct r5conf *conf); 3194 3195 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, 3196 struct stripe_head *sh) 3197 { 3198 int sectors_per_chunk = 3199 previous ? conf->prev_chunk_sectors : conf->chunk_sectors; 3200 int dd_idx; 3201 int chunk_offset = sector_div(stripe, sectors_per_chunk); 3202 int disks = previous ?
conf->previous_raid_disks : conf->raid_disks; 3203 3204 raid5_compute_sector(conf, 3205 stripe * (disks - conf->max_degraded) 3206 *sectors_per_chunk + chunk_offset, 3207 previous, 3208 &dd_idx, sh); 3209 } 3210 3211 static void 3212 handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, 3213 struct stripe_head_state *s, int disks, 3214 struct bio_list *return_bi) 3215 { 3216 int i; 3217 BUG_ON(sh->batch_head); 3218 for (i = disks; i--; ) { 3219 struct bio *bi; 3220 int bitmap_end = 0; 3221 3222 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 3223 struct md_rdev *rdev; 3224 rcu_read_lock(); 3225 rdev = rcu_dereference(conf->disks[i].rdev); 3226 if (rdev && test_bit(In_sync, &rdev->flags) && 3227 !test_bit(Faulty, &rdev->flags)) 3228 atomic_inc(&rdev->nr_pending); 3229 else 3230 rdev = NULL; 3231 rcu_read_unlock(); 3232 if (rdev) { 3233 if (!rdev_set_badblocks( 3234 rdev, 3235 sh->sector, 3236 STRIPE_SECTORS, 0)) 3237 md_error(conf->mddev, rdev); 3238 rdev_dec_pending(rdev, conf->mddev); 3239 } 3240 } 3241 spin_lock_irq(&sh->stripe_lock); 3242 /* fail all writes first */ 3243 bi = sh->dev[i].towrite; 3244 sh->dev[i].towrite = NULL; 3245 sh->overwrite_disks = 0; 3246 spin_unlock_irq(&sh->stripe_lock); 3247 if (bi) 3248 bitmap_end = 1; 3249 3250 r5l_stripe_write_finished(sh); 3251 3252 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 3253 wake_up(&conf->wait_for_overlap); 3254 3255 while (bi && bi->bi_iter.bi_sector < 3256 sh->dev[i].sector + STRIPE_SECTORS) { 3257 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 3258 3259 bi->bi_error = -EIO; 3260 if (!raid5_dec_bi_active_stripes(bi)) { 3261 md_write_end(conf->mddev); 3262 bio_list_add(return_bi, bi); 3263 } 3264 bi = nextbi; 3265 } 3266 if (bitmap_end) 3267 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 3268 STRIPE_SECTORS, 0, 0); 3269 bitmap_end = 0; 3270 /* and fail all 'written' */ 3271 bi = sh->dev[i].written; 3272 sh->dev[i].written = NULL; 3273 if (test_and_clear_bit(R5_SkipCopy, &sh->dev[i].flags)) { 3274 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); 3275 sh->dev[i].page = sh->dev[i].orig_page; 3276 } 3277 3278 if (bi) bitmap_end = 1; 3279 while (bi && bi->bi_iter.bi_sector < 3280 sh->dev[i].sector + STRIPE_SECTORS) { 3281 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); 3282 3283 bi->bi_error = -EIO; 3284 if (!raid5_dec_bi_active_stripes(bi)) { 3285 md_write_end(conf->mddev); 3286 bio_list_add(return_bi, bi); 3287 } 3288 bi = bi2; 3289 } 3290 3291 /* fail any reads if this device is non-operational and 3292 * the data has not reached the cache yet. 
3293 */ 3294 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && 3295 s->failed > conf->max_degraded && 3296 (!test_bit(R5_Insync, &sh->dev[i].flags) || 3297 test_bit(R5_ReadError, &sh->dev[i].flags))) { 3298 spin_lock_irq(&sh->stripe_lock); 3299 bi = sh->dev[i].toread; 3300 sh->dev[i].toread = NULL; 3301 spin_unlock_irq(&sh->stripe_lock); 3302 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 3303 wake_up(&conf->wait_for_overlap); 3304 if (bi) 3305 s->to_read--; 3306 while (bi && bi->bi_iter.bi_sector < 3307 sh->dev[i].sector + STRIPE_SECTORS) { 3308 struct bio *nextbi = 3309 r5_next_bio(bi, sh->dev[i].sector); 3310 3311 bi->bi_error = -EIO; 3312 if (!raid5_dec_bi_active_stripes(bi)) 3313 bio_list_add(return_bi, bi); 3314 bi = nextbi; 3315 } 3316 } 3317 if (bitmap_end) 3318 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 3319 STRIPE_SECTORS, 0, 0); 3320 /* If we were in the middle of a write the parity block might 3321 * still be locked - so just clear all R5_LOCKED flags 3322 */ 3323 clear_bit(R5_LOCKED, &sh->dev[i].flags); 3324 } 3325 s->to_write = 0; 3326 s->written = 0; 3327 3328 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 3329 if (atomic_dec_and_test(&conf->pending_full_writes)) 3330 md_wakeup_thread(conf->mddev->thread); 3331 } 3332 3333 static void 3334 handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, 3335 struct stripe_head_state *s) 3336 { 3337 int abort = 0; 3338 int i; 3339 3340 BUG_ON(sh->batch_head); 3341 clear_bit(STRIPE_SYNCING, &sh->state); 3342 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) 3343 wake_up(&conf->wait_for_overlap); 3344 s->syncing = 0; 3345 s->replacing = 0; 3346 /* There is nothing more to do for sync/check/repair. 3347 * Don't even need to abort as that is handled elsewhere 3348 * if needed, and not always wanted e.g. if there is a known 3349 * bad block here. 
* For recover/replace we need to record a bad block on all 3351 * non-sync devices, or abort the recovery 3352 */ 3353 if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) { 3354 /* During recovery devices cannot be removed, so 3355 * locking and refcounting of rdevs is not needed 3356 */ 3357 rcu_read_lock(); 3358 for (i = 0; i < conf->raid_disks; i++) { 3359 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); 3360 if (rdev 3361 && !test_bit(Faulty, &rdev->flags) 3362 && !test_bit(In_sync, &rdev->flags) 3363 && !rdev_set_badblocks(rdev, sh->sector, 3364 STRIPE_SECTORS, 0)) 3365 abort = 1; 3366 rdev = rcu_dereference(conf->disks[i].replacement); 3367 if (rdev 3368 && !test_bit(Faulty, &rdev->flags) 3369 && !test_bit(In_sync, &rdev->flags) 3370 && !rdev_set_badblocks(rdev, sh->sector, 3371 STRIPE_SECTORS, 0)) 3372 abort = 1; 3373 } 3374 rcu_read_unlock(); 3375 if (abort) 3376 conf->recovery_disabled = 3377 conf->mddev->recovery_disabled; 3378 } 3379 md_done_sync(conf->mddev, STRIPE_SECTORS, !abort); 3380 } 3381 3382 static int want_replace(struct stripe_head *sh, int disk_idx) 3383 { 3384 struct md_rdev *rdev; 3385 int rv = 0; 3386 3387 rcu_read_lock(); 3388 rdev = rcu_dereference(sh->raid_conf->disks[disk_idx].replacement); 3389 if (rdev 3390 && !test_bit(Faulty, &rdev->flags) 3391 && !test_bit(In_sync, &rdev->flags) 3392 && (rdev->recovery_offset <= sh->sector 3393 || rdev->mddev->recovery_cp <= sh->sector)) 3394 rv = 1; 3395 rcu_read_unlock(); 3396 return rv; 3397 } 3398 3399 static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, 3400 int disk_idx, int disks) 3401 { 3402 struct r5dev *dev = &sh->dev[disk_idx]; 3403 struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]], 3404 &sh->dev[s->failed_num[1]] }; 3405 int i; 3406 3407 3408 if (test_bit(R5_LOCKED, &dev->flags) || 3409 test_bit(R5_UPTODATE, &dev->flags)) 3410 /* No point reading this as we already have it or have 3411 * decided to get it. 3412 */ 3413 return 0; 3414 3415 if (dev->toread || 3416 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags))) 3417 /* We need this block to directly satisfy a request */ 3418 return 1; 3419 3420 if (s->syncing || s->expanding || 3421 (s->replacing && want_replace(sh, disk_idx))) 3422 /* When syncing or expanding we read everything. 3423 * When replacing, we need the replaced block. 3424 */ 3425 return 1; 3426 3427 if ((s->failed >= 1 && fdev[0]->toread) || 3428 (s->failed >= 2 && fdev[1]->toread)) 3429 /* If we want to read from a failed device, then 3430 * we need to actually read every other device. 3431 */ 3432 return 1; 3433 3434 /* Sometimes neither read-modify-write nor reconstruct-write 3435 * cycles can work. In those cases we read every block we 3436 * can. Then the parity-update is certain to have enough to 3437 * work with. 3438 * This can only be a problem when we need to write something, 3439 * and some device has failed. If either of those tests 3440 * fails we need look no further. 3441 */ 3442 if (!s->failed || !s->to_write) 3443 return 0; 3444 3445 if (test_bit(R5_Insync, &dev->flags) && 3446 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 3447 /* Pre-reads are not permitted until after a short delay 3448 * to gather multiple requests. However if this 3449 * device is not Insync, the block could only be computed 3450 * and there is no need to delay that.
3451 */ 3452 return 0; 3453 3454 for (i = 0; i < s->failed && i < 2; i++) { 3455 if (fdev[i]->towrite && 3456 !test_bit(R5_UPTODATE, &fdev[i]->flags) && 3457 !test_bit(R5_OVERWRITE, &fdev[i]->flags)) 3458 /* If we have a partial write to a failed 3459 * device, then we will need to reconstruct 3460 * the content of that device, so all other 3461 * devices must be read. 3462 */ 3463 return 1; 3464 } 3465 3466 /* If we are forced to do a reconstruct-write, either because 3467 * the current RAID6 implementation only supports that, or 3468 * or because parity cannot be trusted and we are currently 3469 * recovering it, there is extra need to be careful. 3470 * If one of the devices that we would need to read, because 3471 * it is not being overwritten (and maybe not written at all) 3472 * is missing/faulty, then we need to read everything we can. 3473 */ 3474 if (sh->raid_conf->level != 6 && 3475 sh->sector < sh->raid_conf->mddev->recovery_cp) 3476 /* reconstruct-write isn't being forced */ 3477 return 0; 3478 for (i = 0; i < s->failed && i < 2; i++) { 3479 if (s->failed_num[i] != sh->pd_idx && 3480 s->failed_num[i] != sh->qd_idx && 3481 !test_bit(R5_UPTODATE, &fdev[i]->flags) && 3482 !test_bit(R5_OVERWRITE, &fdev[i]->flags)) 3483 return 1; 3484 } 3485 3486 return 0; 3487 } 3488 3489 /* fetch_block - checks the given member device to see if its data needs 3490 * to be read or computed to satisfy a request. 3491 * 3492 * Returns 1 when no more member devices need to be checked, otherwise returns 3493 * 0 to tell the loop in handle_stripe_fill to continue 3494 */ 3495 static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, 3496 int disk_idx, int disks) 3497 { 3498 struct r5dev *dev = &sh->dev[disk_idx]; 3499 3500 /* is the data in this block needed, and can we get it? */ 3501 if (need_this_block(sh, s, disk_idx, disks)) { 3502 /* we would like to get this block, possibly by computing it, 3503 * otherwise read it if the backing disk is insync 3504 */ 3505 BUG_ON(test_bit(R5_Wantcompute, &dev->flags)); 3506 BUG_ON(test_bit(R5_Wantread, &dev->flags)); 3507 BUG_ON(sh->batch_head); 3508 if ((s->uptodate == disks - 1) && 3509 (s->failed && (disk_idx == s->failed_num[0] || 3510 disk_idx == s->failed_num[1]))) { 3511 /* have disk failed, and we're requested to fetch it; 3512 * do compute it 3513 */ 3514 pr_debug("Computing stripe %llu block %d\n", 3515 (unsigned long long)sh->sector, disk_idx); 3516 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 3517 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 3518 set_bit(R5_Wantcompute, &dev->flags); 3519 sh->ops.target = disk_idx; 3520 sh->ops.target2 = -1; /* no 2nd target */ 3521 s->req_compute = 1; 3522 /* Careful: from this point on 'uptodate' is in the eye 3523 * of raid_run_ops which services 'compute' operations 3524 * before writes. R5_Wantcompute flags a block that will 3525 * be R5_UPTODATE by the time it is needed for a 3526 * subsequent operation. 
3527 */ 3528 s->uptodate++; 3529 return 1; 3530 } else if (s->uptodate == disks-2 && s->failed >= 2) { 3531 /* Computing 2-failure is *very* expensive; only 3532 * do it if failed >= 2 3533 */ 3534 int other; 3535 for (other = disks; other--; ) { 3536 if (other == disk_idx) 3537 continue; 3538 if (!test_bit(R5_UPTODATE, 3539 &sh->dev[other].flags)) 3540 break; 3541 } 3542 BUG_ON(other < 0); 3543 pr_debug("Computing stripe %llu blocks %d,%d\n", 3544 (unsigned long long)sh->sector, 3545 disk_idx, other); 3546 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 3547 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 3548 set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags); 3549 set_bit(R5_Wantcompute, &sh->dev[other].flags); 3550 sh->ops.target = disk_idx; 3551 sh->ops.target2 = other; 3552 s->uptodate += 2; 3553 s->req_compute = 1; 3554 return 1; 3555 } else if (test_bit(R5_Insync, &dev->flags)) { 3556 set_bit(R5_LOCKED, &dev->flags); 3557 set_bit(R5_Wantread, &dev->flags); 3558 s->locked++; 3559 pr_debug("Reading block %d (sync=%d)\n", 3560 disk_idx, s->syncing); 3561 } 3562 } 3563 3564 return 0; 3565 } 3566 3567 /** 3568 * handle_stripe_fill - read or compute data to satisfy pending requests. 3569 */ 3570 static void handle_stripe_fill(struct stripe_head *sh, 3571 struct stripe_head_state *s, 3572 int disks) 3573 { 3574 int i; 3575 3576 /* look for blocks to read/compute, skip this if a compute 3577 * is already in flight, or if the stripe contents are in the 3578 * midst of changing due to a write 3579 */ 3580 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && 3581 !sh->reconstruct_state) { 3582 3583 /* 3584 * For degraded stripe with data in journal, do not handle 3585 * read requests yet, instead, flush the stripe to raid 3586 * disks first, this avoids handling complex rmw of write 3587 * back cache (prexor with orig_page, and then xor with 3588 * page) in the read path 3589 */ 3590 if (s->injournal && s->failed) { 3591 if (test_bit(STRIPE_R5C_CACHING, &sh->state)) 3592 r5c_make_stripe_write_out(sh); 3593 goto out; 3594 } 3595 3596 for (i = disks; i--; ) 3597 if (fetch_block(sh, s, i, disks)) 3598 break; 3599 } 3600 out: 3601 set_bit(STRIPE_HANDLE, &sh->state); 3602 } 3603 3604 static void break_stripe_batch_list(struct stripe_head *head_sh, 3605 unsigned long handle_flags); 3606 /* handle_stripe_clean_event 3607 * any written block on an uptodate or failed drive can be returned. 3608 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but 3609 * never LOCKED, so we don't need to test 'failed' directly. 
3610 */ 3611 static void handle_stripe_clean_event(struct r5conf *conf, 3612 struct stripe_head *sh, int disks, struct bio_list *return_bi) 3613 { 3614 int i; 3615 struct r5dev *dev; 3616 int discard_pending = 0; 3617 struct stripe_head *head_sh = sh; 3618 bool do_endio = false; 3619 3620 for (i = disks; i--; ) 3621 if (sh->dev[i].written) { 3622 dev = &sh->dev[i]; 3623 if (!test_bit(R5_LOCKED, &dev->flags) && 3624 (test_bit(R5_UPTODATE, &dev->flags) || 3625 test_bit(R5_Discard, &dev->flags) || 3626 test_bit(R5_SkipCopy, &dev->flags))) { 3627 /* We can return any write requests */ 3628 struct bio *wbi, *wbi2; 3629 pr_debug("Return write for disc %d\n", i); 3630 if (test_and_clear_bit(R5_Discard, &dev->flags)) 3631 clear_bit(R5_UPTODATE, &dev->flags); 3632 if (test_and_clear_bit(R5_SkipCopy, &dev->flags)) { 3633 WARN_ON(test_bit(R5_UPTODATE, &dev->flags)); 3634 } 3635 do_endio = true; 3636 3637 returnbi: 3638 dev->page = dev->orig_page; 3639 wbi = dev->written; 3640 dev->written = NULL; 3641 while (wbi && wbi->bi_iter.bi_sector < 3642 dev->sector + STRIPE_SECTORS) { 3643 wbi2 = r5_next_bio(wbi, dev->sector); 3644 if (!raid5_dec_bi_active_stripes(wbi)) { 3645 md_write_end(conf->mddev); 3646 bio_list_add(return_bi, wbi); 3647 } 3648 wbi = wbi2; 3649 } 3650 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 3651 STRIPE_SECTORS, 3652 !test_bit(STRIPE_DEGRADED, &sh->state), 3653 0); 3654 if (head_sh->batch_head) { 3655 sh = list_first_entry(&sh->batch_list, 3656 struct stripe_head, 3657 batch_list); 3658 if (sh != head_sh) { 3659 dev = &sh->dev[i]; 3660 goto returnbi; 3661 } 3662 } 3663 sh = head_sh; 3664 dev = &sh->dev[i]; 3665 } else if (test_bit(R5_Discard, &dev->flags)) 3666 discard_pending = 1; 3667 } 3668 3669 r5l_stripe_write_finished(sh); 3670 3671 if (!discard_pending && 3672 test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) { 3673 int hash; 3674 clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); 3675 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); 3676 if (sh->qd_idx >= 0) { 3677 clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); 3678 clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags); 3679 } 3680 /* now that discard is done we can proceed with any sync */ 3681 clear_bit(STRIPE_DISCARD, &sh->state); 3682 /* 3683 * SCSI discard will change some bio fields and the stripe has 3684 * no updated data, so remove it from hash list and the stripe 3685 * will be reinitialized 3686 */ 3687 unhash: 3688 hash = sh->hash_lock_index; 3689 spin_lock_irq(conf->hash_locks + hash); 3690 remove_hash(sh); 3691 spin_unlock_irq(conf->hash_locks + hash); 3692 if (head_sh->batch_head) { 3693 sh = list_first_entry(&sh->batch_list, 3694 struct stripe_head, batch_list); 3695 if (sh != head_sh) 3696 goto unhash; 3697 } 3698 sh = head_sh; 3699 3700 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) 3701 set_bit(STRIPE_HANDLE, &sh->state); 3702 3703 } 3704 3705 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 3706 if (atomic_dec_and_test(&conf->pending_full_writes)) 3707 md_wakeup_thread(conf->mddev->thread); 3708 3709 if (head_sh->batch_head && do_endio) 3710 break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS); 3711 } 3712 3713 /* 3714 * For RMW in write back cache, we need extra page in prexor to store the 3715 * old data. This page is stored in dev->orig_page. 3716 * 3717 * This function checks whether we have data for prexor. 
The exact logic 3718 * is: 3719 * R5_UPTODATE && (!R5_InJournal || R5_OrigPageUPTDODATE) 3720 */ 3721 static inline bool uptodate_for_rmw(struct r5dev *dev) 3722 { 3723 return (test_bit(R5_UPTODATE, &dev->flags)) && 3724 (!test_bit(R5_InJournal, &dev->flags) || 3725 test_bit(R5_OrigPageUPTDODATE, &dev->flags)); 3726 } 3727 3728 static int handle_stripe_dirtying(struct r5conf *conf, 3729 struct stripe_head *sh, 3730 struct stripe_head_state *s, 3731 int disks) 3732 { 3733 int rmw = 0, rcw = 0, i; 3734 sector_t recovery_cp = conf->mddev->recovery_cp; 3735 3736 /* Check whether resync is now happening or should start. 3737 * If yes, then the array is dirty (after unclean shutdown or 3738 * initial creation), so parity in some stripes might be inconsistent. 3739 * In this case, we need to always do reconstruct-write, to ensure 3740 * that in case of drive failure or read-error correction, we 3741 * generate correct data from the parity. 3742 */ 3743 if (conf->rmw_level == PARITY_DISABLE_RMW || 3744 (recovery_cp < MaxSector && sh->sector >= recovery_cp && 3745 s->failed == 0)) { 3746 /* Calculate the real rcw later - for now make it 3747 * look like rcw is cheaper 3748 */ 3749 rcw = 1; rmw = 2; 3750 pr_debug("force RCW rmw_level=%u, recovery_cp=%llu sh->sector=%llu\n", 3751 conf->rmw_level, (unsigned long long)recovery_cp, 3752 (unsigned long long)sh->sector); 3753 } else for (i = disks; i--; ) { 3754 /* would I have to read this buffer for read_modify_write */ 3755 struct r5dev *dev = &sh->dev[i]; 3756 if (((dev->towrite && !delay_towrite(conf, dev, s)) || 3757 i == sh->pd_idx || i == sh->qd_idx || 3758 test_bit(R5_InJournal, &dev->flags)) && 3759 !test_bit(R5_LOCKED, &dev->flags) && 3760 !(uptodate_for_rmw(dev) || 3761 test_bit(R5_Wantcompute, &dev->flags))) { 3762 if (test_bit(R5_Insync, &dev->flags)) 3763 rmw++; 3764 else 3765 rmw += 2*disks; /* cannot read it */ 3766 } 3767 /* Would I have to read this buffer for reconstruct_write */ 3768 if (!test_bit(R5_OVERWRITE, &dev->flags) && 3769 i != sh->pd_idx && i != sh->qd_idx && 3770 !test_bit(R5_LOCKED, &dev->flags) && 3771 !(test_bit(R5_UPTODATE, &dev->flags) || 3772 test_bit(R5_Wantcompute, &dev->flags))) { 3773 if (test_bit(R5_Insync, &dev->flags)) 3774 rcw++; 3775 else 3776 rcw += 2*disks; 3777 } 3778 } 3779 3780 pr_debug("for sector %llu state 0x%lx, rmw=%d rcw=%d\n", 3781 (unsigned long long)sh->sector, sh->state, rmw, rcw); 3782 set_bit(STRIPE_HANDLE, &sh->state); 3783 if ((rmw < rcw || (rmw == rcw && conf->rmw_level == PARITY_PREFER_RMW)) && rmw > 0) { 3784 /* prefer read-modify-write, but need to get some data */ 3785 if (conf->mddev->queue) 3786 blk_add_trace_msg(conf->mddev->queue, 3787 "raid5 rmw %llu %d", 3788 (unsigned long long)sh->sector, rmw); 3789 for (i = disks; i--; ) { 3790 struct r5dev *dev = &sh->dev[i]; 3791 if (test_bit(R5_InJournal, &dev->flags) && 3792 dev->page == dev->orig_page && 3793 !test_bit(R5_LOCKED, &sh->dev[sh->pd_idx].flags)) { 3794 /* alloc page for prexor */ 3795 struct page *p = alloc_page(GFP_NOIO); 3796 3797 if (p) { 3798 dev->orig_page = p; 3799 continue; 3800 } 3801 3802 /* 3803 * alloc_page() failed, try use 3804 * disk_info->extra_page 3805 */ 3806 if (!test_and_set_bit(R5C_EXTRA_PAGE_IN_USE, 3807 &conf->cache_state)) { 3808 r5c_use_extra_page(sh); 3809 break; 3810 } 3811 3812 /* extra_page in use, add to delayed_list */ 3813 set_bit(STRIPE_DELAYED, &sh->state); 3814 s->waiting_extra_page = 1; 3815 return -EAGAIN; 3816 } 3817 } 3818 3819 for (i = disks; i--; ) { 3820 struct r5dev *dev = 
&sh->dev[i]; 3821 if (((dev->towrite && !delay_towrite(conf, dev, s)) || 3822 i == sh->pd_idx || i == sh->qd_idx || 3823 test_bit(R5_InJournal, &dev->flags)) && 3824 !test_bit(R5_LOCKED, &dev->flags) && 3825 !(uptodate_for_rmw(dev) || 3826 test_bit(R5_Wantcompute, &dev->flags)) && 3827 test_bit(R5_Insync, &dev->flags)) { 3828 if (test_bit(STRIPE_PREREAD_ACTIVE, 3829 &sh->state)) { 3830 pr_debug("Read_old block %d for r-m-w\n", 3831 i); 3832 set_bit(R5_LOCKED, &dev->flags); 3833 set_bit(R5_Wantread, &dev->flags); 3834 s->locked++; 3835 } else { 3836 set_bit(STRIPE_DELAYED, &sh->state); 3837 set_bit(STRIPE_HANDLE, &sh->state); 3838 } 3839 } 3840 } 3841 } 3842 if ((rcw < rmw || (rcw == rmw && conf->rmw_level != PARITY_PREFER_RMW)) && rcw > 0) { 3843 /* want reconstruct write, but need to get some data */ 3844 int qread =0; 3845 rcw = 0; 3846 for (i = disks; i--; ) { 3847 struct r5dev *dev = &sh->dev[i]; 3848 if (!test_bit(R5_OVERWRITE, &dev->flags) && 3849 i != sh->pd_idx && i != sh->qd_idx && 3850 !test_bit(R5_LOCKED, &dev->flags) && 3851 !(test_bit(R5_UPTODATE, &dev->flags) || 3852 test_bit(R5_Wantcompute, &dev->flags))) { 3853 rcw++; 3854 if (test_bit(R5_Insync, &dev->flags) && 3855 test_bit(STRIPE_PREREAD_ACTIVE, 3856 &sh->state)) { 3857 pr_debug("Read_old block " 3858 "%d for Reconstruct\n", i); 3859 set_bit(R5_LOCKED, &dev->flags); 3860 set_bit(R5_Wantread, &dev->flags); 3861 s->locked++; 3862 qread++; 3863 } else { 3864 set_bit(STRIPE_DELAYED, &sh->state); 3865 set_bit(STRIPE_HANDLE, &sh->state); 3866 } 3867 } 3868 } 3869 if (rcw && conf->mddev->queue) 3870 blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d", 3871 (unsigned long long)sh->sector, 3872 rcw, qread, test_bit(STRIPE_DELAYED, &sh->state)); 3873 } 3874 3875 if (rcw > disks && rmw > disks && 3876 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 3877 set_bit(STRIPE_DELAYED, &sh->state); 3878 3879 /* now if nothing is locked, and if we have enough data, 3880 * we can start a write request 3881 */ 3882 /* since handle_stripe can be called at any time we need to handle the 3883 * case where a compute block operation has been submitted and then a 3884 * subsequent call wants to start a write request. raid_run_ops only 3885 * handles the case where compute block and reconstruct are requested 3886 * simultaneously. If this is not the case then new writes need to be 3887 * held off until the compute completes. 
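 *
 * A condensed sketch (illustrative only) of the gate that follows:
 *
 *	if ((s->req_compute || no compute currently running) &&
 *	    s->locked == 0 && (rcw == 0 || rmw == 0) &&
 *	    no bitmap-batch delay pending)
 *		schedule_reconstruction(sh, s, rcw == 0, 0);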
3888 */ 3889 if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && 3890 (s->locked == 0 && (rcw == 0 || rmw == 0) && 3891 !test_bit(STRIPE_BIT_DELAY, &sh->state))) 3892 schedule_reconstruction(sh, s, rcw == 0, 0); 3893 return 0; 3894 } 3895 3896 static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, 3897 struct stripe_head_state *s, int disks) 3898 { 3899 struct r5dev *dev = NULL; 3900 3901 BUG_ON(sh->batch_head); 3902 set_bit(STRIPE_HANDLE, &sh->state); 3903 3904 switch (sh->check_state) { 3905 case check_state_idle: 3906 /* start a new check operation if there are no failures */ 3907 if (s->failed == 0) { 3908 BUG_ON(s->uptodate != disks); 3909 sh->check_state = check_state_run; 3910 set_bit(STRIPE_OP_CHECK, &s->ops_request); 3911 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); 3912 s->uptodate--; 3913 break; 3914 } 3915 dev = &sh->dev[s->failed_num[0]]; 3916 /* fall through */ 3917 case check_state_compute_result: 3918 sh->check_state = check_state_idle; 3919 if (!dev) 3920 dev = &sh->dev[sh->pd_idx]; 3921 3922 /* check that a write has not made the stripe insync */ 3923 if (test_bit(STRIPE_INSYNC, &sh->state)) 3924 break; 3925 3926 /* either failed parity check, or recovery is happening */ 3927 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags)); 3928 BUG_ON(s->uptodate != disks); 3929 3930 set_bit(R5_LOCKED, &dev->flags); 3931 s->locked++; 3932 set_bit(R5_Wantwrite, &dev->flags); 3933 3934 clear_bit(STRIPE_DEGRADED, &sh->state); 3935 set_bit(STRIPE_INSYNC, &sh->state); 3936 break; 3937 case check_state_run: 3938 break; /* we will be called again upon completion */ 3939 case check_state_check_result: 3940 sh->check_state = check_state_idle; 3941 3942 /* if a failure occurred during the check operation, leave 3943 * STRIPE_INSYNC not set and let the stripe be handled again 3944 */ 3945 if (s->failed) 3946 break; 3947 3948 /* handle a successful check operation, if parity is correct 3949 * we are done. Otherwise update the mismatch count and repair 3950 * parity if !MD_RECOVERY_CHECK 3951 */ 3952 if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0) 3953 /* parity is correct (on disc, 3954 * not in buffer any more) 3955 */ 3956 set_bit(STRIPE_INSYNC, &sh->state); 3957 else { 3958 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); 3959 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 3960 /* don't try to repair!! */ 3961 set_bit(STRIPE_INSYNC, &sh->state); 3962 else { 3963 sh->check_state = check_state_compute_run; 3964 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 3965 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 3966 set_bit(R5_Wantcompute, 3967 &sh->dev[sh->pd_idx].flags); 3968 sh->ops.target = sh->pd_idx; 3969 sh->ops.target2 = -1; 3970 s->uptodate++; 3971 } 3972 } 3973 break; 3974 case check_state_compute_run: 3975 break; 3976 default: 3977 pr_err("%s: unknown check_state: %d sector: %llu\n", 3978 __func__, sh->check_state, 3979 (unsigned long long) sh->sector); 3980 BUG(); 3981 } 3982 } 3983 3984 static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, 3985 struct stripe_head_state *s, 3986 int disks) 3987 { 3988 int pd_idx = sh->pd_idx; 3989 int qd_idx = sh->qd_idx; 3990 struct r5dev *dev; 3991 3992 BUG_ON(sh->batch_head); 3993 set_bit(STRIPE_HANDLE, &sh->state); 3994 3995 BUG_ON(s->failed > 2); 3996 3997 /* Want to check and possibly repair P and Q. 
3998 * However there could be one 'failed' device, in which 3999 * case we can only check one of them, possibly using the 4000 * other to generate missing data 4001 */ 4002 4003 switch (sh->check_state) { 4004 case check_state_idle: 4005 /* start a new check operation if there are < 2 failures */ 4006 if (s->failed == s->q_failed) { 4007 /* The only possible failed device holds Q, so it 4008 * makes sense to check P (If anything else were failed, 4009 * we would have used P to recreate it). 4010 */ 4011 sh->check_state = check_state_run; 4012 } 4013 if (!s->q_failed && s->failed < 2) { 4014 /* Q is not failed, and we didn't use it to generate 4015 * anything, so it makes sense to check it 4016 */ 4017 if (sh->check_state == check_state_run) 4018 sh->check_state = check_state_run_pq; 4019 else 4020 sh->check_state = check_state_run_q; 4021 } 4022 4023 /* discard potentially stale zero_sum_result */ 4024 sh->ops.zero_sum_result = 0; 4025 4026 if (sh->check_state == check_state_run) { 4027 /* async_xor_zero_sum destroys the contents of P */ 4028 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 4029 s->uptodate--; 4030 } 4031 if (sh->check_state >= check_state_run && 4032 sh->check_state <= check_state_run_pq) { 4033 /* async_syndrome_zero_sum preserves P and Q, so 4034 * no need to mark them !uptodate here 4035 */ 4036 set_bit(STRIPE_OP_CHECK, &s->ops_request); 4037 break; 4038 } 4039 4040 /* we have 2-disk failure */ 4041 BUG_ON(s->failed != 2); 4042 /* fall through */ 4043 case check_state_compute_result: 4044 sh->check_state = check_state_idle; 4045 4046 /* check that a write has not made the stripe insync */ 4047 if (test_bit(STRIPE_INSYNC, &sh->state)) 4048 break; 4049 4050 /* now write out any block on a failed drive, 4051 * or P or Q if they were recomputed 4052 */ 4053 BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */ 4054 if (s->failed == 2) { 4055 dev = &sh->dev[s->failed_num[1]]; 4056 s->locked++; 4057 set_bit(R5_LOCKED, &dev->flags); 4058 set_bit(R5_Wantwrite, &dev->flags); 4059 } 4060 if (s->failed >= 1) { 4061 dev = &sh->dev[s->failed_num[0]]; 4062 s->locked++; 4063 set_bit(R5_LOCKED, &dev->flags); 4064 set_bit(R5_Wantwrite, &dev->flags); 4065 } 4066 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { 4067 dev = &sh->dev[pd_idx]; 4068 s->locked++; 4069 set_bit(R5_LOCKED, &dev->flags); 4070 set_bit(R5_Wantwrite, &dev->flags); 4071 } 4072 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { 4073 dev = &sh->dev[qd_idx]; 4074 s->locked++; 4075 set_bit(R5_LOCKED, &dev->flags); 4076 set_bit(R5_Wantwrite, &dev->flags); 4077 } 4078 clear_bit(STRIPE_DEGRADED, &sh->state); 4079 4080 set_bit(STRIPE_INSYNC, &sh->state); 4081 break; 4082 case check_state_run: 4083 case check_state_run_q: 4084 case check_state_run_pq: 4085 break; /* we will be called again upon completion */ 4086 case check_state_check_result: 4087 sh->check_state = check_state_idle; 4088 4089 /* handle a successful check operation, if parity is correct 4090 * we are done. 
Otherwise update the mismatch count and repair 4091 * parity if !MD_RECOVERY_CHECK 4092 */ 4093 if (sh->ops.zero_sum_result == 0) { 4094 /* both parities are correct */ 4095 if (!s->failed) 4096 set_bit(STRIPE_INSYNC, &sh->state); 4097 else { 4098 /* in contrast to the raid5 case we can validate 4099 * parity, but still have a failure to write 4100 * back 4101 */ 4102 sh->check_state = check_state_compute_result; 4103 /* Returning at this point means that we may go 4104 * off and bring p and/or q uptodate again so 4105 * we make sure to check zero_sum_result again 4106 * to verify if p or q need writeback 4107 */ 4108 } 4109 } else { 4110 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); 4111 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 4112 /* don't try to repair!! */ 4113 set_bit(STRIPE_INSYNC, &sh->state); 4114 else { 4115 int *target = &sh->ops.target; 4116 4117 sh->ops.target = -1; 4118 sh->ops.target2 = -1; 4119 sh->check_state = check_state_compute_run; 4120 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 4121 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 4122 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { 4123 set_bit(R5_Wantcompute, 4124 &sh->dev[pd_idx].flags); 4125 *target = pd_idx; 4126 target = &sh->ops.target2; 4127 s->uptodate++; 4128 } 4129 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { 4130 set_bit(R5_Wantcompute, 4131 &sh->dev[qd_idx].flags); 4132 *target = qd_idx; 4133 s->uptodate++; 4134 } 4135 } 4136 } 4137 break; 4138 case check_state_compute_run: 4139 break; 4140 default: 4141 pr_warn("%s: unknown check_state: %d sector: %llu\n", 4142 __func__, sh->check_state, 4143 (unsigned long long) sh->sector); 4144 BUG(); 4145 } 4146 } 4147 4148 static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh) 4149 { 4150 int i; 4151 4152 /* We have read all the blocks in this stripe and now we need to 4153 * copy some of them into a target stripe for expand. 4154 */ 4155 struct dma_async_tx_descriptor *tx = NULL; 4156 BUG_ON(sh->batch_head); 4157 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state); 4158 for (i = 0; i < sh->disks; i++) 4159 if (i != sh->pd_idx && i != sh->qd_idx) { 4160 int dd_idx, j; 4161 struct stripe_head *sh2; 4162 struct async_submit_ctl submit; 4163 4164 sector_t bn = raid5_compute_blocknr(sh, i, 1); 4165 sector_t s = raid5_compute_sector(conf, bn, 0, 4166 &dd_idx, NULL); 4167 sh2 = raid5_get_active_stripe(conf, s, 0, 1, 1); 4168 if (sh2 == NULL) 4169 /* so far only the early blocks of this stripe 4170 * have been requested. 
When later blocks 4171 * get requested, we will try again 4172 */ 4173 continue; 4174 if (!test_bit(STRIPE_EXPANDING, &sh2->state) || 4175 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) { 4176 /* must have already done this block */ 4177 raid5_release_stripe(sh2); 4178 continue; 4179 } 4180 4181 /* place all the copies on one channel */ 4182 init_async_submit(&submit, 0, tx, NULL, NULL, NULL); 4183 tx = async_memcpy(sh2->dev[dd_idx].page, 4184 sh->dev[i].page, 0, 0, STRIPE_SIZE, 4185 &submit); 4186 4187 set_bit(R5_Expanded, &sh2->dev[dd_idx].flags); 4188 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags); 4189 for (j = 0; j < conf->raid_disks; j++) 4190 if (j != sh2->pd_idx && 4191 j != sh2->qd_idx && 4192 !test_bit(R5_Expanded, &sh2->dev[j].flags)) 4193 break; 4194 if (j == conf->raid_disks) { 4195 set_bit(STRIPE_EXPAND_READY, &sh2->state); 4196 set_bit(STRIPE_HANDLE, &sh2->state); 4197 } 4198 raid5_release_stripe(sh2); 4199 4200 } 4201 /* done submitting copies, wait for them to complete */ 4202 async_tx_quiesce(&tx); 4203 } 4204 4205 /* 4206 * handle_stripe - do things to a stripe. 4207 * 4208 * We lock the stripe by setting STRIPE_ACTIVE and then examine the 4209 * state of various bits to see what needs to be done. 4210 * Possible results: 4211 * return some read requests which now have data 4212 * return some write requests which are safely on storage 4213 * schedule a read on some buffers 4214 * schedule a write of some buffers 4215 * return confirmation of parity correctness 4216 * 4217 */ 4218 4219 static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) 4220 { 4221 struct r5conf *conf = sh->raid_conf; 4222 int disks = sh->disks; 4223 struct r5dev *dev; 4224 int i; 4225 int do_recovery = 0; 4226 4227 memset(s, 0, sizeof(*s)); 4228 4229 s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state) && !sh->batch_head; 4230 s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state) && !sh->batch_head; 4231 s->failed_num[0] = -1; 4232 s->failed_num[1] = -1; 4233 s->log_failed = r5l_log_disk_error(conf); 4234 4235 /* Now to look around and see what can be done */ 4236 rcu_read_lock(); 4237 for (i=disks; i--; ) { 4238 struct md_rdev *rdev; 4239 sector_t first_bad; 4240 int bad_sectors; 4241 int is_bad = 0; 4242 4243 dev = &sh->dev[i]; 4244 4245 pr_debug("check %d: state 0x%lx read %p write %p written %p\n", 4246 i, dev->flags, 4247 dev->toread, dev->towrite, dev->written); 4248 /* maybe we can reply to a read 4249 * 4250 * new wantfill requests are only permitted while 4251 * ops_complete_biofill is guaranteed to be inactive 4252 */ 4253 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread && 4254 !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) 4255 set_bit(R5_Wantfill, &dev->flags); 4256 4257 /* now count some things */ 4258 if (test_bit(R5_LOCKED, &dev->flags)) 4259 s->locked++; 4260 if (test_bit(R5_UPTODATE, &dev->flags)) 4261 s->uptodate++; 4262 if (test_bit(R5_Wantcompute, &dev->flags)) { 4263 s->compute++; 4264 BUG_ON(s->compute > 2); 4265 } 4266 4267 if (test_bit(R5_Wantfill, &dev->flags)) 4268 s->to_fill++; 4269 else if (dev->toread) 4270 s->to_read++; 4271 if (dev->towrite) { 4272 s->to_write++; 4273 if (!test_bit(R5_OVERWRITE, &dev->flags)) 4274 s->non_overwrite++; 4275 } 4276 if (dev->written) 4277 s->written++; 4278 /* Prefer to use the replacement for reads, but only 4279 * if it is recovered enough and has no bad blocks. 
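 *
 * In sketch form, the selection below is:
 *
 *	rdev = replacement;
 *	if (rdev && !Faulty && recovery_offset covers this stripe &&
 *	    no bad block in the range)
 *		read from the replacement (R5_ReadRepl);
 *	else
 *		fall back to the main rdev, using R5_NeedReplace to
 *		remember that a live replacement still needs this
 *		block written out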
4280 */ 4281 rdev = rcu_dereference(conf->disks[i].replacement); 4282 if (rdev && !test_bit(Faulty, &rdev->flags) && 4283 rdev->recovery_offset >= sh->sector + STRIPE_SECTORS && 4284 !is_badblock(rdev, sh->sector, STRIPE_SECTORS, 4285 &first_bad, &bad_sectors)) 4286 set_bit(R5_ReadRepl, &dev->flags); 4287 else { 4288 if (rdev && !test_bit(Faulty, &rdev->flags)) 4289 set_bit(R5_NeedReplace, &dev->flags); 4290 else 4291 clear_bit(R5_NeedReplace, &dev->flags); 4292 rdev = rcu_dereference(conf->disks[i].rdev); 4293 clear_bit(R5_ReadRepl, &dev->flags); 4294 } 4295 if (rdev && test_bit(Faulty, &rdev->flags)) 4296 rdev = NULL; 4297 if (rdev) { 4298 is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, 4299 &first_bad, &bad_sectors); 4300 if (s->blocked_rdev == NULL 4301 && (test_bit(Blocked, &rdev->flags) 4302 || is_bad < 0)) { 4303 if (is_bad < 0) 4304 set_bit(BlockedBadBlocks, 4305 &rdev->flags); 4306 s->blocked_rdev = rdev; 4307 atomic_inc(&rdev->nr_pending); 4308 } 4309 } 4310 clear_bit(R5_Insync, &dev->flags); 4311 if (!rdev) 4312 /* Not in-sync */; 4313 else if (is_bad) { 4314 /* also not in-sync */ 4315 if (!test_bit(WriteErrorSeen, &rdev->flags) && 4316 test_bit(R5_UPTODATE, &dev->flags)) { 4317 /* treat as in-sync, but with a read error 4318 * which we can now try to correct 4319 */ 4320 set_bit(R5_Insync, &dev->flags); 4321 set_bit(R5_ReadError, &dev->flags); 4322 } 4323 } else if (test_bit(In_sync, &rdev->flags)) 4324 set_bit(R5_Insync, &dev->flags); 4325 else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) 4326 /* in sync if before recovery_offset */ 4327 set_bit(R5_Insync, &dev->flags); 4328 else if (test_bit(R5_UPTODATE, &dev->flags) && 4329 test_bit(R5_Expanded, &dev->flags)) 4330 /* If we've reshaped into here, we assume it is Insync. 4331 * We will shortly update recovery_offset to make 4332 * it official. 
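 * (The reshape has just written this region, so data and parity
 * here are consistent by construction.)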
4333 */ 4334 set_bit(R5_Insync, &dev->flags); 4335 4336 if (test_bit(R5_WriteError, &dev->flags)) { 4337 /* This flag does not apply to '.replacement' 4338 * only to .rdev, so make sure to check that*/ 4339 struct md_rdev *rdev2 = rcu_dereference( 4340 conf->disks[i].rdev); 4341 if (rdev2 == rdev) 4342 clear_bit(R5_Insync, &dev->flags); 4343 if (rdev2 && !test_bit(Faulty, &rdev2->flags)) { 4344 s->handle_bad_blocks = 1; 4345 atomic_inc(&rdev2->nr_pending); 4346 } else 4347 clear_bit(R5_WriteError, &dev->flags); 4348 } 4349 if (test_bit(R5_MadeGood, &dev->flags)) { 4350 /* This flag does not apply to '.replacement' 4351 * only to .rdev, so make sure to check that*/ 4352 struct md_rdev *rdev2 = rcu_dereference( 4353 conf->disks[i].rdev); 4354 if (rdev2 && !test_bit(Faulty, &rdev2->flags)) { 4355 s->handle_bad_blocks = 1; 4356 atomic_inc(&rdev2->nr_pending); 4357 } else 4358 clear_bit(R5_MadeGood, &dev->flags); 4359 } 4360 if (test_bit(R5_MadeGoodRepl, &dev->flags)) { 4361 struct md_rdev *rdev2 = rcu_dereference( 4362 conf->disks[i].replacement); 4363 if (rdev2 && !test_bit(Faulty, &rdev2->flags)) { 4364 s->handle_bad_blocks = 1; 4365 atomic_inc(&rdev2->nr_pending); 4366 } else 4367 clear_bit(R5_MadeGoodRepl, &dev->flags); 4368 } 4369 if (!test_bit(R5_Insync, &dev->flags)) { 4370 /* The ReadError flag will just be confusing now */ 4371 clear_bit(R5_ReadError, &dev->flags); 4372 clear_bit(R5_ReWrite, &dev->flags); 4373 } 4374 if (test_bit(R5_ReadError, &dev->flags)) 4375 clear_bit(R5_Insync, &dev->flags); 4376 if (!test_bit(R5_Insync, &dev->flags)) { 4377 if (s->failed < 2) 4378 s->failed_num[s->failed] = i; 4379 s->failed++; 4380 if (rdev && !test_bit(Faulty, &rdev->flags)) 4381 do_recovery = 1; 4382 } 4383 4384 if (test_bit(R5_InJournal, &dev->flags)) 4385 s->injournal++; 4386 if (test_bit(R5_InJournal, &dev->flags) && dev->written) 4387 s->just_cached++; 4388 } 4389 if (test_bit(STRIPE_SYNCING, &sh->state)) { 4390 /* If there is a failed device being replaced, 4391 * we must be recovering. 4392 * else if we are after recovery_cp, we must be syncing 4393 * else if MD_RECOVERY_REQUESTED is set, we also are syncing. 4394 * else we can only be replacing 4395 * sync and recovery both need to read all devices, and so 4396 * use the same flag. 4397 */ 4398 if (do_recovery || 4399 sh->sector >= conf->mddev->recovery_cp || 4400 test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery))) 4401 s->syncing = 1; 4402 else 4403 s->replacing = 1; 4404 } 4405 rcu_read_unlock(); 4406 } 4407 4408 static int clear_batch_ready(struct stripe_head *sh) 4409 { 4410 /* Return '1' if this is a member of batch, or 4411 * '0' if it is a lone stripe or a head which can now be 4412 * handled. 4413 */ 4414 struct stripe_head *tmp; 4415 if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state)) 4416 return (sh->batch_head && sh->batch_head != sh); 4417 spin_lock(&sh->stripe_lock); 4418 if (!sh->batch_head) { 4419 spin_unlock(&sh->stripe_lock); 4420 return 0; 4421 } 4422 4423 /* 4424 * this stripe could be added to a batch list before we check 4425 * BATCH_READY, skips it 4426 */ 4427 if (sh->batch_head != sh) { 4428 spin_unlock(&sh->stripe_lock); 4429 return 1; 4430 } 4431 spin_lock(&sh->batch_lock); 4432 list_for_each_entry(tmp, &sh->batch_list, batch_list) 4433 clear_bit(STRIPE_BATCH_READY, &tmp->state); 4434 spin_unlock(&sh->batch_lock); 4435 spin_unlock(&sh->stripe_lock); 4436 4437 /* 4438 * BATCH_READY is cleared, no new stripes can be added. 
4439 * batch_list can be accessed without lock 4440 */ 4441 return 0; 4442 } 4443 4444 static void break_stripe_batch_list(struct stripe_head *head_sh, 4445 unsigned long handle_flags) 4446 { 4447 struct stripe_head *sh, *next; 4448 int i; 4449 int do_wakeup = 0; 4450 4451 list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) { 4452 4453 list_del_init(&sh->batch_list); 4454 4455 WARN_ONCE(sh->state & ((1 << STRIPE_ACTIVE) | 4456 (1 << STRIPE_SYNCING) | 4457 (1 << STRIPE_REPLACED) | 4458 (1 << STRIPE_DELAYED) | 4459 (1 << STRIPE_BIT_DELAY) | 4460 (1 << STRIPE_FULL_WRITE) | 4461 (1 << STRIPE_BIOFILL_RUN) | 4462 (1 << STRIPE_COMPUTE_RUN) | 4463 (1 << STRIPE_OPS_REQ_PENDING) | 4464 (1 << STRIPE_DISCARD) | 4465 (1 << STRIPE_BATCH_READY) | 4466 (1 << STRIPE_BATCH_ERR) | 4467 (1 << STRIPE_BITMAP_PENDING)), 4468 "stripe state: %lx\n", sh->state); 4469 WARN_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) | 4470 (1 << STRIPE_REPLACED)), 4471 "head stripe state: %lx\n", head_sh->state); 4472 4473 set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS | 4474 (1 << STRIPE_PREREAD_ACTIVE) | 4475 (1 << STRIPE_DEGRADED)), 4476 head_sh->state & (1 << STRIPE_INSYNC)); 4477 4478 sh->check_state = head_sh->check_state; 4479 sh->reconstruct_state = head_sh->reconstruct_state; 4480 for (i = 0; i < sh->disks; i++) { 4481 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 4482 do_wakeup = 1; 4483 sh->dev[i].flags = head_sh->dev[i].flags & 4484 (~((1 << R5_WriteError) | (1 << R5_Overlap))); 4485 } 4486 spin_lock_irq(&sh->stripe_lock); 4487 sh->batch_head = NULL; 4488 spin_unlock_irq(&sh->stripe_lock); 4489 if (handle_flags == 0 || 4490 sh->state & handle_flags) 4491 set_bit(STRIPE_HANDLE, &sh->state); 4492 raid5_release_stripe(sh); 4493 } 4494 spin_lock_irq(&head_sh->stripe_lock); 4495 head_sh->batch_head = NULL; 4496 spin_unlock_irq(&head_sh->stripe_lock); 4497 for (i = 0; i < head_sh->disks; i++) 4498 if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags)) 4499 do_wakeup = 1; 4500 if (head_sh->state & handle_flags) 4501 set_bit(STRIPE_HANDLE, &head_sh->state); 4502 4503 if (do_wakeup) 4504 wake_up(&head_sh->raid_conf->wait_for_overlap); 4505 } 4506 4507 static void handle_stripe(struct stripe_head *sh) 4508 { 4509 struct stripe_head_state s; 4510 struct r5conf *conf = sh->raid_conf; 4511 int i; 4512 int prexor; 4513 int disks = sh->disks; 4514 struct r5dev *pdev, *qdev; 4515 4516 clear_bit(STRIPE_HANDLE, &sh->state); 4517 if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) { 4518 /* already being handled, ensure it gets handled 4519 * again when current action finishes */ 4520 set_bit(STRIPE_HANDLE, &sh->state); 4521 return; 4522 } 4523 4524 if (clear_batch_ready(sh)) { 4525 clear_bit_unlock(STRIPE_ACTIVE, &sh->state); 4526 return; 4527 } 4528 4529 if (test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state)) 4530 break_stripe_batch_list(sh, 0); 4531 4532 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) { 4533 spin_lock(&sh->stripe_lock); 4534 /* Cannot process 'sync' concurrently with 'discard' */ 4535 if (!test_bit(STRIPE_DISCARD, &sh->state) && 4536 test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { 4537 set_bit(STRIPE_SYNCING, &sh->state); 4538 clear_bit(STRIPE_INSYNC, &sh->state); 4539 clear_bit(STRIPE_REPLACED, &sh->state); 4540 } 4541 spin_unlock(&sh->stripe_lock); 4542 } 4543 clear_bit(STRIPE_DELAYED, &sh->state); 4544 4545 pr_debug("handling stripe %llu, state=%#lx cnt=%d, " 4546 "pd_idx=%d, qd_idx=%d, check:%d, reconstruct:%d\n", 4547 (unsigned long
long)sh->sector, sh->state, 4548 atomic_read(&sh->count), sh->pd_idx, sh->qd_idx, 4549 sh->check_state, sh->reconstruct_state); 4550 4551 analyse_stripe(sh, &s); 4552 4553 if (test_bit(STRIPE_LOG_TRAPPED, &sh->state)) 4554 goto finish; 4555 4556 if (s.handle_bad_blocks) { 4557 set_bit(STRIPE_HANDLE, &sh->state); 4558 goto finish; 4559 } 4560 4561 if (unlikely(s.blocked_rdev)) { 4562 if (s.syncing || s.expanding || s.expanded || 4563 s.replacing || s.to_write || s.written) { 4564 set_bit(STRIPE_HANDLE, &sh->state); 4565 goto finish; 4566 } 4567 /* There is nothing for the blocked_rdev to block */ 4568 rdev_dec_pending(s.blocked_rdev, conf->mddev); 4569 s.blocked_rdev = NULL; 4570 } 4571 4572 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { 4573 set_bit(STRIPE_OP_BIOFILL, &s.ops_request); 4574 set_bit(STRIPE_BIOFILL_RUN, &sh->state); 4575 } 4576 4577 pr_debug("locked=%d uptodate=%d to_read=%d" 4578 " to_write=%d failed=%d failed_num=%d,%d\n", 4579 s.locked, s.uptodate, s.to_read, s.to_write, s.failed, 4580 s.failed_num[0], s.failed_num[1]); 4581 /* check if the array has lost more than max_degraded devices and, 4582 * if so, some requests might need to be failed. 4583 */ 4584 if (s.failed > conf->max_degraded || s.log_failed) { 4585 sh->check_state = 0; 4586 sh->reconstruct_state = 0; 4587 break_stripe_batch_list(sh, 0); 4588 if (s.to_read+s.to_write+s.written) 4589 handle_failed_stripe(conf, sh, &s, disks, &s.return_bi); 4590 if (s.syncing + s.replacing) 4591 handle_failed_sync(conf, sh, &s); 4592 } 4593 4594 /* Now we check to see if any write operations have recently 4595 * completed 4596 */ 4597 prexor = 0; 4598 if (sh->reconstruct_state == reconstruct_state_prexor_drain_result) 4599 prexor = 1; 4600 if (sh->reconstruct_state == reconstruct_state_drain_result || 4601 sh->reconstruct_state == reconstruct_state_prexor_drain_result) { 4602 sh->reconstruct_state = reconstruct_state_idle; 4603 4604 /* All the 'written' buffers and the parity block are ready to 4605 * be written back to disk 4606 */ 4607 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) && 4608 !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)); 4609 BUG_ON(sh->qd_idx >= 0 && 4610 !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags) && 4611 !test_bit(R5_Discard, &sh->dev[sh->qd_idx].flags)); 4612 for (i = disks; i--; ) { 4613 struct r5dev *dev = &sh->dev[i]; 4614 if (test_bit(R5_LOCKED, &dev->flags) && 4615 (i == sh->pd_idx || i == sh->qd_idx || 4616 dev->written || test_bit(R5_InJournal, 4617 &dev->flags))) { 4618 pr_debug("Writing block %d\n", i); 4619 set_bit(R5_Wantwrite, &dev->flags); 4620 if (prexor) 4621 continue; 4622 if (s.failed > 1) 4623 continue; 4624 if (!test_bit(R5_Insync, &dev->flags) || 4625 ((i == sh->pd_idx || i == sh->qd_idx) && 4626 s.failed == 0)) 4627 set_bit(STRIPE_INSYNC, &sh->state); 4628 } 4629 } 4630 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 4631 s.dec_preread_active = 1; 4632 } 4633 4634 /* 4635 * might be able to return some write requests if the parity blocks 4636 * are safe, or on a failed drive 4637 */ 4638 pdev = &sh->dev[sh->pd_idx]; 4639 s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx) 4640 || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx); 4641 qdev = &sh->dev[sh->qd_idx]; 4642 s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx) 4643 || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx) 4644 || conf->level < 6; 4645 4646 if (s.written && 4647 (s.p_failed || ((test_bit(R5_Insync, &pdev->flags) 4648 && !test_bit(R5_LOCKED, 
&pdev->flags) 4649 && (test_bit(R5_UPTODATE, &pdev->flags) || 4650 test_bit(R5_Discard, &pdev->flags))))) && 4651 (s.q_failed || ((test_bit(R5_Insync, &qdev->flags) 4652 && !test_bit(R5_LOCKED, &qdev->flags) 4653 && (test_bit(R5_UPTODATE, &qdev->flags) || 4654 test_bit(R5_Discard, &qdev->flags)))))) 4655 handle_stripe_clean_event(conf, sh, disks, &s.return_bi); 4656 4657 if (s.just_cached) 4658 r5c_handle_cached_data_endio(conf, sh, disks, &s.return_bi); 4659 r5l_stripe_write_finished(sh); 4660 4661 /* Now we might consider reading some blocks, either to check/generate 4662 * parity, or to satisfy requests 4663 * or to load a block that is being partially written. 4664 */ 4665 if (s.to_read || s.non_overwrite 4666 || (conf->level == 6 && s.to_write && s.failed) 4667 || (s.syncing && (s.uptodate + s.compute < disks)) 4668 || s.replacing 4669 || s.expanding) 4670 handle_stripe_fill(sh, &s, disks); 4671 4672 /* 4673 * When the stripe finishes full journal write cycle (write to journal 4674 * and raid disk), this is the clean up procedure so it is ready for 4675 * next operation. 4676 */ 4677 r5c_finish_stripe_write_out(conf, sh, &s); 4678 4679 /* 4680 * Now to consider new write requests, cache write back and what else, 4681 * if anything should be read. We do not handle new writes when: 4682 * 1/ A 'write' operation (copy+xor) is already in flight. 4683 * 2/ A 'check' operation is in flight, as it may clobber the parity 4684 * block. 4685 * 3/ A r5c cache log write is in flight. 4686 */ 4687 4688 if (!sh->reconstruct_state && !sh->check_state && !sh->log_io) { 4689 if (!r5c_is_writeback(conf->log)) { 4690 if (s.to_write) 4691 handle_stripe_dirtying(conf, sh, &s, disks); 4692 } else { /* write back cache */ 4693 int ret = 0; 4694 4695 /* First, try handle writes in caching phase */ 4696 if (s.to_write) 4697 ret = r5c_try_caching_write(conf, sh, &s, 4698 disks); 4699 /* 4700 * If caching phase failed: ret == -EAGAIN 4701 * OR 4702 * stripe under reclaim: !caching && injournal 4703 * 4704 * fall back to handle_stripe_dirtying() 4705 */ 4706 if (ret == -EAGAIN || 4707 /* stripe under reclaim: !caching && injournal */ 4708 (!test_bit(STRIPE_R5C_CACHING, &sh->state) && 4709 s.injournal > 0)) { 4710 ret = handle_stripe_dirtying(conf, sh, &s, 4711 disks); 4712 if (ret == -EAGAIN) 4713 goto finish; 4714 } 4715 } 4716 } 4717 4718 /* maybe we need to check and possibly fix the parity for this stripe 4719 * Any reads will already have been scheduled, so we just see if enough 4720 * data is available. The parity check is held off while parity 4721 * dependent operations are in flight. 
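 * (A zero-sum check run while a compute or reconstruct is still
 * rewriting the parity buffers would produce a meaningless result.)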
4722 */ 4723 if (sh->check_state || 4724 (s.syncing && s.locked == 0 && 4725 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && 4726 !test_bit(STRIPE_INSYNC, &sh->state))) { 4727 if (conf->level == 6) 4728 handle_parity_checks6(conf, sh, &s, disks); 4729 else 4730 handle_parity_checks5(conf, sh, &s, disks); 4731 } 4732 4733 if ((s.replacing || s.syncing) && s.locked == 0 4734 && !test_bit(STRIPE_COMPUTE_RUN, &sh->state) 4735 && !test_bit(STRIPE_REPLACED, &sh->state)) { 4736 /* Write out to replacement devices where possible */ 4737 for (i = 0; i < conf->raid_disks; i++) 4738 if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) { 4739 WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags)); 4740 set_bit(R5_WantReplace, &sh->dev[i].flags); 4741 set_bit(R5_LOCKED, &sh->dev[i].flags); 4742 s.locked++; 4743 } 4744 if (s.replacing) 4745 set_bit(STRIPE_INSYNC, &sh->state); 4746 set_bit(STRIPE_REPLACED, &sh->state); 4747 } 4748 if ((s.syncing || s.replacing) && s.locked == 0 && 4749 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && 4750 test_bit(STRIPE_INSYNC, &sh->state)) { 4751 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 4752 clear_bit(STRIPE_SYNCING, &sh->state); 4753 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) 4754 wake_up(&conf->wait_for_overlap); 4755 } 4756 4757 /* If the failed drives are just a ReadError, then we might need 4758 * to progress the repair/check process 4759 */ 4760 if (s.failed <= conf->max_degraded && !conf->mddev->ro) 4761 for (i = 0; i < s.failed; i++) { 4762 struct r5dev *dev = &sh->dev[s.failed_num[i]]; 4763 if (test_bit(R5_ReadError, &dev->flags) 4764 && !test_bit(R5_LOCKED, &dev->flags) 4765 && test_bit(R5_UPTODATE, &dev->flags) 4766 ) { 4767 if (!test_bit(R5_ReWrite, &dev->flags)) { 4768 set_bit(R5_Wantwrite, &dev->flags); 4769 set_bit(R5_ReWrite, &dev->flags); 4770 set_bit(R5_LOCKED, &dev->flags); 4771 s.locked++; 4772 } else { 4773 /* let's read it back */ 4774 set_bit(R5_Wantread, &dev->flags); 4775 set_bit(R5_LOCKED, &dev->flags); 4776 s.locked++; 4777 } 4778 } 4779 } 4780 4781 /* Finish reconstruct operations initiated by the expansion process */ 4782 if (sh->reconstruct_state == reconstruct_state_result) { 4783 struct stripe_head *sh_src 4784 = raid5_get_active_stripe(conf, sh->sector, 1, 1, 1); 4785 if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) { 4786 /* sh cannot be written until sh_src has been read. 
4787 * so arrange for sh to be delayed a little 4788 */ 4789 set_bit(STRIPE_DELAYED, &sh->state); 4790 set_bit(STRIPE_HANDLE, &sh->state); 4791 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, 4792 &sh_src->state)) 4793 atomic_inc(&conf->preread_active_stripes); 4794 raid5_release_stripe(sh_src); 4795 goto finish; 4796 } 4797 if (sh_src) 4798 raid5_release_stripe(sh_src); 4799 4800 sh->reconstruct_state = reconstruct_state_idle; 4801 clear_bit(STRIPE_EXPANDING, &sh->state); 4802 for (i = conf->raid_disks; i--; ) { 4803 set_bit(R5_Wantwrite, &sh->dev[i].flags); 4804 set_bit(R5_LOCKED, &sh->dev[i].flags); 4805 s.locked++; 4806 } 4807 } 4808 4809 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && 4810 !sh->reconstruct_state) { 4811 /* Need to write out all blocks after computing parity */ 4812 sh->disks = conf->raid_disks; 4813 stripe_set_idx(sh->sector, conf, 0, sh); 4814 schedule_reconstruction(sh, &s, 1, 1); 4815 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { 4816 clear_bit(STRIPE_EXPAND_READY, &sh->state); 4817 atomic_dec(&conf->reshape_stripes); 4818 wake_up(&conf->wait_for_overlap); 4819 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 4820 } 4821 4822 if (s.expanding && s.locked == 0 && 4823 !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) 4824 handle_stripe_expansion(conf, sh); 4825 4826 finish: 4827 /* wait for this device to become unblocked */ 4828 if (unlikely(s.blocked_rdev)) { 4829 if (conf->mddev->external) 4830 md_wait_for_blocked_rdev(s.blocked_rdev, 4831 conf->mddev); 4832 else 4833 /* Internal metadata will immediately 4834 * be written by raid5d, so we don't 4835 * need to wait here. 4836 */ 4837 rdev_dec_pending(s.blocked_rdev, 4838 conf->mddev); 4839 } 4840 4841 if (s.handle_bad_blocks) 4842 for (i = disks; i--; ) { 4843 struct md_rdev *rdev; 4844 struct r5dev *dev = &sh->dev[i]; 4845 if (test_and_clear_bit(R5_WriteError, &dev->flags)) { 4846 /* We own a safe reference to the rdev */ 4847 rdev = conf->disks[i].rdev; 4848 if (!rdev_set_badblocks(rdev, sh->sector, 4849 STRIPE_SECTORS, 0)) 4850 md_error(conf->mddev, rdev); 4851 rdev_dec_pending(rdev, conf->mddev); 4852 } 4853 if (test_and_clear_bit(R5_MadeGood, &dev->flags)) { 4854 rdev = conf->disks[i].rdev; 4855 rdev_clear_badblocks(rdev, sh->sector, 4856 STRIPE_SECTORS, 0); 4857 rdev_dec_pending(rdev, conf->mddev); 4858 } 4859 if (test_and_clear_bit(R5_MadeGoodRepl, &dev->flags)) { 4860 rdev = conf->disks[i].replacement; 4861 if (!rdev) 4862 /* rdev have been moved down */ 4863 rdev = conf->disks[i].rdev; 4864 rdev_clear_badblocks(rdev, sh->sector, 4865 STRIPE_SECTORS, 0); 4866 rdev_dec_pending(rdev, conf->mddev); 4867 } 4868 } 4869 4870 if (s.ops_request) 4871 raid_run_ops(sh, s.ops_request); 4872 4873 ops_run_io(sh, &s); 4874 4875 if (s.dec_preread_active) { 4876 /* We delay this until after ops_run_io so that if make_request 4877 * is waiting on a flush, it won't continue until the writes 4878 * have actually been submitted. 
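 * The wake-up below lets the array thread start draining the
 * delayed_list again once the count falls under IO_THRESHOLD; see
 * raid5_activate_delayed().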
4879 */ 4880 atomic_dec(&conf->preread_active_stripes); 4881 if (atomic_read(&conf->preread_active_stripes) < 4882 IO_THRESHOLD) 4883 md_wakeup_thread(conf->mddev->thread); 4884 } 4885 4886 if (!bio_list_empty(&s.return_bi)) { 4887 if (test_bit(MD_SB_CHANGE_PENDING, &conf->mddev->sb_flags)) { 4888 spin_lock_irq(&conf->device_lock); 4889 bio_list_merge(&conf->return_bi, &s.return_bi); 4890 spin_unlock_irq(&conf->device_lock); 4891 md_wakeup_thread(conf->mddev->thread); 4892 } else 4893 return_io(&s.return_bi); 4894 } 4895 4896 clear_bit_unlock(STRIPE_ACTIVE, &sh->state); 4897 } 4898 4899 static void raid5_activate_delayed(struct r5conf *conf) 4900 { 4901 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { 4902 while (!list_empty(&conf->delayed_list)) { 4903 struct list_head *l = conf->delayed_list.next; 4904 struct stripe_head *sh; 4905 sh = list_entry(l, struct stripe_head, lru); 4906 list_del_init(l); 4907 clear_bit(STRIPE_DELAYED, &sh->state); 4908 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 4909 atomic_inc(&conf->preread_active_stripes); 4910 list_add_tail(&sh->lru, &conf->hold_list); 4911 raid5_wakeup_stripe_thread(sh); 4912 } 4913 } 4914 } 4915 4916 static void activate_bit_delay(struct r5conf *conf, 4917 struct list_head *temp_inactive_list) 4918 { 4919 /* device_lock is held */ 4920 struct list_head head; 4921 list_add(&head, &conf->bitmap_list); 4922 list_del_init(&conf->bitmap_list); 4923 while (!list_empty(&head)) { 4924 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); 4925 int hash; 4926 list_del_init(&sh->lru); 4927 atomic_inc(&sh->count); 4928 hash = sh->hash_lock_index; 4929 __release_stripe(conf, sh, &temp_inactive_list[hash]); 4930 } 4931 } 4932 4933 static int raid5_congested(struct mddev *mddev, int bits) 4934 { 4935 struct r5conf *conf = mddev->private; 4936 4937 /* No difference between reads and writes. Just check 4938 * how busy the stripe_cache is 4939 */ 4940 4941 if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) 4942 return 1; 4943 4944 /* Also checks whether there is pressure on r5cache log space */ 4945 if (test_bit(R5C_LOG_TIGHT, &conf->cache_state)) 4946 return 1; 4947 if (conf->quiesce) 4948 return 1; 4949 if (atomic_read(&conf->empty_inactive_list_nr)) 4950 return 1; 4951 4952 return 0; 4953 } 4954 4955 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) 4956 { 4957 struct r5conf *conf = mddev->private; 4958 sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev); 4959 unsigned int chunk_sectors; 4960 unsigned int bio_sectors = bio_sectors(bio); 4961 4962 chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors); 4963 return chunk_sectors >= 4964 ((sector & (chunk_sectors - 1)) + bio_sectors); 4965 } 4966 4967 /* 4968 * add bio to the retry LIFO ( in O(1) ... we are in interrupt ) 4969 * later sampled by raid5d. 
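 *
 * The list is intrusive, chained through bio->bi_next under
 * conf->device_lock; the push is simply (sketch of the code below):
 *
 *	bi->bi_next = conf->retry_read_aligned_list;
 *	conf->retry_read_aligned_list = bi;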
4970 */ 4971 static void add_bio_to_retry(struct bio *bi, struct r5conf *conf) 4972 { 4973 unsigned long flags; 4974 4975 spin_lock_irqsave(&conf->device_lock, flags); 4976 4977 bi->bi_next = conf->retry_read_aligned_list; 4978 conf->retry_read_aligned_list = bi; 4979 4980 spin_unlock_irqrestore(&conf->device_lock, flags); 4981 md_wakeup_thread(conf->mddev->thread); 4982 } 4983 4984 static struct bio *remove_bio_from_retry(struct r5conf *conf) 4985 { 4986 struct bio *bi; 4987 4988 bi = conf->retry_read_aligned; 4989 if (bi) { 4990 conf->retry_read_aligned = NULL; 4991 return bi; 4992 } 4993 bi = conf->retry_read_aligned_list; 4994 if (bi) { 4995 conf->retry_read_aligned_list = bi->bi_next; 4996 bi->bi_next = NULL; 4997 /* 4998 * this sets the active stripe count to 1 and the processed 4999 * stripe count to zero (upper 8 bits) 5000 */ 5001 raid5_set_bi_stripes(bi, 1); /* biased count of active stripes */ 5002 } 5003 5004 return bi; 5005 } 5006 5007 /* 5008 * The "raid5_align_endio" should check if the read succeeded and if it 5009 * did, call bio_endio on the original bio (having bio_put the new bio 5010 * first). 5011 * If the read failed, the original bio is queued for a retry via add_bio_to_retry(). 5012 */ 5013 static void raid5_align_endio(struct bio *bi) 5014 { 5015 struct bio* raid_bi = bi->bi_private; 5016 struct mddev *mddev; 5017 struct r5conf *conf; 5018 struct md_rdev *rdev; 5019 int error = bi->bi_error; 5020 5021 bio_put(bi); 5022 5023 rdev = (void*)raid_bi->bi_next; 5024 raid_bi->bi_next = NULL; 5025 mddev = rdev->mddev; 5026 conf = mddev->private; 5027 5028 rdev_dec_pending(rdev, conf->mddev); 5029 5030 if (!error) { 5031 trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev), 5032 raid_bi, 0); 5033 bio_endio(raid_bi); 5034 if (atomic_dec_and_test(&conf->active_aligned_reads)) 5035 wake_up(&conf->wait_for_quiescent); 5036 return; 5037 } 5038 5039 pr_debug("raid5_align_endio : io error...handing IO for a retry\n"); 5040 5041 add_bio_to_retry(raid_bi, conf); 5042 } 5043 5044 static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio) 5045 { 5046 struct r5conf *conf = mddev->private; 5047 int dd_idx; 5048 struct bio* align_bi; 5049 struct md_rdev *rdev; 5050 sector_t end_sector; 5051 5052 if (!in_chunk_boundary(mddev, raid_bio)) { 5053 pr_debug("%s: non aligned\n", __func__); 5054 return 0; 5055 } 5056 /* 5057 * use bio_clone_fast to make a copy of the bio 5058 */ 5059 align_bi = bio_clone_fast(raid_bio, GFP_NOIO, mddev->bio_set); 5060 if (!align_bi) 5061 return 0; 5062 /* 5063 * set bi_end_io to a new function, and set bi_private to the 5064 * original bio.
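 * The clone is what is actually submitted to the member disk; the
 * original bio is only completed (or queued for retry) from
 * raid5_align_endio() once the clone finishes.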
5065 */ 5066 align_bi->bi_end_io = raid5_align_endio; 5067 align_bi->bi_private = raid_bio; 5068 /* 5069 * compute position 5070 */ 5071 align_bi->bi_iter.bi_sector = 5072 raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, 5073 0, &dd_idx, NULL); 5074 5075 end_sector = bio_end_sector(align_bi); 5076 rcu_read_lock(); 5077 rdev = rcu_dereference(conf->disks[dd_idx].replacement); 5078 if (!rdev || test_bit(Faulty, &rdev->flags) || 5079 rdev->recovery_offset < end_sector) { 5080 rdev = rcu_dereference(conf->disks[dd_idx].rdev); 5081 if (rdev && 5082 (test_bit(Faulty, &rdev->flags) || 5083 !(test_bit(In_sync, &rdev->flags) || 5084 rdev->recovery_offset >= end_sector))) 5085 rdev = NULL; 5086 } 5087 5088 if (r5c_big_stripe_cached(conf, align_bi->bi_iter.bi_sector)) { 5089 rcu_read_unlock(); 5090 bio_put(align_bi); 5091 return 0; 5092 } 5093 5094 if (rdev) { 5095 sector_t first_bad; 5096 int bad_sectors; 5097 5098 atomic_inc(&rdev->nr_pending); 5099 rcu_read_unlock(); 5100 raid_bio->bi_next = (void*)rdev; 5101 align_bi->bi_bdev = rdev->bdev; 5102 bio_clear_flag(align_bi, BIO_SEG_VALID); 5103 5104 if (is_badblock(rdev, align_bi->bi_iter.bi_sector, 5105 bio_sectors(align_bi), 5106 &first_bad, &bad_sectors)) { 5107 bio_put(align_bi); 5108 rdev_dec_pending(rdev, mddev); 5109 return 0; 5110 } 5111 5112 /* No reshape active, so we can trust rdev->data_offset */ 5113 align_bi->bi_iter.bi_sector += rdev->data_offset; 5114 5115 spin_lock_irq(&conf->device_lock); 5116 wait_event_lock_irq(conf->wait_for_quiescent, 5117 conf->quiesce == 0, 5118 conf->device_lock); 5119 atomic_inc(&conf->active_aligned_reads); 5120 spin_unlock_irq(&conf->device_lock); 5121 5122 if (mddev->gendisk) 5123 trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev), 5124 align_bi, disk_devt(mddev->gendisk), 5125 raid_bio->bi_iter.bi_sector); 5126 generic_make_request(align_bi); 5127 return 1; 5128 } else { 5129 rcu_read_unlock(); 5130 bio_put(align_bi); 5131 return 0; 5132 } 5133 } 5134 5135 static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio) 5136 { 5137 struct bio *split; 5138 5139 do { 5140 sector_t sector = raid_bio->bi_iter.bi_sector; 5141 unsigned chunk_sects = mddev->chunk_sectors; 5142 unsigned sectors = chunk_sects - (sector & (chunk_sects-1)); 5143 5144 if (sectors < bio_sectors(raid_bio)) { 5145 split = bio_split(raid_bio, sectors, GFP_NOIO, fs_bio_set); 5146 bio_chain(split, raid_bio); 5147 } else 5148 split = raid_bio; 5149 5150 if (!raid5_read_one_chunk(mddev, split)) { 5151 if (split != raid_bio) 5152 generic_make_request(raid_bio); 5153 return split; 5154 } 5155 } while (split != raid_bio); 5156 5157 return NULL; 5158 } 5159 5160 /* __get_priority_stripe - get the next stripe to process 5161 * 5162 * Full stripe writes are allowed to pass preread active stripes up until 5163 * the bypass_threshold is exceeded. In general the bypass_count 5164 * increments when the handle_list is handled before the hold_list; however, it 5165 * will not be incremented when STRIPE_IO_STARTED is sampled set signifying a 5166 * stripe with in flight i/o. The bypass_count will be reset when the 5167 * head of the hold_list has changed, i.e. the head was promoted to the 5168 * handle_list. 
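 *
 * In outline (an illustrative sketch, not the exact code):
 *
 *	if (handle_list is not empty)
 *		take its head; if that means bypassing the same
 *		hold_list head again, bypass_count++, otherwise
 *		charge bypass_threshold off bypass_count (clip at 0)
 *	else if (hold_list is not empty &&
 *		 (bypass_count > bypass_threshold ||
 *		  no full-stripe writes are pending))
 *		promote a suitable stripe from hold_list and charge
 *		bypass_threshold off bypass_count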
5169 */ 5170 static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group) 5171 { 5172 struct stripe_head *sh = NULL, *tmp; 5173 struct list_head *handle_list = NULL; 5174 struct r5worker_group *wg = NULL; 5175 5176 if (conf->worker_cnt_per_group == 0) { 5177 handle_list = &conf->handle_list; 5178 } else if (group != ANY_GROUP) { 5179 handle_list = &conf->worker_groups[group].handle_list; 5180 wg = &conf->worker_groups[group]; 5181 } else { 5182 int i; 5183 for (i = 0; i < conf->group_cnt; i++) { 5184 handle_list = &conf->worker_groups[i].handle_list; 5185 wg = &conf->worker_groups[i]; 5186 if (!list_empty(handle_list)) 5187 break; 5188 } 5189 } 5190 5191 pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n", 5192 __func__, 5193 list_empty(handle_list) ? "empty" : "busy", 5194 list_empty(&conf->hold_list) ? "empty" : "busy", 5195 atomic_read(&conf->pending_full_writes), conf->bypass_count); 5196 5197 if (!list_empty(handle_list)) { 5198 sh = list_entry(handle_list->next, typeof(*sh), lru); 5199 5200 if (list_empty(&conf->hold_list)) 5201 conf->bypass_count = 0; 5202 else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) { 5203 if (conf->hold_list.next == conf->last_hold) 5204 conf->bypass_count++; 5205 else { 5206 conf->last_hold = conf->hold_list.next; 5207 conf->bypass_count -= conf->bypass_threshold; 5208 if (conf->bypass_count < 0) 5209 conf->bypass_count = 0; 5210 } 5211 } 5212 } else if (!list_empty(&conf->hold_list) && 5213 ((conf->bypass_threshold && 5214 conf->bypass_count > conf->bypass_threshold) || 5215 atomic_read(&conf->pending_full_writes) == 0)) { 5216 5217 list_for_each_entry(tmp, &conf->hold_list, lru) { 5218 if (conf->worker_cnt_per_group == 0 || 5219 group == ANY_GROUP || 5220 !cpu_online(tmp->cpu) || 5221 cpu_to_group(tmp->cpu) == group) { 5222 sh = tmp; 5223 break; 5224 } 5225 } 5226 5227 if (sh) { 5228 conf->bypass_count -= conf->bypass_threshold; 5229 if (conf->bypass_count < 0) 5230 conf->bypass_count = 0; 5231 } 5232 wg = NULL; 5233 } 5234 5235 if (!sh) 5236 return NULL; 5237 5238 if (wg) { 5239 wg->stripes_cnt--; 5240 sh->group = NULL; 5241 } 5242 list_del_init(&sh->lru); 5243 BUG_ON(atomic_inc_return(&sh->count) != 1); 5244 return sh; 5245 } 5246 5247 struct raid5_plug_cb { 5248 struct blk_plug_cb cb; 5249 struct list_head list; 5250 struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS]; 5251 }; 5252 5253 static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule) 5254 { 5255 struct raid5_plug_cb *cb = container_of( 5256 blk_cb, struct raid5_plug_cb, cb); 5257 struct stripe_head *sh; 5258 struct mddev *mddev = cb->cb.data; 5259 struct r5conf *conf = mddev->private; 5260 int cnt = 0; 5261 int hash; 5262 5263 if (cb->list.next && !list_empty(&cb->list)) { 5264 spin_lock_irq(&conf->device_lock); 5265 while (!list_empty(&cb->list)) { 5266 sh = list_first_entry(&cb->list, struct stripe_head, lru); 5267 list_del_init(&sh->lru); 5268 /* 5269 * avoid race release_stripe_plug() sees 5270 * STRIPE_ON_UNPLUG_LIST clear but the stripe 5271 * is still in our list 5272 */ 5273 smp_mb__before_atomic(); 5274 clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state); 5275 /* 5276 * STRIPE_ON_RELEASE_LIST could be set here. 
In that 5277 * case, the count is always > 1 here 5278 */ 5279 hash = sh->hash_lock_index; 5280 __release_stripe(conf, sh, &cb->temp_inactive_list[hash]); 5281 cnt++; 5282 } 5283 spin_unlock_irq(&conf->device_lock); 5284 } 5285 release_inactive_stripe_list(conf, cb->temp_inactive_list, 5286 NR_STRIPE_HASH_LOCKS); 5287 if (mddev->queue) 5288 trace_block_unplug(mddev->queue, cnt, !from_schedule); 5289 kfree(cb); 5290 } 5291 5292 static void release_stripe_plug(struct mddev *mddev, 5293 struct stripe_head *sh) 5294 { 5295 struct blk_plug_cb *blk_cb = blk_check_plugged( 5296 raid5_unplug, mddev, 5297 sizeof(struct raid5_plug_cb)); 5298 struct raid5_plug_cb *cb; 5299 5300 if (!blk_cb) { 5301 raid5_release_stripe(sh); 5302 return; 5303 } 5304 5305 cb = container_of(blk_cb, struct raid5_plug_cb, cb); 5306 5307 if (cb->list.next == NULL) { 5308 int i; 5309 INIT_LIST_HEAD(&cb->list); 5310 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) 5311 INIT_LIST_HEAD(cb->temp_inactive_list + i); 5312 } 5313 5314 if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)) 5315 list_add_tail(&sh->lru, &cb->list); 5316 else 5317 raid5_release_stripe(sh); 5318 } 5319 5320 static void make_discard_request(struct mddev *mddev, struct bio *bi) 5321 { 5322 struct r5conf *conf = mddev->private; 5323 sector_t logical_sector, last_sector; 5324 struct stripe_head *sh; 5325 int remaining; 5326 int stripe_sectors; 5327 5328 if (mddev->reshape_position != MaxSector) 5329 /* Skip discard while reshape is happening */ 5330 return; 5331 5332 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1); 5333 last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9); 5334 5335 bi->bi_next = NULL; 5336 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ 5337 5338 stripe_sectors = conf->chunk_sectors * 5339 (conf->raid_disks - conf->max_degraded); 5340 logical_sector = DIV_ROUND_UP_SECTOR_T(logical_sector, 5341 stripe_sectors); 5342 sector_div(last_sector, stripe_sectors); 5343 5344 logical_sector *= conf->chunk_sectors; 5345 last_sector *= conf->chunk_sectors; 5346 5347 for (; logical_sector < last_sector; 5348 logical_sector += STRIPE_SECTORS) { 5349 DEFINE_WAIT(w); 5350 int d; 5351 again: 5352 sh = raid5_get_active_stripe(conf, logical_sector, 0, 0, 0); 5353 prepare_to_wait(&conf->wait_for_overlap, &w, 5354 TASK_UNINTERRUPTIBLE); 5355 set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); 5356 if (test_bit(STRIPE_SYNCING, &sh->state)) { 5357 raid5_release_stripe(sh); 5358 schedule(); 5359 goto again; 5360 } 5361 clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); 5362 spin_lock_irq(&sh->stripe_lock); 5363 for (d = 0; d < conf->raid_disks; d++) { 5364 if (d == sh->pd_idx || d == sh->qd_idx) 5365 continue; 5366 if (sh->dev[d].towrite || sh->dev[d].toread) { 5367 set_bit(R5_Overlap, &sh->dev[d].flags); 5368 spin_unlock_irq(&sh->stripe_lock); 5369 raid5_release_stripe(sh); 5370 schedule(); 5371 goto again; 5372 } 5373 } 5374 set_bit(STRIPE_DISCARD, &sh->state); 5375 finish_wait(&conf->wait_for_overlap, &w); 5376 sh->overwrite_disks = 0; 5377 for (d = 0; d < conf->raid_disks; d++) { 5378 if (d == sh->pd_idx || d == sh->qd_idx) 5379 continue; 5380 sh->dev[d].towrite = bi; 5381 set_bit(R5_OVERWRITE, &sh->dev[d].flags); 5382 raid5_inc_bi_active_stripes(bi); 5383 sh->overwrite_disks++; 5384 } 5385 spin_unlock_irq(&sh->stripe_lock); 5386 if (conf->mddev->bitmap) { 5387 for (d = 0; 5388 d < conf->raid_disks - conf->max_degraded; 5389 d++) 5390 bitmap_startwrite(mddev->bitmap, 5391 sh->sector, 5392 STRIPE_SECTORS, 5393 0); 
5394 sh->bm_seq = conf->seq_flush + 1; 5395 set_bit(STRIPE_BIT_DELAY, &sh->state); 5396 } 5397 5398 set_bit(STRIPE_HANDLE, &sh->state); 5399 clear_bit(STRIPE_DELAYED, &sh->state); 5400 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 5401 atomic_inc(&conf->preread_active_stripes); 5402 release_stripe_plug(mddev, sh); 5403 } 5404 5405 remaining = raid5_dec_bi_active_stripes(bi); 5406 if (remaining == 0) { 5407 md_write_end(mddev); 5408 bio_endio(bi); 5409 } 5410 } 5411 5412 static void raid5_make_request(struct mddev *mddev, struct bio * bi) 5413 { 5414 struct r5conf *conf = mddev->private; 5415 int dd_idx; 5416 sector_t new_sector; 5417 sector_t logical_sector, last_sector; 5418 struct stripe_head *sh; 5419 const int rw = bio_data_dir(bi); 5420 int remaining; 5421 DEFINE_WAIT(w); 5422 bool do_prepare; 5423 bool do_flush = false; 5424 5425 if (unlikely(bi->bi_opf & REQ_PREFLUSH)) { 5426 int ret = r5l_handle_flush_request(conf->log, bi); 5427 5428 if (ret == 0) 5429 return; 5430 if (ret == -ENODEV) { 5431 md_flush_request(mddev, bi); 5432 return; 5433 } 5434 /* ret == -EAGAIN, fallback */ 5435 /* 5436 * if r5l_handle_flush_request() didn't clear REQ_PREFLUSH, 5437 * we need to flush journal device 5438 */ 5439 do_flush = bi->bi_opf & REQ_PREFLUSH; 5440 } 5441 5442 md_write_start(mddev, bi); 5443 5444 /* 5445 * If array is degraded, better not do chunk aligned read because 5446 * later we might have to read it again in order to reconstruct 5447 * data on failed drives. 5448 */ 5449 if (rw == READ && mddev->degraded == 0 && 5450 mddev->reshape_position == MaxSector) { 5451 bi = chunk_aligned_read(mddev, bi); 5452 if (!bi) 5453 return; 5454 } 5455 5456 if (unlikely(bio_op(bi) == REQ_OP_DISCARD)) { 5457 make_discard_request(mddev, bi); 5458 return; 5459 } 5460 5461 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1); 5462 last_sector = bio_end_sector(bi); 5463 bi->bi_next = NULL; 5464 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ 5465 5466 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); 5467 for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { 5468 int previous; 5469 int seq; 5470 5471 do_prepare = false; 5472 retry: 5473 seq = read_seqcount_begin(&conf->gen_lock); 5474 previous = 0; 5475 if (do_prepare) 5476 prepare_to_wait(&conf->wait_for_overlap, &w, 5477 TASK_UNINTERRUPTIBLE); 5478 if (unlikely(conf->reshape_progress != MaxSector)) { 5479 /* spinlock is needed as reshape_progress may be 5480 * 64bit on a 32bit platform, and so it might be 5481 * possible to see a half-updated value 5482 * Of course reshape_progress could change after 5483 * the lock is dropped, so once we get a reference 5484 * to the stripe that we think it is, we will have 5485 * to check again. 5486 */ 5487 spin_lock_irq(&conf->device_lock); 5488 if (mddev->reshape_backwards 5489 ? logical_sector < conf->reshape_progress 5490 : logical_sector >= conf->reshape_progress) { 5491 previous = 1; 5492 } else { 5493 if (mddev->reshape_backwards 5494 ? 
logical_sector < conf->reshape_safe 5495 : logical_sector >= conf->reshape_safe) { 5496 spin_unlock_irq(&conf->device_lock); 5497 schedule(); 5498 do_prepare = true; 5499 goto retry; 5500 } 5501 } 5502 spin_unlock_irq(&conf->device_lock); 5503 } 5504 5505 new_sector = raid5_compute_sector(conf, logical_sector, 5506 previous, 5507 &dd_idx, NULL); 5508 pr_debug("raid456: raid5_make_request, sector %llu logical %llu\n", 5509 (unsigned long long)new_sector, 5510 (unsigned long long)logical_sector); 5511 5512 sh = raid5_get_active_stripe(conf, new_sector, previous, 5513 (bi->bi_opf & REQ_RAHEAD), 0); 5514 if (sh) { 5515 if (unlikely(previous)) { 5516 /* expansion might have moved on while waiting for a 5517 * stripe, so we must do the range check again. 5518 * Expansion could still move past after this 5519 * test, but as we are holding a reference to 5520 * 'sh', we know that if that happens, 5521 * STRIPE_EXPANDING will get set and the expansion 5522 * won't proceed until we finish with the stripe. 5523 */ 5524 int must_retry = 0; 5525 spin_lock_irq(&conf->device_lock); 5526 if (mddev->reshape_backwards 5527 ? logical_sector >= conf->reshape_progress 5528 : logical_sector < conf->reshape_progress) 5529 /* mismatch, need to try again */ 5530 must_retry = 1; 5531 spin_unlock_irq(&conf->device_lock); 5532 if (must_retry) { 5533 raid5_release_stripe(sh); 5534 schedule(); 5535 do_prepare = true; 5536 goto retry; 5537 } 5538 } 5539 if (read_seqcount_retry(&conf->gen_lock, seq)) { 5540 /* Might have got the wrong stripe_head 5541 * by accident 5542 */ 5543 raid5_release_stripe(sh); 5544 goto retry; 5545 } 5546 5547 if (rw == WRITE && 5548 logical_sector >= mddev->suspend_lo && 5549 logical_sector < mddev->suspend_hi) { 5550 raid5_release_stripe(sh); 5551 /* As the suspend_* range is controlled by 5552 * userspace, we want an interruptible 5553 * wait. 5554 */ 5555 flush_signals(current); 5556 prepare_to_wait(&conf->wait_for_overlap, 5557 &w, TASK_INTERRUPTIBLE); 5558 if (logical_sector >= mddev->suspend_lo && 5559 logical_sector < mddev->suspend_hi) { 5560 schedule(); 5561 do_prepare = true; 5562 } 5563 goto retry; 5564 } 5565 5566 if (test_bit(STRIPE_EXPANDING, &sh->state) || 5567 !add_stripe_bio(sh, bi, dd_idx, rw, previous)) { 5568 /* Stripe is busy expanding or 5569 * add failed due to overlap. 
Flush everything 5570 * and wait a while 5571 */ 5572 md_wakeup_thread(mddev->thread); 5573 raid5_release_stripe(sh); 5574 schedule(); 5575 do_prepare = true; 5576 goto retry; 5577 } 5578 if (do_flush) { 5579 set_bit(STRIPE_R5C_PREFLUSH, &sh->state); 5580 /* we only need flush for one stripe */ 5581 do_flush = false; 5582 } 5583 5584 set_bit(STRIPE_HANDLE, &sh->state); 5585 clear_bit(STRIPE_DELAYED, &sh->state); 5586 if ((!sh->batch_head || sh == sh->batch_head) && 5587 (bi->bi_opf & REQ_SYNC) && 5588 !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 5589 atomic_inc(&conf->preread_active_stripes); 5590 release_stripe_plug(mddev, sh); 5591 } else { 5592 /* cannot get stripe for read-ahead, just give-up */ 5593 bi->bi_error = -EIO; 5594 break; 5595 } 5596 } 5597 finish_wait(&conf->wait_for_overlap, &w); 5598 5599 remaining = raid5_dec_bi_active_stripes(bi); 5600 if (remaining == 0) { 5601 5602 if ( rw == WRITE ) 5603 md_write_end(mddev); 5604 5605 trace_block_bio_complete(bdev_get_queue(bi->bi_bdev), 5606 bi, 0); 5607 bio_endio(bi); 5608 } 5609 } 5610 5611 static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks); 5612 5613 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped) 5614 { 5615 /* reshaping is quite different to recovery/resync so it is 5616 * handled quite separately ... here. 5617 * 5618 * On each call to sync_request, we gather one chunk worth of 5619 * destination stripes and flag them as expanding. 5620 * Then we find all the source stripes and request reads. 5621 * As the reads complete, handle_stripe will copy the data 5622 * into the destination stripe and release that stripe. 5623 */ 5624 struct r5conf *conf = mddev->private; 5625 struct stripe_head *sh; 5626 sector_t first_sector, last_sector; 5627 int raid_disks = conf->previous_raid_disks; 5628 int data_disks = raid_disks - conf->max_degraded; 5629 int new_data_disks = conf->raid_disks - conf->max_degraded; 5630 int i; 5631 int dd_idx; 5632 sector_t writepos, readpos, safepos; 5633 sector_t stripe_addr; 5634 int reshape_sectors; 5635 struct list_head stripes; 5636 sector_t retn; 5637 5638 if (sector_nr == 0) { 5639 /* If restarting in the middle, skip the initial sectors */ 5640 if (mddev->reshape_backwards && 5641 conf->reshape_progress < raid5_size(mddev, 0, 0)) { 5642 sector_nr = raid5_size(mddev, 0, 0) 5643 - conf->reshape_progress; 5644 } else if (mddev->reshape_backwards && 5645 conf->reshape_progress == MaxSector) { 5646 /* shouldn't happen, but just in case, finish up.*/ 5647 sector_nr = MaxSector; 5648 } else if (!mddev->reshape_backwards && 5649 conf->reshape_progress > 0) 5650 sector_nr = conf->reshape_progress; 5651 sector_div(sector_nr, new_data_disks); 5652 if (sector_nr) { 5653 mddev->curr_resync_completed = sector_nr; 5654 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 5655 *skipped = 1; 5656 retn = sector_nr; 5657 goto finish; 5658 } 5659 } 5660 5661 /* We need to process a full chunk at a time. 5662 * If old and new chunk sizes differ, we need to process the 5663 * largest of these 5664 */ 5665 5666 reshape_sectors = max(conf->chunk_sectors, conf->prev_chunk_sectors); 5667 5668 /* We update the metadata at least every 10 seconds, or when 5669 * the data about to be copied would over-write the source of 5670 * the data at the front of the range. i.e. 
the device address one stripe
	 * beyond reshape_progress, mapped through the new geometry,
	 * lands past the address reshape_safe maps to through the old
	 * geometry.
	 */
	writepos = conf->reshape_progress;
	sector_div(writepos, new_data_disks);
	readpos = conf->reshape_progress;
	sector_div(readpos, data_disks);
	safepos = conf->reshape_safe;
	sector_div(safepos, data_disks);
	if (mddev->reshape_backwards) {
		BUG_ON(writepos < reshape_sectors);
		writepos -= reshape_sectors;
		readpos += reshape_sectors;
		safepos += reshape_sectors;
	} else {
		writepos += reshape_sectors;
		/* readpos and safepos are worst-case calculations.
		 * A negative number is overly pessimistic, and causes
		 * obvious problems for unsigned storage.  So clip to 0.
		 */
		readpos -= min_t(sector_t, reshape_sectors, readpos);
		safepos -= min_t(sector_t, reshape_sectors, safepos);
	}

	/* Having calculated 'writepos', possibly use it to set
	 * 'stripe_addr', which is where we will write to.
	 */
	if (mddev->reshape_backwards) {
		BUG_ON(conf->reshape_progress == 0);
		stripe_addr = writepos;
		BUG_ON((mddev->dev_sectors &
			~((sector_t)reshape_sectors - 1))
		       - reshape_sectors - stripe_addr
		       != sector_nr);
	} else {
		BUG_ON(writepos != sector_nr + reshape_sectors);
		stripe_addr = sector_nr;
	}

	/* 'writepos' is the most advanced device address we might write.
	 * 'readpos' is the least advanced device address we might read.
	 * 'safepos' is the least address recorded in the metadata as having
	 * been reshaped.
	 * If there is a min_offset_diff, these are adjusted either by
	 * increasing the safepos/readpos if diff is negative, or
	 * increasing writepos if diff is positive.
	 * If 'readpos' is then behind 'writepos', there is no way that we can
	 * ensure safety in the face of a crash - that must be done by userspace
	 * making a backup of the data.  So in that case there is no particular
	 * rush to update metadata.
	 * Otherwise if 'safepos' is behind 'writepos', then we really need to
	 * update the metadata to advance 'safepos' to match 'readpos' so that
	 * we can be safe in the event of a crash.
	 * So we insist on updating metadata if safepos is behind writepos and
	 * readpos is beyond writepos.
	 * In any case, update the metadata every 10 seconds.
	 * Maybe that number should be configurable, but I'm not sure it is
	 * worth it.... maybe it could be a multiple of safemode_delay???
	 */
	if (conf->min_offset_diff < 0) {
		safepos += -conf->min_offset_diff;
		readpos += -conf->min_offset_diff;
	} else
		writepos += conf->min_offset_diff;

	if ((mddev->reshape_backwards
	     ? (safepos > writepos && readpos < writepos)
	     : (safepos < writepos && readpos > writepos)) ||
	    time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
		/* Cannot proceed until we've updated the superblock...
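		 * (Editor's illustration: on a forward reshape with
		 * reshape_sectors == 1024 and min_offset_diff == 0, a pass
		 * leaves writepos == sector_nr + 1024; once safepos has
		 * fallen behind writepos while readpos is still ahead of
		 * it, this branch fires without waiting for the 10s timer.)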
		 */
		wait_event(conf->wait_for_overlap,
			   atomic_read(&conf->reshape_stripes)==0
			   || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
		if (atomic_read(&conf->reshape_stripes) != 0)
			return 0;
		mddev->reshape_position = conf->reshape_progress;
		mddev->curr_resync_completed = sector_nr;
		conf->reshape_checkpoint = jiffies;
		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
		md_wakeup_thread(mddev->thread);
		wait_event(mddev->sb_wait, mddev->sb_flags == 0 ||
			   test_bit(MD_RECOVERY_INTR, &mddev->recovery));
		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
			return 0;
		spin_lock_irq(&conf->device_lock);
		conf->reshape_safe = mddev->reshape_position;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_for_overlap);
		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
	}

	INIT_LIST_HEAD(&stripes);
	for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
		int j;
		int skipped_disk = 0;
		sh = raid5_get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
		set_bit(STRIPE_EXPANDING, &sh->state);
		atomic_inc(&conf->reshape_stripes);
		/* If any of this stripe is beyond the end of the old
		 * array, then we need to zero those blocks
		 */
		for (j = sh->disks; j--;) {
			sector_t s;
			if (j == sh->pd_idx)
				continue;
			if (conf->level == 6 &&
			    j == sh->qd_idx)
				continue;
			s = raid5_compute_blocknr(sh, j, 0);
			if (s < raid5_size(mddev, 0, 0)) {
				skipped_disk = 1;
				continue;
			}
			memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
			set_bit(R5_Expanded, &sh->dev[j].flags);
			set_bit(R5_UPTODATE, &sh->dev[j].flags);
		}
		if (!skipped_disk) {
			set_bit(STRIPE_EXPAND_READY, &sh->state);
			set_bit(STRIPE_HANDLE, &sh->state);
		}
		list_add(&sh->lru, &stripes);
	}
	spin_lock_irq(&conf->device_lock);
	if (mddev->reshape_backwards)
		conf->reshape_progress -= reshape_sectors * new_data_disks;
	else
		conf->reshape_progress += reshape_sectors * new_data_disks;
	spin_unlock_irq(&conf->device_lock);
	/* Ok, those stripes are ready.  We can start scheduling
	 * reads on the source stripes.
	 * The source stripes are determined by mapping the first and last
	 * block on the destination stripes.
	 */
	first_sector =
		raid5_compute_sector(conf, stripe_addr*(new_data_disks),
				     1, &dd_idx, NULL);
	last_sector =
		raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
					    * new_data_disks - 1),
				     1, &dd_idx, NULL);
	if (last_sector >= mddev->dev_sectors)
		last_sector = mddev->dev_sectors - 1;
	while (first_sector <= last_sector) {
		sh = raid5_get_active_stripe(conf, first_sector, 1, 0, 1);
		set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
		first_sector += STRIPE_SECTORS;
	}
	/* Now that the sources are clearly marked, we can release
	 * the destination stripes
	 */
	while (!list_empty(&stripes)) {
		sh = list_entry(stripes.next, struct stripe_head, lru);
		list_del_init(&sh->lru);
		raid5_release_stripe(sh);
	}
	/* If this takes us to the resync_max point where we have to pause,
	 * then we need to write out the superblock.
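	 * (Editor's note: the check below fires once we pass the halfway
	 * point to resync_max - e.g. with curr_resync_completed == 1000
	 * and resync_max == 3000 the superblock is written when sector_nr
	 * reaches 2000, since (2000 - 1000) * 2 >= 3000 - 1000.)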
	 */
	sector_nr += reshape_sectors;
	retn = reshape_sectors;
finish:
	if (mddev->curr_resync_completed > mddev->resync_max ||
	    (sector_nr - mddev->curr_resync_completed) * 2
	    >= mddev->resync_max - mddev->curr_resync_completed) {
		/* Cannot proceed until we've updated the superblock... */
		wait_event(conf->wait_for_overlap,
			   atomic_read(&conf->reshape_stripes) == 0
			   || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
		if (atomic_read(&conf->reshape_stripes) != 0)
			goto ret;
		mddev->reshape_position = conf->reshape_progress;
		mddev->curr_resync_completed = sector_nr;
		conf->reshape_checkpoint = jiffies;
		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
		md_wakeup_thread(mddev->thread);
		wait_event(mddev->sb_wait,
			   !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)
			   || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
			goto ret;
		spin_lock_irq(&conf->device_lock);
		conf->reshape_safe = mddev->reshape_position;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_for_overlap);
		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
	}
ret:
	return retn;
}

static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_nr,
					  int *skipped)
{
	struct r5conf *conf = mddev->private;
	struct stripe_head *sh;
	sector_t max_sector = mddev->dev_sectors;
	sector_t sync_blocks;
	int still_degraded = 0;
	int i;

	if (sector_nr >= max_sector) {
		/* just being told to finish up .. nothing much to do */

		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
			end_reshape(conf);
			return 0;
		}

		if (mddev->curr_resync < max_sector) /* aborted */
			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
					&sync_blocks, 1);
		else /* completed sync */
			conf->fullsync = 0;
		bitmap_close_sync(mddev->bitmap);

		return 0;
	}

	/* Allow raid5_quiesce to complete */
	wait_event(conf->wait_for_overlap, conf->quiesce != 2);

	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		return reshape_request(mddev, sector_nr, skipped);

	/* No need to check resync_max as we never do more than one
	 * stripe, and as resync_max will always be on a chunk boundary,
	 * if the check in md_do_sync didn't fire, there is no chance
	 * of overstepping resync_max here
	 */

	/* if there are too many failed drives and we are trying
	 * to resync, then assert that we are finished, because there is
	 * nothing we can do.
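	 * (Editor's example: a RAID6 array has max_degraded == 2, so with
	 * two members failed a requested resync/check pass cannot improve
	 * redundancy and we simply report the remaining range as done.)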
	 */
	if (mddev->degraded >= conf->max_degraded &&
	    test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		sector_t rv = mddev->dev_sectors - sector_nr;
		*skipped = 1;
		return rv;
	}
	if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
	    !conf->fullsync &&
	    !bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
	    sync_blocks >= STRIPE_SECTORS) {
		/* we can skip this block, and probably more */
		sync_blocks /= STRIPE_SECTORS;
		*skipped = 1;
		return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
	}

	bitmap_cond_end_sync(mddev->bitmap, sector_nr, false);

	sh = raid5_get_active_stripe(conf, sector_nr, 0, 1, 0);
	if (sh == NULL) {
		sh = raid5_get_active_stripe(conf, sector_nr, 0, 0, 0);
		/* make sure we don't swamp the stripe cache if someone else
		 * is trying to get access
		 */
		schedule_timeout_uninterruptible(1);
	}
	/* Need to check if array will still be degraded after recovery/resync
	 * Note in case of > 1 drive failures it's possible we're rebuilding
	 * one drive while leaving another faulty drive in array.
	 */
	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = ACCESS_ONCE(conf->disks[i].rdev);

		if (rdev == NULL || test_bit(Faulty, &rdev->flags))
			still_degraded = 1;
	}
	rcu_read_unlock();

	bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);

	set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
	set_bit(STRIPE_HANDLE, &sh->state);

	raid5_release_stripe(sh);

	return STRIPE_SECTORS;
}

static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
{
	/* We may not be able to submit a whole bio at once as there
	 * may not be enough stripe_heads available.
	 * We cannot pre-allocate enough stripe_heads as we may need
	 * more than exist in the cache (if we allow ever larger chunks).
	 * So we do one stripe head at a time and record in
	 * ->bi_hw_segments how many have been done.
	 *
	 * We *know* that this entire raid_bio is in one chunk, so
	 * it will map to only one 'dd_idx' and we need only one call
	 * to raid5_compute_sector.
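	 * (Editor's example: with STRIPE_SECTORS == 8, a bio starting at
	 * sector 1003 is walked from sector 1000 upwards in steps of 8;
	 * the processed-stripes count lets us resume at the right step
	 * after a stripe_head shortage.)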
5968 */ 5969 struct stripe_head *sh; 5970 int dd_idx; 5971 sector_t sector, logical_sector, last_sector; 5972 int scnt = 0; 5973 int remaining; 5974 int handled = 0; 5975 5976 logical_sector = raid_bio->bi_iter.bi_sector & 5977 ~((sector_t)STRIPE_SECTORS-1); 5978 sector = raid5_compute_sector(conf, logical_sector, 5979 0, &dd_idx, NULL); 5980 last_sector = bio_end_sector(raid_bio); 5981 5982 for (; logical_sector < last_sector; 5983 logical_sector += STRIPE_SECTORS, 5984 sector += STRIPE_SECTORS, 5985 scnt++) { 5986 5987 if (scnt < raid5_bi_processed_stripes(raid_bio)) 5988 /* already done this stripe */ 5989 continue; 5990 5991 sh = raid5_get_active_stripe(conf, sector, 0, 1, 1); 5992 5993 if (!sh) { 5994 /* failed to get a stripe - must wait */ 5995 raid5_set_bi_processed_stripes(raid_bio, scnt); 5996 conf->retry_read_aligned = raid_bio; 5997 return handled; 5998 } 5999 6000 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) { 6001 raid5_release_stripe(sh); 6002 raid5_set_bi_processed_stripes(raid_bio, scnt); 6003 conf->retry_read_aligned = raid_bio; 6004 return handled; 6005 } 6006 6007 set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags); 6008 handle_stripe(sh); 6009 raid5_release_stripe(sh); 6010 handled++; 6011 } 6012 remaining = raid5_dec_bi_active_stripes(raid_bio); 6013 if (remaining == 0) { 6014 trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev), 6015 raid_bio, 0); 6016 bio_endio(raid_bio); 6017 } 6018 if (atomic_dec_and_test(&conf->active_aligned_reads)) 6019 wake_up(&conf->wait_for_quiescent); 6020 return handled; 6021 } 6022 6023 static int handle_active_stripes(struct r5conf *conf, int group, 6024 struct r5worker *worker, 6025 struct list_head *temp_inactive_list) 6026 { 6027 struct stripe_head *batch[MAX_STRIPE_BATCH], *sh; 6028 int i, batch_size = 0, hash; 6029 bool release_inactive = false; 6030 6031 while (batch_size < MAX_STRIPE_BATCH && 6032 (sh = __get_priority_stripe(conf, group)) != NULL) 6033 batch[batch_size++] = sh; 6034 6035 if (batch_size == 0) { 6036 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) 6037 if (!list_empty(temp_inactive_list + i)) 6038 break; 6039 if (i == NR_STRIPE_HASH_LOCKS) { 6040 spin_unlock_irq(&conf->device_lock); 6041 r5l_flush_stripe_to_raid(conf->log); 6042 spin_lock_irq(&conf->device_lock); 6043 return batch_size; 6044 } 6045 release_inactive = true; 6046 } 6047 spin_unlock_irq(&conf->device_lock); 6048 6049 release_inactive_stripe_list(conf, temp_inactive_list, 6050 NR_STRIPE_HASH_LOCKS); 6051 6052 r5l_flush_stripe_to_raid(conf->log); 6053 if (release_inactive) { 6054 spin_lock_irq(&conf->device_lock); 6055 return 0; 6056 } 6057 6058 for (i = 0; i < batch_size; i++) 6059 handle_stripe(batch[i]); 6060 r5l_write_stripe_run(conf->log); 6061 6062 cond_resched(); 6063 6064 spin_lock_irq(&conf->device_lock); 6065 for (i = 0; i < batch_size; i++) { 6066 hash = batch[i]->hash_lock_index; 6067 __release_stripe(conf, batch[i], &temp_inactive_list[hash]); 6068 } 6069 return batch_size; 6070 } 6071 6072 static void raid5_do_work(struct work_struct *work) 6073 { 6074 struct r5worker *worker = container_of(work, struct r5worker, work); 6075 struct r5worker_group *group = worker->group; 6076 struct r5conf *conf = group->conf; 6077 int group_id = group - conf->worker_groups; 6078 int handled; 6079 struct blk_plug plug; 6080 6081 pr_debug("+++ raid5worker active\n"); 6082 6083 blk_start_plug(&plug); 6084 handled = 0; 6085 spin_lock_irq(&conf->device_lock); 6086 while (1) { 6087 int batch_size, released; 6088 6089 released = release_stripe_list(conf, 
worker->temp_inactive_list); 6090 6091 batch_size = handle_active_stripes(conf, group_id, worker, 6092 worker->temp_inactive_list); 6093 worker->working = false; 6094 if (!batch_size && !released) 6095 break; 6096 handled += batch_size; 6097 } 6098 pr_debug("%d stripes handled\n", handled); 6099 6100 spin_unlock_irq(&conf->device_lock); 6101 blk_finish_plug(&plug); 6102 6103 pr_debug("--- raid5worker inactive\n"); 6104 } 6105 6106 /* 6107 * This is our raid5 kernel thread. 6108 * 6109 * We scan the hash table for stripes which can be handled now. 6110 * During the scan, completed stripes are saved for us by the interrupt 6111 * handler, so that they will not have to wait for our next wakeup. 6112 */ 6113 static void raid5d(struct md_thread *thread) 6114 { 6115 struct mddev *mddev = thread->mddev; 6116 struct r5conf *conf = mddev->private; 6117 int handled; 6118 struct blk_plug plug; 6119 6120 pr_debug("+++ raid5d active\n"); 6121 6122 md_check_recovery(mddev); 6123 6124 if (!bio_list_empty(&conf->return_bi) && 6125 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { 6126 struct bio_list tmp = BIO_EMPTY_LIST; 6127 spin_lock_irq(&conf->device_lock); 6128 if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { 6129 bio_list_merge(&tmp, &conf->return_bi); 6130 bio_list_init(&conf->return_bi); 6131 } 6132 spin_unlock_irq(&conf->device_lock); 6133 return_io(&tmp); 6134 } 6135 6136 blk_start_plug(&plug); 6137 handled = 0; 6138 spin_lock_irq(&conf->device_lock); 6139 while (1) { 6140 struct bio *bio; 6141 int batch_size, released; 6142 6143 released = release_stripe_list(conf, conf->temp_inactive_list); 6144 if (released) 6145 clear_bit(R5_DID_ALLOC, &conf->cache_state); 6146 6147 if ( 6148 !list_empty(&conf->bitmap_list)) { 6149 /* Now is a good time to flush some bitmap updates */ 6150 conf->seq_flush++; 6151 spin_unlock_irq(&conf->device_lock); 6152 bitmap_unplug(mddev->bitmap); 6153 spin_lock_irq(&conf->device_lock); 6154 conf->seq_write = conf->seq_flush; 6155 activate_bit_delay(conf, conf->temp_inactive_list); 6156 } 6157 raid5_activate_delayed(conf); 6158 6159 while ((bio = remove_bio_from_retry(conf))) { 6160 int ok; 6161 spin_unlock_irq(&conf->device_lock); 6162 ok = retry_aligned_read(conf, bio); 6163 spin_lock_irq(&conf->device_lock); 6164 if (!ok) 6165 break; 6166 handled++; 6167 } 6168 6169 batch_size = handle_active_stripes(conf, ANY_GROUP, NULL, 6170 conf->temp_inactive_list); 6171 if (!batch_size && !released) 6172 break; 6173 handled += batch_size; 6174 6175 if (mddev->sb_flags & ~(1 << MD_SB_CHANGE_PENDING)) { 6176 spin_unlock_irq(&conf->device_lock); 6177 md_check_recovery(mddev); 6178 spin_lock_irq(&conf->device_lock); 6179 } 6180 } 6181 pr_debug("%d stripes handled\n", handled); 6182 6183 spin_unlock_irq(&conf->device_lock); 6184 if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) && 6185 mutex_trylock(&conf->cache_size_mutex)) { 6186 grow_one_stripe(conf, __GFP_NOWARN); 6187 /* Set flag even if allocation failed. 
This helps 6188 * slow down allocation requests when mem is short 6189 */ 6190 set_bit(R5_DID_ALLOC, &conf->cache_state); 6191 mutex_unlock(&conf->cache_size_mutex); 6192 } 6193 6194 flush_deferred_bios(conf); 6195 6196 r5l_flush_stripe_to_raid(conf->log); 6197 6198 async_tx_issue_pending_all(); 6199 blk_finish_plug(&plug); 6200 6201 pr_debug("--- raid5d inactive\n"); 6202 } 6203 6204 static ssize_t 6205 raid5_show_stripe_cache_size(struct mddev *mddev, char *page) 6206 { 6207 struct r5conf *conf; 6208 int ret = 0; 6209 spin_lock(&mddev->lock); 6210 conf = mddev->private; 6211 if (conf) 6212 ret = sprintf(page, "%d\n", conf->min_nr_stripes); 6213 spin_unlock(&mddev->lock); 6214 return ret; 6215 } 6216 6217 int 6218 raid5_set_cache_size(struct mddev *mddev, int size) 6219 { 6220 struct r5conf *conf = mddev->private; 6221 int err; 6222 6223 if (size <= 16 || size > 32768) 6224 return -EINVAL; 6225 6226 conf->min_nr_stripes = size; 6227 mutex_lock(&conf->cache_size_mutex); 6228 while (size < conf->max_nr_stripes && 6229 drop_one_stripe(conf)) 6230 ; 6231 mutex_unlock(&conf->cache_size_mutex); 6232 6233 6234 err = md_allow_write(mddev); 6235 if (err) 6236 return err; 6237 6238 mutex_lock(&conf->cache_size_mutex); 6239 while (size > conf->max_nr_stripes) 6240 if (!grow_one_stripe(conf, GFP_KERNEL)) 6241 break; 6242 mutex_unlock(&conf->cache_size_mutex); 6243 6244 return 0; 6245 } 6246 EXPORT_SYMBOL(raid5_set_cache_size); 6247 6248 static ssize_t 6249 raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len) 6250 { 6251 struct r5conf *conf; 6252 unsigned long new; 6253 int err; 6254 6255 if (len >= PAGE_SIZE) 6256 return -EINVAL; 6257 if (kstrtoul(page, 10, &new)) 6258 return -EINVAL; 6259 err = mddev_lock(mddev); 6260 if (err) 6261 return err; 6262 conf = mddev->private; 6263 if (!conf) 6264 err = -ENODEV; 6265 else 6266 err = raid5_set_cache_size(mddev, new); 6267 mddev_unlock(mddev); 6268 6269 return err ?: len; 6270 } 6271 6272 static struct md_sysfs_entry 6273 raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR, 6274 raid5_show_stripe_cache_size, 6275 raid5_store_stripe_cache_size); 6276 6277 static ssize_t 6278 raid5_show_rmw_level(struct mddev *mddev, char *page) 6279 { 6280 struct r5conf *conf = mddev->private; 6281 if (conf) 6282 return sprintf(page, "%d\n", conf->rmw_level); 6283 else 6284 return 0; 6285 } 6286 6287 static ssize_t 6288 raid5_store_rmw_level(struct mddev *mddev, const char *page, size_t len) 6289 { 6290 struct r5conf *conf = mddev->private; 6291 unsigned long new; 6292 6293 if (!conf) 6294 return -ENODEV; 6295 6296 if (len >= PAGE_SIZE) 6297 return -EINVAL; 6298 6299 if (kstrtoul(page, 10, &new)) 6300 return -EINVAL; 6301 6302 if (new != PARITY_DISABLE_RMW && !raid6_call.xor_syndrome) 6303 return -EINVAL; 6304 6305 if (new != PARITY_DISABLE_RMW && 6306 new != PARITY_ENABLE_RMW && 6307 new != PARITY_PREFER_RMW) 6308 return -EINVAL; 6309 6310 conf->rmw_level = new; 6311 return len; 6312 } 6313 6314 static struct md_sysfs_entry 6315 raid5_rmw_level = __ATTR(rmw_level, S_IRUGO | S_IWUSR, 6316 raid5_show_rmw_level, 6317 raid5_store_rmw_level); 6318 6319 6320 static ssize_t 6321 raid5_show_preread_threshold(struct mddev *mddev, char *page) 6322 { 6323 struct r5conf *conf; 6324 int ret = 0; 6325 spin_lock(&mddev->lock); 6326 conf = mddev->private; 6327 if (conf) 6328 ret = sprintf(page, "%d\n", conf->bypass_threshold); 6329 spin_unlock(&mddev->lock); 6330 return ret; 6331 } 6332 6333 static ssize_t 6334 
raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len) 6335 { 6336 struct r5conf *conf; 6337 unsigned long new; 6338 int err; 6339 6340 if (len >= PAGE_SIZE) 6341 return -EINVAL; 6342 if (kstrtoul(page, 10, &new)) 6343 return -EINVAL; 6344 6345 err = mddev_lock(mddev); 6346 if (err) 6347 return err; 6348 conf = mddev->private; 6349 if (!conf) 6350 err = -ENODEV; 6351 else if (new > conf->min_nr_stripes) 6352 err = -EINVAL; 6353 else 6354 conf->bypass_threshold = new; 6355 mddev_unlock(mddev); 6356 return err ?: len; 6357 } 6358 6359 static struct md_sysfs_entry 6360 raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold, 6361 S_IRUGO | S_IWUSR, 6362 raid5_show_preread_threshold, 6363 raid5_store_preread_threshold); 6364 6365 static ssize_t 6366 raid5_show_skip_copy(struct mddev *mddev, char *page) 6367 { 6368 struct r5conf *conf; 6369 int ret = 0; 6370 spin_lock(&mddev->lock); 6371 conf = mddev->private; 6372 if (conf) 6373 ret = sprintf(page, "%d\n", conf->skip_copy); 6374 spin_unlock(&mddev->lock); 6375 return ret; 6376 } 6377 6378 static ssize_t 6379 raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len) 6380 { 6381 struct r5conf *conf; 6382 unsigned long new; 6383 int err; 6384 6385 if (len >= PAGE_SIZE) 6386 return -EINVAL; 6387 if (kstrtoul(page, 10, &new)) 6388 return -EINVAL; 6389 new = !!new; 6390 6391 err = mddev_lock(mddev); 6392 if (err) 6393 return err; 6394 conf = mddev->private; 6395 if (!conf) 6396 err = -ENODEV; 6397 else if (new != conf->skip_copy) { 6398 mddev_suspend(mddev); 6399 conf->skip_copy = new; 6400 if (new) 6401 mddev->queue->backing_dev_info->capabilities |= 6402 BDI_CAP_STABLE_WRITES; 6403 else 6404 mddev->queue->backing_dev_info->capabilities &= 6405 ~BDI_CAP_STABLE_WRITES; 6406 mddev_resume(mddev); 6407 } 6408 mddev_unlock(mddev); 6409 return err ?: len; 6410 } 6411 6412 static struct md_sysfs_entry 6413 raid5_skip_copy = __ATTR(skip_copy, S_IRUGO | S_IWUSR, 6414 raid5_show_skip_copy, 6415 raid5_store_skip_copy); 6416 6417 static ssize_t 6418 stripe_cache_active_show(struct mddev *mddev, char *page) 6419 { 6420 struct r5conf *conf = mddev->private; 6421 if (conf) 6422 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); 6423 else 6424 return 0; 6425 } 6426 6427 static struct md_sysfs_entry 6428 raid5_stripecache_active = __ATTR_RO(stripe_cache_active); 6429 6430 static ssize_t 6431 raid5_show_group_thread_cnt(struct mddev *mddev, char *page) 6432 { 6433 struct r5conf *conf; 6434 int ret = 0; 6435 spin_lock(&mddev->lock); 6436 conf = mddev->private; 6437 if (conf) 6438 ret = sprintf(page, "%d\n", conf->worker_cnt_per_group); 6439 spin_unlock(&mddev->lock); 6440 return ret; 6441 } 6442 6443 static int alloc_thread_groups(struct r5conf *conf, int cnt, 6444 int *group_cnt, 6445 int *worker_cnt_per_group, 6446 struct r5worker_group **worker_groups); 6447 static ssize_t 6448 raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) 6449 { 6450 struct r5conf *conf; 6451 unsigned long new; 6452 int err; 6453 struct r5worker_group *new_groups, *old_groups; 6454 int group_cnt, worker_cnt_per_group; 6455 6456 if (len >= PAGE_SIZE) 6457 return -EINVAL; 6458 if (kstrtoul(page, 10, &new)) 6459 return -EINVAL; 6460 6461 err = mddev_lock(mddev); 6462 if (err) 6463 return err; 6464 conf = mddev->private; 6465 if (!conf) 6466 err = -ENODEV; 6467 else if (new != conf->worker_cnt_per_group) { 6468 mddev_suspend(mddev); 6469 6470 old_groups = conf->worker_groups; 6471 if (old_groups) 6472 
flush_workqueue(raid5_wq); 6473 6474 err = alloc_thread_groups(conf, new, 6475 &group_cnt, &worker_cnt_per_group, 6476 &new_groups); 6477 if (!err) { 6478 spin_lock_irq(&conf->device_lock); 6479 conf->group_cnt = group_cnt; 6480 conf->worker_cnt_per_group = worker_cnt_per_group; 6481 conf->worker_groups = new_groups; 6482 spin_unlock_irq(&conf->device_lock); 6483 6484 if (old_groups) 6485 kfree(old_groups[0].workers); 6486 kfree(old_groups); 6487 } 6488 mddev_resume(mddev); 6489 } 6490 mddev_unlock(mddev); 6491 6492 return err ?: len; 6493 } 6494 6495 static struct md_sysfs_entry 6496 raid5_group_thread_cnt = __ATTR(group_thread_cnt, S_IRUGO | S_IWUSR, 6497 raid5_show_group_thread_cnt, 6498 raid5_store_group_thread_cnt); 6499 6500 static struct attribute *raid5_attrs[] = { 6501 &raid5_stripecache_size.attr, 6502 &raid5_stripecache_active.attr, 6503 &raid5_preread_bypass_threshold.attr, 6504 &raid5_group_thread_cnt.attr, 6505 &raid5_skip_copy.attr, 6506 &raid5_rmw_level.attr, 6507 &r5c_journal_mode.attr, 6508 NULL, 6509 }; 6510 static struct attribute_group raid5_attrs_group = { 6511 .name = NULL, 6512 .attrs = raid5_attrs, 6513 }; 6514 6515 static int alloc_thread_groups(struct r5conf *conf, int cnt, 6516 int *group_cnt, 6517 int *worker_cnt_per_group, 6518 struct r5worker_group **worker_groups) 6519 { 6520 int i, j, k; 6521 ssize_t size; 6522 struct r5worker *workers; 6523 6524 *worker_cnt_per_group = cnt; 6525 if (cnt == 0) { 6526 *group_cnt = 0; 6527 *worker_groups = NULL; 6528 return 0; 6529 } 6530 *group_cnt = num_possible_nodes(); 6531 size = sizeof(struct r5worker) * cnt; 6532 workers = kzalloc(size * *group_cnt, GFP_NOIO); 6533 *worker_groups = kzalloc(sizeof(struct r5worker_group) * 6534 *group_cnt, GFP_NOIO); 6535 if (!*worker_groups || !workers) { 6536 kfree(workers); 6537 kfree(*worker_groups); 6538 return -ENOMEM; 6539 } 6540 6541 for (i = 0; i < *group_cnt; i++) { 6542 struct r5worker_group *group; 6543 6544 group = &(*worker_groups)[i]; 6545 INIT_LIST_HEAD(&group->handle_list); 6546 group->conf = conf; 6547 group->workers = workers + i * cnt; 6548 6549 for (j = 0; j < cnt; j++) { 6550 struct r5worker *worker = group->workers + j; 6551 worker->group = group; 6552 INIT_WORK(&worker->work, raid5_do_work); 6553 6554 for (k = 0; k < NR_STRIPE_HASH_LOCKS; k++) 6555 INIT_LIST_HEAD(worker->temp_inactive_list + k); 6556 } 6557 } 6558 6559 return 0; 6560 } 6561 6562 static void free_thread_groups(struct r5conf *conf) 6563 { 6564 if (conf->worker_groups) 6565 kfree(conf->worker_groups[0].workers); 6566 kfree(conf->worker_groups); 6567 conf->worker_groups = NULL; 6568 } 6569 6570 static sector_t 6571 raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks) 6572 { 6573 struct r5conf *conf = mddev->private; 6574 6575 if (!sectors) 6576 sectors = mddev->dev_sectors; 6577 if (!raid_disks) 6578 /* size is defined by the smallest of previous and new size */ 6579 raid_disks = min(conf->raid_disks, conf->previous_raid_disks); 6580 6581 sectors &= ~((sector_t)conf->chunk_sectors - 1); 6582 sectors &= ~((sector_t)conf->prev_chunk_sectors - 1); 6583 return sectors * (raid_disks - conf->max_degraded); 6584 } 6585 6586 static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) 6587 { 6588 safe_put_page(percpu->spare_page); 6589 if (percpu->scribble) 6590 flex_array_free(percpu->scribble); 6591 percpu->spare_page = NULL; 6592 percpu->scribble = NULL; 6593 } 6594 6595 static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) 6596 { 6597 if 
(conf->level == 6 && !percpu->spare_page) 6598 percpu->spare_page = alloc_page(GFP_KERNEL); 6599 if (!percpu->scribble) 6600 percpu->scribble = scribble_alloc(max(conf->raid_disks, 6601 conf->previous_raid_disks), 6602 max(conf->chunk_sectors, 6603 conf->prev_chunk_sectors) 6604 / STRIPE_SECTORS, 6605 GFP_KERNEL); 6606 6607 if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) { 6608 free_scratch_buffer(conf, percpu); 6609 return -ENOMEM; 6610 } 6611 6612 return 0; 6613 } 6614 6615 static int raid456_cpu_dead(unsigned int cpu, struct hlist_node *node) 6616 { 6617 struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node); 6618 6619 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); 6620 return 0; 6621 } 6622 6623 static void raid5_free_percpu(struct r5conf *conf) 6624 { 6625 if (!conf->percpu) 6626 return; 6627 6628 cpuhp_state_remove_instance(CPUHP_MD_RAID5_PREPARE, &conf->node); 6629 free_percpu(conf->percpu); 6630 } 6631 6632 static void free_conf(struct r5conf *conf) 6633 { 6634 int i; 6635 6636 if (conf->log) 6637 r5l_exit_log(conf->log); 6638 if (conf->shrinker.nr_deferred) 6639 unregister_shrinker(&conf->shrinker); 6640 6641 free_thread_groups(conf); 6642 shrink_stripes(conf); 6643 raid5_free_percpu(conf); 6644 for (i = 0; i < conf->pool_size; i++) 6645 if (conf->disks[i].extra_page) 6646 put_page(conf->disks[i].extra_page); 6647 kfree(conf->disks); 6648 kfree(conf->stripe_hashtbl); 6649 kfree(conf); 6650 } 6651 6652 static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node) 6653 { 6654 struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node); 6655 struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu); 6656 6657 if (alloc_scratch_buffer(conf, percpu)) { 6658 pr_warn("%s: failed memory allocation for cpu%u\n", 6659 __func__, cpu); 6660 return -ENOMEM; 6661 } 6662 return 0; 6663 } 6664 6665 static int raid5_alloc_percpu(struct r5conf *conf) 6666 { 6667 int err = 0; 6668 6669 conf->percpu = alloc_percpu(struct raid5_percpu); 6670 if (!conf->percpu) 6671 return -ENOMEM; 6672 6673 err = cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node); 6674 if (!err) { 6675 conf->scribble_disks = max(conf->raid_disks, 6676 conf->previous_raid_disks); 6677 conf->scribble_sectors = max(conf->chunk_sectors, 6678 conf->prev_chunk_sectors); 6679 } 6680 return err; 6681 } 6682 6683 static unsigned long raid5_cache_scan(struct shrinker *shrink, 6684 struct shrink_control *sc) 6685 { 6686 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); 6687 unsigned long ret = SHRINK_STOP; 6688 6689 if (mutex_trylock(&conf->cache_size_mutex)) { 6690 ret= 0; 6691 while (ret < sc->nr_to_scan && 6692 conf->max_nr_stripes > conf->min_nr_stripes) { 6693 if (drop_one_stripe(conf) == 0) { 6694 ret = SHRINK_STOP; 6695 break; 6696 } 6697 ret++; 6698 } 6699 mutex_unlock(&conf->cache_size_mutex); 6700 } 6701 return ret; 6702 } 6703 6704 static unsigned long raid5_cache_count(struct shrinker *shrink, 6705 struct shrink_control *sc) 6706 { 6707 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); 6708 6709 if (conf->max_nr_stripes < conf->min_nr_stripes) 6710 /* unlikely, but not impossible */ 6711 return 0; 6712 return conf->max_nr_stripes - conf->min_nr_stripes; 6713 } 6714 6715 static struct r5conf *setup_conf(struct mddev *mddev) 6716 { 6717 struct r5conf *conf; 6718 int raid_disk, memory, max_disks; 6719 struct md_rdev *rdev; 6720 struct disk_info *disk; 6721 char pers_name[6]; 6722 int i; 6723 int group_cnt, 
worker_cnt_per_group; 6724 struct r5worker_group *new_group; 6725 6726 if (mddev->new_level != 5 6727 && mddev->new_level != 4 6728 && mddev->new_level != 6) { 6729 pr_warn("md/raid:%s: raid level not set to 4/5/6 (%d)\n", 6730 mdname(mddev), mddev->new_level); 6731 return ERR_PTR(-EIO); 6732 } 6733 if ((mddev->new_level == 5 6734 && !algorithm_valid_raid5(mddev->new_layout)) || 6735 (mddev->new_level == 6 6736 && !algorithm_valid_raid6(mddev->new_layout))) { 6737 pr_warn("md/raid:%s: layout %d not supported\n", 6738 mdname(mddev), mddev->new_layout); 6739 return ERR_PTR(-EIO); 6740 } 6741 if (mddev->new_level == 6 && mddev->raid_disks < 4) { 6742 pr_warn("md/raid:%s: not enough configured devices (%d, minimum 4)\n", 6743 mdname(mddev), mddev->raid_disks); 6744 return ERR_PTR(-EINVAL); 6745 } 6746 6747 if (!mddev->new_chunk_sectors || 6748 (mddev->new_chunk_sectors << 9) % PAGE_SIZE || 6749 !is_power_of_2(mddev->new_chunk_sectors)) { 6750 pr_warn("md/raid:%s: invalid chunk size %d\n", 6751 mdname(mddev), mddev->new_chunk_sectors << 9); 6752 return ERR_PTR(-EINVAL); 6753 } 6754 6755 conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL); 6756 if (conf == NULL) 6757 goto abort; 6758 /* Don't enable multi-threading by default*/ 6759 if (!alloc_thread_groups(conf, 0, &group_cnt, &worker_cnt_per_group, 6760 &new_group)) { 6761 conf->group_cnt = group_cnt; 6762 conf->worker_cnt_per_group = worker_cnt_per_group; 6763 conf->worker_groups = new_group; 6764 } else 6765 goto abort; 6766 spin_lock_init(&conf->device_lock); 6767 seqcount_init(&conf->gen_lock); 6768 mutex_init(&conf->cache_size_mutex); 6769 init_waitqueue_head(&conf->wait_for_quiescent); 6770 init_waitqueue_head(&conf->wait_for_stripe); 6771 init_waitqueue_head(&conf->wait_for_overlap); 6772 INIT_LIST_HEAD(&conf->handle_list); 6773 INIT_LIST_HEAD(&conf->hold_list); 6774 INIT_LIST_HEAD(&conf->delayed_list); 6775 INIT_LIST_HEAD(&conf->bitmap_list); 6776 bio_list_init(&conf->return_bi); 6777 init_llist_head(&conf->released_stripes); 6778 atomic_set(&conf->active_stripes, 0); 6779 atomic_set(&conf->preread_active_stripes, 0); 6780 atomic_set(&conf->active_aligned_reads, 0); 6781 bio_list_init(&conf->pending_bios); 6782 spin_lock_init(&conf->pending_bios_lock); 6783 conf->batch_bio_dispatch = true; 6784 rdev_for_each(rdev, mddev) { 6785 if (test_bit(Journal, &rdev->flags)) 6786 continue; 6787 if (blk_queue_nonrot(bdev_get_queue(rdev->bdev))) { 6788 conf->batch_bio_dispatch = false; 6789 break; 6790 } 6791 } 6792 6793 conf->bypass_threshold = BYPASS_THRESHOLD; 6794 conf->recovery_disabled = mddev->recovery_disabled - 1; 6795 6796 conf->raid_disks = mddev->raid_disks; 6797 if (mddev->reshape_position == MaxSector) 6798 conf->previous_raid_disks = mddev->raid_disks; 6799 else 6800 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; 6801 max_disks = max(conf->raid_disks, conf->previous_raid_disks); 6802 6803 conf->disks = kzalloc(max_disks * sizeof(struct disk_info), 6804 GFP_KERNEL); 6805 6806 if (!conf->disks) 6807 goto abort; 6808 6809 for (i = 0; i < max_disks; i++) { 6810 conf->disks[i].extra_page = alloc_page(GFP_KERNEL); 6811 if (!conf->disks[i].extra_page) 6812 goto abort; 6813 } 6814 6815 conf->mddev = mddev; 6816 6817 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) 6818 goto abort; 6819 6820 /* We init hash_locks[0] separately to that it can be used 6821 * as the reference lock in the spin_lock_nest_lock() call 6822 * in lock_all_device_hash_locks_irq in order to convince 6823 * lockdep that we know what 
we are doing. 6824 */ 6825 spin_lock_init(conf->hash_locks); 6826 for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++) 6827 spin_lock_init(conf->hash_locks + i); 6828 6829 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) 6830 INIT_LIST_HEAD(conf->inactive_list + i); 6831 6832 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) 6833 INIT_LIST_HEAD(conf->temp_inactive_list + i); 6834 6835 atomic_set(&conf->r5c_cached_full_stripes, 0); 6836 INIT_LIST_HEAD(&conf->r5c_full_stripe_list); 6837 atomic_set(&conf->r5c_cached_partial_stripes, 0); 6838 INIT_LIST_HEAD(&conf->r5c_partial_stripe_list); 6839 atomic_set(&conf->r5c_flushing_full_stripes, 0); 6840 atomic_set(&conf->r5c_flushing_partial_stripes, 0); 6841 6842 conf->level = mddev->new_level; 6843 conf->chunk_sectors = mddev->new_chunk_sectors; 6844 if (raid5_alloc_percpu(conf) != 0) 6845 goto abort; 6846 6847 pr_debug("raid456: run(%s) called.\n", mdname(mddev)); 6848 6849 rdev_for_each(rdev, mddev) { 6850 raid_disk = rdev->raid_disk; 6851 if (raid_disk >= max_disks 6852 || raid_disk < 0 || test_bit(Journal, &rdev->flags)) 6853 continue; 6854 disk = conf->disks + raid_disk; 6855 6856 if (test_bit(Replacement, &rdev->flags)) { 6857 if (disk->replacement) 6858 goto abort; 6859 disk->replacement = rdev; 6860 } else { 6861 if (disk->rdev) 6862 goto abort; 6863 disk->rdev = rdev; 6864 } 6865 6866 if (test_bit(In_sync, &rdev->flags)) { 6867 char b[BDEVNAME_SIZE]; 6868 pr_info("md/raid:%s: device %s operational as raid disk %d\n", 6869 mdname(mddev), bdevname(rdev->bdev, b), raid_disk); 6870 } else if (rdev->saved_raid_disk != raid_disk) 6871 /* Cannot rely on bitmap to complete recovery */ 6872 conf->fullsync = 1; 6873 } 6874 6875 conf->level = mddev->new_level; 6876 if (conf->level == 6) { 6877 conf->max_degraded = 2; 6878 if (raid6_call.xor_syndrome) 6879 conf->rmw_level = PARITY_ENABLE_RMW; 6880 else 6881 conf->rmw_level = PARITY_DISABLE_RMW; 6882 } else { 6883 conf->max_degraded = 1; 6884 conf->rmw_level = PARITY_ENABLE_RMW; 6885 } 6886 conf->algorithm = mddev->new_layout; 6887 conf->reshape_progress = mddev->reshape_position; 6888 if (conf->reshape_progress != MaxSector) { 6889 conf->prev_chunk_sectors = mddev->chunk_sectors; 6890 conf->prev_algo = mddev->layout; 6891 } else { 6892 conf->prev_chunk_sectors = conf->chunk_sectors; 6893 conf->prev_algo = conf->algorithm; 6894 } 6895 6896 conf->min_nr_stripes = NR_STRIPES; 6897 if (mddev->reshape_position != MaxSector) { 6898 int stripes = max_t(int, 6899 ((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4, 6900 ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4); 6901 conf->min_nr_stripes = max(NR_STRIPES, stripes); 6902 if (conf->min_nr_stripes != NR_STRIPES) 6903 pr_info("md/raid:%s: force stripe size %d for reshape\n", 6904 mdname(mddev), conf->min_nr_stripes); 6905 } 6906 memory = conf->min_nr_stripes * (sizeof(struct stripe_head) + 6907 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; 6908 atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS); 6909 if (grow_stripes(conf, conf->min_nr_stripes)) { 6910 pr_warn("md/raid:%s: couldn't allocate %dkB for buffers\n", 6911 mdname(mddev), memory); 6912 goto abort; 6913 } else 6914 pr_debug("md/raid:%s: allocated %dkB\n", mdname(mddev), memory); 6915 /* 6916 * Losing a stripe head costs more than the time to refill it, 6917 * it reduces the queue depth and so can hurt throughput. 6918 * So set it rather large, scaled by number of devices. 
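	 * (Editor's example: with DEFAULT_SEEKS == 2 and an 8-device array
	 * the line below yields shrinker.seeks == 64, i.e. each stripe_head
	 * is treated as roughly 32 times more expensive to recreate than a
	 * typical cached object, so reclaim is correspondingly reluctant.)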
	 */
	conf->shrinker.seeks = DEFAULT_SEEKS * conf->raid_disks * 4;
	conf->shrinker.scan_objects = raid5_cache_scan;
	conf->shrinker.count_objects = raid5_cache_count;
	conf->shrinker.batch = 128;
	conf->shrinker.flags = 0;
	if (register_shrinker(&conf->shrinker)) {
		pr_warn("md/raid:%s: couldn't register shrinker.\n",
			mdname(mddev));
		goto abort;
	}

	sprintf(pers_name, "raid%d", mddev->new_level);
	conf->thread = md_register_thread(raid5d, mddev, pers_name);
	if (!conf->thread) {
		pr_warn("md/raid:%s: couldn't allocate thread.\n",
			mdname(mddev));
		goto abort;
	}

	return conf;

abort:
	if (conf) {
		free_conf(conf);
		return ERR_PTR(-EIO);
	} else
		return ERR_PTR(-ENOMEM);
}

static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
{
	switch (algo) {
	case ALGORITHM_PARITY_0:
		if (raid_disk < max_degraded)
			return 1;
		break;
	case ALGORITHM_PARITY_N:
		if (raid_disk >= raid_disks - max_degraded)
			return 1;
		break;
	case ALGORITHM_PARITY_0_6:
		if (raid_disk == 0 ||
		    raid_disk == raid_disks - 1)
			return 1;
		break;
	case ALGORITHM_LEFT_ASYMMETRIC_6:
	case ALGORITHM_RIGHT_ASYMMETRIC_6:
	case ALGORITHM_LEFT_SYMMETRIC_6:
	case ALGORITHM_RIGHT_SYMMETRIC_6:
		if (raid_disk == raid_disks - 1)
			return 1;
	}
	return 0;
}

static int raid5_run(struct mddev *mddev)
{
	struct r5conf *conf;
	int working_disks = 0;
	int dirty_parity_disks = 0;
	struct md_rdev *rdev;
	struct md_rdev *journal_dev = NULL;
	sector_t reshape_offset = 0;
	int i;
	long long min_offset_diff = 0;
	int first = 1;

	if (mddev->recovery_cp != MaxSector)
		pr_notice("md/raid:%s: not clean -- starting background reconstruction\n",
			  mdname(mddev));

	rdev_for_each(rdev, mddev) {
		long long diff;

		if (test_bit(Journal, &rdev->flags)) {
			journal_dev = rdev;
			continue;
		}
		if (rdev->raid_disk < 0)
			continue;
		diff = (rdev->new_data_offset - rdev->data_offset);
		if (first) {
			min_offset_diff = diff;
			first = 0;
		} else if (mddev->reshape_backwards &&
			   diff < min_offset_diff)
			min_offset_diff = diff;
		else if (!mddev->reshape_backwards &&
			 diff > min_offset_diff)
			min_offset_diff = diff;
	}

	if (mddev->reshape_position != MaxSector) {
		/* Check that we can continue the reshape.
		 * Difficulties arise if the stripe we would write to
		 * next is at or after the stripe we would read from next.
		 * For a reshape that changes the number of devices, this
		 * is only possible for a very short time, and mdadm makes
		 * sure that time appears to have passed before assembling
		 * the array.  So we fail if that time hasn't passed.
		 * For a reshape that keeps the number of devices the same
		 * mdadm must be monitoring the reshape and keeping the
		 * critical areas read-only and backed up.  It will start
		 * the array in read-only mode, so we check for that.
		 */
		sector_t here_new, here_old;
		int old_disks;
		int max_degraded = (mddev->level == 6 ?
2 : 1); 7028 int chunk_sectors; 7029 int new_data_disks; 7030 7031 if (journal_dev) { 7032 pr_warn("md/raid:%s: don't support reshape with journal - aborting.\n", 7033 mdname(mddev)); 7034 return -EINVAL; 7035 } 7036 7037 if (mddev->new_level != mddev->level) { 7038 pr_warn("md/raid:%s: unsupported reshape required - aborting.\n", 7039 mdname(mddev)); 7040 return -EINVAL; 7041 } 7042 old_disks = mddev->raid_disks - mddev->delta_disks; 7043 /* reshape_position must be on a new-stripe boundary, and one 7044 * further up in new geometry must map after here in old 7045 * geometry. 7046 * If the chunk sizes are different, then as we perform reshape 7047 * in units of the largest of the two, reshape_position needs 7048 * be a multiple of the largest chunk size times new data disks. 7049 */ 7050 here_new = mddev->reshape_position; 7051 chunk_sectors = max(mddev->chunk_sectors, mddev->new_chunk_sectors); 7052 new_data_disks = mddev->raid_disks - max_degraded; 7053 if (sector_div(here_new, chunk_sectors * new_data_disks)) { 7054 pr_warn("md/raid:%s: reshape_position not on a stripe boundary\n", 7055 mdname(mddev)); 7056 return -EINVAL; 7057 } 7058 reshape_offset = here_new * chunk_sectors; 7059 /* here_new is the stripe we will write to */ 7060 here_old = mddev->reshape_position; 7061 sector_div(here_old, chunk_sectors * (old_disks-max_degraded)); 7062 /* here_old is the first stripe that we might need to read 7063 * from */ 7064 if (mddev->delta_disks == 0) { 7065 /* We cannot be sure it is safe to start an in-place 7066 * reshape. It is only safe if user-space is monitoring 7067 * and taking constant backups. 7068 * mdadm always starts a situation like this in 7069 * readonly mode so it can take control before 7070 * allowing any writes. So just check for that. 7071 */ 7072 if (abs(min_offset_diff) >= mddev->chunk_sectors && 7073 abs(min_offset_diff) >= mddev->new_chunk_sectors) 7074 /* not really in-place - so OK */; 7075 else if (mddev->ro == 0) { 7076 pr_warn("md/raid:%s: in-place reshape must be started in read-only mode - aborting\n", 7077 mdname(mddev)); 7078 return -EINVAL; 7079 } 7080 } else if (mddev->reshape_backwards 7081 ? 
		   (here_new * chunk_sectors + min_offset_diff <=
		    here_old * chunk_sectors)
		   : (here_new * chunk_sectors >=
		      here_old * chunk_sectors + (-min_offset_diff))) {
			/* Reading from the same stripe as writing to - bad */
			pr_warn("md/raid:%s: reshape_position too early for auto-recovery - aborting.\n",
				mdname(mddev));
			return -EINVAL;
		}
		pr_debug("md/raid:%s: reshape will continue\n", mdname(mddev));
		/* OK, we should be able to continue; */
	} else {
		BUG_ON(mddev->level != mddev->new_level);
		BUG_ON(mddev->layout != mddev->new_layout);
		BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
		BUG_ON(mddev->delta_disks != 0);
	}

	if (mddev->private == NULL)
		conf = setup_conf(mddev);
	else
		conf = mddev->private;

	if (IS_ERR(conf))
		return PTR_ERR(conf);

	if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
		if (!journal_dev) {
			pr_warn("md/raid:%s: journal disk is missing, force array readonly\n",
				mdname(mddev));
			mddev->ro = 1;
			set_disk_ro(mddev->gendisk, 1);
		} else if (mddev->recovery_cp == MaxSector)
			set_bit(MD_JOURNAL_CLEAN, &mddev->flags);
	}

	conf->min_offset_diff = min_offset_diff;
	mddev->thread = conf->thread;
	conf->thread = NULL;
	mddev->private = conf;

	for (i = 0; i < conf->raid_disks && conf->previous_raid_disks;
	     i++) {
		rdev = conf->disks[i].rdev;
		if (!rdev && conf->disks[i].replacement) {
			/* The replacement is all we have yet */
			rdev = conf->disks[i].replacement;
			conf->disks[i].replacement = NULL;
			clear_bit(Replacement, &rdev->flags);
			conf->disks[i].rdev = rdev;
		}
		if (!rdev)
			continue;
		if (conf->disks[i].replacement &&
		    conf->reshape_progress != MaxSector) {
			/* replacements and reshape simply do not mix. */
			pr_warn("md: cannot handle concurrent replacement and reshape.\n");
			goto abort;
		}
		if (test_bit(In_sync, &rdev->flags)) {
			working_disks++;
			continue;
		}
		/* This disk is not fully in-sync.  However if it
		 * just stored parity (beyond the recovery_offset),
		 * then we don't need to be concerned about the
		 * array being dirty.
		 * When reshape goes 'backwards', we never have
		 * partially completed devices, so we only need
		 * to worry about reshape going forwards.
		 */
		/* Hack because v0.91 doesn't store recovery_offset properly. */
		if (mddev->major_version == 0 &&
		    mddev->minor_version > 90)
			rdev->recovery_offset = reshape_offset;

		if (rdev->recovery_offset < reshape_offset) {
			/* We need to check old and new layout */
			if (!only_parity(rdev->raid_disk,
					 conf->algorithm,
					 conf->raid_disks,
					 conf->max_degraded))
				continue;
		}
		if (!only_parity(rdev->raid_disk,
				 conf->prev_algo,
				 conf->previous_raid_disks,
				 conf->max_degraded))
			continue;
		dirty_parity_disks++;
	}

	/*
	 * 0 for a fully functional array, 1 or 2 for a degraded array.
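	 * (Editor's example: a 6-device RAID6 missing one member reports
	 * degraded == 1 and still runs; only when degraded exceeds
	 * max_degraded does has_failed() below refuse to start the array.)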
	 */
	mddev->degraded = raid5_calc_degraded(conf);

	if (has_failed(conf)) {
		pr_crit("md/raid:%s: not enough operational devices (%d/%d failed)\n",
			mdname(mddev), mddev->degraded, conf->raid_disks);
		goto abort;
	}

	/* device size must be a multiple of chunk size */
	mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
	mddev->resync_max_sectors = mddev->dev_sectors;

	if (mddev->degraded > dirty_parity_disks &&
	    mddev->recovery_cp != MaxSector) {
		if (mddev->ok_start_degraded)
			pr_crit("md/raid:%s: starting dirty degraded array - data corruption possible.\n",
				mdname(mddev));
		else {
			pr_crit("md/raid:%s: cannot start dirty degraded array.\n",
				mdname(mddev));
			goto abort;
		}
	}

	pr_info("md/raid:%s: raid level %d active with %d out of %d devices, algorithm %d\n",
		mdname(mddev), conf->level,
		mddev->raid_disks - mddev->degraded, mddev->raid_disks,
		mddev->new_layout);

	print_raid5_conf(conf);

	if (conf->reshape_progress != MaxSector) {
		conf->reshape_safe = conf->reshape_progress;
		atomic_set(&conf->reshape_stripes, 0);
		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
		mddev->sync_thread = md_register_thread(md_do_sync, mddev,
							"reshape");
	}

	/* Ok, everything is just fine now */
	if (mddev->to_remove == &raid5_attrs_group)
		mddev->to_remove = NULL;
	else if (mddev->kobj.sd &&
		 sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
		pr_warn("raid5: failed to create sysfs attributes for %s\n",
			mdname(mddev));
	md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));

	if (mddev->queue) {
		int chunk_size;
		bool discard_supported = true;
		/* read-ahead size must cover two whole stripes, which
		 * is 2 * (number of data disks) * chunk size
		 */
		int data_disks = conf->previous_raid_disks - conf->max_degraded;
		int stripe = data_disks *
			((mddev->chunk_sectors << 9) / PAGE_SIZE);
		if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info->ra_pages = 2 * stripe;

		chunk_size = mddev->chunk_sectors << 9;
		blk_queue_io_min(mddev->queue, chunk_size);
		blk_queue_io_opt(mddev->queue, chunk_size *
				 (conf->raid_disks - conf->max_degraded));
		mddev->queue->limits.raid_partial_stripes_expensive = 1;
		/*
		 * We can only discard a whole stripe.
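		 * (Editor's example: 3 data disks with a 512K chunk give a
		 * 1.5M stripe, rounded up below to a 2M discard granularity.)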
It doesn't make sense to 7247 * discard data disk but write parity disk 7248 */ 7249 stripe = stripe * PAGE_SIZE; 7250 /* Round up to power of 2, as discard handling 7251 * currently assumes that */ 7252 while ((stripe-1) & stripe) 7253 stripe = (stripe | (stripe-1)) + 1; 7254 mddev->queue->limits.discard_alignment = stripe; 7255 mddev->queue->limits.discard_granularity = stripe; 7256 7257 /* 7258 * We use 16-bit counter of active stripes in bi_phys_segments 7259 * (minus one for over-loaded initialization) 7260 */ 7261 blk_queue_max_hw_sectors(mddev->queue, 0xfffe * STRIPE_SECTORS); 7262 blk_queue_max_discard_sectors(mddev->queue, 7263 0xfffe * STRIPE_SECTORS); 7264 7265 /* 7266 * unaligned part of discard request will be ignored, so can't 7267 * guarantee discard_zeroes_data 7268 */ 7269 mddev->queue->limits.discard_zeroes_data = 0; 7270 7271 blk_queue_max_write_same_sectors(mddev->queue, 0); 7272 7273 rdev_for_each(rdev, mddev) { 7274 disk_stack_limits(mddev->gendisk, rdev->bdev, 7275 rdev->data_offset << 9); 7276 disk_stack_limits(mddev->gendisk, rdev->bdev, 7277 rdev->new_data_offset << 9); 7278 /* 7279 * discard_zeroes_data is required, otherwise data 7280 * could be lost. Consider a scenario: discard a stripe 7281 * (the stripe could be inconsistent if 7282 * discard_zeroes_data is 0); write one disk of the 7283 * stripe (the stripe could be inconsistent again 7284 * depending on which disks are used to calculate 7285 * parity); the disk is broken; The stripe data of this 7286 * disk is lost. 7287 */ 7288 if (!blk_queue_discard(bdev_get_queue(rdev->bdev)) || 7289 !bdev_get_queue(rdev->bdev)-> 7290 limits.discard_zeroes_data) 7291 discard_supported = false; 7292 /* Unfortunately, discard_zeroes_data is not currently 7293 * a guarantee - just a hint. So we only allow DISCARD 7294 * if the sysadmin has confirmed that only safe devices 7295 * are in use by setting a module parameter. 
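			 * (Editor's note: e.g. "modprobe raid456
			 * devices_handle_discard_safely=Y", or booting with
			 * raid456.devices_handle_discard_safely=Y, as the
			 * message below spells out.)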
7296 */ 7297 if (!devices_handle_discard_safely) { 7298 if (discard_supported) { 7299 pr_info("md/raid456: discard support disabled due to uncertainty.\n"); 7300 pr_info("Set raid456.devices_handle_discard_safely=Y to override.\n"); 7301 } 7302 discard_supported = false; 7303 } 7304 } 7305 7306 if (discard_supported && 7307 mddev->queue->limits.max_discard_sectors >= (stripe >> 9) && 7308 mddev->queue->limits.discard_granularity >= stripe) 7309 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, 7310 mddev->queue); 7311 else 7312 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, 7313 mddev->queue); 7314 7315 blk_queue_max_hw_sectors(mddev->queue, UINT_MAX); 7316 } 7317 7318 if (journal_dev) { 7319 char b[BDEVNAME_SIZE]; 7320 7321 pr_debug("md/raid:%s: using device %s as journal\n", 7322 mdname(mddev), bdevname(journal_dev->bdev, b)); 7323 if (r5l_init_log(conf, journal_dev)) 7324 goto abort; 7325 } 7326 7327 return 0; 7328 abort: 7329 md_unregister_thread(&mddev->thread); 7330 print_raid5_conf(conf); 7331 free_conf(conf); 7332 mddev->private = NULL; 7333 pr_warn("md/raid:%s: failed to run raid set.\n", mdname(mddev)); 7334 return -EIO; 7335 } 7336 7337 static void raid5_free(struct mddev *mddev, void *priv) 7338 { 7339 struct r5conf *conf = priv; 7340 7341 free_conf(conf); 7342 mddev->to_remove = &raid5_attrs_group; 7343 } 7344 7345 static void raid5_status(struct seq_file *seq, struct mddev *mddev) 7346 { 7347 struct r5conf *conf = mddev->private; 7348 int i; 7349 7350 seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level, 7351 conf->chunk_sectors / 2, mddev->layout); 7352 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); 7353 rcu_read_lock(); 7354 for (i = 0; i < conf->raid_disks; i++) { 7355 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); 7356 seq_printf (seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_"); 7357 } 7358 rcu_read_unlock(); 7359 seq_printf (seq, "]"); 7360 } 7361 7362 static void print_raid5_conf (struct r5conf *conf) 7363 { 7364 int i; 7365 struct disk_info *tmp; 7366 7367 pr_debug("RAID conf printout:\n"); 7368 if (!conf) { 7369 pr_debug("(conf==NULL)\n"); 7370 return; 7371 } 7372 pr_debug(" --- level:%d rd:%d wd:%d\n", conf->level, 7373 conf->raid_disks, 7374 conf->raid_disks - conf->mddev->degraded); 7375 7376 for (i = 0; i < conf->raid_disks; i++) { 7377 char b[BDEVNAME_SIZE]; 7378 tmp = conf->disks + i; 7379 if (tmp->rdev) 7380 pr_debug(" disk %d, o:%d, dev:%s\n", 7381 i, !test_bit(Faulty, &tmp->rdev->flags), 7382 bdevname(tmp->rdev->bdev, b)); 7383 } 7384 } 7385 7386 static int raid5_spare_active(struct mddev *mddev) 7387 { 7388 int i; 7389 struct r5conf *conf = mddev->private; 7390 struct disk_info *tmp; 7391 int count = 0; 7392 unsigned long flags; 7393 7394 for (i = 0; i < conf->raid_disks; i++) { 7395 tmp = conf->disks + i; 7396 if (tmp->replacement 7397 && tmp->replacement->recovery_offset == MaxSector 7398 && !test_bit(Faulty, &tmp->replacement->flags) 7399 && !test_and_set_bit(In_sync, &tmp->replacement->flags)) { 7400 /* Replacement has just become active. */ 7401 if (!tmp->rdev 7402 || !test_and_clear_bit(In_sync, &tmp->rdev->flags)) 7403 count++; 7404 if (tmp->rdev) { 7405 /* Replaced device not technically faulty, 7406 * but we need to be sure it gets removed 7407 * and never re-added. 
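				 * (Editor's note: setting Faulty below keeps
				 * the replaced device out of the array until
				 * userspace removes it, and the sysfs event
				 * lets monitoring tools see the change.)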
7408 */ 7409 set_bit(Faulty, &tmp->rdev->flags); 7410 sysfs_notify_dirent_safe( 7411 tmp->rdev->sysfs_state); 7412 } 7413 sysfs_notify_dirent_safe(tmp->replacement->sysfs_state); 7414 } else if (tmp->rdev 7415 && tmp->rdev->recovery_offset == MaxSector 7416 && !test_bit(Faulty, &tmp->rdev->flags) 7417 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { 7418 count++; 7419 sysfs_notify_dirent_safe(tmp->rdev->sysfs_state); 7420 } 7421 } 7422 spin_lock_irqsave(&conf->device_lock, flags); 7423 mddev->degraded = raid5_calc_degraded(conf); 7424 spin_unlock_irqrestore(&conf->device_lock, flags); 7425 print_raid5_conf(conf); 7426 return count; 7427 } 7428 7429 static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev) 7430 { 7431 struct r5conf *conf = mddev->private; 7432 int err = 0; 7433 int number = rdev->raid_disk; 7434 struct md_rdev **rdevp; 7435 struct disk_info *p = conf->disks + number; 7436 7437 print_raid5_conf(conf); 7438 if (test_bit(Journal, &rdev->flags) && conf->log) { 7439 struct r5l_log *log; 7440 /* 7441 * we can't wait pending write here, as this is called in 7442 * raid5d, wait will deadlock. 7443 */ 7444 if (atomic_read(&mddev->writes_pending)) 7445 return -EBUSY; 7446 log = conf->log; 7447 conf->log = NULL; 7448 synchronize_rcu(); 7449 r5l_exit_log(log); 7450 return 0; 7451 } 7452 if (rdev == p->rdev) 7453 rdevp = &p->rdev; 7454 else if (rdev == p->replacement) 7455 rdevp = &p->replacement; 7456 else 7457 return 0; 7458 7459 if (number >= conf->raid_disks && 7460 conf->reshape_progress == MaxSector) 7461 clear_bit(In_sync, &rdev->flags); 7462 7463 if (test_bit(In_sync, &rdev->flags) || 7464 atomic_read(&rdev->nr_pending)) { 7465 err = -EBUSY; 7466 goto abort; 7467 } 7468 /* Only remove non-faulty devices if recovery 7469 * isn't possible. 7470 */ 7471 if (!test_bit(Faulty, &rdev->flags) && 7472 mddev->recovery_disabled != conf->recovery_disabled && 7473 !has_failed(conf) && 7474 (!p->replacement || p->replacement == rdev) && 7475 number < conf->raid_disks) { 7476 err = -EBUSY; 7477 goto abort; 7478 } 7479 *rdevp = NULL; 7480 if (!test_bit(RemoveSynchronized, &rdev->flags)) { 7481 synchronize_rcu(); 7482 if (atomic_read(&rdev->nr_pending)) { 7483 /* lost the race, try later */ 7484 err = -EBUSY; 7485 *rdevp = rdev; 7486 } 7487 } 7488 if (p->replacement) { 7489 /* We must have just cleared 'rdev' */ 7490 p->rdev = p->replacement; 7491 clear_bit(Replacement, &p->replacement->flags); 7492 smp_mb(); /* Make sure other CPUs may see both as identical 7493 * but will never see neither - if they are careful 7494 */ 7495 p->replacement = NULL; 7496 clear_bit(WantReplacement, &rdev->flags); 7497 } else 7498 /* We might have just removed the Replacement as faulty- 7499 * clear the bit just in case 7500 */ 7501 clear_bit(WantReplacement, &rdev->flags); 7502 abort: 7503 7504 print_raid5_conf(conf); 7505 return err; 7506 } 7507 7508 static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) 7509 { 7510 struct r5conf *conf = mddev->private; 7511 int err = -EEXIST; 7512 int disk; 7513 struct disk_info *p; 7514 int first = 0; 7515 int last = conf->raid_disks - 1; 7516 7517 if (test_bit(Journal, &rdev->flags)) { 7518 char b[BDEVNAME_SIZE]; 7519 if (conf->log) 7520 return -EBUSY; 7521 7522 rdev->raid_disk = 0; 7523 /* 7524 * The array is in readonly mode if journal is missing, so no 7525 * write requests running. 
static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct r5conf *conf = mddev->private;
	int err = -EEXIST;
	int disk;
	struct disk_info *p;
	int first = 0;
	int last = conf->raid_disks - 1;

	if (test_bit(Journal, &rdev->flags)) {
		char b[BDEVNAME_SIZE];
		if (conf->log)
			return -EBUSY;

		rdev->raid_disk = 0;
		/*
		 * The array is in readonly mode if the journal is missing,
		 * so no write requests are running.  We should be safe.
		 */
		r5l_init_log(conf, rdev);
		pr_debug("md/raid:%s: using device %s as journal\n",
			 mdname(mddev), bdevname(rdev->bdev, b));
		return 0;
	}
	if (mddev->recovery_disabled == conf->recovery_disabled)
		return -EBUSY;

	if (rdev->saved_raid_disk < 0 && has_failed(conf))
		/* no point adding a device */
		return -EINVAL;

	if (rdev->raid_disk >= 0)
		first = last = rdev->raid_disk;

	/*
	 * Find the disk ... but prefer rdev->saved_raid_disk
	 * if possible.
	 */
	if (rdev->saved_raid_disk >= 0 &&
	    rdev->saved_raid_disk >= first &&
	    conf->disks[rdev->saved_raid_disk].rdev == NULL)
		first = rdev->saved_raid_disk;

	for (disk = first; disk <= last; disk++) {
		p = conf->disks + disk;
		if (p->rdev == NULL) {
			clear_bit(In_sync, &rdev->flags);
			rdev->raid_disk = disk;
			err = 0;
			if (rdev->saved_raid_disk != disk)
				conf->fullsync = 1;
			rcu_assign_pointer(p->rdev, rdev);
			goto out;
		}
	}
	for (disk = first; disk <= last; disk++) {
		p = conf->disks + disk;
		if (test_bit(WantReplacement, &p->rdev->flags) &&
		    p->replacement == NULL) {
			clear_bit(In_sync, &rdev->flags);
			set_bit(Replacement, &rdev->flags);
			rdev->raid_disk = disk;
			err = 0;
			conf->fullsync = 1;
			rcu_assign_pointer(p->replacement, rdev);
			break;
		}
	}
out:
	print_raid5_conf(conf);
	return err;
}

static int raid5_resize(struct mddev *mddev, sector_t sectors)
{
	/* No resync is happening, and there is enough space
	 * on all devices, so we can resize.
	 * We need to make sure resync covers any new space.
	 * If the array is shrinking we should possibly wait until
	 * any io in the removed space completes, but it hardly seems
	 * worth it.
	 */
	sector_t newsize;
	struct r5conf *conf = mddev->private;

	if (conf->log)
		return -EINVAL;
	sectors &= ~((sector_t)conf->chunk_sectors - 1);
	newsize = raid5_size(mddev, sectors, mddev->raid_disks);
	if (mddev->external_size &&
	    mddev->array_sectors > newsize)
		return -EINVAL;
	if (mddev->bitmap) {
		int ret = bitmap_resize(mddev->bitmap, sectors, 0, 0);
		if (ret)
			return ret;
	}
	md_set_array_sectors(mddev, newsize);
	set_capacity(mddev->gendisk, mddev->array_sectors);
	revalidate_disk(mddev->gendisk);
	if (sectors > mddev->dev_sectors &&
	    mddev->recovery_cp > mddev->dev_sectors) {
		mddev->recovery_cp = mddev->dev_sectors;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}
	mddev->dev_sectors = sectors;
	mddev->resync_max_sectors = sectors;
	return 0;
}
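/*
 * Worked example (not part of the driver; a user-space sketch).  The resize
 * above first rounds the requested per-device size down to a whole number
 * of chunks; raid5_size() then scales it by the number of data disks.
 * With hypothetical numbers - 512-sector chunks, 4 disks, 1 parity:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long long sectors = 1000000;	/* requested per-device sectors */
	unsigned long long chunk_sectors = 512;
	unsigned long long data_disks = 3;	/* 4 disks minus 1 parity */

	sectors &= ~(chunk_sectors - 1);	/* 999936: whole chunks only */
	printf("array sectors: %llu\n", sectors * data_disks);	/* 2999808 */
	return 0;
}
#endif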
static int check_stripe_cache(struct mddev *mddev)
{
	/* Can only proceed if there are plenty of stripe_heads.
	 * We need a minimum of one full stripe, and for sensible progress
	 * it is best to have about 4 times that.
	 * If we require 4 times, then the default 256 4K stripe_heads will
	 * allow for chunk sizes up to 256K, which is probably OK.
	 * If the chunk size is greater, user-space should request more
	 * stripe_heads first.
	 */
	struct r5conf *conf = mddev->private;
	if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
	    > conf->min_nr_stripes ||
	    ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
	    > conf->min_nr_stripes) {
		pr_warn("md/raid:%s: reshape: not enough stripes.  Needed %lu\n",
			mdname(mddev),
			((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
			 / STRIPE_SIZE) * 4);
		return 0;
	}
	return 1;
}

static int check_reshape(struct mddev *mddev)
{
	struct r5conf *conf = mddev->private;

	if (conf->log)
		return -EINVAL;
	if (mddev->delta_disks == 0 &&
	    mddev->new_layout == mddev->layout &&
	    mddev->new_chunk_sectors == mddev->chunk_sectors)
		return 0; /* nothing to do */
	if (has_failed(conf))
		return -EINVAL;
	if (mddev->delta_disks < 0 && mddev->reshape_position == MaxSector) {
		/* We might be able to shrink, but the devices must
		 * be made bigger first.
		 * For raid6, 4 is the minimum size.
		 * Otherwise 2 is the minimum.
		 */
		int min = 2;
		if (mddev->level == 6)
			min = 4;
		if (mddev->raid_disks + mddev->delta_disks < min)
			return -EINVAL;
	}

	if (!check_stripe_cache(mddev))
		return -ENOSPC;

	if (mddev->new_chunk_sectors > mddev->chunk_sectors ||
	    mddev->delta_disks > 0)
		if (resize_chunks(conf,
				  conf->previous_raid_disks
				  + max(0, mddev->delta_disks),
				  max(mddev->new_chunk_sectors,
				      mddev->chunk_sectors)
			) < 0)
			return -ENOMEM;
	return resize_stripes(conf, (conf->previous_raid_disks
				     + mddev->delta_disks));
}
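/*
 * Worked example (not part of the driver; a user-space sketch).  The check
 * in check_stripe_cache() above demands min_nr_stripes >= 4 * (chunk size /
 * STRIPE_SIZE), so with 4K stripe_heads the default cache of 256 tops out
 * at 256K chunks, and a 512K chunk needs the cache enlarged first:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int stripe_size = 4096;	/* STRIPE_SIZE with 4K pages */
	unsigned int chunk_bytes = 512 * 1024;	/* proposed chunk size */
	unsigned int needed = (chunk_bytes / stripe_size) * 4;

	printf("need %u stripe_heads (default is 256)\n", needed);	/* 512 */
	return 0;
}
#endif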
static int raid5_start_reshape(struct mddev *mddev)
{
	struct r5conf *conf = mddev->private;
	struct md_rdev *rdev;
	int spares = 0;
	unsigned long flags;

	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return -EBUSY;

	if (!check_stripe_cache(mddev))
		return -ENOSPC;

	if (has_failed(conf))
		return -EINVAL;

	rdev_for_each(rdev, mddev) {
		if (!test_bit(In_sync, &rdev->flags)
		    && !test_bit(Faulty, &rdev->flags))
			spares++;
	}

	if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
		/* Not enough devices even to make a degraded array
		 * of that size.
		 */
		return -EINVAL;

	/* Refuse to reduce the size of the array.  Any reduction in
	 * array size must be through explicit setting of the array_size
	 * attribute.
	 */
	if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
	    < mddev->array_sectors) {
		pr_warn("md/raid:%s: array size must be reduced before number of disks\n",
			mdname(mddev));
		return -EINVAL;
	}

	atomic_set(&conf->reshape_stripes, 0);
	spin_lock_irq(&conf->device_lock);
	write_seqcount_begin(&conf->gen_lock);
	conf->previous_raid_disks = conf->raid_disks;
	conf->raid_disks += mddev->delta_disks;
	conf->prev_chunk_sectors = conf->chunk_sectors;
	conf->chunk_sectors = mddev->new_chunk_sectors;
	conf->prev_algo = conf->algorithm;
	conf->algorithm = mddev->new_layout;
	conf->generation++;
	/* Code that selects data_offset needs to see the generation update
	 * if reshape_progress has been set - so a memory barrier is needed.
	 */
	smp_mb();
	if (mddev->reshape_backwards)
		conf->reshape_progress = raid5_size(mddev, 0, 0);
	else
		conf->reshape_progress = 0;
	conf->reshape_safe = conf->reshape_progress;
	write_seqcount_end(&conf->gen_lock);
	spin_unlock_irq(&conf->device_lock);

	/* Now make sure any requests that proceeded on the assumption
	 * the reshape wasn't running - like Discard or Read - have
	 * completed.
	 */
	mddev_suspend(mddev);
	mddev_resume(mddev);

	/* Add some new drives, as many as will fit.
	 * We know there are enough to make the newly sized array work.
	 * Don't add devices if we are reducing the number of
	 * devices in the array.  This is because it is not possible
	 * to correctly record the "partially reconstructed" state of
	 * such devices during the reshape and confusion could result.
	 */
	if (mddev->delta_disks >= 0) {
		rdev_for_each(rdev, mddev)
			if (rdev->raid_disk < 0 &&
			    !test_bit(Faulty, &rdev->flags)) {
				if (raid5_add_disk(mddev, rdev) == 0) {
					if (rdev->raid_disk
					    >= conf->previous_raid_disks)
						set_bit(In_sync, &rdev->flags);
					else
						rdev->recovery_offset = 0;

					if (sysfs_link_rdev(mddev, rdev))
						/* Failure here is OK */;
				}
			} else if (rdev->raid_disk >= conf->previous_raid_disks
				   && !test_bit(Faulty, &rdev->flags)) {
				/* This is a spare that was manually added */
				set_bit(In_sync, &rdev->flags);
			}

		/* When a reshape changes the number of devices,
		 * ->degraded is measured against the larger of the
		 * pre and post numbers of devices.
		 */
		spin_lock_irqsave(&conf->device_lock, flags);
		mddev->degraded = raid5_calc_degraded(conf);
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}
	mddev->raid_disks = conf->raid_disks;
	mddev->reshape_position = conf->reshape_progress;
	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);

	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
	clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
	set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
	set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
	mddev->sync_thread = md_register_thread(md_do_sync, mddev,
						"reshape");
	if (!mddev->sync_thread) {
		mddev->recovery = 0;
		spin_lock_irq(&conf->device_lock);
		write_seqcount_begin(&conf->gen_lock);
		mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
		mddev->new_chunk_sectors =
			conf->chunk_sectors = conf->prev_chunk_sectors;
		mddev->new_layout = conf->algorithm = conf->prev_algo;
		rdev_for_each(rdev, mddev)
			rdev->new_data_offset = rdev->data_offset;
		smp_wmb();
		conf->generation--;
		conf->reshape_progress = MaxSector;
		mddev->reshape_position = MaxSector;
		write_seqcount_end(&conf->gen_lock);
		spin_unlock_irq(&conf->device_lock);
		return -EAGAIN;
	}
	conf->reshape_checkpoint = jiffies;
	md_wakeup_thread(mddev->sync_thread);
	md_new_event(mddev);
	return 0;
}
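/*
 * Illustrative sketch (not part of the driver; kept under "#if 0" so it is
 * never compiled).  The writer above bumps conf->generation inside
 * conf->gen_lock (a seqcount) so that lock-free readers of the geometry can
 * detect a racing reshape and retry; the matching reader side looks like:
 */
#if 0
static int sample_raid_disks(struct r5conf *conf)
{
	unsigned int seq;
	int disks;

	do {
		seq = read_seqcount_begin(&conf->gen_lock);
		disks = conf->raid_disks;	/* sample the geometry */
	} while (read_seqcount_retry(&conf->gen_lock, seq));

	return disks;			/* a consistent snapshot */
}
#endif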
/* This is called from the reshape thread and should make any
 * changes needed in 'conf'.
 */
static void end_reshape(struct r5conf *conf)
{
	if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
		struct md_rdev *rdev;

		spin_lock_irq(&conf->device_lock);
		conf->previous_raid_disks = conf->raid_disks;
		rdev_for_each(rdev, conf->mddev)
			rdev->data_offset = rdev->new_data_offset;
		smp_wmb();
		conf->reshape_progress = MaxSector;
		conf->mddev->reshape_position = MaxSector;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_for_overlap);

		/* The read-ahead size must cover two whole stripes, which
		 * is 2 * (number of data disks) * chunksize.
		 */
		if (conf->mddev->queue) {
			int data_disks = conf->raid_disks - conf->max_degraded;
			int stripe = data_disks * ((conf->chunk_sectors << 9)
						   / PAGE_SIZE);
			if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
				conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
		}
	}
}

/* This is called from the raid5d thread with mddev_lock held.
 * It makes config changes to the device.
 */
static void raid5_finish_reshape(struct mddev *mddev)
{
	struct r5conf *conf = mddev->private;

	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {

		if (mddev->delta_disks > 0) {
			md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
			if (mddev->queue) {
				set_capacity(mddev->gendisk, mddev->array_sectors);
				revalidate_disk(mddev->gendisk);
			}
		} else {
			int d;
			spin_lock_irq(&conf->device_lock);
			mddev->degraded = raid5_calc_degraded(conf);
			spin_unlock_irq(&conf->device_lock);
			for (d = conf->raid_disks;
			     d < conf->raid_disks - mddev->delta_disks;
			     d++) {
				struct md_rdev *rdev = conf->disks[d].rdev;
				if (rdev)
					clear_bit(In_sync, &rdev->flags);
				rdev = conf->disks[d].replacement;
				if (rdev)
					clear_bit(In_sync, &rdev->flags);
			}
		}
		mddev->layout = conf->algorithm;
		mddev->chunk_sectors = conf->chunk_sectors;
		mddev->reshape_position = MaxSector;
		mddev->delta_disks = 0;
		mddev->reshape_backwards = 0;
	}
}

static void raid5_quiesce(struct mddev *mddev, int state)
{
	struct r5conf *conf = mddev->private;

	switch (state) {
	case 2: /* resume for a suspend */
		wake_up(&conf->wait_for_overlap);
		break;

	case 1: /* stop all writes */
		lock_all_device_hash_locks_irq(conf);
		/* '2' tells resync/reshape to pause so that all
		 * active stripes can drain.
		 */
		r5c_flush_cache(conf, INT_MAX);
		conf->quiesce = 2;
		wait_event_cmd(conf->wait_for_quiescent,
			       atomic_read(&conf->active_stripes) == 0 &&
			       atomic_read(&conf->active_aligned_reads) == 0,
			       unlock_all_device_hash_locks_irq(conf),
			       lock_all_device_hash_locks_irq(conf));
		conf->quiesce = 1;
		unlock_all_device_hash_locks_irq(conf);
		/* allow reshape to continue */
		wake_up(&conf->wait_for_overlap);
		break;

	case 0: /* re-enable writes */
		lock_all_device_hash_locks_irq(conf);
		conf->quiesce = 0;
		wake_up(&conf->wait_for_quiescent);
		wake_up(&conf->wait_for_overlap);
		unlock_all_device_hash_locks_irq(conf);
		break;
	}
	r5l_quiesce(conf->log, state);
}
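/*
 * Illustrative sketch (not part of the driver; kept under "#if 0" so it is
 * never compiled).  Callers bracket management work with quiesce state 1
 * ("stop all writes") and state 0 ("re-enable writes"); state 2 is the
 * lighter resume used after a suspend.  A typical, hypothetical caller:
 */
#if 0
static void with_array_quiesced(struct mddev *mddev)
{
	mddev->pers->quiesce(mddev, 1);	/* drain and block all stripe I/O */
	/* ... work that must not race with writes ... */
	mddev->pers->quiesce(mddev, 0);	/* let writes flow again */
}
#endif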
static void *raid45_takeover_raid0(struct mddev *mddev, int level)
{
	struct r0conf *raid0_conf = mddev->private;
	sector_t sectors;

	/* for raid0 takeover only one zone is supported */
	if (raid0_conf->nr_strip_zones > 1) {
		pr_warn("md/raid:%s: cannot takeover raid0 with more than one zone.\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	sectors = raid0_conf->strip_zone[0].zone_end;
	sector_div(sectors, raid0_conf->strip_zone[0].nb_dev);
	mddev->dev_sectors = sectors;
	mddev->new_level = level;
	mddev->new_layout = ALGORITHM_PARITY_N;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks += 1;
	mddev->delta_disks = 1;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;

	return setup_conf(mddev);
}

static void *raid5_takeover_raid1(struct mddev *mddev)
{
	int chunksect;
	void *ret;

	if (mddev->raid_disks != 2 ||
	    mddev->degraded > 1)
		return ERR_PTR(-EINVAL);

	/* Should we check if there are write-behind devices? */

	chunksect = 64 * 2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect - 1)))
		chunksect >>= 1;

	if ((chunksect << 9) < STRIPE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	mddev->new_level = 5;
	mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
	mddev->new_chunk_sectors = chunksect;

	ret = setup_conf(mddev);
	if (!IS_ERR(ret))
		mddev_clear_unsupported_flags(mddev,
					      UNSUPPORTED_MDDEV_FLAGS);
	return ret;
}

static void *raid5_takeover_raid6(struct mddev *mddev)
{
	int new_layout;

	switch (mddev->layout) {
	case ALGORITHM_LEFT_ASYMMETRIC_6:
		new_layout = ALGORITHM_LEFT_ASYMMETRIC;
		break;
	case ALGORITHM_RIGHT_ASYMMETRIC_6:
		new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
		break;
	case ALGORITHM_LEFT_SYMMETRIC_6:
		new_layout = ALGORITHM_LEFT_SYMMETRIC;
		break;
	case ALGORITHM_RIGHT_SYMMETRIC_6:
		new_layout = ALGORITHM_RIGHT_SYMMETRIC;
		break;
	case ALGORITHM_PARITY_0_6:
		new_layout = ALGORITHM_PARITY_0;
		break;
	case ALGORITHM_PARITY_N:
		new_layout = ALGORITHM_PARITY_N;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	mddev->new_level = 5;
	mddev->new_layout = new_layout;
	mddev->delta_disks = -1;
	mddev->raid_disks -= 1;
	return setup_conf(mddev);
}
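/*
 * Worked example (not part of the driver; a user-space sketch).  The loop
 * in raid5_takeover_raid1() above starts from a 64K chunk and halves it
 * until it divides the array size, i.e. it picks the largest power-of-two
 * chunk (at most 64K) that fits:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long long array_sectors = 1000000;	/* hypothetical raid1 size */
	int chunksect = 128;				/* 64K in 512-byte sectors */

	while (chunksect && (array_sectors & (chunksect - 1)))
		chunksect >>= 1;
	printf("chunk: %d sectors (%dK)\n", chunksect, chunksect / 2);	/* 64, 32K */
	return 0;
}
#endif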
static int raid5_check_reshape(struct mddev *mddev)
{
	/* For a 2-drive array, the layout and chunk size can be changed
	 * immediately as no restriping is needed.
	 * For larger arrays we record the new value - after validation -
	 * to be used by a reshape pass.
	 */
	struct r5conf *conf = mddev->private;
	int new_chunk = mddev->new_chunk_sectors;

	if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
		return -EINVAL;
	if (new_chunk > 0) {
		if (!is_power_of_2(new_chunk))
			return -EINVAL;
		if (new_chunk < (PAGE_SIZE >> 9))
			return -EINVAL;
		if (mddev->array_sectors & (new_chunk - 1))
			/* not a factor of the array size */
			return -EINVAL;
	}

	/* They look valid */

	if (mddev->raid_disks == 2) {
		/* can make the change immediately */
		if (mddev->new_layout >= 0) {
			conf->algorithm = mddev->new_layout;
			mddev->layout = mddev->new_layout;
		}
		if (new_chunk > 0) {
			conf->chunk_sectors = new_chunk;
			mddev->chunk_sectors = new_chunk;
		}
		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
		md_wakeup_thread(mddev->thread);
	}
	return check_reshape(mddev);
}

static int raid6_check_reshape(struct mddev *mddev)
{
	int new_chunk = mddev->new_chunk_sectors;

	if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
		return -EINVAL;
	if (new_chunk > 0) {
		if (!is_power_of_2(new_chunk))
			return -EINVAL;
		if (new_chunk < (PAGE_SIZE >> 9))
			return -EINVAL;
		if (mddev->array_sectors & (new_chunk - 1))
			/* not a factor of the array size */
			return -EINVAL;
	}

	/* They look valid */
	return check_reshape(mddev);
}

static void *raid5_takeover(struct mddev *mddev)
{
	/* raid5 can take over:
	 *  raid0 - if there is only one strip zone - make it a raid4 layout
	 *  raid1 - if there are two drives.  We need to know the chunk size
	 *  raid4 - trivial - just use a raid4 layout.
	 *  raid6 - Providing it is a *_6 layout
	 */
	if (mddev->level == 0)
		return raid45_takeover_raid0(mddev, 5);
	if (mddev->level == 1)
		return raid5_takeover_raid1(mddev);
	if (mddev->level == 4) {
		mddev->new_layout = ALGORITHM_PARITY_N;
		mddev->new_level = 5;
		return setup_conf(mddev);
	}
	if (mddev->level == 6)
		return raid5_takeover_raid6(mddev);

	return ERR_PTR(-EINVAL);
}

static void *raid4_takeover(struct mddev *mddev)
{
	/* raid4 can take over:
	 *  raid0 - if there is only one strip zone
	 *  raid5 - if layout is right
	 */
	if (mddev->level == 0)
		return raid45_takeover_raid0(mddev, 4);
	if (mddev->level == 5 &&
	    mddev->layout == ALGORITHM_PARITY_N) {
		mddev->new_layout = 0;
		mddev->new_level = 4;
		return setup_conf(mddev);
	}
	return ERR_PTR(-EINVAL);
}

static struct md_personality raid5_personality;
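/*
 * Illustrative sketch (not part of the driver; kept under "#if 0" so it is
 * never compiled).  The two check routines above apply the same three
 * chunk-size rules; factored out, with a hypothetical helper name:
 */
#if 0
static int new_chunk_valid(struct mddev *mddev, int new_chunk)
{
	if (!is_power_of_2(new_chunk))
		return 0;
	if (new_chunk < (PAGE_SIZE >> 9))
		return 0;		/* must be at least one page */
	if (mddev->array_sectors & (new_chunk - 1))
		return 0;		/* must divide the array size */
	return 1;
}
#endif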
static void *raid6_takeover(struct mddev *mddev)
{
	/* Currently we can only take over a raid5.  We map the
	 * personality to an equivalent raid6 personality
	 * with the Q block at the end.
	 */
	int new_layout;

	if (mddev->pers != &raid5_personality)
		return ERR_PTR(-EINVAL);
	if (mddev->degraded > 1)
		return ERR_PTR(-EINVAL);
	if (mddev->raid_disks > 253)
		return ERR_PTR(-EINVAL);
	if (mddev->raid_disks < 3)
		return ERR_PTR(-EINVAL);

	switch (mddev->layout) {
	case ALGORITHM_LEFT_ASYMMETRIC:
		new_layout = ALGORITHM_LEFT_ASYMMETRIC_6;
		break;
	case ALGORITHM_RIGHT_ASYMMETRIC:
		new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
		break;
	case ALGORITHM_LEFT_SYMMETRIC:
		new_layout = ALGORITHM_LEFT_SYMMETRIC_6;
		break;
	case ALGORITHM_RIGHT_SYMMETRIC:
		new_layout = ALGORITHM_RIGHT_SYMMETRIC_6;
		break;
	case ALGORITHM_PARITY_0:
		new_layout = ALGORITHM_PARITY_0_6;
		break;
	case ALGORITHM_PARITY_N:
		new_layout = ALGORITHM_PARITY_N;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	mddev->new_level = 6;
	mddev->new_layout = new_layout;
	mddev->delta_disks = 1;
	mddev->raid_disks += 1;
	return setup_conf(mddev);
}
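/*
 * Note on the mapping above: each raid5 layout is converted to the raid6
 * *_6 layout with the same P-block placement and with Q always on the last
 * device, so existing data and parity stay where they are.  Converting back
 * through raid5_takeover_raid6() restores the original layout, e.g.:
 *
 *	ALGORITHM_LEFT_SYMMETRIC  ->  ALGORITHM_LEFT_SYMMETRIC_6  ->  back
 *	ALGORITHM_PARITY_0        ->  ALGORITHM_PARITY_0_6        ->  back
 *	ALGORITHM_PARITY_N        ->  ALGORITHM_PARITY_N          ->  back
 */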
static struct md_personality raid6_personality =
{
	.name = "raid6",
	.level = 6,
	.owner = THIS_MODULE,
	.make_request = raid5_make_request,
	.run = raid5_run,
	.free = raid5_free,
	.status = raid5_status,
	.error_handler = raid5_error,
	.hot_add_disk = raid5_add_disk,
	.hot_remove_disk = raid5_remove_disk,
	.spare_active = raid5_spare_active,
	.sync_request = raid5_sync_request,
	.resize = raid5_resize,
	.size = raid5_size,
	.check_reshape = raid6_check_reshape,
	.start_reshape = raid5_start_reshape,
	.finish_reshape = raid5_finish_reshape,
	.quiesce = raid5_quiesce,
	.takeover = raid6_takeover,
	.congested = raid5_congested,
};
static struct md_personality raid5_personality =
{
	.name = "raid5",
	.level = 5,
	.owner = THIS_MODULE,
	.make_request = raid5_make_request,
	.run = raid5_run,
	.free = raid5_free,
	.status = raid5_status,
	.error_handler = raid5_error,
	.hot_add_disk = raid5_add_disk,
	.hot_remove_disk = raid5_remove_disk,
	.spare_active = raid5_spare_active,
	.sync_request = raid5_sync_request,
	.resize = raid5_resize,
	.size = raid5_size,
	.check_reshape = raid5_check_reshape,
	.start_reshape = raid5_start_reshape,
	.finish_reshape = raid5_finish_reshape,
	.quiesce = raid5_quiesce,
	.takeover = raid5_takeover,
	.congested = raid5_congested,
};

static struct md_personality raid4_personality =
{
	.name = "raid4",
	.level = 4,
	.owner = THIS_MODULE,
	.make_request = raid5_make_request,
	.run = raid5_run,
	.free = raid5_free,
	.status = raid5_status,
	.error_handler = raid5_error,
	.hot_add_disk = raid5_add_disk,
	.hot_remove_disk = raid5_remove_disk,
	.spare_active = raid5_spare_active,
	.sync_request = raid5_sync_request,
	.resize = raid5_resize,
	.size = raid5_size,
	.check_reshape = raid5_check_reshape,
	.start_reshape = raid5_start_reshape,
	.finish_reshape = raid5_finish_reshape,
	.quiesce = raid5_quiesce,
	.takeover = raid4_takeover,
	.congested = raid5_congested,
};

static int __init raid5_init(void)
{
	int ret;

	raid5_wq = alloc_workqueue("raid5wq",
		WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE|WQ_SYSFS, 0);
	if (!raid5_wq)
		return -ENOMEM;

	ret = cpuhp_setup_state_multi(CPUHP_MD_RAID5_PREPARE,
				      "md/raid5:prepare",
				      raid456_cpu_up_prepare,
				      raid456_cpu_dead);
	if (ret) {
		destroy_workqueue(raid5_wq);
		return ret;
	}
	register_md_personality(&raid6_personality);
	register_md_personality(&raid5_personality);
	register_md_personality(&raid4_personality);
	return 0;
}

static void raid5_exit(void)
{
	unregister_md_personality(&raid6_personality);
	unregister_md_personality(&raid5_personality);
	unregister_md_personality(&raid4_personality);
	cpuhp_remove_multi_state(CPUHP_MD_RAID5_PREPARE);
	destroy_workqueue(raid5_wq);
}

module_init(raid5_init);
module_exit(raid5_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD");
MODULE_ALIAS("md-personality-4"); /* RAID5 */
MODULE_ALIAS("md-raid5");
MODULE_ALIAS("md-raid4");
MODULE_ALIAS("md-level-5");
MODULE_ALIAS("md-level-4");
MODULE_ALIAS("md-personality-8"); /* RAID6 */
MODULE_ALIAS("md-raid6");
MODULE_ALIAS("md-level-6");

/* This used to be two separate modules, they were: */
MODULE_ALIAS("raid5");
MODULE_ALIAS("raid6");
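/*
 * Usage note (illustrative, not compiled): the override suggested by the
 * pr_info() in raid5_run() can be given at module load time, e.g.
 * "modprobe raid456 devices_handle_discard_safely=Y" or
 * raid456.devices_handle_discard_safely=Y on the kernel command line, or -
 * assuming the usual module-parameter sysfs layout - flipped at runtime:
 *
 *	echo Y > /sys/module/raid456/parameters/devices_handle_discard_safely
 */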