1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2012 Fusion-io All rights reserved. 4 * Copyright (C) 2012 Intel Corp. All rights reserved. 5 */ 6 7 #include <linux/sched.h> 8 #include <linux/bio.h> 9 #include <linux/slab.h> 10 #include <linux/blkdev.h> 11 #include <linux/raid/pq.h> 12 #include <linux/hash.h> 13 #include <linux/list_sort.h> 14 #include <linux/raid/xor.h> 15 #include <linux/mm.h> 16 #include "ctree.h" 17 #include "disk-io.h" 18 #include "volumes.h" 19 #include "raid56.h" 20 #include "async-thread.h" 21 22 /* set when additional merges to this rbio are not allowed */ 23 #define RBIO_RMW_LOCKED_BIT 1 24 25 /* 26 * set when this rbio is sitting in the hash, but it is just a cache 27 * of past RMW 28 */ 29 #define RBIO_CACHE_BIT 2 30 31 /* 32 * set when it is safe to trust the stripe_pages for caching 33 */ 34 #define RBIO_CACHE_READY_BIT 3 35 36 #define RBIO_CACHE_SIZE 1024 37 38 #define BTRFS_STRIPE_HASH_TABLE_BITS 11 39 40 /* Used by the raid56 code to lock stripes for read/modify/write */ 41 struct btrfs_stripe_hash { 42 struct list_head hash_list; 43 spinlock_t lock; 44 }; 45 46 /* Used by the raid56 code to lock stripes for read/modify/write */ 47 struct btrfs_stripe_hash_table { 48 struct list_head stripe_cache; 49 spinlock_t cache_lock; 50 int cache_size; 51 struct btrfs_stripe_hash table[]; 52 }; 53 54 enum btrfs_rbio_ops { 55 BTRFS_RBIO_WRITE, 56 BTRFS_RBIO_READ_REBUILD, 57 BTRFS_RBIO_PARITY_SCRUB, 58 BTRFS_RBIO_REBUILD_MISSING, 59 }; 60 61 struct btrfs_raid_bio { 62 struct btrfs_fs_info *fs_info; 63 struct btrfs_bio *bbio; 64 65 /* while we're doing rmw on a stripe 66 * we put it into a hash table so we can 67 * lock the stripe and merge more rbios 68 * into it. 69 */ 70 struct list_head hash_list; 71 72 /* 73 * LRU list for the stripe cache 74 */ 75 struct list_head stripe_cache; 76 77 /* 78 * for scheduling work in the helper threads 79 */ 80 struct btrfs_work work; 81 82 /* 83 * bio list and bio_list_lock are used 84 * to add more bios into the stripe 85 * in hopes of avoiding the full rmw 86 */ 87 struct bio_list bio_list; 88 spinlock_t bio_list_lock; 89 90 /* also protected by the bio_list_lock, the 91 * plug list is used by the plugging code 92 * to collect partial bios while plugged. The 93 * stripe locking code also uses it to hand off 94 * the stripe lock to the next pending IO 95 */ 96 struct list_head plug_list; 97 98 /* 99 * flags that tell us if it is safe to 100 * merge with this bio 101 */ 102 unsigned long flags; 103 104 /* size of each individual stripe on disk */ 105 int stripe_len; 106 107 /* number of data stripes (no p/q) */ 108 int nr_data; 109 110 int real_stripes; 111 112 int stripe_npages; 113 /* 114 * set if we're doing a parity rebuild 115 * for a read from higher up, which is handled 116 * differently from a parity rebuild as part of 117 * rmw 118 */ 119 enum btrfs_rbio_ops operation; 120 121 /* first bad stripe */ 122 int faila; 123 124 /* second bad stripe (for raid6 use) */ 125 int failb; 126 127 int scrubp; 128 /* 129 * number of pages needed to represent the full 130 * stripe 131 */ 132 int nr_pages; 133 134 /* 135 * size of all the bios in the bio_list. This 136 * helps us decide if the rbio maps to a full 137 * stripe or not 138 */ 139 int bio_list_bytes; 140 141 int generic_bio_cnt; 142 143 refcount_t refs; 144 145 atomic_t stripes_pending; 146 147 atomic_t error; 148 /* 149 * these are two arrays of pointers. 
We allocate the 150 * rbio big enough to hold them both and setup their 151 * locations when the rbio is allocated 152 */ 153 154 /* pointers to pages that we allocated for 155 * reading/writing stripes directly from the disk (including P/Q) 156 */ 157 struct page **stripe_pages; 158 159 /* 160 * pointers to the pages in the bio_list. Stored 161 * here for faster lookup 162 */ 163 struct page **bio_pages; 164 165 /* 166 * bitmap to record which horizontal stripe has data 167 */ 168 unsigned long *dbitmap; 169 170 /* allocated with real_stripes-many pointers for finish_*() calls */ 171 void **finish_pointers; 172 173 /* allocated with stripe_npages-many bits for finish_*() calls */ 174 unsigned long *finish_pbitmap; 175 }; 176 177 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio); 178 static noinline void finish_rmw(struct btrfs_raid_bio *rbio); 179 static void rmw_work(struct btrfs_work *work); 180 static void read_rebuild_work(struct btrfs_work *work); 181 static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio); 182 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed); 183 static void __free_raid_bio(struct btrfs_raid_bio *rbio); 184 static void index_rbio_pages(struct btrfs_raid_bio *rbio); 185 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio); 186 187 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, 188 int need_check); 189 static void scrub_parity_work(struct btrfs_work *work); 190 191 static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func) 192 { 193 btrfs_init_work(&rbio->work, work_func, NULL, NULL); 194 btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work); 195 } 196 197 /* 198 * the stripe hash table is used for locking, and to collect 199 * bios in hopes of making a full stripe 200 */ 201 int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info) 202 { 203 struct btrfs_stripe_hash_table *table; 204 struct btrfs_stripe_hash_table *x; 205 struct btrfs_stripe_hash *cur; 206 struct btrfs_stripe_hash *h; 207 int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS; 208 int i; 209 int table_size; 210 211 if (info->stripe_hash_table) 212 return 0; 213 214 /* 215 * The table is large, starting with order 4 and can go as high as 216 * order 7 in case lock debugging is turned on. 217 * 218 * Try harder to allocate and fallback to vmalloc to lower the chance 219 * of a failing mount. 220 */ 221 table_size = sizeof(*table) + sizeof(*h) * num_entries; 222 table = kvzalloc(table_size, GFP_KERNEL); 223 if (!table) 224 return -ENOMEM; 225 226 spin_lock_init(&table->cache_lock); 227 INIT_LIST_HEAD(&table->stripe_cache); 228 229 h = table->table; 230 231 for (i = 0; i < num_entries; i++) { 232 cur = h + i; 233 INIT_LIST_HEAD(&cur->hash_list); 234 spin_lock_init(&cur->lock); 235 } 236 237 x = cmpxchg(&info->stripe_hash_table, NULL, table); 238 if (x) 239 kvfree(x); 240 return 0; 241 } 242 243 /* 244 * caching an rbio means to copy anything from the 245 * bio_pages array into the stripe_pages array. We 246 * use the page uptodate bit in the stripe cache array 247 * to indicate if it has valid data 248 * 249 * once the caching is done, we set the cache ready 250 * bit. 
251 */ 252 static void cache_rbio_pages(struct btrfs_raid_bio *rbio) 253 { 254 int i; 255 char *s; 256 char *d; 257 int ret; 258 259 ret = alloc_rbio_pages(rbio); 260 if (ret) 261 return; 262 263 for (i = 0; i < rbio->nr_pages; i++) { 264 if (!rbio->bio_pages[i]) 265 continue; 266 267 s = kmap(rbio->bio_pages[i]); 268 d = kmap(rbio->stripe_pages[i]); 269 270 copy_page(d, s); 271 272 kunmap(rbio->bio_pages[i]); 273 kunmap(rbio->stripe_pages[i]); 274 SetPageUptodate(rbio->stripe_pages[i]); 275 } 276 set_bit(RBIO_CACHE_READY_BIT, &rbio->flags); 277 } 278 279 /* 280 * we hash on the first logical address of the stripe 281 */ 282 static int rbio_bucket(struct btrfs_raid_bio *rbio) 283 { 284 u64 num = rbio->bbio->raid_map[0]; 285 286 /* 287 * we shift down quite a bit. We're using byte 288 * addressing, and most of the lower bits are zeros. 289 * This tends to upset hash_64, and it consistently 290 * returns just one or two different values. 291 * 292 * shifting off the lower bits fixes things. 293 */ 294 return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS); 295 } 296 297 /* 298 * stealing an rbio means taking all the uptodate pages from the stripe 299 * array in the source rbio and putting them into the destination rbio 300 */ 301 static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest) 302 { 303 int i; 304 struct page *s; 305 struct page *d; 306 307 if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags)) 308 return; 309 310 for (i = 0; i < dest->nr_pages; i++) { 311 s = src->stripe_pages[i]; 312 if (!s || !PageUptodate(s)) { 313 continue; 314 } 315 316 d = dest->stripe_pages[i]; 317 if (d) 318 __free_page(d); 319 320 dest->stripe_pages[i] = s; 321 src->stripe_pages[i] = NULL; 322 } 323 } 324 325 /* 326 * merging means we take the bio_list from the victim and 327 * splice it into the destination. The victim should 328 * be discarded afterwards. 329 * 330 * must be called with dest->rbio_list_lock held 331 */ 332 static void merge_rbio(struct btrfs_raid_bio *dest, 333 struct btrfs_raid_bio *victim) 334 { 335 bio_list_merge(&dest->bio_list, &victim->bio_list); 336 dest->bio_list_bytes += victim->bio_list_bytes; 337 dest->generic_bio_cnt += victim->generic_bio_cnt; 338 bio_list_init(&victim->bio_list); 339 } 340 341 /* 342 * used to prune items that are in the cache. The caller 343 * must hold the hash table lock. 344 */ 345 static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio) 346 { 347 int bucket = rbio_bucket(rbio); 348 struct btrfs_stripe_hash_table *table; 349 struct btrfs_stripe_hash *h; 350 int freeit = 0; 351 352 /* 353 * check the bit again under the hash table lock. 354 */ 355 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags)) 356 return; 357 358 table = rbio->fs_info->stripe_hash_table; 359 h = table->table + bucket; 360 361 /* hold the lock for the bucket because we may be 362 * removing it from the hash table 363 */ 364 spin_lock(&h->lock); 365 366 /* 367 * hold the lock for the bio list because we need 368 * to make sure the bio list is empty 369 */ 370 spin_lock(&rbio->bio_list_lock); 371 372 if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) { 373 list_del_init(&rbio->stripe_cache); 374 table->cache_size -= 1; 375 freeit = 1; 376 377 /* if the bio list isn't empty, this rbio is 378 * still involved in an IO. We take it out 379 * of the cache list, and drop the ref that 380 * was held for the list. 
381 * 382 * If the bio_list was empty, we also remove 383 * the rbio from the hash_table, and drop 384 * the corresponding ref 385 */ 386 if (bio_list_empty(&rbio->bio_list)) { 387 if (!list_empty(&rbio->hash_list)) { 388 list_del_init(&rbio->hash_list); 389 refcount_dec(&rbio->refs); 390 BUG_ON(!list_empty(&rbio->plug_list)); 391 } 392 } 393 } 394 395 spin_unlock(&rbio->bio_list_lock); 396 spin_unlock(&h->lock); 397 398 if (freeit) 399 __free_raid_bio(rbio); 400 } 401 402 /* 403 * prune a given rbio from the cache 404 */ 405 static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio) 406 { 407 struct btrfs_stripe_hash_table *table; 408 unsigned long flags; 409 410 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags)) 411 return; 412 413 table = rbio->fs_info->stripe_hash_table; 414 415 spin_lock_irqsave(&table->cache_lock, flags); 416 __remove_rbio_from_cache(rbio); 417 spin_unlock_irqrestore(&table->cache_lock, flags); 418 } 419 420 /* 421 * remove everything in the cache 422 */ 423 static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info) 424 { 425 struct btrfs_stripe_hash_table *table; 426 unsigned long flags; 427 struct btrfs_raid_bio *rbio; 428 429 table = info->stripe_hash_table; 430 431 spin_lock_irqsave(&table->cache_lock, flags); 432 while (!list_empty(&table->stripe_cache)) { 433 rbio = list_entry(table->stripe_cache.next, 434 struct btrfs_raid_bio, 435 stripe_cache); 436 __remove_rbio_from_cache(rbio); 437 } 438 spin_unlock_irqrestore(&table->cache_lock, flags); 439 } 440 441 /* 442 * remove all cached entries and free the hash table 443 * used by unmount 444 */ 445 void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info) 446 { 447 if (!info->stripe_hash_table) 448 return; 449 btrfs_clear_rbio_cache(info); 450 kvfree(info->stripe_hash_table); 451 info->stripe_hash_table = NULL; 452 } 453 454 /* 455 * insert an rbio into the stripe cache. It 456 * must have already been prepared by calling 457 * cache_rbio_pages 458 * 459 * If this rbio was already cached, it gets 460 * moved to the front of the lru. 461 * 462 * If the size of the rbio cache is too big, we 463 * prune an item. 464 */ 465 static void cache_rbio(struct btrfs_raid_bio *rbio) 466 { 467 struct btrfs_stripe_hash_table *table; 468 unsigned long flags; 469 470 if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags)) 471 return; 472 473 table = rbio->fs_info->stripe_hash_table; 474 475 spin_lock_irqsave(&table->cache_lock, flags); 476 spin_lock(&rbio->bio_list_lock); 477 478 /* bump our ref if we were not in the list before */ 479 if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags)) 480 refcount_inc(&rbio->refs); 481 482 if (!list_empty(&rbio->stripe_cache)){ 483 list_move(&rbio->stripe_cache, &table->stripe_cache); 484 } else { 485 list_add(&rbio->stripe_cache, &table->stripe_cache); 486 table->cache_size += 1; 487 } 488 489 spin_unlock(&rbio->bio_list_lock); 490 491 if (table->cache_size > RBIO_CACHE_SIZE) { 492 struct btrfs_raid_bio *found; 493 494 found = list_entry(table->stripe_cache.prev, 495 struct btrfs_raid_bio, 496 stripe_cache); 497 498 if (found != rbio) 499 __remove_rbio_from_cache(found); 500 } 501 502 spin_unlock_irqrestore(&table->cache_lock, flags); 503 } 504 505 /* 506 * helper function to run the xor_blocks api. It is only 507 * able to do MAX_XOR_BLOCKS at a time, so we need to 508 * loop through. 
 */
static void run_xor(void **pages, int src_cnt, ssize_t len)
{
	int src_off = 0;
	int xor_src_cnt = 0;
	void *dest = pages[src_cnt];

	while (src_cnt > 0) {
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest, pages + src_off);

		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}
}

/*
 * Returns true if the bio list inside this rbio covers an entire stripe (no
 * rmw required).
 */
static int rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long flags;
	unsigned long size = rbio->bio_list_bytes;
	int ret = 1;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);
	if (size != rbio->nr_data * rbio->stripe_len)
		ret = 0;
	BUG_ON(size > rbio->nr_data * rbio->stripe_len);
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

	return ret;
}

/*
 * returns 1 if it is safe to merge two rbios together.
 * The merging is safe if the two rbios correspond to
 * the same stripe and if they are both going in the same
 * direction (read vs write), and if neither one is
 * locked for final IO.
 *
 * The caller is responsible for locking such that
 * rmw_locked is safe to test
 */
static int rbio_can_merge(struct btrfs_raid_bio *last,
			  struct btrfs_raid_bio *cur)
{
	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
		return 0;

	/*
	 * we can't merge with cached rbios, since the
	 * idea is that when we merge the destination
	 * rbio is going to run our IO for us. We can
	 * steal from cached rbios though, other functions
	 * handle that.
	 */
	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
	    test_bit(RBIO_CACHE_BIT, &cur->flags))
		return 0;

	if (last->bbio->raid_map[0] !=
	    cur->bbio->raid_map[0])
		return 0;

	/* we can't merge with different operations */
	if (last->operation != cur->operation)
		return 0;
	/*
	 * We've read the full stripe from the drive and we are going to
	 * check and repair the parity and write the new results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
586 */ 587 if (last->operation == BTRFS_RBIO_PARITY_SCRUB) 588 return 0; 589 590 if (last->operation == BTRFS_RBIO_REBUILD_MISSING) 591 return 0; 592 593 if (last->operation == BTRFS_RBIO_READ_REBUILD) { 594 int fa = last->faila; 595 int fb = last->failb; 596 int cur_fa = cur->faila; 597 int cur_fb = cur->failb; 598 599 if (last->faila >= last->failb) { 600 fa = last->failb; 601 fb = last->faila; 602 } 603 604 if (cur->faila >= cur->failb) { 605 cur_fa = cur->failb; 606 cur_fb = cur->faila; 607 } 608 609 if (fa != cur_fa || fb != cur_fb) 610 return 0; 611 } 612 return 1; 613 } 614 615 static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe, 616 int index) 617 { 618 return stripe * rbio->stripe_npages + index; 619 } 620 621 /* 622 * these are just the pages from the rbio array, not from anything 623 * the FS sent down to us 624 */ 625 static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe, 626 int index) 627 { 628 return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)]; 629 } 630 631 /* 632 * helper to index into the pstripe 633 */ 634 static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index) 635 { 636 return rbio_stripe_page(rbio, rbio->nr_data, index); 637 } 638 639 /* 640 * helper to index into the qstripe, returns null 641 * if there is no qstripe 642 */ 643 static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index) 644 { 645 if (rbio->nr_data + 1 == rbio->real_stripes) 646 return NULL; 647 return rbio_stripe_page(rbio, rbio->nr_data + 1, index); 648 } 649 650 /* 651 * The first stripe in the table for a logical address 652 * has the lock. rbios are added in one of three ways: 653 * 654 * 1) Nobody has the stripe locked yet. The rbio is given 655 * the lock and 0 is returned. The caller must start the IO 656 * themselves. 657 * 658 * 2) Someone has the stripe locked, but we're able to merge 659 * with the lock owner. The rbio is freed and the IO will 660 * start automatically along with the existing rbio. 1 is returned. 661 * 662 * 3) Someone has the stripe locked, but we're not able to merge. 663 * The rbio is added to the lock owner's plug list, or merged into 664 * an rbio already on the plug list. When the lock owner unlocks, 665 * the next rbio on the list is run and the IO is started automatically. 666 * 1 is returned 667 * 668 * If we return 0, the caller still owns the rbio and must continue with 669 * IO submission. If we return 1, the caller must assume the rbio has 670 * already been freed. 671 */ 672 static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio) 673 { 674 struct btrfs_stripe_hash *h; 675 struct btrfs_raid_bio *cur; 676 struct btrfs_raid_bio *pending; 677 unsigned long flags; 678 struct btrfs_raid_bio *freeit = NULL; 679 struct btrfs_raid_bio *cache_drop = NULL; 680 int ret = 0; 681 682 h = rbio->fs_info->stripe_hash_table->table + rbio_bucket(rbio); 683 684 spin_lock_irqsave(&h->lock, flags); 685 list_for_each_entry(cur, &h->hash_list, hash_list) { 686 if (cur->bbio->raid_map[0] != rbio->bbio->raid_map[0]) 687 continue; 688 689 spin_lock(&cur->bio_list_lock); 690 691 /* Can we steal this cached rbio's pages? 
*/ 692 if (bio_list_empty(&cur->bio_list) && 693 list_empty(&cur->plug_list) && 694 test_bit(RBIO_CACHE_BIT, &cur->flags) && 695 !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) { 696 list_del_init(&cur->hash_list); 697 refcount_dec(&cur->refs); 698 699 steal_rbio(cur, rbio); 700 cache_drop = cur; 701 spin_unlock(&cur->bio_list_lock); 702 703 goto lockit; 704 } 705 706 /* Can we merge into the lock owner? */ 707 if (rbio_can_merge(cur, rbio)) { 708 merge_rbio(cur, rbio); 709 spin_unlock(&cur->bio_list_lock); 710 freeit = rbio; 711 ret = 1; 712 goto out; 713 } 714 715 716 /* 717 * We couldn't merge with the running rbio, see if we can merge 718 * with the pending ones. We don't have to check for rmw_locked 719 * because there is no way they are inside finish_rmw right now 720 */ 721 list_for_each_entry(pending, &cur->plug_list, plug_list) { 722 if (rbio_can_merge(pending, rbio)) { 723 merge_rbio(pending, rbio); 724 spin_unlock(&cur->bio_list_lock); 725 freeit = rbio; 726 ret = 1; 727 goto out; 728 } 729 } 730 731 /* 732 * No merging, put us on the tail of the plug list, our rbio 733 * will be started with the currently running rbio unlocks 734 */ 735 list_add_tail(&rbio->plug_list, &cur->plug_list); 736 spin_unlock(&cur->bio_list_lock); 737 ret = 1; 738 goto out; 739 } 740 lockit: 741 refcount_inc(&rbio->refs); 742 list_add(&rbio->hash_list, &h->hash_list); 743 out: 744 spin_unlock_irqrestore(&h->lock, flags); 745 if (cache_drop) 746 remove_rbio_from_cache(cache_drop); 747 if (freeit) 748 __free_raid_bio(freeit); 749 return ret; 750 } 751 752 /* 753 * called as rmw or parity rebuild is completed. If the plug list has more 754 * rbios waiting for this stripe, the next one on the list will be started 755 */ 756 static noinline void unlock_stripe(struct btrfs_raid_bio *rbio) 757 { 758 int bucket; 759 struct btrfs_stripe_hash *h; 760 unsigned long flags; 761 int keep_cache = 0; 762 763 bucket = rbio_bucket(rbio); 764 h = rbio->fs_info->stripe_hash_table->table + bucket; 765 766 if (list_empty(&rbio->plug_list)) 767 cache_rbio(rbio); 768 769 spin_lock_irqsave(&h->lock, flags); 770 spin_lock(&rbio->bio_list_lock); 771 772 if (!list_empty(&rbio->hash_list)) { 773 /* 774 * if we're still cached and there is no other IO 775 * to perform, just leave this rbio here for others 776 * to steal from later 777 */ 778 if (list_empty(&rbio->plug_list) && 779 test_bit(RBIO_CACHE_BIT, &rbio->flags)) { 780 keep_cache = 1; 781 clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); 782 BUG_ON(!bio_list_empty(&rbio->bio_list)); 783 goto done; 784 } 785 786 list_del_init(&rbio->hash_list); 787 refcount_dec(&rbio->refs); 788 789 /* 790 * we use the plug list to hold all the rbios 791 * waiting for the chance to lock this stripe. 792 * hand the lock over to one of them. 
793 */ 794 if (!list_empty(&rbio->plug_list)) { 795 struct btrfs_raid_bio *next; 796 struct list_head *head = rbio->plug_list.next; 797 798 next = list_entry(head, struct btrfs_raid_bio, 799 plug_list); 800 801 list_del_init(&rbio->plug_list); 802 803 list_add(&next->hash_list, &h->hash_list); 804 refcount_inc(&next->refs); 805 spin_unlock(&rbio->bio_list_lock); 806 spin_unlock_irqrestore(&h->lock, flags); 807 808 if (next->operation == BTRFS_RBIO_READ_REBUILD) 809 start_async_work(next, read_rebuild_work); 810 else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) { 811 steal_rbio(rbio, next); 812 start_async_work(next, read_rebuild_work); 813 } else if (next->operation == BTRFS_RBIO_WRITE) { 814 steal_rbio(rbio, next); 815 start_async_work(next, rmw_work); 816 } else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) { 817 steal_rbio(rbio, next); 818 start_async_work(next, scrub_parity_work); 819 } 820 821 goto done_nolock; 822 } 823 } 824 done: 825 spin_unlock(&rbio->bio_list_lock); 826 spin_unlock_irqrestore(&h->lock, flags); 827 828 done_nolock: 829 if (!keep_cache) 830 remove_rbio_from_cache(rbio); 831 } 832 833 static void __free_raid_bio(struct btrfs_raid_bio *rbio) 834 { 835 int i; 836 837 if (!refcount_dec_and_test(&rbio->refs)) 838 return; 839 840 WARN_ON(!list_empty(&rbio->stripe_cache)); 841 WARN_ON(!list_empty(&rbio->hash_list)); 842 WARN_ON(!bio_list_empty(&rbio->bio_list)); 843 844 for (i = 0; i < rbio->nr_pages; i++) { 845 if (rbio->stripe_pages[i]) { 846 __free_page(rbio->stripe_pages[i]); 847 rbio->stripe_pages[i] = NULL; 848 } 849 } 850 851 btrfs_put_bbio(rbio->bbio); 852 kfree(rbio); 853 } 854 855 static void rbio_endio_bio_list(struct bio *cur, blk_status_t err) 856 { 857 struct bio *next; 858 859 while (cur) { 860 next = cur->bi_next; 861 cur->bi_next = NULL; 862 cur->bi_status = err; 863 bio_endio(cur); 864 cur = next; 865 } 866 } 867 868 /* 869 * this frees the rbio and runs through all the bios in the 870 * bio_list and calls end_io on them 871 */ 872 static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err) 873 { 874 struct bio *cur = bio_list_get(&rbio->bio_list); 875 struct bio *extra; 876 877 if (rbio->generic_bio_cnt) 878 btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt); 879 880 /* 881 * At this moment, rbio->bio_list is empty, however since rbio does not 882 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the 883 * hash list, rbio may be merged with others so that rbio->bio_list 884 * becomes non-empty. 885 * Once unlock_stripe() is done, rbio->bio_list will not be updated any 886 * more and we can call bio_endio() on all queued bios. 887 */ 888 unlock_stripe(rbio); 889 extra = bio_list_get(&rbio->bio_list); 890 __free_raid_bio(rbio); 891 892 rbio_endio_bio_list(cur, err); 893 if (extra) 894 rbio_endio_bio_list(extra, err); 895 } 896 897 /* 898 * end io function used by finish_rmw. When we finally 899 * get here, we've written a full stripe 900 */ 901 static void raid_write_end_io(struct bio *bio) 902 { 903 struct btrfs_raid_bio *rbio = bio->bi_private; 904 blk_status_t err = bio->bi_status; 905 int max_errors; 906 907 if (err) 908 fail_bio_stripe(rbio, bio); 909 910 bio_put(bio); 911 912 if (!atomic_dec_and_test(&rbio->stripes_pending)) 913 return; 914 915 err = BLK_STS_OK; 916 917 /* OK, we have read all the stripes we need to. */ 918 max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ? 
		     0 : rbio->bbio->max_errors;
	if (atomic_read(&rbio->error) > max_errors)
		err = BLK_STS_IOERR;

	rbio_orig_end_io(rbio, err);
}

/*
 * the read/modify/write code wants to use the original bio for
 * any pages it included, and then use the rbio for everything
 * else. This function decides if a given index (stripe number)
 * and page number in that stripe fall inside the original bio
 * or the rbio.
 *
 * if you set bio_list_only, you'll get a NULL back for any ranges
 * that are outside the bio_list
 *
 * This doesn't take any refs on anything, you get a bare page pointer
 * and the caller must bump refs as required.
 *
 * You must call index_rbio_pages once before you can trust
 * the answers from this function.
 */
static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
				 int index, int pagenr, int bio_list_only)
{
	int chunk_page;
	struct page *p = NULL;

	chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;

	spin_lock_irq(&rbio->bio_list_lock);
	p = rbio->bio_pages[chunk_page];
	spin_unlock_irq(&rbio->bio_list_lock);

	if (p || bio_list_only)
		return p;

	return rbio->stripe_pages[chunk_page];
}

/*
 * number of pages we need for the entire stripe across all the
 * drives
 */
static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
{
	return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
}

/*
 * allocation and initial setup for the btrfs_raid_bio. Note that
 * this does not allocate any pages for rbio->stripe_pages.
 */
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
					 struct btrfs_bio *bbio,
					 u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	int nr_data = 0;
	int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
	int num_pages = rbio_nr_pages(stripe_len, real_stripes);
	int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
	void *p;

	rbio = kzalloc(sizeof(*rbio) +
		       sizeof(*rbio->stripe_pages) * num_pages +
		       sizeof(*rbio->bio_pages) * num_pages +
		       sizeof(*rbio->finish_pointers) * real_stripes +
		       sizeof(*rbio->dbitmap) * BITS_TO_LONGS(stripe_npages) +
		       sizeof(*rbio->finish_pbitmap) *
				BITS_TO_LONGS(stripe_npages),
		       GFP_NOFS);
	if (!rbio)
		return ERR_PTR(-ENOMEM);

	bio_list_init(&rbio->bio_list);
	INIT_LIST_HEAD(&rbio->plug_list);
	spin_lock_init(&rbio->bio_list_lock);
	INIT_LIST_HEAD(&rbio->stripe_cache);
	INIT_LIST_HEAD(&rbio->hash_list);
	rbio->bbio = bbio;
	rbio->fs_info = fs_info;
	rbio->stripe_len = stripe_len;
	rbio->nr_pages = num_pages;
	rbio->real_stripes = real_stripes;
	rbio->stripe_npages = stripe_npages;
	rbio->faila = -1;
	rbio->failb = -1;
	refcount_set(&rbio->refs, 1);
	atomic_set(&rbio->error, 0);
	atomic_set(&rbio->stripes_pending, 0);

	/*
	 * the stripe_pages, bio_pages, etc arrays point to the extra
	 * memory we allocated past the end of the rbio
	 */
	p = rbio + 1;
#define CONSUME_ALLOC(ptr, count)	do {				\
		ptr = p;						\
		p = (unsigned char *)p + sizeof(*(ptr)) * (count);	\
	} while (0)
	CONSUME_ALLOC(rbio->stripe_pages, num_pages);
	CONSUME_ALLOC(rbio->bio_pages, num_pages);
	CONSUME_ALLOC(rbio->finish_pointers, real_stripes);
	CONSUME_ALLOC(rbio->dbitmap, BITS_TO_LONGS(stripe_npages));
	CONSUME_ALLOC(rbio->finish_pbitmap, BITS_TO_LONGS(stripe_npages));
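	/*
	 * A worked example of the layout carved out above (illustration
	 * only, assuming 4K pages, a 64K stripe_len and raid5 across 3
	 * devices): stripe_npages == 16 and num_pages == 48, so the
	 * trailing allocation becomes 48 stripe_pages pointers, 48
	 * bio_pages pointers, 3 finish_pointers, and one unsigned long
	 * each for dbitmap and finish_pbitmap. Data stripe 0 then owns
	 * page indexes 0-15, data stripe 1 owns 16-31 and the P stripe
	 * owns 32-47, which is the same flat indexing that
	 * rbio_stripe_page_index() and page_in_rbio() rely on.
	 */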
#undef CONSUME_ALLOC 1027 1028 if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5) 1029 nr_data = real_stripes - 1; 1030 else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) 1031 nr_data = real_stripes - 2; 1032 else 1033 BUG(); 1034 1035 rbio->nr_data = nr_data; 1036 return rbio; 1037 } 1038 1039 /* allocate pages for all the stripes in the bio, including parity */ 1040 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio) 1041 { 1042 int i; 1043 struct page *page; 1044 1045 for (i = 0; i < rbio->nr_pages; i++) { 1046 if (rbio->stripe_pages[i]) 1047 continue; 1048 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); 1049 if (!page) 1050 return -ENOMEM; 1051 rbio->stripe_pages[i] = page; 1052 } 1053 return 0; 1054 } 1055 1056 /* only allocate pages for p/q stripes */ 1057 static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio) 1058 { 1059 int i; 1060 struct page *page; 1061 1062 i = rbio_stripe_page_index(rbio, rbio->nr_data, 0); 1063 1064 for (; i < rbio->nr_pages; i++) { 1065 if (rbio->stripe_pages[i]) 1066 continue; 1067 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); 1068 if (!page) 1069 return -ENOMEM; 1070 rbio->stripe_pages[i] = page; 1071 } 1072 return 0; 1073 } 1074 1075 /* 1076 * add a single page from a specific stripe into our list of bios for IO 1077 * this will try to merge into existing bios if possible, and returns 1078 * zero if all went well. 1079 */ 1080 static int rbio_add_io_page(struct btrfs_raid_bio *rbio, 1081 struct bio_list *bio_list, 1082 struct page *page, 1083 int stripe_nr, 1084 unsigned long page_index, 1085 unsigned long bio_max_len) 1086 { 1087 struct bio *last = bio_list->tail; 1088 u64 last_end = 0; 1089 int ret; 1090 struct bio *bio; 1091 struct btrfs_bio_stripe *stripe; 1092 u64 disk_start; 1093 1094 stripe = &rbio->bbio->stripes[stripe_nr]; 1095 disk_start = stripe->physical + (page_index << PAGE_SHIFT); 1096 1097 /* if the device is missing, just fail this stripe */ 1098 if (!stripe->dev->bdev) 1099 return fail_rbio_index(rbio, stripe_nr); 1100 1101 /* see if we can add this page onto our existing bio */ 1102 if (last) { 1103 last_end = (u64)last->bi_iter.bi_sector << 9; 1104 last_end += last->bi_iter.bi_size; 1105 1106 /* 1107 * we can't merge these if they are from different 1108 * devices or if they are not contiguous 1109 */ 1110 if (last_end == disk_start && stripe->dev->bdev && 1111 !last->bi_status && 1112 last->bi_disk == stripe->dev->bdev->bd_disk && 1113 last->bi_partno == stripe->dev->bdev->bd_partno) { 1114 ret = bio_add_page(last, page, PAGE_SIZE, 0); 1115 if (ret == PAGE_SIZE) 1116 return 0; 1117 } 1118 } 1119 1120 /* put a new bio on the list */ 1121 bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1); 1122 bio->bi_iter.bi_size = 0; 1123 bio_set_dev(bio, stripe->dev->bdev); 1124 bio->bi_iter.bi_sector = disk_start >> 9; 1125 1126 bio_add_page(bio, page, PAGE_SIZE, 0); 1127 bio_list_add(bio_list, bio); 1128 return 0; 1129 } 1130 1131 /* 1132 * while we're doing the read/modify/write cycle, we could 1133 * have errors in reading pages off the disk. This checks 1134 * for errors and if we're not able to read the page it'll 1135 * trigger parity reconstruction. 
The rmw will be finished 1136 * after we've reconstructed the failed stripes 1137 */ 1138 static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio) 1139 { 1140 if (rbio->faila >= 0 || rbio->failb >= 0) { 1141 BUG_ON(rbio->faila == rbio->real_stripes - 1); 1142 __raid56_parity_recover(rbio); 1143 } else { 1144 finish_rmw(rbio); 1145 } 1146 } 1147 1148 /* 1149 * helper function to walk our bio list and populate the bio_pages array with 1150 * the result. This seems expensive, but it is faster than constantly 1151 * searching through the bio list as we setup the IO in finish_rmw or stripe 1152 * reconstruction. 1153 * 1154 * This must be called before you trust the answers from page_in_rbio 1155 */ 1156 static void index_rbio_pages(struct btrfs_raid_bio *rbio) 1157 { 1158 struct bio *bio; 1159 u64 start; 1160 unsigned long stripe_offset; 1161 unsigned long page_index; 1162 1163 spin_lock_irq(&rbio->bio_list_lock); 1164 bio_list_for_each(bio, &rbio->bio_list) { 1165 struct bio_vec bvec; 1166 struct bvec_iter iter; 1167 int i = 0; 1168 1169 start = (u64)bio->bi_iter.bi_sector << 9; 1170 stripe_offset = start - rbio->bbio->raid_map[0]; 1171 page_index = stripe_offset >> PAGE_SHIFT; 1172 1173 if (bio_flagged(bio, BIO_CLONED)) 1174 bio->bi_iter = btrfs_io_bio(bio)->iter; 1175 1176 bio_for_each_segment(bvec, bio, iter) { 1177 rbio->bio_pages[page_index + i] = bvec.bv_page; 1178 i++; 1179 } 1180 } 1181 spin_unlock_irq(&rbio->bio_list_lock); 1182 } 1183 1184 /* 1185 * this is called from one of two situations. We either 1186 * have a full stripe from the higher layers, or we've read all 1187 * the missing bits off disk. 1188 * 1189 * This will calculate the parity and then send down any 1190 * changed blocks. 1191 */ 1192 static noinline void finish_rmw(struct btrfs_raid_bio *rbio) 1193 { 1194 struct btrfs_bio *bbio = rbio->bbio; 1195 void **pointers = rbio->finish_pointers; 1196 int nr_data = rbio->nr_data; 1197 int stripe; 1198 int pagenr; 1199 int p_stripe = -1; 1200 int q_stripe = -1; 1201 struct bio_list bio_list; 1202 struct bio *bio; 1203 int ret; 1204 1205 bio_list_init(&bio_list); 1206 1207 if (rbio->real_stripes - rbio->nr_data == 1) { 1208 p_stripe = rbio->real_stripes - 1; 1209 } else if (rbio->real_stripes - rbio->nr_data == 2) { 1210 p_stripe = rbio->real_stripes - 2; 1211 q_stripe = rbio->real_stripes - 1; 1212 } else { 1213 BUG(); 1214 } 1215 1216 /* at this point we either have a full stripe, 1217 * or we've read the full stripe from the drive. 1218 * recalculate the parity and write the new results. 1219 * 1220 * We're not allowed to add any new bios to the 1221 * bio list here, anyone else that wants to 1222 * change this stripe needs to do their own rmw. 1223 */ 1224 spin_lock_irq(&rbio->bio_list_lock); 1225 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); 1226 spin_unlock_irq(&rbio->bio_list_lock); 1227 1228 atomic_set(&rbio->error, 0); 1229 1230 /* 1231 * now that we've set rmw_locked, run through the 1232 * bio list one last time and map the page pointers 1233 * 1234 * We don't cache full rbios because we're assuming 1235 * the higher layers are unlikely to use this area of 1236 * the disk again soon. If they do use it again, 1237 * hopefully they will send another full bio. 
1238 */ 1239 index_rbio_pages(rbio); 1240 if (!rbio_is_full(rbio)) 1241 cache_rbio_pages(rbio); 1242 else 1243 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); 1244 1245 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { 1246 struct page *p; 1247 /* first collect one page from each data stripe */ 1248 for (stripe = 0; stripe < nr_data; stripe++) { 1249 p = page_in_rbio(rbio, stripe, pagenr, 0); 1250 pointers[stripe] = kmap(p); 1251 } 1252 1253 /* then add the parity stripe */ 1254 p = rbio_pstripe_page(rbio, pagenr); 1255 SetPageUptodate(p); 1256 pointers[stripe++] = kmap(p); 1257 1258 if (q_stripe != -1) { 1259 1260 /* 1261 * raid6, add the qstripe and call the 1262 * library function to fill in our p/q 1263 */ 1264 p = rbio_qstripe_page(rbio, pagenr); 1265 SetPageUptodate(p); 1266 pointers[stripe++] = kmap(p); 1267 1268 raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE, 1269 pointers); 1270 } else { 1271 /* raid5 */ 1272 copy_page(pointers[nr_data], pointers[0]); 1273 run_xor(pointers + 1, nr_data - 1, PAGE_SIZE); 1274 } 1275 1276 1277 for (stripe = 0; stripe < rbio->real_stripes; stripe++) 1278 kunmap(page_in_rbio(rbio, stripe, pagenr, 0)); 1279 } 1280 1281 /* 1282 * time to start writing. Make bios for everything from the 1283 * higher layers (the bio_list in our rbio) and our p/q. Ignore 1284 * everything else. 1285 */ 1286 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { 1287 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { 1288 struct page *page; 1289 if (stripe < rbio->nr_data) { 1290 page = page_in_rbio(rbio, stripe, pagenr, 1); 1291 if (!page) 1292 continue; 1293 } else { 1294 page = rbio_stripe_page(rbio, stripe, pagenr); 1295 } 1296 1297 ret = rbio_add_io_page(rbio, &bio_list, 1298 page, stripe, pagenr, rbio->stripe_len); 1299 if (ret) 1300 goto cleanup; 1301 } 1302 } 1303 1304 if (likely(!bbio->num_tgtdevs)) 1305 goto write_data; 1306 1307 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { 1308 if (!bbio->tgtdev_map[stripe]) 1309 continue; 1310 1311 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { 1312 struct page *page; 1313 if (stripe < rbio->nr_data) { 1314 page = page_in_rbio(rbio, stripe, pagenr, 1); 1315 if (!page) 1316 continue; 1317 } else { 1318 page = rbio_stripe_page(rbio, stripe, pagenr); 1319 } 1320 1321 ret = rbio_add_io_page(rbio, &bio_list, page, 1322 rbio->bbio->tgtdev_map[stripe], 1323 pagenr, rbio->stripe_len); 1324 if (ret) 1325 goto cleanup; 1326 } 1327 } 1328 1329 write_data: 1330 atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list)); 1331 BUG_ON(atomic_read(&rbio->stripes_pending) == 0); 1332 1333 while (1) { 1334 bio = bio_list_pop(&bio_list); 1335 if (!bio) 1336 break; 1337 1338 bio->bi_private = rbio; 1339 bio->bi_end_io = raid_write_end_io; 1340 bio->bi_opf = REQ_OP_WRITE; 1341 1342 submit_bio(bio); 1343 } 1344 return; 1345 1346 cleanup: 1347 rbio_orig_end_io(rbio, BLK_STS_IOERR); 1348 1349 while ((bio = bio_list_pop(&bio_list))) 1350 bio_put(bio); 1351 } 1352 1353 /* 1354 * helper to find the stripe number for a given bio. Used to figure out which 1355 * stripe has failed. This expects the bio to correspond to a physical disk, 1356 * so it looks up based on physical sector numbers. 
1357 */ 1358 static int find_bio_stripe(struct btrfs_raid_bio *rbio, 1359 struct bio *bio) 1360 { 1361 u64 physical = bio->bi_iter.bi_sector; 1362 u64 stripe_start; 1363 int i; 1364 struct btrfs_bio_stripe *stripe; 1365 1366 physical <<= 9; 1367 1368 for (i = 0; i < rbio->bbio->num_stripes; i++) { 1369 stripe = &rbio->bbio->stripes[i]; 1370 stripe_start = stripe->physical; 1371 if (physical >= stripe_start && 1372 physical < stripe_start + rbio->stripe_len && 1373 stripe->dev->bdev && 1374 bio->bi_disk == stripe->dev->bdev->bd_disk && 1375 bio->bi_partno == stripe->dev->bdev->bd_partno) { 1376 return i; 1377 } 1378 } 1379 return -1; 1380 } 1381 1382 /* 1383 * helper to find the stripe number for a given 1384 * bio (before mapping). Used to figure out which stripe has 1385 * failed. This looks up based on logical block numbers. 1386 */ 1387 static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio, 1388 struct bio *bio) 1389 { 1390 u64 logical = bio->bi_iter.bi_sector; 1391 u64 stripe_start; 1392 int i; 1393 1394 logical <<= 9; 1395 1396 for (i = 0; i < rbio->nr_data; i++) { 1397 stripe_start = rbio->bbio->raid_map[i]; 1398 if (logical >= stripe_start && 1399 logical < stripe_start + rbio->stripe_len) { 1400 return i; 1401 } 1402 } 1403 return -1; 1404 } 1405 1406 /* 1407 * returns -EIO if we had too many failures 1408 */ 1409 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed) 1410 { 1411 unsigned long flags; 1412 int ret = 0; 1413 1414 spin_lock_irqsave(&rbio->bio_list_lock, flags); 1415 1416 /* we already know this stripe is bad, move on */ 1417 if (rbio->faila == failed || rbio->failb == failed) 1418 goto out; 1419 1420 if (rbio->faila == -1) { 1421 /* first failure on this rbio */ 1422 rbio->faila = failed; 1423 atomic_inc(&rbio->error); 1424 } else if (rbio->failb == -1) { 1425 /* second failure on this rbio */ 1426 rbio->failb = failed; 1427 atomic_inc(&rbio->error); 1428 } else { 1429 ret = -EIO; 1430 } 1431 out: 1432 spin_unlock_irqrestore(&rbio->bio_list_lock, flags); 1433 1434 return ret; 1435 } 1436 1437 /* 1438 * helper to fail a stripe based on a physical disk 1439 * bio. 1440 */ 1441 static int fail_bio_stripe(struct btrfs_raid_bio *rbio, 1442 struct bio *bio) 1443 { 1444 int failed = find_bio_stripe(rbio, bio); 1445 1446 if (failed < 0) 1447 return -EIO; 1448 1449 return fail_rbio_index(rbio, failed); 1450 } 1451 1452 /* 1453 * this sets each page in the bio uptodate. It should only be used on private 1454 * rbio pages, nothing that comes in from the higher layers 1455 */ 1456 static void set_bio_pages_uptodate(struct bio *bio) 1457 { 1458 struct bio_vec *bvec; 1459 struct bvec_iter_all iter_all; 1460 1461 ASSERT(!bio_flagged(bio, BIO_CLONED)); 1462 1463 bio_for_each_segment_all(bvec, bio, iter_all) 1464 SetPageUptodate(bvec->bv_page); 1465 } 1466 1467 /* 1468 * end io for the read phase of the rmw cycle. All the bios here are physical 1469 * stripe bios we've read from the disk so we can recalculate the parity of the 1470 * stripe. 
1471 * 1472 * This will usually kick off finish_rmw once all the bios are read in, but it 1473 * may trigger parity reconstruction if we had any errors along the way 1474 */ 1475 static void raid_rmw_end_io(struct bio *bio) 1476 { 1477 struct btrfs_raid_bio *rbio = bio->bi_private; 1478 1479 if (bio->bi_status) 1480 fail_bio_stripe(rbio, bio); 1481 else 1482 set_bio_pages_uptodate(bio); 1483 1484 bio_put(bio); 1485 1486 if (!atomic_dec_and_test(&rbio->stripes_pending)) 1487 return; 1488 1489 if (atomic_read(&rbio->error) > rbio->bbio->max_errors) 1490 goto cleanup; 1491 1492 /* 1493 * this will normally call finish_rmw to start our write 1494 * but if there are any failed stripes we'll reconstruct 1495 * from parity first 1496 */ 1497 validate_rbio_for_rmw(rbio); 1498 return; 1499 1500 cleanup: 1501 1502 rbio_orig_end_io(rbio, BLK_STS_IOERR); 1503 } 1504 1505 /* 1506 * the stripe must be locked by the caller. It will 1507 * unlock after all the writes are done 1508 */ 1509 static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio) 1510 { 1511 int bios_to_read = 0; 1512 struct bio_list bio_list; 1513 int ret; 1514 int pagenr; 1515 int stripe; 1516 struct bio *bio; 1517 1518 bio_list_init(&bio_list); 1519 1520 ret = alloc_rbio_pages(rbio); 1521 if (ret) 1522 goto cleanup; 1523 1524 index_rbio_pages(rbio); 1525 1526 atomic_set(&rbio->error, 0); 1527 /* 1528 * build a list of bios to read all the missing parts of this 1529 * stripe 1530 */ 1531 for (stripe = 0; stripe < rbio->nr_data; stripe++) { 1532 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { 1533 struct page *page; 1534 /* 1535 * we want to find all the pages missing from 1536 * the rbio and read them from the disk. If 1537 * page_in_rbio finds a page in the bio list 1538 * we don't need to read it off the stripe. 1539 */ 1540 page = page_in_rbio(rbio, stripe, pagenr, 1); 1541 if (page) 1542 continue; 1543 1544 page = rbio_stripe_page(rbio, stripe, pagenr); 1545 /* 1546 * the bio cache may have handed us an uptodate 1547 * page. If so, be happy and use it 1548 */ 1549 if (PageUptodate(page)) 1550 continue; 1551 1552 ret = rbio_add_io_page(rbio, &bio_list, page, 1553 stripe, pagenr, rbio->stripe_len); 1554 if (ret) 1555 goto cleanup; 1556 } 1557 } 1558 1559 bios_to_read = bio_list_size(&bio_list); 1560 if (!bios_to_read) { 1561 /* 1562 * this can happen if others have merged with 1563 * us, it means there is nothing left to read. 1564 * But if there are missing devices it may not be 1565 * safe to do the full stripe write yet. 1566 */ 1567 goto finish; 1568 } 1569 1570 /* 1571 * the bbio may be freed once we submit the last bio. Make sure 1572 * not to touch it after that 1573 */ 1574 atomic_set(&rbio->stripes_pending, bios_to_read); 1575 while (1) { 1576 bio = bio_list_pop(&bio_list); 1577 if (!bio) 1578 break; 1579 1580 bio->bi_private = rbio; 1581 bio->bi_end_io = raid_rmw_end_io; 1582 bio->bi_opf = REQ_OP_READ; 1583 1584 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56); 1585 1586 submit_bio(bio); 1587 } 1588 /* the actual write will happen once the reads are done */ 1589 return 0; 1590 1591 cleanup: 1592 rbio_orig_end_io(rbio, BLK_STS_IOERR); 1593 1594 while ((bio = bio_list_pop(&bio_list))) 1595 bio_put(bio); 1596 1597 return -EIO; 1598 1599 finish: 1600 validate_rbio_for_rmw(rbio); 1601 return 0; 1602 } 1603 1604 /* 1605 * if the upper layers pass in a full stripe, we thank them by only allocating 1606 * enough pages to hold the parity, and sending it all down quickly. 
 */
static int full_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = alloc_rbio_parity_pages(rbio);
	if (ret) {
		__free_raid_bio(rbio);
		return ret;
	}

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		finish_rmw(rbio);
	return 0;
}

/*
 * partial stripe writes get handed over to async helpers.
 * We're really hoping to merge a few more writes into this
 * rbio before calculating new parity
 */
static int partial_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		start_async_work(rbio, rmw_work);
	return 0;
}

/*
 * sometimes while we were reading from the drive to
 * recalculate parity, enough new bios come in to create
 * a full stripe. So we do a check here to see if we can
 * go directly to finish_rmw
 */
static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
{
	/* head off into rmw land if we don't have a full stripe */
	if (!rbio_is_full(rbio))
		return partial_stripe_write(rbio);
	return full_stripe_write(rbio);
}

/*
 * We use plugging callbacks to collect full stripes.
 * Any time we get a partial stripe write while plugged
 * we collect it into a list. When the unplug comes down,
 * we sort the list by logical block number and merge
 * everything we can into the same rbios
 */
struct btrfs_plug_cb {
	struct blk_plug_cb cb;
	struct btrfs_fs_info *info;
	struct list_head rbio_list;
	struct btrfs_work work;
};

/*
 * rbios on the plug list are sorted for easier merging.
 */
static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
						 plug_list);
	struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
						 plug_list);
	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;

	if (a_sector < b_sector)
		return -1;
	if (a_sector > b_sector)
		return 1;
	return 0;
}

static void run_plug(struct btrfs_plug_cb *plug)
{
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *last = NULL;

	/*
	 * sort our plug list then try to merge
	 * everything we can in hopes of creating full
	 * stripes.
1695 */ 1696 list_sort(NULL, &plug->rbio_list, plug_cmp); 1697 while (!list_empty(&plug->rbio_list)) { 1698 cur = list_entry(plug->rbio_list.next, 1699 struct btrfs_raid_bio, plug_list); 1700 list_del_init(&cur->plug_list); 1701 1702 if (rbio_is_full(cur)) { 1703 int ret; 1704 1705 /* we have a full stripe, send it down */ 1706 ret = full_stripe_write(cur); 1707 BUG_ON(ret); 1708 continue; 1709 } 1710 if (last) { 1711 if (rbio_can_merge(last, cur)) { 1712 merge_rbio(last, cur); 1713 __free_raid_bio(cur); 1714 continue; 1715 1716 } 1717 __raid56_parity_write(last); 1718 } 1719 last = cur; 1720 } 1721 if (last) { 1722 __raid56_parity_write(last); 1723 } 1724 kfree(plug); 1725 } 1726 1727 /* 1728 * if the unplug comes from schedule, we have to push the 1729 * work off to a helper thread 1730 */ 1731 static void unplug_work(struct btrfs_work *work) 1732 { 1733 struct btrfs_plug_cb *plug; 1734 plug = container_of(work, struct btrfs_plug_cb, work); 1735 run_plug(plug); 1736 } 1737 1738 static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule) 1739 { 1740 struct btrfs_plug_cb *plug; 1741 plug = container_of(cb, struct btrfs_plug_cb, cb); 1742 1743 if (from_schedule) { 1744 btrfs_init_work(&plug->work, unplug_work, NULL, NULL); 1745 btrfs_queue_work(plug->info->rmw_workers, 1746 &plug->work); 1747 return; 1748 } 1749 run_plug(plug); 1750 } 1751 1752 /* 1753 * our main entry point for writes from the rest of the FS. 1754 */ 1755 int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio, 1756 struct btrfs_bio *bbio, u64 stripe_len) 1757 { 1758 struct btrfs_raid_bio *rbio; 1759 struct btrfs_plug_cb *plug = NULL; 1760 struct blk_plug_cb *cb; 1761 int ret; 1762 1763 rbio = alloc_rbio(fs_info, bbio, stripe_len); 1764 if (IS_ERR(rbio)) { 1765 btrfs_put_bbio(bbio); 1766 return PTR_ERR(rbio); 1767 } 1768 bio_list_add(&rbio->bio_list, bio); 1769 rbio->bio_list_bytes = bio->bi_iter.bi_size; 1770 rbio->operation = BTRFS_RBIO_WRITE; 1771 1772 btrfs_bio_counter_inc_noblocked(fs_info); 1773 rbio->generic_bio_cnt = 1; 1774 1775 /* 1776 * don't plug on full rbios, just get them out the door 1777 * as quickly as we can 1778 */ 1779 if (rbio_is_full(rbio)) { 1780 ret = full_stripe_write(rbio); 1781 if (ret) 1782 btrfs_bio_counter_dec(fs_info); 1783 return ret; 1784 } 1785 1786 cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug)); 1787 if (cb) { 1788 plug = container_of(cb, struct btrfs_plug_cb, cb); 1789 if (!plug->info) { 1790 plug->info = fs_info; 1791 INIT_LIST_HEAD(&plug->rbio_list); 1792 } 1793 list_add_tail(&rbio->plug_list, &plug->rbio_list); 1794 ret = 0; 1795 } else { 1796 ret = __raid56_parity_write(rbio); 1797 if (ret) 1798 btrfs_bio_counter_dec(fs_info); 1799 } 1800 return ret; 1801 } 1802 1803 /* 1804 * all parity reconstruction happens here. We've read in everything 1805 * we can find from the drives and this does the heavy lifting of 1806 * sorting the good from the bad. 
1807 */ 1808 static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) 1809 { 1810 int pagenr, stripe; 1811 void **pointers; 1812 int faila = -1, failb = -1; 1813 struct page *page; 1814 blk_status_t err; 1815 int i; 1816 1817 pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS); 1818 if (!pointers) { 1819 err = BLK_STS_RESOURCE; 1820 goto cleanup_io; 1821 } 1822 1823 faila = rbio->faila; 1824 failb = rbio->failb; 1825 1826 if (rbio->operation == BTRFS_RBIO_READ_REBUILD || 1827 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) { 1828 spin_lock_irq(&rbio->bio_list_lock); 1829 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); 1830 spin_unlock_irq(&rbio->bio_list_lock); 1831 } 1832 1833 index_rbio_pages(rbio); 1834 1835 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { 1836 /* 1837 * Now we just use bitmap to mark the horizontal stripes in 1838 * which we have data when doing parity scrub. 1839 */ 1840 if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB && 1841 !test_bit(pagenr, rbio->dbitmap)) 1842 continue; 1843 1844 /* setup our array of pointers with pages 1845 * from each stripe 1846 */ 1847 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { 1848 /* 1849 * if we're rebuilding a read, we have to use 1850 * pages from the bio list 1851 */ 1852 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD || 1853 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) && 1854 (stripe == faila || stripe == failb)) { 1855 page = page_in_rbio(rbio, stripe, pagenr, 0); 1856 } else { 1857 page = rbio_stripe_page(rbio, stripe, pagenr); 1858 } 1859 pointers[stripe] = kmap(page); 1860 } 1861 1862 /* all raid6 handling here */ 1863 if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) { 1864 /* 1865 * single failure, rebuild from parity raid5 1866 * style 1867 */ 1868 if (failb < 0) { 1869 if (faila == rbio->nr_data) { 1870 /* 1871 * Just the P stripe has failed, without 1872 * a bad data or Q stripe. 1873 * TODO, we should redo the xor here. 1874 */ 1875 err = BLK_STS_IOERR; 1876 goto cleanup; 1877 } 1878 /* 1879 * a single failure in raid6 is rebuilt 1880 * in the pstripe code below 1881 */ 1882 goto pstripe; 1883 } 1884 1885 /* make sure our ps and qs are in order */ 1886 if (faila > failb) { 1887 int tmp = failb; 1888 failb = faila; 1889 faila = tmp; 1890 } 1891 1892 /* if the q stripe is failed, do a pstripe reconstruction 1893 * from the xors. 1894 * If both the q stripe and the P stripe are failed, we're 1895 * here due to a crc mismatch and we can't give them the 1896 * data they want 1897 */ 1898 if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) { 1899 if (rbio->bbio->raid_map[faila] == 1900 RAID5_P_STRIPE) { 1901 err = BLK_STS_IOERR; 1902 goto cleanup; 1903 } 1904 /* 1905 * otherwise we have one bad data stripe and 1906 * a good P stripe. raid5! 
1907 */ 1908 goto pstripe; 1909 } 1910 1911 if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) { 1912 raid6_datap_recov(rbio->real_stripes, 1913 PAGE_SIZE, faila, pointers); 1914 } else { 1915 raid6_2data_recov(rbio->real_stripes, 1916 PAGE_SIZE, faila, failb, 1917 pointers); 1918 } 1919 } else { 1920 void *p; 1921 1922 /* rebuild from P stripe here (raid5 or raid6) */ 1923 BUG_ON(failb != -1); 1924 pstripe: 1925 /* Copy parity block into failed block to start with */ 1926 copy_page(pointers[faila], pointers[rbio->nr_data]); 1927 1928 /* rearrange the pointer array */ 1929 p = pointers[faila]; 1930 for (stripe = faila; stripe < rbio->nr_data - 1; stripe++) 1931 pointers[stripe] = pointers[stripe + 1]; 1932 pointers[rbio->nr_data - 1] = p; 1933 1934 /* xor in the rest */ 1935 run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE); 1936 } 1937 /* if we're doing this rebuild as part of an rmw, go through 1938 * and set all of our private rbio pages in the 1939 * failed stripes as uptodate. This way finish_rmw will 1940 * know they can be trusted. If this was a read reconstruction, 1941 * other endio functions will fiddle the uptodate bits 1942 */ 1943 if (rbio->operation == BTRFS_RBIO_WRITE) { 1944 for (i = 0; i < rbio->stripe_npages; i++) { 1945 if (faila != -1) { 1946 page = rbio_stripe_page(rbio, faila, i); 1947 SetPageUptodate(page); 1948 } 1949 if (failb != -1) { 1950 page = rbio_stripe_page(rbio, failb, i); 1951 SetPageUptodate(page); 1952 } 1953 } 1954 } 1955 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { 1956 /* 1957 * if we're rebuilding a read, we have to use 1958 * pages from the bio list 1959 */ 1960 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD || 1961 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) && 1962 (stripe == faila || stripe == failb)) { 1963 page = page_in_rbio(rbio, stripe, pagenr, 0); 1964 } else { 1965 page = rbio_stripe_page(rbio, stripe, pagenr); 1966 } 1967 kunmap(page); 1968 } 1969 } 1970 1971 err = BLK_STS_OK; 1972 cleanup: 1973 kfree(pointers); 1974 1975 cleanup_io: 1976 /* 1977 * Similar to READ_REBUILD, REBUILD_MISSING at this point also has a 1978 * valid rbio which is consistent with ondisk content, thus such a 1979 * valid rbio can be cached to avoid further disk reads. 1980 */ 1981 if (rbio->operation == BTRFS_RBIO_READ_REBUILD || 1982 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) { 1983 /* 1984 * - In case of two failures, where rbio->failb != -1: 1985 * 1986 * Do not cache this rbio since the above read reconstruction 1987 * (raid6_datap_recov() or raid6_2data_recov()) may have 1988 * changed some content of stripes which are not identical to 1989 * on-disk content any more, otherwise, a later write/recover 1990 * may steal stripe_pages from this rbio and end up with 1991 * corruptions or rebuild failures. 1992 * 1993 * - In case of single failure, where rbio->failb == -1: 1994 * 1995 * Cache this rbio iff the above read reconstruction is 1996 * executed without problems. 
1997 */ 1998 if (err == BLK_STS_OK && rbio->failb < 0) 1999 cache_rbio_pages(rbio); 2000 else 2001 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); 2002 2003 rbio_orig_end_io(rbio, err); 2004 } else if (err == BLK_STS_OK) { 2005 rbio->faila = -1; 2006 rbio->failb = -1; 2007 2008 if (rbio->operation == BTRFS_RBIO_WRITE) 2009 finish_rmw(rbio); 2010 else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) 2011 finish_parity_scrub(rbio, 0); 2012 else 2013 BUG(); 2014 } else { 2015 rbio_orig_end_io(rbio, err); 2016 } 2017 } 2018 2019 /* 2020 * This is called only for stripes we've read from disk to 2021 * reconstruct the parity. 2022 */ 2023 static void raid_recover_end_io(struct bio *bio) 2024 { 2025 struct btrfs_raid_bio *rbio = bio->bi_private; 2026 2027 /* 2028 * we only read stripe pages off the disk, set them 2029 * up to date if there were no errors 2030 */ 2031 if (bio->bi_status) 2032 fail_bio_stripe(rbio, bio); 2033 else 2034 set_bio_pages_uptodate(bio); 2035 bio_put(bio); 2036 2037 if (!atomic_dec_and_test(&rbio->stripes_pending)) 2038 return; 2039 2040 if (atomic_read(&rbio->error) > rbio->bbio->max_errors) 2041 rbio_orig_end_io(rbio, BLK_STS_IOERR); 2042 else 2043 __raid_recover_end_io(rbio); 2044 } 2045 2046 /* 2047 * reads everything we need off the disk to reconstruct 2048 * the parity. endio handlers trigger final reconstruction 2049 * when the IO is done. 2050 * 2051 * This is used both for reads from the higher layers and for 2052 * parity construction required to finish a rmw cycle. 2053 */ 2054 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio) 2055 { 2056 int bios_to_read = 0; 2057 struct bio_list bio_list; 2058 int ret; 2059 int pagenr; 2060 int stripe; 2061 struct bio *bio; 2062 2063 bio_list_init(&bio_list); 2064 2065 ret = alloc_rbio_pages(rbio); 2066 if (ret) 2067 goto cleanup; 2068 2069 atomic_set(&rbio->error, 0); 2070 2071 /* 2072 * read everything that hasn't failed. Thanks to the 2073 * stripe cache, it is possible that some or all of these 2074 * pages are going to be uptodate. 2075 */ 2076 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { 2077 if (rbio->faila == stripe || rbio->failb == stripe) { 2078 atomic_inc(&rbio->error); 2079 continue; 2080 } 2081 2082 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { 2083 struct page *p; 2084 2085 /* 2086 * the rmw code may have already read this 2087 * page in 2088 */ 2089 p = rbio_stripe_page(rbio, stripe, pagenr); 2090 if (PageUptodate(p)) 2091 continue; 2092 2093 ret = rbio_add_io_page(rbio, &bio_list, 2094 rbio_stripe_page(rbio, stripe, pagenr), 2095 stripe, pagenr, rbio->stripe_len); 2096 if (ret < 0) 2097 goto cleanup; 2098 } 2099 } 2100 2101 bios_to_read = bio_list_size(&bio_list); 2102 if (!bios_to_read) { 2103 /* 2104 * we might have no bios to read just because the pages 2105 * were up to date, or we might have no bios to read because 2106 * the devices were gone. 2107 */ 2108 if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) { 2109 __raid_recover_end_io(rbio); 2110 goto out; 2111 } else { 2112 goto cleanup; 2113 } 2114 } 2115 2116 /* 2117 * the bbio may be freed once we submit the last bio. 

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that.
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_recover_end_io;
		bio->bi_opf = REQ_OP_READ;

		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
out:
	return 0;

cleanup:
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
		rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);

	return -EIO;
}

/*
 * the main entry point for reads from the higher layers.  This
 * is really only called when the normal read path had a failure,
 * so we assume the bio they send down corresponds to a failed part
 * of the drive.
 */
int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
			  struct btrfs_bio *bbio, u64 stripe_len,
			  int mirror_num, int generic_io)
{
	struct btrfs_raid_bio *rbio;
	int ret;

	if (generic_io) {
		ASSERT(bbio->mirror_num == mirror_num);
		btrfs_io_bio(bio)->mirror_num = mirror_num;
	}

	rbio = alloc_rbio(fs_info, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		if (generic_io)
			btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}

	rbio->operation = BTRFS_RBIO_READ_REBUILD;
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		btrfs_warn(fs_info,
"%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
			   __func__, (u64)bio->bi_iter.bi_sector << 9,
			   (u64)bio->bi_iter.bi_size, bbio->map_type);
		if (generic_io)
			btrfs_put_bbio(bbio);
		kfree(rbio);
		return -EIO;
	}

	if (generic_io) {
		btrfs_bio_counter_inc_noblocked(fs_info);
		rbio->generic_bio_cnt = 1;
	} else {
		btrfs_get_bbio(bbio);
	}

	/*
	 * Loop retry:
	 * for 'mirror == 2', reconstruct from all other stripes.
	 * for 'mirror_num > 2', select a stripe to fail on every retry.
	 */
	if (mirror_num > 2) {
		/*
		 * 'mirror == 3' is to fail the p stripe and
		 * reconstruct from the q stripe.  'mirror > 3' is to
		 * fail a data stripe and reconstruct from p+q stripe.
		 */
		rbio->failb = rbio->real_stripes - (mirror_num - 1);
		ASSERT(rbio->failb > 0);
		if (rbio->failb <= rbio->faila)
			rbio->failb--;
	}
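
	/*
	 * For example, on a 4+2 RAID6 layout real_stripes is 6 (data
	 * stripes 0-3, P at index 4, Q at index 5).  mirror_num == 3 gives
	 * failb == 6 - 2 == 4, the P stripe, so recovery runs from Q;
	 * mirror_num == 4 marks data stripe 3 as failed (decremented
	 * further if it collides with faila), forcing a rebuild from P+Q.
	 */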

	ret = lock_stripe_add(rbio);

	/*
	 * __raid56_parity_recover will end the bio with any errors it
	 * hits.  We don't want to return its error value up the stack
	 * because our caller will end up calling bio_endio with any
	 * nonzero return.
	 */
	if (ret == 0)
		__raid56_parity_recover(rbio);
	/*
	 * Otherwise our rbio has been added to the list of rbios that
	 * will be handled after the current lock owner is done.
	 */
	return 0;
}

static void rmw_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_rmw_stripe(rbio);
}

static void read_rebuild_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	__raid56_parity_recover(rbio);
}

/*
 * The following code is used to scrub/replace the parity stripe.
 *
 * Caller must have already increased bio_counter for getting @bbio.
 *
 * Note: We need to make sure all the pages that are added to the
 * scrub/replace raid bio are correct and will not change during the
 * scrub/replace, i.e. those pages hold only metadata or file data with
 * checksums.
 */

struct btrfs_raid_bio *
raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
			       struct btrfs_bio *bbio, u64 stripe_len,
			       struct btrfs_device *scrub_dev,
			       unsigned long *dbitmap, int stripe_nsectors)
{
	struct btrfs_raid_bio *rbio;
	int i;

	rbio = alloc_rbio(fs_info, bbio, stripe_len);
	if (IS_ERR(rbio))
		return NULL;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the scrub rbio similar to the other types.
	 */
	ASSERT(!bio->bi_iter.bi_size);
	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;

	/*
	 * After mapping bbio with BTRFS_MAP_WRITE, parities have been sorted
	 * to the end position, so this search can start from the first parity
	 * stripe.
	 */
	for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
		if (bbio->stripes[i].dev == scrub_dev) {
			rbio->scrubp = i;
			break;
		}
	}
	ASSERT(i < rbio->real_stripes);

	/* For now we only support the case where sectorsize equals page size */
	ASSERT(fs_info->sectorsize == PAGE_SIZE);
	ASSERT(rbio->stripe_npages == stripe_nsectors);
	bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);

	/*
	 * We have already increased bio_counter when getting bbio, record it
	 * so we can free it at rbio_orig_end_io().
	 */
	rbio->generic_bio_cnt = 1;

	return rbio;
}

/* Used for both parity scrub and missing. */
void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
			    u64 logical)
{
	int stripe_offset;
	int index;

	ASSERT(logical >= rbio->bbio->raid_map[0]);
	ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
				rbio->stripe_len * rbio->nr_data);
	stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
	index = stripe_offset >> PAGE_SHIFT;
	rbio->bio_pages[index] = page;
}
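
/*
 * The index computed in raid56_add_scrub_pages() maps a logical address
 * inside the data portion of the full stripe straight to a slot in
 * bio_pages[], which is laid out as nr_data consecutive stripes of
 * stripe_npages pages each.  For example, with a 64K stripe_len and 4K
 * pages, a page at raid_map[0] + 68K lands at index 17: page 1 of the
 * second data stripe.
 */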

/*
 * We only scrub the parity for horizontal stripes where we have correct
 * data, so we don't need to allocate pages for every stripe.
 */
static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	int bit;
	int index;
	struct page *page;

	for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
		for (i = 0; i < rbio->real_stripes; i++) {
			index = i * rbio->stripe_npages + bit;
			if (rbio->stripe_pages[index])
				continue;

			page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
			if (!page)
				return -ENOMEM;
			rbio->stripe_pages[index] = page;
		}
	}
	return 0;
}

static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void **pointers = rbio->finish_pointers;
	unsigned long *pbitmap = rbio->finish_pbitmap;
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	int p_stripe = -1;
	int q_stripe = -1;
	struct page *p_page = NULL;
	struct page *q_page = NULL;
	struct bio_list bio_list;
	struct bio *bio;
	int is_replace = 0;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1) {
		p_stripe = rbio->real_stripes - 1;
	} else if (rbio->real_stripes - rbio->nr_data == 2) {
		p_stripe = rbio->real_stripes - 2;
		q_stripe = rbio->real_stripes - 1;
	} else {
		BUG();
	}

	if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
		is_replace = 1;
		bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
	}

	/*
	 * Because the higher layers (the scrubber) are unlikely to use
	 * this area of the disk again soon, don't cache it.
	 */
	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	if (!need_check)
		goto writeback;

	p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!p_page)
		goto cleanup;
	SetPageUptodate(p_page);

	if (q_stripe != -1) {
		q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!q_page) {
			__free_page(p_page);
			goto cleanup;
		}
		SetPageUptodate(q_page);
	}

	atomic_set(&rbio->error, 0);

	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *p;
		void *parity;
		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		pointers[stripe++] = kmap(p_page);

		if (q_stripe != -1) {
			/*
			 * raid6, add the qstripe and call the
			 * library function to fill in our p/q
			 */
			pointers[stripe++] = kmap(q_page);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			copy_page(pointers[nr_data], pointers[0]);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
		}

		/* Check scrubbing parity and repair it */
		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		parity = kmap(p);
		if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
			copy_page(parity, pointers[rbio->scrubp]);
		else
			/* Parity is right, needn't writeback */
			bitmap_clear(rbio->dbitmap, pagenr, 1);
		kunmap(p);

		for (stripe = 0; stripe < nr_data; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
		kunmap(p_page);
	}

	__free_page(p_page);
	if (q_page)
		__free_page(q_page);
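
	/*
	 * After the loop above, dbitmap only has bits set for pages whose
	 * on-disk parity did not match the freshly computed P (or Q), so
	 * the writeback below touches nothing that was already correct.
	 */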

writeback:
	/*
	 * time to start writing.  Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
	 * everything else.
	 */
	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list,
			       page, rbio->scrubp, pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

	if (!is_replace)
		goto submit_write;

	for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list, page,
				       bbio->tgtdev_map[rbio->scrubp],
				       pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

submit_write:
	nr_data = bio_list_size(&bio_list);
	if (!nr_data) {
		/* Every parity is right */
		rbio_orig_end_io(rbio, BLK_STS_OK);
		return;
	}

	atomic_set(&rbio->stripes_pending, nr_data);

	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		bio->bi_opf = REQ_OP_WRITE;

		submit_bio(bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);
}

static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
{
	if (stripe >= 0 && stripe < rbio->nr_data)
		return 1;
	return 0;
}

/*
 * While we're doing the parity check and repair, we could have errors
 * in reading pages off the disk.  This checks for errors and if we're
 * not able to read the page it'll trigger parity reconstruction.  The
 * parity scrub will be finished after we've reconstructed the failed
 * stripes.
 */
static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
{
	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	if (rbio->faila >= 0 || rbio->failb >= 0) {
		int dfail = 0, failp = -1;

		if (is_data_stripe(rbio, rbio->faila))
			dfail++;
		else if (is_parity_stripe(rbio->faila))
			failp = rbio->faila;

		if (is_data_stripe(rbio, rbio->failb))
			dfail++;
		else if (is_parity_stripe(rbio->failb))
			failp = rbio->failb;

		/*
		 * We cannot use the parity that is being scrubbed to repair
		 * data, so the amount of damage we can repair is reduced.
		 * (In the RAID5 case we cannot repair anything.)
		 */
		if (dfail > rbio->bbio->max_errors - 1)
			goto cleanup;

		/*
		 * If all the data is good and only the parity is bad,
		 * just repair the parity.
		 */
		if (dfail == 0) {
			finish_parity_scrub(rbio, 0);
			return;
		}

		/*
		 * Here we have one corrupted data stripe and one corrupted
		 * parity on RAID6.  If the corrupted parity is the one being
		 * scrubbed, we can still use the other parity to repair the
		 * data; otherwise the data stripe cannot be repaired.
		 */
		if (failp != rbio->scrubp)
			goto cleanup;

		__raid_recover_end_io(rbio);
	} else {
		finish_parity_scrub(rbio, 1);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);
}
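
/*
 * To make the check above concrete: on RAID6, bbio->max_errors is 2, so
 * two failed data stripes (dfail == 2) already exceed max_errors - 1 and
 * the scrub gives up, because the parity being scrubbed cannot be trusted
 * as a repair source.  One failed data stripe plus one failed parity is
 * repairable only when the failed parity is rbio->scrubp itself.
 */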

/*
 * end io for the read phase of the parity scrub.  All the bios here are
 * physical stripe bios we've read from the disk so we can recalculate
 * the parity of the stripe.
 *
 * This will usually kick off finish_parity_scrub once all the bios are
 * read in, but it may trigger parity reconstruction if we had any errors
 * along the way.
 */
static void raid56_parity_scrub_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_status)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	/*
	 * this will normally finish the scrub and start the write back,
	 * but if there are any failed stripes we'll reconstruct from
	 * parity first
	 */
	validate_rbio_for_parity_scrub(rbio);
}

static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_essential_pages(rbio);
	if (ret)
		goto cleanup;

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
			struct page *page;
			/*
			 * we want to find all the pages missing from
			 * the rbio and read them from the disk.  If
			 * page_in_rbio finds a page in the bio list
			 * we don't need to read it off the stripe.
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page.  If so, be happy and use it
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
				       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with
		 * us, it means there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}
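
	/*
	 * At this point bio_list covers every dbitmap page we could not get
	 * from the bio list or the stripe cache, for every stripe including
	 * the parity being scrubbed; finish_parity_scrub() needs that
	 * on-disk parity page later to compare against the recomputed value.
	 */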

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that.
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid56_parity_scrub_end_io;
		bio->bi_opf = REQ_OP_READ;

		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
	/* the actual write will happen once the reads are done */
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);

	return;

finish:
	validate_rbio_for_parity_scrub(rbio);
}

static void scrub_parity_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_parity_scrub_stripe(rbio);
}

void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		start_async_work(rbio, scrub_parity_work);
}

/* The following code is used for dev replace of a missing RAID 5/6 device. */

struct btrfs_raid_bio *
raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
			  struct btrfs_bio *bbio, u64 length)
{
	struct btrfs_raid_bio *rbio;

	rbio = alloc_rbio(fs_info, bbio, length);
	if (IS_ERR(rbio))
		return NULL;

	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the scrub rbio similar to the other types.
	 */
	ASSERT(!bio->bi_iter.bi_size);

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		BUG();
		kfree(rbio);
		return NULL;
	}

	/*
	 * When we get bbio, we have already increased bio_counter, record it
	 * so we can free it at rbio_orig_end_io().
	 */
	rbio->generic_bio_cnt = 1;

	return rbio;
}

void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		start_async_work(rbio, read_rebuild_work);
}
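
/*
 * Expected calling sequence for the missing-device path (a sketch based on
 * how scrub's dev-replace code drives this API; the caller's exact loop
 * structure lives in scrub.c, not here):
 *
 *	rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length);
 *	for each page backing the missing stripe:
 *		raid56_add_scrub_pages(rbio, page, logical);
 *	raid56_submit_missing_rbio(rbio);
 *
 * The rebuild then runs through read_rebuild_work() and
 * __raid56_parity_recover() exactly like a normal read-repair.
 */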