/*
 * Copyright (C) 2012 Fusion-io  All rights reserved.
 * Copyright (C) 2012 Intel Corp. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/hash.h>
#include <linux/list_sort.h>
#include <linux/raid/xor.h>
#include <linux/vmalloc.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"

/* set when additional merges to this rbio are not allowed */
#define RBIO_RMW_LOCKED_BIT	1

/*
 * set when this rbio is sitting in the hash, but it is just a cache
 * of past RMW
 */
#define RBIO_CACHE_BIT		2

/*
 * set when it is safe to trust the stripe_pages for caching
 */
#define RBIO_CACHE_READY_BIT	3

#define RBIO_CACHE_SIZE 1024

enum btrfs_rbio_ops {
	BTRFS_RBIO_WRITE,
	BTRFS_RBIO_READ_REBUILD,
	BTRFS_RBIO_PARITY_SCRUB,
	BTRFS_RBIO_REBUILD_MISSING,
};

struct btrfs_raid_bio {
	struct btrfs_fs_info *fs_info;
	struct btrfs_bio *bbio;

	/*
	 * while we're doing rmw on a stripe
	 * we put it into a hash table so we can
	 * lock the stripe and merge more rbios
	 * into it.
	 */
	struct list_head hash_list;

	/*
	 * LRU list for the stripe cache
	 */
	struct list_head stripe_cache;

	/*
	 * for scheduling work in the helper threads
	 */
	struct btrfs_work work;

	/*
	 * bio list and bio_list_lock are used
	 * to add more bios into the stripe
	 * in hopes of avoiding the full rmw
	 */
	struct bio_list bio_list;
	spinlock_t bio_list_lock;

	/*
	 * also protected by the bio_list_lock, the
	 * plug list is used by the plugging code
	 * to collect partial bios while plugged.
	 * The stripe locking code also uses it to hand off
	 * the stripe lock to the next pending IO
	 */
	struct list_head plug_list;

	/*
	 * flags that tell us if it is safe to
	 * merge with this bio
	 */
	unsigned long flags;

	/* size of each individual stripe on disk */
	int stripe_len;

	/* number of data stripes (no p/q) */
	int nr_data;

	/*
	 * total number of stripes, including p/q but excluding any
	 * replace-target duplicates
	 */
	int real_stripes;

	/* number of pages in a single stripe */
	int stripe_npages;

	/*
	 * set if we're doing a parity rebuild
	 * for a read from higher up, which is handled
	 * differently from a parity rebuild as part of
	 * rmw
	 */
	enum btrfs_rbio_ops operation;

	/* first bad stripe */
	int faila;

	/* second bad stripe (for raid6 use) */
	int failb;

	/* stripe number of the device being scrubbed (parity scrub only) */
	int scrubp;

	/*
	 * number of pages needed to represent the full
	 * stripe
	 */
	int nr_pages;

	/*
	 * size of all the bios in the bio_list.  This
	 * helps us decide if the rbio maps to a full
	 * stripe or not
	 */
	int bio_list_bytes;

	/* number of refs we hold on the fs-wide bio counter */
	int generic_bio_cnt;

	atomic_t refs;

	atomic_t stripes_pending;

	atomic_t error;

	/*
	 * these are two arrays of pointers.  We allocate the
	 * rbio big enough to hold them both and setup their
	 * locations when the rbio is allocated
	 */

	/*
	 * pointers to pages that we allocated for
	 * reading/writing stripes directly from the disk (including P/Q)
	 */
	struct page **stripe_pages;

	/*
	 * pointers to the pages in the bio_list.  Stored
	 * here for faster lookup
	 */
	struct page **bio_pages;

	/*
	 * bitmap to record which horizontal stripe has data
	 */
	unsigned long *dbitmap;
};

static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
static void rmw_work(struct btrfs_work *work);
static void read_rebuild_work(struct btrfs_work *work);
static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
static void async_read_rebuild(struct btrfs_raid_bio *rbio);
static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
static void __free_raid_bio(struct btrfs_raid_bio *rbio);
static void index_rbio_pages(struct btrfs_raid_bio *rbio);
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);

static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check);
static void async_scrub_parity(struct btrfs_raid_bio *rbio);
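/*
 * A quick sizing sketch for the hash table allocated below (editorial,
 * assuming BTRFS_STRIPE_HASH_TABLE_BITS is 11, its usual in-tree value):
 * num_entries is 1 << 11 == 2048, so table_size is roughly
 * sizeof(*table) + 2048 * sizeof(struct btrfs_stripe_hash).  That is what
 * pushes the allocation into the order-4..7 range mentioned in the comment
 * below, and why the code falls back to vzalloc().
 */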
/*
 * the stripe hash table is used for locking, and to collect
 * bios in hopes of making a full stripe
 */
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash_table *x;
	struct btrfs_stripe_hash *cur;
	struct btrfs_stripe_hash *h;
	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
	int i;
	int table_size;

	if (info->stripe_hash_table)
		return 0;

	/*
	 * The table is large, starting with order 4 and can go as high as
	 * order 7 in case lock debugging is turned on.
	 *
	 * Try harder to allocate and fallback to vmalloc to lower the chance
	 * of a failing mount.
	 */
	table_size = sizeof(*table) + sizeof(*h) * num_entries;
	table = kzalloc(table_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!table) {
		table = vzalloc(table_size);
		if (!table)
			return -ENOMEM;
	}

	spin_lock_init(&table->cache_lock);
	INIT_LIST_HEAD(&table->stripe_cache);

	h = table->table;

	for (i = 0; i < num_entries; i++) {
		cur = h + i;
		INIT_LIST_HEAD(&cur->hash_list);
		spin_lock_init(&cur->lock);
		init_waitqueue_head(&cur->wait);
	}

	x = cmpxchg(&info->stripe_hash_table, NULL, table);
	if (x)
		kvfree(x);
	return 0;
}

/*
 * caching an rbio means to copy anything from the
 * bio_pages array into the stripe_pages array.  We
 * use the page uptodate bit in the stripe cache array
 * to indicate if it has valid data
 *
 * once the caching is done, we set the cache ready
 * bit.
 */
static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	char *s;
	char *d;
	int ret;

	ret = alloc_rbio_pages(rbio);
	if (ret)
		return;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (!rbio->bio_pages[i])
			continue;

		s = kmap(rbio->bio_pages[i]);
		d = kmap(rbio->stripe_pages[i]);

		memcpy(d, s, PAGE_CACHE_SIZE);

		kunmap(rbio->bio_pages[i]);
		kunmap(rbio->stripe_pages[i]);
		SetPageUptodate(rbio->stripe_pages[i]);
	}
	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
}

/*
 * we hash on the first logical address of the stripe
 */
static int rbio_bucket(struct btrfs_raid_bio *rbio)
{
	u64 num = rbio->bbio->raid_map[0];

	/*
	 * we shift down quite a bit.  We're using byte
	 * addressing, and most of the lower bits are zeros.
	 * This tends to upset hash_64, and it consistently
	 * returns just one or two different values.
	 *
	 * shifting off the lower bits fixes things.
	 */
	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
}

/*
 * stealing an rbio means taking all the uptodate pages from the stripe
 * array in the source rbio and putting them into the destination rbio
 */
static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
{
	int i;
	struct page *s;
	struct page *d;

	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
		return;

	for (i = 0; i < dest->nr_pages; i++) {
		s = src->stripe_pages[i];
		if (!s || !PageUptodate(s))
			continue;

		d = dest->stripe_pages[i];
		if (d)
			__free_page(d);

		dest->stripe_pages[i] = s;
		src->stripe_pages[i] = NULL;
	}
}

/*
 * merging means we take the bio_list from the victim and
 * splice it into the destination.  The victim should
 * be discarded afterwards.
 *
 * must be called with dest->bio_list_lock held
 */
static void merge_rbio(struct btrfs_raid_bio *dest,
		       struct btrfs_raid_bio *victim)
{
	bio_list_merge(&dest->bio_list, &victim->bio_list);
	dest->bio_list_bytes += victim->bio_list_bytes;
	dest->generic_bio_cnt += victim->generic_bio_cnt;
	bio_list_init(&victim->bio_list);
}
/*
 * used to prune items that are in the cache.  The caller
 * must hold the hash table lock.
 */
static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash *h;
	int freeit = 0;

	/*
	 * check the bit again under the hash table lock.
	 */
	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;
	h = table->table + bucket;

	/*
	 * hold the lock for the bucket because we may be
	 * removing it from the hash table
	 */
	spin_lock(&h->lock);

	/*
	 * hold the lock for the bio list because we need
	 * to make sure the bio list is empty
	 */
	spin_lock(&rbio->bio_list_lock);

	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
		list_del_init(&rbio->stripe_cache);
		table->cache_size -= 1;
		freeit = 1;

		/*
		 * if the bio list isn't empty, this rbio is
		 * still involved in an IO.  We take it out
		 * of the cache list, and drop the ref that
		 * was held for the list.
		 *
		 * If the bio_list was empty, we also remove
		 * the rbio from the hash_table, and drop
		 * the corresponding ref
		 */
		if (bio_list_empty(&rbio->bio_list)) {
			if (!list_empty(&rbio->hash_list)) {
				list_del_init(&rbio->hash_list);
				atomic_dec(&rbio->refs);
				BUG_ON(!list_empty(&rbio->plug_list));
			}
		}
	}

	spin_unlock(&rbio->bio_list_lock);
	spin_unlock(&h->lock);

	if (freeit)
		__free_raid_bio(rbio);
}

/*
 * prune a given rbio from the cache
 */
static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	__remove_rbio_from_cache(rbio);
	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * remove everything in the cache
 */
static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;
	struct btrfs_raid_bio *rbio;

	table = info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	while (!list_empty(&table->stripe_cache)) {
		rbio = list_entry(table->stripe_cache.next,
				  struct btrfs_raid_bio,
				  stripe_cache);
		__remove_rbio_from_cache(rbio);
	}
	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * remove all cached entries and free the hash table
 * used by unmount
 */
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
{
	if (!info->stripe_hash_table)
		return;
	btrfs_clear_rbio_cache(info);
	kvfree(info->stripe_hash_table);
	info->stripe_hash_table = NULL;
}
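/*
 * A note on lock nesting as used above (an editorial summary, not from the
 * original source): the cache paths take table->cache_lock first (irqsave),
 * then the per-bucket h->lock, then rbio->bio_list_lock innermost.
 * __remove_rbio_from_cache() relies on its caller already holding
 * cache_lock, which is why remove_rbio_from_cache() and
 * btrfs_clear_rbio_cache() both wrap it in that lock.
 */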
/*
 * insert an rbio into the stripe cache.  It
 * must have already been prepared by calling
 * cache_rbio_pages
 *
 * If this rbio was already cached, it gets
 * moved to the front of the lru.
 *
 * If the size of the rbio cache is too big, we
 * prune an item.
 */
static void cache_rbio(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	spin_lock(&rbio->bio_list_lock);

	/* bump our ref if we were not in the list before */
	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
		atomic_inc(&rbio->refs);

	if (!list_empty(&rbio->stripe_cache)) {
		list_move(&rbio->stripe_cache, &table->stripe_cache);
	} else {
		list_add(&rbio->stripe_cache, &table->stripe_cache);
		table->cache_size += 1;
	}

	spin_unlock(&rbio->bio_list_lock);

	if (table->cache_size > RBIO_CACHE_SIZE) {
		struct btrfs_raid_bio *found;

		found = list_entry(table->stripe_cache.prev,
				   struct btrfs_raid_bio,
				   stripe_cache);

		if (found != rbio)
			__remove_rbio_from_cache(found);
	}

	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * helper function to run the xor_blocks api.  It is only
 * able to do MAX_XOR_BLOCKS at a time, so we need to
 * loop through.
 */
static void run_xor(void **pages, int src_cnt, ssize_t len)
{
	int src_off = 0;
	int xor_src_cnt = 0;
	void *dest = pages[src_cnt];

	while (src_cnt > 0) {
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest, pages + src_off);

		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}
}

/*
 * returns true if the bio list inside this rbio
 * covers an entire stripe (no rmw required).
 * Must be called with the bio list lock held, or
 * at a time when you know it is impossible to add
 * new bios into the list
 */
static int __rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long size = rbio->bio_list_bytes;
	int ret = 1;

	if (size != rbio->nr_data * rbio->stripe_len)
		ret = 0;

	BUG_ON(size > rbio->nr_data * rbio->stripe_len);
	return ret;
}

static int rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);
	ret = __rbio_is_full(rbio);
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
	return ret;
}
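/*
 * Worked example for run_xor() above (editorial; assumes MAX_XOR_BLOCKS is
 * 4, its usual value in linux/raid/xor.h): with src_cnt == 6, the first
 * xor_blocks() call folds pages[0..3] into the destination at pages[6],
 * and the second call folds in pages[4..5].  Because xor_blocks() xors
 * into a destination that already holds partial results, chunking the
 * sources this way still yields dest ^= src0 ^ ... ^ src5.
 */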
/*
 * returns 1 if it is safe to merge two rbios together.
 * The merging is safe if the two rbios correspond to
 * the same stripe and if they are both going in the same
 * direction (read vs write), and if neither one is
 * locked for final IO
 *
 * The caller is responsible for locking such that
 * rmw_locked is safe to test
 */
static int rbio_can_merge(struct btrfs_raid_bio *last,
			  struct btrfs_raid_bio *cur)
{
	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
		return 0;

	/*
	 * we can't merge with cached rbios, since the
	 * idea is that when we merge the destination
	 * rbio is going to run our IO for us.  We can
	 * steal from cached rbios though, other functions
	 * handle that.
	 */
	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
	    test_bit(RBIO_CACHE_BIT, &cur->flags))
		return 0;

	if (last->bbio->raid_map[0] !=
	    cur->bbio->raid_map[0])
		return 0;

	/* we can't merge with different operations */
	if (last->operation != cur->operation)
		return 0;
	/*
	 * a parity scrub has to read the full stripe from the drive,
	 * then check and repair the parity and write the new results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	if (last->operation == BTRFS_RBIO_PARITY_SCRUB ||
	    cur->operation == BTRFS_RBIO_PARITY_SCRUB)
		return 0;

	if (last->operation == BTRFS_RBIO_REBUILD_MISSING ||
	    cur->operation == BTRFS_RBIO_REBUILD_MISSING)
		return 0;

	return 1;
}

static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
				  int index)
{
	return stripe * rbio->stripe_npages + index;
}

/*
 * these are just the pages from the rbio array, not from anything
 * the FS sent down to us
 */
static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
				     int index)
{
	return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
}

/*
 * helper to index into the pstripe
 */
static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	return rbio_stripe_page(rbio, rbio->nr_data, index);
}

/*
 * helper to index into the qstripe, returns null
 * if there is no qstripe
 */
static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	if (rbio->nr_data + 1 == rbio->real_stripes)
		return NULL;
	return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
}
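/*
 * A quick indexing example for the helpers above (editorial; the numbers
 * are illustrative): with a 64K stripe_len and 4K pages, stripe_npages is
 * 16, so rbio_stripe_page_index(rbio, 1, 3) selects entry 1 * 16 + 3 == 19
 * of stripe_pages.  For a 3-data-stripe raid6 layout (real_stripes == 5),
 * the p stripe lives at stripe number 3 (nr_data) and the q stripe at 4.
 */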
/*
 * The first stripe in the table for a logical address
 * has the lock.  rbios are added in one of three ways:
 *
 * 1) Nobody has the stripe locked yet.  The rbio is given
 * the lock and 0 is returned.  The caller must start the IO
 * themselves.
 *
 * 2) Someone has the stripe locked, but we're able to merge
 * with the lock owner.  The rbio is freed and the IO will
 * start automatically along with the existing rbio.  1 is returned.
 *
 * 3) Someone has the stripe locked, but we're not able to merge.
 * The rbio is added to the lock owner's plug list, or merged into
 * an rbio already on the plug list.  When the lock owner unlocks,
 * the next rbio on the list is run and the IO is started automatically.
 * 1 is returned
 *
 * If we return 0, the caller still owns the rbio and must continue with
 * IO submission.  If we return 1, the caller must assume the rbio has
 * already been freed.
 */
static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *pending;
	unsigned long flags;
	DEFINE_WAIT(wait);
	struct btrfs_raid_bio *freeit = NULL;
	struct btrfs_raid_bio *cache_drop = NULL;
	int ret = 0;
	int walk = 0;

	spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry(cur, &h->hash_list, hash_list) {
		walk++;
		if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
			spin_lock(&cur->bio_list_lock);

			/* can we steal this cached rbio's pages? */
			if (bio_list_empty(&cur->bio_list) &&
			    list_empty(&cur->plug_list) &&
			    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
			    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
				list_del_init(&cur->hash_list);
				atomic_dec(&cur->refs);

				steal_rbio(cur, rbio);
				cache_drop = cur;
				spin_unlock(&cur->bio_list_lock);

				goto lockit;
			}

			/* can we merge into the lock owner? */
			if (rbio_can_merge(cur, rbio)) {
				merge_rbio(cur, rbio);
				spin_unlock(&cur->bio_list_lock);
				freeit = rbio;
				ret = 1;
				goto out;
			}


			/*
			 * we couldn't merge with the running
			 * rbio, see if we can merge with the
			 * pending ones.  We don't have to
			 * check for rmw_locked because there
			 * is no way they are inside finish_rmw
			 * right now
			 */
			list_for_each_entry(pending, &cur->plug_list,
					    plug_list) {
				if (rbio_can_merge(pending, rbio)) {
					merge_rbio(pending, rbio);
					spin_unlock(&cur->bio_list_lock);
					freeit = rbio;
					ret = 1;
					goto out;
				}
			}

			/*
			 * no merging, put us on the tail of the plug list,
			 * our rbio will be started when the currently
			 * running rbio unlocks
			 */
			list_add_tail(&rbio->plug_list, &cur->plug_list);
			spin_unlock(&cur->bio_list_lock);
			ret = 1;
			goto out;
		}
	}
lockit:
	atomic_inc(&rbio->refs);
	list_add(&rbio->hash_list, &h->hash_list);
out:
	spin_unlock_irqrestore(&h->lock, flags);
	if (cache_drop)
		remove_rbio_from_cache(cache_drop);
	if (freeit)
		__free_raid_bio(freeit);
	return ret;
}
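/*
 * Hypothetical caller pattern for lock_stripe_add() (a sketch mirroring
 * full_stripe_write()/partial_stripe_write() further down, not a new API):
 *
 *	if (lock_stripe_add(rbio) == 0)
 *		finish_rmw(rbio);	/- we own the stripe lock, start IO
 *	/- else: rbio was merged or parked on the plug list; hands off
 *
 * (the "/-" markers stand in for nested comment delimiters)
 */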
/*
 * called as rmw or parity rebuild is completed.  If the plug list has more
 * rbios waiting for this stripe, the next one on the list will be started
 */
static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
{
	int bucket;
	struct btrfs_stripe_hash *h;
	unsigned long flags;
	int keep_cache = 0;

	bucket = rbio_bucket(rbio);
	h = rbio->fs_info->stripe_hash_table->table + bucket;

	if (list_empty(&rbio->plug_list))
		cache_rbio(rbio);

	spin_lock_irqsave(&h->lock, flags);
	spin_lock(&rbio->bio_list_lock);

	if (!list_empty(&rbio->hash_list)) {
		/*
		 * if we're still cached and there is no other IO
		 * to perform, just leave this rbio here for others
		 * to steal from later
		 */
		if (list_empty(&rbio->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
			keep_cache = 1;
			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
			BUG_ON(!bio_list_empty(&rbio->bio_list));
			goto done;
		}

		list_del_init(&rbio->hash_list);
		atomic_dec(&rbio->refs);

		/*
		 * we use the plug list to hold all the rbios
		 * waiting for the chance to lock this stripe.
		 * hand the lock over to one of them.
		 */
		if (!list_empty(&rbio->plug_list)) {
			struct btrfs_raid_bio *next;
			struct list_head *head = rbio->plug_list.next;

			next = list_entry(head, struct btrfs_raid_bio,
					  plug_list);

			list_del_init(&rbio->plug_list);

			list_add(&next->hash_list, &h->hash_list);
			atomic_inc(&next->refs);
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock_irqrestore(&h->lock, flags);

			if (next->operation == BTRFS_RBIO_READ_REBUILD)
				async_read_rebuild(next);
			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
				steal_rbio(rbio, next);
				async_read_rebuild(next);
			} else if (next->operation == BTRFS_RBIO_WRITE) {
				steal_rbio(rbio, next);
				async_rmw_stripe(next);
			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
				steal_rbio(rbio, next);
				async_scrub_parity(next);
			}

			goto done_nolock;
			/*
			 * The barrier for this waitqueue_active is not needed,
			 * we're protected by h->lock and can't miss a wakeup.
			 */
		} else if (waitqueue_active(&h->wait)) {
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock_irqrestore(&h->lock, flags);
			wake_up(&h->wait);
			goto done_nolock;
		}
	}
done:
	spin_unlock(&rbio->bio_list_lock);
	spin_unlock_irqrestore(&h->lock, flags);

done_nolock:
	if (!keep_cache)
		remove_rbio_from_cache(rbio);
}

static void __free_raid_bio(struct btrfs_raid_bio *rbio)
{
	int i;

	WARN_ON(atomic_read(&rbio->refs) < 0);
	if (!atomic_dec_and_test(&rbio->refs))
		return;

	WARN_ON(!list_empty(&rbio->stripe_cache));
	WARN_ON(!list_empty(&rbio->hash_list));
	WARN_ON(!bio_list_empty(&rbio->bio_list));

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i]) {
			__free_page(rbio->stripe_pages[i]);
			rbio->stripe_pages[i] = NULL;
		}
	}

	btrfs_put_bbio(rbio->bbio);
	kfree(rbio);
}

static void free_raid_bio(struct btrfs_raid_bio *rbio)
{
	unlock_stripe(rbio);
	__free_raid_bio(rbio);
}

/*
 * this frees the rbio and runs through all the bios in the
 * bio_list and calls end_io on them
 */
static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err)
{
	struct bio *cur = bio_list_get(&rbio->bio_list);
	struct bio *next;

	if (rbio->generic_bio_cnt)
		btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);

	free_raid_bio(rbio);

	while (cur) {
		next = cur->bi_next;
		cur->bi_next = NULL;
		cur->bi_error = err;
		bio_endio(cur);
		cur = next;
	}
}

/*
 * end io function used by finish_rmw.  When we finally
 * get here, we've written a full stripe
 */
static void raid_write_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;
	int err = bio->bi_error;
	int max_errors;

	if (err)
		fail_bio_stripe(rbio, bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	err = 0;

	/* OK, we have read all the stripes we need to. */
	max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
		     0 : rbio->bbio->max_errors;
	if (atomic_read(&rbio->error) > max_errors)
		err = -EIO;

	rbio_orig_end_io(rbio, err);
}
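/*
 * Editorial note on the max_errors check above: bbio->max_errors reflects
 * the redundancy of the profile as computed by the volume code (1 for
 * raid5, 2 for raid6), so a full-stripe write is still considered
 * successful if no more than that many device writes failed.  Parity
 * scrub insists on zero failures because it is rewriting the redundancy
 * itself.
 */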
/*
 * the read/modify/write code wants to use the original bio for
 * any pages it included, and then use the rbio for everything
 * else.  This function decides if a given index (stripe number)
 * and page number in that stripe fall inside the original bio
 * or the rbio.
 *
 * if you set bio_list_only, you'll get a NULL back for any ranges
 * that are outside the bio_list
 *
 * This doesn't take any refs on anything, you get a bare page pointer
 * and the caller must bump refs as required.
 *
 * You must call index_rbio_pages once before you can trust
 * the answers from this function.
 */
static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
				 int index, int pagenr, int bio_list_only)
{
	int chunk_page;
	struct page *p = NULL;

	chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;

	spin_lock_irq(&rbio->bio_list_lock);
	p = rbio->bio_pages[chunk_page];
	spin_unlock_irq(&rbio->bio_list_lock);

	if (p || bio_list_only)
		return p;

	return rbio->stripe_pages[chunk_page];
}

/*
 * number of pages we need for the entire stripe across all the
 * drives
 */
static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
{
	return DIV_ROUND_UP(stripe_len, PAGE_CACHE_SIZE) * nr_stripes;
}

/*
 * allocation and initial setup for the btrfs_raid_bio.  Note that
 * this does not allocate any pages for rbio->stripe_pages.
 */
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
					 struct btrfs_bio *bbio, u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	int nr_data = 0;
	int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
	int num_pages = rbio_nr_pages(stripe_len, real_stripes);
	int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
	void *p;

	rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2 +
		       DIV_ROUND_UP(stripe_npages, BITS_PER_LONG) *
		       sizeof(long), GFP_NOFS);
	if (!rbio)
		return ERR_PTR(-ENOMEM);

	bio_list_init(&rbio->bio_list);
	INIT_LIST_HEAD(&rbio->plug_list);
	spin_lock_init(&rbio->bio_list_lock);
	INIT_LIST_HEAD(&rbio->stripe_cache);
	INIT_LIST_HEAD(&rbio->hash_list);
	rbio->bbio = bbio;
	rbio->fs_info = root->fs_info;
	rbio->stripe_len = stripe_len;
	rbio->nr_pages = num_pages;
	rbio->real_stripes = real_stripes;
	rbio->stripe_npages = stripe_npages;
	rbio->faila = -1;
	rbio->failb = -1;
	atomic_set(&rbio->refs, 1);
	atomic_set(&rbio->error, 0);
	atomic_set(&rbio->stripes_pending, 0);

	/*
	 * the stripe_pages and bio_pages array point to the extra
	 * memory we allocated past the end of the rbio
	 */
	p = rbio + 1;
	rbio->stripe_pages = p;
	rbio->bio_pages = p + sizeof(struct page *) * num_pages;
	rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2;

	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
		nr_data = real_stripes - 1;
	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
		nr_data = real_stripes - 2;
	else
		BUG();

	rbio->nr_data = nr_data;
	return rbio;
}

/* allocate pages for all the stripes in the bio, including parity */
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
	}
	return 0;
}
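/*
 * Layout sketch of the single allocation made in alloc_rbio() (editorial,
 * derived from the pointer setup above), for num_pages == N:
 *
 *	[struct btrfs_raid_bio][N stripe_pages ptrs][N bio_pages ptrs][dbitmap longs]
 *
 * Keeping all three trailing arrays in one kzalloc() means a single
 * allocation failure point and a single kfree() in __free_raid_bio().
 */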
/* only allocate pages for p/q stripes */
static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);

	for (; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
	}
	return 0;
}

/*
 * add a single page from a specific stripe into our list of bios for IO
 * this will try to merge into existing bios if possible, and returns
 * zero if all went well.
 */
static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
			    struct bio_list *bio_list,
			    struct page *page,
			    int stripe_nr,
			    unsigned long page_index,
			    unsigned long bio_max_len)
{
	struct bio *last = bio_list->tail;
	u64 last_end = 0;
	int ret;
	struct bio *bio;
	struct btrfs_bio_stripe *stripe;
	u64 disk_start;

	stripe = &rbio->bbio->stripes[stripe_nr];
	disk_start = stripe->physical + (page_index << PAGE_CACHE_SHIFT);

	/* if the device is missing, just fail this stripe */
	if (!stripe->dev->bdev)
		return fail_rbio_index(rbio, stripe_nr);

	/* see if we can add this page onto our existing bio */
	if (last) {
		last_end = (u64)last->bi_iter.bi_sector << 9;
		last_end += last->bi_iter.bi_size;

		/*
		 * we can't merge these if they are from different
		 * devices or if they are not contiguous
		 */
		if (last_end == disk_start && stripe->dev->bdev &&
		    !last->bi_error &&
		    last->bi_bdev == stripe->dev->bdev) {
			ret = bio_add_page(last, page, PAGE_CACHE_SIZE, 0);
			if (ret == PAGE_CACHE_SIZE)
				return 0;
		}
	}

	/* put a new bio on the list */
	bio = btrfs_io_bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT ?: 1);
	if (!bio)
		return -ENOMEM;

	bio->bi_iter.bi_size = 0;
	bio->bi_bdev = stripe->dev->bdev;
	bio->bi_iter.bi_sector = disk_start >> 9;

	bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
	bio_list_add(bio_list, bio);
	return 0;
}

/*
 * while we're doing the read/modify/write cycle, we could
 * have errors in reading pages off the disk.  This checks
 * for errors and if we're not able to read the page it'll
 * trigger parity reconstruction.  The rmw will be finished
 * after we've reconstructed the failed stripes
 */
static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
{
	if (rbio->faila >= 0 || rbio->failb >= 0) {
		BUG_ON(rbio->faila == rbio->real_stripes - 1);
		__raid56_parity_recover(rbio);
	} else {
		finish_rmw(rbio);
	}
}
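/*
 * Worked example of the merge check in rbio_add_io_page() (editorial,
 * illustrative numbers): if the last bio on the list targets the same
 * bdev, starts at sector 2048 and already carries 8K (bi_size == 8192),
 * then last_end is 2048 * 512 + 8192 in bytes.  A new page whose
 * disk_start equals that byte offset is physically contiguous and simply
 * extends the bio; anything else forces a fresh bio.
 */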
/*
 * helper function to walk our bio list and populate the bio_pages array with
 * the result.  This seems expensive, but it is faster than constantly
 * searching through the bio list as we setup the IO in finish_rmw or stripe
 * reconstruction.
 *
 * This must be called before you trust the answers from page_in_rbio
 */
static void index_rbio_pages(struct btrfs_raid_bio *rbio)
{
	struct bio *bio;
	u64 start;
	unsigned long stripe_offset;
	unsigned long page_index;
	struct page *p;
	int i;

	spin_lock_irq(&rbio->bio_list_lock);
	bio_list_for_each(bio, &rbio->bio_list) {
		start = (u64)bio->bi_iter.bi_sector << 9;
		stripe_offset = start - rbio->bbio->raid_map[0];
		page_index = stripe_offset >> PAGE_CACHE_SHIFT;

		for (i = 0; i < bio->bi_vcnt; i++) {
			p = bio->bi_io_vec[i].bv_page;
			rbio->bio_pages[page_index + i] = p;
		}
	}
	spin_unlock_irq(&rbio->bio_list_lock);
}
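/*
 * Indexing example for index_rbio_pages() (editorial): a bio whose
 * bi_sector translates to 128K past raid_map[0] has stripe_offset ==
 * 131072; with 4K pages that is page_index 32, so its pages land at
 * bio_pages[32], bio_pages[33], and so on, matching the chunk_page
 * numbering that page_in_rbio() computes.
 */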
/*
 * this is called from one of two situations.  We either
 * have a full stripe from the higher layers, or we've read all
 * the missing bits off disk.
 *
 * This will calculate the parity and then send down any
 * changed blocks.
 */
static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void *pointers[rbio->real_stripes];
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	int p_stripe = -1;
	int q_stripe = -1;
	struct bio_list bio_list;
	struct bio *bio;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1) {
		p_stripe = rbio->real_stripes - 1;
	} else if (rbio->real_stripes - rbio->nr_data == 2) {
		p_stripe = rbio->real_stripes - 2;
		q_stripe = rbio->real_stripes - 1;
	} else {
		BUG();
	}

	/*
	 * at this point we either have a full stripe,
	 * or we've read the full stripe from the drive.
	 * recalculate the parity and write the new results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	spin_lock_irq(&rbio->bio_list_lock);
	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
	spin_unlock_irq(&rbio->bio_list_lock);

	atomic_set(&rbio->error, 0);

	/*
	 * now that we've set rmw_locked, run through the
	 * bio list one last time and map the page pointers
	 *
	 * We don't cache full rbios because we're assuming
	 * the higher layers are unlikely to use this area of
	 * the disk again soon.  If they do use it again,
	 * hopefully they will send another full bio.
	 */
	index_rbio_pages(rbio);
	if (!rbio_is_full(rbio))
		cache_rbio_pages(rbio);
	else
		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
		struct page *p;
		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		p = rbio_pstripe_page(rbio, pagenr);
		SetPageUptodate(p);
		pointers[stripe++] = kmap(p);

		if (q_stripe != -1) {

			/*
			 * raid6, add the qstripe and call the
			 * library function to fill in our p/q
			 */
			p = rbio_qstripe_page(rbio, pagenr);
			SetPageUptodate(p);
			pointers[stripe++] = kmap(p);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
			run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE);
		}


		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
	}

	/*
	 * time to start writing.  Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
	 * everything else.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list,
				       page, stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	if (likely(!bbio->num_tgtdevs))
		goto write_data;

	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (!bbio->tgtdev_map[stripe])
			continue;

		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list, page,
					       rbio->bbio->tgtdev_map[stripe],
					       pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

write_data:
	atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
	BUG_ON(atomic_read(&rbio->stripes_pending) == 0);

	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		submit_bio(WRITE, bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
}
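/*
 * Worked example of the raid5 branch in finish_rmw() (editorial): with
 * nr_data == 3, the memcpy() seeds the p page with D0, and run_xor() then
 * folds in D1 and D2, leaving P = D0 ^ D1 ^ D2.  The raid6 branch instead
 * hands all the mapped pages to raid6_call.gen_syndrome(), which fills
 * both the p and q pages in one pass.
 */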
/*
 * helper to find the stripe number for a given bio.  Used to figure out which
 * stripe has failed.  This expects the bio to correspond to a physical disk,
 * so it looks up based on physical sector numbers.
 */
static int find_bio_stripe(struct btrfs_raid_bio *rbio,
			   struct bio *bio)
{
	u64 physical = bio->bi_iter.bi_sector;
	u64 stripe_start;
	int i;
	struct btrfs_bio_stripe *stripe;

	physical <<= 9;

	for (i = 0; i < rbio->bbio->num_stripes; i++) {
		stripe = &rbio->bbio->stripes[i];
		stripe_start = stripe->physical;
		if (physical >= stripe_start &&
		    physical < stripe_start + rbio->stripe_len &&
		    bio->bi_bdev == stripe->dev->bdev) {
			return i;
		}
	}
	return -1;
}

/*
 * helper to find the stripe number for a given
 * bio (before mapping).  Used to figure out which stripe has
 * failed.  This looks up based on logical block numbers.
 */
static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
				   struct bio *bio)
{
	u64 logical = bio->bi_iter.bi_sector;
	u64 stripe_start;
	int i;

	logical <<= 9;

	for (i = 0; i < rbio->nr_data; i++) {
		stripe_start = rbio->bbio->raid_map[i];
		if (logical >= stripe_start &&
		    logical < stripe_start + rbio->stripe_len) {
			return i;
		}
	}
	return -1;
}

/*
 * returns -EIO if we had too many failures
 */
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);

	/* we already know this stripe is bad, move on */
	if (rbio->faila == failed || rbio->failb == failed)
		goto out;

	if (rbio->faila == -1) {
		/* first failure on this rbio */
		rbio->faila = failed;
		atomic_inc(&rbio->error);
	} else if (rbio->failb == -1) {
		/* second failure on this rbio */
		rbio->failb = failed;
		atomic_inc(&rbio->error);
	} else {
		ret = -EIO;
	}
out:
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

	return ret;
}

/*
 * helper to fail a stripe based on a physical disk
 * bio.
 */
static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
			   struct bio *bio)
{
	int failed = find_bio_stripe(rbio, bio);

	if (failed < 0)
		return -EIO;

	return fail_rbio_index(rbio, failed);
}

/*
 * this sets each page in the bio uptodate.  It should only be used on private
 * rbio pages, nothing that comes in from the higher layers
 */
static void set_bio_pages_uptodate(struct bio *bio)
{
	int i;
	struct page *p;

	for (i = 0; i < bio->bi_vcnt; i++) {
		p = bio->bi_io_vec[i].bv_page;
		SetPageUptodate(p);
	}
}
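/*
 * Editorial example for the two stripe lookups above: both convert
 * bi_sector to bytes by shifting left 9.  find_bio_stripe() matches a
 * physical byte offset against [stripe->physical, stripe->physical +
 * stripe_len) on the same bdev, while find_logical_bio_stripe() matches a
 * logical offset against [raid_map[i], raid_map[i] + stripe_len) and only
 * walks the nr_data data stripes, since higher-layer bios never target
 * the p/q stripes directly.
 */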
/*
 * end io for the read phase of the rmw cycle.  All the bios here are physical
 * stripe bios we've read from the disk so we can recalculate the parity of the
 * stripe.
 *
 * This will usually kick off finish_rmw once all the bios are read in, but it
 * may trigger parity reconstruction if we had any errors along the way
 */
static void raid_rmw_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_error)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	/*
	 * this will normally call finish_rmw to start our write
	 * but if there are any failed stripes we'll reconstruct
	 * from parity first
	 */
	validate_rbio_for_rmw(rbio);
	return;

cleanup:

	rbio_orig_end_io(rbio, -EIO);
}

static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			rmw_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers,
			 &rbio->work);
}

static void async_read_rebuild(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			read_rebuild_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers,
			 &rbio->work);
}

/*
 * the stripe must be locked by the caller.  It will
 * unlock after all the writes are done
 */
static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	index_rbio_pages(rbio);

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			/*
			 * we want to find all the pages missing from
			 * the rbio and read them from the disk.  If
			 * page_in_rbio finds a page in the bio list
			 * we don't need to read it off the stripe.
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page.  If so, be happy and use it
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
				       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with
		 * us, it means there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio.
	 * Make sure not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_rmw_end_io;

		btrfs_bio_wq_end_io(rbio->fs_info, bio,
				    BTRFS_WQ_ENDIO_RAID56);

		submit_bio(READ, bio);
	}
	/* the actual write will happen once the reads are done */
	return 0;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
	return -EIO;

finish:
	validate_rbio_for_rmw(rbio);
	return 0;
}

/*
 * if the upper layers pass in a full stripe, we thank them by only allocating
 * enough pages to hold the parity, and sending it all down quickly.
 */
static int full_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = alloc_rbio_parity_pages(rbio);
	if (ret) {
		__free_raid_bio(rbio);
		return ret;
	}

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		finish_rmw(rbio);
	return 0;
}

/*
 * partial stripe writes get handed over to async helpers.
 * We're really hoping to merge a few more writes into this
 * rbio before calculating new parity
 */
static int partial_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		async_rmw_stripe(rbio);
	return 0;
}

/*
 * sometimes while we were reading from the drive to
 * recalculate parity, enough new bios come in to create
 * a full stripe.  So we do a check here to see if we can
 * go directly to finish_rmw
 */
static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
{
	/* head off into rmw land if we don't have a full stripe */
	if (!rbio_is_full(rbio))
		return partial_stripe_write(rbio);
	return full_stripe_write(rbio);
}

/*
 * We use plugging callbacks to collect full stripes.
 * Any time we get a partial stripe write while plugged
 * we collect it into a list.  When the unplug comes down,
 * we sort the list by logical block number and merge
 * everything we can into the same rbios
 */
struct btrfs_plug_cb {
	struct blk_plug_cb cb;
	struct btrfs_fs_info *info;
	struct list_head rbio_list;
	struct btrfs_work work;
};

/*
 * rbios on the plug list are sorted for easier merging.
 */
static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
						 plug_list);
	struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
						 plug_list);
	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;

	if (a_sector < b_sector)
		return -1;
	if (a_sector > b_sector)
		return 1;
	return 0;
}
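/*
 * Editorial example for plug_cmp(): list_sort(NULL, &plug->rbio_list,
 * plug_cmp) in run_plug() below orders rbios by the starting sector of
 * their first bio, so rbios that begin at sectors 4096, 0 and 2048 are
 * visited as 0, 2048, 4096.  Sorting first makes the can-merge check in
 * run_plug() a single pass over adjacent entries.
 */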
static void run_plug(struct btrfs_plug_cb *plug)
{
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *last = NULL;

	/*
	 * sort our plug list then try to merge
	 * everything we can in hopes of creating full
	 * stripes.
	 */
	list_sort(NULL, &plug->rbio_list, plug_cmp);
	while (!list_empty(&plug->rbio_list)) {
		cur = list_entry(plug->rbio_list.next,
				 struct btrfs_raid_bio, plug_list);
		list_del_init(&cur->plug_list);

		if (rbio_is_full(cur)) {
			/* we have a full stripe, send it down */
			full_stripe_write(cur);
			continue;
		}
		if (last) {
			if (rbio_can_merge(last, cur)) {
				merge_rbio(last, cur);
				__free_raid_bio(cur);
				continue;
			}
			__raid56_parity_write(last);
		}
		last = cur;
	}
	if (last) {
		__raid56_parity_write(last);
	}
	kfree(plug);
}

/*
 * if the unplug comes from schedule, we have to push the
 * work off to a helper thread
 */
static void unplug_work(struct btrfs_work *work)
{
	struct btrfs_plug_cb *plug;
	plug = container_of(work, struct btrfs_plug_cb, work);
	run_plug(plug);
}

static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct btrfs_plug_cb *plug;
	plug = container_of(cb, struct btrfs_plug_cb, cb);

	if (from_schedule) {
		btrfs_init_work(&plug->work, btrfs_rmw_helper,
				unplug_work, NULL, NULL);
		btrfs_queue_work(plug->info->rmw_workers,
				 &plug->work);
		return;
	}
	run_plug(plug);
}

/*
 * our main entry point for writes from the rest of the FS.
 */
int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
			struct btrfs_bio *bbio, u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	struct btrfs_plug_cb *plug = NULL;
	struct blk_plug_cb *cb;
	int ret;

	rbio = alloc_rbio(root, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;
	rbio->operation = BTRFS_RBIO_WRITE;

	btrfs_bio_counter_inc_noblocked(root->fs_info);
	rbio->generic_bio_cnt = 1;

	/*
	 * don't plug on full rbios, just get them out the door
	 * as quickly as we can
	 */
	if (rbio_is_full(rbio)) {
		ret = full_stripe_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(root->fs_info);
		return ret;
	}

	cb = blk_check_plugged(btrfs_raid_unplug, root->fs_info,
			       sizeof(*plug));
	if (cb) {
		plug = container_of(cb, struct btrfs_plug_cb, cb);
		if (!plug->info) {
			plug->info = root->fs_info;
			INIT_LIST_HEAD(&plug->rbio_list);
		}
		list_add_tail(&rbio->plug_list, &plug->rbio_list);
		ret = 0;
	} else {
		ret = __raid56_parity_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(root->fs_info);
	}
	return ret;
}
/*
 * all parity reconstruction happens here.  We've read in everything
 * we can find from the drives and this does the heavy lifting of
 * sorting the good from the bad.
 */
static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
{
	int pagenr, stripe;
	void **pointers;
	int faila = -1, failb = -1;
	struct page *page;
	int err;
	int i;

	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	if (!pointers) {
		err = -ENOMEM;
		goto cleanup_io;
	}

	faila = rbio->faila;
	failb = rbio->failb;

	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		spin_lock_irq(&rbio->bio_list_lock);
		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
		spin_unlock_irq(&rbio->bio_list_lock);
	}

	index_rbio_pages(rbio);

	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
		/*
		 * Now we just use bitmap to mark the horizontal stripes in
		 * which we have data when doing parity scrub.
		 */
		if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
		    !test_bit(pagenr, rbio->dbitmap))
			continue;

		/*
		 * setup our array of pointers with pages
		 * from each stripe
		 */
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			pointers[stripe] = kmap(page);
		}

		/* all raid6 handling here */
		if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
			/*
			 * single failure, rebuild from parity raid5
			 * style
			 */
			if (failb < 0) {
				if (faila == rbio->nr_data) {
					/*
					 * Just the P stripe has failed, without
					 * a bad data or Q stripe.
					 * TODO, we should redo the xor here.
					 */
					err = -EIO;
					goto cleanup;
				}
				/*
				 * a single failure in raid6 is rebuilt
				 * in the pstripe code below
				 */
				goto pstripe;
			}

			/* make sure our ps and qs are in order */
			if (faila > failb) {
				int tmp = failb;
				failb = faila;
				faila = tmp;
			}

			/*
			 * if the q stripe failed, do a pstripe reconstruction
			 * from the xors.
			 * If both the q stripe and the P stripe failed, we're
			 * here due to a crc mismatch and we can't give them the
			 * data they want
			 */
			if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
				if (rbio->bbio->raid_map[faila] ==
				    RAID5_P_STRIPE) {
					err = -EIO;
					goto cleanup;
				}
				/*
				 * otherwise we have one bad data stripe and
				 * a good P stripe.  raid5!
				 */
				goto pstripe;
			}
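			/*
			 * Editorial note: raid6_datap_recov() below handles
			 * the case of one failed data stripe plus the P
			 * stripe, while raid6_2data_recov() rebuilds two
			 * failed data stripes from P and Q; both helpers
			 * come from the shared raid6 library (linux/raid/pq.h).
			 */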
			if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
				raid6_datap_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, pointers);
			} else {
				raid6_2data_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, failb,
						  pointers);
			}
		} else {
			void *p;

			/* rebuild from P stripe here (raid5 or raid6) */
			BUG_ON(failb != -1);
pstripe:
			/* Copy parity block into failed block to start with */
			memcpy(pointers[faila],
			       pointers[rbio->nr_data],
			       PAGE_CACHE_SIZE);

			/*
			 * rearrange the pointer array so the failed page
			 * sits last, then xor the remaining data pages
			 * into it
			 */
			p = pointers[faila];
			for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
				pointers[stripe] = pointers[stripe + 1];
			pointers[rbio->nr_data - 1] = p;

			/* xor in the rest */
			run_xor(pointers, rbio->nr_data - 1, PAGE_CACHE_SIZE);
		}
		/*
		 * if we're doing this rebuild as part of an rmw, go through
		 * and set all of our private rbio pages in the
		 * failed stripes as uptodate.  This way finish_rmw will
		 * know they can be trusted.  If this was a read reconstruction,
		 * other endio functions will fiddle the uptodate bits
		 */
		if (rbio->operation == BTRFS_RBIO_WRITE) {
			for (i = 0; i < rbio->stripe_npages; i++) {
				if (faila != -1) {
					page = rbio_stripe_page(rbio, faila, i);
					SetPageUptodate(page);
				}
				if (failb != -1) {
					page = rbio_stripe_page(rbio, failb, i);
					SetPageUptodate(page);
				}
			}
		}
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			kunmap(page);
		}
	}

	err = 0;
cleanup:
	kfree(pointers);

cleanup_io:
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
		if (err == 0)
			cache_rbio_pages(rbio);
		else
			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

		rbio_orig_end_io(rbio, err);
	} else if (rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		rbio_orig_end_io(rbio, err);
	} else if (err == 0) {
		rbio->faila = -1;
		rbio->failb = -1;

		if (rbio->operation == BTRFS_RBIO_WRITE)
			finish_rmw(rbio);
		else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
			finish_parity_scrub(rbio, 0);
		else
			BUG();
	} else {
		rbio_orig_end_io(rbio, err);
	}
}

/*
 * This is called only for stripes we've read from disk to
 * reconstruct the parity.
 */
static void raid_recover_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	/*
	 * we only read stripe pages off the disk, set them
	 * up to date if there were no errors
	 */
	if (bio->bi_error)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);
	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		rbio_orig_end_io(rbio, -EIO);
	else
		__raid_recover_end_io(rbio);
}

/*
 * reads everything we need off the disk to reconstruct
 * the parity.  endio handlers trigger final reconstruction
 * when the IO is done.
 *
 * This is used both for reads from the higher layers and for
 * parity construction required to finish a rmw cycle.
 */
static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	atomic_set(&rbio->error, 0);

	/*
	 * read everything that hasn't failed.  Thanks to the
	 * stripe cache, it is possible that some or all of these
	 * pages are going to be uptodate.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (rbio->faila == stripe || rbio->failb == stripe) {
			atomic_inc(&rbio->error);
			continue;
		}

		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *p;

			/*
			 * the rmw code may have already read this
			 * page in
			 */
			p = rbio_stripe_page(rbio, stripe, pagenr);
			if (PageUptodate(p))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list,
				       rbio_stripe_page(rbio, stripe, pagenr),
				       stripe, pagenr, rbio->stripe_len);
			if (ret < 0)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * we might have no bios to read just because the pages
		 * were up to date, or we might have no bios to read because
		 * the devices were gone.
		 */
		if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
			__raid_recover_end_io(rbio);
			goto out;
		} else {
			goto cleanup;
		}
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_recover_end_io;

		btrfs_bio_wq_end_io(rbio->fs_info, bio,
				    BTRFS_WQ_ENDIO_RAID56);

		submit_bio(READ, bio);
	}
out:
	return 0;

cleanup:
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
		rbio_orig_end_io(rbio, -EIO);
	return -EIO;
}

/*
 * the main entry point for reads from the higher layers.  This
 * is really only called when the normal read path had a failure,
 * so we assume the bio they send down corresponds to a failed part
 * of the drive.
/*
 * the main entry point for reads from the higher layers.  This
 * is really only called when the normal read path had a failure,
 * so we assume the bio they send down corresponds to a failed part
 * of the drive.
 */
int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
			  struct btrfs_bio *bbio, u64 stripe_len,
			  int mirror_num, int generic_io)
{
	struct btrfs_raid_bio *rbio;
	int ret;

	rbio = alloc_rbio(root, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		if (generic_io)
			btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}

	rbio->operation = BTRFS_RBIO_READ_REBUILD;
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		BUG();
		if (generic_io)
			btrfs_put_bbio(bbio);
		kfree(rbio);
		return -EIO;
	}

	if (generic_io) {
		btrfs_bio_counter_inc_noblocked(root->fs_info);
		rbio->generic_bio_cnt = 1;
	} else {
		btrfs_get_bbio(bbio);
	}

	/*
	 * reconstruct from the q stripe if they are
	 * asking for mirror 3
	 */
	if (mirror_num == 3)
		rbio->failb = rbio->real_stripes - 2;

	ret = lock_stripe_add(rbio);

	/*
	 * __raid56_parity_recover will end the bio with
	 * any errors it hits.  We don't want to return
	 * its error value up the stack because our caller
	 * will end up calling bio_endio with any nonzero
	 * return.
	 */
	if (ret == 0)
		__raid56_parity_recover(rbio);
	/*
	 * otherwise our rbio has been added to the list of
	 * rbios that will be handled after the
	 * current lock owner is done.
	 */
	return 0;
}

static void rmw_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_rmw_stripe(rbio);
}

static void read_rebuild_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	__raid56_parity_recover(rbio);
}

/*
 * The following code is used to scrub/replace the parity stripe.
 *
 * Note: we need to make sure that all the pages added to the scrub/replace
 * raid bio are correct and will not be changed during the scrub/replace;
 * that is, those pages hold only metadata or file data with checksums.
 */

struct btrfs_raid_bio *
raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
			       struct btrfs_bio *bbio, u64 stripe_len,
			       struct btrfs_device *scrub_dev,
			       unsigned long *dbitmap, int stripe_nsectors)
{
	struct btrfs_raid_bio *rbio;
	int i;

	rbio = alloc_rbio(root, bbio, stripe_len);
	if (IS_ERR(rbio))
		return NULL;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion
	 * handler and make the scrub rbio similar to the other types.
	 */
	ASSERT(!bio->bi_iter.bi_size);
	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;

	for (i = 0; i < rbio->real_stripes; i++) {
		if (bbio->stripes[i].dev == scrub_dev) {
			rbio->scrubp = i;
			break;
		}
	}

	/* for now we only support the case where sectorsize == PAGE_SIZE */
	ASSERT(root->sectorsize == PAGE_SIZE);
	ASSERT(rbio->stripe_npages == stripe_nsectors);
	bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);

	return rbio;
}
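/*
 * Rough sketch of how a scrub rbio is meant to be driven (assumed usage,
 * based only on the helpers in this file, not on the actual callers):
 *
 *	rbio = raid56_parity_alloc_scrub_rbio(root, bio, bbio, stripe_len,
 *					      scrub_dev, dbitmap,
 *					      stripe_nsectors);
 *	...
 *	raid56_add_scrub_pages(rbio, page, logical);	(once per data page)
 *	raid56_parity_submit_scrub_rbio(rbio);
 *
 * Each bit in dbitmap selects one horizontal page of the stripe whose
 * parity should be verified; pages outside the bitmap are never read or
 * rewritten.
 */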
/* Used for both parity scrub and missing. */
void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
			    u64 logical)
{
	int stripe_offset;
	int index;

	ASSERT(logical >= rbio->bbio->raid_map[0]);
	ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
				rbio->stripe_len * rbio->nr_data);
	stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
	index = stripe_offset >> PAGE_SHIFT;
	rbio->bio_pages[index] = page;
}

/*
 * We only scrub the parity for the horizontal stripes where we have correct
 * data, so we don't need to allocate pages for every stripe.
 */
static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	int bit;
	int index;
	struct page *page;

	for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
		for (i = 0; i < rbio->real_stripes; i++) {
			index = i * rbio->stripe_npages + bit;
			if (rbio->stripe_pages[index])
				continue;

			page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
			if (!page)
				return -ENOMEM;
			rbio->stripe_pages[index] = page;
		}
	}
	return 0;
}

static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void *pointers[rbio->real_stripes];
	DECLARE_BITMAP(pbitmap, rbio->stripe_npages);
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	int p_stripe = -1;
	int q_stripe = -1;
	struct page *p_page = NULL;
	struct page *q_page = NULL;
	struct bio_list bio_list;
	struct bio *bio;
	int is_replace = 0;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1) {
		p_stripe = rbio->real_stripes - 1;
	} else if (rbio->real_stripes - rbio->nr_data == 2) {
		p_stripe = rbio->real_stripes - 2;
		q_stripe = rbio->real_stripes - 1;
	} else {
		BUG();
	}

	if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
		is_replace = 1;
		bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
	}
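	/*
	 * From here the function runs in (up to) two phases.  If need_check
	 * is set, the check phase recomputes P (and Q on raid6) into scratch
	 * pages and compares them with what is on disk, clearing the dbitmap
	 * bit of every page whose parity already matches.  The writeback
	 * phase then queues writes only for the pages whose bit is still
	 * set, plus copies to the dev-replace target when is_replace is set.
	 */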
	/*
	 * Because the higher layers (the scrubber) are unlikely to use this
	 * area of the disk again soon, don't cache it.
	 */
	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	if (!need_check)
		goto writeback;

	p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!p_page)
		goto cleanup;
	SetPageUptodate(p_page);

	if (q_stripe != -1) {
		q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!q_page) {
			__free_page(p_page);
			goto cleanup;
		}
		SetPageUptodate(q_page);
	}

	atomic_set(&rbio->error, 0);

	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *p;
		void *parity;

		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		pointers[stripe++] = kmap(p_page);

		if (q_stripe != -1) {
			/*
			 * raid6, add the qstripe and call the
			 * library function to fill in our p/q
			 */
			pointers[stripe++] = kmap(q_page);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
		}

		/* check the scrubbed parity and repair it if it's wrong */
		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		parity = kmap(p);
		if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
			memcpy(parity, pointers[rbio->scrubp], PAGE_SIZE);
		else
			/* parity is right, no need to write it back */
			bitmap_clear(rbio->dbitmap, pagenr, 1);
		kunmap(p);

		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
	}

	__free_page(p_page);
	if (q_page)
		__free_page(q_page);

writeback:
	/*
	 * time to start writing.  Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
	 * everything else.
	 */
	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list, page,
				       rbio->scrubp, pagenr,
				       rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

	if (!is_replace)
		goto submit_write;

	for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list, page,
				       bbio->tgtdev_map[rbio->scrubp],
				       pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

submit_write:
	nr_data = bio_list_size(&bio_list);
	if (!nr_data) {
		/* every parity was right, nothing to write back */
		rbio_orig_end_io(rbio, 0);
		return;
	}

	atomic_set(&rbio->stripes_pending, nr_data);

	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		submit_bio(WRITE, bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
}

static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
{
	if (stripe >= 0 && stripe < rbio->nr_data)
		return 1;
	return 0;
}
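/*
 * Summary of the cases validate_rbio_for_parity_scrub() below has to
 * distinguish once the read phase has marked faila/failb (descriptive aid
 * only, derived from the checks in the function itself):
 *
 *	- no failed stripes: verify the parity against freshly computed
 *	  P/Q (finish_parity_scrub with need_check set)
 *	- only parity stripes failed: regenerate and rewrite the parity
 *	  without checking it first
 *	- exactly one failed data stripe and the failed parity is the one
 *	  being scrubbed (only possible on RAID6): rebuild the data from
 *	  the surviving devices, then finish the scrub
 *	- anything else: fail the rbio with -EIO
 */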
/*
 * While we're doing the parity check and repair, we could have errors
 * in reading pages off the disk.  This checks for errors and if we're
 * not able to read the page it'll trigger parity reconstruction.  The
 * parity scrub will be finished after we've reconstructed the failed
 * stripes.
 */
static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
{
	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	if (rbio->faila >= 0 || rbio->failb >= 0) {
		int dfail = 0, failp = -1;

		if (is_data_stripe(rbio, rbio->faila))
			dfail++;
		else if (is_parity_stripe(rbio->faila))
			failp = rbio->faila;

		if (is_data_stripe(rbio, rbio->failb))
			dfail++;
		else if (is_parity_stripe(rbio->failb))
			failp = rbio->failb;

		/*
		 * Because we can't use the parity that is being scrubbed to
		 * repair the data, our repair capability is reduced by one
		 * (in the RAID5 case we can't repair anything).
		 */
		if (dfail > rbio->bbio->max_errors - 1)
			goto cleanup;

		/*
		 * If all the data is good, then only the parity is wrong;
		 * just repair the parity.
		 */
		if (dfail == 0) {
			finish_parity_scrub(rbio, 0);
			return;
		}

		/*
		 * Getting here means we have one corrupted data stripe and
		 * one corrupted parity on RAID6.  If the corrupted parity
		 * is the one being scrubbed, we can luckily use the other
		 * parity to repair the data; otherwise the data stripe
		 * can't be repaired.
		 */
		if (failp != rbio->scrubp)
			goto cleanup;

		__raid_recover_end_io(rbio);
	} else {
		finish_parity_scrub(rbio, 1);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
}

/*
 * end io for the read phase of the scrub.  All the bios here are physical
 * stripe bios we've read from the disk so we can recalculate the parity of
 * the stripe.
 *
 * This will usually kick off finish_parity_scrub once all the bios are read
 * in, but it may trigger parity reconstruction if we had any errors along
 * the way.
 */
static void raid56_parity_scrub_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_error)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	/*
	 * this will normally call finish_parity_scrub to start our write
	 * but if there are any failed stripes we'll reconstruct
	 * from parity first
	 */
	validate_rbio_for_parity_scrub(rbio);
}
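/*
 * Illustrative note on the read phase driven by raid56_parity_scrub_stripe()
 * below: only the pages whose bit is set in dbitmap take part.  For example
 * (hypothetical numbers), with stripe_npages == 16 and dbitmap == 0x5, just
 * horizontal pages 0 and 2 of every stripe are involved;
 * alloc_rbio_essential_pages() allocates pages for those slots only, and the
 * read loop fills in whatever the bio list and the stripe cache can't
 * already supply.
 */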
static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	ret = alloc_rbio_essential_pages(rbio);
	if (ret)
		goto cleanup;

	bio_list_init(&bio_list);

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
			struct page *page;

			/*
			 * we want to find all the pages missing from
			 * the rbio and read them from the disk.  If
			 * page_in_rbio finds a page in the bio list
			 * we don't need to read it off the stripe.
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page.  If so, be happy and use it.
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
					       stripe, pagenr,
					       rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with
		 * us, it means there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that.
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid56_parity_scrub_end_io;

		btrfs_bio_wq_end_io(rbio->fs_info, bio,
				    BTRFS_WQ_ENDIO_RAID56);

		submit_bio(READ, bio);
	}
	/* the actual write will happen once the reads are done */
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
	return;

finish:
	validate_rbio_for_parity_scrub(rbio);
}

static void scrub_parity_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_parity_scrub_stripe(rbio);
}

static void async_scrub_parity(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			scrub_parity_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}

void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		async_scrub_parity(rbio);
}

/* The following code is used for dev replace of a missing RAID 5/6 device. */

struct btrfs_raid_bio *
raid56_alloc_missing_rbio(struct btrfs_root *root, struct bio *bio,
			  struct btrfs_bio *bbio, u64 length)
{
	struct btrfs_raid_bio *rbio;

	rbio = alloc_rbio(root, bbio, length);
	if (IS_ERR(rbio))
		return NULL;

	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion
	 * handler and make the rbio similar to the other types.
	 */
	ASSERT(!bio->bi_iter.bi_size);

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		BUG();
		kfree(rbio);
		return NULL;
	}

	return rbio;
}

static void missing_raid56_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	__raid56_parity_recover(rbio);
}

static void async_missing_raid56(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			missing_raid56_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}

void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		async_missing_raid56(rbio);
}
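/*
 * Rough usage sketch for the missing-device path (assumed caller behaviour;
 * the real callers live in the scrub/dev-replace code, and the sequence
 * below is only meant to show the intended call order):
 *
 *	rbio = raid56_alloc_missing_rbio(root, bio, bbio, length);
 *	if (!rbio)
 *		return -ENOMEM;
 *	raid56_add_scrub_pages(rbio, page, logical);	(known-good pages)
 *	raid56_submit_missing_rbio(rbio);
 *
 * The rbio is marked BTRFS_RBIO_REBUILD_MISSING, so the recovery code
 * rebuilds the missing stripe from the pages supplied here plus whatever it
 * can read from the surviving devices; the completion handler attached to
 * the special zero-length bio fires when the rebuild is done.
 */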