// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/sched/mm.h>
#include <crypto/hash.h>
#include "ctree.h"
#include "discard.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "raid56.h"
#include "block-group.h"
#include "zoned.h"
#include "fs.h"
#include "accessors.h"
#include "file-item.h"
#include "scrub.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_ctx;

/*
 * The following value only influences the performance.
 *
 * This determines the batch size for stripes submitted in one go.
 */
#define SCRUB_STRIPES_PER_SCTX	8	/* That would be 8 64K stripes per device. */

/*
 * The following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 */
#define SCRUB_MAX_SECTORS_PER_BLOCK	(BTRFS_MAX_METADATA_BLOCKSIZE / SZ_4K)

/* Represent one sector and its needed info to verify the content. */
struct scrub_sector_verification {
	bool is_metadata;

	union {
		/*
		 * Csum pointer for data csum verification. Should point to a
		 * sector csum inside scrub_stripe::csums.
		 *
		 * NULL if this data sector has no csum.
		 */
		u8 *csum;

		/*
		 * Extra info for metadata verification. All sectors inside a
		 * tree block share the same generation.
		 */
		u64 generation;
	};
};

enum scrub_stripe_flags {
	/* Set when @mirror_num, @dev, @physical and @logical are set. */
	SCRUB_STRIPE_FLAG_INITIALIZED,

	/* Set when the read-repair is finished. */
	SCRUB_STRIPE_FLAG_REPAIR_DONE,

	/*
	 * Set for data stripes if the scrub is triggered from a P/Q stripe.
	 * During such scrub, we should not report errors in data stripes, nor
	 * update the accounting.
	 */
	SCRUB_STRIPE_FLAG_NO_REPORT,
};

#define SCRUB_STRIPE_PAGES		(BTRFS_STRIPE_LEN / PAGE_SIZE)

/*
 * Represent one contiguous range with a length of BTRFS_STRIPE_LEN.
 */
struct scrub_stripe {
	struct scrub_ctx *sctx;
	struct btrfs_block_group *bg;

	struct page *pages[SCRUB_STRIPE_PAGES];
	struct scrub_sector_verification *sectors;

	struct btrfs_device *dev;
	u64 logical;
	u64 physical;

	u16 mirror_num;

	/* Should be BTRFS_STRIPE_LEN / sectorsize. */
	u16 nr_sectors;

	/*
	 * How many data/meta extents are in this stripe. Only for scrub status
	 * reporting purposes.
	 */
	u16 nr_data_extents;
	u16 nr_meta_extents;

	atomic_t pending_io;
	wait_queue_head_t io_wait;
	wait_queue_head_t repair_wait;

	/*
	 * Indicate the states of the stripe. Bits are defined in
	 * scrub_stripe_flags enum.
	 */
	unsigned long state;

	/* Indicate which sectors are covered by extent items.
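	 *
	 * For example, with the 64K BTRFS_STRIPE_LEN and a 4K sector size
	 * there are 16 bits in use; bit N set means the sector starting at
	 * (scrub_stripe::logical + N * sectorsize) is covered by some extent
	 * item.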
*/ 131 unsigned long extent_sector_bitmap; 132 133 /* 134 * The errors hit during the initial read of the stripe. 135 * 136 * Would be utilized for error reporting and repair. 137 * 138 * The remaining init_nr_* records the number of errors hit, only used 139 * by error reporting. 140 */ 141 unsigned long init_error_bitmap; 142 unsigned int init_nr_io_errors; 143 unsigned int init_nr_csum_errors; 144 unsigned int init_nr_meta_errors; 145 146 /* 147 * The following error bitmaps are all for the current status. 148 * Every time we submit a new read, these bitmaps may be updated. 149 * 150 * error_bitmap = io_error_bitmap | csum_error_bitmap | meta_error_bitmap; 151 * 152 * IO and csum errors can happen for both metadata and data. 153 */ 154 unsigned long error_bitmap; 155 unsigned long io_error_bitmap; 156 unsigned long csum_error_bitmap; 157 unsigned long meta_error_bitmap; 158 159 /* For writeback (repair or replace) error reporting. */ 160 unsigned long write_error_bitmap; 161 162 /* Writeback can be concurrent, thus we need to protect the bitmap. */ 163 spinlock_t write_error_lock; 164 165 /* 166 * Checksum for the whole stripe if this stripe is inside a data block 167 * group. 168 */ 169 u8 *csums; 170 171 struct work_struct work; 172 }; 173 174 struct scrub_ctx { 175 struct scrub_stripe stripes[SCRUB_STRIPES_PER_SCTX]; 176 struct scrub_stripe *raid56_data_stripes; 177 struct btrfs_fs_info *fs_info; 178 int first_free; 179 int cur_stripe; 180 atomic_t cancel_req; 181 int readonly; 182 int sectors_per_bio; 183 184 /* State of IO submission throttling affecting the associated device */ 185 ktime_t throttle_deadline; 186 u64 throttle_sent; 187 188 int is_dev_replace; 189 u64 write_pointer; 190 191 struct mutex wr_lock; 192 struct btrfs_device *wr_tgtdev; 193 194 /* 195 * statistics 196 */ 197 struct btrfs_scrub_progress stat; 198 spinlock_t stat_lock; 199 200 /* 201 * Use a ref counter to avoid use-after-free issues. Scrub workers 202 * decrement bios_in_flight and workers_pending and then do a wakeup 203 * on the list_wait wait queue. We must ensure the main scrub task 204 * doesn't free the scrub context before or while the workers are 205 * doing the wakeup() call. 
206 */ 207 refcount_t refs; 208 }; 209 210 struct scrub_warning { 211 struct btrfs_path *path; 212 u64 extent_item_size; 213 const char *errstr; 214 u64 physical; 215 u64 logical; 216 struct btrfs_device *dev; 217 }; 218 219 static void release_scrub_stripe(struct scrub_stripe *stripe) 220 { 221 if (!stripe) 222 return; 223 224 for (int i = 0; i < SCRUB_STRIPE_PAGES; i++) { 225 if (stripe->pages[i]) 226 __free_page(stripe->pages[i]); 227 stripe->pages[i] = NULL; 228 } 229 kfree(stripe->sectors); 230 kfree(stripe->csums); 231 stripe->sectors = NULL; 232 stripe->csums = NULL; 233 stripe->sctx = NULL; 234 stripe->state = 0; 235 } 236 237 static int init_scrub_stripe(struct btrfs_fs_info *fs_info, 238 struct scrub_stripe *stripe) 239 { 240 int ret; 241 242 memset(stripe, 0, sizeof(*stripe)); 243 244 stripe->nr_sectors = BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits; 245 stripe->state = 0; 246 247 init_waitqueue_head(&stripe->io_wait); 248 init_waitqueue_head(&stripe->repair_wait); 249 atomic_set(&stripe->pending_io, 0); 250 spin_lock_init(&stripe->write_error_lock); 251 252 ret = btrfs_alloc_page_array(SCRUB_STRIPE_PAGES, stripe->pages); 253 if (ret < 0) 254 goto error; 255 256 stripe->sectors = kcalloc(stripe->nr_sectors, 257 sizeof(struct scrub_sector_verification), 258 GFP_KERNEL); 259 if (!stripe->sectors) 260 goto error; 261 262 stripe->csums = kcalloc(BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits, 263 fs_info->csum_size, GFP_KERNEL); 264 if (!stripe->csums) 265 goto error; 266 return 0; 267 error: 268 release_scrub_stripe(stripe); 269 return -ENOMEM; 270 } 271 272 static void wait_scrub_stripe_io(struct scrub_stripe *stripe) 273 { 274 wait_event(stripe->io_wait, atomic_read(&stripe->pending_io) == 0); 275 } 276 277 static void scrub_put_ctx(struct scrub_ctx *sctx); 278 279 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info) 280 { 281 while (atomic_read(&fs_info->scrub_pause_req)) { 282 mutex_unlock(&fs_info->scrub_lock); 283 wait_event(fs_info->scrub_pause_wait, 284 atomic_read(&fs_info->scrub_pause_req) == 0); 285 mutex_lock(&fs_info->scrub_lock); 286 } 287 } 288 289 static void scrub_pause_on(struct btrfs_fs_info *fs_info) 290 { 291 atomic_inc(&fs_info->scrubs_paused); 292 wake_up(&fs_info->scrub_pause_wait); 293 } 294 295 static void scrub_pause_off(struct btrfs_fs_info *fs_info) 296 { 297 mutex_lock(&fs_info->scrub_lock); 298 __scrub_blocked_if_needed(fs_info); 299 atomic_dec(&fs_info->scrubs_paused); 300 mutex_unlock(&fs_info->scrub_lock); 301 302 wake_up(&fs_info->scrub_pause_wait); 303 } 304 305 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info) 306 { 307 scrub_pause_on(fs_info); 308 scrub_pause_off(fs_info); 309 } 310 311 static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx) 312 { 313 int i; 314 315 if (!sctx) 316 return; 317 318 for (i = 0; i < SCRUB_STRIPES_PER_SCTX; i++) 319 release_scrub_stripe(&sctx->stripes[i]); 320 321 kfree(sctx); 322 } 323 324 static void scrub_put_ctx(struct scrub_ctx *sctx) 325 { 326 if (refcount_dec_and_test(&sctx->refs)) 327 scrub_free_ctx(sctx); 328 } 329 330 static noinline_for_stack struct scrub_ctx *scrub_setup_ctx( 331 struct btrfs_fs_info *fs_info, int is_dev_replace) 332 { 333 struct scrub_ctx *sctx; 334 int i; 335 336 sctx = kzalloc(sizeof(*sctx), GFP_KERNEL); 337 if (!sctx) 338 goto nomem; 339 refcount_set(&sctx->refs, 1); 340 sctx->is_dev_replace = is_dev_replace; 341 sctx->fs_info = fs_info; 342 for (i = 0; i < SCRUB_STRIPES_PER_SCTX; i++) { 343 int ret; 344 345 ret = 
init_scrub_stripe(fs_info, &sctx->stripes[i]); 346 if (ret < 0) 347 goto nomem; 348 sctx->stripes[i].sctx = sctx; 349 } 350 sctx->first_free = 0; 351 atomic_set(&sctx->cancel_req, 0); 352 353 spin_lock_init(&sctx->stat_lock); 354 sctx->throttle_deadline = 0; 355 356 mutex_init(&sctx->wr_lock); 357 if (is_dev_replace) { 358 WARN_ON(!fs_info->dev_replace.tgtdev); 359 sctx->wr_tgtdev = fs_info->dev_replace.tgtdev; 360 } 361 362 return sctx; 363 364 nomem: 365 scrub_free_ctx(sctx); 366 return ERR_PTR(-ENOMEM); 367 } 368 369 static int scrub_print_warning_inode(u64 inum, u64 offset, u64 num_bytes, 370 u64 root, void *warn_ctx) 371 { 372 u32 nlink; 373 int ret; 374 int i; 375 unsigned nofs_flag; 376 struct extent_buffer *eb; 377 struct btrfs_inode_item *inode_item; 378 struct scrub_warning *swarn = warn_ctx; 379 struct btrfs_fs_info *fs_info = swarn->dev->fs_info; 380 struct inode_fs_paths *ipath = NULL; 381 struct btrfs_root *local_root; 382 struct btrfs_key key; 383 384 local_root = btrfs_get_fs_root(fs_info, root, true); 385 if (IS_ERR(local_root)) { 386 ret = PTR_ERR(local_root); 387 goto err; 388 } 389 390 /* 391 * this makes the path point to (inum INODE_ITEM ioff) 392 */ 393 key.objectid = inum; 394 key.type = BTRFS_INODE_ITEM_KEY; 395 key.offset = 0; 396 397 ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0); 398 if (ret) { 399 btrfs_put_root(local_root); 400 btrfs_release_path(swarn->path); 401 goto err; 402 } 403 404 eb = swarn->path->nodes[0]; 405 inode_item = btrfs_item_ptr(eb, swarn->path->slots[0], 406 struct btrfs_inode_item); 407 nlink = btrfs_inode_nlink(eb, inode_item); 408 btrfs_release_path(swarn->path); 409 410 /* 411 * init_path might indirectly call vmalloc, or use GFP_KERNEL. Scrub 412 * uses GFP_NOFS in this context, so we keep it consistent but it does 413 * not seem to be strictly necessary. 
414 */ 415 nofs_flag = memalloc_nofs_save(); 416 ipath = init_ipath(4096, local_root, swarn->path); 417 memalloc_nofs_restore(nofs_flag); 418 if (IS_ERR(ipath)) { 419 btrfs_put_root(local_root); 420 ret = PTR_ERR(ipath); 421 ipath = NULL; 422 goto err; 423 } 424 ret = paths_from_inode(inum, ipath); 425 426 if (ret < 0) 427 goto err; 428 429 /* 430 * we deliberately ignore the bit ipath might have been too small to 431 * hold all of the paths here 432 */ 433 for (i = 0; i < ipath->fspath->elem_cnt; ++i) 434 btrfs_warn_in_rcu(fs_info, 435 "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %u, links %u (path: %s)", 436 swarn->errstr, swarn->logical, 437 btrfs_dev_name(swarn->dev), 438 swarn->physical, 439 root, inum, offset, 440 fs_info->sectorsize, nlink, 441 (char *)(unsigned long)ipath->fspath->val[i]); 442 443 btrfs_put_root(local_root); 444 free_ipath(ipath); 445 return 0; 446 447 err: 448 btrfs_warn_in_rcu(fs_info, 449 "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d", 450 swarn->errstr, swarn->logical, 451 btrfs_dev_name(swarn->dev), 452 swarn->physical, 453 root, inum, offset, ret); 454 455 free_ipath(ipath); 456 return 0; 457 } 458 459 static void scrub_print_common_warning(const char *errstr, struct btrfs_device *dev, 460 bool is_super, u64 logical, u64 physical) 461 { 462 struct btrfs_fs_info *fs_info = dev->fs_info; 463 struct btrfs_path *path; 464 struct btrfs_key found_key; 465 struct extent_buffer *eb; 466 struct btrfs_extent_item *ei; 467 struct scrub_warning swarn; 468 u64 flags = 0; 469 u32 item_size; 470 int ret; 471 472 /* Super block error, no need to search extent tree. */ 473 if (is_super) { 474 btrfs_warn_in_rcu(fs_info, "%s on device %s, physical %llu", 475 errstr, btrfs_dev_name(dev), physical); 476 return; 477 } 478 path = btrfs_alloc_path(); 479 if (!path) 480 return; 481 482 swarn.physical = physical; 483 swarn.logical = logical; 484 swarn.errstr = errstr; 485 swarn.dev = NULL; 486 487 ret = extent_from_logical(fs_info, swarn.logical, path, &found_key, 488 &flags); 489 if (ret < 0) 490 goto out; 491 492 swarn.extent_item_size = found_key.offset; 493 494 eb = path->nodes[0]; 495 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item); 496 item_size = btrfs_item_size(eb, path->slots[0]); 497 498 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { 499 unsigned long ptr = 0; 500 u8 ref_level; 501 u64 ref_root; 502 503 while (true) { 504 ret = tree_backref_for_extent(&ptr, eb, &found_key, ei, 505 item_size, &ref_root, 506 &ref_level); 507 if (ret < 0) { 508 btrfs_warn(fs_info, 509 "failed to resolve tree backref for logical %llu: %d", 510 swarn.logical, ret); 511 break; 512 } 513 if (ret > 0) 514 break; 515 btrfs_warn_in_rcu(fs_info, 516 "%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu", 517 errstr, swarn.logical, btrfs_dev_name(dev), 518 swarn.physical, (ref_level ? 
"node" : "leaf"), 519 ref_level, ref_root); 520 } 521 btrfs_release_path(path); 522 } else { 523 struct btrfs_backref_walk_ctx ctx = { 0 }; 524 525 btrfs_release_path(path); 526 527 ctx.bytenr = found_key.objectid; 528 ctx.extent_item_pos = swarn.logical - found_key.objectid; 529 ctx.fs_info = fs_info; 530 531 swarn.path = path; 532 swarn.dev = dev; 533 534 iterate_extent_inodes(&ctx, true, scrub_print_warning_inode, &swarn); 535 } 536 537 out: 538 btrfs_free_path(path); 539 } 540 541 static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical) 542 { 543 int ret = 0; 544 u64 length; 545 546 if (!btrfs_is_zoned(sctx->fs_info)) 547 return 0; 548 549 if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) 550 return 0; 551 552 if (sctx->write_pointer < physical) { 553 length = physical - sctx->write_pointer; 554 555 ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev, 556 sctx->write_pointer, length); 557 if (!ret) 558 sctx->write_pointer = physical; 559 } 560 return ret; 561 } 562 563 static struct page *scrub_stripe_get_page(struct scrub_stripe *stripe, int sector_nr) 564 { 565 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; 566 int page_index = (sector_nr << fs_info->sectorsize_bits) >> PAGE_SHIFT; 567 568 return stripe->pages[page_index]; 569 } 570 571 static unsigned int scrub_stripe_get_page_offset(struct scrub_stripe *stripe, 572 int sector_nr) 573 { 574 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; 575 576 return offset_in_page(sector_nr << fs_info->sectorsize_bits); 577 } 578 579 static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr) 580 { 581 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; 582 const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits; 583 const u64 logical = stripe->logical + (sector_nr << fs_info->sectorsize_bits); 584 const struct page *first_page = scrub_stripe_get_page(stripe, sector_nr); 585 const unsigned int first_off = scrub_stripe_get_page_offset(stripe, sector_nr); 586 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); 587 u8 on_disk_csum[BTRFS_CSUM_SIZE]; 588 u8 calculated_csum[BTRFS_CSUM_SIZE]; 589 struct btrfs_header *header; 590 591 /* 592 * Here we don't have a good way to attach the pages (and subpages) 593 * to a dummy extent buffer, thus we have to directly grab the members 594 * from pages. 
595 */ 596 header = (struct btrfs_header *)(page_address(first_page) + first_off); 597 memcpy(on_disk_csum, header->csum, fs_info->csum_size); 598 599 if (logical != btrfs_stack_header_bytenr(header)) { 600 bitmap_set(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree); 601 bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree); 602 btrfs_warn_rl(fs_info, 603 "tree block %llu mirror %u has bad bytenr, has %llu want %llu", 604 logical, stripe->mirror_num, 605 btrfs_stack_header_bytenr(header), logical); 606 return; 607 } 608 if (memcmp(header->fsid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE) != 0) { 609 bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree); 610 bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree); 611 btrfs_warn_rl(fs_info, 612 "tree block %llu mirror %u has bad fsid, has %pU want %pU", 613 logical, stripe->mirror_num, 614 header->fsid, fs_info->fs_devices->fsid); 615 return; 616 } 617 if (memcmp(header->chunk_tree_uuid, fs_info->chunk_tree_uuid, 618 BTRFS_UUID_SIZE) != 0) { 619 bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree); 620 bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree); 621 btrfs_warn_rl(fs_info, 622 "tree block %llu mirror %u has bad chunk tree uuid, has %pU want %pU", 623 logical, stripe->mirror_num, 624 header->chunk_tree_uuid, fs_info->chunk_tree_uuid); 625 return; 626 } 627 628 /* Now check tree block csum. */ 629 shash->tfm = fs_info->csum_shash; 630 crypto_shash_init(shash); 631 crypto_shash_update(shash, page_address(first_page) + first_off + 632 BTRFS_CSUM_SIZE, fs_info->sectorsize - BTRFS_CSUM_SIZE); 633 634 for (int i = sector_nr + 1; i < sector_nr + sectors_per_tree; i++) { 635 struct page *page = scrub_stripe_get_page(stripe, i); 636 unsigned int page_off = scrub_stripe_get_page_offset(stripe, i); 637 638 crypto_shash_update(shash, page_address(page) + page_off, 639 fs_info->sectorsize); 640 } 641 642 crypto_shash_final(shash, calculated_csum); 643 if (memcmp(calculated_csum, on_disk_csum, fs_info->csum_size) != 0) { 644 bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree); 645 bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree); 646 btrfs_warn_rl(fs_info, 647 "tree block %llu mirror %u has bad csum, has " CSUM_FMT " want " CSUM_FMT, 648 logical, stripe->mirror_num, 649 CSUM_FMT_VALUE(fs_info->csum_size, on_disk_csum), 650 CSUM_FMT_VALUE(fs_info->csum_size, calculated_csum)); 651 return; 652 } 653 if (stripe->sectors[sector_nr].generation != 654 btrfs_stack_header_generation(header)) { 655 bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree); 656 bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree); 657 btrfs_warn_rl(fs_info, 658 "tree block %llu mirror %u has bad generation, has %llu want %llu", 659 logical, stripe->mirror_num, 660 btrfs_stack_header_generation(header), 661 stripe->sectors[sector_nr].generation); 662 return; 663 } 664 bitmap_clear(&stripe->error_bitmap, sector_nr, sectors_per_tree); 665 bitmap_clear(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree); 666 bitmap_clear(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree); 667 } 668 669 static void scrub_verify_one_sector(struct scrub_stripe *stripe, int sector_nr) 670 { 671 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; 672 struct scrub_sector_verification *sector = &stripe->sectors[sector_nr]; 673 const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits; 674 struct page *page = scrub_stripe_get_page(stripe, sector_nr); 675 
unsigned int pgoff = scrub_stripe_get_page_offset(stripe, sector_nr); 676 u8 csum_buf[BTRFS_CSUM_SIZE]; 677 int ret; 678 679 ASSERT(sector_nr >= 0 && sector_nr < stripe->nr_sectors); 680 681 /* Sector not utilized, skip it. */ 682 if (!test_bit(sector_nr, &stripe->extent_sector_bitmap)) 683 return; 684 685 /* IO error, no need to check. */ 686 if (test_bit(sector_nr, &stripe->io_error_bitmap)) 687 return; 688 689 /* Metadata, verify the full tree block. */ 690 if (sector->is_metadata) { 691 /* 692 * Check if the tree block crosses the stripe boudary. If 693 * crossed the boundary, we cannot verify it but only give a 694 * warning. 695 * 696 * This can only happen on a very old filesystem where chunks 697 * are not ensured to be stripe aligned. 698 */ 699 if (unlikely(sector_nr + sectors_per_tree > stripe->nr_sectors)) { 700 btrfs_warn_rl(fs_info, 701 "tree block at %llu crosses stripe boundary %llu", 702 stripe->logical + 703 (sector_nr << fs_info->sectorsize_bits), 704 stripe->logical); 705 return; 706 } 707 scrub_verify_one_metadata(stripe, sector_nr); 708 return; 709 } 710 711 /* 712 * Data is easier, we just verify the data csum (if we have it). For 713 * cases without csum, we have no other choice but to trust it. 714 */ 715 if (!sector->csum) { 716 clear_bit(sector_nr, &stripe->error_bitmap); 717 return; 718 } 719 720 ret = btrfs_check_sector_csum(fs_info, page, pgoff, csum_buf, sector->csum); 721 if (ret < 0) { 722 set_bit(sector_nr, &stripe->csum_error_bitmap); 723 set_bit(sector_nr, &stripe->error_bitmap); 724 } else { 725 clear_bit(sector_nr, &stripe->csum_error_bitmap); 726 clear_bit(sector_nr, &stripe->error_bitmap); 727 } 728 } 729 730 /* Verify specified sectors of a stripe. */ 731 static void scrub_verify_one_stripe(struct scrub_stripe *stripe, unsigned long bitmap) 732 { 733 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; 734 const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits; 735 int sector_nr; 736 737 for_each_set_bit(sector_nr, &bitmap, stripe->nr_sectors) { 738 scrub_verify_one_sector(stripe, sector_nr); 739 if (stripe->sectors[sector_nr].is_metadata) 740 sector_nr += sectors_per_tree - 1; 741 } 742 } 743 744 static int calc_sector_number(struct scrub_stripe *stripe, struct bio_vec *first_bvec) 745 { 746 int i; 747 748 for (i = 0; i < stripe->nr_sectors; i++) { 749 if (scrub_stripe_get_page(stripe, i) == first_bvec->bv_page && 750 scrub_stripe_get_page_offset(stripe, i) == first_bvec->bv_offset) 751 break; 752 } 753 ASSERT(i < stripe->nr_sectors); 754 return i; 755 } 756 757 /* 758 * Repair read is different to the regular read: 759 * 760 * - Only reads the failed sectors 761 * - May have extra blocksize limits 762 */ 763 static void scrub_repair_read_endio(struct btrfs_bio *bbio) 764 { 765 struct scrub_stripe *stripe = bbio->private; 766 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; 767 struct bio_vec *bvec; 768 int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio)); 769 u32 bio_size = 0; 770 int i; 771 772 ASSERT(sector_nr < stripe->nr_sectors); 773 774 bio_for_each_bvec_all(bvec, &bbio->bio, i) 775 bio_size += bvec->bv_len; 776 777 if (bbio->bio.bi_status) { 778 bitmap_set(&stripe->io_error_bitmap, sector_nr, 779 bio_size >> fs_info->sectorsize_bits); 780 bitmap_set(&stripe->error_bitmap, sector_nr, 781 bio_size >> fs_info->sectorsize_bits); 782 } else { 783 bitmap_clear(&stripe->io_error_bitmap, sector_nr, 784 bio_size >> fs_info->sectorsize_bits); 785 } 786 bio_put(&bbio->bio); 787 if 
(atomic_dec_and_test(&stripe->pending_io))
		wake_up(&stripe->io_wait);
}

static int calc_next_mirror(int mirror, int num_copies)
{
	ASSERT(mirror <= num_copies);
	return (mirror + 1 > num_copies) ? 1 : mirror + 1;
}

static void scrub_stripe_submit_repair_read(struct scrub_stripe *stripe,
					    int mirror, int blocksize, bool wait)
{
	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
	struct btrfs_bio *bbio = NULL;
	const unsigned long old_error_bitmap = stripe->error_bitmap;
	int i;

	ASSERT(stripe->mirror_num >= 1);
	ASSERT(atomic_read(&stripe->pending_io) == 0);

	for_each_set_bit(i, &old_error_bitmap, stripe->nr_sectors) {
		struct page *page;
		int pgoff;
		int ret;

		page = scrub_stripe_get_page(stripe, i);
		pgoff = scrub_stripe_get_page_offset(stripe, i);

		/* The current sector cannot be merged, submit the bio. */
		if (bbio && ((i > 0 && !test_bit(i - 1, &stripe->error_bitmap)) ||
			     bbio->bio.bi_iter.bi_size >= blocksize)) {
			ASSERT(bbio->bio.bi_iter.bi_size);
			atomic_inc(&stripe->pending_io);
			btrfs_submit_bio(bbio, mirror);
			if (wait)
				wait_scrub_stripe_io(stripe);
			bbio = NULL;
		}

		if (!bbio) {
			bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ,
					       fs_info, scrub_repair_read_endio, stripe);
			bbio->bio.bi_iter.bi_sector = (stripe->logical +
				(i << fs_info->sectorsize_bits)) >> SECTOR_SHIFT;
		}

		ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
		ASSERT(ret == fs_info->sectorsize);
	}
	if (bbio) {
		ASSERT(bbio->bio.bi_iter.bi_size);
		atomic_inc(&stripe->pending_io);
		btrfs_submit_bio(bbio, mirror);
		if (wait)
			wait_scrub_stripe_io(stripe);
	}
}

static void scrub_stripe_report_errors(struct scrub_ctx *sctx,
				       struct scrub_stripe *stripe)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_device *dev = NULL;
	u64 physical = 0;
	int nr_data_sectors = 0;
	int nr_meta_sectors = 0;
	int nr_nodatacsum_sectors = 0;
	int nr_repaired_sectors = 0;
	int sector_nr;

	if (test_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state))
		return;

	/*
	 * Initialize the needed info for error reporting.
	 *
	 * Although our scrub_stripe infrastructure is mostly based on
	 * btrfs_submit_bio() and thus does not need the dev/physical pair,
	 * error reporting still needs them.
	 */
	if (!bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors)) {
		u64 mapped_len = fs_info->sectorsize;
		struct btrfs_io_context *bioc = NULL;
		int stripe_index = stripe->mirror_num - 1;
		int ret;

		/* For scrub, our mirror_num should always start at 1. */
		ASSERT(stripe->mirror_num >= 1);
		ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
				      stripe->logical, &mapped_len, &bioc,
				      NULL, NULL, 1);
		/*
		 * If we failed, dev will be NULL, and later detailed reports
		 * will just be skipped.
		 */
		if (ret < 0)
			goto skip;
		physical = bioc->stripes[stripe_index].physical;
		dev = bioc->stripes[stripe_index].dev;
		btrfs_put_bioc(bioc);
	}

skip:
	for_each_set_bit(sector_nr, &stripe->extent_sector_bitmap, stripe->nr_sectors) {
		bool repaired = false;

		if (stripe->sectors[sector_nr].is_metadata) {
			nr_meta_sectors++;
		} else {
			nr_data_sectors++;
			if (!stripe->sectors[sector_nr].csum)
				nr_nodatacsum_sectors++;
		}

		if (test_bit(sector_nr, &stripe->init_error_bitmap) &&
		    !test_bit(sector_nr, &stripe->error_bitmap)) {
			nr_repaired_sectors++;
			repaired = true;
		}

		/* Good sector from the beginning, nothing needs to be done. */
		if (!test_bit(sector_nr, &stripe->init_error_bitmap))
			continue;

		/*
		 * Report errors for the corrupted sectors. If repaired, just
		 * output a message saying the sector has been fixed up.
		 */
		if (repaired) {
			if (dev) {
				btrfs_err_rl_in_rcu(fs_info,
			"fixed up error at logical %llu on dev %s physical %llu",
					    stripe->logical, btrfs_dev_name(dev),
					    physical);
			} else {
				btrfs_err_rl_in_rcu(fs_info,
			"fixed up error at logical %llu on mirror %u",
					    stripe->logical, stripe->mirror_num);
			}
			continue;
		}

		/* The remaining cases are all unrepaired errors. */
		if (dev) {
			btrfs_err_rl_in_rcu(fs_info,
	"unable to fixup (regular) error at logical %llu on dev %s physical %llu",
					    stripe->logical, btrfs_dev_name(dev),
					    physical);
		} else {
			btrfs_err_rl_in_rcu(fs_info,
	"unable to fixup (regular) error at logical %llu on mirror %u",
					    stripe->logical, stripe->mirror_num);
		}

		if (test_bit(sector_nr, &stripe->io_error_bitmap))
			if (__ratelimit(&rs) && dev)
				scrub_print_common_warning("i/o error", dev, false,
						     stripe->logical, physical);
		if (test_bit(sector_nr, &stripe->csum_error_bitmap))
			if (__ratelimit(&rs) && dev)
				scrub_print_common_warning("checksum error", dev, false,
						     stripe->logical, physical);
		if (test_bit(sector_nr, &stripe->meta_error_bitmap))
			if (__ratelimit(&rs) && dev)
				scrub_print_common_warning("header error", dev, false,
						     stripe->logical, physical);
	}

	spin_lock(&sctx->stat_lock);
	sctx->stat.data_extents_scrubbed += stripe->nr_data_extents;
	sctx->stat.tree_extents_scrubbed += stripe->nr_meta_extents;
	sctx->stat.data_bytes_scrubbed += nr_data_sectors << fs_info->sectorsize_bits;
	sctx->stat.tree_bytes_scrubbed += nr_meta_sectors << fs_info->sectorsize_bits;
	sctx->stat.no_csum += nr_nodatacsum_sectors;
	sctx->stat.read_errors += stripe->init_nr_io_errors;
	sctx->stat.csum_errors += stripe->init_nr_csum_errors;
	sctx->stat.verify_errors += stripe->init_nr_meta_errors;
	sctx->stat.uncorrectable_errors +=
		bitmap_weight(&stripe->error_bitmap, stripe->nr_sectors);
	sctx->stat.corrected_errors += nr_repaired_sectors;
	spin_unlock(&sctx->stat_lock);
}

/*
 * The main entry point for all read-related scrub work, including:
 *
 * - Wait for the initial read to finish
 * - Verify and locate any bad sectors
 * - Go through the remaining mirrors and try to read as large blocksize as
 *   possible
 * - Go through all mirrors (including the failed mirror) sector-by-sector
 *
 * Writeback does not happen here, it needs extra synchronization.
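 *
 * For example, with num_copies == 3 and an initial mirror_num of 2, the
 * "remaining mirrors" pass retries mirrors 3 and then 1 (see
 * calc_next_mirror()), and the final sector-by-sector pass walks mirrors
 * 2, 3 and 1 in that order.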
982 */ 983 static void scrub_stripe_read_repair_worker(struct work_struct *work) 984 { 985 struct scrub_stripe *stripe = container_of(work, struct scrub_stripe, work); 986 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; 987 int num_copies = btrfs_num_copies(fs_info, stripe->bg->start, 988 stripe->bg->length); 989 int mirror; 990 int i; 991 992 ASSERT(stripe->mirror_num > 0); 993 994 wait_scrub_stripe_io(stripe); 995 scrub_verify_one_stripe(stripe, stripe->extent_sector_bitmap); 996 /* Save the initial failed bitmap for later repair and report usage. */ 997 stripe->init_error_bitmap = stripe->error_bitmap; 998 stripe->init_nr_io_errors = bitmap_weight(&stripe->io_error_bitmap, 999 stripe->nr_sectors); 1000 stripe->init_nr_csum_errors = bitmap_weight(&stripe->csum_error_bitmap, 1001 stripe->nr_sectors); 1002 stripe->init_nr_meta_errors = bitmap_weight(&stripe->meta_error_bitmap, 1003 stripe->nr_sectors); 1004 1005 if (bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors)) 1006 goto out; 1007 1008 /* 1009 * Try all remaining mirrors. 1010 * 1011 * Here we still try to read as large block as possible, as this is 1012 * faster and we have extra safety nets to rely on. 1013 */ 1014 for (mirror = calc_next_mirror(stripe->mirror_num, num_copies); 1015 mirror != stripe->mirror_num; 1016 mirror = calc_next_mirror(mirror, num_copies)) { 1017 const unsigned long old_error_bitmap = stripe->error_bitmap; 1018 1019 scrub_stripe_submit_repair_read(stripe, mirror, 1020 BTRFS_STRIPE_LEN, false); 1021 wait_scrub_stripe_io(stripe); 1022 scrub_verify_one_stripe(stripe, old_error_bitmap); 1023 if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors)) 1024 goto out; 1025 } 1026 1027 /* 1028 * Last safety net, try re-checking all mirrors, including the failed 1029 * one, sector-by-sector. 1030 * 1031 * As if one sector failed the drive's internal csum, the whole read 1032 * containing the offending sector would be marked as error. 1033 * Thus here we do sector-by-sector read. 1034 * 1035 * This can be slow, thus we only try it as the last resort. 
1036 */ 1037 1038 for (i = 0, mirror = stripe->mirror_num; 1039 i < num_copies; 1040 i++, mirror = calc_next_mirror(mirror, num_copies)) { 1041 const unsigned long old_error_bitmap = stripe->error_bitmap; 1042 1043 scrub_stripe_submit_repair_read(stripe, mirror, 1044 fs_info->sectorsize, true); 1045 wait_scrub_stripe_io(stripe); 1046 scrub_verify_one_stripe(stripe, old_error_bitmap); 1047 if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors)) 1048 goto out; 1049 } 1050 out: 1051 scrub_stripe_report_errors(stripe->sctx, stripe); 1052 set_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state); 1053 wake_up(&stripe->repair_wait); 1054 } 1055 1056 static void scrub_read_endio(struct btrfs_bio *bbio) 1057 { 1058 struct scrub_stripe *stripe = bbio->private; 1059 1060 if (bbio->bio.bi_status) { 1061 bitmap_set(&stripe->io_error_bitmap, 0, stripe->nr_sectors); 1062 bitmap_set(&stripe->error_bitmap, 0, stripe->nr_sectors); 1063 } else { 1064 bitmap_clear(&stripe->io_error_bitmap, 0, stripe->nr_sectors); 1065 } 1066 bio_put(&bbio->bio); 1067 if (atomic_dec_and_test(&stripe->pending_io)) { 1068 wake_up(&stripe->io_wait); 1069 INIT_WORK(&stripe->work, scrub_stripe_read_repair_worker); 1070 queue_work(stripe->bg->fs_info->scrub_workers, &stripe->work); 1071 } 1072 } 1073 1074 static void scrub_write_endio(struct btrfs_bio *bbio) 1075 { 1076 struct scrub_stripe *stripe = bbio->private; 1077 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; 1078 struct bio_vec *bvec; 1079 int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio)); 1080 u32 bio_size = 0; 1081 int i; 1082 1083 bio_for_each_bvec_all(bvec, &bbio->bio, i) 1084 bio_size += bvec->bv_len; 1085 1086 if (bbio->bio.bi_status) { 1087 unsigned long flags; 1088 1089 spin_lock_irqsave(&stripe->write_error_lock, flags); 1090 bitmap_set(&stripe->write_error_bitmap, sector_nr, 1091 bio_size >> fs_info->sectorsize_bits); 1092 spin_unlock_irqrestore(&stripe->write_error_lock, flags); 1093 } 1094 bio_put(&bbio->bio); 1095 1096 if (atomic_dec_and_test(&stripe->pending_io)) 1097 wake_up(&stripe->io_wait); 1098 } 1099 1100 static void scrub_submit_write_bio(struct scrub_ctx *sctx, 1101 struct scrub_stripe *stripe, 1102 struct btrfs_bio *bbio, bool dev_replace) 1103 { 1104 struct btrfs_fs_info *fs_info = sctx->fs_info; 1105 u32 bio_len = bbio->bio.bi_iter.bi_size; 1106 u32 bio_off = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT) - 1107 stripe->logical; 1108 1109 fill_writer_pointer_gap(sctx, stripe->physical + bio_off); 1110 atomic_inc(&stripe->pending_io); 1111 btrfs_submit_repair_write(bbio, stripe->mirror_num, dev_replace); 1112 if (!btrfs_is_zoned(fs_info)) 1113 return; 1114 /* 1115 * For zoned writeback, queue depth must be 1, thus we must wait for 1116 * the write to finish before the next write. 1117 */ 1118 wait_scrub_stripe_io(stripe); 1119 1120 /* 1121 * And also need to update the write pointer if write finished 1122 * successfully. 1123 */ 1124 if (!test_bit(bio_off >> fs_info->sectorsize_bits, 1125 &stripe->write_error_bitmap)) 1126 sctx->write_pointer += bio_len; 1127 } 1128 1129 /* 1130 * Submit the write bio(s) for the sectors specified by @write_bitmap. 
1131 * 1132 * Here we utilize btrfs_submit_repair_write(), which has some extra benefits: 1133 * 1134 * - Only needs logical bytenr and mirror_num 1135 * Just like the scrub read path 1136 * 1137 * - Would only result in writes to the specified mirror 1138 * Unlike the regular writeback path, which would write back to all stripes 1139 * 1140 * - Handle dev-replace and read-repair writeback differently 1141 */ 1142 static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *stripe, 1143 unsigned long write_bitmap, bool dev_replace) 1144 { 1145 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; 1146 struct btrfs_bio *bbio = NULL; 1147 int sector_nr; 1148 1149 for_each_set_bit(sector_nr, &write_bitmap, stripe->nr_sectors) { 1150 struct page *page = scrub_stripe_get_page(stripe, sector_nr); 1151 unsigned int pgoff = scrub_stripe_get_page_offset(stripe, sector_nr); 1152 int ret; 1153 1154 /* We should only writeback sectors covered by an extent. */ 1155 ASSERT(test_bit(sector_nr, &stripe->extent_sector_bitmap)); 1156 1157 /* Cannot merge with previous sector, submit the current one. */ 1158 if (bbio && sector_nr && !test_bit(sector_nr - 1, &write_bitmap)) { 1159 scrub_submit_write_bio(sctx, stripe, bbio, dev_replace); 1160 bbio = NULL; 1161 } 1162 if (!bbio) { 1163 bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_WRITE, 1164 fs_info, scrub_write_endio, stripe); 1165 bbio->bio.bi_iter.bi_sector = (stripe->logical + 1166 (sector_nr << fs_info->sectorsize_bits)) >> 1167 SECTOR_SHIFT; 1168 } 1169 ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff); 1170 ASSERT(ret == fs_info->sectorsize); 1171 } 1172 if (bbio) 1173 scrub_submit_write_bio(sctx, stripe, bbio, dev_replace); 1174 } 1175 1176 /* 1177 * Throttling of IO submission, bandwidth-limit based, the timeslice is 1 1178 * second. Limit can be set via /sys/fs/UUID/devinfo/devid/scrub_speed_max. 1179 */ 1180 static void scrub_throttle_dev_io(struct scrub_ctx *sctx, struct btrfs_device *device, 1181 unsigned int bio_size) 1182 { 1183 const int time_slice = 1000; 1184 s64 delta; 1185 ktime_t now; 1186 u32 div; 1187 u64 bwlimit; 1188 1189 bwlimit = READ_ONCE(device->scrub_speed_max); 1190 if (bwlimit == 0) 1191 return; 1192 1193 /* 1194 * Slice is divided into intervals when the IO is submitted, adjust by 1195 * bwlimit and maximum of 64 intervals. 1196 */ 1197 div = max_t(u32, 1, (u32)(bwlimit / (16 * 1024 * 1024))); 1198 div = min_t(u32, 64, div); 1199 1200 /* Start new epoch, set deadline */ 1201 now = ktime_get(); 1202 if (sctx->throttle_deadline == 0) { 1203 sctx->throttle_deadline = ktime_add_ms(now, time_slice / div); 1204 sctx->throttle_sent = 0; 1205 } 1206 1207 /* Still in the time to send? */ 1208 if (ktime_before(now, sctx->throttle_deadline)) { 1209 /* If current bio is within the limit, send it */ 1210 sctx->throttle_sent += bio_size; 1211 if (sctx->throttle_sent <= div_u64(bwlimit, div)) 1212 return; 1213 1214 /* We're over the limit, sleep until the rest of the slice */ 1215 delta = ktime_ms_delta(sctx->throttle_deadline, now); 1216 } else { 1217 /* New request after deadline, start new epoch */ 1218 delta = 0; 1219 } 1220 1221 if (delta) { 1222 long timeout; 1223 1224 timeout = div_u64(delta * HZ, 1000); 1225 schedule_timeout_interruptible(timeout); 1226 } 1227 1228 /* Next call will start the deadline period */ 1229 sctx->throttle_deadline = 0; 1230 } 1231 1232 /* 1233 * Given a physical address, this will calculate it's 1234 * logical offset. 
If this is a parity stripe, it will return
 * the leftmost data stripe's logical offset.
 *
 * Return 0 if it is a data stripe, 1 if it is a parity stripe.
 */
static int get_raid56_logic_offset(u64 physical, int num,
				   struct map_lookup *map, u64 *offset,
				   u64 *stripe_start)
{
	int i;
	int j = 0;
	u64 last_offset;
	const int data_stripes = nr_data_stripes(map);

	last_offset = (physical - map->stripes[num].physical) * data_stripes;
	if (stripe_start)
		*stripe_start = last_offset;

	*offset = last_offset;
	for (i = 0; i < data_stripes; i++) {
		u32 stripe_nr;
		u32 stripe_index;
		u32 rot;

		*offset = last_offset + btrfs_stripe_nr_to_offset(i);

		stripe_nr = (u32)(*offset >> BTRFS_STRIPE_LEN_SHIFT) / data_stripes;

		/* Work out the disk rotation on this stripe-set */
		rot = stripe_nr % map->num_stripes;
		stripe_nr /= map->num_stripes;
		/* Calculate which stripe this data is located on */
		rot += i;
		stripe_index = rot % map->num_stripes;
		if (stripe_index == num)
			return 0;
		if (stripe_index < num)
			j++;
	}
	*offset = last_offset + btrfs_stripe_nr_to_offset(j);
	return 1;
}

/*
 * Return 0 if the extent item range covers any byte of the range.
 * Return <0 if the extent item is before @search_start.
 * Return >0 if the extent item is after @search_start + @search_len.
 */
static int compare_extent_item_range(struct btrfs_path *path,
				     u64 search_start, u64 search_len)
{
	struct btrfs_fs_info *fs_info = path->nodes[0]->fs_info;
	u64 len;
	struct btrfs_key key;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	ASSERT(key.type == BTRFS_EXTENT_ITEM_KEY ||
	       key.type == BTRFS_METADATA_ITEM_KEY);
	if (key.type == BTRFS_METADATA_ITEM_KEY)
		len = fs_info->nodesize;
	else
		len = key.offset;

	if (key.objectid + len <= search_start)
		return -1;
	if (key.objectid >= search_start + search_len)
		return 1;
	return 0;
}

/*
 * Locate one extent item which covers any byte in range
 * [@search_start, @search_start + @search_len)
 *
 * If the path is not initialized, we will initialize the search by doing
 * a btrfs_search_slot().
 * If the path is already initialized, we will use the path as the initial
 * slot, to avoid duplicated btrfs_search_slot() calls.
 *
 * NOTE: If an extent item starts before @search_start, we will still
 * return the extent item. This is for data extents crossing the stripe
 * boundary.
 *
 * Return 0 if we found such an extent item, and @path will point to the extent item.
 * Return >0 if no such extent item can be found, and @path will be released.
 * Return <0 if we hit a fatal error, and @path will be released.
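 *
 * For example, when searching [64K, 128K), an extent item covering
 * [60K, 80K) is still returned, as it covers bytes inside the range.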
1319 */ 1320 static int find_first_extent_item(struct btrfs_root *extent_root, 1321 struct btrfs_path *path, 1322 u64 search_start, u64 search_len) 1323 { 1324 struct btrfs_fs_info *fs_info = extent_root->fs_info; 1325 struct btrfs_key key; 1326 int ret; 1327 1328 /* Continue using the existing path */ 1329 if (path->nodes[0]) 1330 goto search_forward; 1331 1332 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA)) 1333 key.type = BTRFS_METADATA_ITEM_KEY; 1334 else 1335 key.type = BTRFS_EXTENT_ITEM_KEY; 1336 key.objectid = search_start; 1337 key.offset = (u64)-1; 1338 1339 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); 1340 if (ret < 0) 1341 return ret; 1342 1343 ASSERT(ret > 0); 1344 /* 1345 * Here we intentionally pass 0 as @min_objectid, as there could be 1346 * an extent item starting before @search_start. 1347 */ 1348 ret = btrfs_previous_extent_item(extent_root, path, 0); 1349 if (ret < 0) 1350 return ret; 1351 /* 1352 * No matter whether we have found an extent item, the next loop will 1353 * properly do every check on the key. 1354 */ 1355 search_forward: 1356 while (true) { 1357 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 1358 if (key.objectid >= search_start + search_len) 1359 break; 1360 if (key.type != BTRFS_METADATA_ITEM_KEY && 1361 key.type != BTRFS_EXTENT_ITEM_KEY) 1362 goto next; 1363 1364 ret = compare_extent_item_range(path, search_start, search_len); 1365 if (ret == 0) 1366 return ret; 1367 if (ret > 0) 1368 break; 1369 next: 1370 path->slots[0]++; 1371 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { 1372 ret = btrfs_next_leaf(extent_root, path); 1373 if (ret) { 1374 /* Either no more item or fatal error */ 1375 btrfs_release_path(path); 1376 return ret; 1377 } 1378 } 1379 } 1380 btrfs_release_path(path); 1381 return 1; 1382 } 1383 1384 static void get_extent_info(struct btrfs_path *path, u64 *extent_start_ret, 1385 u64 *size_ret, u64 *flags_ret, u64 *generation_ret) 1386 { 1387 struct btrfs_key key; 1388 struct btrfs_extent_item *ei; 1389 1390 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 1391 ASSERT(key.type == BTRFS_METADATA_ITEM_KEY || 1392 key.type == BTRFS_EXTENT_ITEM_KEY); 1393 *extent_start_ret = key.objectid; 1394 if (key.type == BTRFS_METADATA_ITEM_KEY) 1395 *size_ret = path->nodes[0]->fs_info->nodesize; 1396 else 1397 *size_ret = key.offset; 1398 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_extent_item); 1399 *flags_ret = btrfs_extent_flags(path->nodes[0], ei); 1400 *generation_ret = btrfs_extent_generation(path->nodes[0], ei); 1401 } 1402 1403 static int sync_write_pointer_for_zoned(struct scrub_ctx *sctx, u64 logical, 1404 u64 physical, u64 physical_end) 1405 { 1406 struct btrfs_fs_info *fs_info = sctx->fs_info; 1407 int ret = 0; 1408 1409 if (!btrfs_is_zoned(fs_info)) 1410 return 0; 1411 1412 mutex_lock(&sctx->wr_lock); 1413 if (sctx->write_pointer < physical_end) { 1414 ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical, 1415 physical, 1416 sctx->write_pointer); 1417 if (ret) 1418 btrfs_err(fs_info, 1419 "zoned: failed to recover write pointer"); 1420 } 1421 mutex_unlock(&sctx->wr_lock); 1422 btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical); 1423 1424 return ret; 1425 } 1426 1427 static void fill_one_extent_info(struct btrfs_fs_info *fs_info, 1428 struct scrub_stripe *stripe, 1429 u64 extent_start, u64 extent_len, 1430 u64 extent_flags, u64 extent_gen) 1431 { 1432 for (u64 cur_logical = max(stripe->logical, extent_start); 1433 cur_logical < min(stripe->logical + 
BTRFS_STRIPE_LEN, 1434 extent_start + extent_len); 1435 cur_logical += fs_info->sectorsize) { 1436 const int nr_sector = (cur_logical - stripe->logical) >> 1437 fs_info->sectorsize_bits; 1438 struct scrub_sector_verification *sector = 1439 &stripe->sectors[nr_sector]; 1440 1441 set_bit(nr_sector, &stripe->extent_sector_bitmap); 1442 if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { 1443 sector->is_metadata = true; 1444 sector->generation = extent_gen; 1445 } 1446 } 1447 } 1448 1449 static void scrub_stripe_reset_bitmaps(struct scrub_stripe *stripe) 1450 { 1451 stripe->extent_sector_bitmap = 0; 1452 stripe->init_error_bitmap = 0; 1453 stripe->init_nr_io_errors = 0; 1454 stripe->init_nr_csum_errors = 0; 1455 stripe->init_nr_meta_errors = 0; 1456 stripe->error_bitmap = 0; 1457 stripe->io_error_bitmap = 0; 1458 stripe->csum_error_bitmap = 0; 1459 stripe->meta_error_bitmap = 0; 1460 } 1461 1462 /* 1463 * Locate one stripe which has at least one extent in its range. 1464 * 1465 * Return 0 if found such stripe, and store its info into @stripe. 1466 * Return >0 if there is no such stripe in the specified range. 1467 * Return <0 for error. 1468 */ 1469 static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg, 1470 struct btrfs_device *dev, u64 physical, 1471 int mirror_num, u64 logical_start, 1472 u32 logical_len, 1473 struct scrub_stripe *stripe) 1474 { 1475 struct btrfs_fs_info *fs_info = bg->fs_info; 1476 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bg->start); 1477 struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bg->start); 1478 const u64 logical_end = logical_start + logical_len; 1479 struct btrfs_path path = { 0 }; 1480 u64 cur_logical = logical_start; 1481 u64 stripe_end; 1482 u64 extent_start; 1483 u64 extent_len; 1484 u64 extent_flags; 1485 u64 extent_gen; 1486 int ret; 1487 1488 memset(stripe->sectors, 0, sizeof(struct scrub_sector_verification) * 1489 stripe->nr_sectors); 1490 scrub_stripe_reset_bitmaps(stripe); 1491 1492 /* The range must be inside the bg. */ 1493 ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length); 1494 1495 path.search_commit_root = 1; 1496 path.skip_locking = 1; 1497 1498 ret = find_first_extent_item(extent_root, &path, logical_start, logical_len); 1499 /* Either error or not found. */ 1500 if (ret) 1501 goto out; 1502 get_extent_info(&path, &extent_start, &extent_len, &extent_flags, &extent_gen); 1503 if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) 1504 stripe->nr_meta_extents++; 1505 if (extent_flags & BTRFS_EXTENT_FLAG_DATA) 1506 stripe->nr_data_extents++; 1507 cur_logical = max(extent_start, cur_logical); 1508 1509 /* 1510 * Round down to stripe boundary. 1511 * 1512 * The extra calculation against bg->start is to handle block groups 1513 * whose logical bytenr is not BTRFS_STRIPE_LEN aligned. 1514 */ 1515 stripe->logical = round_down(cur_logical - bg->start, BTRFS_STRIPE_LEN) + 1516 bg->start; 1517 stripe->physical = physical + stripe->logical - logical_start; 1518 stripe->dev = dev; 1519 stripe->bg = bg; 1520 stripe->mirror_num = mirror_num; 1521 stripe_end = stripe->logical + BTRFS_STRIPE_LEN - 1; 1522 1523 /* Fill the first extent info into stripe->sectors[] array. */ 1524 fill_one_extent_info(fs_info, stripe, extent_start, extent_len, 1525 extent_flags, extent_gen); 1526 cur_logical = extent_start + extent_len; 1527 1528 /* Fill the extent info for the remaining sectors. 
*/ 1529 while (cur_logical <= stripe_end) { 1530 ret = find_first_extent_item(extent_root, &path, cur_logical, 1531 stripe_end - cur_logical + 1); 1532 if (ret < 0) 1533 goto out; 1534 if (ret > 0) { 1535 ret = 0; 1536 break; 1537 } 1538 get_extent_info(&path, &extent_start, &extent_len, 1539 &extent_flags, &extent_gen); 1540 if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) 1541 stripe->nr_meta_extents++; 1542 if (extent_flags & BTRFS_EXTENT_FLAG_DATA) 1543 stripe->nr_data_extents++; 1544 fill_one_extent_info(fs_info, stripe, extent_start, extent_len, 1545 extent_flags, extent_gen); 1546 cur_logical = extent_start + extent_len; 1547 } 1548 1549 /* Now fill the data csum. */ 1550 if (bg->flags & BTRFS_BLOCK_GROUP_DATA) { 1551 int sector_nr; 1552 unsigned long csum_bitmap = 0; 1553 1554 /* Csum space should have already been allocated. */ 1555 ASSERT(stripe->csums); 1556 1557 /* 1558 * Our csum bitmap should be large enough, as BTRFS_STRIPE_LEN 1559 * should contain at most 16 sectors. 1560 */ 1561 ASSERT(BITS_PER_LONG >= BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits); 1562 1563 ret = btrfs_lookup_csums_bitmap(csum_root, stripe->logical, 1564 stripe_end, stripe->csums, 1565 &csum_bitmap, true); 1566 if (ret < 0) 1567 goto out; 1568 if (ret > 0) 1569 ret = 0; 1570 1571 for_each_set_bit(sector_nr, &csum_bitmap, stripe->nr_sectors) { 1572 stripe->sectors[sector_nr].csum = stripe->csums + 1573 sector_nr * fs_info->csum_size; 1574 } 1575 } 1576 set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state); 1577 out: 1578 btrfs_release_path(&path); 1579 return ret; 1580 } 1581 1582 static void scrub_reset_stripe(struct scrub_stripe *stripe) 1583 { 1584 scrub_stripe_reset_bitmaps(stripe); 1585 1586 stripe->nr_meta_extents = 0; 1587 stripe->nr_data_extents = 0; 1588 stripe->state = 0; 1589 1590 for (int i = 0; i < stripe->nr_sectors; i++) { 1591 stripe->sectors[i].is_metadata = false; 1592 stripe->sectors[i].csum = NULL; 1593 stripe->sectors[i].generation = 0; 1594 } 1595 } 1596 1597 static void scrub_submit_initial_read(struct scrub_ctx *sctx, 1598 struct scrub_stripe *stripe) 1599 { 1600 struct btrfs_fs_info *fs_info = sctx->fs_info; 1601 struct btrfs_bio *bbio; 1602 int mirror = stripe->mirror_num; 1603 1604 ASSERT(stripe->bg); 1605 ASSERT(stripe->mirror_num > 0); 1606 ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state)); 1607 1608 bbio = btrfs_bio_alloc(SCRUB_STRIPE_PAGES, REQ_OP_READ, fs_info, 1609 scrub_read_endio, stripe); 1610 1611 /* Read the whole stripe. */ 1612 bbio->bio.bi_iter.bi_sector = stripe->logical >> SECTOR_SHIFT; 1613 for (int i = 0; i < BTRFS_STRIPE_LEN >> PAGE_SHIFT; i++) { 1614 int ret; 1615 1616 ret = bio_add_page(&bbio->bio, stripe->pages[i], PAGE_SIZE, 0); 1617 /* We should have allocated enough bio vectors. */ 1618 ASSERT(ret == PAGE_SIZE); 1619 } 1620 atomic_inc(&stripe->pending_io); 1621 1622 /* 1623 * For dev-replace, either user asks to avoid the source dev, or 1624 * the device is missing, we try the next mirror instead. 
1625 */ 1626 if (sctx->is_dev_replace && 1627 (fs_info->dev_replace.cont_reading_from_srcdev_mode == 1628 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID || 1629 !stripe->dev->bdev)) { 1630 int num_copies = btrfs_num_copies(fs_info, stripe->bg->start, 1631 stripe->bg->length); 1632 1633 mirror = calc_next_mirror(mirror, num_copies); 1634 } 1635 btrfs_submit_bio(bbio, mirror); 1636 } 1637 1638 static bool stripe_has_metadata_error(struct scrub_stripe *stripe) 1639 { 1640 int i; 1641 1642 for_each_set_bit(i, &stripe->error_bitmap, stripe->nr_sectors) { 1643 if (stripe->sectors[i].is_metadata) { 1644 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; 1645 1646 btrfs_err(fs_info, 1647 "stripe %llu has unrepaired metadata sector at %llu", 1648 stripe->logical, 1649 stripe->logical + (i << fs_info->sectorsize_bits)); 1650 return true; 1651 } 1652 } 1653 return false; 1654 } 1655 1656 static int flush_scrub_stripes(struct scrub_ctx *sctx) 1657 { 1658 struct btrfs_fs_info *fs_info = sctx->fs_info; 1659 struct scrub_stripe *stripe; 1660 const int nr_stripes = sctx->cur_stripe; 1661 int ret = 0; 1662 1663 if (!nr_stripes) 1664 return 0; 1665 1666 ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &sctx->stripes[0].state)); 1667 1668 scrub_throttle_dev_io(sctx, sctx->stripes[0].dev, 1669 btrfs_stripe_nr_to_offset(nr_stripes)); 1670 for (int i = 0; i < nr_stripes; i++) { 1671 stripe = &sctx->stripes[i]; 1672 scrub_submit_initial_read(sctx, stripe); 1673 } 1674 1675 for (int i = 0; i < nr_stripes; i++) { 1676 stripe = &sctx->stripes[i]; 1677 1678 wait_event(stripe->repair_wait, 1679 test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state)); 1680 } 1681 1682 /* 1683 * Submit the repaired sectors. For zoned case, we cannot do repair 1684 * in-place, but queue the bg to be relocated. 1685 */ 1686 if (btrfs_is_zoned(fs_info)) { 1687 for (int i = 0; i < nr_stripes; i++) { 1688 stripe = &sctx->stripes[i]; 1689 1690 if (!bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors)) { 1691 btrfs_repair_one_zone(fs_info, 1692 sctx->stripes[0].bg->start); 1693 break; 1694 } 1695 } 1696 } else if (!sctx->readonly) { 1697 for (int i = 0; i < nr_stripes; i++) { 1698 unsigned long repaired; 1699 1700 stripe = &sctx->stripes[i]; 1701 1702 bitmap_andnot(&repaired, &stripe->init_error_bitmap, 1703 &stripe->error_bitmap, stripe->nr_sectors); 1704 scrub_write_sectors(sctx, stripe, repaired, false); 1705 } 1706 } 1707 1708 /* Submit for dev-replace. */ 1709 if (sctx->is_dev_replace) { 1710 /* 1711 * For dev-replace, if we know there is something wrong with 1712 * metadata, we should immedately abort. 1713 */ 1714 for (int i = 0; i < nr_stripes; i++) { 1715 if (stripe_has_metadata_error(&sctx->stripes[i])) { 1716 ret = -EIO; 1717 goto out; 1718 } 1719 } 1720 for (int i = 0; i < nr_stripes; i++) { 1721 unsigned long good; 1722 1723 stripe = &sctx->stripes[i]; 1724 1725 ASSERT(stripe->dev == fs_info->dev_replace.srcdev); 1726 1727 bitmap_andnot(&good, &stripe->extent_sector_bitmap, 1728 &stripe->error_bitmap, stripe->nr_sectors); 1729 scrub_write_sectors(sctx, stripe, good, true); 1730 } 1731 } 1732 1733 /* Wait for the above writebacks to finish. 
*/ 1734 for (int i = 0; i < nr_stripes; i++) { 1735 stripe = &sctx->stripes[i]; 1736 1737 wait_scrub_stripe_io(stripe); 1738 scrub_reset_stripe(stripe); 1739 } 1740 out: 1741 sctx->cur_stripe = 0; 1742 return ret; 1743 } 1744 1745 static void raid56_scrub_wait_endio(struct bio *bio) 1746 { 1747 complete(bio->bi_private); 1748 } 1749 1750 static int queue_scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *bg, 1751 struct btrfs_device *dev, int mirror_num, 1752 u64 logical, u32 length, u64 physical) 1753 { 1754 struct scrub_stripe *stripe; 1755 int ret; 1756 1757 /* No available slot, submit all stripes and wait for them. */ 1758 if (sctx->cur_stripe >= SCRUB_STRIPES_PER_SCTX) { 1759 ret = flush_scrub_stripes(sctx); 1760 if (ret < 0) 1761 return ret; 1762 } 1763 1764 stripe = &sctx->stripes[sctx->cur_stripe]; 1765 1766 /* We can queue one stripe using the remaining slot. */ 1767 scrub_reset_stripe(stripe); 1768 ret = scrub_find_fill_first_stripe(bg, dev, physical, mirror_num, 1769 logical, length, stripe); 1770 /* Either >0 as no more extents or <0 for error. */ 1771 if (ret) 1772 return ret; 1773 sctx->cur_stripe++; 1774 return 0; 1775 } 1776 1777 static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx, 1778 struct btrfs_device *scrub_dev, 1779 struct btrfs_block_group *bg, 1780 struct map_lookup *map, 1781 u64 full_stripe_start) 1782 { 1783 DECLARE_COMPLETION_ONSTACK(io_done); 1784 struct btrfs_fs_info *fs_info = sctx->fs_info; 1785 struct btrfs_raid_bio *rbio; 1786 struct btrfs_io_context *bioc = NULL; 1787 struct bio *bio; 1788 struct scrub_stripe *stripe; 1789 bool all_empty = true; 1790 const int data_stripes = nr_data_stripes(map); 1791 unsigned long extent_bitmap = 0; 1792 u64 length = btrfs_stripe_nr_to_offset(data_stripes); 1793 int ret; 1794 1795 ASSERT(sctx->raid56_data_stripes); 1796 1797 for (int i = 0; i < data_stripes; i++) { 1798 int stripe_index; 1799 int rot; 1800 u64 physical; 1801 1802 stripe = &sctx->raid56_data_stripes[i]; 1803 rot = div_u64(full_stripe_start - bg->start, 1804 data_stripes) >> BTRFS_STRIPE_LEN_SHIFT; 1805 stripe_index = (i + rot) % map->num_stripes; 1806 physical = map->stripes[stripe_index].physical + 1807 btrfs_stripe_nr_to_offset(rot); 1808 1809 scrub_reset_stripe(stripe); 1810 set_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state); 1811 ret = scrub_find_fill_first_stripe(bg, 1812 map->stripes[stripe_index].dev, physical, 1, 1813 full_stripe_start + btrfs_stripe_nr_to_offset(i), 1814 BTRFS_STRIPE_LEN, stripe); 1815 if (ret < 0) 1816 goto out; 1817 /* 1818 * No extent in this data stripe, need to manually mark them 1819 * initialized to make later read submission happy. 1820 */ 1821 if (ret > 0) { 1822 stripe->logical = full_stripe_start + 1823 btrfs_stripe_nr_to_offset(i); 1824 stripe->dev = map->stripes[stripe_index].dev; 1825 stripe->mirror_num = 1; 1826 set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state); 1827 } 1828 } 1829 1830 /* Check if all data stripes are empty. 
*/ 1831 for (int i = 0; i < data_stripes; i++) { 1832 stripe = &sctx->raid56_data_stripes[i]; 1833 if (!bitmap_empty(&stripe->extent_sector_bitmap, stripe->nr_sectors)) { 1834 all_empty = false; 1835 break; 1836 } 1837 } 1838 if (all_empty) { 1839 ret = 0; 1840 goto out; 1841 } 1842 1843 for (int i = 0; i < data_stripes; i++) { 1844 stripe = &sctx->raid56_data_stripes[i]; 1845 scrub_submit_initial_read(sctx, stripe); 1846 } 1847 for (int i = 0; i < data_stripes; i++) { 1848 stripe = &sctx->raid56_data_stripes[i]; 1849 1850 wait_event(stripe->repair_wait, 1851 test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state)); 1852 } 1853 /* For now, no zoned support for RAID56. */ 1854 ASSERT(!btrfs_is_zoned(sctx->fs_info)); 1855 1856 /* Writeback for the repaired sectors. */ 1857 for (int i = 0; i < data_stripes; i++) { 1858 unsigned long repaired; 1859 1860 stripe = &sctx->raid56_data_stripes[i]; 1861 1862 bitmap_andnot(&repaired, &stripe->init_error_bitmap, 1863 &stripe->error_bitmap, stripe->nr_sectors); 1864 scrub_write_sectors(sctx, stripe, repaired, false); 1865 } 1866 1867 /* Wait for the above writebacks to finish. */ 1868 for (int i = 0; i < data_stripes; i++) { 1869 stripe = &sctx->raid56_data_stripes[i]; 1870 1871 wait_scrub_stripe_io(stripe); 1872 } 1873 1874 /* 1875 * Now all data stripes are properly verified. Check if we have any 1876 * unrepaired sectors, if so abort immediately or we could further corrupt the 1877 * P/Q stripes. 1878 * 1879 * During the loop, also populate extent_bitmap. 1880 */ 1881 for (int i = 0; i < data_stripes; i++) { 1882 unsigned long error; 1883 1884 stripe = &sctx->raid56_data_stripes[i]; 1885 1886 /* 1887 * We should only check the errors where there is an extent, 1888 * as we may hit an empty data stripe on a missing device. 1889 */ 1890 bitmap_and(&error, &stripe->error_bitmap, 1891 &stripe->extent_sector_bitmap, stripe->nr_sectors); 1892 if (!bitmap_empty(&error, stripe->nr_sectors)) { 1893 btrfs_err(fs_info, 1894 "unrepaired sectors detected, full stripe %llu data stripe %u errors %*pbl", 1895 full_stripe_start, i, stripe->nr_sectors, 1896 &error); 1897 ret = -EIO; 1898 goto out; 1899 } 1900 bitmap_or(&extent_bitmap, &extent_bitmap, 1901 &stripe->extent_sector_bitmap, stripe->nr_sectors); 1902 } 1903 1904 /* Now we can check and regenerate the P/Q stripe. */ 1905 bio = bio_alloc(NULL, 1, REQ_OP_READ, GFP_NOFS); 1906 bio->bi_iter.bi_sector = full_stripe_start >> SECTOR_SHIFT; 1907 bio->bi_private = &io_done; 1908 bio->bi_end_io = raid56_scrub_wait_endio; 1909 1910 btrfs_bio_counter_inc_blocked(fs_info); 1911 ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, full_stripe_start, 1912 &length, &bioc, NULL, NULL, 1); 1913 if (ret < 0) { 1914 btrfs_put_bioc(bioc); 1915 btrfs_bio_counter_dec(fs_info); 1916 goto out; 1917 } 1918 rbio = raid56_parity_alloc_scrub_rbio(bio, bioc, scrub_dev, &extent_bitmap, 1919 BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits); 1920 btrfs_put_bioc(bioc); 1921 if (!rbio) { 1922 ret = -ENOMEM; 1923 btrfs_bio_counter_dec(fs_info); 1924 goto out; 1925 } 1926 /* Use the recovered stripes as cache to avoid reading them from disk again.
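The cached pages already contain the repaired content verified above, so the parity check runs against known-good data.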
*/ 1927 for (int i = 0; i < data_stripes; i++) { 1928 stripe = &sctx->raid56_data_stripes[i]; 1929 1930 raid56_parity_cache_data_pages(rbio, stripe->pages, 1931 full_stripe_start + (i << BTRFS_STRIPE_LEN_SHIFT)); 1932 } 1933 raid56_parity_submit_scrub_rbio(rbio); 1934 wait_for_completion_io(&io_done); 1935 ret = blk_status_to_errno(bio->bi_status); 1936 bio_put(bio); 1937 btrfs_bio_counter_dec(fs_info); 1938 1939 out: 1940 return ret; 1941 } 1942 1943 /* 1944 * Scrub one range which can only have the simple mirror based profile. 1945 * (Including all ranges in SINGLE/DUP/RAID1/RAID1C*, and each stripe in 1946 * RAID0/RAID10). 1947 * 1948 * Since we may need to handle a subset of a block group, we need the @logical_start 1949 * and @logical_length parameters. 1950 */ 1951 static int scrub_simple_mirror(struct scrub_ctx *sctx, 1952 struct btrfs_block_group *bg, 1953 struct map_lookup *map, 1954 u64 logical_start, u64 logical_length, 1955 struct btrfs_device *device, 1956 u64 physical, int mirror_num) 1957 { 1958 struct btrfs_fs_info *fs_info = sctx->fs_info; 1959 const u64 logical_end = logical_start + logical_length; 1960 /* An artificial limit, inherited from the old scrub behavior */ 1961 struct btrfs_path path = { 0 }; 1962 u64 cur_logical = logical_start; 1963 int ret; 1964 1965 /* The range must be inside the bg */ 1966 ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length); 1967 1968 path.search_commit_root = 1; 1969 path.skip_locking = 1; 1970 /* Go through each extent item inside the logical range */ 1971 while (cur_logical < logical_end) { 1972 u64 cur_physical = physical + cur_logical - logical_start; 1973 1974 /* Canceled? */ 1975 if (atomic_read(&fs_info->scrub_cancel_req) || 1976 atomic_read(&sctx->cancel_req)) { 1977 ret = -ECANCELED; 1978 break; 1979 } 1980 /* Paused? */ 1981 if (atomic_read(&fs_info->scrub_pause_req)) { 1982 /* Push queued extents */ 1983 scrub_blocked_if_needed(fs_info); 1984 } 1985 /* Block group removed? */ 1986 spin_lock(&bg->lock); 1987 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) { 1988 spin_unlock(&bg->lock); 1989 ret = 0; 1990 break; 1991 } 1992 spin_unlock(&bg->lock); 1993 1994 ret = queue_scrub_stripe(sctx, bg, device, mirror_num, 1995 cur_logical, logical_end - cur_logical, 1996 cur_physical); 1997 if (ret > 0) { 1998 /* No more extents, just update the accounting */ 1999 sctx->stat.last_physical = physical + logical_length; 2000 ret = 0; 2001 break; 2002 } 2003 if (ret < 0) 2004 break; 2005 2006 ASSERT(sctx->cur_stripe > 0); 2007 cur_logical = sctx->stripes[sctx->cur_stripe - 1].logical 2008 + BTRFS_STRIPE_LEN; 2009 2010 /* Don't hold the CPU for too long */ 2011 cond_resched(); 2012 } 2013 btrfs_release_path(&path); 2014 return ret; 2015 } 2016 2017 /* Calculate the full stripe length for simple stripe based profiles */ 2018 static u64 simple_stripe_full_stripe_len(const struct map_lookup *map) 2019 { 2020 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 | 2021 BTRFS_BLOCK_GROUP_RAID10)); 2022 2023 return btrfs_stripe_nr_to_offset(map->num_stripes / map->sub_stripes); 2024 } 2025 2026 /* Get the logical bytenr for the stripe */ 2027 static u64 simple_stripe_get_logical(struct map_lookup *map, 2028 struct btrfs_block_group *bg, 2029 int stripe_index) 2030 { 2031 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 | 2032 BTRFS_BLOCK_GROUP_RAID10)); 2033 ASSERT(stripe_index < map->num_stripes); 2034 2035 /* 2036 * (stripe_index / sub_stripes) gives how many data stripes we need to 2037 * skip.
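* E.g. for RAID10 with sub_stripes == 2, stripe_index 0 and 1 both map to the first data stripe, while 2 and 3 map to the second.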
2038 */ 2039 return btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes) + 2040 bg->start; 2041 } 2042 2043 /* Get the mirror number for the stripe */ 2044 static int simple_stripe_mirror_num(struct map_lookup *map, int stripe_index) 2045 { 2046 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 | 2047 BTRFS_BLOCK_GROUP_RAID10)); 2048 ASSERT(stripe_index < map->num_stripes); 2049 2050 /* For RAID0, it's fixed to 1, for RAID10 it alternates 1,2,1,2... */ 2051 return stripe_index % map->sub_stripes + 1; 2052 } 2053 2054 static int scrub_simple_stripe(struct scrub_ctx *sctx, 2055 struct btrfs_block_group *bg, 2056 struct map_lookup *map, 2057 struct btrfs_device *device, 2058 int stripe_index) 2059 { 2060 const u64 logical_increment = simple_stripe_full_stripe_len(map); 2061 const u64 orig_logical = simple_stripe_get_logical(map, bg, stripe_index); 2062 const u64 orig_physical = map->stripes[stripe_index].physical; 2063 const int mirror_num = simple_stripe_mirror_num(map, stripe_index); 2064 u64 cur_logical = orig_logical; 2065 u64 cur_physical = orig_physical; 2066 int ret = 0; 2067 2068 while (cur_logical < bg->start + bg->length) { 2069 /* 2070 * Inside each stripe, RAID0 is just SINGLE, and RAID10 is 2071 * just RAID1, so we can reuse scrub_simple_mirror() to scrub 2072 * this stripe. 2073 */ 2074 ret = scrub_simple_mirror(sctx, bg, map, cur_logical, 2075 BTRFS_STRIPE_LEN, device, cur_physical, 2076 mirror_num); 2077 if (ret) 2078 return ret; 2079 /* Skip to the next stripe which belongs to the target device */ 2080 cur_logical += logical_increment; 2081 /* For the physical offset, we just go to the next stripe */ 2082 cur_physical += BTRFS_STRIPE_LEN; 2083 } 2084 return ret; 2085 } 2086 2087 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, 2088 struct btrfs_block_group *bg, 2089 struct extent_map *em, 2090 struct btrfs_device *scrub_dev, 2091 int stripe_index) 2092 { 2093 struct btrfs_fs_info *fs_info = sctx->fs_info; 2094 struct map_lookup *map = em->map_lookup; 2095 const u64 profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK; 2096 const u64 chunk_logical = bg->start; 2097 int ret; 2098 int ret2; 2099 u64 physical = map->stripes[stripe_index].physical; 2100 const u64 dev_stripe_len = btrfs_calc_stripe_length(em); 2101 const u64 physical_end = physical + dev_stripe_len; 2102 u64 logical; 2103 u64 logic_end; 2104 /* The logical increment after finishing one stripe */ 2105 u64 increment; 2106 /* Offset inside the chunk */ 2107 u64 offset; 2108 u64 stripe_logical; 2109 int stop_loop = 0; 2110 2111 scrub_blocked_if_needed(fs_info); 2112 2113 if (sctx->is_dev_replace && 2114 btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) { 2115 mutex_lock(&sctx->wr_lock); 2116 sctx->write_pointer = physical; 2117 mutex_unlock(&sctx->wr_lock); 2118 } 2119 2120 /* Prepare the extra data stripes used by RAID56.
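They are only needed when a P/Q stripe is hit, where the whole full stripe has to be read and verified before the parity can be checked.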
*/ 2121 if (profile & BTRFS_BLOCK_GROUP_RAID56_MASK) { 2122 ASSERT(sctx->raid56_data_stripes == NULL); 2123 2124 sctx->raid56_data_stripes = kcalloc(nr_data_stripes(map), 2125 sizeof(struct scrub_stripe), 2126 GFP_KERNEL); 2127 if (!sctx->raid56_data_stripes) { 2128 ret = -ENOMEM; 2129 goto out; 2130 } 2131 for (int i = 0; i < nr_data_stripes(map); i++) { 2132 ret = init_scrub_stripe(fs_info, 2133 &sctx->raid56_data_stripes[i]); 2134 if (ret < 0) 2135 goto out; 2136 sctx->raid56_data_stripes[i].bg = bg; 2137 sctx->raid56_data_stripes[i].sctx = sctx; 2138 } 2139 } 2140 /* 2141 * There used to be a big double loop to handle all profiles using the 2142 * same routine, which grew larger and harder to follow over time. 2143 * 2144 * So here we handle each profile differently, so that simpler profiles 2145 * have a simpler scrubbing function. 2146 */ 2147 if (!(profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10 | 2148 BTRFS_BLOCK_GROUP_RAID56_MASK))) { 2149 /* 2150 * The above check rules out all complex profiles, the remaining 2151 * profiles are SINGLE|DUP|RAID1|RAID1C*, which are simple 2152 * mirrored duplication without striping. 2153 * 2154 * Only @physical and @mirror_num need to be calculated using 2155 * @stripe_index. 2156 */ 2157 ret = scrub_simple_mirror(sctx, bg, map, bg->start, bg->length, 2158 scrub_dev, map->stripes[stripe_index].physical, 2159 stripe_index + 1); 2160 offset = 0; 2161 goto out; 2162 } 2163 if (profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) { 2164 ret = scrub_simple_stripe(sctx, bg, map, scrub_dev, stripe_index); 2165 offset = btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes); 2166 goto out; 2167 } 2168 2169 /* Only RAID56 goes through the old code */ 2170 ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK); 2171 ret = 0; 2172 2173 /* Calculate the logical end of the stripe */ 2174 get_raid56_logic_offset(physical_end, stripe_index, 2175 map, &logic_end, NULL); 2176 logic_end += chunk_logical; 2177 2178 /* Initialize @offset in case we need to go to the out: label */ 2179 get_raid56_logic_offset(physical, stripe_index, map, &offset, NULL); 2180 increment = btrfs_stripe_nr_to_offset(nr_data_stripes(map)); 2181 2182 /* 2183 * Due to the rotation, for RAID56 it's better to iterate each stripe 2184 * using its physical offset. 2185 */ 2186 while (physical < physical_end) { 2187 ret = get_raid56_logic_offset(physical, stripe_index, map, 2188 &logical, &stripe_logical); 2189 logical += chunk_logical; 2190 if (ret) { 2191 /* It is a parity stripe */ 2192 stripe_logical += chunk_logical; 2193 ret = scrub_raid56_parity_stripe(sctx, scrub_dev, bg, 2194 map, stripe_logical); 2195 if (ret) 2196 goto out; 2197 goto next; 2198 } 2199 2200 /* 2201 * Now we're at a data stripe, scrub each extent in the range. 2202 * 2203 * At this stage, if we ignore the repair part, inside each data 2204 * stripe it is no different from the SINGLE profile. 2205 * We can reuse scrub_simple_mirror() here, as the repair part 2206 * is still based on @mirror_num.
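* mirror_num is fixed to 1 below, as each RAID56 data stripe has only one direct on-disk copy.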
2207 */ 2208 ret = scrub_simple_mirror(sctx, bg, map, logical, BTRFS_STRIPE_LEN, 2209 scrub_dev, physical, 1); 2210 if (ret < 0) 2211 goto out; 2212 next: 2213 logical += increment; 2214 physical += BTRFS_STRIPE_LEN; 2215 spin_lock(&sctx->stat_lock); 2216 if (stop_loop) 2217 sctx->stat.last_physical = 2218 map->stripes[stripe_index].physical + dev_stripe_len; 2219 else 2220 sctx->stat.last_physical = physical; 2221 spin_unlock(&sctx->stat_lock); 2222 if (stop_loop) 2223 break; 2224 } 2225 out: 2226 ret2 = flush_scrub_stripes(sctx); 2227 if (!ret) 2228 ret = ret2; 2229 if (sctx->raid56_data_stripes) { 2230 for (int i = 0; i < nr_data_stripes(map); i++) 2231 release_scrub_stripe(&sctx->raid56_data_stripes[i]); 2232 kfree(sctx->raid56_data_stripes); 2233 sctx->raid56_data_stripes = NULL; 2234 } 2235 2236 if (sctx->is_dev_replace && ret >= 0) { 2237 int ret2; 2238 2239 ret2 = sync_write_pointer_for_zoned(sctx, 2240 chunk_logical + offset, 2241 map->stripes[stripe_index].physical, 2242 physical_end); 2243 if (ret2) 2244 ret = ret2; 2245 } 2246 2247 return ret < 0 ? ret : 0; 2248 } 2249 2250 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx, 2251 struct btrfs_block_group *bg, 2252 struct btrfs_device *scrub_dev, 2253 u64 dev_offset, 2254 u64 dev_extent_len) 2255 { 2256 struct btrfs_fs_info *fs_info = sctx->fs_info; 2257 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 2258 struct map_lookup *map; 2259 struct extent_map *em; 2260 int i; 2261 int ret = 0; 2262 2263 read_lock(&map_tree->lock); 2264 em = lookup_extent_mapping(map_tree, bg->start, bg->length); 2265 read_unlock(&map_tree->lock); 2266 2267 if (!em) { 2268 /* 2269 * Might have been an unused block group deleted by the cleaner 2270 * kthread or relocation. 2271 */ 2272 spin_lock(&bg->lock); 2273 if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) 2274 ret = -EINVAL; 2275 spin_unlock(&bg->lock); 2276 2277 return ret; 2278 } 2279 if (em->start != bg->start) 2280 goto out; 2281 if (em->len < dev_extent_len) 2282 goto out; 2283 2284 map = em->map_lookup; 2285 for (i = 0; i < map->num_stripes; ++i) { 2286 if (map->stripes[i].dev->bdev == scrub_dev->bdev && 2287 map->stripes[i].physical == dev_offset) { 2288 ret = scrub_stripe(sctx, bg, em, scrub_dev, i); 2289 if (ret) 2290 goto out; 2291 } 2292 } 2293 out: 2294 free_extent_map(em); 2295 2296 return ret; 2297 } 2298 2299 static int finish_extent_writes_for_zoned(struct btrfs_root *root, 2300 struct btrfs_block_group *cache) 2301 { 2302 struct btrfs_fs_info *fs_info = cache->fs_info; 2303 struct btrfs_trans_handle *trans; 2304 2305 if (!btrfs_is_zoned(fs_info)) 2306 return 0; 2307 2308 btrfs_wait_block_group_reservations(cache); 2309 btrfs_wait_nocow_writers(cache); 2310 btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, cache->length); 2311 2312 trans = btrfs_join_transaction(root); 2313 if (IS_ERR(trans)) 2314 return PTR_ERR(trans); 2315 return btrfs_commit_transaction(trans); 2316 } 2317 2318 static noinline_for_stack 2319 int scrub_enumerate_chunks(struct scrub_ctx *sctx, 2320 struct btrfs_device *scrub_dev, u64 start, u64 end) 2321 { 2322 struct btrfs_dev_extent *dev_extent = NULL; 2323 struct btrfs_path *path; 2324 struct btrfs_fs_info *fs_info = sctx->fs_info; 2325 struct btrfs_root *root = fs_info->dev_root; 2326 u64 chunk_offset; 2327 int ret = 0; 2328 int ro_set; 2329 int slot; 2330 struct extent_buffer *l; 2331 struct btrfs_key key; 2332 struct btrfs_key found_key; 2333 struct btrfs_block_group *cache; 2334 struct btrfs_dev_replace *dev_replace 
= &fs_info->dev_replace; 2335 2336 path = btrfs_alloc_path(); 2337 if (!path) 2338 return -ENOMEM; 2339 2340 path->reada = READA_FORWARD; 2341 path->search_commit_root = 1; 2342 path->skip_locking = 1; 2343 2344 key.objectid = scrub_dev->devid; 2345 key.offset = 0ull; 2346 key.type = BTRFS_DEV_EXTENT_KEY; 2347 2348 while (1) { 2349 u64 dev_extent_len; 2350 2351 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2352 if (ret < 0) 2353 break; 2354 if (ret > 0) { 2355 if (path->slots[0] >= 2356 btrfs_header_nritems(path->nodes[0])) { 2357 ret = btrfs_next_leaf(root, path); 2358 if (ret < 0) 2359 break; 2360 if (ret > 0) { 2361 ret = 0; 2362 break; 2363 } 2364 } else { 2365 ret = 0; 2366 } 2367 } 2368 2369 l = path->nodes[0]; 2370 slot = path->slots[0]; 2371 2372 btrfs_item_key_to_cpu(l, &found_key, slot); 2373 2374 if (found_key.objectid != scrub_dev->devid) 2375 break; 2376 2377 if (found_key.type != BTRFS_DEV_EXTENT_KEY) 2378 break; 2379 2380 if (found_key.offset >= end) 2381 break; 2382 2383 if (found_key.offset < key.offset) 2384 break; 2385 2386 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 2387 dev_extent_len = btrfs_dev_extent_length(l, dev_extent); 2388 2389 if (found_key.offset + dev_extent_len <= start) 2390 goto skip; 2391 2392 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); 2393 2394 /* 2395 * get a reference on the corresponding block group to prevent 2396 * the chunk from going away while we scrub it 2397 */ 2398 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 2399 2400 /* some chunks are removed but not committed to disk yet, 2401 * continue scrubbing */ 2402 if (!cache) 2403 goto skip; 2404 2405 ASSERT(cache->start <= chunk_offset); 2406 /* 2407 * We are using the commit root to search for device extents, so 2408 * that means we could have found a device extent item from a 2409 * block group that was deleted in the current transaction. The 2410 * logical start offset of the deleted block group, stored at 2411 * @chunk_offset, might be part of the logical address range of 2412 * a new block group (which uses different physical extents). 2413 * In this case btrfs_lookup_block_group() has returned the new 2414 * block group, and its start address is less than @chunk_offset. 2415 * 2416 * We skip such new block groups, because it's pointless to 2417 * process them, as we won't find their extents because we search 2418 * for them using the commit root of the extent tree. For a device 2419 * replace it's also fine to skip it, we won't miss copying them 2420 * to the target device because we have the write duplication 2421 * setup through the regular write path (by btrfs_map_block()), 2422 * and we have committed a transaction when we started the device 2423 * replace, right after setting up the device replace state. 2424 */ 2425 if (cache->start < chunk_offset) { 2426 btrfs_put_block_group(cache); 2427 goto skip; 2428 } 2429 2430 if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) { 2431 if (!test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags)) { 2432 btrfs_put_block_group(cache); 2433 goto skip; 2434 } 2435 } 2436 2437 /* 2438 * Make sure that while we are scrubbing the corresponding block 2439 * group doesn't get its logical address and its device extents 2440 * reused for another block group, which can possibly be of a 2441 * different type and different profile. We do this to prevent 2442 * false error detections and crashes due to bogus attempts to 2443 * repair extents. 
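* Freezing the block group below keeps its chunk mapping alive until we unfreeze it after this chunk has been scrubbed.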
2444 */ 2445 spin_lock(&cache->lock); 2446 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) { 2447 spin_unlock(&cache->lock); 2448 btrfs_put_block_group(cache); 2449 goto skip; 2450 } 2451 btrfs_freeze_block_group(cache); 2452 spin_unlock(&cache->lock); 2453 2454 /* 2455 * we need call btrfs_inc_block_group_ro() with scrubs_paused, 2456 * to avoid deadlock caused by: 2457 * btrfs_inc_block_group_ro() 2458 * -> btrfs_wait_for_commit() 2459 * -> btrfs_commit_transaction() 2460 * -> btrfs_scrub_pause() 2461 */ 2462 scrub_pause_on(fs_info); 2463 2464 /* 2465 * Don't do chunk preallocation for scrub. 2466 * 2467 * This is especially important for SYSTEM bgs, or we can hit 2468 * -EFBIG from btrfs_finish_chunk_alloc() like: 2469 * 1. The only SYSTEM bg is marked RO. 2470 * Since SYSTEM bg is small, that's pretty common. 2471 * 2. New SYSTEM bg will be allocated 2472 * Due to regular version will allocate new chunk. 2473 * 3. New SYSTEM bg is empty and will get cleaned up 2474 * Before cleanup really happens, it's marked RO again. 2475 * 4. Empty SYSTEM bg get scrubbed 2476 * We go back to 2. 2477 * 2478 * This can easily boost the amount of SYSTEM chunks if cleaner 2479 * thread can't be triggered fast enough, and use up all space 2480 * of btrfs_super_block::sys_chunk_array 2481 * 2482 * While for dev replace, we need to try our best to mark block 2483 * group RO, to prevent race between: 2484 * - Write duplication 2485 * Contains latest data 2486 * - Scrub copy 2487 * Contains data from commit tree 2488 * 2489 * If target block group is not marked RO, nocow writes can 2490 * be overwritten by scrub copy, causing data corruption. 2491 * So for dev-replace, it's not allowed to continue if a block 2492 * group is not RO. 2493 */ 2494 ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace); 2495 if (!ret && sctx->is_dev_replace) { 2496 ret = finish_extent_writes_for_zoned(root, cache); 2497 if (ret) { 2498 btrfs_dec_block_group_ro(cache); 2499 scrub_pause_off(fs_info); 2500 btrfs_put_block_group(cache); 2501 break; 2502 } 2503 } 2504 2505 if (ret == 0) { 2506 ro_set = 1; 2507 } else if (ret == -ENOSPC && !sctx->is_dev_replace && 2508 !(cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK)) { 2509 /* 2510 * btrfs_inc_block_group_ro return -ENOSPC when it 2511 * failed in creating new chunk for metadata. 2512 * It is not a problem for scrub, because 2513 * metadata are always cowed, and our scrub paused 2514 * commit_transactions. 2515 * 2516 * For RAID56 chunks, we have to mark them read-only 2517 * for scrub, as later we would use our own cache 2518 * out of RAID56 realm. 2519 * Thus we want the RAID56 bg to be marked RO to 2520 * prevent RMW from screwing up out cache. 2521 */ 2522 ro_set = 0; 2523 } else if (ret == -ETXTBSY) { 2524 btrfs_warn(fs_info, 2525 "skipping scrub of block group %llu due to active swapfile", 2526 cache->start); 2527 scrub_pause_off(fs_info); 2528 ret = 0; 2529 goto skip_unfreeze; 2530 } else { 2531 btrfs_warn(fs_info, 2532 "failed setting block group ro: %d", ret); 2533 btrfs_unfreeze_block_group(cache); 2534 btrfs_put_block_group(cache); 2535 scrub_pause_off(fs_info); 2536 break; 2537 } 2538 2539 /* 2540 * Now the target block is marked RO, wait for nocow writes to 2541 * finish before dev-replace. 2542 * COW is fine, as COW never overwrites extents in commit tree. 
2543 */ 2544 if (sctx->is_dev_replace) { 2545 btrfs_wait_nocow_writers(cache); 2546 btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, 2547 cache->length); 2548 } 2549 2550 scrub_pause_off(fs_info); 2551 down_write(&dev_replace->rwsem); 2552 dev_replace->cursor_right = found_key.offset + dev_extent_len; 2553 dev_replace->cursor_left = found_key.offset; 2554 dev_replace->item_needs_writeback = 1; 2555 up_write(&dev_replace->rwsem); 2556 2557 ret = scrub_chunk(sctx, cache, scrub_dev, found_key.offset, 2558 dev_extent_len); 2559 if (sctx->is_dev_replace && 2560 !btrfs_finish_block_group_to_copy(dev_replace->srcdev, 2561 cache, found_key.offset)) 2562 ro_set = 0; 2563 2564 down_write(&dev_replace->rwsem); 2565 dev_replace->cursor_left = dev_replace->cursor_right; 2566 dev_replace->item_needs_writeback = 1; 2567 up_write(&dev_replace->rwsem); 2568 2569 if (ro_set) 2570 btrfs_dec_block_group_ro(cache); 2571 2572 /* 2573 * We might have prevented the cleaner kthread from deleting 2574 * this block group if it was already unused because we raced 2575 * and set it to RO mode first. So add it back to the unused 2576 * list, otherwise it might not ever be deleted unless a manual 2577 * balance is triggered or it becomes used and unused again. 2578 */ 2579 spin_lock(&cache->lock); 2580 if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags) && 2581 !cache->ro && cache->reserved == 0 && cache->used == 0) { 2582 spin_unlock(&cache->lock); 2583 if (btrfs_test_opt(fs_info, DISCARD_ASYNC)) 2584 btrfs_discard_queue_work(&fs_info->discard_ctl, 2585 cache); 2586 else 2587 btrfs_mark_bg_unused(cache); 2588 } else { 2589 spin_unlock(&cache->lock); 2590 } 2591 skip_unfreeze: 2592 btrfs_unfreeze_block_group(cache); 2593 btrfs_put_block_group(cache); 2594 if (ret) 2595 break; 2596 if (sctx->is_dev_replace && 2597 atomic64_read(&dev_replace->num_write_errors) > 0) { 2598 ret = -EIO; 2599 break; 2600 } 2601 if (sctx->stat.malloc_errors > 0) { 2602 ret = -ENOMEM; 2603 break; 2604 } 2605 skip: 2606 key.offset = found_key.offset + dev_extent_len; 2607 btrfs_release_path(path); 2608 } 2609 2610 btrfs_free_path(path); 2611 2612 return ret; 2613 } 2614 2615 static int scrub_one_super(struct scrub_ctx *sctx, struct btrfs_device *dev, 2616 struct page *page, u64 physical, u64 generation) 2617 { 2618 struct btrfs_fs_info *fs_info = sctx->fs_info; 2619 struct bio_vec bvec; 2620 struct bio bio; 2621 struct btrfs_super_block *sb = page_address(page); 2622 int ret; 2623 2624 bio_init(&bio, dev->bdev, &bvec, 1, REQ_OP_READ); 2625 bio.bi_iter.bi_sector = physical >> SECTOR_SHIFT; 2626 __bio_add_page(&bio, page, BTRFS_SUPER_INFO_SIZE, 0); 2627 ret = submit_bio_wait(&bio); 2628 bio_uninit(&bio); 2629 2630 if (ret < 0) 2631 return ret; 2632 ret = btrfs_check_super_csum(fs_info, sb); 2633 if (ret != 0) { 2634 btrfs_err_rl(fs_info, 2635 "super block at physical %llu devid %llu has bad csum", 2636 physical, dev->devid); 2637 return -EIO; 2638 } 2639 if (btrfs_super_generation(sb) != generation) { 2640 btrfs_err_rl(fs_info, 2641 "super block at physical %llu devid %llu has bad generation %llu expect %llu", 2642 physical, dev->devid, 2643 btrfs_super_generation(sb), generation); 2644 return -EUCLEAN; 2645 } 2646 2647 return btrfs_validate_super(fs_info, sb, -1); 2648 } 2649 2650 static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx, 2651 struct btrfs_device *scrub_dev) 2652 { 2653 int i; 2654 u64 bytenr; 2655 u64 gen; 2656 int ret = 0; 2657 struct page *page; 2658 struct btrfs_fs_info *fs_info = sctx->fs_info; 
2659 2660 if (BTRFS_FS_ERROR(fs_info)) 2661 return -EROFS; 2662 2663 page = alloc_page(GFP_KERNEL); 2664 if (!page) { 2665 spin_lock(&sctx->stat_lock); 2666 sctx->stat.malloc_errors++; 2667 spin_unlock(&sctx->stat_lock); 2668 return -ENOMEM; 2669 } 2670 2671 /* Seed devices of a new filesystem have their own generation. */ 2672 if (scrub_dev->fs_devices != fs_info->fs_devices) 2673 gen = scrub_dev->generation; 2674 else 2675 gen = fs_info->last_trans_committed; 2676 2677 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { 2678 bytenr = btrfs_sb_offset(i); 2679 if (bytenr + BTRFS_SUPER_INFO_SIZE > 2680 scrub_dev->commit_total_bytes) 2681 break; 2682 if (!btrfs_check_super_location(scrub_dev, bytenr)) 2683 continue; 2684 2685 ret = scrub_one_super(sctx, scrub_dev, page, bytenr, gen); 2686 if (ret) { 2687 spin_lock(&sctx->stat_lock); 2688 sctx->stat.super_errors++; 2689 spin_unlock(&sctx->stat_lock); 2690 } 2691 } 2692 __free_page(page); 2693 return 0; 2694 } 2695 2696 static void scrub_workers_put(struct btrfs_fs_info *fs_info) 2697 { 2698 if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt, 2699 &fs_info->scrub_lock)) { 2700 struct workqueue_struct *scrub_workers = fs_info->scrub_workers; 2701 2702 fs_info->scrub_workers = NULL; 2703 mutex_unlock(&fs_info->scrub_lock); 2704 2705 if (scrub_workers) 2706 destroy_workqueue(scrub_workers); 2707 } 2708 } 2709 2710 /* 2711 * Get a reference count on fs_info->scrub_workers. Start workers if necessary. 2712 */ 2713 static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info, 2714 int is_dev_replace) 2715 { 2716 struct workqueue_struct *scrub_workers = NULL; 2717 unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND; 2718 int max_active = fs_info->thread_pool_size; 2719 int ret = -ENOMEM; 2720 2721 if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt)) 2722 return 0; 2723 2724 if (is_dev_replace) 2725 scrub_workers = alloc_ordered_workqueue("btrfs-scrub", flags); 2726 else 2727 scrub_workers = alloc_workqueue("btrfs-scrub", flags, max_active); 2728 if (!scrub_workers) 2729 return -ENOMEM; 2730 2731 mutex_lock(&fs_info->scrub_lock); 2732 if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) { 2733 ASSERT(fs_info->scrub_workers == NULL); 2734 fs_info->scrub_workers = scrub_workers; 2735 refcount_set(&fs_info->scrub_workers_refcnt, 1); 2736 mutex_unlock(&fs_info->scrub_lock); 2737 return 0; 2738 } 2739 /* Another thread raced in and created the workers for us */ 2740 refcount_inc(&fs_info->scrub_workers_refcnt); 2741 mutex_unlock(&fs_info->scrub_lock); 2742 2743 ret = 0; 2744 2745 destroy_workqueue(scrub_workers); 2746 return ret; 2747 } 2748 2749 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, 2750 u64 end, struct btrfs_scrub_progress *progress, 2751 int readonly, int is_dev_replace) 2752 { 2753 struct btrfs_dev_lookup_args args = { .devid = devid }; 2754 struct scrub_ctx *sctx; 2755 int ret; 2756 struct btrfs_device *dev; 2757 unsigned int nofs_flag; 2758 bool need_commit = false; 2759 2760 if (btrfs_fs_closing(fs_info)) 2761 return -EAGAIN; 2762 2763 /* At mount time we have ensured nodesize is in the range of [4K, 64K]. */ 2764 ASSERT(fs_info->nodesize <= BTRFS_STRIPE_LEN); 2765 2766 /* 2767 * SCRUB_MAX_SECTORS_PER_BLOCK is calculated using the largest possible 2768 * value (max nodesize / min sectorsize), thus nodesize should always 2769 * be fine.
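* (E.g. with a 4K sector size that allows up to 64K / 4K = 16 sectors per tree block.)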
2770 */ 2771 ASSERT(fs_info->nodesize <= 2772 SCRUB_MAX_SECTORS_PER_BLOCK << fs_info->sectorsize_bits); 2773 2774 /* Allocate outside of device_list_mutex */ 2775 sctx = scrub_setup_ctx(fs_info, is_dev_replace); 2776 if (IS_ERR(sctx)) 2777 return PTR_ERR(sctx); 2778 2779 ret = scrub_workers_get(fs_info, is_dev_replace); 2780 if (ret) 2781 goto out_free_ctx; 2782 2783 mutex_lock(&fs_info->fs_devices->device_list_mutex); 2784 dev = btrfs_find_device(fs_info->fs_devices, &args); 2785 if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) && 2786 !is_dev_replace)) { 2787 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2788 ret = -ENODEV; 2789 goto out; 2790 } 2791 2792 if (!is_dev_replace && !readonly && 2793 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) { 2794 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2795 btrfs_err_in_rcu(fs_info, 2796 "scrub on devid %llu: filesystem on %s is not writable", 2797 devid, btrfs_dev_name(dev)); 2798 ret = -EROFS; 2799 goto out; 2800 } 2801 2802 mutex_lock(&fs_info->scrub_lock); 2803 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || 2804 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) { 2805 mutex_unlock(&fs_info->scrub_lock); 2806 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2807 ret = -EIO; 2808 goto out; 2809 } 2810 2811 down_read(&fs_info->dev_replace.rwsem); 2812 if (dev->scrub_ctx || 2813 (!is_dev_replace && 2814 btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) { 2815 up_read(&fs_info->dev_replace.rwsem); 2816 mutex_unlock(&fs_info->scrub_lock); 2817 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2818 ret = -EINPROGRESS; 2819 goto out; 2820 } 2821 up_read(&fs_info->dev_replace.rwsem); 2822 2823 sctx->readonly = readonly; 2824 dev->scrub_ctx = sctx; 2825 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2826 2827 /* 2828 * checking @scrub_pause_req here, we can avoid 2829 * race between committing transaction and scrubbing. 2830 */ 2831 __scrub_blocked_if_needed(fs_info); 2832 atomic_inc(&fs_info->scrubs_running); 2833 mutex_unlock(&fs_info->scrub_lock); 2834 2835 /* 2836 * In order to avoid deadlock with reclaim when there is a transaction 2837 * trying to pause scrub, make sure we use GFP_NOFS for all the 2838 * allocations done at btrfs_scrub_sectors() and scrub_sectors_for_parity() 2839 * invoked by our callees. The pausing request is done when the 2840 * transaction commit starts, and it blocks the transaction until scrub 2841 * is paused (done at specific points at scrub_stripe() or right above 2842 * before incrementing fs_info->scrubs_running). 2843 */ 2844 nofs_flag = memalloc_nofs_save(); 2845 if (!is_dev_replace) { 2846 u64 old_super_errors; 2847 2848 spin_lock(&sctx->stat_lock); 2849 old_super_errors = sctx->stat.super_errors; 2850 spin_unlock(&sctx->stat_lock); 2851 2852 btrfs_info(fs_info, "scrub: started on devid %llu", devid); 2853 /* 2854 * by holding device list mutex, we can 2855 * kick off writing super in log tree sync. 2856 */ 2857 mutex_lock(&fs_info->fs_devices->device_list_mutex); 2858 ret = scrub_supers(sctx, dev); 2859 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2860 2861 spin_lock(&sctx->stat_lock); 2862 /* 2863 * Super block errors found, but we can not commit transaction 2864 * at current context, since btrfs_commit_transaction() needs 2865 * to pause the current running scrub (hold by ourselves). 
2866 */ 2867 if (sctx->stat.super_errors > old_super_errors && !sctx->readonly) 2868 need_commit = true; 2869 spin_unlock(&sctx->stat_lock); 2870 } 2871 2872 if (!ret) 2873 ret = scrub_enumerate_chunks(sctx, dev, start, end); 2874 memalloc_nofs_restore(nofs_flag); 2875 2876 atomic_dec(&fs_info->scrubs_running); 2877 wake_up(&fs_info->scrub_pause_wait); 2878 2879 if (progress) 2880 memcpy(progress, &sctx->stat, sizeof(*progress)); 2881 2882 if (!is_dev_replace) 2883 btrfs_info(fs_info, "scrub: %s on devid %llu with status: %d", 2884 ret ? "not finished" : "finished", devid, ret); 2885 2886 mutex_lock(&fs_info->scrub_lock); 2887 dev->scrub_ctx = NULL; 2888 mutex_unlock(&fs_info->scrub_lock); 2889 2890 scrub_workers_put(fs_info); 2891 scrub_put_ctx(sctx); 2892 2893 /* 2894 * We found some super block errors before, now try to force a 2895 * transaction commit, as scrub has finished. 2896 */ 2897 if (need_commit) { 2898 struct btrfs_trans_handle *trans; 2899 2900 trans = btrfs_start_transaction(fs_info->tree_root, 0); 2901 if (IS_ERR(trans)) { 2902 ret = PTR_ERR(trans); 2903 btrfs_err(fs_info, 2904 "scrub: failed to start transaction to fix super block errors: %d", ret); 2905 return ret; 2906 } 2907 ret = btrfs_commit_transaction(trans); 2908 if (ret < 0) 2909 btrfs_err(fs_info, 2910 "scrub: failed to commit transaction to fix super block errors: %d", ret); 2911 } 2912 return ret; 2913 out: 2914 scrub_workers_put(fs_info); 2915 out_free_ctx: 2916 scrub_free_ctx(sctx); 2917 2918 return ret; 2919 } 2920 2921 void btrfs_scrub_pause(struct btrfs_fs_info *fs_info) 2922 { 2923 mutex_lock(&fs_info->scrub_lock); 2924 atomic_inc(&fs_info->scrub_pause_req); 2925 while (atomic_read(&fs_info->scrubs_paused) != 2926 atomic_read(&fs_info->scrubs_running)) { 2927 mutex_unlock(&fs_info->scrub_lock); 2928 wait_event(fs_info->scrub_pause_wait, 2929 atomic_read(&fs_info->scrubs_paused) == 2930 atomic_read(&fs_info->scrubs_running)); 2931 mutex_lock(&fs_info->scrub_lock); 2932 } 2933 mutex_unlock(&fs_info->scrub_lock); 2934 } 2935 2936 void btrfs_scrub_continue(struct btrfs_fs_info *fs_info) 2937 { 2938 atomic_dec(&fs_info->scrub_pause_req); 2939 wake_up(&fs_info->scrub_pause_wait); 2940 } 2941 2942 int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info) 2943 { 2944 mutex_lock(&fs_info->scrub_lock); 2945 if (!atomic_read(&fs_info->scrubs_running)) { 2946 mutex_unlock(&fs_info->scrub_lock); 2947 return -ENOTCONN; 2948 } 2949 2950 atomic_inc(&fs_info->scrub_cancel_req); 2951 while (atomic_read(&fs_info->scrubs_running)) { 2952 mutex_unlock(&fs_info->scrub_lock); 2953 wait_event(fs_info->scrub_pause_wait, 2954 atomic_read(&fs_info->scrubs_running) == 0); 2955 mutex_lock(&fs_info->scrub_lock); 2956 } 2957 atomic_dec(&fs_info->scrub_cancel_req); 2958 mutex_unlock(&fs_info->scrub_lock); 2959 2960 return 0; 2961 } 2962 2963 int btrfs_scrub_cancel_dev(struct btrfs_device *dev) 2964 { 2965 struct btrfs_fs_info *fs_info = dev->fs_info; 2966 struct scrub_ctx *sctx; 2967 2968 mutex_lock(&fs_info->scrub_lock); 2969 sctx = dev->scrub_ctx; 2970 if (!sctx) { 2971 mutex_unlock(&fs_info->scrub_lock); 2972 return -ENOTCONN; 2973 } 2974 atomic_inc(&sctx->cancel_req); 2975 while (dev->scrub_ctx) { 2976 mutex_unlock(&fs_info->scrub_lock); 2977 wait_event(fs_info->scrub_pause_wait, 2978 dev->scrub_ctx == NULL); 2979 mutex_lock(&fs_info->scrub_lock); 2980 } 2981 mutex_unlock(&fs_info->scrub_lock); 2982 2983 return 0; 2984 } 2985 2986 int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid, 2987 struct btrfs_scrub_progress 
*progress) 2988 { 2989 struct btrfs_dev_lookup_args args = { .devid = devid }; 2990 struct btrfs_device *dev; 2991 struct scrub_ctx *sctx = NULL; 2992 2993 mutex_lock(&fs_info->fs_devices->device_list_mutex); 2994 dev = btrfs_find_device(fs_info->fs_devices, &args); 2995 if (dev) 2996 sctx = dev->scrub_ctx; 2997 if (sctx) 2998 memcpy(progress, &sctx->stat, sizeof(*progress)); 2999 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 3000 3001 return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV; 3002 } 3003