/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "raid56.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_block;
struct scrub_ctx;

/*
 * the following three values only influence the performance.
 * The last one configures the number of parallel and outstanding I/O
 * operations. The first two values configure an upper limit for the number
 * of (dynamically allocated) pages that are added to a bio.
 */
#define SCRUB_PAGES_PER_RD_BIO	32	/* 128k per bio */
#define SCRUB_PAGES_PER_WR_BIO	32	/* 128k per bio */
#define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */
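/*
 * Sizing sketch (an added summary, assuming 4KiB pages): one bio carries up
 * to 32 pages = 128KiB, and with 64 read bios per scrub context up to
 * 64 * 128KiB = 8MiB may be in flight per device, matching the comments on
 * the three defines above.
 */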
/*
 * the following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 * Values larger than BTRFS_STRIPE_LEN are not supported.
 */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */

struct scrub_page {
	struct scrub_block	*sblock;
	struct page		*page;
	struct btrfs_device	*dev;
	u64			flags;  /* extent flags */
	u64			generation;
	u64			logical;
	u64			physical;
	u64			physical_for_dev_replace;
	atomic_t		ref_count;
	struct {
		unsigned int	mirror_num:8;
		unsigned int	have_csum:1;
		unsigned int	io_error:1;
	};
	u8			csum[BTRFS_CSUM_SIZE];
};

struct scrub_bio {
	int			index;
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	struct bio		*bio;
	int			err;
	u64			logical;
	u64			physical;
#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
	struct scrub_page	*pagev[SCRUB_PAGES_PER_WR_BIO];
#else
	struct scrub_page	*pagev[SCRUB_PAGES_PER_RD_BIO];
#endif
	int			page_count;
	int			next_free;
	struct btrfs_work	work;
};

struct scrub_block {
	struct scrub_page	*pagev[SCRUB_MAX_PAGES_PER_BLOCK];
	int			page_count;
	atomic_t		outstanding_pages;
	atomic_t		ref_count; /* free mem on transition to zero */
	struct scrub_ctx	*sctx;
	struct {
		unsigned int	header_error:1;
		unsigned int	checksum_error:1;
		unsigned int	no_io_error_seen:1;
		unsigned int	generation_error:1; /* also sets header_error */
	};
};

struct scrub_wr_ctx {
	struct scrub_bio	*wr_curr_bio;
	struct btrfs_device	*tgtdev;
	int			pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
	atomic_t		flush_all_writes;
	struct mutex		wr_lock;
};

struct scrub_ctx {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
	struct btrfs_root	*dev_root;
	int			first_free;
	int			curr;
	atomic_t		bios_in_flight;
	atomic_t		workers_pending;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	u16			csum_size;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	int			pages_per_rd_bio;
	u32			sectorsize;
	u32			nodesize;
	u32			leafsize;

	int			is_dev_replace;
	struct scrub_wr_ctx	wr_ctx;

	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;
};

struct scrub_fixup_nodatasum {
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	u64			logical;
	struct btrfs_root	*root;
	struct btrfs_work	work;
	int			mirror_num;
};

struct scrub_nocow_inode {
	u64			inum;
	u64			offset;
	u64			root;
	struct list_head	list;
};

struct scrub_copy_nocow_ctx {
	struct scrub_ctx	*sctx;
	u64			logical;
	u64			len;
	int			mirror_num;
	u64			physical_for_dev_replace;
	struct list_head	inodes;
	struct btrfs_work	work;
};

struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	char			*scratch_buf;
	char			*msg_buf;
	const char		*errstr;
	sector_t		sector;
	u64			logical;
	struct btrfs_device	*dev;
	int			msg_bufsize;
	int			scratch_bufsize;
};
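/*
 * Ownership sketch (an added summary, not original text): a scrub_block
 * holds referenced scrub_page entries in pagev[]; both objects are freed via
 * scrub_block_put()/scrub_page_put() once their ref_count drops to zero, so
 * a page added to a bio pins its block until the bio completes.
 */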
static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
				     struct btrfs_fs_info *fs_info,
				     struct scrub_block *original_sblock,
				     u64 length, u64 logical,
				     struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock, int is_metadata,
				int have_csum, u8 *csum, u64 generation,
				u16 csum_size);
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
					 struct scrub_block *sblock,
					 int is_metadata, int have_csum,
					 const u8 *csum, u64 generation,
					 u16 csum_size);
static void scrub_complete_bio_end_io(struct bio *bio, int err);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good,
					     int force_write);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio, int err);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num);
static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
			      struct scrub_wr_ctx *wr_ctx,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_device *dev,
			      int is_dev_replace);
static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio, int err);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page);
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
				      struct scrub_copy_nocow_ctx *ctx);
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
			    int mirror_num, u64 physical_for_dev_replace);
static void copy_nocow_pages_worker(struct btrfs_work *work);


static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	atomic_inc(&sctx->bios_in_flight);
}

static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
}
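/*
 * Accounting sketch (an added summary of the pattern used below): callers do
 * scrub_pending_bio_inc() right before submitting a bio and the completion
 * worker does scrub_pending_bio_dec(), so a waiter can drain all outstanding
 * I/O with something like
 *	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
 */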
/*
 * used for workers that require transaction commits (i.e., for the
 * NOCOW case)
 */
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	/*
	 * increment scrubs_running to prevent cancel requests from
	 * completing as long as a worker is running. we must also
	 * increment scrubs_paused to prevent deadlocking on pause
	 * requests used for transaction commits (as the worker uses a
	 * transaction context). it is safe to regard the worker
	 * as paused for all practical matters. effectively, we only
	 * avoid cancellation requests from completing.
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrubs_running);
	atomic_inc(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_inc(&sctx->workers_pending);
}

/* used for workers that require transaction commits */
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	/*
	 * see scrub_pending_trans_workers_inc() why we're pretending
	 * to be paused in the scrub counters
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_dec(&fs_info->scrubs_running);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_dec(&sctx->workers_pending);
	wake_up(&fs_info->scrub_pause_wait);
	wake_up(&sctx->list_wait);
}

static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}

static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	scrub_free_wr_ctx(&sctx->wr_ctx);

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->page_count; i++) {
			WARN_ON(!sbio->pagev[i]->page);
			scrub_block_put(sbio->pagev[i]->sblock);
		}
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	scrub_free_csums(sctx);
	kfree(sctx);
}
static noinline_for_stack
struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int i;
	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
	int pages_per_rd_bio;
	int ret;

	/*
	 * the setting of pages_per_rd_bio is correct for scrub but might
	 * be wrong for the dev_replace code where we might read from
	 * different devices in the initial huge bios. However, that
	 * code is able to correctly handle the case when adding a page
	 * to a bio fails.
	 */
	if (dev->bdev)
		pages_per_rd_bio = min_t(int, SCRUB_PAGES_PER_RD_BIO,
					 bio_get_nr_vecs(dev->bdev));
	else
		pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
	sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
	if (!sctx)
		goto nomem;
	sctx->is_dev_replace = is_dev_replace;
	sctx->pages_per_rd_bio = pages_per_rd_bio;
	sctx->curr = -1;
	sctx->dev_root = dev->dev_root;
	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
		if (!sbio)
			goto nomem;
		sctx->bios[i] = sbio;

		sbio->index = i;
		sbio->sctx = sctx;
		sbio->page_count = 0;
		sbio->work.func = scrub_bio_end_io_worker;

		if (i != SCRUB_BIOS_PER_SCTX - 1)
			sctx->bios[i]->next_free = i + 1;
		else
			sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	sctx->nodesize = dev->dev_root->nodesize;
	sctx->leafsize = dev->dev_root->leafsize;
	sctx->sectorsize = dev->dev_root->sectorsize;
	atomic_set(&sctx->bios_in_flight, 0);
	atomic_set(&sctx->workers_pending, 0);
	atomic_set(&sctx->cancel_req, 0);
	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
	INIT_LIST_HEAD(&sctx->csum_list);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);

	ret = scrub_setup_wr_ctx(sctx, &sctx->wr_ctx, fs_info,
				 fs_info->dev_replace.tgtdev, is_dev_replace);
	if (ret) {
		scrub_free_ctx(sctx);
		return ERR_PTR(ret);
	}
	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}
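/*
 * Usage sketch (illustrative only, added): callers are expected to pair the
 * setup and teardown as
 *
 *	sctx = scrub_setup_ctx(dev, is_dev_replace);
 *	if (IS_ERR(sctx))
 *		return PTR_ERR(sctx);
 *	...
 *	scrub_free_ctx(sctx);
 *
 * note that the bios[] free list is threaded through next_free/first_free
 * and consumed by scrub_add_page_to_rd_bio() below.
 */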
static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
				     void *warn_ctx)
{
	u64 isize;
	u32 nlink;
	int ret;
	int i;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = warn_ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key root_key;

	root_key.objectid = root;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	ret = inode_item_info(inum, 0, local_root, swarn->path);
	if (ret) {
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
				    struct btrfs_inode_item);
	isize = btrfs_inode_size(eb, inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	ipath = init_ipath(4096, local_root, swarn->path);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;

	/*
	 * we deliberately ignore the bit ipath might have been too small to
	 * hold all of the paths here
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
			"%s, sector %llu, root %llu, inode %llu, offset %llu, "
			"length %llu, links %u (path: %s)\n", swarn->errstr,
			swarn->logical, rcu_str_deref(swarn->dev->name),
			(unsigned long long)swarn->sector, root, inum, offset,
			min(isize - offset, (u64)PAGE_SIZE), nlink,
			(char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	return 0;

err:
	printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
		"%s, sector %llu, root %llu, inode %llu, offset %llu: path "
		"resolving failed with ret=%d\n", swarn->errstr,
		swarn->logical, rcu_str_deref(swarn->dev->name),
		(unsigned long long)swarn->sector, root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}

static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	unsigned long ptr = 0;
	u64 extent_item_pos;
	u64 flags = 0;
	u64 ref_root;
	u32 item_size;
	u8 ref_level;
	const int bufsize = 4096;
	int ret;

	WARN_ON(sblock->page_count < 1);
	dev = sblock->pagev[0]->dev;
	fs_info = sblock->sctx->dev_root->fs_info;

	path = btrfs_alloc_path();

	swarn.scratch_buf = kmalloc(bufsize, GFP_NOFS);
	swarn.msg_buf = kmalloc(bufsize, GFP_NOFS);
	swarn.sector = (sblock->pagev[0]->physical) >> 9;
	swarn.logical = sblock->pagev[0]->logical;
	swarn.errstr = errstr;
	swarn.dev = NULL;
	swarn.msg_bufsize = bufsize;
	swarn.scratch_bufsize = bufsize;

	if (!path || !swarn.scratch_buf || !swarn.msg_buf)
		goto out;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
				  &flags);
	if (ret < 0)
		goto out;

	extent_item_pos = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
						      &ref_root, &ref_level);
			printk_in_rcu(KERN_WARNING
				"btrfs: %s at logical %llu on dev %s, "
				"sector %llu: metadata %s (level %d) in tree "
				"%llu\n", errstr, swarn.logical,
				rcu_str_deref(dev->name),
				(unsigned long long)swarn.sector,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
		btrfs_release_path(path);
	} else {
		btrfs_release_path(path);
		swarn.path = path;
		swarn.dev = dev;
		iterate_extent_inodes(fs_info, found_key.objectid,
				      extent_item_pos, 1,
				      scrub_print_warning_inode, &swarn);
	}

out:
	btrfs_free_path(path);
	kfree(swarn.scratch_buf);
	kfree(swarn.msg_buf);
}
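/*
 * Added note: for metadata the loop above walks all tree backrefs of the
 * extent item until tree_backref_for_extent() returns 1, printing one
 * warning per referencing tree; for data it resolves inodes and file paths
 * via iterate_extent_inodes() with scrub_print_warning_inode() as callback.
 */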
static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
{
	struct page *page = NULL;
	unsigned long index;
	struct scrub_fixup_nodatasum *fixup = fixup_ctx;
	int ret;
	int corrected = 0;
	struct btrfs_key key;
	struct inode *inode = NULL;
	struct btrfs_fs_info *fs_info;
	u64 end = offset + PAGE_SIZE - 1;
	struct btrfs_root *local_root;
	int srcu_index;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	fs_info = fixup->root->fs_info;
	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(local_root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return PTR_ERR(local_root);
	}

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	index = offset >> PAGE_CACHE_SHIFT;

	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page)) {
		if (PageDirty(page)) {
			/*
			 * we need to write the data to the defect sector. the
			 * data that was in that sector is not in memory,
			 * because the page was modified. we must not write the
			 * modified page to that sector.
			 *
			 * TODO: what could be done here: wait for the delalloc
			 *       runner to write out that page (might involve
			 *       COW) and see whether the sector is still
			 *       referenced afterwards.
			 *
			 * For the meantime, we'll treat this error as
			 * uncorrectable, although there is a chance that a
			 * later scrub will find the bad sector again and that
			 * there's no dirty page in memory then.
			 */
			ret = -EIO;
			goto out;
		}
		fs_info = BTRFS_I(inode)->root->fs_info;
		ret = repair_io_failure(fs_info, offset, PAGE_SIZE,
					fixup->logical, page,
					fixup->mirror_num);
		unlock_page(page);
		corrected = !ret;
	} else {
		/*
		 * we need to get good data first. the general readpage path
		 * will call repair_io_failure for us, we just have to make
		 * sure we read the bad mirror.
		 */
		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
				      EXTENT_DAMAGED, GFP_NOFS);
		if (ret) {
			/* set_extent_bits should give proper error */
			WARN_ON(ret > 0);
			if (ret > 0)
				ret = -EFAULT;
			goto out;
		}

		ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
					    btrfs_get_extent,
					    fixup->mirror_num);
		wait_on_page_locked(page);

		corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
					    end, EXTENT_DAMAGED, 0, NULL);
		if (!corrected)
			clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
					  EXTENT_DAMAGED, GFP_NOFS);
	}

out:
	if (page)
		put_page(page);
	if (inode)
		iput(inode);

	if (ret < 0)
		return ret;

	if (ret == 0 && corrected) {
		/*
		 * we only need to call readpage for one of the inodes belonging
		 * to this extent. so make iterate_extent_inodes stop
		 */
		return 1;
	}

	return -EIO;
}

static void scrub_fixup_nodatasum(struct btrfs_work *work)
{
	int ret;
	struct scrub_fixup_nodatasum *fixup;
	struct scrub_ctx *sctx;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	int uncorrectable = 0;

	fixup = container_of(work, struct scrub_fixup_nodatasum, work);
	sctx = fixup->sctx;
	fs_info = fixup->root->fs_info;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.malloc_errors;
		spin_unlock(&sctx->stat_lock);
		uncorrectable = 1;
		goto out;
	}

	trans = btrfs_join_transaction(fixup->root);
	if (IS_ERR(trans)) {
		uncorrectable = 1;
		goto out;
	}

	/*
	 * the idea is to trigger a regular read through the standard path. we
	 * read a page from the (failed) logical address by specifying the
	 * corresponding copynum of the failed sector. thus, that readpage is
	 * expected to fail.
	 * that is the point where on-the-fly error correction will kick in
	 * (once it's finished) and rewrite the failed sector if a good copy
	 * can be found.
	 */
	ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
					  path, scrub_fixup_readpage,
					  fixup);
	if (ret < 0) {
		uncorrectable = 1;
		goto out;
	}
	WARN_ON(ret != 1);

	spin_lock(&sctx->stat_lock);
	++sctx->stat.corrected_errors;
	spin_unlock(&sctx->stat_lock);

out:
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, fixup->root);
	if (uncorrectable) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.uncorrectable_errors;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_replace_stats_inc(
			&sctx->dev_root->fs_info->dev_replace.
			num_uncorrectable_read_errors);
		printk_ratelimited_in_rcu(KERN_ERR
			"btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n",
			fixup->logical, rcu_str_deref(fixup->dev->name));
	}

	btrfs_free_path(path);
	kfree(fixup);

	scrub_pending_trans_workers_dec(sctx);
}
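/*
 * Added note: iterate_inodes_from_logical() stops early when the callback
 * returns nonzero, which is why scrub_fixup_readpage() returns 1 after the
 * first successful repair; a return value of 1 here therefore means "fixed".
 */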
/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
	struct scrub_ctx *sctx = sblock_to_check->sctx;
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	u64 length;
	u64 logical;
	u64 generation;
	unsigned int failed_mirror_index;
	unsigned int is_metadata;
	unsigned int have_csum;
	u8 *csum;
	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
	struct scrub_block *sblock_bad;
	int ret;
	int mirror_index;
	int page_num;
	int success;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	BUG_ON(sblock_to_check->page_count < 1);
	fs_info = sctx->dev_root->fs_info;
	if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
		/*
		 * if we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		return 0;
	}
	length = sblock_to_check->page_count * PAGE_SIZE;
	logical = sblock_to_check->pagev[0]->logical;
	generation = sblock_to_check->pagev[0]->generation;
	BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
	failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
	is_metadata = !(sblock_to_check->pagev[0]->flags &
			BTRFS_EXTENT_FLAG_DATA);
	have_csum = sblock_to_check->pagev[0]->have_csum;
	csum = sblock_to_check->pagev[0]->csum;
	dev = sblock_to_check->pagev[0]->dev;

	if (sctx->is_dev_replace && !is_metadata && !have_csum) {
		sblocks_for_recheck = NULL;
		goto nodatasum_case;
	}

	/*
	 * read all mirrors one after the other. This includes re-reading
	 * the extent or metadata block that failed (that was the cause
	 * that this fixup code is called) another time, page by page this
	 * time in order to know which pages caused I/O errors and which
	 * ones are good (for all mirrors).
	 * It is the goal to handle the situation when more than one
	 * mirror contains I/O errors, but the errors do not
	 * overlap, i.e. the data can be repaired by selecting the
	 * pages from those mirrors without I/O error on the
	 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
	 * would be that mirror #1 has an I/O error on the first page,
	 * the second page is good, and mirror #2 has an I/O error on
	 * the second page, but the first page is good.
	 * Then the first page of the first mirror can be repaired by
	 * taking the first page of the second mirror, and the
	 * second page of the second mirror can be repaired by
	 * copying the contents of the 2nd page of the 1st mirror.
	 * One more note: if the pages of one mirror contain I/O
	 * errors, the checksum cannot be verified. In order to get
	 * the best data for repairing, the first attempt is to find
	 * a mirror without I/O errors and with a validated checksum.
	 * Only if this is not possible, the pages are picked from
	 * mirrors with I/O errors without considering the checksum.
	 * If the latter is the case, at the end, the checksum of the
	 * repaired area is verified in order to correctly maintain
	 * the statistics.
	 */
	sblocks_for_recheck = kzalloc(BTRFS_MAX_MIRRORS *
				      sizeof(*sblocks_for_recheck),
				      GFP_NOFS);
	if (!sblocks_for_recheck) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}

	/* setup the context, map the logical blocks and alloc the pages */
	ret = scrub_setup_recheck_block(sctx, fs_info, sblock_to_check, length,
					logical, sblocks_for_recheck);
	if (ret) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}
	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
	sblock_bad = sblocks_for_recheck + failed_mirror_index;

	/* build and submit the bios for the failed mirror, check checksums */
	scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
			    csum, generation, sctx->csum_size);

	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
	    sblock_bad->no_io_error_seen) {
		/*
		 * the error disappeared after reading page by page, or
		 * the area was part of a huge bio and other parts of the
		 * bio caused I/O errors, or the block layer merged several
		 * read requests into one and the error is caused by a
		 * different bio (usually one of the two latter cases is
		 * the cause)
		 */
		spin_lock(&sctx->stat_lock);
		sctx->stat.unverified_errors++;
		spin_unlock(&sctx->stat_lock);

		if (sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock_bad);
		goto out;
	}

	if (!sblock_bad->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("i/o error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
	} else if (sblock_bad->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.csum_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev,
					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
	} else if (sblock_bad->header_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.verify_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum/header error",
					    sblock_to_check);
		if (sblock_bad->generation_error)
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
	}

	if (sctx->readonly) {
		ASSERT(!sctx->is_dev_replace);
		goto out;
	}

	if (!is_metadata && !have_csum) {
		struct scrub_fixup_nodatasum *fixup_nodatasum;

nodatasum_case:
		WARN_ON(sctx->is_dev_replace);

		/*
		 * !is_metadata and !have_csum, this means that the data
		 * might not be COW'ed, that it might be modified
		 * concurrently. The general strategy to work on the
		 * commit root does not help in the case when COW is not
		 * used.
		 */
		fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
		if (!fixup_nodatasum)
			goto did_not_correct_error;
		fixup_nodatasum->sctx = sctx;
		fixup_nodatasum->dev = dev;
		fixup_nodatasum->logical = logical;
		fixup_nodatasum->root = fs_info->extent_root;
		fixup_nodatasum->mirror_num = failed_mirror_index + 1;
		scrub_pending_trans_workers_inc(sctx);
		fixup_nodatasum->work.func = scrub_fixup_nodatasum;
		btrfs_queue_worker(&fs_info->scrub_workers,
				   &fixup_nodatasum->work);
		goto out;
	}

	/*
	 * now build and submit the bios for the other mirrors, check
	 * checksums.
	 * First try to pick the mirror which is completely without I/O
	 * errors and also does not have a checksum error.
	 * If one is found, and if a checksum is present, the full block
	 * that is known to contain an error is rewritten. Afterwards
	 * the block is known to be corrected.
	 * If a mirror is found which is completely correct, and no
	 * checksum is present, only those pages are rewritten that had
	 * an I/O error in the block to be repaired, since it cannot be
	 * determined, which copy of the other pages is better (and it
	 * could happen otherwise that a correct page would be
	 * overwritten by a bad one).
	 */
	for (mirror_index = 0;
	     mirror_index < BTRFS_MAX_MIRRORS &&
	     sblocks_for_recheck[mirror_index].page_count > 0;
	     mirror_index++) {
		struct scrub_block *sblock_other;

		if (mirror_index == failed_mirror_index)
			continue;
		sblock_other = sblocks_for_recheck + mirror_index;

		/* build and submit the bios, check checksums */
		scrub_recheck_block(fs_info, sblock_other, is_metadata,
				    have_csum, csum, generation,
				    sctx->csum_size);

		if (!sblock_other->header_error &&
		    !sblock_other->checksum_error &&
		    sblock_other->no_io_error_seen) {
			if (sctx->is_dev_replace) {
				scrub_write_block_to_dev_replace(sblock_other);
			} else {
				int force_write = is_metadata || have_csum;

				ret = scrub_repair_block_from_good_copy(
						sblock_bad, sblock_other,
						force_write);
			}
			if (0 == ret)
				goto corrected_error;
		}
	}

	/*
	 * for dev_replace, pick good pages and write to the target device.
	 */
	if (sctx->is_dev_replace) {
		success = 1;
		for (page_num = 0; page_num < sblock_bad->page_count;
		     page_num++) {
			int sub_success;

			sub_success = 0;
			for (mirror_index = 0;
			     mirror_index < BTRFS_MAX_MIRRORS &&
			     sblocks_for_recheck[mirror_index].page_count > 0;
			     mirror_index++) {
				struct scrub_block *sblock_other =
					sblocks_for_recheck + mirror_index;
				struct scrub_page *page_other =
					sblock_other->pagev[page_num];

				if (!page_other->io_error) {
					ret = scrub_write_page_to_dev_replace(
							sblock_other, page_num);
					if (ret == 0) {
						/* succeeded for this page */
						sub_success = 1;
						break;
					} else {
						btrfs_dev_replace_stats_inc(
							&sctx->dev_root->
							fs_info->dev_replace.
							num_write_errors);
					}
				}
			}

			if (!sub_success) {
				/*
				 * did not find a mirror to fetch the page
				 * from. scrub_write_page_to_dev_replace()
				 * handles this case (page->io_error), by
				 * filling the block with zeros before
				 * submitting the write request
				 */
				success = 0;
				ret = scrub_write_page_to_dev_replace(
						sblock_bad, page_num);
				if (ret)
					btrfs_dev_replace_stats_inc(
						&sctx->dev_root->fs_info->
						dev_replace.num_write_errors);
			}
		}

		goto out;
	}
	/*
	 * for regular scrub, repair those pages that are errored.
	 * In case of I/O errors in the area that is supposed to be
	 * repaired, continue by picking good copies of those pages.
	 * Select the good pages from mirrors to rewrite bad pages from
	 * the area to fix. Afterwards verify the checksum of the block
	 * that is supposed to be repaired. This verification step is
	 * only done for the purpose of statistic counting and for the
	 * final scrub report, whether errors remain.
	 * A perfect algorithm could make use of the checksum and try
	 * all possible combinations of pages from the different mirrors
	 * until the checksum verification succeeds. For example, when
	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
	 * of mirror #2 is readable but the final checksum test fails,
	 * then the 2nd page of mirror #3 could be tried, whether now
	 * the final checksum succeeds. But this would be a rare
	 * exception and is therefore not implemented. At least it is
	 * avoided that the good copy is overwritten.
	 * A more useful improvement would be to pick the sectors
	 * without I/O error based on sector sizes (512 bytes on legacy
	 * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
	 * mirror could be repaired by taking 512 bytes of a different
	 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
	 * area are unreadable.
	 */

	/* can only fix I/O errors from here on */
	if (sblock_bad->no_io_error_seen)
		goto did_not_correct_error;

	success = 1;
	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
		struct scrub_page *page_bad = sblock_bad->pagev[page_num];

		if (!page_bad->io_error)
			continue;

		for (mirror_index = 0;
		     mirror_index < BTRFS_MAX_MIRRORS &&
		     sblocks_for_recheck[mirror_index].page_count > 0;
		     mirror_index++) {
			struct scrub_block *sblock_other = sblocks_for_recheck +
							   mirror_index;
			struct scrub_page *page_other = sblock_other->pagev[
							page_num];

			if (!page_other->io_error) {
				ret = scrub_repair_page_from_good_copy(
					sblock_bad, sblock_other, page_num, 0);
				if (0 == ret) {
					page_bad->io_error = 0;
					break; /* succeeded for this page */
				}
			}
		}

		if (page_bad->io_error) {
			/* did not find a mirror to copy the page from */
			success = 0;
		}
	}

	if (success) {
		if (is_metadata || have_csum) {
			/*
			 * need to verify the checksum now that all
			 * sectors on disk are repaired (the write
			 * request for data to be repaired is on its way).
			 * Just be lazy and use scrub_recheck_block()
			 * which re-reads the data before the checksum
			 * is verified, but most likely the data comes out
			 * of the page cache.
			 */
			scrub_recheck_block(fs_info, sblock_bad,
					    is_metadata, have_csum, csum,
					    generation, sctx->csum_size);
			if (!sblock_bad->header_error &&
			    !sblock_bad->checksum_error &&
			    sblock_bad->no_io_error_seen)
				goto corrected_error;
			else
				goto did_not_correct_error;
		} else {
corrected_error:
			spin_lock(&sctx->stat_lock);
			sctx->stat.corrected_errors++;
			spin_unlock(&sctx->stat_lock);
			printk_ratelimited_in_rcu(KERN_ERR
				"btrfs: fixed up error at logical %llu on dev %s\n",
				logical, rcu_str_deref(dev->name));
		}
	} else {
did_not_correct_error:
		spin_lock(&sctx->stat_lock);
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		printk_ratelimited_in_rcu(KERN_ERR
			"btrfs: unable to fixup (regular) error at logical %llu on dev %s\n",
			logical, rcu_str_deref(dev->name));
	}

out:
	if (sblocks_for_recheck) {
		for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
		     mirror_index++) {
			struct scrub_block *sblock = sblocks_for_recheck +
						     mirror_index;
			int page_index;

			for (page_index = 0; page_index < sblock->page_count;
			     page_index++) {
				sblock->pagev[page_index]->sblock = NULL;
				scrub_page_put(sblock->pagev[page_index]);
			}
		}
		kfree(sblocks_for_recheck);
	}

	return 0;
}

static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
				     struct btrfs_fs_info *fs_info,
				     struct scrub_block *original_sblock,
				     u64 length, u64 logical,
				     struct scrub_block *sblocks_for_recheck)
{
	int page_index;
	int mirror_index;
	int ret;

	/*
	 * note: the two members ref_count and outstanding_pages
	 * are not used (and not set) in the blocks that are used for
	 * the recheck procedure
	 */

	page_index = 0;
	while (length > 0) {
		u64 sublen = min_t(u64, length, PAGE_SIZE);
		u64 mapped_length = sublen;
		struct btrfs_bio *bbio = NULL;

		/*
		 * with a length of PAGE_SIZE, each returned stripe
		 * represents one mirror
		 */
		ret = btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS, logical,
				      &mapped_length, &bbio, 0);
		if (ret || !bbio || mapped_length < sublen) {
			kfree(bbio);
			return -EIO;
		}

		BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO);
		for (mirror_index = 0; mirror_index < (int)bbio->num_stripes;
		     mirror_index++) {
			struct scrub_block *sblock;
			struct scrub_page *page;

			if (mirror_index >= BTRFS_MAX_MIRRORS)
				continue;

			sblock = sblocks_for_recheck + mirror_index;
			sblock->sctx = sctx;
			page = kzalloc(sizeof(*page), GFP_NOFS);
			if (!page) {
leave_nomem:
				spin_lock(&sctx->stat_lock);
				sctx->stat.malloc_errors++;
				spin_unlock(&sctx->stat_lock);
				kfree(bbio);
				return -ENOMEM;
			}
			scrub_page_get(page);
			sblock->pagev[page_index] = page;
			page->logical = logical;
			page->physical = bbio->stripes[mirror_index].physical;
			BUG_ON(page_index >= original_sblock->page_count);
			page->physical_for_dev_replace =
				original_sblock->pagev[page_index]->
				physical_for_dev_replace;
			/* for missing devices, dev->bdev is NULL */
			page->dev = bbio->stripes[mirror_index].dev;
			page->mirror_num = mirror_index + 1;
			sblock->page_count++;
			page->page = alloc_page(GFP_NOFS);
			if (!page->page)
				goto leave_nomem;
		}
		kfree(bbio);
		length -= sublen;
		logical += sublen;
		page_index++;
	}

	return 0;
}
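/*
 * Added illustration: for a RAID1 chunk, btrfs_map_block() with
 * REQ_GET_READ_MIRRORS and a PAGE_SIZE length returns bbio->num_stripes == 2,
 * so page_index N of mirror 1 and mirror 2 describe the same logical page at
 * two different physical locations; the recheck code exploits exactly that.
 */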
/*
 * this function will check the on disk data for checksum errors, header
 * errors and read I/O errors. If any I/O errors happen, the exact pages
 * which are errored are marked as being bad. The goal is to enable scrub
 * to take those pages that are not errored from all the mirrors so that
 * the pages that are errored in the just handled mirror can be repaired.
 */
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock, int is_metadata,
				int have_csum, u8 *csum, u64 generation,
				u16 csum_size)
{
	int page_num;

	sblock->no_io_error_seen = 1;
	sblock->header_error = 0;
	sblock->checksum_error = 0;

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		struct bio *bio;
		struct scrub_page *page = sblock->pagev[page_num];
		DECLARE_COMPLETION_ONSTACK(complete);

		if (page->dev->bdev == NULL) {
			page->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}

		WARN_ON(!page->page);
		bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
		if (!bio) {
			page->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}
		bio->bi_bdev = page->dev->bdev;
		bio->bi_sector = page->physical >> 9;
		bio->bi_end_io = scrub_complete_bio_end_io;
		bio->bi_private = &complete;

		bio_add_page(bio, page->page, PAGE_SIZE, 0);
		btrfsic_submit_bio(READ, bio);

		/* this will also unplug the queue */
		wait_for_completion(&complete);

		page->io_error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
		if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
			sblock->no_io_error_seen = 0;
		bio_put(bio);
	}

	if (sblock->no_io_error_seen)
		scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
					     have_csum, csum, generation,
					     csum_size);

	return;
}

static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
					 struct scrub_block *sblock,
					 int is_metadata, int have_csum,
					 const u8 *csum, u64 generation,
					 u16 csum_size)
{
	int page_num;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u32 crc = ~(u32)0;
	void *mapped_buffer;

	WARN_ON(!sblock->pagev[0]->page);
	if (is_metadata) {
		struct btrfs_header *h;

		mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
		h = (struct btrfs_header *)mapped_buffer;

		if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h) ||
		    memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE) ||
		    memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
			   BTRFS_UUID_SIZE)) {
			sblock->header_error = 1;
		} else if (generation != btrfs_stack_header_generation(h)) {
			sblock->header_error = 1;
			sblock->generation_error = 1;
		}
		csum = h->csum;
	} else {
		if (!have_csum)
			return;

		mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
	}

	for (page_num = 0;;) {
		if (page_num == 0 && is_metadata)
			crc = btrfs_csum_data(
				((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
				crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
		else
			crc = btrfs_csum_data(mapped_buffer, crc, PAGE_SIZE);

		kunmap_atomic(mapped_buffer);
		page_num++;
		if (page_num >= sblock->page_count)
			break;
		WARN_ON(!sblock->pagev[page_num]->page);

		mapped_buffer = kmap_atomic(sblock->pagev[page_num]->page);
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, csum, csum_size))
		sblock->checksum_error = 1;
}

static void scrub_complete_bio_end_io(struct bio *bio, int err)
{
	complete((struct completion *)bio->bi_private);
}

static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good,
					     int force_write)
{
	int page_num;
	int ret = 0;

	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
		int ret_sub;

		ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
							   sblock_good,
							   page_num,
							   force_write);
		if (ret_sub)
			ret = ret_sub;
	}

	return ret;
}

static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write)
{
	struct scrub_page *page_bad = sblock_bad->pagev[page_num];
	struct scrub_page *page_good = sblock_good->pagev[page_num];

	BUG_ON(page_bad->page == NULL);
	BUG_ON(page_good->page == NULL);
	if (force_write || sblock_bad->header_error ||
	    sblock_bad->checksum_error || page_bad->io_error) {
		struct bio *bio;
		int ret;
		DECLARE_COMPLETION_ONSTACK(complete);

		if (!page_bad->dev->bdev) {
			printk_ratelimited(KERN_WARNING
				"btrfs: scrub_repair_page_from_good_copy(bdev == NULL) is unexpected!\n");
			return -EIO;
		}

		bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
		if (!bio)
			return -EIO;
		bio->bi_bdev = page_bad->dev->bdev;
		bio->bi_sector = page_bad->physical >> 9;
		bio->bi_end_io = scrub_complete_bio_end_io;
		bio->bi_private = &complete;

		ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
		if (PAGE_SIZE != ret) {
			bio_put(bio);
			return -EIO;
		}
		btrfsic_submit_bio(WRITE, bio);

		/* this will also unplug the queue */
		wait_for_completion(&complete);
		if (!bio_flagged(bio, BIO_UPTODATE)) {
			btrfs_dev_stat_inc_and_print(page_bad->dev,
				BTRFS_DEV_STAT_WRITE_ERRS);
			btrfs_dev_replace_stats_inc(
				&sblock_bad->sctx->dev_root->fs_info->
				dev_replace.num_write_errors);
			bio_put(bio);
			return -EIO;
		}
		bio_put(bio);
	}

	return 0;
}

static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
{
	int page_num;

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		int ret;

		ret = scrub_write_page_to_dev_replace(sblock, page_num);
		if (ret)
			btrfs_dev_replace_stats_inc(
				&sblock->sctx->dev_root->fs_info->dev_replace.
				num_write_errors);
	}
}
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num)
{
	struct scrub_page *spage = sblock->pagev[page_num];

	BUG_ON(spage->page == NULL);
	if (spage->io_error) {
		void *mapped_buffer = kmap_atomic(spage->page);

		memset(mapped_buffer, 0, PAGE_CACHE_SIZE);
		flush_dcache_page(spage->page);
		kunmap_atomic(mapped_buffer);
	}
	return scrub_add_page_to_wr_bio(sblock->sctx, spage);
}

static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage)
{
	struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
	struct scrub_bio *sbio;
	int ret;

	mutex_lock(&wr_ctx->wr_lock);
again:
	if (!wr_ctx->wr_curr_bio) {
		wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
					      GFP_NOFS);
		if (!wr_ctx->wr_curr_bio) {
			mutex_unlock(&wr_ctx->wr_lock);
			return -ENOMEM;
		}
		wr_ctx->wr_curr_bio->sctx = sctx;
		wr_ctx->wr_curr_bio->page_count = 0;
	}
	sbio = wr_ctx->wr_curr_bio;
	if (sbio->page_count == 0) {
		struct bio *bio;

		sbio->physical = spage->physical_for_dev_replace;
		sbio->logical = spage->logical;
		sbio->dev = wr_ctx->tgtdev;
		bio = sbio->bio;
		if (!bio) {
			bio = btrfs_io_bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio);
			if (!bio) {
				mutex_unlock(&wr_ctx->wr_lock);
				return -ENOMEM;
			}
			sbio->bio = bio;
		}

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_wr_bio_end_io;
		bio->bi_bdev = sbio->dev->bdev;
		bio->bi_sector = sbio->physical >> 9;
		sbio->err = 0;
	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
		   spage->physical_for_dev_replace ||
		   sbio->logical + sbio->page_count * PAGE_SIZE !=
		   spage->logical) {
		scrub_wr_submit(sctx);
		goto again;
	}

	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
		if (sbio->page_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			mutex_unlock(&wr_ctx->wr_lock);
			return -EIO;
		}
		scrub_wr_submit(sctx);
		goto again;
	}

	sbio->pagev[sbio->page_count] = spage;
	scrub_page_get(spage);
	sbio->page_count++;
	if (sbio->page_count == wr_ctx->pages_per_wr_bio)
		scrub_wr_submit(sctx);
	mutex_unlock(&wr_ctx->wr_lock);

	return 0;
}
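/*
 * Added note on the pattern above: a page is only appended while the bio
 * stays physically and logically contiguous; on any discontiguity, a full
 * bio, or a failed bio_add_page(), the current bio is submitted and the
 * "goto again" retries with a fresh wr_curr_bio. The read path uses the
 * same idiom in scrub_add_page_to_rd_bio() further below.
 */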
static void scrub_wr_submit(struct scrub_ctx *sctx)
{
	struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
	struct scrub_bio *sbio;

	if (!wr_ctx->wr_curr_bio)
		return;

	sbio = wr_ctx->wr_curr_bio;
	wr_ctx->wr_curr_bio = NULL;
	WARN_ON(!sbio->bio->bi_bdev);
	scrub_pending_bio_inc(sctx);
	/* process all writes in a single worker thread. Then the block layer
	 * orders the requests before sending them to the driver which
	 * doubled the write performance on spinning disks when measured
	 * with Linux 3.5 */
	btrfsic_submit_bio(WRITE, sbio->bio);
}

static void scrub_wr_bio_end_io(struct bio *bio, int err)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;

	sbio->err = err;
	sbio->bio = bio;

	sbio->work.func = scrub_wr_bio_end_io_worker;
	btrfs_queue_worker(&fs_info->scrub_wr_completion_workers, &sbio->work);
}

static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
	if (sbio->err) {
		struct btrfs_dev_replace *dev_replace =
			&sbio->sctx->dev_root->fs_info->dev_replace;

		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			btrfs_dev_replace_stats_inc(&dev_replace->
						    num_write_errors);
		}
	}

	for (i = 0; i < sbio->page_count; i++)
		scrub_page_put(sbio->pagev[i]);

	bio_put(sbio->bio);
	kfree(sbio);
	scrub_pending_bio_dec(sctx);
}

static int scrub_checksum(struct scrub_block *sblock)
{
	u64 flags;
	int ret;

	WARN_ON(sblock->page_count < 1);
	flags = sblock->pagev[0]->flags;
	ret = 0;
	if (flags & BTRFS_EXTENT_FLAG_DATA)
		ret = scrub_checksum_data(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		ret = scrub_checksum_tree_block(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_SUPER)
		(void)scrub_checksum_super(sblock);
	else
		WARN_ON(1);
	if (ret)
		scrub_handle_errored_block(sblock);

	return ret;
}

static int scrub_checksum_data(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	u8 csum[BTRFS_CSUM_SIZE];
	u8 *on_disk_csum;
	struct page *page;
	void *buffer;
	u32 crc = ~(u32)0;
	int fail = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	if (!sblock->pagev[0]->have_csum)
		return 0;

	on_disk_csum = sblock->pagev[0]->csum;
	page = sblock->pagev[0]->page;
	buffer = kmap_atomic(page);

	len = sctx->sectorsize;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, PAGE_SIZE);

		crc = btrfs_csum_data(buffer, crc, l);
		kunmap_atomic(buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		buffer = kmap_atomic(page);
	}

	btrfs_csum_final(crc, csum);
	if (memcmp(csum, on_disk_csum, sctx->csum_size))
		fail = 1;

	return fail;
}
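/*
 * Added worked example: with sectorsize == 4096 and 4KiB pages the loop in
 * scrub_checksum_data() runs exactly once, CRCing one full page; the loop
 * shape only matters when sectorsize exceeds PAGE_SIZE. btrfs_csum_final()
 * inverts the running crc32c and stores it in on-disk byte order for the
 * memcmp against the csum that was looked up by scrub_find_csum().
 */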
static int scrub_checksum_tree_block(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_header *h;
	struct btrfs_root *root = sctx->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	struct page *page;
	void *mapped_buffer;
	u64 mapped_size;
	void *p;
	u32 crc = ~(u32)0;
	int fail = 0;
	int crc_fail = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	page = sblock->pagev[0]->page;
	mapped_buffer = kmap_atomic(page);
	h = (struct btrfs_header *)mapped_buffer;
	memcpy(on_disk_csum, h->csum, sctx->csum_size);

	/*
	 * we don't use the getter functions here, as we
	 * a) don't have an extent buffer and
	 * b) the page is already kmapped
	 */

	if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
		++fail;

	if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h))
		++fail;

	if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
		++fail;

	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
		   BTRFS_UUID_SIZE))
		++fail;

	WARN_ON(sctx->nodesize != sctx->leafsize);
	len = sctx->nodesize - BTRFS_CSUM_SIZE;
	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, mapped_size);

		crc = btrfs_csum_data(p, crc, l);
		kunmap_atomic(mapped_buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		mapped_buffer = kmap_atomic(page);
		mapped_size = PAGE_SIZE;
		p = mapped_buffer;
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
		++crc_fail;

	return fail || crc_fail;
}
static int scrub_checksum_super(struct scrub_block *sblock)
{
	struct btrfs_super_block *s;
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_root *root = sctx->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	struct page *page;
	void *mapped_buffer;
	u64 mapped_size;
	void *p;
	u32 crc = ~(u32)0;
	int fail_gen = 0;
	int fail_cor = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	page = sblock->pagev[0]->page;
	mapped_buffer = kmap_atomic(page);
	s = (struct btrfs_super_block *)mapped_buffer;
	memcpy(on_disk_csum, s->csum, sctx->csum_size);

	if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
		++fail_cor;

	if (sblock->pagev[0]->generation != btrfs_super_generation(s))
		++fail_gen;

	if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
		++fail_cor;

	len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, mapped_size);

		crc = btrfs_csum_data(p, crc, l);
		kunmap_atomic(mapped_buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		mapped_buffer = kmap_atomic(page);
		mapped_size = PAGE_SIZE;
		p = mapped_buffer;
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
		++fail_cor;

	if (fail_cor + fail_gen) {
		/*
		 * if we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		if (fail_cor)
			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
	}

	return fail_cor + fail_gen;
}
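/*
 * Added note: the super block checksum covers BTRFS_SUPER_INFO_SIZE minus
 * the embedded csum itself, so with 4KiB pages the CRC loop above runs once;
 * superblock errors are only counted, never repaired here, since the next
 * transaction commit rewrites all super blocks anyway.
 */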

static void scrub_submit(struct scrub_ctx *sctx)
{
	struct scrub_bio *sbio;

	if (sctx->curr == -1)
		return;

	sbio = sctx->bios[sctx->curr];
	sctx->curr = -1;
	scrub_pending_bio_inc(sctx);

	if (!sbio->bio->bi_bdev) {
		/*
		 * this case should not happen. If btrfs_map_block() is
		 * wrong, it could happen for dev-replace operations on
		 * missing devices when no mirrors are available, but in
		 * this case it should already fail the mount.
		 * This case is handled correctly (but _very_ slowly).
		 */
		printk_ratelimited(KERN_WARNING
			"btrfs: scrub_submit(bio bdev == NULL) is unexpected!\n");
		bio_endio(sbio->bio, -EIO);
	} else {
		btrfsic_submit_bio(READ, sbio->bio);
	}
}

static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage)
{
	struct scrub_block *sblock = spage->sblock;
	struct scrub_bio *sbio;
	int ret;

again:
	/*
	 * grab a fresh bio or wait for one to become available
	 */
	while (sctx->curr == -1) {
		spin_lock(&sctx->list_lock);
		sctx->curr = sctx->first_free;
		if (sctx->curr != -1) {
			sctx->first_free = sctx->bios[sctx->curr]->next_free;
			sctx->bios[sctx->curr]->next_free = -1;
			sctx->bios[sctx->curr]->page_count = 0;
			spin_unlock(&sctx->list_lock);
		} else {
			spin_unlock(&sctx->list_lock);
			wait_event(sctx->list_wait, sctx->first_free != -1);
		}
	}
	sbio = sctx->bios[sctx->curr];
	if (sbio->page_count == 0) {
		struct bio *bio;

		sbio->physical = spage->physical;
		sbio->logical = spage->logical;
		sbio->dev = spage->dev;
		bio = sbio->bio;
		if (!bio) {
			bio = btrfs_io_bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio);
			if (!bio)
				return -ENOMEM;
			sbio->bio = bio;
		}

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_bio_end_io;
		bio->bi_bdev = sbio->dev->bdev;
		bio->bi_sector = sbio->physical >> 9;
		sbio->err = 0;
	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
		   spage->physical ||
		   sbio->logical + sbio->page_count * PAGE_SIZE !=
		   spage->logical ||
		   sbio->dev != spage->dev) {
		scrub_submit(sctx);
		goto again;
	}

	sbio->pagev[sbio->page_count] = spage;
	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
		if (sbio->page_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			return -EIO;
		}
		scrub_submit(sctx);
		goto again;
	}

	scrub_block_get(sblock); /* one for the page added to the bio */
	atomic_inc(&sblock->outstanding_pages);
	sbio->page_count++;
	if (sbio->page_count == sctx->pages_per_rd_bio)
		scrub_submit(sctx);

	return 0;
}
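
/*
 * The scrub_bio pool used above is a simple index-linked freelist:
 * sctx->first_free points at the first spare slot of sctx->bios[] and
 * each spare scrub_bio chains to the next via next_free (-1 terminates).
 * Acquisition pops under list_lock, as seen above; release happens in
 * scrub_bio_end_io_worker(). Sketch of the release side for reference:
 */
#if 0
	spin_lock(&sctx->list_lock);
	sbio->next_free = sctx->first_free;	/* push this slot back */
	sctx->first_free = sbio->index;
	spin_unlock(&sctx->list_lock);
	/* scrub_pending_bio_dec() then wakes sctx->list_wait */
#endif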

static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace)
{
	struct scrub_block *sblock;
	int index;

	sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	/* one ref inside this function, plus one for each page added to
	 * a bio later on */
	atomic_set(&sblock->ref_count, 1);
	sblock->sctx = sctx;
	sblock->no_io_error_seen = 1;

	for (index = 0; len > 0; index++) {
		struct scrub_page *spage;
		u64 l = min_t(u64, len, PAGE_SIZE);

		spage = kzalloc(sizeof(*spage), GFP_NOFS);
		if (!spage) {
leave_nomem:
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
			scrub_block_put(sblock);
			return -ENOMEM;
		}
		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
		scrub_page_get(spage);
		sblock->pagev[index] = spage;
		spage->sblock = sblock;
		spage->dev = dev;
		spage->flags = flags;
		spage->generation = gen;
		spage->logical = logical;
		spage->physical = physical;
		spage->physical_for_dev_replace = physical_for_dev_replace;
		spage->mirror_num = mirror_num;
		if (csum) {
			spage->have_csum = 1;
			memcpy(spage->csum, csum, sctx->csum_size);
		} else {
			spage->have_csum = 0;
		}
		sblock->page_count++;
		spage->page = alloc_page(GFP_NOFS);
		if (!spage->page)
			goto leave_nomem;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}

	WARN_ON(sblock->page_count == 0);
	for (index = 0; index < sblock->page_count; index++) {
		struct scrub_page *spage = sblock->pagev[index];
		int ret;

		ret = scrub_add_page_to_rd_bio(sctx, spage);
		if (ret) {
			scrub_block_put(sblock);
			return ret;
		}
	}

	if (force)
		scrub_submit(sctx);

	/* last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
	return 0;
}

static void scrub_bio_end_io(struct bio *bio, int err)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;

	sbio->err = err;
	sbio->bio = bio;

	btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
}

static void scrub_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
	if (sbio->err) {
		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			spage->sblock->no_io_error_seen = 0;
		}
	}

	/* now complete the scrub_block items that have all pages completed */
	for (i = 0; i < sbio->page_count; i++) {
		struct scrub_page *spage = sbio->pagev[i];
		struct scrub_block *sblock = spage->sblock;

		if (atomic_dec_and_test(&sblock->outstanding_pages))
			scrub_block_complete(sblock);
		scrub_block_put(sblock);
	}

	bio_put(sbio->bio);
	sbio->bio = NULL;
	spin_lock(&sctx->list_lock);
	sbio->next_free = sctx->first_free;
	sctx->first_free = sbio->index;
	spin_unlock(&sctx->list_lock);

	if (sctx->is_dev_replace &&
	    atomic_read(&sctx->wr_ctx.flush_all_writes)) {
		mutex_lock(&sctx->wr_ctx.wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_ctx.wr_lock);
	}

	scrub_pending_bio_dec(sctx);
}

static void scrub_block_complete(struct scrub_block *sblock)
{
	if (!sblock->no_io_error_seen) {
		scrub_handle_errored_block(sblock);
	} else {
		/*
		 * In the dev-replace case, a block with a checksum error
		 * is rewritten via the repair path inside scrub_checksum();
		 * a block that checks out fine is copied to the replace
		 * target right here.
		 */
		if (!scrub_checksum(sblock) && sblock->sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock);
	}
}

static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len,
			   u8 *csum)
{
	struct btrfs_ordered_sum *sum = NULL;
	unsigned long index;
	unsigned long num_sectors;

	while (!list_empty(&sctx->csum_list)) {
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		if (sum->bytenr > logical)
			return 0;
		if (sum->bytenr + sum->len > logical)
			break;

		++sctx->stat.csum_discards;
		list_del(&sum->list);
		kfree(sum);
		sum = NULL;
	}
	if (!sum)
		return 0;

	index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize;
	num_sectors = sum->len / sctx->sectorsize;
	memcpy(csum, sum->sums + index, sctx->csum_size);
	if (index == num_sectors - 1) {
		list_del(&sum->list);
		kfree(sum);
	}
	return 1;
}
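
/*
 * Worked example for scrub_find_csum() (illustrative numbers): with a
 * 4KiB sectorsize and a btrfs_ordered_sum covering bytenr 16KiB, len
 * 16KiB, a lookup at logical 20KiB passes the range check
 * (16K + 16K > 20K), yields index = (20K - 16K) / 4K = 1 and
 * num_sectors = 4; the entry is only freed once the last sector
 * (index 3) has been consumed, sums before the cursor are discarded.
 */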

/* scrub extent tries to collect up to 64 kB for each bio */
static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
			u64 physical, struct btrfs_device *dev, u64 flags,
			u64 gen, int mirror_num, u64 physical_for_dev_replace)
{
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		blocksize = sctx->sectorsize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.data_extents_scrubbed++;
		sctx->stat.data_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		WARN_ON(sctx->nodesize != sctx->leafsize);
		blocksize = sctx->nodesize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.tree_extents_scrubbed++;
		sctx->stat.tree_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else {
		blocksize = sctx->sectorsize;
		WARN_ON(1);
	}

	while (len) {
		u64 l = min_t(u64, len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sctx, logical, l, csum);
			if (have_csum == 0)
				++sctx->stat.no_csum;
			if (sctx->is_dev_replace && !have_csum) {
				ret = copy_nocow_pages(sctx, logical, l,
						       mirror_num,
						       physical_for_dev_replace);
				goto behind_scrub_pages;
			}
		}
		ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
				  mirror_num, have_csum ? csum : NULL, 0,
				  physical_for_dev_replace);
behind_scrub_pages:
		if (ret)
			return ret;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}
	return 0;
}

static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
					   struct map_lookup *map,
					   struct btrfs_device *scrub_dev,
					   int num, u64 base, u64 length,
					   int is_dev_replace)
{
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_root *csum_root = fs_info->csum_root;
	struct btrfs_extent_item *extent;
	struct blk_plug plug;
	u64 flags;
	int ret;
	int slot;
	u64 nstripes;
	struct extent_buffer *l;
	struct btrfs_key key;
	u64 physical;
	u64 logical;
	u64 logic_end;
	u64 generation;
	int mirror_num;
	struct reada_control *reada1;
	struct reada_control *reada2;
	struct btrfs_key key_start;
	struct btrfs_key key_end;
	u64 increment = map->stripe_len;
	u64 offset;
	u64 extent_logical;
	u64 extent_physical;
	u64 extent_len;
	struct btrfs_device *extent_dev;
	int extent_mirror_num;
	int stop_loop;

	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
			 BTRFS_BLOCK_GROUP_RAID6)) {
		if (num >= nr_data_stripes(map)) {
			return 0;
		}
	}

	nstripes = length;
	offset = 0;
	do_div(nstripes, map->stripe_len);
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		offset = map->stripe_len * num;
		increment = map->stripe_len * map->num_stripes;
		mirror_num = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;
		offset = map->stripe_len * (num / map->sub_stripes);
		increment = map->stripe_len * factor;
		mirror_num = num % map->sub_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else {
		increment = map->stripe_len;
		mirror_num = 1;
	}
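
	/*
	 * Worked example for the stripe math above (illustrative numbers):
	 * RAID10 with num_stripes = 4, sub_stripes = 2 and num = 3 gives
	 * factor = 4 / 2 = 2, offset = stripe_len * (3 / 2) = stripe_len,
	 * increment = stripe_len * 2 and mirror_num = 3 % 2 + 1 = 2: this
	 * device holds the second copy of every other stripe. For RAID0
	 * the same logic degenerates to a single copy (mirror_num = 1)
	 * advancing by stripe_len * num_stripes.
	 */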

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * work on commit root. The related disk blocks are static as
	 * long as COW is applied. This means it is safe to rewrite
	 * them to repair disk errors without any race conditions
	 */
	path->search_commit_root = 1;
	path->skip_locking = 1;

	/*
	 * trigger the readahead for the extent tree and the csum tree
	 * and wait for completion. During readahead, the scrub is
	 * officially paused to not hold off transaction commits
	 */
	logical = base + offset;

	wait_event(sctx->list_wait,
		   atomic_read(&sctx->bios_in_flight) == 0);
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);

	/* FIXME it might be better to start readahead at commit root */
	key_start.objectid = logical;
	key_start.type = BTRFS_EXTENT_ITEM_KEY;
	key_start.offset = (u64)0;
	key_end.objectid = base + offset + nstripes * increment;
	key_end.type = BTRFS_METADATA_ITEM_KEY;
	key_end.offset = (u64)-1;
	reada1 = btrfs_reada_add(root, &key_start, &key_end);

	key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_start.type = BTRFS_EXTENT_CSUM_KEY;
	key_start.offset = logical;
	key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_end.type = BTRFS_EXTENT_CSUM_KEY;
	key_end.offset = base + offset + nstripes * increment;
	reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);

	if (!IS_ERR(reada1))
		btrfs_reada_wait(reada1);
	if (!IS_ERR(reada2))
		btrfs_reada_wait(reada2);

	mutex_lock(&fs_info->scrub_lock);
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	wake_up(&fs_info->scrub_pause_wait);
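
	/*
	 * The block above is the scrub side of the pause handshake: a
	 * committer calls btrfs_scrub_pause(), which raises
	 * scrub_pause_req and waits until scrubs_paused equals
	 * scrubs_running; each scrub context parks by bumping
	 * scrubs_paused, sleeps until btrfs_scrub_continue() withdraws
	 * the request, and un-parks again. The same sequence repeats in
	 * the main loop below whenever a pause is requested mid-stripe.
	 */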

	/*
	 * collect all data csums for the stripe to avoid seeking during
	 * the scrub. This might currently (crc32) end up being about 1MB
	 */
	blk_start_plug(&plug);

	/*
	 * now find all extents for each stripe and scrub them
	 */
	logical = base + offset;
	physical = map->stripes[num].physical;
	logic_end = logical + increment * nstripes;
	ret = 0;
	while (logical < logic_end) {
		/*
		 * canceled?
		 */
		if (atomic_read(&fs_info->scrub_cancel_req) ||
		    atomic_read(&sctx->cancel_req)) {
			ret = -ECANCELED;
			goto out;
		}
		/*
		 * check to see if we have to pause
		 */
		if (atomic_read(&fs_info->scrub_pause_req)) {
			/* push queued extents */
			atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
			scrub_submit(sctx);
			mutex_lock(&sctx->wr_ctx.wr_lock);
			scrub_wr_submit(sctx);
			mutex_unlock(&sctx->wr_ctx.wr_lock);
			wait_event(sctx->list_wait,
				   atomic_read(&sctx->bios_in_flight) == 0);
			atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
			atomic_inc(&fs_info->scrubs_paused);
			wake_up(&fs_info->scrub_pause_wait);
			mutex_lock(&fs_info->scrub_lock);
			while (atomic_read(&fs_info->scrub_pause_req)) {
				mutex_unlock(&fs_info->scrub_lock);
				wait_event(fs_info->scrub_pause_wait,
					   atomic_read(&fs_info->scrub_pause_req) == 0);
				mutex_lock(&fs_info->scrub_lock);
			}
			atomic_dec(&fs_info->scrubs_paused);
			mutex_unlock(&fs_info->scrub_lock);
			wake_up(&fs_info->scrub_pause_wait);
		}

		key.objectid = logical;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = (u64)-1;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		if (ret > 0) {
			ret = btrfs_previous_item(root, path, 0,
						  BTRFS_EXTENT_ITEM_KEY);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				/* there's no smaller item, so stick with the
				 * larger one */
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

		stop_loop = 0;
		while (1) {
			u64 bytes;

			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

				stop_loop = 1;
				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.type == BTRFS_METADATA_ITEM_KEY)
				bytes = root->leafsize;
			else
				bytes = key.offset;

			if (key.objectid + bytes <= logical)
				goto next;

			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
			    key.type != BTRFS_METADATA_ITEM_KEY)
				goto next;

			if (key.objectid >= logical + map->stripe_len) {
				/* out of this device extent */
				if (key.objectid >= logic_end)
					stop_loop = 1;
				break;
			}

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if (key.objectid < logical &&
			    (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
				printk(KERN_ERR
				       "btrfs scrub: tree block %llu spanning "
				       "stripes, ignored. logical=%llu\n",
				       key.objectid, logical);
				goto next;
			}

again:
			extent_logical = key.objectid;
			extent_len = bytes;

			/*
			 * trim extent to this stripe
			 */
			if (extent_logical < logical) {
				extent_len -= logical - extent_logical;
				extent_logical = logical;
			}
			if (extent_logical + extent_len >
			    logical + map->stripe_len) {
				extent_len = logical + map->stripe_len -
					     extent_logical;
			}

			extent_physical = extent_logical - logical + physical;
			extent_dev = scrub_dev;
			extent_mirror_num = mirror_num;
			if (is_dev_replace)
				scrub_remap_extent(fs_info, extent_logical,
						   extent_len, &extent_physical,
						   &extent_dev,
						   &extent_mirror_num);

			ret = btrfs_lookup_csums_range(csum_root, logical,
						logical + map->stripe_len - 1,
						&sctx->csum_list, 1);
			if (ret)
				goto out;

			ret = scrub_extent(sctx, extent_logical, extent_len,
					   extent_physical, extent_dev, flags,
					   generation, extent_mirror_num,
					   extent_logical - logical + physical);
			if (ret)
				goto out;

			scrub_free_csums(sctx);
			if (extent_logical + extent_len <
			    key.objectid + bytes) {
				logical += increment;
				physical += map->stripe_len;

				if (logical < key.objectid + bytes) {
					cond_resched();
					goto again;
				}

				if (logical >= logic_end) {
					stop_loop = 1;
					break;
				}
			}
next:
			path->slots[0]++;
		}
		btrfs_release_path(path);
		logical += increment;
		physical += map->stripe_len;
		spin_lock(&sctx->stat_lock);
		if (stop_loop)
			sctx->stat.last_physical = map->stripes[num].physical +
						   length;
		else
			sctx->stat.last_physical = physical;
		spin_unlock(&sctx->stat_lock);
		if (stop_loop)
			break;
	}
out:
	/* push queued extents */
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_ctx.wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_ctx.wr_lock);

	blk_finish_plug(&plug);
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}
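
/*
 * scrub_chunk() maps the chunk at @chunk_offset back to its map_lookup
 * and hands the stripe that starts at @dev_offset on @scrub_dev to
 * scrub_stripe(). The caller walks the device extent tree, so every
 * copy that lives on this device is visited through its own device
 * extent.
 */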

static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
					  struct btrfs_device *scrub_dev,
					  u64 chunk_tree, u64 chunk_objectid,
					  u64 chunk_offset, u64 length,
					  u64 dev_offset, int is_dev_replace)
{
	struct btrfs_mapping_tree *map_tree =
		&sctx->dev_root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int i;
	int ret = 0;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);

	if (!em)
		return -EINVAL;

	map = (struct map_lookup *)em->bdev;
	if (em->start != chunk_offset)
		goto out;

	if (em->len < length)
		goto out;

	for (i = 0; i < map->num_stripes; ++i) {
		if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
		    map->stripes[i].physical == dev_offset) {
			ret = scrub_stripe(sctx, map, scrub_dev, i,
					   chunk_offset, length,
					   is_dev_replace);
			if (ret)
				goto out;
		}
	}
out:
	free_extent_map(em);

	return ret;
}
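
/*
 * scrub_enumerate_chunks() walks the device tree (commit root) for
 * DEV_EXTENT items of @scrub_dev within [@start, @end), scrubs the
 * corresponding chunk for each one, synchronizes with transaction
 * commits between chunks and advances the dev-replace cursor window
 * as it goes.
 */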

static noinline_for_stack
int scrub_enumerate_chunks(struct scrub_ctx *sctx,
			   struct btrfs_device *scrub_dev, u64 start, u64 end,
			   int is_dev_replace)
{
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	struct btrfs_root *root = sctx->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_block_group_cache *cache;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 2;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = scrub_dev->devid;
	key.offset = 0ull;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] >=
			    btrfs_header_nritems(path->nodes[0])) {
				ret = btrfs_next_leaf(root, path);
				if (ret)
					break;
			}
		}

		l = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.objectid != scrub_dev->devid)
			break;

		if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
			break;

		if (found_key.offset >= end)
			break;

		if (found_key.offset < key.offset)
			break;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (found_key.offset + length <= start) {
			key.offset = found_key.offset + length;
			btrfs_release_path(path);
			continue;
		}

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);

		/*
		 * get a reference on the corresponding block group to prevent
		 * the chunk from going away while we scrub it
		 */
		cache = btrfs_lookup_block_group(fs_info, chunk_offset);
		if (!cache) {
			ret = -ENOENT;
			break;
		}
		dev_replace->cursor_right = found_key.offset + length;
		dev_replace->cursor_left = found_key.offset;
		dev_replace->item_needs_writeback = 1;
		ret = scrub_chunk(sctx, scrub_dev, chunk_tree, chunk_objectid,
				  chunk_offset, length, found_key.offset,
				  is_dev_replace);

		/*
		 * flush and submit all pending read and write bios, then
		 * wait for them.
		 * Note that in the dev replace case, a read request causes
		 * write requests that are submitted in the read completion
		 * worker. Therefore in the current situation, it is required
		 * that all write requests are flushed, so that all read and
		 * write requests are really completed when bios_in_flight
		 * changes to 0.
		 */
		atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
		scrub_submit(sctx);
		mutex_lock(&sctx->wr_ctx.wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_ctx.wr_lock);

		wait_event(sctx->list_wait,
			   atomic_read(&sctx->bios_in_flight) == 0);
		atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
		atomic_inc(&fs_info->scrubs_paused);
		wake_up(&fs_info->scrub_pause_wait);
		wait_event(sctx->list_wait,
			   atomic_read(&sctx->workers_pending) == 0);

		mutex_lock(&fs_info->scrub_lock);
		while (atomic_read(&fs_info->scrub_pause_req)) {
			mutex_unlock(&fs_info->scrub_lock);
			wait_event(fs_info->scrub_pause_wait,
				   atomic_read(&fs_info->scrub_pause_req) == 0);
			mutex_lock(&fs_info->scrub_lock);
		}
		atomic_dec(&fs_info->scrubs_paused);
		mutex_unlock(&fs_info->scrub_lock);
		wake_up(&fs_info->scrub_pause_wait);

		btrfs_put_block_group(cache);
		if (ret)
			break;
		if (is_dev_replace &&
		    atomic64_read(&dev_replace->num_write_errors) > 0) {
			ret = -EIO;
			break;
		}
		if (sctx->stat.malloc_errors > 0) {
			ret = -ENOMEM;
			break;
		}

		dev_replace->cursor_left = dev_replace->cursor_right;
		dev_replace->item_needs_writeback = 1;

		key.offset = found_key.offset + length;
		btrfs_release_path(path);
	}

	btrfs_free_path(path);

	/*
	 * ret can still be 1 from search_slot or next_leaf,
	 * that's not an error
	 */
	return ret < 0 ? ret : 0;
}

static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
					   struct btrfs_device *scrub_dev)
{
	int i;
	u64 bytenr;
	u64 gen;
	int ret;
	struct btrfs_root *root = sctx->dev_root;

	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
		return -EIO;

	gen = root->fs_info->last_trans_committed;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE > scrub_dev->total_bytes)
			break;

		ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
				  scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
				  NULL, 1, bytenr);
		if (ret)
			return ret;
	}
	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);

	return 0;
}
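
/*
 * Note on scrub_supers(): btrfs_sb_offset(i) yields the fixed superblock
 * copy locations (the primary at 64KiB, further copies at 64MiB and
 * 256GiB on the kernels this file targets); the total_bytes check simply
 * skips copies that do not fit on a small device. The supers are queued
 * with force = 1, so each one is submitted in its own bio right away.
 */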

/*
 * get a reference count on fs_info->scrub_workers. start workers if necessary
 */
static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
						int is_dev_replace)
{
	int ret = 0;

	if (fs_info->scrub_workers_refcnt == 0) {
		if (is_dev_replace)
			btrfs_init_workers(&fs_info->scrub_workers, "scrub", 1,
					   &fs_info->generic_worker);
		else
			btrfs_init_workers(&fs_info->scrub_workers, "scrub",
					   fs_info->thread_pool_size,
					   &fs_info->generic_worker);
		fs_info->scrub_workers.idle_thresh = 4;
		ret = btrfs_start_workers(&fs_info->scrub_workers);
		if (ret)
			goto out;
		btrfs_init_workers(&fs_info->scrub_wr_completion_workers,
				   "scrubwrc",
				   fs_info->thread_pool_size,
				   &fs_info->generic_worker);
		fs_info->scrub_wr_completion_workers.idle_thresh = 2;
		ret = btrfs_start_workers(
				&fs_info->scrub_wr_completion_workers);
		if (ret)
			goto out;
		btrfs_init_workers(&fs_info->scrub_nocow_workers, "scrubnc", 1,
				   &fs_info->generic_worker);
		ret = btrfs_start_workers(&fs_info->scrub_nocow_workers);
		if (ret)
			goto out;
	}
	++fs_info->scrub_workers_refcnt;
out:
	return ret;
}

static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
{
	if (--fs_info->scrub_workers_refcnt == 0) {
		btrfs_stop_workers(&fs_info->scrub_workers);
		btrfs_stop_workers(&fs_info->scrub_wr_completion_workers);
		btrfs_stop_workers(&fs_info->scrub_nocow_workers);
	}
	WARN_ON(fs_info->scrub_workers_refcnt < 0);
}
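
/*
 * scrub_workers_get()/scrub_workers_put() implement a refcounted
 * singleton: the first getter starts the three worker pools (scrub,
 * write completion, nocow copy), the last putter stops them again.
 * The refcount itself is a plain int, so callers must hold
 * fs_info->scrub_lock around both calls, as btrfs_scrub_dev() below
 * does.
 */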

int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
		    u64 end, struct btrfs_scrub_progress *progress,
		    int readonly, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int ret;
	struct btrfs_device *dev;

	if (btrfs_fs_closing(fs_info))
		return -EINVAL;

	/*
	 * check some assumptions
	 */
	if (fs_info->chunk_root->nodesize != fs_info->chunk_root->leafsize) {
		printk(KERN_ERR
		       "btrfs_scrub: size assumption nodesize == leafsize (%d == %d) fails\n",
		       fs_info->chunk_root->nodesize,
		       fs_info->chunk_root->leafsize);
		return -EINVAL;
	}

	if (fs_info->chunk_root->nodesize > BTRFS_STRIPE_LEN) {
		/*
		 * the way scrub is implemented, it cannot calculate the
		 * checksum in this case. Do not handle this situation at
		 * all, because it won't ever happen.
		 */
		printk(KERN_ERR
		       "btrfs_scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails\n",
		       fs_info->chunk_root->nodesize, BTRFS_STRIPE_LEN);
		return -EINVAL;
	}

	if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
		/* not supported for data w/o checksums */
		printk(KERN_ERR
		       "btrfs_scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails\n",
		       fs_info->chunk_root->sectorsize, PAGE_SIZE);
		return -EINVAL;
	}

	if (fs_info->chunk_root->nodesize >
	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
	    fs_info->chunk_root->sectorsize >
	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
		/*
		 * would exhaust the array bounds of pagev member in
		 * struct scrub_block
		 */
		pr_err("btrfs_scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails\n",
		       fs_info->chunk_root->nodesize,
		       SCRUB_MAX_PAGES_PER_BLOCK,
		       fs_info->chunk_root->sectorsize,
		       SCRUB_MAX_PAGES_PER_BLOCK);
		return -EINVAL;
	}
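
	/*
	 * Worked example for the checks above, assuming 4KiB pages: a
	 * 16KiB nodesize occupies 4 scrub pages per block, well inside
	 * SCRUB_MAX_PAGES_PER_BLOCK (16), while a hypothetical 128KiB
	 * nodesize would already have failed the BTRFS_STRIPE_LEN
	 * (64KiB) check before it.
	 */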

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info, devid, NULL, NULL);
	if (!dev || (dev->missing && !is_dev_replace)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -ENODEV;
	}

	mutex_lock(&fs_info->scrub_lock);
	if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -EIO;
	}

	btrfs_dev_replace_lock(&fs_info->dev_replace);
	if (dev->scrub_device ||
	    (!is_dev_replace &&
	     btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
		btrfs_dev_replace_unlock(&fs_info->dev_replace);
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -EINPROGRESS;
	}
	btrfs_dev_replace_unlock(&fs_info->dev_replace);

	ret = scrub_workers_get(fs_info, is_dev_replace);
	if (ret) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return ret;
	}

	sctx = scrub_setup_ctx(dev, is_dev_replace);
	if (IS_ERR(sctx)) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(fs_info);
		return PTR_ERR(sctx);
	}
	sctx->readonly = readonly;
	dev->scrub_device = sctx;

	atomic_inc(&fs_info->scrubs_running);
	mutex_unlock(&fs_info->scrub_lock);

	if (!is_dev_replace) {
		/*
		 * by holding device list mutex, we can
		 * kick off writing super in log tree sync.
		 */
		ret = scrub_supers(sctx, dev);
	}
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	if (!ret)
		ret = scrub_enumerate_chunks(sctx, dev, start, end,
					     is_dev_replace);

	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
	atomic_dec(&fs_info->scrubs_running);
	wake_up(&fs_info->scrub_pause_wait);

	wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);

	if (progress)
		memcpy(progress, &sctx->stat, sizeof(*progress));

	mutex_lock(&fs_info->scrub_lock);
	dev->scrub_device = NULL;
	scrub_workers_put(fs_info);
	mutex_unlock(&fs_info->scrub_lock);

	scrub_free_ctx(sctx);

	return ret;
}

void btrfs_scrub_pause(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrub_pause_req);
	while (atomic_read(&fs_info->scrubs_paused) !=
	       atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_paused) ==
			   atomic_read(&fs_info->scrubs_running));
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);
}

void btrfs_scrub_continue(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	atomic_dec(&fs_info->scrub_pause_req);
	wake_up(&fs_info->scrub_pause_wait);
}

int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	if (!atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->scrub_cancel_req);
	while (atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_running) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrub_cancel_req);
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
			   struct btrfs_device *dev)
{
	struct scrub_ctx *sctx;

	mutex_lock(&fs_info->scrub_lock);
	sctx = dev->scrub_device;
	if (!sctx) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}
	atomic_inc(&sctx->cancel_req);
	while (dev->scrub_device) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   dev->scrub_device == NULL);
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
			 struct btrfs_scrub_progress *progress)
{
	struct btrfs_device *dev;
	struct scrub_ctx *sctx = NULL;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(root->fs_info, devid, NULL, NULL);
	if (dev)
		sctx = dev->scrub_device;
	if (sctx)
		memcpy(progress, &sctx->stat, sizeof(*progress));
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
}
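
/*
 * scrub_remap_extent() re-resolves a logical extent through
 * btrfs_map_block() and hands back the physical offset, device and
 * mirror number of the first stripe of the mapping; scrub_stripe()
 * uses it in dev-replace mode to pick the location to read from. On
 * any lookup problem it leaves the caller's values untouched.
 */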

static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num)
{
	u64 mapped_length;
	struct btrfs_bio *bbio = NULL;
	int ret;

	mapped_length = extent_len;
	ret = btrfs_map_block(fs_info, READ, extent_logical,
			      &mapped_length, &bbio, 0);
	if (ret || !bbio || mapped_length < extent_len ||
	    !bbio->stripes[0].dev->bdev) {
		kfree(bbio);
		return;
	}

	*extent_physical = bbio->stripes[0].physical;
	*extent_mirror_num = bbio->mirror_num;
	*extent_dev = bbio->stripes[0].dev;
	kfree(bbio);
}

static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
			      struct scrub_wr_ctx *wr_ctx,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_device *dev,
			      int is_dev_replace)
{
	WARN_ON(wr_ctx->wr_curr_bio != NULL);

	mutex_init(&wr_ctx->wr_lock);
	wr_ctx->wr_curr_bio = NULL;
	if (!is_dev_replace)
		return 0;

	WARN_ON(!dev->bdev);
	wr_ctx->pages_per_wr_bio = min_t(int, SCRUB_PAGES_PER_WR_BIO,
					 bio_get_nr_vecs(dev->bdev));
	wr_ctx->tgtdev = dev;
	atomic_set(&wr_ctx->flush_all_writes, 0);
	return 0;
}

static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
{
	mutex_lock(&wr_ctx->wr_lock);
	kfree(wr_ctx->wr_curr_bio);
	wr_ctx->wr_curr_bio = NULL;
	mutex_unlock(&wr_ctx->wr_lock);
}

static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
			    int mirror_num, u64 physical_for_dev_replace)
{
	struct scrub_copy_nocow_ctx *nocow_ctx;
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
	if (!nocow_ctx) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	scrub_pending_trans_workers_inc(sctx);

	nocow_ctx->sctx = sctx;
	nocow_ctx->logical = logical;
	nocow_ctx->len = len;
	nocow_ctx->mirror_num = mirror_num;
	nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
	nocow_ctx->work.func = copy_nocow_pages_worker;
	INIT_LIST_HEAD(&nocow_ctx->inodes);
	btrfs_queue_worker(&fs_info->scrub_nocow_workers,
			   &nocow_ctx->work);

	return 0;
}

static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
{
	struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
	struct scrub_nocow_inode *nocow_inode;

	nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
	if (!nocow_inode)
		return -ENOMEM;
	nocow_inode->inum = inum;
	nocow_inode->offset = offset;
	nocow_inode->root = root;
	list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
	return 0;
}

#define COPY_COMPLETE 1
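
/*
 * The nocow copy path queued by copy_nocow_pages() runs in three steps:
 * the worker below resolves the logical address to all referencing
 * inodes (record_inode_for_nocow), copy_nocow_pages_for_inode() then
 * pulls each page through the page cache, and write_page_nocow()
 * finally pushes it synchronously to the replace target device.
 */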

static void copy_nocow_pages_worker(struct btrfs_work *work)
{
	struct scrub_copy_nocow_ctx *nocow_ctx =
		container_of(work, struct scrub_copy_nocow_ctx, work);
	struct scrub_ctx *sctx = nocow_ctx->sctx;
	u64 logical = nocow_ctx->logical;
	u64 len = nocow_ctx->len;
	int mirror_num = nocow_ctx->mirror_num;
	u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
	int ret;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_root *root;
	int not_written = 0;

	fs_info = sctx->dev_root->fs_info;
	root = fs_info->extent_root;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		not_written = 1;
		goto out;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		not_written = 1;
		goto out;
	}

	ret = iterate_inodes_from_logical(logical, fs_info, path,
					  record_inode_for_nocow, nocow_ctx);
	if (ret != 0 && ret != -ENOENT) {
		pr_warn("iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %u, ret %d\n",
			logical, physical_for_dev_replace, len, mirror_num,
			ret);
		not_written = 1;
		goto out;
	}

	btrfs_end_transaction(trans, root);
	trans = NULL;
	while (!list_empty(&nocow_ctx->inodes)) {
		struct scrub_nocow_inode *entry;
		entry = list_first_entry(&nocow_ctx->inodes,
					 struct scrub_nocow_inode,
					 list);
		list_del_init(&entry->list);
		ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
						 entry->root, nocow_ctx);
		kfree(entry);
		if (ret == COPY_COMPLETE) {
			ret = 0;
			break;
		} else if (ret) {
			break;
		}
	}
out:
	while (!list_empty(&nocow_ctx->inodes)) {
		struct scrub_nocow_inode *entry;
		entry = list_first_entry(&nocow_ctx->inodes,
					 struct scrub_nocow_inode,
					 list);
		list_del_init(&entry->list);
		kfree(entry);
	}
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, root);
	if (not_written)
		btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
					    num_uncorrectable_read_errors);

	btrfs_free_path(path);
	kfree(nocow_ctx);

	scrub_pending_trans_workers_dec(sctx);
}

static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
				      struct scrub_copy_nocow_ctx *nocow_ctx)
{
	struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
	struct btrfs_key key;
	struct inode *inode;
	struct page *page;
	struct btrfs_root *local_root;
	struct btrfs_ordered_extent *ordered;
	struct extent_map *em;
	struct extent_state *cached_state = NULL;
	struct extent_io_tree *io_tree;
	u64 physical_for_dev_replace;
	u64 len = nocow_ctx->len;
	u64 lockstart = offset, lockend = offset + len - 1;
	unsigned long index;
	int srcu_index;
	int ret = 0;
	int err = 0;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(local_root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return PTR_ERR(local_root);
	}

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	/* Avoid truncate/dio/punch hole.. */
	mutex_lock(&inode->i_mutex);
	inode_dio_wait(inode);

	physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
	io_tree = &BTRFS_I(inode)->io_tree;

	lock_extent_bits(io_tree, lockstart, lockend, 0, &cached_state);
	ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
	if (ordered) {
		btrfs_put_ordered_extent(ordered);
		goto out_unlock;
	}

	em = btrfs_get_extent(inode, NULL, 0, lockstart, len, 0);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_unlock;
	}

	/*
	 * This extent does not actually cover the logical extent anymore,
	 * move on to the next inode.
	 */
	if (em->block_start > nocow_ctx->logical ||
	    em->block_start + em->block_len < nocow_ctx->logical + len) {
		free_extent_map(em);
		goto out_unlock;
	}
	free_extent_map(em);

	while (len >= PAGE_CACHE_SIZE) {
		index = offset >> PAGE_CACHE_SHIFT;
again:
		page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
		if (!page) {
			pr_err("find_or_create_page() failed\n");
			ret = -ENOMEM;
			goto out;
		}

		if (PageUptodate(page)) {
			if (PageDirty(page))
				goto next_page;
		} else {
			ClearPageError(page);
			err = extent_read_full_page_nolock(io_tree, page,
							   btrfs_get_extent,
							   nocow_ctx->mirror_num);
			if (err) {
				ret = err;
				goto next_page;
			}

			lock_page(page);
			/*
			 * If the page has been removed from the page cache,
			 * the data on it is meaningless: it may be stale,
			 * and the new data may have been written into a new
			 * page in the page cache.
			 */
			if (page->mapping != inode->i_mapping) {
				unlock_page(page);
				page_cache_release(page);
				goto again;
			}
			if (!PageUptodate(page)) {
				ret = -EIO;
				goto next_page;
			}
		}
		err = write_page_nocow(nocow_ctx->sctx,
				       physical_for_dev_replace, page);
		if (err)
			ret = err;
next_page:
		unlock_page(page);
		page_cache_release(page);

		if (ret)
			break;

		offset += PAGE_CACHE_SIZE;
		physical_for_dev_replace += PAGE_CACHE_SIZE;
		len -= PAGE_CACHE_SIZE;
	}
	ret = COPY_COMPLETE;
out_unlock:
	unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
			     GFP_NOFS);
out:
	mutex_unlock(&inode->i_mutex);
	iput(inode);
	return ret;
}
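
/*
 * write_page_nocow() is deliberately simple: one page, one bio, one
 * synchronous WRITE_SYNC submission against the replace target,
 * completed via an on-stack completion. Throughput does not matter
 * here; this path only runs for nocow data without checksums.
 */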

static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page)
{
	struct bio *bio;
	struct btrfs_device *dev;
	int ret;
	DECLARE_COMPLETION_ONSTACK(compl);

	dev = sctx->wr_ctx.tgtdev;
	if (!dev)
		return -EIO;
	if (!dev->bdev) {
		printk_ratelimited(KERN_WARNING
			"btrfs: scrub write_page_nocow(bdev == NULL) is unexpected!\n");
		return -EIO;
	}
	bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
	if (!bio) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}
	bio->bi_private = &compl;
	bio->bi_end_io = scrub_complete_bio_end_io;
	bio->bi_size = 0;
	bio->bi_sector = physical_for_dev_replace >> 9;
	bio->bi_bdev = dev->bdev;
	ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
	if (ret != PAGE_CACHE_SIZE) {
leave_with_eio:
		bio_put(bio);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
		return -EIO;
	}
	btrfsic_submit_bio(WRITE_SYNC, bio);
	wait_for_completion(&compl);

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		goto leave_with_eio;

	bio_put(bio);
	return 0;
}
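
/*
 * For reference, a sketch of how the entry point above is driven: the
 * scrub ioctl handler in ioctl.c does essentially the following (the
 * 'sa' argument naming is illustrative, not the exact ioctl code):
 */
#if 0
	ret = btrfs_scrub_dev(fs_info, sa->devid, sa->start, sa->end,
			      &sa->progress,
			      !!(sa->flags & BTRFS_SCRUB_READONLY),
			      0 /* is_dev_replace */);
#endif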