1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2011, 2012 STRATO. All rights reserved.
4 */
5
6 #include <linux/blkdev.h>
7 #include <linux/ratelimit.h>
8 #include <linux/sched/mm.h>
9 #include <crypto/hash.h>
10 #include "ctree.h"
11 #include "discard.h"
12 #include "volumes.h"
13 #include "disk-io.h"
14 #include "ordered-data.h"
15 #include "transaction.h"
16 #include "backref.h"
17 #include "extent_io.h"
18 #include "dev-replace.h"
19 #include "check-integrity.h"
20 #include "raid56.h"
21 #include "block-group.h"
22 #include "zoned.h"
23 #include "fs.h"
24 #include "accessors.h"
25 #include "file-item.h"
26 #include "scrub.h"
27
28 /*
29 * This is only the first step towards a full-featured scrub. It reads all
30 * extents and super blocks and verifies the checksums. In case a bad checksum
31 * is found or the extent cannot be read, good data will be written back if
32 * any can be found.
33 *
34 * Future enhancements:
35 * - In case an unrepairable extent is encountered, track which files are
36 * affected and report them
37 * - track and record media errors, throw out bad devices
38 * - add a mode to also read unallocated space
39 */
40
41 struct scrub_ctx;
42
43 /*
44 * The following value only influences the performance.
45 *
46 * This determines how many stripes would be submitted in one go,
47 * which is 512KiB (BTRFS_STRIPE_LEN * SCRUB_STRIPES_PER_GROUP).
48 */
49 #define SCRUB_STRIPES_PER_GROUP 8
50
51 /*
52 * How many groups we have for each sctx.
53 *
54 * This would be 8M per device, the same value as the old scrub in-flight bios
55 * size limit.
56 */
57 #define SCRUB_GROUPS_PER_SCTX 16
58
59 #define SCRUB_TOTAL_STRIPES (SCRUB_GROUPS_PER_SCTX * SCRUB_STRIPES_PER_GROUP)
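/*
 * Sanity check of the above, with BTRFS_STRIPE_LEN being 64KiB:
 * 64KiB * 8 stripes per group = 512KiB submitted in one go, and
 * 512KiB * 16 groups = 8MiB in flight per sctx, matching the figures
 * quoted in the comments above.
 */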
60
61 /*
62 * The following value times PAGE_SIZE needs to be large enough to match the
63 * largest node/leaf/sector size that shall be supported.
64 */
65 #define SCRUB_MAX_SECTORS_PER_BLOCK (BTRFS_MAX_METADATA_BLOCKSIZE / SZ_4K)
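/* With BTRFS_MAX_METADATA_BLOCKSIZE being 64KiB this evaluates to 16. */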
66
67 /* Represent one sector and its needed info to verify the content. */
68 struct scrub_sector_verification {
69 bool is_metadata;
70
71 union {
72 /*
73 * Csum pointer for data csum verification. Should point to a
74 * sector csum inside scrub_stripe::csums.
75 *
76 * NULL if this data sector has no csum.
77 */
78 u8 *csum;
79
80 /*
81 * Extra info for metadata verification. All sectors inside a
82 * tree block share the same generation.
83 */
84 u64 generation;
85 };
86 };
87
88 enum scrub_stripe_flags {
89 /* Set when @mirror_num, @dev, @physical and @logical are set. */
90 SCRUB_STRIPE_FLAG_INITIALIZED,
91
92 /* Set when the read-repair is finished. */
93 SCRUB_STRIPE_FLAG_REPAIR_DONE,
94
95 /*
96 * Set for data stripes if the scrub is triggered from a P/Q stripe.
97 * During such scrub, we should not report errors in data stripes, nor
98 * update the accounting.
99 */
100 SCRUB_STRIPE_FLAG_NO_REPORT,
101 };
102
103 #define SCRUB_STRIPE_PAGES (BTRFS_STRIPE_LEN / PAGE_SIZE)
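/* E.g. 64KiB stripe / 4KiB pages = 16 pages; fewer with larger page sizes. */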
104
105 /*
106 * Represent one contiguous range with a length of BTRFS_STRIPE_LEN.
107 */
108 struct scrub_stripe {
109 struct scrub_ctx *sctx;
110 struct btrfs_block_group *bg;
111
112 struct page *pages[SCRUB_STRIPE_PAGES];
113 struct scrub_sector_verification *sectors;
114
115 struct btrfs_device *dev;
116 u64 logical;
117 u64 physical;
118
119 u16 mirror_num;
120
121 /* Should be BTRFS_STRIPE_LEN / sectorsize. */
122 u16 nr_sectors;
123
124 /*
125 * How many data/meta extents are in this stripe. Only for scrub status
126 * reporting purposes.
127 */
128 u16 nr_data_extents;
129 u16 nr_meta_extents;
130
131 atomic_t pending_io;
132 wait_queue_head_t io_wait;
133 wait_queue_head_t repair_wait;
134
135 /*
136 * Indicate the states of the stripe. Bits are defined in
137 * scrub_stripe_flags enum.
138 */
139 unsigned long state;
140
141 /* Indicate which sectors are covered by extent items. */
142 unsigned long extent_sector_bitmap;
143
144 /*
145 * The errors hit during the initial read of the stripe.
146 *
147 * Would be utilized for error reporting and repair.
148 *
149 * The remaining init_nr_* record the number of errors hit, only used
150 * by error reporting.
151 */
152 unsigned long init_error_bitmap;
153 unsigned int init_nr_io_errors;
154 unsigned int init_nr_csum_errors;
155 unsigned int init_nr_meta_errors;
156
157 /*
158 * The following error bitmaps are all for the current status.
159 * Every time we submit a new read, these bitmaps may be updated.
160 *
161 * error_bitmap = io_error_bitmap | csum_error_bitmap | meta_error_bitmap;
162 *
163 * IO and csum errors can happen for both metadata and data.
164 */
165 unsigned long error_bitmap;
166 unsigned long io_error_bitmap;
167 unsigned long csum_error_bitmap;
168 unsigned long meta_error_bitmap;
169
170 /* For writeback (repair or replace) error reporting. */
171 unsigned long write_error_bitmap;
172
173 /* Writeback can be concurrent, thus we need to protect the bitmap. */
174 spinlock_t write_error_lock;
175
176 /*
177 * Checksum for the whole stripe if this stripe is inside a data block
178 * group.
179 */
180 u8 *csums;
181
182 struct work_struct work;
183 };
184
185 struct scrub_ctx {
186 struct scrub_stripe stripes[SCRUB_TOTAL_STRIPES];
187 struct scrub_stripe *raid56_data_stripes;
188 struct btrfs_fs_info *fs_info;
189 struct btrfs_path extent_path;
190 struct btrfs_path csum_path;
191 int first_free;
192 int cur_stripe;
193 atomic_t cancel_req;
194 int readonly;
195 int sectors_per_bio;
196
197 /* State of IO submission throttling affecting the associated device */
198 ktime_t throttle_deadline;
199 u64 throttle_sent;
200
201 int is_dev_replace;
202 u64 write_pointer;
203
204 struct mutex wr_lock;
205 struct btrfs_device *wr_tgtdev;
206
207 /*
208 * statistics
209 */
210 struct btrfs_scrub_progress stat;
211 spinlock_t stat_lock;
212
213 /*
214 * Use a ref counter to avoid use-after-free issues. The main scrub
215 * task must not free the scrub context while scrub workers may still
216 * be running and doing wakeups on the wait queues.
219 */
220 refcount_t refs;
221 };
222
223 struct scrub_warning {
224 struct btrfs_path *path;
225 u64 extent_item_size;
226 const char *errstr;
227 u64 physical;
228 u64 logical;
229 struct btrfs_device *dev;
230 };
231
232 static void release_scrub_stripe(struct scrub_stripe *stripe)
233 {
234 if (!stripe)
235 return;
236
237 for (int i = 0; i < SCRUB_STRIPE_PAGES; i++) {
238 if (stripe->pages[i])
239 __free_page(stripe->pages[i]);
240 stripe->pages[i] = NULL;
241 }
242 kfree(stripe->sectors);
243 kfree(stripe->csums);
244 stripe->sectors = NULL;
245 stripe->csums = NULL;
246 stripe->sctx = NULL;
247 stripe->state = 0;
248 }
249
250 static int init_scrub_stripe(struct btrfs_fs_info *fs_info,
251 struct scrub_stripe *stripe)
252 {
253 int ret;
254
255 memset(stripe, 0, sizeof(*stripe));
256
257 stripe->nr_sectors = BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits;
258 stripe->state = 0;
259
260 init_waitqueue_head(&stripe->io_wait);
261 init_waitqueue_head(&stripe->repair_wait);
262 atomic_set(&stripe->pending_io, 0);
263 spin_lock_init(&stripe->write_error_lock);
264
265 ret = btrfs_alloc_page_array(SCRUB_STRIPE_PAGES, stripe->pages);
266 if (ret < 0)
267 goto error;
268
269 stripe->sectors = kcalloc(stripe->nr_sectors,
270 sizeof(struct scrub_sector_verification),
271 GFP_KERNEL);
272 if (!stripe->sectors)
273 goto error;
274
275 stripe->csums = kcalloc(BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits,
276 fs_info->csum_size, GFP_KERNEL);
277 if (!stripe->csums)
278 goto error;
279 return 0;
280 error:
281 release_scrub_stripe(stripe);
282 return -ENOMEM;
283 }
284
285 static void wait_scrub_stripe_io(struct scrub_stripe *stripe)
286 {
287 wait_event(stripe->io_wait, atomic_read(&stripe->pending_io) == 0);
288 }
289
290 static void scrub_put_ctx(struct scrub_ctx *sctx);
291
292 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
293 {
294 while (atomic_read(&fs_info->scrub_pause_req)) {
295 mutex_unlock(&fs_info->scrub_lock);
296 wait_event(fs_info->scrub_pause_wait,
297 atomic_read(&fs_info->scrub_pause_req) == 0);
298 mutex_lock(&fs_info->scrub_lock);
299 }
300 }
301
302 static void scrub_pause_on(struct btrfs_fs_info *fs_info)
303 {
304 atomic_inc(&fs_info->scrubs_paused);
305 wake_up(&fs_info->scrub_pause_wait);
306 }
307
308 static void scrub_pause_off(struct btrfs_fs_info *fs_info)
309 {
310 mutex_lock(&fs_info->scrub_lock);
311 __scrub_blocked_if_needed(fs_info);
312 atomic_dec(&fs_info->scrubs_paused);
313 mutex_unlock(&fs_info->scrub_lock);
314
315 wake_up(&fs_info->scrub_pause_wait);
316 }
317
318 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
319 {
320 scrub_pause_on(fs_info);
321 scrub_pause_off(fs_info);
322 }
323
324 static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
325 {
326 int i;
327
328 if (!sctx)
329 return;
330
331 for (i = 0; i < SCRUB_TOTAL_STRIPES; i++)
332 release_scrub_stripe(&sctx->stripes[i]);
333
334 kvfree(sctx);
335 }
336
337 static void scrub_put_ctx(struct scrub_ctx *sctx)
338 {
339 if (refcount_dec_and_test(&sctx->refs))
340 scrub_free_ctx(sctx);
341 }
342
343 static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
344 struct btrfs_fs_info *fs_info, int is_dev_replace)
345 {
346 struct scrub_ctx *sctx;
347 int i;
348
349 /* Since sctx has inline 128 stripes, it can go beyond 64K easily. Use
350 * kvzalloc().
351 */
352 sctx = kvzalloc(sizeof(*sctx), GFP_KERNEL);
353 if (!sctx)
354 goto nomem;
355 refcount_set(&sctx->refs, 1);
356 sctx->is_dev_replace = is_dev_replace;
357 sctx->fs_info = fs_info;
358 sctx->extent_path.search_commit_root = 1;
359 sctx->extent_path.skip_locking = 1;
360 sctx->csum_path.search_commit_root = 1;
361 sctx->csum_path.skip_locking = 1;
362 for (i = 0; i < SCRUB_TOTAL_STRIPES; i++) {
363 int ret;
364
365 ret = init_scrub_stripe(fs_info, &sctx->stripes[i]);
366 if (ret < 0)
367 goto nomem;
368 sctx->stripes[i].sctx = sctx;
369 }
370 sctx->first_free = 0;
371 atomic_set(&sctx->cancel_req, 0);
372
373 spin_lock_init(&sctx->stat_lock);
374 sctx->throttle_deadline = 0;
375
376 mutex_init(&sctx->wr_lock);
377 if (is_dev_replace) {
378 WARN_ON(!fs_info->dev_replace.tgtdev);
379 sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
380 }
381
382 return sctx;
383
384 nomem:
385 scrub_free_ctx(sctx);
386 return ERR_PTR(-ENOMEM);
387 }
388
389 static int scrub_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
390 u64 root, void *warn_ctx)
391 {
392 u32 nlink;
393 int ret;
394 int i;
395 unsigned nofs_flag;
396 struct extent_buffer *eb;
397 struct btrfs_inode_item *inode_item;
398 struct scrub_warning *swarn = warn_ctx;
399 struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
400 struct inode_fs_paths *ipath = NULL;
401 struct btrfs_root *local_root;
402 struct btrfs_key key;
403
404 local_root = btrfs_get_fs_root(fs_info, root, true);
405 if (IS_ERR(local_root)) {
406 ret = PTR_ERR(local_root);
407 goto err;
408 }
409
410 /*
411 * this makes the path point to (inum INODE_ITEM ioff)
412 */
413 key.objectid = inum;
414 key.type = BTRFS_INODE_ITEM_KEY;
415 key.offset = 0;
416
417 ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
418 if (ret) {
419 btrfs_put_root(local_root);
420 btrfs_release_path(swarn->path);
421 goto err;
422 }
423
424 eb = swarn->path->nodes[0];
425 inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
426 struct btrfs_inode_item);
427 nlink = btrfs_inode_nlink(eb, inode_item);
428 btrfs_release_path(swarn->path);
429
430 /*
431 * init_ipath() might indirectly call vmalloc, or use GFP_KERNEL. Scrub
432 * uses GFP_NOFS in this context, so we keep it consistent but it does
433 * not seem to be strictly necessary.
434 */
435 nofs_flag = memalloc_nofs_save();
436 ipath = init_ipath(4096, local_root, swarn->path);
437 memalloc_nofs_restore(nofs_flag);
438 if (IS_ERR(ipath)) {
439 btrfs_put_root(local_root);
440 ret = PTR_ERR(ipath);
441 ipath = NULL;
442 goto err;
443 }
444 ret = paths_from_inode(inum, ipath);
445
446 if (ret < 0)
447 goto err;
448
449 /*
450 * We deliberately ignore the fact that ipath might have been too small
451 * to hold all of the paths here.
452 */
453 for (i = 0; i < ipath->fspath->elem_cnt; ++i)
454 btrfs_warn_in_rcu(fs_info,
455 "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %u, links %u (path: %s)",
456 swarn->errstr, swarn->logical,
457 btrfs_dev_name(swarn->dev),
458 swarn->physical,
459 root, inum, offset,
460 fs_info->sectorsize, nlink,
461 (char *)(unsigned long)ipath->fspath->val[i]);
462
463 btrfs_put_root(local_root);
464 free_ipath(ipath);
465 return 0;
466
467 err:
468 btrfs_warn_in_rcu(fs_info,
469 "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
470 swarn->errstr, swarn->logical,
471 btrfs_dev_name(swarn->dev),
472 swarn->physical,
473 root, inum, offset, ret);
474
475 free_ipath(ipath);
476 return 0;
477 }
478
479 static void scrub_print_common_warning(const char *errstr, struct btrfs_device *dev,
480 bool is_super, u64 logical, u64 physical)
481 {
482 struct btrfs_fs_info *fs_info = dev->fs_info;
483 struct btrfs_path *path;
484 struct btrfs_key found_key;
485 struct extent_buffer *eb;
486 struct btrfs_extent_item *ei;
487 struct scrub_warning swarn;
488 u64 flags = 0;
489 u32 item_size;
490 int ret;
491
492 /* Super block error, no need to search extent tree. */
493 if (is_super) {
494 btrfs_warn_in_rcu(fs_info, "%s on device %s, physical %llu",
495 errstr, btrfs_dev_name(dev), physical);
496 return;
497 }
498 path = btrfs_alloc_path();
499 if (!path)
500 return;
501
502 swarn.physical = physical;
503 swarn.logical = logical;
504 swarn.errstr = errstr;
505 swarn.dev = NULL;
506
507 ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
508 &flags);
509 if (ret < 0)
510 goto out;
511
512 swarn.extent_item_size = found_key.offset;
513
514 eb = path->nodes[0];
515 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
516 item_size = btrfs_item_size(eb, path->slots[0]);
517
518 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
519 unsigned long ptr = 0;
520 u8 ref_level;
521 u64 ref_root;
522
523 while (true) {
524 ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
525 item_size, &ref_root,
526 &ref_level);
527 if (ret < 0) {
528 btrfs_warn(fs_info,
529 "failed to resolve tree backref for logical %llu: %d",
530 swarn.logical, ret);
531 break;
532 }
533 if (ret > 0)
534 break;
535 btrfs_warn_in_rcu(fs_info,
536 "%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
537 errstr, swarn.logical, btrfs_dev_name(dev),
538 swarn.physical, (ref_level ? "node" : "leaf"),
539 ref_level, ref_root);
540 }
541 btrfs_release_path(path);
542 } else {
543 struct btrfs_backref_walk_ctx ctx = { 0 };
544
545 btrfs_release_path(path);
546
547 ctx.bytenr = found_key.objectid;
548 ctx.extent_item_pos = swarn.logical - found_key.objectid;
549 ctx.fs_info = fs_info;
550
551 swarn.path = path;
552 swarn.dev = dev;
553
554 iterate_extent_inodes(&ctx, true, scrub_print_warning_inode, &swarn);
555 }
556
557 out:
558 btrfs_free_path(path);
559 }
560
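/*
 * Zoned devices require sequential writes inside a zone. If the position we
 * are about to write to is ahead of the device's current write pointer, zero
 * out the gap first so the following repair/replace write lands at @physical.
 */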
561 static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical)
562 {
563 int ret = 0;
564 u64 length;
565
566 if (!btrfs_is_zoned(sctx->fs_info))
567 return 0;
568
569 if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical))
570 return 0;
571
572 if (sctx->write_pointer < physical) {
573 length = physical - sctx->write_pointer;
574
575 ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev,
576 sctx->write_pointer, length);
577 if (!ret)
578 sctx->write_pointer = physical;
579 }
580 return ret;
581 }
582
583 static struct page *scrub_stripe_get_page(struct scrub_stripe *stripe, int sector_nr)
584 {
585 struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
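/*
 * E.g. with 4KiB sectors and 4KiB pages, sector N lives in page N; with
 * 64KiB pages every sector of the 64KiB stripe lives in page 0.
 */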
586 int page_index = (sector_nr << fs_info->sectorsize_bits) >> PAGE_SHIFT;
587
588 return stripe->pages[page_index];
589 }
590
591 static unsigned int scrub_stripe_get_page_offset(struct scrub_stripe *stripe,
592 int sector_nr)
593 {
594 struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
595
596 return offset_in_page(sector_nr << fs_info->sectorsize_bits);
597 }
598
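/*
 * Verify one tree block whose first sector is @sector_nr: check the bytenr,
 * fsid, chunk tree uuid, checksum and generation stored in the header against
 * what the stripe expects, and update the error bitmaps accordingly.
 */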
599 static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr)
600 {
601 struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
602 const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
603 const u64 logical = stripe->logical + (sector_nr << fs_info->sectorsize_bits);
604 const struct page *first_page = scrub_stripe_get_page(stripe, sector_nr);
605 const unsigned int first_off = scrub_stripe_get_page_offset(stripe, sector_nr);
606 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
607 u8 on_disk_csum[BTRFS_CSUM_SIZE];
608 u8 calculated_csum[BTRFS_CSUM_SIZE];
609 struct btrfs_header *header;
610
611 /*
612 * Here we don't have a good way to attach the pages (and subpages)
613 * to a dummy extent buffer, thus we have to directly grab the members
614 * from pages.
615 */
616 header = (struct btrfs_header *)(page_address(first_page) + first_off);
617 memcpy(on_disk_csum, header->csum, fs_info->csum_size);
618
619 if (logical != btrfs_stack_header_bytenr(header)) {
620 bitmap_set(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree);
621 bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
622 btrfs_warn_rl(fs_info,
623 "tree block %llu mirror %u has bad bytenr, has %llu want %llu",
624 logical, stripe->mirror_num,
625 btrfs_stack_header_bytenr(header), logical);
626 return;
627 }
628 if (memcmp(header->fsid, fs_info->fs_devices->metadata_uuid,
629 BTRFS_FSID_SIZE) != 0) {
630 bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
631 bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
632 btrfs_warn_rl(fs_info,
633 "tree block %llu mirror %u has bad fsid, has %pU want %pU",
634 logical, stripe->mirror_num,
635 header->fsid, fs_info->fs_devices->fsid);
636 return;
637 }
638 if (memcmp(header->chunk_tree_uuid, fs_info->chunk_tree_uuid,
639 BTRFS_UUID_SIZE) != 0) {
640 bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
641 bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
642 btrfs_warn_rl(fs_info,
643 "tree block %llu mirror %u has bad chunk tree uuid, has %pU want %pU",
644 logical, stripe->mirror_num,
645 header->chunk_tree_uuid, fs_info->chunk_tree_uuid);
646 return;
647 }
648
649 /* Now check tree block csum. */
650 shash->tfm = fs_info->csum_shash;
651 crypto_shash_init(shash);
652 crypto_shash_update(shash, page_address(first_page) + first_off +
653 BTRFS_CSUM_SIZE, fs_info->sectorsize - BTRFS_CSUM_SIZE);
654
655 for (int i = sector_nr + 1; i < sector_nr + sectors_per_tree; i++) {
656 struct page *page = scrub_stripe_get_page(stripe, i);
657 unsigned int page_off = scrub_stripe_get_page_offset(stripe, i);
658
659 crypto_shash_update(shash, page_address(page) + page_off,
660 fs_info->sectorsize);
661 }
662
663 crypto_shash_final(shash, calculated_csum);
664 if (memcmp(calculated_csum, on_disk_csum, fs_info->csum_size) != 0) {
665 bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
666 bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
667 btrfs_warn_rl(fs_info,
668 "tree block %llu mirror %u has bad csum, has " CSUM_FMT " want " CSUM_FMT,
669 logical, stripe->mirror_num,
670 CSUM_FMT_VALUE(fs_info->csum_size, on_disk_csum),
671 CSUM_FMT_VALUE(fs_info->csum_size, calculated_csum));
672 return;
673 }
674 if (stripe->sectors[sector_nr].generation !=
675 btrfs_stack_header_generation(header)) {
676 bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
677 bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
678 btrfs_warn_rl(fs_info,
679 "tree block %llu mirror %u has bad generation, has %llu want %llu",
680 logical, stripe->mirror_num,
681 btrfs_stack_header_generation(header),
682 stripe->sectors[sector_nr].generation);
683 return;
684 }
685 bitmap_clear(&stripe->error_bitmap, sector_nr, sectors_per_tree);
686 bitmap_clear(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree);
687 bitmap_clear(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
688 }
689
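/*
 * Verify a single sector: skip sectors not covered by any extent or already
 * hit by an IO error, verify the whole tree block for metadata sectors, and
 * otherwise check the data checksum if one is available.
 */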
690 static void scrub_verify_one_sector(struct scrub_stripe *stripe, int sector_nr)
691 {
692 struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
693 struct scrub_sector_verification *sector = &stripe->sectors[sector_nr];
694 const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
695 struct page *page = scrub_stripe_get_page(stripe, sector_nr);
696 unsigned int pgoff = scrub_stripe_get_page_offset(stripe, sector_nr);
697 u8 csum_buf[BTRFS_CSUM_SIZE];
698 int ret;
699
700 ASSERT(sector_nr >= 0 && sector_nr < stripe->nr_sectors);
701
702 /* Sector not utilized, skip it. */
703 if (!test_bit(sector_nr, &stripe->extent_sector_bitmap))
704 return;
705
706 /* IO error, no need to check. */
707 if (test_bit(sector_nr, &stripe->io_error_bitmap))
708 return;
709
710 /* Metadata, verify the full tree block. */
711 if (sector->is_metadata) {
712 /*
713 * Check if the tree block crosses the stripe boundary. If it
714 * crosses the boundary, we cannot verify it and can only give
715 * a warning.
716 *
717 * This can only happen on a very old filesystem where chunks
718 * are not ensured to be stripe aligned.
719 */
720 if (unlikely(sector_nr + sectors_per_tree > stripe->nr_sectors)) {
721 btrfs_warn_rl(fs_info,
722 "tree block at %llu crosses stripe boundary %llu",
723 stripe->logical +
724 (sector_nr << fs_info->sectorsize_bits),
725 stripe->logical);
726 return;
727 }
728 scrub_verify_one_metadata(stripe, sector_nr);
729 return;
730 }
731
732 /*
733 * Data is easier, we just verify the data csum (if we have it). For
734 * cases without csum, we have no other choice but to trust it.
735 */
736 if (!sector->csum) {
737 clear_bit(sector_nr, &stripe->error_bitmap);
738 return;
739 }
740
741 ret = btrfs_check_sector_csum(fs_info, page, pgoff, csum_buf, sector->csum);
742 if (ret < 0) {
743 set_bit(sector_nr, &stripe->csum_error_bitmap);
744 set_bit(sector_nr, &stripe->error_bitmap);
745 } else {
746 clear_bit(sector_nr, &stripe->csum_error_bitmap);
747 clear_bit(sector_nr, &stripe->error_bitmap);
748 }
749 }
750
751 /* Verify specified sectors of a stripe. */
752 static void scrub_verify_one_stripe(struct scrub_stripe *stripe, unsigned long bitmap)
753 {
754 struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
755 const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
756 int sector_nr;
757
758 for_each_set_bit(sector_nr, &bitmap, stripe->nr_sectors) {
759 scrub_verify_one_sector(stripe, sector_nr);
760 if (stripe->sectors[sector_nr].is_metadata)
761 sector_nr += sectors_per_tree - 1;
762 }
763 }
764
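/*
 * Map the first bio_vec of a finished bio back to the sector number inside
 * the stripe, by matching its page and in-page offset against the stripe's
 * backing pages.
 */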
765 static int calc_sector_number(struct scrub_stripe *stripe, struct bio_vec *first_bvec)
766 {
767 int i;
768
769 for (i = 0; i < stripe->nr_sectors; i++) {
770 if (scrub_stripe_get_page(stripe, i) == first_bvec->bv_page &&
771 scrub_stripe_get_page_offset(stripe, i) == first_bvec->bv_offset)
772 break;
773 }
774 ASSERT(i < stripe->nr_sectors);
775 return i;
776 }
777
778 /*
779 * Repair read is different from the regular read:
780 *
781 * - Only reads the failed sectors
782 * - May have extra blocksize limits
783 */
784 static void scrub_repair_read_endio(struct btrfs_bio *bbio)
785 {
786 struct scrub_stripe *stripe = bbio->private;
787 struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
788 struct bio_vec *bvec;
789 int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
790 u32 bio_size = 0;
791 int i;
792
793 ASSERT(sector_nr < stripe->nr_sectors);
794
795 bio_for_each_bvec_all(bvec, &bbio->bio, i)
796 bio_size += bvec->bv_len;
797
798 if (bbio->bio.bi_status) {
799 bitmap_set(&stripe->io_error_bitmap, sector_nr,
800 bio_size >> fs_info->sectorsize_bits);
801 bitmap_set(&stripe->error_bitmap, sector_nr,
802 bio_size >> fs_info->sectorsize_bits);
803 } else {
804 bitmap_clear(&stripe->io_error_bitmap, sector_nr,
805 bio_size >> fs_info->sectorsize_bits);
806 }
807 bio_put(&bbio->bio);
808 if (atomic_dec_and_test(&stripe->pending_io))
809 wake_up(&stripe->io_wait);
810 }
811
812 static int calc_next_mirror(int mirror, int num_copies)
813 {
814 ASSERT(mirror <= num_copies);
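/* Wrap around, e.g. with 2 copies the order is 1, 2, 1, 2, ... */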
815 return (mirror + 1 > num_copies) ? 1 : mirror + 1;
816 }
817
818 static void scrub_stripe_submit_repair_read(struct scrub_stripe *stripe,
819 int mirror, int blocksize, bool wait)
820 {
821 struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
822 struct btrfs_bio *bbio = NULL;
823 const unsigned long old_error_bitmap = stripe->error_bitmap;
824 int i;
825
826 ASSERT(stripe->mirror_num >= 1);
827 ASSERT(atomic_read(&stripe->pending_io) == 0);
828
829 for_each_set_bit(i, &old_error_bitmap, stripe->nr_sectors) {
830 struct page *page;
831 int pgoff;
832 int ret;
833
834 page = scrub_stripe_get_page(stripe, i);
835 pgoff = scrub_stripe_get_page_offset(stripe, i);
836
837 /* The current sector cannot be merged, submit the bio. */
838 if (bbio && ((i > 0 && !test_bit(i - 1, &stripe->error_bitmap)) ||
839 bbio->bio.bi_iter.bi_size >= blocksize)) {
840 ASSERT(bbio->bio.bi_iter.bi_size);
841 atomic_inc(&stripe->pending_io);
842 btrfs_submit_bio(bbio, mirror);
843 if (wait)
844 wait_scrub_stripe_io(stripe);
845 bbio = NULL;
846 }
847
848 if (!bbio) {
849 bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ,
850 fs_info, scrub_repair_read_endio, stripe);
851 bbio->bio.bi_iter.bi_sector = (stripe->logical +
852 (i << fs_info->sectorsize_bits)) >> SECTOR_SHIFT;
853 }
854
855 ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
856 ASSERT(ret == fs_info->sectorsize);
857 }
858 if (bbio) {
859 ASSERT(bbio->bio.bi_iter.bi_size);
860 atomic_inc(&stripe->pending_io);
861 btrfs_submit_bio(bbio, mirror);
862 if (wait)
863 wait_scrub_stripe_io(stripe);
864 }
865 }
866
867 static void scrub_stripe_report_errors(struct scrub_ctx *sctx,
868 struct scrub_stripe *stripe)
869 {
870 static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
871 DEFAULT_RATELIMIT_BURST);
872 struct btrfs_fs_info *fs_info = sctx->fs_info;
873 struct btrfs_device *dev = NULL;
874 u64 physical = 0;
875 int nr_data_sectors = 0;
876 int nr_meta_sectors = 0;
877 int nr_nodatacsum_sectors = 0;
878 int nr_repaired_sectors = 0;
879 int sector_nr;
880
881 if (test_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state))
882 return;
883
884 /*
885 * Init the needed info for error reporting.
886 *
887 * Although our scrub_stripe infrastructure is mostly based on btrfs_submit_bio()
888 * and thus does not need dev/physical, error reporting still needs them.
889 */
890 if (!bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors)) {
891 u64 mapped_len = fs_info->sectorsize;
892 struct btrfs_io_context *bioc = NULL;
893 int stripe_index = stripe->mirror_num - 1;
894 int ret;
895
896 /* For scrub, our mirror_num should always start at 1. */
897 ASSERT(stripe->mirror_num >= 1);
898 ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
899 stripe->logical, &mapped_len, &bioc,
900 NULL, NULL, 1);
901 /*
902 * If we failed, dev will be NULL, and later detailed reports
903 * will just be skipped.
904 */
905 if (ret < 0)
906 goto skip;
907 physical = bioc->stripes[stripe_index].physical;
908 dev = bioc->stripes[stripe_index].dev;
909 btrfs_put_bioc(bioc);
910 }
911
912 skip:
913 for_each_set_bit(sector_nr, &stripe->extent_sector_bitmap, stripe->nr_sectors) {
914 bool repaired = false;
915
916 if (stripe->sectors[sector_nr].is_metadata) {
917 nr_meta_sectors++;
918 } else {
919 nr_data_sectors++;
920 if (!stripe->sectors[sector_nr].csum)
921 nr_nodatacsum_sectors++;
922 }
923
924 if (test_bit(sector_nr, &stripe->init_error_bitmap) &&
925 !test_bit(sector_nr, &stripe->error_bitmap)) {
926 nr_repaired_sectors++;
927 repaired = true;
928 }
929
930 /* Good sector from the beginning, nothing needs to be done. */
931 if (!test_bit(sector_nr, &stripe->init_error_bitmap))
932 continue;
933
934 /*
935 * Report errors for the corrupted sectors. If a sector was
936 * repaired, just output a message saying it was fixed up.
937 */
938 if (repaired) {
939 if (dev) {
940 btrfs_err_rl_in_rcu(fs_info,
941 "fixed up error at logical %llu on dev %s physical %llu",
942 stripe->logical, btrfs_dev_name(dev),
943 physical);
944 } else {
945 btrfs_err_rl_in_rcu(fs_info,
946 "fixed up error at logical %llu on mirror %u",
947 stripe->logical, stripe->mirror_num);
948 }
949 continue;
950 }
951
952 /* The remaining are all for unrepaired. */
953 if (dev) {
954 btrfs_err_rl_in_rcu(fs_info,
955 "unable to fixup (regular) error at logical %llu on dev %s physical %llu",
956 stripe->logical, btrfs_dev_name(dev),
957 physical);
958 } else {
959 btrfs_err_rl_in_rcu(fs_info,
960 "unable to fixup (regular) error at logical %llu on mirror %u",
961 stripe->logical, stripe->mirror_num);
962 }
963
964 if (test_bit(sector_nr, &stripe->io_error_bitmap))
965 if (__ratelimit(&rs) && dev)
966 scrub_print_common_warning("i/o error", dev, false,
967 stripe->logical, physical);
968 if (test_bit(sector_nr, &stripe->csum_error_bitmap))
969 if (__ratelimit(&rs) && dev)
970 scrub_print_common_warning("checksum error", dev, false,
971 stripe->logical, physical);
972 if (test_bit(sector_nr, &stripe->meta_error_bitmap))
973 if (__ratelimit(&rs) && dev)
974 scrub_print_common_warning("header error", dev, false,
975 stripe->logical, physical);
976 }
977
978 spin_lock(&sctx->stat_lock);
979 sctx->stat.data_extents_scrubbed += stripe->nr_data_extents;
980 sctx->stat.tree_extents_scrubbed += stripe->nr_meta_extents;
981 sctx->stat.data_bytes_scrubbed += nr_data_sectors << fs_info->sectorsize_bits;
982 sctx->stat.tree_bytes_scrubbed += nr_meta_sectors << fs_info->sectorsize_bits;
983 sctx->stat.no_csum += nr_nodatacsum_sectors;
984 sctx->stat.read_errors += stripe->init_nr_io_errors;
985 sctx->stat.csum_errors += stripe->init_nr_csum_errors;
986 sctx->stat.verify_errors += stripe->init_nr_meta_errors;
987 sctx->stat.uncorrectable_errors +=
988 bitmap_weight(&stripe->error_bitmap, stripe->nr_sectors);
989 sctx->stat.corrected_errors += nr_repaired_sectors;
990 spin_unlock(&sctx->stat_lock);
991 }
992
993 static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *stripe,
994 unsigned long write_bitmap, bool dev_replace);
995
996 /*
997 * The main entry point for all read related scrub work, including:
998 *
999 * - Wait for the initial read to finish
1000 * - Verify and locate any bad sectors
1001 * - Go through the remaining mirrors and try to read blocks as large as
1002 * possible
1003 * - Go through all mirrors (including the failed mirror) sector-by-sector
1004 * - Submit writeback for repaired sectors
1005 *
1006 * Writeback for dev-replace does not happen here, it needs extra
1007 * synchronization for zoned devices.
1008 */
1009 static void scrub_stripe_read_repair_worker(struct work_struct *work)
1010 {
1011 struct scrub_stripe *stripe = container_of(work, struct scrub_stripe, work);
1012 struct scrub_ctx *sctx = stripe->sctx;
1013 struct btrfs_fs_info *fs_info = sctx->fs_info;
1014 int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
1015 stripe->bg->length);
1016 unsigned long repaired;
1017 int mirror;
1018 int i;
1019
1020 ASSERT(stripe->mirror_num > 0);
1021
1022 wait_scrub_stripe_io(stripe);
1023 scrub_verify_one_stripe(stripe, stripe->extent_sector_bitmap);
1024 /* Save the initial failed bitmap for later repair and error reporting. */
1025 stripe->init_error_bitmap = stripe->error_bitmap;
1026 stripe->init_nr_io_errors = bitmap_weight(&stripe->io_error_bitmap,
1027 stripe->nr_sectors);
1028 stripe->init_nr_csum_errors = bitmap_weight(&stripe->csum_error_bitmap,
1029 stripe->nr_sectors);
1030 stripe->init_nr_meta_errors = bitmap_weight(&stripe->meta_error_bitmap,
1031 stripe->nr_sectors);
1032
1033 if (bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors))
1034 goto out;
1035
1036 /*
1037 * Try all remaining mirrors.
1038 *
1039 * Here we still try to read blocks as large as possible, as this is
1040 * faster and we have extra safety nets to rely on.
1041 */
1042 for (mirror = calc_next_mirror(stripe->mirror_num, num_copies);
1043 mirror != stripe->mirror_num;
1044 mirror = calc_next_mirror(mirror, num_copies)) {
1045 const unsigned long old_error_bitmap = stripe->error_bitmap;
1046
1047 scrub_stripe_submit_repair_read(stripe, mirror,
1048 BTRFS_STRIPE_LEN, false);
1049 wait_scrub_stripe_io(stripe);
1050 scrub_verify_one_stripe(stripe, old_error_bitmap);
1051 if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
1052 goto out;
1053 }
1054
1055 /*
1056 * Last safety net, try re-checking all mirrors, including the failed
1057 * one, sector-by-sector.
1058 *
1059 * If one sector fails the drive's internal checksum, the whole read
1060 * containing the offending sector would be marked as an error.
1061 * Thus we read sector-by-sector here.
1062 *
1063 * This can be slow, thus we only try it as the last resort.
1064 */
1065
1066 for (i = 0, mirror = stripe->mirror_num;
1067 i < num_copies;
1068 i++, mirror = calc_next_mirror(mirror, num_copies)) {
1069 const unsigned long old_error_bitmap = stripe->error_bitmap;
1070
1071 scrub_stripe_submit_repair_read(stripe, mirror,
1072 fs_info->sectorsize, true);
1073 wait_scrub_stripe_io(stripe);
1074 scrub_verify_one_stripe(stripe, old_error_bitmap);
1075 if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
1076 goto out;
1077 }
1078 out:
1079 /*
1080 * Submit the repaired sectors. For zoned case, we cannot do repair
1081 * in-place, but queue the bg to be relocated.
1082 */
1083 bitmap_andnot(&repaired, &stripe->init_error_bitmap, &stripe->error_bitmap,
1084 stripe->nr_sectors);
1085 if (!sctx->readonly && !bitmap_empty(&repaired, stripe->nr_sectors)) {
1086 if (btrfs_is_zoned(fs_info)) {
1087 btrfs_repair_one_zone(fs_info, sctx->stripes[0].bg->start);
1088 } else {
1089 scrub_write_sectors(sctx, stripe, repaired, false);
1090 wait_scrub_stripe_io(stripe);
1091 }
1092 }
1093
1094 scrub_stripe_report_errors(sctx, stripe);
1095 set_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state);
1096 wake_up(&stripe->repair_wait);
1097 }
1098
1099 static void scrub_read_endio(struct btrfs_bio *bbio)
1100 {
1101 struct scrub_stripe *stripe = bbio->private;
1102 struct bio_vec *bvec;
1103 int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
1104 int num_sectors;
1105 u32 bio_size = 0;
1106 int i;
1107
1108 ASSERT(sector_nr < stripe->nr_sectors);
1109 bio_for_each_bvec_all(bvec, &bbio->bio, i)
1110 bio_size += bvec->bv_len;
1111 num_sectors = bio_size >> stripe->bg->fs_info->sectorsize_bits;
1112
1113 if (bbio->bio.bi_status) {
1114 bitmap_set(&stripe->io_error_bitmap, sector_nr, num_sectors);
1115 bitmap_set(&stripe->error_bitmap, sector_nr, num_sectors);
1116 } else {
1117 bitmap_clear(&stripe->io_error_bitmap, sector_nr, num_sectors);
1118 }
1119 bio_put(&bbio->bio);
1120 if (atomic_dec_and_test(&stripe->pending_io)) {
1121 wake_up(&stripe->io_wait);
1122 INIT_WORK(&stripe->work, scrub_stripe_read_repair_worker);
1123 queue_work(stripe->bg->fs_info->scrub_workers, &stripe->work);
1124 }
1125 }
1126
1127 static void scrub_write_endio(struct btrfs_bio *bbio)
1128 {
1129 struct scrub_stripe *stripe = bbio->private;
1130 struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
1131 struct bio_vec *bvec;
1132 int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
1133 u32 bio_size = 0;
1134 int i;
1135
1136 bio_for_each_bvec_all(bvec, &bbio->bio, i)
1137 bio_size += bvec->bv_len;
1138
1139 if (bbio->bio.bi_status) {
1140 unsigned long flags;
1141
1142 spin_lock_irqsave(&stripe->write_error_lock, flags);
1143 bitmap_set(&stripe->write_error_bitmap, sector_nr,
1144 bio_size >> fs_info->sectorsize_bits);
1145 spin_unlock_irqrestore(&stripe->write_error_lock, flags);
1146 }
1147 bio_put(&bbio->bio);
1148
1149 if (atomic_dec_and_test(&stripe->pending_io))
1150 wake_up(&stripe->io_wait);
1151 }
1152
1153 static void scrub_submit_write_bio(struct scrub_ctx *sctx,
1154 struct scrub_stripe *stripe,
1155 struct btrfs_bio *bbio, bool dev_replace)
1156 {
1157 struct btrfs_fs_info *fs_info = sctx->fs_info;
1158 u32 bio_len = bbio->bio.bi_iter.bi_size;
1159 u32 bio_off = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT) -
1160 stripe->logical;
1161
1162 fill_writer_pointer_gap(sctx, stripe->physical + bio_off);
1163 atomic_inc(&stripe->pending_io);
1164 btrfs_submit_repair_write(bbio, stripe->mirror_num, dev_replace);
1165 if (!btrfs_is_zoned(fs_info))
1166 return;
1167 /*
1168 * For zoned writeback, queue depth must be 1, thus we must wait for
1169 * the write to finish before the next write.
1170 */
1171 wait_scrub_stripe_io(stripe);
1172
1173 /*
1174 * And also need to update the write pointer if write finished
1175 * successfully.
1176 */
1177 if (!test_bit(bio_off >> fs_info->sectorsize_bits,
1178 &stripe->write_error_bitmap))
1179 sctx->write_pointer += bio_len;
1180 }
1181
1182 /*
1183 * Submit the write bio(s) for the sectors specified by @write_bitmap.
1184 *
1185 * Here we utilize btrfs_submit_repair_write(), which has some extra benefits:
1186 *
1187 * - Only needs logical bytenr and mirror_num
1188 * Just like the scrub read path
1189 *
1190 * - Would only result in writes to the specified mirror
1191 * Unlike the regular writeback path, which would write back to all stripes
1192 *
1193 * - Handles dev-replace and read-repair writeback differently
1194 */
1195 static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *stripe,
1196 unsigned long write_bitmap, bool dev_replace)
1197 {
1198 struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
1199 struct btrfs_bio *bbio = NULL;
1200 int sector_nr;
1201
1202 for_each_set_bit(sector_nr, &write_bitmap, stripe->nr_sectors) {
1203 struct page *page = scrub_stripe_get_page(stripe, sector_nr);
1204 unsigned int pgoff = scrub_stripe_get_page_offset(stripe, sector_nr);
1205 int ret;
1206
1207 /* We should only writeback sectors covered by an extent. */
1208 ASSERT(test_bit(sector_nr, &stripe->extent_sector_bitmap));
1209
1210 /* Cannot merge with previous sector, submit the current one. */
1211 if (bbio && sector_nr && !test_bit(sector_nr - 1, &write_bitmap)) {
1212 scrub_submit_write_bio(sctx, stripe, bbio, dev_replace);
1213 bbio = NULL;
1214 }
1215 if (!bbio) {
1216 bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_WRITE,
1217 fs_info, scrub_write_endio, stripe);
1218 bbio->bio.bi_iter.bi_sector = (stripe->logical +
1219 (sector_nr << fs_info->sectorsize_bits)) >>
1220 SECTOR_SHIFT;
1221 }
1222 ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
1223 ASSERT(ret == fs_info->sectorsize);
1224 }
1225 if (bbio)
1226 scrub_submit_write_bio(sctx, stripe, bbio, dev_replace);
1227 }
1228
1229 /*
1230 * Throttling of IO submission, bandwidth-limit based, the timeslice is 1
1231 * second. The limit can be set via /sys/fs/btrfs/<UUID>/devinfo/<devid>/scrub_speed_max.
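* For example (fsid and devid below are placeholders):
*   echo 104857600 > /sys/fs/btrfs/<fsid>/devinfo/<devid>/scrub_speed_max
* limits scrub reads on that device to roughly 100MiB/s.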
1232 */
1233 static void scrub_throttle_dev_io(struct scrub_ctx *sctx, struct btrfs_device *device,
1234 unsigned int bio_size)
1235 {
1236 const int time_slice = 1000;
1237 s64 delta;
1238 ktime_t now;
1239 u32 div;
1240 u64 bwlimit;
1241
1242 bwlimit = READ_ONCE(device->scrub_speed_max);
1243 if (bwlimit == 0)
1244 return;
1245
1246 /*
1247 * Slice is divided into intervals when the IO is submitted, adjust by
1248 * bwlimit and maximum of 64 intervals.
1249 */
1250 div = max_t(u32, 1, (u32)(bwlimit / (16 * 1024 * 1024)));
1251 div = min_t(u32, 64, div);
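/*
 * E.g. with scrub_speed_max at 100MiB/s: div = min(64, 100MiB / 16MiB) = 6,
 * so each interval lasts about 1000 / 6 = 166ms and permits roughly
 * 100MiB / 6 ~= 17MiB of IO before we sleep until the deadline.
 */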
1252
1253 /* Start new epoch, set deadline */
1254 now = ktime_get();
1255 if (sctx->throttle_deadline == 0) {
1256 sctx->throttle_deadline = ktime_add_ms(now, time_slice / div);
1257 sctx->throttle_sent = 0;
1258 }
1259
1260 /* Still in the time to send? */
1261 if (ktime_before(now, sctx->throttle_deadline)) {
1262 /* If current bio is within the limit, send it */
1263 sctx->throttle_sent += bio_size;
1264 if (sctx->throttle_sent <= div_u64(bwlimit, div))
1265 return;
1266
1267 /* We're over the limit, sleep until the rest of the slice */
1268 delta = ktime_ms_delta(sctx->throttle_deadline, now);
1269 } else {
1270 /* New request after deadline, start new epoch */
1271 delta = 0;
1272 }
1273
1274 if (delta) {
1275 long timeout;
1276
1277 timeout = div_u64(delta * HZ, 1000);
1278 schedule_timeout_interruptible(timeout);
1279 }
1280
1281 /* Next call will start the deadline period */
1282 sctx->throttle_deadline = 0;
1283 }
1284
1285 /*
1286 * Given a physical address, this will calculate its
1287 * logical offset. If this is a parity stripe, it will return
1288 * the leftmost data stripe's logical offset.
1289 *
1290 * Return 0 if it is a data stripe, 1 if it is a parity stripe.
1291 */
1292 static int get_raid56_logic_offset(u64 physical, int num,
1293 struct map_lookup *map, u64 *offset,
1294 u64 *stripe_start)
1295 {
1296 int i;
1297 int j = 0;
1298 u64 last_offset;
1299 const int data_stripes = nr_data_stripes(map);
1300
1301 last_offset = (physical - map->stripes[num].physical) * data_stripes;
1302 if (stripe_start)
1303 *stripe_start = last_offset;
1304
1305 *offset = last_offset;
1306 for (i = 0; i < data_stripes; i++) {
1307 u32 stripe_nr;
1308 u32 stripe_index;
1309 u32 rot;
1310
1311 *offset = last_offset + btrfs_stripe_nr_to_offset(i);
1312
1313 stripe_nr = (u32)(*offset >> BTRFS_STRIPE_LEN_SHIFT) / data_stripes;
1314
1315 /* Work out the disk rotation on this stripe-set */
1316 rot = stripe_nr % map->num_stripes;
1317 /* Calculate which stripe this data is located on. */
1318 rot += i;
1319 stripe_index = rot % map->num_stripes;
1320 if (stripe_index == num)
1321 return 0;
1322 if (stripe_index < num)
1323 j++;
1324 }
1325 *offset = last_offset + btrfs_stripe_nr_to_offset(j);
1326 return 1;
1327 }
1328
1329 /*
1330 * Return 0 if the extent item range covers any byte of the range.
1331 * Return <0 if the extent item is before @search_start.
1332 * Return >0 if the extent item is after @search_start + @search_len.
1333 */
1334 static int compare_extent_item_range(struct btrfs_path *path,
1335 u64 search_start, u64 search_len)
1336 {
1337 struct btrfs_fs_info *fs_info = path->nodes[0]->fs_info;
1338 u64 len;
1339 struct btrfs_key key;
1340
1341 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1342 ASSERT(key.type == BTRFS_EXTENT_ITEM_KEY ||
1343 key.type == BTRFS_METADATA_ITEM_KEY);
1344 if (key.type == BTRFS_METADATA_ITEM_KEY)
1345 len = fs_info->nodesize;
1346 else
1347 len = key.offset;
1348
1349 if (key.objectid + len <= search_start)
1350 return -1;
1351 if (key.objectid >= search_start + search_len)
1352 return 1;
1353 return 0;
1354 }
1355
1356 /*
1357 * Locate one extent item which covers any byte in range
1358 * [@search_start, @search_start + @search_length)
1359 *
1360 * If the path is not initialized, we will initialize the search by doing
1361 * a btrfs_search_slot().
1362 * If the path is already initialized, we will use the path as the initial
1363 * slot, to avoid duplicated btrfs_search_slot() calls.
1364 *
1365 * NOTE: If an extent item starts before @search_start, we will still
1366 * return the extent item. This is for data extents crossing stripe boundaries.
1367 *
1368 * Return 0 if we found such extent item, and @path will point to the extent item.
1369 * Return >0 if no such extent item can be found, and @path will be released.
1370 * Return <0 if hit fatal error, and @path will be released.
1371 */
1372 static int find_first_extent_item(struct btrfs_root *extent_root,
1373 struct btrfs_path *path,
1374 u64 search_start, u64 search_len)
1375 {
1376 struct btrfs_fs_info *fs_info = extent_root->fs_info;
1377 struct btrfs_key key;
1378 int ret;
1379
1380 /* Continue using the existing path */
1381 if (path->nodes[0])
1382 goto search_forward;
1383
1384 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1385 key.type = BTRFS_METADATA_ITEM_KEY;
1386 else
1387 key.type = BTRFS_EXTENT_ITEM_KEY;
1388 key.objectid = search_start;
1389 key.offset = (u64)-1;
1390
1391 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
1392 if (ret < 0)
1393 return ret;
1394
1395 ASSERT(ret > 0);
1396 /*
1397 * Here we intentionally pass 0 as @min_objectid, as there could be
1398 * an extent item starting before @search_start.
1399 */
1400 ret = btrfs_previous_extent_item(extent_root, path, 0);
1401 if (ret < 0)
1402 return ret;
1403 /*
1404 * No matter whether we have found an extent item, the next loop will
1405 * properly do every check on the key.
1406 */
1407 search_forward:
1408 while (true) {
1409 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1410 if (key.objectid >= search_start + search_len)
1411 break;
1412 if (key.type != BTRFS_METADATA_ITEM_KEY &&
1413 key.type != BTRFS_EXTENT_ITEM_KEY)
1414 goto next;
1415
1416 ret = compare_extent_item_range(path, search_start, search_len);
1417 if (ret == 0)
1418 return ret;
1419 if (ret > 0)
1420 break;
1421 next:
1422 path->slots[0]++;
1423 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
1424 ret = btrfs_next_leaf(extent_root, path);
1425 if (ret) {
1426 /* Either no more item or fatal error */
1427 btrfs_release_path(path);
1428 return ret;
1429 }
1430 }
1431 }
1432 btrfs_release_path(path);
1433 return 1;
1434 }
1435
1436 static void get_extent_info(struct btrfs_path *path, u64 *extent_start_ret,
1437 u64 *size_ret, u64 *flags_ret, u64 *generation_ret)
1438 {
1439 struct btrfs_key key;
1440 struct btrfs_extent_item *ei;
1441
1442 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1443 ASSERT(key.type == BTRFS_METADATA_ITEM_KEY ||
1444 key.type == BTRFS_EXTENT_ITEM_KEY);
1445 *extent_start_ret = key.objectid;
1446 if (key.type == BTRFS_METADATA_ITEM_KEY)
1447 *size_ret = path->nodes[0]->fs_info->nodesize;
1448 else
1449 *size_ret = key.offset;
1450 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_extent_item);
1451 *flags_ret = btrfs_extent_flags(path->nodes[0], ei);
1452 *generation_ret = btrfs_extent_generation(path->nodes[0], ei);
1453 }
1454
1455 static int sync_write_pointer_for_zoned(struct scrub_ctx *sctx, u64 logical,
1456 u64 physical, u64 physical_end)
1457 {
1458 struct btrfs_fs_info *fs_info = sctx->fs_info;
1459 int ret = 0;
1460
1461 if (!btrfs_is_zoned(fs_info))
1462 return 0;
1463
1464 mutex_lock(&sctx->wr_lock);
1465 if (sctx->write_pointer < physical_end) {
1466 ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical,
1467 physical,
1468 sctx->write_pointer);
1469 if (ret)
1470 btrfs_err(fs_info,
1471 "zoned: failed to recover write pointer");
1472 }
1473 mutex_unlock(&sctx->wr_lock);
1474 btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical);
1475
1476 return ret;
1477 }
1478
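/*
 * Mark all sectors of the stripe covered by [@extent_start, @extent_start +
 * @extent_len) in extent_sector_bitmap, and record the generation for
 * metadata sectors so they can be verified later.
 */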
1479 static void fill_one_extent_info(struct btrfs_fs_info *fs_info,
1480 struct scrub_stripe *stripe,
1481 u64 extent_start, u64 extent_len,
1482 u64 extent_flags, u64 extent_gen)
1483 {
1484 for (u64 cur_logical = max(stripe->logical, extent_start);
1485 cur_logical < min(stripe->logical + BTRFS_STRIPE_LEN,
1486 extent_start + extent_len);
1487 cur_logical += fs_info->sectorsize) {
1488 const int nr_sector = (cur_logical - stripe->logical) >>
1489 fs_info->sectorsize_bits;
1490 struct scrub_sector_verification *sector =
1491 &stripe->sectors[nr_sector];
1492
1493 set_bit(nr_sector, &stripe->extent_sector_bitmap);
1494 if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1495 sector->is_metadata = true;
1496 sector->generation = extent_gen;
1497 }
1498 }
1499 }
1500
1501 static void scrub_stripe_reset_bitmaps(struct scrub_stripe *stripe)
1502 {
1503 stripe->extent_sector_bitmap = 0;
1504 stripe->init_error_bitmap = 0;
1505 stripe->init_nr_io_errors = 0;
1506 stripe->init_nr_csum_errors = 0;
1507 stripe->init_nr_meta_errors = 0;
1508 stripe->error_bitmap = 0;
1509 stripe->io_error_bitmap = 0;
1510 stripe->csum_error_bitmap = 0;
1511 stripe->meta_error_bitmap = 0;
1512 }
1513
1514 /*
1515 * Locate one stripe which has at least one extent in its range.
1516 *
1517 * Return 0 if found such stripe, and store its info into @stripe.
1518 * Return >0 if there is no such stripe in the specified range.
1519 * Return <0 for error.
1520 */
1521 static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
1522 struct btrfs_path *extent_path,
1523 struct btrfs_path *csum_path,
1524 struct btrfs_device *dev, u64 physical,
1525 int mirror_num, u64 logical_start,
1526 u32 logical_len,
1527 struct scrub_stripe *stripe)
1528 {
1529 struct btrfs_fs_info *fs_info = bg->fs_info;
1530 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bg->start);
1531 struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bg->start);
1532 const u64 logical_end = logical_start + logical_len;
1533 u64 cur_logical = logical_start;
1534 u64 stripe_end;
1535 u64 extent_start;
1536 u64 extent_len;
1537 u64 extent_flags;
1538 u64 extent_gen;
1539 int ret;
1540
1541 if (unlikely(!extent_root)) {
1542 btrfs_err(fs_info, "no valid extent root for scrub");
1543 return -EUCLEAN;
1544 }
1545 memset(stripe->sectors, 0, sizeof(struct scrub_sector_verification) *
1546 stripe->nr_sectors);
1547 scrub_stripe_reset_bitmaps(stripe);
1548
1549 /* The range must be inside the bg. */
1550 ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);
1551
1552 ret = find_first_extent_item(extent_root, extent_path, logical_start,
1553 logical_len);
1554 /* Either error or not found. */
1555 if (ret)
1556 goto out;
1557 get_extent_info(extent_path, &extent_start, &extent_len, &extent_flags,
1558 &extent_gen);
1559 if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1560 stripe->nr_meta_extents++;
1561 if (extent_flags & BTRFS_EXTENT_FLAG_DATA)
1562 stripe->nr_data_extents++;
1563 cur_logical = max(extent_start, cur_logical);
1564
1565 /*
1566 * Round down to stripe boundary.
1567 *
1568 * The extra calculation against bg->start is to handle block groups
1569 * whose logical bytenr is not BTRFS_STRIPE_LEN aligned.
1570 */
1571 stripe->logical = round_down(cur_logical - bg->start, BTRFS_STRIPE_LEN) +
1572 bg->start;
1573 stripe->physical = physical + stripe->logical - logical_start;
1574 stripe->dev = dev;
1575 stripe->bg = bg;
1576 stripe->mirror_num = mirror_num;
1577 stripe_end = stripe->logical + BTRFS_STRIPE_LEN - 1;
1578
1579 /* Fill the first extent info into stripe->sectors[] array. */
1580 fill_one_extent_info(fs_info, stripe, extent_start, extent_len,
1581 extent_flags, extent_gen);
1582 cur_logical = extent_start + extent_len;
1583
1584 /* Fill the extent info for the remaining sectors. */
1585 while (cur_logical <= stripe_end) {
1586 ret = find_first_extent_item(extent_root, extent_path, cur_logical,
1587 stripe_end - cur_logical + 1);
1588 if (ret < 0)
1589 goto out;
1590 if (ret > 0) {
1591 ret = 0;
1592 break;
1593 }
1594 get_extent_info(extent_path, &extent_start, &extent_len,
1595 &extent_flags, &extent_gen);
1596 if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1597 stripe->nr_meta_extents++;
1598 if (extent_flags & BTRFS_EXTENT_FLAG_DATA)
1599 stripe->nr_data_extents++;
1600 fill_one_extent_info(fs_info, stripe, extent_start, extent_len,
1601 extent_flags, extent_gen);
1602 cur_logical = extent_start + extent_len;
1603 }
1604
1605 /* Now fill the data csum. */
1606 if (bg->flags & BTRFS_BLOCK_GROUP_DATA) {
1607 int sector_nr;
1608 unsigned long csum_bitmap = 0;
1609
1610 /* Csum space should have already been allocated. */
1611 ASSERT(stripe->csums);
1612
1613 /*
1614 * Our csum bitmap should be large enough, as BTRFS_STRIPE_LEN
1615 * should contain at most 16 sectors.
1616 */
1617 ASSERT(BITS_PER_LONG >= BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
1618
1619 ret = btrfs_lookup_csums_bitmap(csum_root, csum_path,
1620 stripe->logical, stripe_end,
1621 stripe->csums, &csum_bitmap);
1622 if (ret < 0)
1623 goto out;
1624 if (ret > 0)
1625 ret = 0;
1626
1627 for_each_set_bit(sector_nr, &csum_bitmap, stripe->nr_sectors) {
1628 stripe->sectors[sector_nr].csum = stripe->csums +
1629 sector_nr * fs_info->csum_size;
1630 }
1631 }
1632 set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state);
1633 out:
1634 return ret;
1635 }
1636
1637 static void scrub_reset_stripe(struct scrub_stripe *stripe)
1638 {
1639 scrub_stripe_reset_bitmaps(stripe);
1640
1641 stripe->nr_meta_extents = 0;
1642 stripe->nr_data_extents = 0;
1643 stripe->state = 0;
1644
1645 for (int i = 0; i < stripe->nr_sectors; i++) {
1646 stripe->sectors[i].is_metadata = false;
1647 stripe->sectors[i].csum = NULL;
1648 stripe->sectors[i].generation = 0;
1649 }
1650 }
1651
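/*
 * Submit one read bio covering the whole stripe (clamped to the end of the
 * block group). Completion is handled by scrub_read_endio(), which queues
 * the read-repair worker once all pending IO of the stripe has finished.
 */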
1652 static void scrub_submit_initial_read(struct scrub_ctx *sctx,
1653 struct scrub_stripe *stripe)
1654 {
1655 struct btrfs_fs_info *fs_info = sctx->fs_info;
1656 struct btrfs_bio *bbio;
1657 unsigned int nr_sectors = min_t(u64, BTRFS_STRIPE_LEN, stripe->bg->start +
1658 stripe->bg->length - stripe->logical) >>
1659 fs_info->sectorsize_bits;
1660 int mirror = stripe->mirror_num;
1661
1662 ASSERT(stripe->bg);
1663 ASSERT(stripe->mirror_num > 0);
1664 ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state));
1665
1666 bbio = btrfs_bio_alloc(SCRUB_STRIPE_PAGES, REQ_OP_READ, fs_info,
1667 scrub_read_endio, stripe);
1668
1669 bbio->bio.bi_iter.bi_sector = stripe->logical >> SECTOR_SHIFT;
1670 /* Read the whole range inside the chunk boundary. */
1671 for (unsigned int cur = 0; cur < nr_sectors; cur++) {
1672 struct page *page = scrub_stripe_get_page(stripe, cur);
1673 unsigned int pgoff = scrub_stripe_get_page_offset(stripe, cur);
1674 int ret;
1675
1676 ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
1677 /* We should have allocated enough bio vectors. */
1678 ASSERT(ret == fs_info->sectorsize);
1679 }
1680 atomic_inc(&stripe->pending_io);
1681
1682 /*
1683 * For dev-replace, if the user asks to avoid the source dev, or
1684 * the device is missing, we try the next mirror instead.
1685 */
1686 if (sctx->is_dev_replace &&
1687 (fs_info->dev_replace.cont_reading_from_srcdev_mode ==
1688 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID ||
1689 !stripe->dev->bdev)) {
1690 int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
1691 stripe->bg->length);
1692
1693 mirror = calc_next_mirror(mirror, num_copies);
1694 }
1695 btrfs_submit_bio(bbio, mirror);
1696 }
1697
1698 static bool stripe_has_metadata_error(struct scrub_stripe *stripe)
1699 {
1700 int i;
1701
1702 for_each_set_bit(i, &stripe->error_bitmap, stripe->nr_sectors) {
1703 if (stripe->sectors[i].is_metadata) {
1704 struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
1705
1706 btrfs_err(fs_info,
1707 "stripe %llu has unrepaired metadata sector at %llu",
1708 stripe->logical,
1709 stripe->logical + (i << fs_info->sectorsize_bits));
1710 return true;
1711 }
1712 }
1713 return false;
1714 }
1715
1716 static void submit_initial_group_read(struct scrub_ctx *sctx,
1717 unsigned int first_slot,
1718 unsigned int nr_stripes)
1719 {
1720 struct blk_plug plug;
1721
1722 ASSERT(first_slot < SCRUB_TOTAL_STRIPES);
1723 ASSERT(first_slot + nr_stripes <= SCRUB_TOTAL_STRIPES);
1724
1725 scrub_throttle_dev_io(sctx, sctx->stripes[0].dev,
1726 btrfs_stripe_nr_to_offset(nr_stripes));
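/*
* Plug the block layer so the reads of the whole group can be merged
* and submitted together.
*/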
1727 blk_start_plug(&plug);
1728 for (int i = 0; i < nr_stripes; i++) {
1729 struct scrub_stripe *stripe = &sctx->stripes[first_slot + i];
1730
1731 /* Those stripes should be initialized. */
1732 ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state));
1733 scrub_submit_initial_read(sctx, stripe);
1734 }
1735 blk_finish_plug(&plug);
1736 }
1737
1738 static int flush_scrub_stripes(struct scrub_ctx *sctx)
1739 {
1740 struct btrfs_fs_info *fs_info = sctx->fs_info;
1741 struct scrub_stripe *stripe;
1742 const int nr_stripes = sctx->cur_stripe;
1743 int ret = 0;
1744
1745 if (!nr_stripes)
1746 return 0;
1747
1748 ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &sctx->stripes[0].state));
1749
1750 /* Submit the stripes which are populated but not submitted. */
1751 if (nr_stripes % SCRUB_STRIPES_PER_GROUP) {
1752 const int first_slot = round_down(nr_stripes, SCRUB_STRIPES_PER_GROUP);
1753
1754 submit_initial_group_read(sctx, first_slot, nr_stripes - first_slot);
1755 }
1756
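/* Wait until read-repair of all the submitted stripes has finished. */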
1757 for (int i = 0; i < nr_stripes; i++) {
1758 stripe = &sctx->stripes[i];
1759
1760 wait_event(stripe->repair_wait,
1761 test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state));
1762 }
1763
1764 /* Submit for dev-replace. */
1765 if (sctx->is_dev_replace) {
1766 /*
1767 * For dev-replace, if we know there is something wrong with
1768 * metadata, we should immediately abort.
1769 */
1770 for (int i = 0; i < nr_stripes; i++) {
1771 if (stripe_has_metadata_error(&sctx->stripes[i])) {
1772 ret = -EIO;
1773 goto out;
1774 }
1775 }
1776 for (int i = 0; i < nr_stripes; i++) {
1777 unsigned long good;
1778
1779 stripe = &sctx->stripes[i];
1780
1781 ASSERT(stripe->dev == fs_info->dev_replace.srcdev);
1782
1783 bitmap_andnot(&good, &stripe->extent_sector_bitmap,
1784 &stripe->error_bitmap, stripe->nr_sectors);
1785 scrub_write_sectors(sctx, stripe, good, true);
1786 }
1787 }
1788
1789 /* Wait for the above writebacks to finish. */
1790 for (int i = 0; i < nr_stripes; i++) {
1791 stripe = &sctx->stripes[i];
1792
1793 wait_scrub_stripe_io(stripe);
1794 scrub_reset_stripe(stripe);
1795 }
1796 out:
1797 sctx->cur_stripe = 0;
1798 return ret;
1799 }
1800
1801 static void raid56_scrub_wait_endio(struct bio *bio)
1802 {
1803 complete(bio->bi_private);
1804 }
1805
1806 static int queue_scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *bg,
1807 struct btrfs_device *dev, int mirror_num,
1808 u64 logical, u32 length, u64 physical,
1809 u64 *found_logical_ret)
1810 {
1811 struct scrub_stripe *stripe;
1812 int ret;
1813
1814 /*
1815 * There should always be one slot left, as the caller filling the
1816 * last slot flushes them all.
1817 */
1818 ASSERT(sctx->cur_stripe < SCRUB_TOTAL_STRIPES);
1819
1820 /* @found_logical_ret must be specified. */
1821 ASSERT(found_logical_ret);
1822
1823 stripe = &sctx->stripes[sctx->cur_stripe];
1824 scrub_reset_stripe(stripe);
1825 ret = scrub_find_fill_first_stripe(bg, &sctx->extent_path,
1826 &sctx->csum_path, dev, physical,
1827 mirror_num, logical, length, stripe);
1828 /* Either >0 as no more extents or <0 for error. */
1829 if (ret)
1830 return ret;
1831 *found_logical_ret = stripe->logical;
1832 sctx->cur_stripe++;
1833
1834 /* We filled one group, submit it. */
1835 if (sctx->cur_stripe % SCRUB_STRIPES_PER_GROUP == 0) {
1836 const int first_slot = sctx->cur_stripe - SCRUB_STRIPES_PER_GROUP;
1837
1838 submit_initial_group_read(sctx, first_slot, SCRUB_STRIPES_PER_GROUP);
1839 }
1840
1841 /* Last slot used, flush them all. */
1842 if (sctx->cur_stripe == SCRUB_TOTAL_STRIPES)
1843 return flush_scrub_stripes(sctx);
1844 return 0;
1845 }
1846
1847 static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
1848 struct btrfs_device *scrub_dev,
1849 struct btrfs_block_group *bg,
1850 struct map_lookup *map,
1851 u64 full_stripe_start)
1852 {
1853 DECLARE_COMPLETION_ONSTACK(io_done);
1854 struct btrfs_fs_info *fs_info = sctx->fs_info;
1855 struct btrfs_raid_bio *rbio;
1856 struct btrfs_io_context *bioc = NULL;
1857 struct btrfs_path extent_path = { 0 };
1858 struct btrfs_path csum_path = { 0 };
1859 struct bio *bio;
1860 struct scrub_stripe *stripe;
1861 bool all_empty = true;
1862 const int data_stripes = nr_data_stripes(map);
1863 unsigned long extent_bitmap = 0;
1864 u64 length = btrfs_stripe_nr_to_offset(data_stripes);
1865 int ret;
1866
1867 ASSERT(sctx->raid56_data_stripes);
1868
1869 /*
1870 * For data stripe search, we cannot re-use the same extent/csum paths,
1871 * as the data stripe bytenr may be smaller than the previous extent's.
1872 * Thus we have to use our own extent/csum paths.
1873 */
1874 extent_path.search_commit_root = 1;
1875 extent_path.skip_locking = 1;
1876 csum_path.search_commit_root = 1;
1877 csum_path.skip_locking = 1;
1878
1879 for (int i = 0; i < data_stripes; i++) {
1880 int stripe_index;
1881 int rot;
1882 u64 physical;
1883
1884 stripe = &sctx->raid56_data_stripes[i];
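/*
* Calculate how many stripes this full stripe is rotated by, then map
* data stripe @i to its on-disk stripe slot and physical offset.
*/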
1885 rot = div_u64(full_stripe_start - bg->start,
1886 data_stripes) >> BTRFS_STRIPE_LEN_SHIFT;
1887 stripe_index = (i + rot) % map->num_stripes;
1888 physical = map->stripes[stripe_index].physical +
1889 btrfs_stripe_nr_to_offset(rot);
1890
1891 scrub_reset_stripe(stripe);
1892 set_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state);
1893 ret = scrub_find_fill_first_stripe(bg, &extent_path, &csum_path,
1894 map->stripes[stripe_index].dev, physical, 1,
1895 full_stripe_start + btrfs_stripe_nr_to_offset(i),
1896 BTRFS_STRIPE_LEN, stripe);
1897 if (ret < 0)
1898 goto out;
1899 /*
1900 * No extent in this data stripe; manually mark it initialized
1901 * to make the later read submission happy.
1902 */
1903 if (ret > 0) {
1904 stripe->logical = full_stripe_start +
1905 btrfs_stripe_nr_to_offset(i);
1906 stripe->dev = map->stripes[stripe_index].dev;
1907 stripe->mirror_num = 1;
1908 set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state);
1909 }
1910 }
1911
1912 /* Check if all data stripes are empty. */
1913 for (int i = 0; i < data_stripes; i++) {
1914 stripe = &sctx->raid56_data_stripes[i];
1915 if (!bitmap_empty(&stripe->extent_sector_bitmap, stripe->nr_sectors)) {
1916 all_empty = false;
1917 break;
1918 }
1919 }
1920 if (all_empty) {
1921 ret = 0;
1922 goto out;
1923 }
1924
1925 for (int i = 0; i < data_stripes; i++) {
1926 stripe = &sctx->raid56_data_stripes[i];
1927 scrub_submit_initial_read(sctx, stripe);
1928 }
1929 for (int i = 0; i < data_stripes; i++) {
1930 stripe = &sctx->raid56_data_stripes[i];
1931
1932 wait_event(stripe->repair_wait,
1933 test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state));
1934 }
1935 /* For now, no zoned support for RAID56. */
1936 ASSERT(!btrfs_is_zoned(sctx->fs_info));
1937
1938 * Now all data stripes are properly verified. Check if any are left
1939 * unrepaired; if so, abort immediately or we could further corrupt the
1940 * P/Q stripes.
1941 * P/Q stripes.
1942 *
1943 * During the loop, also populate extent_bitmap.
1944 */
1945 for (int i = 0; i < data_stripes; i++) {
1946 unsigned long error;
1947
1948 stripe = &sctx->raid56_data_stripes[i];
1949
1950 /*
1951 * We should only check errors in sectors covered by an extent, as we
1952 * may hit an empty data stripe while its device is missing.
1953 */
1954 bitmap_and(&error, &stripe->error_bitmap,
1955 &stripe->extent_sector_bitmap, stripe->nr_sectors);
1956 if (!bitmap_empty(&error, stripe->nr_sectors)) {
1957 btrfs_err(fs_info,
1958 "unrepaired sectors detected, full stripe %llu data stripe %u errors %*pbl",
1959 full_stripe_start, i, stripe->nr_sectors,
1960 &error);
1961 ret = -EIO;
1962 goto out;
1963 }
1964 bitmap_or(&extent_bitmap, &extent_bitmap,
1965 &stripe->extent_sector_bitmap, stripe->nr_sectors);
1966 }
1967
1968 /* Now we can check and regenerate the P/Q stripe. */
1969 bio = bio_alloc(NULL, 1, REQ_OP_READ, GFP_NOFS);
1970 bio->bi_iter.bi_sector = full_stripe_start >> SECTOR_SHIFT;
1971 bio->bi_private = &io_done;
1972 bio->bi_end_io = raid56_scrub_wait_endio;
1973
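/*
* Map the full stripe as a write, so the returned bioc covers every
* data and P/Q stripe needed to build the scrub rbio.
*/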
1974 btrfs_bio_counter_inc_blocked(fs_info);
1975 ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, full_stripe_start,
1976 &length, &bioc, NULL, NULL, 1);
1977 if (ret < 0) {
1978 btrfs_put_bioc(bioc);
1979 btrfs_bio_counter_dec(fs_info);
1980 goto out;
1981 }
1982 rbio = raid56_parity_alloc_scrub_rbio(bio, bioc, scrub_dev, &extent_bitmap,
1983 BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
1984 btrfs_put_bioc(bioc);
1985 if (!rbio) {
1986 ret = -ENOMEM;
1987 btrfs_bio_counter_dec(fs_info);
1988 goto out;
1989 }
1990 /* Use the recovered stripes as cache to avoid reading them from disk again. */
1991 for (int i = 0; i < data_stripes; i++) {
1992 stripe = &sctx->raid56_data_stripes[i];
1993
1994 raid56_parity_cache_data_pages(rbio, stripe->pages,
1995 full_stripe_start + (i << BTRFS_STRIPE_LEN_SHIFT));
1996 }
1997 raid56_parity_submit_scrub_rbio(rbio);
1998 wait_for_completion_io(&io_done);
1999 ret = blk_status_to_errno(bio->bi_status);
2000 bio_put(bio);
2001 btrfs_bio_counter_dec(fs_info);
2002
2003 btrfs_release_path(&extent_path);
2004 btrfs_release_path(&csum_path);
2005 out:
2006 return ret;
2007 }
2008
2009 /*
2010 * Scrub one range which can only have a simple mirror based profile.
2011 * (Including the full range of SINGLE/DUP/RAID1/RAID1C*, and each stripe in
2012 * RAID0/RAID10).
2013 *
2014 * Since we may need to handle a subset of a block group, we need the
2015 * @logical_start and @logical_length parameters.
2016 */
2017 static int scrub_simple_mirror(struct scrub_ctx *sctx,
2018 struct btrfs_block_group *bg,
2019 struct map_lookup *map,
2020 u64 logical_start, u64 logical_length,
2021 struct btrfs_device *device,
2022 u64 physical, int mirror_num)
2023 {
2024 struct btrfs_fs_info *fs_info = sctx->fs_info;
2025 const u64 logical_end = logical_start + logical_length;
2026 u64 cur_logical = logical_start;
2027 int ret = 0;
2028
2029 /* The range must be inside the bg */
2030 ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);
2031
2032 /* Go through each extent item inside the logical range */
2033 while (cur_logical < logical_end) {
2034 u64 found_logical = U64_MAX;
2035 u64 cur_physical = physical + cur_logical - logical_start;
2036
2037 /* Canceled? */
2038 if (atomic_read(&fs_info->scrub_cancel_req) ||
2039 atomic_read(&sctx->cancel_req)) {
2040 ret = -ECANCELED;
2041 break;
2042 }
2043 /* Paused? */
2044 if (atomic_read(&fs_info->scrub_pause_req)) {
2045 /* Push queued extents */
2046 scrub_blocked_if_needed(fs_info);
2047 }
2048 /* Block group removed? */
2049 spin_lock(&bg->lock);
2050 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) {
2051 spin_unlock(&bg->lock);
2052 ret = 0;
2053 break;
2054 }
2055 spin_unlock(&bg->lock);
2056
2057 ret = queue_scrub_stripe(sctx, bg, device, mirror_num,
2058 cur_logical, logical_end - cur_logical,
2059 cur_physical, &found_logical);
2060 if (ret > 0) {
2061 /* No more extents, just update the accounting */
2062 sctx->stat.last_physical = physical + logical_length;
2063 ret = 0;
2064 break;
2065 }
2066 if (ret < 0)
2067 break;
2068
2069 /* queue_scrub_stripe() returned 0, @found_logical must be updated. */
2070 ASSERT(found_logical != U64_MAX);
2071 cur_logical = found_logical + BTRFS_STRIPE_LEN;
2072
2073 /* Don't hold the CPU for too long */
2074 cond_resched();
2075 }
2076 return ret;
2077 }
2078
2079 /* Calculate the full stripe length for simple stripe based profiles */
2080 static u64 simple_stripe_full_stripe_len(const struct map_lookup *map)
2081 {
2082 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2083 BTRFS_BLOCK_GROUP_RAID10));
2084
2085 return btrfs_stripe_nr_to_offset(map->num_stripes / map->sub_stripes);
2086 }
2087
2088 /* Get the logical bytenr for the stripe */
2089 static u64 simple_stripe_get_logical(struct map_lookup *map,
2090 struct btrfs_block_group *bg,
2091 int stripe_index)
2092 {
2093 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2094 BTRFS_BLOCK_GROUP_RAID10));
2095 ASSERT(stripe_index < map->num_stripes);
2096
2097 /*
2098 * (stripe_index / sub_stripes) gives how many data stripes we need to
2099 * skip.
2100 */
2101 return btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes) +
2102 bg->start;
2103 }
2104
2105 /* Get the mirror number for the stripe */
2106 static int simple_stripe_mirror_num(struct map_lookup *map, int stripe_index)
2107 {
2108 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2109 BTRFS_BLOCK_GROUP_RAID10));
2110 ASSERT(stripe_index < map->num_stripes);
2111
2112 /* For RAID0 mirror_num is fixed to 1, for RAID10 it alternates 1,2,1,2... */
2113 return stripe_index % map->sub_stripes + 1;
2114 }
2115
2116 static int scrub_simple_stripe(struct scrub_ctx *sctx,
2117 struct btrfs_block_group *bg,
2118 struct map_lookup *map,
2119 struct btrfs_device *device,
2120 int stripe_index)
2121 {
2122 const u64 logical_increment = simple_stripe_full_stripe_len(map);
2123 const u64 orig_logical = simple_stripe_get_logical(map, bg, stripe_index);
2124 const u64 orig_physical = map->stripes[stripe_index].physical;
2125 const int mirror_num = simple_stripe_mirror_num(map, stripe_index);
2126 u64 cur_logical = orig_logical;
2127 u64 cur_physical = orig_physical;
2128 int ret = 0;
2129
2130 while (cur_logical < bg->start + bg->length) {
2131 /*
2132 * Inside each stripe, RAID0 is just SINGLE, and RAID10 is
2133 * just RAID1, so we can reuse scrub_simple_mirror() to scrub
2134 * this stripe.
2135 */
2136 ret = scrub_simple_mirror(sctx, bg, map, cur_logical,
2137 BTRFS_STRIPE_LEN, device, cur_physical,
2138 mirror_num);
2139 if (ret)
2140 return ret;
2141 /* Skip to next stripe which belongs to the target device */
2142 cur_logical += logical_increment;
2143 /* For physical offset, we just go to next stripe */
2144 cur_physical += BTRFS_STRIPE_LEN;
2145 }
2146 return ret;
2147 }
2148
2149 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2150 struct btrfs_block_group *bg,
2151 struct extent_map *em,
2152 struct btrfs_device *scrub_dev,
2153 int stripe_index)
2154 {
2155 struct btrfs_fs_info *fs_info = sctx->fs_info;
2156 struct map_lookup *map = em->map_lookup;
2157 const u64 profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
2158 const u64 chunk_logical = bg->start;
2159 int ret;
2160 int ret2;
2161 u64 physical = map->stripes[stripe_index].physical;
2162 const u64 dev_stripe_len = btrfs_calc_stripe_length(em);
2163 const u64 physical_end = physical + dev_stripe_len;
2164 u64 logical;
2165 u64 logic_end;
2166 /* The logical increment after finishing one stripe */
2167 u64 increment;
2168 /* Offset inside the chunk */
2169 u64 offset;
2170 u64 stripe_logical;
2171 int stop_loop = 0;
2172
2173 /* Extent_path should be released by now. */
2174 ASSERT(sctx->extent_path.nodes[0] == NULL);
2175
2176 scrub_blocked_if_needed(fs_info);
2177
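/*
* For dev-replace onto a zoned device, writes must be sequential, so
* initialize the write pointer to the start of this device stripe.
*/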
2178 if (sctx->is_dev_replace &&
2179 btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) {
2180 mutex_lock(&sctx->wr_lock);
2181 sctx->write_pointer = physical;
2182 mutex_unlock(&sctx->wr_lock);
2183 }
2184
2185 /* Prepare the extra data stripes used by RAID56. */
2186 if (profile & BTRFS_BLOCK_GROUP_RAID56_MASK) {
2187 ASSERT(sctx->raid56_data_stripes == NULL);
2188
2189 sctx->raid56_data_stripes = kcalloc(nr_data_stripes(map),
2190 sizeof(struct scrub_stripe),
2191 GFP_KERNEL);
2192 if (!sctx->raid56_data_stripes) {
2193 ret = -ENOMEM;
2194 goto out;
2195 }
2196 for (int i = 0; i < nr_data_stripes(map); i++) {
2197 ret = init_scrub_stripe(fs_info,
2198 &sctx->raid56_data_stripes[i]);
2199 if (ret < 0)
2200 goto out;
2201 sctx->raid56_data_stripes[i].bg = bg;
2202 sctx->raid56_data_stripes[i].sctx = sctx;
2203 }
2204 }
2205 /*
2206 * There used to be a big double loop to handle all profiles using the
2207 * same routine, which grew larger and more unwieldy over time.
2208 *
2209 * So here we handle each profile differently, so that simpler profiles
2210 * have a simpler scrubbing function.
2211 */
2212 if (!(profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10 |
2213 BTRFS_BLOCK_GROUP_RAID56_MASK))) {
2214 /*
2215 * The above check rules out all complex profiles; the remaining
2216 * profiles are SINGLE|DUP|RAID1|RAID1C*, which are simple
2217 * mirrored duplication without striping.
2218 *
2219 * Only @physical and @mirror_num need to be calculated using
2220 * @stripe_index.
2221 */
2222 ret = scrub_simple_mirror(sctx, bg, map, bg->start, bg->length,
2223 scrub_dev, map->stripes[stripe_index].physical,
2224 stripe_index + 1);
2225 offset = 0;
2226 goto out;
2227 }
2228 if (profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
2229 ret = scrub_simple_stripe(sctx, bg, map, scrub_dev, stripe_index);
2230 offset = btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes);
2231 goto out;
2232 }
2233
2234 /* Only RAID56 goes through the old code */
2235 ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK);
2236 ret = 0;
2237
2238 /* Calculate the logical end of the stripe */
2239 get_raid56_logic_offset(physical_end, stripe_index,
2240 map, &logic_end, NULL);
2241 logic_end += chunk_logical;
2242
2243 /* Initialize @offset in case we need to go to the out: label */
2244 get_raid56_logic_offset(physical, stripe_index, map, &offset, NULL);
2245 increment = btrfs_stripe_nr_to_offset(nr_data_stripes(map));
2246
2247 /*
2248 * Due to the rotation, for RAID56 it's better to iterate each stripe
2249 * using their physical offset.
2250 */
2251 while (physical < physical_end) {
2252 ret = get_raid56_logic_offset(physical, stripe_index, map,
2253 &logical, &stripe_logical);
2254 logical += chunk_logical;
2255 if (ret) {
2256 /* It is a parity stripe */
2257 stripe_logical += chunk_logical;
2258 ret = scrub_raid56_parity_stripe(sctx, scrub_dev, bg,
2259 map, stripe_logical);
2260 if (ret)
2261 goto out;
2262 goto next;
2263 }
2264
2265 /*
2266 * Now we're at a data stripe, scrub each extent in the range.
2267 *
2268 * At this stage, if we ignore the repair part, inside each data
2269 * stripe it is no different from the SINGLE profile.
2270 * We can reuse scrub_simple_mirror() here, as the repair part
2271 * is still based on @mirror_num.
2272 */
2273 ret = scrub_simple_mirror(sctx, bg, map, logical, BTRFS_STRIPE_LEN,
2274 scrub_dev, physical, 1);
2275 if (ret < 0)
2276 goto out;
2277 next:
2278 logical += increment;
2279 physical += BTRFS_STRIPE_LEN;
2280 spin_lock(&sctx->stat_lock);
2281 if (stop_loop)
2282 sctx->stat.last_physical =
2283 map->stripes[stripe_index].physical + dev_stripe_len;
2284 else
2285 sctx->stat.last_physical = physical;
2286 spin_unlock(&sctx->stat_lock);
2287 if (stop_loop)
2288 break;
2289 }
2290 out:
2291 ret2 = flush_scrub_stripes(sctx);
2292 if (!ret)
2293 ret = ret2;
2294 btrfs_release_path(&sctx->extent_path);
2295 btrfs_release_path(&sctx->csum_path);
2296
2297 if (sctx->raid56_data_stripes) {
2298 for (int i = 0; i < nr_data_stripes(map); i++)
2299 release_scrub_stripe(&sctx->raid56_data_stripes[i]);
2300 kfree(sctx->raid56_data_stripes);
2301 sctx->raid56_data_stripes = NULL;
2302 }
2303
2304 if (sctx->is_dev_replace && ret >= 0) {
2305 int ret2;
2306
2307 ret2 = sync_write_pointer_for_zoned(sctx,
2308 chunk_logical + offset,
2309 map->stripes[stripe_index].physical,
2310 physical_end);
2311 if (ret2)
2312 ret = ret2;
2313 }
2314
2315 return ret < 0 ? ret : 0;
2316 }
2317
2318 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
2319 struct btrfs_block_group *bg,
2320 struct btrfs_device *scrub_dev,
2321 u64 dev_offset,
2322 u64 dev_extent_len)
2323 {
2324 struct btrfs_fs_info *fs_info = sctx->fs_info;
2325 struct extent_map_tree *map_tree = &fs_info->mapping_tree;
2326 struct map_lookup *map;
2327 struct extent_map *em;
2328 int i;
2329 int ret = 0;
2330
2331 read_lock(&map_tree->lock);
2332 em = lookup_extent_mapping(map_tree, bg->start, bg->length);
2333 read_unlock(&map_tree->lock);
2334
2335 if (!em) {
2336 /*
2337 * Might have been an unused block group deleted by the cleaner
2338 * kthread or relocation.
2339 */
2340 spin_lock(&bg->lock);
2341 if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags))
2342 ret = -EINVAL;
2343 spin_unlock(&bg->lock);
2344
2345 return ret;
2346 }
2347 if (em->start != bg->start)
2348 goto out;
2349 if (em->len < dev_extent_len)
2350 goto out;
2351
2352 map = em->map_lookup;
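
/* Scrub each stripe of this chunk that sits on @scrub_dev at @dev_offset. */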
2353 for (i = 0; i < map->num_stripes; ++i) {
2354 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
2355 map->stripes[i].physical == dev_offset) {
2356 ret = scrub_stripe(sctx, bg, em, scrub_dev, i);
2357 if (ret)
2358 goto out;
2359 }
2360 }
2361 out:
2362 free_extent_map(em);
2363
2364 return ret;
2365 }
2366
2367 static int finish_extent_writes_for_zoned(struct btrfs_root *root,
2368 struct btrfs_block_group *cache)
2369 {
2370 struct btrfs_fs_info *fs_info = cache->fs_info;
2371 struct btrfs_trans_handle *trans;
2372
2373 if (!btrfs_is_zoned(fs_info))
2374 return 0;
2375
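/*
* For zoned filesystems, make sure all extent writes into this block
* group are finished and committed before copying it for dev-replace.
*/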
2376 btrfs_wait_block_group_reservations(cache);
2377 btrfs_wait_nocow_writers(cache);
2378 btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, cache->length);
2379
2380 trans = btrfs_join_transaction(root);
2381 if (IS_ERR(trans))
2382 return PTR_ERR(trans);
2383 return btrfs_commit_transaction(trans);
2384 }
2385
2386 static noinline_for_stack
2387 int scrub_enumerate_chunks(struct scrub_ctx *sctx,
2388 struct btrfs_device *scrub_dev, u64 start, u64 end)
2389 {
2390 struct btrfs_dev_extent *dev_extent = NULL;
2391 struct btrfs_path *path;
2392 struct btrfs_fs_info *fs_info = sctx->fs_info;
2393 struct btrfs_root *root = fs_info->dev_root;
2394 u64 chunk_offset;
2395 int ret = 0;
2396 int ro_set;
2397 int slot;
2398 struct extent_buffer *l;
2399 struct btrfs_key key;
2400 struct btrfs_key found_key;
2401 struct btrfs_block_group *cache;
2402 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
2403
2404 path = btrfs_alloc_path();
2405 if (!path)
2406 return -ENOMEM;
2407
2408 path->reada = READA_FORWARD;
2409 path->search_commit_root = 1;
2410 path->skip_locking = 1;
2411
2412 key.objectid = scrub_dev->devid;
2413 key.offset = 0ull;
2414 key.type = BTRFS_DEV_EXTENT_KEY;
2415
2416 while (1) {
2417 u64 dev_extent_len;
2418
2419 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2420 if (ret < 0)
2421 break;
2422 if (ret > 0) {
2423 if (path->slots[0] >=
2424 btrfs_header_nritems(path->nodes[0])) {
2425 ret = btrfs_next_leaf(root, path);
2426 if (ret < 0)
2427 break;
2428 if (ret > 0) {
2429 ret = 0;
2430 break;
2431 }
2432 } else {
2433 ret = 0;
2434 }
2435 }
2436
2437 l = path->nodes[0];
2438 slot = path->slots[0];
2439
2440 btrfs_item_key_to_cpu(l, &found_key, slot);
2441
2442 if (found_key.objectid != scrub_dev->devid)
2443 break;
2444
2445 if (found_key.type != BTRFS_DEV_EXTENT_KEY)
2446 break;
2447
2448 if (found_key.offset >= end)
2449 break;
2450
2451 if (found_key.offset < key.offset)
2452 break;
2453
2454 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
2455 dev_extent_len = btrfs_dev_extent_length(l, dev_extent);
2456
2457 if (found_key.offset + dev_extent_len <= start)
2458 goto skip;
2459
2460 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
2461
2462 /*
2463 * get a reference on the corresponding block group to prevent
2464 * the chunk from going away while we scrub it
2465 */
2466 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2467
2468 /*
2469 * Some chunks are removed but not committed to disk yet, continue
* scrubbing.
*/
2470 if (!cache)
2471 goto skip;
2472
2473 ASSERT(cache->start <= chunk_offset);
2474 /*
2475 * We are using the commit root to search for device extents, so
2476 * that means we could have found a device extent item from a
2477 * block group that was deleted in the current transaction. The
2478 * logical start offset of the deleted block group, stored at
2479 * @chunk_offset, might be part of the logical address range of
2480 * a new block group (which uses different physical extents).
2481 * In this case btrfs_lookup_block_group() has returned the new
2482 * block group, and its start address is less than @chunk_offset.
2483 *
2484 * We skip such new block groups, because it's pointless to
2485 * process them, as we won't find their extents because we search
2486 * for them using the commit root of the extent tree. For a device
2487 * replace it's also fine to skip it, we won't miss copying them
2488 * to the target device because we have the write duplication
2489 * setup through the regular write path (by btrfs_map_block()),
2490 * and we have committed a transaction when we started the device
2491 * replace, right after setting up the device replace state.
2492 */
2493 if (cache->start < chunk_offset) {
2494 btrfs_put_block_group(cache);
2495 goto skip;
2496 }
2497
2498 if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) {
2499 if (!test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags)) {
2500 btrfs_put_block_group(cache);
2501 goto skip;
2502 }
2503 }
2504
2505 /*
2506 * Make sure that while we are scrubbing the corresponding block
2507 * group doesn't get its logical address and its device extents
2508 * reused for another block group, which can possibly be of a
2509 * different type and different profile. We do this to prevent
2510 * false error detections and crashes due to bogus attempts to
2511 * repair extents.
2512 */
2513 spin_lock(&cache->lock);
2514 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) {
2515 spin_unlock(&cache->lock);
2516 btrfs_put_block_group(cache);
2517 goto skip;
2518 }
2519 btrfs_freeze_block_group(cache);
2520 spin_unlock(&cache->lock);
2521
2522 /*
2523 * we need to call btrfs_inc_block_group_ro() with scrubs_paused,
2524 * to avoid deadlock caused by:
2525 * btrfs_inc_block_group_ro()
2526 * -> btrfs_wait_for_commit()
2527 * -> btrfs_commit_transaction()
2528 * -> btrfs_scrub_pause()
2529 */
2530 scrub_pause_on(fs_info);
2531
2532 /*
2533 * Don't do chunk preallocation for scrub.
2534 *
2535 * This is especially important for SYSTEM bgs, or we can hit
2536 * -EFBIG from btrfs_finish_chunk_alloc() like:
2537 * 1. The only SYSTEM bg is marked RO.
2538 * Since SYSTEM bg is small, that's pretty common.
2539 * 2. New SYSTEM bg will be allocated
2540 *    Since the regular path will allocate a new chunk.
2541 * 3. New SYSTEM bg is empty and will get cleaned up
2542 * Before cleanup really happens, it's marked RO again.
2543 * 4. Empty SYSTEM bg gets scrubbed
2544 * We go back to 2.
2545 *
2546 * This can easily boost the amount of SYSTEM chunks if the cleaner
2547 * thread can't be triggered fast enough, and use up all the space of
2548 * btrfs_super_block::sys_chunk_array.
2549 *
2550 * While for dev replace, we need to try our best to mark block
2551 * group RO, to prevent race between:
2552 * - Write duplication
2553 * Contains latest data
2554 * - Scrub copy
2555 * Contains data from commit tree
2556 *
2557 * If the target block group is not marked RO, nocow writes can
2558 * be overwritten by scrub copy, causing data corruption.
2559 * So for dev-replace, it's not allowed to continue if a block
2560 * group is not RO.
2561 */
2562 ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
2563 if (!ret && sctx->is_dev_replace) {
2564 ret = finish_extent_writes_for_zoned(root, cache);
2565 if (ret) {
2566 btrfs_dec_block_group_ro(cache);
2567 scrub_pause_off(fs_info);
2568 btrfs_put_block_group(cache);
2569 break;
2570 }
2571 }
2572
2573 if (ret == 0) {
2574 ro_set = 1;
2575 } else if (ret == -ENOSPC && !sctx->is_dev_replace &&
2576 !(cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK)) {
2577 /*
2578 * btrfs_inc_block_group_ro() returns -ENOSPC when it
2579 * fails to create a new chunk for metadata.
2580 * It is not a problem for scrub, because
2581 * metadata is always COWed, and our scrub pauses
2582 * for transaction commits.
2583 *
2584 * For RAID56 chunks, we have to mark them read-only
2585 * for scrub, as later we would use our own cache
2586 * out of RAID56 realm.
2587 * Thus we want the RAID56 bg to be marked RO to
2588 * prevent RMW from screwing up our cache.
2589 */
2590 ro_set = 0;
2591 } else if (ret == -ETXTBSY) {
2592 btrfs_warn(fs_info,
2593 "skipping scrub of block group %llu due to active swapfile",
2594 cache->start);
2595 scrub_pause_off(fs_info);
2596 ret = 0;
2597 goto skip_unfreeze;
2598 } else {
2599 btrfs_warn(fs_info,
2600 "failed setting block group ro: %d", ret);
2601 btrfs_unfreeze_block_group(cache);
2602 btrfs_put_block_group(cache);
2603 scrub_pause_off(fs_info);
2604 break;
2605 }
2606
2607 /*
2608 * Now the target block group is marked RO, wait for nocow writes to
2609 * finish before dev-replace.
2610 * COW is fine, as COW never overwrites extents in the commit tree.
2611 */
2612 if (sctx->is_dev_replace) {
2613 btrfs_wait_nocow_writers(cache);
2614 btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start,
2615 cache->length);
2616 }
2617
2618 scrub_pause_off(fs_info);
2619 down_write(&dev_replace->rwsem);
2620 dev_replace->cursor_right = found_key.offset + dev_extent_len;
2621 dev_replace->cursor_left = found_key.offset;
2622 dev_replace->item_needs_writeback = 1;
2623 up_write(&dev_replace->rwsem);
2624
2625 ret = scrub_chunk(sctx, cache, scrub_dev, found_key.offset,
2626 dev_extent_len);
2627 if (sctx->is_dev_replace &&
2628 !btrfs_finish_block_group_to_copy(dev_replace->srcdev,
2629 cache, found_key.offset))
2630 ro_set = 0;
2631
2632 down_write(&dev_replace->rwsem);
2633 dev_replace->cursor_left = dev_replace->cursor_right;
2634 dev_replace->item_needs_writeback = 1;
2635 up_write(&dev_replace->rwsem);
2636
2637 if (ro_set)
2638 btrfs_dec_block_group_ro(cache);
2639
2640 /*
2641 * We might have prevented the cleaner kthread from deleting
2642 * this block group if it was already unused because we raced
2643 * and set it to RO mode first. So add it back to the unused
2644 * list, otherwise it might not ever be deleted unless a manual
2645 * balance is triggered or it becomes used and unused again.
2646 */
2647 spin_lock(&cache->lock);
2648 if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags) &&
2649 !cache->ro && cache->reserved == 0 && cache->used == 0) {
2650 spin_unlock(&cache->lock);
2651 if (btrfs_test_opt(fs_info, DISCARD_ASYNC))
2652 btrfs_discard_queue_work(&fs_info->discard_ctl,
2653 cache);
2654 else
2655 btrfs_mark_bg_unused(cache);
2656 } else {
2657 spin_unlock(&cache->lock);
2658 }
2659 skip_unfreeze:
2660 btrfs_unfreeze_block_group(cache);
2661 btrfs_put_block_group(cache);
2662 if (ret)
2663 break;
2664 if (sctx->is_dev_replace &&
2665 atomic64_read(&dev_replace->num_write_errors) > 0) {
2666 ret = -EIO;
2667 break;
2668 }
2669 if (sctx->stat.malloc_errors > 0) {
2670 ret = -ENOMEM;
2671 break;
2672 }
2673 skip:
2674 key.offset = found_key.offset + dev_extent_len;
2675 btrfs_release_path(path);
2676 }
2677
2678 btrfs_free_path(path);
2679
2680 return ret;
2681 }
2682
2683 static int scrub_one_super(struct scrub_ctx *sctx, struct btrfs_device *dev,
2684 struct page *page, u64 physical, u64 generation)
2685 {
2686 struct btrfs_fs_info *fs_info = sctx->fs_info;
2687 struct bio_vec bvec;
2688 struct bio bio;
2689 struct btrfs_super_block *sb = page_address(page);
2690 int ret;
2691
2692 bio_init(&bio, dev->bdev, &bvec, 1, REQ_OP_READ);
2693 bio.bi_iter.bi_sector = physical >> SECTOR_SHIFT;
2694 __bio_add_page(&bio, page, BTRFS_SUPER_INFO_SIZE, 0);
2695 ret = submit_bio_wait(&bio);
2696 bio_uninit(&bio);
2697
2698 if (ret < 0)
2699 return ret;
2700 ret = btrfs_check_super_csum(fs_info, sb);
2701 if (ret != 0) {
2702 btrfs_err_rl(fs_info,
2703 "super block at physical %llu devid %llu has bad csum",
2704 physical, dev->devid);
2705 return -EIO;
2706 }
2707 if (btrfs_super_generation(sb) != generation) {
2708 btrfs_err_rl(fs_info,
2709 "super block at physical %llu devid %llu has bad generation %llu expect %llu",
2710 physical, dev->devid,
2711 btrfs_super_generation(sb), generation);
2712 return -EUCLEAN;
2713 }
2714
2715 return btrfs_validate_super(fs_info, sb, -1);
2716 }
2717
2718 static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
2719 struct btrfs_device *scrub_dev)
2720 {
2721 int i;
2722 u64 bytenr;
2723 u64 gen;
2724 int ret = 0;
2725 struct page *page;
2726 struct btrfs_fs_info *fs_info = sctx->fs_info;
2727
2728 if (BTRFS_FS_ERROR(fs_info))
2729 return -EROFS;
2730
2731 page = alloc_page(GFP_KERNEL);
2732 if (!page) {
2733 spin_lock(&sctx->stat_lock);
2734 sctx->stat.malloc_errors++;
2735 spin_unlock(&sctx->stat_lock);
2736 return -ENOMEM;
2737 }
2738
2739 /* Seed devices of a new filesystem have their own generation. */
2740 if (scrub_dev->fs_devices != fs_info->fs_devices)
2741 gen = scrub_dev->generation;
2742 else
2743 gen = fs_info->last_trans_committed;
2744
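/* Check each super block copy whose location fits inside the device. */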
2745 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
2746 ret = btrfs_sb_log_location(scrub_dev, i, 0, &bytenr);
2747 if (ret == -ENOENT)
2748 break;
2749
2750 if (ret) {
2751 spin_lock(&sctx->stat_lock);
2752 sctx->stat.super_errors++;
2753 spin_unlock(&sctx->stat_lock);
2754 continue;
2755 }
2756
2757 if (bytenr + BTRFS_SUPER_INFO_SIZE >
2758 scrub_dev->commit_total_bytes)
2759 break;
2760 if (!btrfs_check_super_location(scrub_dev, bytenr))
2761 continue;
2762
2763 ret = scrub_one_super(sctx, scrub_dev, page, bytenr, gen);
2764 if (ret) {
2765 spin_lock(&sctx->stat_lock);
2766 sctx->stat.super_errors++;
2767 spin_unlock(&sctx->stat_lock);
2768 }
2769 }
2770 __free_page(page);
2771 return 0;
2772 }
2773
2774 static void scrub_workers_put(struct btrfs_fs_info *fs_info)
2775 {
2776 if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt,
2777 &fs_info->scrub_lock)) {
2778 struct workqueue_struct *scrub_workers = fs_info->scrub_workers;
2779
2780 fs_info->scrub_workers = NULL;
2781 mutex_unlock(&fs_info->scrub_lock);
2782
2783 if (scrub_workers)
2784 destroy_workqueue(scrub_workers);
2785 }
2786 }
2787
2788 /*
2789 * Get a reference count on fs_info->scrub_workers. Start the workers if necessary.
2790 */
2791 static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info)
2792 {
2793 struct workqueue_struct *scrub_workers = NULL;
2794 unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
2795 int max_active = fs_info->thread_pool_size;
2796 int ret = -ENOMEM;
2797
2798 if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt))
2799 return 0;
2800
2801 scrub_workers = alloc_workqueue("btrfs-scrub", flags, max_active);
2802 if (!scrub_workers)
2803 return -ENOMEM;
2804
2805 mutex_lock(&fs_info->scrub_lock);
2806 if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
2807 ASSERT(fs_info->scrub_workers == NULL);
2808 fs_info->scrub_workers = scrub_workers;
2809 refcount_set(&fs_info->scrub_workers_refcnt, 1);
2810 mutex_unlock(&fs_info->scrub_lock);
2811 return 0;
2812 }
2813 /* Another thread raced in and created the workers for us */
2814 refcount_inc(&fs_info->scrub_workers_refcnt);
2815 mutex_unlock(&fs_info->scrub_lock);
2816
2817 ret = 0;
2818
2819 destroy_workqueue(scrub_workers);
2820 return ret;
2821 }
2822
2823 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
2824 u64 end, struct btrfs_scrub_progress *progress,
2825 int readonly, int is_dev_replace)
2826 {
2827 struct btrfs_dev_lookup_args args = { .devid = devid };
2828 struct scrub_ctx *sctx;
2829 int ret;
2830 struct btrfs_device *dev;
2831 unsigned int nofs_flag;
2832 bool need_commit = false;
2833
2834 if (btrfs_fs_closing(fs_info))
2835 return -EAGAIN;
2836
2837 /* At mount time we have ensured nodesize is in the range of [4K, 64K]. */
2838 ASSERT(fs_info->nodesize <= BTRFS_STRIPE_LEN);
2839
2840 /*
2841 * SCRUB_MAX_SECTORS_PER_BLOCK is calculated using the largest possible
2842 * value (max nodesize / min sectorsize), thus nodesize should always
2843 * be fine.
2844 */
2845 ASSERT(fs_info->nodesize <=
2846 SCRUB_MAX_SECTORS_PER_BLOCK << fs_info->sectorsize_bits);
2847
2848 /* Allocate outside of device_list_mutex */
2849 sctx = scrub_setup_ctx(fs_info, is_dev_replace);
2850 if (IS_ERR(sctx))
2851 return PTR_ERR(sctx);
2852
2853 ret = scrub_workers_get(fs_info);
2854 if (ret)
2855 goto out_free_ctx;
2856
2857 mutex_lock(&fs_info->fs_devices->device_list_mutex);
2858 dev = btrfs_find_device(fs_info->fs_devices, &args);
2859 if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
2860 !is_dev_replace)) {
2861 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2862 ret = -ENODEV;
2863 goto out;
2864 }
2865
2866 if (!is_dev_replace && !readonly &&
2867 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
2868 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2869 btrfs_err_in_rcu(fs_info,
2870 "scrub on devid %llu: filesystem on %s is not writable",
2871 devid, btrfs_dev_name(dev));
2872 ret = -EROFS;
2873 goto out;
2874 }
2875
2876 mutex_lock(&fs_info->scrub_lock);
2877 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
2878 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
2879 mutex_unlock(&fs_info->scrub_lock);
2880 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2881 ret = -EIO;
2882 goto out;
2883 }
2884
2885 down_read(&fs_info->dev_replace.rwsem);
2886 if (dev->scrub_ctx ||
2887 (!is_dev_replace &&
2888 btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
2889 up_read(&fs_info->dev_replace.rwsem);
2890 mutex_unlock(&fs_info->scrub_lock);
2891 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2892 ret = -EINPROGRESS;
2893 goto out;
2894 }
2895 up_read(&fs_info->dev_replace.rwsem);
2896
2897 sctx->readonly = readonly;
2898 dev->scrub_ctx = sctx;
2899 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2900
2901 /*
2902 * By checking @scrub_pause_req here, we can avoid
2903 * a race between transaction commit and scrubbing.
2904 */
2905 __scrub_blocked_if_needed(fs_info);
2906 atomic_inc(&fs_info->scrubs_running);
2907 mutex_unlock(&fs_info->scrub_lock);
2908
2909 /*
2910 * In order to avoid deadlock with reclaim when there is a transaction
2911 * trying to pause scrub, make sure we use GFP_NOFS for all the
2912 * allocations done at btrfs_scrub_sectors() and scrub_sectors_for_parity()
2913 * invoked by our callees. The pausing request is done when the
2914 * transaction commit starts, and it blocks the transaction until scrub
2915 * is paused (done at specific points at scrub_stripe() or right above
2916 * before incrementing fs_info->scrubs_running).
2917 */
2918 nofs_flag = memalloc_nofs_save();
2919 if (!is_dev_replace) {
2920 u64 old_super_errors;
2921
2922 spin_lock(&sctx->stat_lock);
2923 old_super_errors = sctx->stat.super_errors;
2924 spin_unlock(&sctx->stat_lock);
2925
2926 btrfs_info(fs_info, "scrub: started on devid %llu", devid);
2927 /*
2928 * By holding the device list mutex, we can avoid racing with
2929 * super block writes kicked off by log tree sync.
2930 */
2931 mutex_lock(&fs_info->fs_devices->device_list_mutex);
2932 ret = scrub_supers(sctx, dev);
2933 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2934
2935 spin_lock(&sctx->stat_lock);
2936 /*
2937 * Super block errors found, but we cannot commit a transaction
2938 * in the current context, since btrfs_commit_transaction() needs
2939 * to pause the currently running scrub (held by ourselves).
2940 */
2941 if (sctx->stat.super_errors > old_super_errors && !sctx->readonly)
2942 need_commit = true;
2943 spin_unlock(&sctx->stat_lock);
2944 }
2945
2946 if (!ret)
2947 ret = scrub_enumerate_chunks(sctx, dev, start, end);
2948 memalloc_nofs_restore(nofs_flag);
2949
2950 atomic_dec(&fs_info->scrubs_running);
2951 wake_up(&fs_info->scrub_pause_wait);
2952
2953 if (progress)
2954 memcpy(progress, &sctx->stat, sizeof(*progress));
2955
2956 if (!is_dev_replace)
2957 btrfs_info(fs_info, "scrub: %s on devid %llu with status: %d",
2958 ret ? "not finished" : "finished", devid, ret);
2959
2960 mutex_lock(&fs_info->scrub_lock);
2961 dev->scrub_ctx = NULL;
2962 mutex_unlock(&fs_info->scrub_lock);
2963
2964 scrub_workers_put(fs_info);
2965 scrub_put_ctx(sctx);
2966
2967 /*
2968 * We found some super block errors before, now try to force a
2969 * transaction commit, as scrub has finished.
2970 */
2971 if (need_commit) {
2972 struct btrfs_trans_handle *trans;
2973
2974 trans = btrfs_start_transaction(fs_info->tree_root, 0);
2975 if (IS_ERR(trans)) {
2976 ret = PTR_ERR(trans);
2977 btrfs_err(fs_info,
2978 "scrub: failed to start transaction to fix super block errors: %d", ret);
2979 return ret;
2980 }
2981 ret = btrfs_commit_transaction(trans);
2982 if (ret < 0)
2983 btrfs_err(fs_info,
2984 "scrub: failed to commit transaction to fix super block errors: %d", ret);
2985 }
2986 return ret;
2987 out:
2988 scrub_workers_put(fs_info);
2989 out_free_ctx:
2990 scrub_free_ctx(sctx);
2991
2992 return ret;
2993 }
2994
2995 void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
2996 {
2997 mutex_lock(&fs_info->scrub_lock);
2998 atomic_inc(&fs_info->scrub_pause_req);
2999 while (atomic_read(&fs_info->scrubs_paused) !=
3000 atomic_read(&fs_info->scrubs_running)) {
3001 mutex_unlock(&fs_info->scrub_lock);
3002 wait_event(fs_info->scrub_pause_wait,
3003 atomic_read(&fs_info->scrubs_paused) ==
3004 atomic_read(&fs_info->scrubs_running));
3005 mutex_lock(&fs_info->scrub_lock);
3006 }
3007 mutex_unlock(&fs_info->scrub_lock);
3008 }
3009
3010 void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
3011 {
3012 atomic_dec(&fs_info->scrub_pause_req);
3013 wake_up(&fs_info->scrub_pause_wait);
3014 }
3015
3016 int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
3017 {
3018 mutex_lock(&fs_info->scrub_lock);
3019 if (!atomic_read(&fs_info->scrubs_running)) {
3020 mutex_unlock(&fs_info->scrub_lock);
3021 return -ENOTCONN;
3022 }
3023
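/* Ask all running scrubs to cancel and wait until they have stopped. */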
3024 atomic_inc(&fs_info->scrub_cancel_req);
3025 while (atomic_read(&fs_info->scrubs_running)) {
3026 mutex_unlock(&fs_info->scrub_lock);
3027 wait_event(fs_info->scrub_pause_wait,
3028 atomic_read(&fs_info->scrubs_running) == 0);
3029 mutex_lock(&fs_info->scrub_lock);
3030 }
3031 atomic_dec(&fs_info->scrub_cancel_req);
3032 mutex_unlock(&fs_info->scrub_lock);
3033
3034 return 0;
3035 }
3036
3037 int btrfs_scrub_cancel_dev(struct btrfs_device *dev)
3038 {
3039 struct btrfs_fs_info *fs_info = dev->fs_info;
3040 struct scrub_ctx *sctx;
3041
3042 mutex_lock(&fs_info->scrub_lock);
3043 sctx = dev->scrub_ctx;
3044 if (!sctx) {
3045 mutex_unlock(&fs_info->scrub_lock);
3046 return -ENOTCONN;
3047 }
3048 atomic_inc(&sctx->cancel_req);
3049 while (dev->scrub_ctx) {
3050 mutex_unlock(&fs_info->scrub_lock);
3051 wait_event(fs_info->scrub_pause_wait,
3052 dev->scrub_ctx == NULL);
3053 mutex_lock(&fs_info->scrub_lock);
3054 }
3055 mutex_unlock(&fs_info->scrub_lock);
3056
3057 return 0;
3058 }
3059
3060 int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
3061 struct btrfs_scrub_progress *progress)
3062 {
3063 struct btrfs_dev_lookup_args args = { .devid = devid };
3064 struct btrfs_device *dev;
3065 struct scrub_ctx *sctx = NULL;
3066
3067 mutex_lock(&fs_info->fs_devices->device_list_mutex);
3068 dev = btrfs_find_device(fs_info->fs_devices, &args);
3069 if (dev)
3070 sctx = dev->scrub_ctx;
3071 if (sctx)
3072 memcpy(progress, &sctx->stat, sizeof(*progress));
3073 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3074
3075 return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
3076 }
3077