xref: /openbmc/linux/fs/btrfs/scrub.c (revision 53809828)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
4  */
5 
6 #include <linux/blkdev.h>
7 #include <linux/ratelimit.h>
8 #include <linux/sched/mm.h>
9 #include "ctree.h"
10 #include "volumes.h"
11 #include "disk-io.h"
12 #include "ordered-data.h"
13 #include "transaction.h"
14 #include "backref.h"
15 #include "extent_io.h"
16 #include "dev-replace.h"
17 #include "check-integrity.h"
18 #include "rcu-string.h"
19 #include "raid56.h"
20 
21 /*
22  * This is only the first step towards a full-featured scrub. It reads all
23  * extents and super blocks and verifies the checksums. In case a bad checksum
24  * is found or the extent cannot be read, good data will be written back if
25  * any can be found.
26  *
27  * Future enhancements:
28  *  - In case an unrepairable extent is encountered, track which files are
29  *    affected and report them
30  *  - track and record media errors, throw out bad devices
31  *  - add a mode to also read unallocated space
32  */
33 
34 struct scrub_block;
35 struct scrub_ctx;
36 
37 /*
38  * The following three values only influence performance.
39  * The last one configures the number of parallel and outstanding I/O
40  * operations. The first two values configure an upper limit for the number
41  * of (dynamically allocated) pages that are added to a bio.
42  */
43 #define SCRUB_PAGES_PER_RD_BIO	32	/* 128k per bio */
44 #define SCRUB_PAGES_PER_WR_BIO	32	/* 128k per bio */
45 #define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */
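
/*
 * Worked arithmetic behind the per-line comments above, assuming 4KiB pages
 * (PAGE_SIZE is architecture dependent, so this is an illustration only):
 * 32 pages * 4KiB = 128KiB per read or write bio, and 64 bios * 128KiB = 8MiB
 * of scrub I/O in flight per device.
 */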
46 
47 /*
48  * the following value times PAGE_SIZE needs to be large enough to match the
49  * largest node/leaf/sector size that shall be supported.
50  * Values larger than BTRFS_STRIPE_LEN are not supported.
51  */
52 #define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */
53 
54 struct scrub_recover {
55 	refcount_t		refs;
56 	struct btrfs_bio	*bbio;
57 	u64			map_length;
58 };
59 
60 struct scrub_page {
61 	struct scrub_block	*sblock;
62 	struct page		*page;
63 	struct btrfs_device	*dev;
64 	struct list_head	list;
65 	u64			flags;  /* extent flags */
66 	u64			generation;
67 	u64			logical;
68 	u64			physical;
69 	u64			physical_for_dev_replace;
70 	atomic_t		refs;
71 	struct {
72 		unsigned int	mirror_num:8;
73 		unsigned int	have_csum:1;
74 		unsigned int	io_error:1;
75 	};
76 	u8			csum[BTRFS_CSUM_SIZE];
77 
78 	struct scrub_recover	*recover;
79 };
80 
81 struct scrub_bio {
82 	int			index;
83 	struct scrub_ctx	*sctx;
84 	struct btrfs_device	*dev;
85 	struct bio		*bio;
86 	blk_status_t		status;
87 	u64			logical;
88 	u64			physical;
89 #if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
90 	struct scrub_page	*pagev[SCRUB_PAGES_PER_WR_BIO];
91 #else
92 	struct scrub_page	*pagev[SCRUB_PAGES_PER_RD_BIO];
93 #endif
94 	int			page_count;
95 	int			next_free;
96 	struct btrfs_work	work;
97 };
98 
99 struct scrub_block {
100 	struct scrub_page	*pagev[SCRUB_MAX_PAGES_PER_BLOCK];
101 	int			page_count;
102 	atomic_t		outstanding_pages;
103 	refcount_t		refs; /* free mem on transition to zero */
104 	struct scrub_ctx	*sctx;
105 	struct scrub_parity	*sparity;
106 	struct {
107 		unsigned int	header_error:1;
108 		unsigned int	checksum_error:1;
109 		unsigned int	no_io_error_seen:1;
110 		unsigned int	generation_error:1; /* also sets header_error */
111 
112 		/* The following is for the data used to check parity, */
113 		/* i.e. for the data that has a checksum */
114 		unsigned int	data_corrected:1;
115 	};
116 	struct btrfs_work	work;
117 };
118 
119 /* Used for the chunks with parity stripe such as RAID5/6 */
120 struct scrub_parity {
121 	struct scrub_ctx	*sctx;
122 
123 	struct btrfs_device	*scrub_dev;
124 
125 	u64			logic_start;
126 
127 	u64			logic_end;
128 
129 	int			nsectors;
130 
131 	u64			stripe_len;
132 
133 	refcount_t		refs;
134 
135 	struct list_head	spages;
136 
137 	/* Work of parity check and repair */
138 	struct btrfs_work	work;
139 
140 	/* Mark the parity blocks which have data */
141 	unsigned long		*dbitmap;
142 
143 	/*
144 	 * Mark the parity blocks which have data, but where errors happened
145 	 * when reading or checking that data
146 	 */
147 	unsigned long		*ebitmap;
148 
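	/*
	 * Trailing storage for the two bitmaps above; dbitmap and ebitmap are
	 * expected to point into this area, which is sized when the structure
	 * is allocated.
	 */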
149 	unsigned long		bitmap[0];
150 };
151 
152 struct scrub_ctx {
153 	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
154 	struct btrfs_fs_info	*fs_info;
155 	int			first_free;
156 	int			curr;
157 	atomic_t		bios_in_flight;
158 	atomic_t		workers_pending;
159 	spinlock_t		list_lock;
160 	wait_queue_head_t	list_wait;
161 	u16			csum_size;
162 	struct list_head	csum_list;
163 	atomic_t		cancel_req;
164 	int			readonly;
165 	int			pages_per_rd_bio;
166 
167 	int			is_dev_replace;
168 
169 	struct scrub_bio        *wr_curr_bio;
170 	struct mutex            wr_lock;
171 	int                     pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
172 	struct btrfs_device     *wr_tgtdev;
173 	bool                    flush_all_writes;
174 
175 	/*
176 	 * statistics
177 	 */
178 	struct btrfs_scrub_progress stat;
179 	spinlock_t		stat_lock;
180 
181 	/*
182 	 * Use a ref counter to avoid use-after-free issues. Scrub workers
183 	 * decrement bios_in_flight and workers_pending and then do a wakeup
184 	 * on the list_wait wait queue. We must ensure the main scrub task
185 	 * doesn't free the scrub context before or while the workers are
186 	 * doing the wakeup() call.
187 	 */
188 	refcount_t              refs;
189 };
190 
191 struct scrub_warning {
192 	struct btrfs_path	*path;
193 	u64			extent_item_size;
194 	const char		*errstr;
195 	u64			physical;
196 	u64			logical;
197 	struct btrfs_device	*dev;
198 };
199 
200 struct full_stripe_lock {
201 	struct rb_node node;
202 	u64 logical;
203 	u64 refs;
204 	struct mutex mutex;
205 };
206 
207 static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
208 static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
209 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
210 static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
211 				     struct scrub_block *sblocks_for_recheck);
212 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
213 				struct scrub_block *sblock,
214 				int retry_failed_mirror);
215 static void scrub_recheck_block_checksum(struct scrub_block *sblock);
216 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
217 					     struct scrub_block *sblock_good);
218 static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
219 					    struct scrub_block *sblock_good,
220 					    int page_num, int force_write);
221 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
222 static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
223 					   int page_num);
224 static int scrub_checksum_data(struct scrub_block *sblock);
225 static int scrub_checksum_tree_block(struct scrub_block *sblock);
226 static int scrub_checksum_super(struct scrub_block *sblock);
227 static void scrub_block_get(struct scrub_block *sblock);
228 static void scrub_block_put(struct scrub_block *sblock);
229 static void scrub_page_get(struct scrub_page *spage);
230 static void scrub_page_put(struct scrub_page *spage);
231 static void scrub_parity_get(struct scrub_parity *sparity);
232 static void scrub_parity_put(struct scrub_parity *sparity);
233 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
234 				    struct scrub_page *spage);
235 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
236 		       u64 physical, struct btrfs_device *dev, u64 flags,
237 		       u64 gen, int mirror_num, u8 *csum, int force,
238 		       u64 physical_for_dev_replace);
239 static void scrub_bio_end_io(struct bio *bio);
240 static void scrub_bio_end_io_worker(struct btrfs_work *work);
241 static void scrub_block_complete(struct scrub_block *sblock);
242 static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
243 			       u64 extent_logical, u64 extent_len,
244 			       u64 *extent_physical,
245 			       struct btrfs_device **extent_dev,
246 			       int *extent_mirror_num);
247 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
248 				    struct scrub_page *spage);
249 static void scrub_wr_submit(struct scrub_ctx *sctx);
250 static void scrub_wr_bio_end_io(struct bio *bio);
251 static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
252 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
253 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
254 static void scrub_put_ctx(struct scrub_ctx *sctx);
255 
256 static inline int scrub_is_page_on_raid56(struct scrub_page *page)
257 {
258 	return page->recover &&
259 	       (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
260 }
261 
262 static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
263 {
264 	refcount_inc(&sctx->refs);
265 	atomic_inc(&sctx->bios_in_flight);
266 }
267 
268 static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
269 {
270 	atomic_dec(&sctx->bios_in_flight);
271 	wake_up(&sctx->list_wait);
272 	scrub_put_ctx(sctx);
273 }
274 
275 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
276 {
277 	while (atomic_read(&fs_info->scrub_pause_req)) {
278 		mutex_unlock(&fs_info->scrub_lock);
279 		wait_event(fs_info->scrub_pause_wait,
280 		   atomic_read(&fs_info->scrub_pause_req) == 0);
281 		mutex_lock(&fs_info->scrub_lock);
282 	}
283 }
284 
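/*
 * scrub_pause_on() and scrub_pause_off() bracket code during which this scrub
 * counts itself as paused: the "on" side bumps scrubs_paused and wakes any
 * waiters, while the "off" side blocks in __scrub_blocked_if_needed() until a
 * pending scrub_pause_req has been cleared, then drops scrubs_paused again.
 */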
285 static void scrub_pause_on(struct btrfs_fs_info *fs_info)
286 {
287 	atomic_inc(&fs_info->scrubs_paused);
288 	wake_up(&fs_info->scrub_pause_wait);
289 }
290 
291 static void scrub_pause_off(struct btrfs_fs_info *fs_info)
292 {
293 	mutex_lock(&fs_info->scrub_lock);
294 	__scrub_blocked_if_needed(fs_info);
295 	atomic_dec(&fs_info->scrubs_paused);
296 	mutex_unlock(&fs_info->scrub_lock);
297 
298 	wake_up(&fs_info->scrub_pause_wait);
299 }
300 
301 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
302 {
303 	scrub_pause_on(fs_info);
304 	scrub_pause_off(fs_info);
305 }
306 
307 /*
308  * Insert new full stripe lock into full stripe locks tree
309  *
310  * Return pointer to existing or newly inserted full_stripe_lock structure if
311  * everything works well.
312  * Return ERR_PTR(-ENOMEM) if we failed to allocate memory
313  *
314  * NOTE: caller must hold full_stripe_locks_root->lock before calling this
315  * function
316  */
317 static struct full_stripe_lock *insert_full_stripe_lock(
318 		struct btrfs_full_stripe_locks_tree *locks_root,
319 		u64 fstripe_logical)
320 {
321 	struct rb_node **p;
322 	struct rb_node *parent = NULL;
323 	struct full_stripe_lock *entry;
324 	struct full_stripe_lock *ret;
325 
326 	lockdep_assert_held(&locks_root->lock);
327 
328 	p = &locks_root->root.rb_node;
329 	while (*p) {
330 		parent = *p;
331 		entry = rb_entry(parent, struct full_stripe_lock, node);
332 		if (fstripe_logical < entry->logical) {
333 			p = &(*p)->rb_left;
334 		} else if (fstripe_logical > entry->logical) {
335 			p = &(*p)->rb_right;
336 		} else {
337 			entry->refs++;
338 			return entry;
339 		}
340 	}
341 
342 	/* Insert new lock */
343 	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
344 	if (!ret)
345 		return ERR_PTR(-ENOMEM);
346 	ret->logical = fstripe_logical;
347 	ret->refs = 1;
348 	mutex_init(&ret->mutex);
349 
350 	rb_link_node(&ret->node, parent, p);
351 	rb_insert_color(&ret->node, &locks_root->root);
352 	return ret;
353 }
354 
355 /*
356  * Search for a full stripe lock of a block group
357  *
358  * Return pointer to existing full stripe lock if found
359  * Return NULL if not found
360  */
361 static struct full_stripe_lock *search_full_stripe_lock(
362 		struct btrfs_full_stripe_locks_tree *locks_root,
363 		u64 fstripe_logical)
364 {
365 	struct rb_node *node;
366 	struct full_stripe_lock *entry;
367 
368 	lockdep_assert_held(&locks_root->lock);
369 
370 	node = locks_root->root.rb_node;
371 	while (node) {
372 		entry = rb_entry(node, struct full_stripe_lock, node);
373 		if (fstripe_logical < entry->logical)
374 			node = node->rb_left;
375 		else if (fstripe_logical > entry->logical)
376 			node = node->rb_right;
377 		else
378 			return entry;
379 	}
380 	return NULL;
381 }
382 
383 /*
384  * Helper to get full stripe logical from a normal bytenr.
385  *
386  * Caller must ensure @cache is a RAID56 block group.
387  */
388 static u64 get_full_stripe_logical(struct btrfs_block_group_cache *cache,
389 				   u64 bytenr)
390 {
391 	u64 ret;
392 
393 	/*
394 	 * Due to chunk item size limit, full stripe length should not be
395 	 * larger than U32_MAX. Just a sanity check here.
396 	 */
397 	WARN_ON_ONCE(cache->full_stripe_len >= U32_MAX);
398 
399 	/*
400 	 * round_down() can only handle powers of 2, while RAID56 full
401 	 * stripe length can be 64KiB * n, so we need to manually round down.
402 	 */
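	/*
	 * For example (hypothetical numbers): with a 192KiB full stripe
	 * (3 data stripes of 64KiB each) and a bytenr 1000KiB past the chunk
	 * start, the division below yields 5 and the result is the chunk
	 * start plus 960KiB.
	 */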
403 	ret = div64_u64(bytenr - cache->key.objectid, cache->full_stripe_len) *
404 		cache->full_stripe_len + cache->key.objectid;
405 	return ret;
406 }
407 
408 /*
409  * Lock a full stripe to avoid concurrency of recovery and read
410  *
411  * It's only used for profiles with parity (RAID5/6); for other profiles it
412  * does nothing.
413  *
414  * Return 0 if we locked the full stripe covering @bytenr, with a mutex held.
415  * The caller must then call unlock_full_stripe() in the same context.
416  *
417  * Return <0 if an error is encountered.
418  */
419 static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
420 			    bool *locked_ret)
421 {
422 	struct btrfs_block_group_cache *bg_cache;
423 	struct btrfs_full_stripe_locks_tree *locks_root;
424 	struct full_stripe_lock *existing;
425 	u64 fstripe_start;
426 	int ret = 0;
427 
428 	*locked_ret = false;
429 	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
430 	if (!bg_cache) {
431 		ASSERT(0);
432 		return -ENOENT;
433 	}
434 
435 	/* Profiles not based on parity don't need full stripe lock */
436 	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
437 		goto out;
438 	locks_root = &bg_cache->full_stripe_locks_root;
439 
440 	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);
441 
442 	/* Now insert the full stripe lock */
443 	mutex_lock(&locks_root->lock);
444 	existing = insert_full_stripe_lock(locks_root, fstripe_start);
445 	mutex_unlock(&locks_root->lock);
446 	if (IS_ERR(existing)) {
447 		ret = PTR_ERR(existing);
448 		goto out;
449 	}
450 	mutex_lock(&existing->mutex);
451 	*locked_ret = true;
452 out:
453 	btrfs_put_block_group(bg_cache);
454 	return ret;
455 }
456 
457 /*
458  * Unlock a full stripe.
459  *
460  * NOTE: Caller must ensure it's the same context that called the
461  * corresponding lock_full_stripe().
462  *
463  * Return 0 if we unlock the full stripe without problem.
464  * Return <0 for error.
465  */
466 static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
467 			      bool locked)
468 {
469 	struct btrfs_block_group_cache *bg_cache;
470 	struct btrfs_full_stripe_locks_tree *locks_root;
471 	struct full_stripe_lock *fstripe_lock;
472 	u64 fstripe_start;
473 	bool freeit = false;
474 	int ret = 0;
475 
476 	/* If we didn't acquire full stripe lock, no need to continue */
477 	if (!locked)
478 		return 0;
479 
480 	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
481 	if (!bg_cache) {
482 		ASSERT(0);
483 		return -ENOENT;
484 	}
485 	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
486 		goto out;
487 
488 	locks_root = &bg_cache->full_stripe_locks_root;
489 	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);
490 
491 	mutex_lock(&locks_root->lock);
492 	fstripe_lock = search_full_stripe_lock(locks_root, fstripe_start);
493 	/* Unpaired unlock_full_stripe() detected */
494 	if (!fstripe_lock) {
495 		WARN_ON(1);
496 		ret = -ENOENT;
497 		mutex_unlock(&locks_root->lock);
498 		goto out;
499 	}
500 
501 	if (fstripe_lock->refs == 0) {
502 		WARN_ON(1);
503 		btrfs_warn(fs_info, "full stripe lock at %llu refcount underflow",
504 			fstripe_lock->logical);
505 	} else {
506 		fstripe_lock->refs--;
507 	}
508 
509 	if (fstripe_lock->refs == 0) {
510 		rb_erase(&fstripe_lock->node, &locks_root->root);
511 		freeit = true;
512 	}
513 	mutex_unlock(&locks_root->lock);
514 
515 	mutex_unlock(&fstripe_lock->mutex);
516 	if (freeit)
517 		kfree(fstripe_lock);
518 out:
519 	btrfs_put_block_group(bg_cache);
520 	return ret;
521 }
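
/*
 * A minimal usage sketch of the pair above, mirroring how
 * scrub_handle_errored_block() below pairs these calls:
 *
 *	bool locked;
 *
 *	ret = lock_full_stripe(fs_info, logical, &locked);
 *	if (ret < 0)
 *		return ret;
 *	... recheck/repair the blocks inside the full stripe ...
 *	ret = unlock_full_stripe(fs_info, logical, locked);
 */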
522 
523 static void scrub_free_csums(struct scrub_ctx *sctx)
524 {
525 	while (!list_empty(&sctx->csum_list)) {
526 		struct btrfs_ordered_sum *sum;
527 		sum = list_first_entry(&sctx->csum_list,
528 				       struct btrfs_ordered_sum, list);
529 		list_del(&sum->list);
530 		kfree(sum);
531 	}
532 }
533 
534 static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
535 {
536 	int i;
537 
538 	if (!sctx)
539 		return;
540 
541 	/* this can happen when scrub is cancelled */
542 	if (sctx->curr != -1) {
543 		struct scrub_bio *sbio = sctx->bios[sctx->curr];
544 
545 		for (i = 0; i < sbio->page_count; i++) {
546 			WARN_ON(!sbio->pagev[i]->page);
547 			scrub_block_put(sbio->pagev[i]->sblock);
548 		}
549 		bio_put(sbio->bio);
550 	}
551 
552 	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
553 		struct scrub_bio *sbio = sctx->bios[i];
554 
555 		if (!sbio)
556 			break;
557 		kfree(sbio);
558 	}
559 
560 	kfree(sctx->wr_curr_bio);
561 	scrub_free_csums(sctx);
562 	kfree(sctx);
563 }
564 
565 static void scrub_put_ctx(struct scrub_ctx *sctx)
566 {
567 	if (refcount_dec_and_test(&sctx->refs))
568 		scrub_free_ctx(sctx);
569 }
570 
571 static noinline_for_stack
572 struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
573 {
574 	struct scrub_ctx *sctx;
575 	int		i;
576 	struct btrfs_fs_info *fs_info = dev->fs_info;
577 
578 	sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
579 	if (!sctx)
580 		goto nomem;
581 	refcount_set(&sctx->refs, 1);
582 	sctx->is_dev_replace = is_dev_replace;
583 	sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
584 	sctx->curr = -1;
585 	sctx->fs_info = dev->fs_info;
586 	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
587 		struct scrub_bio *sbio;
588 
589 		sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
590 		if (!sbio)
591 			goto nomem;
592 		sctx->bios[i] = sbio;
593 
594 		sbio->index = i;
595 		sbio->sctx = sctx;
596 		sbio->page_count = 0;
597 		btrfs_init_work(&sbio->work, btrfs_scrub_helper,
598 				scrub_bio_end_io_worker, NULL, NULL);
599 
600 		if (i != SCRUB_BIOS_PER_SCTX - 1)
601 			sctx->bios[i]->next_free = i + 1;
602 		else
603 			sctx->bios[i]->next_free = -1;
604 	}
605 	sctx->first_free = 0;
606 	atomic_set(&sctx->bios_in_flight, 0);
607 	atomic_set(&sctx->workers_pending, 0);
608 	atomic_set(&sctx->cancel_req, 0);
609 	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
610 	INIT_LIST_HEAD(&sctx->csum_list);
611 
612 	spin_lock_init(&sctx->list_lock);
613 	spin_lock_init(&sctx->stat_lock);
614 	init_waitqueue_head(&sctx->list_wait);
615 
616 	WARN_ON(sctx->wr_curr_bio != NULL);
617 	mutex_init(&sctx->wr_lock);
618 	sctx->wr_curr_bio = NULL;
619 	if (is_dev_replace) {
620 		WARN_ON(!fs_info->dev_replace.tgtdev);
621 		sctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
622 		sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
623 		sctx->flush_all_writes = false;
624 	}
625 
626 	return sctx;
627 
628 nomem:
629 	scrub_free_ctx(sctx);
630 	return ERR_PTR(-ENOMEM);
631 }
632 
633 static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
634 				     void *warn_ctx)
635 {
636 	u64 isize;
637 	u32 nlink;
638 	int ret;
639 	int i;
640 	unsigned nofs_flag;
641 	struct extent_buffer *eb;
642 	struct btrfs_inode_item *inode_item;
643 	struct scrub_warning *swarn = warn_ctx;
644 	struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
645 	struct inode_fs_paths *ipath = NULL;
646 	struct btrfs_root *local_root;
647 	struct btrfs_key root_key;
648 	struct btrfs_key key;
649 
650 	root_key.objectid = root;
651 	root_key.type = BTRFS_ROOT_ITEM_KEY;
652 	root_key.offset = (u64)-1;
653 	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
654 	if (IS_ERR(local_root)) {
655 		ret = PTR_ERR(local_root);
656 		goto err;
657 	}
658 
659 	/*
660 	 * this makes the path point to (inum INODE_ITEM ioff)
661 	 */
662 	key.objectid = inum;
663 	key.type = BTRFS_INODE_ITEM_KEY;
664 	key.offset = 0;
665 
666 	ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
667 	if (ret) {
668 		btrfs_release_path(swarn->path);
669 		goto err;
670 	}
671 
672 	eb = swarn->path->nodes[0];
673 	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
674 					struct btrfs_inode_item);
675 	isize = btrfs_inode_size(eb, inode_item);
676 	nlink = btrfs_inode_nlink(eb, inode_item);
677 	btrfs_release_path(swarn->path);
678 
679 	/*
680 	 * init_ipath might indirectly call vmalloc, or use GFP_KERNEL. Scrub
681 	 * uses GFP_NOFS in this context, so we keep it consistent but it does
682 	 * not seem to be strictly necessary.
683 	 */
684 	nofs_flag = memalloc_nofs_save();
685 	ipath = init_ipath(4096, local_root, swarn->path);
686 	memalloc_nofs_restore(nofs_flag);
687 	if (IS_ERR(ipath)) {
688 		ret = PTR_ERR(ipath);
689 		ipath = NULL;
690 		goto err;
691 	}
692 	ret = paths_from_inode(inum, ipath);
693 
694 	if (ret < 0)
695 		goto err;
696 
697 	/*
698 	 * we deliberately ignore the fact that ipath might have been too small
699 	 * to hold all of the paths here
700 	 */
701 	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
702 		btrfs_warn_in_rcu(fs_info,
703 "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %llu, links %u (path: %s)",
704 				  swarn->errstr, swarn->logical,
705 				  rcu_str_deref(swarn->dev->name),
706 				  swarn->physical,
707 				  root, inum, offset,
708 				  min(isize - offset, (u64)PAGE_SIZE), nlink,
709 				  (char *)(unsigned long)ipath->fspath->val[i]);
710 
711 	free_ipath(ipath);
712 	return 0;
713 
714 err:
715 	btrfs_warn_in_rcu(fs_info,
716 			  "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
717 			  swarn->errstr, swarn->logical,
718 			  rcu_str_deref(swarn->dev->name),
719 			  swarn->physical,
720 			  root, inum, offset, ret);
721 
722 	free_ipath(ipath);
723 	return 0;
724 }
725 
726 static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
727 {
728 	struct btrfs_device *dev;
729 	struct btrfs_fs_info *fs_info;
730 	struct btrfs_path *path;
731 	struct btrfs_key found_key;
732 	struct extent_buffer *eb;
733 	struct btrfs_extent_item *ei;
734 	struct scrub_warning swarn;
735 	unsigned long ptr = 0;
736 	u64 extent_item_pos;
737 	u64 flags = 0;
738 	u64 ref_root;
739 	u32 item_size;
740 	u8 ref_level = 0;
741 	int ret;
742 
743 	WARN_ON(sblock->page_count < 1);
744 	dev = sblock->pagev[0]->dev;
745 	fs_info = sblock->sctx->fs_info;
746 
747 	path = btrfs_alloc_path();
748 	if (!path)
749 		return;
750 
751 	swarn.physical = sblock->pagev[0]->physical;
752 	swarn.logical = sblock->pagev[0]->logical;
753 	swarn.errstr = errstr;
754 	swarn.dev = NULL;
755 
756 	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
757 				  &flags);
758 	if (ret < 0)
759 		goto out;
760 
761 	extent_item_pos = swarn.logical - found_key.objectid;
762 	swarn.extent_item_size = found_key.offset;
763 
764 	eb = path->nodes[0];
765 	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
766 	item_size = btrfs_item_size_nr(eb, path->slots[0]);
767 
768 	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
769 		do {
770 			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
771 						      item_size, &ref_root,
772 						      &ref_level);
773 			btrfs_warn_in_rcu(fs_info,
774 "%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
775 				errstr, swarn.logical,
776 				rcu_str_deref(dev->name),
777 				swarn.physical,
778 				ref_level ? "node" : "leaf",
779 				ret < 0 ? -1 : ref_level,
780 				ret < 0 ? -1 : ref_root);
781 		} while (ret != 1);
782 		btrfs_release_path(path);
783 	} else {
784 		btrfs_release_path(path);
785 		swarn.path = path;
786 		swarn.dev = dev;
787 		iterate_extent_inodes(fs_info, found_key.objectid,
788 					extent_item_pos, 1,
789 					scrub_print_warning_inode, &swarn, false);
790 	}
791 
792 out:
793 	btrfs_free_path(path);
794 }
795 
796 static inline void scrub_get_recover(struct scrub_recover *recover)
797 {
798 	refcount_inc(&recover->refs);
799 }
800 
801 static inline void scrub_put_recover(struct btrfs_fs_info *fs_info,
802 				     struct scrub_recover *recover)
803 {
804 	if (refcount_dec_and_test(&recover->refs)) {
805 		btrfs_bio_counter_dec(fs_info);
806 		btrfs_put_bbio(recover->bbio);
807 		kfree(recover);
808 	}
809 }
810 
811 /*
812  * scrub_handle_errored_block gets called when either verification of the
813  * pages failed or the bio failed to read, e.g. with EIO. In the latter
814  * case, this function handles all pages in the bio, even though only one
815  * may be bad.
816  * The goal of this function is to repair the errored block by using the
817  * contents of one of the mirrors.
818  */
819 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
820 {
821 	struct scrub_ctx *sctx = sblock_to_check->sctx;
822 	struct btrfs_device *dev;
823 	struct btrfs_fs_info *fs_info;
824 	u64 logical;
825 	unsigned int failed_mirror_index;
826 	unsigned int is_metadata;
827 	unsigned int have_csum;
828 	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
829 	struct scrub_block *sblock_bad;
830 	int ret;
831 	int mirror_index;
832 	int page_num;
833 	int success;
834 	bool full_stripe_locked;
835 	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
836 				      DEFAULT_RATELIMIT_BURST);
837 
838 	BUG_ON(sblock_to_check->page_count < 1);
839 	fs_info = sctx->fs_info;
840 	if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
841 		/*
842 		 * if we find an error in a super block, we just report it.
843 		 * Super blocks get rewritten with the next transaction commit
844 		 * anyway
845 		 */
846 		spin_lock(&sctx->stat_lock);
847 		++sctx->stat.super_errors;
848 		spin_unlock(&sctx->stat_lock);
849 		return 0;
850 	}
851 	logical = sblock_to_check->pagev[0]->logical;
852 	BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
853 	failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
854 	is_metadata = !(sblock_to_check->pagev[0]->flags &
855 			BTRFS_EXTENT_FLAG_DATA);
856 	have_csum = sblock_to_check->pagev[0]->have_csum;
857 	dev = sblock_to_check->pagev[0]->dev;
858 
859 	/*
860 	 * For RAID5/6, a race can happen with the scrub thread of a different
861 	 * device. On data corruption, the parity and data scrub threads will
862 	 * both try to recover the data.
863 	 * Such a race can lead to doubly counted csum errors, or even to
864 	 * unrecoverable errors.
865 	 */
866 	ret = lock_full_stripe(fs_info, logical, &full_stripe_locked);
867 	if (ret < 0) {
868 		spin_lock(&sctx->stat_lock);
869 		if (ret == -ENOMEM)
870 			sctx->stat.malloc_errors++;
871 		sctx->stat.read_errors++;
872 		sctx->stat.uncorrectable_errors++;
873 		spin_unlock(&sctx->stat_lock);
874 		return ret;
875 	}
876 
877 	/*
878 	 * Read all mirrors one after the other. This includes re-reading
879 	 * the extent or metadata block that failed (the reason this fixup
880 	 * code was called), this time page by page, in order to know which
881 	 * pages caused I/O errors and which ones are good (for all
882 	 * mirrors).
883 	 * The goal is to handle the situation when more than one
884 	 * mirror contains I/O errors, but the errors do not
885 	 * overlap, i.e. the data can be repaired by selecting the
886 	 * pages from those mirrors without I/O error on the
887 	 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
888 	 * would be that mirror #1 has an I/O error on the first page,
889 	 * the second page is good, and mirror #2 has an I/O error on
890 	 * the second page, but the first page is good.
891 	 * Then the first page of the first mirror can be repaired by
892 	 * taking the first page of the second mirror, and the
893 	 * second page of the second mirror can be repaired by
894 	 * copying the contents of the 2nd page of the 1st mirror.
895 	 * One more note: if the pages of one mirror contain I/O
896 	 * errors, the checksum cannot be verified. In order to get
897 	 * the best data for repairing, the first attempt is to find
898 	 * a mirror without I/O errors and with a validated checksum.
899 	 * Only if this is not possible are the pages picked from
900 	 * mirrors with I/O errors without considering the checksum.
901 	 * If the latter is the case, at the end, the checksum of the
902 	 * repaired area is verified in order to correctly maintain
903 	 * the statistics.
904 	 */
905 
906 	sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
907 				      sizeof(*sblocks_for_recheck), GFP_NOFS);
908 	if (!sblocks_for_recheck) {
909 		spin_lock(&sctx->stat_lock);
910 		sctx->stat.malloc_errors++;
911 		sctx->stat.read_errors++;
912 		sctx->stat.uncorrectable_errors++;
913 		spin_unlock(&sctx->stat_lock);
914 		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
915 		goto out;
916 	}
917 
918 	/* setup the context, map the logical blocks and alloc the pages */
919 	ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
920 	if (ret) {
921 		spin_lock(&sctx->stat_lock);
922 		sctx->stat.read_errors++;
923 		sctx->stat.uncorrectable_errors++;
924 		spin_unlock(&sctx->stat_lock);
925 		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
926 		goto out;
927 	}
928 	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
929 	sblock_bad = sblocks_for_recheck + failed_mirror_index;
930 
931 	/* build and submit the bios for the failed mirror, check checksums */
932 	scrub_recheck_block(fs_info, sblock_bad, 1);
933 
934 	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
935 	    sblock_bad->no_io_error_seen) {
936 		/*
937 		 * the error disappeared after reading page by page, or
938 		 * the area was part of a huge bio and other parts of the
939 		 * bio caused I/O errors, or the block layer merged several
940 		 * read requests into one and the error is caused by a
941 		 * different bio (usually one of the two latter cases is
942 		 * the cause)
943 		 */
944 		spin_lock(&sctx->stat_lock);
945 		sctx->stat.unverified_errors++;
946 		sblock_to_check->data_corrected = 1;
947 		spin_unlock(&sctx->stat_lock);
948 
949 		if (sctx->is_dev_replace)
950 			scrub_write_block_to_dev_replace(sblock_bad);
951 		goto out;
952 	}
953 
954 	if (!sblock_bad->no_io_error_seen) {
955 		spin_lock(&sctx->stat_lock);
956 		sctx->stat.read_errors++;
957 		spin_unlock(&sctx->stat_lock);
958 		if (__ratelimit(&_rs))
959 			scrub_print_warning("i/o error", sblock_to_check);
960 		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
961 	} else if (sblock_bad->checksum_error) {
962 		spin_lock(&sctx->stat_lock);
963 		sctx->stat.csum_errors++;
964 		spin_unlock(&sctx->stat_lock);
965 		if (__ratelimit(&_rs))
966 			scrub_print_warning("checksum error", sblock_to_check);
967 		btrfs_dev_stat_inc_and_print(dev,
968 					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
969 	} else if (sblock_bad->header_error) {
970 		spin_lock(&sctx->stat_lock);
971 		sctx->stat.verify_errors++;
972 		spin_unlock(&sctx->stat_lock);
973 		if (__ratelimit(&_rs))
974 			scrub_print_warning("checksum/header error",
975 					    sblock_to_check);
976 		if (sblock_bad->generation_error)
977 			btrfs_dev_stat_inc_and_print(dev,
978 				BTRFS_DEV_STAT_GENERATION_ERRS);
979 		else
980 			btrfs_dev_stat_inc_and_print(dev,
981 				BTRFS_DEV_STAT_CORRUPTION_ERRS);
982 	}
983 
984 	if (sctx->readonly) {
985 		ASSERT(!sctx->is_dev_replace);
986 		goto out;
987 	}
988 
989 	/*
990 	 * now build and submit the bios for the other mirrors, check
991 	 * checksums.
992 	 * First try to pick the mirror which is completely without I/O
993 	 * errors and also does not have a checksum error.
994 	 * If one is found, and if a checksum is present, the full block
995 	 * that is known to contain an error is rewritten. Afterwards
996 	 * the block is known to be corrected.
997 	 * If a mirror is found which is completely correct, and no
998 	 * checksum is present, only those pages are rewritten that had
999 	 * an I/O error in the block to be repaired, since it cannot be
1000 	 * determined, which copy of the other pages is better (and it
1001 	 * could happen otherwise that a correct page would be
1002 	 * overwritten by a bad one).
1003 	 */
1004 	for (mirror_index = 0; ;mirror_index++) {
1005 		struct scrub_block *sblock_other;
1006 
1007 		if (mirror_index == failed_mirror_index)
1008 			continue;
1009 
1010 		/* raid56's mirror can be more than BTRFS_MAX_MIRRORS */
1011 		if (!scrub_is_page_on_raid56(sblock_bad->pagev[0])) {
1012 			if (mirror_index >= BTRFS_MAX_MIRRORS)
1013 				break;
1014 			if (!sblocks_for_recheck[mirror_index].page_count)
1015 				break;
1016 
1017 			sblock_other = sblocks_for_recheck + mirror_index;
1018 		} else {
1019 			struct scrub_recover *r = sblock_bad->pagev[0]->recover;
1020 			int max_allowed = r->bbio->num_stripes -
1021 						r->bbio->num_tgtdevs;
1022 
1023 			if (mirror_index >= max_allowed)
1024 				break;
1025 			if (!sblocks_for_recheck[1].page_count)
1026 				break;
1027 
1028 			ASSERT(failed_mirror_index == 0);
1029 			sblock_other = sblocks_for_recheck + 1;
1030 			sblock_other->pagev[0]->mirror_num = 1 + mirror_index;
1031 		}
1032 
1033 		/* build and submit the bios, check checksums */
1034 		scrub_recheck_block(fs_info, sblock_other, 0);
1035 
1036 		if (!sblock_other->header_error &&
1037 		    !sblock_other->checksum_error &&
1038 		    sblock_other->no_io_error_seen) {
1039 			if (sctx->is_dev_replace) {
1040 				scrub_write_block_to_dev_replace(sblock_other);
1041 				goto corrected_error;
1042 			} else {
1043 				ret = scrub_repair_block_from_good_copy(
1044 						sblock_bad, sblock_other);
1045 				if (!ret)
1046 					goto corrected_error;
1047 			}
1048 		}
1049 	}
1050 
1051 	if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
1052 		goto did_not_correct_error;
1053 
1054 	/*
1055 	 * In case of I/O errors in the area that is supposed to be
1056 	 * repaired, continue by picking good copies of those pages.
1057 	 * Select the good pages from mirrors to rewrite bad pages from
1058 	 * the area to fix. Afterwards verify the checksum of the block
1059 	 * that is supposed to be repaired. This verification step is
1060 	 * only done for the purpose of statistics counting and for the
1061 	 * final scrub report on whether errors remain.
1062 	 * A perfect algorithm could make use of the checksum and try
1063 	 * all possible combinations of pages from the different mirrors
1064 	 * until the checksum verification succeeds. For example, when
1065 	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
1066 	 * of mirror #2 is readable but the final checksum test fails,
1067 	 * then the 2nd page of mirror #3 could be tried, to see whether
1068 	 * the final checksum then succeeds. But this would be a rare
1069 	 * exception and is therefore not implemented. At least overwriting
1070 	 * the good copy is avoided.
1071 	 * A more useful improvement would be to pick the sectors
1072 	 * without I/O error based on sector sizes (512 bytes on legacy
1073 	 * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
1074 	 * mirror could be repaired by taking 512 bytes of a different
1075 	 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
1076 	 * area are unreadable.
1077 	 */
1078 	success = 1;
1079 	for (page_num = 0; page_num < sblock_bad->page_count;
1080 	     page_num++) {
1081 		struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1082 		struct scrub_block *sblock_other = NULL;
1083 
1084 		/* skip no-io-error page in scrub */
1085 		if (!page_bad->io_error && !sctx->is_dev_replace)
1086 			continue;
1087 
1088 		if (scrub_is_page_on_raid56(sblock_bad->pagev[0])) {
1089 			/*
1090 			 * In case of dev replace, if the raid56 rebuild process
1091 			 * didn't produce correct data, then copy the content
1092 			 * of sblock_bad to make sure the target device is
1093 			 * identical to the source device, instead of writing
1094 			 * garbage data from the sblock_for_recheck array to it.
1095 			 */
1096 			sblock_other = NULL;
1097 		} else if (page_bad->io_error) {
1098 			/* try to find no-io-error page in mirrors */
1099 			for (mirror_index = 0;
1100 			     mirror_index < BTRFS_MAX_MIRRORS &&
1101 			     sblocks_for_recheck[mirror_index].page_count > 0;
1102 			     mirror_index++) {
1103 				if (!sblocks_for_recheck[mirror_index].
1104 				    pagev[page_num]->io_error) {
1105 					sblock_other = sblocks_for_recheck +
1106 						       mirror_index;
1107 					break;
1108 				}
1109 			}
1110 			if (!sblock_other)
1111 				success = 0;
1112 		}
1113 
1114 		if (sctx->is_dev_replace) {
1115 			/*
1116 			 * did not find a mirror to fetch the page
1117 			 * from. scrub_write_page_to_dev_replace()
1118 			 * handles this case (page->io_error), by
1119 			 * filling the block with zeros before
1120 			 * submitting the write request
1121 			 */
1122 			if (!sblock_other)
1123 				sblock_other = sblock_bad;
1124 
1125 			if (scrub_write_page_to_dev_replace(sblock_other,
1126 							    page_num) != 0) {
1127 				atomic64_inc(
1128 					&fs_info->dev_replace.num_write_errors);
1129 				success = 0;
1130 			}
1131 		} else if (sblock_other) {
1132 			ret = scrub_repair_page_from_good_copy(sblock_bad,
1133 							       sblock_other,
1134 							       page_num, 0);
1135 			if (0 == ret)
1136 				page_bad->io_error = 0;
1137 			else
1138 				success = 0;
1139 		}
1140 	}
1141 
1142 	if (success && !sctx->is_dev_replace) {
1143 		if (is_metadata || have_csum) {
1144 			/*
1145 			 * need to verify the checksum now that all
1146 			 * sectors on disk are repaired (the write
1147 			 * request for data to be repaired is on its way).
1148 			 * Just be lazy and use scrub_recheck_block()
1149 			 * which re-reads the data before the checksum
1150 			 * is verified, but most likely the data comes out
1151 			 * of the page cache.
1152 			 */
1153 			scrub_recheck_block(fs_info, sblock_bad, 1);
1154 			if (!sblock_bad->header_error &&
1155 			    !sblock_bad->checksum_error &&
1156 			    sblock_bad->no_io_error_seen)
1157 				goto corrected_error;
1158 			else
1159 				goto did_not_correct_error;
1160 		} else {
1161 corrected_error:
1162 			spin_lock(&sctx->stat_lock);
1163 			sctx->stat.corrected_errors++;
1164 			sblock_to_check->data_corrected = 1;
1165 			spin_unlock(&sctx->stat_lock);
1166 			btrfs_err_rl_in_rcu(fs_info,
1167 				"fixed up error at logical %llu on dev %s",
1168 				logical, rcu_str_deref(dev->name));
1169 		}
1170 	} else {
1171 did_not_correct_error:
1172 		spin_lock(&sctx->stat_lock);
1173 		sctx->stat.uncorrectable_errors++;
1174 		spin_unlock(&sctx->stat_lock);
1175 		btrfs_err_rl_in_rcu(fs_info,
1176 			"unable to fixup (regular) error at logical %llu on dev %s",
1177 			logical, rcu_str_deref(dev->name));
1178 	}
1179 
1180 out:
1181 	if (sblocks_for_recheck) {
1182 		for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
1183 		     mirror_index++) {
1184 			struct scrub_block *sblock = sblocks_for_recheck +
1185 						     mirror_index;
1186 			struct scrub_recover *recover;
1187 			int page_index;
1188 
1189 			for (page_index = 0; page_index < sblock->page_count;
1190 			     page_index++) {
1191 				sblock->pagev[page_index]->sblock = NULL;
1192 				recover = sblock->pagev[page_index]->recover;
1193 				if (recover) {
1194 					scrub_put_recover(fs_info, recover);
1195 					sblock->pagev[page_index]->recover =
1196 									NULL;
1197 				}
1198 				scrub_page_put(sblock->pagev[page_index]);
1199 			}
1200 		}
1201 		kfree(sblocks_for_recheck);
1202 	}
1203 
1204 	ret = unlock_full_stripe(fs_info, logical, full_stripe_locked);
1205 	if (ret < 0)
1206 		return ret;
1207 	return 0;
1208 }
1209 
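/*
 * Number of distinct ways a block in this bbio can be read: RAID5 allows the
 * direct read plus one parity-based rebuild (2), RAID6 allows the direct read
 * plus two rebuild combinations (3); for other profiles every stripe of the
 * bbio is its own mirror.
 */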
1210 static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
1211 {
1212 	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
1213 		return 2;
1214 	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
1215 		return 3;
1216 	else
1217 		return (int)bbio->num_stripes;
1218 }
1219 
1220 static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
1221 						 u64 *raid_map,
1222 						 u64 mapped_length,
1223 						 int nstripes, int mirror,
1224 						 int *stripe_index,
1225 						 u64 *stripe_offset)
1226 {
1227 	int i;
1228 
1229 	if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
1230 		/* RAID5/6 */
1231 		for (i = 0; i < nstripes; i++) {
1232 			if (raid_map[i] == RAID6_Q_STRIPE ||
1233 			    raid_map[i] == RAID5_P_STRIPE)
1234 				continue;
1235 
1236 			if (logical >= raid_map[i] &&
1237 			    logical < raid_map[i] + mapped_length)
1238 				break;
1239 		}
1240 
1241 		*stripe_index = i;
1242 		*stripe_offset = logical - raid_map[i];
1243 	} else {
1244 		/* The other RAID type */
1245 		*stripe_index = mirror;
1246 		*stripe_offset = 0;
1247 	}
1248 }
1249 
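/*
 * Build one scrub_block per mirror for @original_sblock: each PAGE_SIZE chunk
 * of the logical range is mapped with btrfs_map_sblock() and backed by freshly
 * allocated pages, so that every mirror can later be re-read and checked
 * independently by scrub_recheck_block().
 */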
1250 static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
1251 				     struct scrub_block *sblocks_for_recheck)
1252 {
1253 	struct scrub_ctx *sctx = original_sblock->sctx;
1254 	struct btrfs_fs_info *fs_info = sctx->fs_info;
1255 	u64 length = original_sblock->page_count * PAGE_SIZE;
1256 	u64 logical = original_sblock->pagev[0]->logical;
1257 	u64 generation = original_sblock->pagev[0]->generation;
1258 	u64 flags = original_sblock->pagev[0]->flags;
1259 	u64 have_csum = original_sblock->pagev[0]->have_csum;
1260 	struct scrub_recover *recover;
1261 	struct btrfs_bio *bbio;
1262 	u64 sublen;
1263 	u64 mapped_length;
1264 	u64 stripe_offset;
1265 	int stripe_index;
1266 	int page_index = 0;
1267 	int mirror_index;
1268 	int nmirrors;
1269 	int ret;
1270 
1271 	/*
1272 	 * note: the two members refs and outstanding_pages
1273 	 * are not used (and not set) in the blocks that are used for
1274 	 * the recheck procedure
1275 	 */
1276 
1277 	while (length > 0) {
1278 		sublen = min_t(u64, length, PAGE_SIZE);
1279 		mapped_length = sublen;
1280 		bbio = NULL;
1281 
1282 		/*
1283 		 * with a length of PAGE_SIZE, each returned stripe
1284 		 * represents one mirror
1285 		 */
1286 		btrfs_bio_counter_inc_blocked(fs_info);
1287 		ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
1288 				logical, &mapped_length, &bbio);
1289 		if (ret || !bbio || mapped_length < sublen) {
1290 			btrfs_put_bbio(bbio);
1291 			btrfs_bio_counter_dec(fs_info);
1292 			return -EIO;
1293 		}
1294 
1295 		recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
1296 		if (!recover) {
1297 			btrfs_put_bbio(bbio);
1298 			btrfs_bio_counter_dec(fs_info);
1299 			return -ENOMEM;
1300 		}
1301 
1302 		refcount_set(&recover->refs, 1);
1303 		recover->bbio = bbio;
1304 		recover->map_length = mapped_length;
1305 
1306 		BUG_ON(page_index >= SCRUB_MAX_PAGES_PER_BLOCK);
1307 
1308 		nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS);
1309 
1310 		for (mirror_index = 0; mirror_index < nmirrors;
1311 		     mirror_index++) {
1312 			struct scrub_block *sblock;
1313 			struct scrub_page *page;
1314 
1315 			sblock = sblocks_for_recheck + mirror_index;
1316 			sblock->sctx = sctx;
1317 
1318 			page = kzalloc(sizeof(*page), GFP_NOFS);
1319 			if (!page) {
1320 leave_nomem:
1321 				spin_lock(&sctx->stat_lock);
1322 				sctx->stat.malloc_errors++;
1323 				spin_unlock(&sctx->stat_lock);
1324 				scrub_put_recover(fs_info, recover);
1325 				return -ENOMEM;
1326 			}
1327 			scrub_page_get(page);
1328 			sblock->pagev[page_index] = page;
1329 			page->sblock = sblock;
1330 			page->flags = flags;
1331 			page->generation = generation;
1332 			page->logical = logical;
1333 			page->have_csum = have_csum;
1334 			if (have_csum)
1335 				memcpy(page->csum,
1336 				       original_sblock->pagev[0]->csum,
1337 				       sctx->csum_size);
1338 
1339 			scrub_stripe_index_and_offset(logical,
1340 						      bbio->map_type,
1341 						      bbio->raid_map,
1342 						      mapped_length,
1343 						      bbio->num_stripes -
1344 						      bbio->num_tgtdevs,
1345 						      mirror_index,
1346 						      &stripe_index,
1347 						      &stripe_offset);
1348 			page->physical = bbio->stripes[stripe_index].physical +
1349 					 stripe_offset;
1350 			page->dev = bbio->stripes[stripe_index].dev;
1351 
1352 			BUG_ON(page_index >= original_sblock->page_count);
1353 			page->physical_for_dev_replace =
1354 				original_sblock->pagev[page_index]->
1355 				physical_for_dev_replace;
1356 			/* for missing devices, dev->bdev is NULL */
1357 			page->mirror_num = mirror_index + 1;
1358 			sblock->page_count++;
1359 			page->page = alloc_page(GFP_NOFS);
1360 			if (!page->page)
1361 				goto leave_nomem;
1362 
1363 			scrub_get_recover(recover);
1364 			page->recover = recover;
1365 		}
1366 		scrub_put_recover(fs_info, recover);
1367 		length -= sublen;
1368 		logical += sublen;
1369 		page_index++;
1370 	}
1371 
1372 	return 0;
1373 }
1374 
1375 static void scrub_bio_wait_endio(struct bio *bio)
1376 {
1377 	complete(bio->bi_private);
1378 }
1379 
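/*
 * Submit @bio through the RAID5/6 recovery path for the stripe described by
 * @page->recover and wait for completion, so a parity-backed mirror can be
 * reconstructed and checked synchronously.
 */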
1380 static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
1381 					struct bio *bio,
1382 					struct scrub_page *page)
1383 {
1384 	DECLARE_COMPLETION_ONSTACK(done);
1385 	int ret;
1386 	int mirror_num;
1387 
1388 	bio->bi_iter.bi_sector = page->logical >> 9;
1389 	bio->bi_private = &done;
1390 	bio->bi_end_io = scrub_bio_wait_endio;
1391 
1392 	mirror_num = page->sblock->pagev[0]->mirror_num;
1393 	ret = raid56_parity_recover(fs_info, bio, page->recover->bbio,
1394 				    page->recover->map_length,
1395 				    mirror_num, 0);
1396 	if (ret)
1397 		return ret;
1398 
1399 	wait_for_completion_io(&done);
1400 	return blk_status_to_errno(bio->bi_status);
1401 }
1402 
1403 static void scrub_recheck_block_on_raid56(struct btrfs_fs_info *fs_info,
1404 					  struct scrub_block *sblock)
1405 {
1406 	struct scrub_page *first_page = sblock->pagev[0];
1407 	struct bio *bio;
1408 	int page_num;
1409 
1410 	/* All pages in sblock belong to the same stripe on the same device. */
1411 	ASSERT(first_page->dev);
1412 	if (!first_page->dev->bdev)
1413 		goto out;
1414 
1415 	bio = btrfs_io_bio_alloc(BIO_MAX_PAGES);
1416 	bio_set_dev(bio, first_page->dev->bdev);
1417 
1418 	for (page_num = 0; page_num < sblock->page_count; page_num++) {
1419 		struct scrub_page *page = sblock->pagev[page_num];
1420 
1421 		WARN_ON(!page->page);
1422 		bio_add_page(bio, page->page, PAGE_SIZE, 0);
1423 	}
1424 
1425 	if (scrub_submit_raid56_bio_wait(fs_info, bio, first_page)) {
1426 		bio_put(bio);
1427 		goto out;
1428 	}
1429 
1430 	bio_put(bio);
1431 
1432 	scrub_recheck_block_checksum(sblock);
1433 
1434 	return;
1435 out:
1436 	for (page_num = 0; page_num < sblock->page_count; page_num++)
1437 		sblock->pagev[page_num]->io_error = 1;
1438 
1439 	sblock->no_io_error_seen = 0;
1440 }
1441 
1442 /*
1443  * this function will check the on disk data for checksum errors, header
1444  * errors and read I/O errors. If any I/O errors happen, the exact pages
1445  * which are errored are marked as being bad. The goal is to enable scrub
1446  * to take those pages that are not errored from all the mirrors so that
1447  * the pages that are errored in the just handled mirror can be repaired.
1448  */
1449 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
1450 				struct scrub_block *sblock,
1451 				int retry_failed_mirror)
1452 {
1453 	int page_num;
1454 
1455 	sblock->no_io_error_seen = 1;
1456 
1457 	/* short cut for raid56 */
1458 	if (!retry_failed_mirror && scrub_is_page_on_raid56(sblock->pagev[0]))
1459 		return scrub_recheck_block_on_raid56(fs_info, sblock);
1460 
1461 	for (page_num = 0; page_num < sblock->page_count; page_num++) {
1462 		struct bio *bio;
1463 		struct scrub_page *page = sblock->pagev[page_num];
1464 
1465 		if (page->dev->bdev == NULL) {
1466 			page->io_error = 1;
1467 			sblock->no_io_error_seen = 0;
1468 			continue;
1469 		}
1470 
1471 		WARN_ON(!page->page);
1472 		bio = btrfs_io_bio_alloc(1);
1473 		bio_set_dev(bio, page->dev->bdev);
1474 
1475 		bio_add_page(bio, page->page, PAGE_SIZE, 0);
1476 		bio->bi_iter.bi_sector = page->physical >> 9;
1477 		bio->bi_opf = REQ_OP_READ;
1478 
1479 		if (btrfsic_submit_bio_wait(bio)) {
1480 			page->io_error = 1;
1481 			sblock->no_io_error_seen = 0;
1482 		}
1483 
1484 		bio_put(bio);
1485 	}
1486 
1487 	if (sblock->no_io_error_seen)
1488 		scrub_recheck_block_checksum(sblock);
1489 }
1490 
1491 static inline int scrub_check_fsid(u8 fsid[],
1492 				   struct scrub_page *spage)
1493 {
1494 	struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
1495 	int ret;
1496 
1497 	ret = memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1498 	return !ret;
1499 }
1500 
1501 static void scrub_recheck_block_checksum(struct scrub_block *sblock)
1502 {
1503 	sblock->header_error = 0;
1504 	sblock->checksum_error = 0;
1505 	sblock->generation_error = 0;
1506 
1507 	if (sblock->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA)
1508 		scrub_checksum_data(sblock);
1509 	else
1510 		scrub_checksum_tree_block(sblock);
1511 }
1512 
1513 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
1514 					     struct scrub_block *sblock_good)
1515 {
1516 	int page_num;
1517 	int ret = 0;
1518 
1519 	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
1520 		int ret_sub;
1521 
1522 		ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
1523 							   sblock_good,
1524 							   page_num, 1);
1525 		if (ret_sub)
1526 			ret = ret_sub;
1527 	}
1528 
1529 	return ret;
1530 }
1531 
1532 static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1533 					    struct scrub_block *sblock_good,
1534 					    int page_num, int force_write)
1535 {
1536 	struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1537 	struct scrub_page *page_good = sblock_good->pagev[page_num];
1538 	struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;
1539 
1540 	BUG_ON(page_bad->page == NULL);
1541 	BUG_ON(page_good->page == NULL);
1542 	if (force_write || sblock_bad->header_error ||
1543 	    sblock_bad->checksum_error || page_bad->io_error) {
1544 		struct bio *bio;
1545 		int ret;
1546 
1547 		if (!page_bad->dev->bdev) {
1548 			btrfs_warn_rl(fs_info,
1549 				"scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
1550 			return -EIO;
1551 		}
1552 
1553 		bio = btrfs_io_bio_alloc(1);
1554 		bio_set_dev(bio, page_bad->dev->bdev);
1555 		bio->bi_iter.bi_sector = page_bad->physical >> 9;
1556 		bio->bi_opf = REQ_OP_WRITE;
1557 
1558 		ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
1559 		if (PAGE_SIZE != ret) {
1560 			bio_put(bio);
1561 			return -EIO;
1562 		}
1563 
1564 		if (btrfsic_submit_bio_wait(bio)) {
1565 			btrfs_dev_stat_inc_and_print(page_bad->dev,
1566 				BTRFS_DEV_STAT_WRITE_ERRS);
1567 			atomic64_inc(&fs_info->dev_replace.num_write_errors);
1568 			bio_put(bio);
1569 			return -EIO;
1570 		}
1571 		bio_put(bio);
1572 	}
1573 
1574 	return 0;
1575 }
1576 
1577 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
1578 {
1579 	struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
1580 	int page_num;
1581 
1582 	/*
1583 	 * This block is used for the check of the parity on the source device,
1584 	 * so the data needn't be written into the destination device.
1585 	 */
1586 	if (sblock->sparity)
1587 		return;
1588 
1589 	for (page_num = 0; page_num < sblock->page_count; page_num++) {
1590 		int ret;
1591 
1592 		ret = scrub_write_page_to_dev_replace(sblock, page_num);
1593 		if (ret)
1594 			atomic64_inc(&fs_info->dev_replace.num_write_errors);
1595 	}
1596 }
1597 
1598 static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
1599 					   int page_num)
1600 {
1601 	struct scrub_page *spage = sblock->pagev[page_num];
1602 
1603 	BUG_ON(spage->page == NULL);
1604 	if (spage->io_error) {
1605 		void *mapped_buffer = kmap_atomic(spage->page);
1606 
1607 		clear_page(mapped_buffer);
1608 		flush_dcache_page(spage->page);
1609 		kunmap_atomic(mapped_buffer);
1610 	}
1611 	return scrub_add_page_to_wr_bio(sblock->sctx, spage);
1612 }
1613 
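/*
 * Queue @spage for writing to the dev-replace target. Pages are packed into
 * the current write bio while they stay physically and logically contiguous;
 * otherwise, or once pages_per_wr_bio pages are collected, the bio is
 * submitted and a new one is started.
 */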
1614 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
1615 				    struct scrub_page *spage)
1616 {
1617 	struct scrub_bio *sbio;
1618 	int ret;
1619 
1620 	mutex_lock(&sctx->wr_lock);
1621 again:
1622 	if (!sctx->wr_curr_bio) {
1623 		sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio),
1624 					      GFP_KERNEL);
1625 		if (!sctx->wr_curr_bio) {
1626 			mutex_unlock(&sctx->wr_lock);
1627 			return -ENOMEM;
1628 		}
1629 		sctx->wr_curr_bio->sctx = sctx;
1630 		sctx->wr_curr_bio->page_count = 0;
1631 	}
1632 	sbio = sctx->wr_curr_bio;
1633 	if (sbio->page_count == 0) {
1634 		struct bio *bio;
1635 
1636 		sbio->physical = spage->physical_for_dev_replace;
1637 		sbio->logical = spage->logical;
1638 		sbio->dev = sctx->wr_tgtdev;
1639 		bio = sbio->bio;
1640 		if (!bio) {
1641 			bio = btrfs_io_bio_alloc(sctx->pages_per_wr_bio);
1642 			sbio->bio = bio;
1643 		}
1644 
1645 		bio->bi_private = sbio;
1646 		bio->bi_end_io = scrub_wr_bio_end_io;
1647 		bio_set_dev(bio, sbio->dev->bdev);
1648 		bio->bi_iter.bi_sector = sbio->physical >> 9;
1649 		bio->bi_opf = REQ_OP_WRITE;
1650 		sbio->status = 0;
1651 	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1652 		   spage->physical_for_dev_replace ||
1653 		   sbio->logical + sbio->page_count * PAGE_SIZE !=
1654 		   spage->logical) {
1655 		scrub_wr_submit(sctx);
1656 		goto again;
1657 	}
1658 
1659 	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1660 	if (ret != PAGE_SIZE) {
1661 		if (sbio->page_count < 1) {
1662 			bio_put(sbio->bio);
1663 			sbio->bio = NULL;
1664 			mutex_unlock(&sctx->wr_lock);
1665 			return -EIO;
1666 		}
1667 		scrub_wr_submit(sctx);
1668 		goto again;
1669 	}
1670 
1671 	sbio->pagev[sbio->page_count] = spage;
1672 	scrub_page_get(spage);
1673 	sbio->page_count++;
1674 	if (sbio->page_count == sctx->pages_per_wr_bio)
1675 		scrub_wr_submit(sctx);
1676 	mutex_unlock(&sctx->wr_lock);
1677 
1678 	return 0;
1679 }
1680 
1681 static void scrub_wr_submit(struct scrub_ctx *sctx)
1682 {
1683 	struct scrub_bio *sbio;
1684 
1685 	if (!sctx->wr_curr_bio)
1686 		return;
1687 
1688 	sbio = sctx->wr_curr_bio;
1689 	sctx->wr_curr_bio = NULL;
1690 	WARN_ON(!sbio->bio->bi_disk);
1691 	scrub_pending_bio_inc(sctx);
1692 	/* process all writes in a single worker thread. Then the block layer
1693 	 * orders the requests before sending them to the driver which
1694 	 * doubled the write performance on spinning disks when measured
1695 	 * with Linux 3.5 */
1696 	btrfsic_submit_bio(sbio->bio);
1697 }
1698 
1699 static void scrub_wr_bio_end_io(struct bio *bio)
1700 {
1701 	struct scrub_bio *sbio = bio->bi_private;
1702 	struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
1703 
1704 	sbio->status = bio->bi_status;
1705 	sbio->bio = bio;
1706 
1707 	btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
1708 			 scrub_wr_bio_end_io_worker, NULL, NULL);
1709 	btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
1710 }
1711 
1712 static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
1713 {
1714 	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
1715 	struct scrub_ctx *sctx = sbio->sctx;
1716 	int i;
1717 
1718 	WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
1719 	if (sbio->status) {
1720 		struct btrfs_dev_replace *dev_replace =
1721 			&sbio->sctx->fs_info->dev_replace;
1722 
1723 		for (i = 0; i < sbio->page_count; i++) {
1724 			struct scrub_page *spage = sbio->pagev[i];
1725 
1726 			spage->io_error = 1;
1727 			atomic64_inc(&dev_replace->num_write_errors);
1728 		}
1729 	}
1730 
1731 	for (i = 0; i < sbio->page_count; i++)
1732 		scrub_page_put(sbio->pagev[i]);
1733 
1734 	bio_put(sbio->bio);
1735 	kfree(sbio);
1736 	scrub_pending_bio_dec(sctx);
1737 }
1738 
1739 static int scrub_checksum(struct scrub_block *sblock)
1740 {
1741 	u64 flags;
1742 	int ret;
1743 
1744 	/*
1745 	 * No need to initialize these stats currently,
1746 	 * because this function only uses the return value
1747 	 * instead of these stat values.
1748 	 *
1749 	 * Todo:
1750 	 * always use stats
1751 	 */
1752 	sblock->header_error = 0;
1753 	sblock->generation_error = 0;
1754 	sblock->checksum_error = 0;
1755 
1756 	WARN_ON(sblock->page_count < 1);
1757 	flags = sblock->pagev[0]->flags;
1758 	ret = 0;
1759 	if (flags & BTRFS_EXTENT_FLAG_DATA)
1760 		ret = scrub_checksum_data(sblock);
1761 	else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1762 		ret = scrub_checksum_tree_block(sblock);
1763 	else if (flags & BTRFS_EXTENT_FLAG_SUPER)
1764 		(void)scrub_checksum_super(sblock);
1765 	else
1766 		WARN_ON(1);
1767 	if (ret)
1768 		scrub_handle_errored_block(sblock);
1769 
1770 	return ret;
1771 }
1772 
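/*
 * Verify the checksum of one data sector: hash sectorsize bytes of the
 * block (possibly spanning multiple pages) with btrfs_csum_data() and
 * compare the result with the csum stored in the first scrub_page.
 * Returns 1 and sets ->checksum_error on mismatch, 0 otherwise (also
 * when no checksum is available).
 */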
1773 static int scrub_checksum_data(struct scrub_block *sblock)
1774 {
1775 	struct scrub_ctx *sctx = sblock->sctx;
1776 	u8 csum[BTRFS_CSUM_SIZE];
1777 	u8 *on_disk_csum;
1778 	struct page *page;
1779 	void *buffer;
1780 	u32 crc = ~(u32)0;
1781 	u64 len;
1782 	int index;
1783 
1784 	BUG_ON(sblock->page_count < 1);
1785 	if (!sblock->pagev[0]->have_csum)
1786 		return 0;
1787 
1788 	on_disk_csum = sblock->pagev[0]->csum;
1789 	page = sblock->pagev[0]->page;
1790 	buffer = kmap_atomic(page);
1791 
1792 	len = sctx->fs_info->sectorsize;
1793 	index = 0;
1794 	for (;;) {
1795 		u64 l = min_t(u64, len, PAGE_SIZE);
1796 
1797 		crc = btrfs_csum_data(buffer, crc, l);
1798 		kunmap_atomic(buffer);
1799 		len -= l;
1800 		if (len == 0)
1801 			break;
1802 		index++;
1803 		BUG_ON(index >= sblock->page_count);
1804 		BUG_ON(!sblock->pagev[index]->page);
1805 		page = sblock->pagev[index]->page;
1806 		buffer = kmap_atomic(page);
1807 	}
1808 
1809 	btrfs_csum_final(crc, csum);
1810 	if (memcmp(csum, on_disk_csum, sctx->csum_size))
1811 		sblock->checksum_error = 1;
1812 
1813 	return sblock->checksum_error;
1814 }
1815 
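/*
 * Verify a tree block: compare bytenr, generation, fsid and chunk tree
 * UUID from the on-disk header with the expected values, then checksum
 * the node contents (everything behind the csum field) and compare
 * against the csum stored in the header. Returns nonzero if a header
 * or checksum error was found.
 */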
1816 static int scrub_checksum_tree_block(struct scrub_block *sblock)
1817 {
1818 	struct scrub_ctx *sctx = sblock->sctx;
1819 	struct btrfs_header *h;
1820 	struct btrfs_fs_info *fs_info = sctx->fs_info;
1821 	u8 calculated_csum[BTRFS_CSUM_SIZE];
1822 	u8 on_disk_csum[BTRFS_CSUM_SIZE];
1823 	struct page *page;
1824 	void *mapped_buffer;
1825 	u64 mapped_size;
1826 	void *p;
1827 	u32 crc = ~(u32)0;
1828 	u64 len;
1829 	int index;
1830 
1831 	BUG_ON(sblock->page_count < 1);
1832 	page = sblock->pagev[0]->page;
1833 	mapped_buffer = kmap_atomic(page);
1834 	h = (struct btrfs_header *)mapped_buffer;
1835 	memcpy(on_disk_csum, h->csum, sctx->csum_size);
1836 
1837 	/*
1838 	 * we don't use the getter functions here, as we
1839 	 * a) don't have an extent buffer and
1840 	 * b) the page is already kmapped
1841 	 */
1842 	if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
1843 		sblock->header_error = 1;
1844 
1845 	if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h)) {
1846 		sblock->header_error = 1;
1847 		sblock->generation_error = 1;
1848 	}
1849 
1850 	if (!scrub_check_fsid(h->fsid, sblock->pagev[0]))
1851 		sblock->header_error = 1;
1852 
1853 	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1854 		   BTRFS_UUID_SIZE))
1855 		sblock->header_error = 1;
1856 
1857 	len = sctx->fs_info->nodesize - BTRFS_CSUM_SIZE;
1858 	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1859 	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1860 	index = 0;
1861 	for (;;) {
1862 		u64 l = min_t(u64, len, mapped_size);
1863 
1864 		crc = btrfs_csum_data(p, crc, l);
1865 		kunmap_atomic(mapped_buffer);
1866 		len -= l;
1867 		if (len == 0)
1868 			break;
1869 		index++;
1870 		BUG_ON(index >= sblock->page_count);
1871 		BUG_ON(!sblock->pagev[index]->page);
1872 		page = sblock->pagev[index]->page;
1873 		mapped_buffer = kmap_atomic(page);
1874 		mapped_size = PAGE_SIZE;
1875 		p = mapped_buffer;
1876 	}
1877 
1878 	btrfs_csum_final(crc, calculated_csum);
1879 	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
1880 		sblock->checksum_error = 1;
1881 
1882 	return sblock->header_error || sblock->checksum_error;
1883 }
1884 
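/*
 * Verify a super block copy: check bytenr, generation and fsid, then
 * checksum BTRFS_SUPER_INFO_SIZE bytes. Errors are only accounted in
 * the scrub stats and device error counters, they are not repaired
 * here.
 */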
1885 static int scrub_checksum_super(struct scrub_block *sblock)
1886 {
1887 	struct btrfs_super_block *s;
1888 	struct scrub_ctx *sctx = sblock->sctx;
1889 	u8 calculated_csum[BTRFS_CSUM_SIZE];
1890 	u8 on_disk_csum[BTRFS_CSUM_SIZE];
1891 	struct page *page;
1892 	void *mapped_buffer;
1893 	u64 mapped_size;
1894 	void *p;
1895 	u32 crc = ~(u32)0;
1896 	int fail_gen = 0;
1897 	int fail_cor = 0;
1898 	u64 len;
1899 	int index;
1900 
1901 	BUG_ON(sblock->page_count < 1);
1902 	page = sblock->pagev[0]->page;
1903 	mapped_buffer = kmap_atomic(page);
1904 	s = (struct btrfs_super_block *)mapped_buffer;
1905 	memcpy(on_disk_csum, s->csum, sctx->csum_size);
1906 
1907 	if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
1908 		++fail_cor;
1909 
1910 	if (sblock->pagev[0]->generation != btrfs_super_generation(s))
1911 		++fail_gen;
1912 
1913 	if (!scrub_check_fsid(s->fsid, sblock->pagev[0]))
1914 		++fail_cor;
1915 
1916 	len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
1917 	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1918 	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1919 	index = 0;
1920 	for (;;) {
1921 		u64 l = min_t(u64, len, mapped_size);
1922 
1923 		crc = btrfs_csum_data(p, crc, l);
1924 		kunmap_atomic(mapped_buffer);
1925 		len -= l;
1926 		if (len == 0)
1927 			break;
1928 		index++;
1929 		BUG_ON(index >= sblock->page_count);
1930 		BUG_ON(!sblock->pagev[index]->page);
1931 		page = sblock->pagev[index]->page;
1932 		mapped_buffer = kmap_atomic(page);
1933 		mapped_size = PAGE_SIZE;
1934 		p = mapped_buffer;
1935 	}
1936 
1937 	btrfs_csum_final(crc, calculated_csum);
1938 	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
1939 		++fail_cor;
1940 
1941 	if (fail_cor + fail_gen) {
1942 		/*
1943 		 * If we find an error in a super block, we just report it.
1944 		 * The super blocks will get rewritten with the next
1945 		 * transaction commit anyway.
1946 		 */
1947 		spin_lock(&sctx->stat_lock);
1948 		++sctx->stat.super_errors;
1949 		spin_unlock(&sctx->stat_lock);
1950 		if (fail_cor)
1951 			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
1952 				BTRFS_DEV_STAT_CORRUPTION_ERRS);
1953 		else
1954 			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
1955 				BTRFS_DEV_STAT_GENERATION_ERRS);
1956 	}
1957 
1958 	return fail_cor + fail_gen;
1959 }
1960 
1961 static void scrub_block_get(struct scrub_block *sblock)
1962 {
1963 	refcount_inc(&sblock->refs);
1964 }
1965 
1966 static void scrub_block_put(struct scrub_block *sblock)
1967 {
1968 	if (refcount_dec_and_test(&sblock->refs)) {
1969 		int i;
1970 
1971 		if (sblock->sparity)
1972 			scrub_parity_put(sblock->sparity);
1973 
1974 		for (i = 0; i < sblock->page_count; i++)
1975 			scrub_page_put(sblock->pagev[i]);
1976 		kfree(sblock);
1977 	}
1978 }
1979 
1980 static void scrub_page_get(struct scrub_page *spage)
1981 {
1982 	atomic_inc(&spage->refs);
1983 }
1984 
1985 static void scrub_page_put(struct scrub_page *spage)
1986 {
1987 	if (atomic_dec_and_test(&spage->refs)) {
1988 		if (spage->page)
1989 			__free_page(spage->page);
1990 		kfree(spage);
1991 	}
1992 }
1993 
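/* Submit the current read bio, if any, and account it as in flight. */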
1994 static void scrub_submit(struct scrub_ctx *sctx)
1995 {
1996 	struct scrub_bio *sbio;
1997 
1998 	if (sctx->curr == -1)
1999 		return;
2000 
2001 	sbio = sctx->bios[sctx->curr];
2002 	sctx->curr = -1;
2003 	scrub_pending_bio_inc(sctx);
2004 	btrfsic_submit_bio(sbio->bio);
2005 }
2006 
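/*
 * Queue a page for reading: append it to the current read bio if it is
 * physically and logically contiguous and on the same device, otherwise
 * submit that bio and retry with a fresh one.
 */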
2007 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
2008 				    struct scrub_page *spage)
2009 {
2010 	struct scrub_block *sblock = spage->sblock;
2011 	struct scrub_bio *sbio;
2012 	int ret;
2013 
2014 again:
2015 	/*
2016 	 * grab a fresh bio or wait for one to become available
2017 	 */
2018 	while (sctx->curr == -1) {
2019 		spin_lock(&sctx->list_lock);
2020 		sctx->curr = sctx->first_free;
2021 		if (sctx->curr != -1) {
2022 			sctx->first_free = sctx->bios[sctx->curr]->next_free;
2023 			sctx->bios[sctx->curr]->next_free = -1;
2024 			sctx->bios[sctx->curr]->page_count = 0;
2025 			spin_unlock(&sctx->list_lock);
2026 		} else {
2027 			spin_unlock(&sctx->list_lock);
2028 			wait_event(sctx->list_wait, sctx->first_free != -1);
2029 		}
2030 	}
2031 	sbio = sctx->bios[sctx->curr];
2032 	if (sbio->page_count == 0) {
2033 		struct bio *bio;
2034 
2035 		sbio->physical = spage->physical;
2036 		sbio->logical = spage->logical;
2037 		sbio->dev = spage->dev;
2038 		bio = sbio->bio;
2039 		if (!bio) {
2040 			bio = btrfs_io_bio_alloc(sctx->pages_per_rd_bio);
2041 			sbio->bio = bio;
2042 		}
2043 
2044 		bio->bi_private = sbio;
2045 		bio->bi_end_io = scrub_bio_end_io;
2046 		bio_set_dev(bio, sbio->dev->bdev);
2047 		bio->bi_iter.bi_sector = sbio->physical >> 9;
2048 		bio->bi_opf = REQ_OP_READ;
2049 		sbio->status = 0;
2050 	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
2051 		   spage->physical ||
2052 		   sbio->logical + sbio->page_count * PAGE_SIZE !=
2053 		   spage->logical ||
2054 		   sbio->dev != spage->dev) {
2055 		scrub_submit(sctx);
2056 		goto again;
2057 	}
2058 
2059 	sbio->pagev[sbio->page_count] = spage;
2060 	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
2061 	if (ret != PAGE_SIZE) {
2062 		if (sbio->page_count < 1) {
2063 			bio_put(sbio->bio);
2064 			sbio->bio = NULL;
2065 			return -EIO;
2066 		}
2067 		scrub_submit(sctx);
2068 		goto again;
2069 	}
2070 
2071 	scrub_block_get(sblock); /* one for the page added to the bio */
2072 	atomic_inc(&sblock->outstanding_pages);
2073 	sbio->page_count++;
2074 	if (sbio->page_count == sctx->pages_per_rd_bio)
2075 		scrub_submit(sctx);
2076 
2077 	return 0;
2078 }
2079 
2080 static void scrub_missing_raid56_end_io(struct bio *bio)
2081 {
2082 	struct scrub_block *sblock = bio->bi_private;
2083 	struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
2084 
2085 	if (bio->bi_status)
2086 		sblock->no_io_error_seen = 0;
2087 
2088 	bio_put(bio);
2089 
2090 	btrfs_queue_work(fs_info->scrub_workers, &sblock->work);
2091 }
2092 
2093 static void scrub_missing_raid56_worker(struct btrfs_work *work)
2094 {
2095 	struct scrub_block *sblock = container_of(work, struct scrub_block, work);
2096 	struct scrub_ctx *sctx = sblock->sctx;
2097 	struct btrfs_fs_info *fs_info = sctx->fs_info;
2098 	u64 logical;
2099 	struct btrfs_device *dev;
2100 
2101 	logical = sblock->pagev[0]->logical;
2102 	dev = sblock->pagev[0]->dev;
2103 
2104 	if (sblock->no_io_error_seen)
2105 		scrub_recheck_block_checksum(sblock);
2106 
2107 	if (!sblock->no_io_error_seen) {
2108 		spin_lock(&sctx->stat_lock);
2109 		sctx->stat.read_errors++;
2110 		spin_unlock(&sctx->stat_lock);
2111 		btrfs_err_rl_in_rcu(fs_info,
2112 			"IO error rebuilding logical %llu for dev %s",
2113 			logical, rcu_str_deref(dev->name));
2114 	} else if (sblock->header_error || sblock->checksum_error) {
2115 		spin_lock(&sctx->stat_lock);
2116 		sctx->stat.uncorrectable_errors++;
2117 		spin_unlock(&sctx->stat_lock);
2118 		btrfs_err_rl_in_rcu(fs_info,
2119 			"failed to rebuild valid logical %llu for dev %s",
2120 			logical, rcu_str_deref(dev->name));
2121 	} else {
2122 		scrub_write_block_to_dev_replace(sblock);
2123 	}
2124 
2125 	scrub_block_put(sblock);
2126 
2127 	if (sctx->is_dev_replace && sctx->flush_all_writes) {
2128 		mutex_lock(&sctx->wr_lock);
2129 		scrub_wr_submit(sctx);
2130 		mutex_unlock(&sctx->wr_lock);
2131 	}
2132 
2133 	scrub_pending_bio_dec(sctx);
2134 }
2135 
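/*
 * Read the pages of a block that sits on a missing RAID5/6 device by
 * handing them to the raid56 code, which rebuilds the data from the
 * remaining stripes. Completion is handled in
 * scrub_missing_raid56_worker().
 */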
2136 static void scrub_missing_raid56_pages(struct scrub_block *sblock)
2137 {
2138 	struct scrub_ctx *sctx = sblock->sctx;
2139 	struct btrfs_fs_info *fs_info = sctx->fs_info;
2140 	u64 length = sblock->page_count * PAGE_SIZE;
2141 	u64 logical = sblock->pagev[0]->logical;
2142 	struct btrfs_bio *bbio = NULL;
2143 	struct bio *bio;
2144 	struct btrfs_raid_bio *rbio;
2145 	int ret;
2146 	int i;
2147 
2148 	btrfs_bio_counter_inc_blocked(fs_info);
2149 	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
2150 			&length, &bbio);
2151 	if (ret || !bbio || !bbio->raid_map)
2152 		goto bbio_out;
2153 
2154 	if (WARN_ON(!sctx->is_dev_replace ||
2155 		    !(bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
2156 		/*
2157 		 * We shouldn't be scrubbing a missing device. Even for dev
2158 		 * replace, we should only get here for RAID 5/6. We either
2159 		 * managed to mount something with no mirrors remaining or
2160 		 * there's a bug in scrub_remap_extent()/btrfs_map_block().
2161 		 */
2162 		goto bbio_out;
2163 	}
2164 
2165 	bio = btrfs_io_bio_alloc(0);
2166 	bio->bi_iter.bi_sector = logical >> 9;
2167 	bio->bi_private = sblock;
2168 	bio->bi_end_io = scrub_missing_raid56_end_io;
2169 
2170 	rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length);
2171 	if (!rbio)
2172 		goto rbio_out;
2173 
2174 	for (i = 0; i < sblock->page_count; i++) {
2175 		struct scrub_page *spage = sblock->pagev[i];
2176 
2177 		raid56_add_scrub_pages(rbio, spage->page, spage->logical);
2178 	}
2179 
2180 	btrfs_init_work(&sblock->work, btrfs_scrub_helper,
2181 			scrub_missing_raid56_worker, NULL, NULL);
2182 	scrub_block_get(sblock);
2183 	scrub_pending_bio_inc(sctx);
2184 	raid56_submit_missing_rbio(rbio);
2185 	return;
2186 
2187 rbio_out:
2188 	bio_put(bio);
2189 bbio_out:
2190 	btrfs_bio_counter_dec(fs_info);
2191 	btrfs_put_bbio(bbio);
2192 	spin_lock(&sctx->stat_lock);
2193 	sctx->stat.malloc_errors++;
2194 	spin_unlock(&sctx->stat_lock);
2195 }
2196 
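/*
 * Build a scrub_block for [logical, logical + len) from freshly
 * allocated pages and queue all of them for reading. Blocks on a
 * missing RAID5/6 device are rebuilt via scrub_missing_raid56_pages()
 * instead.
 */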
2197 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
2198 		       u64 physical, struct btrfs_device *dev, u64 flags,
2199 		       u64 gen, int mirror_num, u8 *csum, int force,
2200 		       u64 physical_for_dev_replace)
2201 {
2202 	struct scrub_block *sblock;
2203 	int index;
2204 
2205 	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
2206 	if (!sblock) {
2207 		spin_lock(&sctx->stat_lock);
2208 		sctx->stat.malloc_errors++;
2209 		spin_unlock(&sctx->stat_lock);
2210 		return -ENOMEM;
2211 	}
2212 
2213 	/* one ref inside this function, plus one for each page added to
2214 	 * a bio later on */
2215 	refcount_set(&sblock->refs, 1);
2216 	sblock->sctx = sctx;
2217 	sblock->no_io_error_seen = 1;
2218 
2219 	for (index = 0; len > 0; index++) {
2220 		struct scrub_page *spage;
2221 		u64 l = min_t(u64, len, PAGE_SIZE);
2222 
2223 		spage = kzalloc(sizeof(*spage), GFP_KERNEL);
2224 		if (!spage) {
2225 leave_nomem:
2226 			spin_lock(&sctx->stat_lock);
2227 			sctx->stat.malloc_errors++;
2228 			spin_unlock(&sctx->stat_lock);
2229 			scrub_block_put(sblock);
2230 			return -ENOMEM;
2231 		}
2232 		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2233 		scrub_page_get(spage);
2234 		sblock->pagev[index] = spage;
2235 		spage->sblock = sblock;
2236 		spage->dev = dev;
2237 		spage->flags = flags;
2238 		spage->generation = gen;
2239 		spage->logical = logical;
2240 		spage->physical = physical;
2241 		spage->physical_for_dev_replace = physical_for_dev_replace;
2242 		spage->mirror_num = mirror_num;
2243 		if (csum) {
2244 			spage->have_csum = 1;
2245 			memcpy(spage->csum, csum, sctx->csum_size);
2246 		} else {
2247 			spage->have_csum = 0;
2248 		}
2249 		sblock->page_count++;
2250 		spage->page = alloc_page(GFP_KERNEL);
2251 		if (!spage->page)
2252 			goto leave_nomem;
2253 		len -= l;
2254 		logical += l;
2255 		physical += l;
2256 		physical_for_dev_replace += l;
2257 	}
2258 
2259 	WARN_ON(sblock->page_count == 0);
2260 	if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
2261 		/*
2262 		 * This case should only be hit for RAID 5/6 device replace. See
2263 		 * the comment in scrub_missing_raid56_pages() for details.
2264 		 */
2265 		scrub_missing_raid56_pages(sblock);
2266 	} else {
2267 		for (index = 0; index < sblock->page_count; index++) {
2268 			struct scrub_page *spage = sblock->pagev[index];
2269 			int ret;
2270 
2271 			ret = scrub_add_page_to_rd_bio(sctx, spage);
2272 			if (ret) {
2273 				scrub_block_put(sblock);
2274 				return ret;
2275 			}
2276 		}
2277 
2278 		if (force)
2279 			scrub_submit(sctx);
2280 	}
2281 
2282 	/* last one frees, either here or in bio completion for last page */
2283 	scrub_block_put(sblock);
2284 	return 0;
2285 }
2286 
2287 static void scrub_bio_end_io(struct bio *bio)
2288 {
2289 	struct scrub_bio *sbio = bio->bi_private;
2290 	struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
2291 
2292 	sbio->status = bio->bi_status;
2293 	sbio->bio = bio;
2294 
2295 	btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
2296 }
2297 
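/*
 * Read bio completion work: propagate I/O errors to the affected pages,
 * complete every scrub_block whose last outstanding page finished and
 * return the scrub_bio to the free list.
 */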
2298 static void scrub_bio_end_io_worker(struct btrfs_work *work)
2299 {
2300 	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
2301 	struct scrub_ctx *sctx = sbio->sctx;
2302 	int i;
2303 
2304 	BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
2305 	if (sbio->status) {
2306 		for (i = 0; i < sbio->page_count; i++) {
2307 			struct scrub_page *spage = sbio->pagev[i];
2308 
2309 			spage->io_error = 1;
2310 			spage->sblock->no_io_error_seen = 0;
2311 		}
2312 	}
2313 
2314 	/* now complete the scrub_block items that have all pages completed */
2315 	for (i = 0; i < sbio->page_count; i++) {
2316 		struct scrub_page *spage = sbio->pagev[i];
2317 		struct scrub_block *sblock = spage->sblock;
2318 
2319 		if (atomic_dec_and_test(&sblock->outstanding_pages))
2320 			scrub_block_complete(sblock);
2321 		scrub_block_put(sblock);
2322 	}
2323 
2324 	bio_put(sbio->bio);
2325 	sbio->bio = NULL;
2326 	spin_lock(&sctx->list_lock);
2327 	sbio->next_free = sctx->first_free;
2328 	sctx->first_free = sbio->index;
2329 	spin_unlock(&sctx->list_lock);
2330 
2331 	if (sctx->is_dev_replace && sctx->flush_all_writes) {
2332 		mutex_lock(&sctx->wr_lock);
2333 		scrub_wr_submit(sctx);
2334 		mutex_unlock(&sctx->wr_lock);
2335 	}
2336 
2337 	scrub_pending_bio_dec(sctx);
2338 }
2339 
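/*
 * Mark the sectors covered by [start, start + len) in the given parity
 * bitmap, wrapping around at the end of the stripe if necessary.
 */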
2340 static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
2341 				       unsigned long *bitmap,
2342 				       u64 start, u64 len)
2343 {
2344 	u64 offset;
2345 	u64 nsectors64;
2346 	u32 nsectors;
2347 	int sectorsize = sparity->sctx->fs_info->sectorsize;
2348 
2349 	if (len >= sparity->stripe_len) {
2350 		bitmap_set(bitmap, 0, sparity->nsectors);
2351 		return;
2352 	}
2353 
2354 	start -= sparity->logic_start;
2355 	start = div64_u64_rem(start, sparity->stripe_len, &offset);
2356 	offset = div_u64(offset, sectorsize);
2357 	nsectors64 = div_u64(len, sectorsize);
2358 
2359 	ASSERT(nsectors64 < UINT_MAX);
2360 	nsectors = (u32)nsectors64;
2361 
2362 	if (offset + nsectors <= sparity->nsectors) {
2363 		bitmap_set(bitmap, offset, nsectors);
2364 		return;
2365 	}
2366 
2367 	bitmap_set(bitmap, offset, sparity->nsectors - offset);
2368 	bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
2369 }
2370 
2371 static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
2372 						   u64 start, u64 len)
2373 {
2374 	__scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
2375 }
2376 
2377 static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
2378 						  u64 start, u64 len)
2379 {
2380 	__scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
2381 }
2382 
2383 static void scrub_block_complete(struct scrub_block *sblock)
2384 {
2385 	int corrupted = 0;
2386 
2387 	if (!sblock->no_io_error_seen) {
2388 		corrupted = 1;
2389 		scrub_handle_errored_block(sblock);
2390 	} else {
2391 		/*
2392 		 * In the dev replace case: if the block has a checksum error,
2393 		 * it is written via the repair mechanism; otherwise it is
2394 		 * written here.
2395 		 */
2396 		corrupted = scrub_checksum(sblock);
2397 		if (!corrupted && sblock->sctx->is_dev_replace)
2398 			scrub_write_block_to_dev_replace(sblock);
2399 	}
2400 
2401 	if (sblock->sparity && corrupted && !sblock->data_corrected) {
2402 		u64 start = sblock->pagev[0]->logical;
2403 		u64 end = sblock->pagev[sblock->page_count - 1]->logical +
2404 			  PAGE_SIZE;
2405 
2406 		scrub_parity_mark_sectors_error(sblock->sparity,
2407 						start, end - start);
2408 	}
2409 }
2410 
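/*
 * Look up the data checksum for @logical in sctx->csum_list, dropping
 * sums that lie entirely before it. Returns 1 and copies the checksum
 * into @csum if one was found, 0 otherwise.
 */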
2411 static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
2412 {
2413 	struct btrfs_ordered_sum *sum = NULL;
2414 	unsigned long index;
2415 	unsigned long num_sectors;
2416 
2417 	while (!list_empty(&sctx->csum_list)) {
2418 		sum = list_first_entry(&sctx->csum_list,
2419 				       struct btrfs_ordered_sum, list);
2420 		if (sum->bytenr > logical)
2421 			return 0;
2422 		if (sum->bytenr + sum->len > logical)
2423 			break;
2424 
2425 		++sctx->stat.csum_discards;
2426 		list_del(&sum->list);
2427 		kfree(sum);
2428 		sum = NULL;
2429 	}
2430 	if (!sum)
2431 		return 0;
2432 
2433 	index = div_u64(logical - sum->bytenr, sctx->fs_info->sectorsize);
2434 	ASSERT(index < UINT_MAX);
2435 
2436 	num_sectors = sum->len / sctx->fs_info->sectorsize;
2437 	memcpy(csum, sum->sums + index, sctx->csum_size);
2438 	if (index == num_sectors - 1) {
2439 		list_del(&sum->list);
2440 		kfree(sum);
2441 	}
2442 	return 1;
2443 }
2444 
2445 /* scrub_extent() tries to collect up to 64 kB for each bio */
2446 static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map,
2447 			u64 logical, u64 len,
2448 			u64 physical, struct btrfs_device *dev, u64 flags,
2449 			u64 gen, int mirror_num, u64 physical_for_dev_replace)
2450 {
2451 	int ret;
2452 	u8 csum[BTRFS_CSUM_SIZE];
2453 	u32 blocksize;
2454 
2455 	if (flags & BTRFS_EXTENT_FLAG_DATA) {
2456 		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
2457 			blocksize = map->stripe_len;
2458 		else
2459 			blocksize = sctx->fs_info->sectorsize;
2460 		spin_lock(&sctx->stat_lock);
2461 		sctx->stat.data_extents_scrubbed++;
2462 		sctx->stat.data_bytes_scrubbed += len;
2463 		spin_unlock(&sctx->stat_lock);
2464 	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2465 		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
2466 			blocksize = map->stripe_len;
2467 		else
2468 			blocksize = sctx->fs_info->nodesize;
2469 		spin_lock(&sctx->stat_lock);
2470 		sctx->stat.tree_extents_scrubbed++;
2471 		sctx->stat.tree_bytes_scrubbed += len;
2472 		spin_unlock(&sctx->stat_lock);
2473 	} else {
2474 		blocksize = sctx->fs_info->sectorsize;
2475 		WARN_ON(1);
2476 	}
2477 
2478 	while (len) {
2479 		u64 l = min_t(u64, len, blocksize);
2480 		int have_csum = 0;
2481 
2482 		if (flags & BTRFS_EXTENT_FLAG_DATA) {
2483 			/* push csums to sbio */
2484 			have_csum = scrub_find_csum(sctx, logical, csum);
2485 			if (have_csum == 0)
2486 				++sctx->stat.no_csum;
2487 		}
2488 		ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
2489 				  mirror_num, have_csum ? csum : NULL, 0,
2490 				  physical_for_dev_replace);
2491 		if (ret)
2492 			return ret;
2493 		len -= l;
2494 		logical += l;
2495 		physical += l;
2496 		physical_for_dev_replace += l;
2497 	}
2498 	return 0;
2499 }
2500 
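/*
 * Like scrub_pages(), but for blocks that are part of a RAID5/6 parity
 * stripe: every page is additionally linked into the scrub_parity page
 * list so the parity can be checked after all data has been read.
 */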
2501 static int scrub_pages_for_parity(struct scrub_parity *sparity,
2502 				  u64 logical, u64 len,
2503 				  u64 physical, struct btrfs_device *dev,
2504 				  u64 flags, u64 gen, int mirror_num, u8 *csum)
2505 {
2506 	struct scrub_ctx *sctx = sparity->sctx;
2507 	struct scrub_block *sblock;
2508 	int index;
2509 
2510 	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
2511 	if (!sblock) {
2512 		spin_lock(&sctx->stat_lock);
2513 		sctx->stat.malloc_errors++;
2514 		spin_unlock(&sctx->stat_lock);
2515 		return -ENOMEM;
2516 	}
2517 
2518 	/* one ref inside this function, plus one for each page added to
2519 	 * a bio later on */
2520 	refcount_set(&sblock->refs, 1);
2521 	sblock->sctx = sctx;
2522 	sblock->no_io_error_seen = 1;
2523 	sblock->sparity = sparity;
2524 	scrub_parity_get(sparity);
2525 
2526 	for (index = 0; len > 0; index++) {
2527 		struct scrub_page *spage;
2528 		u64 l = min_t(u64, len, PAGE_SIZE);
2529 
2530 		spage = kzalloc(sizeof(*spage), GFP_KERNEL);
2531 		if (!spage) {
2532 leave_nomem:
2533 			spin_lock(&sctx->stat_lock);
2534 			sctx->stat.malloc_errors++;
2535 			spin_unlock(&sctx->stat_lock);
2536 			scrub_block_put(sblock);
2537 			return -ENOMEM;
2538 		}
2539 		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2540 		/* For scrub block */
2541 		scrub_page_get(spage);
2542 		sblock->pagev[index] = spage;
2543 		/* For scrub parity */
2544 		scrub_page_get(spage);
2545 		list_add_tail(&spage->list, &sparity->spages);
2546 		spage->sblock = sblock;
2547 		spage->dev = dev;
2548 		spage->flags = flags;
2549 		spage->generation = gen;
2550 		spage->logical = logical;
2551 		spage->physical = physical;
2552 		spage->mirror_num = mirror_num;
2553 		if (csum) {
2554 			spage->have_csum = 1;
2555 			memcpy(spage->csum, csum, sctx->csum_size);
2556 		} else {
2557 			spage->have_csum = 0;
2558 		}
2559 		sblock->page_count++;
2560 		spage->page = alloc_page(GFP_KERNEL);
2561 		if (!spage->page)
2562 			goto leave_nomem;
2563 		len -= l;
2564 		logical += l;
2565 		physical += l;
2566 	}
2567 
2568 	WARN_ON(sblock->page_count == 0);
2569 	for (index = 0; index < sblock->page_count; index++) {
2570 		struct scrub_page *spage = sblock->pagev[index];
2571 		int ret;
2572 
2573 		ret = scrub_add_page_to_rd_bio(sctx, spage);
2574 		if (ret) {
2575 			scrub_block_put(sblock);
2576 			return ret;
2577 		}
2578 	}
2579 
2580 	/* last one frees, either here or in bio completion for last page */
2581 	scrub_block_put(sblock);
2582 	return 0;
2583 }
2584 
2585 static int scrub_extent_for_parity(struct scrub_parity *sparity,
2586 				   u64 logical, u64 len,
2587 				   u64 physical, struct btrfs_device *dev,
2588 				   u64 flags, u64 gen, int mirror_num)
2589 {
2590 	struct scrub_ctx *sctx = sparity->sctx;
2591 	int ret;
2592 	u8 csum[BTRFS_CSUM_SIZE];
2593 	u32 blocksize;
2594 
2595 	if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
2596 		scrub_parity_mark_sectors_error(sparity, logical, len);
2597 		return 0;
2598 	}
2599 
2600 	if (flags & BTRFS_EXTENT_FLAG_DATA) {
2601 		blocksize = sparity->stripe_len;
2602 	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2603 		blocksize = sparity->stripe_len;
2604 	} else {
2605 		blocksize = sctx->fs_info->sectorsize;
2606 		WARN_ON(1);
2607 	}
2608 
2609 	while (len) {
2610 		u64 l = min_t(u64, len, blocksize);
2611 		int have_csum = 0;
2612 
2613 		if (flags & BTRFS_EXTENT_FLAG_DATA) {
2614 			/* push csums to sbio */
2615 			have_csum = scrub_find_csum(sctx, logical, csum);
2616 			if (have_csum == 0)
2617 				goto skip;
2618 		}
2619 		ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
2620 					     flags, gen, mirror_num,
2621 					     have_csum ? csum : NULL);
2622 		if (ret)
2623 			return ret;
2624 skip:
2625 		len -= l;
2626 		logical += l;
2627 		physical += l;
2628 	}
2629 	return 0;
2630 }
2631 
2632 /*
2633  * Given a physical address, this will calculate its
2634  * logical offset. If this is a parity stripe, it will return
2635  * the leftmost data stripe's logical offset.
2636  *
2637  * Return 0 if it is a data stripe, 1 if it is a parity stripe.
2638  */
2639 static int get_raid56_logic_offset(u64 physical, int num,
2640 				   struct map_lookup *map, u64 *offset,
2641 				   u64 *stripe_start)
2642 {
2643 	int i;
2644 	int j = 0;
2645 	u64 stripe_nr;
2646 	u64 last_offset;
2647 	u32 stripe_index;
2648 	u32 rot;
2649 
2650 	last_offset = (physical - map->stripes[num].physical) *
2651 		      nr_data_stripes(map);
2652 	if (stripe_start)
2653 		*stripe_start = last_offset;
2654 
2655 	*offset = last_offset;
2656 	for (i = 0; i < nr_data_stripes(map); i++) {
2657 		*offset = last_offset + i * map->stripe_len;
2658 
2659 		stripe_nr = div64_u64(*offset, map->stripe_len);
2660 		stripe_nr = div_u64(stripe_nr, nr_data_stripes(map));
2661 
2662 		/* Work out the disk rotation on this stripe-set */
2663 		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
2664 		/* calculate which stripe this data is located on */
2665 		rot += i;
2666 		stripe_index = rot % map->num_stripes;
2667 		if (stripe_index == num)
2668 			return 0;
2669 		if (stripe_index < num)
2670 			j++;
2671 	}
2672 	*offset = last_offset + j * map->stripe_len;
2673 	return 1;
2674 }
2675 
2676 static void scrub_free_parity(struct scrub_parity *sparity)
2677 {
2678 	struct scrub_ctx *sctx = sparity->sctx;
2679 	struct scrub_page *curr, *next;
2680 	int nbits;
2681 
2682 	nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
2683 	if (nbits) {
2684 		spin_lock(&sctx->stat_lock);
2685 		sctx->stat.read_errors += nbits;
2686 		sctx->stat.uncorrectable_errors += nbits;
2687 		spin_unlock(&sctx->stat_lock);
2688 	}
2689 
2690 	list_for_each_entry_safe(curr, next, &sparity->spages, list) {
2691 		list_del_init(&curr->list);
2692 		scrub_page_put(curr);
2693 	}
2694 
2695 	kfree(sparity);
2696 }
2697 
2698 static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
2699 {
2700 	struct scrub_parity *sparity = container_of(work, struct scrub_parity,
2701 						    work);
2702 	struct scrub_ctx *sctx = sparity->sctx;
2703 
2704 	scrub_free_parity(sparity);
2705 	scrub_pending_bio_dec(sctx);
2706 }
2707 
2708 static void scrub_parity_bio_endio(struct bio *bio)
2709 {
2710 	struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
2711 	struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;
2712 
2713 	if (bio->bi_status)
2714 		bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2715 			  sparity->nsectors);
2716 
2717 	bio_put(bio);
2718 
2719 	btrfs_init_work(&sparity->work, btrfs_scrubparity_helper,
2720 			scrub_parity_bio_endio_worker, NULL, NULL);
2721 	btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work);
2722 }
2723 
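/*
 * Once all data of a parity stripe has been read, hand the stripe to
 * the raid56 code to verify and, if necessary, rewrite the parity for
 * all sectors that were read without error.
 */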
2724 static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
2725 {
2726 	struct scrub_ctx *sctx = sparity->sctx;
2727 	struct btrfs_fs_info *fs_info = sctx->fs_info;
2728 	struct bio *bio;
2729 	struct btrfs_raid_bio *rbio;
2730 	struct btrfs_bio *bbio = NULL;
2731 	u64 length;
2732 	int ret;
2733 
2734 	if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
2735 			   sparity->nsectors))
2736 		goto out;
2737 
2738 	length = sparity->logic_end - sparity->logic_start;
2739 
2740 	btrfs_bio_counter_inc_blocked(fs_info);
2741 	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start,
2742 			       &length, &bbio);
2743 	if (ret || !bbio || !bbio->raid_map)
2744 		goto bbio_out;
2745 
2746 	bio = btrfs_io_bio_alloc(0);
2747 	bio->bi_iter.bi_sector = sparity->logic_start >> 9;
2748 	bio->bi_private = sparity;
2749 	bio->bi_end_io = scrub_parity_bio_endio;
2750 
2751 	rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio,
2752 					      length, sparity->scrub_dev,
2753 					      sparity->dbitmap,
2754 					      sparity->nsectors);
2755 	if (!rbio)
2756 		goto rbio_out;
2757 
2758 	scrub_pending_bio_inc(sctx);
2759 	raid56_parity_submit_scrub_rbio(rbio);
2760 	return;
2761 
2762 rbio_out:
2763 	bio_put(bio);
2764 bbio_out:
2765 	btrfs_bio_counter_dec(fs_info);
2766 	btrfs_put_bbio(bbio);
2767 	bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2768 		  sparity->nsectors);
2769 	spin_lock(&sctx->stat_lock);
2770 	sctx->stat.malloc_errors++;
2771 	spin_unlock(&sctx->stat_lock);
2772 out:
2773 	scrub_free_parity(sparity);
2774 }
2775 
2776 static inline int scrub_calc_parity_bitmap_len(int nsectors)
2777 {
2778 	return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * sizeof(long);
2779 }
2780 
2781 static void scrub_parity_get(struct scrub_parity *sparity)
2782 {
2783 	refcount_inc(&sparity->refs);
2784 }
2785 
2786 static void scrub_parity_put(struct scrub_parity *sparity)
2787 {
2788 	if (!refcount_dec_and_test(&sparity->refs))
2789 		return;
2790 
2791 	scrub_parity_check_and_repair(sparity);
2792 }
2793 
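/*
 * Scrub one RAID5/6 parity range: walk the extent tree for all extents
 * in [logic_start, logic_end), read and verify their data and finally
 * let scrub_parity_check_and_repair() validate the parity once the last
 * reference to the scrub_parity is dropped.
 */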
2794 static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
2795 						  struct map_lookup *map,
2796 						  struct btrfs_device *sdev,
2797 						  struct btrfs_path *path,
2798 						  u64 logic_start,
2799 						  u64 logic_end)
2800 {
2801 	struct btrfs_fs_info *fs_info = sctx->fs_info;
2802 	struct btrfs_root *root = fs_info->extent_root;
2803 	struct btrfs_root *csum_root = fs_info->csum_root;
2804 	struct btrfs_extent_item *extent;
2805 	struct btrfs_bio *bbio = NULL;
2806 	u64 flags;
2807 	int ret;
2808 	int slot;
2809 	struct extent_buffer *l;
2810 	struct btrfs_key key;
2811 	u64 generation;
2812 	u64 extent_logical;
2813 	u64 extent_physical;
2814 	u64 extent_len;
2815 	u64 mapped_length;
2816 	struct btrfs_device *extent_dev;
2817 	struct scrub_parity *sparity;
2818 	int nsectors;
2819 	int bitmap_len;
2820 	int extent_mirror_num;
2821 	int stop_loop = 0;
2822 
2823 	nsectors = div_u64(map->stripe_len, fs_info->sectorsize);
2824 	bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
2825 	sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
2826 			  GFP_NOFS);
2827 	if (!sparity) {
2828 		spin_lock(&sctx->stat_lock);
2829 		sctx->stat.malloc_errors++;
2830 		spin_unlock(&sctx->stat_lock);
2831 		return -ENOMEM;
2832 	}
2833 
2834 	sparity->stripe_len = map->stripe_len;
2835 	sparity->nsectors = nsectors;
2836 	sparity->sctx = sctx;
2837 	sparity->scrub_dev = sdev;
2838 	sparity->logic_start = logic_start;
2839 	sparity->logic_end = logic_end;
2840 	refcount_set(&sparity->refs, 1);
2841 	INIT_LIST_HEAD(&sparity->spages);
2842 	sparity->dbitmap = sparity->bitmap;
2843 	sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;
2844 
2845 	ret = 0;
2846 	while (logic_start < logic_end) {
2847 		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2848 			key.type = BTRFS_METADATA_ITEM_KEY;
2849 		else
2850 			key.type = BTRFS_EXTENT_ITEM_KEY;
2851 		key.objectid = logic_start;
2852 		key.offset = (u64)-1;
2853 
2854 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2855 		if (ret < 0)
2856 			goto out;
2857 
2858 		if (ret > 0) {
2859 			ret = btrfs_previous_extent_item(root, path, 0);
2860 			if (ret < 0)
2861 				goto out;
2862 			if (ret > 0) {
2863 				btrfs_release_path(path);
2864 				ret = btrfs_search_slot(NULL, root, &key,
2865 							path, 0, 0);
2866 				if (ret < 0)
2867 					goto out;
2868 			}
2869 		}
2870 
2871 		stop_loop = 0;
2872 		while (1) {
2873 			u64 bytes;
2874 
2875 			l = path->nodes[0];
2876 			slot = path->slots[0];
2877 			if (slot >= btrfs_header_nritems(l)) {
2878 				ret = btrfs_next_leaf(root, path);
2879 				if (ret == 0)
2880 					continue;
2881 				if (ret < 0)
2882 					goto out;
2883 
2884 				stop_loop = 1;
2885 				break;
2886 			}
2887 			btrfs_item_key_to_cpu(l, &key, slot);
2888 
2889 			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
2890 			    key.type != BTRFS_METADATA_ITEM_KEY)
2891 				goto next;
2892 
2893 			if (key.type == BTRFS_METADATA_ITEM_KEY)
2894 				bytes = fs_info->nodesize;
2895 			else
2896 				bytes = key.offset;
2897 
2898 			if (key.objectid + bytes <= logic_start)
2899 				goto next;
2900 
2901 			if (key.objectid >= logic_end) {
2902 				stop_loop = 1;
2903 				break;
2904 			}
2905 
2906 			while (key.objectid >= logic_start + map->stripe_len)
2907 				logic_start += map->stripe_len;
2908 
2909 			extent = btrfs_item_ptr(l, slot,
2910 						struct btrfs_extent_item);
2911 			flags = btrfs_extent_flags(l, extent);
2912 			generation = btrfs_extent_generation(l, extent);
2913 
2914 			if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
2915 			    (key.objectid < logic_start ||
2916 			     key.objectid + bytes >
2917 			     logic_start + map->stripe_len)) {
2918 				btrfs_err(fs_info,
2919 					  "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
2920 					  key.objectid, logic_start);
2921 				spin_lock(&sctx->stat_lock);
2922 				sctx->stat.uncorrectable_errors++;
2923 				spin_unlock(&sctx->stat_lock);
2924 				goto next;
2925 			}
2926 again:
2927 			extent_logical = key.objectid;
2928 			extent_len = bytes;
2929 
2930 			if (extent_logical < logic_start) {
2931 				extent_len -= logic_start - extent_logical;
2932 				extent_logical = logic_start;
2933 			}
2934 
2935 			if (extent_logical + extent_len >
2936 			    logic_start + map->stripe_len)
2937 				extent_len = logic_start + map->stripe_len -
2938 					     extent_logical;
2939 
2940 			scrub_parity_mark_sectors_data(sparity, extent_logical,
2941 						       extent_len);
2942 
2943 			mapped_length = extent_len;
2944 			bbio = NULL;
2945 			ret = btrfs_map_block(fs_info, BTRFS_MAP_READ,
2946 					extent_logical, &mapped_length, &bbio,
2947 					0);
2948 			if (!ret) {
2949 				if (!bbio || mapped_length < extent_len)
2950 					ret = -EIO;
2951 			}
2952 			if (ret) {
2953 				btrfs_put_bbio(bbio);
2954 				goto out;
2955 			}
2956 			extent_physical = bbio->stripes[0].physical;
2957 			extent_mirror_num = bbio->mirror_num;
2958 			extent_dev = bbio->stripes[0].dev;
2959 			btrfs_put_bbio(bbio);
2960 
2961 			ret = btrfs_lookup_csums_range(csum_root,
2962 						extent_logical,
2963 						extent_logical + extent_len - 1,
2964 						&sctx->csum_list, 1);
2965 			if (ret)
2966 				goto out;
2967 
2968 			ret = scrub_extent_for_parity(sparity, extent_logical,
2969 						      extent_len,
2970 						      extent_physical,
2971 						      extent_dev, flags,
2972 						      generation,
2973 						      extent_mirror_num);
2974 
2975 			scrub_free_csums(sctx);
2976 
2977 			if (ret)
2978 				goto out;
2979 
2980 			if (extent_logical + extent_len <
2981 			    key.objectid + bytes) {
2982 				logic_start += map->stripe_len;
2983 
2984 				if (logic_start >= logic_end) {
2985 					stop_loop = 1;
2986 					break;
2987 				}
2988 
2989 				if (logic_start < key.objectid + bytes) {
2990 					cond_resched();
2991 					goto again;
2992 				}
2993 			}
2994 next:
2995 			path->slots[0]++;
2996 		}
2997 
2998 		btrfs_release_path(path);
2999 
3000 		if (stop_loop)
3001 			break;
3002 
3003 		logic_start += map->stripe_len;
3004 	}
3005 out:
3006 	if (ret < 0)
3007 		scrub_parity_mark_sectors_error(sparity, logic_start,
3008 						logic_end - logic_start);
3009 	scrub_parity_put(sparity);
3010 	scrub_submit(sctx);
3011 	mutex_lock(&sctx->wr_lock);
3012 	scrub_wr_submit(sctx);
3013 	mutex_unlock(&sctx->wr_lock);
3014 
3015 	btrfs_release_path(path);
3016 	return ret < 0 ? ret : 0;
3017 }
3018 
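/*
 * Scrub a single stripe of a chunk on the given device: walk the extent
 * tree (commit root) for all extents that fall into this device stripe
 * and scrub them; RAID5/6 parity stripes are handed over to
 * scrub_raid56_parity().
 */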
3019 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
3020 					   struct map_lookup *map,
3021 					   struct btrfs_device *scrub_dev,
3022 					   int num, u64 base, u64 length)
3023 {
3024 	struct btrfs_path *path, *ppath;
3025 	struct btrfs_fs_info *fs_info = sctx->fs_info;
3026 	struct btrfs_root *root = fs_info->extent_root;
3027 	struct btrfs_root *csum_root = fs_info->csum_root;
3028 	struct btrfs_extent_item *extent;
3029 	struct blk_plug plug;
3030 	u64 flags;
3031 	int ret;
3032 	int slot;
3033 	u64 nstripes;
3034 	struct extent_buffer *l;
3035 	u64 physical;
3036 	u64 logical;
3037 	u64 logic_end;
3038 	u64 physical_end;
3039 	u64 generation;
3040 	int mirror_num;
3041 	struct reada_control *reada1;
3042 	struct reada_control *reada2;
3043 	struct btrfs_key key;
3044 	struct btrfs_key key_end;
3045 	u64 increment = map->stripe_len;
3046 	u64 offset;
3047 	u64 extent_logical;
3048 	u64 extent_physical;
3049 	u64 extent_len;
3050 	u64 stripe_logical;
3051 	u64 stripe_end;
3052 	struct btrfs_device *extent_dev;
3053 	int extent_mirror_num;
3054 	int stop_loop = 0;
3055 
3056 	physical = map->stripes[num].physical;
3057 	offset = 0;
3058 	nstripes = div64_u64(length, map->stripe_len);
3059 	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3060 		offset = map->stripe_len * num;
3061 		increment = map->stripe_len * map->num_stripes;
3062 		mirror_num = 1;
3063 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3064 		int factor = map->num_stripes / map->sub_stripes;
3065 		offset = map->stripe_len * (num / map->sub_stripes);
3066 		increment = map->stripe_len * factor;
3067 		mirror_num = num % map->sub_stripes + 1;
3068 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3069 		increment = map->stripe_len;
3070 		mirror_num = num % map->num_stripes + 1;
3071 	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3072 		increment = map->stripe_len;
3073 		mirror_num = num % map->num_stripes + 1;
3074 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3075 		get_raid56_logic_offset(physical, num, map, &offset, NULL);
3076 		increment = map->stripe_len * nr_data_stripes(map);
3077 		mirror_num = 1;
3078 	} else {
3079 		increment = map->stripe_len;
3080 		mirror_num = 1;
3081 	}
3082 
3083 	path = btrfs_alloc_path();
3084 	if (!path)
3085 		return -ENOMEM;
3086 
3087 	ppath = btrfs_alloc_path();
3088 	if (!ppath) {
3089 		btrfs_free_path(path);
3090 		return -ENOMEM;
3091 	}
3092 
3093 	/*
3094 	 * Work on the commit root. The related disk blocks are static as
3095 	 * long as COW is applied. This means it is safe to rewrite
3096 	 * them to repair disk errors without any race conditions.
3097 	 */
3098 	path->search_commit_root = 1;
3099 	path->skip_locking = 1;
3100 
3101 	ppath->search_commit_root = 1;
3102 	ppath->skip_locking = 1;
3103 	/*
3104 	 * Trigger the readahead for the extent tree and the csum tree and
3105 	 * wait for completion. During readahead, the scrub is officially
3106 	 * paused to not hold off transaction commits.
3107 	 */
3108 	logical = base + offset;
3109 	physical_end = physical + nstripes * map->stripe_len;
3110 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3111 		get_raid56_logic_offset(physical_end, num,
3112 					map, &logic_end, NULL);
3113 		logic_end += base;
3114 	} else {
3115 		logic_end = logical + increment * nstripes;
3116 	}
3117 	wait_event(sctx->list_wait,
3118 		   atomic_read(&sctx->bios_in_flight) == 0);
3119 	scrub_blocked_if_needed(fs_info);
3120 
3121 	/* FIXME it might be better to start readahead at commit root */
3122 	key.objectid = logical;
3123 	key.type = BTRFS_EXTENT_ITEM_KEY;
3124 	key.offset = (u64)0;
3125 	key_end.objectid = logic_end;
3126 	key_end.type = BTRFS_METADATA_ITEM_KEY;
3127 	key_end.offset = (u64)-1;
3128 	reada1 = btrfs_reada_add(root, &key, &key_end);
3129 
3130 	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3131 	key.type = BTRFS_EXTENT_CSUM_KEY;
3132 	key.offset = logical;
3133 	key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3134 	key_end.type = BTRFS_EXTENT_CSUM_KEY;
3135 	key_end.offset = logic_end;
3136 	reada2 = btrfs_reada_add(csum_root, &key, &key_end);
3137 
3138 	if (!IS_ERR(reada1))
3139 		btrfs_reada_wait(reada1);
3140 	if (!IS_ERR(reada2))
3141 		btrfs_reada_wait(reada2);
3142 
3143 
3144 	/*
3145 	 * collect all data csums for the stripe to avoid seeking during
3146 	 * the scrub. This might currently (crc32) end up being about 1MB
3147 	 */
3148 	blk_start_plug(&plug);
3149 
3150 	/*
3151 	 * now find all extents for each stripe and scrub them
3152 	 */
3153 	ret = 0;
3154 	while (physical < physical_end) {
3155 		/*
3156 		 * canceled?
3157 		 */
3158 		if (atomic_read(&fs_info->scrub_cancel_req) ||
3159 		    atomic_read(&sctx->cancel_req)) {
3160 			ret = -ECANCELED;
3161 			goto out;
3162 		}
3163 		/*
3164 		 * check to see if we have to pause
3165 		 */
3166 		if (atomic_read(&fs_info->scrub_pause_req)) {
3167 			/* push queued extents */
3168 			sctx->flush_all_writes = true;
3169 			scrub_submit(sctx);
3170 			mutex_lock(&sctx->wr_lock);
3171 			scrub_wr_submit(sctx);
3172 			mutex_unlock(&sctx->wr_lock);
3173 			wait_event(sctx->list_wait,
3174 				   atomic_read(&sctx->bios_in_flight) == 0);
3175 			sctx->flush_all_writes = false;
3176 			scrub_blocked_if_needed(fs_info);
3177 		}
3178 
3179 		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3180 			ret = get_raid56_logic_offset(physical, num, map,
3181 						      &logical,
3182 						      &stripe_logical);
3183 			logical += base;
3184 			if (ret) {
3185 				/* it is a parity stripe */
3186 				stripe_logical += base;
3187 				stripe_end = stripe_logical + increment;
3188 				ret = scrub_raid56_parity(sctx, map, scrub_dev,
3189 							  ppath, stripe_logical,
3190 							  stripe_end);
3191 				if (ret)
3192 					goto out;
3193 				goto skip;
3194 			}
3195 		}
3196 
3197 		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
3198 			key.type = BTRFS_METADATA_ITEM_KEY;
3199 		else
3200 			key.type = BTRFS_EXTENT_ITEM_KEY;
3201 		key.objectid = logical;
3202 		key.offset = (u64)-1;
3203 
3204 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3205 		if (ret < 0)
3206 			goto out;
3207 
3208 		if (ret > 0) {
3209 			ret = btrfs_previous_extent_item(root, path, 0);
3210 			if (ret < 0)
3211 				goto out;
3212 			if (ret > 0) {
3213 				/* there's no smaller item, so stick with the
3214 				 * larger one */
3215 				btrfs_release_path(path);
3216 				ret = btrfs_search_slot(NULL, root, &key,
3217 							path, 0, 0);
3218 				if (ret < 0)
3219 					goto out;
3220 			}
3221 		}
3222 
3223 		stop_loop = 0;
3224 		while (1) {
3225 			u64 bytes;
3226 
3227 			l = path->nodes[0];
3228 			slot = path->slots[0];
3229 			if (slot >= btrfs_header_nritems(l)) {
3230 				ret = btrfs_next_leaf(root, path);
3231 				if (ret == 0)
3232 					continue;
3233 				if (ret < 0)
3234 					goto out;
3235 
3236 				stop_loop = 1;
3237 				break;
3238 			}
3239 			btrfs_item_key_to_cpu(l, &key, slot);
3240 
3241 			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3242 			    key.type != BTRFS_METADATA_ITEM_KEY)
3243 				goto next;
3244 
3245 			if (key.type == BTRFS_METADATA_ITEM_KEY)
3246 				bytes = fs_info->nodesize;
3247 			else
3248 				bytes = key.offset;
3249 
3250 			if (key.objectid + bytes <= logical)
3251 				goto next;
3252 
3253 			if (key.objectid >= logical + map->stripe_len) {
3254 				/* out of this device extent */
3255 				if (key.objectid >= logic_end)
3256 					stop_loop = 1;
3257 				break;
3258 			}
3259 
3260 			extent = btrfs_item_ptr(l, slot,
3261 						struct btrfs_extent_item);
3262 			flags = btrfs_extent_flags(l, extent);
3263 			generation = btrfs_extent_generation(l, extent);
3264 
3265 			if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
3266 			    (key.objectid < logical ||
3267 			     key.objectid + bytes >
3268 			     logical + map->stripe_len)) {
3269 				btrfs_err(fs_info,
3270 					   "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
3271 				       key.objectid, logical);
3272 				spin_lock(&sctx->stat_lock);
3273 				sctx->stat.uncorrectable_errors++;
3274 				spin_unlock(&sctx->stat_lock);
3275 				goto next;
3276 			}
3277 
3278 again:
3279 			extent_logical = key.objectid;
3280 			extent_len = bytes;
3281 
3282 			/*
3283 			 * trim extent to this stripe
3284 			 */
3285 			if (extent_logical < logical) {
3286 				extent_len -= logical - extent_logical;
3287 				extent_logical = logical;
3288 			}
3289 			if (extent_logical + extent_len >
3290 			    logical + map->stripe_len) {
3291 				extent_len = logical + map->stripe_len -
3292 					     extent_logical;
3293 			}
3294 
3295 			extent_physical = extent_logical - logical + physical;
3296 			extent_dev = scrub_dev;
3297 			extent_mirror_num = mirror_num;
3298 			if (sctx->is_dev_replace)
3299 				scrub_remap_extent(fs_info, extent_logical,
3300 						   extent_len, &extent_physical,
3301 						   &extent_dev,
3302 						   &extent_mirror_num);
3303 
3304 			ret = btrfs_lookup_csums_range(csum_root,
3305 						       extent_logical,
3306 						       extent_logical +
3307 						       extent_len - 1,
3308 						       &sctx->csum_list, 1);
3309 			if (ret)
3310 				goto out;
3311 
3312 			ret = scrub_extent(sctx, map, extent_logical, extent_len,
3313 					   extent_physical, extent_dev, flags,
3314 					   generation, extent_mirror_num,
3315 					   extent_logical - logical + physical);
3316 
3317 			scrub_free_csums(sctx);
3318 
3319 			if (ret)
3320 				goto out;
3321 
3322 			if (extent_logical + extent_len <
3323 			    key.objectid + bytes) {
3324 				if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3325 					/*
3326 					 * loop until we find next data stripe
3327 					 * or we have finished all stripes.
3328 					 */
3329 loop:
3330 					physical += map->stripe_len;
3331 					ret = get_raid56_logic_offset(physical,
3332 							num, map, &logical,
3333 							&stripe_logical);
3334 					logical += base;
3335 
3336 					if (ret && physical < physical_end) {
3337 						stripe_logical += base;
3338 						stripe_end = stripe_logical +
3339 								increment;
3340 						ret = scrub_raid56_parity(sctx,
3341 							map, scrub_dev, ppath,
3342 							stripe_logical,
3343 							stripe_end);
3344 						if (ret)
3345 							goto out;
3346 						goto loop;
3347 					}
3348 				} else {
3349 					physical += map->stripe_len;
3350 					logical += increment;
3351 				}
3352 				if (logical < key.objectid + bytes) {
3353 					cond_resched();
3354 					goto again;
3355 				}
3356 
3357 				if (physical >= physical_end) {
3358 					stop_loop = 1;
3359 					break;
3360 				}
3361 			}
3362 next:
3363 			path->slots[0]++;
3364 		}
3365 		btrfs_release_path(path);
3366 skip:
3367 		logical += increment;
3368 		physical += map->stripe_len;
3369 		spin_lock(&sctx->stat_lock);
3370 		if (stop_loop)
3371 			sctx->stat.last_physical = map->stripes[num].physical +
3372 						   length;
3373 		else
3374 			sctx->stat.last_physical = physical;
3375 		spin_unlock(&sctx->stat_lock);
3376 		if (stop_loop)
3377 			break;
3378 	}
3379 out:
3380 	/* push queued extents */
3381 	scrub_submit(sctx);
3382 	mutex_lock(&sctx->wr_lock);
3383 	scrub_wr_submit(sctx);
3384 	mutex_unlock(&sctx->wr_lock);
3385 
3386 	blk_finish_plug(&plug);
3387 	btrfs_free_path(path);
3388 	btrfs_free_path(ppath);
3389 	return ret < 0 ? ret : 0;
3390 }
3391 
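/*
 * Scrub all stripes of the chunk at @chunk_offset that live on
 * @scrub_dev at @dev_offset, based on the cached chunk mapping.
 */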
3392 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
3393 					  struct btrfs_device *scrub_dev,
3394 					  u64 chunk_offset, u64 length,
3395 					  u64 dev_offset,
3396 					  struct btrfs_block_group_cache *cache)
3397 {
3398 	struct btrfs_fs_info *fs_info = sctx->fs_info;
3399 	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
3400 	struct map_lookup *map;
3401 	struct extent_map *em;
3402 	int i;
3403 	int ret = 0;
3404 
3405 	read_lock(&map_tree->map_tree.lock);
3406 	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3407 	read_unlock(&map_tree->map_tree.lock);
3408 
3409 	if (!em) {
3410 		/*
3411 		 * Might have been an unused block group deleted by the cleaner
3412 		 * kthread or relocation.
3413 		 */
3414 		spin_lock(&cache->lock);
3415 		if (!cache->removed)
3416 			ret = -EINVAL;
3417 		spin_unlock(&cache->lock);
3418 
3419 		return ret;
3420 	}
3421 
3422 	map = em->map_lookup;
3423 	if (em->start != chunk_offset)
3424 		goto out;
3425 
3426 	if (em->len < length)
3427 		goto out;
3428 
3429 	for (i = 0; i < map->num_stripes; ++i) {
3430 		if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
3431 		    map->stripes[i].physical == dev_offset) {
3432 			ret = scrub_stripe(sctx, map, scrub_dev, i,
3433 					   chunk_offset, length);
3434 			if (ret)
3435 				goto out;
3436 		}
3437 	}
3438 out:
3439 	free_extent_map(em);
3440 
3441 	return ret;
3442 }
3443 
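/*
 * Walk all device extents of the scrubbed device in [start, end), set
 * the corresponding block group read-only where possible, scrub each
 * chunk via scrub_chunk() and keep the dev-replace cursor up to date.
 */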
3444 static noinline_for_stack
3445 int scrub_enumerate_chunks(struct scrub_ctx *sctx,
3446 			   struct btrfs_device *scrub_dev, u64 start, u64 end)
3447 {
3448 	struct btrfs_dev_extent *dev_extent = NULL;
3449 	struct btrfs_path *path;
3450 	struct btrfs_fs_info *fs_info = sctx->fs_info;
3451 	struct btrfs_root *root = fs_info->dev_root;
3452 	u64 length;
3453 	u64 chunk_offset;
3454 	int ret = 0;
3455 	int ro_set;
3456 	int slot;
3457 	struct extent_buffer *l;
3458 	struct btrfs_key key;
3459 	struct btrfs_key found_key;
3460 	struct btrfs_block_group_cache *cache;
3461 	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
3462 
3463 	path = btrfs_alloc_path();
3464 	if (!path)
3465 		return -ENOMEM;
3466 
3467 	path->reada = READA_FORWARD;
3468 	path->search_commit_root = 1;
3469 	path->skip_locking = 1;
3470 
3471 	key.objectid = scrub_dev->devid;
3472 	key.offset = 0ull;
3473 	key.type = BTRFS_DEV_EXTENT_KEY;
3474 
3475 	while (1) {
3476 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3477 		if (ret < 0)
3478 			break;
3479 		if (ret > 0) {
3480 			if (path->slots[0] >=
3481 			    btrfs_header_nritems(path->nodes[0])) {
3482 				ret = btrfs_next_leaf(root, path);
3483 				if (ret < 0)
3484 					break;
3485 				if (ret > 0) {
3486 					ret = 0;
3487 					break;
3488 				}
3489 			} else {
3490 				ret = 0;
3491 			}
3492 		}
3493 
3494 		l = path->nodes[0];
3495 		slot = path->slots[0];
3496 
3497 		btrfs_item_key_to_cpu(l, &found_key, slot);
3498 
3499 		if (found_key.objectid != scrub_dev->devid)
3500 			break;
3501 
3502 		if (found_key.type != BTRFS_DEV_EXTENT_KEY)
3503 			break;
3504 
3505 		if (found_key.offset >= end)
3506 			break;
3507 
3508 		if (found_key.offset < key.offset)
3509 			break;
3510 
3511 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3512 		length = btrfs_dev_extent_length(l, dev_extent);
3513 
3514 		if (found_key.offset + length <= start)
3515 			goto skip;
3516 
3517 		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3518 
3519 		/*
3520 		 * get a reference on the corresponding block group to prevent
3521 		 * the chunk from going away while we scrub it
3522 		 */
3523 		cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3524 
3525 		/* some chunks are removed but not committed to disk yet,
3526 		 * continue scrubbing */
3527 		if (!cache)
3528 			goto skip;
3529 
3530 		/*
3531 		 * We need to call btrfs_inc_block_group_ro() with scrubs_paused,
3532 		 * to avoid deadlock caused by:
3533 		 * btrfs_inc_block_group_ro()
3534 		 * -> btrfs_wait_for_commit()
3535 		 * -> btrfs_commit_transaction()
3536 		 * -> btrfs_scrub_pause()
3537 		 */
3538 		scrub_pause_on(fs_info);
3539 		ret = btrfs_inc_block_group_ro(cache);
3540 		if (!ret && sctx->is_dev_replace) {
3541 			/*
3542 			 * If we are doing a device replace, wait for any tasks
3543 			 * that started delalloc right before we set the block
3544 			 * group to RO mode, as they might have just allocated
3545 			 * an extent from it or decided they could do a nocow
3546 			 * write. And if any such tasks did that, wait for their
3547 			 * ordered extents to complete and then commit the
3548 			 * current transaction, so that we can later see the new
3549 			 * extent items in the extent tree - the ordered extents
3550 			 * create delayed data references (for cow writes) when
3551 			 * they complete, which will be run and insert the
3552 			 * corresponding extent items into the extent tree when
3553 			 * we commit the transaction they used when running
3554 			 * inode.c:btrfs_finish_ordered_io(). We later use
3555 			 * the commit root of the extent tree to find extents
3556 			 * to copy from the srcdev into the tgtdev, and we don't
3557 			 * want to miss any new extents.
3558 			 */
3559 			btrfs_wait_block_group_reservations(cache);
3560 			btrfs_wait_nocow_writers(cache);
3561 			ret = btrfs_wait_ordered_roots(fs_info, U64_MAX,
3562 						       cache->key.objectid,
3563 						       cache->key.offset);
3564 			if (ret > 0) {
3565 				struct btrfs_trans_handle *trans;
3566 
3567 				trans = btrfs_join_transaction(root);
3568 				if (IS_ERR(trans))
3569 					ret = PTR_ERR(trans);
3570 				else
3571 					ret = btrfs_commit_transaction(trans);
3572 				if (ret) {
3573 					scrub_pause_off(fs_info);
3574 					btrfs_put_block_group(cache);
3575 					break;
3576 				}
3577 			}
3578 		}
3579 		scrub_pause_off(fs_info);
3580 
3581 		if (ret == 0) {
3582 			ro_set = 1;
3583 		} else if (ret == -ENOSPC) {
3584 			/*
3585 			 * btrfs_inc_block_group_ro() returns -ENOSPC when it
3586 			 * fails to create a new chunk for metadata.
3587 			 * This is not a problem for scrub/replace, because
3588 			 * metadata is always COWed, and our scrub pauses
3589 			 * transaction commits.
3590 			 */
3591 			ro_set = 0;
3592 		} else {
3593 			btrfs_warn(fs_info,
3594 				   "failed setting block group ro: %d", ret);
3595 			btrfs_put_block_group(cache);
3596 			break;
3597 		}
3598 
3599 		btrfs_dev_replace_write_lock(&fs_info->dev_replace);
3600 		dev_replace->cursor_right = found_key.offset + length;
3601 		dev_replace->cursor_left = found_key.offset;
3602 		dev_replace->item_needs_writeback = 1;
3603 		btrfs_dev_replace_write_unlock(&fs_info->dev_replace);
3604 		ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
3605 				  found_key.offset, cache);
3606 
3607 		/*
3608 		 * Flush and submit all pending read and write bios, then
3609 		 * wait for them.
3610 		 * Note that in the dev replace case, a read request causes
3611 		 * write requests that are submitted in the read completion
3612 		 * worker. Therefore in the current situation, it is required
3613 		 * that all write requests are flushed, so that all read and
3614 		 * write requests are really completed when bios_in_flight
3615 		 * changes to 0.
3616 		 */
3617 		sctx->flush_all_writes = true;
3618 		scrub_submit(sctx);
3619 		mutex_lock(&sctx->wr_lock);
3620 		scrub_wr_submit(sctx);
3621 		mutex_unlock(&sctx->wr_lock);
3622 
3623 		wait_event(sctx->list_wait,
3624 			   atomic_read(&sctx->bios_in_flight) == 0);
3625 
3626 		scrub_pause_on(fs_info);
3627 
3628 		/*
3629 		 * This must be done before we decrease @scrub_paused.
3630 		 * Make sure we don't block transaction commit while
3631 		 * we are waiting for pending workers to finish.
3632 		 */
3633 		wait_event(sctx->list_wait,
3634 			   atomic_read(&sctx->workers_pending) == 0);
3635 		sctx->flush_all_writes = false;
3636 
3637 		scrub_pause_off(fs_info);
3638 
3639 		btrfs_dev_replace_write_lock(&fs_info->dev_replace);
3640 		dev_replace->cursor_left = dev_replace->cursor_right;
3641 		dev_replace->item_needs_writeback = 1;
3642 		btrfs_dev_replace_write_unlock(&fs_info->dev_replace);
3643 
3644 		if (ro_set)
3645 			btrfs_dec_block_group_ro(cache);
3646 
3647 		/*
3648 		 * We might have prevented the cleaner kthread from deleting
3649 		 * this block group if it was already unused because we raced
3650 		 * and set it to RO mode first. So add it back to the unused
3651 		 * list, otherwise it might not ever be deleted unless a manual
3652 		 * balance is triggered or it becomes used and unused again.
3653 		 */
3654 		spin_lock(&cache->lock);
3655 		if (!cache->removed && !cache->ro && cache->reserved == 0 &&
3656 		    btrfs_block_group_used(&cache->item) == 0) {
3657 			spin_unlock(&cache->lock);
3658 			btrfs_mark_bg_unused(cache);
3659 		} else {
3660 			spin_unlock(&cache->lock);
3661 		}
3662 
3663 		btrfs_put_block_group(cache);
3664 		if (ret)
3665 			break;
3666 		if (sctx->is_dev_replace &&
3667 		    atomic64_read(&dev_replace->num_write_errors) > 0) {
3668 			ret = -EIO;
3669 			break;
3670 		}
3671 		if (sctx->stat.malloc_errors > 0) {
3672 			ret = -ENOMEM;
3673 			break;
3674 		}
3675 skip:
3676 		key.offset = found_key.offset + length;
3677 		btrfs_release_path(path);
3678 	}
3679 
3680 	btrfs_free_path(path);
3681 
3682 	return ret;
3683 }
3684 
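/*
 * Scrub all super block mirrors of @scrub_dev that fit within the device's
 * committed size.  The expected generation passed down for verification is
 * the last committed transaction, or the seed device's own generation when
 * @scrub_dev belongs to a different fs_devices (a seed device).  Waits for
 * all in-flight bios before returning.
 */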
3685 static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
3686 					   struct btrfs_device *scrub_dev)
3687 {
3688 	int	i;
3689 	u64	bytenr;
3690 	u64	gen;
3691 	int	ret;
3692 	struct btrfs_fs_info *fs_info = sctx->fs_info;
3693 
3694 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
3695 		return -EIO;
3696 
3697 	/* Seed devices of a new filesystem have their own generation. */
3698 	if (scrub_dev->fs_devices != fs_info->fs_devices)
3699 		gen = scrub_dev->generation;
3700 	else
3701 		gen = fs_info->last_trans_committed;
3702 
3703 	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
3704 		bytenr = btrfs_sb_offset(i);
3705 		if (bytenr + BTRFS_SUPER_INFO_SIZE >
3706 		    scrub_dev->commit_total_bytes)
3707 			break;
3708 
3709 		ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
3710 				  scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
3711 				  NULL, 1, bytenr);
3712 		if (ret)
3713 			return ret;
3714 	}
3715 	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3716 
3717 	return 0;
3718 }
3719 
3720 /*
3721  * Get a reference on fs_info->scrub_workers. Start the workers if necessary.
3722  */
3723 static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
3724 						int is_dev_replace)
3725 {
3726 	unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
3727 	int max_active = fs_info->thread_pool_size;
3728 
3729 	if (fs_info->scrub_workers_refcnt == 0) {
3730 		fs_info->scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub",
3731 				flags, is_dev_replace ? 1 : max_active, 4);
3732 		if (!fs_info->scrub_workers)
3733 			goto fail_scrub_workers;
3734 
3735 		fs_info->scrub_wr_completion_workers =
3736 			btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
3737 					      max_active, 2);
3738 		if (!fs_info->scrub_wr_completion_workers)
3739 			goto fail_scrub_wr_completion_workers;
3740 
3741 		fs_info->scrub_parity_workers =
3742 			btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
3743 					      max_active, 2);
3744 		if (!fs_info->scrub_parity_workers)
3745 			goto fail_scrub_parity_workers;
3746 	}
3747 	++fs_info->scrub_workers_refcnt;
3748 	return 0;
3749 
3750 fail_scrub_parity_workers:
3751 	btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
3752 fail_scrub_wr_completion_workers:
3753 	btrfs_destroy_workqueue(fs_info->scrub_workers);
3754 fail_scrub_workers:
3755 	return -ENOMEM;
3756 }
3757 
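/*
 * Drop a reference on the scrub workqueues; destroy them when the last
 * reference is gone.  Both this and scrub_workers_get() are called with
 * fs_info->scrub_lock held in btrfs_scrub_dev().
 */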
3758 static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
3759 {
3760 	if (--fs_info->scrub_workers_refcnt == 0) {
3761 		btrfs_destroy_workqueue(fs_info->scrub_workers);
3762 		btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
3763 		btrfs_destroy_workqueue(fs_info->scrub_parity_workers);
3764 	}
3765 	WARN_ON(fs_info->scrub_workers_refcnt < 0);
3766 }
3767 
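/*
 * Entry point shared by scrub and device replace.  Validates the size
 * assumptions below, makes sure no other scrub or replace is running on the
 * device, and then scrubs the allocated chunks of device @devid in the given
 * device offset range [@start, @end].  If @progress is non-NULL it receives
 * a copy of the final statistics.
 *
 * A minimal call sketch, with readonly and is_dev_replace both 0 (simplified;
 * the real callers are the scrub and dev-replace ioctl paths, which add
 * argument validation and cancellation handling):
 *
 *	struct btrfs_scrub_progress progress = { 0 };
 *	int ret;
 *
 *	ret = btrfs_scrub_dev(fs_info, devid, 0, U64_MAX, &progress, 0, 0);
 */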
3768 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
3769 		    u64 end, struct btrfs_scrub_progress *progress,
3770 		    int readonly, int is_dev_replace)
3771 {
3772 	struct scrub_ctx *sctx;
3773 	int ret;
3774 	struct btrfs_device *dev;
3775 
3776 	if (btrfs_fs_closing(fs_info))
3777 		return -EINVAL;
3778 
3779 	if (fs_info->nodesize > BTRFS_STRIPE_LEN) {
3780 		/*
3781 		 * In this case scrub is unable to calculate the checksum,
3782 		 * given the way scrub is implemented. Do not handle this
3783 		 * situation at all because it won't ever happen.
3784 		 */
3785 		btrfs_err(fs_info,
3786 			   "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
3787 		       fs_info->nodesize,
3788 		       BTRFS_STRIPE_LEN);
3789 		return -EINVAL;
3790 	}
3791 
3792 	if (fs_info->sectorsize != PAGE_SIZE) {
3793 		/* not supported for data w/o checksums */
3794 		btrfs_err_rl(fs_info,
3795 			   "scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails",
3796 		       fs_info->sectorsize, PAGE_SIZE);
3797 		return -EINVAL;
3798 	}
3799 
3800 	if (fs_info->nodesize >
3801 	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
3802 	    fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
3803 		/*
3804 		 * Would exhaust the array bounds of the pagev member in
3805 		 * struct scrub_block.
3806 		 */
3807 		btrfs_err(fs_info,
3808 			  "scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
3809 		       fs_info->nodesize,
3810 		       SCRUB_MAX_PAGES_PER_BLOCK,
3811 		       fs_info->sectorsize,
3812 		       SCRUB_MAX_PAGES_PER_BLOCK);
3813 		return -EINVAL;
3814 	}
3815 
3816 
3817 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
3818 	dev = btrfs_find_device(fs_info, devid, NULL, NULL);
3819 	if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
3820 		     !is_dev_replace)) {
3821 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3822 		return -ENODEV;
3823 	}
3824 
3825 	if (!is_dev_replace && !readonly &&
3826 	    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
3827 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3828 		btrfs_err_in_rcu(fs_info, "scrub: device %s is not writable",
3829 				rcu_str_deref(dev->name));
3830 		return -EROFS;
3831 	}
3832 
3833 	mutex_lock(&fs_info->scrub_lock);
3834 	if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3835 	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
3836 		mutex_unlock(&fs_info->scrub_lock);
3837 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3838 		return -EIO;
3839 	}
3840 
3841 	btrfs_dev_replace_read_lock(&fs_info->dev_replace);
3842 	if (dev->scrub_ctx ||
3843 	    (!is_dev_replace &&
3844 	     btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
3845 		btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
3846 		mutex_unlock(&fs_info->scrub_lock);
3847 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3848 		return -EINPROGRESS;
3849 	}
3850 	btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
3851 
3852 	ret = scrub_workers_get(fs_info, is_dev_replace);
3853 	if (ret) {
3854 		mutex_unlock(&fs_info->scrub_lock);
3855 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3856 		return ret;
3857 	}
3858 
3859 	sctx = scrub_setup_ctx(dev, is_dev_replace);
3860 	if (IS_ERR(sctx)) {
3861 		mutex_unlock(&fs_info->scrub_lock);
3862 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3863 		scrub_workers_put(fs_info);
3864 		return PTR_ERR(sctx);
3865 	}
3866 	sctx->readonly = readonly;
3867 	dev->scrub_ctx = sctx;
3868 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3869 
3870 	/*
3871 	 * By checking @scrub_pause_req here we can avoid a
3872 	 * race between transaction commit and scrubbing.
3873 	 */
3874 	__scrub_blocked_if_needed(fs_info);
3875 	atomic_inc(&fs_info->scrubs_running);
3876 	mutex_unlock(&fs_info->scrub_lock);
3877 
3878 	if (!is_dev_replace) {
3879 		/*
3880 		 * By holding the device list mutex, we don't race with
3881 		 * superblock writes kicked off by a log tree sync.
3882 		 */
3883 		mutex_lock(&fs_info->fs_devices->device_list_mutex);
3884 		ret = scrub_supers(sctx, dev);
3885 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3886 	}
3887 
3888 	if (!ret)
3889 		ret = scrub_enumerate_chunks(sctx, dev, start, end);
3890 
3891 	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3892 	atomic_dec(&fs_info->scrubs_running);
3893 	wake_up(&fs_info->scrub_pause_wait);
3894 
3895 	wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
3896 
3897 	if (progress)
3898 		memcpy(progress, &sctx->stat, sizeof(*progress));
3899 
3900 	mutex_lock(&fs_info->scrub_lock);
3901 	dev->scrub_ctx = NULL;
3902 	scrub_workers_put(fs_info);
3903 	mutex_unlock(&fs_info->scrub_lock);
3904 
3905 	scrub_put_ctx(sctx);
3906 
3907 	return ret;
3908 }
3909 
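/*
 * Ask all running scrubs to pause: bump @scrub_pause_req and wait until the
 * number of paused scrubs matches the number of running ones.  Paired with
 * btrfs_scrub_continue(), which drops the request and wakes the scrubs up.
 */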
3910 void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
3911 {
3912 	mutex_lock(&fs_info->scrub_lock);
3913 	atomic_inc(&fs_info->scrub_pause_req);
3914 	while (atomic_read(&fs_info->scrubs_paused) !=
3915 	       atomic_read(&fs_info->scrubs_running)) {
3916 		mutex_unlock(&fs_info->scrub_lock);
3917 		wait_event(fs_info->scrub_pause_wait,
3918 			   atomic_read(&fs_info->scrubs_paused) ==
3919 			   atomic_read(&fs_info->scrubs_running));
3920 		mutex_lock(&fs_info->scrub_lock);
3921 	}
3922 	mutex_unlock(&fs_info->scrub_lock);
3923 }
3924 
3925 void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
3926 {
3927 	atomic_dec(&fs_info->scrub_pause_req);
3928 	wake_up(&fs_info->scrub_pause_wait);
3929 }
3930 
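/*
 * Cancel all running scrubs and wait until they have stopped.  Returns
 * -ENOTCONN when no scrub is running.
 */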
3931 int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
3932 {
3933 	mutex_lock(&fs_info->scrub_lock);
3934 	if (!atomic_read(&fs_info->scrubs_running)) {
3935 		mutex_unlock(&fs_info->scrub_lock);
3936 		return -ENOTCONN;
3937 	}
3938 
3939 	atomic_inc(&fs_info->scrub_cancel_req);
3940 	while (atomic_read(&fs_info->scrubs_running)) {
3941 		mutex_unlock(&fs_info->scrub_lock);
3942 		wait_event(fs_info->scrub_pause_wait,
3943 			   atomic_read(&fs_info->scrubs_running) == 0);
3944 		mutex_lock(&fs_info->scrub_lock);
3945 	}
3946 	atomic_dec(&fs_info->scrub_cancel_req);
3947 	mutex_unlock(&fs_info->scrub_lock);
3948 
3949 	return 0;
3950 }
3951 
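/*
 * Cancel the scrub running on @dev, if any, and wait until its context has
 * been torn down.  Returns -ENOTCONN when the device is not being scrubbed.
 */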
3952 int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
3953 			   struct btrfs_device *dev)
3954 {
3955 	struct scrub_ctx *sctx;
3956 
3957 	mutex_lock(&fs_info->scrub_lock);
3958 	sctx = dev->scrub_ctx;
3959 	if (!sctx) {
3960 		mutex_unlock(&fs_info->scrub_lock);
3961 		return -ENOTCONN;
3962 	}
3963 	atomic_inc(&sctx->cancel_req);
3964 	while (dev->scrub_ctx) {
3965 		mutex_unlock(&fs_info->scrub_lock);
3966 		wait_event(fs_info->scrub_pause_wait,
3967 			   dev->scrub_ctx == NULL);
3968 		mutex_lock(&fs_info->scrub_lock);
3969 	}
3970 	mutex_unlock(&fs_info->scrub_lock);
3971 
3972 	return 0;
3973 }
3974 
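/*
 * Copy the statistics of the scrub running on device @devid into @progress.
 * Returns -ENODEV if the device does not exist and -ENOTCONN if it exists
 * but is not being scrubbed.
 */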
3975 int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
3976 			 struct btrfs_scrub_progress *progress)
3977 {
3978 	struct btrfs_device *dev;
3979 	struct scrub_ctx *sctx = NULL;
3980 
3981 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
3982 	dev = btrfs_find_device(fs_info, devid, NULL, NULL);
3983 	if (dev)
3984 		sctx = dev->scrub_ctx;
3985 	if (sctx)
3986 		memcpy(progress, &sctx->stat, sizeof(*progress));
3987 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3988 
3989 	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
3990 }
3991 
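/*
 * Map the extent at @extent_logical with length @extent_len and return, via
 * the output parameters, the physical offset and device of the first stripe
 * plus the mirror number of the mapping.  If the mapping lookup fails or
 * does not cover the whole extent, the output parameters are left untouched.
 */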
3992 static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
3993 			       u64 extent_logical, u64 extent_len,
3994 			       u64 *extent_physical,
3995 			       struct btrfs_device **extent_dev,
3996 			       int *extent_mirror_num)
3997 {
3998 	u64 mapped_length;
3999 	struct btrfs_bio *bbio = NULL;
4000 	int ret;
4001 
4002 	mapped_length = extent_len;
4003 	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical,
4004 			      &mapped_length, &bbio, 0);
4005 	if (ret || !bbio || mapped_length < extent_len ||
4006 	    !bbio->stripes[0].dev->bdev) {
4007 		btrfs_put_bbio(bbio);
4008 		return;
4009 	}
4010 
4011 	*extent_physical = bbio->stripes[0].physical;
4012 	*extent_mirror_num = bbio->mirror_num;
4013 	*extent_dev = bbio->stripes[0].dev;
4014 	btrfs_put_bbio(bbio);
4015 }
4016