// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 * Copyright (C) 2022 Christoph Hellwig.
 */

#include <linux/bio.h>
#include "bio.h"
#include "ctree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "dev-replace.h"
#include "rcu-string.h"
#include "zoned.h"
#include "file-item.h"

static struct bio_set btrfs_bioset;
static struct bio_set btrfs_clone_bioset;
static struct bio_set btrfs_repair_bioset;
static mempool_t btrfs_failed_bio_pool;

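/*
 * Per-I/O state for a failed read that is being repaired.  Tracks the
 * original btrfs_bio, the number of copies available to read from, and a
 * reference count of the outstanding per-sector repair bios.
 */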
struct btrfs_failed_bio {
	struct btrfs_bio *bbio;
	int num_copies;
	atomic_t repair_count;
};

/*
 * Initialize a btrfs_bio structure.  This skips the embedded bio itself as it
 * is already initialized by the block layer.
 */
void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_fs_info *fs_info,
		    btrfs_bio_end_io_t end_io, void *private)
{
	memset(bbio, 0, offsetof(struct btrfs_bio, bio));
	bbio->fs_info = fs_info;
	bbio->end_io = end_io;
	bbio->private = private;
	atomic_set(&bbio->pending_ios, 1);
}

/*
 * Allocate a btrfs_bio structure.  The btrfs_bio is the main I/O container for
 * btrfs, and is used for all I/O submitted through btrfs_submit_bio.
 *
 * Just like the underlying bio_alloc_bioset it will not fail as it is backed by
 * a mempool.
 */
struct btrfs_bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,
				  struct btrfs_fs_info *fs_info,
				  btrfs_bio_end_io_t end_io, void *private)
{
	struct btrfs_bio *bbio;
	struct bio *bio;

	bio = bio_alloc_bioset(NULL, nr_vecs, opf, GFP_NOFS, &btrfs_bioset);
	bbio = btrfs_bio(bio);
	btrfs_bio_init(bbio, fs_info, end_io, private);
	return bbio;
}

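/*
 * Look up the ordered extent covering this bio's file offset and have
 * btrfs_extract_ordered_extent() adjust it to match the bio, as zone append
 * writes must not span multiple ordered extents.
 */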
static blk_status_t btrfs_bio_extract_ordered_extent(struct btrfs_bio *bbio)
{
	struct btrfs_ordered_extent *ordered;
	int ret;

	ordered = btrfs_lookup_ordered_extent(bbio->inode, bbio->file_offset);
	if (WARN_ON_ONCE(!ordered))
		return BLK_STS_IOERR;
	ret = btrfs_extract_ordered_extent(bbio, ordered);
	btrfs_put_ordered_extent(ordered);

	return errno_to_blk_status(ret);
}

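/*
 * Split off the first map_length bytes of @orig_bbio into a new btrfs_bio
 * allocated from btrfs_clone_bioset.  Zone append bios are split with
 * bio_split_rw() to respect the queue limits, everything else with
 * bio_split().  The clone inherits the inode and file offset and takes a
 * reference on the original bio's pending_ios count.
 */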
static struct btrfs_bio *btrfs_split_bio(struct btrfs_fs_info *fs_info,
					 struct btrfs_bio *orig_bbio,
					 u64 map_length, bool use_append)
{
	struct btrfs_bio *bbio;
	struct bio *bio;

	if (use_append) {
		unsigned int nr_segs;

		bio = bio_split_rw(&orig_bbio->bio, &fs_info->limits, &nr_segs,
				   &btrfs_clone_bioset, map_length);
	} else {
		bio = bio_split(&orig_bbio->bio, map_length >> SECTOR_SHIFT,
				GFP_NOFS, &btrfs_clone_bioset);
	}
	bbio = btrfs_bio(bio);
	btrfs_bio_init(bbio, fs_info, NULL, orig_bbio);
	bbio->inode = orig_bbio->inode;
	bbio->file_offset = orig_bbio->file_offset;
	if (!(orig_bbio->bio.bi_opf & REQ_BTRFS_ONE_ORDERED))
		orig_bbio->file_offset += map_length;

	atomic_inc(&orig_bbio->pending_ios);
	return bbio;
}

static void btrfs_orig_write_end_io(struct bio *bio);

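/*
 * Propagate an I/O failure from a cloned (split) bio back to the original
 * btrfs_bio it was split from.
 */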
static void btrfs_bbio_propagate_error(struct btrfs_bio *bbio,
				       struct btrfs_bio *orig_bbio)
{
	/*
	 * For writes we tolerate nr_mirrors - 1 write failures, so we can't
	 * just blindly propagate a write failure here.  Instead increment the
	 * error count in the original I/O context so that it is guaranteed to
	 * be larger than the error tolerance.
	 */
	if (bbio->bio.bi_end_io == &btrfs_orig_write_end_io) {
		struct btrfs_io_stripe *orig_stripe = orig_bbio->bio.bi_private;
		struct btrfs_io_context *orig_bioc = orig_stripe->bioc;

		atomic_add(orig_bioc->max_errors, &orig_bioc->error);
	} else {
		orig_bbio->bio.bi_status = bbio->bio.bi_status;
	}
}

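/*
 * Complete a btrfs_bio.  If this is a clone created by btrfs_split_bio(),
 * fold its status into the original bio and drop the clone; the original
 * end_io handler only runs once all pending split I/Os have finished.
 */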
static void btrfs_orig_bbio_end_io(struct btrfs_bio *bbio)
{
	if (bbio->bio.bi_pool == &btrfs_clone_bioset) {
		struct btrfs_bio *orig_bbio = bbio->private;

		if (bbio->bio.bi_status)
			btrfs_bbio_propagate_error(bbio, orig_bbio);
		bio_put(&bbio->bio);
		bbio = orig_bbio;
	}

	if (atomic_dec_and_test(&bbio->pending_ios))
		bbio->end_io(bbio);
}

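/*
 * Pick the next/previous mirror to try during read repair, wrapping around
 * from the last copy back to the first.
 */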
static int next_repair_mirror(struct btrfs_failed_bio *fbio, int cur_mirror)
{
	if (cur_mirror == fbio->num_copies)
		return cur_mirror + 1 - fbio->num_copies;
	return cur_mirror + 1;
}

static int prev_repair_mirror(struct btrfs_failed_bio *fbio, int cur_mirror)
{
	if (cur_mirror == 1)
		return fbio->num_copies;
	return cur_mirror - 1;
}

static void btrfs_repair_done(struct btrfs_failed_bio *fbio)
{
	if (atomic_dec_and_test(&fbio->repair_count)) {
		btrfs_orig_bbio_end_io(fbio->bbio);
		mempool_free(fbio, &btrfs_failed_bio_pool);
	}
}

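/*
 * Completion handler for a repair read.  If the read failed or fails the
 * checksum again, retry from the next mirror until all mirrors have been
 * tried.  Once a good copy has been read, write it back to every previously
 * tried bad mirror to restore the redundancy.
 */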
static void btrfs_end_repair_bio(struct btrfs_bio *repair_bbio,
				 struct btrfs_device *dev)
{
	struct btrfs_failed_bio *fbio = repair_bbio->private;
	struct btrfs_inode *inode = repair_bbio->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct bio_vec *bv = bio_first_bvec_all(&repair_bbio->bio);
	int mirror = repair_bbio->mirror_num;

	if (repair_bbio->bio.bi_status ||
	    !btrfs_data_csum_ok(repair_bbio, dev, 0, bv)) {
		bio_reset(&repair_bbio->bio, NULL, REQ_OP_READ);
		repair_bbio->bio.bi_iter = repair_bbio->saved_iter;

		mirror = next_repair_mirror(fbio, mirror);
		if (mirror == fbio->bbio->mirror_num) {
			btrfs_debug(fs_info, "no mirror left");
			fbio->bbio->bio.bi_status = BLK_STS_IOERR;
			goto done;
		}

		btrfs_submit_bio(repair_bbio, mirror);
		return;
	}

	do {
		mirror = prev_repair_mirror(fbio, mirror);
		btrfs_repair_io_failure(fs_info, btrfs_ino(inode),
				  repair_bbio->file_offset, fs_info->sectorsize,
				  repair_bbio->saved_iter.bi_sector << SECTOR_SHIFT,
				  bv->bv_page, bv->bv_offset, mirror);
	} while (mirror != fbio->bbio->mirror_num);

done:
	btrfs_repair_done(fbio);
	bio_put(&repair_bbio->bio);
}

/*
 * Try to kick off a repair read to the next available mirror for a bad sector.
 *
 * This primarily tries to recover good data to serve the actual read request,
 * but also tries to write the good data back to the bad mirror(s) when a
 * read succeeds, to restore the redundancy.
 */
static struct btrfs_failed_bio *repair_one_sector(struct btrfs_bio *failed_bbio,
						  u32 bio_offset,
						  struct bio_vec *bv,
						  struct btrfs_failed_bio *fbio)
{
	struct btrfs_inode *inode = failed_bbio->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	const u32 sectorsize = fs_info->sectorsize;
	const u64 logical = (failed_bbio->saved_iter.bi_sector << SECTOR_SHIFT);
	struct btrfs_bio *repair_bbio;
	struct bio *repair_bio;
	int num_copies;
	int mirror;

	btrfs_debug(fs_info, "repair read error: read error at %llu",
		    failed_bbio->file_offset + bio_offset);

	num_copies = btrfs_num_copies(fs_info, logical, sectorsize);
	if (num_copies == 1) {
		btrfs_debug(fs_info, "no copy to repair from");
		failed_bbio->bio.bi_status = BLK_STS_IOERR;
		return fbio;
	}

	if (!fbio) {
		fbio = mempool_alloc(&btrfs_failed_bio_pool, GFP_NOFS);
		fbio->bbio = failed_bbio;
		fbio->num_copies = num_copies;
		atomic_set(&fbio->repair_count, 1);
	}

	atomic_inc(&fbio->repair_count);

	repair_bio = bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_NOFS,
				      &btrfs_repair_bioset);
	repair_bio->bi_iter.bi_sector = failed_bbio->saved_iter.bi_sector;
	__bio_add_page(repair_bio, bv->bv_page, bv->bv_len, bv->bv_offset);

	repair_bbio = btrfs_bio(repair_bio);
	btrfs_bio_init(repair_bbio, fs_info, NULL, fbio);
	repair_bbio->inode = failed_bbio->inode;
	repair_bbio->file_offset = failed_bbio->file_offset + bio_offset;

	mirror = next_repair_mirror(fbio, failed_bbio->mirror_num);
	btrfs_debug(fs_info, "submitting repair read to mirror %d", mirror);
	btrfs_submit_bio(repair_bbio, mirror);
	return fbio;
}

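/*
 * Verify the checksum of every sector of a completed data read and kick off
 * repair reads for any sector that failed.  Completed repair bios are handed
 * back to the repair state machine instead.
 */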
static void btrfs_check_read_bio(struct btrfs_bio *bbio, struct btrfs_device *dev)
{
	struct btrfs_inode *inode = bbio->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u32 sectorsize = fs_info->sectorsize;
	struct bvec_iter *iter = &bbio->saved_iter;
	blk_status_t status = bbio->bio.bi_status;
	struct btrfs_failed_bio *fbio = NULL;
	u32 offset = 0;

	/* Read-repair requires the inode field to be set by the submitter. */
	ASSERT(inode);

	/*
	 * Hand off repair bios to the repair code as there is no upper level
	 * submitter for them.
	 */
	if (bbio->bio.bi_pool == &btrfs_repair_bioset) {
		btrfs_end_repair_bio(bbio, dev);
		return;
	}

	/* Clear the I/O error. A failed repair will reset it. */
	bbio->bio.bi_status = BLK_STS_OK;

	while (iter->bi_size) {
		struct bio_vec bv = bio_iter_iovec(&bbio->bio, *iter);

		bv.bv_len = min(bv.bv_len, sectorsize);
		if (status || !btrfs_data_csum_ok(bbio, dev, offset, &bv))
			fbio = repair_one_sector(bbio, offset, &bv, fbio);

		bio_advance_iter_single(&bbio->bio, iter, sectorsize);
		offset += sectorsize;
	}

	if (bbio->csum != bbio->csum_inline)
		kfree(bbio->csum);

	if (fbio)
		btrfs_repair_done(fbio);
	else
		btrfs_orig_bbio_end_io(bbio);
}

static void btrfs_log_dev_io_error(struct bio *bio, struct btrfs_device *dev)
{
	if (!dev || !dev->bdev)
		return;
	if (bio->bi_status != BLK_STS_IOERR && bio->bi_status != BLK_STS_TARGET)
		return;

	if (btrfs_op(bio) == BTRFS_MAP_WRITE)
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
	else if (!(bio->bi_opf & REQ_RAHEAD))
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
	if (bio->bi_opf & REQ_PREFLUSH)
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_FLUSH_ERRS);
}

static struct workqueue_struct *btrfs_end_io_wq(struct btrfs_fs_info *fs_info,
						struct bio *bio)
{
	if (bio->bi_opf & REQ_META)
		return fs_info->endio_meta_workers;
	return fs_info->endio_workers;
}

static void btrfs_end_bio_work(struct work_struct *work)
{
	struct btrfs_bio *bbio = container_of(work, struct btrfs_bio, end_io_work);

	/* Metadata reads are checked and repaired by the submitter. */
	if (bbio->inode && !(bbio->bio.bi_opf & REQ_META))
		btrfs_check_read_bio(bbio, bbio->bio.bi_private);
	else
		btrfs_orig_bbio_end_io(bbio);
}

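/*
 * End I/O handler for the single-mirror fast path.  Reads are punted to a
 * workqueue for checksum verification; zone append writes record the
 * physical location chosen by the device before completing.
 */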
static void btrfs_simple_end_io(struct bio *bio)
{
	struct btrfs_bio *bbio = btrfs_bio(bio);
	struct btrfs_device *dev = bio->bi_private;
	struct btrfs_fs_info *fs_info = bbio->fs_info;

	btrfs_bio_counter_dec(fs_info);

	if (bio->bi_status)
		btrfs_log_dev_io_error(bio, dev);

	if (bio_op(bio) == REQ_OP_READ) {
		INIT_WORK(&bbio->end_io_work, btrfs_end_bio_work);
		queue_work(btrfs_end_io_wq(fs_info, bio), &bbio->end_io_work);
	} else {
		if (bio_op(bio) == REQ_OP_ZONE_APPEND)
			btrfs_record_physical_zoned(bbio);
		btrfs_orig_bbio_end_io(bbio);
	}
}

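/* End I/O handler for parity RAID writes and read recovery. */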
static void btrfs_raid56_end_io(struct bio *bio)
{
	struct btrfs_io_context *bioc = bio->bi_private;
	struct btrfs_bio *bbio = btrfs_bio(bio);

	btrfs_bio_counter_dec(bioc->fs_info);
	bbio->mirror_num = bioc->mirror_num;
	if (bio_op(bio) == REQ_OP_READ && bbio->inode &&
	    !(bbio->bio.bi_opf & REQ_META))
		btrfs_check_read_bio(bbio, NULL);
	else
		btrfs_orig_bbio_end_io(bbio);

	btrfs_put_bioc(bioc);
}

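/*
 * End I/O handler for the original bio of a mirrored write.  A failure is
 * only reported to the upper layers if it exceeds the error tolerance of the
 * RAID profile.
 */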
static void btrfs_orig_write_end_io(struct bio *bio)
{
	struct btrfs_io_stripe *stripe = bio->bi_private;
	struct btrfs_io_context *bioc = stripe->bioc;
	struct btrfs_bio *bbio = btrfs_bio(bio);

	btrfs_bio_counter_dec(bioc->fs_info);

	if (bio->bi_status) {
		atomic_inc(&bioc->error);
		btrfs_log_dev_io_error(bio, stripe->dev);
	}

	/*
	 * Only send an error to the higher layers if it is beyond the tolerance
	 * threshold.
	 */
	if (atomic_read(&bioc->error) > bioc->max_errors)
		bio->bi_status = BLK_STS_IOERR;
	else
		bio->bi_status = BLK_STS_OK;

	btrfs_orig_bbio_end_io(bbio);
	btrfs_put_bioc(bioc);
}

static void btrfs_clone_write_end_io(struct bio *bio)
{
	struct btrfs_io_stripe *stripe = bio->bi_private;

	if (bio->bi_status) {
		atomic_inc(&stripe->bioc->error);
		btrfs_log_dev_io_error(bio, stripe->dev);
	}

	/* Pass on control to the original bio this one was cloned from */
	bio_endio(stripe->bioc->orig_bio);
	bio_put(bio);
}

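/*
 * Hand a fully mapped bio to the block layer.  Fails the bio immediately if
 * the target device is missing or not writeable, and rewinds the sector of a
 * zone append bio to the start of its zone as required by the block layer.
 */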
static void btrfs_submit_dev_bio(struct btrfs_device *dev, struct bio *bio)
{
	if (!dev || !dev->bdev ||
	    test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
	    (btrfs_op(bio) == BTRFS_MAP_WRITE &&
	     !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
		bio_io_error(bio);
		return;
	}

	bio_set_dev(bio, dev->bdev);

	/*
	 * For zone append writing, bi_sector must point to the beginning of
	 * the zone.
	 */
	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
		u64 physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
		u64 zone_start = round_down(physical, dev->fs_info->zone_size);

		ASSERT(btrfs_dev_is_sequential(dev, physical));
		bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT;
	}
	btrfs_debug_in_rcu(dev->fs_info,
	"%s: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
		__func__, bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector,
		(unsigned long)dev->bdev->bd_dev, btrfs_dev_name(dev),
		dev->devid, bio->bi_iter.bi_size);

	btrfsic_check_bio(bio);

	if (bio->bi_opf & REQ_BTRFS_CGROUP_PUNT)
		blkcg_punt_bio_submit(bio);
	else
		submit_bio(bio);
}

static void btrfs_submit_mirrored_bio(struct btrfs_io_context *bioc, int dev_nr)
{
	struct bio *orig_bio = bioc->orig_bio, *bio;

	ASSERT(bio_op(orig_bio) != REQ_OP_READ);

	/* Reuse the bio embedded into the btrfs_bio for the last mirror */
	if (dev_nr == bioc->num_stripes - 1) {
		bio = orig_bio;
		bio->bi_end_io = btrfs_orig_write_end_io;
	} else {
		bio = bio_alloc_clone(NULL, orig_bio, GFP_NOFS, &fs_bio_set);
		bio_inc_remaining(orig_bio);
		bio->bi_end_io = btrfs_clone_write_end_io;
	}

	bio->bi_private = &bioc->stripes[dev_nr];
	bio->bi_iter.bi_sector = bioc->stripes[dev_nr].physical >> SECTOR_SHIFT;
	bioc->stripes[dev_nr].bioc = bioc;
	btrfs_submit_dev_bio(bioc->stripes[dev_nr].dev, bio);
}

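/*
 * Dispatch an already mapped bio: straight to the single device for the
 * simple case, to the RAID56 code for parity stripes, or cloned to every
 * mirror for the other write profiles.
 */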
static void __btrfs_submit_bio(struct bio *bio, struct btrfs_io_context *bioc,
			       struct btrfs_io_stripe *smap, int mirror_num)
{
	/* Do not leak our private flag into the block layer. */
	bio->bi_opf &= ~REQ_BTRFS_ONE_ORDERED;

	if (!bioc) {
		/* Single mirror read/write fast path. */
		btrfs_bio(bio)->mirror_num = mirror_num;
		bio->bi_iter.bi_sector = smap->physical >> SECTOR_SHIFT;
		bio->bi_private = smap->dev;
		bio->bi_end_io = btrfs_simple_end_io;
		btrfs_submit_dev_bio(smap->dev, bio);
	} else if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		/* Parity RAID write or read recovery. */
		bio->bi_private = bioc;
		bio->bi_end_io = btrfs_raid56_end_io;
		if (bio_op(bio) == REQ_OP_READ)
			raid56_parity_recover(bio, bioc, mirror_num);
		else
			raid56_parity_write(bio, bioc);
	} else {
		/* Write to multiple mirrors. */
		int total_devs = bioc->num_stripes;

		bioc->orig_bio = bio;
		for (int dev_nr = 0; dev_nr < total_devs; dev_nr++)
			btrfs_submit_mirrored_bio(bioc, dev_nr);
	}
}

static blk_status_t btrfs_bio_csum(struct btrfs_bio *bbio)
{
	if (bbio->bio.bi_opf & REQ_META)
		return btree_csum_one_bio(bbio);
	return btrfs_csum_one_bio(bbio);
}

/*
 * Async submit bios are used to offload expensive checksumming onto the worker
 * threads.
 */
struct async_submit_bio {
	struct btrfs_bio *bbio;
	struct btrfs_io_context *bioc;
	struct btrfs_io_stripe smap;
	int mirror_num;
	struct btrfs_work work;
};

/*
 * In order to insert checksums into the metadata in large chunks, we wait
 * until bio submission time.  All the pages in the bio are checksummed and
 * sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record are
 * inserted into the btree.
 */
static void run_one_async_start(struct btrfs_work *work)
{
	struct async_submit_bio *async =
		container_of(work, struct async_submit_bio, work);
	blk_status_t ret;

	ret = btrfs_bio_csum(async->bbio);
	if (ret)
		async->bbio->bio.bi_status = ret;
}

/*
 * Second half of the async path: submit the bio that was checksummed by
 * run_one_async_start(), or just complete it if checksumming failed.
 */
static void run_one_async_done(struct btrfs_work *work)
{
	struct async_submit_bio *async =
		container_of(work, struct async_submit_bio, work);
	struct bio *bio = &async->bbio->bio;

	/* If an error occurred we just want to clean up the bio and move on. */
	if (bio->bi_status) {
		btrfs_orig_bbio_end_io(async->bbio);
		return;
	}

	/*
	 * All of the bios that pass through here are from async helpers.
	 * Use REQ_BTRFS_CGROUP_PUNT to issue them from the owning cgroup's
	 * context.  This changes nothing when cgroups aren't in use.
	 */
	bio->bi_opf |= REQ_BTRFS_CGROUP_PUNT;
	__btrfs_submit_bio(bio, async->bioc, &async->smap, async->mirror_num);
}

static void run_one_async_free(struct btrfs_work *work)
{
	kfree(container_of(work, struct async_submit_bio, work));
}

static bool should_async_write(struct btrfs_bio *bbio)
{
	/* Submit synchronously if the checksum implementation is fast. */
	if (test_bit(BTRFS_FS_CSUM_IMPL_FAST, &bbio->fs_info->flags))
		return false;

	/*
	 * Try to defer the submission to a workqueue to parallelize the
	 * checksum calculation, unless the I/O is issued synchronously by
	 * fsync and friends (->sync_writers != 0).
	 */
	if (atomic_read(&bbio->inode->sync_writers))
		return false;

	/* Zoned devices require I/O to be submitted in order. */
	if ((bbio->bio.bi_opf & REQ_META) && btrfs_is_zoned(bbio->fs_info))
		return false;

	return true;
}

/*
 * Submit a bio to an async queue.
 *
 * Return true if the work has been successfully submitted, else false.
 */
static bool btrfs_wq_submit_bio(struct btrfs_bio *bbio,
				struct btrfs_io_context *bioc,
				struct btrfs_io_stripe *smap, int mirror_num)
{
	struct btrfs_fs_info *fs_info = bbio->fs_info;
	struct async_submit_bio *async;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return false;

	async->bbio = bbio;
	async->bioc = bioc;
	async->smap = *smap;
	async->mirror_num = mirror_num;

	btrfs_init_work(&async->work, run_one_async_start, run_one_async_done,
			run_one_async_free);
	if (op_is_sync(bbio->bio.bi_opf))
		btrfs_queue_work(fs_info->hipri_workers, &async->work);
	else
		btrfs_queue_work(fs_info->workers, &async->work);
	return true;
}

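/*
 * Map and submit as much of @bbio as fits into a single chunk stripe.
 *
 * Returns true when the whole bio has been consumed (submitted or failed),
 * false when only the first part was submitted and the caller must call
 * again for the remainder.
 */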
static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
{
	struct btrfs_inode *inode = bbio->inode;
	struct btrfs_fs_info *fs_info = bbio->fs_info;
	struct btrfs_bio *orig_bbio = bbio;
	struct bio *bio = &bbio->bio;
	u64 logical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
	u64 length = bio->bi_iter.bi_size;
	u64 map_length = length;
	bool use_append = btrfs_use_zone_append(bbio);
	struct btrfs_io_context *bioc = NULL;
	struct btrfs_io_stripe smap;
	blk_status_t ret;
	int error;

	btrfs_bio_counter_inc_blocked(fs_info);
	error = __btrfs_map_block(fs_info, btrfs_op(bio), logical, &map_length,
				  &bioc, &smap, &mirror_num, 1);
	if (error) {
		ret = errno_to_blk_status(error);
		goto fail;
	}

	map_length = min(map_length, length);
	if (use_append)
		map_length = min(map_length, fs_info->max_zone_append_size);

	if (map_length < length) {
		bbio = btrfs_split_bio(fs_info, bbio, map_length, use_append);
		bio = &bbio->bio;
	}

	/*
	 * Save the iter for the end_io handler and preload the checksums for
	 * data reads.
	 */
	if (bio_op(bio) == REQ_OP_READ && inode && !(bio->bi_opf & REQ_META)) {
		bbio->saved_iter = bio->bi_iter;
		ret = btrfs_lookup_bio_sums(bbio);
		if (ret)
			goto fail_put_bio;
	}

	if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
		if (use_append) {
			bio->bi_opf &= ~REQ_OP_WRITE;
			bio->bi_opf |= REQ_OP_ZONE_APPEND;
			ret = btrfs_bio_extract_ordered_extent(bbio);
			if (ret)
				goto fail_put_bio;
		}

		/*
		 * Csum items for reloc roots have already been cloned at this
		 * point, so they are handled as part of the no-checksum case.
		 */
		if (inode && !(inode->flags & BTRFS_INODE_NODATASUM) &&
		    !test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state) &&
		    !btrfs_is_data_reloc_root(inode->root)) {
			if (should_async_write(bbio) &&
			    btrfs_wq_submit_bio(bbio, bioc, &smap, mirror_num))
				goto done;

			ret = btrfs_bio_csum(bbio);
			if (ret)
				goto fail_put_bio;
		}
	}

	__btrfs_submit_bio(bio, bioc, &smap, mirror_num);
done:
	return map_length == length;

fail_put_bio:
	if (map_length < length)
		bio_put(bio);
fail:
	btrfs_bio_counter_dec(fs_info);
	btrfs_bio_end_io(orig_bbio, ret);
	/* Do not submit another chunk */
	return true;
}

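/*
 * Main entry point for submitting btrfs I/O.  Keeps submitting chunks until
 * the entire bio has been mapped and handed to the lower layers.
 */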
void btrfs_submit_bio(struct btrfs_bio *bbio, int mirror_num)
{
	/* If bbio->inode is not populated, its file_offset must be 0. */
	ASSERT(bbio->inode || bbio->file_offset == 0);

	while (!btrfs_submit_chunk(bbio, mirror_num))
		;
}

/*
 * Submit a repair write.
 *
 * This bypasses btrfs_submit_bio deliberately, as that writes all copies in a
 * RAID setup.  Here we only want to write the one bad copy, so we do the
 * mapping ourselves and submit the bio directly.
 *
 * The I/O is issued synchronously to block the repair read completion from
 * freeing the bio.
 */
int btrfs_repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
			    u64 length, u64 logical, struct page *page,
			    unsigned int pg_offset, int mirror_num)
{
	struct btrfs_io_stripe smap = { 0 };
	struct bio_vec bvec;
	struct bio bio;
	int ret = 0;

	ASSERT(!(fs_info->sb->s_flags & SB_RDONLY));
	BUG_ON(!mirror_num);

	if (btrfs_repair_one_zone(fs_info, logical))
		return 0;

	/*
	 * Avoid races with device replace and make sure our bioc has devices
	 * associated to its stripes that don't go away while we are doing the
	 * read repair operation.
	 */
	btrfs_bio_counter_inc_blocked(fs_info);
	ret = btrfs_map_repair_block(fs_info, &smap, logical, length, mirror_num);
	if (ret < 0)
		goto out_counter_dec;

	if (!smap.dev->bdev ||
	    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &smap.dev->dev_state)) {
		ret = -EIO;
		goto out_counter_dec;
	}

	bio_init(&bio, smap.dev->bdev, &bvec, 1, REQ_OP_WRITE | REQ_SYNC);
	bio.bi_iter.bi_sector = smap.physical >> SECTOR_SHIFT;
	__bio_add_page(&bio, page, length, pg_offset);

	btrfsic_check_bio(&bio);
	ret = submit_bio_wait(&bio);
	if (ret) {
		/* try to remap that extent elsewhere? */
		btrfs_dev_stat_inc_and_print(smap.dev, BTRFS_DEV_STAT_WRITE_ERRS);
		goto out_bio_uninit;
	}

	btrfs_info_rl_in_rcu(fs_info,
		"read error corrected: ino %llu off %llu (dev %s sector %llu)",
			     ino, start, btrfs_dev_name(smap.dev),
			     smap.physical >> SECTOR_SHIFT);
	ret = 0;

out_bio_uninit:
	bio_uninit(&bio);
out_counter_dec:
	btrfs_bio_counter_dec(fs_info);
	return ret;
}

/*
 * Submit a btrfs_bio based repair write.
 *
 * If @dev_replace is true, the write is submitted to the dev-replace target
 * device instead of the original one.
 */
void btrfs_submit_repair_write(struct btrfs_bio *bbio, int mirror_num, bool dev_replace)
{
	struct btrfs_fs_info *fs_info = bbio->fs_info;
	u64 logical = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
	u64 length = bbio->bio.bi_iter.bi_size;
	struct btrfs_io_stripe smap = { 0 };
	int ret;

	ASSERT(fs_info);
	ASSERT(mirror_num > 0);
	ASSERT(btrfs_op(&bbio->bio) == BTRFS_MAP_WRITE);
	ASSERT(!bbio->inode);

	btrfs_bio_counter_inc_blocked(fs_info);
	ret = btrfs_map_repair_block(fs_info, &smap, logical, length, mirror_num);
	if (ret < 0)
		goto fail;

	if (dev_replace) {
		ASSERT(smap.dev == fs_info->dev_replace.srcdev);
		smap.dev = fs_info->dev_replace.tgtdev;
	}
	__btrfs_submit_bio(&bbio->bio, NULL, &smap, mirror_num);
	return;

fail:
	btrfs_bio_counter_dec(fs_info);
	btrfs_bio_end_io(bbio, errno_to_blk_status(ret));
}

int __init btrfs_bioset_init(void)
{
	if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE,
			offsetof(struct btrfs_bio, bio),
			BIOSET_NEED_BVECS))
		return -ENOMEM;
	if (bioset_init(&btrfs_clone_bioset, BIO_POOL_SIZE,
			offsetof(struct btrfs_bio, bio), 0))
		goto out_free_bioset;
	if (bioset_init(&btrfs_repair_bioset, BIO_POOL_SIZE,
			offsetof(struct btrfs_bio, bio),
			BIOSET_NEED_BVECS))
		goto out_free_clone_bioset;
	if (mempool_init_kmalloc_pool(&btrfs_failed_bio_pool, BIO_POOL_SIZE,
				      sizeof(struct btrfs_failed_bio)))
		goto out_free_repair_bioset;
	return 0;

out_free_repair_bioset:
	bioset_exit(&btrfs_repair_bioset);
out_free_clone_bioset:
	bioset_exit(&btrfs_clone_bioset);
out_free_bioset:
	bioset_exit(&btrfs_bioset);
	return -ENOMEM;
}

void __cold btrfs_bioset_exit(void)
{
	mempool_exit(&btrfs_failed_bio_pool);
	bioset_exit(&btrfs_repair_bioset);
	bioset_exit(&btrfs_clone_bioset);
	bioset_exit(&btrfs_bioset);
}