// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 * Copyright (C) 2022 Christoph Hellwig.
 */

#include <linux/bio.h>
#include "bio.h"
#include "ctree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "dev-replace.h"
#include "rcu-string.h"
#include "zoned.h"
#include "file-item.h"

static struct bio_set btrfs_bioset;
static struct bio_set btrfs_clone_bioset;
static struct bio_set btrfs_repair_bioset;
static mempool_t btrfs_failed_bio_pool;

struct btrfs_failed_bio {
	struct btrfs_bio *bbio;
	int num_copies;
	atomic_t repair_count;
};

/*
 * Initialize a btrfs_bio structure.  This skips the embedded bio itself as it
 * is already initialized by the block layer.
 */
void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_inode *inode,
		    btrfs_bio_end_io_t end_io, void *private)
{
	memset(bbio, 0, offsetof(struct btrfs_bio, bio));
	bbio->inode = inode;
	bbio->end_io = end_io;
	bbio->private = private;
	atomic_set(&bbio->pending_ios, 1);
}

/*
 * Allocate a btrfs_bio structure.  The btrfs_bio is the main I/O container for
 * btrfs, and is used for all I/O submitted through btrfs_submit_bio.
 *
 * Just like the underlying bio_alloc_bioset it will not fail as it is backed by
 * a mempool.
 */
struct btrfs_bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,
				  struct btrfs_inode *inode,
				  btrfs_bio_end_io_t end_io, void *private)
{
	struct btrfs_bio *bbio;
	struct bio *bio;

	bio = bio_alloc_bioset(NULL, nr_vecs, opf, GFP_NOFS, &btrfs_bioset);
	bbio = btrfs_bio(bio);
	btrfs_bio_init(bbio, inode, end_io, private);
	return bbio;
}
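
/*
 * Split the first @map_length bytes off of @orig_bbio into a new btrfs_bio
 * that is submitted separately.  @orig_bbio is trimmed to the remainder and
 * keeps an extra pending_ios reference for the split part.  Zone append bios
 * also have to honor the device's queue limits, hence the bio_split_rw()
 * path.
 */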
static struct btrfs_bio *btrfs_split_bio(struct btrfs_fs_info *fs_info,
					 struct btrfs_bio *orig_bbio,
					 u64 map_length, bool use_append)
{
	struct btrfs_bio *bbio;
	struct bio *bio;

	if (use_append) {
		unsigned int nr_segs;

		bio = bio_split_rw(&orig_bbio->bio, &fs_info->limits, &nr_segs,
				   &btrfs_clone_bioset, map_length);
	} else {
		bio = bio_split(&orig_bbio->bio, map_length >> SECTOR_SHIFT,
				GFP_NOFS, &btrfs_clone_bioset);
	}
	bbio = btrfs_bio(bio);
	btrfs_bio_init(bbio, orig_bbio->inode, NULL, orig_bbio);

	bbio->file_offset = orig_bbio->file_offset;
	if (!(orig_bbio->bio.bi_opf & REQ_BTRFS_ONE_ORDERED))
		orig_bbio->file_offset += map_length;

	atomic_inc(&orig_bbio->pending_ios);
	return bbio;
}

static void btrfs_orig_write_end_io(struct bio *bio);

static void btrfs_bbio_propagate_error(struct btrfs_bio *bbio,
				       struct btrfs_bio *orig_bbio)
{
	/*
	 * For writes we tolerate nr_mirrors - 1 write failures, so we can't
	 * just blindly propagate a write failure here.  Instead increment the
	 * error count in the original I/O context so that it is guaranteed to
	 * be larger than the error tolerance.
	 */
	if (bbio->bio.bi_end_io == &btrfs_orig_write_end_io) {
		struct btrfs_io_stripe *orig_stripe = orig_bbio->bio.bi_private;
		struct btrfs_io_context *orig_bioc = orig_stripe->bioc;

		atomic_add(orig_bioc->max_errors, &orig_bioc->error);
	} else {
		orig_bbio->bio.bi_status = bbio->bio.bi_status;
	}
}

static void btrfs_orig_bbio_end_io(struct btrfs_bio *bbio)
{
	if (bbio->bio.bi_pool == &btrfs_clone_bioset) {
		struct btrfs_bio *orig_bbio = bbio->private;

		if (bbio->bio.bi_status)
			btrfs_bbio_propagate_error(bbio, orig_bbio);
		bio_put(&bbio->bio);
		bbio = orig_bbio;
	}

	if (atomic_dec_and_test(&bbio->pending_ios))
		bbio->end_io(bbio);
}

static int next_repair_mirror(struct btrfs_failed_bio *fbio, int cur_mirror)
{
	if (cur_mirror == fbio->num_copies)
		return cur_mirror + 1 - fbio->num_copies;
	return cur_mirror + 1;
}

static int prev_repair_mirror(struct btrfs_failed_bio *fbio, int cur_mirror)
{
	if (cur_mirror == 1)
		return fbio->num_copies;
	return cur_mirror - 1;
}

static void btrfs_repair_done(struct btrfs_failed_bio *fbio)
{
	if (atomic_dec_and_test(&fbio->repair_count)) {
		btrfs_orig_bbio_end_io(fbio->bbio);
		mempool_free(fbio, &btrfs_failed_bio_pool);
	}
}

static void btrfs_end_repair_bio(struct btrfs_bio *repair_bbio,
				 struct btrfs_device *dev)
{
	struct btrfs_failed_bio *fbio = repair_bbio->private;
	struct btrfs_inode *inode = repair_bbio->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct bio_vec *bv = bio_first_bvec_all(&repair_bbio->bio);
	int mirror = repair_bbio->mirror_num;

	if (repair_bbio->bio.bi_status ||
	    !btrfs_data_csum_ok(repair_bbio, dev, 0, bv)) {
		bio_reset(&repair_bbio->bio, NULL, REQ_OP_READ);
		repair_bbio->bio.bi_iter = repair_bbio->saved_iter;
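
		/*
		 * This repair attempt failed as well.  Move on to the next
		 * mirror; once we wrap around to the mirror the original read
		 * used, all copies are exhausted and we have to give up.
		 */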
		mirror = next_repair_mirror(fbio, mirror);
		if (mirror == fbio->bbio->mirror_num) {
			btrfs_debug(fs_info, "no mirror left");
			fbio->bbio->bio.bi_status = BLK_STS_IOERR;
			goto done;
		}

		btrfs_submit_bio(repair_bbio, mirror);
		return;
	}

	do {
		mirror = prev_repair_mirror(fbio, mirror);
		btrfs_repair_io_failure(fs_info, btrfs_ino(inode),
				  repair_bbio->file_offset, fs_info->sectorsize,
				  repair_bbio->saved_iter.bi_sector << SECTOR_SHIFT,
				  bv->bv_page, bv->bv_offset, mirror);
	} while (mirror != fbio->bbio->mirror_num);

done:
	btrfs_repair_done(fbio);
	bio_put(&repair_bbio->bio);
}

/*
 * Try to kick off a repair read to the next available mirror for a bad sector.
 *
 * This primarily tries to recover good data to serve the actual read request,
 * but when a repair read succeeds it also writes the good data back to the bad
 * mirror(s) to restore the redundancy.
 */
static struct btrfs_failed_bio *repair_one_sector(struct btrfs_bio *failed_bbio,
						  u32 bio_offset,
						  struct bio_vec *bv,
						  struct btrfs_failed_bio *fbio)
{
	struct btrfs_inode *inode = failed_bbio->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	const u32 sectorsize = fs_info->sectorsize;
	const u64 logical = (failed_bbio->saved_iter.bi_sector << SECTOR_SHIFT);
	struct btrfs_bio *repair_bbio;
	struct bio *repair_bio;
	int num_copies;
	int mirror;

	btrfs_debug(fs_info, "repair read error: read error at %llu",
		    failed_bbio->file_offset + bio_offset);

	num_copies = btrfs_num_copies(fs_info, logical, sectorsize);
	if (num_copies == 1) {
		btrfs_debug(fs_info, "no copy to repair from");
		failed_bbio->bio.bi_status = BLK_STS_IOERR;
		return fbio;
	}

	if (!fbio) {
		fbio = mempool_alloc(&btrfs_failed_bio_pool, GFP_NOFS);
		fbio->bbio = failed_bbio;
		fbio->num_copies = num_copies;
		atomic_set(&fbio->repair_count, 1);
	}
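
	/*
	 * repair_count is biased by one at allocation time.  The matching
	 * decrement happens in btrfs_check_read_bio() once all sectors of the
	 * failed bio have been checked, so a fast repair completion can't
	 * free the fbio while later sectors are still being processed.
	 */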
	atomic_inc(&fbio->repair_count);

	repair_bio = bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_NOFS,
				      &btrfs_repair_bioset);
	repair_bio->bi_iter.bi_sector = failed_bbio->saved_iter.bi_sector;
	__bio_add_page(repair_bio, bv->bv_page, bv->bv_len, bv->bv_offset);

	repair_bbio = btrfs_bio(repair_bio);
	btrfs_bio_init(repair_bbio, failed_bbio->inode, NULL, fbio);
	repair_bbio->file_offset = failed_bbio->file_offset + bio_offset;

	mirror = next_repair_mirror(fbio, failed_bbio->mirror_num);
	btrfs_debug(fs_info, "submitting repair read to mirror %d", mirror);
	btrfs_submit_bio(repair_bbio, mirror);
	return fbio;
}

static void btrfs_check_read_bio(struct btrfs_bio *bbio, struct btrfs_device *dev)
{
	struct btrfs_inode *inode = bbio->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u32 sectorsize = fs_info->sectorsize;
	struct bvec_iter *iter = &bbio->saved_iter;
	blk_status_t status = bbio->bio.bi_status;
	struct btrfs_failed_bio *fbio = NULL;
	u32 offset = 0;

	/*
	 * Hand off repair bios to the repair code as there is no upper level
	 * submitter for them.
	 */
	if (bbio->bio.bi_pool == &btrfs_repair_bioset) {
		btrfs_end_repair_bio(bbio, dev);
		return;
	}

	/* Clear the I/O error.  A failed repair will reset it. */
	bbio->bio.bi_status = BLK_STS_OK;
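
	/*
	 * Iterate over the bio using the iter saved at submission time and
	 * verify each sector against its checksum.  Every bad sector gets its
	 * own repair read, all of them tracked by a single btrfs_failed_bio.
	 */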
	while (iter->bi_size) {
		struct bio_vec bv = bio_iter_iovec(&bbio->bio, *iter);

		bv.bv_len = min(bv.bv_len, sectorsize);
		if (status || !btrfs_data_csum_ok(bbio, dev, offset, &bv))
			fbio = repair_one_sector(bbio, offset, &bv, fbio);

		bio_advance_iter_single(&bbio->bio, iter, sectorsize);
		offset += sectorsize;
	}

	if (bbio->csum != bbio->csum_inline)
		kfree(bbio->csum);

	if (fbio)
		btrfs_repair_done(fbio);
	else
		btrfs_orig_bbio_end_io(bbio);
}

static void btrfs_log_dev_io_error(struct bio *bio, struct btrfs_device *dev)
{
	if (!dev || !dev->bdev)
		return;
	if (bio->bi_status != BLK_STS_IOERR && bio->bi_status != BLK_STS_TARGET)
		return;

	if (btrfs_op(bio) == BTRFS_MAP_WRITE)
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
	else if (!(bio->bi_opf & REQ_RAHEAD))
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
	if (bio->bi_opf & REQ_PREFLUSH)
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_FLUSH_ERRS);
}

static struct workqueue_struct *btrfs_end_io_wq(struct btrfs_fs_info *fs_info,
						struct bio *bio)
{
	if (bio->bi_opf & REQ_META)
		return fs_info->endio_meta_workers;
	return fs_info->endio_workers;
}
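
/*
 * Deferred completion work for reads, punted to a workqueue so that checksum
 * verification and read repair run in process context rather than in the bio
 * completion context, which may be atomic.
 */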
static void btrfs_end_bio_work(struct work_struct *work)
{
	struct btrfs_bio *bbio = container_of(work, struct btrfs_bio, end_io_work);

	/* Metadata reads are checked and repaired by the submitter. */
	if (bbio->bio.bi_opf & REQ_META)
		bbio->end_io(bbio);
	else
		btrfs_check_read_bio(bbio, bbio->bio.bi_private);
}

static void btrfs_simple_end_io(struct bio *bio)
{
	struct btrfs_bio *bbio = btrfs_bio(bio);
	struct btrfs_device *dev = bio->bi_private;
	struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;

	btrfs_bio_counter_dec(fs_info);

	if (bio->bi_status)
		btrfs_log_dev_io_error(bio, dev);

	if (bio_op(bio) == REQ_OP_READ) {
		INIT_WORK(&bbio->end_io_work, btrfs_end_bio_work);
		queue_work(btrfs_end_io_wq(fs_info, bio), &bbio->end_io_work);
	} else {
		if (bio_op(bio) == REQ_OP_ZONE_APPEND)
			btrfs_record_physical_zoned(bbio);
		btrfs_orig_bbio_end_io(bbio);
	}
}

static void btrfs_raid56_end_io(struct bio *bio)
{
	struct btrfs_io_context *bioc = bio->bi_private;
	struct btrfs_bio *bbio = btrfs_bio(bio);

	btrfs_bio_counter_dec(bioc->fs_info);
	bbio->mirror_num = bioc->mirror_num;
	if (bio_op(bio) == REQ_OP_READ && !(bbio->bio.bi_opf & REQ_META))
		btrfs_check_read_bio(bbio, NULL);
	else
		btrfs_orig_bbio_end_io(bbio);

	btrfs_put_bioc(bioc);
}
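
/*
 * Completion handler for the original bio embedded in the btrfs_bio, which is
 * submitted to the last mirror of a mirrored write.  The clones submitted to
 * the other mirrors hold a reference via bio_inc_remaining(), so this runs
 * only after every copy has completed.
 */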
static void btrfs_orig_write_end_io(struct bio *bio)
{
	struct btrfs_io_stripe *stripe = bio->bi_private;
	struct btrfs_io_context *bioc = stripe->bioc;
	struct btrfs_bio *bbio = btrfs_bio(bio);

	btrfs_bio_counter_dec(bioc->fs_info);

	if (bio->bi_status) {
		atomic_inc(&bioc->error);
		btrfs_log_dev_io_error(bio, stripe->dev);
	}

	/*
	 * Only send an error to the higher layers if it is beyond the tolerance
	 * threshold.
	 */
	if (atomic_read(&bioc->error) > bioc->max_errors)
		bio->bi_status = BLK_STS_IOERR;
	else
		bio->bi_status = BLK_STS_OK;

	btrfs_orig_bbio_end_io(bbio);
	btrfs_put_bioc(bioc);
}

static void btrfs_clone_write_end_io(struct bio *bio)
{
	struct btrfs_io_stripe *stripe = bio->bi_private;

	if (bio->bi_status) {
		atomic_inc(&stripe->bioc->error);
		btrfs_log_dev_io_error(bio, stripe->dev);
	}

	/* Pass on control to the original bio this one was cloned from */
	bio_endio(stripe->bioc->orig_bio);
	bio_put(bio);
}

static void btrfs_submit_dev_bio(struct btrfs_device *dev, struct bio *bio)
{
	if (!dev || !dev->bdev ||
	    test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
	    (btrfs_op(bio) == BTRFS_MAP_WRITE &&
	     !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
		bio_io_error(bio);
		return;
	}

	bio_set_dev(bio, dev->bdev);

	/*
	 * For zone append writing, bi_sector must point to the beginning of
	 * the zone.
	 */
	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
		u64 physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
		u64 zone_start = round_down(physical, dev->fs_info->zone_size);

		ASSERT(btrfs_dev_is_sequential(dev, physical));
		bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT;
	}
	btrfs_debug_in_rcu(dev->fs_info,
		"%s: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
		__func__, bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector,
		(unsigned long)dev->bdev->bd_dev, btrfs_dev_name(dev),
		dev->devid, bio->bi_iter.bi_size);

	btrfsic_check_bio(bio);
	submit_bio(bio);
}
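
/*
 * Submit one copy of a mirrored write to the stripe selected by @dev_nr.
 * All copies except the last are clones that share the data pages of the
 * original bio.
 */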
static void btrfs_submit_mirrored_bio(struct btrfs_io_context *bioc, int dev_nr)
{
	struct bio *orig_bio = bioc->orig_bio, *bio;

	ASSERT(bio_op(orig_bio) != REQ_OP_READ);

	/* Reuse the bio embedded into the btrfs_bio for the last mirror */
	if (dev_nr == bioc->num_stripes - 1) {
		bio = orig_bio;
		bio->bi_end_io = btrfs_orig_write_end_io;
	} else {
		bio = bio_alloc_clone(NULL, orig_bio, GFP_NOFS, &fs_bio_set);
		bio_inc_remaining(orig_bio);
		bio->bi_end_io = btrfs_clone_write_end_io;
	}

	bio->bi_private = &bioc->stripes[dev_nr];
	bio->bi_iter.bi_sector = bioc->stripes[dev_nr].physical >> SECTOR_SHIFT;
	bioc->stripes[dev_nr].bioc = bioc;
	btrfs_submit_dev_bio(bioc->stripes[dev_nr].dev, bio);
}

static void __btrfs_submit_bio(struct bio *bio, struct btrfs_io_context *bioc,
			       struct btrfs_io_stripe *smap, int mirror_num)
{
	/* Do not leak our private flag into the block layer. */
	bio->bi_opf &= ~REQ_BTRFS_ONE_ORDERED;

	if (!bioc) {
		/* Single mirror read/write fast path. */
		btrfs_bio(bio)->mirror_num = mirror_num;
		bio->bi_iter.bi_sector = smap->physical >> SECTOR_SHIFT;
		bio->bi_private = smap->dev;
		bio->bi_end_io = btrfs_simple_end_io;
		btrfs_submit_dev_bio(smap->dev, bio);
	} else if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		/* Parity RAID write or read recovery. */
		bio->bi_private = bioc;
		bio->bi_end_io = btrfs_raid56_end_io;
		if (bio_op(bio) == REQ_OP_READ)
			raid56_parity_recover(bio, bioc, mirror_num);
		else
			raid56_parity_write(bio, bioc);
	} else {
		/* Write to multiple mirrors. */
		int total_devs = bioc->num_stripes;

		bioc->orig_bio = bio;
		for (int dev_nr = 0; dev_nr < total_devs; dev_nr++)
			btrfs_submit_mirrored_bio(bioc, dev_nr);
	}
}
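
/*
 * Checksum a write bio at submission time: tree block checksums for metadata
 * bios, per-sector data checksums for everything else.
 */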
static blk_status_t btrfs_bio_csum(struct btrfs_bio *bbio)
{
	if (bbio->bio.bi_opf & REQ_META)
		return btree_csum_one_bio(bbio);
	return btrfs_csum_one_bio(bbio);
}

/*
 * Async submit bios are used to offload expensive checksumming onto the worker
 * threads.
 */
struct async_submit_bio {
	struct btrfs_bio *bbio;
	struct btrfs_io_context *bioc;
	struct btrfs_io_stripe smap;
	int mirror_num;
	struct btrfs_work work;
};

/*
 * In order to insert checksums into the metadata in large chunks, we wait
 * until bio submission time.  All the pages in the bio are checksummed and
 * sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record are
 * inserted into the btree.
 */
static void run_one_async_start(struct btrfs_work *work)
{
	struct async_submit_bio *async =
		container_of(work, struct async_submit_bio, work);
	blk_status_t ret;

	ret = btrfs_bio_csum(async->bbio);
	if (ret)
		async->bbio->bio.bi_status = ret;
}
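
/*
 * Runs after the checksumming work has finished and performs the actual bio
 * submission, or just completes the bio if checksumming already failed.
 */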
static void run_one_async_done(struct btrfs_work *work)
{
	struct async_submit_bio *async =
		container_of(work, struct async_submit_bio, work);
	struct bio *bio = &async->bbio->bio;

	/* If an error occurred we just want to clean up the bio and move on. */
	if (bio->bi_status) {
		btrfs_orig_bbio_end_io(async->bbio);
		return;
	}

	/*
	 * All of the bios that pass through here are from async helpers.
	 * Use REQ_CGROUP_PUNT to issue them from the owning cgroup's context.
	 * This changes nothing when cgroups aren't in use.
	 */
	bio->bi_opf |= REQ_CGROUP_PUNT;
	__btrfs_submit_bio(bio, async->bioc, &async->smap, async->mirror_num);
}

static void run_one_async_free(struct btrfs_work *work)
{
	kfree(container_of(work, struct async_submit_bio, work));
}

static bool should_async_write(struct btrfs_bio *bbio)
{
	/*
	 * If the I/O was issued by fsync and friends (->sync_writers != 0),
	 * submit it synchronously.  Otherwise try to defer the submission to
	 * a workqueue to parallelize the checksum calculation.
	 */
	if (atomic_read(&bbio->inode->sync_writers))
		return false;

	/*
	 * Submit metadata writes synchronously if the checksum implementation
	 * is fast, or we are on a zoned device that wants I/O to be submitted
	 * in order.
	 */
	if (bbio->bio.bi_opf & REQ_META) {
		struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;

		if (btrfs_is_zoned(fs_info))
			return false;
		if (test_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags))
			return false;
	}

	return true;
}

/*
 * Submit bio to an async queue.
 *
 * Return true if the work has been successfully submitted, else false.
 */
static bool btrfs_wq_submit_bio(struct btrfs_bio *bbio,
				struct btrfs_io_context *bioc,
				struct btrfs_io_stripe *smap, int mirror_num)
{
	struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
	struct async_submit_bio *async;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return false;

	async->bbio = bbio;
	async->bioc = bioc;
	async->smap = *smap;
	async->mirror_num = mirror_num;

	btrfs_init_work(&async->work, run_one_async_start, run_one_async_done,
			run_one_async_free);
	if (op_is_sync(bbio->bio.bi_opf))
		btrfs_queue_work(fs_info->hipri_workers, &async->work);
	else
		btrfs_queue_work(fs_info->workers, &async->work);
	return true;
}
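
/*
 * Map and submit the part of @bbio that fits into a single chunk/stripe
 * mapping, splitting off the remainder first if necessary.
 *
 * Returns true if the whole bio has been handled (submitted or failed),
 * false if the bio was split and the caller has to submit the remainder.
 */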
static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
{
	struct btrfs_inode *inode = bbio->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_bio *orig_bbio = bbio;
	struct bio *bio = &bbio->bio;
	u64 logical = bio->bi_iter.bi_sector << 9;
	u64 length = bio->bi_iter.bi_size;
	u64 map_length = length;
	bool use_append = btrfs_use_zone_append(bbio);
	struct btrfs_io_context *bioc = NULL;
	struct btrfs_io_stripe smap;
	blk_status_t ret;
	int error;

	btrfs_bio_counter_inc_blocked(fs_info);
	error = __btrfs_map_block(fs_info, btrfs_op(bio), logical, &map_length,
				  &bioc, &smap, &mirror_num, 1);
	if (error) {
		ret = errno_to_blk_status(error);
		goto fail;
	}

	map_length = min(map_length, length);
	if (use_append)
		map_length = min(map_length, fs_info->max_zone_append_size);

	if (map_length < length) {
		bbio = btrfs_split_bio(fs_info, bbio, map_length, use_append);
		bio = &bbio->bio;
	}

	/*
	 * Save the iter for the end_io handler and preload the checksums for
	 * data reads.
	 */
	if (bio_op(bio) == REQ_OP_READ && !(bio->bi_opf & REQ_META)) {
		bbio->saved_iter = bio->bi_iter;
		ret = btrfs_lookup_bio_sums(bbio);
		if (ret)
			goto fail_put_bio;
	}

	if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
		if (use_append) {
			bio->bi_opf &= ~REQ_OP_WRITE;
			bio->bi_opf |= REQ_OP_ZONE_APPEND;
			ret = btrfs_extract_ordered_extent(bbio);
			if (ret)
				goto fail_put_bio;
		}

		/*
		 * Csum items for reloc roots have already been cloned at this
		 * point, so they are handled as part of the no-checksum case.
		 */
		if (!(inode->flags & BTRFS_INODE_NODATASUM) &&
		    !test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state) &&
		    !btrfs_is_data_reloc_root(inode->root)) {
			if (should_async_write(bbio) &&
			    btrfs_wq_submit_bio(bbio, bioc, &smap, mirror_num))
				goto done;

			ret = btrfs_bio_csum(bbio);
			if (ret)
				goto fail_put_bio;
		}
	}

	__btrfs_submit_bio(bio, bioc, &smap, mirror_num);
done:
	return map_length == length;

fail_put_bio:
	if (map_length < length)
		bio_put(bio);
fail:
	btrfs_bio_counter_dec(fs_info);
	btrfs_bio_end_io(orig_bbio, ret);
	/* Do not submit another chunk */
	return true;
}

void btrfs_submit_bio(struct btrfs_bio *bbio, int mirror_num)
{
	while (!btrfs_submit_chunk(bbio, mirror_num))
		;
}

/*
 * Submit a repair write.
 *
 * This bypasses btrfs_submit_bio deliberately, as that writes all copies in a
 * RAID setup.  Here we only want to write the one bad copy, so we do the
 * mapping ourselves and submit the bio directly.
 *
 * The I/O is issued synchronously to block the repair read completion from
 * freeing the bio.
 */
int btrfs_repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
			    u64 length, u64 logical, struct page *page,
			    unsigned int pg_offset, int mirror_num)
{
	struct btrfs_device *dev;
	struct bio_vec bvec;
	struct bio bio;
	u64 map_length = 0;
	u64 sector;
	struct btrfs_io_context *bioc = NULL;
	int ret = 0;

	ASSERT(!(fs_info->sb->s_flags & SB_RDONLY));
	BUG_ON(!mirror_num);

	if (btrfs_repair_one_zone(fs_info, logical))
		return 0;

	map_length = length;

	/*
	 * Avoid races with device replace and make sure our bioc has devices
	 * associated to its stripes that don't go away while we are doing the
	 * read repair operation.
	 */
	btrfs_bio_counter_inc_blocked(fs_info);
	if (btrfs_is_parity_mirror(fs_info, logical, length)) {
		/*
		 * Note that we don't use BTRFS_MAP_WRITE because it's supposed
		 * to update all raid stripes, but here we just want to correct
		 * the bad stripe, thus BTRFS_MAP_READ is abused to only get
		 * the bad stripe's dev and sector.
		 */
		ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
				      &map_length, &bioc, 0);
		if (ret)
			goto out_counter_dec;
		ASSERT(bioc->mirror_num == 1);
	} else {
		ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical,
				      &map_length, &bioc, mirror_num);
		if (ret)
			goto out_counter_dec;
		/*
		 * This happens when dev-replace is also running, and the
		 * mirror_num indicates the dev-replace target.
		 *
		 * In this case, we don't need to do anything, as the read
		 * error just means the replace progress hasn't reached our
		 * read range, and the replace routine will handle it later.
		 */
		if (mirror_num != bioc->mirror_num)
			goto out_counter_dec;
	}

	sector = bioc->stripes[bioc->mirror_num - 1].physical >> 9;
	dev = bioc->stripes[bioc->mirror_num - 1].dev;
	btrfs_put_bioc(bioc);

	if (!dev || !dev->bdev ||
	    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
		ret = -EIO;
		goto out_counter_dec;
	}

	bio_init(&bio, dev->bdev, &bvec, 1, REQ_OP_WRITE | REQ_SYNC);
	bio.bi_iter.bi_sector = sector;
	__bio_add_page(&bio, page, length, pg_offset);

	btrfsic_check_bio(&bio);
	ret = submit_bio_wait(&bio);
	if (ret) {
		/* try to remap that extent elsewhere? */
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
		goto out_bio_uninit;
	}

	btrfs_info_rl_in_rcu(fs_info,
		"read error corrected: ino %llu off %llu (dev %s sector %llu)",
			     ino, start, btrfs_dev_name(dev), sector);
	ret = 0;

out_bio_uninit:
	bio_uninit(&bio);
out_counter_dec:
	btrfs_bio_counter_dec(fs_info);
	return ret;
}

int __init btrfs_bioset_init(void)
{
	if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE,
			offsetof(struct btrfs_bio, bio),
			BIOSET_NEED_BVECS))
		return -ENOMEM;
	if (bioset_init(&btrfs_clone_bioset, BIO_POOL_SIZE,
			offsetof(struct btrfs_bio, bio), 0))
		goto out_free_bioset;
	if (bioset_init(&btrfs_repair_bioset, BIO_POOL_SIZE,
			offsetof(struct btrfs_bio, bio),
			BIOSET_NEED_BVECS))
		goto out_free_clone_bioset;
	if (mempool_init_kmalloc_pool(&btrfs_failed_bio_pool, BIO_POOL_SIZE,
				      sizeof(struct btrfs_failed_bio)))
		goto out_free_repair_bioset;
	return 0;

out_free_repair_bioset:
	bioset_exit(&btrfs_repair_bioset);
out_free_clone_bioset:
	bioset_exit(&btrfs_clone_bioset);
out_free_bioset:
	bioset_exit(&btrfs_bioset);
	return -ENOMEM;
}

void __cold btrfs_bioset_exit(void)
{
	mempool_exit(&btrfs_failed_bio_pool);
	bioset_exit(&btrfs_repair_bioset);
	bioset_exit(&btrfs_clone_bioset);
	bioset_exit(&btrfs_bioset);
}