// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 * Copyright (C) 2022 Christoph Hellwig.
 */

#include <linux/bio.h>
#include "bio.h"
#include "ctree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "dev-replace.h"
#include "rcu-string.h"
#include "zoned.h"
#include "file-item.h"

static struct bio_set btrfs_bioset;
static struct bio_set btrfs_clone_bioset;
static struct bio_set btrfs_repair_bioset;
static mempool_t btrfs_failed_bio_pool;

struct btrfs_failed_bio {
        struct btrfs_bio *bbio;
        int num_copies;
        atomic_t repair_count;
};

/*
 * Initialize a btrfs_bio structure. This skips the embedded bio itself as it
 * is already initialized by the block layer.
 */
void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_fs_info *fs_info,
                    btrfs_bio_end_io_t end_io, void *private)
{
        memset(bbio, 0, offsetof(struct btrfs_bio, bio));
        bbio->fs_info = fs_info;
        bbio->end_io = end_io;
        bbio->private = private;
        atomic_set(&bbio->pending_ios, 1);
}

/*
 * Allocate a btrfs_bio structure. The btrfs_bio is the main I/O container for
 * btrfs, and is used for all I/O submitted through btrfs_submit_bio.
 *
 * Just like the underlying bio_alloc_bioset it will not fail as it is backed by
 * a mempool.
 */
struct btrfs_bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,
                                  struct btrfs_fs_info *fs_info,
                                  btrfs_bio_end_io_t end_io, void *private)
{
        struct btrfs_bio *bbio;
        struct bio *bio;

        bio = bio_alloc_bioset(NULL, nr_vecs, opf, GFP_NOFS, &btrfs_bioset);
        bbio = btrfs_bio(bio);
        btrfs_bio_init(bbio, fs_info, end_io, private);
        return bbio;
}

static struct btrfs_bio *btrfs_split_bio(struct btrfs_fs_info *fs_info,
                                         struct btrfs_bio *orig_bbio,
                                         u64 map_length, bool use_append)
{
        struct btrfs_bio *bbio;
        struct bio *bio;

        if (use_append) {
                unsigned int nr_segs;

                bio = bio_split_rw(&orig_bbio->bio, &fs_info->limits, &nr_segs,
                                   &btrfs_clone_bioset, map_length);
        } else {
                bio = bio_split(&orig_bbio->bio, map_length >> SECTOR_SHIFT,
                                GFP_NOFS, &btrfs_clone_bioset);
        }
        bbio = btrfs_bio(bio);
        btrfs_bio_init(bbio, fs_info, NULL, orig_bbio);
        bbio->inode = orig_bbio->inode;
        bbio->file_offset = orig_bbio->file_offset;
        orig_bbio->file_offset += map_length;

        atomic_inc(&orig_bbio->pending_ios);
        return bbio;
}

static void btrfs_orig_write_end_io(struct bio *bio);

static void btrfs_bbio_propagate_error(struct btrfs_bio *bbio,
                                       struct btrfs_bio *orig_bbio)
{
        /*
         * For writes we tolerate nr_mirrors - 1 write failures, so we can't
         * just blindly propagate a write failure here. Instead increment the
         * error count in the original I/O context so that it is guaranteed to
         * be larger than the error tolerance.
         */
        if (bbio->bio.bi_end_io == &btrfs_orig_write_end_io) {
                struct btrfs_io_stripe *orig_stripe = orig_bbio->bio.bi_private;
                struct btrfs_io_context *orig_bioc = orig_stripe->bioc;

                atomic_add(orig_bioc->max_errors, &orig_bioc->error);
        } else {
                orig_bbio->bio.bi_status = bbio->bio.bi_status;
        }
}

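/*
 * End I/O for a possibly split btrfs_bio.
 *
 * If @bbio was split off an original bio, propagate any error to the original
 * and free the split part. The original bio only completes once all
 * outstanding split parts have finished.
 */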
static void btrfs_orig_bbio_end_io(struct btrfs_bio *bbio)
{
        if (bbio->bio.bi_pool == &btrfs_clone_bioset) {
                struct btrfs_bio *orig_bbio = bbio->private;

                if (bbio->bio.bi_status)
                        btrfs_bbio_propagate_error(bbio, orig_bbio);
                bio_put(&bbio->bio);
                bbio = orig_bbio;
        }

        if (atomic_dec_and_test(&bbio->pending_ios))
                bbio->end_io(bbio);
}

static int next_repair_mirror(struct btrfs_failed_bio *fbio, int cur_mirror)
{
        if (cur_mirror == fbio->num_copies)
                return cur_mirror + 1 - fbio->num_copies;
        return cur_mirror + 1;
}

static int prev_repair_mirror(struct btrfs_failed_bio *fbio, int cur_mirror)
{
        if (cur_mirror == 1)
                return fbio->num_copies;
        return cur_mirror - 1;
}

static void btrfs_repair_done(struct btrfs_failed_bio *fbio)
{
        if (atomic_dec_and_test(&fbio->repair_count)) {
                btrfs_orig_bbio_end_io(fbio->bbio);
                mempool_free(fbio, &btrfs_failed_bio_pool);
        }
}

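/*
 * Completion handler for a single-sector repair read.
 *
 * If the read failed or its checksum does not match, try the next mirror. On
 * success, write the good data back to every mirror that was tried before and
 * had failed.
 */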
static void btrfs_end_repair_bio(struct btrfs_bio *repair_bbio,
                                 struct btrfs_device *dev)
{
        struct btrfs_failed_bio *fbio = repair_bbio->private;
        struct btrfs_inode *inode = repair_bbio->inode;
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        struct bio_vec *bv = bio_first_bvec_all(&repair_bbio->bio);
        int mirror = repair_bbio->mirror_num;

        if (repair_bbio->bio.bi_status ||
            !btrfs_data_csum_ok(repair_bbio, dev, 0, bv)) {
                bio_reset(&repair_bbio->bio, NULL, REQ_OP_READ);
                repair_bbio->bio.bi_iter = repair_bbio->saved_iter;

                mirror = next_repair_mirror(fbio, mirror);
                if (mirror == fbio->bbio->mirror_num) {
                        btrfs_debug(fs_info, "no mirror left");
                        fbio->bbio->bio.bi_status = BLK_STS_IOERR;
                        goto done;
                }

                btrfs_submit_bio(repair_bbio, mirror);
                return;
        }

        do {
                mirror = prev_repair_mirror(fbio, mirror);
                btrfs_repair_io_failure(fs_info, btrfs_ino(inode),
                                repair_bbio->file_offset, fs_info->sectorsize,
                                repair_bbio->saved_iter.bi_sector << SECTOR_SHIFT,
                                bv->bv_page, bv->bv_offset, mirror);
        } while (mirror != fbio->bbio->mirror_num);

done:
        btrfs_repair_done(fbio);
        bio_put(&repair_bbio->bio);
}

/*
 * Try to kick off a repair read to the next available mirror for a bad sector.
 *
 * This primarily tries to recover good data to serve the actual read request,
 * but also tries to write the good data back to the bad mirror(s) when a read
 * succeeds, to restore the redundancy.
 */
static struct btrfs_failed_bio *repair_one_sector(struct btrfs_bio *failed_bbio,
                                                  u32 bio_offset,
                                                  struct bio_vec *bv,
                                                  struct btrfs_failed_bio *fbio)
{
        struct btrfs_inode *inode = failed_bbio->inode;
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        const u32 sectorsize = fs_info->sectorsize;
        const u64 logical = (failed_bbio->saved_iter.bi_sector << SECTOR_SHIFT);
        struct btrfs_bio *repair_bbio;
        struct bio *repair_bio;
        int num_copies;
        int mirror;

        btrfs_debug(fs_info, "repair read error: read error at %llu",
                    failed_bbio->file_offset + bio_offset);

        num_copies = btrfs_num_copies(fs_info, logical, sectorsize);
        if (num_copies == 1) {
                btrfs_debug(fs_info, "no copy to repair from");
                failed_bbio->bio.bi_status = BLK_STS_IOERR;
                return fbio;
        }

        if (!fbio) {
                fbio = mempool_alloc(&btrfs_failed_bio_pool, GFP_NOFS);
                fbio->bbio = failed_bbio;
                fbio->num_copies = num_copies;
                atomic_set(&fbio->repair_count, 1);
        }

        atomic_inc(&fbio->repair_count);

        repair_bio = bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_NOFS,
                                      &btrfs_repair_bioset);
        repair_bio->bi_iter.bi_sector = failed_bbio->saved_iter.bi_sector;
        __bio_add_page(repair_bio, bv->bv_page, bv->bv_len, bv->bv_offset);

        repair_bbio = btrfs_bio(repair_bio);
        btrfs_bio_init(repair_bbio, fs_info, NULL, fbio);
        repair_bbio->inode = failed_bbio->inode;
        repair_bbio->file_offset = failed_bbio->file_offset + bio_offset;

        mirror = next_repair_mirror(fbio, failed_bbio->mirror_num);
        btrfs_debug(fs_info, "submitting repair read to mirror %d", mirror);
        btrfs_submit_bio(repair_bbio, mirror);
        return fbio;
}

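/*
 * Verify the checksum of each sector of a completed data read and kick off a
 * repair read for every sector that failed verification or was reported as an
 * I/O error.
 */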
static void btrfs_check_read_bio(struct btrfs_bio *bbio, struct btrfs_device *dev)
{
        struct btrfs_inode *inode = bbio->inode;
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        u32 sectorsize = fs_info->sectorsize;
        struct bvec_iter *iter = &bbio->saved_iter;
        blk_status_t status = bbio->bio.bi_status;
        struct btrfs_failed_bio *fbio = NULL;
        u32 offset = 0;

        /* Read-repair requires the inode field to be set by the submitter. */
        ASSERT(inode);

        /*
         * Hand off repair bios to the repair code as there is no upper level
         * submitter for them.
         */
        if (bbio->bio.bi_pool == &btrfs_repair_bioset) {
                btrfs_end_repair_bio(bbio, dev);
                return;
        }

        /* Clear the I/O error. A failed repair will reset it. */
        bbio->bio.bi_status = BLK_STS_OK;

        while (iter->bi_size) {
                struct bio_vec bv = bio_iter_iovec(&bbio->bio, *iter);

                bv.bv_len = min(bv.bv_len, sectorsize);
                if (status || !btrfs_data_csum_ok(bbio, dev, offset, &bv))
                        fbio = repair_one_sector(bbio, offset, &bv, fbio);

                bio_advance_iter_single(&bbio->bio, iter, sectorsize);
                offset += sectorsize;
        }

        if (bbio->csum != bbio->csum_inline)
                kfree(bbio->csum);

        if (fbio)
                btrfs_repair_done(fbio);
        else
                btrfs_orig_bbio_end_io(bbio);
}

static void btrfs_log_dev_io_error(struct bio *bio, struct btrfs_device *dev)
{
        if (!dev || !dev->bdev)
                return;
        if (bio->bi_status != BLK_STS_IOERR && bio->bi_status != BLK_STS_TARGET)
                return;

        if (btrfs_op(bio) == BTRFS_MAP_WRITE)
                btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
        else if (!(bio->bi_opf & REQ_RAHEAD))
                btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
        if (bio->bi_opf & REQ_PREFLUSH)
                btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_FLUSH_ERRS);
}

static struct workqueue_struct *btrfs_end_io_wq(struct btrfs_fs_info *fs_info,
                                                struct bio *bio)
{
        if (bio->bi_opf & REQ_META)
                return fs_info->endio_meta_workers;
        return fs_info->endio_workers;
}

static void btrfs_end_bio_work(struct work_struct *work)
{
        struct btrfs_bio *bbio = container_of(work, struct btrfs_bio, end_io_work);

        /* Metadata reads are checked and repaired by the submitter. */
        if (bbio->inode && !(bbio->bio.bi_opf & REQ_META))
                btrfs_check_read_bio(bbio, bbio->bio.bi_private);
        else
                btrfs_orig_bbio_end_io(bbio);
}

static void btrfs_simple_end_io(struct bio *bio)
{
        struct btrfs_bio *bbio = btrfs_bio(bio);
        struct btrfs_device *dev = bio->bi_private;
        struct btrfs_fs_info *fs_info = bbio->fs_info;

        btrfs_bio_counter_dec(fs_info);

        if (bio->bi_status)
                btrfs_log_dev_io_error(bio, dev);

        if (bio_op(bio) == REQ_OP_READ) {
                INIT_WORK(&bbio->end_io_work, btrfs_end_bio_work);
                queue_work(btrfs_end_io_wq(fs_info, bio), &bbio->end_io_work);
        } else {
                if (bio_op(bio) == REQ_OP_ZONE_APPEND && !bio->bi_status)
                        btrfs_record_physical_zoned(bbio);
                btrfs_orig_bbio_end_io(bbio);
        }
}

static void btrfs_raid56_end_io(struct bio *bio)
{
        struct btrfs_io_context *bioc = bio->bi_private;
        struct btrfs_bio *bbio = btrfs_bio(bio);

        btrfs_bio_counter_dec(bioc->fs_info);
        bbio->mirror_num = bioc->mirror_num;
        if (bio_op(bio) == REQ_OP_READ && bbio->inode &&
            !(bbio->bio.bi_opf & REQ_META))
                btrfs_check_read_bio(bbio, NULL);
        else
                btrfs_orig_bbio_end_io(bbio);

        btrfs_put_bioc(bioc);
}

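/*
 * Completion handler for the original (embedded) bio of a mirrored write.
 * The write is only failed towards the upper layers if more copies failed
 * than the bioc can tolerate.
 */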
static void btrfs_orig_write_end_io(struct bio *bio)
{
        struct btrfs_io_stripe *stripe = bio->bi_private;
        struct btrfs_io_context *bioc = stripe->bioc;
        struct btrfs_bio *bbio = btrfs_bio(bio);

        btrfs_bio_counter_dec(bioc->fs_info);

        if (bio->bi_status) {
                atomic_inc(&bioc->error);
                btrfs_log_dev_io_error(bio, stripe->dev);
        }

        /*
         * Only send an error to the higher layers if it is beyond the tolerance
         * threshold.
         */
        if (atomic_read(&bioc->error) > bioc->max_errors)
                bio->bi_status = BLK_STS_IOERR;
        else
                bio->bi_status = BLK_STS_OK;

        btrfs_orig_bbio_end_io(bbio);
        btrfs_put_bioc(bioc);
}

static void btrfs_clone_write_end_io(struct bio *bio)
{
        struct btrfs_io_stripe *stripe = bio->bi_private;

        if (bio->bi_status) {
                atomic_inc(&stripe->bioc->error);
                btrfs_log_dev_io_error(bio, stripe->dev);
        }

        /* Pass on control to the original bio this one was cloned from */
        bio_endio(stripe->bioc->orig_bio);
        bio_put(bio);
}

static void btrfs_submit_dev_bio(struct btrfs_device *dev, struct bio *bio)
{
        if (!dev || !dev->bdev ||
            test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
            (btrfs_op(bio) == BTRFS_MAP_WRITE &&
             !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
                bio_io_error(bio);
                return;
        }

        bio_set_dev(bio, dev->bdev);

        /*
         * For zone append writing, bi_sector must point to the beginning of
         * the zone.
         */
        if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
                u64 physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
                u64 zone_start = round_down(physical, dev->fs_info->zone_size);

                ASSERT(btrfs_dev_is_sequential(dev, physical));
                bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT;
        }
        btrfs_debug_in_rcu(dev->fs_info,
                "%s: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
                __func__, bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector,
                (unsigned long)dev->bdev->bd_dev, btrfs_dev_name(dev),
                dev->devid, bio->bi_iter.bi_size);

        btrfsic_check_bio(bio);

        if (bio->bi_opf & REQ_BTRFS_CGROUP_PUNT)
                blkcg_punt_bio_submit(bio);
        else
                submit_bio(bio);
}

static void btrfs_submit_mirrored_bio(struct btrfs_io_context *bioc, int dev_nr)
{
        struct bio *orig_bio = bioc->orig_bio, *bio;

        ASSERT(bio_op(orig_bio) != REQ_OP_READ);

        /* Reuse the bio embedded into the btrfs_bio for the last mirror */
        if (dev_nr == bioc->num_stripes - 1) {
                bio = orig_bio;
                bio->bi_end_io = btrfs_orig_write_end_io;
        } else {
                bio = bio_alloc_clone(NULL, orig_bio, GFP_NOFS, &fs_bio_set);
                bio_inc_remaining(orig_bio);
                bio->bi_end_io = btrfs_clone_write_end_io;
        }

        bio->bi_private = &bioc->stripes[dev_nr];
        bio->bi_iter.bi_sector = bioc->stripes[dev_nr].physical >> SECTOR_SHIFT;
        bioc->stripes[dev_nr].bioc = bioc;
        btrfs_submit_dev_bio(bioc->stripes[dev_nr].dev, bio);
}

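/*
 * Submit an already mapped bio: either directly to a single device, to the
 * raid56 code for parity writes and read recovery, or cloned to every mirror
 * for a mirrored write.
 */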
static void __btrfs_submit_bio(struct bio *bio, struct btrfs_io_context *bioc,
                               struct btrfs_io_stripe *smap, int mirror_num)
{
        if (!bioc) {
                /* Single mirror read/write fast path. */
                btrfs_bio(bio)->mirror_num = mirror_num;
                bio->bi_iter.bi_sector = smap->physical >> SECTOR_SHIFT;
                if (bio_op(bio) != REQ_OP_READ)
                        btrfs_bio(bio)->orig_physical = smap->physical;
                bio->bi_private = smap->dev;
                bio->bi_end_io = btrfs_simple_end_io;
                btrfs_submit_dev_bio(smap->dev, bio);
        } else if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
                /* Parity RAID write or read recovery. */
                bio->bi_private = bioc;
                bio->bi_end_io = btrfs_raid56_end_io;
                if (bio_op(bio) == REQ_OP_READ)
                        raid56_parity_recover(bio, bioc, mirror_num);
                else
                        raid56_parity_write(bio, bioc);
        } else {
                /* Write to multiple mirrors. */
                int total_devs = bioc->num_stripes;

                bioc->orig_bio = bio;
                for (int dev_nr = 0; dev_nr < total_devs; dev_nr++)
                        btrfs_submit_mirrored_bio(bioc, dev_nr);
        }
}

static blk_status_t btrfs_bio_csum(struct btrfs_bio *bbio)
{
        if (bbio->bio.bi_opf & REQ_META)
                return btree_csum_one_bio(bbio);
        return btrfs_csum_one_bio(bbio);
}

/*
 * Async submit bios are used to offload expensive checksumming onto the worker
 * threads.
 */
struct async_submit_bio {
        struct btrfs_bio *bbio;
        struct btrfs_io_context *bioc;
        struct btrfs_io_stripe smap;
        int mirror_num;
        struct btrfs_work work;
};

/*
 * In order to insert checksums into the metadata in large chunks, we wait
 * until bio submission time. All the pages in the bio are checksummed and
 * sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record are
 * inserted into the btree.
 */
static void run_one_async_start(struct btrfs_work *work)
{
        struct async_submit_bio *async =
                container_of(work, struct async_submit_bio, work);
        blk_status_t ret;

        ret = btrfs_bio_csum(async->bbio);
        if (ret)
                async->bbio->bio.bi_status = ret;
}

/*
 * In order to insert checksums into the metadata in large chunks, we wait
 * until bio submission time. All the pages in the bio are checksummed and
 * sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record are
 * inserted into the tree.
 */
static void run_one_async_done(struct btrfs_work *work)
{
        struct async_submit_bio *async =
                container_of(work, struct async_submit_bio, work);
        struct bio *bio = &async->bbio->bio;

        /* If an error occurred we just want to clean up the bio and move on. */
        if (bio->bi_status) {
                btrfs_orig_bbio_end_io(async->bbio);
                return;
        }

        /*
         * All of the bios that pass through here are from async helpers.
         * Use REQ_BTRFS_CGROUP_PUNT to issue them from the owning cgroup's
         * context. This changes nothing when cgroups aren't in use.
         */
        bio->bi_opf |= REQ_BTRFS_CGROUP_PUNT;
        __btrfs_submit_bio(bio, async->bioc, &async->smap, async->mirror_num);
}

static void run_one_async_free(struct btrfs_work *work)
{
        kfree(container_of(work, struct async_submit_bio, work));
}

static bool should_async_write(struct btrfs_bio *bbio)
{
        /* Submit synchronously if the checksum implementation is fast. */
        if (test_bit(BTRFS_FS_CSUM_IMPL_FAST, &bbio->fs_info->flags))
                return false;

        /*
         * Try to defer the submission to a workqueue to parallelize the
         * checksum calculation unless the I/O is issued synchronously.
         */
        if (op_is_sync(bbio->bio.bi_opf))
                return false;

        /* Zoned devices require I/O to be submitted in order. */
        if ((bbio->bio.bi_opf & REQ_META) && btrfs_is_zoned(bbio->fs_info))
                return false;

        return true;
}

/*
 * Submit bio to an async queue.
 *
 * Return true if the work has been successfully submitted, else false.
 */
static bool btrfs_wq_submit_bio(struct btrfs_bio *bbio,
                                struct btrfs_io_context *bioc,
                                struct btrfs_io_stripe *smap, int mirror_num)
{
        struct btrfs_fs_info *fs_info = bbio->fs_info;
        struct async_submit_bio *async;

        async = kmalloc(sizeof(*async), GFP_NOFS);
        if (!async)
                return false;

        async->bbio = bbio;
        async->bioc = bioc;
        async->smap = *smap;
        async->mirror_num = mirror_num;

        btrfs_init_work(&async->work, run_one_async_start, run_one_async_done,
                        run_one_async_free);
        btrfs_queue_work(fs_info->workers, &async->work);
        return true;
}

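/*
 * Map and submit the part of a btrfs_bio that can be issued in one mapping.
 * If the bio is larger than that, the front part is split off and submitted
 * here while the remainder stays in the original bbio.
 *
 * Returns true once the whole bio has been handled (submitted or failed),
 * false if the caller has to resubmit the remaining part.
 */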
static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
{
        struct btrfs_inode *inode = bbio->inode;
        struct btrfs_fs_info *fs_info = bbio->fs_info;
        struct btrfs_bio *orig_bbio = bbio;
        struct bio *bio = &bbio->bio;
        u64 logical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
        u64 length = bio->bi_iter.bi_size;
        u64 map_length = length;
        bool use_append = btrfs_use_zone_append(bbio);
        struct btrfs_io_context *bioc = NULL;
        struct btrfs_io_stripe smap;
        blk_status_t ret;
        int error;

        btrfs_bio_counter_inc_blocked(fs_info);
        error = btrfs_map_block(fs_info, btrfs_op(bio), logical, &map_length,
                                &bioc, &smap, &mirror_num, 1);
        if (error) {
                ret = errno_to_blk_status(error);
                goto fail;
        }

        map_length = min(map_length, length);
        if (use_append)
                map_length = min(map_length, fs_info->max_zone_append_size);

        if (map_length < length) {
                bbio = btrfs_split_bio(fs_info, bbio, map_length, use_append);
                bio = &bbio->bio;
        }

        /*
         * Save the iter for the end_io handler and preload the checksums for
         * data reads.
         */
        if (bio_op(bio) == REQ_OP_READ && inode && !(bio->bi_opf & REQ_META)) {
                bbio->saved_iter = bio->bi_iter;
                ret = btrfs_lookup_bio_sums(bbio);
                if (ret)
                        goto fail_put_bio;
        }

        if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
                if (use_append) {
                        bio->bi_opf &= ~REQ_OP_WRITE;
                        bio->bi_opf |= REQ_OP_ZONE_APPEND;
                }

                /*
                 * Csum items for reloc roots have already been cloned at this
                 * point, so they are handled as part of the no-checksum case.
                 */
                if (inode && !(inode->flags & BTRFS_INODE_NODATASUM) &&
                    !test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state) &&
                    !btrfs_is_data_reloc_root(inode->root)) {
                        if (should_async_write(bbio) &&
                            btrfs_wq_submit_bio(bbio, bioc, &smap, mirror_num))
                                goto done;

                        ret = btrfs_bio_csum(bbio);
                        if (ret)
                                goto fail_put_bio;
                } else if (use_append) {
                        ret = btrfs_alloc_dummy_sum(bbio);
                        if (ret)
                                goto fail_put_bio;
                }
        }

        __btrfs_submit_bio(bio, bioc, &smap, mirror_num);
done:
        return map_length == length;

fail_put_bio:
        if (map_length < length)
                bio_put(bio);
fail:
        btrfs_bio_counter_dec(fs_info);
        btrfs_bio_end_io(orig_bbio, ret);
        /* Do not submit another chunk */
        return true;
}

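/*
 * Main entry point for submitting a btrfs_bio: keep submitting split-off
 * pieces until the whole bio has been issued.
 */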
void btrfs_submit_bio(struct btrfs_bio *bbio, int mirror_num)
{
        /* If bbio->inode is not populated, its file_offset must be 0. */
        ASSERT(bbio->inode || bbio->file_offset == 0);

        while (!btrfs_submit_chunk(bbio, mirror_num))
                ;
}

/*
 * Submit a repair write.
 *
 * This bypasses btrfs_submit_bio deliberately, as that writes all copies in a
 * RAID setup. Here we only want to write the one bad copy, so we do the
 * mapping ourselves and submit the bio directly.
 *
 * The I/O is issued synchronously to block the repair read completion from
 * freeing the bio.
 */
int btrfs_repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
                            u64 length, u64 logical, struct page *page,
                            unsigned int pg_offset, int mirror_num)
{
        struct btrfs_io_stripe smap = { 0 };
        struct bio_vec bvec;
        struct bio bio;
        int ret = 0;

        ASSERT(!(fs_info->sb->s_flags & SB_RDONLY));
        BUG_ON(!mirror_num);

        if (btrfs_repair_one_zone(fs_info, logical))
                return 0;

        /*
         * Avoid races with device replace and make sure our bioc has devices
         * associated to its stripes that don't go away while we are doing the
         * read repair operation.
         */
        btrfs_bio_counter_inc_blocked(fs_info);
        ret = btrfs_map_repair_block(fs_info, &smap, logical, length, mirror_num);
        if (ret < 0)
                goto out_counter_dec;

        if (!smap.dev->bdev ||
            !test_bit(BTRFS_DEV_STATE_WRITEABLE, &smap.dev->dev_state)) {
                ret = -EIO;
                goto out_counter_dec;
        }

        bio_init(&bio, smap.dev->bdev, &bvec, 1, REQ_OP_WRITE | REQ_SYNC);
        bio.bi_iter.bi_sector = smap.physical >> SECTOR_SHIFT;
        __bio_add_page(&bio, page, length, pg_offset);

        btrfsic_check_bio(&bio);
        ret = submit_bio_wait(&bio);
        if (ret) {
                /* try to remap that extent elsewhere? */
                btrfs_dev_stat_inc_and_print(smap.dev, BTRFS_DEV_STAT_WRITE_ERRS);
                goto out_bio_uninit;
        }

        btrfs_info_rl_in_rcu(fs_info,
                "read error corrected: ino %llu off %llu (dev %s sector %llu)",
                ino, start, btrfs_dev_name(smap.dev),
                smap.physical >> SECTOR_SHIFT);
        ret = 0;

out_bio_uninit:
        bio_uninit(&bio);
out_counter_dec:
        btrfs_bio_counter_dec(fs_info);
        return ret;
}

/*
 * Submit a btrfs_bio based repair write.
 *
 * If @dev_replace is true, the write is submitted to the dev-replace target
 * device instead of the mapped source device.
 */
void btrfs_submit_repair_write(struct btrfs_bio *bbio, int mirror_num, bool dev_replace)
{
        struct btrfs_fs_info *fs_info = bbio->fs_info;
        u64 logical = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
        u64 length = bbio->bio.bi_iter.bi_size;
        struct btrfs_io_stripe smap = { 0 };
        int ret;

        ASSERT(fs_info);
        ASSERT(mirror_num > 0);
        ASSERT(btrfs_op(&bbio->bio) == BTRFS_MAP_WRITE);
        ASSERT(!bbio->inode);

        btrfs_bio_counter_inc_blocked(fs_info);
        ret = btrfs_map_repair_block(fs_info, &smap, logical, length, mirror_num);
        if (ret < 0)
                goto fail;

        if (dev_replace) {
                ASSERT(smap.dev == fs_info->dev_replace.srcdev);
                smap.dev = fs_info->dev_replace.tgtdev;
        }
        __btrfs_submit_bio(&bbio->bio, NULL, &smap, mirror_num);
        return;

fail:
        btrfs_bio_counter_dec(fs_info);
        btrfs_bio_end_io(bbio, errno_to_blk_status(ret));
}

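/*
 * Set up the bio_sets and the mempool used by the btrfs bio layer.
 */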
int __init btrfs_bioset_init(void)
{
        if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE,
                        offsetof(struct btrfs_bio, bio),
                        BIOSET_NEED_BVECS))
                return -ENOMEM;
        if (bioset_init(&btrfs_clone_bioset, BIO_POOL_SIZE,
                        offsetof(struct btrfs_bio, bio), 0))
                goto out_free_bioset;
        if (bioset_init(&btrfs_repair_bioset, BIO_POOL_SIZE,
                        offsetof(struct btrfs_bio, bio),
                        BIOSET_NEED_BVECS))
                goto out_free_clone_bioset;
        if (mempool_init_kmalloc_pool(&btrfs_failed_bio_pool, BIO_POOL_SIZE,
                                      sizeof(struct btrfs_failed_bio)))
                goto out_free_repair_bioset;
        return 0;

out_free_repair_bioset:
        bioset_exit(&btrfs_repair_bioset);
out_free_clone_bioset:
        bioset_exit(&btrfs_clone_bioset);
out_free_bioset:
        bioset_exit(&btrfs_bioset);
        return -ENOMEM;
}

void __cold btrfs_bioset_exit(void)
{
        mempool_exit(&btrfs_failed_bio_pool);
        bioset_exit(&btrfs_repair_bioset);
        bioset_exit(&btrfs_clone_bioset);
        bioset_exit(&btrfs_bioset);
}