1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2007 Oracle. All rights reserved. 4 * Copyright (C) 2022 Christoph Hellwig. 5 */ 6 7 #include <linux/bio.h> 8 #include "bio.h" 9 #include "ctree.h" 10 #include "volumes.h" 11 #include "raid56.h" 12 #include "async-thread.h" 13 #include "check-integrity.h" 14 #include "dev-replace.h" 15 #include "rcu-string.h" 16 #include "zoned.h" 17 #include "file-item.h" 18 19 static struct bio_set btrfs_bioset; 20 static struct bio_set btrfs_clone_bioset; 21 static struct bio_set btrfs_repair_bioset; 22 static mempool_t btrfs_failed_bio_pool; 23 24 struct btrfs_failed_bio { 25 struct btrfs_bio *bbio; 26 int num_copies; 27 atomic_t repair_count; 28 }; 29 30 /* 31 * Initialize a btrfs_bio structure. This skips the embedded bio itself as it 32 * is already initialized by the block layer. 33 */ 34 void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_fs_info *fs_info, 35 btrfs_bio_end_io_t end_io, void *private) 36 { 37 memset(bbio, 0, offsetof(struct btrfs_bio, bio)); 38 bbio->fs_info = fs_info; 39 bbio->end_io = end_io; 40 bbio->private = private; 41 atomic_set(&bbio->pending_ios, 1); 42 } 43 44 /* 45 * Allocate a btrfs_bio structure. The btrfs_bio is the main I/O container for 46 * btrfs, and is used for all I/O submitted through btrfs_submit_bio. 47 * 48 * Just like the underlying bio_alloc_bioset it will not fail as it is backed by 49 * a mempool. 50 */ 51 struct btrfs_bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf, 52 struct btrfs_fs_info *fs_info, 53 btrfs_bio_end_io_t end_io, void *private) 54 { 55 struct btrfs_bio *bbio; 56 struct bio *bio; 57 58 bio = bio_alloc_bioset(NULL, nr_vecs, opf, GFP_NOFS, &btrfs_bioset); 59 bbio = btrfs_bio(bio); 60 btrfs_bio_init(bbio, fs_info, end_io, private); 61 return bbio; 62 } 63 64 static struct btrfs_bio *btrfs_split_bio(struct btrfs_fs_info *fs_info, 65 struct btrfs_bio *orig_bbio, 66 u64 map_length, bool use_append) 67 { 68 struct btrfs_bio *bbio; 69 struct bio *bio; 70 71 if (use_append) { 72 unsigned int nr_segs; 73 74 bio = bio_split_rw(&orig_bbio->bio, &fs_info->limits, &nr_segs, 75 &btrfs_clone_bioset, map_length); 76 } else { 77 bio = bio_split(&orig_bbio->bio, map_length >> SECTOR_SHIFT, 78 GFP_NOFS, &btrfs_clone_bioset); 79 } 80 bbio = btrfs_bio(bio); 81 btrfs_bio_init(bbio, fs_info, NULL, orig_bbio); 82 bbio->inode = orig_bbio->inode; 83 bbio->file_offset = orig_bbio->file_offset; 84 orig_bbio->file_offset += map_length; 85 86 atomic_inc(&orig_bbio->pending_ios); 87 return bbio; 88 } 89 90 static void btrfs_orig_write_end_io(struct bio *bio); 91 92 static void btrfs_bbio_propagate_error(struct btrfs_bio *bbio, 93 struct btrfs_bio *orig_bbio) 94 { 95 /* 96 * For writes we tolerate nr_mirrors - 1 write failures, so we can't 97 * just blindly propagate a write failure here. Instead increment the 98 * error count in the original I/O context so that it is guaranteed to 99 * be larger than the error tolerance. 100 */ 101 if (bbio->bio.bi_end_io == &btrfs_orig_write_end_io) { 102 struct btrfs_io_stripe *orig_stripe = orig_bbio->bio.bi_private; 103 struct btrfs_io_context *orig_bioc = orig_stripe->bioc; 104 105 atomic_add(orig_bioc->max_errors, &orig_bioc->error); 106 } else { 107 orig_bbio->bio.bi_status = bbio->bio.bi_status; 108 } 109 } 110 111 static void btrfs_orig_bbio_end_io(struct btrfs_bio *bbio) 112 { 113 if (bbio->bio.bi_pool == &btrfs_clone_bioset) { 114 struct btrfs_bio *orig_bbio = bbio->private; 115 116 if (bbio->bio.bi_status) 117 btrfs_bbio_propagate_error(bbio, orig_bbio); 118 bio_put(&bbio->bio); 119 bbio = orig_bbio; 120 } 121 122 if (atomic_dec_and_test(&bbio->pending_ios)) 123 bbio->end_io(bbio); 124 } 125 126 static int next_repair_mirror(struct btrfs_failed_bio *fbio, int cur_mirror) 127 { 128 if (cur_mirror == fbio->num_copies) 129 return cur_mirror + 1 - fbio->num_copies; 130 return cur_mirror + 1; 131 } 132 133 static int prev_repair_mirror(struct btrfs_failed_bio *fbio, int cur_mirror) 134 { 135 if (cur_mirror == 1) 136 return fbio->num_copies; 137 return cur_mirror - 1; 138 } 139 140 static void btrfs_repair_done(struct btrfs_failed_bio *fbio) 141 { 142 if (atomic_dec_and_test(&fbio->repair_count)) { 143 btrfs_orig_bbio_end_io(fbio->bbio); 144 mempool_free(fbio, &btrfs_failed_bio_pool); 145 } 146 } 147 148 static void btrfs_end_repair_bio(struct btrfs_bio *repair_bbio, 149 struct btrfs_device *dev) 150 { 151 struct btrfs_failed_bio *fbio = repair_bbio->private; 152 struct btrfs_inode *inode = repair_bbio->inode; 153 struct btrfs_fs_info *fs_info = inode->root->fs_info; 154 struct bio_vec *bv = bio_first_bvec_all(&repair_bbio->bio); 155 int mirror = repair_bbio->mirror_num; 156 157 if (repair_bbio->bio.bi_status || 158 !btrfs_data_csum_ok(repair_bbio, dev, 0, bv)) { 159 bio_reset(&repair_bbio->bio, NULL, REQ_OP_READ); 160 repair_bbio->bio.bi_iter = repair_bbio->saved_iter; 161 162 mirror = next_repair_mirror(fbio, mirror); 163 if (mirror == fbio->bbio->mirror_num) { 164 btrfs_debug(fs_info, "no mirror left"); 165 fbio->bbio->bio.bi_status = BLK_STS_IOERR; 166 goto done; 167 } 168 169 btrfs_submit_bio(repair_bbio, mirror); 170 return; 171 } 172 173 do { 174 mirror = prev_repair_mirror(fbio, mirror); 175 btrfs_repair_io_failure(fs_info, btrfs_ino(inode), 176 repair_bbio->file_offset, fs_info->sectorsize, 177 repair_bbio->saved_iter.bi_sector << SECTOR_SHIFT, 178 bv->bv_page, bv->bv_offset, mirror); 179 } while (mirror != fbio->bbio->mirror_num); 180 181 done: 182 btrfs_repair_done(fbio); 183 bio_put(&repair_bbio->bio); 184 } 185 186 /* 187 * Try to kick off a repair read to the next available mirror for a bad sector. 188 * 189 * This primarily tries to recover good data to serve the actual read request, 190 * but also tries to write the good data back to the bad mirror(s) when a 191 * read succeeded to restore the redundancy. 192 */ 193 static struct btrfs_failed_bio *repair_one_sector(struct btrfs_bio *failed_bbio, 194 u32 bio_offset, 195 struct bio_vec *bv, 196 struct btrfs_failed_bio *fbio) 197 { 198 struct btrfs_inode *inode = failed_bbio->inode; 199 struct btrfs_fs_info *fs_info = inode->root->fs_info; 200 const u32 sectorsize = fs_info->sectorsize; 201 const u64 logical = (failed_bbio->saved_iter.bi_sector << SECTOR_SHIFT); 202 struct btrfs_bio *repair_bbio; 203 struct bio *repair_bio; 204 int num_copies; 205 int mirror; 206 207 btrfs_debug(fs_info, "repair read error: read error at %llu", 208 failed_bbio->file_offset + bio_offset); 209 210 num_copies = btrfs_num_copies(fs_info, logical, sectorsize); 211 if (num_copies == 1) { 212 btrfs_debug(fs_info, "no copy to repair from"); 213 failed_bbio->bio.bi_status = BLK_STS_IOERR; 214 return fbio; 215 } 216 217 if (!fbio) { 218 fbio = mempool_alloc(&btrfs_failed_bio_pool, GFP_NOFS); 219 fbio->bbio = failed_bbio; 220 fbio->num_copies = num_copies; 221 atomic_set(&fbio->repair_count, 1); 222 } 223 224 atomic_inc(&fbio->repair_count); 225 226 repair_bio = bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_NOFS, 227 &btrfs_repair_bioset); 228 repair_bio->bi_iter.bi_sector = failed_bbio->saved_iter.bi_sector; 229 __bio_add_page(repair_bio, bv->bv_page, bv->bv_len, bv->bv_offset); 230 231 repair_bbio = btrfs_bio(repair_bio); 232 btrfs_bio_init(repair_bbio, fs_info, NULL, fbio); 233 repair_bbio->inode = failed_bbio->inode; 234 repair_bbio->file_offset = failed_bbio->file_offset + bio_offset; 235 236 mirror = next_repair_mirror(fbio, failed_bbio->mirror_num); 237 btrfs_debug(fs_info, "submitting repair read to mirror %d", mirror); 238 btrfs_submit_bio(repair_bbio, mirror); 239 return fbio; 240 } 241 242 static void btrfs_check_read_bio(struct btrfs_bio *bbio, struct btrfs_device *dev) 243 { 244 struct btrfs_inode *inode = bbio->inode; 245 struct btrfs_fs_info *fs_info = inode->root->fs_info; 246 u32 sectorsize = fs_info->sectorsize; 247 struct bvec_iter *iter = &bbio->saved_iter; 248 blk_status_t status = bbio->bio.bi_status; 249 struct btrfs_failed_bio *fbio = NULL; 250 u32 offset = 0; 251 252 /* Read-repair requires the inode field to be set by the submitter. */ 253 ASSERT(inode); 254 255 /* 256 * Hand off repair bios to the repair code as there is no upper level 257 * submitter for them. 258 */ 259 if (bbio->bio.bi_pool == &btrfs_repair_bioset) { 260 btrfs_end_repair_bio(bbio, dev); 261 return; 262 } 263 264 /* Clear the I/O error. A failed repair will reset it. */ 265 bbio->bio.bi_status = BLK_STS_OK; 266 267 while (iter->bi_size) { 268 struct bio_vec bv = bio_iter_iovec(&bbio->bio, *iter); 269 270 bv.bv_len = min(bv.bv_len, sectorsize); 271 if (status || !btrfs_data_csum_ok(bbio, dev, offset, &bv)) 272 fbio = repair_one_sector(bbio, offset, &bv, fbio); 273 274 bio_advance_iter_single(&bbio->bio, iter, sectorsize); 275 offset += sectorsize; 276 } 277 278 if (bbio->csum != bbio->csum_inline) 279 kfree(bbio->csum); 280 281 if (fbio) 282 btrfs_repair_done(fbio); 283 else 284 btrfs_orig_bbio_end_io(bbio); 285 } 286 287 static void btrfs_log_dev_io_error(struct bio *bio, struct btrfs_device *dev) 288 { 289 if (!dev || !dev->bdev) 290 return; 291 if (bio->bi_status != BLK_STS_IOERR && bio->bi_status != BLK_STS_TARGET) 292 return; 293 294 if (btrfs_op(bio) == BTRFS_MAP_WRITE) 295 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS); 296 else if (!(bio->bi_opf & REQ_RAHEAD)) 297 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS); 298 if (bio->bi_opf & REQ_PREFLUSH) 299 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_FLUSH_ERRS); 300 } 301 302 static struct workqueue_struct *btrfs_end_io_wq(struct btrfs_fs_info *fs_info, 303 struct bio *bio) 304 { 305 if (bio->bi_opf & REQ_META) 306 return fs_info->endio_meta_workers; 307 return fs_info->endio_workers; 308 } 309 310 static void btrfs_end_bio_work(struct work_struct *work) 311 { 312 struct btrfs_bio *bbio = container_of(work, struct btrfs_bio, end_io_work); 313 314 /* Metadata reads are checked and repaired by the submitter. */ 315 if (bbio->inode && !(bbio->bio.bi_opf & REQ_META)) 316 btrfs_check_read_bio(bbio, bbio->bio.bi_private); 317 else 318 btrfs_orig_bbio_end_io(bbio); 319 } 320 321 static void btrfs_simple_end_io(struct bio *bio) 322 { 323 struct btrfs_bio *bbio = btrfs_bio(bio); 324 struct btrfs_device *dev = bio->bi_private; 325 struct btrfs_fs_info *fs_info = bbio->fs_info; 326 327 btrfs_bio_counter_dec(fs_info); 328 329 if (bio->bi_status) 330 btrfs_log_dev_io_error(bio, dev); 331 332 if (bio_op(bio) == REQ_OP_READ) { 333 INIT_WORK(&bbio->end_io_work, btrfs_end_bio_work); 334 queue_work(btrfs_end_io_wq(fs_info, bio), &bbio->end_io_work); 335 } else { 336 if (bio_op(bio) == REQ_OP_ZONE_APPEND && !bio->bi_status) 337 btrfs_record_physical_zoned(bbio); 338 btrfs_orig_bbio_end_io(bbio); 339 } 340 } 341 342 static void btrfs_raid56_end_io(struct bio *bio) 343 { 344 struct btrfs_io_context *bioc = bio->bi_private; 345 struct btrfs_bio *bbio = btrfs_bio(bio); 346 347 btrfs_bio_counter_dec(bioc->fs_info); 348 bbio->mirror_num = bioc->mirror_num; 349 if (bio_op(bio) == REQ_OP_READ && bbio->inode && 350 !(bbio->bio.bi_opf & REQ_META)) 351 btrfs_check_read_bio(bbio, NULL); 352 else 353 btrfs_orig_bbio_end_io(bbio); 354 355 btrfs_put_bioc(bioc); 356 } 357 358 static void btrfs_orig_write_end_io(struct bio *bio) 359 { 360 struct btrfs_io_stripe *stripe = bio->bi_private; 361 struct btrfs_io_context *bioc = stripe->bioc; 362 struct btrfs_bio *bbio = btrfs_bio(bio); 363 364 btrfs_bio_counter_dec(bioc->fs_info); 365 366 if (bio->bi_status) { 367 atomic_inc(&bioc->error); 368 btrfs_log_dev_io_error(bio, stripe->dev); 369 } 370 371 /* 372 * Only send an error to the higher layers if it is beyond the tolerance 373 * threshold. 374 */ 375 if (atomic_read(&bioc->error) > bioc->max_errors) 376 bio->bi_status = BLK_STS_IOERR; 377 else 378 bio->bi_status = BLK_STS_OK; 379 380 btrfs_orig_bbio_end_io(bbio); 381 btrfs_put_bioc(bioc); 382 } 383 384 static void btrfs_clone_write_end_io(struct bio *bio) 385 { 386 struct btrfs_io_stripe *stripe = bio->bi_private; 387 388 if (bio->bi_status) { 389 atomic_inc(&stripe->bioc->error); 390 btrfs_log_dev_io_error(bio, stripe->dev); 391 } 392 393 /* Pass on control to the original bio this one was cloned from */ 394 bio_endio(stripe->bioc->orig_bio); 395 bio_put(bio); 396 } 397 398 static void btrfs_submit_dev_bio(struct btrfs_device *dev, struct bio *bio) 399 { 400 if (!dev || !dev->bdev || 401 test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) || 402 (btrfs_op(bio) == BTRFS_MAP_WRITE && 403 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) { 404 bio_io_error(bio); 405 return; 406 } 407 408 bio_set_dev(bio, dev->bdev); 409 410 /* 411 * For zone append writing, bi_sector must point the beginning of the 412 * zone 413 */ 414 if (bio_op(bio) == REQ_OP_ZONE_APPEND) { 415 u64 physical = bio->bi_iter.bi_sector << SECTOR_SHIFT; 416 u64 zone_start = round_down(physical, dev->fs_info->zone_size); 417 418 ASSERT(btrfs_dev_is_sequential(dev, physical)); 419 bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT; 420 } 421 btrfs_debug_in_rcu(dev->fs_info, 422 "%s: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u", 423 __func__, bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector, 424 (unsigned long)dev->bdev->bd_dev, btrfs_dev_name(dev), 425 dev->devid, bio->bi_iter.bi_size); 426 427 btrfsic_check_bio(bio); 428 429 if (bio->bi_opf & REQ_BTRFS_CGROUP_PUNT) 430 blkcg_punt_bio_submit(bio); 431 else 432 submit_bio(bio); 433 } 434 435 static void btrfs_submit_mirrored_bio(struct btrfs_io_context *bioc, int dev_nr) 436 { 437 struct bio *orig_bio = bioc->orig_bio, *bio; 438 439 ASSERT(bio_op(orig_bio) != REQ_OP_READ); 440 441 /* Reuse the bio embedded into the btrfs_bio for the last mirror */ 442 if (dev_nr == bioc->num_stripes - 1) { 443 bio = orig_bio; 444 bio->bi_end_io = btrfs_orig_write_end_io; 445 } else { 446 bio = bio_alloc_clone(NULL, orig_bio, GFP_NOFS, &fs_bio_set); 447 bio_inc_remaining(orig_bio); 448 bio->bi_end_io = btrfs_clone_write_end_io; 449 } 450 451 bio->bi_private = &bioc->stripes[dev_nr]; 452 bio->bi_iter.bi_sector = bioc->stripes[dev_nr].physical >> SECTOR_SHIFT; 453 bioc->stripes[dev_nr].bioc = bioc; 454 btrfs_submit_dev_bio(bioc->stripes[dev_nr].dev, bio); 455 } 456 457 static void __btrfs_submit_bio(struct bio *bio, struct btrfs_io_context *bioc, 458 struct btrfs_io_stripe *smap, int mirror_num) 459 { 460 /* Do not leak our private flag into the block layer. */ 461 bio->bi_opf &= ~REQ_BTRFS_ONE_ORDERED; 462 463 if (!bioc) { 464 /* Single mirror read/write fast path. */ 465 btrfs_bio(bio)->mirror_num = mirror_num; 466 bio->bi_iter.bi_sector = smap->physical >> SECTOR_SHIFT; 467 if (bio_op(bio) != REQ_OP_READ) 468 btrfs_bio(bio)->orig_physical = smap->physical; 469 bio->bi_private = smap->dev; 470 bio->bi_end_io = btrfs_simple_end_io; 471 btrfs_submit_dev_bio(smap->dev, bio); 472 } else if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 473 /* Parity RAID write or read recovery. */ 474 bio->bi_private = bioc; 475 bio->bi_end_io = btrfs_raid56_end_io; 476 if (bio_op(bio) == REQ_OP_READ) 477 raid56_parity_recover(bio, bioc, mirror_num); 478 else 479 raid56_parity_write(bio, bioc); 480 } else { 481 /* Write to multiple mirrors. */ 482 int total_devs = bioc->num_stripes; 483 484 bioc->orig_bio = bio; 485 for (int dev_nr = 0; dev_nr < total_devs; dev_nr++) 486 btrfs_submit_mirrored_bio(bioc, dev_nr); 487 } 488 } 489 490 static blk_status_t btrfs_bio_csum(struct btrfs_bio *bbio) 491 { 492 if (bbio->bio.bi_opf & REQ_META) 493 return btree_csum_one_bio(bbio); 494 return btrfs_csum_one_bio(bbio); 495 } 496 497 /* 498 * Async submit bios are used to offload expensive checksumming onto the worker 499 * threads. 500 */ 501 struct async_submit_bio { 502 struct btrfs_bio *bbio; 503 struct btrfs_io_context *bioc; 504 struct btrfs_io_stripe smap; 505 int mirror_num; 506 struct btrfs_work work; 507 }; 508 509 /* 510 * In order to insert checksums into the metadata in large chunks, we wait 511 * until bio submission time. All the pages in the bio are checksummed and 512 * sums are attached onto the ordered extent record. 513 * 514 * At IO completion time the csums attached on the ordered extent record are 515 * inserted into the btree. 516 */ 517 static void run_one_async_start(struct btrfs_work *work) 518 { 519 struct async_submit_bio *async = 520 container_of(work, struct async_submit_bio, work); 521 blk_status_t ret; 522 523 ret = btrfs_bio_csum(async->bbio); 524 if (ret) 525 async->bbio->bio.bi_status = ret; 526 } 527 528 /* 529 * In order to insert checksums into the metadata in large chunks, we wait 530 * until bio submission time. All the pages in the bio are checksummed and 531 * sums are attached onto the ordered extent record. 532 * 533 * At IO completion time the csums attached on the ordered extent record are 534 * inserted into the tree. 535 */ 536 static void run_one_async_done(struct btrfs_work *work) 537 { 538 struct async_submit_bio *async = 539 container_of(work, struct async_submit_bio, work); 540 struct bio *bio = &async->bbio->bio; 541 542 /* If an error occurred we just want to clean up the bio and move on. */ 543 if (bio->bi_status) { 544 btrfs_orig_bbio_end_io(async->bbio); 545 return; 546 } 547 548 /* 549 * All of the bios that pass through here are from async helpers. 550 * Use REQ_BTRFS_CGROUP_PUNT to issue them from the owning cgroup's 551 * context. This changes nothing when cgroups aren't in use. 552 */ 553 bio->bi_opf |= REQ_BTRFS_CGROUP_PUNT; 554 __btrfs_submit_bio(bio, async->bioc, &async->smap, async->mirror_num); 555 } 556 557 static void run_one_async_free(struct btrfs_work *work) 558 { 559 kfree(container_of(work, struct async_submit_bio, work)); 560 } 561 562 static bool should_async_write(struct btrfs_bio *bbio) 563 { 564 /* Submit synchronously if the checksum implementation is fast. */ 565 if (test_bit(BTRFS_FS_CSUM_IMPL_FAST, &bbio->fs_info->flags)) 566 return false; 567 568 /* 569 * Try to defer the submission to a workqueue to parallelize the 570 * checksum calculation unless the I/O is issued synchronously. 571 */ 572 if (op_is_sync(bbio->bio.bi_opf)) 573 return false; 574 575 /* Zoned devices require I/O to be submitted in order. */ 576 if ((bbio->bio.bi_opf & REQ_META) && btrfs_is_zoned(bbio->fs_info)) 577 return false; 578 579 return true; 580 } 581 582 /* 583 * Submit bio to an async queue. 584 * 585 * Return true if the work has been succesfuly submitted, else false. 586 */ 587 static bool btrfs_wq_submit_bio(struct btrfs_bio *bbio, 588 struct btrfs_io_context *bioc, 589 struct btrfs_io_stripe *smap, int mirror_num) 590 { 591 struct btrfs_fs_info *fs_info = bbio->fs_info; 592 struct async_submit_bio *async; 593 594 async = kmalloc(sizeof(*async), GFP_NOFS); 595 if (!async) 596 return false; 597 598 async->bbio = bbio; 599 async->bioc = bioc; 600 async->smap = *smap; 601 async->mirror_num = mirror_num; 602 603 btrfs_init_work(&async->work, run_one_async_start, run_one_async_done, 604 run_one_async_free); 605 btrfs_queue_work(fs_info->workers, &async->work); 606 return true; 607 } 608 609 static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num) 610 { 611 struct btrfs_inode *inode = bbio->inode; 612 struct btrfs_fs_info *fs_info = bbio->fs_info; 613 struct btrfs_bio *orig_bbio = bbio; 614 struct bio *bio = &bbio->bio; 615 u64 logical = bio->bi_iter.bi_sector << SECTOR_SHIFT; 616 u64 length = bio->bi_iter.bi_size; 617 u64 map_length = length; 618 bool use_append = btrfs_use_zone_append(bbio); 619 struct btrfs_io_context *bioc = NULL; 620 struct btrfs_io_stripe smap; 621 blk_status_t ret; 622 int error; 623 624 btrfs_bio_counter_inc_blocked(fs_info); 625 error = btrfs_map_block(fs_info, btrfs_op(bio), logical, &map_length, 626 &bioc, &smap, &mirror_num, 1); 627 if (error) { 628 ret = errno_to_blk_status(error); 629 goto fail; 630 } 631 632 map_length = min(map_length, length); 633 if (use_append) 634 map_length = min(map_length, fs_info->max_zone_append_size); 635 636 if (map_length < length) { 637 bbio = btrfs_split_bio(fs_info, bbio, map_length, use_append); 638 bio = &bbio->bio; 639 } 640 641 /* 642 * Save the iter for the end_io handler and preload the checksums for 643 * data reads. 644 */ 645 if (bio_op(bio) == REQ_OP_READ && inode && !(bio->bi_opf & REQ_META)) { 646 bbio->saved_iter = bio->bi_iter; 647 ret = btrfs_lookup_bio_sums(bbio); 648 if (ret) 649 goto fail_put_bio; 650 } 651 652 if (btrfs_op(bio) == BTRFS_MAP_WRITE) { 653 if (use_append) { 654 bio->bi_opf &= ~REQ_OP_WRITE; 655 bio->bi_opf |= REQ_OP_ZONE_APPEND; 656 } 657 658 /* 659 * Csum items for reloc roots have already been cloned at this 660 * point, so they are handled as part of the no-checksum case. 661 */ 662 if (inode && !(inode->flags & BTRFS_INODE_NODATASUM) && 663 !test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state) && 664 !btrfs_is_data_reloc_root(inode->root)) { 665 if (should_async_write(bbio) && 666 btrfs_wq_submit_bio(bbio, bioc, &smap, mirror_num)) 667 goto done; 668 669 ret = btrfs_bio_csum(bbio); 670 if (ret) 671 goto fail_put_bio; 672 } else if (use_append) { 673 ret = btrfs_alloc_dummy_sum(bbio); 674 if (ret) 675 goto fail_put_bio; 676 } 677 } 678 679 __btrfs_submit_bio(bio, bioc, &smap, mirror_num); 680 done: 681 return map_length == length; 682 683 fail_put_bio: 684 if (map_length < length) 685 bio_put(bio); 686 fail: 687 btrfs_bio_counter_dec(fs_info); 688 btrfs_bio_end_io(orig_bbio, ret); 689 /* Do not submit another chunk */ 690 return true; 691 } 692 693 void btrfs_submit_bio(struct btrfs_bio *bbio, int mirror_num) 694 { 695 /* If bbio->inode is not populated, its file_offset must be 0. */ 696 ASSERT(bbio->inode || bbio->file_offset == 0); 697 698 while (!btrfs_submit_chunk(bbio, mirror_num)) 699 ; 700 } 701 702 /* 703 * Submit a repair write. 704 * 705 * This bypasses btrfs_submit_bio deliberately, as that writes all copies in a 706 * RAID setup. Here we only want to write the one bad copy, so we do the 707 * mapping ourselves and submit the bio directly. 708 * 709 * The I/O is issued synchronously to block the repair read completion from 710 * freeing the bio. 711 */ 712 int btrfs_repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start, 713 u64 length, u64 logical, struct page *page, 714 unsigned int pg_offset, int mirror_num) 715 { 716 struct btrfs_io_stripe smap = { 0 }; 717 struct bio_vec bvec; 718 struct bio bio; 719 int ret = 0; 720 721 ASSERT(!(fs_info->sb->s_flags & SB_RDONLY)); 722 BUG_ON(!mirror_num); 723 724 if (btrfs_repair_one_zone(fs_info, logical)) 725 return 0; 726 727 /* 728 * Avoid races with device replace and make sure our bioc has devices 729 * associated to its stripes that don't go away while we are doing the 730 * read repair operation. 731 */ 732 btrfs_bio_counter_inc_blocked(fs_info); 733 ret = btrfs_map_repair_block(fs_info, &smap, logical, length, mirror_num); 734 if (ret < 0) 735 goto out_counter_dec; 736 737 if (!smap.dev->bdev || 738 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &smap.dev->dev_state)) { 739 ret = -EIO; 740 goto out_counter_dec; 741 } 742 743 bio_init(&bio, smap.dev->bdev, &bvec, 1, REQ_OP_WRITE | REQ_SYNC); 744 bio.bi_iter.bi_sector = smap.physical >> SECTOR_SHIFT; 745 __bio_add_page(&bio, page, length, pg_offset); 746 747 btrfsic_check_bio(&bio); 748 ret = submit_bio_wait(&bio); 749 if (ret) { 750 /* try to remap that extent elsewhere? */ 751 btrfs_dev_stat_inc_and_print(smap.dev, BTRFS_DEV_STAT_WRITE_ERRS); 752 goto out_bio_uninit; 753 } 754 755 btrfs_info_rl_in_rcu(fs_info, 756 "read error corrected: ino %llu off %llu (dev %s sector %llu)", 757 ino, start, btrfs_dev_name(smap.dev), 758 smap.physical >> SECTOR_SHIFT); 759 ret = 0; 760 761 out_bio_uninit: 762 bio_uninit(&bio); 763 out_counter_dec: 764 btrfs_bio_counter_dec(fs_info); 765 return ret; 766 } 767 768 /* 769 * Submit a btrfs_bio based repair write. 770 * 771 * If @dev_replace is true, the write would be submitted to dev-replace target. 772 */ 773 void btrfs_submit_repair_write(struct btrfs_bio *bbio, int mirror_num, bool dev_replace) 774 { 775 struct btrfs_fs_info *fs_info = bbio->fs_info; 776 u64 logical = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT; 777 u64 length = bbio->bio.bi_iter.bi_size; 778 struct btrfs_io_stripe smap = { 0 }; 779 int ret; 780 781 ASSERT(fs_info); 782 ASSERT(mirror_num > 0); 783 ASSERT(btrfs_op(&bbio->bio) == BTRFS_MAP_WRITE); 784 ASSERT(!bbio->inode); 785 786 btrfs_bio_counter_inc_blocked(fs_info); 787 ret = btrfs_map_repair_block(fs_info, &smap, logical, length, mirror_num); 788 if (ret < 0) 789 goto fail; 790 791 if (dev_replace) { 792 ASSERT(smap.dev == fs_info->dev_replace.srcdev); 793 smap.dev = fs_info->dev_replace.tgtdev; 794 } 795 __btrfs_submit_bio(&bbio->bio, NULL, &smap, mirror_num); 796 return; 797 798 fail: 799 btrfs_bio_counter_dec(fs_info); 800 btrfs_bio_end_io(bbio, errno_to_blk_status(ret)); 801 } 802 803 int __init btrfs_bioset_init(void) 804 { 805 if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE, 806 offsetof(struct btrfs_bio, bio), 807 BIOSET_NEED_BVECS)) 808 return -ENOMEM; 809 if (bioset_init(&btrfs_clone_bioset, BIO_POOL_SIZE, 810 offsetof(struct btrfs_bio, bio), 0)) 811 goto out_free_bioset; 812 if (bioset_init(&btrfs_repair_bioset, BIO_POOL_SIZE, 813 offsetof(struct btrfs_bio, bio), 814 BIOSET_NEED_BVECS)) 815 goto out_free_clone_bioset; 816 if (mempool_init_kmalloc_pool(&btrfs_failed_bio_pool, BIO_POOL_SIZE, 817 sizeof(struct btrfs_failed_bio))) 818 goto out_free_repair_bioset; 819 return 0; 820 821 out_free_repair_bioset: 822 bioset_exit(&btrfs_repair_bioset); 823 out_free_clone_bioset: 824 bioset_exit(&btrfs_clone_bioset); 825 out_free_bioset: 826 bioset_exit(&btrfs_bioset); 827 return -ENOMEM; 828 } 829 830 void __cold btrfs_bioset_exit(void) 831 { 832 mempool_exit(&btrfs_failed_bio_pool); 833 bioset_exit(&btrfs_repair_bioset); 834 bioset_exit(&btrfs_clone_bioset); 835 bioset_exit(&btrfs_bioset); 836 } 837