// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 * Copyright (C) 2022 Christoph Hellwig.
 */

#include <linux/bio.h>
#include "bio.h"
#include "ctree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "dev-replace.h"
#include "rcu-string.h"
#include "zoned.h"
#include "file-item.h"

static struct bio_set btrfs_bioset;
static struct bio_set btrfs_clone_bioset;
static struct bio_set btrfs_repair_bioset;
static mempool_t btrfs_failed_bio_pool;

struct btrfs_failed_bio {
	struct btrfs_bio *bbio;
	int num_copies;
	atomic_t repair_count;
};

/*
 * Initialize a btrfs_bio structure. This skips the embedded bio itself as it
 * is already initialized by the block layer.
 */
void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_inode *inode,
		    btrfs_bio_end_io_t end_io, void *private)
{
	memset(bbio, 0, offsetof(struct btrfs_bio, bio));
	bbio->inode = inode;
	bbio->end_io = end_io;
	bbio->private = private;
	atomic_set(&bbio->pending_ios, 1);
}

/*
 * Allocate a btrfs_bio structure. The btrfs_bio is the main I/O container
 * for btrfs, and is used for all I/O submitted through btrfs_submit_bio.
 *
 * Just like the underlying bio_alloc_bioset it will not fail as it is backed
 * by a mempool.
 */
struct btrfs_bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,
				  struct btrfs_inode *inode,
				  btrfs_bio_end_io_t end_io, void *private)
{
	struct btrfs_bio *bbio;
	struct bio *bio;

	bio = bio_alloc_bioset(NULL, nr_vecs, opf, GFP_NOFS, &btrfs_bioset);
	bbio = btrfs_bio(bio);
	btrfs_bio_init(bbio, inode, end_io, private);
	return bbio;
}
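
/*
 * Hypothetical usage sketch (added annotation, not part of the original
 * file; "done_fn", "ctx", "page" and "file_offset" are made-up names):
 *
 *	bbio = btrfs_bio_alloc(1, REQ_OP_READ, inode, done_fn, ctx);
 *	bbio->file_offset = file_offset;
 *	__bio_add_page(&bbio->bio, page, fs_info->sectorsize, 0);
 *	btrfs_submit_bio(bbio, 0);
 *
 * done_fn() runs exactly once, even if btrfs_submit_bio() has to split the
 * bio into multiple chunks: pending_ios (initialized to 1 above) counts the
 * outstanding chunks and the completion only fires when it drops to zero.
 */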

static blk_status_t btrfs_bio_extract_ordered_extent(struct btrfs_bio *bbio)
{
	struct btrfs_ordered_extent *ordered;
	int ret;

	ordered = btrfs_lookup_ordered_extent(bbio->inode, bbio->file_offset);
	if (WARN_ON_ONCE(!ordered))
		return BLK_STS_IOERR;
	ret = btrfs_extract_ordered_extent(bbio, ordered);
	btrfs_put_ordered_extent(ordered);

	return errno_to_blk_status(ret);
}

static struct btrfs_bio *btrfs_split_bio(struct btrfs_fs_info *fs_info,
					 struct btrfs_bio *orig_bbio,
					 u64 map_length, bool use_append)
{
	struct btrfs_bio *bbio;
	struct bio *bio;

	if (use_append) {
		unsigned int nr_segs;

		bio = bio_split_rw(&orig_bbio->bio, &fs_info->limits, &nr_segs,
				   &btrfs_clone_bioset, map_length);
	} else {
		bio = bio_split(&orig_bbio->bio, map_length >> SECTOR_SHIFT,
				GFP_NOFS, &btrfs_clone_bioset);
	}
	bbio = btrfs_bio(bio);
	btrfs_bio_init(bbio, orig_bbio->inode, NULL, orig_bbio);

	bbio->file_offset = orig_bbio->file_offset;
	if (!(orig_bbio->bio.bi_opf & REQ_BTRFS_ONE_ORDERED))
		orig_bbio->file_offset += map_length;

	atomic_inc(&orig_bbio->pending_ios);
	return bbio;
}

static void btrfs_orig_write_end_io(struct bio *bio);

static void btrfs_bbio_propagate_error(struct btrfs_bio *bbio,
				       struct btrfs_bio *orig_bbio)
{
	/*
	 * For writes we tolerate nr_mirrors - 1 write failures, so we can't
	 * just blindly propagate a write failure here. Instead increment the
	 * error count in the original I/O context so that it is guaranteed to
	 * be larger than the error tolerance.
	 */
	if (bbio->bio.bi_end_io == &btrfs_orig_write_end_io) {
		struct btrfs_io_stripe *orig_stripe = orig_bbio->bio.bi_private;
		struct btrfs_io_context *orig_bioc = orig_stripe->bioc;

		atomic_add(orig_bioc->max_errors, &orig_bioc->error);
	} else {
		orig_bbio->bio.bi_status = bbio->bio.bi_status;
	}
}

static void btrfs_orig_bbio_end_io(struct btrfs_bio *bbio)
{
	if (bbio->bio.bi_pool == &btrfs_clone_bioset) {
		struct btrfs_bio *orig_bbio = bbio->private;

		if (bbio->bio.bi_status)
			btrfs_bbio_propagate_error(bbio, orig_bbio);
		bio_put(&bbio->bio);
		bbio = orig_bbio;
	}

	if (atomic_dec_and_test(&bbio->pending_ios))
		bbio->end_io(bbio);
}

static int next_repair_mirror(struct btrfs_failed_bio *fbio, int cur_mirror)
{
	if (cur_mirror == fbio->num_copies)
		return cur_mirror + 1 - fbio->num_copies;
	return cur_mirror + 1;
}

static int prev_repair_mirror(struct btrfs_failed_bio *fbio, int cur_mirror)
{
	if (cur_mirror == 1)
		return fbio->num_copies;
	return cur_mirror - 1;
}

static void btrfs_repair_done(struct btrfs_failed_bio *fbio)
{
	if (atomic_dec_and_test(&fbio->repair_count)) {
		btrfs_orig_bbio_end_io(fbio->bbio);
		mempool_free(fbio, &btrfs_failed_bio_pool);
	}
}

static void btrfs_end_repair_bio(struct btrfs_bio *repair_bbio,
				 struct btrfs_device *dev)
{
	struct btrfs_failed_bio *fbio = repair_bbio->private;
	struct btrfs_inode *inode = repair_bbio->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct bio_vec *bv = bio_first_bvec_all(&repair_bbio->bio);
	int mirror = repair_bbio->mirror_num;

	if (repair_bbio->bio.bi_status ||
	    !btrfs_data_csum_ok(repair_bbio, dev, 0, bv)) {
		bio_reset(&repair_bbio->bio, NULL, REQ_OP_READ);
		repair_bbio->bio.bi_iter = repair_bbio->saved_iter;

		mirror = next_repair_mirror(fbio, mirror);
		if (mirror == fbio->bbio->mirror_num) {
			btrfs_debug(fs_info, "no mirror left");
			fbio->bbio->bio.bi_status = BLK_STS_IOERR;
			goto done;
		}

		btrfs_submit_bio(repair_bbio, mirror);
		return;
	}

	do {
		mirror = prev_repair_mirror(fbio, mirror);
		btrfs_repair_io_failure(fs_info, btrfs_ino(inode),
				  repair_bbio->file_offset, fs_info->sectorsize,
				  repair_bbio->saved_iter.bi_sector << SECTOR_SHIFT,
				  bv->bv_page, bv->bv_offset, mirror);
	} while (mirror != fbio->bbio->mirror_num);

done:
	btrfs_repair_done(fbio);
	bio_put(&repair_bbio->bio);
}
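
/*
 * Worked example of the mirror rotation above (added annotation, not part
 * of the original file): with num_copies == 3 and a read that failed on
 * mirror 2, next_repair_mirror() tries mirror 3 and then wraps around to
 * mirror 1; if the rotation comes all the way back to the originally failed
 * mirror, the repair gives up with BLK_STS_IOERR. Once a good copy is
 * found, say on mirror 1, the write-back loop walks backwards via
 * prev_repair_mirror() and rewrites mirrors 3 and then 2, so every copy
 * that failed before the good one gets the corrected data.
 */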

/*
 * Try to kick off a repair read to the next available mirror for a bad
 * sector.
 *
 * This primarily tries to recover good data to serve the actual read
 * request, but also tries to write the good data back to the bad mirror(s)
 * when a read succeeded to restore the redundancy.
 */
static struct btrfs_failed_bio *repair_one_sector(struct btrfs_bio *failed_bbio,
						  u32 bio_offset,
						  struct bio_vec *bv,
						  struct btrfs_failed_bio *fbio)
{
	struct btrfs_inode *inode = failed_bbio->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	const u32 sectorsize = fs_info->sectorsize;
	const u64 logical = (failed_bbio->saved_iter.bi_sector << SECTOR_SHIFT);
	struct btrfs_bio *repair_bbio;
	struct bio *repair_bio;
	int num_copies;
	int mirror;

	btrfs_debug(fs_info, "repair read error: read error at %llu",
		    failed_bbio->file_offset + bio_offset);

	num_copies = btrfs_num_copies(fs_info, logical, sectorsize);
	if (num_copies == 1) {
		btrfs_debug(fs_info, "no copy to repair from");
		failed_bbio->bio.bi_status = BLK_STS_IOERR;
		return fbio;
	}

	if (!fbio) {
		fbio = mempool_alloc(&btrfs_failed_bio_pool, GFP_NOFS);
		fbio->bbio = failed_bbio;
		fbio->num_copies = num_copies;
		atomic_set(&fbio->repair_count, 1);
	}

	atomic_inc(&fbio->repair_count);

	repair_bio = bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_NOFS,
				      &btrfs_repair_bioset);
	repair_bio->bi_iter.bi_sector = failed_bbio->saved_iter.bi_sector;
	__bio_add_page(repair_bio, bv->bv_page, bv->bv_len, bv->bv_offset);

	repair_bbio = btrfs_bio(repair_bio);
	btrfs_bio_init(repair_bbio, failed_bbio->inode, NULL, fbio);
	repair_bbio->file_offset = failed_bbio->file_offset + bio_offset;

	mirror = next_repair_mirror(fbio, failed_bbio->mirror_num);
	btrfs_debug(fs_info, "submitting repair read to mirror %d", mirror);
	btrfs_submit_bio(repair_bbio, mirror);
	return fbio;
}

static void btrfs_check_read_bio(struct btrfs_bio *bbio, struct btrfs_device *dev)
{
	struct btrfs_inode *inode = bbio->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u32 sectorsize = fs_info->sectorsize;
	struct bvec_iter *iter = &bbio->saved_iter;
	blk_status_t status = bbio->bio.bi_status;
	struct btrfs_failed_bio *fbio = NULL;
	u32 offset = 0;

	/*
	 * Hand off repair bios to the repair code as there is no upper level
	 * submitter for them.
	 */
	if (bbio->bio.bi_pool == &btrfs_repair_bioset) {
		btrfs_end_repair_bio(bbio, dev);
		return;
	}

	/* Clear the I/O error. A failed repair will reset it. */
	bbio->bio.bi_status = BLK_STS_OK;

	while (iter->bi_size) {
		struct bio_vec bv = bio_iter_iovec(&bbio->bio, *iter);

		bv.bv_len = min(bv.bv_len, sectorsize);
		if (status || !btrfs_data_csum_ok(bbio, dev, offset, &bv))
			fbio = repair_one_sector(bbio, offset, &bv, fbio);

		bio_advance_iter_single(&bbio->bio, iter, sectorsize);
		offset += sectorsize;
	}

	if (bbio->csum != bbio->csum_inline)
		kfree(bbio->csum);

	if (fbio)
		btrfs_repair_done(fbio);
	else
		btrfs_orig_bbio_end_io(bbio);
}
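
/*
 * Example of the per-sector loop above (added annotation, not part of the
 * original file): a 16K data read on a 4K sectorsize filesystem is verified
 * as four independent sectors. If only the sector at offset 8192 fails its
 * checksum, repair_one_sector() is called once for it, a single
 * btrfs_failed_bio tracks the outstanding repair, and the other three
 * sectors pass through untouched.
 */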

static void btrfs_log_dev_io_error(struct bio *bio, struct btrfs_device *dev)
{
	if (!dev || !dev->bdev)
		return;
	if (bio->bi_status != BLK_STS_IOERR && bio->bi_status != BLK_STS_TARGET)
		return;

	if (btrfs_op(bio) == BTRFS_MAP_WRITE)
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
	else if (!(bio->bi_opf & REQ_RAHEAD))
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
	if (bio->bi_opf & REQ_PREFLUSH)
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_FLUSH_ERRS);
}

static struct workqueue_struct *btrfs_end_io_wq(struct btrfs_fs_info *fs_info,
						struct bio *bio)
{
	if (bio->bi_opf & REQ_META)
		return fs_info->endio_meta_workers;
	return fs_info->endio_workers;
}

static void btrfs_end_bio_work(struct work_struct *work)
{
	struct btrfs_bio *bbio = container_of(work, struct btrfs_bio, end_io_work);

	/* Metadata reads are checked and repaired by the submitter. */
	if (bbio->bio.bi_opf & REQ_META)
		bbio->end_io(bbio);
	else
		btrfs_check_read_bio(bbio, bbio->bio.bi_private);
}

static void btrfs_simple_end_io(struct bio *bio)
{
	struct btrfs_bio *bbio = btrfs_bio(bio);
	struct btrfs_device *dev = bio->bi_private;
	struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;

	btrfs_bio_counter_dec(fs_info);

	if (bio->bi_status)
		btrfs_log_dev_io_error(bio, dev);

	if (bio_op(bio) == REQ_OP_READ) {
		INIT_WORK(&bbio->end_io_work, btrfs_end_bio_work);
		queue_work(btrfs_end_io_wq(fs_info, bio), &bbio->end_io_work);
	} else {
		if (bio_op(bio) == REQ_OP_ZONE_APPEND)
			btrfs_record_physical_zoned(bbio);
		btrfs_orig_bbio_end_io(bbio);
	}
}
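
/*
 * Note on the completion path above (added annotation, not part of the
 * original file): bio end_io handlers may run in interrupt context, while
 * checksum verification and read repair can sleep (GFP_NOFS allocations,
 * resubmission of repair bios). Reads are therefore punted to the endio
 * workqueues: data reads get verified per sector in process context there,
 * metadata reads are handed straight back to the submitter, which does its
 * own checking, and writes complete without the detour.
 */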

static void btrfs_raid56_end_io(struct bio *bio)
{
	struct btrfs_io_context *bioc = bio->bi_private;
	struct btrfs_bio *bbio = btrfs_bio(bio);

	btrfs_bio_counter_dec(bioc->fs_info);
	bbio->mirror_num = bioc->mirror_num;
	if (bio_op(bio) == REQ_OP_READ && !(bbio->bio.bi_opf & REQ_META))
		btrfs_check_read_bio(bbio, NULL);
	else
		btrfs_orig_bbio_end_io(bbio);

	btrfs_put_bioc(bioc);
}

static void btrfs_orig_write_end_io(struct bio *bio)
{
	struct btrfs_io_stripe *stripe = bio->bi_private;
	struct btrfs_io_context *bioc = stripe->bioc;
	struct btrfs_bio *bbio = btrfs_bio(bio);

	btrfs_bio_counter_dec(bioc->fs_info);

	if (bio->bi_status) {
		atomic_inc(&bioc->error);
		btrfs_log_dev_io_error(bio, stripe->dev);
	}

	/*
	 * Only send an error to the higher layers if it is beyond the
	 * tolerance threshold.
	 */
	if (atomic_read(&bioc->error) > bioc->max_errors)
		bio->bi_status = BLK_STS_IOERR;
	else
		bio->bi_status = BLK_STS_OK;

	btrfs_orig_bbio_end_io(bbio);
	btrfs_put_bioc(bioc);
}
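
/*
 * Worked example of the tolerance check above (added annotation, not part
 * of the original file): a RAID1 write fans out to two stripes and the bioc
 * has max_errors == 1. One failed mirror leaves bioc->error == 1, which
 * does not exceed the tolerance, so the write still reports success; only
 * when both mirrors fail does the count pass max_errors and BLK_STS_IOERR
 * reach the higher layers. btrfs_bbio_propagate_error() feeds this same
 * counter when a split chunk fails.
 */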

static void btrfs_clone_write_end_io(struct bio *bio)
{
	struct btrfs_io_stripe *stripe = bio->bi_private;

	if (bio->bi_status) {
		atomic_inc(&stripe->bioc->error);
		btrfs_log_dev_io_error(bio, stripe->dev);
	}

	/* Pass on control to the original bio this one was cloned from. */
	bio_endio(stripe->bioc->orig_bio);
	bio_put(bio);
}

static void btrfs_submit_dev_bio(struct btrfs_device *dev, struct bio *bio)
{
	if (!dev || !dev->bdev ||
	    test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
	    (btrfs_op(bio) == BTRFS_MAP_WRITE &&
	     !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
		bio_io_error(bio);
		return;
	}

	bio_set_dev(bio, dev->bdev);

	/*
	 * For zone append writing, bi_sector must point to the beginning of
	 * the zone.
	 */
	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
		u64 physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
		u64 zone_start = round_down(physical, dev->fs_info->zone_size);

		ASSERT(btrfs_dev_is_sequential(dev, physical));
		bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT;
	}
	btrfs_debug_in_rcu(dev->fs_info,
		"%s: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
		__func__, bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector,
		(unsigned long)dev->bdev->bd_dev, btrfs_dev_name(dev),
		dev->devid, bio->bi_iter.bi_size);

	btrfsic_check_bio(bio);
	submit_bio(bio);
}

static void btrfs_submit_mirrored_bio(struct btrfs_io_context *bioc, int dev_nr)
{
	struct bio *orig_bio = bioc->orig_bio, *bio;

	ASSERT(bio_op(orig_bio) != REQ_OP_READ);

	/* Reuse the bio embedded into the btrfs_bio for the last mirror. */
	if (dev_nr == bioc->num_stripes - 1) {
		bio = orig_bio;
		bio->bi_end_io = btrfs_orig_write_end_io;
	} else {
		bio = bio_alloc_clone(NULL, orig_bio, GFP_NOFS, &fs_bio_set);
		bio_inc_remaining(orig_bio);
		bio->bi_end_io = btrfs_clone_write_end_io;
	}

	bio->bi_private = &bioc->stripes[dev_nr];
	bio->bi_iter.bi_sector = bioc->stripes[dev_nr].physical >> SECTOR_SHIFT;
	bioc->stripes[dev_nr].bioc = bioc;
	btrfs_submit_dev_bio(bioc->stripes[dev_nr].dev, bio);
}

static void __btrfs_submit_bio(struct bio *bio, struct btrfs_io_context *bioc,
			       struct btrfs_io_stripe *smap, int mirror_num)
{
	/* Do not leak our private flag into the block layer. */
	bio->bi_opf &= ~REQ_BTRFS_ONE_ORDERED;

	if (!bioc) {
		/* Single mirror read/write fast path. */
		btrfs_bio(bio)->mirror_num = mirror_num;
		bio->bi_iter.bi_sector = smap->physical >> SECTOR_SHIFT;
		bio->bi_private = smap->dev;
		bio->bi_end_io = btrfs_simple_end_io;
		btrfs_submit_dev_bio(smap->dev, bio);
	} else if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		/* Parity RAID write or read recovery. */
		bio->bi_private = bioc;
		bio->bi_end_io = btrfs_raid56_end_io;
		if (bio_op(bio) == REQ_OP_READ)
			raid56_parity_recover(bio, bioc, mirror_num);
		else
			raid56_parity_write(bio, bioc);
	} else {
		/* Write to multiple mirrors. */
		int total_devs = bioc->num_stripes;

		bioc->orig_bio = bio;
		for (int dev_nr = 0; dev_nr < total_devs; dev_nr++)
			btrfs_submit_mirrored_bio(bioc, dev_nr);
	}
}

static blk_status_t btrfs_bio_csum(struct btrfs_bio *bbio)
{
	if (bbio->bio.bi_opf & REQ_META)
		return btree_csum_one_bio(bbio);
	return btrfs_csum_one_bio(bbio);
}

/*
 * Async submit bios are used to offload expensive checksumming onto the
 * worker threads.
 */
struct async_submit_bio {
	struct btrfs_bio *bbio;
	struct btrfs_io_context *bioc;
	struct btrfs_io_stripe smap;
	int mirror_num;
	struct btrfs_work work;
};

/*
 * In order to insert checksums into the metadata in large chunks, we wait
 * until bio submission time. All the pages in the bio are checksummed and
 * sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record are
 * inserted into the btree.
 */
static void run_one_async_start(struct btrfs_work *work)
{
	struct async_submit_bio *async =
		container_of(work, struct async_submit_bio, work);
	blk_status_t ret;

	ret = btrfs_bio_csum(async->bbio);
	if (ret)
		async->bbio->bio.bi_status = ret;
}

static void run_one_async_done(struct btrfs_work *work)
{
	struct async_submit_bio *async =
		container_of(work, struct async_submit_bio, work);
	struct bio *bio = &async->bbio->bio;

	/* If an error occurred we just want to clean up the bio and move on. */
	if (bio->bi_status) {
		btrfs_orig_bbio_end_io(async->bbio);
		return;
	}

	/*
	 * All of the bios that pass through here are from async helpers.
	 * Use REQ_CGROUP_PUNT to issue them from the owning cgroup's context.
	 * This changes nothing when cgroups aren't in use.
	 */
	bio->bi_opf |= REQ_CGROUP_PUNT;
	__btrfs_submit_bio(bio, async->bioc, &async->smap, async->mirror_num);
}

static void run_one_async_free(struct btrfs_work *work)
{
	kfree(container_of(work, struct async_submit_bio, work));
}

static bool should_async_write(struct btrfs_bio *bbio)
{
	/*
	 * If the I/O is issued by fsync and friends (->sync_writers != 0),
	 * submit it synchronously to keep latency low. Otherwise try to
	 * defer the submission to a workqueue to parallelize the checksum
	 * calculation.
	 */
	if (atomic_read(&bbio->inode->sync_writers))
		return false;

	/*
	 * Submit metadata writes synchronously if the checksum implementation
	 * is fast, or we are on a zoned device that wants I/O to be submitted
	 * in order.
	 */
	if (bbio->bio.bi_opf & REQ_META) {
		struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;

		if (btrfs_is_zoned(fs_info))
			return false;
		if (test_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags))
			return false;
	}

	return true;
}
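
/*
 * Decision summary for should_async_write() (added annotation, not part of
 * the original file): writes from fsync and friends are checksummed inline
 * to keep latency low; metadata writes stay inline on zoned filesystems to
 * preserve submission order, and also when the checksum implementation is
 * fast enough that offloading buys nothing. Everything else is deferred to
 * a workqueue so the checksum calculation can be parallelized.
 */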

/*
 * Submit bio to an async queue.
 *
 * Return true if the work has been successfully submitted, else false.
 */
static bool btrfs_wq_submit_bio(struct btrfs_bio *bbio,
				struct btrfs_io_context *bioc,
				struct btrfs_io_stripe *smap, int mirror_num)
{
	struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
	struct async_submit_bio *async;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return false;

	async->bbio = bbio;
	async->bioc = bioc;
	async->smap = *smap;
	async->mirror_num = mirror_num;

	btrfs_init_work(&async->work, run_one_async_start, run_one_async_done,
			run_one_async_free);
	if (op_is_sync(bbio->bio.bi_opf))
		btrfs_queue_work(fs_info->hipri_workers, &async->work);
	else
		btrfs_queue_work(fs_info->workers, &async->work);
	return true;
}

static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
{
	struct btrfs_inode *inode = bbio->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_bio *orig_bbio = bbio;
	struct bio *bio = &bbio->bio;
	u64 logical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
	u64 length = bio->bi_iter.bi_size;
	u64 map_length = length;
	bool use_append = btrfs_use_zone_append(bbio);
	struct btrfs_io_context *bioc = NULL;
	struct btrfs_io_stripe smap;
	blk_status_t ret;
	int error;

	btrfs_bio_counter_inc_blocked(fs_info);
	error = __btrfs_map_block(fs_info, btrfs_op(bio), logical, &map_length,
				  &bioc, &smap, &mirror_num, 1);
	if (error) {
		ret = errno_to_blk_status(error);
		goto fail;
	}

	map_length = min(map_length, length);
	if (use_append)
		map_length = min(map_length, fs_info->max_zone_append_size);

	if (map_length < length) {
		bbio = btrfs_split_bio(fs_info, bbio, map_length, use_append);
		bio = &bbio->bio;
	}

	/*
	 * Save the iter for the end_io handler and preload the checksums for
	 * data reads.
	 */
	if (bio_op(bio) == REQ_OP_READ && !(bio->bi_opf & REQ_META)) {
		bbio->saved_iter = bio->bi_iter;
		ret = btrfs_lookup_bio_sums(bbio);
		if (ret)
			goto fail_put_bio;
	}

	if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
		if (use_append) {
			bio->bi_opf &= ~REQ_OP_WRITE;
			bio->bi_opf |= REQ_OP_ZONE_APPEND;
			ret = btrfs_bio_extract_ordered_extent(bbio);
			if (ret)
				goto fail_put_bio;
		}

		/*
		 * Csum items for reloc roots have already been cloned at this
		 * point, so they are handled as part of the no-checksum case.
		 */
		if (!(inode->flags & BTRFS_INODE_NODATASUM) &&
		    !test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state) &&
		    !btrfs_is_data_reloc_root(inode->root)) {
			if (should_async_write(bbio) &&
			    btrfs_wq_submit_bio(bbio, bioc, &smap, mirror_num))
				goto done;

			ret = btrfs_bio_csum(bbio);
			if (ret)
				goto fail_put_bio;
		}
	}

	__btrfs_submit_bio(bio, bioc, &smap, mirror_num);
done:
	return map_length == length;

fail_put_bio:
	if (map_length < length)
		bio_put(bio);
fail:
	btrfs_bio_counter_dec(fs_info);
	btrfs_bio_end_io(orig_bbio, ret);
	/* Do not submit another chunk. */
	return true;
}

void btrfs_submit_bio(struct btrfs_bio *bbio, int mirror_num)
{
	while (!btrfs_submit_chunk(bbio, mirror_num))
		;
}
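
/*
 * Illustrative walk-through of the loop above (added annotation, not part
 * of the original file): a 256K write to a RAID0 chunk with a 64K stripe
 * length maps at most 64K contiguously per device, so btrfs_submit_chunk()
 * splits off and submits 64K at a time, returning false until the final
 * chunk covers the whole remainder. Each split clone holds an extra
 * pending_ios reference, so the caller's end_io runs only once, after all
 * four chunks complete.
 */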

/*
 * Submit a repair write.
 *
 * This bypasses btrfs_submit_bio deliberately, as that writes all copies in
 * a RAID setup. Here we only want to write the one bad copy, so we do the
 * mapping ourselves and submit the bio directly.
 *
 * The I/O is issued synchronously to block the repair read completion from
 * freeing the bio.
 */
int btrfs_repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
			    u64 length, u64 logical, struct page *page,
			    unsigned int pg_offset, int mirror_num)
{
	struct btrfs_device *dev;
	struct bio_vec bvec;
	struct bio bio;
	u64 map_length = 0;
	u64 sector;
	struct btrfs_io_context *bioc = NULL;
	int ret = 0;

	ASSERT(!(fs_info->sb->s_flags & SB_RDONLY));
	BUG_ON(!mirror_num);

	if (btrfs_repair_one_zone(fs_info, logical))
		return 0;

	map_length = length;

	/*
	 * Avoid races with device replace and make sure our bioc has devices
	 * associated to its stripes that don't go away while we are doing the
	 * read repair operation.
	 */
	btrfs_bio_counter_inc_blocked(fs_info);
	if (btrfs_is_parity_mirror(fs_info, logical, length)) {
		/*
		 * Note that we don't use BTRFS_MAP_WRITE because it's supposed
		 * to update all raid stripes, but here we just want to correct
		 * the bad stripe, thus BTRFS_MAP_READ is abused to only get
		 * the bad stripe's dev and sector.
		 */
		ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
				      &map_length, &bioc, 0);
		if (ret)
			goto out_counter_dec;
		ASSERT(bioc->mirror_num == 1);
	} else {
		ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical,
				      &map_length, &bioc, mirror_num);
		if (ret)
			goto out_counter_dec;
		/*
		 * This happens when dev-replace is also running, and the
		 * mirror_num indicates the dev-replace target.
		 *
		 * In this case, we don't need to do anything, as the read
		 * error just means the replace progress hasn't reached our
		 * read range, and the later replace routine will handle it
		 * well.
		 */
		if (mirror_num != bioc->mirror_num)
			goto out_counter_dec;
	}

	sector = bioc->stripes[bioc->mirror_num - 1].physical >> SECTOR_SHIFT;
	dev = bioc->stripes[bioc->mirror_num - 1].dev;
	btrfs_put_bioc(bioc);

	if (!dev || !dev->bdev ||
	    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
		ret = -EIO;
		goto out_counter_dec;
	}

	bio_init(&bio, dev->bdev, &bvec, 1, REQ_OP_WRITE | REQ_SYNC);
	bio.bi_iter.bi_sector = sector;
	__bio_add_page(&bio, page, length, pg_offset);

	btrfsic_check_bio(&bio);
	ret = submit_bio_wait(&bio);
	if (ret) {
		/* Try to remap that extent elsewhere? */
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
		goto out_bio_uninit;
	}

	btrfs_info_rl_in_rcu(fs_info,
		"read error corrected: ino %llu off %llu (dev %s sector %llu)",
			     ino, start, btrfs_dev_name(dev), sector);
	ret = 0;

out_bio_uninit:
	bio_uninit(&bio);
out_counter_dec:
	btrfs_bio_counter_dec(fs_info);
	return ret;
}
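
/*
 * Note on the allocator setup below (added annotation, not part of the
 * original file): each bio_set and the failed-bio mempool pre-allocate
 * BIO_POOL_SIZE entries, so bio and btrfs_failed_bio allocations on these
 * paths can always make forward progress even under memory pressure. That
 * matters because read repair and writeback run in contexts where failing
 * an allocation would mean losing an I/O; it is also why btrfs_bio_alloc()
 * above documents that it will not fail.
 */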

int __init btrfs_bioset_init(void)
{
	if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE,
			offsetof(struct btrfs_bio, bio),
			BIOSET_NEED_BVECS))
		return -ENOMEM;
	if (bioset_init(&btrfs_clone_bioset, BIO_POOL_SIZE,
			offsetof(struct btrfs_bio, bio), 0))
		goto out_free_bioset;
	if (bioset_init(&btrfs_repair_bioset, BIO_POOL_SIZE,
			offsetof(struct btrfs_bio, bio),
			BIOSET_NEED_BVECS))
		goto out_free_clone_bioset;
	if (mempool_init_kmalloc_pool(&btrfs_failed_bio_pool, BIO_POOL_SIZE,
				      sizeof(struct btrfs_failed_bio)))
		goto out_free_repair_bioset;
	return 0;

out_free_repair_bioset:
	bioset_exit(&btrfs_repair_bioset);
out_free_clone_bioset:
	bioset_exit(&btrfs_clone_bioset);
out_free_bioset:
	bioset_exit(&btrfs_bioset);
	return -ENOMEM;
}

void __cold btrfs_bioset_exit(void)
{
	mempool_exit(&btrfs_failed_bio_pool);
	bioset_exit(&btrfs_repair_bioset);
	bioset_exit(&btrfs_clone_bioset);
	bioset_exit(&btrfs_bioset);
}