// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 * Copyright (C) 2022 Christoph Hellwig.
 */

#include <linux/bio.h>
#include "bio.h"
#include "ctree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "dev-replace.h"
#include "rcu-string.h"
#include "zoned.h"
#include "file-item.h"

static struct bio_set btrfs_bioset;
static struct bio_set btrfs_clone_bioset;
static struct bio_set btrfs_repair_bioset;
static mempool_t btrfs_failed_bio_pool;

struct btrfs_failed_bio {
	struct btrfs_bio *bbio;
	int num_copies;
	atomic_t repair_count;
};

/*
 * Initialize a btrfs_bio structure. This skips the embedded bio itself as it
 * is already initialized by the block layer.
 */
void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_inode *inode,
		    btrfs_bio_end_io_t end_io, void *private)
{
	memset(bbio, 0, offsetof(struct btrfs_bio, bio));
	bbio->inode = inode;
	bbio->end_io = end_io;
	bbio->private = private;
	atomic_set(&bbio->pending_ios, 1);
}

/*
 * Allocate a btrfs_bio structure. The btrfs_bio is the main I/O container for
 * btrfs, and is used for all I/O submitted through btrfs_submit_bio.
 *
 * Just like the underlying bio_alloc_bioset it will not fail as it is backed by
 * a mempool.
 */
struct btrfs_bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,
				  struct btrfs_inode *inode,
				  btrfs_bio_end_io_t end_io, void *private)
{
	struct btrfs_bio *bbio;
	struct bio *bio;

	bio = bio_alloc_bioset(NULL, nr_vecs, opf, GFP_NOFS, &btrfs_bioset);
	bbio = btrfs_bio(bio);
	btrfs_bio_init(bbio, inode, end_io, private);
	return bbio;
}

static blk_status_t btrfs_bio_extract_ordered_extent(struct btrfs_bio *bbio)
{
	struct btrfs_ordered_extent *ordered;
	int ret;

	ordered = btrfs_lookup_ordered_extent(bbio->inode, bbio->file_offset);
	if (WARN_ON_ONCE(!ordered))
		return BLK_STS_IOERR;
	ret = btrfs_extract_ordered_extent(bbio, ordered);
	btrfs_put_ordered_extent(ordered);

	return errno_to_blk_status(ret);
}
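
/*
 * Split off the first map_length bytes from a bio that spans more than one
 * stripe or exceeds the zone-append size limit, so that the result can be
 * mapped in one go. The clone shares the original bio's pages and completes
 * into the original btrfs_bio, which is kept alive via pending_ios.
 */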
static struct btrfs_bio *btrfs_split_bio(struct btrfs_fs_info *fs_info,
					 struct btrfs_bio *orig_bbio,
					 u64 map_length, bool use_append)
{
	struct btrfs_bio *bbio;
	struct bio *bio;

	if (use_append) {
		unsigned int nr_segs;

		bio = bio_split_rw(&orig_bbio->bio, &fs_info->limits, &nr_segs,
				   &btrfs_clone_bioset, map_length);
	} else {
		bio = bio_split(&orig_bbio->bio, map_length >> SECTOR_SHIFT,
				GFP_NOFS, &btrfs_clone_bioset);
	}
	bbio = btrfs_bio(bio);
	btrfs_bio_init(bbio, orig_bbio->inode, NULL, orig_bbio);

	bbio->file_offset = orig_bbio->file_offset;
	if (!(orig_bbio->bio.bi_opf & REQ_BTRFS_ONE_ORDERED))
		orig_bbio->file_offset += map_length;

	atomic_inc(&orig_bbio->pending_ios);
	return bbio;
}

static void btrfs_orig_write_end_io(struct bio *bio);

static void btrfs_bbio_propagate_error(struct btrfs_bio *bbio,
				       struct btrfs_bio *orig_bbio)
{
	/*
	 * For writes we tolerate nr_mirrors - 1 write failures, so we can't
	 * just blindly propagate a write failure here. Instead increment the
	 * error count in the original I/O context so that it is guaranteed to
	 * be larger than the error tolerance.
	 */
	if (bbio->bio.bi_end_io == &btrfs_orig_write_end_io) {
		struct btrfs_io_stripe *orig_stripe = orig_bbio->bio.bi_private;
		struct btrfs_io_context *orig_bioc = orig_stripe->bioc;

		atomic_add(orig_bioc->max_errors, &orig_bioc->error);
	} else {
		orig_bbio->bio.bi_status = bbio->bio.bi_status;
	}
}

static void btrfs_orig_bbio_end_io(struct btrfs_bio *bbio)
{
	if (bbio->bio.bi_pool == &btrfs_clone_bioset) {
		struct btrfs_bio *orig_bbio = bbio->private;

		if (bbio->bio.bi_status)
			btrfs_bbio_propagate_error(bbio, orig_bbio);
		bio_put(&bbio->bio);
		bbio = orig_bbio;
	}

	if (atomic_dec_and_test(&bbio->pending_ios))
		bbio->end_io(bbio);
}

static int next_repair_mirror(struct btrfs_failed_bio *fbio, int cur_mirror)
{
	if (cur_mirror == fbio->num_copies)
		return cur_mirror + 1 - fbio->num_copies;
	return cur_mirror + 1;
}

static int prev_repair_mirror(struct btrfs_failed_bio *fbio, int cur_mirror)
{
	if (cur_mirror == 1)
		return fbio->num_copies;
	return cur_mirror - 1;
}
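
/*
 * Example of the rotation above: with num_copies == 3 and an initial read
 * failure on mirror 2, next_repair_mirror() walks 2 -> 3 -> 1, and the
 * repair gives up once the next step would reach the originally failed
 * mirror again. prev_repair_mirror() walks the same ring backwards when
 * the recovered data is written back.
 */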

static void btrfs_repair_done(struct btrfs_failed_bio *fbio)
{
	if (atomic_dec_and_test(&fbio->repair_count)) {
		btrfs_orig_bbio_end_io(fbio->bbio);
		mempool_free(fbio, &btrfs_failed_bio_pool);
	}
}

static void btrfs_end_repair_bio(struct btrfs_bio *repair_bbio,
				 struct btrfs_device *dev)
{
	struct btrfs_failed_bio *fbio = repair_bbio->private;
	struct btrfs_inode *inode = repair_bbio->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct bio_vec *bv = bio_first_bvec_all(&repair_bbio->bio);
	int mirror = repair_bbio->mirror_num;

	if (repair_bbio->bio.bi_status ||
	    !btrfs_data_csum_ok(repair_bbio, dev, 0, bv)) {
		bio_reset(&repair_bbio->bio, NULL, REQ_OP_READ);
		repair_bbio->bio.bi_iter = repair_bbio->saved_iter;

		mirror = next_repair_mirror(fbio, mirror);
		if (mirror == fbio->bbio->mirror_num) {
			btrfs_debug(fs_info, "no mirror left");
			fbio->bbio->bio.bi_status = BLK_STS_IOERR;
			goto done;
		}

		btrfs_submit_bio(repair_bbio, mirror);
		return;
	}

	do {
		mirror = prev_repair_mirror(fbio, mirror);
		btrfs_repair_io_failure(fs_info, btrfs_ino(inode),
				repair_bbio->file_offset, fs_info->sectorsize,
				repair_bbio->saved_iter.bi_sector << SECTOR_SHIFT,
				bv->bv_page, bv->bv_offset, mirror);
	} while (mirror != fbio->bbio->mirror_num);

done:
	btrfs_repair_done(fbio);
	bio_put(&repair_bbio->bio);
}

/*
 * Try to kick off a repair read to the next available mirror for a bad sector.
 *
 * This primarily tries to recover good data to serve the actual read request,
 * but when a repair read succeeds it also tries to write the good data back
 * to the bad mirror(s) to restore the redundancy.
 */
static struct btrfs_failed_bio *repair_one_sector(struct btrfs_bio *failed_bbio,
						  u32 bio_offset,
						  struct bio_vec *bv,
						  struct btrfs_failed_bio *fbio)
{
	struct btrfs_inode *inode = failed_bbio->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	const u32 sectorsize = fs_info->sectorsize;
	const u64 logical = (failed_bbio->saved_iter.bi_sector << SECTOR_SHIFT);
	struct btrfs_bio *repair_bbio;
	struct bio *repair_bio;
	int num_copies;
	int mirror;

	btrfs_debug(fs_info, "repair read error: read error at %llu",
		    failed_bbio->file_offset + bio_offset);

	num_copies = btrfs_num_copies(fs_info, logical, sectorsize);
	if (num_copies == 1) {
		btrfs_debug(fs_info, "no copy to repair from");
		failed_bbio->bio.bi_status = BLK_STS_IOERR;
		return fbio;
	}

	if (!fbio) {
		fbio = mempool_alloc(&btrfs_failed_bio_pool, GFP_NOFS);
		fbio->bbio = failed_bbio;
		fbio->num_copies = num_copies;
		atomic_set(&fbio->repair_count, 1);
	}

	atomic_inc(&fbio->repair_count);

	repair_bio = bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_NOFS,
				      &btrfs_repair_bioset);
	repair_bio->bi_iter.bi_sector = failed_bbio->saved_iter.bi_sector;
	__bio_add_page(repair_bio, bv->bv_page, bv->bv_len, bv->bv_offset);

	repair_bbio = btrfs_bio(repair_bio);
	btrfs_bio_init(repair_bbio, failed_bbio->inode, NULL, fbio);
	repair_bbio->file_offset = failed_bbio->file_offset + bio_offset;

	mirror = next_repair_mirror(fbio, failed_bbio->mirror_num);
	btrfs_debug(fs_info, "submitting repair read to mirror %d", mirror);
	btrfs_submit_bio(repair_bbio, mirror);
	return fbio;
}
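
/*
 * Verify a completed data read against the preloaded checksums sector by
 * sector, and kick off a repair read for every sector that failed the csum
 * check or was part of a bio that failed as a whole.
 */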
static void btrfs_check_read_bio(struct btrfs_bio *bbio, struct btrfs_device *dev)
{
	struct btrfs_inode *inode = bbio->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u32 sectorsize = fs_info->sectorsize;
	struct bvec_iter *iter = &bbio->saved_iter;
	blk_status_t status = bbio->bio.bi_status;
	struct btrfs_failed_bio *fbio = NULL;
	u32 offset = 0;

	/*
	 * Hand off repair bios to the repair code as there is no upper level
	 * submitter for them.
	 */
	if (bbio->bio.bi_pool == &btrfs_repair_bioset) {
		btrfs_end_repair_bio(bbio, dev);
		return;
	}

	/* Clear the I/O error. A failed repair will reset it. */
	bbio->bio.bi_status = BLK_STS_OK;

	while (iter->bi_size) {
		struct bio_vec bv = bio_iter_iovec(&bbio->bio, *iter);

		bv.bv_len = min(bv.bv_len, sectorsize);
		if (status || !btrfs_data_csum_ok(bbio, dev, offset, &bv))
			fbio = repair_one_sector(bbio, offset, &bv, fbio);

		bio_advance_iter_single(&bbio->bio, iter, sectorsize);
		offset += sectorsize;
	}

	if (bbio->csum != bbio->csum_inline)
		kfree(bbio->csum);

	if (fbio)
		btrfs_repair_done(fbio);
	else
		btrfs_orig_bbio_end_io(bbio);
}

static void btrfs_log_dev_io_error(struct bio *bio, struct btrfs_device *dev)
{
	if (!dev || !dev->bdev)
		return;
	if (bio->bi_status != BLK_STS_IOERR && bio->bi_status != BLK_STS_TARGET)
		return;

	if (btrfs_op(bio) == BTRFS_MAP_WRITE)
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
	else if (!(bio->bi_opf & REQ_RAHEAD))
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
	if (bio->bi_opf & REQ_PREFLUSH)
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_FLUSH_ERRS);
}

static struct workqueue_struct *btrfs_end_io_wq(struct btrfs_fs_info *fs_info,
						struct bio *bio)
{
	if (bio->bi_opf & REQ_META)
		return fs_info->endio_meta_workers;
	return fs_info->endio_workers;
}

static void btrfs_end_bio_work(struct work_struct *work)
{
	struct btrfs_bio *bbio = container_of(work, struct btrfs_bio, end_io_work);

	/* Metadata reads are checked and repaired by the submitter. */
	if (bbio->bio.bi_opf & REQ_META)
		bbio->end_io(bbio);
	else
		btrfs_check_read_bio(bbio, bbio->bio.bi_private);
}

static void btrfs_simple_end_io(struct bio *bio)
{
	struct btrfs_bio *bbio = btrfs_bio(bio);
	struct btrfs_device *dev = bio->bi_private;
	struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;

	btrfs_bio_counter_dec(fs_info);

	if (bio->bi_status)
		btrfs_log_dev_io_error(bio, dev);

	if (bio_op(bio) == REQ_OP_READ) {
		INIT_WORK(&bbio->end_io_work, btrfs_end_bio_work);
		queue_work(btrfs_end_io_wq(fs_info, bio), &bbio->end_io_work);
	} else {
		if (bio_op(bio) == REQ_OP_ZONE_APPEND)
			btrfs_record_physical_zoned(bbio);
		btrfs_orig_bbio_end_io(bbio);
	}
}

static void btrfs_raid56_end_io(struct bio *bio)
{
	struct btrfs_io_context *bioc = bio->bi_private;
	struct btrfs_bio *bbio = btrfs_bio(bio);

	btrfs_bio_counter_dec(bioc->fs_info);
	bbio->mirror_num = bioc->mirror_num;
	if (bio_op(bio) == REQ_OP_READ && !(bbio->bio.bi_opf & REQ_META))
		btrfs_check_read_bio(bbio, NULL);
	else
		btrfs_orig_bbio_end_io(bbio);

	btrfs_put_bioc(bioc);
}
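
/*
 * Completion handler for the stripe that reuses the bio embedded in the
 * btrfs_bio of a mirrored write. The additional cloned stripe bios complete
 * through btrfs_clone_write_end_io below and only feed their status into
 * the shared error counter.
 */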
static void btrfs_orig_write_end_io(struct bio *bio)
{
	struct btrfs_io_stripe *stripe = bio->bi_private;
	struct btrfs_io_context *bioc = stripe->bioc;
	struct btrfs_bio *bbio = btrfs_bio(bio);

	btrfs_bio_counter_dec(bioc->fs_info);

	if (bio->bi_status) {
		atomic_inc(&bioc->error);
		btrfs_log_dev_io_error(bio, stripe->dev);
	}

	/*
	 * Only send an error to the higher layers if it is beyond the tolerance
	 * threshold.
	 */
	if (atomic_read(&bioc->error) > bioc->max_errors)
		bio->bi_status = BLK_STS_IOERR;
	else
		bio->bi_status = BLK_STS_OK;

	btrfs_orig_bbio_end_io(bbio);
	btrfs_put_bioc(bioc);
}

static void btrfs_clone_write_end_io(struct bio *bio)
{
	struct btrfs_io_stripe *stripe = bio->bi_private;

	if (bio->bi_status) {
		atomic_inc(&stripe->bioc->error);
		btrfs_log_dev_io_error(bio, stripe->dev);
	}

	/* Pass on control to the original bio this one was cloned from */
	bio_endio(stripe->bioc->orig_bio);
	bio_put(bio);
}

static void btrfs_submit_dev_bio(struct btrfs_device *dev, struct bio *bio)
{
	if (!dev || !dev->bdev ||
	    test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
	    (btrfs_op(bio) == BTRFS_MAP_WRITE &&
	     !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
		bio_io_error(bio);
		return;
	}

	bio_set_dev(bio, dev->bdev);

	/*
	 * For zone append writing, bi_sector must point to the beginning of
	 * the zone.
	 */
	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
		u64 physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
		u64 zone_start = round_down(physical, dev->fs_info->zone_size);

		ASSERT(btrfs_dev_is_sequential(dev, physical));
		bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT;
	}
	btrfs_debug_in_rcu(dev->fs_info,
		"%s: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
		__func__, bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector,
		(unsigned long)dev->bdev->bd_dev, btrfs_dev_name(dev),
		dev->devid, bio->bi_iter.bi_size);

	btrfsic_check_bio(bio);

	if (bio->bi_opf & REQ_BTRFS_CGROUP_PUNT)
		blkcg_punt_bio_submit(bio);
	else
		submit_bio(bio);
}

static void btrfs_submit_mirrored_bio(struct btrfs_io_context *bioc, int dev_nr)
{
	struct bio *orig_bio = bioc->orig_bio, *bio;

	ASSERT(bio_op(orig_bio) != REQ_OP_READ);

	/* Reuse the bio embedded into the btrfs_bio for the last mirror */
	if (dev_nr == bioc->num_stripes - 1) {
		bio = orig_bio;
		bio->bi_end_io = btrfs_orig_write_end_io;
	} else {
		bio = bio_alloc_clone(NULL, orig_bio, GFP_NOFS, &fs_bio_set);
		bio_inc_remaining(orig_bio);
		bio->bi_end_io = btrfs_clone_write_end_io;
	}

	bio->bi_private = &bioc->stripes[dev_nr];
	bio->bi_iter.bi_sector = bioc->stripes[dev_nr].physical >> SECTOR_SHIFT;
	bioc->stripes[dev_nr].bioc = bioc;
	btrfs_submit_dev_bio(bioc->stripes[dev_nr].dev, bio);
}
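
/*
 * Dispatch a mapped bio: directly to the single target device on the fast
 * path, to the raid56 code for parity profiles, or fanned out to every
 * stripe for mirrored writes.
 */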
static void __btrfs_submit_bio(struct bio *bio, struct btrfs_io_context *bioc,
			       struct btrfs_io_stripe *smap, int mirror_num)
{
	/* Do not leak our private flag into the block layer. */
	bio->bi_opf &= ~REQ_BTRFS_ONE_ORDERED;

	if (!bioc) {
		/* Single mirror read/write fast path. */
		btrfs_bio(bio)->mirror_num = mirror_num;
		bio->bi_iter.bi_sector = smap->physical >> SECTOR_SHIFT;
		bio->bi_private = smap->dev;
		bio->bi_end_io = btrfs_simple_end_io;
		btrfs_submit_dev_bio(smap->dev, bio);
	} else if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		/* Parity RAID write or read recovery. */
		bio->bi_private = bioc;
		bio->bi_end_io = btrfs_raid56_end_io;
		if (bio_op(bio) == REQ_OP_READ)
			raid56_parity_recover(bio, bioc, mirror_num);
		else
			raid56_parity_write(bio, bioc);
	} else {
		/* Write to multiple mirrors. */
		int total_devs = bioc->num_stripes;

		bioc->orig_bio = bio;
		for (int dev_nr = 0; dev_nr < total_devs; dev_nr++)
			btrfs_submit_mirrored_bio(bioc, dev_nr);
	}
}

static blk_status_t btrfs_bio_csum(struct btrfs_bio *bbio)
{
	if (bbio->bio.bi_opf & REQ_META)
		return btree_csum_one_bio(bbio);
	return btrfs_csum_one_bio(bbio);
}

/*
 * Async submit bios are used to offload expensive checksumming onto the worker
 * threads.
 */
struct async_submit_bio {
	struct btrfs_bio *bbio;
	struct btrfs_io_context *bioc;
	struct btrfs_io_stripe smap;
	int mirror_num;
	struct btrfs_work work;
};

/*
 * In order to insert checksums into the metadata in large chunks, we wait
 * until bio submission time. All the pages in the bio are checksummed and
 * sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record are
 * inserted into the btree.
 */
static void run_one_async_start(struct btrfs_work *work)
{
	struct async_submit_bio *async =
		container_of(work, struct async_submit_bio, work);
	blk_status_t ret;

	ret = btrfs_bio_csum(async->bbio);
	if (ret)
		async->bbio->bio.bi_status = ret;
}

/*
 * In order to insert checksums into the metadata in large chunks, we wait
 * until bio submission time. All the pages in the bio are checksummed and
 * sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record are
 * inserted into the tree.
 */
static void run_one_async_done(struct btrfs_work *work)
{
	struct async_submit_bio *async =
		container_of(work, struct async_submit_bio, work);
	struct bio *bio = &async->bbio->bio;

	/* If an error occurred we just want to clean up the bio and move on. */
	if (bio->bi_status) {
		btrfs_orig_bbio_end_io(async->bbio);
		return;
	}

	/*
	 * All of the bios that pass through here are from async helpers.
	 * Use REQ_BTRFS_CGROUP_PUNT to issue them from the owning cgroup's
	 * context. This changes nothing when cgroups aren't in use.
	 */
	bio->bi_opf |= REQ_BTRFS_CGROUP_PUNT;
	__btrfs_submit_bio(bio, async->bioc, &async->smap, async->mirror_num);
}

static void run_one_async_free(struct btrfs_work *work)
{
	kfree(container_of(work, struct async_submit_bio, work));
}

static bool should_async_write(struct btrfs_bio *bbio)
{
	/*
	 * If the I/O is issued by fsync and friends (->sync_writers != 0),
	 * submit it synchronously; otherwise try to defer the submission to a
	 * workqueue to parallelize the checksum calculation.
	 */
	if (atomic_read(&bbio->inode->sync_writers))
		return false;

	/*
	 * Submit metadata writes synchronously if the checksum implementation
	 * is fast, or we are on a zoned device that wants I/O to be submitted
	 * in order.
	 */
	if (bbio->bio.bi_opf & REQ_META) {
		struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;

		if (btrfs_is_zoned(fs_info))
			return false;
		if (test_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags))
			return false;
	}

	return true;
}
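
/*
 * In short: checksumming is deferred to a workqueue unless the submitter is
 * waiting on the I/O (fsync and friends), or the write is metadata on a
 * zoned device or on a filesystem with a fast checksum implementation,
 * where synchronous submission is cheaper or required for ordering.
 */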

/*
 * Submit bio to an async queue.
 *
 * Return true if the work has been successfully submitted, else false.
 */
static bool btrfs_wq_submit_bio(struct btrfs_bio *bbio,
				struct btrfs_io_context *bioc,
				struct btrfs_io_stripe *smap, int mirror_num)
{
	struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
	struct async_submit_bio *async;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return false;

	async->bbio = bbio;
	async->bioc = bioc;
	async->smap = *smap;
	async->mirror_num = mirror_num;

	btrfs_init_work(&async->work, run_one_async_start, run_one_async_done,
			run_one_async_free);
	if (op_is_sync(bbio->bio.bi_opf))
		btrfs_queue_work(fs_info->hipri_workers, &async->work);
	else
		btrfs_queue_work(fs_info->workers, &async->work);
	return true;
}

static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
{
	struct btrfs_inode *inode = bbio->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_bio *orig_bbio = bbio;
	struct bio *bio = &bbio->bio;
	u64 logical = bio->bi_iter.bi_sector << 9;
	u64 length = bio->bi_iter.bi_size;
	u64 map_length = length;
	bool use_append = btrfs_use_zone_append(bbio);
	struct btrfs_io_context *bioc = NULL;
	struct btrfs_io_stripe smap;
	blk_status_t ret;
	int error;

	btrfs_bio_counter_inc_blocked(fs_info);
	error = __btrfs_map_block(fs_info, btrfs_op(bio), logical, &map_length,
				  &bioc, &smap, &mirror_num, 1);
	if (error) {
		ret = errno_to_blk_status(error);
		goto fail;
	}

	map_length = min(map_length, length);
	if (use_append)
		map_length = min(map_length, fs_info->max_zone_append_size);

	if (map_length < length) {
		bbio = btrfs_split_bio(fs_info, bbio, map_length, use_append);
		bio = &bbio->bio;
	}

	/*
	 * Save the iter for the end_io handler and preload the checksums for
	 * data reads.
	 */
	if (bio_op(bio) == REQ_OP_READ && !(bio->bi_opf & REQ_META)) {
		bbio->saved_iter = bio->bi_iter;
		ret = btrfs_lookup_bio_sums(bbio);
		if (ret)
			goto fail_put_bio;
	}

	if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
		if (use_append) {
			bio->bi_opf &= ~REQ_OP_WRITE;
			bio->bi_opf |= REQ_OP_ZONE_APPEND;
			ret = btrfs_bio_extract_ordered_extent(bbio);
			if (ret)
				goto fail_put_bio;
		}

		/*
		 * Csum items for reloc roots have already been cloned at this
		 * point, so they are handled as part of the no-checksum case.
		 */
		if (!(inode->flags & BTRFS_INODE_NODATASUM) &&
		    !test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state) &&
		    !btrfs_is_data_reloc_root(inode->root)) {
			if (should_async_write(bbio) &&
			    btrfs_wq_submit_bio(bbio, bioc, &smap, mirror_num))
				goto done;

			ret = btrfs_bio_csum(bbio);
			if (ret)
				goto fail_put_bio;
		}
	}

	__btrfs_submit_bio(bio, bioc, &smap, mirror_num);
done:
	return map_length == length;

fail_put_bio:
	if (map_length < length)
		bio_put(bio);
fail:
	btrfs_bio_counter_dec(fs_info);
	btrfs_bio_end_io(orig_bbio, ret);
	/* Do not submit another chunk */
	return true;
}
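
/*
 * Submit a btrfs_bio, splitting and resubmitting the remainder as long as
 * btrfs_submit_chunk() could only map a prefix of the requested range.
 */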
void btrfs_submit_bio(struct btrfs_bio *bbio, int mirror_num)
{
	while (!btrfs_submit_chunk(bbio, mirror_num))
		;
}

/*
 * Submit a repair write.
 *
 * This bypasses btrfs_submit_bio deliberately, as that writes all copies in a
 * RAID setup. Here we only want to write the one bad copy, so we do the
 * mapping ourselves and submit the bio directly.
 *
 * The I/O is issued synchronously to block the repair read completion from
 * freeing the bio.
 */
int btrfs_repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
			    u64 length, u64 logical, struct page *page,
			    unsigned int pg_offset, int mirror_num)
{
	struct btrfs_device *dev;
	struct bio_vec bvec;
	struct bio bio;
	u64 map_length = 0;
	u64 sector;
	struct btrfs_io_context *bioc = NULL;
	int ret = 0;

	ASSERT(!(fs_info->sb->s_flags & SB_RDONLY));
	BUG_ON(!mirror_num);

	if (btrfs_repair_one_zone(fs_info, logical))
		return 0;

	map_length = length;

	/*
	 * Avoid races with device replace and make sure our bioc has devices
	 * associated with its stripes that don't go away while we are doing
	 * the read repair operation.
	 */
	btrfs_bio_counter_inc_blocked(fs_info);
	if (btrfs_is_parity_mirror(fs_info, logical, length)) {
		/*
		 * Note that we don't use BTRFS_MAP_WRITE because it's supposed
		 * to update all raid stripes, but here we just want to correct
		 * the bad stripe, thus BTRFS_MAP_READ is abused to only get
		 * the bad stripe's dev and sector.
		 */
		ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
				      &map_length, &bioc, 0);
		if (ret)
			goto out_counter_dec;
		ASSERT(bioc->mirror_num == 1);
	} else {
		ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical,
				      &map_length, &bioc, mirror_num);
		if (ret)
			goto out_counter_dec;
		/*
		 * This happens when dev-replace is also running, and the
		 * mirror_num indicates the dev-replace target.
		 *
		 * In this case, we don't need to do anything, as the read
		 * error just means the replace progress hasn't reached our
		 * read range, and the later replace routine will handle it.
		 */
		if (mirror_num != bioc->mirror_num)
			goto out_counter_dec;
	}

	sector = bioc->stripes[bioc->mirror_num - 1].physical >> 9;
	dev = bioc->stripes[bioc->mirror_num - 1].dev;
	btrfs_put_bioc(bioc);

	if (!dev || !dev->bdev ||
	    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
		ret = -EIO;
		goto out_counter_dec;
	}

	bio_init(&bio, dev->bdev, &bvec, 1, REQ_OP_WRITE | REQ_SYNC);
	bio.bi_iter.bi_sector = sector;
	__bio_add_page(&bio, page, length, pg_offset);

	btrfsic_check_bio(&bio);
	ret = submit_bio_wait(&bio);
	if (ret) {
		/* try to remap that extent elsewhere? */
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
		goto out_bio_uninit;
	}

	btrfs_info_rl_in_rcu(fs_info,
		"read error corrected: ino %llu off %llu (dev %s sector %llu)",
		ino, start, btrfs_dev_name(dev), sector);
	ret = 0;

out_bio_uninit:
	bio_uninit(&bio);
out_counter_dec:
	btrfs_bio_counter_dec(fs_info);
	return ret;
}
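
/*
 * Set up the biosets for regular, cloned/split and repair bios, as well as
 * the mempool backing btrfs_failed_bio, so that allocations can always make
 * progress even under memory pressure.
 */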
int __init btrfs_bioset_init(void)
{
	if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE,
			offsetof(struct btrfs_bio, bio),
			BIOSET_NEED_BVECS))
		return -ENOMEM;
	if (bioset_init(&btrfs_clone_bioset, BIO_POOL_SIZE,
			offsetof(struct btrfs_bio, bio), 0))
		goto out_free_bioset;
	if (bioset_init(&btrfs_repair_bioset, BIO_POOL_SIZE,
			offsetof(struct btrfs_bio, bio),
			BIOSET_NEED_BVECS))
		goto out_free_clone_bioset;
	if (mempool_init_kmalloc_pool(&btrfs_failed_bio_pool, BIO_POOL_SIZE,
				      sizeof(struct btrfs_failed_bio)))
		goto out_free_repair_bioset;
	return 0;

out_free_repair_bioset:
	bioset_exit(&btrfs_repair_bioset);
out_free_clone_bioset:
	bioset_exit(&btrfs_clone_bioset);
out_free_bioset:
	bioset_exit(&btrfs_bioset);
	return -ENOMEM;
}

void __cold btrfs_bioset_exit(void)
{
	mempool_exit(&btrfs_failed_bio_pool);
	bioset_exit(&btrfs_repair_bioset);
	bioset_exit(&btrfs_clone_bioset);
	bioset_exit(&btrfs_bioset);
}