1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2007 Oracle. All rights reserved. 4 */ 5 6 #include <linux/sched.h> 7 #include <linux/sched/mm.h> 8 #include <linux/bio.h> 9 #include <linux/slab.h> 10 #include <linux/blkdev.h> 11 #include <linux/ratelimit.h> 12 #include <linux/kthread.h> 13 #include <linux/raid/pq.h> 14 #include <linux/semaphore.h> 15 #include <linux/uuid.h> 16 #include <linux/list_sort.h> 17 #include <linux/namei.h> 18 #include "misc.h" 19 #include "ctree.h" 20 #include "extent_map.h" 21 #include "disk-io.h" 22 #include "transaction.h" 23 #include "print-tree.h" 24 #include "volumes.h" 25 #include "raid56.h" 26 #include "async-thread.h" 27 #include "check-integrity.h" 28 #include "rcu-string.h" 29 #include "dev-replace.h" 30 #include "sysfs.h" 31 #include "tree-checker.h" 32 #include "space-info.h" 33 #include "block-group.h" 34 #include "discard.h" 35 #include "zoned.h" 36 37 static struct bio_set btrfs_bioset; 38 39 #define BTRFS_BLOCK_GROUP_STRIPE_MASK (BTRFS_BLOCK_GROUP_RAID0 | \ 40 BTRFS_BLOCK_GROUP_RAID10 | \ 41 BTRFS_BLOCK_GROUP_RAID56_MASK) 42 43 const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = { 44 [BTRFS_RAID_RAID10] = { 45 .sub_stripes = 2, 46 .dev_stripes = 1, 47 .devs_max = 0, /* 0 == as many as possible */ 48 .devs_min = 2, 49 .tolerated_failures = 1, 50 .devs_increment = 2, 51 .ncopies = 2, 52 .nparity = 0, 53 .raid_name = "raid10", 54 .bg_flag = BTRFS_BLOCK_GROUP_RAID10, 55 .mindev_error = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET, 56 }, 57 [BTRFS_RAID_RAID1] = { 58 .sub_stripes = 1, 59 .dev_stripes = 1, 60 .devs_max = 2, 61 .devs_min = 2, 62 .tolerated_failures = 1, 63 .devs_increment = 2, 64 .ncopies = 2, 65 .nparity = 0, 66 .raid_name = "raid1", 67 .bg_flag = BTRFS_BLOCK_GROUP_RAID1, 68 .mindev_error = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET, 69 }, 70 [BTRFS_RAID_RAID1C3] = { 71 .sub_stripes = 1, 72 .dev_stripes = 1, 73 .devs_max = 3, 74 .devs_min = 3, 75 .tolerated_failures = 2, 76 .devs_increment = 3, 77 .ncopies = 3, 78 .nparity = 0, 79 .raid_name = "raid1c3", 80 .bg_flag = BTRFS_BLOCK_GROUP_RAID1C3, 81 .mindev_error = BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET, 82 }, 83 [BTRFS_RAID_RAID1C4] = { 84 .sub_stripes = 1, 85 .dev_stripes = 1, 86 .devs_max = 4, 87 .devs_min = 4, 88 .tolerated_failures = 3, 89 .devs_increment = 4, 90 .ncopies = 4, 91 .nparity = 0, 92 .raid_name = "raid1c4", 93 .bg_flag = BTRFS_BLOCK_GROUP_RAID1C4, 94 .mindev_error = BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET, 95 }, 96 [BTRFS_RAID_DUP] = { 97 .sub_stripes = 1, 98 .dev_stripes = 2, 99 .devs_max = 1, 100 .devs_min = 1, 101 .tolerated_failures = 0, 102 .devs_increment = 1, 103 .ncopies = 2, 104 .nparity = 0, 105 .raid_name = "dup", 106 .bg_flag = BTRFS_BLOCK_GROUP_DUP, 107 .mindev_error = 0, 108 }, 109 [BTRFS_RAID_RAID0] = { 110 .sub_stripes = 1, 111 .dev_stripes = 1, 112 .devs_max = 0, 113 .devs_min = 1, 114 .tolerated_failures = 0, 115 .devs_increment = 1, 116 .ncopies = 1, 117 .nparity = 0, 118 .raid_name = "raid0", 119 .bg_flag = BTRFS_BLOCK_GROUP_RAID0, 120 .mindev_error = 0, 121 }, 122 [BTRFS_RAID_SINGLE] = { 123 .sub_stripes = 1, 124 .dev_stripes = 1, 125 .devs_max = 1, 126 .devs_min = 1, 127 .tolerated_failures = 0, 128 .devs_increment = 1, 129 .ncopies = 1, 130 .nparity = 0, 131 .raid_name = "single", 132 .bg_flag = 0, 133 .mindev_error = 0, 134 }, 135 [BTRFS_RAID_RAID5] = { 136 .sub_stripes = 1, 137 .dev_stripes = 1, 138 .devs_max = 0, 139 .devs_min = 2, 140 .tolerated_failures = 1, 141 .devs_increment = 1, 142 .ncopies = 1, 143 .nparity = 1, 144 .raid_name = 
"raid5", 145 .bg_flag = BTRFS_BLOCK_GROUP_RAID5, 146 .mindev_error = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET, 147 }, 148 [BTRFS_RAID_RAID6] = { 149 .sub_stripes = 1, 150 .dev_stripes = 1, 151 .devs_max = 0, 152 .devs_min = 3, 153 .tolerated_failures = 2, 154 .devs_increment = 1, 155 .ncopies = 1, 156 .nparity = 2, 157 .raid_name = "raid6", 158 .bg_flag = BTRFS_BLOCK_GROUP_RAID6, 159 .mindev_error = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET, 160 }, 161 }; 162 163 /* 164 * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which 165 * can be used as index to access btrfs_raid_array[]. 166 */ 167 enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags) 168 { 169 const u64 profile = (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK); 170 171 if (!profile) 172 return BTRFS_RAID_SINGLE; 173 174 return BTRFS_BG_FLAG_TO_INDEX(profile); 175 } 176 177 const char *btrfs_bg_type_to_raid_name(u64 flags) 178 { 179 const int index = btrfs_bg_flags_to_raid_index(flags); 180 181 if (index >= BTRFS_NR_RAID_TYPES) 182 return NULL; 183 184 return btrfs_raid_array[index].raid_name; 185 } 186 187 int btrfs_nr_parity_stripes(u64 type) 188 { 189 enum btrfs_raid_types index = btrfs_bg_flags_to_raid_index(type); 190 191 return btrfs_raid_array[index].nparity; 192 } 193 194 /* 195 * Fill @buf with textual description of @bg_flags, no more than @size_buf 196 * bytes including terminating null byte. 197 */ 198 void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf) 199 { 200 int i; 201 int ret; 202 char *bp = buf; 203 u64 flags = bg_flags; 204 u32 size_bp = size_buf; 205 206 if (!flags) { 207 strcpy(bp, "NONE"); 208 return; 209 } 210 211 #define DESCRIBE_FLAG(flag, desc) \ 212 do { \ 213 if (flags & (flag)) { \ 214 ret = snprintf(bp, size_bp, "%s|", (desc)); \ 215 if (ret < 0 || ret >= size_bp) \ 216 goto out_overflow; \ 217 size_bp -= ret; \ 218 bp += ret; \ 219 flags &= ~(flag); \ 220 } \ 221 } while (0) 222 223 DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data"); 224 DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system"); 225 DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata"); 226 227 DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single"); 228 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) 229 DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag, 230 btrfs_raid_array[i].raid_name); 231 #undef DESCRIBE_FLAG 232 233 if (flags) { 234 ret = snprintf(bp, size_bp, "0x%llx|", flags); 235 size_bp -= ret; 236 } 237 238 if (size_bp < size_buf) 239 buf[size_buf - size_bp - 1] = '\0'; /* remove last | */ 240 241 /* 242 * The text is trimmed, it's up to the caller to provide sufficiently 243 * large buffer 244 */ 245 out_overflow:; 246 } 247 248 static int init_first_rw_device(struct btrfs_trans_handle *trans); 249 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info); 250 static void btrfs_dev_stat_print_on_load(struct btrfs_device *device); 251 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, 252 enum btrfs_map_op op, u64 logical, u64 *length, 253 struct btrfs_io_context **bioc_ret, 254 struct btrfs_io_stripe *smap, 255 int *mirror_num_ret, int need_raid_map); 256 257 /* 258 * Device locking 259 * ============== 260 * 261 * There are several mutexes that protect manipulation of devices and low-level 262 * structures like chunks but not block groups, extents or files 263 * 264 * uuid_mutex (global lock) 265 * ------------------------ 266 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from 267 * the SCAN_DEV ioctl registration or from mount either implicitly (the 
first 268 * device) or requested by the device= mount option 269 * 270 * the mutex can be very coarse and can cover long-running operations 271 * 272 * protects: updates to fs_devices counters like missing devices, rw devices, 273 * seeding, structure cloning, opening/closing devices at mount/umount time 274 * 275 * global::fs_devs - add, remove, updates to the global list 276 * 277 * does not protect: manipulation of the fs_devices::devices list in general 278 * but in mount context it could be used to exclude list modifications by eg. 279 * scan ioctl 280 * 281 * btrfs_device::name - renames (write side), read is RCU 282 * 283 * fs_devices::device_list_mutex (per-fs, with RCU) 284 * ------------------------------------------------ 285 * protects updates to fs_devices::devices, ie. adding and deleting 286 * 287 * simple list traversal with read-only actions can be done with RCU protection 288 * 289 * may be used to exclude some operations from running concurrently without any 290 * modifications to the list (see write_all_supers) 291 * 292 * Is not required at mount and close times, because our device list is 293 * protected by the uuid_mutex at that point. 294 * 295 * balance_mutex 296 * ------------- 297 * protects balance structures (status, state) and context accessed from 298 * several places (internally, ioctl) 299 * 300 * chunk_mutex 301 * ----------- 302 * protects chunks, adding or removing during allocation, trim or when a new 303 * device is added/removed. Additionally it also protects post_commit_list of 304 * individual devices, since they can be added to the transaction's 305 * post_commit_list only with chunk_mutex held. 306 * 307 * cleaner_mutex 308 * ------------- 309 * a big lock that is held by the cleaner thread and prevents running subvolume 310 * cleaning together with relocation or delayed iputs 311 * 312 * 313 * Lock nesting 314 * ============ 315 * 316 * uuid_mutex 317 * device_list_mutex 318 * chunk_mutex 319 * balance_mutex 320 * 321 * 322 * Exclusive operations 323 * ==================== 324 * 325 * Maintains the exclusivity of the following operations that apply to the 326 * whole filesystem and cannot run in parallel. 327 * 328 * - Balance (*) 329 * - Device add 330 * - Device remove 331 * - Device replace (*) 332 * - Resize 333 * 334 * The device operations (as above) can be in one of the following states: 335 * 336 * - Running state 337 * - Paused state 338 * - Completed state 339 * 340 * Only device operations marked with (*) can go into the Paused state for the 341 * following reasons: 342 * 343 * - ioctl (only Balance can be Paused through ioctl) 344 * - filesystem remounted as read-only 345 * - filesystem unmounted and mounted as read-only 346 * - system power-cycle and filesystem mounted as read-only 347 * - filesystem or device errors leading to forced read-only 348 * 349 * The status of exclusive operation is set and cleared atomically. 350 * During the course of Paused state, fs_info::exclusive_operation remains set. 351 * A device operation in Paused or Running state can be canceled or resumed 352 * either by ioctl (Balance only) or when remounted as read-write. 353 * The exclusive status is cleared when the device operation is canceled or 354 * completed. 
355 */ 356 357 DEFINE_MUTEX(uuid_mutex); 358 static LIST_HEAD(fs_uuids); 359 struct list_head * __attribute_const__ btrfs_get_fs_uuids(void) 360 { 361 return &fs_uuids; 362 } 363 364 /* 365 * alloc_fs_devices - allocate struct btrfs_fs_devices 366 * @fsid: if not NULL, copy the UUID to fs_devices::fsid 367 * @metadata_fsid: if not NULL, copy the UUID to fs_devices::metadata_fsid 368 * 369 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR(). 370 * The returned struct is not linked onto any lists and can be destroyed with 371 * kfree() right away. 372 */ 373 static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid, 374 const u8 *metadata_fsid) 375 { 376 struct btrfs_fs_devices *fs_devs; 377 378 fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL); 379 if (!fs_devs) 380 return ERR_PTR(-ENOMEM); 381 382 mutex_init(&fs_devs->device_list_mutex); 383 384 INIT_LIST_HEAD(&fs_devs->devices); 385 INIT_LIST_HEAD(&fs_devs->alloc_list); 386 INIT_LIST_HEAD(&fs_devs->fs_list); 387 INIT_LIST_HEAD(&fs_devs->seed_list); 388 if (fsid) 389 memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE); 390 391 if (metadata_fsid) 392 memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE); 393 else if (fsid) 394 memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE); 395 396 return fs_devs; 397 } 398 399 void btrfs_free_device(struct btrfs_device *device) 400 { 401 WARN_ON(!list_empty(&device->post_commit_list)); 402 rcu_string_free(device->name); 403 extent_io_tree_release(&device->alloc_state); 404 btrfs_destroy_dev_zone_info(device); 405 kfree(device); 406 } 407 408 static void free_fs_devices(struct btrfs_fs_devices *fs_devices) 409 { 410 struct btrfs_device *device; 411 WARN_ON(fs_devices->opened); 412 while (!list_empty(&fs_devices->devices)) { 413 device = list_entry(fs_devices->devices.next, 414 struct btrfs_device, dev_list); 415 list_del(&device->dev_list); 416 btrfs_free_device(device); 417 } 418 kfree(fs_devices); 419 } 420 421 void __exit btrfs_cleanup_fs_uuids(void) 422 { 423 struct btrfs_fs_devices *fs_devices; 424 425 while (!list_empty(&fs_uuids)) { 426 fs_devices = list_entry(fs_uuids.next, 427 struct btrfs_fs_devices, fs_list); 428 list_del(&fs_devices->fs_list); 429 free_fs_devices(fs_devices); 430 } 431 } 432 433 static noinline struct btrfs_fs_devices *find_fsid( 434 const u8 *fsid, const u8 *metadata_fsid) 435 { 436 struct btrfs_fs_devices *fs_devices; 437 438 ASSERT(fsid); 439 440 /* Handle non-split brain cases */ 441 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 442 if (metadata_fsid) { 443 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0 444 && memcmp(metadata_fsid, fs_devices->metadata_uuid, 445 BTRFS_FSID_SIZE) == 0) 446 return fs_devices; 447 } else { 448 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0) 449 return fs_devices; 450 } 451 } 452 return NULL; 453 } 454 455 static struct btrfs_fs_devices *find_fsid_with_metadata_uuid( 456 struct btrfs_super_block *disk_super) 457 { 458 459 struct btrfs_fs_devices *fs_devices; 460 461 /* 462 * Handle scanned device having completed its fsid change but 463 * belonging to a fs_devices that was created by first scanning 464 * a device which didn't have its fsid/metadata_uuid changed 465 * at all and the CHANGING_FSID_V2 flag set. 
466 */ 467 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 468 if (fs_devices->fsid_change && 469 memcmp(disk_super->metadata_uuid, fs_devices->fsid, 470 BTRFS_FSID_SIZE) == 0 && 471 memcmp(fs_devices->fsid, fs_devices->metadata_uuid, 472 BTRFS_FSID_SIZE) == 0) { 473 return fs_devices; 474 } 475 } 476 /* 477 * Handle scanned device having completed its fsid change but 478 * belonging to a fs_devices that was created by a device that 479 * has an outdated pair of fsid/metadata_uuid and 480 * CHANGING_FSID_V2 flag set. 481 */ 482 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 483 if (fs_devices->fsid_change && 484 memcmp(fs_devices->metadata_uuid, 485 fs_devices->fsid, BTRFS_FSID_SIZE) != 0 && 486 memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid, 487 BTRFS_FSID_SIZE) == 0) { 488 return fs_devices; 489 } 490 } 491 492 return find_fsid(disk_super->fsid, disk_super->metadata_uuid); 493 } 494 495 496 static int 497 btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder, 498 int flush, struct block_device **bdev, 499 struct btrfs_super_block **disk_super) 500 { 501 int ret; 502 503 *bdev = blkdev_get_by_path(device_path, flags, holder); 504 505 if (IS_ERR(*bdev)) { 506 ret = PTR_ERR(*bdev); 507 goto error; 508 } 509 510 if (flush) 511 sync_blockdev(*bdev); 512 ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE); 513 if (ret) { 514 blkdev_put(*bdev, flags); 515 goto error; 516 } 517 invalidate_bdev(*bdev); 518 *disk_super = btrfs_read_dev_super(*bdev); 519 if (IS_ERR(*disk_super)) { 520 ret = PTR_ERR(*disk_super); 521 blkdev_put(*bdev, flags); 522 goto error; 523 } 524 525 return 0; 526 527 error: 528 *bdev = NULL; 529 return ret; 530 } 531 532 /** 533 * Search and remove all stale devices (which are not mounted). 534 * When both inputs are NULL, it will search and release all stale devices. 535 * 536 * @devt: Optional. When provided will it release all unmounted devices 537 * matching this devt only. 538 * @skip_device: Optional. Will skip this device when searching for the stale 539 * devices. 540 * 541 * Return: 0 for success or if @devt is 0. 542 * -EBUSY if @devt is a mounted device. 543 * -ENOENT if @devt does not match any device in the list. 
544 */ 545 static int btrfs_free_stale_devices(dev_t devt, struct btrfs_device *skip_device) 546 { 547 struct btrfs_fs_devices *fs_devices, *tmp_fs_devices; 548 struct btrfs_device *device, *tmp_device; 549 int ret = 0; 550 551 lockdep_assert_held(&uuid_mutex); 552 553 if (devt) 554 ret = -ENOENT; 555 556 list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) { 557 558 mutex_lock(&fs_devices->device_list_mutex); 559 list_for_each_entry_safe(device, tmp_device, 560 &fs_devices->devices, dev_list) { 561 if (skip_device && skip_device == device) 562 continue; 563 if (devt && devt != device->devt) 564 continue; 565 if (fs_devices->opened) { 566 /* for an already deleted device return 0 */ 567 if (devt && ret != 0) 568 ret = -EBUSY; 569 break; 570 } 571 572 /* delete the stale device */ 573 fs_devices->num_devices--; 574 list_del(&device->dev_list); 575 btrfs_free_device(device); 576 577 ret = 0; 578 } 579 mutex_unlock(&fs_devices->device_list_mutex); 580 581 if (fs_devices->num_devices == 0) { 582 btrfs_sysfs_remove_fsid(fs_devices); 583 list_del(&fs_devices->fs_list); 584 free_fs_devices(fs_devices); 585 } 586 } 587 588 return ret; 589 } 590 591 /* 592 * This is only used on mount, and we are protected from competing things 593 * messing with our fs_devices by the uuid_mutex, thus we do not need the 594 * fs_devices->device_list_mutex here. 595 */ 596 static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices, 597 struct btrfs_device *device, fmode_t flags, 598 void *holder) 599 { 600 struct block_device *bdev; 601 struct btrfs_super_block *disk_super; 602 u64 devid; 603 int ret; 604 605 if (device->bdev) 606 return -EINVAL; 607 if (!device->name) 608 return -EINVAL; 609 610 ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1, 611 &bdev, &disk_super); 612 if (ret) 613 return ret; 614 615 devid = btrfs_stack_device_id(&disk_super->dev_item); 616 if (devid != device->devid) 617 goto error_free_page; 618 619 if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE)) 620 goto error_free_page; 621 622 device->generation = btrfs_super_generation(disk_super); 623 624 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) { 625 if (btrfs_super_incompat_flags(disk_super) & 626 BTRFS_FEATURE_INCOMPAT_METADATA_UUID) { 627 pr_err( 628 "BTRFS: Invalid seeding and uuid-changed device detected\n"); 629 goto error_free_page; 630 } 631 632 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 633 fs_devices->seeding = true; 634 } else { 635 if (bdev_read_only(bdev)) 636 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 637 else 638 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 639 } 640 641 if (!bdev_nonrot(bdev)) 642 fs_devices->rotating = true; 643 644 device->bdev = bdev; 645 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 646 device->mode = flags; 647 648 fs_devices->open_devices++; 649 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 650 device->devid != BTRFS_DEV_REPLACE_DEVID) { 651 fs_devices->rw_devices++; 652 list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list); 653 } 654 btrfs_release_disk_super(disk_super); 655 656 return 0; 657 658 error_free_page: 659 btrfs_release_disk_super(disk_super); 660 blkdev_put(bdev, flags); 661 662 return -EINVAL; 663 } 664 665 /* 666 * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices 667 * being created with a disk that has already completed its fsid change. 
Such 668 * disk can belong to an fs which has its FSID changed or to one which doesn't. 669 * Handle both cases here. 670 */ 671 static struct btrfs_fs_devices *find_fsid_inprogress( 672 struct btrfs_super_block *disk_super) 673 { 674 struct btrfs_fs_devices *fs_devices; 675 676 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 677 if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid, 678 BTRFS_FSID_SIZE) != 0 && 679 memcmp(fs_devices->metadata_uuid, disk_super->fsid, 680 BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) { 681 return fs_devices; 682 } 683 } 684 685 return find_fsid(disk_super->fsid, NULL); 686 } 687 688 689 static struct btrfs_fs_devices *find_fsid_changed( 690 struct btrfs_super_block *disk_super) 691 { 692 struct btrfs_fs_devices *fs_devices; 693 694 /* 695 * Handles the case where scanned device is part of an fs that had 696 * multiple successful changes of FSID but currently device didn't 697 * observe it. Meaning our fsid will be different than theirs. We need 698 * to handle two subcases : 699 * 1 - The fs still continues to have different METADATA/FSID uuids. 700 * 2 - The fs is switched back to its original FSID (METADATA/FSID 701 * are equal). 702 */ 703 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 704 /* Changed UUIDs */ 705 if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid, 706 BTRFS_FSID_SIZE) != 0 && 707 memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid, 708 BTRFS_FSID_SIZE) == 0 && 709 memcmp(fs_devices->fsid, disk_super->fsid, 710 BTRFS_FSID_SIZE) != 0) 711 return fs_devices; 712 713 /* Unchanged UUIDs */ 714 if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid, 715 BTRFS_FSID_SIZE) == 0 && 716 memcmp(fs_devices->fsid, disk_super->metadata_uuid, 717 BTRFS_FSID_SIZE) == 0) 718 return fs_devices; 719 } 720 721 return NULL; 722 } 723 724 static struct btrfs_fs_devices *find_fsid_reverted_metadata( 725 struct btrfs_super_block *disk_super) 726 { 727 struct btrfs_fs_devices *fs_devices; 728 729 /* 730 * Handle the case where the scanned device is part of an fs whose last 731 * metadata UUID change reverted it to the original FSID. At the same 732 * time * fs_devices was first created by another constitutent device 733 * which didn't fully observe the operation. This results in an 734 * btrfs_fs_devices created with metadata/fsid different AND 735 * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the 736 * fs_devices equal to the FSID of the disk. 
737 */ 738 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 739 if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid, 740 BTRFS_FSID_SIZE) != 0 && 741 memcmp(fs_devices->metadata_uuid, disk_super->fsid, 742 BTRFS_FSID_SIZE) == 0 && 743 fs_devices->fsid_change) 744 return fs_devices; 745 } 746 747 return NULL; 748 } 749 /* 750 * Add new device to list of registered devices 751 * 752 * Returns: 753 * device pointer which was just added or updated when successful 754 * error pointer when failed 755 */ 756 static noinline struct btrfs_device *device_list_add(const char *path, 757 struct btrfs_super_block *disk_super, 758 bool *new_device_added) 759 { 760 struct btrfs_device *device; 761 struct btrfs_fs_devices *fs_devices = NULL; 762 struct rcu_string *name; 763 u64 found_transid = btrfs_super_generation(disk_super); 764 u64 devid = btrfs_stack_device_id(&disk_super->dev_item); 765 dev_t path_devt; 766 int error; 767 bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) & 768 BTRFS_FEATURE_INCOMPAT_METADATA_UUID); 769 bool fsid_change_in_progress = (btrfs_super_flags(disk_super) & 770 BTRFS_SUPER_FLAG_CHANGING_FSID_V2); 771 772 error = lookup_bdev(path, &path_devt); 773 if (error) 774 return ERR_PTR(error); 775 776 if (fsid_change_in_progress) { 777 if (!has_metadata_uuid) 778 fs_devices = find_fsid_inprogress(disk_super); 779 else 780 fs_devices = find_fsid_changed(disk_super); 781 } else if (has_metadata_uuid) { 782 fs_devices = find_fsid_with_metadata_uuid(disk_super); 783 } else { 784 fs_devices = find_fsid_reverted_metadata(disk_super); 785 if (!fs_devices) 786 fs_devices = find_fsid(disk_super->fsid, NULL); 787 } 788 789 790 if (!fs_devices) { 791 if (has_metadata_uuid) 792 fs_devices = alloc_fs_devices(disk_super->fsid, 793 disk_super->metadata_uuid); 794 else 795 fs_devices = alloc_fs_devices(disk_super->fsid, NULL); 796 797 if (IS_ERR(fs_devices)) 798 return ERR_CAST(fs_devices); 799 800 fs_devices->fsid_change = fsid_change_in_progress; 801 802 mutex_lock(&fs_devices->device_list_mutex); 803 list_add(&fs_devices->fs_list, &fs_uuids); 804 805 device = NULL; 806 } else { 807 struct btrfs_dev_lookup_args args = { 808 .devid = devid, 809 .uuid = disk_super->dev_item.uuid, 810 }; 811 812 mutex_lock(&fs_devices->device_list_mutex); 813 device = btrfs_find_device(fs_devices, &args); 814 815 /* 816 * If this disk has been pulled into an fs devices created by 817 * a device which had the CHANGING_FSID_V2 flag then replace the 818 * metadata_uuid/fsid values of the fs_devices. 
819 */ 820 if (fs_devices->fsid_change && 821 found_transid > fs_devices->latest_generation) { 822 memcpy(fs_devices->fsid, disk_super->fsid, 823 BTRFS_FSID_SIZE); 824 825 if (has_metadata_uuid) 826 memcpy(fs_devices->metadata_uuid, 827 disk_super->metadata_uuid, 828 BTRFS_FSID_SIZE); 829 else 830 memcpy(fs_devices->metadata_uuid, 831 disk_super->fsid, BTRFS_FSID_SIZE); 832 833 fs_devices->fsid_change = false; 834 } 835 } 836 837 if (!device) { 838 if (fs_devices->opened) { 839 mutex_unlock(&fs_devices->device_list_mutex); 840 return ERR_PTR(-EBUSY); 841 } 842 843 device = btrfs_alloc_device(NULL, &devid, 844 disk_super->dev_item.uuid); 845 if (IS_ERR(device)) { 846 mutex_unlock(&fs_devices->device_list_mutex); 847 /* we can safely leave the fs_devices entry around */ 848 return device; 849 } 850 851 name = rcu_string_strdup(path, GFP_NOFS); 852 if (!name) { 853 btrfs_free_device(device); 854 mutex_unlock(&fs_devices->device_list_mutex); 855 return ERR_PTR(-ENOMEM); 856 } 857 rcu_assign_pointer(device->name, name); 858 device->devt = path_devt; 859 860 list_add_rcu(&device->dev_list, &fs_devices->devices); 861 fs_devices->num_devices++; 862 863 device->fs_devices = fs_devices; 864 *new_device_added = true; 865 866 if (disk_super->label[0]) 867 pr_info( 868 "BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n", 869 disk_super->label, devid, found_transid, path, 870 current->comm, task_pid_nr(current)); 871 else 872 pr_info( 873 "BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n", 874 disk_super->fsid, devid, found_transid, path, 875 current->comm, task_pid_nr(current)); 876 877 } else if (!device->name || strcmp(device->name->str, path)) { 878 /* 879 * When FS is already mounted. 880 * 1. If you are here and if the device->name is NULL that 881 * means this device was missing at time of FS mount. 882 * 2. If you are here and if the device->name is different 883 * from 'path' that means either 884 * a. The same device disappeared and reappeared with 885 * different name. or 886 * b. The missing-disk-which-was-replaced, has 887 * reappeared now. 888 * 889 * We must allow 1 and 2a above. But 2b would be a spurious 890 * and unintentional. 891 * 892 * Further in case of 1 and 2a above, the disk at 'path' 893 * would have missed some transaction when it was away and 894 * in case of 2a the stale bdev has to be updated as well. 895 * 2b must not be allowed at all time. 896 */ 897 898 /* 899 * For now, we do allow update to btrfs_fs_device through the 900 * btrfs dev scan cli after FS has been mounted. We're still 901 * tracking a problem where systems fail mount by subvolume id 902 * when we reject replacement on a mounted FS. 903 */ 904 if (!fs_devices->opened && found_transid < device->generation) { 905 /* 906 * That is if the FS is _not_ mounted and if you 907 * are here, that means there is more than one 908 * disk with same uuid and devid.We keep the one 909 * with larger generation number or the last-in if 910 * generation are equal. 911 */ 912 mutex_unlock(&fs_devices->device_list_mutex); 913 return ERR_PTR(-EEXIST); 914 } 915 916 /* 917 * We are going to replace the device path for a given devid, 918 * make sure it's the same device if the device is mounted 919 * 920 * NOTE: the device->fs_info may not be reliable here so pass 921 * in a NULL to message helpers instead. This avoids a possible 922 * use-after-free when the fs_info and fs_info->sb are already 923 * torn down. 
924 */ 925 if (device->bdev) { 926 if (device->devt != path_devt) { 927 mutex_unlock(&fs_devices->device_list_mutex); 928 btrfs_warn_in_rcu(NULL, 929 "duplicate device %s devid %llu generation %llu scanned by %s (%d)", 930 path, devid, found_transid, 931 current->comm, 932 task_pid_nr(current)); 933 return ERR_PTR(-EEXIST); 934 } 935 btrfs_info_in_rcu(NULL, 936 "devid %llu device path %s changed to %s scanned by %s (%d)", 937 devid, rcu_str_deref(device->name), 938 path, current->comm, 939 task_pid_nr(current)); 940 } 941 942 name = rcu_string_strdup(path, GFP_NOFS); 943 if (!name) { 944 mutex_unlock(&fs_devices->device_list_mutex); 945 return ERR_PTR(-ENOMEM); 946 } 947 rcu_string_free(device->name); 948 rcu_assign_pointer(device->name, name); 949 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { 950 fs_devices->missing_devices--; 951 clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 952 } 953 device->devt = path_devt; 954 } 955 956 /* 957 * Unmount does not free the btrfs_device struct but would zero 958 * generation along with most of the other members. So just update 959 * it back. We need it to pick the disk with largest generation 960 * (as above). 961 */ 962 if (!fs_devices->opened) { 963 device->generation = found_transid; 964 fs_devices->latest_generation = max_t(u64, found_transid, 965 fs_devices->latest_generation); 966 } 967 968 fs_devices->total_devices = btrfs_super_num_devices(disk_super); 969 970 mutex_unlock(&fs_devices->device_list_mutex); 971 return device; 972 } 973 974 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig) 975 { 976 struct btrfs_fs_devices *fs_devices; 977 struct btrfs_device *device; 978 struct btrfs_device *orig_dev; 979 int ret = 0; 980 981 lockdep_assert_held(&uuid_mutex); 982 983 fs_devices = alloc_fs_devices(orig->fsid, NULL); 984 if (IS_ERR(fs_devices)) 985 return fs_devices; 986 987 fs_devices->total_devices = orig->total_devices; 988 989 list_for_each_entry(orig_dev, &orig->devices, dev_list) { 990 struct rcu_string *name; 991 992 device = btrfs_alloc_device(NULL, &orig_dev->devid, 993 orig_dev->uuid); 994 if (IS_ERR(device)) { 995 ret = PTR_ERR(device); 996 goto error; 997 } 998 999 /* 1000 * This is ok to do without rcu read locked because we hold the 1001 * uuid mutex so nothing we touch in here is going to disappear. 1002 */ 1003 if (orig_dev->name) { 1004 name = rcu_string_strdup(orig_dev->name->str, 1005 GFP_KERNEL); 1006 if (!name) { 1007 btrfs_free_device(device); 1008 ret = -ENOMEM; 1009 goto error; 1010 } 1011 rcu_assign_pointer(device->name, name); 1012 } 1013 1014 if (orig_dev->zone_info) { 1015 struct btrfs_zoned_device_info *zone_info; 1016 1017 zone_info = btrfs_clone_dev_zone_info(orig_dev); 1018 if (!zone_info) { 1019 btrfs_free_device(device); 1020 ret = -ENOMEM; 1021 goto error; 1022 } 1023 device->zone_info = zone_info; 1024 } 1025 1026 list_add(&device->dev_list, &fs_devices->devices); 1027 device->fs_devices = fs_devices; 1028 fs_devices->num_devices++; 1029 } 1030 return fs_devices; 1031 error: 1032 free_fs_devices(fs_devices); 1033 return ERR_PTR(ret); 1034 } 1035 1036 static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, 1037 struct btrfs_device **latest_dev) 1038 { 1039 struct btrfs_device *device, *next; 1040 1041 /* This is the initialized path, it is safe to release the devices. 
*/ 1042 list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) { 1043 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) { 1044 if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, 1045 &device->dev_state) && 1046 !test_bit(BTRFS_DEV_STATE_MISSING, 1047 &device->dev_state) && 1048 (!*latest_dev || 1049 device->generation > (*latest_dev)->generation)) { 1050 *latest_dev = device; 1051 } 1052 continue; 1053 } 1054 1055 /* 1056 * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID, 1057 * in btrfs_init_dev_replace() so just continue. 1058 */ 1059 if (device->devid == BTRFS_DEV_REPLACE_DEVID) 1060 continue; 1061 1062 if (device->bdev) { 1063 blkdev_put(device->bdev, device->mode); 1064 device->bdev = NULL; 1065 fs_devices->open_devices--; 1066 } 1067 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 1068 list_del_init(&device->dev_alloc_list); 1069 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 1070 fs_devices->rw_devices--; 1071 } 1072 list_del_init(&device->dev_list); 1073 fs_devices->num_devices--; 1074 btrfs_free_device(device); 1075 } 1076 1077 } 1078 1079 /* 1080 * After we have read the system tree and know devids belonging to this 1081 * filesystem, remove the device which does not belong there. 1082 */ 1083 void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices) 1084 { 1085 struct btrfs_device *latest_dev = NULL; 1086 struct btrfs_fs_devices *seed_dev; 1087 1088 mutex_lock(&uuid_mutex); 1089 __btrfs_free_extra_devids(fs_devices, &latest_dev); 1090 1091 list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list) 1092 __btrfs_free_extra_devids(seed_dev, &latest_dev); 1093 1094 fs_devices->latest_dev = latest_dev; 1095 1096 mutex_unlock(&uuid_mutex); 1097 } 1098 1099 static void btrfs_close_bdev(struct btrfs_device *device) 1100 { 1101 if (!device->bdev) 1102 return; 1103 1104 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 1105 sync_blockdev(device->bdev); 1106 invalidate_bdev(device->bdev); 1107 } 1108 1109 blkdev_put(device->bdev, device->mode); 1110 } 1111 1112 static void btrfs_close_one_device(struct btrfs_device *device) 1113 { 1114 struct btrfs_fs_devices *fs_devices = device->fs_devices; 1115 1116 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 1117 device->devid != BTRFS_DEV_REPLACE_DEVID) { 1118 list_del_init(&device->dev_alloc_list); 1119 fs_devices->rw_devices--; 1120 } 1121 1122 if (device->devid == BTRFS_DEV_REPLACE_DEVID) 1123 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 1124 1125 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { 1126 clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 1127 fs_devices->missing_devices--; 1128 } 1129 1130 btrfs_close_bdev(device); 1131 if (device->bdev) { 1132 fs_devices->open_devices--; 1133 device->bdev = NULL; 1134 } 1135 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 1136 btrfs_destroy_dev_zone_info(device); 1137 1138 device->fs_info = NULL; 1139 atomic_set(&device->dev_stats_ccnt, 0); 1140 extent_io_tree_release(&device->alloc_state); 1141 1142 /* 1143 * Reset the flush error record. We might have a transient flush error 1144 * in this mount, and if so we aborted the current transaction and set 1145 * the fs to an error state, guaranteeing no super blocks can be further 1146 * committed. 
However that error might be transient and if we unmount the 1147 * filesystem and mount it again, we should allow the mount to succeed 1148 * (btrfs_check_rw_degradable() should not fail) - if after mounting the 1149 * filesystem again we still get flush errors, then we will again abort 1150 * any transaction and set the error state, guaranteeing no commits of 1151 * unsafe super blocks. 1152 */ 1153 device->last_flush_error = 0; 1154 1155 /* Verify the device is back in a pristine state */ 1156 ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state)); 1157 ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)); 1158 ASSERT(list_empty(&device->dev_alloc_list)); 1159 ASSERT(list_empty(&device->post_commit_list)); 1160 } 1161 1162 static void close_fs_devices(struct btrfs_fs_devices *fs_devices) 1163 { 1164 struct btrfs_device *device, *tmp; 1165 1166 lockdep_assert_held(&uuid_mutex); 1167 1168 if (--fs_devices->opened > 0) 1169 return; 1170 1171 list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) 1172 btrfs_close_one_device(device); 1173 1174 WARN_ON(fs_devices->open_devices); 1175 WARN_ON(fs_devices->rw_devices); 1176 fs_devices->opened = 0; 1177 fs_devices->seeding = false; 1178 fs_devices->fs_info = NULL; 1179 } 1180 1181 void btrfs_close_devices(struct btrfs_fs_devices *fs_devices) 1182 { 1183 LIST_HEAD(list); 1184 struct btrfs_fs_devices *tmp; 1185 1186 mutex_lock(&uuid_mutex); 1187 close_fs_devices(fs_devices); 1188 if (!fs_devices->opened) 1189 list_splice_init(&fs_devices->seed_list, &list); 1190 1191 list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) { 1192 close_fs_devices(fs_devices); 1193 list_del(&fs_devices->seed_list); 1194 free_fs_devices(fs_devices); 1195 } 1196 mutex_unlock(&uuid_mutex); 1197 } 1198 1199 static int open_fs_devices(struct btrfs_fs_devices *fs_devices, 1200 fmode_t flags, void *holder) 1201 { 1202 struct btrfs_device *device; 1203 struct btrfs_device *latest_dev = NULL; 1204 struct btrfs_device *tmp_device; 1205 1206 flags |= FMODE_EXCL; 1207 1208 list_for_each_entry_safe(device, tmp_device, &fs_devices->devices, 1209 dev_list) { 1210 int ret; 1211 1212 ret = btrfs_open_one_device(fs_devices, device, flags, holder); 1213 if (ret == 0 && 1214 (!latest_dev || device->generation > latest_dev->generation)) { 1215 latest_dev = device; 1216 } else if (ret == -ENODATA) { 1217 fs_devices->num_devices--; 1218 list_del(&device->dev_list); 1219 btrfs_free_device(device); 1220 } 1221 } 1222 if (fs_devices->open_devices == 0) 1223 return -EINVAL; 1224 1225 fs_devices->opened = 1; 1226 fs_devices->latest_dev = latest_dev; 1227 fs_devices->total_rw_bytes = 0; 1228 fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR; 1229 fs_devices->read_policy = BTRFS_READ_POLICY_PID; 1230 1231 return 0; 1232 } 1233 1234 static int devid_cmp(void *priv, const struct list_head *a, 1235 const struct list_head *b) 1236 { 1237 const struct btrfs_device *dev1, *dev2; 1238 1239 dev1 = list_entry(a, struct btrfs_device, dev_list); 1240 dev2 = list_entry(b, struct btrfs_device, dev_list); 1241 1242 if (dev1->devid < dev2->devid) 1243 return -1; 1244 else if (dev1->devid > dev2->devid) 1245 return 1; 1246 return 0; 1247 } 1248 1249 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, 1250 fmode_t flags, void *holder) 1251 { 1252 int ret; 1253 1254 lockdep_assert_held(&uuid_mutex); 1255 /* 1256 * The device_list_mutex cannot be taken here in case opening the 1257 * underlying device takes further locks like open_mutex. 
1258 * 1259 * We also don't need the lock here as this is called during mount and 1260 * exclusion is provided by uuid_mutex 1261 */ 1262 1263 if (fs_devices->opened) { 1264 fs_devices->opened++; 1265 ret = 0; 1266 } else { 1267 list_sort(NULL, &fs_devices->devices, devid_cmp); 1268 ret = open_fs_devices(fs_devices, flags, holder); 1269 } 1270 1271 return ret; 1272 } 1273 1274 void btrfs_release_disk_super(struct btrfs_super_block *super) 1275 { 1276 struct page *page = virt_to_page(super); 1277 1278 put_page(page); 1279 } 1280 1281 static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev, 1282 u64 bytenr, u64 bytenr_orig) 1283 { 1284 struct btrfs_super_block *disk_super; 1285 struct page *page; 1286 void *p; 1287 pgoff_t index; 1288 1289 /* make sure our super fits in the device */ 1290 if (bytenr + PAGE_SIZE >= bdev_nr_bytes(bdev)) 1291 return ERR_PTR(-EINVAL); 1292 1293 /* make sure our super fits in the page */ 1294 if (sizeof(*disk_super) > PAGE_SIZE) 1295 return ERR_PTR(-EINVAL); 1296 1297 /* make sure our super doesn't straddle pages on disk */ 1298 index = bytenr >> PAGE_SHIFT; 1299 if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index) 1300 return ERR_PTR(-EINVAL); 1301 1302 /* pull in the page with our super */ 1303 page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL); 1304 1305 if (IS_ERR(page)) 1306 return ERR_CAST(page); 1307 1308 p = page_address(page); 1309 1310 /* align our pointer to the offset of the super block */ 1311 disk_super = p + offset_in_page(bytenr); 1312 1313 if (btrfs_super_bytenr(disk_super) != bytenr_orig || 1314 btrfs_super_magic(disk_super) != BTRFS_MAGIC) { 1315 btrfs_release_disk_super(p); 1316 return ERR_PTR(-EINVAL); 1317 } 1318 1319 if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1]) 1320 disk_super->label[BTRFS_LABEL_SIZE - 1] = 0; 1321 1322 return disk_super; 1323 } 1324 1325 int btrfs_forget_devices(dev_t devt) 1326 { 1327 int ret; 1328 1329 mutex_lock(&uuid_mutex); 1330 ret = btrfs_free_stale_devices(devt, NULL); 1331 mutex_unlock(&uuid_mutex); 1332 1333 return ret; 1334 } 1335 1336 /* 1337 * Look for a btrfs signature on a device. This may be called out of the mount path 1338 * and we are not allowed to call set_blocksize during the scan. The superblock 1339 * is read via pagecache 1340 */ 1341 struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags, 1342 void *holder) 1343 { 1344 struct btrfs_super_block *disk_super; 1345 bool new_device_added = false; 1346 struct btrfs_device *device = NULL; 1347 struct block_device *bdev; 1348 u64 bytenr, bytenr_orig; 1349 int ret; 1350 1351 lockdep_assert_held(&uuid_mutex); 1352 1353 /* 1354 * we would like to check all the supers, but that would make 1355 * a btrfs mount succeed after a mkfs from a different FS. 
1356 * So, we need to add a special mount option to scan for 1357 * later supers, using BTRFS_SUPER_MIRROR_MAX instead 1358 */ 1359 flags |= FMODE_EXCL; 1360 1361 bdev = blkdev_get_by_path(path, flags, holder); 1362 if (IS_ERR(bdev)) 1363 return ERR_CAST(bdev); 1364 1365 bytenr_orig = btrfs_sb_offset(0); 1366 ret = btrfs_sb_log_location_bdev(bdev, 0, READ, &bytenr); 1367 if (ret) { 1368 device = ERR_PTR(ret); 1369 goto error_bdev_put; 1370 } 1371 1372 disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr_orig); 1373 if (IS_ERR(disk_super)) { 1374 device = ERR_CAST(disk_super); 1375 goto error_bdev_put; 1376 } 1377 1378 device = device_list_add(path, disk_super, &new_device_added); 1379 if (!IS_ERR(device) && new_device_added) 1380 btrfs_free_stale_devices(device->devt, device); 1381 1382 btrfs_release_disk_super(disk_super); 1383 1384 error_bdev_put: 1385 blkdev_put(bdev, flags); 1386 1387 return device; 1388 } 1389 1390 /* 1391 * Try to find a chunk that intersects [start, start + len] range and when one 1392 * such is found, record the end of it in *start 1393 */ 1394 static bool contains_pending_extent(struct btrfs_device *device, u64 *start, 1395 u64 len) 1396 { 1397 u64 physical_start, physical_end; 1398 1399 lockdep_assert_held(&device->fs_info->chunk_mutex); 1400 1401 if (!find_first_extent_bit(&device->alloc_state, *start, 1402 &physical_start, &physical_end, 1403 CHUNK_ALLOCATED, NULL)) { 1404 1405 if (in_range(physical_start, *start, len) || 1406 in_range(*start, physical_start, 1407 physical_end - physical_start)) { 1408 *start = physical_end + 1; 1409 return true; 1410 } 1411 } 1412 return false; 1413 } 1414 1415 static u64 dev_extent_search_start(struct btrfs_device *device, u64 start) 1416 { 1417 switch (device->fs_devices->chunk_alloc_policy) { 1418 case BTRFS_CHUNK_ALLOC_REGULAR: 1419 return max_t(u64, start, BTRFS_DEVICE_RANGE_RESERVED); 1420 case BTRFS_CHUNK_ALLOC_ZONED: 1421 /* 1422 * We don't care about the starting region like regular 1423 * allocator, because we anyway use/reserve the first two zones 1424 * for superblock logging. 
1425 */ 1426 return ALIGN(start, device->zone_info->zone_size); 1427 default: 1428 BUG(); 1429 } 1430 } 1431 1432 static bool dev_extent_hole_check_zoned(struct btrfs_device *device, 1433 u64 *hole_start, u64 *hole_size, 1434 u64 num_bytes) 1435 { 1436 u64 zone_size = device->zone_info->zone_size; 1437 u64 pos; 1438 int ret; 1439 bool changed = false; 1440 1441 ASSERT(IS_ALIGNED(*hole_start, zone_size)); 1442 1443 while (*hole_size > 0) { 1444 pos = btrfs_find_allocatable_zones(device, *hole_start, 1445 *hole_start + *hole_size, 1446 num_bytes); 1447 if (pos != *hole_start) { 1448 *hole_size = *hole_start + *hole_size - pos; 1449 *hole_start = pos; 1450 changed = true; 1451 if (*hole_size < num_bytes) 1452 break; 1453 } 1454 1455 ret = btrfs_ensure_empty_zones(device, pos, num_bytes); 1456 1457 /* Range is ensured to be empty */ 1458 if (!ret) 1459 return changed; 1460 1461 /* Given hole range was invalid (outside of device) */ 1462 if (ret == -ERANGE) { 1463 *hole_start += *hole_size; 1464 *hole_size = 0; 1465 return true; 1466 } 1467 1468 *hole_start += zone_size; 1469 *hole_size -= zone_size; 1470 changed = true; 1471 } 1472 1473 return changed; 1474 } 1475 1476 /** 1477 * dev_extent_hole_check - check if specified hole is suitable for allocation 1478 * @device: the device which we have the hole 1479 * @hole_start: starting position of the hole 1480 * @hole_size: the size of the hole 1481 * @num_bytes: the size of the free space that we need 1482 * 1483 * This function may modify @hole_start and @hole_size to reflect the suitable 1484 * position for allocation. Returns 1 if hole position is updated, 0 otherwise. 1485 */ 1486 static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start, 1487 u64 *hole_size, u64 num_bytes) 1488 { 1489 bool changed = false; 1490 u64 hole_end = *hole_start + *hole_size; 1491 1492 for (;;) { 1493 /* 1494 * Check before we set max_hole_start, otherwise we could end up 1495 * sending back this offset anyway. 1496 */ 1497 if (contains_pending_extent(device, hole_start, *hole_size)) { 1498 if (hole_end >= *hole_start) 1499 *hole_size = hole_end - *hole_start; 1500 else 1501 *hole_size = 0; 1502 changed = true; 1503 } 1504 1505 switch (device->fs_devices->chunk_alloc_policy) { 1506 case BTRFS_CHUNK_ALLOC_REGULAR: 1507 /* No extra check */ 1508 break; 1509 case BTRFS_CHUNK_ALLOC_ZONED: 1510 if (dev_extent_hole_check_zoned(device, hole_start, 1511 hole_size, num_bytes)) { 1512 changed = true; 1513 /* 1514 * The changed hole can contain pending extent. 1515 * Loop again to check that. 1516 */ 1517 continue; 1518 } 1519 break; 1520 default: 1521 BUG(); 1522 } 1523 1524 break; 1525 } 1526 1527 return changed; 1528 } 1529 1530 /* 1531 * find_free_dev_extent_start - find free space in the specified device 1532 * @device: the device which we search the free space in 1533 * @num_bytes: the size of the free space that we need 1534 * @search_start: the position from which to begin the search 1535 * @start: store the start of the free space. 1536 * @len: the size of the free space. that we find, or the size 1537 * of the max free space if we don't find suitable free space 1538 * 1539 * this uses a pretty simple search, the expectation is that it is 1540 * called very infrequently and that a given device has a small number 1541 * of extents 1542 * 1543 * @start is used to store the start of the free space if we find. But if we 1544 * don't find suitable free space, it will be used to store the start position 1545 * of the max free space. 
1546 * 1547 * @len is used to store the size of the free space that we find. 1548 * But if we don't find suitable free space, it is used to store the size of 1549 * the max free space. 1550 * 1551 * NOTE: This function will search *commit* root of device tree, and does extra 1552 * check to ensure dev extents are not double allocated. 1553 * This makes the function safe to allocate dev extents but may not report 1554 * correct usable device space, as device extent freed in current transaction 1555 * is not reported as available. 1556 */ 1557 static int find_free_dev_extent_start(struct btrfs_device *device, 1558 u64 num_bytes, u64 search_start, u64 *start, 1559 u64 *len) 1560 { 1561 struct btrfs_fs_info *fs_info = device->fs_info; 1562 struct btrfs_root *root = fs_info->dev_root; 1563 struct btrfs_key key; 1564 struct btrfs_dev_extent *dev_extent; 1565 struct btrfs_path *path; 1566 u64 hole_size; 1567 u64 max_hole_start; 1568 u64 max_hole_size; 1569 u64 extent_end; 1570 u64 search_end = device->total_bytes; 1571 int ret; 1572 int slot; 1573 struct extent_buffer *l; 1574 1575 search_start = dev_extent_search_start(device, search_start); 1576 1577 WARN_ON(device->zone_info && 1578 !IS_ALIGNED(num_bytes, device->zone_info->zone_size)); 1579 1580 path = btrfs_alloc_path(); 1581 if (!path) 1582 return -ENOMEM; 1583 1584 max_hole_start = search_start; 1585 max_hole_size = 0; 1586 1587 again: 1588 if (search_start >= search_end || 1589 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 1590 ret = -ENOSPC; 1591 goto out; 1592 } 1593 1594 path->reada = READA_FORWARD; 1595 path->search_commit_root = 1; 1596 path->skip_locking = 1; 1597 1598 key.objectid = device->devid; 1599 key.offset = search_start; 1600 key.type = BTRFS_DEV_EXTENT_KEY; 1601 1602 ret = btrfs_search_backwards(root, &key, path); 1603 if (ret < 0) 1604 goto out; 1605 1606 while (1) { 1607 l = path->nodes[0]; 1608 slot = path->slots[0]; 1609 if (slot >= btrfs_header_nritems(l)) { 1610 ret = btrfs_next_leaf(root, path); 1611 if (ret == 0) 1612 continue; 1613 if (ret < 0) 1614 goto out; 1615 1616 break; 1617 } 1618 btrfs_item_key_to_cpu(l, &key, slot); 1619 1620 if (key.objectid < device->devid) 1621 goto next; 1622 1623 if (key.objectid > device->devid) 1624 break; 1625 1626 if (key.type != BTRFS_DEV_EXTENT_KEY) 1627 goto next; 1628 1629 if (key.offset > search_start) { 1630 hole_size = key.offset - search_start; 1631 dev_extent_hole_check(device, &search_start, &hole_size, 1632 num_bytes); 1633 1634 if (hole_size > max_hole_size) { 1635 max_hole_start = search_start; 1636 max_hole_size = hole_size; 1637 } 1638 1639 /* 1640 * If this free space is greater than which we need, 1641 * it must be the max free space that we have found 1642 * until now, so max_hole_start must point to the start 1643 * of this free space and the length of this free space 1644 * is stored in max_hole_size. Thus, we return 1645 * max_hole_start and max_hole_size and go back to the 1646 * caller. 1647 */ 1648 if (hole_size >= num_bytes) { 1649 ret = 0; 1650 goto out; 1651 } 1652 } 1653 1654 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 1655 extent_end = key.offset + btrfs_dev_extent_length(l, 1656 dev_extent); 1657 if (extent_end > search_start) 1658 search_start = extent_end; 1659 next: 1660 path->slots[0]++; 1661 cond_resched(); 1662 } 1663 1664 /* 1665 * At this point, search_start should be the end of 1666 * allocated dev extents, and when shrinking the device, 1667 * search_end may be smaller than search_start. 
1668 */ 1669 if (search_end > search_start) { 1670 hole_size = search_end - search_start; 1671 if (dev_extent_hole_check(device, &search_start, &hole_size, 1672 num_bytes)) { 1673 btrfs_release_path(path); 1674 goto again; 1675 } 1676 1677 if (hole_size > max_hole_size) { 1678 max_hole_start = search_start; 1679 max_hole_size = hole_size; 1680 } 1681 } 1682 1683 /* See above. */ 1684 if (max_hole_size < num_bytes) 1685 ret = -ENOSPC; 1686 else 1687 ret = 0; 1688 1689 out: 1690 btrfs_free_path(path); 1691 *start = max_hole_start; 1692 if (len) 1693 *len = max_hole_size; 1694 return ret; 1695 } 1696 1697 int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes, 1698 u64 *start, u64 *len) 1699 { 1700 /* FIXME use last free of some kind */ 1701 return find_free_dev_extent_start(device, num_bytes, 0, start, len); 1702 } 1703 1704 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans, 1705 struct btrfs_device *device, 1706 u64 start, u64 *dev_extent_len) 1707 { 1708 struct btrfs_fs_info *fs_info = device->fs_info; 1709 struct btrfs_root *root = fs_info->dev_root; 1710 int ret; 1711 struct btrfs_path *path; 1712 struct btrfs_key key; 1713 struct btrfs_key found_key; 1714 struct extent_buffer *leaf = NULL; 1715 struct btrfs_dev_extent *extent = NULL; 1716 1717 path = btrfs_alloc_path(); 1718 if (!path) 1719 return -ENOMEM; 1720 1721 key.objectid = device->devid; 1722 key.offset = start; 1723 key.type = BTRFS_DEV_EXTENT_KEY; 1724 again: 1725 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1726 if (ret > 0) { 1727 ret = btrfs_previous_item(root, path, key.objectid, 1728 BTRFS_DEV_EXTENT_KEY); 1729 if (ret) 1730 goto out; 1731 leaf = path->nodes[0]; 1732 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 1733 extent = btrfs_item_ptr(leaf, path->slots[0], 1734 struct btrfs_dev_extent); 1735 BUG_ON(found_key.offset > start || found_key.offset + 1736 btrfs_dev_extent_length(leaf, extent) < start); 1737 key = found_key; 1738 btrfs_release_path(path); 1739 goto again; 1740 } else if (ret == 0) { 1741 leaf = path->nodes[0]; 1742 extent = btrfs_item_ptr(leaf, path->slots[0], 1743 struct btrfs_dev_extent); 1744 } else { 1745 goto out; 1746 } 1747 1748 *dev_extent_len = btrfs_dev_extent_length(leaf, extent); 1749 1750 ret = btrfs_del_item(trans, root, path); 1751 if (ret == 0) 1752 set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags); 1753 out: 1754 btrfs_free_path(path); 1755 return ret; 1756 } 1757 1758 static u64 find_next_chunk(struct btrfs_fs_info *fs_info) 1759 { 1760 struct extent_map_tree *em_tree; 1761 struct extent_map *em; 1762 struct rb_node *n; 1763 u64 ret = 0; 1764 1765 em_tree = &fs_info->mapping_tree; 1766 read_lock(&em_tree->lock); 1767 n = rb_last(&em_tree->map.rb_root); 1768 if (n) { 1769 em = rb_entry(n, struct extent_map, rb_node); 1770 ret = em->start + em->len; 1771 } 1772 read_unlock(&em_tree->lock); 1773 1774 return ret; 1775 } 1776 1777 static noinline int find_next_devid(struct btrfs_fs_info *fs_info, 1778 u64 *devid_ret) 1779 { 1780 int ret; 1781 struct btrfs_key key; 1782 struct btrfs_key found_key; 1783 struct btrfs_path *path; 1784 1785 path = btrfs_alloc_path(); 1786 if (!path) 1787 return -ENOMEM; 1788 1789 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1790 key.type = BTRFS_DEV_ITEM_KEY; 1791 key.offset = (u64)-1; 1792 1793 ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0); 1794 if (ret < 0) 1795 goto error; 1796 1797 if (ret == 0) { 1798 /* Corruption */ 1799 btrfs_err(fs_info, "corrupted chunk tree devid -1 
matched"); 1800 ret = -EUCLEAN; 1801 goto error; 1802 } 1803 1804 ret = btrfs_previous_item(fs_info->chunk_root, path, 1805 BTRFS_DEV_ITEMS_OBJECTID, 1806 BTRFS_DEV_ITEM_KEY); 1807 if (ret) { 1808 *devid_ret = 1; 1809 } else { 1810 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 1811 path->slots[0]); 1812 *devid_ret = found_key.offset + 1; 1813 } 1814 ret = 0; 1815 error: 1816 btrfs_free_path(path); 1817 return ret; 1818 } 1819 1820 /* 1821 * the device information is stored in the chunk root 1822 * the btrfs_device struct should be fully filled in 1823 */ 1824 static int btrfs_add_dev_item(struct btrfs_trans_handle *trans, 1825 struct btrfs_device *device) 1826 { 1827 int ret; 1828 struct btrfs_path *path; 1829 struct btrfs_dev_item *dev_item; 1830 struct extent_buffer *leaf; 1831 struct btrfs_key key; 1832 unsigned long ptr; 1833 1834 path = btrfs_alloc_path(); 1835 if (!path) 1836 return -ENOMEM; 1837 1838 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1839 key.type = BTRFS_DEV_ITEM_KEY; 1840 key.offset = device->devid; 1841 1842 btrfs_reserve_chunk_metadata(trans, true); 1843 ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path, 1844 &key, sizeof(*dev_item)); 1845 btrfs_trans_release_chunk_metadata(trans); 1846 if (ret) 1847 goto out; 1848 1849 leaf = path->nodes[0]; 1850 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 1851 1852 btrfs_set_device_id(leaf, dev_item, device->devid); 1853 btrfs_set_device_generation(leaf, dev_item, 0); 1854 btrfs_set_device_type(leaf, dev_item, device->type); 1855 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 1856 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 1857 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 1858 btrfs_set_device_total_bytes(leaf, dev_item, 1859 btrfs_device_get_disk_total_bytes(device)); 1860 btrfs_set_device_bytes_used(leaf, dev_item, 1861 btrfs_device_get_bytes_used(device)); 1862 btrfs_set_device_group(leaf, dev_item, 0); 1863 btrfs_set_device_seek_speed(leaf, dev_item, 0); 1864 btrfs_set_device_bandwidth(leaf, dev_item, 0); 1865 btrfs_set_device_start_offset(leaf, dev_item, 0); 1866 1867 ptr = btrfs_device_uuid(dev_item); 1868 write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 1869 ptr = btrfs_device_fsid(dev_item); 1870 write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid, 1871 ptr, BTRFS_FSID_SIZE); 1872 btrfs_mark_buffer_dirty(leaf); 1873 1874 ret = 0; 1875 out: 1876 btrfs_free_path(path); 1877 return ret; 1878 } 1879 1880 /* 1881 * Function to update ctime/mtime for a given device path. 1882 * Mainly used for ctime/mtime based probe like libblkid. 1883 * 1884 * We don't care about errors here, this is just to be kind to userspace. 
1885 */ 1886 static void update_dev_time(const char *device_path) 1887 { 1888 struct path path; 1889 struct timespec64 now; 1890 int ret; 1891 1892 ret = kern_path(device_path, LOOKUP_FOLLOW, &path); 1893 if (ret) 1894 return; 1895 1896 now = current_time(d_inode(path.dentry)); 1897 inode_update_time(d_inode(path.dentry), &now, S_MTIME | S_CTIME); 1898 path_put(&path); 1899 } 1900 1901 static int btrfs_rm_dev_item(struct btrfs_trans_handle *trans, 1902 struct btrfs_device *device) 1903 { 1904 struct btrfs_root *root = device->fs_info->chunk_root; 1905 int ret; 1906 struct btrfs_path *path; 1907 struct btrfs_key key; 1908 1909 path = btrfs_alloc_path(); 1910 if (!path) 1911 return -ENOMEM; 1912 1913 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1914 key.type = BTRFS_DEV_ITEM_KEY; 1915 key.offset = device->devid; 1916 1917 btrfs_reserve_chunk_metadata(trans, false); 1918 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1919 btrfs_trans_release_chunk_metadata(trans); 1920 if (ret) { 1921 if (ret > 0) 1922 ret = -ENOENT; 1923 goto out; 1924 } 1925 1926 ret = btrfs_del_item(trans, root, path); 1927 out: 1928 btrfs_free_path(path); 1929 return ret; 1930 } 1931 1932 /* 1933 * Verify that @num_devices satisfies the RAID profile constraints in the whole 1934 * filesystem. It's up to the caller to adjust that number regarding eg. device 1935 * replace. 1936 */ 1937 static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info, 1938 u64 num_devices) 1939 { 1940 u64 all_avail; 1941 unsigned seq; 1942 int i; 1943 1944 do { 1945 seq = read_seqbegin(&fs_info->profiles_lock); 1946 1947 all_avail = fs_info->avail_data_alloc_bits | 1948 fs_info->avail_system_alloc_bits | 1949 fs_info->avail_metadata_alloc_bits; 1950 } while (read_seqretry(&fs_info->profiles_lock, seq)); 1951 1952 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) { 1953 if (!(all_avail & btrfs_raid_array[i].bg_flag)) 1954 continue; 1955 1956 if (num_devices < btrfs_raid_array[i].devs_min) 1957 return btrfs_raid_array[i].mindev_error; 1958 } 1959 1960 return 0; 1961 } 1962 1963 static struct btrfs_device * btrfs_find_next_active_device( 1964 struct btrfs_fs_devices *fs_devs, struct btrfs_device *device) 1965 { 1966 struct btrfs_device *next_device; 1967 1968 list_for_each_entry(next_device, &fs_devs->devices, dev_list) { 1969 if (next_device != device && 1970 !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state) 1971 && next_device->bdev) 1972 return next_device; 1973 } 1974 1975 return NULL; 1976 } 1977 1978 /* 1979 * Helper function to check if the given device is part of s_bdev / latest_dev 1980 * and replace it with the provided or the next active device, in the context 1981 * where this function called, there should be always be another device (or 1982 * this_dev) which is active. 1983 */ 1984 void __cold btrfs_assign_next_active_device(struct btrfs_device *device, 1985 struct btrfs_device *next_device) 1986 { 1987 struct btrfs_fs_info *fs_info = device->fs_info; 1988 1989 if (!next_device) 1990 next_device = btrfs_find_next_active_device(fs_info->fs_devices, 1991 device); 1992 ASSERT(next_device); 1993 1994 if (fs_info->sb->s_bdev && 1995 (fs_info->sb->s_bdev == device->bdev)) 1996 fs_info->sb->s_bdev = next_device->bdev; 1997 1998 if (fs_info->fs_devices->latest_dev->bdev == device->bdev) 1999 fs_info->fs_devices->latest_dev = next_device; 2000 } 2001 2002 /* 2003 * Return btrfs_fs_devices::num_devices excluding the device that's being 2004 * currently replaced. 
2005 */ 2006 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info) 2007 { 2008 u64 num_devices = fs_info->fs_devices->num_devices; 2009 2010 down_read(&fs_info->dev_replace.rwsem); 2011 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) { 2012 ASSERT(num_devices > 1); 2013 num_devices--; 2014 } 2015 up_read(&fs_info->dev_replace.rwsem); 2016 2017 return num_devices; 2018 } 2019 2020 void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info, 2021 struct block_device *bdev, 2022 const char *device_path) 2023 { 2024 struct btrfs_super_block *disk_super; 2025 int copy_num; 2026 2027 if (!bdev) 2028 return; 2029 2030 for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) { 2031 struct page *page; 2032 int ret; 2033 2034 disk_super = btrfs_read_dev_one_super(bdev, copy_num, false); 2035 if (IS_ERR(disk_super)) 2036 continue; 2037 2038 if (bdev_is_zoned(bdev)) { 2039 btrfs_reset_sb_log_zones(bdev, copy_num); 2040 continue; 2041 } 2042 2043 memset(&disk_super->magic, 0, sizeof(disk_super->magic)); 2044 2045 page = virt_to_page(disk_super); 2046 set_page_dirty(page); 2047 lock_page(page); 2048 /* write_on_page() unlocks the page */ 2049 ret = write_one_page(page); 2050 if (ret) 2051 btrfs_warn(fs_info, 2052 "error clearing superblock number %d (%d)", 2053 copy_num, ret); 2054 btrfs_release_disk_super(disk_super); 2055 2056 } 2057 2058 /* Notify udev that device has changed */ 2059 btrfs_kobject_uevent(bdev, KOBJ_CHANGE); 2060 2061 /* Update ctime/mtime for device path for libblkid */ 2062 update_dev_time(device_path); 2063 } 2064 2065 int btrfs_rm_device(struct btrfs_fs_info *fs_info, 2066 struct btrfs_dev_lookup_args *args, 2067 struct block_device **bdev, fmode_t *mode) 2068 { 2069 struct btrfs_trans_handle *trans; 2070 struct btrfs_device *device; 2071 struct btrfs_fs_devices *cur_devices; 2072 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2073 u64 num_devices; 2074 int ret = 0; 2075 2076 if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { 2077 btrfs_err(fs_info, "device remove not supported on extent tree v2 yet"); 2078 return -EINVAL; 2079 } 2080 2081 /* 2082 * The device list in fs_devices is accessed without locks (neither 2083 * uuid_mutex nor device_list_mutex) as it won't change on a mounted 2084 * filesystem and another device rm cannot run. 
2085 */ 2086 num_devices = btrfs_num_devices(fs_info); 2087 2088 ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1); 2089 if (ret) 2090 return ret; 2091 2092 device = btrfs_find_device(fs_info->fs_devices, args); 2093 if (!device) { 2094 if (args->missing) 2095 ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND; 2096 else 2097 ret = -ENOENT; 2098 return ret; 2099 } 2100 2101 if (btrfs_pinned_by_swapfile(fs_info, device)) { 2102 btrfs_warn_in_rcu(fs_info, 2103 "cannot remove device %s (devid %llu) due to active swapfile", 2104 rcu_str_deref(device->name), device->devid); 2105 return -ETXTBSY; 2106 } 2107 2108 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 2109 return BTRFS_ERROR_DEV_TGT_REPLACE; 2110 2111 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 2112 fs_info->fs_devices->rw_devices == 1) 2113 return BTRFS_ERROR_DEV_ONLY_WRITABLE; 2114 2115 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2116 mutex_lock(&fs_info->chunk_mutex); 2117 list_del_init(&device->dev_alloc_list); 2118 device->fs_devices->rw_devices--; 2119 mutex_unlock(&fs_info->chunk_mutex); 2120 } 2121 2122 ret = btrfs_shrink_device(device, 0); 2123 if (ret) 2124 goto error_undo; 2125 2126 trans = btrfs_start_transaction(fs_info->chunk_root, 0); 2127 if (IS_ERR(trans)) { 2128 ret = PTR_ERR(trans); 2129 goto error_undo; 2130 } 2131 2132 ret = btrfs_rm_dev_item(trans, device); 2133 if (ret) { 2134 /* Any error in dev item removal is critical */ 2135 btrfs_crit(fs_info, 2136 "failed to remove device item for devid %llu: %d", 2137 device->devid, ret); 2138 btrfs_abort_transaction(trans, ret); 2139 btrfs_end_transaction(trans); 2140 return ret; 2141 } 2142 2143 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2144 btrfs_scrub_cancel_dev(device); 2145 2146 /* 2147 * the device list mutex makes sure that we don't change 2148 * the device list while someone else is writing out all 2149 * the device supers. Whoever is writing all supers, should 2150 * lock the device list mutex before getting the number of 2151 * devices in the super block (super_copy). Conversely, 2152 * whoever updates the number of devices in the super block 2153 * (super_copy) should hold the device list mutex. 2154 */ 2155 2156 /* 2157 * In normal cases the cur_devices == fs_devices. But in case 2158 * of deleting a seed device, the cur_devices should point to 2159 * its own fs_devices listed under the fs_devices->seed_list. 2160 */ 2161 cur_devices = device->fs_devices; 2162 mutex_lock(&fs_devices->device_list_mutex); 2163 list_del_rcu(&device->dev_list); 2164 2165 cur_devices->num_devices--; 2166 cur_devices->total_devices--; 2167 /* Update total_devices of the parent fs_devices if it's seed */ 2168 if (cur_devices != fs_devices) 2169 fs_devices->total_devices--; 2170 2171 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) 2172 cur_devices->missing_devices--; 2173 2174 btrfs_assign_next_active_device(device, NULL); 2175 2176 if (device->bdev) { 2177 cur_devices->open_devices--; 2178 /* remove sysfs entry */ 2179 btrfs_sysfs_remove_device(device); 2180 } 2181 2182 num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1; 2183 btrfs_set_super_num_devices(fs_info->super_copy, num_devices); 2184 mutex_unlock(&fs_devices->device_list_mutex); 2185 2186 /* 2187 * At this point, the device is zero sized and detached from the 2188 * devices list. All that's left is to zero out the old supers and 2189 * free the device. 
2190 * 2191 * We cannot call btrfs_close_bdev() here because we're holding the sb 2192 * write lock, and blkdev_put() will pull in the ->open_mutex on the 2193 * block device and it's dependencies. Instead just flush the device 2194 * and let the caller do the final blkdev_put. 2195 */ 2196 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2197 btrfs_scratch_superblocks(fs_info, device->bdev, 2198 device->name->str); 2199 if (device->bdev) { 2200 sync_blockdev(device->bdev); 2201 invalidate_bdev(device->bdev); 2202 } 2203 } 2204 2205 *bdev = device->bdev; 2206 *mode = device->mode; 2207 synchronize_rcu(); 2208 btrfs_free_device(device); 2209 2210 /* 2211 * This can happen if cur_devices is the private seed devices list. We 2212 * cannot call close_fs_devices() here because it expects the uuid_mutex 2213 * to be held, but in fact we don't need that for the private 2214 * seed_devices, we can simply decrement cur_devices->opened and then 2215 * remove it from our list and free the fs_devices. 2216 */ 2217 if (cur_devices->num_devices == 0) { 2218 list_del_init(&cur_devices->seed_list); 2219 ASSERT(cur_devices->opened == 1); 2220 cur_devices->opened--; 2221 free_fs_devices(cur_devices); 2222 } 2223 2224 ret = btrfs_commit_transaction(trans); 2225 2226 return ret; 2227 2228 error_undo: 2229 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2230 mutex_lock(&fs_info->chunk_mutex); 2231 list_add(&device->dev_alloc_list, 2232 &fs_devices->alloc_list); 2233 device->fs_devices->rw_devices++; 2234 mutex_unlock(&fs_info->chunk_mutex); 2235 } 2236 return ret; 2237 } 2238 2239 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev) 2240 { 2241 struct btrfs_fs_devices *fs_devices; 2242 2243 lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex); 2244 2245 /* 2246 * in case of fs with no seed, srcdev->fs_devices will point 2247 * to fs_devices of fs_info. However when the dev being replaced is 2248 * a seed dev it will point to the seed's local fs_devices. In short 2249 * srcdev will have its correct fs_devices in both the cases. 2250 */ 2251 fs_devices = srcdev->fs_devices; 2252 2253 list_del_rcu(&srcdev->dev_list); 2254 list_del(&srcdev->dev_alloc_list); 2255 fs_devices->num_devices--; 2256 if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state)) 2257 fs_devices->missing_devices--; 2258 2259 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state)) 2260 fs_devices->rw_devices--; 2261 2262 if (srcdev->bdev) 2263 fs_devices->open_devices--; 2264 } 2265 2266 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev) 2267 { 2268 struct btrfs_fs_devices *fs_devices = srcdev->fs_devices; 2269 2270 mutex_lock(&uuid_mutex); 2271 2272 btrfs_close_bdev(srcdev); 2273 synchronize_rcu(); 2274 btrfs_free_device(srcdev); 2275 2276 /* if this is no devs we rather delete the fs_devices */ 2277 if (!fs_devices->num_devices) { 2278 /* 2279 * On a mounted FS, num_devices can't be zero unless it's a 2280 * seed. In case of a seed device being replaced, the replace 2281 * target added to the sprout FS, so there will be no more 2282 * device left under the seed FS. 
2283 */ 2284 ASSERT(fs_devices->seeding); 2285 2286 list_del_init(&fs_devices->seed_list); 2287 close_fs_devices(fs_devices); 2288 free_fs_devices(fs_devices); 2289 } 2290 mutex_unlock(&uuid_mutex); 2291 } 2292 2293 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev) 2294 { 2295 struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices; 2296 2297 mutex_lock(&fs_devices->device_list_mutex); 2298 2299 btrfs_sysfs_remove_device(tgtdev); 2300 2301 if (tgtdev->bdev) 2302 fs_devices->open_devices--; 2303 2304 fs_devices->num_devices--; 2305 2306 btrfs_assign_next_active_device(tgtdev, NULL); 2307 2308 list_del_rcu(&tgtdev->dev_list); 2309 2310 mutex_unlock(&fs_devices->device_list_mutex); 2311 2312 btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev, 2313 tgtdev->name->str); 2314 2315 btrfs_close_bdev(tgtdev); 2316 synchronize_rcu(); 2317 btrfs_free_device(tgtdev); 2318 } 2319 2320 /** 2321 * Populate args from device at path 2322 * 2323 * @fs_info: the filesystem 2324 * @args: the args to populate 2325 * @path: the path to the device 2326 * 2327 * This will read the super block of the device at @path and populate @args with 2328 * the devid, fsid, and uuid. This is meant to be used for ioctls that need to 2329 * lookup a device to operate on, but need to do it before we take any locks. 2330 * This properly handles the special case of "missing" that a user may pass in, 2331 * and does some basic sanity checks. The caller must make sure that @path is 2332 * properly NUL terminated before calling in, and must call 2333 * btrfs_put_dev_args_from_path() in order to free up the temporary fsid and 2334 * uuid buffers. 2335 * 2336 * Return: 0 for success, -errno for failure 2337 */ 2338 int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info, 2339 struct btrfs_dev_lookup_args *args, 2340 const char *path) 2341 { 2342 struct btrfs_super_block *disk_super; 2343 struct block_device *bdev; 2344 int ret; 2345 2346 if (!path || !path[0]) 2347 return -EINVAL; 2348 if (!strcmp(path, "missing")) { 2349 args->missing = true; 2350 return 0; 2351 } 2352 2353 args->uuid = kzalloc(BTRFS_UUID_SIZE, GFP_KERNEL); 2354 args->fsid = kzalloc(BTRFS_FSID_SIZE, GFP_KERNEL); 2355 if (!args->uuid || !args->fsid) { 2356 btrfs_put_dev_args_from_path(args); 2357 return -ENOMEM; 2358 } 2359 2360 ret = btrfs_get_bdev_and_sb(path, FMODE_READ, fs_info->bdev_holder, 0, 2361 &bdev, &disk_super); 2362 if (ret) { 2363 btrfs_put_dev_args_from_path(args); 2364 return ret; 2365 } 2366 2367 args->devid = btrfs_stack_device_id(&disk_super->dev_item); 2368 memcpy(args->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE); 2369 if (btrfs_fs_incompat(fs_info, METADATA_UUID)) 2370 memcpy(args->fsid, disk_super->metadata_uuid, BTRFS_FSID_SIZE); 2371 else 2372 memcpy(args->fsid, disk_super->fsid, BTRFS_FSID_SIZE); 2373 btrfs_release_disk_super(disk_super); 2374 blkdev_put(bdev, FMODE_READ); 2375 return 0; 2376 } 2377 2378 /* 2379 * Only use this jointly with btrfs_get_dev_args_from_path() because we will 2380 * allocate our ->uuid and ->fsid pointers, everybody else uses local variables 2381 * that don't need to be freed. 
2382 */ 2383 void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args) 2384 { 2385 kfree(args->uuid); 2386 kfree(args->fsid); 2387 args->uuid = NULL; 2388 args->fsid = NULL; 2389 } 2390 2391 struct btrfs_device *btrfs_find_device_by_devspec( 2392 struct btrfs_fs_info *fs_info, u64 devid, 2393 const char *device_path) 2394 { 2395 BTRFS_DEV_LOOKUP_ARGS(args); 2396 struct btrfs_device *device; 2397 int ret; 2398 2399 if (devid) { 2400 args.devid = devid; 2401 device = btrfs_find_device(fs_info->fs_devices, &args); 2402 if (!device) 2403 return ERR_PTR(-ENOENT); 2404 return device; 2405 } 2406 2407 ret = btrfs_get_dev_args_from_path(fs_info, &args, device_path); 2408 if (ret) 2409 return ERR_PTR(ret); 2410 device = btrfs_find_device(fs_info->fs_devices, &args); 2411 btrfs_put_dev_args_from_path(&args); 2412 if (!device) 2413 return ERR_PTR(-ENOENT); 2414 return device; 2415 } 2416 2417 static struct btrfs_fs_devices *btrfs_init_sprout(struct btrfs_fs_info *fs_info) 2418 { 2419 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2420 struct btrfs_fs_devices *old_devices; 2421 struct btrfs_fs_devices *seed_devices; 2422 2423 lockdep_assert_held(&uuid_mutex); 2424 if (!fs_devices->seeding) 2425 return ERR_PTR(-EINVAL); 2426 2427 /* 2428 * Private copy of the seed devices, anchored at 2429 * fs_info->fs_devices->seed_list 2430 */ 2431 seed_devices = alloc_fs_devices(NULL, NULL); 2432 if (IS_ERR(seed_devices)) 2433 return seed_devices; 2434 2435 /* 2436 * It's necessary to retain a copy of the original seed fs_devices in 2437 * fs_uuids so that filesystems which have been seeded can successfully 2438 * reference the seed device from open_seed_devices. This also supports 2439 * multiple fs seed. 2440 */ 2441 old_devices = clone_fs_devices(fs_devices); 2442 if (IS_ERR(old_devices)) { 2443 kfree(seed_devices); 2444 return old_devices; 2445 } 2446 2447 list_add(&old_devices->fs_list, &fs_uuids); 2448 2449 memcpy(seed_devices, fs_devices, sizeof(*seed_devices)); 2450 seed_devices->opened = 1; 2451 INIT_LIST_HEAD(&seed_devices->devices); 2452 INIT_LIST_HEAD(&seed_devices->alloc_list); 2453 mutex_init(&seed_devices->device_list_mutex); 2454 2455 return seed_devices; 2456 } 2457 2458 /* 2459 * Splice seed devices into the sprout fs_devices. 2460 * Generate a new fsid for the sprouted read-write filesystem. 2461 */ 2462 static void btrfs_setup_sprout(struct btrfs_fs_info *fs_info, 2463 struct btrfs_fs_devices *seed_devices) 2464 { 2465 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2466 struct btrfs_super_block *disk_super = fs_info->super_copy; 2467 struct btrfs_device *device; 2468 u64 super_flags; 2469 2470 /* 2471 * We are updating the fsid, the thread leading to device_list_add() 2472 * could race, so uuid_mutex is needed. 2473 */ 2474 lockdep_assert_held(&uuid_mutex); 2475 2476 /* 2477 * The threads listed below may traverse dev_list but can do that without 2478 * device_list_mutex: 2479 * - All device ops and balance - as we are in btrfs_exclop_start. 2480 * - Various dev_list readers - are using RCU. 2481 * - btrfs_ioctl_fitrim() - is using RCU. 
2482 * 2483 * For-read threads as below are using device_list_mutex: 2484 * - Readonly scrub btrfs_scrub_dev() 2485 * - Readonly scrub btrfs_scrub_progress() 2486 * - btrfs_get_dev_stats() 2487 */ 2488 lockdep_assert_held(&fs_devices->device_list_mutex); 2489 2490 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices, 2491 synchronize_rcu); 2492 list_for_each_entry(device, &seed_devices->devices, dev_list) 2493 device->fs_devices = seed_devices; 2494 2495 fs_devices->seeding = false; 2496 fs_devices->num_devices = 0; 2497 fs_devices->open_devices = 0; 2498 fs_devices->missing_devices = 0; 2499 fs_devices->rotating = false; 2500 list_add(&seed_devices->seed_list, &fs_devices->seed_list); 2501 2502 generate_random_uuid(fs_devices->fsid); 2503 memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE); 2504 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); 2505 2506 super_flags = btrfs_super_flags(disk_super) & 2507 ~BTRFS_SUPER_FLAG_SEEDING; 2508 btrfs_set_super_flags(disk_super, super_flags); 2509 } 2510 2511 /* 2512 * Store the expected generation for seed devices in device items. 2513 */ 2514 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans) 2515 { 2516 BTRFS_DEV_LOOKUP_ARGS(args); 2517 struct btrfs_fs_info *fs_info = trans->fs_info; 2518 struct btrfs_root *root = fs_info->chunk_root; 2519 struct btrfs_path *path; 2520 struct extent_buffer *leaf; 2521 struct btrfs_dev_item *dev_item; 2522 struct btrfs_device *device; 2523 struct btrfs_key key; 2524 u8 fs_uuid[BTRFS_FSID_SIZE]; 2525 u8 dev_uuid[BTRFS_UUID_SIZE]; 2526 int ret; 2527 2528 path = btrfs_alloc_path(); 2529 if (!path) 2530 return -ENOMEM; 2531 2532 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2533 key.offset = 0; 2534 key.type = BTRFS_DEV_ITEM_KEY; 2535 2536 while (1) { 2537 btrfs_reserve_chunk_metadata(trans, false); 2538 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2539 btrfs_trans_release_chunk_metadata(trans); 2540 if (ret < 0) 2541 goto error; 2542 2543 leaf = path->nodes[0]; 2544 next_slot: 2545 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 2546 ret = btrfs_next_leaf(root, path); 2547 if (ret > 0) 2548 break; 2549 if (ret < 0) 2550 goto error; 2551 leaf = path->nodes[0]; 2552 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2553 btrfs_release_path(path); 2554 continue; 2555 } 2556 2557 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2558 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID || 2559 key.type != BTRFS_DEV_ITEM_KEY) 2560 break; 2561 2562 dev_item = btrfs_item_ptr(leaf, path->slots[0], 2563 struct btrfs_dev_item); 2564 args.devid = btrfs_device_id(leaf, dev_item); 2565 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 2566 BTRFS_UUID_SIZE); 2567 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 2568 BTRFS_FSID_SIZE); 2569 args.uuid = dev_uuid; 2570 args.fsid = fs_uuid; 2571 device = btrfs_find_device(fs_info->fs_devices, &args); 2572 BUG_ON(!device); /* Logic error */ 2573 2574 if (device->fs_devices->seeding) { 2575 btrfs_set_device_generation(leaf, dev_item, 2576 device->generation); 2577 btrfs_mark_buffer_dirty(leaf); 2578 } 2579 2580 path->slots[0]++; 2581 goto next_slot; 2582 } 2583 ret = 0; 2584 error: 2585 btrfs_free_path(path); 2586 return ret; 2587 } 2588 2589 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path) 2590 { 2591 struct btrfs_root *root = fs_info->dev_root; 2592 struct btrfs_trans_handle *trans; 2593 struct btrfs_device *device; 2594 struct block_device *bdev; 2595 struct 
super_block *sb = fs_info->sb; 2596 struct rcu_string *name; 2597 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2598 struct btrfs_fs_devices *seed_devices; 2599 u64 orig_super_total_bytes; 2600 u64 orig_super_num_devices; 2601 int ret = 0; 2602 bool seeding_dev = false; 2603 bool locked = false; 2604 2605 if (sb_rdonly(sb) && !fs_devices->seeding) 2606 return -EROFS; 2607 2608 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL, 2609 fs_info->bdev_holder); 2610 if (IS_ERR(bdev)) 2611 return PTR_ERR(bdev); 2612 2613 if (!btrfs_check_device_zone_type(fs_info, bdev)) { 2614 ret = -EINVAL; 2615 goto error; 2616 } 2617 2618 if (fs_devices->seeding) { 2619 seeding_dev = true; 2620 down_write(&sb->s_umount); 2621 mutex_lock(&uuid_mutex); 2622 locked = true; 2623 } 2624 2625 sync_blockdev(bdev); 2626 2627 rcu_read_lock(); 2628 list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) { 2629 if (device->bdev == bdev) { 2630 ret = -EEXIST; 2631 rcu_read_unlock(); 2632 goto error; 2633 } 2634 } 2635 rcu_read_unlock(); 2636 2637 device = btrfs_alloc_device(fs_info, NULL, NULL); 2638 if (IS_ERR(device)) { 2639 /* we can safely leave the fs_devices entry around */ 2640 ret = PTR_ERR(device); 2641 goto error; 2642 } 2643 2644 name = rcu_string_strdup(device_path, GFP_KERNEL); 2645 if (!name) { 2646 ret = -ENOMEM; 2647 goto error_free_device; 2648 } 2649 rcu_assign_pointer(device->name, name); 2650 2651 device->fs_info = fs_info; 2652 device->bdev = bdev; 2653 ret = lookup_bdev(device_path, &device->devt); 2654 if (ret) 2655 goto error_free_device; 2656 2657 ret = btrfs_get_dev_zone_info(device, false); 2658 if (ret) 2659 goto error_free_device; 2660 2661 trans = btrfs_start_transaction(root, 0); 2662 if (IS_ERR(trans)) { 2663 ret = PTR_ERR(trans); 2664 goto error_free_zone; 2665 } 2666 2667 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 2668 device->generation = trans->transid; 2669 device->io_width = fs_info->sectorsize; 2670 device->io_align = fs_info->sectorsize; 2671 device->sector_size = fs_info->sectorsize; 2672 device->total_bytes = 2673 round_down(bdev_nr_bytes(bdev), fs_info->sectorsize); 2674 device->disk_total_bytes = device->total_bytes; 2675 device->commit_total_bytes = device->total_bytes; 2676 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2677 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 2678 device->mode = FMODE_EXCL; 2679 device->dev_stats_valid = 1; 2680 set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE); 2681 2682 if (seeding_dev) { 2683 btrfs_clear_sb_rdonly(sb); 2684 2685 /* GFP_KERNEL allocation must not be under device_list_mutex */ 2686 seed_devices = btrfs_init_sprout(fs_info); 2687 if (IS_ERR(seed_devices)) { 2688 ret = PTR_ERR(seed_devices); 2689 btrfs_abort_transaction(trans, ret); 2690 goto error_trans; 2691 } 2692 } 2693 2694 mutex_lock(&fs_devices->device_list_mutex); 2695 if (seeding_dev) { 2696 btrfs_setup_sprout(fs_info, seed_devices); 2697 btrfs_assign_next_active_device(fs_info->fs_devices->latest_dev, 2698 device); 2699 } 2700 2701 device->fs_devices = fs_devices; 2702 2703 mutex_lock(&fs_info->chunk_mutex); 2704 list_add_rcu(&device->dev_list, &fs_devices->devices); 2705 list_add(&device->dev_alloc_list, &fs_devices->alloc_list); 2706 fs_devices->num_devices++; 2707 fs_devices->open_devices++; 2708 fs_devices->rw_devices++; 2709 fs_devices->total_devices++; 2710 fs_devices->total_rw_bytes += device->total_bytes; 2711 2712 atomic64_add(device->total_bytes, &fs_info->free_chunk_space); 2713 2714 if 
(!bdev_nonrot(bdev)) 2715 fs_devices->rotating = true; 2716 2717 orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy); 2718 btrfs_set_super_total_bytes(fs_info->super_copy, 2719 round_down(orig_super_total_bytes + device->total_bytes, 2720 fs_info->sectorsize)); 2721 2722 orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy); 2723 btrfs_set_super_num_devices(fs_info->super_copy, 2724 orig_super_num_devices + 1); 2725 2726 /* 2727 * we've got more storage, clear any full flags on the space 2728 * infos 2729 */ 2730 btrfs_clear_space_info_full(fs_info); 2731 2732 mutex_unlock(&fs_info->chunk_mutex); 2733 2734 /* Add sysfs device entry */ 2735 btrfs_sysfs_add_device(device); 2736 2737 mutex_unlock(&fs_devices->device_list_mutex); 2738 2739 if (seeding_dev) { 2740 mutex_lock(&fs_info->chunk_mutex); 2741 ret = init_first_rw_device(trans); 2742 mutex_unlock(&fs_info->chunk_mutex); 2743 if (ret) { 2744 btrfs_abort_transaction(trans, ret); 2745 goto error_sysfs; 2746 } 2747 } 2748 2749 ret = btrfs_add_dev_item(trans, device); 2750 if (ret) { 2751 btrfs_abort_transaction(trans, ret); 2752 goto error_sysfs; 2753 } 2754 2755 if (seeding_dev) { 2756 ret = btrfs_finish_sprout(trans); 2757 if (ret) { 2758 btrfs_abort_transaction(trans, ret); 2759 goto error_sysfs; 2760 } 2761 2762 /* 2763 * fs_devices now represents the newly sprouted filesystem and 2764 * its fsid has been changed by btrfs_sprout_splice(). 2765 */ 2766 btrfs_sysfs_update_sprout_fsid(fs_devices); 2767 } 2768 2769 ret = btrfs_commit_transaction(trans); 2770 2771 if (seeding_dev) { 2772 mutex_unlock(&uuid_mutex); 2773 up_write(&sb->s_umount); 2774 locked = false; 2775 2776 if (ret) /* transaction commit */ 2777 return ret; 2778 2779 ret = btrfs_relocate_sys_chunks(fs_info); 2780 if (ret < 0) 2781 btrfs_handle_fs_error(fs_info, ret, 2782 "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command."); 2783 trans = btrfs_attach_transaction(root); 2784 if (IS_ERR(trans)) { 2785 if (PTR_ERR(trans) == -ENOENT) 2786 return 0; 2787 ret = PTR_ERR(trans); 2788 trans = NULL; 2789 goto error_sysfs; 2790 } 2791 ret = btrfs_commit_transaction(trans); 2792 } 2793 2794 /* 2795 * Now that we have written a new super block to this device, check all 2796 * other fs_devices list if device_path alienates any other scanned 2797 * device. 2798 * We can ignore the return value as it typically returns -EINVAL and 2799 * only succeeds if the device was an alien. 
2800 */ 2801 btrfs_forget_devices(device->devt); 2802 2803 /* Update ctime/mtime for blkid or udev */ 2804 update_dev_time(device_path); 2805 2806 return ret; 2807 2808 error_sysfs: 2809 btrfs_sysfs_remove_device(device); 2810 mutex_lock(&fs_info->fs_devices->device_list_mutex); 2811 mutex_lock(&fs_info->chunk_mutex); 2812 list_del_rcu(&device->dev_list); 2813 list_del(&device->dev_alloc_list); 2814 fs_info->fs_devices->num_devices--; 2815 fs_info->fs_devices->open_devices--; 2816 fs_info->fs_devices->rw_devices--; 2817 fs_info->fs_devices->total_devices--; 2818 fs_info->fs_devices->total_rw_bytes -= device->total_bytes; 2819 atomic64_sub(device->total_bytes, &fs_info->free_chunk_space); 2820 btrfs_set_super_total_bytes(fs_info->super_copy, 2821 orig_super_total_bytes); 2822 btrfs_set_super_num_devices(fs_info->super_copy, 2823 orig_super_num_devices); 2824 mutex_unlock(&fs_info->chunk_mutex); 2825 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2826 error_trans: 2827 if (seeding_dev) 2828 btrfs_set_sb_rdonly(sb); 2829 if (trans) 2830 btrfs_end_transaction(trans); 2831 error_free_zone: 2832 btrfs_destroy_dev_zone_info(device); 2833 error_free_device: 2834 btrfs_free_device(device); 2835 error: 2836 blkdev_put(bdev, FMODE_EXCL); 2837 if (locked) { 2838 mutex_unlock(&uuid_mutex); 2839 up_write(&sb->s_umount); 2840 } 2841 return ret; 2842 } 2843 2844 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans, 2845 struct btrfs_device *device) 2846 { 2847 int ret; 2848 struct btrfs_path *path; 2849 struct btrfs_root *root = device->fs_info->chunk_root; 2850 struct btrfs_dev_item *dev_item; 2851 struct extent_buffer *leaf; 2852 struct btrfs_key key; 2853 2854 path = btrfs_alloc_path(); 2855 if (!path) 2856 return -ENOMEM; 2857 2858 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2859 key.type = BTRFS_DEV_ITEM_KEY; 2860 key.offset = device->devid; 2861 2862 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2863 if (ret < 0) 2864 goto out; 2865 2866 if (ret > 0) { 2867 ret = -ENOENT; 2868 goto out; 2869 } 2870 2871 leaf = path->nodes[0]; 2872 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 2873 2874 btrfs_set_device_id(leaf, dev_item, device->devid); 2875 btrfs_set_device_type(leaf, dev_item, device->type); 2876 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 2877 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 2878 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 2879 btrfs_set_device_total_bytes(leaf, dev_item, 2880 btrfs_device_get_disk_total_bytes(device)); 2881 btrfs_set_device_bytes_used(leaf, dev_item, 2882 btrfs_device_get_bytes_used(device)); 2883 btrfs_mark_buffer_dirty(leaf); 2884 2885 out: 2886 btrfs_free_path(path); 2887 return ret; 2888 } 2889 2890 int btrfs_grow_device(struct btrfs_trans_handle *trans, 2891 struct btrfs_device *device, u64 new_size) 2892 { 2893 struct btrfs_fs_info *fs_info = device->fs_info; 2894 struct btrfs_super_block *super_copy = fs_info->super_copy; 2895 u64 old_total; 2896 u64 diff; 2897 int ret; 2898 2899 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 2900 return -EACCES; 2901 2902 new_size = round_down(new_size, fs_info->sectorsize); 2903 2904 mutex_lock(&fs_info->chunk_mutex); 2905 old_total = btrfs_super_total_bytes(super_copy); 2906 diff = round_down(new_size - device->total_bytes, fs_info->sectorsize); 2907 2908 if (new_size <= device->total_bytes || 2909 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 2910 
mutex_unlock(&fs_info->chunk_mutex); 2911 return -EINVAL; 2912 } 2913 2914 btrfs_set_super_total_bytes(super_copy, 2915 round_down(old_total + diff, fs_info->sectorsize)); 2916 device->fs_devices->total_rw_bytes += diff; 2917 2918 btrfs_device_set_total_bytes(device, new_size); 2919 btrfs_device_set_disk_total_bytes(device, new_size); 2920 btrfs_clear_space_info_full(device->fs_info); 2921 if (list_empty(&device->post_commit_list)) 2922 list_add_tail(&device->post_commit_list, 2923 &trans->transaction->dev_update_list); 2924 mutex_unlock(&fs_info->chunk_mutex); 2925 2926 btrfs_reserve_chunk_metadata(trans, false); 2927 ret = btrfs_update_device(trans, device); 2928 btrfs_trans_release_chunk_metadata(trans); 2929 2930 return ret; 2931 } 2932 2933 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 2934 { 2935 struct btrfs_fs_info *fs_info = trans->fs_info; 2936 struct btrfs_root *root = fs_info->chunk_root; 2937 int ret; 2938 struct btrfs_path *path; 2939 struct btrfs_key key; 2940 2941 path = btrfs_alloc_path(); 2942 if (!path) 2943 return -ENOMEM; 2944 2945 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2946 key.offset = chunk_offset; 2947 key.type = BTRFS_CHUNK_ITEM_KEY; 2948 2949 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 2950 if (ret < 0) 2951 goto out; 2952 else if (ret > 0) { /* Logic error or corruption */ 2953 btrfs_handle_fs_error(fs_info, -ENOENT, 2954 "Failed lookup while freeing chunk."); 2955 ret = -ENOENT; 2956 goto out; 2957 } 2958 2959 ret = btrfs_del_item(trans, root, path); 2960 if (ret < 0) 2961 btrfs_handle_fs_error(fs_info, ret, 2962 "Failed to delete chunk item."); 2963 out: 2964 btrfs_free_path(path); 2965 return ret; 2966 } 2967 2968 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 2969 { 2970 struct btrfs_super_block *super_copy = fs_info->super_copy; 2971 struct btrfs_disk_key *disk_key; 2972 struct btrfs_chunk *chunk; 2973 u8 *ptr; 2974 int ret = 0; 2975 u32 num_stripes; 2976 u32 array_size; 2977 u32 len = 0; 2978 u32 cur; 2979 struct btrfs_key key; 2980 2981 lockdep_assert_held(&fs_info->chunk_mutex); 2982 array_size = btrfs_super_sys_array_size(super_copy); 2983 2984 ptr = super_copy->sys_chunk_array; 2985 cur = 0; 2986 2987 while (cur < array_size) { 2988 disk_key = (struct btrfs_disk_key *)ptr; 2989 btrfs_disk_key_to_cpu(&key, disk_key); 2990 2991 len = sizeof(*disk_key); 2992 2993 if (key.type == BTRFS_CHUNK_ITEM_KEY) { 2994 chunk = (struct btrfs_chunk *)(ptr + len); 2995 num_stripes = btrfs_stack_chunk_num_stripes(chunk); 2996 len += btrfs_chunk_item_size(num_stripes); 2997 } else { 2998 ret = -EIO; 2999 break; 3000 } 3001 if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID && 3002 key.offset == chunk_offset) { 3003 memmove(ptr, ptr + len, array_size - (cur + len)); 3004 array_size -= len; 3005 btrfs_set_super_sys_array_size(super_copy, array_size); 3006 } else { 3007 ptr += len; 3008 cur += len; 3009 } 3010 } 3011 return ret; 3012 } 3013 3014 /* 3015 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent. 3016 * @logical: Logical block offset in bytes. 3017 * @length: Length of extent in bytes. 3018 * 3019 * Return: Chunk mapping or ERR_PTR. 
3020 */ 3021 struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info, 3022 u64 logical, u64 length) 3023 { 3024 struct extent_map_tree *em_tree; 3025 struct extent_map *em; 3026 3027 em_tree = &fs_info->mapping_tree; 3028 read_lock(&em_tree->lock); 3029 em = lookup_extent_mapping(em_tree, logical, length); 3030 read_unlock(&em_tree->lock); 3031 3032 if (!em) { 3033 btrfs_crit(fs_info, "unable to find logical %llu length %llu", 3034 logical, length); 3035 return ERR_PTR(-EINVAL); 3036 } 3037 3038 if (em->start > logical || em->start + em->len < logical) { 3039 btrfs_crit(fs_info, 3040 "found a bad mapping, wanted %llu-%llu, found %llu-%llu", 3041 logical, length, em->start, em->start + em->len); 3042 free_extent_map(em); 3043 return ERR_PTR(-EINVAL); 3044 } 3045 3046 /* callers are responsible for dropping em's ref. */ 3047 return em; 3048 } 3049 3050 static int remove_chunk_item(struct btrfs_trans_handle *trans, 3051 struct map_lookup *map, u64 chunk_offset) 3052 { 3053 int i; 3054 3055 /* 3056 * Removing chunk items and updating the device items in the chunks btree 3057 * requires holding the chunk_mutex. 3058 * See the comment at btrfs_chunk_alloc() for the details. 3059 */ 3060 lockdep_assert_held(&trans->fs_info->chunk_mutex); 3061 3062 for (i = 0; i < map->num_stripes; i++) { 3063 int ret; 3064 3065 ret = btrfs_update_device(trans, map->stripes[i].dev); 3066 if (ret) 3067 return ret; 3068 } 3069 3070 return btrfs_free_chunk(trans, chunk_offset); 3071 } 3072 3073 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 3074 { 3075 struct btrfs_fs_info *fs_info = trans->fs_info; 3076 struct extent_map *em; 3077 struct map_lookup *map; 3078 u64 dev_extent_len = 0; 3079 int i, ret = 0; 3080 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 3081 3082 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 3083 if (IS_ERR(em)) { 3084 /* 3085 * This is a logic error, but we don't want to just rely on the 3086 * user having built with ASSERT enabled, so if ASSERT doesn't 3087 * do anything we still error out. 3088 */ 3089 ASSERT(0); 3090 return PTR_ERR(em); 3091 } 3092 map = em->map_lookup; 3093 3094 /* 3095 * First delete the device extent items from the devices btree. 3096 * We take the device_list_mutex to avoid racing with the finishing phase 3097 * of a device replace operation. See the comment below before acquiring 3098 * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex 3099 * because that can result in a deadlock when deleting the device extent 3100 * items from the devices btree - COWing an extent buffer from the btree 3101 * may result in allocating a new metadata chunk, which would attempt to 3102 * lock again fs_info->chunk_mutex. 
3103 */ 3104 mutex_lock(&fs_devices->device_list_mutex); 3105 for (i = 0; i < map->num_stripes; i++) { 3106 struct btrfs_device *device = map->stripes[i].dev; 3107 ret = btrfs_free_dev_extent(trans, device, 3108 map->stripes[i].physical, 3109 &dev_extent_len); 3110 if (ret) { 3111 mutex_unlock(&fs_devices->device_list_mutex); 3112 btrfs_abort_transaction(trans, ret); 3113 goto out; 3114 } 3115 3116 if (device->bytes_used > 0) { 3117 mutex_lock(&fs_info->chunk_mutex); 3118 btrfs_device_set_bytes_used(device, 3119 device->bytes_used - dev_extent_len); 3120 atomic64_add(dev_extent_len, &fs_info->free_chunk_space); 3121 btrfs_clear_space_info_full(fs_info); 3122 mutex_unlock(&fs_info->chunk_mutex); 3123 } 3124 } 3125 mutex_unlock(&fs_devices->device_list_mutex); 3126 3127 /* 3128 * We acquire fs_info->chunk_mutex for 2 reasons: 3129 * 3130 * 1) Just like with the first phase of the chunk allocation, we must 3131 * reserve system space, do all chunk btree updates and deletions, and 3132 * update the system chunk array in the superblock while holding this 3133 * mutex. This is for similar reasons as explained on the comment at 3134 * the top of btrfs_chunk_alloc(); 3135 * 3136 * 2) Prevent races with the final phase of a device replace operation 3137 * that replaces the device object associated with the map's stripes, 3138 * because the device object's id can change at any time during that 3139 * final phase of the device replace operation 3140 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the 3141 * replaced device and then see it with an ID of 3142 * BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating 3143 * the device item, which does not exists on the chunk btree. 3144 * The finishing phase of device replace acquires both the 3145 * device_list_mutex and the chunk_mutex, in that order, so we are 3146 * safe by just acquiring the chunk_mutex. 3147 */ 3148 trans->removing_chunk = true; 3149 mutex_lock(&fs_info->chunk_mutex); 3150 3151 check_system_chunk(trans, map->type); 3152 3153 ret = remove_chunk_item(trans, map, chunk_offset); 3154 /* 3155 * Normally we should not get -ENOSPC since we reserved space before 3156 * through the call to check_system_chunk(). 3157 * 3158 * Despite our system space_info having enough free space, we may not 3159 * be able to allocate extents from its block groups, because all have 3160 * an incompatible profile, which will force us to allocate a new system 3161 * block group with the right profile, or right after we called 3162 * check_system_space() above, a scrub turned the only system block group 3163 * with enough free space into RO mode. 3164 * This is explained with more detail at do_chunk_alloc(). 3165 * 3166 * So if we get -ENOSPC, allocate a new system chunk and retry once. 
3167 */ 3168 if (ret == -ENOSPC) { 3169 const u64 sys_flags = btrfs_system_alloc_profile(fs_info); 3170 struct btrfs_block_group *sys_bg; 3171 3172 sys_bg = btrfs_create_chunk(trans, sys_flags); 3173 if (IS_ERR(sys_bg)) { 3174 ret = PTR_ERR(sys_bg); 3175 btrfs_abort_transaction(trans, ret); 3176 goto out; 3177 } 3178 3179 ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg); 3180 if (ret) { 3181 btrfs_abort_transaction(trans, ret); 3182 goto out; 3183 } 3184 3185 ret = remove_chunk_item(trans, map, chunk_offset); 3186 if (ret) { 3187 btrfs_abort_transaction(trans, ret); 3188 goto out; 3189 } 3190 } else if (ret) { 3191 btrfs_abort_transaction(trans, ret); 3192 goto out; 3193 } 3194 3195 trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len); 3196 3197 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 3198 ret = btrfs_del_sys_chunk(fs_info, chunk_offset); 3199 if (ret) { 3200 btrfs_abort_transaction(trans, ret); 3201 goto out; 3202 } 3203 } 3204 3205 mutex_unlock(&fs_info->chunk_mutex); 3206 trans->removing_chunk = false; 3207 3208 /* 3209 * We are done with chunk btree updates and deletions, so release the 3210 * system space we previously reserved (with check_system_chunk()). 3211 */ 3212 btrfs_trans_release_chunk_metadata(trans); 3213 3214 ret = btrfs_remove_block_group(trans, chunk_offset, em); 3215 if (ret) { 3216 btrfs_abort_transaction(trans, ret); 3217 goto out; 3218 } 3219 3220 out: 3221 if (trans->removing_chunk) { 3222 mutex_unlock(&fs_info->chunk_mutex); 3223 trans->removing_chunk = false; 3224 } 3225 /* once for us */ 3226 free_extent_map(em); 3227 return ret; 3228 } 3229 3230 int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 3231 { 3232 struct btrfs_root *root = fs_info->chunk_root; 3233 struct btrfs_trans_handle *trans; 3234 struct btrfs_block_group *block_group; 3235 u64 length; 3236 int ret; 3237 3238 if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { 3239 btrfs_err(fs_info, 3240 "relocate: not supported on extent tree v2 yet"); 3241 return -EINVAL; 3242 } 3243 3244 /* 3245 * Prevent races with automatic removal of unused block groups. 3246 * After we relocate and before we remove the chunk with offset 3247 * chunk_offset, automatic removal of the block group can kick in, 3248 * resulting in a failure when calling btrfs_remove_chunk() below. 3249 * 3250 * Make sure to acquire this mutex before doing a tree search (dev 3251 * or chunk trees) to find chunks. Otherwise the cleaner kthread might 3252 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after 3253 * we release the path used to search the chunk/dev tree and before 3254 * the current task acquires this mutex and calls us. 3255 */ 3256 lockdep_assert_held(&fs_info->reclaim_bgs_lock); 3257 3258 /* step one, relocate all the extents inside this chunk */ 3259 btrfs_scrub_pause(fs_info); 3260 ret = btrfs_relocate_block_group(fs_info, chunk_offset); 3261 btrfs_scrub_continue(fs_info); 3262 if (ret) 3263 return ret; 3264 3265 block_group = btrfs_lookup_block_group(fs_info, chunk_offset); 3266 if (!block_group) 3267 return -ENOENT; 3268 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group); 3269 length = block_group->length; 3270 btrfs_put_block_group(block_group); 3271 3272 /* 3273 * On a zoned file system, discard the whole block group, this will 3274 * trigger a REQ_OP_ZONE_RESET operation on the device zone. If 3275 * resetting the zone fails, don't treat it as a fatal problem from the 3276 * filesystem's point of view. 
3277 */ 3278 if (btrfs_is_zoned(fs_info)) { 3279 ret = btrfs_discard_extent(fs_info, chunk_offset, length, NULL); 3280 if (ret) 3281 btrfs_info(fs_info, 3282 "failed to reset zone %llu after relocation", 3283 chunk_offset); 3284 } 3285 3286 trans = btrfs_start_trans_remove_block_group(root->fs_info, 3287 chunk_offset); 3288 if (IS_ERR(trans)) { 3289 ret = PTR_ERR(trans); 3290 btrfs_handle_fs_error(root->fs_info, ret, NULL); 3291 return ret; 3292 } 3293 3294 /* 3295 * step two, delete the device extents and the 3296 * chunk tree entries 3297 */ 3298 ret = btrfs_remove_chunk(trans, chunk_offset); 3299 btrfs_end_transaction(trans); 3300 return ret; 3301 } 3302 3303 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info) 3304 { 3305 struct btrfs_root *chunk_root = fs_info->chunk_root; 3306 struct btrfs_path *path; 3307 struct extent_buffer *leaf; 3308 struct btrfs_chunk *chunk; 3309 struct btrfs_key key; 3310 struct btrfs_key found_key; 3311 u64 chunk_type; 3312 bool retried = false; 3313 int failed = 0; 3314 int ret; 3315 3316 path = btrfs_alloc_path(); 3317 if (!path) 3318 return -ENOMEM; 3319 3320 again: 3321 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3322 key.offset = (u64)-1; 3323 key.type = BTRFS_CHUNK_ITEM_KEY; 3324 3325 while (1) { 3326 mutex_lock(&fs_info->reclaim_bgs_lock); 3327 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3328 if (ret < 0) { 3329 mutex_unlock(&fs_info->reclaim_bgs_lock); 3330 goto error; 3331 } 3332 BUG_ON(ret == 0); /* Corruption */ 3333 3334 ret = btrfs_previous_item(chunk_root, path, key.objectid, 3335 key.type); 3336 if (ret) 3337 mutex_unlock(&fs_info->reclaim_bgs_lock); 3338 if (ret < 0) 3339 goto error; 3340 if (ret > 0) 3341 break; 3342 3343 leaf = path->nodes[0]; 3344 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3345 3346 chunk = btrfs_item_ptr(leaf, path->slots[0], 3347 struct btrfs_chunk); 3348 chunk_type = btrfs_chunk_type(leaf, chunk); 3349 btrfs_release_path(path); 3350 3351 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) { 3352 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 3353 if (ret == -ENOSPC) 3354 failed++; 3355 else 3356 BUG_ON(ret); 3357 } 3358 mutex_unlock(&fs_info->reclaim_bgs_lock); 3359 3360 if (found_key.offset == 0) 3361 break; 3362 key.offset = found_key.offset - 1; 3363 } 3364 ret = 0; 3365 if (failed && !retried) { 3366 failed = 0; 3367 retried = true; 3368 goto again; 3369 } else if (WARN_ON(failed && retried)) { 3370 ret = -ENOSPC; 3371 } 3372 error: 3373 btrfs_free_path(path); 3374 return ret; 3375 } 3376 3377 /* 3378 * return 1 : allocate a data chunk successfully, 3379 * return <0: errors during allocating a data chunk, 3380 * return 0 : no need to allocate a data chunk. 
3381 */ 3382 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info, 3383 u64 chunk_offset) 3384 { 3385 struct btrfs_block_group *cache; 3386 u64 bytes_used; 3387 u64 chunk_type; 3388 3389 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3390 ASSERT(cache); 3391 chunk_type = cache->flags; 3392 btrfs_put_block_group(cache); 3393 3394 if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA)) 3395 return 0; 3396 3397 spin_lock(&fs_info->data_sinfo->lock); 3398 bytes_used = fs_info->data_sinfo->bytes_used; 3399 spin_unlock(&fs_info->data_sinfo->lock); 3400 3401 if (!bytes_used) { 3402 struct btrfs_trans_handle *trans; 3403 int ret; 3404 3405 trans = btrfs_join_transaction(fs_info->tree_root); 3406 if (IS_ERR(trans)) 3407 return PTR_ERR(trans); 3408 3409 ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA); 3410 btrfs_end_transaction(trans); 3411 if (ret < 0) 3412 return ret; 3413 return 1; 3414 } 3415 3416 return 0; 3417 } 3418 3419 static int insert_balance_item(struct btrfs_fs_info *fs_info, 3420 struct btrfs_balance_control *bctl) 3421 { 3422 struct btrfs_root *root = fs_info->tree_root; 3423 struct btrfs_trans_handle *trans; 3424 struct btrfs_balance_item *item; 3425 struct btrfs_disk_balance_args disk_bargs; 3426 struct btrfs_path *path; 3427 struct extent_buffer *leaf; 3428 struct btrfs_key key; 3429 int ret, err; 3430 3431 path = btrfs_alloc_path(); 3432 if (!path) 3433 return -ENOMEM; 3434 3435 trans = btrfs_start_transaction(root, 0); 3436 if (IS_ERR(trans)) { 3437 btrfs_free_path(path); 3438 return PTR_ERR(trans); 3439 } 3440 3441 key.objectid = BTRFS_BALANCE_OBJECTID; 3442 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3443 key.offset = 0; 3444 3445 ret = btrfs_insert_empty_item(trans, root, path, &key, 3446 sizeof(*item)); 3447 if (ret) 3448 goto out; 3449 3450 leaf = path->nodes[0]; 3451 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 3452 3453 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item)); 3454 3455 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data); 3456 btrfs_set_balance_data(leaf, item, &disk_bargs); 3457 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta); 3458 btrfs_set_balance_meta(leaf, item, &disk_bargs); 3459 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys); 3460 btrfs_set_balance_sys(leaf, item, &disk_bargs); 3461 3462 btrfs_set_balance_flags(leaf, item, bctl->flags); 3463 3464 btrfs_mark_buffer_dirty(leaf); 3465 out: 3466 btrfs_free_path(path); 3467 err = btrfs_commit_transaction(trans); 3468 if (err && !ret) 3469 ret = err; 3470 return ret; 3471 } 3472 3473 static int del_balance_item(struct btrfs_fs_info *fs_info) 3474 { 3475 struct btrfs_root *root = fs_info->tree_root; 3476 struct btrfs_trans_handle *trans; 3477 struct btrfs_path *path; 3478 struct btrfs_key key; 3479 int ret, err; 3480 3481 path = btrfs_alloc_path(); 3482 if (!path) 3483 return -ENOMEM; 3484 3485 trans = btrfs_start_transaction_fallback_global_rsv(root, 0); 3486 if (IS_ERR(trans)) { 3487 btrfs_free_path(path); 3488 return PTR_ERR(trans); 3489 } 3490 3491 key.objectid = BTRFS_BALANCE_OBJECTID; 3492 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3493 key.offset = 0; 3494 3495 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 3496 if (ret < 0) 3497 goto out; 3498 if (ret > 0) { 3499 ret = -ENOENT; 3500 goto out; 3501 } 3502 3503 ret = btrfs_del_item(trans, root, path); 3504 out: 3505 btrfs_free_path(path); 3506 err = btrfs_commit_transaction(trans); 3507 if (err && !ret) 3508 ret = err; 3509 return ret; 3510 } 3511 3512 /* 3513 * This is a 
heuristic used to reduce the number of chunks balanced on 3514 * resume after balance was interrupted. 3515 */ 3516 static void update_balance_args(struct btrfs_balance_control *bctl) 3517 { 3518 /* 3519 * Turn on soft mode for chunk types that were being converted. 3520 */ 3521 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) 3522 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT; 3523 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) 3524 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT; 3525 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) 3526 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT; 3527 3528 /* 3529 * Turn on usage filter if is not already used. The idea is 3530 * that chunks that we have already balanced should be 3531 * reasonably full. Don't do it for chunks that are being 3532 * converted - that will keep us from relocating unconverted 3533 * (albeit full) chunks. 3534 */ 3535 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) && 3536 !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3537 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3538 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE; 3539 bctl->data.usage = 90; 3540 } 3541 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) && 3542 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3543 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3544 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE; 3545 bctl->sys.usage = 90; 3546 } 3547 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) && 3548 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3549 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3550 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE; 3551 bctl->meta.usage = 90; 3552 } 3553 } 3554 3555 /* 3556 * Clear the balance status in fs_info and delete the balance item from disk. 3557 */ 3558 static void reset_balance_state(struct btrfs_fs_info *fs_info) 3559 { 3560 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3561 int ret; 3562 3563 BUG_ON(!fs_info->balance_ctl); 3564 3565 spin_lock(&fs_info->balance_lock); 3566 fs_info->balance_ctl = NULL; 3567 spin_unlock(&fs_info->balance_lock); 3568 3569 kfree(bctl); 3570 ret = del_balance_item(fs_info); 3571 if (ret) 3572 btrfs_handle_fs_error(fs_info, ret, NULL); 3573 } 3574 3575 /* 3576 * Balance filters. Return 1 if chunk should be filtered out 3577 * (should not be balanced). 
3578 */ 3579 static int chunk_profiles_filter(u64 chunk_type, 3580 struct btrfs_balance_args *bargs) 3581 { 3582 chunk_type = chunk_to_extended(chunk_type) & 3583 BTRFS_EXTENDED_PROFILE_MASK; 3584 3585 if (bargs->profiles & chunk_type) 3586 return 0; 3587 3588 return 1; 3589 } 3590 3591 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, 3592 struct btrfs_balance_args *bargs) 3593 { 3594 struct btrfs_block_group *cache; 3595 u64 chunk_used; 3596 u64 user_thresh_min; 3597 u64 user_thresh_max; 3598 int ret = 1; 3599 3600 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3601 chunk_used = cache->used; 3602 3603 if (bargs->usage_min == 0) 3604 user_thresh_min = 0; 3605 else 3606 user_thresh_min = div_factor_fine(cache->length, 3607 bargs->usage_min); 3608 3609 if (bargs->usage_max == 0) 3610 user_thresh_max = 1; 3611 else if (bargs->usage_max > 100) 3612 user_thresh_max = cache->length; 3613 else 3614 user_thresh_max = div_factor_fine(cache->length, 3615 bargs->usage_max); 3616 3617 if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max) 3618 ret = 0; 3619 3620 btrfs_put_block_group(cache); 3621 return ret; 3622 } 3623 3624 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, 3625 u64 chunk_offset, struct btrfs_balance_args *bargs) 3626 { 3627 struct btrfs_block_group *cache; 3628 u64 chunk_used, user_thresh; 3629 int ret = 1; 3630 3631 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3632 chunk_used = cache->used; 3633 3634 if (bargs->usage_min == 0) 3635 user_thresh = 1; 3636 else if (bargs->usage > 100) 3637 user_thresh = cache->length; 3638 else 3639 user_thresh = div_factor_fine(cache->length, bargs->usage); 3640 3641 if (chunk_used < user_thresh) 3642 ret = 0; 3643 3644 btrfs_put_block_group(cache); 3645 return ret; 3646 } 3647 3648 static int chunk_devid_filter(struct extent_buffer *leaf, 3649 struct btrfs_chunk *chunk, 3650 struct btrfs_balance_args *bargs) 3651 { 3652 struct btrfs_stripe *stripe; 3653 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3654 int i; 3655 3656 for (i = 0; i < num_stripes; i++) { 3657 stripe = btrfs_stripe_nr(chunk, i); 3658 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid) 3659 return 0; 3660 } 3661 3662 return 1; 3663 } 3664 3665 static u64 calc_data_stripes(u64 type, int num_stripes) 3666 { 3667 const int index = btrfs_bg_flags_to_raid_index(type); 3668 const int ncopies = btrfs_raid_array[index].ncopies; 3669 const int nparity = btrfs_raid_array[index].nparity; 3670 3671 return (num_stripes - nparity) / ncopies; 3672 } 3673 3674 /* [pstart, pend) */ 3675 static int chunk_drange_filter(struct extent_buffer *leaf, 3676 struct btrfs_chunk *chunk, 3677 struct btrfs_balance_args *bargs) 3678 { 3679 struct btrfs_stripe *stripe; 3680 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3681 u64 stripe_offset; 3682 u64 stripe_length; 3683 u64 type; 3684 int factor; 3685 int i; 3686 3687 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID)) 3688 return 0; 3689 3690 type = btrfs_chunk_type(leaf, chunk); 3691 factor = calc_data_stripes(type, num_stripes); 3692 3693 for (i = 0; i < num_stripes; i++) { 3694 stripe = btrfs_stripe_nr(chunk, i); 3695 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid) 3696 continue; 3697 3698 stripe_offset = btrfs_stripe_offset(leaf, stripe); 3699 stripe_length = btrfs_chunk_length(leaf, chunk); 3700 stripe_length = div_u64(stripe_length, factor); 3701 3702 if (stripe_offset < bargs->pend && 3703 stripe_offset + stripe_length > bargs->pstart) 3704 return 0; 
3705 } 3706 3707 return 1; 3708 } 3709 3710 /* [vstart, vend) */ 3711 static int chunk_vrange_filter(struct extent_buffer *leaf, 3712 struct btrfs_chunk *chunk, 3713 u64 chunk_offset, 3714 struct btrfs_balance_args *bargs) 3715 { 3716 if (chunk_offset < bargs->vend && 3717 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart) 3718 /* at least part of the chunk is inside this vrange */ 3719 return 0; 3720 3721 return 1; 3722 } 3723 3724 static int chunk_stripes_range_filter(struct extent_buffer *leaf, 3725 struct btrfs_chunk *chunk, 3726 struct btrfs_balance_args *bargs) 3727 { 3728 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3729 3730 if (bargs->stripes_min <= num_stripes 3731 && num_stripes <= bargs->stripes_max) 3732 return 0; 3733 3734 return 1; 3735 } 3736 3737 static int chunk_soft_convert_filter(u64 chunk_type, 3738 struct btrfs_balance_args *bargs) 3739 { 3740 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 3741 return 0; 3742 3743 chunk_type = chunk_to_extended(chunk_type) & 3744 BTRFS_EXTENDED_PROFILE_MASK; 3745 3746 if (bargs->target == chunk_type) 3747 return 1; 3748 3749 return 0; 3750 } 3751 3752 static int should_balance_chunk(struct extent_buffer *leaf, 3753 struct btrfs_chunk *chunk, u64 chunk_offset) 3754 { 3755 struct btrfs_fs_info *fs_info = leaf->fs_info; 3756 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3757 struct btrfs_balance_args *bargs = NULL; 3758 u64 chunk_type = btrfs_chunk_type(leaf, chunk); 3759 3760 /* type filter */ 3761 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) & 3762 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) { 3763 return 0; 3764 } 3765 3766 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3767 bargs = &bctl->data; 3768 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3769 bargs = &bctl->sys; 3770 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3771 bargs = &bctl->meta; 3772 3773 /* profiles filter */ 3774 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) && 3775 chunk_profiles_filter(chunk_type, bargs)) { 3776 return 0; 3777 } 3778 3779 /* usage filter */ 3780 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) && 3781 chunk_usage_filter(fs_info, chunk_offset, bargs)) { 3782 return 0; 3783 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3784 chunk_usage_range_filter(fs_info, chunk_offset, bargs)) { 3785 return 0; 3786 } 3787 3788 /* devid filter */ 3789 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) && 3790 chunk_devid_filter(leaf, chunk, bargs)) { 3791 return 0; 3792 } 3793 3794 /* drange filter, makes sense only with devid filter */ 3795 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) && 3796 chunk_drange_filter(leaf, chunk, bargs)) { 3797 return 0; 3798 } 3799 3800 /* vrange filter */ 3801 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) && 3802 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) { 3803 return 0; 3804 } 3805 3806 /* stripes filter */ 3807 if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) && 3808 chunk_stripes_range_filter(leaf, chunk, bargs)) { 3809 return 0; 3810 } 3811 3812 /* soft profile changing mode */ 3813 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) && 3814 chunk_soft_convert_filter(chunk_type, bargs)) { 3815 return 0; 3816 } 3817 3818 /* 3819 * limited by count, must be the last filter 3820 */ 3821 if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) { 3822 if (bargs->limit == 0) 3823 return 0; 3824 else 3825 bargs->limit--; 3826 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) { 3827 /* 3828 * Same logic as the 'limit' filter; the minimum cannot be 3829 * determined here 
because we do not have the global information 3830 * about the count of all chunks that satisfy the filters. 3831 */ 3832 if (bargs->limit_max == 0) 3833 return 0; 3834 else 3835 bargs->limit_max--; 3836 } 3837 3838 return 1; 3839 } 3840 3841 static int __btrfs_balance(struct btrfs_fs_info *fs_info) 3842 { 3843 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3844 struct btrfs_root *chunk_root = fs_info->chunk_root; 3845 u64 chunk_type; 3846 struct btrfs_chunk *chunk; 3847 struct btrfs_path *path = NULL; 3848 struct btrfs_key key; 3849 struct btrfs_key found_key; 3850 struct extent_buffer *leaf; 3851 int slot; 3852 int ret; 3853 int enospc_errors = 0; 3854 bool counting = true; 3855 /* The single value limit and min/max limits use the same bytes in the */ 3856 u64 limit_data = bctl->data.limit; 3857 u64 limit_meta = bctl->meta.limit; 3858 u64 limit_sys = bctl->sys.limit; 3859 u32 count_data = 0; 3860 u32 count_meta = 0; 3861 u32 count_sys = 0; 3862 int chunk_reserved = 0; 3863 3864 path = btrfs_alloc_path(); 3865 if (!path) { 3866 ret = -ENOMEM; 3867 goto error; 3868 } 3869 3870 /* zero out stat counters */ 3871 spin_lock(&fs_info->balance_lock); 3872 memset(&bctl->stat, 0, sizeof(bctl->stat)); 3873 spin_unlock(&fs_info->balance_lock); 3874 again: 3875 if (!counting) { 3876 /* 3877 * The single value limit and min/max limits use the same bytes 3878 * in the 3879 */ 3880 bctl->data.limit = limit_data; 3881 bctl->meta.limit = limit_meta; 3882 bctl->sys.limit = limit_sys; 3883 } 3884 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3885 key.offset = (u64)-1; 3886 key.type = BTRFS_CHUNK_ITEM_KEY; 3887 3888 while (1) { 3889 if ((!counting && atomic_read(&fs_info->balance_pause_req)) || 3890 atomic_read(&fs_info->balance_cancel_req)) { 3891 ret = -ECANCELED; 3892 goto error; 3893 } 3894 3895 mutex_lock(&fs_info->reclaim_bgs_lock); 3896 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3897 if (ret < 0) { 3898 mutex_unlock(&fs_info->reclaim_bgs_lock); 3899 goto error; 3900 } 3901 3902 /* 3903 * this shouldn't happen, it means the last relocate 3904 * failed 3905 */ 3906 if (ret == 0) 3907 BUG(); /* FIXME break ? 
*/ 3908 3909 ret = btrfs_previous_item(chunk_root, path, 0, 3910 BTRFS_CHUNK_ITEM_KEY); 3911 if (ret) { 3912 mutex_unlock(&fs_info->reclaim_bgs_lock); 3913 ret = 0; 3914 break; 3915 } 3916 3917 leaf = path->nodes[0]; 3918 slot = path->slots[0]; 3919 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3920 3921 if (found_key.objectid != key.objectid) { 3922 mutex_unlock(&fs_info->reclaim_bgs_lock); 3923 break; 3924 } 3925 3926 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 3927 chunk_type = btrfs_chunk_type(leaf, chunk); 3928 3929 if (!counting) { 3930 spin_lock(&fs_info->balance_lock); 3931 bctl->stat.considered++; 3932 spin_unlock(&fs_info->balance_lock); 3933 } 3934 3935 ret = should_balance_chunk(leaf, chunk, found_key.offset); 3936 3937 btrfs_release_path(path); 3938 if (!ret) { 3939 mutex_unlock(&fs_info->reclaim_bgs_lock); 3940 goto loop; 3941 } 3942 3943 if (counting) { 3944 mutex_unlock(&fs_info->reclaim_bgs_lock); 3945 spin_lock(&fs_info->balance_lock); 3946 bctl->stat.expected++; 3947 spin_unlock(&fs_info->balance_lock); 3948 3949 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3950 count_data++; 3951 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3952 count_sys++; 3953 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3954 count_meta++; 3955 3956 goto loop; 3957 } 3958 3959 /* 3960 * Apply limit_min filter, no need to check if the LIMITS 3961 * filter is used, limit_min is 0 by default 3962 */ 3963 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) && 3964 count_data < bctl->data.limit_min) 3965 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) && 3966 count_meta < bctl->meta.limit_min) 3967 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) && 3968 count_sys < bctl->sys.limit_min)) { 3969 mutex_unlock(&fs_info->reclaim_bgs_lock); 3970 goto loop; 3971 } 3972 3973 if (!chunk_reserved) { 3974 /* 3975 * We may be relocating the only data chunk we have, 3976 * which could potentially end up with losing data's 3977 * raid profile, so lets allocate an empty one in 3978 * advance. 3979 */ 3980 ret = btrfs_may_alloc_data_chunk(fs_info, 3981 found_key.offset); 3982 if (ret < 0) { 3983 mutex_unlock(&fs_info->reclaim_bgs_lock); 3984 goto error; 3985 } else if (ret == 1) { 3986 chunk_reserved = 1; 3987 } 3988 } 3989 3990 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 3991 mutex_unlock(&fs_info->reclaim_bgs_lock); 3992 if (ret == -ENOSPC) { 3993 enospc_errors++; 3994 } else if (ret == -ETXTBSY) { 3995 btrfs_info(fs_info, 3996 "skipping relocation of block group %llu due to active swapfile", 3997 found_key.offset); 3998 ret = 0; 3999 } else if (ret) { 4000 goto error; 4001 } else { 4002 spin_lock(&fs_info->balance_lock); 4003 bctl->stat.completed++; 4004 spin_unlock(&fs_info->balance_lock); 4005 } 4006 loop: 4007 if (found_key.offset == 0) 4008 break; 4009 key.offset = found_key.offset - 1; 4010 } 4011 4012 if (counting) { 4013 btrfs_release_path(path); 4014 counting = false; 4015 goto again; 4016 } 4017 error: 4018 btrfs_free_path(path); 4019 if (enospc_errors) { 4020 btrfs_info(fs_info, "%d enospc errors during balance", 4021 enospc_errors); 4022 if (!ret) 4023 ret = -ENOSPC; 4024 } 4025 4026 return ret; 4027 } 4028 4029 /** 4030 * alloc_profile_is_valid - see if a given profile is valid and reduced 4031 * @flags: profile to validate 4032 * @extended: if true @flags is treated as an extended profile 4033 */ 4034 static int alloc_profile_is_valid(u64 flags, int extended) 4035 { 4036 u64 mask = (extended ? 
BTRFS_EXTENDED_PROFILE_MASK : 4037 BTRFS_BLOCK_GROUP_PROFILE_MASK); 4038 4039 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK; 4040 4041 /* 1) check that all other bits are zeroed */ 4042 if (flags & ~mask) 4043 return 0; 4044 4045 /* 2) see if profile is reduced */ 4046 if (flags == 0) 4047 return !extended; /* "0" is valid for usual profiles */ 4048 4049 return has_single_bit_set(flags); 4050 } 4051 4052 static inline int balance_need_close(struct btrfs_fs_info *fs_info) 4053 { 4054 /* cancel requested || normal exit path */ 4055 return atomic_read(&fs_info->balance_cancel_req) || 4056 (atomic_read(&fs_info->balance_pause_req) == 0 && 4057 atomic_read(&fs_info->balance_cancel_req) == 0); 4058 } 4059 4060 /* 4061 * Validate target profile against allowed profiles and return true if it's OK. 4062 * Otherwise print the error message and return false. 4063 */ 4064 static inline int validate_convert_profile(struct btrfs_fs_info *fs_info, 4065 const struct btrfs_balance_args *bargs, 4066 u64 allowed, const char *type) 4067 { 4068 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 4069 return true; 4070 4071 /* Profile is valid and does not have bits outside of the allowed set */ 4072 if (alloc_profile_is_valid(bargs->target, 1) && 4073 (bargs->target & ~allowed) == 0) 4074 return true; 4075 4076 btrfs_err(fs_info, "balance: invalid convert %s profile %s", 4077 type, btrfs_bg_type_to_raid_name(bargs->target)); 4078 return false; 4079 } 4080 4081 /* 4082 * Fill @buf with textual description of balance filter flags @bargs, up to 4083 * @size_buf including the terminating null. The output may be trimmed if it 4084 * does not fit into the provided buffer. 4085 */ 4086 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf, 4087 u32 size_buf) 4088 { 4089 int ret; 4090 u32 size_bp = size_buf; 4091 char *bp = buf; 4092 u64 flags = bargs->flags; 4093 char tmp_buf[128] = {'\0'}; 4094 4095 if (!flags) 4096 return; 4097 4098 #define CHECK_APPEND_NOARG(a) \ 4099 do { \ 4100 ret = snprintf(bp, size_bp, (a)); \ 4101 if (ret < 0 || ret >= size_bp) \ 4102 goto out_overflow; \ 4103 size_bp -= ret; \ 4104 bp += ret; \ 4105 } while (0) 4106 4107 #define CHECK_APPEND_1ARG(a, v1) \ 4108 do { \ 4109 ret = snprintf(bp, size_bp, (a), (v1)); \ 4110 if (ret < 0 || ret >= size_bp) \ 4111 goto out_overflow; \ 4112 size_bp -= ret; \ 4113 bp += ret; \ 4114 } while (0) 4115 4116 #define CHECK_APPEND_2ARG(a, v1, v2) \ 4117 do { \ 4118 ret = snprintf(bp, size_bp, (a), (v1), (v2)); \ 4119 if (ret < 0 || ret >= size_bp) \ 4120 goto out_overflow; \ 4121 size_bp -= ret; \ 4122 bp += ret; \ 4123 } while (0) 4124 4125 if (flags & BTRFS_BALANCE_ARGS_CONVERT) 4126 CHECK_APPEND_1ARG("convert=%s,", 4127 btrfs_bg_type_to_raid_name(bargs->target)); 4128 4129 if (flags & BTRFS_BALANCE_ARGS_SOFT) 4130 CHECK_APPEND_NOARG("soft,"); 4131 4132 if (flags & BTRFS_BALANCE_ARGS_PROFILES) { 4133 btrfs_describe_block_groups(bargs->profiles, tmp_buf, 4134 sizeof(tmp_buf)); 4135 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf); 4136 } 4137 4138 if (flags & BTRFS_BALANCE_ARGS_USAGE) 4139 CHECK_APPEND_1ARG("usage=%llu,", bargs->usage); 4140 4141 if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) 4142 CHECK_APPEND_2ARG("usage=%u..%u,", 4143 bargs->usage_min, bargs->usage_max); 4144 4145 if (flags & BTRFS_BALANCE_ARGS_DEVID) 4146 CHECK_APPEND_1ARG("devid=%llu,", bargs->devid); 4147 4148 if (flags & BTRFS_BALANCE_ARGS_DRANGE) 4149 CHECK_APPEND_2ARG("drange=%llu..%llu,", 4150 bargs->pstart, bargs->pend); 4151 4152 if (flags & BTRFS_BALANCE_ARGS_VRANGE) 4153 
CHECK_APPEND_2ARG("vrange=%llu..%llu,", 4154 bargs->vstart, bargs->vend); 4155 4156 if (flags & BTRFS_BALANCE_ARGS_LIMIT) 4157 CHECK_APPEND_1ARG("limit=%llu,", bargs->limit); 4158 4159 if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE) 4160 CHECK_APPEND_2ARG("limit=%u..%u,", 4161 bargs->limit_min, bargs->limit_max); 4162 4163 if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) 4164 CHECK_APPEND_2ARG("stripes=%u..%u,", 4165 bargs->stripes_min, bargs->stripes_max); 4166 4167 #undef CHECK_APPEND_2ARG 4168 #undef CHECK_APPEND_1ARG 4169 #undef CHECK_APPEND_NOARG 4170 4171 out_overflow: 4172 4173 if (size_bp < size_buf) 4174 buf[size_buf - size_bp - 1] = '\0'; /* remove last , */ 4175 else 4176 buf[0] = '\0'; 4177 } 4178 4179 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info) 4180 { 4181 u32 size_buf = 1024; 4182 char tmp_buf[192] = {'\0'}; 4183 char *buf; 4184 char *bp; 4185 u32 size_bp = size_buf; 4186 int ret; 4187 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 4188 4189 buf = kzalloc(size_buf, GFP_KERNEL); 4190 if (!buf) 4191 return; 4192 4193 bp = buf; 4194 4195 #define CHECK_APPEND_1ARG(a, v1) \ 4196 do { \ 4197 ret = snprintf(bp, size_bp, (a), (v1)); \ 4198 if (ret < 0 || ret >= size_bp) \ 4199 goto out_overflow; \ 4200 size_bp -= ret; \ 4201 bp += ret; \ 4202 } while (0) 4203 4204 if (bctl->flags & BTRFS_BALANCE_FORCE) 4205 CHECK_APPEND_1ARG("%s", "-f "); 4206 4207 if (bctl->flags & BTRFS_BALANCE_DATA) { 4208 describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf)); 4209 CHECK_APPEND_1ARG("-d%s ", tmp_buf); 4210 } 4211 4212 if (bctl->flags & BTRFS_BALANCE_METADATA) { 4213 describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf)); 4214 CHECK_APPEND_1ARG("-m%s ", tmp_buf); 4215 } 4216 4217 if (bctl->flags & BTRFS_BALANCE_SYSTEM) { 4218 describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf)); 4219 CHECK_APPEND_1ARG("-s%s ", tmp_buf); 4220 } 4221 4222 #undef CHECK_APPEND_1ARG 4223 4224 out_overflow: 4225 4226 if (size_bp < size_buf) 4227 buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */ 4228 btrfs_info(fs_info, "balance: %s %s", 4229 (bctl->flags & BTRFS_BALANCE_RESUME) ? 4230 "resume" : "start", buf); 4231 4232 kfree(buf); 4233 } 4234 4235 /* 4236 * Should be called with balance mutexe held 4237 */ 4238 int btrfs_balance(struct btrfs_fs_info *fs_info, 4239 struct btrfs_balance_control *bctl, 4240 struct btrfs_ioctl_balance_args *bargs) 4241 { 4242 u64 meta_target, data_target; 4243 u64 allowed; 4244 int mixed = 0; 4245 int ret; 4246 u64 num_devices; 4247 unsigned seq; 4248 bool reducing_redundancy; 4249 int i; 4250 4251 if (btrfs_fs_closing(fs_info) || 4252 atomic_read(&fs_info->balance_pause_req) || 4253 btrfs_should_cancel_balance(fs_info)) { 4254 ret = -EINVAL; 4255 goto out; 4256 } 4257 4258 allowed = btrfs_super_incompat_flags(fs_info->super_copy); 4259 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) 4260 mixed = 1; 4261 4262 /* 4263 * In case of mixed groups both data and meta should be picked, 4264 * and identical options should be given for both of them. 
4265 */ 4266 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA; 4267 if (mixed && (bctl->flags & allowed)) { 4268 if (!(bctl->flags & BTRFS_BALANCE_DATA) || 4269 !(bctl->flags & BTRFS_BALANCE_METADATA) || 4270 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) { 4271 btrfs_err(fs_info, 4272 "balance: mixed groups data and metadata options must be the same"); 4273 ret = -EINVAL; 4274 goto out; 4275 } 4276 } 4277 4278 /* 4279 * rw_devices will not change at the moment, device add/delete/replace 4280 * are exclusive 4281 */ 4282 num_devices = fs_info->fs_devices->rw_devices; 4283 4284 /* 4285 * SINGLE profile on-disk has no profile bit, but in-memory we have a 4286 * special bit for it, to make it easier to distinguish. Thus we need 4287 * to set it manually, or balance would refuse the profile. 4288 */ 4289 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE; 4290 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) 4291 if (num_devices >= btrfs_raid_array[i].devs_min) 4292 allowed |= btrfs_raid_array[i].bg_flag; 4293 4294 if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") || 4295 !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") || 4296 !validate_convert_profile(fs_info, &bctl->sys, allowed, "system")) { 4297 ret = -EINVAL; 4298 goto out; 4299 } 4300 4301 /* 4302 * Allow to reduce metadata or system integrity only if force set for 4303 * profiles with redundancy (copies, parity) 4304 */ 4305 allowed = 0; 4306 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) { 4307 if (btrfs_raid_array[i].ncopies >= 2 || 4308 btrfs_raid_array[i].tolerated_failures >= 1) 4309 allowed |= btrfs_raid_array[i].bg_flag; 4310 } 4311 do { 4312 seq = read_seqbegin(&fs_info->profiles_lock); 4313 4314 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4315 (fs_info->avail_system_alloc_bits & allowed) && 4316 !(bctl->sys.target & allowed)) || 4317 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4318 (fs_info->avail_metadata_alloc_bits & allowed) && 4319 !(bctl->meta.target & allowed))) 4320 reducing_redundancy = true; 4321 else 4322 reducing_redundancy = false; 4323 4324 /* if we're not converting, the target field is uninitialized */ 4325 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 4326 bctl->meta.target : fs_info->avail_metadata_alloc_bits; 4327 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 
4328 bctl->data.target : fs_info->avail_data_alloc_bits; 4329 } while (read_seqretry(&fs_info->profiles_lock, seq)); 4330 4331 if (reducing_redundancy) { 4332 if (bctl->flags & BTRFS_BALANCE_FORCE) { 4333 btrfs_info(fs_info, 4334 "balance: force reducing metadata redundancy"); 4335 } else { 4336 btrfs_err(fs_info, 4337 "balance: reduces metadata redundancy, use --force if you want this"); 4338 ret = -EINVAL; 4339 goto out; 4340 } 4341 } 4342 4343 if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) < 4344 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) { 4345 btrfs_warn(fs_info, 4346 "balance: metadata profile %s has lower redundancy than data profile %s", 4347 btrfs_bg_type_to_raid_name(meta_target), 4348 btrfs_bg_type_to_raid_name(data_target)); 4349 } 4350 4351 ret = insert_balance_item(fs_info, bctl); 4352 if (ret && ret != -EEXIST) 4353 goto out; 4354 4355 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) { 4356 BUG_ON(ret == -EEXIST); 4357 BUG_ON(fs_info->balance_ctl); 4358 spin_lock(&fs_info->balance_lock); 4359 fs_info->balance_ctl = bctl; 4360 spin_unlock(&fs_info->balance_lock); 4361 } else { 4362 BUG_ON(ret != -EEXIST); 4363 spin_lock(&fs_info->balance_lock); 4364 update_balance_args(bctl); 4365 spin_unlock(&fs_info->balance_lock); 4366 } 4367 4368 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4369 set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4370 describe_balance_start_or_resume(fs_info); 4371 mutex_unlock(&fs_info->balance_mutex); 4372 4373 ret = __btrfs_balance(fs_info); 4374 4375 mutex_lock(&fs_info->balance_mutex); 4376 if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req)) { 4377 btrfs_info(fs_info, "balance: paused"); 4378 btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED); 4379 } 4380 /* 4381 * Balance can be canceled by: 4382 * 4383 * - Regular cancel request 4384 * Then ret == -ECANCELED and balance_cancel_req > 0 4385 * 4386 * - Fatal signal to "btrfs" process 4387 * Either the signal caught by wait_reserve_ticket() and callers 4388 * got -EINTR, or caught by btrfs_should_cancel_balance() and 4389 * got -ECANCELED. 4390 * Either way, in this case balance_cancel_req = 0, and 4391 * ret == -EINTR or ret == -ECANCELED. 4392 * 4393 * So here we only check the return value to catch canceled balance. 
4394 */ 4395 else if (ret == -ECANCELED || ret == -EINTR) 4396 btrfs_info(fs_info, "balance: canceled"); 4397 else 4398 btrfs_info(fs_info, "balance: ended with status: %d", ret); 4399 4400 clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4401 4402 if (bargs) { 4403 memset(bargs, 0, sizeof(*bargs)); 4404 btrfs_update_ioctl_balance_args(fs_info, bargs); 4405 } 4406 4407 if ((ret && ret != -ECANCELED && ret != -ENOSPC) || 4408 balance_need_close(fs_info)) { 4409 reset_balance_state(fs_info); 4410 btrfs_exclop_finish(fs_info); 4411 } 4412 4413 wake_up(&fs_info->balance_wait_q); 4414 4415 return ret; 4416 out: 4417 if (bctl->flags & BTRFS_BALANCE_RESUME) 4418 reset_balance_state(fs_info); 4419 else 4420 kfree(bctl); 4421 btrfs_exclop_finish(fs_info); 4422 4423 return ret; 4424 } 4425 4426 static int balance_kthread(void *data) 4427 { 4428 struct btrfs_fs_info *fs_info = data; 4429 int ret = 0; 4430 4431 sb_start_write(fs_info->sb); 4432 mutex_lock(&fs_info->balance_mutex); 4433 if (fs_info->balance_ctl) 4434 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL); 4435 mutex_unlock(&fs_info->balance_mutex); 4436 sb_end_write(fs_info->sb); 4437 4438 return ret; 4439 } 4440 4441 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info) 4442 { 4443 struct task_struct *tsk; 4444 4445 mutex_lock(&fs_info->balance_mutex); 4446 if (!fs_info->balance_ctl) { 4447 mutex_unlock(&fs_info->balance_mutex); 4448 return 0; 4449 } 4450 mutex_unlock(&fs_info->balance_mutex); 4451 4452 if (btrfs_test_opt(fs_info, SKIP_BALANCE)) { 4453 btrfs_info(fs_info, "balance: resume skipped"); 4454 return 0; 4455 } 4456 4457 spin_lock(&fs_info->super_lock); 4458 ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED); 4459 fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE; 4460 spin_unlock(&fs_info->super_lock); 4461 /* 4462 * A ro->rw remount sequence should continue with the paused balance 4463 * regardless of who pauses it, system or the user as of now, so set 4464 * the resume flag. 
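* For example (illustrative commands): a balance paused by a ro remount is expected to continue after 'mount -o remount,rw', which ends up starting the balance kthread below.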
4465 */ 4466 spin_lock(&fs_info->balance_lock); 4467 fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME; 4468 spin_unlock(&fs_info->balance_lock); 4469 4470 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance"); 4471 return PTR_ERR_OR_ZERO(tsk); 4472 } 4473 4474 int btrfs_recover_balance(struct btrfs_fs_info *fs_info) 4475 { 4476 struct btrfs_balance_control *bctl; 4477 struct btrfs_balance_item *item; 4478 struct btrfs_disk_balance_args disk_bargs; 4479 struct btrfs_path *path; 4480 struct extent_buffer *leaf; 4481 struct btrfs_key key; 4482 int ret; 4483 4484 path = btrfs_alloc_path(); 4485 if (!path) 4486 return -ENOMEM; 4487 4488 key.objectid = BTRFS_BALANCE_OBJECTID; 4489 key.type = BTRFS_TEMPORARY_ITEM_KEY; 4490 key.offset = 0; 4491 4492 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 4493 if (ret < 0) 4494 goto out; 4495 if (ret > 0) { /* ret = -ENOENT; */ 4496 ret = 0; 4497 goto out; 4498 } 4499 4500 bctl = kzalloc(sizeof(*bctl), GFP_NOFS); 4501 if (!bctl) { 4502 ret = -ENOMEM; 4503 goto out; 4504 } 4505 4506 leaf = path->nodes[0]; 4507 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 4508 4509 bctl->flags = btrfs_balance_flags(leaf, item); 4510 bctl->flags |= BTRFS_BALANCE_RESUME; 4511 4512 btrfs_balance_data(leaf, item, &disk_bargs); 4513 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs); 4514 btrfs_balance_meta(leaf, item, &disk_bargs); 4515 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs); 4516 btrfs_balance_sys(leaf, item, &disk_bargs); 4517 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs); 4518 4519 /* 4520 * This should never happen, as the paused balance state is recovered 4521 * during mount without any chance of other exclusive ops to collide. 4522 * 4523 * This gives the exclusive op status to balance and keeps in paused 4524 * state until user intervention (cancel or umount). If the ownership 4525 * cannot be assigned, show a message but do not fail. The balance 4526 * is in a paused state and must have fs_info::balance_ctl properly 4527 * set up. 
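* User intervention here means e.g. 'btrfs balance resume' or 'btrfs balance cancel' run against the mounted filesystem (commands for illustration only).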
4528 */ 4529 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED)) 4530 btrfs_warn(fs_info, 4531 "balance: cannot set exclusive op status, resume manually"); 4532 4533 btrfs_release_path(path); 4534 4535 mutex_lock(&fs_info->balance_mutex); 4536 BUG_ON(fs_info->balance_ctl); 4537 spin_lock(&fs_info->balance_lock); 4538 fs_info->balance_ctl = bctl; 4539 spin_unlock(&fs_info->balance_lock); 4540 mutex_unlock(&fs_info->balance_mutex); 4541 out: 4542 btrfs_free_path(path); 4543 return ret; 4544 } 4545 4546 int btrfs_pause_balance(struct btrfs_fs_info *fs_info) 4547 { 4548 int ret = 0; 4549 4550 mutex_lock(&fs_info->balance_mutex); 4551 if (!fs_info->balance_ctl) { 4552 mutex_unlock(&fs_info->balance_mutex); 4553 return -ENOTCONN; 4554 } 4555 4556 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4557 atomic_inc(&fs_info->balance_pause_req); 4558 mutex_unlock(&fs_info->balance_mutex); 4559 4560 wait_event(fs_info->balance_wait_q, 4561 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4562 4563 mutex_lock(&fs_info->balance_mutex); 4564 /* we are good with balance_ctl ripped off from under us */ 4565 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4566 atomic_dec(&fs_info->balance_pause_req); 4567 } else { 4568 ret = -ENOTCONN; 4569 } 4570 4571 mutex_unlock(&fs_info->balance_mutex); 4572 return ret; 4573 } 4574 4575 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info) 4576 { 4577 mutex_lock(&fs_info->balance_mutex); 4578 if (!fs_info->balance_ctl) { 4579 mutex_unlock(&fs_info->balance_mutex); 4580 return -ENOTCONN; 4581 } 4582 4583 /* 4584 * A paused balance with the item stored on disk can be resumed at 4585 * mount time if the mount is read-write. Otherwise it's still paused 4586 * and we must not allow cancelling as it deletes the item. 4587 */ 4588 if (sb_rdonly(fs_info->sb)) { 4589 mutex_unlock(&fs_info->balance_mutex); 4590 return -EROFS; 4591 } 4592 4593 atomic_inc(&fs_info->balance_cancel_req); 4594 /* 4595 * if we are running just wait and return, balance item is 4596 * deleted in btrfs_balance in this case 4597 */ 4598 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4599 mutex_unlock(&fs_info->balance_mutex); 4600 wait_event(fs_info->balance_wait_q, 4601 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4602 mutex_lock(&fs_info->balance_mutex); 4603 } else { 4604 mutex_unlock(&fs_info->balance_mutex); 4605 /* 4606 * Lock released to allow other waiters to continue, we'll 4607 * reexamine the status again. 
4608 */ 4609 mutex_lock(&fs_info->balance_mutex); 4610 4611 if (fs_info->balance_ctl) { 4612 reset_balance_state(fs_info); 4613 btrfs_exclop_finish(fs_info); 4614 btrfs_info(fs_info, "balance: canceled"); 4615 } 4616 } 4617 4618 BUG_ON(fs_info->balance_ctl || 4619 test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4620 atomic_dec(&fs_info->balance_cancel_req); 4621 mutex_unlock(&fs_info->balance_mutex); 4622 return 0; 4623 } 4624 4625 int btrfs_uuid_scan_kthread(void *data) 4626 { 4627 struct btrfs_fs_info *fs_info = data; 4628 struct btrfs_root *root = fs_info->tree_root; 4629 struct btrfs_key key; 4630 struct btrfs_path *path = NULL; 4631 int ret = 0; 4632 struct extent_buffer *eb; 4633 int slot; 4634 struct btrfs_root_item root_item; 4635 u32 item_size; 4636 struct btrfs_trans_handle *trans = NULL; 4637 bool closing = false; 4638 4639 path = btrfs_alloc_path(); 4640 if (!path) { 4641 ret = -ENOMEM; 4642 goto out; 4643 } 4644 4645 key.objectid = 0; 4646 key.type = BTRFS_ROOT_ITEM_KEY; 4647 key.offset = 0; 4648 4649 while (1) { 4650 if (btrfs_fs_closing(fs_info)) { 4651 closing = true; 4652 break; 4653 } 4654 ret = btrfs_search_forward(root, &key, path, 4655 BTRFS_OLDEST_GENERATION); 4656 if (ret) { 4657 if (ret > 0) 4658 ret = 0; 4659 break; 4660 } 4661 4662 if (key.type != BTRFS_ROOT_ITEM_KEY || 4663 (key.objectid < BTRFS_FIRST_FREE_OBJECTID && 4664 key.objectid != BTRFS_FS_TREE_OBJECTID) || 4665 key.objectid > BTRFS_LAST_FREE_OBJECTID) 4666 goto skip; 4667 4668 eb = path->nodes[0]; 4669 slot = path->slots[0]; 4670 item_size = btrfs_item_size(eb, slot); 4671 if (item_size < sizeof(root_item)) 4672 goto skip; 4673 4674 read_extent_buffer(eb, &root_item, 4675 btrfs_item_ptr_offset(eb, slot), 4676 (int)sizeof(root_item)); 4677 if (btrfs_root_refs(&root_item) == 0) 4678 goto skip; 4679 4680 if (!btrfs_is_empty_uuid(root_item.uuid) || 4681 !btrfs_is_empty_uuid(root_item.received_uuid)) { 4682 if (trans) 4683 goto update_tree; 4684 4685 btrfs_release_path(path); 4686 /* 4687 * 1 - subvol uuid item 4688 * 1 - received_subvol uuid item 4689 */ 4690 trans = btrfs_start_transaction(fs_info->uuid_root, 2); 4691 if (IS_ERR(trans)) { 4692 ret = PTR_ERR(trans); 4693 break; 4694 } 4695 continue; 4696 } else { 4697 goto skip; 4698 } 4699 update_tree: 4700 btrfs_release_path(path); 4701 if (!btrfs_is_empty_uuid(root_item.uuid)) { 4702 ret = btrfs_uuid_tree_add(trans, root_item.uuid, 4703 BTRFS_UUID_KEY_SUBVOL, 4704 key.objectid); 4705 if (ret < 0) { 4706 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4707 ret); 4708 break; 4709 } 4710 } 4711 4712 if (!btrfs_is_empty_uuid(root_item.received_uuid)) { 4713 ret = btrfs_uuid_tree_add(trans, 4714 root_item.received_uuid, 4715 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 4716 key.objectid); 4717 if (ret < 0) { 4718 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4719 ret); 4720 break; 4721 } 4722 } 4723 4724 skip: 4725 btrfs_release_path(path); 4726 if (trans) { 4727 ret = btrfs_end_transaction(trans); 4728 trans = NULL; 4729 if (ret) 4730 break; 4731 } 4732 4733 if (key.offset < (u64)-1) { 4734 key.offset++; 4735 } else if (key.type < BTRFS_ROOT_ITEM_KEY) { 4736 key.offset = 0; 4737 key.type = BTRFS_ROOT_ITEM_KEY; 4738 } else if (key.objectid < (u64)-1) { 4739 key.offset = 0; 4740 key.type = BTRFS_ROOT_ITEM_KEY; 4741 key.objectid++; 4742 } else { 4743 break; 4744 } 4745 cond_resched(); 4746 } 4747 4748 out: 4749 btrfs_free_path(path); 4750 if (trans && !IS_ERR(trans)) 4751 btrfs_end_transaction(trans); 4752 if (ret) 4753 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread 
failed %d", ret); 4754 else if (!closing) 4755 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags); 4756 up(&fs_info->uuid_tree_rescan_sem); 4757 return 0; 4758 } 4759 4760 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info) 4761 { 4762 struct btrfs_trans_handle *trans; 4763 struct btrfs_root *tree_root = fs_info->tree_root; 4764 struct btrfs_root *uuid_root; 4765 struct task_struct *task; 4766 int ret; 4767 4768 /* 4769 * 1 - root node 4770 * 1 - root item 4771 */ 4772 trans = btrfs_start_transaction(tree_root, 2); 4773 if (IS_ERR(trans)) 4774 return PTR_ERR(trans); 4775 4776 uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID); 4777 if (IS_ERR(uuid_root)) { 4778 ret = PTR_ERR(uuid_root); 4779 btrfs_abort_transaction(trans, ret); 4780 btrfs_end_transaction(trans); 4781 return ret; 4782 } 4783 4784 fs_info->uuid_root = uuid_root; 4785 4786 ret = btrfs_commit_transaction(trans); 4787 if (ret) 4788 return ret; 4789 4790 down(&fs_info->uuid_tree_rescan_sem); 4791 task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid"); 4792 if (IS_ERR(task)) { 4793 /* fs_info->update_uuid_tree_gen remains 0 in all error case */ 4794 btrfs_warn(fs_info, "failed to start uuid_scan task"); 4795 up(&fs_info->uuid_tree_rescan_sem); 4796 return PTR_ERR(task); 4797 } 4798 4799 return 0; 4800 } 4801 4802 /* 4803 * shrinking a device means finding all of the device extents past 4804 * the new size, and then following the back refs to the chunks. 4805 * The chunk relocation code actually frees the device extent 4806 */ 4807 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) 4808 { 4809 struct btrfs_fs_info *fs_info = device->fs_info; 4810 struct btrfs_root *root = fs_info->dev_root; 4811 struct btrfs_trans_handle *trans; 4812 struct btrfs_dev_extent *dev_extent = NULL; 4813 struct btrfs_path *path; 4814 u64 length; 4815 u64 chunk_offset; 4816 int ret; 4817 int slot; 4818 int failed = 0; 4819 bool retried = false; 4820 struct extent_buffer *l; 4821 struct btrfs_key key; 4822 struct btrfs_super_block *super_copy = fs_info->super_copy; 4823 u64 old_total = btrfs_super_total_bytes(super_copy); 4824 u64 old_size = btrfs_device_get_total_bytes(device); 4825 u64 diff; 4826 u64 start; 4827 4828 new_size = round_down(new_size, fs_info->sectorsize); 4829 start = new_size; 4830 diff = round_down(old_size - new_size, fs_info->sectorsize); 4831 4832 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 4833 return -EINVAL; 4834 4835 path = btrfs_alloc_path(); 4836 if (!path) 4837 return -ENOMEM; 4838 4839 path->reada = READA_BACK; 4840 4841 trans = btrfs_start_transaction(root, 0); 4842 if (IS_ERR(trans)) { 4843 btrfs_free_path(path); 4844 return PTR_ERR(trans); 4845 } 4846 4847 mutex_lock(&fs_info->chunk_mutex); 4848 4849 btrfs_device_set_total_bytes(device, new_size); 4850 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 4851 device->fs_devices->total_rw_bytes -= diff; 4852 atomic64_sub(diff, &fs_info->free_chunk_space); 4853 } 4854 4855 /* 4856 * Once the device's size has been set to the new size, ensure all 4857 * in-memory chunks are synced to disk so that the loop below sees them 4858 * and relocates them accordingly. 
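* For example (illustrative numbers): when shrinking a device from 100G to 50G, a chunk allocated in the current transaction may own a device extent at, say, 60G that exists only in memory; committing here persists its dev extent item so the search loop below can find and relocate it.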
4859 */ 4860 if (contains_pending_extent(device, &start, diff)) { 4861 mutex_unlock(&fs_info->chunk_mutex); 4862 ret = btrfs_commit_transaction(trans); 4863 if (ret) 4864 goto done; 4865 } else { 4866 mutex_unlock(&fs_info->chunk_mutex); 4867 btrfs_end_transaction(trans); 4868 } 4869 4870 again: 4871 key.objectid = device->devid; 4872 key.offset = (u64)-1; 4873 key.type = BTRFS_DEV_EXTENT_KEY; 4874 4875 do { 4876 mutex_lock(&fs_info->reclaim_bgs_lock); 4877 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4878 if (ret < 0) { 4879 mutex_unlock(&fs_info->reclaim_bgs_lock); 4880 goto done; 4881 } 4882 4883 ret = btrfs_previous_item(root, path, 0, key.type); 4884 if (ret) { 4885 mutex_unlock(&fs_info->reclaim_bgs_lock); 4886 if (ret < 0) 4887 goto done; 4888 ret = 0; 4889 btrfs_release_path(path); 4890 break; 4891 } 4892 4893 l = path->nodes[0]; 4894 slot = path->slots[0]; 4895 btrfs_item_key_to_cpu(l, &key, path->slots[0]); 4896 4897 if (key.objectid != device->devid) { 4898 mutex_unlock(&fs_info->reclaim_bgs_lock); 4899 btrfs_release_path(path); 4900 break; 4901 } 4902 4903 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 4904 length = btrfs_dev_extent_length(l, dev_extent); 4905 4906 if (key.offset + length <= new_size) { 4907 mutex_unlock(&fs_info->reclaim_bgs_lock); 4908 btrfs_release_path(path); 4909 break; 4910 } 4911 4912 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); 4913 btrfs_release_path(path); 4914 4915 /* 4916 * We may be relocating the only data chunk we have, 4917 * which could potentially end up with losing data's 4918 * raid profile, so lets allocate an empty one in 4919 * advance. 4920 */ 4921 ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset); 4922 if (ret < 0) { 4923 mutex_unlock(&fs_info->reclaim_bgs_lock); 4924 goto done; 4925 } 4926 4927 ret = btrfs_relocate_chunk(fs_info, chunk_offset); 4928 mutex_unlock(&fs_info->reclaim_bgs_lock); 4929 if (ret == -ENOSPC) { 4930 failed++; 4931 } else if (ret) { 4932 if (ret == -ETXTBSY) { 4933 btrfs_warn(fs_info, 4934 "could not shrink block group %llu due to active swapfile", 4935 chunk_offset); 4936 } 4937 goto done; 4938 } 4939 } while (key.offset-- > 0); 4940 4941 if (failed && !retried) { 4942 failed = 0; 4943 retried = true; 4944 goto again; 4945 } else if (failed && retried) { 4946 ret = -ENOSPC; 4947 goto done; 4948 } 4949 4950 /* Shrinking succeeded, else we would be at "done". */ 4951 trans = btrfs_start_transaction(root, 0); 4952 if (IS_ERR(trans)) { 4953 ret = PTR_ERR(trans); 4954 goto done; 4955 } 4956 4957 mutex_lock(&fs_info->chunk_mutex); 4958 /* Clear all state bits beyond the shrunk device size */ 4959 clear_extent_bits(&device->alloc_state, new_size, (u64)-1, 4960 CHUNK_STATE_MASK); 4961 4962 btrfs_device_set_disk_total_bytes(device, new_size); 4963 if (list_empty(&device->post_commit_list)) 4964 list_add_tail(&device->post_commit_list, 4965 &trans->transaction->dev_update_list); 4966 4967 WARN_ON(diff > old_total); 4968 btrfs_set_super_total_bytes(super_copy, 4969 round_down(old_total - diff, fs_info->sectorsize)); 4970 mutex_unlock(&fs_info->chunk_mutex); 4971 4972 btrfs_reserve_chunk_metadata(trans, false); 4973 /* Now btrfs_update_device() will change the on-disk size. 
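* (i.e. the device item in the chunk tree is rewritten with the shrunk total_bytes)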
*/ 4974 ret = btrfs_update_device(trans, device); 4975 btrfs_trans_release_chunk_metadata(trans); 4976 if (ret < 0) { 4977 btrfs_abort_transaction(trans, ret); 4978 btrfs_end_transaction(trans); 4979 } else { 4980 ret = btrfs_commit_transaction(trans); 4981 } 4982 done: 4983 btrfs_free_path(path); 4984 if (ret) { 4985 mutex_lock(&fs_info->chunk_mutex); 4986 btrfs_device_set_total_bytes(device, old_size); 4987 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 4988 device->fs_devices->total_rw_bytes += diff; 4989 atomic64_add(diff, &fs_info->free_chunk_space); 4990 mutex_unlock(&fs_info->chunk_mutex); 4991 } 4992 return ret; 4993 } 4994 4995 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info, 4996 struct btrfs_key *key, 4997 struct btrfs_chunk *chunk, int item_size) 4998 { 4999 struct btrfs_super_block *super_copy = fs_info->super_copy; 5000 struct btrfs_disk_key disk_key; 5001 u32 array_size; 5002 u8 *ptr; 5003 5004 lockdep_assert_held(&fs_info->chunk_mutex); 5005 5006 array_size = btrfs_super_sys_array_size(super_copy); 5007 if (array_size + item_size + sizeof(disk_key) 5008 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) 5009 return -EFBIG; 5010 5011 ptr = super_copy->sys_chunk_array + array_size; 5012 btrfs_cpu_key_to_disk(&disk_key, key); 5013 memcpy(ptr, &disk_key, sizeof(disk_key)); 5014 ptr += sizeof(disk_key); 5015 memcpy(ptr, chunk, item_size); 5016 item_size += sizeof(disk_key); 5017 btrfs_set_super_sys_array_size(super_copy, array_size + item_size); 5018 5019 return 0; 5020 } 5021 5022 /* 5023 * sort the devices in descending order by max_avail, total_avail 5024 */ 5025 static int btrfs_cmp_device_info(const void *a, const void *b) 5026 { 5027 const struct btrfs_device_info *di_a = a; 5028 const struct btrfs_device_info *di_b = b; 5029 5030 if (di_a->max_avail > di_b->max_avail) 5031 return -1; 5032 if (di_a->max_avail < di_b->max_avail) 5033 return 1; 5034 if (di_a->total_avail > di_b->total_avail) 5035 return -1; 5036 if (di_a->total_avail < di_b->total_avail) 5037 return 1; 5038 return 0; 5039 } 5040 5041 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type) 5042 { 5043 if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK)) 5044 return; 5045 5046 btrfs_set_fs_incompat(info, RAID56); 5047 } 5048 5049 static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type) 5050 { 5051 if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4))) 5052 return; 5053 5054 btrfs_set_fs_incompat(info, RAID1C34); 5055 } 5056 5057 /* 5058 * Structure used internally for btrfs_create_chunk() function. 5059 * Wraps needed parameters. 
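* For example, a RAID1 chunk on a two-device filesystem ends up with num_stripes = 2, dev_stripes = 1, ncopies = 2 and nparity = 0, while DUP gets num_stripes = 2 via dev_stripes = 2 on a single device (values from btrfs_raid_array).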
5060 */ 5061 struct alloc_chunk_ctl { 5062 u64 start; 5063 u64 type; 5064 /* Total number of stripes to allocate */ 5065 int num_stripes; 5066 /* sub_stripes info for map */ 5067 int sub_stripes; 5068 /* Stripes per device */ 5069 int dev_stripes; 5070 /* Maximum number of devices to use */ 5071 int devs_max; 5072 /* Minimum number of devices to use */ 5073 int devs_min; 5074 /* ndevs has to be a multiple of this */ 5075 int devs_increment; 5076 /* Number of copies */ 5077 int ncopies; 5078 /* Number of stripes worth of bytes to store parity information */ 5079 int nparity; 5080 u64 max_stripe_size; 5081 u64 max_chunk_size; 5082 u64 dev_extent_min; 5083 u64 stripe_size; 5084 u64 chunk_size; 5085 int ndevs; 5086 }; 5087 5088 static void init_alloc_chunk_ctl_policy_regular( 5089 struct btrfs_fs_devices *fs_devices, 5090 struct alloc_chunk_ctl *ctl) 5091 { 5092 struct btrfs_space_info *space_info; 5093 5094 space_info = btrfs_find_space_info(fs_devices->fs_info, ctl->type); 5095 ASSERT(space_info); 5096 5097 ctl->max_chunk_size = READ_ONCE(space_info->chunk_size); 5098 ctl->max_stripe_size = ctl->max_chunk_size; 5099 5100 if (ctl->type & BTRFS_BLOCK_GROUP_SYSTEM) 5101 ctl->devs_max = min_t(int, ctl->devs_max, BTRFS_MAX_DEVS_SYS_CHUNK); 5102 5103 /* We don't want a chunk larger than 10% of writable space */ 5104 ctl->max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1), 5105 ctl->max_chunk_size); 5106 ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes; 5107 } 5108 5109 static void init_alloc_chunk_ctl_policy_zoned( 5110 struct btrfs_fs_devices *fs_devices, 5111 struct alloc_chunk_ctl *ctl) 5112 { 5113 u64 zone_size = fs_devices->fs_info->zone_size; 5114 u64 limit; 5115 int min_num_stripes = ctl->devs_min * ctl->dev_stripes; 5116 int min_data_stripes = (min_num_stripes - ctl->nparity) / ctl->ncopies; 5117 u64 min_chunk_size = min_data_stripes * zone_size; 5118 u64 type = ctl->type; 5119 5120 ctl->max_stripe_size = zone_size; 5121 if (type & BTRFS_BLOCK_GROUP_DATA) { 5122 ctl->max_chunk_size = round_down(BTRFS_MAX_DATA_CHUNK_SIZE, 5123 zone_size); 5124 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 5125 ctl->max_chunk_size = ctl->max_stripe_size; 5126 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 5127 ctl->max_chunk_size = 2 * ctl->max_stripe_size; 5128 ctl->devs_max = min_t(int, ctl->devs_max, 5129 BTRFS_MAX_DEVS_SYS_CHUNK); 5130 } else { 5131 BUG(); 5132 } 5133 5134 /* We don't want a chunk larger than 10% of writable space */ 5135 limit = max(round_down(div_factor(fs_devices->total_rw_bytes, 1), 5136 zone_size), 5137 min_chunk_size); 5138 ctl->max_chunk_size = min(limit, ctl->max_chunk_size); 5139 ctl->dev_extent_min = zone_size * ctl->dev_stripes; 5140 } 5141 5142 static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices, 5143 struct alloc_chunk_ctl *ctl) 5144 { 5145 int index = btrfs_bg_flags_to_raid_index(ctl->type); 5146 5147 ctl->sub_stripes = btrfs_raid_array[index].sub_stripes; 5148 ctl->dev_stripes = btrfs_raid_array[index].dev_stripes; 5149 ctl->devs_max = btrfs_raid_array[index].devs_max; 5150 if (!ctl->devs_max) 5151 ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info); 5152 ctl->devs_min = btrfs_raid_array[index].devs_min; 5153 ctl->devs_increment = btrfs_raid_array[index].devs_increment; 5154 ctl->ncopies = btrfs_raid_array[index].ncopies; 5155 ctl->nparity = btrfs_raid_array[index].nparity; 5156 ctl->ndevs = 0; 5157 5158 switch (fs_devices->chunk_alloc_policy) { 5159 case BTRFS_CHUNK_ALLOC_REGULAR: 5160 init_alloc_chunk_ctl_policy_regular(fs_devices, 
ctl); 5161 break; 5162 case BTRFS_CHUNK_ALLOC_ZONED: 5163 init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl); 5164 break; 5165 default: 5166 BUG(); 5167 } 5168 } 5169 5170 static int gather_device_info(struct btrfs_fs_devices *fs_devices, 5171 struct alloc_chunk_ctl *ctl, 5172 struct btrfs_device_info *devices_info) 5173 { 5174 struct btrfs_fs_info *info = fs_devices->fs_info; 5175 struct btrfs_device *device; 5176 u64 total_avail; 5177 u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes; 5178 int ret; 5179 int ndevs = 0; 5180 u64 max_avail; 5181 u64 dev_offset; 5182 5183 /* 5184 * in the first pass through the devices list, we gather information 5185 * about the available holes on each device. 5186 */ 5187 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { 5188 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 5189 WARN(1, KERN_ERR 5190 "BTRFS: read-only device in alloc_list\n"); 5191 continue; 5192 } 5193 5194 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 5195 &device->dev_state) || 5196 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 5197 continue; 5198 5199 if (device->total_bytes > device->bytes_used) 5200 total_avail = device->total_bytes - device->bytes_used; 5201 else 5202 total_avail = 0; 5203 5204 /* If there is no space on this device, skip it. */ 5205 if (total_avail < ctl->dev_extent_min) 5206 continue; 5207 5208 ret = find_free_dev_extent(device, dev_extent_want, &dev_offset, 5209 &max_avail); 5210 if (ret && ret != -ENOSPC) 5211 return ret; 5212 5213 if (ret == 0) 5214 max_avail = dev_extent_want; 5215 5216 if (max_avail < ctl->dev_extent_min) { 5217 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 5218 btrfs_debug(info, 5219 "%s: devid %llu has no free space, have=%llu want=%llu", 5220 __func__, device->devid, max_avail, 5221 ctl->dev_extent_min); 5222 continue; 5223 } 5224 5225 if (ndevs == fs_devices->rw_devices) { 5226 WARN(1, "%s: found more than %llu devices\n", 5227 __func__, fs_devices->rw_devices); 5228 break; 5229 } 5230 devices_info[ndevs].dev_offset = dev_offset; 5231 devices_info[ndevs].max_avail = max_avail; 5232 devices_info[ndevs].total_avail = total_avail; 5233 devices_info[ndevs].dev = device; 5234 ++ndevs; 5235 } 5236 ctl->ndevs = ndevs; 5237 5238 /* 5239 * now sort the devices by hole size / available space 5240 */ 5241 sort(devices_info, ndevs, sizeof(struct btrfs_device_info), 5242 btrfs_cmp_device_info, NULL); 5243 5244 return 0; 5245 } 5246 5247 static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl, 5248 struct btrfs_device_info *devices_info) 5249 { 5250 /* Number of stripes that count for block group size */ 5251 int data_stripes; 5252 5253 /* 5254 * The primary goal is to maximize the number of stripes, so use as 5255 * many devices as possible, even if the stripes are not maximum sized. 5256 * 5257 * The DUP profile stores more than one stripe per device, the 5258 * max_avail is the total size so we have to adjust. 5259 */ 5260 ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail, 5261 ctl->dev_stripes); 5262 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5263 5264 /* This will have to be fixed for RAID1 and RAID10 over more drives */ 5265 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5266 5267 /* 5268 * Use the number of data stripes to figure out how big this chunk is 5269 * really going to be in terms of logical address space, and compare 5270 * that answer with the max chunk size. If it's higher, we try to 5271 * reduce stripe_size. 
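* Worked example (illustrative numbers): with data_stripes = 4 and max_chunk_size = 10G, a candidate stripe_size of 3G would make a 12G chunk; it is first reduced to round_up(10G / 4, 16M) = 2.5G here, then capped to 1G below, for a final 4G chunk.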
5272 */ 5273 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) { 5274 /* 5275 * Reduce stripe_size, round it up to a 16MB boundary again and 5276 * then use it, unless it ends up being even bigger than the 5277 * previous value we had already. 5278 */ 5279 ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size, 5280 data_stripes), SZ_16M), 5281 ctl->stripe_size); 5282 } 5283 5284 /* Stripe size should not go beyond 1G. */ 5285 ctl->stripe_size = min_t(u64, ctl->stripe_size, SZ_1G); 5286 5287 /* Align to BTRFS_STRIPE_LEN */ 5288 ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN); 5289 ctl->chunk_size = ctl->stripe_size * data_stripes; 5290 5291 return 0; 5292 } 5293 5294 static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl, 5295 struct btrfs_device_info *devices_info) 5296 { 5297 u64 zone_size = devices_info[0].dev->zone_info->zone_size; 5298 /* Number of stripes that count for block group size */ 5299 int data_stripes; 5300 5301 /* 5302 * This should hold because: 5303 * dev_extent_min == dev_extent_want == zone_size * dev_stripes 5304 */ 5305 ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min); 5306 5307 ctl->stripe_size = zone_size; 5308 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5309 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5310 5311 /* stripe_size is fixed in a zoned filesystem. Reduce ndevs instead. */ 5312 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) { 5313 ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies, 5314 ctl->stripe_size) + ctl->nparity, 5315 ctl->dev_stripes); 5316 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5317 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5318 ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size); 5319 } 5320 5321 ctl->chunk_size = ctl->stripe_size * data_stripes; 5322 5323 return 0; 5324 } 5325 5326 static int decide_stripe_size(struct btrfs_fs_devices *fs_devices, 5327 struct alloc_chunk_ctl *ctl, 5328 struct btrfs_device_info *devices_info) 5329 { 5330 struct btrfs_fs_info *info = fs_devices->fs_info; 5331 5332 /* 5333 * Round down to the number of usable stripes; devs_increment can be 5334 * any number, so we can't use round_down(), which requires a power 5335 * of 2, while rounddown() is safe.
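* For example, RAID10 has devs_increment = 2 (per btrfs_raid_array), so ndevs = 5 is rounded down to 4 usable devices here.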
5336 */ 5337 ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment); 5338 5339 if (ctl->ndevs < ctl->devs_min) { 5340 if (btrfs_test_opt(info, ENOSPC_DEBUG)) { 5341 btrfs_debug(info, 5342 "%s: not enough devices with free space: have=%d minimum required=%d", 5343 __func__, ctl->ndevs, ctl->devs_min); 5344 } 5345 return -ENOSPC; 5346 } 5347 5348 ctl->ndevs = min(ctl->ndevs, ctl->devs_max); 5349 5350 switch (fs_devices->chunk_alloc_policy) { 5351 case BTRFS_CHUNK_ALLOC_REGULAR: 5352 return decide_stripe_size_regular(ctl, devices_info); 5353 case BTRFS_CHUNK_ALLOC_ZONED: 5354 return decide_stripe_size_zoned(ctl, devices_info); 5355 default: 5356 BUG(); 5357 } 5358 } 5359 5360 static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans, 5361 struct alloc_chunk_ctl *ctl, 5362 struct btrfs_device_info *devices_info) 5363 { 5364 struct btrfs_fs_info *info = trans->fs_info; 5365 struct map_lookup *map = NULL; 5366 struct extent_map_tree *em_tree; 5367 struct btrfs_block_group *block_group; 5368 struct extent_map *em; 5369 u64 start = ctl->start; 5370 u64 type = ctl->type; 5371 int ret; 5372 int i; 5373 int j; 5374 5375 map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS); 5376 if (!map) 5377 return ERR_PTR(-ENOMEM); 5378 map->num_stripes = ctl->num_stripes; 5379 5380 for (i = 0; i < ctl->ndevs; ++i) { 5381 for (j = 0; j < ctl->dev_stripes; ++j) { 5382 int s = i * ctl->dev_stripes + j; 5383 map->stripes[s].dev = devices_info[i].dev; 5384 map->stripes[s].physical = devices_info[i].dev_offset + 5385 j * ctl->stripe_size; 5386 } 5387 } 5388 map->stripe_len = BTRFS_STRIPE_LEN; 5389 map->io_align = BTRFS_STRIPE_LEN; 5390 map->io_width = BTRFS_STRIPE_LEN; 5391 map->type = type; 5392 map->sub_stripes = ctl->sub_stripes; 5393 5394 trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size); 5395 5396 em = alloc_extent_map(); 5397 if (!em) { 5398 kfree(map); 5399 return ERR_PTR(-ENOMEM); 5400 } 5401 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 5402 em->map_lookup = map; 5403 em->start = start; 5404 em->len = ctl->chunk_size; 5405 em->block_start = 0; 5406 em->block_len = em->len; 5407 em->orig_block_len = ctl->stripe_size; 5408 5409 em_tree = &info->mapping_tree; 5410 write_lock(&em_tree->lock); 5411 ret = add_extent_mapping(em_tree, em, 0); 5412 if (ret) { 5413 write_unlock(&em_tree->lock); 5414 free_extent_map(em); 5415 return ERR_PTR(ret); 5416 } 5417 write_unlock(&em_tree->lock); 5418 5419 block_group = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size); 5420 if (IS_ERR(block_group)) 5421 goto error_del_extent; 5422 5423 for (i = 0; i < map->num_stripes; i++) { 5424 struct btrfs_device *dev = map->stripes[i].dev; 5425 5426 btrfs_device_set_bytes_used(dev, 5427 dev->bytes_used + ctl->stripe_size); 5428 if (list_empty(&dev->post_commit_list)) 5429 list_add_tail(&dev->post_commit_list, 5430 &trans->transaction->dev_update_list); 5431 } 5432 5433 atomic64_sub(ctl->stripe_size * map->num_stripes, 5434 &info->free_chunk_space); 5435 5436 free_extent_map(em); 5437 check_raid56_incompat_flag(info, type); 5438 check_raid1c34_incompat_flag(info, type); 5439 5440 return block_group; 5441 5442 error_del_extent: 5443 write_lock(&em_tree->lock); 5444 remove_extent_mapping(em_tree, em); 5445 write_unlock(&em_tree->lock); 5446 5447 /* One for our allocation */ 5448 free_extent_map(em); 5449 /* One for the tree reference */ 5450 free_extent_map(em); 5451 5452 return block_group; 5453 } 5454 5455 struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans, 5456 u64 
type) 5457 { 5458 struct btrfs_fs_info *info = trans->fs_info; 5459 struct btrfs_fs_devices *fs_devices = info->fs_devices; 5460 struct btrfs_device_info *devices_info = NULL; 5461 struct alloc_chunk_ctl ctl; 5462 struct btrfs_block_group *block_group; 5463 int ret; 5464 5465 lockdep_assert_held(&info->chunk_mutex); 5466 5467 if (!alloc_profile_is_valid(type, 0)) { 5468 ASSERT(0); 5469 return ERR_PTR(-EINVAL); 5470 } 5471 5472 if (list_empty(&fs_devices->alloc_list)) { 5473 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 5474 btrfs_debug(info, "%s: no writable device", __func__); 5475 return ERR_PTR(-ENOSPC); 5476 } 5477 5478 if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { 5479 btrfs_err(info, "invalid chunk type 0x%llx requested", type); 5480 ASSERT(0); 5481 return ERR_PTR(-EINVAL); 5482 } 5483 5484 ctl.start = find_next_chunk(info); 5485 ctl.type = type; 5486 init_alloc_chunk_ctl(fs_devices, &ctl); 5487 5488 devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info), 5489 GFP_NOFS); 5490 if (!devices_info) 5491 return ERR_PTR(-ENOMEM); 5492 5493 ret = gather_device_info(fs_devices, &ctl, devices_info); 5494 if (ret < 0) { 5495 block_group = ERR_PTR(ret); 5496 goto out; 5497 } 5498 5499 ret = decide_stripe_size(fs_devices, &ctl, devices_info); 5500 if (ret < 0) { 5501 block_group = ERR_PTR(ret); 5502 goto out; 5503 } 5504 5505 block_group = create_chunk(trans, &ctl, devices_info); 5506 5507 out: 5508 kfree(devices_info); 5509 return block_group; 5510 } 5511 5512 /* 5513 * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to the 5514 * phase 1 of chunk allocation. It belongs to phase 2 only when allocating system 5515 * chunks. 5516 * 5517 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 5518 * phases. 5519 */ 5520 int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans, 5521 struct btrfs_block_group *bg) 5522 { 5523 struct btrfs_fs_info *fs_info = trans->fs_info; 5524 struct btrfs_root *chunk_root = fs_info->chunk_root; 5525 struct btrfs_key key; 5526 struct btrfs_chunk *chunk; 5527 struct btrfs_stripe *stripe; 5528 struct extent_map *em; 5529 struct map_lookup *map; 5530 size_t item_size; 5531 int i; 5532 int ret; 5533 5534 /* 5535 * We take the chunk_mutex for 2 reasons: 5536 * 5537 * 1) Updates and insertions in the chunk btree must be done while holding 5538 * the chunk_mutex, as well as updating the system chunk array in the 5539 * superblock. See the comment on top of btrfs_chunk_alloc() for the 5540 * details; 5541 * 5542 * 2) To prevent races with the final phase of a device replace operation 5543 * that replaces the device object associated with the map's stripes, 5544 * because the device object's id can change at any time during that 5545 * final phase of the device replace operation 5546 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the 5547 * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID, 5548 * which would cause a failure when updating the device item, which does 5549 * not exists, or persisting a stripe of the chunk item with such ID. 5550 * Here we can't use the device_list_mutex because our caller already 5551 * has locked the chunk_mutex, and the final phase of device replace 5552 * acquires both mutexes - first the device_list_mutex and then the 5553 * chunk_mutex. Using any of those two mutexes protects us from a 5554 * concurrent device replace. 
5555 */ 5556 lockdep_assert_held(&fs_info->chunk_mutex); 5557 5558 em = btrfs_get_chunk_map(fs_info, bg->start, bg->length); 5559 if (IS_ERR(em)) { 5560 ret = PTR_ERR(em); 5561 btrfs_abort_transaction(trans, ret); 5562 return ret; 5563 } 5564 5565 map = em->map_lookup; 5566 item_size = btrfs_chunk_item_size(map->num_stripes); 5567 5568 chunk = kzalloc(item_size, GFP_NOFS); 5569 if (!chunk) { 5570 ret = -ENOMEM; 5571 btrfs_abort_transaction(trans, ret); 5572 goto out; 5573 } 5574 5575 for (i = 0; i < map->num_stripes; i++) { 5576 struct btrfs_device *device = map->stripes[i].dev; 5577 5578 ret = btrfs_update_device(trans, device); 5579 if (ret) 5580 goto out; 5581 } 5582 5583 stripe = &chunk->stripe; 5584 for (i = 0; i < map->num_stripes; i++) { 5585 struct btrfs_device *device = map->stripes[i].dev; 5586 const u64 dev_offset = map->stripes[i].physical; 5587 5588 btrfs_set_stack_stripe_devid(stripe, device->devid); 5589 btrfs_set_stack_stripe_offset(stripe, dev_offset); 5590 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE); 5591 stripe++; 5592 } 5593 5594 btrfs_set_stack_chunk_length(chunk, bg->length); 5595 btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID); 5596 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len); 5597 btrfs_set_stack_chunk_type(chunk, map->type); 5598 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes); 5599 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len); 5600 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len); 5601 btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize); 5602 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes); 5603 5604 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 5605 key.type = BTRFS_CHUNK_ITEM_KEY; 5606 key.offset = bg->start; 5607 5608 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size); 5609 if (ret) 5610 goto out; 5611 5612 set_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED, &bg->runtime_flags); 5613 5614 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 5615 ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size); 5616 if (ret) 5617 goto out; 5618 } 5619 5620 out: 5621 kfree(chunk); 5622 free_extent_map(em); 5623 return ret; 5624 } 5625 5626 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans) 5627 { 5628 struct btrfs_fs_info *fs_info = trans->fs_info; 5629 u64 alloc_profile; 5630 struct btrfs_block_group *meta_bg; 5631 struct btrfs_block_group *sys_bg; 5632 5633 /* 5634 * When adding a new device for sprouting, the seed device is read-only 5635 * so we must first allocate a metadata and a system chunk. But before 5636 * adding the block group items to the extent, device and chunk btrees, 5637 * we must first: 5638 * 5639 * 1) Create both chunks without doing any changes to the btrees, as 5640 * otherwise we would get -ENOSPC since the block groups from the 5641 * seed device are read-only; 5642 * 5643 * 2) Add the device item for the new sprout device - finishing the setup 5644 * of a new block group requires updating the device item in the chunk 5645 * btree, so it must exist when we attempt to do it. The previous step 5646 * ensures this does not fail with -ENOSPC. 5647 * 5648 * After that we can add the block group items to their btrees: 5649 * update existing device item in the chunk btree, add a new block group 5650 * item to the extent btree, add a new chunk item to the chunk btree and 5651 * finally add the new device extent items to the devices btree. 
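* This path is expected to be reached when a writable device is added to a mounted seed filesystem, e.g. via 'btrfs device add' (command for illustration).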
5652 */ 5653 5654 alloc_profile = btrfs_metadata_alloc_profile(fs_info); 5655 meta_bg = btrfs_create_chunk(trans, alloc_profile); 5656 if (IS_ERR(meta_bg)) 5657 return PTR_ERR(meta_bg); 5658 5659 alloc_profile = btrfs_system_alloc_profile(fs_info); 5660 sys_bg = btrfs_create_chunk(trans, alloc_profile); 5661 if (IS_ERR(sys_bg)) 5662 return PTR_ERR(sys_bg); 5663 5664 return 0; 5665 } 5666 5667 static inline int btrfs_chunk_max_errors(struct map_lookup *map) 5668 { 5669 const int index = btrfs_bg_flags_to_raid_index(map->type); 5670 5671 return btrfs_raid_array[index].tolerated_failures; 5672 } 5673 5674 bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset) 5675 { 5676 struct extent_map *em; 5677 struct map_lookup *map; 5678 int miss_ndevs = 0; 5679 int i; 5680 bool ret = true; 5681 5682 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 5683 if (IS_ERR(em)) 5684 return false; 5685 5686 map = em->map_lookup; 5687 for (i = 0; i < map->num_stripes; i++) { 5688 if (test_bit(BTRFS_DEV_STATE_MISSING, 5689 &map->stripes[i].dev->dev_state)) { 5690 miss_ndevs++; 5691 continue; 5692 } 5693 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, 5694 &map->stripes[i].dev->dev_state)) { 5695 ret = false; 5696 goto end; 5697 } 5698 } 5699 5700 /* 5701 * If the number of missing devices is larger than max errors, we can 5702 * not write the data into that chunk successfully. 5703 */ 5704 if (miss_ndevs > btrfs_chunk_max_errors(map)) 5705 ret = false; 5706 end: 5707 free_extent_map(em); 5708 return ret; 5709 } 5710 5711 void btrfs_mapping_tree_free(struct extent_map_tree *tree) 5712 { 5713 struct extent_map *em; 5714 5715 while (1) { 5716 write_lock(&tree->lock); 5717 em = lookup_extent_mapping(tree, 0, (u64)-1); 5718 if (em) 5719 remove_extent_mapping(tree, em); 5720 write_unlock(&tree->lock); 5721 if (!em) 5722 break; 5723 /* once for us */ 5724 free_extent_map(em); 5725 /* once for the tree */ 5726 free_extent_map(em); 5727 } 5728 } 5729 5730 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5731 { 5732 struct extent_map *em; 5733 struct map_lookup *map; 5734 enum btrfs_raid_types index; 5735 int ret = 1; 5736 5737 em = btrfs_get_chunk_map(fs_info, logical, len); 5738 if (IS_ERR(em)) 5739 /* 5740 * We could return errors for these cases, but that could get 5741 * ugly and we'd probably do the same thing which is just not do 5742 * anything else and exit, so return 1 so the callers don't try 5743 * to use other copies. 5744 */ 5745 return 1; 5746 5747 map = em->map_lookup; 5748 index = btrfs_bg_flags_to_raid_index(map->type); 5749 5750 /* Non-RAID56, use their ncopies from btrfs_raid_array. */ 5751 if (!(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)) 5752 ret = btrfs_raid_array[index].ncopies; 5753 else if (map->type & BTRFS_BLOCK_GROUP_RAID5) 5754 ret = 2; 5755 else if (map->type & BTRFS_BLOCK_GROUP_RAID6) 5756 /* 5757 * There could be two corrupted data stripes, we need 5758 * to loop retry in order to rebuild the correct data. 5759 * 5760 * Fail a stripe at a time on every retry except the 5761 * stripe under reconstruction. 
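* E.g. on a 4-device RAID6 this reports 4 copies, so the read path can retry up to 3 times, failing a different stripe on each retry.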
5762 */ 5763 ret = map->num_stripes; 5764 free_extent_map(em); 5765 5766 down_read(&fs_info->dev_replace.rwsem); 5767 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) && 5768 fs_info->dev_replace.tgtdev) 5769 ret++; 5770 up_read(&fs_info->dev_replace.rwsem); 5771 5772 return ret; 5773 } 5774 5775 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info, 5776 u64 logical) 5777 { 5778 struct extent_map *em; 5779 struct map_lookup *map; 5780 unsigned long len = fs_info->sectorsize; 5781 5782 if (!btrfs_fs_incompat(fs_info, RAID56)) 5783 return len; 5784 5785 em = btrfs_get_chunk_map(fs_info, logical, len); 5786 5787 if (!WARN_ON(IS_ERR(em))) { 5788 map = em->map_lookup; 5789 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 5790 len = map->stripe_len * nr_data_stripes(map); 5791 free_extent_map(em); 5792 } 5793 return len; 5794 } 5795 5796 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5797 { 5798 struct extent_map *em; 5799 struct map_lookup *map; 5800 int ret = 0; 5801 5802 if (!btrfs_fs_incompat(fs_info, RAID56)) 5803 return 0; 5804 5805 em = btrfs_get_chunk_map(fs_info, logical, len); 5806 5807 if(!WARN_ON(IS_ERR(em))) { 5808 map = em->map_lookup; 5809 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 5810 ret = 1; 5811 free_extent_map(em); 5812 } 5813 return ret; 5814 } 5815 5816 static int find_live_mirror(struct btrfs_fs_info *fs_info, 5817 struct map_lookup *map, int first, 5818 int dev_replace_is_ongoing) 5819 { 5820 int i; 5821 int num_stripes; 5822 int preferred_mirror; 5823 int tolerance; 5824 struct btrfs_device *srcdev; 5825 5826 ASSERT((map->type & 5827 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10))); 5828 5829 if (map->type & BTRFS_BLOCK_GROUP_RAID10) 5830 num_stripes = map->sub_stripes; 5831 else 5832 num_stripes = map->num_stripes; 5833 5834 switch (fs_info->fs_devices->read_policy) { 5835 default: 5836 /* Shouldn't happen, just warn and use pid instead of failing */ 5837 btrfs_warn_rl(fs_info, 5838 "unknown read_policy type %u, reset to pid", 5839 fs_info->fs_devices->read_policy); 5840 fs_info->fs_devices->read_policy = BTRFS_READ_POLICY_PID; 5841 fallthrough; 5842 case BTRFS_READ_POLICY_PID: 5843 preferred_mirror = first + (current->pid % num_stripes); 5844 break; 5845 } 5846 5847 if (dev_replace_is_ongoing && 5848 fs_info->dev_replace.cont_reading_from_srcdev_mode == 5849 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID) 5850 srcdev = fs_info->dev_replace.srcdev; 5851 else 5852 srcdev = NULL; 5853 5854 /* 5855 * try to avoid the drive that is the source drive for a 5856 * dev-replace procedure, only choose it if no other non-missing 5857 * mirror is available 5858 */ 5859 for (tolerance = 0; tolerance < 2; tolerance++) { 5860 if (map->stripes[preferred_mirror].dev->bdev && 5861 (tolerance || map->stripes[preferred_mirror].dev != srcdev)) 5862 return preferred_mirror; 5863 for (i = first; i < first + num_stripes; i++) { 5864 if (map->stripes[i].dev->bdev && 5865 (tolerance || map->stripes[i].dev != srcdev)) 5866 return i; 5867 } 5868 } 5869 5870 /* we couldn't find one that doesn't fail. 
Just return something 5871 * and the io error handling code will clean up eventually 5872 */ 5873 return preferred_mirror; 5874 } 5875 5876 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */ 5877 static void sort_parity_stripes(struct btrfs_io_context *bioc, int num_stripes) 5878 { 5879 int i; 5880 int again = 1; 5881 5882 while (again) { 5883 again = 0; 5884 for (i = 0; i < num_stripes - 1; i++) { 5885 /* Swap if parity is on a smaller index */ 5886 if (bioc->raid_map[i] > bioc->raid_map[i + 1]) { 5887 swap(bioc->stripes[i], bioc->stripes[i + 1]); 5888 swap(bioc->raid_map[i], bioc->raid_map[i + 1]); 5889 again = 1; 5890 } 5891 } 5892 } 5893 } 5894 5895 static struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info, 5896 int total_stripes, 5897 int real_stripes) 5898 { 5899 struct btrfs_io_context *bioc = kzalloc( 5900 /* The size of btrfs_io_context */ 5901 sizeof(struct btrfs_io_context) + 5902 /* Plus the variable array for the stripes */ 5903 sizeof(struct btrfs_io_stripe) * (total_stripes) + 5904 /* Plus the variable array for the tgt dev */ 5905 sizeof(int) * (real_stripes) + 5906 /* 5907 * Plus the raid_map, which includes both the tgt dev 5908 * and the stripes. 5909 */ 5910 sizeof(u64) * (total_stripes), 5911 GFP_NOFS|__GFP_NOFAIL); 5912 5913 refcount_set(&bioc->refs, 1); 5914 5915 bioc->fs_info = fs_info; 5916 bioc->tgtdev_map = (int *)(bioc->stripes + total_stripes); 5917 bioc->raid_map = (u64 *)(bioc->tgtdev_map + real_stripes); 5918 5919 return bioc; 5920 } 5921 5922 void btrfs_get_bioc(struct btrfs_io_context *bioc) 5923 { 5924 WARN_ON(!refcount_read(&bioc->refs)); 5925 refcount_inc(&bioc->refs); 5926 } 5927 5928 void btrfs_put_bioc(struct btrfs_io_context *bioc) 5929 { 5930 if (!bioc) 5931 return; 5932 if (refcount_dec_and_test(&bioc->refs)) 5933 kfree(bioc); 5934 } 5935 5936 /* 5937 * Please note that, discard won't be sent to target device of device 5938 * replace. 
5939 */ 5940 struct btrfs_discard_stripe *btrfs_map_discard(struct btrfs_fs_info *fs_info, 5941 u64 logical, u64 *length_ret, 5942 u32 *num_stripes) 5943 { 5944 struct extent_map *em; 5945 struct map_lookup *map; 5946 struct btrfs_discard_stripe *stripes; 5947 u64 length = *length_ret; 5948 u64 offset; 5949 u64 stripe_nr; 5950 u64 stripe_nr_end; 5951 u64 stripe_end_offset; 5952 u64 stripe_cnt; 5953 u64 stripe_len; 5954 u64 stripe_offset; 5955 u32 stripe_index; 5956 u32 factor = 0; 5957 u32 sub_stripes = 0; 5958 u64 stripes_per_dev = 0; 5959 u32 remaining_stripes = 0; 5960 u32 last_stripe = 0; 5961 int ret; 5962 int i; 5963 5964 em = btrfs_get_chunk_map(fs_info, logical, length); 5965 if (IS_ERR(em)) 5966 return ERR_CAST(em); 5967 5968 map = em->map_lookup; 5969 5970 /* we don't discard raid56 yet */ 5971 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 5972 ret = -EOPNOTSUPP; 5973 goto out_free_map; 5974 } 5975 5976 offset = logical - em->start; 5977 length = min_t(u64, em->start + em->len - logical, length); 5978 *length_ret = length; 5979 5980 stripe_len = map->stripe_len; 5981 /* 5982 * stripe_nr counts the total number of stripes we have to stride 5983 * to get to this block 5984 */ 5985 stripe_nr = div64_u64(offset, stripe_len); 5986 5987 /* stripe_offset is the offset of this block in its stripe */ 5988 stripe_offset = offset - stripe_nr * stripe_len; 5989 5990 stripe_nr_end = round_up(offset + length, map->stripe_len); 5991 stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len); 5992 stripe_cnt = stripe_nr_end - stripe_nr; 5993 stripe_end_offset = stripe_nr_end * map->stripe_len - 5994 (offset + length); 5995 /* 5996 * after this, stripe_nr is the number of stripes on this 5997 * device we have to walk to find the data, and stripe_index is 5998 * the number of our device in the stripe array 5999 */ 6000 *num_stripes = 1; 6001 stripe_index = 0; 6002 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 6003 BTRFS_BLOCK_GROUP_RAID10)) { 6004 if (map->type & BTRFS_BLOCK_GROUP_RAID0) 6005 sub_stripes = 1; 6006 else 6007 sub_stripes = map->sub_stripes; 6008 6009 factor = map->num_stripes / sub_stripes; 6010 *num_stripes = min_t(u64, map->num_stripes, 6011 sub_stripes * stripe_cnt); 6012 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); 6013 stripe_index *= sub_stripes; 6014 stripes_per_dev = div_u64_rem(stripe_cnt, factor, 6015 &remaining_stripes); 6016 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe); 6017 last_stripe *= sub_stripes; 6018 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK | 6019 BTRFS_BLOCK_GROUP_DUP)) { 6020 *num_stripes = map->num_stripes; 6021 } else { 6022 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6023 &stripe_index); 6024 } 6025 6026 stripes = kcalloc(*num_stripes, sizeof(*stripes), GFP_NOFS); 6027 if (!stripes) { 6028 ret = -ENOMEM; 6029 goto out_free_map; 6030 } 6031 6032 for (i = 0; i < *num_stripes; i++) { 6033 stripes[i].physical = 6034 map->stripes[stripe_index].physical + 6035 stripe_offset + stripe_nr * map->stripe_len; 6036 stripes[i].dev = map->stripes[stripe_index].dev; 6037 6038 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 6039 BTRFS_BLOCK_GROUP_RAID10)) { 6040 stripes[i].length = stripes_per_dev * map->stripe_len; 6041 6042 if (i / sub_stripes < remaining_stripes) 6043 stripes[i].length += map->stripe_len; 6044 6045 /* 6046 * Special for the first stripe and 6047 * the last stripe: 6048 * 6049 * |-------|...|-------| 6050 * |----------| 6051 * off end_off 6052 */ 6053 if (i < sub_stripes) 6054 stripes[i].length -= stripe_offset; 6055 
6056 if (stripe_index >= last_stripe && 6057 stripe_index <= (last_stripe + 6058 sub_stripes - 1)) 6059 stripes[i].length -= stripe_end_offset; 6060 6061 if (i == sub_stripes - 1) 6062 stripe_offset = 0; 6063 } else { 6064 stripes[i].length = length; 6065 } 6066 6067 stripe_index++; 6068 if (stripe_index == map->num_stripes) { 6069 stripe_index = 0; 6070 stripe_nr++; 6071 } 6072 } 6073 6074 free_extent_map(em); 6075 return stripes; 6076 out_free_map: 6077 free_extent_map(em); 6078 return ERR_PTR(ret); 6079 } 6080 6081 /* 6082 * In dev-replace case, for repair case (that's the only case where the mirror 6083 * is selected explicitly when calling btrfs_map_block), blocks left of the 6084 * left cursor can also be read from the target drive. 6085 * 6086 * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the 6087 * array of stripes. 6088 * For READ, it also needs to be supported using the same mirror number. 6089 * 6090 * If the requested block is not left of the left cursor, EIO is returned. This 6091 * can happen because btrfs_num_copies() returns one more in the dev-replace 6092 * case. 6093 */ 6094 static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info, 6095 u64 logical, u64 length, 6096 u64 srcdev_devid, int *mirror_num, 6097 u64 *physical) 6098 { 6099 struct btrfs_io_context *bioc = NULL; 6100 int num_stripes; 6101 int index_srcdev = 0; 6102 int found = 0; 6103 u64 physical_of_found = 0; 6104 int i; 6105 int ret = 0; 6106 6107 ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, 6108 logical, &length, &bioc, NULL, NULL, 0); 6109 if (ret) { 6110 ASSERT(bioc == NULL); 6111 return ret; 6112 } 6113 6114 num_stripes = bioc->num_stripes; 6115 if (*mirror_num > num_stripes) { 6116 /* 6117 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror, 6118 * that means that the requested area is not left of the left 6119 * cursor 6120 */ 6121 btrfs_put_bioc(bioc); 6122 return -EIO; 6123 } 6124 6125 /* 6126 * process the rest of the function using the mirror_num of the source 6127 * drive. Therefore look it up first. At the end, patch the device 6128 * pointer to the one of the target drive. 
6129 */ 6130 for (i = 0; i < num_stripes; i++) { 6131 if (bioc->stripes[i].dev->devid != srcdev_devid) 6132 continue; 6133 6134 /* 6135 * In case of DUP, in order to keep it simple, only add the 6136 * mirror with the lowest physical address 6137 */ 6138 if (found && 6139 physical_of_found <= bioc->stripes[i].physical) 6140 continue; 6141 6142 index_srcdev = i; 6143 found = 1; 6144 physical_of_found = bioc->stripes[i].physical; 6145 } 6146 6147 btrfs_put_bioc(bioc); 6148 6149 ASSERT(found); 6150 if (!found) 6151 return -EIO; 6152 6153 *mirror_num = index_srcdev + 1; 6154 *physical = physical_of_found; 6155 return ret; 6156 } 6157 6158 static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical) 6159 { 6160 struct btrfs_block_group *cache; 6161 bool ret; 6162 6163 /* Non zoned filesystem does not use "to_copy" flag */ 6164 if (!btrfs_is_zoned(fs_info)) 6165 return false; 6166 6167 cache = btrfs_lookup_block_group(fs_info, logical); 6168 6169 ret = test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags); 6170 6171 btrfs_put_block_group(cache); 6172 return ret; 6173 } 6174 6175 static void handle_ops_on_dev_replace(enum btrfs_map_op op, 6176 struct btrfs_io_context **bioc_ret, 6177 struct btrfs_dev_replace *dev_replace, 6178 u64 logical, 6179 int *num_stripes_ret, int *max_errors_ret) 6180 { 6181 struct btrfs_io_context *bioc = *bioc_ret; 6182 u64 srcdev_devid = dev_replace->srcdev->devid; 6183 int tgtdev_indexes = 0; 6184 int num_stripes = *num_stripes_ret; 6185 int max_errors = *max_errors_ret; 6186 int i; 6187 6188 if (op == BTRFS_MAP_WRITE) { 6189 int index_where_to_add; 6190 6191 /* 6192 * A block group which have "to_copy" set will eventually 6193 * copied by dev-replace process. We can avoid cloning IO here. 6194 */ 6195 if (is_block_group_to_copy(dev_replace->srcdev->fs_info, logical)) 6196 return; 6197 6198 /* 6199 * duplicate the write operations while the dev replace 6200 * procedure is running. Since the copying of the old disk to 6201 * the new disk takes place at run time while the filesystem is 6202 * mounted writable, the regular write operations to the old 6203 * disk have to be duplicated to go to the new disk as well. 6204 * 6205 * Note that device->missing is handled by the caller, and that 6206 * the write to the old disk is already set up in the stripes 6207 * array. 6208 */ 6209 index_where_to_add = num_stripes; 6210 for (i = 0; i < num_stripes; i++) { 6211 if (bioc->stripes[i].dev->devid == srcdev_devid) { 6212 /* write to new disk, too */ 6213 struct btrfs_io_stripe *new = 6214 bioc->stripes + index_where_to_add; 6215 struct btrfs_io_stripe *old = 6216 bioc->stripes + i; 6217 6218 new->physical = old->physical; 6219 new->dev = dev_replace->tgtdev; 6220 bioc->tgtdev_map[i] = index_where_to_add; 6221 index_where_to_add++; 6222 max_errors++; 6223 tgtdev_indexes++; 6224 } 6225 } 6226 num_stripes = index_where_to_add; 6227 } else if (op == BTRFS_MAP_GET_READ_MIRRORS) { 6228 int index_srcdev = 0; 6229 int found = 0; 6230 u64 physical_of_found = 0; 6231 6232 /* 6233 * During the dev-replace procedure, the target drive can also 6234 * be used to read data in case it is needed to repair a corrupt 6235 * block elsewhere. This is possible if the requested area is 6236 * left of the left cursor. In this area, the target drive is a 6237 * full copy of the source drive. 
6238 */ 6239 for (i = 0; i < num_stripes; i++) { 6240 if (bioc->stripes[i].dev->devid == srcdev_devid) { 6241 /* 6242 * In case of DUP, in order to keep it simple, 6243 * only add the mirror with the lowest physical 6244 * address 6245 */ 6246 if (found && 6247 physical_of_found <= bioc->stripes[i].physical) 6248 continue; 6249 index_srcdev = i; 6250 found = 1; 6251 physical_of_found = bioc->stripes[i].physical; 6252 } 6253 } 6254 if (found) { 6255 struct btrfs_io_stripe *tgtdev_stripe = 6256 bioc->stripes + num_stripes; 6257 6258 tgtdev_stripe->physical = physical_of_found; 6259 tgtdev_stripe->dev = dev_replace->tgtdev; 6260 bioc->tgtdev_map[index_srcdev] = num_stripes; 6261 6262 tgtdev_indexes++; 6263 num_stripes++; 6264 } 6265 } 6266 6267 *num_stripes_ret = num_stripes; 6268 *max_errors_ret = max_errors; 6269 bioc->num_tgtdevs = tgtdev_indexes; 6270 *bioc_ret = bioc; 6271 } 6272 6273 static bool need_full_stripe(enum btrfs_map_op op) 6274 { 6275 return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS); 6276 } 6277 6278 /* 6279 * Calculate the geometry of a particular (address, len) tuple. This 6280 * information is used to calculate how big a particular bio can get before it 6281 * straddles a stripe. 6282 * 6283 * @fs_info: the filesystem 6284 * @em: mapping containing the logical extent 6285 * @op: type of operation - write or read 6286 * @logical: address that we want to figure out the geometry of 6287 * @io_geom: pointer used to return values 6288 * 6289 * Returns < 0 in case a chunk for the given logical address cannot be found, 6290 * usually shouldn't happen unless @logical is corrupted, 0 otherwise. 6291 */ 6292 int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *em, 6293 enum btrfs_map_op op, u64 logical, 6294 struct btrfs_io_geometry *io_geom) 6295 { 6296 struct map_lookup *map; 6297 u64 len; 6298 u64 offset; 6299 u64 stripe_offset; 6300 u64 stripe_nr; 6301 u32 stripe_len; 6302 u64 raid56_full_stripe_start = (u64)-1; 6303 int data_stripes; 6304 6305 ASSERT(op != BTRFS_MAP_DISCARD); 6306 6307 map = em->map_lookup; 6308 /* Offset of this logical address in the chunk */ 6309 offset = logical - em->start; 6310 /* Len of a stripe in a chunk */ 6311 stripe_len = map->stripe_len; 6312 /* 6313 * Stripe_nr is where this block falls in 6314 * stripe_offset is the offset of this block in its stripe. 6315 */ 6316 stripe_nr = div64_u64_rem(offset, stripe_len, &stripe_offset); 6317 ASSERT(stripe_offset < U32_MAX); 6318 6319 data_stripes = nr_data_stripes(map); 6320 6321 /* Only stripe based profiles needs to check against stripe length. */ 6322 if (map->type & BTRFS_BLOCK_GROUP_STRIPE_MASK) { 6323 u64 max_len = stripe_len - stripe_offset; 6324 6325 /* 6326 * In case of raid56, we need to know the stripe aligned start 6327 */ 6328 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 6329 unsigned long full_stripe_len = stripe_len * data_stripes; 6330 raid56_full_stripe_start = offset; 6331 6332 /* 6333 * Allow a write of a full stripe, but make sure we 6334 * don't allow straddling of stripes 6335 */ 6336 raid56_full_stripe_start = div64_u64(raid56_full_stripe_start, 6337 full_stripe_len); 6338 raid56_full_stripe_start *= full_stripe_len; 6339 6340 /* 6341 * For writes to RAID[56], allow a full stripeset across 6342 * all disks. For other RAID types and for RAID[56] 6343 * reads, just allow a single stripe (on a single disk). 
6344 */ 6345 if (op == BTRFS_MAP_WRITE) { 6346 max_len = stripe_len * data_stripes - 6347 (offset - raid56_full_stripe_start); 6348 } 6349 } 6350 len = min_t(u64, em->len - offset, max_len); 6351 } else { 6352 len = em->len - offset; 6353 } 6354 6355 io_geom->len = len; 6356 io_geom->offset = offset; 6357 io_geom->stripe_len = stripe_len; 6358 io_geom->stripe_nr = stripe_nr; 6359 io_geom->stripe_offset = stripe_offset; 6360 io_geom->raid56_stripe_offset = raid56_full_stripe_start; 6361 6362 return 0; 6363 } 6364 6365 static void set_io_stripe(struct btrfs_io_stripe *dst, const struct map_lookup *map, 6366 u32 stripe_index, u64 stripe_offset, u64 stripe_nr) 6367 { 6368 dst->dev = map->stripes[stripe_index].dev; 6369 dst->physical = map->stripes[stripe_index].physical + 6370 stripe_offset + stripe_nr * map->stripe_len; 6371 } 6372 6373 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, 6374 enum btrfs_map_op op, u64 logical, u64 *length, 6375 struct btrfs_io_context **bioc_ret, 6376 struct btrfs_io_stripe *smap, 6377 int *mirror_num_ret, int need_raid_map) 6378 { 6379 struct extent_map *em; 6380 struct map_lookup *map; 6381 u64 stripe_offset; 6382 u64 stripe_nr; 6383 u64 stripe_len; 6384 u32 stripe_index; 6385 int data_stripes; 6386 int i; 6387 int ret = 0; 6388 int mirror_num = (mirror_num_ret ? *mirror_num_ret : 0); 6389 int num_stripes; 6390 int max_errors = 0; 6391 int tgtdev_indexes = 0; 6392 struct btrfs_io_context *bioc = NULL; 6393 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; 6394 int dev_replace_is_ongoing = 0; 6395 int num_alloc_stripes; 6396 int patch_the_first_stripe_for_dev_replace = 0; 6397 u64 physical_to_patch_in_first_stripe = 0; 6398 u64 raid56_full_stripe_start = (u64)-1; 6399 struct btrfs_io_geometry geom; 6400 6401 ASSERT(bioc_ret); 6402 ASSERT(op != BTRFS_MAP_DISCARD); 6403 6404 em = btrfs_get_chunk_map(fs_info, logical, *length); 6405 ASSERT(!IS_ERR(em)); 6406 6407 ret = btrfs_get_io_geometry(fs_info, em, op, logical, &geom); 6408 if (ret < 0) 6409 return ret; 6410 6411 map = em->map_lookup; 6412 6413 *length = geom.len; 6414 stripe_len = geom.stripe_len; 6415 stripe_nr = geom.stripe_nr; 6416 stripe_offset = geom.stripe_offset; 6417 raid56_full_stripe_start = geom.raid56_stripe_offset; 6418 data_stripes = nr_data_stripes(map); 6419 6420 down_read(&dev_replace->rwsem); 6421 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace); 6422 /* 6423 * Hold the semaphore for read during the whole operation, write is 6424 * requested at commit time but must wait. 
6425 */ 6426 if (!dev_replace_is_ongoing) 6427 up_read(&dev_replace->rwsem); 6428 6429 if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 && 6430 !need_full_stripe(op) && dev_replace->tgtdev != NULL) { 6431 ret = get_extra_mirror_from_replace(fs_info, logical, *length, 6432 dev_replace->srcdev->devid, 6433 &mirror_num, 6434 &physical_to_patch_in_first_stripe); 6435 if (ret) 6436 goto out; 6437 else 6438 patch_the_first_stripe_for_dev_replace = 1; 6439 } else if (mirror_num > map->num_stripes) { 6440 mirror_num = 0; 6441 } 6442 6443 num_stripes = 1; 6444 stripe_index = 0; 6445 if (map->type & BTRFS_BLOCK_GROUP_RAID0) { 6446 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6447 &stripe_index); 6448 if (!need_full_stripe(op)) 6449 mirror_num = 1; 6450 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) { 6451 if (need_full_stripe(op)) 6452 num_stripes = map->num_stripes; 6453 else if (mirror_num) 6454 stripe_index = mirror_num - 1; 6455 else { 6456 stripe_index = find_live_mirror(fs_info, map, 0, 6457 dev_replace_is_ongoing); 6458 mirror_num = stripe_index + 1; 6459 } 6460 6461 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { 6462 if (need_full_stripe(op)) { 6463 num_stripes = map->num_stripes; 6464 } else if (mirror_num) { 6465 stripe_index = mirror_num - 1; 6466 } else { 6467 mirror_num = 1; 6468 } 6469 6470 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { 6471 u32 factor = map->num_stripes / map->sub_stripes; 6472 6473 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); 6474 stripe_index *= map->sub_stripes; 6475 6476 if (need_full_stripe(op)) 6477 num_stripes = map->sub_stripes; 6478 else if (mirror_num) 6479 stripe_index += mirror_num - 1; 6480 else { 6481 int old_stripe_index = stripe_index; 6482 stripe_index = find_live_mirror(fs_info, map, 6483 stripe_index, 6484 dev_replace_is_ongoing); 6485 mirror_num = stripe_index - old_stripe_index + 1; 6486 } 6487 6488 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 6489 ASSERT(map->stripe_len == BTRFS_STRIPE_LEN); 6490 if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) { 6491 /* push stripe_nr back to the start of the full stripe */ 6492 stripe_nr = div64_u64(raid56_full_stripe_start, 6493 stripe_len * data_stripes); 6494 6495 /* RAID[56] write or recovery. Return all stripes */ 6496 num_stripes = map->num_stripes; 6497 max_errors = btrfs_chunk_max_errors(map); 6498 6499 /* Return the length to the full stripe end */ 6500 *length = min(logical + *length, 6501 raid56_full_stripe_start + em->start + 6502 data_stripes * stripe_len) - logical; 6503 stripe_index = 0; 6504 stripe_offset = 0; 6505 } else { 6506 /* 6507 * Mirror #0 or #1 means the original data block. 6508 * Mirror #2 is RAID5 parity block. 6509 * Mirror #3 is RAID6 Q block. 
6510 */ 6511 stripe_nr = div_u64_rem(stripe_nr, 6512 data_stripes, &stripe_index); 6513 if (mirror_num > 1) 6514 stripe_index = data_stripes + mirror_num - 2; 6515 6516 /* We distribute the parity blocks across stripes */ 6517 div_u64_rem(stripe_nr + stripe_index, map->num_stripes, 6518 &stripe_index); 6519 if (!need_full_stripe(op) && mirror_num <= 1) 6520 mirror_num = 1; 6521 } 6522 } else { 6523 /* 6524 * after this, stripe_nr is the number of stripes on this 6525 * device we have to walk to find the data, and stripe_index is 6526 * the number of our device in the stripe array 6527 */ 6528 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6529 &stripe_index); 6530 mirror_num = stripe_index + 1; 6531 } 6532 if (stripe_index >= map->num_stripes) { 6533 btrfs_crit(fs_info, 6534 "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u", 6535 stripe_index, map->num_stripes); 6536 ret = -EINVAL; 6537 goto out; 6538 } 6539 6540 num_alloc_stripes = num_stripes; 6541 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) { 6542 if (op == BTRFS_MAP_WRITE) 6543 num_alloc_stripes <<= 1; 6544 if (op == BTRFS_MAP_GET_READ_MIRRORS) 6545 num_alloc_stripes++; 6546 tgtdev_indexes = num_stripes; 6547 } 6548 6549 /* 6550 * If this I/O maps to a single device, try to return the device and 6551 * physical block information on the stack instead of allocating an 6552 * I/O context structure. 6553 */ 6554 if (smap && num_alloc_stripes == 1 && 6555 !((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && mirror_num > 1) && 6556 (!need_full_stripe(op) || !dev_replace_is_ongoing || 6557 !dev_replace->tgtdev)) { 6558 if (patch_the_first_stripe_for_dev_replace) { 6559 smap->dev = dev_replace->tgtdev; 6560 smap->physical = physical_to_patch_in_first_stripe; 6561 *mirror_num_ret = map->num_stripes + 1; 6562 } else { 6563 set_io_stripe(smap, map, stripe_index, stripe_offset, 6564 stripe_nr); 6565 *mirror_num_ret = mirror_num; 6566 } 6567 *bioc_ret = NULL; 6568 ret = 0; 6569 goto out; 6570 } 6571 6572 bioc = alloc_btrfs_io_context(fs_info, num_alloc_stripes, tgtdev_indexes); 6573 if (!bioc) { 6574 ret = -ENOMEM; 6575 goto out; 6576 } 6577 6578 for (i = 0; i < num_stripes; i++) { 6579 set_io_stripe(&bioc->stripes[i], map, stripe_index, stripe_offset, 6580 stripe_nr); 6581 stripe_index++; 6582 } 6583 6584 /* Build raid_map */ 6585 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map && 6586 (need_full_stripe(op) || mirror_num > 1)) { 6587 u64 tmp; 6588 unsigned rot; 6589 6590 /* Work out the disk rotation on this stripe-set */ 6591 div_u64_rem(stripe_nr, num_stripes, &rot); 6592 6593 /* Fill in the logical address of each stripe */ 6594 tmp = stripe_nr * data_stripes; 6595 for (i = 0; i < data_stripes; i++) 6596 bioc->raid_map[(i + rot) % num_stripes] = 6597 em->start + (tmp + i) * map->stripe_len; 6598 6599 bioc->raid_map[(i + rot) % map->num_stripes] = RAID5_P_STRIPE; 6600 if (map->type & BTRFS_BLOCK_GROUP_RAID6) 6601 bioc->raid_map[(i + rot + 1) % num_stripes] = 6602 RAID6_Q_STRIPE; 6603 6604 sort_parity_stripes(bioc, num_stripes); 6605 } 6606 6607 if (need_full_stripe(op)) 6608 max_errors = btrfs_chunk_max_errors(map); 6609 6610 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL && 6611 need_full_stripe(op)) { 6612 handle_ops_on_dev_replace(op, &bioc, dev_replace, logical, 6613 &num_stripes, &max_errors); 6614 } 6615 6616 *bioc_ret = bioc; 6617 bioc->map_type = map->type; 6618 bioc->num_stripes = num_stripes; 6619 bioc->max_errors = max_errors; 6620 bioc->mirror_num = mirror_num; 
6621 6622 /* 6623 * this is the case that REQ_READ && dev_replace_is_ongoing && 6624 * mirror_num == num_stripes + 1 && dev_replace target drive is 6625 * available as a mirror 6626 */ 6627 if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) { 6628 WARN_ON(num_stripes > 1); 6629 bioc->stripes[0].dev = dev_replace->tgtdev; 6630 bioc->stripes[0].physical = physical_to_patch_in_first_stripe; 6631 bioc->mirror_num = map->num_stripes + 1; 6632 } 6633 out: 6634 if (dev_replace_is_ongoing) { 6635 lockdep_assert_held(&dev_replace->rwsem); 6636 /* Unlock and let waiting writers proceed */ 6637 up_read(&dev_replace->rwsem); 6638 } 6639 free_extent_map(em); 6640 return ret; 6641 } 6642 6643 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, 6644 u64 logical, u64 *length, 6645 struct btrfs_io_context **bioc_ret, int mirror_num) 6646 { 6647 return __btrfs_map_block(fs_info, op, logical, length, bioc_ret, 6648 NULL, &mirror_num, 0); 6649 } 6650 6651 /* For Scrub/replace */ 6652 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, 6653 u64 logical, u64 *length, 6654 struct btrfs_io_context **bioc_ret) 6655 { 6656 return __btrfs_map_block(fs_info, op, logical, length, bioc_ret, 6657 NULL, NULL, 1); 6658 } 6659 6660 /* 6661 * Initialize a btrfs_bio structure. This skips the embedded bio itself as it 6662 * is already initialized by the block layer. 6663 */ 6664 static inline void btrfs_bio_init(struct btrfs_bio *bbio, 6665 btrfs_bio_end_io_t end_io, void *private) 6666 { 6667 memset(bbio, 0, offsetof(struct btrfs_bio, bio)); 6668 bbio->end_io = end_io; 6669 bbio->private = private; 6670 } 6671 6672 /* 6673 * Allocate a btrfs_bio structure. The btrfs_bio is the main I/O container for 6674 * btrfs, and is used for all I/O submitted through btrfs_submit_bio. 6675 * 6676 * Just like the underlying bio_alloc_bioset it will not fail as it is backed by 6677 * a mempool. 
6678 */ 6679 struct bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf, 6680 btrfs_bio_end_io_t end_io, void *private) 6681 { 6682 struct bio *bio; 6683 6684 bio = bio_alloc_bioset(NULL, nr_vecs, opf, GFP_NOFS, &btrfs_bioset); 6685 btrfs_bio_init(btrfs_bio(bio), end_io, private); 6686 return bio; 6687 } 6688 6689 struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size, 6690 btrfs_bio_end_io_t end_io, void *private) 6691 { 6692 struct bio *bio; 6693 struct btrfs_bio *bbio; 6694 6695 ASSERT(offset <= UINT_MAX && size <= UINT_MAX); 6696 6697 bio = bio_alloc_clone(orig->bi_bdev, orig, GFP_NOFS, &btrfs_bioset); 6698 bbio = btrfs_bio(bio); 6699 btrfs_bio_init(bbio, end_io, private); 6700 6701 bio_trim(bio, offset >> 9, size >> 9); 6702 bbio->iter = bio->bi_iter; 6703 return bio; 6704 } 6705 6706 static void btrfs_log_dev_io_error(struct bio *bio, struct btrfs_device *dev) 6707 { 6708 if (!dev || !dev->bdev) 6709 return; 6710 if (bio->bi_status != BLK_STS_IOERR && bio->bi_status != BLK_STS_TARGET) 6711 return; 6712 6713 if (btrfs_op(bio) == BTRFS_MAP_WRITE) 6714 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS); 6715 if (!(bio->bi_opf & REQ_RAHEAD)) 6716 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS); 6717 if (bio->bi_opf & REQ_PREFLUSH) 6718 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_FLUSH_ERRS); 6719 } 6720 6721 static struct workqueue_struct *btrfs_end_io_wq(struct btrfs_fs_info *fs_info, 6722 struct bio *bio) 6723 { 6724 if (bio->bi_opf & REQ_META) 6725 return fs_info->endio_meta_workers; 6726 return fs_info->endio_workers; 6727 } 6728 6729 static void btrfs_end_bio_work(struct work_struct *work) 6730 { 6731 struct btrfs_bio *bbio = 6732 container_of(work, struct btrfs_bio, end_io_work); 6733 6734 bbio->end_io(bbio); 6735 } 6736 6737 static void btrfs_simple_end_io(struct bio *bio) 6738 { 6739 struct btrfs_fs_info *fs_info = bio->bi_private; 6740 struct btrfs_bio *bbio = btrfs_bio(bio); 6741 6742 btrfs_bio_counter_dec(fs_info); 6743 6744 if (bio->bi_status) 6745 btrfs_log_dev_io_error(bio, bbio->device); 6746 6747 if (bio_op(bio) == REQ_OP_READ) { 6748 INIT_WORK(&bbio->end_io_work, btrfs_end_bio_work); 6749 queue_work(btrfs_end_io_wq(fs_info, bio), &bbio->end_io_work); 6750 } else { 6751 bbio->end_io(bbio); 6752 } 6753 } 6754 6755 static void btrfs_raid56_end_io(struct bio *bio) 6756 { 6757 struct btrfs_io_context *bioc = bio->bi_private; 6758 struct btrfs_bio *bbio = btrfs_bio(bio); 6759 6760 btrfs_bio_counter_dec(bioc->fs_info); 6761 bbio->mirror_num = bioc->mirror_num; 6762 bbio->end_io(bbio); 6763 6764 btrfs_put_bioc(bioc); 6765 } 6766 6767 static void btrfs_orig_write_end_io(struct bio *bio) 6768 { 6769 struct btrfs_io_stripe *stripe = bio->bi_private; 6770 struct btrfs_io_context *bioc = stripe->bioc; 6771 struct btrfs_bio *bbio = btrfs_bio(bio); 6772 6773 btrfs_bio_counter_dec(bioc->fs_info); 6774 6775 if (bio->bi_status) { 6776 atomic_inc(&bioc->error); 6777 btrfs_log_dev_io_error(bio, stripe->dev); 6778 } 6779 6780 /* 6781 * Only send an error to the higher layers if it is beyond the tolerance 6782 * threshold. 
6783 */ 6784 if (atomic_read(&bioc->error) > bioc->max_errors) 6785 bio->bi_status = BLK_STS_IOERR; 6786 else 6787 bio->bi_status = BLK_STS_OK; 6788 6789 bbio->end_io(bbio); 6790 btrfs_put_bioc(bioc); 6791 } 6792 6793 static void btrfs_clone_write_end_io(struct bio *bio) 6794 { 6795 struct btrfs_io_stripe *stripe = bio->bi_private; 6796 6797 if (bio->bi_status) { 6798 atomic_inc(&stripe->bioc->error); 6799 btrfs_log_dev_io_error(bio, stripe->dev); 6800 } 6801 6802 /* Pass on control to the original bio this one was cloned from */ 6803 bio_endio(stripe->bioc->orig_bio); 6804 bio_put(bio); 6805 } 6806 6807 static void btrfs_submit_dev_bio(struct btrfs_device *dev, struct bio *bio) 6808 { 6809 if (!dev || !dev->bdev || 6810 test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) || 6811 (btrfs_op(bio) == BTRFS_MAP_WRITE && 6812 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) { 6813 bio_io_error(bio); 6814 return; 6815 } 6816 6817 bio_set_dev(bio, dev->bdev); 6818 6819 /* 6820 * For zone append writing, bi_sector must point the beginning of the 6821 * zone 6822 */ 6823 if (bio_op(bio) == REQ_OP_ZONE_APPEND) { 6824 u64 physical = bio->bi_iter.bi_sector << SECTOR_SHIFT; 6825 6826 if (btrfs_dev_is_sequential(dev, physical)) { 6827 u64 zone_start = round_down(physical, 6828 dev->fs_info->zone_size); 6829 6830 bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT; 6831 } else { 6832 bio->bi_opf &= ~REQ_OP_ZONE_APPEND; 6833 bio->bi_opf |= REQ_OP_WRITE; 6834 } 6835 } 6836 btrfs_debug_in_rcu(dev->fs_info, 6837 "%s: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u", 6838 __func__, bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector, 6839 (unsigned long)dev->bdev->bd_dev, rcu_str_deref(dev->name), 6840 dev->devid, bio->bi_iter.bi_size); 6841 6842 btrfsic_check_bio(bio); 6843 submit_bio(bio); 6844 } 6845 6846 static void btrfs_submit_mirrored_bio(struct btrfs_io_context *bioc, int dev_nr) 6847 { 6848 struct bio *orig_bio = bioc->orig_bio, *bio; 6849 6850 ASSERT(bio_op(orig_bio) != REQ_OP_READ); 6851 6852 /* Reuse the bio embedded into the btrfs_bio for the last mirror */ 6853 if (dev_nr == bioc->num_stripes - 1) { 6854 bio = orig_bio; 6855 bio->bi_end_io = btrfs_orig_write_end_io; 6856 } else { 6857 bio = bio_alloc_clone(NULL, orig_bio, GFP_NOFS, &fs_bio_set); 6858 bio_inc_remaining(orig_bio); 6859 bio->bi_end_io = btrfs_clone_write_end_io; 6860 } 6861 6862 bio->bi_private = &bioc->stripes[dev_nr]; 6863 bio->bi_iter.bi_sector = bioc->stripes[dev_nr].physical >> SECTOR_SHIFT; 6864 bioc->stripes[dev_nr].bioc = bioc; 6865 btrfs_submit_dev_bio(bioc->stripes[dev_nr].dev, bio); 6866 } 6867 6868 void btrfs_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio, int mirror_num) 6869 { 6870 u64 logical = bio->bi_iter.bi_sector << 9; 6871 u64 length = bio->bi_iter.bi_size; 6872 u64 map_length = length; 6873 struct btrfs_io_context *bioc = NULL; 6874 struct btrfs_io_stripe smap; 6875 int ret; 6876 6877 btrfs_bio_counter_inc_blocked(fs_info); 6878 ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical, &map_length, 6879 &bioc, &smap, &mirror_num, 1); 6880 if (ret) { 6881 btrfs_bio_counter_dec(fs_info); 6882 btrfs_bio_end_io(btrfs_bio(bio), errno_to_blk_status(ret)); 6883 return; 6884 } 6885 6886 if (map_length < length) { 6887 btrfs_crit(fs_info, 6888 "mapping failed logical %llu bio len %llu len %llu", 6889 logical, length, map_length); 6890 BUG(); 6891 } 6892 6893 if (!bioc) { 6894 /* Single mirror read/write fast path */ 6895 btrfs_bio(bio)->mirror_num = mirror_num; 6896 btrfs_bio(bio)->device = smap.dev; 
6897 bio->bi_iter.bi_sector = smap.physical >> SECTOR_SHIFT; 6898 bio->bi_private = fs_info; 6899 bio->bi_end_io = btrfs_simple_end_io; 6900 btrfs_submit_dev_bio(smap.dev, bio); 6901 } else if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 6902 /* Parity RAID write or read recovery */ 6903 bio->bi_private = bioc; 6904 bio->bi_end_io = btrfs_raid56_end_io; 6905 if (bio_op(bio) == REQ_OP_READ) 6906 raid56_parity_recover(bio, bioc, mirror_num); 6907 else 6908 raid56_parity_write(bio, bioc); 6909 } else { 6910 /* Write to multiple mirrors */ 6911 int total_devs = bioc->num_stripes; 6912 int dev_nr; 6913 6914 bioc->orig_bio = bio; 6915 for (dev_nr = 0; dev_nr < total_devs; dev_nr++) 6916 btrfs_submit_mirrored_bio(bioc, dev_nr); 6917 } 6918 } 6919 6920 static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args, 6921 const struct btrfs_fs_devices *fs_devices) 6922 { 6923 if (args->fsid == NULL) 6924 return true; 6925 if (memcmp(fs_devices->metadata_uuid, args->fsid, BTRFS_FSID_SIZE) == 0) 6926 return true; 6927 return false; 6928 } 6929 6930 static bool dev_args_match_device(const struct btrfs_dev_lookup_args *args, 6931 const struct btrfs_device *device) 6932 { 6933 if (args->missing) { 6934 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) && 6935 !device->bdev) 6936 return true; 6937 return false; 6938 } 6939 6940 if (device->devid != args->devid) 6941 return false; 6942 if (args->uuid && memcmp(device->uuid, args->uuid, BTRFS_UUID_SIZE) != 0) 6943 return false; 6944 return true; 6945 } 6946 6947 /* 6948 * Find a device specified by @devid or @uuid in the list of @fs_devices, or 6949 * return NULL. 6950 * 6951 * If devid and uuid are both specified, the match must be exact, otherwise 6952 * only devid is used. 6953 */ 6954 struct btrfs_device *btrfs_find_device(const struct btrfs_fs_devices *fs_devices, 6955 const struct btrfs_dev_lookup_args *args) 6956 { 6957 struct btrfs_device *device; 6958 struct btrfs_fs_devices *seed_devs; 6959 6960 if (dev_args_match_fs_devices(args, fs_devices)) { 6961 list_for_each_entry(device, &fs_devices->devices, dev_list) { 6962 if (dev_args_match_device(args, device)) 6963 return device; 6964 } 6965 } 6966 6967 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 6968 if (!dev_args_match_fs_devices(args, seed_devs)) 6969 continue; 6970 list_for_each_entry(device, &seed_devs->devices, dev_list) { 6971 if (dev_args_match_device(args, device)) 6972 return device; 6973 } 6974 } 6975 6976 return NULL; 6977 } 6978 6979 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices, 6980 u64 devid, u8 *dev_uuid) 6981 { 6982 struct btrfs_device *device; 6983 unsigned int nofs_flag; 6984 6985 /* 6986 * We call this under the chunk_mutex, so we want to use NOFS for this 6987 * allocation, however we don't want to change btrfs_alloc_device() to 6988 * always do NOFS because we use it in a lot of other GFP_KERNEL safe 6989 * places. 
6990 */ 6991 nofs_flag = memalloc_nofs_save(); 6992 device = btrfs_alloc_device(NULL, &devid, dev_uuid); 6993 memalloc_nofs_restore(nofs_flag); 6994 if (IS_ERR(device)) 6995 return device; 6996 6997 list_add(&device->dev_list, &fs_devices->devices); 6998 device->fs_devices = fs_devices; 6999 fs_devices->num_devices++; 7000 7001 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 7002 fs_devices->missing_devices++; 7003 7004 return device; 7005 } 7006 7007 /** 7008 * btrfs_alloc_device - allocate struct btrfs_device 7009 * @fs_info: used only for generating a new devid, can be NULL if 7010 * devid is provided (i.e. @devid != NULL). 7011 * @devid: a pointer to devid for this device. If NULL a new devid 7012 * is generated. 7013 * @uuid: a pointer to UUID for this device. If NULL a new UUID 7014 * is generated. 7015 * 7016 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR() 7017 * on error. Returned struct is not linked onto any lists and must be 7018 * destroyed with btrfs_free_device. 7019 */ 7020 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info, 7021 const u64 *devid, 7022 const u8 *uuid) 7023 { 7024 struct btrfs_device *dev; 7025 u64 tmp; 7026 7027 if (WARN_ON(!devid && !fs_info)) 7028 return ERR_PTR(-EINVAL); 7029 7030 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 7031 if (!dev) 7032 return ERR_PTR(-ENOMEM); 7033 7034 INIT_LIST_HEAD(&dev->dev_list); 7035 INIT_LIST_HEAD(&dev->dev_alloc_list); 7036 INIT_LIST_HEAD(&dev->post_commit_list); 7037 7038 atomic_set(&dev->dev_stats_ccnt, 0); 7039 btrfs_device_data_ordered_init(dev); 7040 extent_io_tree_init(fs_info, &dev->alloc_state, 7041 IO_TREE_DEVICE_ALLOC_STATE, NULL); 7042 7043 if (devid) 7044 tmp = *devid; 7045 else { 7046 int ret; 7047 7048 ret = find_next_devid(fs_info, &tmp); 7049 if (ret) { 7050 btrfs_free_device(dev); 7051 return ERR_PTR(ret); 7052 } 7053 } 7054 dev->devid = tmp; 7055 7056 if (uuid) 7057 memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE); 7058 else 7059 generate_random_uuid(dev->uuid); 7060 7061 return dev; 7062 } 7063 7064 static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info, 7065 u64 devid, u8 *uuid, bool error) 7066 { 7067 if (error) 7068 btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing", 7069 devid, uuid); 7070 else 7071 btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing", 7072 devid, uuid); 7073 } 7074 7075 u64 btrfs_calc_stripe_length(const struct extent_map *em) 7076 { 7077 const struct map_lookup *map = em->map_lookup; 7078 const int data_stripes = calc_data_stripes(map->type, map->num_stripes); 7079 7080 return div_u64(em->len, data_stripes); 7081 } 7082 7083 #if BITS_PER_LONG == 32 7084 /* 7085 * Due to page cache limit, metadata beyond BTRFS_32BIT_MAX_FILE_SIZE 7086 * can't be accessed on 32bit systems. 7087 * 7088 * This function do mount time check to reject the fs if it already has 7089 * metadata chunk beyond that limit. 7090 */ 7091 static int check_32bit_meta_chunk(struct btrfs_fs_info *fs_info, 7092 u64 logical, u64 length, u64 type) 7093 { 7094 if (!(type & BTRFS_BLOCK_GROUP_METADATA)) 7095 return 0; 7096 7097 if (logical + length < MAX_LFS_FILESIZE) 7098 return 0; 7099 7100 btrfs_err_32bit_limit(fs_info); 7101 return -EOVERFLOW; 7102 } 7103 7104 /* 7105 * This is to give early warning for any metadata chunk reaching 7106 * BTRFS_32BIT_EARLY_WARN_THRESHOLD. 7107 * Although we can still access the metadata, it's not going to be possible 7108 * once the limit is reached. 
7109 */ 7110 static void warn_32bit_meta_chunk(struct btrfs_fs_info *fs_info, 7111 u64 logical, u64 length, u64 type) 7112 { 7113 if (!(type & BTRFS_BLOCK_GROUP_METADATA)) 7114 return; 7115 7116 if (logical + length < BTRFS_32BIT_EARLY_WARN_THRESHOLD) 7117 return; 7118 7119 btrfs_warn_32bit_limit(fs_info); 7120 } 7121 #endif 7122 7123 static struct btrfs_device *handle_missing_device(struct btrfs_fs_info *fs_info, 7124 u64 devid, u8 *uuid) 7125 { 7126 struct btrfs_device *dev; 7127 7128 if (!btrfs_test_opt(fs_info, DEGRADED)) { 7129 btrfs_report_missing_device(fs_info, devid, uuid, true); 7130 return ERR_PTR(-ENOENT); 7131 } 7132 7133 dev = add_missing_dev(fs_info->fs_devices, devid, uuid); 7134 if (IS_ERR(dev)) { 7135 btrfs_err(fs_info, "failed to init missing device %llu: %ld", 7136 devid, PTR_ERR(dev)); 7137 return dev; 7138 } 7139 btrfs_report_missing_device(fs_info, devid, uuid, false); 7140 7141 return dev; 7142 } 7143 7144 static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf, 7145 struct btrfs_chunk *chunk) 7146 { 7147 BTRFS_DEV_LOOKUP_ARGS(args); 7148 struct btrfs_fs_info *fs_info = leaf->fs_info; 7149 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 7150 struct map_lookup *map; 7151 struct extent_map *em; 7152 u64 logical; 7153 u64 length; 7154 u64 devid; 7155 u64 type; 7156 u8 uuid[BTRFS_UUID_SIZE]; 7157 int index; 7158 int num_stripes; 7159 int ret; 7160 int i; 7161 7162 logical = key->offset; 7163 length = btrfs_chunk_length(leaf, chunk); 7164 type = btrfs_chunk_type(leaf, chunk); 7165 index = btrfs_bg_flags_to_raid_index(type); 7166 num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 7167 7168 #if BITS_PER_LONG == 32 7169 ret = check_32bit_meta_chunk(fs_info, logical, length, type); 7170 if (ret < 0) 7171 return ret; 7172 warn_32bit_meta_chunk(fs_info, logical, length, type); 7173 #endif 7174 7175 /* 7176 * Only need to verify chunk item if we're reading from sys chunk array, 7177 * as chunk item in tree block is already verified by tree-checker. 7178 */ 7179 if (leaf->start == BTRFS_SUPER_INFO_OFFSET) { 7180 ret = btrfs_check_chunk_valid(leaf, chunk, logical); 7181 if (ret) 7182 return ret; 7183 } 7184 7185 read_lock(&map_tree->lock); 7186 em = lookup_extent_mapping(map_tree, logical, 1); 7187 read_unlock(&map_tree->lock); 7188 7189 /* already mapped? */ 7190 if (em && em->start <= logical && em->start + em->len > logical) { 7191 free_extent_map(em); 7192 return 0; 7193 } else if (em) { 7194 free_extent_map(em); 7195 } 7196 7197 em = alloc_extent_map(); 7198 if (!em) 7199 return -ENOMEM; 7200 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); 7201 if (!map) { 7202 free_extent_map(em); 7203 return -ENOMEM; 7204 } 7205 7206 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 7207 em->map_lookup = map; 7208 em->start = logical; 7209 em->len = length; 7210 em->orig_start = 0; 7211 em->block_start = 0; 7212 em->block_len = em->len; 7213 7214 map->num_stripes = num_stripes; 7215 map->io_width = btrfs_chunk_io_width(leaf, chunk); 7216 map->io_align = btrfs_chunk_io_align(leaf, chunk); 7217 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk); 7218 map->type = type; 7219 /* 7220 * We can't use the sub_stripes value, as for profiles other than 7221 * RAID10, they may have 0 as sub_stripes for filesystems created by 7222 * older mkfs (<v5.4). 7223 * In that case, it can cause divide-by-zero errors later. 7224 * Since currently sub_stripes is fixed for each profile, let's 7225 * use the trusted value instead. 
7226 */ 7227 map->sub_stripes = btrfs_raid_array[index].sub_stripes; 7228 map->verified_stripes = 0; 7229 em->orig_block_len = btrfs_calc_stripe_length(em); 7230 for (i = 0; i < num_stripes; i++) { 7231 map->stripes[i].physical = 7232 btrfs_stripe_offset_nr(leaf, chunk, i); 7233 devid = btrfs_stripe_devid_nr(leaf, chunk, i); 7234 args.devid = devid; 7235 read_extent_buffer(leaf, uuid, (unsigned long) 7236 btrfs_stripe_dev_uuid_nr(chunk, i), 7237 BTRFS_UUID_SIZE); 7238 args.uuid = uuid; 7239 map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, &args); 7240 if (!map->stripes[i].dev) { 7241 map->stripes[i].dev = handle_missing_device(fs_info, 7242 devid, uuid); 7243 if (IS_ERR(map->stripes[i].dev)) { 7244 free_extent_map(em); 7245 return PTR_ERR(map->stripes[i].dev); 7246 } 7247 } 7248 7249 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 7250 &(map->stripes[i].dev->dev_state)); 7251 } 7252 7253 write_lock(&map_tree->lock); 7254 ret = add_extent_mapping(map_tree, em, 0); 7255 write_unlock(&map_tree->lock); 7256 if (ret < 0) { 7257 btrfs_err(fs_info, 7258 "failed to add chunk map, start=%llu len=%llu: %d", 7259 em->start, em->len, ret); 7260 } 7261 free_extent_map(em); 7262 7263 return ret; 7264 } 7265 7266 static void fill_device_from_item(struct extent_buffer *leaf, 7267 struct btrfs_dev_item *dev_item, 7268 struct btrfs_device *device) 7269 { 7270 unsigned long ptr; 7271 7272 device->devid = btrfs_device_id(leaf, dev_item); 7273 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item); 7274 device->total_bytes = device->disk_total_bytes; 7275 device->commit_total_bytes = device->disk_total_bytes; 7276 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item); 7277 device->commit_bytes_used = device->bytes_used; 7278 device->type = btrfs_device_type(leaf, dev_item); 7279 device->io_align = btrfs_device_io_align(leaf, dev_item); 7280 device->io_width = btrfs_device_io_width(leaf, dev_item); 7281 device->sector_size = btrfs_device_sector_size(leaf, dev_item); 7282 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID); 7283 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 7284 7285 ptr = btrfs_device_uuid(dev_item); 7286 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 7287 } 7288 7289 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info, 7290 u8 *fsid) 7291 { 7292 struct btrfs_fs_devices *fs_devices; 7293 int ret; 7294 7295 lockdep_assert_held(&uuid_mutex); 7296 ASSERT(fsid); 7297 7298 /* This will match only for multi-device seed fs */ 7299 list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list) 7300 if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE)) 7301 return fs_devices; 7302 7303 7304 fs_devices = find_fsid(fsid, NULL); 7305 if (!fs_devices) { 7306 if (!btrfs_test_opt(fs_info, DEGRADED)) 7307 return ERR_PTR(-ENOENT); 7308 7309 fs_devices = alloc_fs_devices(fsid, NULL); 7310 if (IS_ERR(fs_devices)) 7311 return fs_devices; 7312 7313 fs_devices->seeding = true; 7314 fs_devices->opened = 1; 7315 return fs_devices; 7316 } 7317 7318 /* 7319 * Upon first call for a seed fs fsid, just create a private copy of the 7320 * respective fs_devices and anchor it at fs_info->fs_devices->seed_list 7321 */ 7322 fs_devices = clone_fs_devices(fs_devices); 7323 if (IS_ERR(fs_devices)) 7324 return fs_devices; 7325 7326 ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder); 7327 if (ret) { 7328 free_fs_devices(fs_devices); 7329 return ERR_PTR(ret); 7330 } 7331 7332 if (!fs_devices->seeding) { 7333 
close_fs_devices(fs_devices); 7334 free_fs_devices(fs_devices); 7335 return ERR_PTR(-EINVAL); 7336 } 7337 7338 list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list); 7339 7340 return fs_devices; 7341 } 7342 7343 static int read_one_dev(struct extent_buffer *leaf, 7344 struct btrfs_dev_item *dev_item) 7345 { 7346 BTRFS_DEV_LOOKUP_ARGS(args); 7347 struct btrfs_fs_info *fs_info = leaf->fs_info; 7348 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7349 struct btrfs_device *device; 7350 u64 devid; 7351 int ret; 7352 u8 fs_uuid[BTRFS_FSID_SIZE]; 7353 u8 dev_uuid[BTRFS_UUID_SIZE]; 7354 7355 devid = btrfs_device_id(leaf, dev_item); 7356 args.devid = devid; 7357 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 7358 BTRFS_UUID_SIZE); 7359 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 7360 BTRFS_FSID_SIZE); 7361 args.uuid = dev_uuid; 7362 args.fsid = fs_uuid; 7363 7364 if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) { 7365 fs_devices = open_seed_devices(fs_info, fs_uuid); 7366 if (IS_ERR(fs_devices)) 7367 return PTR_ERR(fs_devices); 7368 } 7369 7370 device = btrfs_find_device(fs_info->fs_devices, &args); 7371 if (!device) { 7372 if (!btrfs_test_opt(fs_info, DEGRADED)) { 7373 btrfs_report_missing_device(fs_info, devid, 7374 dev_uuid, true); 7375 return -ENOENT; 7376 } 7377 7378 device = add_missing_dev(fs_devices, devid, dev_uuid); 7379 if (IS_ERR(device)) { 7380 btrfs_err(fs_info, 7381 "failed to add missing dev %llu: %ld", 7382 devid, PTR_ERR(device)); 7383 return PTR_ERR(device); 7384 } 7385 btrfs_report_missing_device(fs_info, devid, dev_uuid, false); 7386 } else { 7387 if (!device->bdev) { 7388 if (!btrfs_test_opt(fs_info, DEGRADED)) { 7389 btrfs_report_missing_device(fs_info, 7390 devid, dev_uuid, true); 7391 return -ENOENT; 7392 } 7393 btrfs_report_missing_device(fs_info, devid, 7394 dev_uuid, false); 7395 } 7396 7397 if (!device->bdev && 7398 !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { 7399 /* 7400 * this happens when a device that was properly setup 7401 * in the device info lists suddenly goes bad. 
7402 * device->bdev is NULL, and so we have to set 7403 * device->missing to one here 7404 */ 7405 device->fs_devices->missing_devices++; 7406 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 7407 } 7408 7409 /* Move the device to its own fs_devices */ 7410 if (device->fs_devices != fs_devices) { 7411 ASSERT(test_bit(BTRFS_DEV_STATE_MISSING, 7412 &device->dev_state)); 7413 7414 list_move(&device->dev_list, &fs_devices->devices); 7415 device->fs_devices->num_devices--; 7416 fs_devices->num_devices++; 7417 7418 device->fs_devices->missing_devices--; 7419 fs_devices->missing_devices++; 7420 7421 device->fs_devices = fs_devices; 7422 } 7423 } 7424 7425 if (device->fs_devices != fs_info->fs_devices) { 7426 BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)); 7427 if (device->generation != 7428 btrfs_device_generation(leaf, dev_item)) 7429 return -EINVAL; 7430 } 7431 7432 fill_device_from_item(leaf, dev_item, device); 7433 if (device->bdev) { 7434 u64 max_total_bytes = bdev_nr_bytes(device->bdev); 7435 7436 if (device->total_bytes > max_total_bytes) { 7437 btrfs_err(fs_info, 7438 "device total_bytes should be at most %llu but found %llu", 7439 max_total_bytes, device->total_bytes); 7440 return -EINVAL; 7441 } 7442 } 7443 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 7444 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 7445 !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 7446 device->fs_devices->total_rw_bytes += device->total_bytes; 7447 atomic64_add(device->total_bytes - device->bytes_used, 7448 &fs_info->free_chunk_space); 7449 } 7450 ret = 0; 7451 return ret; 7452 } 7453 7454 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info) 7455 { 7456 struct btrfs_super_block *super_copy = fs_info->super_copy; 7457 struct extent_buffer *sb; 7458 struct btrfs_disk_key *disk_key; 7459 struct btrfs_chunk *chunk; 7460 u8 *array_ptr; 7461 unsigned long sb_array_offset; 7462 int ret = 0; 7463 u32 num_stripes; 7464 u32 array_size; 7465 u32 len = 0; 7466 u32 cur_offset; 7467 u64 type; 7468 struct btrfs_key key; 7469 7470 ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize); 7471 7472 /* 7473 * We allocated a dummy extent, just to use extent buffer accessors. 7474 * There will be unused space after BTRFS_SUPER_INFO_SIZE, but 7475 * that's fine, we will not go beyond system chunk array anyway. 
7476 */ 7477 sb = alloc_dummy_extent_buffer(fs_info, BTRFS_SUPER_INFO_OFFSET); 7478 if (!sb) 7479 return -ENOMEM; 7480 set_extent_buffer_uptodate(sb); 7481 7482 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); 7483 array_size = btrfs_super_sys_array_size(super_copy); 7484 7485 array_ptr = super_copy->sys_chunk_array; 7486 sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array); 7487 cur_offset = 0; 7488 7489 while (cur_offset < array_size) { 7490 disk_key = (struct btrfs_disk_key *)array_ptr; 7491 len = sizeof(*disk_key); 7492 if (cur_offset + len > array_size) 7493 goto out_short_read; 7494 7495 btrfs_disk_key_to_cpu(&key, disk_key); 7496 7497 array_ptr += len; 7498 sb_array_offset += len; 7499 cur_offset += len; 7500 7501 if (key.type != BTRFS_CHUNK_ITEM_KEY) { 7502 btrfs_err(fs_info, 7503 "unexpected item type %u in sys_array at offset %u", 7504 (u32)key.type, cur_offset); 7505 ret = -EIO; 7506 break; 7507 } 7508 7509 chunk = (struct btrfs_chunk *)sb_array_offset; 7510 /* 7511 * At least one btrfs_chunk with one stripe must be present, 7512 * exact stripe count check comes afterwards 7513 */ 7514 len = btrfs_chunk_item_size(1); 7515 if (cur_offset + len > array_size) 7516 goto out_short_read; 7517 7518 num_stripes = btrfs_chunk_num_stripes(sb, chunk); 7519 if (!num_stripes) { 7520 btrfs_err(fs_info, 7521 "invalid number of stripes %u in sys_array at offset %u", 7522 num_stripes, cur_offset); 7523 ret = -EIO; 7524 break; 7525 } 7526 7527 type = btrfs_chunk_type(sb, chunk); 7528 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) { 7529 btrfs_err(fs_info, 7530 "invalid chunk type %llu in sys_array at offset %u", 7531 type, cur_offset); 7532 ret = -EIO; 7533 break; 7534 } 7535 7536 len = btrfs_chunk_item_size(num_stripes); 7537 if (cur_offset + len > array_size) 7538 goto out_short_read; 7539 7540 ret = read_one_chunk(&key, sb, chunk); 7541 if (ret) 7542 break; 7543 7544 array_ptr += len; 7545 sb_array_offset += len; 7546 cur_offset += len; 7547 } 7548 clear_extent_buffer_uptodate(sb); 7549 free_extent_buffer_stale(sb); 7550 return ret; 7551 7552 out_short_read: 7553 btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u", 7554 len, cur_offset); 7555 clear_extent_buffer_uptodate(sb); 7556 free_extent_buffer_stale(sb); 7557 return -EIO; 7558 } 7559 7560 /* 7561 * Check if all chunks in the fs are OK for read-write degraded mount 7562 * 7563 * If the @failing_dev is specified, it's accounted as missing. 7564 * 7565 * Return true if all chunks meet the minimal RW mount requirements. 7566 * Return false if any chunk doesn't meet the minimal RW mount requirements. 7567 */ 7568 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info, 7569 struct btrfs_device *failing_dev) 7570 { 7571 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 7572 struct extent_map *em; 7573 u64 next_start = 0; 7574 bool ret = true; 7575 7576 read_lock(&map_tree->lock); 7577 em = lookup_extent_mapping(map_tree, 0, (u64)-1); 7578 read_unlock(&map_tree->lock); 7579 /* No chunk at all? 
Return false anyway */ 7580 if (!em) { 7581 ret = false; 7582 goto out; 7583 } 7584 while (em) { 7585 struct map_lookup *map; 7586 int missing = 0; 7587 int max_tolerated; 7588 int i; 7589 7590 map = em->map_lookup; 7591 max_tolerated = 7592 btrfs_get_num_tolerated_disk_barrier_failures( 7593 map->type); 7594 for (i = 0; i < map->num_stripes; i++) { 7595 struct btrfs_device *dev = map->stripes[i].dev; 7596 7597 if (!dev || !dev->bdev || 7598 test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) || 7599 dev->last_flush_error) 7600 missing++; 7601 else if (failing_dev && failing_dev == dev) 7602 missing++; 7603 } 7604 if (missing > max_tolerated) { 7605 if (!failing_dev) 7606 btrfs_warn(fs_info, 7607 "chunk %llu missing %d devices, max tolerance is %d for writable mount", 7608 em->start, missing, max_tolerated); 7609 free_extent_map(em); 7610 ret = false; 7611 goto out; 7612 } 7613 next_start = extent_map_end(em); 7614 free_extent_map(em); 7615 7616 read_lock(&map_tree->lock); 7617 em = lookup_extent_mapping(map_tree, next_start, 7618 (u64)(-1) - next_start); 7619 read_unlock(&map_tree->lock); 7620 } 7621 out: 7622 return ret; 7623 } 7624 7625 static void readahead_tree_node_children(struct extent_buffer *node) 7626 { 7627 int i; 7628 const int nr_items = btrfs_header_nritems(node); 7629 7630 for (i = 0; i < nr_items; i++) 7631 btrfs_readahead_node_child(node, i); 7632 } 7633 7634 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info) 7635 { 7636 struct btrfs_root *root = fs_info->chunk_root; 7637 struct btrfs_path *path; 7638 struct extent_buffer *leaf; 7639 struct btrfs_key key; 7640 struct btrfs_key found_key; 7641 int ret; 7642 int slot; 7643 int iter_ret = 0; 7644 u64 total_dev = 0; 7645 u64 last_ra_node = 0; 7646 7647 path = btrfs_alloc_path(); 7648 if (!path) 7649 return -ENOMEM; 7650 7651 /* 7652 * uuid_mutex is needed only if we are mounting a sprout FS 7653 * otherwise we don't need it. 7654 */ 7655 mutex_lock(&uuid_mutex); 7656 7657 /* 7658 * It is possible for mount and umount to race in such a way that 7659 * we execute this code path, but open_fs_devices failed to clear 7660 * total_rw_bytes. We certainly want it cleared before reading the 7661 * device items, so clear it here. 7662 */ 7663 fs_info->fs_devices->total_rw_bytes = 0; 7664 7665 /* 7666 * Lockdep complains about possible circular locking dependency between 7667 * a disk's open_mutex (struct gendisk.open_mutex), the rw semaphores 7668 * used for freeze procection of a fs (struct super_block.s_writers), 7669 * which we take when starting a transaction, and extent buffers of the 7670 * chunk tree if we call read_one_dev() while holding a lock on an 7671 * extent buffer of the chunk tree. Since we are mounting the filesystem 7672 * and at this point there can't be any concurrent task modifying the 7673 * chunk tree, to keep it simple, just skip locking on the chunk tree. 7674 */ 7675 ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags)); 7676 path->skip_locking = 1; 7677 7678 /* 7679 * Read all device items, and then all the chunk items. All 7680 * device items are found before any chunk item (their object id 7681 * is smaller than the lowest possible object id for a chunk 7682 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID). 
static void readahead_tree_node_children(struct extent_buffer *node)
{
	int i;
	const int nr_items = btrfs_header_nritems(node);

	for (i = 0; i < nr_items; i++)
		btrfs_readahead_node_child(node, i);
}

int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;
	int iter_ret = 0;
	u64 total_dev = 0;
	u64 last_ra_node = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * uuid_mutex is needed only when we are mounting a sprout FS,
	 * otherwise we don't need it.
	 */
	mutex_lock(&uuid_mutex);

	/*
	 * It is possible for mount and umount to race in such a way that
	 * we execute this code path, but open_fs_devices failed to clear
	 * total_rw_bytes. We certainly want it cleared before reading the
	 * device items, so clear it here.
	 */
	fs_info->fs_devices->total_rw_bytes = 0;

	/*
	 * Lockdep complains about a possible circular locking dependency
	 * between a disk's open_mutex (struct gendisk.open_mutex), the rw
	 * semaphores used for freeze protection of a fs
	 * (struct super_block.s_writers), which we take when starting a
	 * transaction, and extent buffers of the chunk tree if we call
	 * read_one_dev() while holding a lock on an extent buffer of the
	 * chunk tree. Since we are mounting the filesystem and at this point
	 * there can't be any concurrent task modifying the chunk tree, to
	 * keep it simple, just skip locking on the chunk tree.
	 */
	ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
	path->skip_locking = 1;

	/*
	 * Read all device items, and then all the chunk items. All
	 * device items are found before any chunk item (their object id
	 * is smaller than the lowest possible object id for a chunk
	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
	btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
		struct extent_buffer *node = path->nodes[1];

		leaf = path->nodes[0];
		slot = path->slots[0];

		if (node) {
			if (last_ra_node != node->start) {
				readahead_tree_node_children(node);
				last_ra_node = node->start;
			}
		}
		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
			struct btrfs_dev_item *dev_item;

			dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
			ret = read_one_dev(leaf, dev_item);
			if (ret)
				goto error;
			total_dev++;
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;

			/*
			 * We are only called at mount time, so no need to
			 * take fs_info->chunk_mutex. Plus, to avoid lockdep
			 * warnings, we always lock fs_info->chunk_mutex first
			 * before acquiring any locks on the chunk tree. This
			 * is a requirement for chunk allocation, see the
			 * comment on top of btrfs_chunk_alloc() for details.
			 */
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(&found_key, leaf, chunk);
			if (ret)
				goto error;
		}
	}
	/* Catch error found during iteration */
	if (iter_ret < 0) {
		ret = iter_ret;
		goto error;
	}

	/*
	 * After loading the chunk tree, we've got all device information,
	 * so do another round of validation checks.
	 */
	if (total_dev != fs_info->fs_devices->total_devices) {
		btrfs_warn(fs_info,
"super block num_devices %llu mismatch with DEV_ITEM count %llu, will be repaired on next transaction commit",
			   btrfs_super_num_devices(fs_info->super_copy),
			   total_dev);
		fs_info->fs_devices->total_devices = total_dev;
		btrfs_set_super_num_devices(fs_info->super_copy, total_dev);
	}
	if (btrfs_super_total_bytes(fs_info->super_copy) <
	    fs_info->fs_devices->total_rw_bytes) {
		btrfs_err(fs_info,
	"super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
			  btrfs_super_total_bytes(fs_info->super_copy),
			  fs_info->fs_devices->total_rw_bytes);
		ret = -EINVAL;
		goto error;
	}
	ret = 0;
error:
	mutex_unlock(&uuid_mutex);

	btrfs_free_path(path);
	return ret;
}

int btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	struct btrfs_device *device;
	int ret = 0;

	fs_devices->fs_info = fs_info;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list)
		device->fs_info = fs_info;

	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		list_for_each_entry(device, &seed_devs->devices, dev_list) {
			device->fs_info = fs_info;
			ret = btrfs_get_dev_zone_info(device, false);
			if (ret)
				break;
		}

		seed_devs->fs_info = fs_info;
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
				 const struct btrfs_dev_stats_item *ptr,
				 int index)
{
	u64 val;

	read_extent_buffer(eb, &val,
			   offsetof(struct btrfs_dev_stats_item, values) +
			   ((unsigned long)ptr) + (index * sizeof(u64)),
			   sizeof(val));
	return val;
}
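/*
 * Note on the pointer arithmetic above: btrfs_item_ptr() effectively hands
 * back the item's byte offset inside the extent buffer cast to a pointer,
 * not a dereferenceable address, which is why (unsigned long)ptr is folded
 * into the offset passed to read_extent_buffer(). Illustrative offset math
 * (hypothetical helper, not part of this file):
 */
#if 0
static unsigned long demo_dev_stat_offset(const struct btrfs_dev_stats_item *ptr,
					  int index)
{
	/* Byte offset of the index-th __le64 counter within the buffer. */
	return (unsigned long)ptr +
	       offsetof(struct btrfs_dev_stats_item, values) +
	       index * sizeof(__le64);
}
#endif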
static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
				      struct btrfs_dev_stats_item *ptr,
				      int index, u64 val)
{
	write_extent_buffer(eb, &val,
			    offsetof(struct btrfs_dev_stats_item, values) +
			    ((unsigned long)ptr) + (index * sizeof(u64)),
			    sizeof(val));
}

static int btrfs_device_init_dev_stats(struct btrfs_device *device,
				       struct btrfs_path *path)
{
	struct btrfs_dev_stats_item *ptr;
	struct extent_buffer *eb;
	struct btrfs_key key;
	int item_size;
	int i, ret, slot;

	if (!device->fs_info->dev_root)
		return 0;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;
	ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0);
	if (ret) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			btrfs_dev_stat_set(device, i, 0);
		device->dev_stats_valid = 1;
		btrfs_release_path(path);
		return ret < 0 ? ret : 0;
	}
	slot = path->slots[0];
	eb = path->nodes[0];
	item_size = btrfs_item_size(eb, slot);

	ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item);

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
		if (item_size >= (1 + i) * sizeof(__le64))
			btrfs_dev_stat_set(device, i,
					   btrfs_dev_stats_value(eb, ptr, i));
		else
			btrfs_dev_stat_set(device, i, 0);
	}

	device->dev_stats_valid = 1;
	btrfs_dev_stat_print_on_load(device);
	btrfs_release_path(path);

	return 0;
}

int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		ret = btrfs_device_init_dev_stats(device, path);
		if (ret)
			goto out;
	}
	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		list_for_each_entry(device, &seed_devs->devices, dev_list) {
			ret = btrfs_device_init_dev_stats(device, path);
			if (ret)
				goto out;
		}
	}
out:
	mutex_unlock(&fs_devices->device_list_mutex);

	btrfs_free_path(path);
	return ret;
}
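/*
 * Compatibility note on the item_size check in btrfs_device_init_dev_stats()
 * above: dev stats items written by older kernels may carry fewer counters,
 * and any counter whose slot lies past item_size is read as zero, so the
 * format can grow by appending values. Sketch of the effective rule
 * (illustrative only; demo_* is a hypothetical name):
 */
#if 0
static int demo_nr_stored_counters(int item_size)
{
	/* Counters actually present in an item of item_size bytes. */
	return min_t(int, item_size / sizeof(__le64),
		     BTRFS_DEV_STAT_VALUES_MAX);
}
#endif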
static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_device *device)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn_in_rcu(fs_info,
			"error %d while searching for dev_stats item for device %s",
			ret, rcu_str_deref(device->name));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* need to delete old one and insert a new one */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			btrfs_warn_in_rcu(fs_info,
				"delete too small dev_stats item for device %s failed %d",
				rcu_str_deref(device->name), ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			btrfs_warn_in_rcu(fs_info,
				"insert dev_stats item for device %s failed %d",
				rcu_str_deref(device->name), ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Called from commit_transaction. Writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int stats_cnt;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		stats_cnt = atomic_read(&device->dev_stats_ccnt);
		if (!device->dev_stats_valid || stats_cnt == 0)
			continue;

		/*
		 * There is a LOAD-LOAD control dependency between the value of
		 * dev_stats_ccnt and updating the on-disk values which requires
		 * reading the in-memory counters. Such control dependencies
		 * require explicit read memory barriers.
		 *
		 * This memory barrier pairs with smp_mb__before_atomic in
		 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
		 * barrier implied by atomic_xchg in
		 * btrfs_dev_stats_read_and_reset.
		 */
		smp_rmb();

		ret = update_dev_stat_item(trans, device);
		if (!ret)
			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}
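/*
 * Writer side of the barrier pairing described above, roughly as implemented
 * by btrfs_dev_stat_inc() in volumes.h (a simplified sketch, not the
 * authoritative definition):
 */
#if 0
static void demo_dev_stat_inc(struct btrfs_device *dev, int index)
{
	atomic_inc(dev->dev_stat_values + index);	/* 1: bump the counter */
	smp_mb__before_atomic();			/* order 1 before 2 */
	atomic_inc(&dev->dev_stats_ccnt);		/* 2: publish a change */
}
#endif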
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);

	if (!dev->dev_stats_valid)
		return;
	btrfs_err_rl_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
			   rcu_str_deref(dev->name),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* all values == 0, suppress message */

	btrfs_info_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
			  rcu_str_deref(dev->name),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	BTRFS_DEV_LOOKUP_ARGS(args);
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	args.devid = stats->devid;
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		btrfs_warn(fs_info, "get dev_stats failed, device not found");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_set(dev, i, 0);
		}
		btrfs_info(fs_info, "device stats zeroed by %s (%d)",
			   current->comm, task_pid_nr(current));
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}
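/*
 * Userspace view of the interface above, as a hedged sketch: the ioctl is
 * BTRFS_IOC_GET_DEV_STATS from <linux/btrfs.h>, issued on any fd inside the
 * mounted filesystem. Error handling is trimmed and demo_* is hypothetical;
 * this is a standalone illustration, not part of the kernel build:
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

static int demo_print_dev_stats(const char *mnt_path, __u64 devid)
{
	struct btrfs_ioctl_get_dev_stats args = {
		.devid = devid,
		.nr_items = BTRFS_DEV_STAT_VALUES_MAX,
	};
	int fd = open(mnt_path, O_RDONLY);
	int ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, BTRFS_IOC_GET_DEV_STATS, &args);
	if (ret == 0)
		printf("write errs: %llu\n",
		       (unsigned long long)args.values[BTRFS_DEV_STAT_WRITE_ERRS]);
	close(fd);
	return ret;
}
#endif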
/*
 * Update the size and bytes used for each device where it changed. This is
 * delayed since we would otherwise get errors while writing out the
 * superblocks.
 *
 * Must be invoked during transaction commit.
 */
void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
{
	struct btrfs_device *curr, *next;

	ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);

	if (list_empty(&trans->dev_update_list))
		return;

	/*
	 * We don't need the device_list_mutex here. This list is owned by the
	 * transaction and the transaction must complete before the device is
	 * released.
	 */
	mutex_lock(&trans->fs_info->chunk_mutex);
	list_for_each_entry_safe(curr, next, &trans->dev_update_list,
				 post_commit_list) {
		list_del_init(&curr->post_commit_list);
		curr->commit_total_bytes = curr->disk_total_bytes;
		curr->commit_bytes_used = curr->bytes_used;
	}
	mutex_unlock(&trans->fs_info->chunk_mutex);
}

/*
 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
 */
int btrfs_bg_type_to_factor(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	return btrfs_raid_array[index].ncopies;
}
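/*
 * Usage note with a sketch: the factor is the ratio of raw to logical bytes
 * for the profiles named above (e.g. factor 2 for RAID1, so a 1 GiB block
 * group consumes 2 GiB of raw device space). It does not account for the
 * parity of RAID5/6. Hypothetical helper, illustrative only:
 */
#if 0
static u64 demo_raw_bytes(u64 flags, u64 logical_bytes)
{
	/* Raw device bytes needed to host @logical_bytes under @flags. */
	return logical_bytes * btrfs_bg_type_to_factor(flags);
}
#endif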
static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
				 u64 chunk_offset, u64 devid,
				 u64 physical_offset, u64 physical_len)
{
	struct btrfs_dev_lookup_args args = { .devid = devid };
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *dev;
	u64 stripe_len;
	bool found = false;
	int ret = 0;
	int i;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
		goto out;
	}

	map = em->map_lookup;
	stripe_len = btrfs_calc_stripe_length(em);
	if (physical_len != stripe_len) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
			  physical_offset, devid, em->start, physical_len,
			  stripe_len);
		ret = -EUCLEAN;
		goto out;
	}

	/*
	 * Very old mkfs.btrfs (before v4.1) will not respect the reserved
	 * space. Although the kernel can handle it without problems, it is
	 * better to warn the user.
	 */
	if (physical_offset < BTRFS_DEVICE_RANGE_RESERVED)
		btrfs_warn(fs_info,
		"devid %llu physical %llu len %llu inside the reserved space",
			   devid, physical_offset, physical_len);

	for (i = 0; i < map->num_stripes; i++) {
		if (map->stripes[i].dev->devid == devid &&
		    map->stripes[i].physical == physical_offset) {
			found = true;
			if (map->verified_stripes >= map->num_stripes) {
				btrfs_err(fs_info,
				"too many dev extents for chunk %llu found",
					  em->start);
				ret = -EUCLEAN;
				goto out;
			}
			map->verified_stripes++;
			break;
		}
	}
	if (!found) {
		btrfs_err(fs_info,
	"dev extent physical offset %llu devid %llu has no corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
	}

	/* Make sure no dev extent is beyond device boundary */
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	if (!dev) {
		btrfs_err(fs_info, "failed to find devid %llu", devid);
		ret = -EUCLEAN;
		goto out;
	}

	if (physical_offset + physical_len > dev->disk_total_bytes) {
		btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
			  devid, physical_offset, physical_len,
			  dev->disk_total_bytes);
		ret = -EUCLEAN;
		goto out;
	}

	if (dev->zone_info) {
		u64 zone_size = dev->zone_info->zone_size;

		if (!IS_ALIGNED(physical_offset, zone_size) ||
		    !IS_ALIGNED(physical_len, zone_size)) {
			btrfs_err(fs_info,
"zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone",
				  devid, physical_offset, physical_len);
			ret = -EUCLEAN;
			goto out;
		}
	}

out:
	free_extent_map(em);
	return ret;
}

static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct rb_node *node;
	int ret = 0;

	read_lock(&em_tree->lock);
	for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
		em = rb_entry(node, struct extent_map, rb_node);
		if (em->map_lookup->num_stripes !=
		    em->map_lookup->verified_stripes) {
			btrfs_err(fs_info,
			"chunk %llu has missing dev extent, have %d expect %d",
				  em->start, em->map_lookup->verified_stripes,
				  em->map_lookup->num_stripes);
			ret = -EUCLEAN;
			goto out;
		}
	}
out:
	read_unlock(&em_tree->lock);
	return ret;
}
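/*
 * The two passes above establish a bijection: verify_one_dev_extent()
 * matches each DEV_EXTENT item to exactly one chunk stripe (counting it in
 * map->verified_stripes), and verify_chunk_dev_extent_mapping() confirms
 * every stripe was matched. Restated as a trivial predicate (illustrative
 * only; demo_* is a hypothetical name):
 */
#if 0
static bool demo_chunk_fully_mapped(const struct map_lookup *map)
{
	return map->verified_stripes == map->num_stripes;
}
#endif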
/*
 * Ensure that all dev extents are mapped to the correct chunk, otherwise
 * later chunk allocation/free would cause unexpected behavior.
 *
 * NOTE: This will iterate through the whole device tree, which should be
 * about the same size as the chunk tree, so it only slightly increases
 * mount time.
 */
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	u64 prev_devid = 0;
	u64 prev_dev_ext_end = 0;
	int ret = 0;

	/*
	 * We don't have a dev_root because we mounted with ignorebadroots and
	 * failed to load the root, so we want to skip the verification in this
	 * case for sure.
	 *
	 * However if the dev root is fine, but the tree itself is corrupted
	 * we'd still fail to mount. This verification is only to make sure
	 * writes can happen safely, so instead just bypass this check
	 * completely in the case of IGNOREBADROOTS.
	 */
	if (btrfs_test_opt(fs_info, IGNOREBADROOTS))
		return 0;

	key.objectid = 1;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0)
			goto out;
		/* No dev extents at all? Not good */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	while (1) {
		struct extent_buffer *leaf = path->nodes[0];
		struct btrfs_dev_extent *dext;
		int slot = path->slots[0];
		u64 chunk_offset;
		u64 physical_offset;
		u64 physical_len;
		u64 devid;

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type != BTRFS_DEV_EXTENT_KEY)
			break;
		devid = key.objectid;
		physical_offset = key.offset;

		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
		physical_len = btrfs_dev_extent_length(leaf, dext);

		/* Check if this dev extent overlaps with the previous one */
		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
			btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
				  devid, physical_offset, prev_dev_ext_end);
			ret = -EUCLEAN;
			goto out;
		}

		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
					    physical_offset, physical_len);
		if (ret < 0)
			goto out;
		prev_devid = devid;
		prev_dev_ext_end = physical_offset + physical_len;

		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = 0;
			break;
		}
	}

	/* Ensure all chunks have corresponding dev extents */
	ret = verify_chunk_dev_extent_mapping(fs_info);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Check whether the given block group or device is pinned by any inode being
 * used as a swapfile.
 */
bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
{
	struct btrfs_swapfile_pin *sp;
	struct rb_node *node;

	spin_lock(&fs_info->swapfile_pins_lock);
	node = fs_info->swapfile_pins.rb_node;
	while (node) {
		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
		if (ptr < sp->ptr)
			node = node->rb_left;
		else if (ptr > sp->ptr)
			node = node->rb_right;
		else
			break;
	}
	spin_unlock(&fs_info->swapfile_pins_lock);
	return node != NULL;
}

static int relocating_repair_kthread(void *data)
{
	struct btrfs_block_group *cache = data;
	struct btrfs_fs_info *fs_info = cache->fs_info;
	u64 target;
	int ret = 0;

	target = cache->start;
	btrfs_put_block_group(cache);

	sb_start_write(fs_info->sb);
	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
		btrfs_info(fs_info,
			   "zoned: skip relocating block group %llu to repair: EBUSY",
			   target);
		sb_end_write(fs_info->sb);
		return -EBUSY;
	}

	mutex_lock(&fs_info->reclaim_bgs_lock);

	/* Ensure the block group still exists */
	cache = btrfs_lookup_block_group(fs_info, target);
	if (!cache)
		goto out;

	if (!test_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags))
		goto out;

	ret = btrfs_may_alloc_data_chunk(fs_info, target);
	if (ret < 0)
		goto out;

	btrfs_info(fs_info,
		   "zoned: relocating block group %llu to repair IO failure",
		   target);
	ret = btrfs_relocate_chunk(fs_info, target);

out:
	if (cache)
		btrfs_put_block_group(cache);
	mutex_unlock(&fs_info->reclaim_bgs_lock);
	btrfs_exclop_finish(fs_info);
	sb_end_write(fs_info->sb);

	return ret;
}
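/*
 * The kthread above serializes against other exclusive operations (balance,
 * device add/remove/replace, ...) via btrfs_exclop_start() and
 * btrfs_exclop_finish(). The generic shape of that pattern, as a hedged
 * sketch (illustrative only; demo_* is a hypothetical name):
 */
#if 0
static int demo_exclusive_op(struct btrfs_fs_info *fs_info)
{
	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE))
		return -EBUSY;		/* another exclusive op is running */
	/* ... do the exclusive work ... */
	btrfs_exclop_finish(fs_info);
	return 0;
}
#endif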
bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
{
	struct btrfs_block_group *cache;

	if (!btrfs_is_zoned(fs_info))
		return false;

	/* Do not attempt to repair in degraded state */
	if (btrfs_test_opt(fs_info, DEGRADED))
		return true;

	cache = btrfs_lookup_block_group(fs_info, logical);
	if (!cache)
		return true;

	if (test_and_set_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags)) {
		btrfs_put_block_group(cache);
		return true;
	}

	kthread_run(relocating_repair_kthread, cache,
		    "btrfs-relocating-repair");

	return true;
}

int __init btrfs_bioset_init(void)
{
	if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE,
			offsetof(struct btrfs_bio, bio),
			BIOSET_NEED_BVECS))
		return -ENOMEM;
	return 0;
}

void __cold btrfs_bioset_exit(void)
{
	bioset_exit(&btrfs_bioset);
}
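/*
 * Note on the front_pad passed to bioset_init() above: every bio allocated
 * from btrfs_bioset is embedded at the tail of a struct btrfs_bio, so the
 * containing structure can be recovered from the bio pointer. Illustrative
 * sketch of that recovery (the real helper is btrfs_bio() in volumes.h;
 * demo_* is a hypothetical name):
 */
#if 0
static struct btrfs_bio *demo_btrfs_bio(struct bio *bio)
{
	/* Valid only for bios allocated from btrfs_bioset. */
	return container_of(bio, struct btrfs_bio, bio);
}
#endif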