1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2007 Oracle. All rights reserved. 4 */ 5 6 #include <linux/sched.h> 7 #include <linux/sched/mm.h> 8 #include <linux/slab.h> 9 #include <linux/ratelimit.h> 10 #include <linux/kthread.h> 11 #include <linux/semaphore.h> 12 #include <linux/uuid.h> 13 #include <linux/list_sort.h> 14 #include <linux/namei.h> 15 #include "misc.h" 16 #include "ctree.h" 17 #include "extent_map.h" 18 #include "disk-io.h" 19 #include "transaction.h" 20 #include "print-tree.h" 21 #include "volumes.h" 22 #include "raid56.h" 23 #include "rcu-string.h" 24 #include "dev-replace.h" 25 #include "sysfs.h" 26 #include "tree-checker.h" 27 #include "space-info.h" 28 #include "block-group.h" 29 #include "discard.h" 30 #include "zoned.h" 31 #include "fs.h" 32 #include "accessors.h" 33 #include "uuid-tree.h" 34 #include "ioctl.h" 35 #include "relocation.h" 36 #include "scrub.h" 37 #include "super.h" 38 39 #define BTRFS_BLOCK_GROUP_STRIPE_MASK (BTRFS_BLOCK_GROUP_RAID0 | \ 40 BTRFS_BLOCK_GROUP_RAID10 | \ 41 BTRFS_BLOCK_GROUP_RAID56_MASK) 42 43 const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = { 44 [BTRFS_RAID_RAID10] = { 45 .sub_stripes = 2, 46 .dev_stripes = 1, 47 .devs_max = 0, /* 0 == as many as possible */ 48 .devs_min = 2, 49 .tolerated_failures = 1, 50 .devs_increment = 2, 51 .ncopies = 2, 52 .nparity = 0, 53 .raid_name = "raid10", 54 .bg_flag = BTRFS_BLOCK_GROUP_RAID10, 55 .mindev_error = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET, 56 }, 57 [BTRFS_RAID_RAID1] = { 58 .sub_stripes = 1, 59 .dev_stripes = 1, 60 .devs_max = 2, 61 .devs_min = 2, 62 .tolerated_failures = 1, 63 .devs_increment = 2, 64 .ncopies = 2, 65 .nparity = 0, 66 .raid_name = "raid1", 67 .bg_flag = BTRFS_BLOCK_GROUP_RAID1, 68 .mindev_error = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET, 69 }, 70 [BTRFS_RAID_RAID1C3] = { 71 .sub_stripes = 1, 72 .dev_stripes = 1, 73 .devs_max = 3, 74 .devs_min = 3, 75 .tolerated_failures = 2, 76 .devs_increment = 3, 77 .ncopies = 3, 78 .nparity = 0, 79 .raid_name = "raid1c3", 80 .bg_flag = BTRFS_BLOCK_GROUP_RAID1C3, 81 .mindev_error = BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET, 82 }, 83 [BTRFS_RAID_RAID1C4] = { 84 .sub_stripes = 1, 85 .dev_stripes = 1, 86 .devs_max = 4, 87 .devs_min = 4, 88 .tolerated_failures = 3, 89 .devs_increment = 4, 90 .ncopies = 4, 91 .nparity = 0, 92 .raid_name = "raid1c4", 93 .bg_flag = BTRFS_BLOCK_GROUP_RAID1C4, 94 .mindev_error = BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET, 95 }, 96 [BTRFS_RAID_DUP] = { 97 .sub_stripes = 1, 98 .dev_stripes = 2, 99 .devs_max = 1, 100 .devs_min = 1, 101 .tolerated_failures = 0, 102 .devs_increment = 1, 103 .ncopies = 2, 104 .nparity = 0, 105 .raid_name = "dup", 106 .bg_flag = BTRFS_BLOCK_GROUP_DUP, 107 .mindev_error = 0, 108 }, 109 [BTRFS_RAID_RAID0] = { 110 .sub_stripes = 1, 111 .dev_stripes = 1, 112 .devs_max = 0, 113 .devs_min = 1, 114 .tolerated_failures = 0, 115 .devs_increment = 1, 116 .ncopies = 1, 117 .nparity = 0, 118 .raid_name = "raid0", 119 .bg_flag = BTRFS_BLOCK_GROUP_RAID0, 120 .mindev_error = 0, 121 }, 122 [BTRFS_RAID_SINGLE] = { 123 .sub_stripes = 1, 124 .dev_stripes = 1, 125 .devs_max = 1, 126 .devs_min = 1, 127 .tolerated_failures = 0, 128 .devs_increment = 1, 129 .ncopies = 1, 130 .nparity = 0, 131 .raid_name = "single", 132 .bg_flag = 0, 133 .mindev_error = 0, 134 }, 135 [BTRFS_RAID_RAID5] = { 136 .sub_stripes = 1, 137 .dev_stripes = 1, 138 .devs_max = 0, 139 .devs_min = 2, 140 .tolerated_failures = 1, 141 .devs_increment = 1, 142 .ncopies = 1, 143 .nparity = 1, 144 .raid_name = "raid5", 145 .bg_flag = BTRFS_BLOCK_GROUP_RAID5, 146 .mindev_error = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET, 147 }, 148 [BTRFS_RAID_RAID6] = { 149 .sub_stripes = 1, 150 .dev_stripes = 1, 151 .devs_max = 0, 152 .devs_min = 3, 153 .tolerated_failures = 2, 154 .devs_increment = 1, 155 .ncopies = 1, 156 .nparity = 2, 157 .raid_name = "raid6", 158 .bg_flag = BTRFS_BLOCK_GROUP_RAID6, 159 .mindev_error = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET, 160 }, 161 }; 162 163 /* 164 * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which 165 * can be used as index to access btrfs_raid_array[]. 166 */ 167 enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags) 168 { 169 const u64 profile = (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK); 170 171 if (!profile) 172 return BTRFS_RAID_SINGLE; 173 174 return BTRFS_BG_FLAG_TO_INDEX(profile); 175 } 176 177 const char *btrfs_bg_type_to_raid_name(u64 flags) 178 { 179 const int index = btrfs_bg_flags_to_raid_index(flags); 180 181 if (index >= BTRFS_NR_RAID_TYPES) 182 return NULL; 183 184 return btrfs_raid_array[index].raid_name; 185 } 186 187 int btrfs_nr_parity_stripes(u64 type) 188 { 189 enum btrfs_raid_types index = btrfs_bg_flags_to_raid_index(type); 190 191 return btrfs_raid_array[index].nparity; 192 } 193 194 /* 195 * Fill @buf with textual description of @bg_flags, no more than @size_buf 196 * bytes including terminating null byte. 197 */ 198 void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf) 199 { 200 int i; 201 int ret; 202 char *bp = buf; 203 u64 flags = bg_flags; 204 u32 size_bp = size_buf; 205 206 if (!flags) { 207 strcpy(bp, "NONE"); 208 return; 209 } 210 211 #define DESCRIBE_FLAG(flag, desc) \ 212 do { \ 213 if (flags & (flag)) { \ 214 ret = snprintf(bp, size_bp, "%s|", (desc)); \ 215 if (ret < 0 || ret >= size_bp) \ 216 goto out_overflow; \ 217 size_bp -= ret; \ 218 bp += ret; \ 219 flags &= ~(flag); \ 220 } \ 221 } while (0) 222 223 DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data"); 224 DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system"); 225 DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata"); 226 227 DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single"); 228 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) 229 DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag, 230 btrfs_raid_array[i].raid_name); 231 #undef DESCRIBE_FLAG 232 233 if (flags) { 234 ret = snprintf(bp, size_bp, "0x%llx|", flags); 235 size_bp -= ret; 236 } 237 238 if (size_bp < size_buf) 239 buf[size_buf - size_bp - 1] = '\0'; /* remove last | */ 240 241 /* 242 * The text is trimmed, it's up to the caller to provide sufficiently 243 * large buffer 244 */ 245 out_overflow:; 246 } 247 248 static int init_first_rw_device(struct btrfs_trans_handle *trans); 249 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info); 250 static void btrfs_dev_stat_print_on_load(struct btrfs_device *device); 251 252 /* 253 * Device locking 254 * ============== 255 * 256 * There are several mutexes that protect manipulation of devices and low-level 257 * structures like chunks but not block groups, extents or files 258 * 259 * uuid_mutex (global lock) 260 * ------------------------ 261 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from 262 * the SCAN_DEV ioctl registration or from mount either implicitly (the first 263 * device) or requested by the device= mount option 264 * 265 * the mutex can be very coarse and can cover long-running operations 266 * 267 * protects: updates to fs_devices counters like missing devices, rw devices, 268 * seeding, structure cloning, opening/closing devices at mount/umount time 269 * 270 * global::fs_devs - add, remove, updates to the global list 271 * 272 * does not protect: manipulation of the fs_devices::devices list in general 273 * but in mount context it could be used to exclude list modifications by eg. 274 * scan ioctl 275 * 276 * btrfs_device::name - renames (write side), read is RCU 277 * 278 * fs_devices::device_list_mutex (per-fs, with RCU) 279 * ------------------------------------------------ 280 * protects updates to fs_devices::devices, ie. adding and deleting 281 * 282 * simple list traversal with read-only actions can be done with RCU protection 283 * 284 * may be used to exclude some operations from running concurrently without any 285 * modifications to the list (see write_all_supers) 286 * 287 * Is not required at mount and close times, because our device list is 288 * protected by the uuid_mutex at that point. 289 * 290 * balance_mutex 291 * ------------- 292 * protects balance structures (status, state) and context accessed from 293 * several places (internally, ioctl) 294 * 295 * chunk_mutex 296 * ----------- 297 * protects chunks, adding or removing during allocation, trim or when a new 298 * device is added/removed. Additionally it also protects post_commit_list of 299 * individual devices, since they can be added to the transaction's 300 * post_commit_list only with chunk_mutex held. 301 * 302 * cleaner_mutex 303 * ------------- 304 * a big lock that is held by the cleaner thread and prevents running subvolume 305 * cleaning together with relocation or delayed iputs 306 * 307 * 308 * Lock nesting 309 * ============ 310 * 311 * uuid_mutex 312 * device_list_mutex 313 * chunk_mutex 314 * balance_mutex 315 * 316 * 317 * Exclusive operations 318 * ==================== 319 * 320 * Maintains the exclusivity of the following operations that apply to the 321 * whole filesystem and cannot run in parallel. 322 * 323 * - Balance (*) 324 * - Device add 325 * - Device remove 326 * - Device replace (*) 327 * - Resize 328 * 329 * The device operations (as above) can be in one of the following states: 330 * 331 * - Running state 332 * - Paused state 333 * - Completed state 334 * 335 * Only device operations marked with (*) can go into the Paused state for the 336 * following reasons: 337 * 338 * - ioctl (only Balance can be Paused through ioctl) 339 * - filesystem remounted as read-only 340 * - filesystem unmounted and mounted as read-only 341 * - system power-cycle and filesystem mounted as read-only 342 * - filesystem or device errors leading to forced read-only 343 * 344 * The status of exclusive operation is set and cleared atomically. 345 * During the course of Paused state, fs_info::exclusive_operation remains set. 346 * A device operation in Paused or Running state can be canceled or resumed 347 * either by ioctl (Balance only) or when remounted as read-write. 348 * The exclusive status is cleared when the device operation is canceled or 349 * completed. 350 */ 351 352 DEFINE_MUTEX(uuid_mutex); 353 static LIST_HEAD(fs_uuids); 354 struct list_head * __attribute_const__ btrfs_get_fs_uuids(void) 355 { 356 return &fs_uuids; 357 } 358 359 /* 360 * alloc_fs_devices - allocate struct btrfs_fs_devices 361 * @fsid: if not NULL, copy the UUID to fs_devices::fsid 362 * @metadata_fsid: if not NULL, copy the UUID to fs_devices::metadata_fsid 363 * 364 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR(). 365 * The returned struct is not linked onto any lists and can be destroyed with 366 * kfree() right away. 367 */ 368 static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid, 369 const u8 *metadata_fsid) 370 { 371 struct btrfs_fs_devices *fs_devs; 372 373 ASSERT(fsid || !metadata_fsid); 374 375 fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL); 376 if (!fs_devs) 377 return ERR_PTR(-ENOMEM); 378 379 mutex_init(&fs_devs->device_list_mutex); 380 381 INIT_LIST_HEAD(&fs_devs->devices); 382 INIT_LIST_HEAD(&fs_devs->alloc_list); 383 INIT_LIST_HEAD(&fs_devs->fs_list); 384 INIT_LIST_HEAD(&fs_devs->seed_list); 385 386 if (fsid) { 387 memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE); 388 memcpy(fs_devs->metadata_uuid, 389 metadata_fsid ?: fsid, BTRFS_FSID_SIZE); 390 } 391 392 return fs_devs; 393 } 394 395 static void btrfs_free_device(struct btrfs_device *device) 396 { 397 WARN_ON(!list_empty(&device->post_commit_list)); 398 rcu_string_free(device->name); 399 extent_io_tree_release(&device->alloc_state); 400 btrfs_destroy_dev_zone_info(device); 401 kfree(device); 402 } 403 404 static void free_fs_devices(struct btrfs_fs_devices *fs_devices) 405 { 406 struct btrfs_device *device; 407 408 WARN_ON(fs_devices->opened); 409 while (!list_empty(&fs_devices->devices)) { 410 device = list_entry(fs_devices->devices.next, 411 struct btrfs_device, dev_list); 412 list_del(&device->dev_list); 413 btrfs_free_device(device); 414 } 415 kfree(fs_devices); 416 } 417 418 void __exit btrfs_cleanup_fs_uuids(void) 419 { 420 struct btrfs_fs_devices *fs_devices; 421 422 while (!list_empty(&fs_uuids)) { 423 fs_devices = list_entry(fs_uuids.next, 424 struct btrfs_fs_devices, fs_list); 425 list_del(&fs_devices->fs_list); 426 free_fs_devices(fs_devices); 427 } 428 } 429 430 static bool match_fsid_fs_devices(const struct btrfs_fs_devices *fs_devices, 431 const u8 *fsid, const u8 *metadata_fsid) 432 { 433 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) != 0) 434 return false; 435 436 if (!metadata_fsid) 437 return true; 438 439 if (memcmp(metadata_fsid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE) != 0) 440 return false; 441 442 return true; 443 } 444 445 static noinline struct btrfs_fs_devices *find_fsid( 446 const u8 *fsid, const u8 *metadata_fsid) 447 { 448 struct btrfs_fs_devices *fs_devices; 449 450 ASSERT(fsid); 451 452 /* Handle non-split brain cases */ 453 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 454 if (match_fsid_fs_devices(fs_devices, fsid, metadata_fsid)) 455 return fs_devices; 456 } 457 return NULL; 458 } 459 460 /* 461 * First check if the metadata_uuid is different from the fsid in the given 462 * fs_devices. Then check if the given fsid is the same as the metadata_uuid 463 * in the fs_devices. If it is, return true; otherwise, return false. 464 */ 465 static inline bool check_fsid_changed(const struct btrfs_fs_devices *fs_devices, 466 const u8 *fsid) 467 { 468 return memcmp(fs_devices->fsid, fs_devices->metadata_uuid, 469 BTRFS_FSID_SIZE) != 0 && 470 memcmp(fs_devices->metadata_uuid, fsid, BTRFS_FSID_SIZE) == 0; 471 } 472 473 static struct btrfs_fs_devices *find_fsid_with_metadata_uuid( 474 struct btrfs_super_block *disk_super) 475 { 476 477 struct btrfs_fs_devices *fs_devices; 478 479 /* 480 * Handle scanned device having completed its fsid change but 481 * belonging to a fs_devices that was created by first scanning 482 * a device which didn't have its fsid/metadata_uuid changed 483 * at all and the CHANGING_FSID_V2 flag set. 484 */ 485 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 486 if (!fs_devices->fsid_change) 487 continue; 488 489 if (match_fsid_fs_devices(fs_devices, disk_super->metadata_uuid, 490 fs_devices->fsid)) 491 return fs_devices; 492 } 493 494 /* 495 * Handle scanned device having completed its fsid change but 496 * belonging to a fs_devices that was created by a device that 497 * has an outdated pair of fsid/metadata_uuid and 498 * CHANGING_FSID_V2 flag set. 499 */ 500 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 501 if (!fs_devices->fsid_change) 502 continue; 503 504 if (check_fsid_changed(fs_devices, disk_super->metadata_uuid)) 505 return fs_devices; 506 } 507 508 return find_fsid(disk_super->fsid, disk_super->metadata_uuid); 509 } 510 511 512 static int 513 btrfs_get_bdev_and_sb(const char *device_path, blk_mode_t flags, void *holder, 514 int flush, struct block_device **bdev, 515 struct btrfs_super_block **disk_super) 516 { 517 int ret; 518 519 *bdev = blkdev_get_by_path(device_path, flags, holder, NULL); 520 521 if (IS_ERR(*bdev)) { 522 ret = PTR_ERR(*bdev); 523 goto error; 524 } 525 526 if (flush) 527 sync_blockdev(*bdev); 528 ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE); 529 if (ret) { 530 blkdev_put(*bdev, holder); 531 goto error; 532 } 533 invalidate_bdev(*bdev); 534 *disk_super = btrfs_read_dev_super(*bdev); 535 if (IS_ERR(*disk_super)) { 536 ret = PTR_ERR(*disk_super); 537 blkdev_put(*bdev, holder); 538 goto error; 539 } 540 541 return 0; 542 543 error: 544 *bdev = NULL; 545 return ret; 546 } 547 548 /* 549 * Search and remove all stale devices (which are not mounted). When both 550 * inputs are NULL, it will search and release all stale devices. 551 * 552 * @devt: Optional. When provided will it release all unmounted devices 553 * matching this devt only. 554 * @skip_device: Optional. Will skip this device when searching for the stale 555 * devices. 556 * 557 * Return: 0 for success or if @devt is 0. 558 * -EBUSY if @devt is a mounted device. 559 * -ENOENT if @devt does not match any device in the list. 560 */ 561 static int btrfs_free_stale_devices(dev_t devt, struct btrfs_device *skip_device) 562 { 563 struct btrfs_fs_devices *fs_devices, *tmp_fs_devices; 564 struct btrfs_device *device, *tmp_device; 565 int ret = 0; 566 567 lockdep_assert_held(&uuid_mutex); 568 569 if (devt) 570 ret = -ENOENT; 571 572 list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) { 573 574 mutex_lock(&fs_devices->device_list_mutex); 575 list_for_each_entry_safe(device, tmp_device, 576 &fs_devices->devices, dev_list) { 577 if (skip_device && skip_device == device) 578 continue; 579 if (devt && devt != device->devt) 580 continue; 581 if (fs_devices->opened) { 582 /* for an already deleted device return 0 */ 583 if (devt && ret != 0) 584 ret = -EBUSY; 585 break; 586 } 587 588 /* delete the stale device */ 589 fs_devices->num_devices--; 590 list_del(&device->dev_list); 591 btrfs_free_device(device); 592 593 ret = 0; 594 } 595 mutex_unlock(&fs_devices->device_list_mutex); 596 597 if (fs_devices->num_devices == 0) { 598 btrfs_sysfs_remove_fsid(fs_devices); 599 list_del(&fs_devices->fs_list); 600 free_fs_devices(fs_devices); 601 } 602 } 603 604 return ret; 605 } 606 607 /* 608 * This is only used on mount, and we are protected from competing things 609 * messing with our fs_devices by the uuid_mutex, thus we do not need the 610 * fs_devices->device_list_mutex here. 611 */ 612 static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices, 613 struct btrfs_device *device, blk_mode_t flags, 614 void *holder) 615 { 616 struct block_device *bdev; 617 struct btrfs_super_block *disk_super; 618 u64 devid; 619 int ret; 620 621 if (device->bdev) 622 return -EINVAL; 623 if (!device->name) 624 return -EINVAL; 625 626 ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1, 627 &bdev, &disk_super); 628 if (ret) 629 return ret; 630 631 devid = btrfs_stack_device_id(&disk_super->dev_item); 632 if (devid != device->devid) 633 goto error_free_page; 634 635 if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE)) 636 goto error_free_page; 637 638 device->generation = btrfs_super_generation(disk_super); 639 640 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) { 641 if (btrfs_super_incompat_flags(disk_super) & 642 BTRFS_FEATURE_INCOMPAT_METADATA_UUID) { 643 pr_err( 644 "BTRFS: Invalid seeding and uuid-changed device detected\n"); 645 goto error_free_page; 646 } 647 648 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 649 fs_devices->seeding = true; 650 } else { 651 if (bdev_read_only(bdev)) 652 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 653 else 654 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 655 } 656 657 if (!bdev_nonrot(bdev)) 658 fs_devices->rotating = true; 659 660 if (bdev_max_discard_sectors(bdev)) 661 fs_devices->discardable = true; 662 663 device->bdev = bdev; 664 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 665 device->holder = holder; 666 667 fs_devices->open_devices++; 668 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 669 device->devid != BTRFS_DEV_REPLACE_DEVID) { 670 fs_devices->rw_devices++; 671 list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list); 672 } 673 btrfs_release_disk_super(disk_super); 674 675 return 0; 676 677 error_free_page: 678 btrfs_release_disk_super(disk_super); 679 blkdev_put(bdev, holder); 680 681 return -EINVAL; 682 } 683 684 u8 *btrfs_sb_fsid_ptr(struct btrfs_super_block *sb) 685 { 686 bool has_metadata_uuid = (btrfs_super_incompat_flags(sb) & 687 BTRFS_FEATURE_INCOMPAT_METADATA_UUID); 688 689 return has_metadata_uuid ? sb->metadata_uuid : sb->fsid; 690 } 691 692 static bool is_same_device(struct btrfs_device *device, const char *new_path) 693 { 694 struct path old = { .mnt = NULL, .dentry = NULL }; 695 struct path new = { .mnt = NULL, .dentry = NULL }; 696 char *old_path = NULL; 697 bool is_same = false; 698 int ret; 699 700 if (!device->name) 701 goto out; 702 703 old_path = kzalloc(PATH_MAX, GFP_NOFS); 704 if (!old_path) 705 goto out; 706 707 rcu_read_lock(); 708 ret = strscpy(old_path, rcu_str_deref(device->name), PATH_MAX); 709 rcu_read_unlock(); 710 if (ret < 0) 711 goto out; 712 713 ret = kern_path(old_path, LOOKUP_FOLLOW, &old); 714 if (ret) 715 goto out; 716 ret = kern_path(new_path, LOOKUP_FOLLOW, &new); 717 if (ret) 718 goto out; 719 if (path_equal(&old, &new)) 720 is_same = true; 721 out: 722 kfree(old_path); 723 path_put(&old); 724 path_put(&new); 725 return is_same; 726 } 727 728 /* 729 * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices 730 * being created with a disk that has already completed its fsid change. Such 731 * disk can belong to an fs which has its FSID changed or to one which doesn't. 732 * Handle both cases here. 733 */ 734 static struct btrfs_fs_devices *find_fsid_inprogress( 735 struct btrfs_super_block *disk_super) 736 { 737 struct btrfs_fs_devices *fs_devices; 738 739 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 740 if (fs_devices->fsid_change) 741 continue; 742 743 if (check_fsid_changed(fs_devices, disk_super->fsid)) 744 return fs_devices; 745 } 746 747 return find_fsid(disk_super->fsid, NULL); 748 } 749 750 static struct btrfs_fs_devices *find_fsid_changed( 751 struct btrfs_super_block *disk_super) 752 { 753 struct btrfs_fs_devices *fs_devices; 754 755 /* 756 * Handles the case where scanned device is part of an fs that had 757 * multiple successful changes of FSID but currently device didn't 758 * observe it. Meaning our fsid will be different than theirs. We need 759 * to handle two subcases : 760 * 1 - The fs still continues to have different METADATA/FSID uuids. 761 * 2 - The fs is switched back to its original FSID (METADATA/FSID 762 * are equal). 763 */ 764 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 765 /* Changed UUIDs */ 766 if (check_fsid_changed(fs_devices, disk_super->metadata_uuid) && 767 memcmp(fs_devices->fsid, disk_super->fsid, 768 BTRFS_FSID_SIZE) != 0) 769 return fs_devices; 770 771 /* Unchanged UUIDs */ 772 if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid, 773 BTRFS_FSID_SIZE) == 0 && 774 memcmp(fs_devices->fsid, disk_super->metadata_uuid, 775 BTRFS_FSID_SIZE) == 0) 776 return fs_devices; 777 } 778 779 return NULL; 780 } 781 782 static struct btrfs_fs_devices *find_fsid_reverted_metadata( 783 struct btrfs_super_block *disk_super) 784 { 785 struct btrfs_fs_devices *fs_devices; 786 787 /* 788 * Handle the case where the scanned device is part of an fs whose last 789 * metadata UUID change reverted it to the original FSID. At the same 790 * time fs_devices was first created by another constituent device 791 * which didn't fully observe the operation. This results in an 792 * btrfs_fs_devices created with metadata/fsid different AND 793 * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the 794 * fs_devices equal to the FSID of the disk. 795 */ 796 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 797 if (!fs_devices->fsid_change) 798 continue; 799 800 if (check_fsid_changed(fs_devices, disk_super->fsid)) 801 return fs_devices; 802 } 803 804 return NULL; 805 } 806 /* 807 * Add new device to list of registered devices 808 * 809 * Returns: 810 * device pointer which was just added or updated when successful 811 * error pointer when failed 812 */ 813 static noinline struct btrfs_device *device_list_add(const char *path, 814 struct btrfs_super_block *disk_super, 815 bool *new_device_added) 816 { 817 struct btrfs_device *device; 818 struct btrfs_fs_devices *fs_devices = NULL; 819 struct rcu_string *name; 820 u64 found_transid = btrfs_super_generation(disk_super); 821 u64 devid = btrfs_stack_device_id(&disk_super->dev_item); 822 dev_t path_devt; 823 int error; 824 bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) & 825 BTRFS_FEATURE_INCOMPAT_METADATA_UUID); 826 bool fsid_change_in_progress = (btrfs_super_flags(disk_super) & 827 BTRFS_SUPER_FLAG_CHANGING_FSID_V2); 828 829 error = lookup_bdev(path, &path_devt); 830 if (error) { 831 btrfs_err(NULL, "failed to lookup block device for path %s: %d", 832 path, error); 833 return ERR_PTR(error); 834 } 835 836 if (fsid_change_in_progress) { 837 if (!has_metadata_uuid) 838 fs_devices = find_fsid_inprogress(disk_super); 839 else 840 fs_devices = find_fsid_changed(disk_super); 841 } else if (has_metadata_uuid) { 842 fs_devices = find_fsid_with_metadata_uuid(disk_super); 843 } else { 844 fs_devices = find_fsid_reverted_metadata(disk_super); 845 if (!fs_devices) 846 fs_devices = find_fsid(disk_super->fsid, NULL); 847 } 848 849 850 if (!fs_devices) { 851 fs_devices = alloc_fs_devices(disk_super->fsid, 852 has_metadata_uuid ? disk_super->metadata_uuid : NULL); 853 if (IS_ERR(fs_devices)) 854 return ERR_CAST(fs_devices); 855 856 fs_devices->fsid_change = fsid_change_in_progress; 857 858 mutex_lock(&fs_devices->device_list_mutex); 859 list_add(&fs_devices->fs_list, &fs_uuids); 860 861 device = NULL; 862 } else { 863 struct btrfs_dev_lookup_args args = { 864 .devid = devid, 865 .uuid = disk_super->dev_item.uuid, 866 }; 867 868 mutex_lock(&fs_devices->device_list_mutex); 869 device = btrfs_find_device(fs_devices, &args); 870 871 /* 872 * If this disk has been pulled into an fs devices created by 873 * a device which had the CHANGING_FSID_V2 flag then replace the 874 * metadata_uuid/fsid values of the fs_devices. 875 */ 876 if (fs_devices->fsid_change && 877 found_transid > fs_devices->latest_generation) { 878 memcpy(fs_devices->fsid, disk_super->fsid, 879 BTRFS_FSID_SIZE); 880 memcpy(fs_devices->metadata_uuid, 881 btrfs_sb_fsid_ptr(disk_super), BTRFS_FSID_SIZE); 882 fs_devices->fsid_change = false; 883 } 884 } 885 886 if (!device) { 887 unsigned int nofs_flag; 888 889 if (fs_devices->opened) { 890 btrfs_err(NULL, 891 "device %s belongs to fsid %pU, and the fs is already mounted, scanned by %s (%d)", 892 path, fs_devices->fsid, current->comm, 893 task_pid_nr(current)); 894 mutex_unlock(&fs_devices->device_list_mutex); 895 return ERR_PTR(-EBUSY); 896 } 897 898 nofs_flag = memalloc_nofs_save(); 899 device = btrfs_alloc_device(NULL, &devid, 900 disk_super->dev_item.uuid, path); 901 memalloc_nofs_restore(nofs_flag); 902 if (IS_ERR(device)) { 903 mutex_unlock(&fs_devices->device_list_mutex); 904 /* we can safely leave the fs_devices entry around */ 905 return device; 906 } 907 908 device->devt = path_devt; 909 910 list_add_rcu(&device->dev_list, &fs_devices->devices); 911 fs_devices->num_devices++; 912 913 device->fs_devices = fs_devices; 914 *new_device_added = true; 915 916 if (disk_super->label[0]) 917 pr_info( 918 "BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n", 919 disk_super->label, devid, found_transid, path, 920 current->comm, task_pid_nr(current)); 921 else 922 pr_info( 923 "BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n", 924 disk_super->fsid, devid, found_transid, path, 925 current->comm, task_pid_nr(current)); 926 927 } else if (!device->name || !is_same_device(device, path)) { 928 /* 929 * When FS is already mounted. 930 * 1. If you are here and if the device->name is NULL that 931 * means this device was missing at time of FS mount. 932 * 2. If you are here and if the device->name is different 933 * from 'path' that means either 934 * a. The same device disappeared and reappeared with 935 * different name. or 936 * b. The missing-disk-which-was-replaced, has 937 * reappeared now. 938 * 939 * We must allow 1 and 2a above. But 2b would be a spurious 940 * and unintentional. 941 * 942 * Further in case of 1 and 2a above, the disk at 'path' 943 * would have missed some transaction when it was away and 944 * in case of 2a the stale bdev has to be updated as well. 945 * 2b must not be allowed at all time. 946 */ 947 948 /* 949 * For now, we do allow update to btrfs_fs_device through the 950 * btrfs dev scan cli after FS has been mounted. We're still 951 * tracking a problem where systems fail mount by subvolume id 952 * when we reject replacement on a mounted FS. 953 */ 954 if (!fs_devices->opened && found_transid < device->generation) { 955 /* 956 * That is if the FS is _not_ mounted and if you 957 * are here, that means there is more than one 958 * disk with same uuid and devid.We keep the one 959 * with larger generation number or the last-in if 960 * generation are equal. 961 */ 962 mutex_unlock(&fs_devices->device_list_mutex); 963 btrfs_err(NULL, 964 "device %s already registered with a higher generation, found %llu expect %llu", 965 path, found_transid, device->generation); 966 return ERR_PTR(-EEXIST); 967 } 968 969 /* 970 * We are going to replace the device path for a given devid, 971 * make sure it's the same device if the device is mounted 972 * 973 * NOTE: the device->fs_info may not be reliable here so pass 974 * in a NULL to message helpers instead. This avoids a possible 975 * use-after-free when the fs_info and fs_info->sb are already 976 * torn down. 977 */ 978 if (device->bdev) { 979 if (device->devt != path_devt) { 980 mutex_unlock(&fs_devices->device_list_mutex); 981 btrfs_warn_in_rcu(NULL, 982 "duplicate device %s devid %llu generation %llu scanned by %s (%d)", 983 path, devid, found_transid, 984 current->comm, 985 task_pid_nr(current)); 986 return ERR_PTR(-EEXIST); 987 } 988 btrfs_info_in_rcu(NULL, 989 "devid %llu device path %s changed to %s scanned by %s (%d)", 990 devid, btrfs_dev_name(device), 991 path, current->comm, 992 task_pid_nr(current)); 993 } 994 995 name = rcu_string_strdup(path, GFP_NOFS); 996 if (!name) { 997 mutex_unlock(&fs_devices->device_list_mutex); 998 return ERR_PTR(-ENOMEM); 999 } 1000 rcu_string_free(device->name); 1001 rcu_assign_pointer(device->name, name); 1002 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { 1003 fs_devices->missing_devices--; 1004 clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 1005 } 1006 device->devt = path_devt; 1007 } 1008 1009 /* 1010 * Unmount does not free the btrfs_device struct but would zero 1011 * generation along with most of the other members. So just update 1012 * it back. We need it to pick the disk with largest generation 1013 * (as above). 1014 */ 1015 if (!fs_devices->opened) { 1016 device->generation = found_transid; 1017 fs_devices->latest_generation = max_t(u64, found_transid, 1018 fs_devices->latest_generation); 1019 } 1020 1021 fs_devices->total_devices = btrfs_super_num_devices(disk_super); 1022 1023 mutex_unlock(&fs_devices->device_list_mutex); 1024 return device; 1025 } 1026 1027 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig) 1028 { 1029 struct btrfs_fs_devices *fs_devices; 1030 struct btrfs_device *device; 1031 struct btrfs_device *orig_dev; 1032 int ret = 0; 1033 1034 lockdep_assert_held(&uuid_mutex); 1035 1036 fs_devices = alloc_fs_devices(orig->fsid, NULL); 1037 if (IS_ERR(fs_devices)) 1038 return fs_devices; 1039 1040 fs_devices->total_devices = orig->total_devices; 1041 1042 list_for_each_entry(orig_dev, &orig->devices, dev_list) { 1043 const char *dev_path = NULL; 1044 1045 /* 1046 * This is ok to do without RCU read locked because we hold the 1047 * uuid mutex so nothing we touch in here is going to disappear. 1048 */ 1049 if (orig_dev->name) 1050 dev_path = orig_dev->name->str; 1051 1052 device = btrfs_alloc_device(NULL, &orig_dev->devid, 1053 orig_dev->uuid, dev_path); 1054 if (IS_ERR(device)) { 1055 ret = PTR_ERR(device); 1056 goto error; 1057 } 1058 1059 if (orig_dev->zone_info) { 1060 struct btrfs_zoned_device_info *zone_info; 1061 1062 zone_info = btrfs_clone_dev_zone_info(orig_dev); 1063 if (!zone_info) { 1064 btrfs_free_device(device); 1065 ret = -ENOMEM; 1066 goto error; 1067 } 1068 device->zone_info = zone_info; 1069 } 1070 1071 list_add(&device->dev_list, &fs_devices->devices); 1072 device->fs_devices = fs_devices; 1073 fs_devices->num_devices++; 1074 } 1075 return fs_devices; 1076 error: 1077 free_fs_devices(fs_devices); 1078 return ERR_PTR(ret); 1079 } 1080 1081 static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, 1082 struct btrfs_device **latest_dev) 1083 { 1084 struct btrfs_device *device, *next; 1085 1086 /* This is the initialized path, it is safe to release the devices. */ 1087 list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) { 1088 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) { 1089 if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, 1090 &device->dev_state) && 1091 !test_bit(BTRFS_DEV_STATE_MISSING, 1092 &device->dev_state) && 1093 (!*latest_dev || 1094 device->generation > (*latest_dev)->generation)) { 1095 *latest_dev = device; 1096 } 1097 continue; 1098 } 1099 1100 /* 1101 * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID, 1102 * in btrfs_init_dev_replace() so just continue. 1103 */ 1104 if (device->devid == BTRFS_DEV_REPLACE_DEVID) 1105 continue; 1106 1107 if (device->bdev) { 1108 blkdev_put(device->bdev, device->holder); 1109 device->bdev = NULL; 1110 fs_devices->open_devices--; 1111 } 1112 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 1113 list_del_init(&device->dev_alloc_list); 1114 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 1115 fs_devices->rw_devices--; 1116 } 1117 list_del_init(&device->dev_list); 1118 fs_devices->num_devices--; 1119 btrfs_free_device(device); 1120 } 1121 1122 } 1123 1124 /* 1125 * After we have read the system tree and know devids belonging to this 1126 * filesystem, remove the device which does not belong there. 1127 */ 1128 void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices) 1129 { 1130 struct btrfs_device *latest_dev = NULL; 1131 struct btrfs_fs_devices *seed_dev; 1132 1133 mutex_lock(&uuid_mutex); 1134 __btrfs_free_extra_devids(fs_devices, &latest_dev); 1135 1136 list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list) 1137 __btrfs_free_extra_devids(seed_dev, &latest_dev); 1138 1139 fs_devices->latest_dev = latest_dev; 1140 1141 mutex_unlock(&uuid_mutex); 1142 } 1143 1144 static void btrfs_close_bdev(struct btrfs_device *device) 1145 { 1146 if (!device->bdev) 1147 return; 1148 1149 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 1150 sync_blockdev(device->bdev); 1151 invalidate_bdev(device->bdev); 1152 } 1153 1154 blkdev_put(device->bdev, device->holder); 1155 } 1156 1157 static void btrfs_close_one_device(struct btrfs_device *device) 1158 { 1159 struct btrfs_fs_devices *fs_devices = device->fs_devices; 1160 1161 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 1162 device->devid != BTRFS_DEV_REPLACE_DEVID) { 1163 list_del_init(&device->dev_alloc_list); 1164 fs_devices->rw_devices--; 1165 } 1166 1167 if (device->devid == BTRFS_DEV_REPLACE_DEVID) 1168 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 1169 1170 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { 1171 clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 1172 fs_devices->missing_devices--; 1173 } 1174 1175 btrfs_close_bdev(device); 1176 if (device->bdev) { 1177 fs_devices->open_devices--; 1178 device->bdev = NULL; 1179 } 1180 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 1181 btrfs_destroy_dev_zone_info(device); 1182 1183 device->fs_info = NULL; 1184 atomic_set(&device->dev_stats_ccnt, 0); 1185 extent_io_tree_release(&device->alloc_state); 1186 1187 /* 1188 * Reset the flush error record. We might have a transient flush error 1189 * in this mount, and if so we aborted the current transaction and set 1190 * the fs to an error state, guaranteeing no super blocks can be further 1191 * committed. However that error might be transient and if we unmount the 1192 * filesystem and mount it again, we should allow the mount to succeed 1193 * (btrfs_check_rw_degradable() should not fail) - if after mounting the 1194 * filesystem again we still get flush errors, then we will again abort 1195 * any transaction and set the error state, guaranteeing no commits of 1196 * unsafe super blocks. 1197 */ 1198 device->last_flush_error = 0; 1199 1200 /* Verify the device is back in a pristine state */ 1201 WARN_ON(test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state)); 1202 WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)); 1203 WARN_ON(!list_empty(&device->dev_alloc_list)); 1204 WARN_ON(!list_empty(&device->post_commit_list)); 1205 } 1206 1207 static void close_fs_devices(struct btrfs_fs_devices *fs_devices) 1208 { 1209 struct btrfs_device *device, *tmp; 1210 1211 lockdep_assert_held(&uuid_mutex); 1212 1213 if (--fs_devices->opened > 0) 1214 return; 1215 1216 list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) 1217 btrfs_close_one_device(device); 1218 1219 WARN_ON(fs_devices->open_devices); 1220 WARN_ON(fs_devices->rw_devices); 1221 fs_devices->opened = 0; 1222 fs_devices->seeding = false; 1223 fs_devices->fs_info = NULL; 1224 } 1225 1226 void btrfs_close_devices(struct btrfs_fs_devices *fs_devices) 1227 { 1228 LIST_HEAD(list); 1229 struct btrfs_fs_devices *tmp; 1230 1231 mutex_lock(&uuid_mutex); 1232 close_fs_devices(fs_devices); 1233 if (!fs_devices->opened) { 1234 list_splice_init(&fs_devices->seed_list, &list); 1235 1236 /* 1237 * If the struct btrfs_fs_devices is not assembled with any 1238 * other device, it can be re-initialized during the next mount 1239 * without the needing device-scan step. Therefore, it can be 1240 * fully freed. 1241 */ 1242 if (fs_devices->num_devices == 1) { 1243 list_del(&fs_devices->fs_list); 1244 free_fs_devices(fs_devices); 1245 } 1246 } 1247 1248 1249 list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) { 1250 close_fs_devices(fs_devices); 1251 list_del(&fs_devices->seed_list); 1252 free_fs_devices(fs_devices); 1253 } 1254 mutex_unlock(&uuid_mutex); 1255 } 1256 1257 static int open_fs_devices(struct btrfs_fs_devices *fs_devices, 1258 blk_mode_t flags, void *holder) 1259 { 1260 struct btrfs_device *device; 1261 struct btrfs_device *latest_dev = NULL; 1262 struct btrfs_device *tmp_device; 1263 int ret = 0; 1264 1265 list_for_each_entry_safe(device, tmp_device, &fs_devices->devices, 1266 dev_list) { 1267 int ret2; 1268 1269 ret2 = btrfs_open_one_device(fs_devices, device, flags, holder); 1270 if (ret2 == 0 && 1271 (!latest_dev || device->generation > latest_dev->generation)) { 1272 latest_dev = device; 1273 } else if (ret2 == -ENODATA) { 1274 fs_devices->num_devices--; 1275 list_del(&device->dev_list); 1276 btrfs_free_device(device); 1277 } 1278 if (ret == 0 && ret2 != 0) 1279 ret = ret2; 1280 } 1281 1282 if (fs_devices->open_devices == 0) { 1283 if (ret) 1284 return ret; 1285 return -EINVAL; 1286 } 1287 1288 fs_devices->opened = 1; 1289 fs_devices->latest_dev = latest_dev; 1290 fs_devices->total_rw_bytes = 0; 1291 fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR; 1292 fs_devices->read_policy = BTRFS_READ_POLICY_PID; 1293 1294 return 0; 1295 } 1296 1297 static int devid_cmp(void *priv, const struct list_head *a, 1298 const struct list_head *b) 1299 { 1300 const struct btrfs_device *dev1, *dev2; 1301 1302 dev1 = list_entry(a, struct btrfs_device, dev_list); 1303 dev2 = list_entry(b, struct btrfs_device, dev_list); 1304 1305 if (dev1->devid < dev2->devid) 1306 return -1; 1307 else if (dev1->devid > dev2->devid) 1308 return 1; 1309 return 0; 1310 } 1311 1312 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, 1313 blk_mode_t flags, void *holder) 1314 { 1315 int ret; 1316 1317 lockdep_assert_held(&uuid_mutex); 1318 /* 1319 * The device_list_mutex cannot be taken here in case opening the 1320 * underlying device takes further locks like open_mutex. 1321 * 1322 * We also don't need the lock here as this is called during mount and 1323 * exclusion is provided by uuid_mutex 1324 */ 1325 1326 if (fs_devices->opened) { 1327 fs_devices->opened++; 1328 ret = 0; 1329 } else { 1330 list_sort(NULL, &fs_devices->devices, devid_cmp); 1331 ret = open_fs_devices(fs_devices, flags, holder); 1332 } 1333 1334 return ret; 1335 } 1336 1337 void btrfs_release_disk_super(struct btrfs_super_block *super) 1338 { 1339 struct page *page = virt_to_page(super); 1340 1341 put_page(page); 1342 } 1343 1344 static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev, 1345 u64 bytenr, u64 bytenr_orig) 1346 { 1347 struct btrfs_super_block *disk_super; 1348 struct page *page; 1349 void *p; 1350 pgoff_t index; 1351 1352 /* make sure our super fits in the device */ 1353 if (bytenr + PAGE_SIZE >= bdev_nr_bytes(bdev)) 1354 return ERR_PTR(-EINVAL); 1355 1356 /* make sure our super fits in the page */ 1357 if (sizeof(*disk_super) > PAGE_SIZE) 1358 return ERR_PTR(-EINVAL); 1359 1360 /* make sure our super doesn't straddle pages on disk */ 1361 index = bytenr >> PAGE_SHIFT; 1362 if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index) 1363 return ERR_PTR(-EINVAL); 1364 1365 /* pull in the page with our super */ 1366 page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL); 1367 1368 if (IS_ERR(page)) 1369 return ERR_CAST(page); 1370 1371 p = page_address(page); 1372 1373 /* align our pointer to the offset of the super block */ 1374 disk_super = p + offset_in_page(bytenr); 1375 1376 if (btrfs_super_bytenr(disk_super) != bytenr_orig || 1377 btrfs_super_magic(disk_super) != BTRFS_MAGIC) { 1378 btrfs_release_disk_super(p); 1379 return ERR_PTR(-EINVAL); 1380 } 1381 1382 if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1]) 1383 disk_super->label[BTRFS_LABEL_SIZE - 1] = 0; 1384 1385 return disk_super; 1386 } 1387 1388 int btrfs_forget_devices(dev_t devt) 1389 { 1390 int ret; 1391 1392 mutex_lock(&uuid_mutex); 1393 ret = btrfs_free_stale_devices(devt, NULL); 1394 mutex_unlock(&uuid_mutex); 1395 1396 return ret; 1397 } 1398 1399 /* 1400 * Look for a btrfs signature on a device. This may be called out of the mount path 1401 * and we are not allowed to call set_blocksize during the scan. The superblock 1402 * is read via pagecache 1403 */ 1404 struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags) 1405 { 1406 struct btrfs_super_block *disk_super; 1407 bool new_device_added = false; 1408 struct btrfs_device *device = NULL; 1409 struct block_device *bdev; 1410 u64 bytenr, bytenr_orig; 1411 int ret; 1412 1413 lockdep_assert_held(&uuid_mutex); 1414 1415 /* 1416 * we would like to check all the supers, but that would make 1417 * a btrfs mount succeed after a mkfs from a different FS. 1418 * So, we need to add a special mount option to scan for 1419 * later supers, using BTRFS_SUPER_MIRROR_MAX instead 1420 */ 1421 1422 /* 1423 * Avoid an exclusive open here, as the systemd-udev may initiate the 1424 * device scan which may race with the user's mount or mkfs command, 1425 * resulting in failure. 1426 * Since the device scan is solely for reading purposes, there is no 1427 * need for an exclusive open. Additionally, the devices are read again 1428 * during the mount process. It is ok to get some inconsistent 1429 * values temporarily, as the device paths of the fsid are the only 1430 * required information for assembling the volume. 1431 */ 1432 bdev = blkdev_get_by_path(path, flags, NULL, NULL); 1433 if (IS_ERR(bdev)) 1434 return ERR_CAST(bdev); 1435 1436 bytenr_orig = btrfs_sb_offset(0); 1437 ret = btrfs_sb_log_location_bdev(bdev, 0, READ, &bytenr); 1438 if (ret) { 1439 device = ERR_PTR(ret); 1440 goto error_bdev_put; 1441 } 1442 1443 disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr_orig); 1444 if (IS_ERR(disk_super)) { 1445 device = ERR_CAST(disk_super); 1446 goto error_bdev_put; 1447 } 1448 1449 device = device_list_add(path, disk_super, &new_device_added); 1450 if (!IS_ERR(device) && new_device_added) 1451 btrfs_free_stale_devices(device->devt, device); 1452 1453 btrfs_release_disk_super(disk_super); 1454 1455 error_bdev_put: 1456 blkdev_put(bdev, NULL); 1457 1458 return device; 1459 } 1460 1461 /* 1462 * Try to find a chunk that intersects [start, start + len] range and when one 1463 * such is found, record the end of it in *start 1464 */ 1465 static bool contains_pending_extent(struct btrfs_device *device, u64 *start, 1466 u64 len) 1467 { 1468 u64 physical_start, physical_end; 1469 1470 lockdep_assert_held(&device->fs_info->chunk_mutex); 1471 1472 if (find_first_extent_bit(&device->alloc_state, *start, 1473 &physical_start, &physical_end, 1474 CHUNK_ALLOCATED, NULL)) { 1475 1476 if (in_range(physical_start, *start, len) || 1477 in_range(*start, physical_start, 1478 physical_end + 1 - physical_start)) { 1479 *start = physical_end + 1; 1480 return true; 1481 } 1482 } 1483 return false; 1484 } 1485 1486 static u64 dev_extent_search_start(struct btrfs_device *device) 1487 { 1488 switch (device->fs_devices->chunk_alloc_policy) { 1489 case BTRFS_CHUNK_ALLOC_REGULAR: 1490 return BTRFS_DEVICE_RANGE_RESERVED; 1491 case BTRFS_CHUNK_ALLOC_ZONED: 1492 /* 1493 * We don't care about the starting region like regular 1494 * allocator, because we anyway use/reserve the first two zones 1495 * for superblock logging. 1496 */ 1497 return 0; 1498 default: 1499 BUG(); 1500 } 1501 } 1502 1503 static bool dev_extent_hole_check_zoned(struct btrfs_device *device, 1504 u64 *hole_start, u64 *hole_size, 1505 u64 num_bytes) 1506 { 1507 u64 zone_size = device->zone_info->zone_size; 1508 u64 pos; 1509 int ret; 1510 bool changed = false; 1511 1512 ASSERT(IS_ALIGNED(*hole_start, zone_size)); 1513 1514 while (*hole_size > 0) { 1515 pos = btrfs_find_allocatable_zones(device, *hole_start, 1516 *hole_start + *hole_size, 1517 num_bytes); 1518 if (pos != *hole_start) { 1519 *hole_size = *hole_start + *hole_size - pos; 1520 *hole_start = pos; 1521 changed = true; 1522 if (*hole_size < num_bytes) 1523 break; 1524 } 1525 1526 ret = btrfs_ensure_empty_zones(device, pos, num_bytes); 1527 1528 /* Range is ensured to be empty */ 1529 if (!ret) 1530 return changed; 1531 1532 /* Given hole range was invalid (outside of device) */ 1533 if (ret == -ERANGE) { 1534 *hole_start += *hole_size; 1535 *hole_size = 0; 1536 return true; 1537 } 1538 1539 *hole_start += zone_size; 1540 *hole_size -= zone_size; 1541 changed = true; 1542 } 1543 1544 return changed; 1545 } 1546 1547 /* 1548 * Check if specified hole is suitable for allocation. 1549 * 1550 * @device: the device which we have the hole 1551 * @hole_start: starting position of the hole 1552 * @hole_size: the size of the hole 1553 * @num_bytes: the size of the free space that we need 1554 * 1555 * This function may modify @hole_start and @hole_size to reflect the suitable 1556 * position for allocation. Returns 1 if hole position is updated, 0 otherwise. 1557 */ 1558 static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start, 1559 u64 *hole_size, u64 num_bytes) 1560 { 1561 bool changed = false; 1562 u64 hole_end = *hole_start + *hole_size; 1563 1564 for (;;) { 1565 /* 1566 * Check before we set max_hole_start, otherwise we could end up 1567 * sending back this offset anyway. 1568 */ 1569 if (contains_pending_extent(device, hole_start, *hole_size)) { 1570 if (hole_end >= *hole_start) 1571 *hole_size = hole_end - *hole_start; 1572 else 1573 *hole_size = 0; 1574 changed = true; 1575 } 1576 1577 switch (device->fs_devices->chunk_alloc_policy) { 1578 case BTRFS_CHUNK_ALLOC_REGULAR: 1579 /* No extra check */ 1580 break; 1581 case BTRFS_CHUNK_ALLOC_ZONED: 1582 if (dev_extent_hole_check_zoned(device, hole_start, 1583 hole_size, num_bytes)) { 1584 changed = true; 1585 /* 1586 * The changed hole can contain pending extent. 1587 * Loop again to check that. 1588 */ 1589 continue; 1590 } 1591 break; 1592 default: 1593 BUG(); 1594 } 1595 1596 break; 1597 } 1598 1599 return changed; 1600 } 1601 1602 /* 1603 * Find free space in the specified device. 1604 * 1605 * @device: the device which we search the free space in 1606 * @num_bytes: the size of the free space that we need 1607 * @search_start: the position from which to begin the search 1608 * @start: store the start of the free space. 1609 * @len: the size of the free space. that we find, or the size 1610 * of the max free space if we don't find suitable free space 1611 * 1612 * This does a pretty simple search, the expectation is that it is called very 1613 * infrequently and that a given device has a small number of extents. 1614 * 1615 * @start is used to store the start of the free space if we find. But if we 1616 * don't find suitable free space, it will be used to store the start position 1617 * of the max free space. 1618 * 1619 * @len is used to store the size of the free space that we find. 1620 * But if we don't find suitable free space, it is used to store the size of 1621 * the max free space. 1622 * 1623 * NOTE: This function will search *commit* root of device tree, and does extra 1624 * check to ensure dev extents are not double allocated. 1625 * This makes the function safe to allocate dev extents but may not report 1626 * correct usable device space, as device extent freed in current transaction 1627 * is not reported as available. 1628 */ 1629 static int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes, 1630 u64 *start, u64 *len) 1631 { 1632 struct btrfs_fs_info *fs_info = device->fs_info; 1633 struct btrfs_root *root = fs_info->dev_root; 1634 struct btrfs_key key; 1635 struct btrfs_dev_extent *dev_extent; 1636 struct btrfs_path *path; 1637 u64 search_start; 1638 u64 hole_size; 1639 u64 max_hole_start; 1640 u64 max_hole_size = 0; 1641 u64 extent_end; 1642 u64 search_end = device->total_bytes; 1643 int ret; 1644 int slot; 1645 struct extent_buffer *l; 1646 1647 search_start = dev_extent_search_start(device); 1648 max_hole_start = search_start; 1649 1650 WARN_ON(device->zone_info && 1651 !IS_ALIGNED(num_bytes, device->zone_info->zone_size)); 1652 1653 path = btrfs_alloc_path(); 1654 if (!path) { 1655 ret = -ENOMEM; 1656 goto out; 1657 } 1658 again: 1659 if (search_start >= search_end || 1660 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 1661 ret = -ENOSPC; 1662 goto out; 1663 } 1664 1665 path->reada = READA_FORWARD; 1666 path->search_commit_root = 1; 1667 path->skip_locking = 1; 1668 1669 key.objectid = device->devid; 1670 key.offset = search_start; 1671 key.type = BTRFS_DEV_EXTENT_KEY; 1672 1673 ret = btrfs_search_backwards(root, &key, path); 1674 if (ret < 0) 1675 goto out; 1676 1677 while (search_start < search_end) { 1678 l = path->nodes[0]; 1679 slot = path->slots[0]; 1680 if (slot >= btrfs_header_nritems(l)) { 1681 ret = btrfs_next_leaf(root, path); 1682 if (ret == 0) 1683 continue; 1684 if (ret < 0) 1685 goto out; 1686 1687 break; 1688 } 1689 btrfs_item_key_to_cpu(l, &key, slot); 1690 1691 if (key.objectid < device->devid) 1692 goto next; 1693 1694 if (key.objectid > device->devid) 1695 break; 1696 1697 if (key.type != BTRFS_DEV_EXTENT_KEY) 1698 goto next; 1699 1700 if (key.offset > search_end) 1701 break; 1702 1703 if (key.offset > search_start) { 1704 hole_size = key.offset - search_start; 1705 dev_extent_hole_check(device, &search_start, &hole_size, 1706 num_bytes); 1707 1708 if (hole_size > max_hole_size) { 1709 max_hole_start = search_start; 1710 max_hole_size = hole_size; 1711 } 1712 1713 /* 1714 * If this free space is greater than which we need, 1715 * it must be the max free space that we have found 1716 * until now, so max_hole_start must point to the start 1717 * of this free space and the length of this free space 1718 * is stored in max_hole_size. Thus, we return 1719 * max_hole_start and max_hole_size and go back to the 1720 * caller. 1721 */ 1722 if (hole_size >= num_bytes) { 1723 ret = 0; 1724 goto out; 1725 } 1726 } 1727 1728 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 1729 extent_end = key.offset + btrfs_dev_extent_length(l, 1730 dev_extent); 1731 if (extent_end > search_start) 1732 search_start = extent_end; 1733 next: 1734 path->slots[0]++; 1735 cond_resched(); 1736 } 1737 1738 /* 1739 * At this point, search_start should be the end of 1740 * allocated dev extents, and when shrinking the device, 1741 * search_end may be smaller than search_start. 1742 */ 1743 if (search_end > search_start) { 1744 hole_size = search_end - search_start; 1745 if (dev_extent_hole_check(device, &search_start, &hole_size, 1746 num_bytes)) { 1747 btrfs_release_path(path); 1748 goto again; 1749 } 1750 1751 if (hole_size > max_hole_size) { 1752 max_hole_start = search_start; 1753 max_hole_size = hole_size; 1754 } 1755 } 1756 1757 /* See above. */ 1758 if (max_hole_size < num_bytes) 1759 ret = -ENOSPC; 1760 else 1761 ret = 0; 1762 1763 ASSERT(max_hole_start + max_hole_size <= search_end); 1764 out: 1765 btrfs_free_path(path); 1766 *start = max_hole_start; 1767 if (len) 1768 *len = max_hole_size; 1769 return ret; 1770 } 1771 1772 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans, 1773 struct btrfs_device *device, 1774 u64 start, u64 *dev_extent_len) 1775 { 1776 struct btrfs_fs_info *fs_info = device->fs_info; 1777 struct btrfs_root *root = fs_info->dev_root; 1778 int ret; 1779 struct btrfs_path *path; 1780 struct btrfs_key key; 1781 struct btrfs_key found_key; 1782 struct extent_buffer *leaf = NULL; 1783 struct btrfs_dev_extent *extent = NULL; 1784 1785 path = btrfs_alloc_path(); 1786 if (!path) 1787 return -ENOMEM; 1788 1789 key.objectid = device->devid; 1790 key.offset = start; 1791 key.type = BTRFS_DEV_EXTENT_KEY; 1792 again: 1793 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1794 if (ret > 0) { 1795 ret = btrfs_previous_item(root, path, key.objectid, 1796 BTRFS_DEV_EXTENT_KEY); 1797 if (ret) 1798 goto out; 1799 leaf = path->nodes[0]; 1800 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 1801 extent = btrfs_item_ptr(leaf, path->slots[0], 1802 struct btrfs_dev_extent); 1803 BUG_ON(found_key.offset > start || found_key.offset + 1804 btrfs_dev_extent_length(leaf, extent) < start); 1805 key = found_key; 1806 btrfs_release_path(path); 1807 goto again; 1808 } else if (ret == 0) { 1809 leaf = path->nodes[0]; 1810 extent = btrfs_item_ptr(leaf, path->slots[0], 1811 struct btrfs_dev_extent); 1812 } else { 1813 goto out; 1814 } 1815 1816 *dev_extent_len = btrfs_dev_extent_length(leaf, extent); 1817 1818 ret = btrfs_del_item(trans, root, path); 1819 if (ret == 0) 1820 set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags); 1821 out: 1822 btrfs_free_path(path); 1823 return ret; 1824 } 1825 1826 static u64 find_next_chunk(struct btrfs_fs_info *fs_info) 1827 { 1828 struct extent_map_tree *em_tree; 1829 struct extent_map *em; 1830 struct rb_node *n; 1831 u64 ret = 0; 1832 1833 em_tree = &fs_info->mapping_tree; 1834 read_lock(&em_tree->lock); 1835 n = rb_last(&em_tree->map.rb_root); 1836 if (n) { 1837 em = rb_entry(n, struct extent_map, rb_node); 1838 ret = em->start + em->len; 1839 } 1840 read_unlock(&em_tree->lock); 1841 1842 return ret; 1843 } 1844 1845 static noinline int find_next_devid(struct btrfs_fs_info *fs_info, 1846 u64 *devid_ret) 1847 { 1848 int ret; 1849 struct btrfs_key key; 1850 struct btrfs_key found_key; 1851 struct btrfs_path *path; 1852 1853 path = btrfs_alloc_path(); 1854 if (!path) 1855 return -ENOMEM; 1856 1857 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1858 key.type = BTRFS_DEV_ITEM_KEY; 1859 key.offset = (u64)-1; 1860 1861 ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0); 1862 if (ret < 0) 1863 goto error; 1864 1865 if (ret == 0) { 1866 /* Corruption */ 1867 btrfs_err(fs_info, "corrupted chunk tree devid -1 matched"); 1868 ret = -EUCLEAN; 1869 goto error; 1870 } 1871 1872 ret = btrfs_previous_item(fs_info->chunk_root, path, 1873 BTRFS_DEV_ITEMS_OBJECTID, 1874 BTRFS_DEV_ITEM_KEY); 1875 if (ret) { 1876 *devid_ret = 1; 1877 } else { 1878 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 1879 path->slots[0]); 1880 *devid_ret = found_key.offset + 1; 1881 } 1882 ret = 0; 1883 error: 1884 btrfs_free_path(path); 1885 return ret; 1886 } 1887 1888 /* 1889 * the device information is stored in the chunk root 1890 * the btrfs_device struct should be fully filled in 1891 */ 1892 static int btrfs_add_dev_item(struct btrfs_trans_handle *trans, 1893 struct btrfs_device *device) 1894 { 1895 int ret; 1896 struct btrfs_path *path; 1897 struct btrfs_dev_item *dev_item; 1898 struct extent_buffer *leaf; 1899 struct btrfs_key key; 1900 unsigned long ptr; 1901 1902 path = btrfs_alloc_path(); 1903 if (!path) 1904 return -ENOMEM; 1905 1906 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1907 key.type = BTRFS_DEV_ITEM_KEY; 1908 key.offset = device->devid; 1909 1910 btrfs_reserve_chunk_metadata(trans, true); 1911 ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path, 1912 &key, sizeof(*dev_item)); 1913 btrfs_trans_release_chunk_metadata(trans); 1914 if (ret) 1915 goto out; 1916 1917 leaf = path->nodes[0]; 1918 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 1919 1920 btrfs_set_device_id(leaf, dev_item, device->devid); 1921 btrfs_set_device_generation(leaf, dev_item, 0); 1922 btrfs_set_device_type(leaf, dev_item, device->type); 1923 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 1924 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 1925 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 1926 btrfs_set_device_total_bytes(leaf, dev_item, 1927 btrfs_device_get_disk_total_bytes(device)); 1928 btrfs_set_device_bytes_used(leaf, dev_item, 1929 btrfs_device_get_bytes_used(device)); 1930 btrfs_set_device_group(leaf, dev_item, 0); 1931 btrfs_set_device_seek_speed(leaf, dev_item, 0); 1932 btrfs_set_device_bandwidth(leaf, dev_item, 0); 1933 btrfs_set_device_start_offset(leaf, dev_item, 0); 1934 1935 ptr = btrfs_device_uuid(dev_item); 1936 write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 1937 ptr = btrfs_device_fsid(dev_item); 1938 write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid, 1939 ptr, BTRFS_FSID_SIZE); 1940 btrfs_mark_buffer_dirty(trans, leaf); 1941 1942 ret = 0; 1943 out: 1944 btrfs_free_path(path); 1945 return ret; 1946 } 1947 1948 /* 1949 * Function to update ctime/mtime for a given device path. 1950 * Mainly used for ctime/mtime based probe like libblkid. 1951 * 1952 * We don't care about errors here, this is just to be kind to userspace. 1953 */ 1954 static void update_dev_time(const char *device_path) 1955 { 1956 struct path path; 1957 int ret; 1958 1959 ret = kern_path(device_path, LOOKUP_FOLLOW, &path); 1960 if (ret) 1961 return; 1962 1963 inode_update_time(d_inode(path.dentry), S_MTIME | S_CTIME | S_VERSION); 1964 path_put(&path); 1965 } 1966 1967 static int btrfs_rm_dev_item(struct btrfs_trans_handle *trans, 1968 struct btrfs_device *device) 1969 { 1970 struct btrfs_root *root = device->fs_info->chunk_root; 1971 int ret; 1972 struct btrfs_path *path; 1973 struct btrfs_key key; 1974 1975 path = btrfs_alloc_path(); 1976 if (!path) 1977 return -ENOMEM; 1978 1979 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1980 key.type = BTRFS_DEV_ITEM_KEY; 1981 key.offset = device->devid; 1982 1983 btrfs_reserve_chunk_metadata(trans, false); 1984 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1985 btrfs_trans_release_chunk_metadata(trans); 1986 if (ret) { 1987 if (ret > 0) 1988 ret = -ENOENT; 1989 goto out; 1990 } 1991 1992 ret = btrfs_del_item(trans, root, path); 1993 out: 1994 btrfs_free_path(path); 1995 return ret; 1996 } 1997 1998 /* 1999 * Verify that @num_devices satisfies the RAID profile constraints in the whole 2000 * filesystem. It's up to the caller to adjust that number regarding eg. device 2001 * replace. 2002 */ 2003 static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info, 2004 u64 num_devices) 2005 { 2006 u64 all_avail; 2007 unsigned seq; 2008 int i; 2009 2010 do { 2011 seq = read_seqbegin(&fs_info->profiles_lock); 2012 2013 all_avail = fs_info->avail_data_alloc_bits | 2014 fs_info->avail_system_alloc_bits | 2015 fs_info->avail_metadata_alloc_bits; 2016 } while (read_seqretry(&fs_info->profiles_lock, seq)); 2017 2018 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) { 2019 if (!(all_avail & btrfs_raid_array[i].bg_flag)) 2020 continue; 2021 2022 if (num_devices < btrfs_raid_array[i].devs_min) 2023 return btrfs_raid_array[i].mindev_error; 2024 } 2025 2026 return 0; 2027 } 2028 2029 static struct btrfs_device * btrfs_find_next_active_device( 2030 struct btrfs_fs_devices *fs_devs, struct btrfs_device *device) 2031 { 2032 struct btrfs_device *next_device; 2033 2034 list_for_each_entry(next_device, &fs_devs->devices, dev_list) { 2035 if (next_device != device && 2036 !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state) 2037 && next_device->bdev) 2038 return next_device; 2039 } 2040 2041 return NULL; 2042 } 2043 2044 /* 2045 * Helper function to check if the given device is part of s_bdev / latest_dev 2046 * and replace it with the provided or the next active device, in the context 2047 * where this function called, there should be always be another device (or 2048 * this_dev) which is active. 2049 */ 2050 void __cold btrfs_assign_next_active_device(struct btrfs_device *device, 2051 struct btrfs_device *next_device) 2052 { 2053 struct btrfs_fs_info *fs_info = device->fs_info; 2054 2055 if (!next_device) 2056 next_device = btrfs_find_next_active_device(fs_info->fs_devices, 2057 device); 2058 ASSERT(next_device); 2059 2060 if (fs_info->sb->s_bdev && 2061 (fs_info->sb->s_bdev == device->bdev)) 2062 fs_info->sb->s_bdev = next_device->bdev; 2063 2064 if (fs_info->fs_devices->latest_dev->bdev == device->bdev) 2065 fs_info->fs_devices->latest_dev = next_device; 2066 } 2067 2068 /* 2069 * Return btrfs_fs_devices::num_devices excluding the device that's being 2070 * currently replaced. 2071 */ 2072 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info) 2073 { 2074 u64 num_devices = fs_info->fs_devices->num_devices; 2075 2076 down_read(&fs_info->dev_replace.rwsem); 2077 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) { 2078 ASSERT(num_devices > 1); 2079 num_devices--; 2080 } 2081 up_read(&fs_info->dev_replace.rwsem); 2082 2083 return num_devices; 2084 } 2085 2086 static void btrfs_scratch_superblock(struct btrfs_fs_info *fs_info, 2087 struct block_device *bdev, int copy_num) 2088 { 2089 struct btrfs_super_block *disk_super; 2090 const size_t len = sizeof(disk_super->magic); 2091 const u64 bytenr = btrfs_sb_offset(copy_num); 2092 int ret; 2093 2094 disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr); 2095 if (IS_ERR(disk_super)) 2096 return; 2097 2098 memset(&disk_super->magic, 0, len); 2099 folio_mark_dirty(virt_to_folio(disk_super)); 2100 btrfs_release_disk_super(disk_super); 2101 2102 ret = sync_blockdev_range(bdev, bytenr, bytenr + len - 1); 2103 if (ret) 2104 btrfs_warn(fs_info, "error clearing superblock number %d (%d)", 2105 copy_num, ret); 2106 } 2107 2108 void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info, 2109 struct block_device *bdev, 2110 const char *device_path) 2111 { 2112 int copy_num; 2113 2114 if (!bdev) 2115 return; 2116 2117 for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) { 2118 if (bdev_is_zoned(bdev)) 2119 btrfs_reset_sb_log_zones(bdev, copy_num); 2120 else 2121 btrfs_scratch_superblock(fs_info, bdev, copy_num); 2122 } 2123 2124 /* Notify udev that device has changed */ 2125 btrfs_kobject_uevent(bdev, KOBJ_CHANGE); 2126 2127 /* Update ctime/mtime for device path for libblkid */ 2128 update_dev_time(device_path); 2129 } 2130 2131 int btrfs_rm_device(struct btrfs_fs_info *fs_info, 2132 struct btrfs_dev_lookup_args *args, 2133 struct block_device **bdev, void **holder) 2134 { 2135 struct btrfs_trans_handle *trans; 2136 struct btrfs_device *device; 2137 struct btrfs_fs_devices *cur_devices; 2138 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2139 u64 num_devices; 2140 int ret = 0; 2141 2142 if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { 2143 btrfs_err(fs_info, "device remove not supported on extent tree v2 yet"); 2144 return -EINVAL; 2145 } 2146 2147 /* 2148 * The device list in fs_devices is accessed without locks (neither 2149 * uuid_mutex nor device_list_mutex) as it won't change on a mounted 2150 * filesystem and another device rm cannot run. 2151 */ 2152 num_devices = btrfs_num_devices(fs_info); 2153 2154 ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1); 2155 if (ret) 2156 return ret; 2157 2158 device = btrfs_find_device(fs_info->fs_devices, args); 2159 if (!device) { 2160 if (args->missing) 2161 ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND; 2162 else 2163 ret = -ENOENT; 2164 return ret; 2165 } 2166 2167 if (btrfs_pinned_by_swapfile(fs_info, device)) { 2168 btrfs_warn_in_rcu(fs_info, 2169 "cannot remove device %s (devid %llu) due to active swapfile", 2170 btrfs_dev_name(device), device->devid); 2171 return -ETXTBSY; 2172 } 2173 2174 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 2175 return BTRFS_ERROR_DEV_TGT_REPLACE; 2176 2177 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 2178 fs_info->fs_devices->rw_devices == 1) 2179 return BTRFS_ERROR_DEV_ONLY_WRITABLE; 2180 2181 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2182 mutex_lock(&fs_info->chunk_mutex); 2183 list_del_init(&device->dev_alloc_list); 2184 device->fs_devices->rw_devices--; 2185 mutex_unlock(&fs_info->chunk_mutex); 2186 } 2187 2188 ret = btrfs_shrink_device(device, 0); 2189 if (ret) 2190 goto error_undo; 2191 2192 trans = btrfs_start_transaction(fs_info->chunk_root, 0); 2193 if (IS_ERR(trans)) { 2194 ret = PTR_ERR(trans); 2195 goto error_undo; 2196 } 2197 2198 ret = btrfs_rm_dev_item(trans, device); 2199 if (ret) { 2200 /* Any error in dev item removal is critical */ 2201 btrfs_crit(fs_info, 2202 "failed to remove device item for devid %llu: %d", 2203 device->devid, ret); 2204 btrfs_abort_transaction(trans, ret); 2205 btrfs_end_transaction(trans); 2206 return ret; 2207 } 2208 2209 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2210 btrfs_scrub_cancel_dev(device); 2211 2212 /* 2213 * the device list mutex makes sure that we don't change 2214 * the device list while someone else is writing out all 2215 * the device supers. Whoever is writing all supers, should 2216 * lock the device list mutex before getting the number of 2217 * devices in the super block (super_copy). Conversely, 2218 * whoever updates the number of devices in the super block 2219 * (super_copy) should hold the device list mutex. 2220 */ 2221 2222 /* 2223 * In normal cases the cur_devices == fs_devices. But in case 2224 * of deleting a seed device, the cur_devices should point to 2225 * its own fs_devices listed under the fs_devices->seed_list. 2226 */ 2227 cur_devices = device->fs_devices; 2228 mutex_lock(&fs_devices->device_list_mutex); 2229 list_del_rcu(&device->dev_list); 2230 2231 cur_devices->num_devices--; 2232 cur_devices->total_devices--; 2233 /* Update total_devices of the parent fs_devices if it's seed */ 2234 if (cur_devices != fs_devices) 2235 fs_devices->total_devices--; 2236 2237 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) 2238 cur_devices->missing_devices--; 2239 2240 btrfs_assign_next_active_device(device, NULL); 2241 2242 if (device->bdev) { 2243 cur_devices->open_devices--; 2244 /* remove sysfs entry */ 2245 btrfs_sysfs_remove_device(device); 2246 } 2247 2248 num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1; 2249 btrfs_set_super_num_devices(fs_info->super_copy, num_devices); 2250 mutex_unlock(&fs_devices->device_list_mutex); 2251 2252 /* 2253 * At this point, the device is zero sized and detached from the 2254 * devices list. All that's left is to zero out the old supers and 2255 * free the device. 2256 * 2257 * We cannot call btrfs_close_bdev() here because we're holding the sb 2258 * write lock, and blkdev_put() will pull in the ->open_mutex on the 2259 * block device and it's dependencies. Instead just flush the device 2260 * and let the caller do the final blkdev_put. 2261 */ 2262 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2263 btrfs_scratch_superblocks(fs_info, device->bdev, 2264 device->name->str); 2265 if (device->bdev) { 2266 sync_blockdev(device->bdev); 2267 invalidate_bdev(device->bdev); 2268 } 2269 } 2270 2271 *bdev = device->bdev; 2272 *holder = device->holder; 2273 synchronize_rcu(); 2274 btrfs_free_device(device); 2275 2276 /* 2277 * This can happen if cur_devices is the private seed devices list. We 2278 * cannot call close_fs_devices() here because it expects the uuid_mutex 2279 * to be held, but in fact we don't need that for the private 2280 * seed_devices, we can simply decrement cur_devices->opened and then 2281 * remove it from our list and free the fs_devices. 2282 */ 2283 if (cur_devices->num_devices == 0) { 2284 list_del_init(&cur_devices->seed_list); 2285 ASSERT(cur_devices->opened == 1); 2286 cur_devices->opened--; 2287 free_fs_devices(cur_devices); 2288 } 2289 2290 ret = btrfs_commit_transaction(trans); 2291 2292 return ret; 2293 2294 error_undo: 2295 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2296 mutex_lock(&fs_info->chunk_mutex); 2297 list_add(&device->dev_alloc_list, 2298 &fs_devices->alloc_list); 2299 device->fs_devices->rw_devices++; 2300 mutex_unlock(&fs_info->chunk_mutex); 2301 } 2302 return ret; 2303 } 2304 2305 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev) 2306 { 2307 struct btrfs_fs_devices *fs_devices; 2308 2309 lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex); 2310 2311 /* 2312 * in case of fs with no seed, srcdev->fs_devices will point 2313 * to fs_devices of fs_info. However when the dev being replaced is 2314 * a seed dev it will point to the seed's local fs_devices. In short 2315 * srcdev will have its correct fs_devices in both the cases. 2316 */ 2317 fs_devices = srcdev->fs_devices; 2318 2319 list_del_rcu(&srcdev->dev_list); 2320 list_del(&srcdev->dev_alloc_list); 2321 fs_devices->num_devices--; 2322 if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state)) 2323 fs_devices->missing_devices--; 2324 2325 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state)) 2326 fs_devices->rw_devices--; 2327 2328 if (srcdev->bdev) 2329 fs_devices->open_devices--; 2330 } 2331 2332 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev) 2333 { 2334 struct btrfs_fs_devices *fs_devices = srcdev->fs_devices; 2335 2336 mutex_lock(&uuid_mutex); 2337 2338 btrfs_close_bdev(srcdev); 2339 synchronize_rcu(); 2340 btrfs_free_device(srcdev); 2341 2342 /* if this is no devs we rather delete the fs_devices */ 2343 if (!fs_devices->num_devices) { 2344 /* 2345 * On a mounted FS, num_devices can't be zero unless it's a 2346 * seed. In case of a seed device being replaced, the replace 2347 * target added to the sprout FS, so there will be no more 2348 * device left under the seed FS. 2349 */ 2350 ASSERT(fs_devices->seeding); 2351 2352 list_del_init(&fs_devices->seed_list); 2353 close_fs_devices(fs_devices); 2354 free_fs_devices(fs_devices); 2355 } 2356 mutex_unlock(&uuid_mutex); 2357 } 2358 2359 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev) 2360 { 2361 struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices; 2362 2363 mutex_lock(&fs_devices->device_list_mutex); 2364 2365 btrfs_sysfs_remove_device(tgtdev); 2366 2367 if (tgtdev->bdev) 2368 fs_devices->open_devices--; 2369 2370 fs_devices->num_devices--; 2371 2372 btrfs_assign_next_active_device(tgtdev, NULL); 2373 2374 list_del_rcu(&tgtdev->dev_list); 2375 2376 mutex_unlock(&fs_devices->device_list_mutex); 2377 2378 btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev, 2379 tgtdev->name->str); 2380 2381 btrfs_close_bdev(tgtdev); 2382 synchronize_rcu(); 2383 btrfs_free_device(tgtdev); 2384 } 2385 2386 /* 2387 * Populate args from device at path. 2388 * 2389 * @fs_info: the filesystem 2390 * @args: the args to populate 2391 * @path: the path to the device 2392 * 2393 * This will read the super block of the device at @path and populate @args with 2394 * the devid, fsid, and uuid. This is meant to be used for ioctls that need to 2395 * lookup a device to operate on, but need to do it before we take any locks. 2396 * This properly handles the special case of "missing" that a user may pass in, 2397 * and does some basic sanity checks. The caller must make sure that @path is 2398 * properly NUL terminated before calling in, and must call 2399 * btrfs_put_dev_args_from_path() in order to free up the temporary fsid and 2400 * uuid buffers. 2401 * 2402 * Return: 0 for success, -errno for failure 2403 */ 2404 int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info, 2405 struct btrfs_dev_lookup_args *args, 2406 const char *path) 2407 { 2408 struct btrfs_super_block *disk_super; 2409 struct block_device *bdev; 2410 int ret; 2411 2412 if (!path || !path[0]) 2413 return -EINVAL; 2414 if (!strcmp(path, "missing")) { 2415 args->missing = true; 2416 return 0; 2417 } 2418 2419 args->uuid = kzalloc(BTRFS_UUID_SIZE, GFP_KERNEL); 2420 args->fsid = kzalloc(BTRFS_FSID_SIZE, GFP_KERNEL); 2421 if (!args->uuid || !args->fsid) { 2422 btrfs_put_dev_args_from_path(args); 2423 return -ENOMEM; 2424 } 2425 2426 ret = btrfs_get_bdev_and_sb(path, BLK_OPEN_READ, NULL, 0, 2427 &bdev, &disk_super); 2428 if (ret) { 2429 btrfs_put_dev_args_from_path(args); 2430 return ret; 2431 } 2432 2433 args->devid = btrfs_stack_device_id(&disk_super->dev_item); 2434 memcpy(args->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE); 2435 if (btrfs_fs_incompat(fs_info, METADATA_UUID)) 2436 memcpy(args->fsid, disk_super->metadata_uuid, BTRFS_FSID_SIZE); 2437 else 2438 memcpy(args->fsid, disk_super->fsid, BTRFS_FSID_SIZE); 2439 btrfs_release_disk_super(disk_super); 2440 blkdev_put(bdev, NULL); 2441 return 0; 2442 } 2443 2444 /* 2445 * Only use this jointly with btrfs_get_dev_args_from_path() because we will 2446 * allocate our ->uuid and ->fsid pointers, everybody else uses local variables 2447 * that don't need to be freed. 2448 */ 2449 void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args) 2450 { 2451 kfree(args->uuid); 2452 kfree(args->fsid); 2453 args->uuid = NULL; 2454 args->fsid = NULL; 2455 } 2456 2457 struct btrfs_device *btrfs_find_device_by_devspec( 2458 struct btrfs_fs_info *fs_info, u64 devid, 2459 const char *device_path) 2460 { 2461 BTRFS_DEV_LOOKUP_ARGS(args); 2462 struct btrfs_device *device; 2463 int ret; 2464 2465 if (devid) { 2466 args.devid = devid; 2467 device = btrfs_find_device(fs_info->fs_devices, &args); 2468 if (!device) 2469 return ERR_PTR(-ENOENT); 2470 return device; 2471 } 2472 2473 ret = btrfs_get_dev_args_from_path(fs_info, &args, device_path); 2474 if (ret) 2475 return ERR_PTR(ret); 2476 device = btrfs_find_device(fs_info->fs_devices, &args); 2477 btrfs_put_dev_args_from_path(&args); 2478 if (!device) 2479 return ERR_PTR(-ENOENT); 2480 return device; 2481 } 2482 2483 static struct btrfs_fs_devices *btrfs_init_sprout(struct btrfs_fs_info *fs_info) 2484 { 2485 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2486 struct btrfs_fs_devices *old_devices; 2487 struct btrfs_fs_devices *seed_devices; 2488 2489 lockdep_assert_held(&uuid_mutex); 2490 if (!fs_devices->seeding) 2491 return ERR_PTR(-EINVAL); 2492 2493 /* 2494 * Private copy of the seed devices, anchored at 2495 * fs_info->fs_devices->seed_list 2496 */ 2497 seed_devices = alloc_fs_devices(NULL, NULL); 2498 if (IS_ERR(seed_devices)) 2499 return seed_devices; 2500 2501 /* 2502 * It's necessary to retain a copy of the original seed fs_devices in 2503 * fs_uuids so that filesystems which have been seeded can successfully 2504 * reference the seed device from open_seed_devices. This also supports 2505 * multiple fs seed. 2506 */ 2507 old_devices = clone_fs_devices(fs_devices); 2508 if (IS_ERR(old_devices)) { 2509 kfree(seed_devices); 2510 return old_devices; 2511 } 2512 2513 list_add(&old_devices->fs_list, &fs_uuids); 2514 2515 memcpy(seed_devices, fs_devices, sizeof(*seed_devices)); 2516 seed_devices->opened = 1; 2517 INIT_LIST_HEAD(&seed_devices->devices); 2518 INIT_LIST_HEAD(&seed_devices->alloc_list); 2519 mutex_init(&seed_devices->device_list_mutex); 2520 2521 return seed_devices; 2522 } 2523 2524 /* 2525 * Splice seed devices into the sprout fs_devices. 2526 * Generate a new fsid for the sprouted read-write filesystem. 2527 */ 2528 static void btrfs_setup_sprout(struct btrfs_fs_info *fs_info, 2529 struct btrfs_fs_devices *seed_devices) 2530 { 2531 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2532 struct btrfs_super_block *disk_super = fs_info->super_copy; 2533 struct btrfs_device *device; 2534 u64 super_flags; 2535 2536 /* 2537 * We are updating the fsid, the thread leading to device_list_add() 2538 * could race, so uuid_mutex is needed. 2539 */ 2540 lockdep_assert_held(&uuid_mutex); 2541 2542 /* 2543 * The threads listed below may traverse dev_list but can do that without 2544 * device_list_mutex: 2545 * - All device ops and balance - as we are in btrfs_exclop_start. 2546 * - Various dev_list readers - are using RCU. 2547 * - btrfs_ioctl_fitrim() - is using RCU. 2548 * 2549 * For-read threads as below are using device_list_mutex: 2550 * - Readonly scrub btrfs_scrub_dev() 2551 * - Readonly scrub btrfs_scrub_progress() 2552 * - btrfs_get_dev_stats() 2553 */ 2554 lockdep_assert_held(&fs_devices->device_list_mutex); 2555 2556 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices, 2557 synchronize_rcu); 2558 list_for_each_entry(device, &seed_devices->devices, dev_list) 2559 device->fs_devices = seed_devices; 2560 2561 fs_devices->seeding = false; 2562 fs_devices->num_devices = 0; 2563 fs_devices->open_devices = 0; 2564 fs_devices->missing_devices = 0; 2565 fs_devices->rotating = false; 2566 list_add(&seed_devices->seed_list, &fs_devices->seed_list); 2567 2568 generate_random_uuid(fs_devices->fsid); 2569 memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE); 2570 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); 2571 2572 super_flags = btrfs_super_flags(disk_super) & 2573 ~BTRFS_SUPER_FLAG_SEEDING; 2574 btrfs_set_super_flags(disk_super, super_flags); 2575 } 2576 2577 /* 2578 * Store the expected generation for seed devices in device items. 2579 */ 2580 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans) 2581 { 2582 BTRFS_DEV_LOOKUP_ARGS(args); 2583 struct btrfs_fs_info *fs_info = trans->fs_info; 2584 struct btrfs_root *root = fs_info->chunk_root; 2585 struct btrfs_path *path; 2586 struct extent_buffer *leaf; 2587 struct btrfs_dev_item *dev_item; 2588 struct btrfs_device *device; 2589 struct btrfs_key key; 2590 u8 fs_uuid[BTRFS_FSID_SIZE]; 2591 u8 dev_uuid[BTRFS_UUID_SIZE]; 2592 int ret; 2593 2594 path = btrfs_alloc_path(); 2595 if (!path) 2596 return -ENOMEM; 2597 2598 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2599 key.offset = 0; 2600 key.type = BTRFS_DEV_ITEM_KEY; 2601 2602 while (1) { 2603 btrfs_reserve_chunk_metadata(trans, false); 2604 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2605 btrfs_trans_release_chunk_metadata(trans); 2606 if (ret < 0) 2607 goto error; 2608 2609 leaf = path->nodes[0]; 2610 next_slot: 2611 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 2612 ret = btrfs_next_leaf(root, path); 2613 if (ret > 0) 2614 break; 2615 if (ret < 0) 2616 goto error; 2617 leaf = path->nodes[0]; 2618 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2619 btrfs_release_path(path); 2620 continue; 2621 } 2622 2623 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2624 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID || 2625 key.type != BTRFS_DEV_ITEM_KEY) 2626 break; 2627 2628 dev_item = btrfs_item_ptr(leaf, path->slots[0], 2629 struct btrfs_dev_item); 2630 args.devid = btrfs_device_id(leaf, dev_item); 2631 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 2632 BTRFS_UUID_SIZE); 2633 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 2634 BTRFS_FSID_SIZE); 2635 args.uuid = dev_uuid; 2636 args.fsid = fs_uuid; 2637 device = btrfs_find_device(fs_info->fs_devices, &args); 2638 BUG_ON(!device); /* Logic error */ 2639 2640 if (device->fs_devices->seeding) { 2641 btrfs_set_device_generation(leaf, dev_item, 2642 device->generation); 2643 btrfs_mark_buffer_dirty(trans, leaf); 2644 } 2645 2646 path->slots[0]++; 2647 goto next_slot; 2648 } 2649 ret = 0; 2650 error: 2651 btrfs_free_path(path); 2652 return ret; 2653 } 2654 2655 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path) 2656 { 2657 struct btrfs_root *root = fs_info->dev_root; 2658 struct btrfs_trans_handle *trans; 2659 struct btrfs_device *device; 2660 struct block_device *bdev; 2661 struct super_block *sb = fs_info->sb; 2662 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2663 struct btrfs_fs_devices *seed_devices = NULL; 2664 u64 orig_super_total_bytes; 2665 u64 orig_super_num_devices; 2666 int ret = 0; 2667 bool seeding_dev = false; 2668 bool locked = false; 2669 2670 if (sb_rdonly(sb) && !fs_devices->seeding) 2671 return -EROFS; 2672 2673 bdev = blkdev_get_by_path(device_path, BLK_OPEN_WRITE, 2674 fs_info->bdev_holder, NULL); 2675 if (IS_ERR(bdev)) 2676 return PTR_ERR(bdev); 2677 2678 if (!btrfs_check_device_zone_type(fs_info, bdev)) { 2679 ret = -EINVAL; 2680 goto error; 2681 } 2682 2683 if (fs_devices->seeding) { 2684 seeding_dev = true; 2685 down_write(&sb->s_umount); 2686 mutex_lock(&uuid_mutex); 2687 locked = true; 2688 } 2689 2690 sync_blockdev(bdev); 2691 2692 rcu_read_lock(); 2693 list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) { 2694 if (device->bdev == bdev) { 2695 ret = -EEXIST; 2696 rcu_read_unlock(); 2697 goto error; 2698 } 2699 } 2700 rcu_read_unlock(); 2701 2702 device = btrfs_alloc_device(fs_info, NULL, NULL, device_path); 2703 if (IS_ERR(device)) { 2704 /* we can safely leave the fs_devices entry around */ 2705 ret = PTR_ERR(device); 2706 goto error; 2707 } 2708 2709 device->fs_info = fs_info; 2710 device->bdev = bdev; 2711 ret = lookup_bdev(device_path, &device->devt); 2712 if (ret) 2713 goto error_free_device; 2714 2715 ret = btrfs_get_dev_zone_info(device, false); 2716 if (ret) 2717 goto error_free_device; 2718 2719 trans = btrfs_start_transaction(root, 0); 2720 if (IS_ERR(trans)) { 2721 ret = PTR_ERR(trans); 2722 goto error_free_zone; 2723 } 2724 2725 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 2726 device->generation = trans->transid; 2727 device->io_width = fs_info->sectorsize; 2728 device->io_align = fs_info->sectorsize; 2729 device->sector_size = fs_info->sectorsize; 2730 device->total_bytes = 2731 round_down(bdev_nr_bytes(bdev), fs_info->sectorsize); 2732 device->disk_total_bytes = device->total_bytes; 2733 device->commit_total_bytes = device->total_bytes; 2734 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2735 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 2736 device->holder = fs_info->bdev_holder; 2737 device->dev_stats_valid = 1; 2738 set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE); 2739 2740 if (seeding_dev) { 2741 /* GFP_KERNEL allocation must not be under device_list_mutex */ 2742 seed_devices = btrfs_init_sprout(fs_info); 2743 if (IS_ERR(seed_devices)) { 2744 ret = PTR_ERR(seed_devices); 2745 btrfs_abort_transaction(trans, ret); 2746 goto error_trans; 2747 } 2748 } 2749 2750 mutex_lock(&fs_devices->device_list_mutex); 2751 if (seeding_dev) { 2752 btrfs_setup_sprout(fs_info, seed_devices); 2753 btrfs_assign_next_active_device(fs_info->fs_devices->latest_dev, 2754 device); 2755 } 2756 2757 device->fs_devices = fs_devices; 2758 2759 mutex_lock(&fs_info->chunk_mutex); 2760 list_add_rcu(&device->dev_list, &fs_devices->devices); 2761 list_add(&device->dev_alloc_list, &fs_devices->alloc_list); 2762 fs_devices->num_devices++; 2763 fs_devices->open_devices++; 2764 fs_devices->rw_devices++; 2765 fs_devices->total_devices++; 2766 fs_devices->total_rw_bytes += device->total_bytes; 2767 2768 atomic64_add(device->total_bytes, &fs_info->free_chunk_space); 2769 2770 if (!bdev_nonrot(bdev)) 2771 fs_devices->rotating = true; 2772 2773 orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy); 2774 btrfs_set_super_total_bytes(fs_info->super_copy, 2775 round_down(orig_super_total_bytes + device->total_bytes, 2776 fs_info->sectorsize)); 2777 2778 orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy); 2779 btrfs_set_super_num_devices(fs_info->super_copy, 2780 orig_super_num_devices + 1); 2781 2782 /* 2783 * we've got more storage, clear any full flags on the space 2784 * infos 2785 */ 2786 btrfs_clear_space_info_full(fs_info); 2787 2788 mutex_unlock(&fs_info->chunk_mutex); 2789 2790 /* Add sysfs device entry */ 2791 btrfs_sysfs_add_device(device); 2792 2793 mutex_unlock(&fs_devices->device_list_mutex); 2794 2795 if (seeding_dev) { 2796 mutex_lock(&fs_info->chunk_mutex); 2797 ret = init_first_rw_device(trans); 2798 mutex_unlock(&fs_info->chunk_mutex); 2799 if (ret) { 2800 btrfs_abort_transaction(trans, ret); 2801 goto error_sysfs; 2802 } 2803 } 2804 2805 ret = btrfs_add_dev_item(trans, device); 2806 if (ret) { 2807 btrfs_abort_transaction(trans, ret); 2808 goto error_sysfs; 2809 } 2810 2811 if (seeding_dev) { 2812 ret = btrfs_finish_sprout(trans); 2813 if (ret) { 2814 btrfs_abort_transaction(trans, ret); 2815 goto error_sysfs; 2816 } 2817 2818 /* 2819 * fs_devices now represents the newly sprouted filesystem and 2820 * its fsid has been changed by btrfs_sprout_splice(). 2821 */ 2822 btrfs_sysfs_update_sprout_fsid(fs_devices); 2823 } 2824 2825 ret = btrfs_commit_transaction(trans); 2826 2827 if (seeding_dev) { 2828 mutex_unlock(&uuid_mutex); 2829 up_write(&sb->s_umount); 2830 locked = false; 2831 2832 if (ret) /* transaction commit */ 2833 return ret; 2834 2835 ret = btrfs_relocate_sys_chunks(fs_info); 2836 if (ret < 0) 2837 btrfs_handle_fs_error(fs_info, ret, 2838 "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command."); 2839 trans = btrfs_attach_transaction(root); 2840 if (IS_ERR(trans)) { 2841 if (PTR_ERR(trans) == -ENOENT) 2842 return 0; 2843 ret = PTR_ERR(trans); 2844 trans = NULL; 2845 goto error_sysfs; 2846 } 2847 ret = btrfs_commit_transaction(trans); 2848 } 2849 2850 /* 2851 * Now that we have written a new super block to this device, check all 2852 * other fs_devices list if device_path alienates any other scanned 2853 * device. 2854 * We can ignore the return value as it typically returns -EINVAL and 2855 * only succeeds if the device was an alien. 2856 */ 2857 btrfs_forget_devices(device->devt); 2858 2859 /* Update ctime/mtime for blkid or udev */ 2860 update_dev_time(device_path); 2861 2862 return ret; 2863 2864 error_sysfs: 2865 btrfs_sysfs_remove_device(device); 2866 mutex_lock(&fs_info->fs_devices->device_list_mutex); 2867 mutex_lock(&fs_info->chunk_mutex); 2868 list_del_rcu(&device->dev_list); 2869 list_del(&device->dev_alloc_list); 2870 fs_info->fs_devices->num_devices--; 2871 fs_info->fs_devices->open_devices--; 2872 fs_info->fs_devices->rw_devices--; 2873 fs_info->fs_devices->total_devices--; 2874 fs_info->fs_devices->total_rw_bytes -= device->total_bytes; 2875 atomic64_sub(device->total_bytes, &fs_info->free_chunk_space); 2876 btrfs_set_super_total_bytes(fs_info->super_copy, 2877 orig_super_total_bytes); 2878 btrfs_set_super_num_devices(fs_info->super_copy, 2879 orig_super_num_devices); 2880 mutex_unlock(&fs_info->chunk_mutex); 2881 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2882 error_trans: 2883 if (trans) 2884 btrfs_end_transaction(trans); 2885 error_free_zone: 2886 btrfs_destroy_dev_zone_info(device); 2887 error_free_device: 2888 btrfs_free_device(device); 2889 error: 2890 blkdev_put(bdev, fs_info->bdev_holder); 2891 if (locked) { 2892 mutex_unlock(&uuid_mutex); 2893 up_write(&sb->s_umount); 2894 } 2895 return ret; 2896 } 2897 2898 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans, 2899 struct btrfs_device *device) 2900 { 2901 int ret; 2902 struct btrfs_path *path; 2903 struct btrfs_root *root = device->fs_info->chunk_root; 2904 struct btrfs_dev_item *dev_item; 2905 struct extent_buffer *leaf; 2906 struct btrfs_key key; 2907 2908 path = btrfs_alloc_path(); 2909 if (!path) 2910 return -ENOMEM; 2911 2912 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2913 key.type = BTRFS_DEV_ITEM_KEY; 2914 key.offset = device->devid; 2915 2916 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2917 if (ret < 0) 2918 goto out; 2919 2920 if (ret > 0) { 2921 ret = -ENOENT; 2922 goto out; 2923 } 2924 2925 leaf = path->nodes[0]; 2926 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 2927 2928 btrfs_set_device_id(leaf, dev_item, device->devid); 2929 btrfs_set_device_type(leaf, dev_item, device->type); 2930 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 2931 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 2932 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 2933 btrfs_set_device_total_bytes(leaf, dev_item, 2934 btrfs_device_get_disk_total_bytes(device)); 2935 btrfs_set_device_bytes_used(leaf, dev_item, 2936 btrfs_device_get_bytes_used(device)); 2937 btrfs_mark_buffer_dirty(trans, leaf); 2938 2939 out: 2940 btrfs_free_path(path); 2941 return ret; 2942 } 2943 2944 int btrfs_grow_device(struct btrfs_trans_handle *trans, 2945 struct btrfs_device *device, u64 new_size) 2946 { 2947 struct btrfs_fs_info *fs_info = device->fs_info; 2948 struct btrfs_super_block *super_copy = fs_info->super_copy; 2949 u64 old_total; 2950 u64 diff; 2951 int ret; 2952 2953 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 2954 return -EACCES; 2955 2956 new_size = round_down(new_size, fs_info->sectorsize); 2957 2958 mutex_lock(&fs_info->chunk_mutex); 2959 old_total = btrfs_super_total_bytes(super_copy); 2960 diff = round_down(new_size - device->total_bytes, fs_info->sectorsize); 2961 2962 if (new_size <= device->total_bytes || 2963 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 2964 mutex_unlock(&fs_info->chunk_mutex); 2965 return -EINVAL; 2966 } 2967 2968 btrfs_set_super_total_bytes(super_copy, 2969 round_down(old_total + diff, fs_info->sectorsize)); 2970 device->fs_devices->total_rw_bytes += diff; 2971 2972 btrfs_device_set_total_bytes(device, new_size); 2973 btrfs_device_set_disk_total_bytes(device, new_size); 2974 btrfs_clear_space_info_full(device->fs_info); 2975 if (list_empty(&device->post_commit_list)) 2976 list_add_tail(&device->post_commit_list, 2977 &trans->transaction->dev_update_list); 2978 mutex_unlock(&fs_info->chunk_mutex); 2979 2980 btrfs_reserve_chunk_metadata(trans, false); 2981 ret = btrfs_update_device(trans, device); 2982 btrfs_trans_release_chunk_metadata(trans); 2983 2984 return ret; 2985 } 2986 2987 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 2988 { 2989 struct btrfs_fs_info *fs_info = trans->fs_info; 2990 struct btrfs_root *root = fs_info->chunk_root; 2991 int ret; 2992 struct btrfs_path *path; 2993 struct btrfs_key key; 2994 2995 path = btrfs_alloc_path(); 2996 if (!path) 2997 return -ENOMEM; 2998 2999 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3000 key.offset = chunk_offset; 3001 key.type = BTRFS_CHUNK_ITEM_KEY; 3002 3003 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 3004 if (ret < 0) 3005 goto out; 3006 else if (ret > 0) { /* Logic error or corruption */ 3007 btrfs_handle_fs_error(fs_info, -ENOENT, 3008 "Failed lookup while freeing chunk."); 3009 ret = -ENOENT; 3010 goto out; 3011 } 3012 3013 ret = btrfs_del_item(trans, root, path); 3014 if (ret < 0) 3015 btrfs_handle_fs_error(fs_info, ret, 3016 "Failed to delete chunk item."); 3017 out: 3018 btrfs_free_path(path); 3019 return ret; 3020 } 3021 3022 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 3023 { 3024 struct btrfs_super_block *super_copy = fs_info->super_copy; 3025 struct btrfs_disk_key *disk_key; 3026 struct btrfs_chunk *chunk; 3027 u8 *ptr; 3028 int ret = 0; 3029 u32 num_stripes; 3030 u32 array_size; 3031 u32 len = 0; 3032 u32 cur; 3033 struct btrfs_key key; 3034 3035 lockdep_assert_held(&fs_info->chunk_mutex); 3036 array_size = btrfs_super_sys_array_size(super_copy); 3037 3038 ptr = super_copy->sys_chunk_array; 3039 cur = 0; 3040 3041 while (cur < array_size) { 3042 disk_key = (struct btrfs_disk_key *)ptr; 3043 btrfs_disk_key_to_cpu(&key, disk_key); 3044 3045 len = sizeof(*disk_key); 3046 3047 if (key.type == BTRFS_CHUNK_ITEM_KEY) { 3048 chunk = (struct btrfs_chunk *)(ptr + len); 3049 num_stripes = btrfs_stack_chunk_num_stripes(chunk); 3050 len += btrfs_chunk_item_size(num_stripes); 3051 } else { 3052 ret = -EIO; 3053 break; 3054 } 3055 if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID && 3056 key.offset == chunk_offset) { 3057 memmove(ptr, ptr + len, array_size - (cur + len)); 3058 array_size -= len; 3059 btrfs_set_super_sys_array_size(super_copy, array_size); 3060 } else { 3061 ptr += len; 3062 cur += len; 3063 } 3064 } 3065 return ret; 3066 } 3067 3068 /* 3069 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent. 3070 * @logical: Logical block offset in bytes. 3071 * @length: Length of extent in bytes. 3072 * 3073 * Return: Chunk mapping or ERR_PTR. 3074 */ 3075 struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info, 3076 u64 logical, u64 length) 3077 { 3078 struct extent_map_tree *em_tree; 3079 struct extent_map *em; 3080 3081 em_tree = &fs_info->mapping_tree; 3082 read_lock(&em_tree->lock); 3083 em = lookup_extent_mapping(em_tree, logical, length); 3084 read_unlock(&em_tree->lock); 3085 3086 if (!em) { 3087 btrfs_crit(fs_info, 3088 "unable to find chunk map for logical %llu length %llu", 3089 logical, length); 3090 return ERR_PTR(-EINVAL); 3091 } 3092 3093 if (em->start > logical || em->start + em->len <= logical) { 3094 btrfs_crit(fs_info, 3095 "found a bad chunk map, wanted %llu-%llu, found %llu-%llu", 3096 logical, logical + length, em->start, em->start + em->len); 3097 free_extent_map(em); 3098 return ERR_PTR(-EINVAL); 3099 } 3100 3101 /* callers are responsible for dropping em's ref. */ 3102 return em; 3103 } 3104 3105 static int remove_chunk_item(struct btrfs_trans_handle *trans, 3106 struct map_lookup *map, u64 chunk_offset) 3107 { 3108 int i; 3109 3110 /* 3111 * Removing chunk items and updating the device items in the chunks btree 3112 * requires holding the chunk_mutex. 3113 * See the comment at btrfs_chunk_alloc() for the details. 3114 */ 3115 lockdep_assert_held(&trans->fs_info->chunk_mutex); 3116 3117 for (i = 0; i < map->num_stripes; i++) { 3118 int ret; 3119 3120 ret = btrfs_update_device(trans, map->stripes[i].dev); 3121 if (ret) 3122 return ret; 3123 } 3124 3125 return btrfs_free_chunk(trans, chunk_offset); 3126 } 3127 3128 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 3129 { 3130 struct btrfs_fs_info *fs_info = trans->fs_info; 3131 struct extent_map *em; 3132 struct map_lookup *map; 3133 u64 dev_extent_len = 0; 3134 int i, ret = 0; 3135 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 3136 3137 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 3138 if (IS_ERR(em)) { 3139 /* 3140 * This is a logic error, but we don't want to just rely on the 3141 * user having built with ASSERT enabled, so if ASSERT doesn't 3142 * do anything we still error out. 3143 */ 3144 ASSERT(0); 3145 return PTR_ERR(em); 3146 } 3147 map = em->map_lookup; 3148 3149 /* 3150 * First delete the device extent items from the devices btree. 3151 * We take the device_list_mutex to avoid racing with the finishing phase 3152 * of a device replace operation. See the comment below before acquiring 3153 * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex 3154 * because that can result in a deadlock when deleting the device extent 3155 * items from the devices btree - COWing an extent buffer from the btree 3156 * may result in allocating a new metadata chunk, which would attempt to 3157 * lock again fs_info->chunk_mutex. 3158 */ 3159 mutex_lock(&fs_devices->device_list_mutex); 3160 for (i = 0; i < map->num_stripes; i++) { 3161 struct btrfs_device *device = map->stripes[i].dev; 3162 ret = btrfs_free_dev_extent(trans, device, 3163 map->stripes[i].physical, 3164 &dev_extent_len); 3165 if (ret) { 3166 mutex_unlock(&fs_devices->device_list_mutex); 3167 btrfs_abort_transaction(trans, ret); 3168 goto out; 3169 } 3170 3171 if (device->bytes_used > 0) { 3172 mutex_lock(&fs_info->chunk_mutex); 3173 btrfs_device_set_bytes_used(device, 3174 device->bytes_used - dev_extent_len); 3175 atomic64_add(dev_extent_len, &fs_info->free_chunk_space); 3176 btrfs_clear_space_info_full(fs_info); 3177 mutex_unlock(&fs_info->chunk_mutex); 3178 } 3179 } 3180 mutex_unlock(&fs_devices->device_list_mutex); 3181 3182 /* 3183 * We acquire fs_info->chunk_mutex for 2 reasons: 3184 * 3185 * 1) Just like with the first phase of the chunk allocation, we must 3186 * reserve system space, do all chunk btree updates and deletions, and 3187 * update the system chunk array in the superblock while holding this 3188 * mutex. This is for similar reasons as explained on the comment at 3189 * the top of btrfs_chunk_alloc(); 3190 * 3191 * 2) Prevent races with the final phase of a device replace operation 3192 * that replaces the device object associated with the map's stripes, 3193 * because the device object's id can change at any time during that 3194 * final phase of the device replace operation 3195 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the 3196 * replaced device and then see it with an ID of 3197 * BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating 3198 * the device item, which does not exists on the chunk btree. 3199 * The finishing phase of device replace acquires both the 3200 * device_list_mutex and the chunk_mutex, in that order, so we are 3201 * safe by just acquiring the chunk_mutex. 3202 */ 3203 trans->removing_chunk = true; 3204 mutex_lock(&fs_info->chunk_mutex); 3205 3206 check_system_chunk(trans, map->type); 3207 3208 ret = remove_chunk_item(trans, map, chunk_offset); 3209 /* 3210 * Normally we should not get -ENOSPC since we reserved space before 3211 * through the call to check_system_chunk(). 3212 * 3213 * Despite our system space_info having enough free space, we may not 3214 * be able to allocate extents from its block groups, because all have 3215 * an incompatible profile, which will force us to allocate a new system 3216 * block group with the right profile, or right after we called 3217 * check_system_space() above, a scrub turned the only system block group 3218 * with enough free space into RO mode. 3219 * This is explained with more detail at do_chunk_alloc(). 3220 * 3221 * So if we get -ENOSPC, allocate a new system chunk and retry once. 3222 */ 3223 if (ret == -ENOSPC) { 3224 const u64 sys_flags = btrfs_system_alloc_profile(fs_info); 3225 struct btrfs_block_group *sys_bg; 3226 3227 sys_bg = btrfs_create_chunk(trans, sys_flags); 3228 if (IS_ERR(sys_bg)) { 3229 ret = PTR_ERR(sys_bg); 3230 btrfs_abort_transaction(trans, ret); 3231 goto out; 3232 } 3233 3234 ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg); 3235 if (ret) { 3236 btrfs_abort_transaction(trans, ret); 3237 goto out; 3238 } 3239 3240 ret = remove_chunk_item(trans, map, chunk_offset); 3241 if (ret) { 3242 btrfs_abort_transaction(trans, ret); 3243 goto out; 3244 } 3245 } else if (ret) { 3246 btrfs_abort_transaction(trans, ret); 3247 goto out; 3248 } 3249 3250 trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len); 3251 3252 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 3253 ret = btrfs_del_sys_chunk(fs_info, chunk_offset); 3254 if (ret) { 3255 btrfs_abort_transaction(trans, ret); 3256 goto out; 3257 } 3258 } 3259 3260 mutex_unlock(&fs_info->chunk_mutex); 3261 trans->removing_chunk = false; 3262 3263 /* 3264 * We are done with chunk btree updates and deletions, so release the 3265 * system space we previously reserved (with check_system_chunk()). 3266 */ 3267 btrfs_trans_release_chunk_metadata(trans); 3268 3269 ret = btrfs_remove_block_group(trans, chunk_offset, em); 3270 if (ret) { 3271 btrfs_abort_transaction(trans, ret); 3272 goto out; 3273 } 3274 3275 out: 3276 if (trans->removing_chunk) { 3277 mutex_unlock(&fs_info->chunk_mutex); 3278 trans->removing_chunk = false; 3279 } 3280 /* once for us */ 3281 free_extent_map(em); 3282 return ret; 3283 } 3284 3285 int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 3286 { 3287 struct btrfs_root *root = fs_info->chunk_root; 3288 struct btrfs_trans_handle *trans; 3289 struct btrfs_block_group *block_group; 3290 u64 length; 3291 int ret; 3292 3293 if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { 3294 btrfs_err(fs_info, 3295 "relocate: not supported on extent tree v2 yet"); 3296 return -EINVAL; 3297 } 3298 3299 /* 3300 * Prevent races with automatic removal of unused block groups. 3301 * After we relocate and before we remove the chunk with offset 3302 * chunk_offset, automatic removal of the block group can kick in, 3303 * resulting in a failure when calling btrfs_remove_chunk() below. 3304 * 3305 * Make sure to acquire this mutex before doing a tree search (dev 3306 * or chunk trees) to find chunks. Otherwise the cleaner kthread might 3307 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after 3308 * we release the path used to search the chunk/dev tree and before 3309 * the current task acquires this mutex and calls us. 3310 */ 3311 lockdep_assert_held(&fs_info->reclaim_bgs_lock); 3312 3313 /* step one, relocate all the extents inside this chunk */ 3314 btrfs_scrub_pause(fs_info); 3315 ret = btrfs_relocate_block_group(fs_info, chunk_offset); 3316 btrfs_scrub_continue(fs_info); 3317 if (ret) { 3318 /* 3319 * If we had a transaction abort, stop all running scrubs. 3320 * See transaction.c:cleanup_transaction() why we do it here. 3321 */ 3322 if (BTRFS_FS_ERROR(fs_info)) 3323 btrfs_scrub_cancel(fs_info); 3324 return ret; 3325 } 3326 3327 block_group = btrfs_lookup_block_group(fs_info, chunk_offset); 3328 if (!block_group) 3329 return -ENOENT; 3330 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group); 3331 length = block_group->length; 3332 btrfs_put_block_group(block_group); 3333 3334 /* 3335 * On a zoned file system, discard the whole block group, this will 3336 * trigger a REQ_OP_ZONE_RESET operation on the device zone. If 3337 * resetting the zone fails, don't treat it as a fatal problem from the 3338 * filesystem's point of view. 3339 */ 3340 if (btrfs_is_zoned(fs_info)) { 3341 ret = btrfs_discard_extent(fs_info, chunk_offset, length, NULL); 3342 if (ret) 3343 btrfs_info(fs_info, 3344 "failed to reset zone %llu after relocation", 3345 chunk_offset); 3346 } 3347 3348 trans = btrfs_start_trans_remove_block_group(root->fs_info, 3349 chunk_offset); 3350 if (IS_ERR(trans)) { 3351 ret = PTR_ERR(trans); 3352 btrfs_handle_fs_error(root->fs_info, ret, NULL); 3353 return ret; 3354 } 3355 3356 /* 3357 * step two, delete the device extents and the 3358 * chunk tree entries 3359 */ 3360 ret = btrfs_remove_chunk(trans, chunk_offset); 3361 btrfs_end_transaction(trans); 3362 return ret; 3363 } 3364 3365 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info) 3366 { 3367 struct btrfs_root *chunk_root = fs_info->chunk_root; 3368 struct btrfs_path *path; 3369 struct extent_buffer *leaf; 3370 struct btrfs_chunk *chunk; 3371 struct btrfs_key key; 3372 struct btrfs_key found_key; 3373 u64 chunk_type; 3374 bool retried = false; 3375 int failed = 0; 3376 int ret; 3377 3378 path = btrfs_alloc_path(); 3379 if (!path) 3380 return -ENOMEM; 3381 3382 again: 3383 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3384 key.offset = (u64)-1; 3385 key.type = BTRFS_CHUNK_ITEM_KEY; 3386 3387 while (1) { 3388 mutex_lock(&fs_info->reclaim_bgs_lock); 3389 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3390 if (ret < 0) { 3391 mutex_unlock(&fs_info->reclaim_bgs_lock); 3392 goto error; 3393 } 3394 if (ret == 0) { 3395 /* 3396 * On the first search we would find chunk tree with 3397 * offset -1, which is not possible. On subsequent 3398 * loops this would find an existing item on an invalid 3399 * offset (one less than the previous one, wrong 3400 * alignment and size). 3401 */ 3402 ret = -EUCLEAN; 3403 mutex_unlock(&fs_info->reclaim_bgs_lock); 3404 goto error; 3405 } 3406 3407 ret = btrfs_previous_item(chunk_root, path, key.objectid, 3408 key.type); 3409 if (ret) 3410 mutex_unlock(&fs_info->reclaim_bgs_lock); 3411 if (ret < 0) 3412 goto error; 3413 if (ret > 0) 3414 break; 3415 3416 leaf = path->nodes[0]; 3417 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3418 3419 chunk = btrfs_item_ptr(leaf, path->slots[0], 3420 struct btrfs_chunk); 3421 chunk_type = btrfs_chunk_type(leaf, chunk); 3422 btrfs_release_path(path); 3423 3424 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) { 3425 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 3426 if (ret == -ENOSPC) 3427 failed++; 3428 else 3429 BUG_ON(ret); 3430 } 3431 mutex_unlock(&fs_info->reclaim_bgs_lock); 3432 3433 if (found_key.offset == 0) 3434 break; 3435 key.offset = found_key.offset - 1; 3436 } 3437 ret = 0; 3438 if (failed && !retried) { 3439 failed = 0; 3440 retried = true; 3441 goto again; 3442 } else if (WARN_ON(failed && retried)) { 3443 ret = -ENOSPC; 3444 } 3445 error: 3446 btrfs_free_path(path); 3447 return ret; 3448 } 3449 3450 /* 3451 * return 1 : allocate a data chunk successfully, 3452 * return <0: errors during allocating a data chunk, 3453 * return 0 : no need to allocate a data chunk. 3454 */ 3455 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info, 3456 u64 chunk_offset) 3457 { 3458 struct btrfs_block_group *cache; 3459 u64 bytes_used; 3460 u64 chunk_type; 3461 3462 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3463 ASSERT(cache); 3464 chunk_type = cache->flags; 3465 btrfs_put_block_group(cache); 3466 3467 if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA)) 3468 return 0; 3469 3470 spin_lock(&fs_info->data_sinfo->lock); 3471 bytes_used = fs_info->data_sinfo->bytes_used; 3472 spin_unlock(&fs_info->data_sinfo->lock); 3473 3474 if (!bytes_used) { 3475 struct btrfs_trans_handle *trans; 3476 int ret; 3477 3478 trans = btrfs_join_transaction(fs_info->tree_root); 3479 if (IS_ERR(trans)) 3480 return PTR_ERR(trans); 3481 3482 ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA); 3483 btrfs_end_transaction(trans); 3484 if (ret < 0) 3485 return ret; 3486 return 1; 3487 } 3488 3489 return 0; 3490 } 3491 3492 static int insert_balance_item(struct btrfs_fs_info *fs_info, 3493 struct btrfs_balance_control *bctl) 3494 { 3495 struct btrfs_root *root = fs_info->tree_root; 3496 struct btrfs_trans_handle *trans; 3497 struct btrfs_balance_item *item; 3498 struct btrfs_disk_balance_args disk_bargs; 3499 struct btrfs_path *path; 3500 struct extent_buffer *leaf; 3501 struct btrfs_key key; 3502 int ret, err; 3503 3504 path = btrfs_alloc_path(); 3505 if (!path) 3506 return -ENOMEM; 3507 3508 trans = btrfs_start_transaction(root, 0); 3509 if (IS_ERR(trans)) { 3510 btrfs_free_path(path); 3511 return PTR_ERR(trans); 3512 } 3513 3514 key.objectid = BTRFS_BALANCE_OBJECTID; 3515 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3516 key.offset = 0; 3517 3518 ret = btrfs_insert_empty_item(trans, root, path, &key, 3519 sizeof(*item)); 3520 if (ret) 3521 goto out; 3522 3523 leaf = path->nodes[0]; 3524 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 3525 3526 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item)); 3527 3528 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data); 3529 btrfs_set_balance_data(leaf, item, &disk_bargs); 3530 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta); 3531 btrfs_set_balance_meta(leaf, item, &disk_bargs); 3532 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys); 3533 btrfs_set_balance_sys(leaf, item, &disk_bargs); 3534 3535 btrfs_set_balance_flags(leaf, item, bctl->flags); 3536 3537 btrfs_mark_buffer_dirty(trans, leaf); 3538 out: 3539 btrfs_free_path(path); 3540 err = btrfs_commit_transaction(trans); 3541 if (err && !ret) 3542 ret = err; 3543 return ret; 3544 } 3545 3546 static int del_balance_item(struct btrfs_fs_info *fs_info) 3547 { 3548 struct btrfs_root *root = fs_info->tree_root; 3549 struct btrfs_trans_handle *trans; 3550 struct btrfs_path *path; 3551 struct btrfs_key key; 3552 int ret, err; 3553 3554 path = btrfs_alloc_path(); 3555 if (!path) 3556 return -ENOMEM; 3557 3558 trans = btrfs_start_transaction_fallback_global_rsv(root, 0); 3559 if (IS_ERR(trans)) { 3560 btrfs_free_path(path); 3561 return PTR_ERR(trans); 3562 } 3563 3564 key.objectid = BTRFS_BALANCE_OBJECTID; 3565 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3566 key.offset = 0; 3567 3568 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 3569 if (ret < 0) 3570 goto out; 3571 if (ret > 0) { 3572 ret = -ENOENT; 3573 goto out; 3574 } 3575 3576 ret = btrfs_del_item(trans, root, path); 3577 out: 3578 btrfs_free_path(path); 3579 err = btrfs_commit_transaction(trans); 3580 if (err && !ret) 3581 ret = err; 3582 return ret; 3583 } 3584 3585 /* 3586 * This is a heuristic used to reduce the number of chunks balanced on 3587 * resume after balance was interrupted. 3588 */ 3589 static void update_balance_args(struct btrfs_balance_control *bctl) 3590 { 3591 /* 3592 * Turn on soft mode for chunk types that were being converted. 3593 */ 3594 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) 3595 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT; 3596 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) 3597 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT; 3598 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) 3599 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT; 3600 3601 /* 3602 * Turn on usage filter if is not already used. The idea is 3603 * that chunks that we have already balanced should be 3604 * reasonably full. Don't do it for chunks that are being 3605 * converted - that will keep us from relocating unconverted 3606 * (albeit full) chunks. 3607 */ 3608 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) && 3609 !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3610 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3611 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE; 3612 bctl->data.usage = 90; 3613 } 3614 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) && 3615 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3616 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3617 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE; 3618 bctl->sys.usage = 90; 3619 } 3620 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) && 3621 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3622 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3623 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE; 3624 bctl->meta.usage = 90; 3625 } 3626 } 3627 3628 /* 3629 * Clear the balance status in fs_info and delete the balance item from disk. 3630 */ 3631 static void reset_balance_state(struct btrfs_fs_info *fs_info) 3632 { 3633 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3634 int ret; 3635 3636 BUG_ON(!fs_info->balance_ctl); 3637 3638 spin_lock(&fs_info->balance_lock); 3639 fs_info->balance_ctl = NULL; 3640 spin_unlock(&fs_info->balance_lock); 3641 3642 kfree(bctl); 3643 ret = del_balance_item(fs_info); 3644 if (ret) 3645 btrfs_handle_fs_error(fs_info, ret, NULL); 3646 } 3647 3648 /* 3649 * Balance filters. Return 1 if chunk should be filtered out 3650 * (should not be balanced). 3651 */ 3652 static int chunk_profiles_filter(u64 chunk_type, 3653 struct btrfs_balance_args *bargs) 3654 { 3655 chunk_type = chunk_to_extended(chunk_type) & 3656 BTRFS_EXTENDED_PROFILE_MASK; 3657 3658 if (bargs->profiles & chunk_type) 3659 return 0; 3660 3661 return 1; 3662 } 3663 3664 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, 3665 struct btrfs_balance_args *bargs) 3666 { 3667 struct btrfs_block_group *cache; 3668 u64 chunk_used; 3669 u64 user_thresh_min; 3670 u64 user_thresh_max; 3671 int ret = 1; 3672 3673 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3674 chunk_used = cache->used; 3675 3676 if (bargs->usage_min == 0) 3677 user_thresh_min = 0; 3678 else 3679 user_thresh_min = mult_perc(cache->length, bargs->usage_min); 3680 3681 if (bargs->usage_max == 0) 3682 user_thresh_max = 1; 3683 else if (bargs->usage_max > 100) 3684 user_thresh_max = cache->length; 3685 else 3686 user_thresh_max = mult_perc(cache->length, bargs->usage_max); 3687 3688 if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max) 3689 ret = 0; 3690 3691 btrfs_put_block_group(cache); 3692 return ret; 3693 } 3694 3695 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, 3696 u64 chunk_offset, struct btrfs_balance_args *bargs) 3697 { 3698 struct btrfs_block_group *cache; 3699 u64 chunk_used, user_thresh; 3700 int ret = 1; 3701 3702 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3703 chunk_used = cache->used; 3704 3705 if (bargs->usage_min == 0) 3706 user_thresh = 1; 3707 else if (bargs->usage > 100) 3708 user_thresh = cache->length; 3709 else 3710 user_thresh = mult_perc(cache->length, bargs->usage); 3711 3712 if (chunk_used < user_thresh) 3713 ret = 0; 3714 3715 btrfs_put_block_group(cache); 3716 return ret; 3717 } 3718 3719 static int chunk_devid_filter(struct extent_buffer *leaf, 3720 struct btrfs_chunk *chunk, 3721 struct btrfs_balance_args *bargs) 3722 { 3723 struct btrfs_stripe *stripe; 3724 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3725 int i; 3726 3727 for (i = 0; i < num_stripes; i++) { 3728 stripe = btrfs_stripe_nr(chunk, i); 3729 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid) 3730 return 0; 3731 } 3732 3733 return 1; 3734 } 3735 3736 static u64 calc_data_stripes(u64 type, int num_stripes) 3737 { 3738 const int index = btrfs_bg_flags_to_raid_index(type); 3739 const int ncopies = btrfs_raid_array[index].ncopies; 3740 const int nparity = btrfs_raid_array[index].nparity; 3741 3742 return (num_stripes - nparity) / ncopies; 3743 } 3744 3745 /* [pstart, pend) */ 3746 static int chunk_drange_filter(struct extent_buffer *leaf, 3747 struct btrfs_chunk *chunk, 3748 struct btrfs_balance_args *bargs) 3749 { 3750 struct btrfs_stripe *stripe; 3751 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3752 u64 stripe_offset; 3753 u64 stripe_length; 3754 u64 type; 3755 int factor; 3756 int i; 3757 3758 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID)) 3759 return 0; 3760 3761 type = btrfs_chunk_type(leaf, chunk); 3762 factor = calc_data_stripes(type, num_stripes); 3763 3764 for (i = 0; i < num_stripes; i++) { 3765 stripe = btrfs_stripe_nr(chunk, i); 3766 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid) 3767 continue; 3768 3769 stripe_offset = btrfs_stripe_offset(leaf, stripe); 3770 stripe_length = btrfs_chunk_length(leaf, chunk); 3771 stripe_length = div_u64(stripe_length, factor); 3772 3773 if (stripe_offset < bargs->pend && 3774 stripe_offset + stripe_length > bargs->pstart) 3775 return 0; 3776 } 3777 3778 return 1; 3779 } 3780 3781 /* [vstart, vend) */ 3782 static int chunk_vrange_filter(struct extent_buffer *leaf, 3783 struct btrfs_chunk *chunk, 3784 u64 chunk_offset, 3785 struct btrfs_balance_args *bargs) 3786 { 3787 if (chunk_offset < bargs->vend && 3788 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart) 3789 /* at least part of the chunk is inside this vrange */ 3790 return 0; 3791 3792 return 1; 3793 } 3794 3795 static int chunk_stripes_range_filter(struct extent_buffer *leaf, 3796 struct btrfs_chunk *chunk, 3797 struct btrfs_balance_args *bargs) 3798 { 3799 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3800 3801 if (bargs->stripes_min <= num_stripes 3802 && num_stripes <= bargs->stripes_max) 3803 return 0; 3804 3805 return 1; 3806 } 3807 3808 static int chunk_soft_convert_filter(u64 chunk_type, 3809 struct btrfs_balance_args *bargs) 3810 { 3811 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 3812 return 0; 3813 3814 chunk_type = chunk_to_extended(chunk_type) & 3815 BTRFS_EXTENDED_PROFILE_MASK; 3816 3817 if (bargs->target == chunk_type) 3818 return 1; 3819 3820 return 0; 3821 } 3822 3823 static int should_balance_chunk(struct extent_buffer *leaf, 3824 struct btrfs_chunk *chunk, u64 chunk_offset) 3825 { 3826 struct btrfs_fs_info *fs_info = leaf->fs_info; 3827 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3828 struct btrfs_balance_args *bargs = NULL; 3829 u64 chunk_type = btrfs_chunk_type(leaf, chunk); 3830 3831 /* type filter */ 3832 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) & 3833 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) { 3834 return 0; 3835 } 3836 3837 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3838 bargs = &bctl->data; 3839 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3840 bargs = &bctl->sys; 3841 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3842 bargs = &bctl->meta; 3843 3844 /* profiles filter */ 3845 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) && 3846 chunk_profiles_filter(chunk_type, bargs)) { 3847 return 0; 3848 } 3849 3850 /* usage filter */ 3851 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) && 3852 chunk_usage_filter(fs_info, chunk_offset, bargs)) { 3853 return 0; 3854 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3855 chunk_usage_range_filter(fs_info, chunk_offset, bargs)) { 3856 return 0; 3857 } 3858 3859 /* devid filter */ 3860 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) && 3861 chunk_devid_filter(leaf, chunk, bargs)) { 3862 return 0; 3863 } 3864 3865 /* drange filter, makes sense only with devid filter */ 3866 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) && 3867 chunk_drange_filter(leaf, chunk, bargs)) { 3868 return 0; 3869 } 3870 3871 /* vrange filter */ 3872 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) && 3873 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) { 3874 return 0; 3875 } 3876 3877 /* stripes filter */ 3878 if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) && 3879 chunk_stripes_range_filter(leaf, chunk, bargs)) { 3880 return 0; 3881 } 3882 3883 /* soft profile changing mode */ 3884 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) && 3885 chunk_soft_convert_filter(chunk_type, bargs)) { 3886 return 0; 3887 } 3888 3889 /* 3890 * limited by count, must be the last filter 3891 */ 3892 if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) { 3893 if (bargs->limit == 0) 3894 return 0; 3895 else 3896 bargs->limit--; 3897 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) { 3898 /* 3899 * Same logic as the 'limit' filter; the minimum cannot be 3900 * determined here because we do not have the global information 3901 * about the count of all chunks that satisfy the filters. 3902 */ 3903 if (bargs->limit_max == 0) 3904 return 0; 3905 else 3906 bargs->limit_max--; 3907 } 3908 3909 return 1; 3910 } 3911 3912 static int __btrfs_balance(struct btrfs_fs_info *fs_info) 3913 { 3914 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3915 struct btrfs_root *chunk_root = fs_info->chunk_root; 3916 u64 chunk_type; 3917 struct btrfs_chunk *chunk; 3918 struct btrfs_path *path = NULL; 3919 struct btrfs_key key; 3920 struct btrfs_key found_key; 3921 struct extent_buffer *leaf; 3922 int slot; 3923 int ret; 3924 int enospc_errors = 0; 3925 bool counting = true; 3926 /* The single value limit and min/max limits use the same bytes in the */ 3927 u64 limit_data = bctl->data.limit; 3928 u64 limit_meta = bctl->meta.limit; 3929 u64 limit_sys = bctl->sys.limit; 3930 u32 count_data = 0; 3931 u32 count_meta = 0; 3932 u32 count_sys = 0; 3933 int chunk_reserved = 0; 3934 3935 path = btrfs_alloc_path(); 3936 if (!path) { 3937 ret = -ENOMEM; 3938 goto error; 3939 } 3940 3941 /* zero out stat counters */ 3942 spin_lock(&fs_info->balance_lock); 3943 memset(&bctl->stat, 0, sizeof(bctl->stat)); 3944 spin_unlock(&fs_info->balance_lock); 3945 again: 3946 if (!counting) { 3947 /* 3948 * The single value limit and min/max limits use the same bytes 3949 * in the 3950 */ 3951 bctl->data.limit = limit_data; 3952 bctl->meta.limit = limit_meta; 3953 bctl->sys.limit = limit_sys; 3954 } 3955 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3956 key.offset = (u64)-1; 3957 key.type = BTRFS_CHUNK_ITEM_KEY; 3958 3959 while (1) { 3960 if ((!counting && atomic_read(&fs_info->balance_pause_req)) || 3961 atomic_read(&fs_info->balance_cancel_req)) { 3962 ret = -ECANCELED; 3963 goto error; 3964 } 3965 3966 mutex_lock(&fs_info->reclaim_bgs_lock); 3967 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3968 if (ret < 0) { 3969 mutex_unlock(&fs_info->reclaim_bgs_lock); 3970 goto error; 3971 } 3972 3973 /* 3974 * this shouldn't happen, it means the last relocate 3975 * failed 3976 */ 3977 if (ret == 0) 3978 BUG(); /* FIXME break ? */ 3979 3980 ret = btrfs_previous_item(chunk_root, path, 0, 3981 BTRFS_CHUNK_ITEM_KEY); 3982 if (ret) { 3983 mutex_unlock(&fs_info->reclaim_bgs_lock); 3984 ret = 0; 3985 break; 3986 } 3987 3988 leaf = path->nodes[0]; 3989 slot = path->slots[0]; 3990 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3991 3992 if (found_key.objectid != key.objectid) { 3993 mutex_unlock(&fs_info->reclaim_bgs_lock); 3994 break; 3995 } 3996 3997 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 3998 chunk_type = btrfs_chunk_type(leaf, chunk); 3999 4000 if (!counting) { 4001 spin_lock(&fs_info->balance_lock); 4002 bctl->stat.considered++; 4003 spin_unlock(&fs_info->balance_lock); 4004 } 4005 4006 ret = should_balance_chunk(leaf, chunk, found_key.offset); 4007 4008 btrfs_release_path(path); 4009 if (!ret) { 4010 mutex_unlock(&fs_info->reclaim_bgs_lock); 4011 goto loop; 4012 } 4013 4014 if (counting) { 4015 mutex_unlock(&fs_info->reclaim_bgs_lock); 4016 spin_lock(&fs_info->balance_lock); 4017 bctl->stat.expected++; 4018 spin_unlock(&fs_info->balance_lock); 4019 4020 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 4021 count_data++; 4022 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 4023 count_sys++; 4024 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 4025 count_meta++; 4026 4027 goto loop; 4028 } 4029 4030 /* 4031 * Apply limit_min filter, no need to check if the LIMITS 4032 * filter is used, limit_min is 0 by default 4033 */ 4034 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) && 4035 count_data < bctl->data.limit_min) 4036 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) && 4037 count_meta < bctl->meta.limit_min) 4038 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) && 4039 count_sys < bctl->sys.limit_min)) { 4040 mutex_unlock(&fs_info->reclaim_bgs_lock); 4041 goto loop; 4042 } 4043 4044 if (!chunk_reserved) { 4045 /* 4046 * We may be relocating the only data chunk we have, 4047 * which could potentially end up with losing data's 4048 * raid profile, so lets allocate an empty one in 4049 * advance. 4050 */ 4051 ret = btrfs_may_alloc_data_chunk(fs_info, 4052 found_key.offset); 4053 if (ret < 0) { 4054 mutex_unlock(&fs_info->reclaim_bgs_lock); 4055 goto error; 4056 } else if (ret == 1) { 4057 chunk_reserved = 1; 4058 } 4059 } 4060 4061 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 4062 mutex_unlock(&fs_info->reclaim_bgs_lock); 4063 if (ret == -ENOSPC) { 4064 enospc_errors++; 4065 } else if (ret == -ETXTBSY) { 4066 btrfs_info(fs_info, 4067 "skipping relocation of block group %llu due to active swapfile", 4068 found_key.offset); 4069 ret = 0; 4070 } else if (ret) { 4071 goto error; 4072 } else { 4073 spin_lock(&fs_info->balance_lock); 4074 bctl->stat.completed++; 4075 spin_unlock(&fs_info->balance_lock); 4076 } 4077 loop: 4078 if (found_key.offset == 0) 4079 break; 4080 key.offset = found_key.offset - 1; 4081 } 4082 4083 if (counting) { 4084 btrfs_release_path(path); 4085 counting = false; 4086 goto again; 4087 } 4088 error: 4089 btrfs_free_path(path); 4090 if (enospc_errors) { 4091 btrfs_info(fs_info, "%d enospc errors during balance", 4092 enospc_errors); 4093 if (!ret) 4094 ret = -ENOSPC; 4095 } 4096 4097 return ret; 4098 } 4099 4100 /* 4101 * See if a given profile is valid and reduced. 4102 * 4103 * @flags: profile to validate 4104 * @extended: if true @flags is treated as an extended profile 4105 */ 4106 static int alloc_profile_is_valid(u64 flags, int extended) 4107 { 4108 u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK : 4109 BTRFS_BLOCK_GROUP_PROFILE_MASK); 4110 4111 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK; 4112 4113 /* 1) check that all other bits are zeroed */ 4114 if (flags & ~mask) 4115 return 0; 4116 4117 /* 2) see if profile is reduced */ 4118 if (flags == 0) 4119 return !extended; /* "0" is valid for usual profiles */ 4120 4121 return has_single_bit_set(flags); 4122 } 4123 4124 /* 4125 * Validate target profile against allowed profiles and return true if it's OK. 4126 * Otherwise print the error message and return false. 4127 */ 4128 static inline int validate_convert_profile(struct btrfs_fs_info *fs_info, 4129 const struct btrfs_balance_args *bargs, 4130 u64 allowed, const char *type) 4131 { 4132 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 4133 return true; 4134 4135 /* Profile is valid and does not have bits outside of the allowed set */ 4136 if (alloc_profile_is_valid(bargs->target, 1) && 4137 (bargs->target & ~allowed) == 0) 4138 return true; 4139 4140 btrfs_err(fs_info, "balance: invalid convert %s profile %s", 4141 type, btrfs_bg_type_to_raid_name(bargs->target)); 4142 return false; 4143 } 4144 4145 /* 4146 * Fill @buf with textual description of balance filter flags @bargs, up to 4147 * @size_buf including the terminating null. The output may be trimmed if it 4148 * does not fit into the provided buffer. 4149 */ 4150 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf, 4151 u32 size_buf) 4152 { 4153 int ret; 4154 u32 size_bp = size_buf; 4155 char *bp = buf; 4156 u64 flags = bargs->flags; 4157 char tmp_buf[128] = {'\0'}; 4158 4159 if (!flags) 4160 return; 4161 4162 #define CHECK_APPEND_NOARG(a) \ 4163 do { \ 4164 ret = snprintf(bp, size_bp, (a)); \ 4165 if (ret < 0 || ret >= size_bp) \ 4166 goto out_overflow; \ 4167 size_bp -= ret; \ 4168 bp += ret; \ 4169 } while (0) 4170 4171 #define CHECK_APPEND_1ARG(a, v1) \ 4172 do { \ 4173 ret = snprintf(bp, size_bp, (a), (v1)); \ 4174 if (ret < 0 || ret >= size_bp) \ 4175 goto out_overflow; \ 4176 size_bp -= ret; \ 4177 bp += ret; \ 4178 } while (0) 4179 4180 #define CHECK_APPEND_2ARG(a, v1, v2) \ 4181 do { \ 4182 ret = snprintf(bp, size_bp, (a), (v1), (v2)); \ 4183 if (ret < 0 || ret >= size_bp) \ 4184 goto out_overflow; \ 4185 size_bp -= ret; \ 4186 bp += ret; \ 4187 } while (0) 4188 4189 if (flags & BTRFS_BALANCE_ARGS_CONVERT) 4190 CHECK_APPEND_1ARG("convert=%s,", 4191 btrfs_bg_type_to_raid_name(bargs->target)); 4192 4193 if (flags & BTRFS_BALANCE_ARGS_SOFT) 4194 CHECK_APPEND_NOARG("soft,"); 4195 4196 if (flags & BTRFS_BALANCE_ARGS_PROFILES) { 4197 btrfs_describe_block_groups(bargs->profiles, tmp_buf, 4198 sizeof(tmp_buf)); 4199 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf); 4200 } 4201 4202 if (flags & BTRFS_BALANCE_ARGS_USAGE) 4203 CHECK_APPEND_1ARG("usage=%llu,", bargs->usage); 4204 4205 if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) 4206 CHECK_APPEND_2ARG("usage=%u..%u,", 4207 bargs->usage_min, bargs->usage_max); 4208 4209 if (flags & BTRFS_BALANCE_ARGS_DEVID) 4210 CHECK_APPEND_1ARG("devid=%llu,", bargs->devid); 4211 4212 if (flags & BTRFS_BALANCE_ARGS_DRANGE) 4213 CHECK_APPEND_2ARG("drange=%llu..%llu,", 4214 bargs->pstart, bargs->pend); 4215 4216 if (flags & BTRFS_BALANCE_ARGS_VRANGE) 4217 CHECK_APPEND_2ARG("vrange=%llu..%llu,", 4218 bargs->vstart, bargs->vend); 4219 4220 if (flags & BTRFS_BALANCE_ARGS_LIMIT) 4221 CHECK_APPEND_1ARG("limit=%llu,", bargs->limit); 4222 4223 if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE) 4224 CHECK_APPEND_2ARG("limit=%u..%u,", 4225 bargs->limit_min, bargs->limit_max); 4226 4227 if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) 4228 CHECK_APPEND_2ARG("stripes=%u..%u,", 4229 bargs->stripes_min, bargs->stripes_max); 4230 4231 #undef CHECK_APPEND_2ARG 4232 #undef CHECK_APPEND_1ARG 4233 #undef CHECK_APPEND_NOARG 4234 4235 out_overflow: 4236 4237 if (size_bp < size_buf) 4238 buf[size_buf - size_bp - 1] = '\0'; /* remove last , */ 4239 else 4240 buf[0] = '\0'; 4241 } 4242 4243 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info) 4244 { 4245 u32 size_buf = 1024; 4246 char tmp_buf[192] = {'\0'}; 4247 char *buf; 4248 char *bp; 4249 u32 size_bp = size_buf; 4250 int ret; 4251 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 4252 4253 buf = kzalloc(size_buf, GFP_KERNEL); 4254 if (!buf) 4255 return; 4256 4257 bp = buf; 4258 4259 #define CHECK_APPEND_1ARG(a, v1) \ 4260 do { \ 4261 ret = snprintf(bp, size_bp, (a), (v1)); \ 4262 if (ret < 0 || ret >= size_bp) \ 4263 goto out_overflow; \ 4264 size_bp -= ret; \ 4265 bp += ret; \ 4266 } while (0) 4267 4268 if (bctl->flags & BTRFS_BALANCE_FORCE) 4269 CHECK_APPEND_1ARG("%s", "-f "); 4270 4271 if (bctl->flags & BTRFS_BALANCE_DATA) { 4272 describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf)); 4273 CHECK_APPEND_1ARG("-d%s ", tmp_buf); 4274 } 4275 4276 if (bctl->flags & BTRFS_BALANCE_METADATA) { 4277 describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf)); 4278 CHECK_APPEND_1ARG("-m%s ", tmp_buf); 4279 } 4280 4281 if (bctl->flags & BTRFS_BALANCE_SYSTEM) { 4282 describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf)); 4283 CHECK_APPEND_1ARG("-s%s ", tmp_buf); 4284 } 4285 4286 #undef CHECK_APPEND_1ARG 4287 4288 out_overflow: 4289 4290 if (size_bp < size_buf) 4291 buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */ 4292 btrfs_info(fs_info, "balance: %s %s", 4293 (bctl->flags & BTRFS_BALANCE_RESUME) ? 4294 "resume" : "start", buf); 4295 4296 kfree(buf); 4297 } 4298 4299 /* 4300 * Should be called with balance mutexe held 4301 */ 4302 int btrfs_balance(struct btrfs_fs_info *fs_info, 4303 struct btrfs_balance_control *bctl, 4304 struct btrfs_ioctl_balance_args *bargs) 4305 { 4306 u64 meta_target, data_target; 4307 u64 allowed; 4308 int mixed = 0; 4309 int ret; 4310 u64 num_devices; 4311 unsigned seq; 4312 bool reducing_redundancy; 4313 bool paused = false; 4314 int i; 4315 4316 if (btrfs_fs_closing(fs_info) || 4317 atomic_read(&fs_info->balance_pause_req) || 4318 btrfs_should_cancel_balance(fs_info)) { 4319 ret = -EINVAL; 4320 goto out; 4321 } 4322 4323 allowed = btrfs_super_incompat_flags(fs_info->super_copy); 4324 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) 4325 mixed = 1; 4326 4327 /* 4328 * In case of mixed groups both data and meta should be picked, 4329 * and identical options should be given for both of them. 4330 */ 4331 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA; 4332 if (mixed && (bctl->flags & allowed)) { 4333 if (!(bctl->flags & BTRFS_BALANCE_DATA) || 4334 !(bctl->flags & BTRFS_BALANCE_METADATA) || 4335 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) { 4336 btrfs_err(fs_info, 4337 "balance: mixed groups data and metadata options must be the same"); 4338 ret = -EINVAL; 4339 goto out; 4340 } 4341 } 4342 4343 /* 4344 * rw_devices will not change at the moment, device add/delete/replace 4345 * are exclusive 4346 */ 4347 num_devices = fs_info->fs_devices->rw_devices; 4348 4349 /* 4350 * SINGLE profile on-disk has no profile bit, but in-memory we have a 4351 * special bit for it, to make it easier to distinguish. Thus we need 4352 * to set it manually, or balance would refuse the profile. 4353 */ 4354 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE; 4355 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) 4356 if (num_devices >= btrfs_raid_array[i].devs_min) 4357 allowed |= btrfs_raid_array[i].bg_flag; 4358 4359 if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") || 4360 !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") || 4361 !validate_convert_profile(fs_info, &bctl->sys, allowed, "system")) { 4362 ret = -EINVAL; 4363 goto out; 4364 } 4365 4366 /* 4367 * Allow to reduce metadata or system integrity only if force set for 4368 * profiles with redundancy (copies, parity) 4369 */ 4370 allowed = 0; 4371 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) { 4372 if (btrfs_raid_array[i].ncopies >= 2 || 4373 btrfs_raid_array[i].tolerated_failures >= 1) 4374 allowed |= btrfs_raid_array[i].bg_flag; 4375 } 4376 do { 4377 seq = read_seqbegin(&fs_info->profiles_lock); 4378 4379 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4380 (fs_info->avail_system_alloc_bits & allowed) && 4381 !(bctl->sys.target & allowed)) || 4382 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4383 (fs_info->avail_metadata_alloc_bits & allowed) && 4384 !(bctl->meta.target & allowed))) 4385 reducing_redundancy = true; 4386 else 4387 reducing_redundancy = false; 4388 4389 /* if we're not converting, the target field is uninitialized */ 4390 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 4391 bctl->meta.target : fs_info->avail_metadata_alloc_bits; 4392 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 4393 bctl->data.target : fs_info->avail_data_alloc_bits; 4394 } while (read_seqretry(&fs_info->profiles_lock, seq)); 4395 4396 if (reducing_redundancy) { 4397 if (bctl->flags & BTRFS_BALANCE_FORCE) { 4398 btrfs_info(fs_info, 4399 "balance: force reducing metadata redundancy"); 4400 } else { 4401 btrfs_err(fs_info, 4402 "balance: reduces metadata redundancy, use --force if you want this"); 4403 ret = -EINVAL; 4404 goto out; 4405 } 4406 } 4407 4408 if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) < 4409 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) { 4410 btrfs_warn(fs_info, 4411 "balance: metadata profile %s has lower redundancy than data profile %s", 4412 btrfs_bg_type_to_raid_name(meta_target), 4413 btrfs_bg_type_to_raid_name(data_target)); 4414 } 4415 4416 ret = insert_balance_item(fs_info, bctl); 4417 if (ret && ret != -EEXIST) 4418 goto out; 4419 4420 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) { 4421 BUG_ON(ret == -EEXIST); 4422 BUG_ON(fs_info->balance_ctl); 4423 spin_lock(&fs_info->balance_lock); 4424 fs_info->balance_ctl = bctl; 4425 spin_unlock(&fs_info->balance_lock); 4426 } else { 4427 BUG_ON(ret != -EEXIST); 4428 spin_lock(&fs_info->balance_lock); 4429 update_balance_args(bctl); 4430 spin_unlock(&fs_info->balance_lock); 4431 } 4432 4433 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4434 set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4435 describe_balance_start_or_resume(fs_info); 4436 mutex_unlock(&fs_info->balance_mutex); 4437 4438 ret = __btrfs_balance(fs_info); 4439 4440 mutex_lock(&fs_info->balance_mutex); 4441 if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req)) { 4442 btrfs_info(fs_info, "balance: paused"); 4443 btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED); 4444 paused = true; 4445 } 4446 /* 4447 * Balance can be canceled by: 4448 * 4449 * - Regular cancel request 4450 * Then ret == -ECANCELED and balance_cancel_req > 0 4451 * 4452 * - Fatal signal to "btrfs" process 4453 * Either the signal caught by wait_reserve_ticket() and callers 4454 * got -EINTR, or caught by btrfs_should_cancel_balance() and 4455 * got -ECANCELED. 4456 * Either way, in this case balance_cancel_req = 0, and 4457 * ret == -EINTR or ret == -ECANCELED. 4458 * 4459 * So here we only check the return value to catch canceled balance. 4460 */ 4461 else if (ret == -ECANCELED || ret == -EINTR) 4462 btrfs_info(fs_info, "balance: canceled"); 4463 else 4464 btrfs_info(fs_info, "balance: ended with status: %d", ret); 4465 4466 clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4467 4468 if (bargs) { 4469 memset(bargs, 0, sizeof(*bargs)); 4470 btrfs_update_ioctl_balance_args(fs_info, bargs); 4471 } 4472 4473 /* We didn't pause, we can clean everything up. */ 4474 if (!paused) { 4475 reset_balance_state(fs_info); 4476 btrfs_exclop_finish(fs_info); 4477 } 4478 4479 wake_up(&fs_info->balance_wait_q); 4480 4481 return ret; 4482 out: 4483 if (bctl->flags & BTRFS_BALANCE_RESUME) 4484 reset_balance_state(fs_info); 4485 else 4486 kfree(bctl); 4487 btrfs_exclop_finish(fs_info); 4488 4489 return ret; 4490 } 4491 4492 static int balance_kthread(void *data) 4493 { 4494 struct btrfs_fs_info *fs_info = data; 4495 int ret = 0; 4496 4497 sb_start_write(fs_info->sb); 4498 mutex_lock(&fs_info->balance_mutex); 4499 if (fs_info->balance_ctl) 4500 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL); 4501 mutex_unlock(&fs_info->balance_mutex); 4502 sb_end_write(fs_info->sb); 4503 4504 return ret; 4505 } 4506 4507 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info) 4508 { 4509 struct task_struct *tsk; 4510 4511 mutex_lock(&fs_info->balance_mutex); 4512 if (!fs_info->balance_ctl) { 4513 mutex_unlock(&fs_info->balance_mutex); 4514 return 0; 4515 } 4516 mutex_unlock(&fs_info->balance_mutex); 4517 4518 if (btrfs_test_opt(fs_info, SKIP_BALANCE)) { 4519 btrfs_info(fs_info, "balance: resume skipped"); 4520 return 0; 4521 } 4522 4523 spin_lock(&fs_info->super_lock); 4524 ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED); 4525 fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE; 4526 spin_unlock(&fs_info->super_lock); 4527 /* 4528 * A ro->rw remount sequence should continue with the paused balance 4529 * regardless of who pauses it, system or the user as of now, so set 4530 * the resume flag. 4531 */ 4532 spin_lock(&fs_info->balance_lock); 4533 fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME; 4534 spin_unlock(&fs_info->balance_lock); 4535 4536 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance"); 4537 return PTR_ERR_OR_ZERO(tsk); 4538 } 4539 4540 int btrfs_recover_balance(struct btrfs_fs_info *fs_info) 4541 { 4542 struct btrfs_balance_control *bctl; 4543 struct btrfs_balance_item *item; 4544 struct btrfs_disk_balance_args disk_bargs; 4545 struct btrfs_path *path; 4546 struct extent_buffer *leaf; 4547 struct btrfs_key key; 4548 int ret; 4549 4550 path = btrfs_alloc_path(); 4551 if (!path) 4552 return -ENOMEM; 4553 4554 key.objectid = BTRFS_BALANCE_OBJECTID; 4555 key.type = BTRFS_TEMPORARY_ITEM_KEY; 4556 key.offset = 0; 4557 4558 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 4559 if (ret < 0) 4560 goto out; 4561 if (ret > 0) { /* ret = -ENOENT; */ 4562 ret = 0; 4563 goto out; 4564 } 4565 4566 bctl = kzalloc(sizeof(*bctl), GFP_NOFS); 4567 if (!bctl) { 4568 ret = -ENOMEM; 4569 goto out; 4570 } 4571 4572 leaf = path->nodes[0]; 4573 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 4574 4575 bctl->flags = btrfs_balance_flags(leaf, item); 4576 bctl->flags |= BTRFS_BALANCE_RESUME; 4577 4578 btrfs_balance_data(leaf, item, &disk_bargs); 4579 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs); 4580 btrfs_balance_meta(leaf, item, &disk_bargs); 4581 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs); 4582 btrfs_balance_sys(leaf, item, &disk_bargs); 4583 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs); 4584 4585 /* 4586 * This should never happen, as the paused balance state is recovered 4587 * during mount without any chance of other exclusive ops to collide. 4588 * 4589 * This gives the exclusive op status to balance and keeps in paused 4590 * state until user intervention (cancel or umount). If the ownership 4591 * cannot be assigned, show a message but do not fail. The balance 4592 * is in a paused state and must have fs_info::balance_ctl properly 4593 * set up. 4594 */ 4595 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED)) 4596 btrfs_warn(fs_info, 4597 "balance: cannot set exclusive op status, resume manually"); 4598 4599 btrfs_release_path(path); 4600 4601 mutex_lock(&fs_info->balance_mutex); 4602 BUG_ON(fs_info->balance_ctl); 4603 spin_lock(&fs_info->balance_lock); 4604 fs_info->balance_ctl = bctl; 4605 spin_unlock(&fs_info->balance_lock); 4606 mutex_unlock(&fs_info->balance_mutex); 4607 out: 4608 btrfs_free_path(path); 4609 return ret; 4610 } 4611 4612 int btrfs_pause_balance(struct btrfs_fs_info *fs_info) 4613 { 4614 int ret = 0; 4615 4616 mutex_lock(&fs_info->balance_mutex); 4617 if (!fs_info->balance_ctl) { 4618 mutex_unlock(&fs_info->balance_mutex); 4619 return -ENOTCONN; 4620 } 4621 4622 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4623 atomic_inc(&fs_info->balance_pause_req); 4624 mutex_unlock(&fs_info->balance_mutex); 4625 4626 wait_event(fs_info->balance_wait_q, 4627 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4628 4629 mutex_lock(&fs_info->balance_mutex); 4630 /* we are good with balance_ctl ripped off from under us */ 4631 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4632 atomic_dec(&fs_info->balance_pause_req); 4633 } else { 4634 ret = -ENOTCONN; 4635 } 4636 4637 mutex_unlock(&fs_info->balance_mutex); 4638 return ret; 4639 } 4640 4641 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info) 4642 { 4643 mutex_lock(&fs_info->balance_mutex); 4644 if (!fs_info->balance_ctl) { 4645 mutex_unlock(&fs_info->balance_mutex); 4646 return -ENOTCONN; 4647 } 4648 4649 /* 4650 * A paused balance with the item stored on disk can be resumed at 4651 * mount time if the mount is read-write. Otherwise it's still paused 4652 * and we must not allow cancelling as it deletes the item. 4653 */ 4654 if (sb_rdonly(fs_info->sb)) { 4655 mutex_unlock(&fs_info->balance_mutex); 4656 return -EROFS; 4657 } 4658 4659 atomic_inc(&fs_info->balance_cancel_req); 4660 /* 4661 * if we are running just wait and return, balance item is 4662 * deleted in btrfs_balance in this case 4663 */ 4664 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4665 mutex_unlock(&fs_info->balance_mutex); 4666 wait_event(fs_info->balance_wait_q, 4667 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4668 mutex_lock(&fs_info->balance_mutex); 4669 } else { 4670 mutex_unlock(&fs_info->balance_mutex); 4671 /* 4672 * Lock released to allow other waiters to continue, we'll 4673 * reexamine the status again. 4674 */ 4675 mutex_lock(&fs_info->balance_mutex); 4676 4677 if (fs_info->balance_ctl) { 4678 reset_balance_state(fs_info); 4679 btrfs_exclop_finish(fs_info); 4680 btrfs_info(fs_info, "balance: canceled"); 4681 } 4682 } 4683 4684 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4685 atomic_dec(&fs_info->balance_cancel_req); 4686 mutex_unlock(&fs_info->balance_mutex); 4687 return 0; 4688 } 4689 4690 int btrfs_uuid_scan_kthread(void *data) 4691 { 4692 struct btrfs_fs_info *fs_info = data; 4693 struct btrfs_root *root = fs_info->tree_root; 4694 struct btrfs_key key; 4695 struct btrfs_path *path = NULL; 4696 int ret = 0; 4697 struct extent_buffer *eb; 4698 int slot; 4699 struct btrfs_root_item root_item; 4700 u32 item_size; 4701 struct btrfs_trans_handle *trans = NULL; 4702 bool closing = false; 4703 4704 path = btrfs_alloc_path(); 4705 if (!path) { 4706 ret = -ENOMEM; 4707 goto out; 4708 } 4709 4710 key.objectid = 0; 4711 key.type = BTRFS_ROOT_ITEM_KEY; 4712 key.offset = 0; 4713 4714 while (1) { 4715 if (btrfs_fs_closing(fs_info)) { 4716 closing = true; 4717 break; 4718 } 4719 ret = btrfs_search_forward(root, &key, path, 4720 BTRFS_OLDEST_GENERATION); 4721 if (ret) { 4722 if (ret > 0) 4723 ret = 0; 4724 break; 4725 } 4726 4727 if (key.type != BTRFS_ROOT_ITEM_KEY || 4728 (key.objectid < BTRFS_FIRST_FREE_OBJECTID && 4729 key.objectid != BTRFS_FS_TREE_OBJECTID) || 4730 key.objectid > BTRFS_LAST_FREE_OBJECTID) 4731 goto skip; 4732 4733 eb = path->nodes[0]; 4734 slot = path->slots[0]; 4735 item_size = btrfs_item_size(eb, slot); 4736 if (item_size < sizeof(root_item)) 4737 goto skip; 4738 4739 read_extent_buffer(eb, &root_item, 4740 btrfs_item_ptr_offset(eb, slot), 4741 (int)sizeof(root_item)); 4742 if (btrfs_root_refs(&root_item) == 0) 4743 goto skip; 4744 4745 if (!btrfs_is_empty_uuid(root_item.uuid) || 4746 !btrfs_is_empty_uuid(root_item.received_uuid)) { 4747 if (trans) 4748 goto update_tree; 4749 4750 btrfs_release_path(path); 4751 /* 4752 * 1 - subvol uuid item 4753 * 1 - received_subvol uuid item 4754 */ 4755 trans = btrfs_start_transaction(fs_info->uuid_root, 2); 4756 if (IS_ERR(trans)) { 4757 ret = PTR_ERR(trans); 4758 break; 4759 } 4760 continue; 4761 } else { 4762 goto skip; 4763 } 4764 update_tree: 4765 btrfs_release_path(path); 4766 if (!btrfs_is_empty_uuid(root_item.uuid)) { 4767 ret = btrfs_uuid_tree_add(trans, root_item.uuid, 4768 BTRFS_UUID_KEY_SUBVOL, 4769 key.objectid); 4770 if (ret < 0) { 4771 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4772 ret); 4773 break; 4774 } 4775 } 4776 4777 if (!btrfs_is_empty_uuid(root_item.received_uuid)) { 4778 ret = btrfs_uuid_tree_add(trans, 4779 root_item.received_uuid, 4780 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 4781 key.objectid); 4782 if (ret < 0) { 4783 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4784 ret); 4785 break; 4786 } 4787 } 4788 4789 skip: 4790 btrfs_release_path(path); 4791 if (trans) { 4792 ret = btrfs_end_transaction(trans); 4793 trans = NULL; 4794 if (ret) 4795 break; 4796 } 4797 4798 if (key.offset < (u64)-1) { 4799 key.offset++; 4800 } else if (key.type < BTRFS_ROOT_ITEM_KEY) { 4801 key.offset = 0; 4802 key.type = BTRFS_ROOT_ITEM_KEY; 4803 } else if (key.objectid < (u64)-1) { 4804 key.offset = 0; 4805 key.type = BTRFS_ROOT_ITEM_KEY; 4806 key.objectid++; 4807 } else { 4808 break; 4809 } 4810 cond_resched(); 4811 } 4812 4813 out: 4814 btrfs_free_path(path); 4815 if (trans && !IS_ERR(trans)) 4816 btrfs_end_transaction(trans); 4817 if (ret) 4818 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret); 4819 else if (!closing) 4820 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags); 4821 up(&fs_info->uuid_tree_rescan_sem); 4822 return 0; 4823 } 4824 4825 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info) 4826 { 4827 struct btrfs_trans_handle *trans; 4828 struct btrfs_root *tree_root = fs_info->tree_root; 4829 struct btrfs_root *uuid_root; 4830 struct task_struct *task; 4831 int ret; 4832 4833 /* 4834 * 1 - root node 4835 * 1 - root item 4836 */ 4837 trans = btrfs_start_transaction(tree_root, 2); 4838 if (IS_ERR(trans)) 4839 return PTR_ERR(trans); 4840 4841 uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID); 4842 if (IS_ERR(uuid_root)) { 4843 ret = PTR_ERR(uuid_root); 4844 btrfs_abort_transaction(trans, ret); 4845 btrfs_end_transaction(trans); 4846 return ret; 4847 } 4848 4849 fs_info->uuid_root = uuid_root; 4850 4851 ret = btrfs_commit_transaction(trans); 4852 if (ret) 4853 return ret; 4854 4855 down(&fs_info->uuid_tree_rescan_sem); 4856 task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid"); 4857 if (IS_ERR(task)) { 4858 /* fs_info->update_uuid_tree_gen remains 0 in all error case */ 4859 btrfs_warn(fs_info, "failed to start uuid_scan task"); 4860 up(&fs_info->uuid_tree_rescan_sem); 4861 return PTR_ERR(task); 4862 } 4863 4864 return 0; 4865 } 4866 4867 /* 4868 * shrinking a device means finding all of the device extents past 4869 * the new size, and then following the back refs to the chunks. 4870 * The chunk relocation code actually frees the device extent 4871 */ 4872 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) 4873 { 4874 struct btrfs_fs_info *fs_info = device->fs_info; 4875 struct btrfs_root *root = fs_info->dev_root; 4876 struct btrfs_trans_handle *trans; 4877 struct btrfs_dev_extent *dev_extent = NULL; 4878 struct btrfs_path *path; 4879 u64 length; 4880 u64 chunk_offset; 4881 int ret; 4882 int slot; 4883 int failed = 0; 4884 bool retried = false; 4885 struct extent_buffer *l; 4886 struct btrfs_key key; 4887 struct btrfs_super_block *super_copy = fs_info->super_copy; 4888 u64 old_total = btrfs_super_total_bytes(super_copy); 4889 u64 old_size = btrfs_device_get_total_bytes(device); 4890 u64 diff; 4891 u64 start; 4892 4893 new_size = round_down(new_size, fs_info->sectorsize); 4894 start = new_size; 4895 diff = round_down(old_size - new_size, fs_info->sectorsize); 4896 4897 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 4898 return -EINVAL; 4899 4900 path = btrfs_alloc_path(); 4901 if (!path) 4902 return -ENOMEM; 4903 4904 path->reada = READA_BACK; 4905 4906 trans = btrfs_start_transaction(root, 0); 4907 if (IS_ERR(trans)) { 4908 btrfs_free_path(path); 4909 return PTR_ERR(trans); 4910 } 4911 4912 mutex_lock(&fs_info->chunk_mutex); 4913 4914 btrfs_device_set_total_bytes(device, new_size); 4915 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 4916 device->fs_devices->total_rw_bytes -= diff; 4917 atomic64_sub(diff, &fs_info->free_chunk_space); 4918 } 4919 4920 /* 4921 * Once the device's size has been set to the new size, ensure all 4922 * in-memory chunks are synced to disk so that the loop below sees them 4923 * and relocates them accordingly. 4924 */ 4925 if (contains_pending_extent(device, &start, diff)) { 4926 mutex_unlock(&fs_info->chunk_mutex); 4927 ret = btrfs_commit_transaction(trans); 4928 if (ret) 4929 goto done; 4930 } else { 4931 mutex_unlock(&fs_info->chunk_mutex); 4932 btrfs_end_transaction(trans); 4933 } 4934 4935 again: 4936 key.objectid = device->devid; 4937 key.offset = (u64)-1; 4938 key.type = BTRFS_DEV_EXTENT_KEY; 4939 4940 do { 4941 mutex_lock(&fs_info->reclaim_bgs_lock); 4942 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4943 if (ret < 0) { 4944 mutex_unlock(&fs_info->reclaim_bgs_lock); 4945 goto done; 4946 } 4947 4948 ret = btrfs_previous_item(root, path, 0, key.type); 4949 if (ret) { 4950 mutex_unlock(&fs_info->reclaim_bgs_lock); 4951 if (ret < 0) 4952 goto done; 4953 ret = 0; 4954 btrfs_release_path(path); 4955 break; 4956 } 4957 4958 l = path->nodes[0]; 4959 slot = path->slots[0]; 4960 btrfs_item_key_to_cpu(l, &key, path->slots[0]); 4961 4962 if (key.objectid != device->devid) { 4963 mutex_unlock(&fs_info->reclaim_bgs_lock); 4964 btrfs_release_path(path); 4965 break; 4966 } 4967 4968 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 4969 length = btrfs_dev_extent_length(l, dev_extent); 4970 4971 if (key.offset + length <= new_size) { 4972 mutex_unlock(&fs_info->reclaim_bgs_lock); 4973 btrfs_release_path(path); 4974 break; 4975 } 4976 4977 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); 4978 btrfs_release_path(path); 4979 4980 /* 4981 * We may be relocating the only data chunk we have, 4982 * which could potentially end up with losing data's 4983 * raid profile, so lets allocate an empty one in 4984 * advance. 4985 */ 4986 ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset); 4987 if (ret < 0) { 4988 mutex_unlock(&fs_info->reclaim_bgs_lock); 4989 goto done; 4990 } 4991 4992 ret = btrfs_relocate_chunk(fs_info, chunk_offset); 4993 mutex_unlock(&fs_info->reclaim_bgs_lock); 4994 if (ret == -ENOSPC) { 4995 failed++; 4996 } else if (ret) { 4997 if (ret == -ETXTBSY) { 4998 btrfs_warn(fs_info, 4999 "could not shrink block group %llu due to active swapfile", 5000 chunk_offset); 5001 } 5002 goto done; 5003 } 5004 } while (key.offset-- > 0); 5005 5006 if (failed && !retried) { 5007 failed = 0; 5008 retried = true; 5009 goto again; 5010 } else if (failed && retried) { 5011 ret = -ENOSPC; 5012 goto done; 5013 } 5014 5015 /* Shrinking succeeded, else we would be at "done". */ 5016 trans = btrfs_start_transaction(root, 0); 5017 if (IS_ERR(trans)) { 5018 ret = PTR_ERR(trans); 5019 goto done; 5020 } 5021 5022 mutex_lock(&fs_info->chunk_mutex); 5023 /* Clear all state bits beyond the shrunk device size */ 5024 clear_extent_bits(&device->alloc_state, new_size, (u64)-1, 5025 CHUNK_STATE_MASK); 5026 5027 btrfs_device_set_disk_total_bytes(device, new_size); 5028 if (list_empty(&device->post_commit_list)) 5029 list_add_tail(&device->post_commit_list, 5030 &trans->transaction->dev_update_list); 5031 5032 WARN_ON(diff > old_total); 5033 btrfs_set_super_total_bytes(super_copy, 5034 round_down(old_total - diff, fs_info->sectorsize)); 5035 mutex_unlock(&fs_info->chunk_mutex); 5036 5037 btrfs_reserve_chunk_metadata(trans, false); 5038 /* Now btrfs_update_device() will change the on-disk size. */ 5039 ret = btrfs_update_device(trans, device); 5040 btrfs_trans_release_chunk_metadata(trans); 5041 if (ret < 0) { 5042 btrfs_abort_transaction(trans, ret); 5043 btrfs_end_transaction(trans); 5044 } else { 5045 ret = btrfs_commit_transaction(trans); 5046 } 5047 done: 5048 btrfs_free_path(path); 5049 if (ret) { 5050 mutex_lock(&fs_info->chunk_mutex); 5051 btrfs_device_set_total_bytes(device, old_size); 5052 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 5053 device->fs_devices->total_rw_bytes += diff; 5054 atomic64_add(diff, &fs_info->free_chunk_space); 5055 mutex_unlock(&fs_info->chunk_mutex); 5056 } 5057 return ret; 5058 } 5059 5060 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info, 5061 struct btrfs_key *key, 5062 struct btrfs_chunk *chunk, int item_size) 5063 { 5064 struct btrfs_super_block *super_copy = fs_info->super_copy; 5065 struct btrfs_disk_key disk_key; 5066 u32 array_size; 5067 u8 *ptr; 5068 5069 lockdep_assert_held(&fs_info->chunk_mutex); 5070 5071 array_size = btrfs_super_sys_array_size(super_copy); 5072 if (array_size + item_size + sizeof(disk_key) 5073 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) 5074 return -EFBIG; 5075 5076 ptr = super_copy->sys_chunk_array + array_size; 5077 btrfs_cpu_key_to_disk(&disk_key, key); 5078 memcpy(ptr, &disk_key, sizeof(disk_key)); 5079 ptr += sizeof(disk_key); 5080 memcpy(ptr, chunk, item_size); 5081 item_size += sizeof(disk_key); 5082 btrfs_set_super_sys_array_size(super_copy, array_size + item_size); 5083 5084 return 0; 5085 } 5086 5087 /* 5088 * sort the devices in descending order by max_avail, total_avail 5089 */ 5090 static int btrfs_cmp_device_info(const void *a, const void *b) 5091 { 5092 const struct btrfs_device_info *di_a = a; 5093 const struct btrfs_device_info *di_b = b; 5094 5095 if (di_a->max_avail > di_b->max_avail) 5096 return -1; 5097 if (di_a->max_avail < di_b->max_avail) 5098 return 1; 5099 if (di_a->total_avail > di_b->total_avail) 5100 return -1; 5101 if (di_a->total_avail < di_b->total_avail) 5102 return 1; 5103 return 0; 5104 } 5105 5106 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type) 5107 { 5108 if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK)) 5109 return; 5110 5111 btrfs_set_fs_incompat(info, RAID56); 5112 } 5113 5114 static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type) 5115 { 5116 if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4))) 5117 return; 5118 5119 btrfs_set_fs_incompat(info, RAID1C34); 5120 } 5121 5122 /* 5123 * Structure used internally for btrfs_create_chunk() function. 5124 * Wraps needed parameters. 5125 */ 5126 struct alloc_chunk_ctl { 5127 u64 start; 5128 u64 type; 5129 /* Total number of stripes to allocate */ 5130 int num_stripes; 5131 /* sub_stripes info for map */ 5132 int sub_stripes; 5133 /* Stripes per device */ 5134 int dev_stripes; 5135 /* Maximum number of devices to use */ 5136 int devs_max; 5137 /* Minimum number of devices to use */ 5138 int devs_min; 5139 /* ndevs has to be a multiple of this */ 5140 int devs_increment; 5141 /* Number of copies */ 5142 int ncopies; 5143 /* Number of stripes worth of bytes to store parity information */ 5144 int nparity; 5145 u64 max_stripe_size; 5146 u64 max_chunk_size; 5147 u64 dev_extent_min; 5148 u64 stripe_size; 5149 u64 chunk_size; 5150 int ndevs; 5151 }; 5152 5153 static void init_alloc_chunk_ctl_policy_regular( 5154 struct btrfs_fs_devices *fs_devices, 5155 struct alloc_chunk_ctl *ctl) 5156 { 5157 struct btrfs_space_info *space_info; 5158 5159 space_info = btrfs_find_space_info(fs_devices->fs_info, ctl->type); 5160 ASSERT(space_info); 5161 5162 ctl->max_chunk_size = READ_ONCE(space_info->chunk_size); 5163 ctl->max_stripe_size = min_t(u64, ctl->max_chunk_size, SZ_1G); 5164 5165 if (ctl->type & BTRFS_BLOCK_GROUP_SYSTEM) 5166 ctl->devs_max = min_t(int, ctl->devs_max, BTRFS_MAX_DEVS_SYS_CHUNK); 5167 5168 /* We don't want a chunk larger than 10% of writable space */ 5169 ctl->max_chunk_size = min(mult_perc(fs_devices->total_rw_bytes, 10), 5170 ctl->max_chunk_size); 5171 ctl->dev_extent_min = btrfs_stripe_nr_to_offset(ctl->dev_stripes); 5172 } 5173 5174 static void init_alloc_chunk_ctl_policy_zoned( 5175 struct btrfs_fs_devices *fs_devices, 5176 struct alloc_chunk_ctl *ctl) 5177 { 5178 u64 zone_size = fs_devices->fs_info->zone_size; 5179 u64 limit; 5180 int min_num_stripes = ctl->devs_min * ctl->dev_stripes; 5181 int min_data_stripes = (min_num_stripes - ctl->nparity) / ctl->ncopies; 5182 u64 min_chunk_size = min_data_stripes * zone_size; 5183 u64 type = ctl->type; 5184 5185 ctl->max_stripe_size = zone_size; 5186 if (type & BTRFS_BLOCK_GROUP_DATA) { 5187 ctl->max_chunk_size = round_down(BTRFS_MAX_DATA_CHUNK_SIZE, 5188 zone_size); 5189 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 5190 ctl->max_chunk_size = ctl->max_stripe_size; 5191 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 5192 ctl->max_chunk_size = 2 * ctl->max_stripe_size; 5193 ctl->devs_max = min_t(int, ctl->devs_max, 5194 BTRFS_MAX_DEVS_SYS_CHUNK); 5195 } else { 5196 BUG(); 5197 } 5198 5199 /* We don't want a chunk larger than 10% of writable space */ 5200 limit = max(round_down(mult_perc(fs_devices->total_rw_bytes, 10), 5201 zone_size), 5202 min_chunk_size); 5203 ctl->max_chunk_size = min(limit, ctl->max_chunk_size); 5204 ctl->dev_extent_min = zone_size * ctl->dev_stripes; 5205 } 5206 5207 static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices, 5208 struct alloc_chunk_ctl *ctl) 5209 { 5210 int index = btrfs_bg_flags_to_raid_index(ctl->type); 5211 5212 ctl->sub_stripes = btrfs_raid_array[index].sub_stripes; 5213 ctl->dev_stripes = btrfs_raid_array[index].dev_stripes; 5214 ctl->devs_max = btrfs_raid_array[index].devs_max; 5215 if (!ctl->devs_max) 5216 ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info); 5217 ctl->devs_min = btrfs_raid_array[index].devs_min; 5218 ctl->devs_increment = btrfs_raid_array[index].devs_increment; 5219 ctl->ncopies = btrfs_raid_array[index].ncopies; 5220 ctl->nparity = btrfs_raid_array[index].nparity; 5221 ctl->ndevs = 0; 5222 5223 switch (fs_devices->chunk_alloc_policy) { 5224 case BTRFS_CHUNK_ALLOC_REGULAR: 5225 init_alloc_chunk_ctl_policy_regular(fs_devices, ctl); 5226 break; 5227 case BTRFS_CHUNK_ALLOC_ZONED: 5228 init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl); 5229 break; 5230 default: 5231 BUG(); 5232 } 5233 } 5234 5235 static int gather_device_info(struct btrfs_fs_devices *fs_devices, 5236 struct alloc_chunk_ctl *ctl, 5237 struct btrfs_device_info *devices_info) 5238 { 5239 struct btrfs_fs_info *info = fs_devices->fs_info; 5240 struct btrfs_device *device; 5241 u64 total_avail; 5242 u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes; 5243 int ret; 5244 int ndevs = 0; 5245 u64 max_avail; 5246 u64 dev_offset; 5247 5248 /* 5249 * in the first pass through the devices list, we gather information 5250 * about the available holes on each device. 5251 */ 5252 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { 5253 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 5254 WARN(1, KERN_ERR 5255 "BTRFS: read-only device in alloc_list\n"); 5256 continue; 5257 } 5258 5259 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 5260 &device->dev_state) || 5261 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 5262 continue; 5263 5264 if (device->total_bytes > device->bytes_used) 5265 total_avail = device->total_bytes - device->bytes_used; 5266 else 5267 total_avail = 0; 5268 5269 /* If there is no space on this device, skip it. */ 5270 if (total_avail < ctl->dev_extent_min) 5271 continue; 5272 5273 ret = find_free_dev_extent(device, dev_extent_want, &dev_offset, 5274 &max_avail); 5275 if (ret && ret != -ENOSPC) 5276 return ret; 5277 5278 if (ret == 0) 5279 max_avail = dev_extent_want; 5280 5281 if (max_avail < ctl->dev_extent_min) { 5282 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 5283 btrfs_debug(info, 5284 "%s: devid %llu has no free space, have=%llu want=%llu", 5285 __func__, device->devid, max_avail, 5286 ctl->dev_extent_min); 5287 continue; 5288 } 5289 5290 if (ndevs == fs_devices->rw_devices) { 5291 WARN(1, "%s: found more than %llu devices\n", 5292 __func__, fs_devices->rw_devices); 5293 break; 5294 } 5295 devices_info[ndevs].dev_offset = dev_offset; 5296 devices_info[ndevs].max_avail = max_avail; 5297 devices_info[ndevs].total_avail = total_avail; 5298 devices_info[ndevs].dev = device; 5299 ++ndevs; 5300 } 5301 ctl->ndevs = ndevs; 5302 5303 /* 5304 * now sort the devices by hole size / available space 5305 */ 5306 sort(devices_info, ndevs, sizeof(struct btrfs_device_info), 5307 btrfs_cmp_device_info, NULL); 5308 5309 return 0; 5310 } 5311 5312 static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl, 5313 struct btrfs_device_info *devices_info) 5314 { 5315 /* Number of stripes that count for block group size */ 5316 int data_stripes; 5317 5318 /* 5319 * The primary goal is to maximize the number of stripes, so use as 5320 * many devices as possible, even if the stripes are not maximum sized. 5321 * 5322 * The DUP profile stores more than one stripe per device, the 5323 * max_avail is the total size so we have to adjust. 5324 */ 5325 ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail, 5326 ctl->dev_stripes); 5327 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5328 5329 /* This will have to be fixed for RAID1 and RAID10 over more drives */ 5330 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5331 5332 /* 5333 * Use the number of data stripes to figure out how big this chunk is 5334 * really going to be in terms of logical address space, and compare 5335 * that answer with the max chunk size. If it's higher, we try to 5336 * reduce stripe_size. 5337 */ 5338 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) { 5339 /* 5340 * Reduce stripe_size, round it up to a 16MB boundary again and 5341 * then use it, unless it ends up being even bigger than the 5342 * previous value we had already. 5343 */ 5344 ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size, 5345 data_stripes), SZ_16M), 5346 ctl->stripe_size); 5347 } 5348 5349 /* Stripe size should not go beyond 1G. */ 5350 ctl->stripe_size = min_t(u64, ctl->stripe_size, SZ_1G); 5351 5352 /* Align to BTRFS_STRIPE_LEN */ 5353 ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN); 5354 ctl->chunk_size = ctl->stripe_size * data_stripes; 5355 5356 return 0; 5357 } 5358 5359 static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl, 5360 struct btrfs_device_info *devices_info) 5361 { 5362 u64 zone_size = devices_info[0].dev->zone_info->zone_size; 5363 /* Number of stripes that count for block group size */ 5364 int data_stripes; 5365 5366 /* 5367 * It should hold because: 5368 * dev_extent_min == dev_extent_want == zone_size * dev_stripes 5369 */ 5370 ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min); 5371 5372 ctl->stripe_size = zone_size; 5373 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5374 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5375 5376 /* stripe_size is fixed in zoned filesysmte. Reduce ndevs instead. */ 5377 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) { 5378 ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies, 5379 ctl->stripe_size) + ctl->nparity, 5380 ctl->dev_stripes); 5381 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5382 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5383 ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size); 5384 } 5385 5386 ctl->chunk_size = ctl->stripe_size * data_stripes; 5387 5388 return 0; 5389 } 5390 5391 static int decide_stripe_size(struct btrfs_fs_devices *fs_devices, 5392 struct alloc_chunk_ctl *ctl, 5393 struct btrfs_device_info *devices_info) 5394 { 5395 struct btrfs_fs_info *info = fs_devices->fs_info; 5396 5397 /* 5398 * Round down to number of usable stripes, devs_increment can be any 5399 * number so we can't use round_down() that requires power of 2, while 5400 * rounddown is safe. 5401 */ 5402 ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment); 5403 5404 if (ctl->ndevs < ctl->devs_min) { 5405 if (btrfs_test_opt(info, ENOSPC_DEBUG)) { 5406 btrfs_debug(info, 5407 "%s: not enough devices with free space: have=%d minimum required=%d", 5408 __func__, ctl->ndevs, ctl->devs_min); 5409 } 5410 return -ENOSPC; 5411 } 5412 5413 ctl->ndevs = min(ctl->ndevs, ctl->devs_max); 5414 5415 switch (fs_devices->chunk_alloc_policy) { 5416 case BTRFS_CHUNK_ALLOC_REGULAR: 5417 return decide_stripe_size_regular(ctl, devices_info); 5418 case BTRFS_CHUNK_ALLOC_ZONED: 5419 return decide_stripe_size_zoned(ctl, devices_info); 5420 default: 5421 BUG(); 5422 } 5423 } 5424 5425 static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans, 5426 struct alloc_chunk_ctl *ctl, 5427 struct btrfs_device_info *devices_info) 5428 { 5429 struct btrfs_fs_info *info = trans->fs_info; 5430 struct map_lookup *map = NULL; 5431 struct extent_map_tree *em_tree; 5432 struct btrfs_block_group *block_group; 5433 struct extent_map *em; 5434 u64 start = ctl->start; 5435 u64 type = ctl->type; 5436 int ret; 5437 int i; 5438 int j; 5439 5440 map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS); 5441 if (!map) 5442 return ERR_PTR(-ENOMEM); 5443 map->num_stripes = ctl->num_stripes; 5444 5445 for (i = 0; i < ctl->ndevs; ++i) { 5446 for (j = 0; j < ctl->dev_stripes; ++j) { 5447 int s = i * ctl->dev_stripes + j; 5448 map->stripes[s].dev = devices_info[i].dev; 5449 map->stripes[s].physical = devices_info[i].dev_offset + 5450 j * ctl->stripe_size; 5451 } 5452 } 5453 map->io_align = BTRFS_STRIPE_LEN; 5454 map->io_width = BTRFS_STRIPE_LEN; 5455 map->type = type; 5456 map->sub_stripes = ctl->sub_stripes; 5457 5458 trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size); 5459 5460 em = alloc_extent_map(); 5461 if (!em) { 5462 kfree(map); 5463 return ERR_PTR(-ENOMEM); 5464 } 5465 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 5466 em->map_lookup = map; 5467 em->start = start; 5468 em->len = ctl->chunk_size; 5469 em->block_start = 0; 5470 em->block_len = em->len; 5471 em->orig_block_len = ctl->stripe_size; 5472 5473 em_tree = &info->mapping_tree; 5474 write_lock(&em_tree->lock); 5475 ret = add_extent_mapping(em_tree, em, 0); 5476 if (ret) { 5477 write_unlock(&em_tree->lock); 5478 free_extent_map(em); 5479 return ERR_PTR(ret); 5480 } 5481 write_unlock(&em_tree->lock); 5482 5483 block_group = btrfs_make_block_group(trans, type, start, ctl->chunk_size); 5484 if (IS_ERR(block_group)) 5485 goto error_del_extent; 5486 5487 for (i = 0; i < map->num_stripes; i++) { 5488 struct btrfs_device *dev = map->stripes[i].dev; 5489 5490 btrfs_device_set_bytes_used(dev, 5491 dev->bytes_used + ctl->stripe_size); 5492 if (list_empty(&dev->post_commit_list)) 5493 list_add_tail(&dev->post_commit_list, 5494 &trans->transaction->dev_update_list); 5495 } 5496 5497 atomic64_sub(ctl->stripe_size * map->num_stripes, 5498 &info->free_chunk_space); 5499 5500 free_extent_map(em); 5501 check_raid56_incompat_flag(info, type); 5502 check_raid1c34_incompat_flag(info, type); 5503 5504 return block_group; 5505 5506 error_del_extent: 5507 write_lock(&em_tree->lock); 5508 remove_extent_mapping(em_tree, em); 5509 write_unlock(&em_tree->lock); 5510 5511 /* One for our allocation */ 5512 free_extent_map(em); 5513 /* One for the tree reference */ 5514 free_extent_map(em); 5515 5516 return block_group; 5517 } 5518 5519 struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans, 5520 u64 type) 5521 { 5522 struct btrfs_fs_info *info = trans->fs_info; 5523 struct btrfs_fs_devices *fs_devices = info->fs_devices; 5524 struct btrfs_device_info *devices_info = NULL; 5525 struct alloc_chunk_ctl ctl; 5526 struct btrfs_block_group *block_group; 5527 int ret; 5528 5529 lockdep_assert_held(&info->chunk_mutex); 5530 5531 if (!alloc_profile_is_valid(type, 0)) { 5532 ASSERT(0); 5533 return ERR_PTR(-EINVAL); 5534 } 5535 5536 if (list_empty(&fs_devices->alloc_list)) { 5537 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 5538 btrfs_debug(info, "%s: no writable device", __func__); 5539 return ERR_PTR(-ENOSPC); 5540 } 5541 5542 if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { 5543 btrfs_err(info, "invalid chunk type 0x%llx requested", type); 5544 ASSERT(0); 5545 return ERR_PTR(-EINVAL); 5546 } 5547 5548 ctl.start = find_next_chunk(info); 5549 ctl.type = type; 5550 init_alloc_chunk_ctl(fs_devices, &ctl); 5551 5552 devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info), 5553 GFP_NOFS); 5554 if (!devices_info) 5555 return ERR_PTR(-ENOMEM); 5556 5557 ret = gather_device_info(fs_devices, &ctl, devices_info); 5558 if (ret < 0) { 5559 block_group = ERR_PTR(ret); 5560 goto out; 5561 } 5562 5563 ret = decide_stripe_size(fs_devices, &ctl, devices_info); 5564 if (ret < 0) { 5565 block_group = ERR_PTR(ret); 5566 goto out; 5567 } 5568 5569 block_group = create_chunk(trans, &ctl, devices_info); 5570 5571 out: 5572 kfree(devices_info); 5573 return block_group; 5574 } 5575 5576 /* 5577 * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to the 5578 * phase 1 of chunk allocation. It belongs to phase 2 only when allocating system 5579 * chunks. 5580 * 5581 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 5582 * phases. 5583 */ 5584 int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans, 5585 struct btrfs_block_group *bg) 5586 { 5587 struct btrfs_fs_info *fs_info = trans->fs_info; 5588 struct btrfs_root *chunk_root = fs_info->chunk_root; 5589 struct btrfs_key key; 5590 struct btrfs_chunk *chunk; 5591 struct btrfs_stripe *stripe; 5592 struct extent_map *em; 5593 struct map_lookup *map; 5594 size_t item_size; 5595 int i; 5596 int ret; 5597 5598 /* 5599 * We take the chunk_mutex for 2 reasons: 5600 * 5601 * 1) Updates and insertions in the chunk btree must be done while holding 5602 * the chunk_mutex, as well as updating the system chunk array in the 5603 * superblock. See the comment on top of btrfs_chunk_alloc() for the 5604 * details; 5605 * 5606 * 2) To prevent races with the final phase of a device replace operation 5607 * that replaces the device object associated with the map's stripes, 5608 * because the device object's id can change at any time during that 5609 * final phase of the device replace operation 5610 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the 5611 * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID, 5612 * which would cause a failure when updating the device item, which does 5613 * not exists, or persisting a stripe of the chunk item with such ID. 5614 * Here we can't use the device_list_mutex because our caller already 5615 * has locked the chunk_mutex, and the final phase of device replace 5616 * acquires both mutexes - first the device_list_mutex and then the 5617 * chunk_mutex. Using any of those two mutexes protects us from a 5618 * concurrent device replace. 5619 */ 5620 lockdep_assert_held(&fs_info->chunk_mutex); 5621 5622 em = btrfs_get_chunk_map(fs_info, bg->start, bg->length); 5623 if (IS_ERR(em)) { 5624 ret = PTR_ERR(em); 5625 btrfs_abort_transaction(trans, ret); 5626 return ret; 5627 } 5628 5629 map = em->map_lookup; 5630 item_size = btrfs_chunk_item_size(map->num_stripes); 5631 5632 chunk = kzalloc(item_size, GFP_NOFS); 5633 if (!chunk) { 5634 ret = -ENOMEM; 5635 btrfs_abort_transaction(trans, ret); 5636 goto out; 5637 } 5638 5639 for (i = 0; i < map->num_stripes; i++) { 5640 struct btrfs_device *device = map->stripes[i].dev; 5641 5642 ret = btrfs_update_device(trans, device); 5643 if (ret) 5644 goto out; 5645 } 5646 5647 stripe = &chunk->stripe; 5648 for (i = 0; i < map->num_stripes; i++) { 5649 struct btrfs_device *device = map->stripes[i].dev; 5650 const u64 dev_offset = map->stripes[i].physical; 5651 5652 btrfs_set_stack_stripe_devid(stripe, device->devid); 5653 btrfs_set_stack_stripe_offset(stripe, dev_offset); 5654 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE); 5655 stripe++; 5656 } 5657 5658 btrfs_set_stack_chunk_length(chunk, bg->length); 5659 btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID); 5660 btrfs_set_stack_chunk_stripe_len(chunk, BTRFS_STRIPE_LEN); 5661 btrfs_set_stack_chunk_type(chunk, map->type); 5662 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes); 5663 btrfs_set_stack_chunk_io_align(chunk, BTRFS_STRIPE_LEN); 5664 btrfs_set_stack_chunk_io_width(chunk, BTRFS_STRIPE_LEN); 5665 btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize); 5666 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes); 5667 5668 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 5669 key.type = BTRFS_CHUNK_ITEM_KEY; 5670 key.offset = bg->start; 5671 5672 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size); 5673 if (ret) 5674 goto out; 5675 5676 set_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED, &bg->runtime_flags); 5677 5678 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 5679 ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size); 5680 if (ret) 5681 goto out; 5682 } 5683 5684 out: 5685 kfree(chunk); 5686 free_extent_map(em); 5687 return ret; 5688 } 5689 5690 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans) 5691 { 5692 struct btrfs_fs_info *fs_info = trans->fs_info; 5693 u64 alloc_profile; 5694 struct btrfs_block_group *meta_bg; 5695 struct btrfs_block_group *sys_bg; 5696 5697 /* 5698 * When adding a new device for sprouting, the seed device is read-only 5699 * so we must first allocate a metadata and a system chunk. But before 5700 * adding the block group items to the extent, device and chunk btrees, 5701 * we must first: 5702 * 5703 * 1) Create both chunks without doing any changes to the btrees, as 5704 * otherwise we would get -ENOSPC since the block groups from the 5705 * seed device are read-only; 5706 * 5707 * 2) Add the device item for the new sprout device - finishing the setup 5708 * of a new block group requires updating the device item in the chunk 5709 * btree, so it must exist when we attempt to do it. The previous step 5710 * ensures this does not fail with -ENOSPC. 5711 * 5712 * After that we can add the block group items to their btrees: 5713 * update existing device item in the chunk btree, add a new block group 5714 * item to the extent btree, add a new chunk item to the chunk btree and 5715 * finally add the new device extent items to the devices btree. 5716 */ 5717 5718 alloc_profile = btrfs_metadata_alloc_profile(fs_info); 5719 meta_bg = btrfs_create_chunk(trans, alloc_profile); 5720 if (IS_ERR(meta_bg)) 5721 return PTR_ERR(meta_bg); 5722 5723 alloc_profile = btrfs_system_alloc_profile(fs_info); 5724 sys_bg = btrfs_create_chunk(trans, alloc_profile); 5725 if (IS_ERR(sys_bg)) 5726 return PTR_ERR(sys_bg); 5727 5728 return 0; 5729 } 5730 5731 static inline int btrfs_chunk_max_errors(struct map_lookup *map) 5732 { 5733 const int index = btrfs_bg_flags_to_raid_index(map->type); 5734 5735 return btrfs_raid_array[index].tolerated_failures; 5736 } 5737 5738 bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset) 5739 { 5740 struct extent_map *em; 5741 struct map_lookup *map; 5742 int miss_ndevs = 0; 5743 int i; 5744 bool ret = true; 5745 5746 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 5747 if (IS_ERR(em)) 5748 return false; 5749 5750 map = em->map_lookup; 5751 for (i = 0; i < map->num_stripes; i++) { 5752 if (test_bit(BTRFS_DEV_STATE_MISSING, 5753 &map->stripes[i].dev->dev_state)) { 5754 miss_ndevs++; 5755 continue; 5756 } 5757 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, 5758 &map->stripes[i].dev->dev_state)) { 5759 ret = false; 5760 goto end; 5761 } 5762 } 5763 5764 /* 5765 * If the number of missing devices is larger than max errors, we can 5766 * not write the data into that chunk successfully. 5767 */ 5768 if (miss_ndevs > btrfs_chunk_max_errors(map)) 5769 ret = false; 5770 end: 5771 free_extent_map(em); 5772 return ret; 5773 } 5774 5775 void btrfs_mapping_tree_free(struct extent_map_tree *tree) 5776 { 5777 struct extent_map *em; 5778 5779 while (1) { 5780 write_lock(&tree->lock); 5781 em = lookup_extent_mapping(tree, 0, (u64)-1); 5782 if (em) 5783 remove_extent_mapping(tree, em); 5784 write_unlock(&tree->lock); 5785 if (!em) 5786 break; 5787 /* once for us */ 5788 free_extent_map(em); 5789 /* once for the tree */ 5790 free_extent_map(em); 5791 } 5792 } 5793 5794 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5795 { 5796 struct extent_map *em; 5797 struct map_lookup *map; 5798 enum btrfs_raid_types index; 5799 int ret = 1; 5800 5801 em = btrfs_get_chunk_map(fs_info, logical, len); 5802 if (IS_ERR(em)) 5803 /* 5804 * We could return errors for these cases, but that could get 5805 * ugly and we'd probably do the same thing which is just not do 5806 * anything else and exit, so return 1 so the callers don't try 5807 * to use other copies. 5808 */ 5809 return 1; 5810 5811 map = em->map_lookup; 5812 index = btrfs_bg_flags_to_raid_index(map->type); 5813 5814 /* Non-RAID56, use their ncopies from btrfs_raid_array. */ 5815 if (!(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)) 5816 ret = btrfs_raid_array[index].ncopies; 5817 else if (map->type & BTRFS_BLOCK_GROUP_RAID5) 5818 ret = 2; 5819 else if (map->type & BTRFS_BLOCK_GROUP_RAID6) 5820 /* 5821 * There could be two corrupted data stripes, we need 5822 * to loop retry in order to rebuild the correct data. 5823 * 5824 * Fail a stripe at a time on every retry except the 5825 * stripe under reconstruction. 5826 */ 5827 ret = map->num_stripes; 5828 free_extent_map(em); 5829 return ret; 5830 } 5831 5832 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info, 5833 u64 logical) 5834 { 5835 struct extent_map *em; 5836 struct map_lookup *map; 5837 unsigned long len = fs_info->sectorsize; 5838 5839 if (!btrfs_fs_incompat(fs_info, RAID56)) 5840 return len; 5841 5842 em = btrfs_get_chunk_map(fs_info, logical, len); 5843 5844 if (!WARN_ON(IS_ERR(em))) { 5845 map = em->map_lookup; 5846 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 5847 len = btrfs_stripe_nr_to_offset(nr_data_stripes(map)); 5848 free_extent_map(em); 5849 } 5850 return len; 5851 } 5852 5853 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5854 { 5855 struct extent_map *em; 5856 struct map_lookup *map; 5857 int ret = 0; 5858 5859 if (!btrfs_fs_incompat(fs_info, RAID56)) 5860 return 0; 5861 5862 em = btrfs_get_chunk_map(fs_info, logical, len); 5863 5864 if(!WARN_ON(IS_ERR(em))) { 5865 map = em->map_lookup; 5866 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 5867 ret = 1; 5868 free_extent_map(em); 5869 } 5870 return ret; 5871 } 5872 5873 static int find_live_mirror(struct btrfs_fs_info *fs_info, 5874 struct map_lookup *map, int first, 5875 int dev_replace_is_ongoing) 5876 { 5877 int i; 5878 int num_stripes; 5879 int preferred_mirror; 5880 int tolerance; 5881 struct btrfs_device *srcdev; 5882 5883 ASSERT((map->type & 5884 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10))); 5885 5886 if (map->type & BTRFS_BLOCK_GROUP_RAID10) 5887 num_stripes = map->sub_stripes; 5888 else 5889 num_stripes = map->num_stripes; 5890 5891 switch (fs_info->fs_devices->read_policy) { 5892 default: 5893 /* Shouldn't happen, just warn and use pid instead of failing */ 5894 btrfs_warn_rl(fs_info, 5895 "unknown read_policy type %u, reset to pid", 5896 fs_info->fs_devices->read_policy); 5897 fs_info->fs_devices->read_policy = BTRFS_READ_POLICY_PID; 5898 fallthrough; 5899 case BTRFS_READ_POLICY_PID: 5900 preferred_mirror = first + (current->pid % num_stripes); 5901 break; 5902 } 5903 5904 if (dev_replace_is_ongoing && 5905 fs_info->dev_replace.cont_reading_from_srcdev_mode == 5906 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID) 5907 srcdev = fs_info->dev_replace.srcdev; 5908 else 5909 srcdev = NULL; 5910 5911 /* 5912 * try to avoid the drive that is the source drive for a 5913 * dev-replace procedure, only choose it if no other non-missing 5914 * mirror is available 5915 */ 5916 for (tolerance = 0; tolerance < 2; tolerance++) { 5917 if (map->stripes[preferred_mirror].dev->bdev && 5918 (tolerance || map->stripes[preferred_mirror].dev != srcdev)) 5919 return preferred_mirror; 5920 for (i = first; i < first + num_stripes; i++) { 5921 if (map->stripes[i].dev->bdev && 5922 (tolerance || map->stripes[i].dev != srcdev)) 5923 return i; 5924 } 5925 } 5926 5927 /* we couldn't find one that doesn't fail. Just return something 5928 * and the io error handling code will clean up eventually 5929 */ 5930 return preferred_mirror; 5931 } 5932 5933 static struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info, 5934 u16 total_stripes) 5935 { 5936 struct btrfs_io_context *bioc; 5937 5938 bioc = kzalloc( 5939 /* The size of btrfs_io_context */ 5940 sizeof(struct btrfs_io_context) + 5941 /* Plus the variable array for the stripes */ 5942 sizeof(struct btrfs_io_stripe) * (total_stripes), 5943 GFP_NOFS); 5944 5945 if (!bioc) 5946 return NULL; 5947 5948 refcount_set(&bioc->refs, 1); 5949 5950 bioc->fs_info = fs_info; 5951 bioc->replace_stripe_src = -1; 5952 bioc->full_stripe_logical = (u64)-1; 5953 5954 return bioc; 5955 } 5956 5957 void btrfs_get_bioc(struct btrfs_io_context *bioc) 5958 { 5959 WARN_ON(!refcount_read(&bioc->refs)); 5960 refcount_inc(&bioc->refs); 5961 } 5962 5963 void btrfs_put_bioc(struct btrfs_io_context *bioc) 5964 { 5965 if (!bioc) 5966 return; 5967 if (refcount_dec_and_test(&bioc->refs)) 5968 kfree(bioc); 5969 } 5970 5971 /* 5972 * Please note that, discard won't be sent to target device of device 5973 * replace. 5974 */ 5975 struct btrfs_discard_stripe *btrfs_map_discard(struct btrfs_fs_info *fs_info, 5976 u64 logical, u64 *length_ret, 5977 u32 *num_stripes) 5978 { 5979 struct extent_map *em; 5980 struct map_lookup *map; 5981 struct btrfs_discard_stripe *stripes; 5982 u64 length = *length_ret; 5983 u64 offset; 5984 u32 stripe_nr; 5985 u32 stripe_nr_end; 5986 u32 stripe_cnt; 5987 u64 stripe_end_offset; 5988 u64 stripe_offset; 5989 u32 stripe_index; 5990 u32 factor = 0; 5991 u32 sub_stripes = 0; 5992 u32 stripes_per_dev = 0; 5993 u32 remaining_stripes = 0; 5994 u32 last_stripe = 0; 5995 int ret; 5996 int i; 5997 5998 em = btrfs_get_chunk_map(fs_info, logical, length); 5999 if (IS_ERR(em)) 6000 return ERR_CAST(em); 6001 6002 map = em->map_lookup; 6003 6004 /* we don't discard raid56 yet */ 6005 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 6006 ret = -EOPNOTSUPP; 6007 goto out_free_map; 6008 } 6009 6010 offset = logical - em->start; 6011 length = min_t(u64, em->start + em->len - logical, length); 6012 *length_ret = length; 6013 6014 /* 6015 * stripe_nr counts the total number of stripes we have to stride 6016 * to get to this block 6017 */ 6018 stripe_nr = offset >> BTRFS_STRIPE_LEN_SHIFT; 6019 6020 /* stripe_offset is the offset of this block in its stripe */ 6021 stripe_offset = offset - btrfs_stripe_nr_to_offset(stripe_nr); 6022 6023 stripe_nr_end = round_up(offset + length, BTRFS_STRIPE_LEN) >> 6024 BTRFS_STRIPE_LEN_SHIFT; 6025 stripe_cnt = stripe_nr_end - stripe_nr; 6026 stripe_end_offset = btrfs_stripe_nr_to_offset(stripe_nr_end) - 6027 (offset + length); 6028 /* 6029 * after this, stripe_nr is the number of stripes on this 6030 * device we have to walk to find the data, and stripe_index is 6031 * the number of our device in the stripe array 6032 */ 6033 *num_stripes = 1; 6034 stripe_index = 0; 6035 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 6036 BTRFS_BLOCK_GROUP_RAID10)) { 6037 if (map->type & BTRFS_BLOCK_GROUP_RAID0) 6038 sub_stripes = 1; 6039 else 6040 sub_stripes = map->sub_stripes; 6041 6042 factor = map->num_stripes / sub_stripes; 6043 *num_stripes = min_t(u64, map->num_stripes, 6044 sub_stripes * stripe_cnt); 6045 stripe_index = stripe_nr % factor; 6046 stripe_nr /= factor; 6047 stripe_index *= sub_stripes; 6048 6049 remaining_stripes = stripe_cnt % factor; 6050 stripes_per_dev = stripe_cnt / factor; 6051 last_stripe = ((stripe_nr_end - 1) % factor) * sub_stripes; 6052 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK | 6053 BTRFS_BLOCK_GROUP_DUP)) { 6054 *num_stripes = map->num_stripes; 6055 } else { 6056 stripe_index = stripe_nr % map->num_stripes; 6057 stripe_nr /= map->num_stripes; 6058 } 6059 6060 stripes = kcalloc(*num_stripes, sizeof(*stripes), GFP_NOFS); 6061 if (!stripes) { 6062 ret = -ENOMEM; 6063 goto out_free_map; 6064 } 6065 6066 for (i = 0; i < *num_stripes; i++) { 6067 stripes[i].physical = 6068 map->stripes[stripe_index].physical + 6069 stripe_offset + btrfs_stripe_nr_to_offset(stripe_nr); 6070 stripes[i].dev = map->stripes[stripe_index].dev; 6071 6072 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 6073 BTRFS_BLOCK_GROUP_RAID10)) { 6074 stripes[i].length = btrfs_stripe_nr_to_offset(stripes_per_dev); 6075 6076 if (i / sub_stripes < remaining_stripes) 6077 stripes[i].length += BTRFS_STRIPE_LEN; 6078 6079 /* 6080 * Special for the first stripe and 6081 * the last stripe: 6082 * 6083 * |-------|...|-------| 6084 * |----------| 6085 * off end_off 6086 */ 6087 if (i < sub_stripes) 6088 stripes[i].length -= stripe_offset; 6089 6090 if (stripe_index >= last_stripe && 6091 stripe_index <= (last_stripe + 6092 sub_stripes - 1)) 6093 stripes[i].length -= stripe_end_offset; 6094 6095 if (i == sub_stripes - 1) 6096 stripe_offset = 0; 6097 } else { 6098 stripes[i].length = length; 6099 } 6100 6101 stripe_index++; 6102 if (stripe_index == map->num_stripes) { 6103 stripe_index = 0; 6104 stripe_nr++; 6105 } 6106 } 6107 6108 free_extent_map(em); 6109 return stripes; 6110 out_free_map: 6111 free_extent_map(em); 6112 return ERR_PTR(ret); 6113 } 6114 6115 static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical) 6116 { 6117 struct btrfs_block_group *cache; 6118 bool ret; 6119 6120 /* Non zoned filesystem does not use "to_copy" flag */ 6121 if (!btrfs_is_zoned(fs_info)) 6122 return false; 6123 6124 cache = btrfs_lookup_block_group(fs_info, logical); 6125 6126 ret = test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags); 6127 6128 btrfs_put_block_group(cache); 6129 return ret; 6130 } 6131 6132 static void handle_ops_on_dev_replace(enum btrfs_map_op op, 6133 struct btrfs_io_context *bioc, 6134 struct btrfs_dev_replace *dev_replace, 6135 u64 logical, 6136 int *num_stripes_ret, int *max_errors_ret) 6137 { 6138 u64 srcdev_devid = dev_replace->srcdev->devid; 6139 /* 6140 * At this stage, num_stripes is still the real number of stripes, 6141 * excluding the duplicated stripes. 6142 */ 6143 int num_stripes = *num_stripes_ret; 6144 int nr_extra_stripes = 0; 6145 int max_errors = *max_errors_ret; 6146 int i; 6147 6148 /* 6149 * A block group which has "to_copy" set will eventually be copied by 6150 * the dev-replace process. We can avoid cloning IO here. 6151 */ 6152 if (is_block_group_to_copy(dev_replace->srcdev->fs_info, logical)) 6153 return; 6154 6155 /* 6156 * Duplicate the write operations while the dev-replace procedure is 6157 * running. Since the copying of the old disk to the new disk takes 6158 * place at run time while the filesystem is mounted writable, the 6159 * regular write operations to the old disk have to be duplicated to go 6160 * to the new disk as well. 6161 * 6162 * Note that device->missing is handled by the caller, and that the 6163 * write to the old disk is already set up in the stripes array. 6164 */ 6165 for (i = 0; i < num_stripes; i++) { 6166 struct btrfs_io_stripe *old = &bioc->stripes[i]; 6167 struct btrfs_io_stripe *new = &bioc->stripes[num_stripes + nr_extra_stripes]; 6168 6169 if (old->dev->devid != srcdev_devid) 6170 continue; 6171 6172 new->physical = old->physical; 6173 new->dev = dev_replace->tgtdev; 6174 if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) 6175 bioc->replace_stripe_src = i; 6176 nr_extra_stripes++; 6177 } 6178 6179 /* We can only have at most 2 extra nr_stripes (for DUP). */ 6180 ASSERT(nr_extra_stripes <= 2); 6181 /* 6182 * For GET_READ_MIRRORS, we can only return at most 1 extra stripe for 6183 * replace. 6184 * If we have 2 extra stripes, only choose the one with smaller physical. 6185 */ 6186 if (op == BTRFS_MAP_GET_READ_MIRRORS && nr_extra_stripes == 2) { 6187 struct btrfs_io_stripe *first = &bioc->stripes[num_stripes]; 6188 struct btrfs_io_stripe *second = &bioc->stripes[num_stripes + 1]; 6189 6190 /* Only DUP can have two extra stripes. */ 6191 ASSERT(bioc->map_type & BTRFS_BLOCK_GROUP_DUP); 6192 6193 /* 6194 * Swap the last stripe stripes and reduce @nr_extra_stripes. 6195 * The extra stripe would still be there, but won't be accessed. 6196 */ 6197 if (first->physical > second->physical) { 6198 swap(second->physical, first->physical); 6199 swap(second->dev, first->dev); 6200 nr_extra_stripes--; 6201 } 6202 } 6203 6204 *num_stripes_ret = num_stripes + nr_extra_stripes; 6205 *max_errors_ret = max_errors + nr_extra_stripes; 6206 bioc->replace_nr_stripes = nr_extra_stripes; 6207 } 6208 6209 static u64 btrfs_max_io_len(struct map_lookup *map, enum btrfs_map_op op, 6210 u64 offset, u32 *stripe_nr, u64 *stripe_offset, 6211 u64 *full_stripe_start) 6212 { 6213 /* 6214 * Stripe_nr is the stripe where this block falls. stripe_offset is 6215 * the offset of this block in its stripe. 6216 */ 6217 *stripe_offset = offset & BTRFS_STRIPE_LEN_MASK; 6218 *stripe_nr = offset >> BTRFS_STRIPE_LEN_SHIFT; 6219 ASSERT(*stripe_offset < U32_MAX); 6220 6221 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 6222 unsigned long full_stripe_len = 6223 btrfs_stripe_nr_to_offset(nr_data_stripes(map)); 6224 6225 /* 6226 * For full stripe start, we use previously calculated 6227 * @stripe_nr. Align it to nr_data_stripes, then multiply with 6228 * STRIPE_LEN. 6229 * 6230 * By this we can avoid u64 division completely. And we have 6231 * to go rounddown(), not round_down(), as nr_data_stripes is 6232 * not ensured to be power of 2. 6233 */ 6234 *full_stripe_start = 6235 btrfs_stripe_nr_to_offset( 6236 rounddown(*stripe_nr, nr_data_stripes(map))); 6237 6238 ASSERT(*full_stripe_start + full_stripe_len > offset); 6239 ASSERT(*full_stripe_start <= offset); 6240 /* 6241 * For writes to RAID56, allow to write a full stripe set, but 6242 * no straddling of stripe sets. 6243 */ 6244 if (op == BTRFS_MAP_WRITE) 6245 return full_stripe_len - (offset - *full_stripe_start); 6246 } 6247 6248 /* 6249 * For other RAID types and for RAID56 reads, allow a single stripe (on 6250 * a single disk). 6251 */ 6252 if (map->type & BTRFS_BLOCK_GROUP_STRIPE_MASK) 6253 return BTRFS_STRIPE_LEN - *stripe_offset; 6254 return U64_MAX; 6255 } 6256 6257 static void set_io_stripe(struct btrfs_io_stripe *dst, const struct map_lookup *map, 6258 u32 stripe_index, u64 stripe_offset, u32 stripe_nr) 6259 { 6260 dst->dev = map->stripes[stripe_index].dev; 6261 dst->physical = map->stripes[stripe_index].physical + 6262 stripe_offset + btrfs_stripe_nr_to_offset(stripe_nr); 6263 } 6264 6265 /* 6266 * Map one logical range to one or more physical ranges. 6267 * 6268 * @length: (Mandatory) mapped length of this run. 6269 * One logical range can be split into different segments 6270 * due to factors like zones and RAID0/5/6/10 stripe 6271 * boundaries. 6272 * 6273 * @bioc_ret: (Mandatory) returned btrfs_io_context structure. 6274 * which has one or more physical ranges (btrfs_io_stripe) 6275 * recorded inside. 6276 * Caller should call btrfs_put_bioc() to free it after use. 6277 * 6278 * @smap: (Optional) single physical range optimization. 6279 * If the map request can be fulfilled by one single 6280 * physical range, and this is parameter is not NULL, 6281 * then @bioc_ret would be NULL, and @smap would be 6282 * updated. 6283 * 6284 * @mirror_num_ret: (Mandatory) returned mirror number if the original 6285 * value is 0. 6286 * 6287 * Mirror number 0 means to choose any live mirrors. 6288 * 6289 * For non-RAID56 profiles, non-zero mirror_num means 6290 * the Nth mirror. (e.g. mirror_num 1 means the first 6291 * copy). 6292 * 6293 * For RAID56 profile, mirror 1 means rebuild from P and 6294 * the remaining data stripes. 6295 * 6296 * For RAID6 profile, mirror > 2 means mark another 6297 * data/P stripe error and rebuild from the remaining 6298 * stripes.. 6299 * 6300 * @need_raid_map: (Used only for integrity checker) whether the map wants 6301 * a full stripe map (including all data and P/Q stripes) 6302 * for RAID56. Should always be 1 except integrity checker. 6303 */ 6304 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, 6305 u64 logical, u64 *length, 6306 struct btrfs_io_context **bioc_ret, 6307 struct btrfs_io_stripe *smap, int *mirror_num_ret, 6308 int need_raid_map) 6309 { 6310 struct extent_map *em; 6311 struct map_lookup *map; 6312 u64 map_offset; 6313 u64 stripe_offset; 6314 u32 stripe_nr; 6315 u32 stripe_index; 6316 int data_stripes; 6317 int i; 6318 int ret = 0; 6319 int mirror_num = (mirror_num_ret ? *mirror_num_ret : 0); 6320 int num_stripes; 6321 int num_copies; 6322 int max_errors = 0; 6323 struct btrfs_io_context *bioc = NULL; 6324 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; 6325 int dev_replace_is_ongoing = 0; 6326 u16 num_alloc_stripes; 6327 u64 raid56_full_stripe_start = (u64)-1; 6328 u64 max_len; 6329 6330 ASSERT(bioc_ret); 6331 6332 num_copies = btrfs_num_copies(fs_info, logical, fs_info->sectorsize); 6333 if (mirror_num > num_copies) 6334 return -EINVAL; 6335 6336 em = btrfs_get_chunk_map(fs_info, logical, *length); 6337 if (IS_ERR(em)) 6338 return PTR_ERR(em); 6339 6340 map = em->map_lookup; 6341 data_stripes = nr_data_stripes(map); 6342 6343 map_offset = logical - em->start; 6344 max_len = btrfs_max_io_len(map, op, map_offset, &stripe_nr, 6345 &stripe_offset, &raid56_full_stripe_start); 6346 *length = min_t(u64, em->len - map_offset, max_len); 6347 6348 if (dev_replace->replace_task != current) 6349 down_read(&dev_replace->rwsem); 6350 6351 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace); 6352 /* 6353 * Hold the semaphore for read during the whole operation, write is 6354 * requested at commit time but must wait. 6355 */ 6356 if (!dev_replace_is_ongoing && dev_replace->replace_task != current) 6357 up_read(&dev_replace->rwsem); 6358 6359 num_stripes = 1; 6360 stripe_index = 0; 6361 if (map->type & BTRFS_BLOCK_GROUP_RAID0) { 6362 stripe_index = stripe_nr % map->num_stripes; 6363 stripe_nr /= map->num_stripes; 6364 if (op == BTRFS_MAP_READ) 6365 mirror_num = 1; 6366 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) { 6367 if (op != BTRFS_MAP_READ) { 6368 num_stripes = map->num_stripes; 6369 } else if (mirror_num) { 6370 stripe_index = mirror_num - 1; 6371 } else { 6372 stripe_index = find_live_mirror(fs_info, map, 0, 6373 dev_replace_is_ongoing); 6374 mirror_num = stripe_index + 1; 6375 } 6376 6377 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { 6378 if (op != BTRFS_MAP_READ) { 6379 num_stripes = map->num_stripes; 6380 } else if (mirror_num) { 6381 stripe_index = mirror_num - 1; 6382 } else { 6383 mirror_num = 1; 6384 } 6385 6386 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { 6387 u32 factor = map->num_stripes / map->sub_stripes; 6388 6389 stripe_index = (stripe_nr % factor) * map->sub_stripes; 6390 stripe_nr /= factor; 6391 6392 if (op != BTRFS_MAP_READ) 6393 num_stripes = map->sub_stripes; 6394 else if (mirror_num) 6395 stripe_index += mirror_num - 1; 6396 else { 6397 int old_stripe_index = stripe_index; 6398 stripe_index = find_live_mirror(fs_info, map, 6399 stripe_index, 6400 dev_replace_is_ongoing); 6401 mirror_num = stripe_index - old_stripe_index + 1; 6402 } 6403 6404 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 6405 if (need_raid_map && (op != BTRFS_MAP_READ || mirror_num > 1)) { 6406 /* 6407 * Push stripe_nr back to the start of the full stripe 6408 * For those cases needing a full stripe, @stripe_nr 6409 * is the full stripe number. 6410 * 6411 * Originally we go raid56_full_stripe_start / full_stripe_len, 6412 * but that can be expensive. Here we just divide 6413 * @stripe_nr with @data_stripes. 6414 */ 6415 stripe_nr /= data_stripes; 6416 6417 /* RAID[56] write or recovery. Return all stripes */ 6418 num_stripes = map->num_stripes; 6419 max_errors = btrfs_chunk_max_errors(map); 6420 6421 /* Return the length to the full stripe end */ 6422 *length = min(logical + *length, 6423 raid56_full_stripe_start + em->start + 6424 btrfs_stripe_nr_to_offset(data_stripes)) - 6425 logical; 6426 stripe_index = 0; 6427 stripe_offset = 0; 6428 } else { 6429 /* 6430 * Mirror #0 or #1 means the original data block. 6431 * Mirror #2 is RAID5 parity block. 6432 * Mirror #3 is RAID6 Q block. 6433 */ 6434 stripe_index = stripe_nr % data_stripes; 6435 stripe_nr /= data_stripes; 6436 if (mirror_num > 1) 6437 stripe_index = data_stripes + mirror_num - 2; 6438 6439 /* We distribute the parity blocks across stripes */ 6440 stripe_index = (stripe_nr + stripe_index) % map->num_stripes; 6441 if (op == BTRFS_MAP_READ && mirror_num <= 1) 6442 mirror_num = 1; 6443 } 6444 } else { 6445 /* 6446 * After this, stripe_nr is the number of stripes on this 6447 * device we have to walk to find the data, and stripe_index is 6448 * the number of our device in the stripe array 6449 */ 6450 stripe_index = stripe_nr % map->num_stripes; 6451 stripe_nr /= map->num_stripes; 6452 mirror_num = stripe_index + 1; 6453 } 6454 if (stripe_index >= map->num_stripes) { 6455 btrfs_crit(fs_info, 6456 "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u", 6457 stripe_index, map->num_stripes); 6458 ret = -EINVAL; 6459 goto out; 6460 } 6461 6462 num_alloc_stripes = num_stripes; 6463 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL && 6464 op != BTRFS_MAP_READ) 6465 /* 6466 * For replace case, we need to add extra stripes for extra 6467 * duplicated stripes. 6468 * 6469 * For both WRITE and GET_READ_MIRRORS, we may have at most 6470 * 2 more stripes (DUP types, otherwise 1). 6471 */ 6472 num_alloc_stripes += 2; 6473 6474 /* 6475 * If this I/O maps to a single device, try to return the device and 6476 * physical block information on the stack instead of allocating an 6477 * I/O context structure. 6478 */ 6479 if (smap && num_alloc_stripes == 1 && 6480 !((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && mirror_num > 1)) { 6481 set_io_stripe(smap, map, stripe_index, stripe_offset, stripe_nr); 6482 if (mirror_num_ret) 6483 *mirror_num_ret = mirror_num; 6484 *bioc_ret = NULL; 6485 ret = 0; 6486 goto out; 6487 } 6488 6489 bioc = alloc_btrfs_io_context(fs_info, num_alloc_stripes); 6490 if (!bioc) { 6491 ret = -ENOMEM; 6492 goto out; 6493 } 6494 bioc->map_type = map->type; 6495 6496 /* 6497 * For RAID56 full map, we need to make sure the stripes[] follows the 6498 * rule that data stripes are all ordered, then followed with P and Q 6499 * (if we have). 6500 * 6501 * It's still mostly the same as other profiles, just with extra rotation. 6502 */ 6503 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map && 6504 (op != BTRFS_MAP_READ || mirror_num > 1)) { 6505 /* 6506 * For RAID56 @stripe_nr is already the number of full stripes 6507 * before us, which is also the rotation value (needs to modulo 6508 * with num_stripes). 6509 * 6510 * In this case, we just add @stripe_nr with @i, then do the 6511 * modulo, to reduce one modulo call. 6512 */ 6513 bioc->full_stripe_logical = em->start + 6514 btrfs_stripe_nr_to_offset(stripe_nr * data_stripes); 6515 for (i = 0; i < num_stripes; i++) 6516 set_io_stripe(&bioc->stripes[i], map, 6517 (i + stripe_nr) % num_stripes, 6518 stripe_offset, stripe_nr); 6519 } else { 6520 /* 6521 * For all other non-RAID56 profiles, just copy the target 6522 * stripe into the bioc. 6523 */ 6524 for (i = 0; i < num_stripes; i++) { 6525 set_io_stripe(&bioc->stripes[i], map, stripe_index, 6526 stripe_offset, stripe_nr); 6527 stripe_index++; 6528 } 6529 } 6530 6531 if (op != BTRFS_MAP_READ) 6532 max_errors = btrfs_chunk_max_errors(map); 6533 6534 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL && 6535 op != BTRFS_MAP_READ) { 6536 handle_ops_on_dev_replace(op, bioc, dev_replace, logical, 6537 &num_stripes, &max_errors); 6538 } 6539 6540 *bioc_ret = bioc; 6541 bioc->num_stripes = num_stripes; 6542 bioc->max_errors = max_errors; 6543 bioc->mirror_num = mirror_num; 6544 6545 out: 6546 if (dev_replace_is_ongoing && dev_replace->replace_task != current) { 6547 lockdep_assert_held(&dev_replace->rwsem); 6548 /* Unlock and let waiting writers proceed */ 6549 up_read(&dev_replace->rwsem); 6550 } 6551 free_extent_map(em); 6552 return ret; 6553 } 6554 6555 static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args, 6556 const struct btrfs_fs_devices *fs_devices) 6557 { 6558 if (args->fsid == NULL) 6559 return true; 6560 if (memcmp(fs_devices->metadata_uuid, args->fsid, BTRFS_FSID_SIZE) == 0) 6561 return true; 6562 return false; 6563 } 6564 6565 static bool dev_args_match_device(const struct btrfs_dev_lookup_args *args, 6566 const struct btrfs_device *device) 6567 { 6568 if (args->missing) { 6569 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) && 6570 !device->bdev) 6571 return true; 6572 return false; 6573 } 6574 6575 if (device->devid != args->devid) 6576 return false; 6577 if (args->uuid && memcmp(device->uuid, args->uuid, BTRFS_UUID_SIZE) != 0) 6578 return false; 6579 return true; 6580 } 6581 6582 /* 6583 * Find a device specified by @devid or @uuid in the list of @fs_devices, or 6584 * return NULL. 6585 * 6586 * If devid and uuid are both specified, the match must be exact, otherwise 6587 * only devid is used. 6588 */ 6589 struct btrfs_device *btrfs_find_device(const struct btrfs_fs_devices *fs_devices, 6590 const struct btrfs_dev_lookup_args *args) 6591 { 6592 struct btrfs_device *device; 6593 struct btrfs_fs_devices *seed_devs; 6594 6595 if (dev_args_match_fs_devices(args, fs_devices)) { 6596 list_for_each_entry(device, &fs_devices->devices, dev_list) { 6597 if (dev_args_match_device(args, device)) 6598 return device; 6599 } 6600 } 6601 6602 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 6603 if (!dev_args_match_fs_devices(args, seed_devs)) 6604 continue; 6605 list_for_each_entry(device, &seed_devs->devices, dev_list) { 6606 if (dev_args_match_device(args, device)) 6607 return device; 6608 } 6609 } 6610 6611 return NULL; 6612 } 6613 6614 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices, 6615 u64 devid, u8 *dev_uuid) 6616 { 6617 struct btrfs_device *device; 6618 unsigned int nofs_flag; 6619 6620 /* 6621 * We call this under the chunk_mutex, so we want to use NOFS for this 6622 * allocation, however we don't want to change btrfs_alloc_device() to 6623 * always do NOFS because we use it in a lot of other GFP_KERNEL safe 6624 * places. 6625 */ 6626 6627 nofs_flag = memalloc_nofs_save(); 6628 device = btrfs_alloc_device(NULL, &devid, dev_uuid, NULL); 6629 memalloc_nofs_restore(nofs_flag); 6630 if (IS_ERR(device)) 6631 return device; 6632 6633 list_add(&device->dev_list, &fs_devices->devices); 6634 device->fs_devices = fs_devices; 6635 fs_devices->num_devices++; 6636 6637 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 6638 fs_devices->missing_devices++; 6639 6640 return device; 6641 } 6642 6643 /* 6644 * Allocate new device struct, set up devid and UUID. 6645 * 6646 * @fs_info: used only for generating a new devid, can be NULL if 6647 * devid is provided (i.e. @devid != NULL). 6648 * @devid: a pointer to devid for this device. If NULL a new devid 6649 * is generated. 6650 * @uuid: a pointer to UUID for this device. If NULL a new UUID 6651 * is generated. 6652 * @path: a pointer to device path if available, NULL otherwise. 6653 * 6654 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR() 6655 * on error. Returned struct is not linked onto any lists and must be 6656 * destroyed with btrfs_free_device. 6657 */ 6658 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info, 6659 const u64 *devid, const u8 *uuid, 6660 const char *path) 6661 { 6662 struct btrfs_device *dev; 6663 u64 tmp; 6664 6665 if (WARN_ON(!devid && !fs_info)) 6666 return ERR_PTR(-EINVAL); 6667 6668 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 6669 if (!dev) 6670 return ERR_PTR(-ENOMEM); 6671 6672 INIT_LIST_HEAD(&dev->dev_list); 6673 INIT_LIST_HEAD(&dev->dev_alloc_list); 6674 INIT_LIST_HEAD(&dev->post_commit_list); 6675 6676 atomic_set(&dev->dev_stats_ccnt, 0); 6677 btrfs_device_data_ordered_init(dev); 6678 extent_io_tree_init(fs_info, &dev->alloc_state, IO_TREE_DEVICE_ALLOC_STATE); 6679 6680 if (devid) 6681 tmp = *devid; 6682 else { 6683 int ret; 6684 6685 ret = find_next_devid(fs_info, &tmp); 6686 if (ret) { 6687 btrfs_free_device(dev); 6688 return ERR_PTR(ret); 6689 } 6690 } 6691 dev->devid = tmp; 6692 6693 if (uuid) 6694 memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE); 6695 else 6696 generate_random_uuid(dev->uuid); 6697 6698 if (path) { 6699 struct rcu_string *name; 6700 6701 name = rcu_string_strdup(path, GFP_KERNEL); 6702 if (!name) { 6703 btrfs_free_device(dev); 6704 return ERR_PTR(-ENOMEM); 6705 } 6706 rcu_assign_pointer(dev->name, name); 6707 } 6708 6709 return dev; 6710 } 6711 6712 static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info, 6713 u64 devid, u8 *uuid, bool error) 6714 { 6715 if (error) 6716 btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing", 6717 devid, uuid); 6718 else 6719 btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing", 6720 devid, uuid); 6721 } 6722 6723 u64 btrfs_calc_stripe_length(const struct extent_map *em) 6724 { 6725 const struct map_lookup *map = em->map_lookup; 6726 const int data_stripes = calc_data_stripes(map->type, map->num_stripes); 6727 6728 return div_u64(em->len, data_stripes); 6729 } 6730 6731 #if BITS_PER_LONG == 32 6732 /* 6733 * Due to page cache limit, metadata beyond BTRFS_32BIT_MAX_FILE_SIZE 6734 * can't be accessed on 32bit systems. 6735 * 6736 * This function do mount time check to reject the fs if it already has 6737 * metadata chunk beyond that limit. 6738 */ 6739 static int check_32bit_meta_chunk(struct btrfs_fs_info *fs_info, 6740 u64 logical, u64 length, u64 type) 6741 { 6742 if (!(type & BTRFS_BLOCK_GROUP_METADATA)) 6743 return 0; 6744 6745 if (logical + length < MAX_LFS_FILESIZE) 6746 return 0; 6747 6748 btrfs_err_32bit_limit(fs_info); 6749 return -EOVERFLOW; 6750 } 6751 6752 /* 6753 * This is to give early warning for any metadata chunk reaching 6754 * BTRFS_32BIT_EARLY_WARN_THRESHOLD. 6755 * Although we can still access the metadata, it's not going to be possible 6756 * once the limit is reached. 6757 */ 6758 static void warn_32bit_meta_chunk(struct btrfs_fs_info *fs_info, 6759 u64 logical, u64 length, u64 type) 6760 { 6761 if (!(type & BTRFS_BLOCK_GROUP_METADATA)) 6762 return; 6763 6764 if (logical + length < BTRFS_32BIT_EARLY_WARN_THRESHOLD) 6765 return; 6766 6767 btrfs_warn_32bit_limit(fs_info); 6768 } 6769 #endif 6770 6771 static struct btrfs_device *handle_missing_device(struct btrfs_fs_info *fs_info, 6772 u64 devid, u8 *uuid) 6773 { 6774 struct btrfs_device *dev; 6775 6776 if (!btrfs_test_opt(fs_info, DEGRADED)) { 6777 btrfs_report_missing_device(fs_info, devid, uuid, true); 6778 return ERR_PTR(-ENOENT); 6779 } 6780 6781 dev = add_missing_dev(fs_info->fs_devices, devid, uuid); 6782 if (IS_ERR(dev)) { 6783 btrfs_err(fs_info, "failed to init missing device %llu: %ld", 6784 devid, PTR_ERR(dev)); 6785 return dev; 6786 } 6787 btrfs_report_missing_device(fs_info, devid, uuid, false); 6788 6789 return dev; 6790 } 6791 6792 static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf, 6793 struct btrfs_chunk *chunk) 6794 { 6795 BTRFS_DEV_LOOKUP_ARGS(args); 6796 struct btrfs_fs_info *fs_info = leaf->fs_info; 6797 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 6798 struct map_lookup *map; 6799 struct extent_map *em; 6800 u64 logical; 6801 u64 length; 6802 u64 devid; 6803 u64 type; 6804 u8 uuid[BTRFS_UUID_SIZE]; 6805 int index; 6806 int num_stripes; 6807 int ret; 6808 int i; 6809 6810 logical = key->offset; 6811 length = btrfs_chunk_length(leaf, chunk); 6812 type = btrfs_chunk_type(leaf, chunk); 6813 index = btrfs_bg_flags_to_raid_index(type); 6814 num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 6815 6816 #if BITS_PER_LONG == 32 6817 ret = check_32bit_meta_chunk(fs_info, logical, length, type); 6818 if (ret < 0) 6819 return ret; 6820 warn_32bit_meta_chunk(fs_info, logical, length, type); 6821 #endif 6822 6823 /* 6824 * Only need to verify chunk item if we're reading from sys chunk array, 6825 * as chunk item in tree block is already verified by tree-checker. 6826 */ 6827 if (leaf->start == BTRFS_SUPER_INFO_OFFSET) { 6828 ret = btrfs_check_chunk_valid(leaf, chunk, logical); 6829 if (ret) 6830 return ret; 6831 } 6832 6833 read_lock(&map_tree->lock); 6834 em = lookup_extent_mapping(map_tree, logical, 1); 6835 read_unlock(&map_tree->lock); 6836 6837 /* already mapped? */ 6838 if (em && em->start <= logical && em->start + em->len > logical) { 6839 free_extent_map(em); 6840 return 0; 6841 } else if (em) { 6842 free_extent_map(em); 6843 } 6844 6845 em = alloc_extent_map(); 6846 if (!em) 6847 return -ENOMEM; 6848 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); 6849 if (!map) { 6850 free_extent_map(em); 6851 return -ENOMEM; 6852 } 6853 6854 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 6855 em->map_lookup = map; 6856 em->start = logical; 6857 em->len = length; 6858 em->orig_start = 0; 6859 em->block_start = 0; 6860 em->block_len = em->len; 6861 6862 map->num_stripes = num_stripes; 6863 map->io_width = btrfs_chunk_io_width(leaf, chunk); 6864 map->io_align = btrfs_chunk_io_align(leaf, chunk); 6865 map->type = type; 6866 /* 6867 * We can't use the sub_stripes value, as for profiles other than 6868 * RAID10, they may have 0 as sub_stripes for filesystems created by 6869 * older mkfs (<v5.4). 6870 * In that case, it can cause divide-by-zero errors later. 6871 * Since currently sub_stripes is fixed for each profile, let's 6872 * use the trusted value instead. 6873 */ 6874 map->sub_stripes = btrfs_raid_array[index].sub_stripes; 6875 map->verified_stripes = 0; 6876 em->orig_block_len = btrfs_calc_stripe_length(em); 6877 for (i = 0; i < num_stripes; i++) { 6878 map->stripes[i].physical = 6879 btrfs_stripe_offset_nr(leaf, chunk, i); 6880 devid = btrfs_stripe_devid_nr(leaf, chunk, i); 6881 args.devid = devid; 6882 read_extent_buffer(leaf, uuid, (unsigned long) 6883 btrfs_stripe_dev_uuid_nr(chunk, i), 6884 BTRFS_UUID_SIZE); 6885 args.uuid = uuid; 6886 map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, &args); 6887 if (!map->stripes[i].dev) { 6888 map->stripes[i].dev = handle_missing_device(fs_info, 6889 devid, uuid); 6890 if (IS_ERR(map->stripes[i].dev)) { 6891 ret = PTR_ERR(map->stripes[i].dev); 6892 free_extent_map(em); 6893 return ret; 6894 } 6895 } 6896 6897 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 6898 &(map->stripes[i].dev->dev_state)); 6899 } 6900 6901 write_lock(&map_tree->lock); 6902 ret = add_extent_mapping(map_tree, em, 0); 6903 write_unlock(&map_tree->lock); 6904 if (ret < 0) { 6905 btrfs_err(fs_info, 6906 "failed to add chunk map, start=%llu len=%llu: %d", 6907 em->start, em->len, ret); 6908 } 6909 free_extent_map(em); 6910 6911 return ret; 6912 } 6913 6914 static void fill_device_from_item(struct extent_buffer *leaf, 6915 struct btrfs_dev_item *dev_item, 6916 struct btrfs_device *device) 6917 { 6918 unsigned long ptr; 6919 6920 device->devid = btrfs_device_id(leaf, dev_item); 6921 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item); 6922 device->total_bytes = device->disk_total_bytes; 6923 device->commit_total_bytes = device->disk_total_bytes; 6924 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item); 6925 device->commit_bytes_used = device->bytes_used; 6926 device->type = btrfs_device_type(leaf, dev_item); 6927 device->io_align = btrfs_device_io_align(leaf, dev_item); 6928 device->io_width = btrfs_device_io_width(leaf, dev_item); 6929 device->sector_size = btrfs_device_sector_size(leaf, dev_item); 6930 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID); 6931 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 6932 6933 ptr = btrfs_device_uuid(dev_item); 6934 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 6935 } 6936 6937 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info, 6938 u8 *fsid) 6939 { 6940 struct btrfs_fs_devices *fs_devices; 6941 int ret; 6942 6943 lockdep_assert_held(&uuid_mutex); 6944 ASSERT(fsid); 6945 6946 /* This will match only for multi-device seed fs */ 6947 list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list) 6948 if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE)) 6949 return fs_devices; 6950 6951 6952 fs_devices = find_fsid(fsid, NULL); 6953 if (!fs_devices) { 6954 if (!btrfs_test_opt(fs_info, DEGRADED)) 6955 return ERR_PTR(-ENOENT); 6956 6957 fs_devices = alloc_fs_devices(fsid, NULL); 6958 if (IS_ERR(fs_devices)) 6959 return fs_devices; 6960 6961 fs_devices->seeding = true; 6962 fs_devices->opened = 1; 6963 return fs_devices; 6964 } 6965 6966 /* 6967 * Upon first call for a seed fs fsid, just create a private copy of the 6968 * respective fs_devices and anchor it at fs_info->fs_devices->seed_list 6969 */ 6970 fs_devices = clone_fs_devices(fs_devices); 6971 if (IS_ERR(fs_devices)) 6972 return fs_devices; 6973 6974 ret = open_fs_devices(fs_devices, BLK_OPEN_READ, fs_info->bdev_holder); 6975 if (ret) { 6976 free_fs_devices(fs_devices); 6977 return ERR_PTR(ret); 6978 } 6979 6980 if (!fs_devices->seeding) { 6981 close_fs_devices(fs_devices); 6982 free_fs_devices(fs_devices); 6983 return ERR_PTR(-EINVAL); 6984 } 6985 6986 list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list); 6987 6988 return fs_devices; 6989 } 6990 6991 static int read_one_dev(struct extent_buffer *leaf, 6992 struct btrfs_dev_item *dev_item) 6993 { 6994 BTRFS_DEV_LOOKUP_ARGS(args); 6995 struct btrfs_fs_info *fs_info = leaf->fs_info; 6996 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 6997 struct btrfs_device *device; 6998 u64 devid; 6999 int ret; 7000 u8 fs_uuid[BTRFS_FSID_SIZE]; 7001 u8 dev_uuid[BTRFS_UUID_SIZE]; 7002 7003 devid = btrfs_device_id(leaf, dev_item); 7004 args.devid = devid; 7005 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 7006 BTRFS_UUID_SIZE); 7007 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 7008 BTRFS_FSID_SIZE); 7009 args.uuid = dev_uuid; 7010 args.fsid = fs_uuid; 7011 7012 if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) { 7013 fs_devices = open_seed_devices(fs_info, fs_uuid); 7014 if (IS_ERR(fs_devices)) 7015 return PTR_ERR(fs_devices); 7016 } 7017 7018 device = btrfs_find_device(fs_info->fs_devices, &args); 7019 if (!device) { 7020 if (!btrfs_test_opt(fs_info, DEGRADED)) { 7021 btrfs_report_missing_device(fs_info, devid, 7022 dev_uuid, true); 7023 return -ENOENT; 7024 } 7025 7026 device = add_missing_dev(fs_devices, devid, dev_uuid); 7027 if (IS_ERR(device)) { 7028 btrfs_err(fs_info, 7029 "failed to add missing dev %llu: %ld", 7030 devid, PTR_ERR(device)); 7031 return PTR_ERR(device); 7032 } 7033 btrfs_report_missing_device(fs_info, devid, dev_uuid, false); 7034 } else { 7035 if (!device->bdev) { 7036 if (!btrfs_test_opt(fs_info, DEGRADED)) { 7037 btrfs_report_missing_device(fs_info, 7038 devid, dev_uuid, true); 7039 return -ENOENT; 7040 } 7041 btrfs_report_missing_device(fs_info, devid, 7042 dev_uuid, false); 7043 } 7044 7045 if (!device->bdev && 7046 !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { 7047 /* 7048 * this happens when a device that was properly setup 7049 * in the device info lists suddenly goes bad. 7050 * device->bdev is NULL, and so we have to set 7051 * device->missing to one here 7052 */ 7053 device->fs_devices->missing_devices++; 7054 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 7055 } 7056 7057 /* Move the device to its own fs_devices */ 7058 if (device->fs_devices != fs_devices) { 7059 ASSERT(test_bit(BTRFS_DEV_STATE_MISSING, 7060 &device->dev_state)); 7061 7062 list_move(&device->dev_list, &fs_devices->devices); 7063 device->fs_devices->num_devices--; 7064 fs_devices->num_devices++; 7065 7066 device->fs_devices->missing_devices--; 7067 fs_devices->missing_devices++; 7068 7069 device->fs_devices = fs_devices; 7070 } 7071 } 7072 7073 if (device->fs_devices != fs_info->fs_devices) { 7074 BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)); 7075 if (device->generation != 7076 btrfs_device_generation(leaf, dev_item)) 7077 return -EINVAL; 7078 } 7079 7080 fill_device_from_item(leaf, dev_item, device); 7081 if (device->bdev) { 7082 u64 max_total_bytes = bdev_nr_bytes(device->bdev); 7083 7084 if (device->total_bytes > max_total_bytes) { 7085 btrfs_err(fs_info, 7086 "device total_bytes should be at most %llu but found %llu", 7087 max_total_bytes, device->total_bytes); 7088 return -EINVAL; 7089 } 7090 } 7091 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 7092 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 7093 !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 7094 device->fs_devices->total_rw_bytes += device->total_bytes; 7095 atomic64_add(device->total_bytes - device->bytes_used, 7096 &fs_info->free_chunk_space); 7097 } 7098 ret = 0; 7099 return ret; 7100 } 7101 7102 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info) 7103 { 7104 struct btrfs_super_block *super_copy = fs_info->super_copy; 7105 struct extent_buffer *sb; 7106 struct btrfs_disk_key *disk_key; 7107 struct btrfs_chunk *chunk; 7108 u8 *array_ptr; 7109 unsigned long sb_array_offset; 7110 int ret = 0; 7111 u32 num_stripes; 7112 u32 array_size; 7113 u32 len = 0; 7114 u32 cur_offset; 7115 u64 type; 7116 struct btrfs_key key; 7117 7118 ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize); 7119 7120 /* 7121 * We allocated a dummy extent, just to use extent buffer accessors. 7122 * There will be unused space after BTRFS_SUPER_INFO_SIZE, but 7123 * that's fine, we will not go beyond system chunk array anyway. 7124 */ 7125 sb = alloc_dummy_extent_buffer(fs_info, BTRFS_SUPER_INFO_OFFSET); 7126 if (!sb) 7127 return -ENOMEM; 7128 set_extent_buffer_uptodate(sb); 7129 7130 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); 7131 array_size = btrfs_super_sys_array_size(super_copy); 7132 7133 array_ptr = super_copy->sys_chunk_array; 7134 sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array); 7135 cur_offset = 0; 7136 7137 while (cur_offset < array_size) { 7138 disk_key = (struct btrfs_disk_key *)array_ptr; 7139 len = sizeof(*disk_key); 7140 if (cur_offset + len > array_size) 7141 goto out_short_read; 7142 7143 btrfs_disk_key_to_cpu(&key, disk_key); 7144 7145 array_ptr += len; 7146 sb_array_offset += len; 7147 cur_offset += len; 7148 7149 if (key.type != BTRFS_CHUNK_ITEM_KEY) { 7150 btrfs_err(fs_info, 7151 "unexpected item type %u in sys_array at offset %u", 7152 (u32)key.type, cur_offset); 7153 ret = -EIO; 7154 break; 7155 } 7156 7157 chunk = (struct btrfs_chunk *)sb_array_offset; 7158 /* 7159 * At least one btrfs_chunk with one stripe must be present, 7160 * exact stripe count check comes afterwards 7161 */ 7162 len = btrfs_chunk_item_size(1); 7163 if (cur_offset + len > array_size) 7164 goto out_short_read; 7165 7166 num_stripes = btrfs_chunk_num_stripes(sb, chunk); 7167 if (!num_stripes) { 7168 btrfs_err(fs_info, 7169 "invalid number of stripes %u in sys_array at offset %u", 7170 num_stripes, cur_offset); 7171 ret = -EIO; 7172 break; 7173 } 7174 7175 type = btrfs_chunk_type(sb, chunk); 7176 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) { 7177 btrfs_err(fs_info, 7178 "invalid chunk type %llu in sys_array at offset %u", 7179 type, cur_offset); 7180 ret = -EIO; 7181 break; 7182 } 7183 7184 len = btrfs_chunk_item_size(num_stripes); 7185 if (cur_offset + len > array_size) 7186 goto out_short_read; 7187 7188 ret = read_one_chunk(&key, sb, chunk); 7189 if (ret) 7190 break; 7191 7192 array_ptr += len; 7193 sb_array_offset += len; 7194 cur_offset += len; 7195 } 7196 clear_extent_buffer_uptodate(sb); 7197 free_extent_buffer_stale(sb); 7198 return ret; 7199 7200 out_short_read: 7201 btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u", 7202 len, cur_offset); 7203 clear_extent_buffer_uptodate(sb); 7204 free_extent_buffer_stale(sb); 7205 return -EIO; 7206 } 7207 7208 /* 7209 * Check if all chunks in the fs are OK for read-write degraded mount 7210 * 7211 * If the @failing_dev is specified, it's accounted as missing. 7212 * 7213 * Return true if all chunks meet the minimal RW mount requirements. 7214 * Return false if any chunk doesn't meet the minimal RW mount requirements. 7215 */ 7216 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info, 7217 struct btrfs_device *failing_dev) 7218 { 7219 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 7220 struct extent_map *em; 7221 u64 next_start = 0; 7222 bool ret = true; 7223 7224 read_lock(&map_tree->lock); 7225 em = lookup_extent_mapping(map_tree, 0, (u64)-1); 7226 read_unlock(&map_tree->lock); 7227 /* No chunk at all? Return false anyway */ 7228 if (!em) { 7229 ret = false; 7230 goto out; 7231 } 7232 while (em) { 7233 struct map_lookup *map; 7234 int missing = 0; 7235 int max_tolerated; 7236 int i; 7237 7238 map = em->map_lookup; 7239 max_tolerated = 7240 btrfs_get_num_tolerated_disk_barrier_failures( 7241 map->type); 7242 for (i = 0; i < map->num_stripes; i++) { 7243 struct btrfs_device *dev = map->stripes[i].dev; 7244 7245 if (!dev || !dev->bdev || 7246 test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) || 7247 dev->last_flush_error) 7248 missing++; 7249 else if (failing_dev && failing_dev == dev) 7250 missing++; 7251 } 7252 if (missing > max_tolerated) { 7253 if (!failing_dev) 7254 btrfs_warn(fs_info, 7255 "chunk %llu missing %d devices, max tolerance is %d for writable mount", 7256 em->start, missing, max_tolerated); 7257 free_extent_map(em); 7258 ret = false; 7259 goto out; 7260 } 7261 next_start = extent_map_end(em); 7262 free_extent_map(em); 7263 7264 read_lock(&map_tree->lock); 7265 em = lookup_extent_mapping(map_tree, next_start, 7266 (u64)(-1) - next_start); 7267 read_unlock(&map_tree->lock); 7268 } 7269 out: 7270 return ret; 7271 } 7272 7273 static void readahead_tree_node_children(struct extent_buffer *node) 7274 { 7275 int i; 7276 const int nr_items = btrfs_header_nritems(node); 7277 7278 for (i = 0; i < nr_items; i++) 7279 btrfs_readahead_node_child(node, i); 7280 } 7281 7282 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info) 7283 { 7284 struct btrfs_root *root = fs_info->chunk_root; 7285 struct btrfs_path *path; 7286 struct extent_buffer *leaf; 7287 struct btrfs_key key; 7288 struct btrfs_key found_key; 7289 int ret; 7290 int slot; 7291 int iter_ret = 0; 7292 u64 total_dev = 0; 7293 u64 last_ra_node = 0; 7294 7295 path = btrfs_alloc_path(); 7296 if (!path) 7297 return -ENOMEM; 7298 7299 /* 7300 * uuid_mutex is needed only if we are mounting a sprout FS 7301 * otherwise we don't need it. 7302 */ 7303 mutex_lock(&uuid_mutex); 7304 7305 /* 7306 * It is possible for mount and umount to race in such a way that 7307 * we execute this code path, but open_fs_devices failed to clear 7308 * total_rw_bytes. We certainly want it cleared before reading the 7309 * device items, so clear it here. 7310 */ 7311 fs_info->fs_devices->total_rw_bytes = 0; 7312 7313 /* 7314 * Lockdep complains about possible circular locking dependency between 7315 * a disk's open_mutex (struct gendisk.open_mutex), the rw semaphores 7316 * used for freeze procection of a fs (struct super_block.s_writers), 7317 * which we take when starting a transaction, and extent buffers of the 7318 * chunk tree if we call read_one_dev() while holding a lock on an 7319 * extent buffer of the chunk tree. Since we are mounting the filesystem 7320 * and at this point there can't be any concurrent task modifying the 7321 * chunk tree, to keep it simple, just skip locking on the chunk tree. 7322 */ 7323 ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags)); 7324 path->skip_locking = 1; 7325 7326 /* 7327 * Read all device items, and then all the chunk items. All 7328 * device items are found before any chunk item (their object id 7329 * is smaller than the lowest possible object id for a chunk 7330 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID). 7331 */ 7332 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 7333 key.offset = 0; 7334 key.type = 0; 7335 btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) { 7336 struct extent_buffer *node = path->nodes[1]; 7337 7338 leaf = path->nodes[0]; 7339 slot = path->slots[0]; 7340 7341 if (node) { 7342 if (last_ra_node != node->start) { 7343 readahead_tree_node_children(node); 7344 last_ra_node = node->start; 7345 } 7346 } 7347 if (found_key.type == BTRFS_DEV_ITEM_KEY) { 7348 struct btrfs_dev_item *dev_item; 7349 dev_item = btrfs_item_ptr(leaf, slot, 7350 struct btrfs_dev_item); 7351 ret = read_one_dev(leaf, dev_item); 7352 if (ret) 7353 goto error; 7354 total_dev++; 7355 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) { 7356 struct btrfs_chunk *chunk; 7357 7358 /* 7359 * We are only called at mount time, so no need to take 7360 * fs_info->chunk_mutex. Plus, to avoid lockdep warnings, 7361 * we always lock first fs_info->chunk_mutex before 7362 * acquiring any locks on the chunk tree. This is a 7363 * requirement for chunk allocation, see the comment on 7364 * top of btrfs_chunk_alloc() for details. 7365 */ 7366 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 7367 ret = read_one_chunk(&found_key, leaf, chunk); 7368 if (ret) 7369 goto error; 7370 } 7371 } 7372 /* Catch error found during iteration */ 7373 if (iter_ret < 0) { 7374 ret = iter_ret; 7375 goto error; 7376 } 7377 7378 /* 7379 * After loading chunk tree, we've got all device information, 7380 * do another round of validation checks. 7381 */ 7382 if (total_dev != fs_info->fs_devices->total_devices) { 7383 btrfs_warn(fs_info, 7384 "super block num_devices %llu mismatch with DEV_ITEM count %llu, will be repaired on next transaction commit", 7385 btrfs_super_num_devices(fs_info->super_copy), 7386 total_dev); 7387 fs_info->fs_devices->total_devices = total_dev; 7388 btrfs_set_super_num_devices(fs_info->super_copy, total_dev); 7389 } 7390 if (btrfs_super_total_bytes(fs_info->super_copy) < 7391 fs_info->fs_devices->total_rw_bytes) { 7392 btrfs_err(fs_info, 7393 "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu", 7394 btrfs_super_total_bytes(fs_info->super_copy), 7395 fs_info->fs_devices->total_rw_bytes); 7396 ret = -EINVAL; 7397 goto error; 7398 } 7399 ret = 0; 7400 error: 7401 mutex_unlock(&uuid_mutex); 7402 7403 btrfs_free_path(path); 7404 return ret; 7405 } 7406 7407 int btrfs_init_devices_late(struct btrfs_fs_info *fs_info) 7408 { 7409 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs; 7410 struct btrfs_device *device; 7411 int ret = 0; 7412 7413 fs_devices->fs_info = fs_info; 7414 7415 mutex_lock(&fs_devices->device_list_mutex); 7416 list_for_each_entry(device, &fs_devices->devices, dev_list) 7417 device->fs_info = fs_info; 7418 7419 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 7420 list_for_each_entry(device, &seed_devs->devices, dev_list) { 7421 device->fs_info = fs_info; 7422 ret = btrfs_get_dev_zone_info(device, false); 7423 if (ret) 7424 break; 7425 } 7426 7427 seed_devs->fs_info = fs_info; 7428 } 7429 mutex_unlock(&fs_devices->device_list_mutex); 7430 7431 return ret; 7432 } 7433 7434 static u64 btrfs_dev_stats_value(const struct extent_buffer *eb, 7435 const struct btrfs_dev_stats_item *ptr, 7436 int index) 7437 { 7438 u64 val; 7439 7440 read_extent_buffer(eb, &val, 7441 offsetof(struct btrfs_dev_stats_item, values) + 7442 ((unsigned long)ptr) + (index * sizeof(u64)), 7443 sizeof(val)); 7444 return val; 7445 } 7446 7447 static void btrfs_set_dev_stats_value(struct extent_buffer *eb, 7448 struct btrfs_dev_stats_item *ptr, 7449 int index, u64 val) 7450 { 7451 write_extent_buffer(eb, &val, 7452 offsetof(struct btrfs_dev_stats_item, values) + 7453 ((unsigned long)ptr) + (index * sizeof(u64)), 7454 sizeof(val)); 7455 } 7456 7457 static int btrfs_device_init_dev_stats(struct btrfs_device *device, 7458 struct btrfs_path *path) 7459 { 7460 struct btrfs_dev_stats_item *ptr; 7461 struct extent_buffer *eb; 7462 struct btrfs_key key; 7463 int item_size; 7464 int i, ret, slot; 7465 7466 if (!device->fs_info->dev_root) 7467 return 0; 7468 7469 key.objectid = BTRFS_DEV_STATS_OBJECTID; 7470 key.type = BTRFS_PERSISTENT_ITEM_KEY; 7471 key.offset = device->devid; 7472 ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0); 7473 if (ret) { 7474 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7475 btrfs_dev_stat_set(device, i, 0); 7476 device->dev_stats_valid = 1; 7477 btrfs_release_path(path); 7478 return ret < 0 ? ret : 0; 7479 } 7480 slot = path->slots[0]; 7481 eb = path->nodes[0]; 7482 item_size = btrfs_item_size(eb, slot); 7483 7484 ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item); 7485 7486 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) { 7487 if (item_size >= (1 + i) * sizeof(__le64)) 7488 btrfs_dev_stat_set(device, i, 7489 btrfs_dev_stats_value(eb, ptr, i)); 7490 else 7491 btrfs_dev_stat_set(device, i, 0); 7492 } 7493 7494 device->dev_stats_valid = 1; 7495 btrfs_dev_stat_print_on_load(device); 7496 btrfs_release_path(path); 7497 7498 return 0; 7499 } 7500 7501 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info) 7502 { 7503 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs; 7504 struct btrfs_device *device; 7505 struct btrfs_path *path = NULL; 7506 int ret = 0; 7507 7508 path = btrfs_alloc_path(); 7509 if (!path) 7510 return -ENOMEM; 7511 7512 mutex_lock(&fs_devices->device_list_mutex); 7513 list_for_each_entry(device, &fs_devices->devices, dev_list) { 7514 ret = btrfs_device_init_dev_stats(device, path); 7515 if (ret) 7516 goto out; 7517 } 7518 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 7519 list_for_each_entry(device, &seed_devs->devices, dev_list) { 7520 ret = btrfs_device_init_dev_stats(device, path); 7521 if (ret) 7522 goto out; 7523 } 7524 } 7525 out: 7526 mutex_unlock(&fs_devices->device_list_mutex); 7527 7528 btrfs_free_path(path); 7529 return ret; 7530 } 7531 7532 static int update_dev_stat_item(struct btrfs_trans_handle *trans, 7533 struct btrfs_device *device) 7534 { 7535 struct btrfs_fs_info *fs_info = trans->fs_info; 7536 struct btrfs_root *dev_root = fs_info->dev_root; 7537 struct btrfs_path *path; 7538 struct btrfs_key key; 7539 struct extent_buffer *eb; 7540 struct btrfs_dev_stats_item *ptr; 7541 int ret; 7542 int i; 7543 7544 key.objectid = BTRFS_DEV_STATS_OBJECTID; 7545 key.type = BTRFS_PERSISTENT_ITEM_KEY; 7546 key.offset = device->devid; 7547 7548 path = btrfs_alloc_path(); 7549 if (!path) 7550 return -ENOMEM; 7551 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1); 7552 if (ret < 0) { 7553 btrfs_warn_in_rcu(fs_info, 7554 "error %d while searching for dev_stats item for device %s", 7555 ret, btrfs_dev_name(device)); 7556 goto out; 7557 } 7558 7559 if (ret == 0 && 7560 btrfs_item_size(path->nodes[0], path->slots[0]) < sizeof(*ptr)) { 7561 /* need to delete old one and insert a new one */ 7562 ret = btrfs_del_item(trans, dev_root, path); 7563 if (ret != 0) { 7564 btrfs_warn_in_rcu(fs_info, 7565 "delete too small dev_stats item for device %s failed %d", 7566 btrfs_dev_name(device), ret); 7567 goto out; 7568 } 7569 ret = 1; 7570 } 7571 7572 if (ret == 1) { 7573 /* need to insert a new item */ 7574 btrfs_release_path(path); 7575 ret = btrfs_insert_empty_item(trans, dev_root, path, 7576 &key, sizeof(*ptr)); 7577 if (ret < 0) { 7578 btrfs_warn_in_rcu(fs_info, 7579 "insert dev_stats item for device %s failed %d", 7580 btrfs_dev_name(device), ret); 7581 goto out; 7582 } 7583 } 7584 7585 eb = path->nodes[0]; 7586 ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item); 7587 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7588 btrfs_set_dev_stats_value(eb, ptr, i, 7589 btrfs_dev_stat_read(device, i)); 7590 btrfs_mark_buffer_dirty(trans, eb); 7591 7592 out: 7593 btrfs_free_path(path); 7594 return ret; 7595 } 7596 7597 /* 7598 * called from commit_transaction. Writes all changed device stats to disk. 7599 */ 7600 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans) 7601 { 7602 struct btrfs_fs_info *fs_info = trans->fs_info; 7603 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7604 struct btrfs_device *device; 7605 int stats_cnt; 7606 int ret = 0; 7607 7608 mutex_lock(&fs_devices->device_list_mutex); 7609 list_for_each_entry(device, &fs_devices->devices, dev_list) { 7610 stats_cnt = atomic_read(&device->dev_stats_ccnt); 7611 if (!device->dev_stats_valid || stats_cnt == 0) 7612 continue; 7613 7614 7615 /* 7616 * There is a LOAD-LOAD control dependency between the value of 7617 * dev_stats_ccnt and updating the on-disk values which requires 7618 * reading the in-memory counters. Such control dependencies 7619 * require explicit read memory barriers. 7620 * 7621 * This memory barriers pairs with smp_mb__before_atomic in 7622 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full 7623 * barrier implied by atomic_xchg in 7624 * btrfs_dev_stats_read_and_reset 7625 */ 7626 smp_rmb(); 7627 7628 ret = update_dev_stat_item(trans, device); 7629 if (!ret) 7630 atomic_sub(stats_cnt, &device->dev_stats_ccnt); 7631 } 7632 mutex_unlock(&fs_devices->device_list_mutex); 7633 7634 return ret; 7635 } 7636 7637 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index) 7638 { 7639 btrfs_dev_stat_inc(dev, index); 7640 7641 if (!dev->dev_stats_valid) 7642 return; 7643 btrfs_err_rl_in_rcu(dev->fs_info, 7644 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u", 7645 btrfs_dev_name(dev), 7646 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), 7647 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS), 7648 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS), 7649 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS), 7650 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS)); 7651 } 7652 7653 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev) 7654 { 7655 int i; 7656 7657 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7658 if (btrfs_dev_stat_read(dev, i) != 0) 7659 break; 7660 if (i == BTRFS_DEV_STAT_VALUES_MAX) 7661 return; /* all values == 0, suppress message */ 7662 7663 btrfs_info_in_rcu(dev->fs_info, 7664 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u", 7665 btrfs_dev_name(dev), 7666 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), 7667 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS), 7668 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS), 7669 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS), 7670 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS)); 7671 } 7672 7673 int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info, 7674 struct btrfs_ioctl_get_dev_stats *stats) 7675 { 7676 BTRFS_DEV_LOOKUP_ARGS(args); 7677 struct btrfs_device *dev; 7678 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7679 int i; 7680 7681 mutex_lock(&fs_devices->device_list_mutex); 7682 args.devid = stats->devid; 7683 dev = btrfs_find_device(fs_info->fs_devices, &args); 7684 mutex_unlock(&fs_devices->device_list_mutex); 7685 7686 if (!dev) { 7687 btrfs_warn(fs_info, "get dev_stats failed, device not found"); 7688 return -ENODEV; 7689 } else if (!dev->dev_stats_valid) { 7690 btrfs_warn(fs_info, "get dev_stats failed, not yet valid"); 7691 return -ENODEV; 7692 } else if (stats->flags & BTRFS_DEV_STATS_RESET) { 7693 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) { 7694 if (stats->nr_items > i) 7695 stats->values[i] = 7696 btrfs_dev_stat_read_and_reset(dev, i); 7697 else 7698 btrfs_dev_stat_set(dev, i, 0); 7699 } 7700 btrfs_info(fs_info, "device stats zeroed by %s (%d)", 7701 current->comm, task_pid_nr(current)); 7702 } else { 7703 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7704 if (stats->nr_items > i) 7705 stats->values[i] = btrfs_dev_stat_read(dev, i); 7706 } 7707 if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX) 7708 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX; 7709 return 0; 7710 } 7711 7712 /* 7713 * Update the size and bytes used for each device where it changed. This is 7714 * delayed since we would otherwise get errors while writing out the 7715 * superblocks. 7716 * 7717 * Must be invoked during transaction commit. 7718 */ 7719 void btrfs_commit_device_sizes(struct btrfs_transaction *trans) 7720 { 7721 struct btrfs_device *curr, *next; 7722 7723 ASSERT(trans->state == TRANS_STATE_COMMIT_DOING); 7724 7725 if (list_empty(&trans->dev_update_list)) 7726 return; 7727 7728 /* 7729 * We don't need the device_list_mutex here. This list is owned by the 7730 * transaction and the transaction must complete before the device is 7731 * released. 7732 */ 7733 mutex_lock(&trans->fs_info->chunk_mutex); 7734 list_for_each_entry_safe(curr, next, &trans->dev_update_list, 7735 post_commit_list) { 7736 list_del_init(&curr->post_commit_list); 7737 curr->commit_total_bytes = curr->disk_total_bytes; 7738 curr->commit_bytes_used = curr->bytes_used; 7739 } 7740 mutex_unlock(&trans->fs_info->chunk_mutex); 7741 } 7742 7743 /* 7744 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10. 7745 */ 7746 int btrfs_bg_type_to_factor(u64 flags) 7747 { 7748 const int index = btrfs_bg_flags_to_raid_index(flags); 7749 7750 return btrfs_raid_array[index].ncopies; 7751 } 7752 7753 7754 7755 static int verify_one_dev_extent(struct btrfs_fs_info *fs_info, 7756 u64 chunk_offset, u64 devid, 7757 u64 physical_offset, u64 physical_len) 7758 { 7759 struct btrfs_dev_lookup_args args = { .devid = devid }; 7760 struct extent_map_tree *em_tree = &fs_info->mapping_tree; 7761 struct extent_map *em; 7762 struct map_lookup *map; 7763 struct btrfs_device *dev; 7764 u64 stripe_len; 7765 bool found = false; 7766 int ret = 0; 7767 int i; 7768 7769 read_lock(&em_tree->lock); 7770 em = lookup_extent_mapping(em_tree, chunk_offset, 1); 7771 read_unlock(&em_tree->lock); 7772 7773 if (!em) { 7774 btrfs_err(fs_info, 7775 "dev extent physical offset %llu on devid %llu doesn't have corresponding chunk", 7776 physical_offset, devid); 7777 ret = -EUCLEAN; 7778 goto out; 7779 } 7780 7781 map = em->map_lookup; 7782 stripe_len = btrfs_calc_stripe_length(em); 7783 if (physical_len != stripe_len) { 7784 btrfs_err(fs_info, 7785 "dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu", 7786 physical_offset, devid, em->start, physical_len, 7787 stripe_len); 7788 ret = -EUCLEAN; 7789 goto out; 7790 } 7791 7792 /* 7793 * Very old mkfs.btrfs (before v4.1) will not respect the reserved 7794 * space. Although kernel can handle it without problem, better to warn 7795 * the users. 7796 */ 7797 if (physical_offset < BTRFS_DEVICE_RANGE_RESERVED) 7798 btrfs_warn(fs_info, 7799 "devid %llu physical %llu len %llu inside the reserved space", 7800 devid, physical_offset, physical_len); 7801 7802 for (i = 0; i < map->num_stripes; i++) { 7803 if (map->stripes[i].dev->devid == devid && 7804 map->stripes[i].physical == physical_offset) { 7805 found = true; 7806 if (map->verified_stripes >= map->num_stripes) { 7807 btrfs_err(fs_info, 7808 "too many dev extents for chunk %llu found", 7809 em->start); 7810 ret = -EUCLEAN; 7811 goto out; 7812 } 7813 map->verified_stripes++; 7814 break; 7815 } 7816 } 7817 if (!found) { 7818 btrfs_err(fs_info, 7819 "dev extent physical offset %llu devid %llu has no corresponding chunk", 7820 physical_offset, devid); 7821 ret = -EUCLEAN; 7822 } 7823 7824 /* Make sure no dev extent is beyond device boundary */ 7825 dev = btrfs_find_device(fs_info->fs_devices, &args); 7826 if (!dev) { 7827 btrfs_err(fs_info, "failed to find devid %llu", devid); 7828 ret = -EUCLEAN; 7829 goto out; 7830 } 7831 7832 if (physical_offset + physical_len > dev->disk_total_bytes) { 7833 btrfs_err(fs_info, 7834 "dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu", 7835 devid, physical_offset, physical_len, 7836 dev->disk_total_bytes); 7837 ret = -EUCLEAN; 7838 goto out; 7839 } 7840 7841 if (dev->zone_info) { 7842 u64 zone_size = dev->zone_info->zone_size; 7843 7844 if (!IS_ALIGNED(physical_offset, zone_size) || 7845 !IS_ALIGNED(physical_len, zone_size)) { 7846 btrfs_err(fs_info, 7847 "zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone", 7848 devid, physical_offset, physical_len); 7849 ret = -EUCLEAN; 7850 goto out; 7851 } 7852 } 7853 7854 out: 7855 free_extent_map(em); 7856 return ret; 7857 } 7858 7859 static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info) 7860 { 7861 struct extent_map_tree *em_tree = &fs_info->mapping_tree; 7862 struct extent_map *em; 7863 struct rb_node *node; 7864 int ret = 0; 7865 7866 read_lock(&em_tree->lock); 7867 for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) { 7868 em = rb_entry(node, struct extent_map, rb_node); 7869 if (em->map_lookup->num_stripes != 7870 em->map_lookup->verified_stripes) { 7871 btrfs_err(fs_info, 7872 "chunk %llu has missing dev extent, have %d expect %d", 7873 em->start, em->map_lookup->verified_stripes, 7874 em->map_lookup->num_stripes); 7875 ret = -EUCLEAN; 7876 goto out; 7877 } 7878 } 7879 out: 7880 read_unlock(&em_tree->lock); 7881 return ret; 7882 } 7883 7884 /* 7885 * Ensure that all dev extents are mapped to correct chunk, otherwise 7886 * later chunk allocation/free would cause unexpected behavior. 7887 * 7888 * NOTE: This will iterate through the whole device tree, which should be of 7889 * the same size level as the chunk tree. This slightly increases mount time. 7890 */ 7891 int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info) 7892 { 7893 struct btrfs_path *path; 7894 struct btrfs_root *root = fs_info->dev_root; 7895 struct btrfs_key key; 7896 u64 prev_devid = 0; 7897 u64 prev_dev_ext_end = 0; 7898 int ret = 0; 7899 7900 /* 7901 * We don't have a dev_root because we mounted with ignorebadroots and 7902 * failed to load the root, so we want to skip the verification in this 7903 * case for sure. 7904 * 7905 * However if the dev root is fine, but the tree itself is corrupted 7906 * we'd still fail to mount. This verification is only to make sure 7907 * writes can happen safely, so instead just bypass this check 7908 * completely in the case of IGNOREBADROOTS. 7909 */ 7910 if (btrfs_test_opt(fs_info, IGNOREBADROOTS)) 7911 return 0; 7912 7913 key.objectid = 1; 7914 key.type = BTRFS_DEV_EXTENT_KEY; 7915 key.offset = 0; 7916 7917 path = btrfs_alloc_path(); 7918 if (!path) 7919 return -ENOMEM; 7920 7921 path->reada = READA_FORWARD; 7922 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 7923 if (ret < 0) 7924 goto out; 7925 7926 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { 7927 ret = btrfs_next_leaf(root, path); 7928 if (ret < 0) 7929 goto out; 7930 /* No dev extents at all? Not good */ 7931 if (ret > 0) { 7932 ret = -EUCLEAN; 7933 goto out; 7934 } 7935 } 7936 while (1) { 7937 struct extent_buffer *leaf = path->nodes[0]; 7938 struct btrfs_dev_extent *dext; 7939 int slot = path->slots[0]; 7940 u64 chunk_offset; 7941 u64 physical_offset; 7942 u64 physical_len; 7943 u64 devid; 7944 7945 btrfs_item_key_to_cpu(leaf, &key, slot); 7946 if (key.type != BTRFS_DEV_EXTENT_KEY) 7947 break; 7948 devid = key.objectid; 7949 physical_offset = key.offset; 7950 7951 dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent); 7952 chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext); 7953 physical_len = btrfs_dev_extent_length(leaf, dext); 7954 7955 /* Check if this dev extent overlaps with the previous one */ 7956 if (devid == prev_devid && physical_offset < prev_dev_ext_end) { 7957 btrfs_err(fs_info, 7958 "dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu", 7959 devid, physical_offset, prev_dev_ext_end); 7960 ret = -EUCLEAN; 7961 goto out; 7962 } 7963 7964 ret = verify_one_dev_extent(fs_info, chunk_offset, devid, 7965 physical_offset, physical_len); 7966 if (ret < 0) 7967 goto out; 7968 prev_devid = devid; 7969 prev_dev_ext_end = physical_offset + physical_len; 7970 7971 ret = btrfs_next_item(root, path); 7972 if (ret < 0) 7973 goto out; 7974 if (ret > 0) { 7975 ret = 0; 7976 break; 7977 } 7978 } 7979 7980 /* Ensure all chunks have corresponding dev extents */ 7981 ret = verify_chunk_dev_extent_mapping(fs_info); 7982 out: 7983 btrfs_free_path(path); 7984 return ret; 7985 } 7986 7987 /* 7988 * Check whether the given block group or device is pinned by any inode being 7989 * used as a swapfile. 7990 */ 7991 bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr) 7992 { 7993 struct btrfs_swapfile_pin *sp; 7994 struct rb_node *node; 7995 7996 spin_lock(&fs_info->swapfile_pins_lock); 7997 node = fs_info->swapfile_pins.rb_node; 7998 while (node) { 7999 sp = rb_entry(node, struct btrfs_swapfile_pin, node); 8000 if (ptr < sp->ptr) 8001 node = node->rb_left; 8002 else if (ptr > sp->ptr) 8003 node = node->rb_right; 8004 else 8005 break; 8006 } 8007 spin_unlock(&fs_info->swapfile_pins_lock); 8008 return node != NULL; 8009 } 8010 8011 static int relocating_repair_kthread(void *data) 8012 { 8013 struct btrfs_block_group *cache = data; 8014 struct btrfs_fs_info *fs_info = cache->fs_info; 8015 u64 target; 8016 int ret = 0; 8017 8018 target = cache->start; 8019 btrfs_put_block_group(cache); 8020 8021 sb_start_write(fs_info->sb); 8022 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) { 8023 btrfs_info(fs_info, 8024 "zoned: skip relocating block group %llu to repair: EBUSY", 8025 target); 8026 sb_end_write(fs_info->sb); 8027 return -EBUSY; 8028 } 8029 8030 mutex_lock(&fs_info->reclaim_bgs_lock); 8031 8032 /* Ensure block group still exists */ 8033 cache = btrfs_lookup_block_group(fs_info, target); 8034 if (!cache) 8035 goto out; 8036 8037 if (!test_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags)) 8038 goto out; 8039 8040 ret = btrfs_may_alloc_data_chunk(fs_info, target); 8041 if (ret < 0) 8042 goto out; 8043 8044 btrfs_info(fs_info, 8045 "zoned: relocating block group %llu to repair IO failure", 8046 target); 8047 ret = btrfs_relocate_chunk(fs_info, target); 8048 8049 out: 8050 if (cache) 8051 btrfs_put_block_group(cache); 8052 mutex_unlock(&fs_info->reclaim_bgs_lock); 8053 btrfs_exclop_finish(fs_info); 8054 sb_end_write(fs_info->sb); 8055 8056 return ret; 8057 } 8058 8059 bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical) 8060 { 8061 struct btrfs_block_group *cache; 8062 8063 if (!btrfs_is_zoned(fs_info)) 8064 return false; 8065 8066 /* Do not attempt to repair in degraded state */ 8067 if (btrfs_test_opt(fs_info, DEGRADED)) 8068 return true; 8069 8070 cache = btrfs_lookup_block_group(fs_info, logical); 8071 if (!cache) 8072 return true; 8073 8074 if (test_and_set_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags)) { 8075 btrfs_put_block_group(cache); 8076 return true; 8077 } 8078 8079 kthread_run(relocating_repair_kthread, cache, 8080 "btrfs-relocating-repair"); 8081 8082 return true; 8083 } 8084 8085 static void map_raid56_repair_block(struct btrfs_io_context *bioc, 8086 struct btrfs_io_stripe *smap, 8087 u64 logical) 8088 { 8089 int data_stripes = nr_bioc_data_stripes(bioc); 8090 int i; 8091 8092 for (i = 0; i < data_stripes; i++) { 8093 u64 stripe_start = bioc->full_stripe_logical + 8094 btrfs_stripe_nr_to_offset(i); 8095 8096 if (logical >= stripe_start && 8097 logical < stripe_start + BTRFS_STRIPE_LEN) 8098 break; 8099 } 8100 ASSERT(i < data_stripes); 8101 smap->dev = bioc->stripes[i].dev; 8102 smap->physical = bioc->stripes[i].physical + 8103 ((logical - bioc->full_stripe_logical) & 8104 BTRFS_STRIPE_LEN_MASK); 8105 } 8106 8107 /* 8108 * Map a repair write into a single device. 8109 * 8110 * A repair write is triggered by read time repair or scrub, which would only 8111 * update the contents of a single device. 8112 * Not update any other mirrors nor go through RMW path. 8113 * 8114 * Callers should ensure: 8115 * 8116 * - Call btrfs_bio_counter_inc_blocked() first 8117 * - The range does not cross stripe boundary 8118 * - Has a valid @mirror_num passed in. 8119 */ 8120 int btrfs_map_repair_block(struct btrfs_fs_info *fs_info, 8121 struct btrfs_io_stripe *smap, u64 logical, 8122 u32 length, int mirror_num) 8123 { 8124 struct btrfs_io_context *bioc = NULL; 8125 u64 map_length = length; 8126 int mirror_ret = mirror_num; 8127 int ret; 8128 8129 ASSERT(mirror_num > 0); 8130 8131 ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical, &map_length, 8132 &bioc, smap, &mirror_ret, true); 8133 if (ret < 0) 8134 return ret; 8135 8136 /* The map range should not cross stripe boundary. */ 8137 ASSERT(map_length >= length); 8138 8139 /* Already mapped to single stripe. */ 8140 if (!bioc) 8141 goto out; 8142 8143 /* Map the RAID56 multi-stripe writes to a single one. */ 8144 if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 8145 map_raid56_repair_block(bioc, smap, logical); 8146 goto out; 8147 } 8148 8149 ASSERT(mirror_num <= bioc->num_stripes); 8150 smap->dev = bioc->stripes[mirror_num - 1].dev; 8151 smap->physical = bioc->stripes[mirror_num - 1].physical; 8152 out: 8153 btrfs_put_bioc(bioc); 8154 ASSERT(smap->dev); 8155 return 0; 8156 } 8157