// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include <linux/namei.h>
#include "misc.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "tree-checker.h"
#include "space-info.h"
#include "block-group.h"
#include "discard.h"
#include "zoned.h"
#include "fs.h"
#include "accessors.h"
#include "uuid-tree.h"
#include "ioctl.h"
#include "relocation.h"
#include "scrub.h"
#include "super.h"

#define BTRFS_BLOCK_GROUP_STRIPE_MASK	(BTRFS_BLOCK_GROUP_RAID0 | \
					 BTRFS_BLOCK_GROUP_RAID10 | \
					 BTRFS_BLOCK_GROUP_RAID56_MASK)

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes	= 2,
		.dev_stripes	= 1,
		.devs_max	= 0,	/* 0 == as many as possible */
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "raid10",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error	= BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 2,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "raid1",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C3] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 3,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 3,
		.ncopies	= 3,
		.nparity	= 0,
		.raid_name	= "raid1c3",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C3,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C4] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 4,
		.devs_min	= 4,
		.tolerated_failures = 3,
		.devs_increment	= 4,
		.ncopies	= 4,
		.nparity	= 0,
		.raid_name	= "raid1c4",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C4,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes	= 1,
		.dev_stripes	= 2,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "dup",
		.bg_flag	= BTRFS_BLOCK_GROUP_DUP,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 0,
		.raid_name	= "raid0",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 0,
		.raid_name	= "single",
		.bg_flag	= 0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 1,
		.raid_name	= "raid5",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error	= BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 2,
		.raid_name	= "raid6",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error	= BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};

/*
 * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
 * can be used as an index to access btrfs_raid_array[].
 */
enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags)
{
	const u64 profile = (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK);

	if (!profile)
		return BTRFS_RAID_SINGLE;

	return BTRFS_BG_FLAG_TO_INDEX(profile);
}

const char *btrfs_bg_type_to_raid_name(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	if (index >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[index].raid_name;
}

int btrfs_nr_parity_stripes(u64 type)
{
	enum btrfs_raid_types index = btrfs_bg_flags_to_raid_index(type);

	return btrfs_raid_array[index].nparity;
}

/*
 * Fill @buf with a textual description of @bg_flags, no more than @size_buf
 * bytes including the terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

	if (!flags) {
		strcpy(bp, "NONE");
		return;
	}

#define DESCRIBE_FLAG(flag, desc)				\
	do {							\
		if (flags & (flag)) {				\
			ret = snprintf(bp, size_bp, "%s|", (desc));	\
			if (ret < 0 || ret >= size_bp)		\
				goto out_overflow;		\
			size_bp -= ret;				\
			bp += ret;				\
			flags &= ~(flag);			\
		}						\
	} while (0)

	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	if (flags) {
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		size_bp -= ret;
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed, it's up to the caller to provide a sufficiently
	 * large buffer
	 */
out_overflow:;
}

static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);

/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list in general
 * but in mount context it could be used to exclude list modifications by e.g.
 * the scan ioctl
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, i.e. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * It is not required at mount and close times, because our device list is
 * protected by the uuid_mutex at that point.
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed. Additionally it also protects post_commit_list of
 * individual devices, since they can be added to the transaction's
 * post_commit_list only with chunk_mutex held.
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Lock nesting
 * ============
 *
 * uuid_mutex
 *   device_list_mutex
 *     chunk_mutex
 *   balance_mutex
 *
 *
 * Exclusive operations
 * ====================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * The status of the exclusive operation is set and cleared atomically.
 * While an operation is in the Paused state, fs_info::exclusive_operation
 * remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * The exclusive status is cleared when the device operation is canceled or
 * completed.
 */

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

/*
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:		if not NULL, copy the UUID to fs_devices::fsid
 * @metadata_fsid:	if not NULL, copy the UUID to fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
						 const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devs;

	ASSERT(fsid || !metadata_fsid);

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	INIT_LIST_HEAD(&fs_devs->seed_list);

	if (fsid) {
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
		memcpy(fs_devs->metadata_uuid,
		       metadata_fsid ?: fsid, BTRFS_FSID_SIZE);
	}

	return fs_devs;
}

static void btrfs_free_device(struct btrfs_device *device)
{
	WARN_ON(!list_empty(&device->post_commit_list));
	rcu_string_free(device->name);
	extent_io_tree_release(&device->alloc_state);
	btrfs_destroy_dev_zone_info(device);
	kfree(device);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}

void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}

static bool match_fsid_fs_devices(const struct btrfs_fs_devices *fs_devices,
				  const u8 *fsid, const u8 *metadata_fsid)
{
	if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) != 0)
		return false;

	if (!metadata_fsid)
		return true;

	if (memcmp(metadata_fsid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE) != 0)
		return false;

	return true;
}

static noinline struct btrfs_fs_devices *find_fsid(
		const u8 *fsid, const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devices;

	ASSERT(fsid);

	/* Handle non-split brain cases */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (match_fsid_fs_devices(fs_devices, fsid, metadata_fsid))
			return fs_devices;
	}
	return NULL;
}

/*
 * First check if the metadata_uuid is different from the fsid in the given
 * fs_devices. Then check if the given fsid is the same as the metadata_uuid
 * in the fs_devices. If it is, return true; otherwise, return false.
 */
static inline bool check_fsid_changed(const struct btrfs_fs_devices *fs_devices,
				      const u8 *fsid)
{
	return memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
		      BTRFS_FSID_SIZE) != 0 &&
	       memcmp(fs_devices->metadata_uuid, fsid, BTRFS_FSID_SIZE) == 0;
}

static struct btrfs_fs_devices *find_fsid_with_metadata_uuid(
				struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle scanned device having completed its fsid change but
	 * belonging to a fs_devices that was created by first scanning
	 * a device which didn't have its fsid/metadata_uuid changed
	 * at all and the CHANGING_FSID_V2 flag set.
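	 *
	 * A hypothetical illustration of the state handled by the first loop
	 * below (the UUID values are made up for explanation only):
	 *
	 *	fs_devices:  fsid = A, metadata_uuid = A, fsid_change = true
	 *	disk super:  fsid = B, metadata_uuid = A
	 *
	 * The scanned super's metadata_uuid matches the stale fs_devices'
	 * fsid, which is why disk_super->metadata_uuid is compared against
	 * fs_devices->fsid below.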
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (!fs_devices->fsid_change)
			continue;

		if (match_fsid_fs_devices(fs_devices, disk_super->metadata_uuid,
					  fs_devices->fsid))
			return fs_devices;
	}

	/*
	 * Handle scanned device having completed its fsid change but
	 * belonging to a fs_devices that was created by a device that
	 * has an outdated pair of fsid/metadata_uuid and
	 * CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (!fs_devices->fsid_change)
			continue;

		if (check_fsid_changed(fs_devices, disk_super->metadata_uuid))
			return fs_devices;
	}

	return find_fsid(disk_super->fsid, disk_super->metadata_uuid);
}

static int
btrfs_get_bdev_and_sb(const char *device_path, blk_mode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct btrfs_super_block **disk_super)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder, NULL);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		sync_blockdev(*bdev);
	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
	if (ret) {
		blkdev_put(*bdev, holder);
		goto error;
	}
	invalidate_bdev(*bdev);
	*disk_super = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*disk_super)) {
		ret = PTR_ERR(*disk_super);
		blkdev_put(*bdev, holder);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	return ret;
}

/*
 * Search and remove all stale devices (which are not mounted). When @devt is
 * 0 and @skip_device is NULL, it will search and release all stale devices.
 *
 * @devt:	 Optional. When provided, it will release only the unmounted
 *		 devices matching this devt.
 * @skip_device: Optional. Will skip this device when searching for the stale
 *		 devices.
 *
 * Return:	0 for success or if @devt is 0.
 *		-EBUSY if @devt is a mounted device.
 *		-ENOENT if @devt does not match any device in the list.
 */
static int btrfs_free_stale_devices(dev_t devt, struct btrfs_device *skip_device)
{
	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
	struct btrfs_device *device, *tmp_device;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	if (devt)
		ret = -ENOENT;

	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {

		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry_safe(device, tmp_device,
					 &fs_devices->devices, dev_list) {
			if (skip_device && skip_device == device)
				continue;
			if (devt && devt != device->devt)
				continue;
			if (fs_devices->opened) {
				/* for an already deleted device return 0 */
				if (devt && ret != 0)
					ret = -EBUSY;
				break;
			}

			/* delete the stale device */
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);

			ret = 0;
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		if (fs_devices->num_devices == 0) {
			btrfs_sysfs_remove_fsid(fs_devices);
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	return ret;
}

/*
 * This is only used on mount, and we are protected from competing things
 * messing with our fs_devices by the uuid_mutex, thus we do not need the
 * fs_devices->device_list_mutex here.
 */
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
				 struct btrfs_device *device, blk_mode_t flags,
				 void *holder)
{
	struct block_device *bdev;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	if (device->bdev)
		return -EINVAL;
	if (!device->name)
		return -EINVAL;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev, &disk_super);
	if (ret)
		return ret;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_free_page;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_free_page;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
			pr_err(
		"BTRFS: Invalid seeding and uuid-changed device detected\n");
			goto error_free_page;
		}

		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = true;
	} else {
		if (bdev_read_only(bdev))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	if (!bdev_nonrot(bdev))
		fs_devices->rotating = true;

	if (bdev_max_discard_sectors(bdev))
		fs_devices->discardable = true;

	device->bdev = bdev;
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	device->holder = holder;

	fs_devices->open_devices++;
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	btrfs_release_disk_super(disk_super);

	return 0;

error_free_page:
	btrfs_release_disk_super(disk_super);
	blkdev_put(bdev, holder);

	return -EINVAL;
}

u8 *btrfs_sb_fsid_ptr(struct btrfs_super_block *sb)
{
	bool has_metadata_uuid = (btrfs_super_incompat_flags(sb) &
				  BTRFS_FEATURE_INCOMPAT_METADATA_UUID);

	return has_metadata_uuid ? sb->metadata_uuid : sb->fsid;
}

/*
 * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
 * being created with a disk that has already completed its fsid change. Such
 * disk can belong to an fs which has its FSID changed or to one which doesn't.
 * Handle both cases here.
 */
static struct btrfs_fs_devices *find_fsid_inprogress(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change)
			continue;

		if (check_fsid_changed(fs_devices, disk_super->fsid))
			return fs_devices;
	}

	return find_fsid(disk_super->fsid, NULL);
}

static struct btrfs_fs_devices *find_fsid_changed(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handles the case where the scanned device is part of an fs that had
	 * multiple successful changes of FSID but the currently scanned device
	 * didn't observe it. Meaning our fsid will be different from theirs.
	 * We need to handle two subcases:
	 * 1 - The fs still continues to have different METADATA/FSID uuids.
	 * 2 - The fs is switched back to its original FSID (METADATA/FSID
	 * are equal).
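	 *
	 * A hypothetical illustration of the two subcases (the UUID values are
	 * made up for explanation only):
	 *
	 *	subcase 1: fs_devices { fsid = X, metadata_uuid = M, X != M }
	 *	           disk super { fsid = B, metadata_uuid = M, B != X }
	 *	subcase 2: fs_devices { fsid = M, metadata_uuid = M }
	 *	           disk super { fsid = B, metadata_uuid = M }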
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		/* Changed UUIDs */
		if (check_fsid_changed(fs_devices, disk_super->metadata_uuid) &&
		    memcmp(fs_devices->fsid, disk_super->fsid,
			   BTRFS_FSID_SIZE) != 0)
			return fs_devices;

		/* Unchanged UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}

	return NULL;
}

static struct btrfs_fs_devices *find_fsid_reverted_metadata(
				struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle the case where the scanned device is part of an fs whose last
	 * metadata UUID change reverted it to the original FSID. At the same
	 * time fs_devices was first created by another constituent device
	 * which didn't fully observe the operation. This results in a
	 * btrfs_fs_devices created with metadata/fsid different AND
	 * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
	 * fs_devices equal to the FSID of the disk.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (!fs_devices->fsid_change)
			continue;

		if (check_fsid_changed(fs_devices, disk_super->fsid))
			return fs_devices;
	}

	return NULL;
}

/*
 * Add new device to list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   bool *new_device_added)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = NULL;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);
	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
	dev_t path_devt;
	int error;
	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
				  BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
	bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
					BTRFS_SUPER_FLAG_CHANGING_FSID_V2);

	error = lookup_bdev(path, &path_devt);
	if (error) {
		btrfs_err(NULL, "failed to lookup block device for path %s: %d",
			  path, error);
		return ERR_PTR(error);
	}

	if (fsid_change_in_progress) {
		if (!has_metadata_uuid)
			fs_devices = find_fsid_inprogress(disk_super);
		else
			fs_devices = find_fsid_changed(disk_super);
	} else if (has_metadata_uuid) {
		fs_devices = find_fsid_with_metadata_uuid(disk_super);
	} else {
		fs_devices = find_fsid_reverted_metadata(disk_super);
		if (!fs_devices)
			fs_devices = find_fsid(disk_super->fsid, NULL);
	}

	if (!fs_devices) {
		fs_devices = alloc_fs_devices(disk_super->fsid,
				has_metadata_uuid ? disk_super->metadata_uuid : NULL);
		if (IS_ERR(fs_devices))
			return ERR_CAST(fs_devices);

		fs_devices->fsid_change = fsid_change_in_progress;

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&fs_devices->fs_list, &fs_uuids);

		device = NULL;
	} else {
		struct btrfs_dev_lookup_args args = {
			.devid = devid,
			.uuid = disk_super->dev_item.uuid,
		};

		mutex_lock(&fs_devices->device_list_mutex);
		device = btrfs_find_device(fs_devices, &args);

		/*
		 * If this disk has been pulled into an fs devices created by
		 * a device which had the CHANGING_FSID_V2 flag then replace the
		 * metadata_uuid/fsid values of the fs_devices.
		 */
		if (fs_devices->fsid_change &&
		    found_transid > fs_devices->latest_generation) {
			memcpy(fs_devices->fsid, disk_super->fsid,
			       BTRFS_FSID_SIZE);
			memcpy(fs_devices->metadata_uuid,
			       btrfs_sb_fsid_ptr(disk_super), BTRFS_FSID_SIZE);
			fs_devices->fsid_change = false;
		}
	}

	if (!device) {
		unsigned int nofs_flag;

		if (fs_devices->opened) {
			btrfs_err(NULL,
"device %s belongs to fsid %pU, and the fs is already mounted, scanned by %s (%d)",
				  path, fs_devices->fsid, current->comm,
				  task_pid_nr(current));
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EBUSY);
		}

		nofs_flag = memalloc_nofs_save();
		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid, path);
		memalloc_nofs_restore(nofs_flag);
		if (IS_ERR(device)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			/* we can safely leave the fs_devices entry around */
			return device;
		}

		device->devt = path_devt;

		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;

		device->fs_devices = fs_devices;
		*new_device_added = true;

		if (disk_super->label[0])
			pr_info(
	"BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->label, devid, found_transid, path,
				current->comm, task_pid_nr(current));
		else
			pr_info(
	"BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->fsid, devid, found_transid, path,
				current->comm, task_pid_nr(current));

	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *	a. The same device disappeared and reappeared with
		 *	   different name. or
		 *	b. The missing-disk-which-was-replaced, has
		 *	   reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be spurious
		 * and unintentional.
		 *
		 * Further in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transactions when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at any time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted. We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with the same uuid and devid. We keep the one
			 * with the larger generation number or the last-in if
			 * generations are equal.
			 */
			mutex_unlock(&fs_devices->device_list_mutex);
			btrfs_err(NULL,
"device %s already registered with a higher generation, found %llu expect %llu",
				  path, found_transid, device->generation);
			return ERR_PTR(-EEXIST);
		}

		/*
		 * We are going to replace the device path for a given devid,
		 * make sure it's the same device if the device is mounted.
		 *
		 * NOTE: the device->fs_info may not be reliable here so pass
		 * in a NULL to message helpers instead. This avoids a possible
		 * use-after-free when the fs_info and fs_info->sb are already
		 * torn down.
		 */
		if (device->bdev) {
			if (device->devt != path_devt) {
				mutex_unlock(&fs_devices->device_list_mutex);
				btrfs_warn_in_rcu(NULL,
	"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
						  path, devid, found_transid,
						  current->comm,
						  task_pid_nr(current));
				return ERR_PTR(-EEXIST);
			}
			btrfs_info_in_rcu(NULL,
	"devid %llu device path %s changed to %s scanned by %s (%d)",
					  devid, btrfs_dev_name(device),
					  path, current->comm,
					  task_pid_nr(current));
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			fs_devices->missing_devices--;
			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}
		device->devt = path_devt;
	}

	/*
	 * Unmount does not free the btrfs_device struct but zeroes the
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with the largest generation
	 * (as above).
	 */
	if (!fs_devices->opened) {
		device->generation = found_transid;
		fs_devices->latest_generation = max_t(u64, found_transid,
						      fs_devices->latest_generation);
	}

	fs_devices->total_devices = btrfs_super_num_devices(disk_super);

	mutex_unlock(&fs_devices->device_list_mutex);
	return device;
}

static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	fs_devices = alloc_fs_devices(orig->fsid, NULL);
	if (IS_ERR(fs_devices))
		return fs_devices;

	fs_devices->total_devices = orig->total_devices;

	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		const char *dev_path = NULL;

		/*
		 * This is ok to do without RCU read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name)
			dev_path = orig_dev->name->str;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid, dev_path);
		if (IS_ERR(device)) {
			ret = PTR_ERR(device);
			goto error;
		}

		if (orig_dev->zone_info) {
			struct btrfs_zoned_device_info *zone_info;

			zone_info = btrfs_clone_dev_zone_info(orig_dev);
			if (!zone_info) {
				btrfs_free_device(device);
				ret = -ENOMEM;
				goto error;
			}
			device->zone_info = zone_info;
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(ret);
}

static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
				      struct btrfs_device **latest_dev)
{
	struct btrfs_device *device, *next;

	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state) &&
			    !test_bit(BTRFS_DEV_STATE_MISSING,
				      &device->dev_state) &&
			    (!*latest_dev ||
			     device->generation > (*latest_dev)->generation)) {
				*latest_dev = device;
			}
			continue;
		}

		/*
		 * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID
		 * in btrfs_init_dev_replace(), so just continue.
		 */
		if (device->devid == BTRFS_DEV_REPLACE_DEVID)
			continue;

		if (device->bdev) {
			blkdev_put(device->bdev, device->holder);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			list_del_init(&device->dev_alloc_list);
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		btrfs_free_device(device);
	}
}

/*
 * After we have read the system tree and know devids belonging to this
 * filesystem, remove any device which does not belong there.
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_fs_devices *seed_dev;

	mutex_lock(&uuid_mutex);
	__btrfs_free_extra_devids(fs_devices, &latest_dev);

	list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
		__btrfs_free_extra_devids(seed_dev, &latest_dev);

	fs_devices->latest_dev = latest_dev;

	mutex_unlock(&uuid_mutex);
}

static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (!device->bdev)
		return;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	blkdev_put(device->bdev, device->holder);
}

static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (device->devid == BTRFS_DEV_REPLACE_DEVID)
		clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
		clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		fs_devices->missing_devices--;
	}

	btrfs_close_bdev(device);
	if (device->bdev) {
		fs_devices->open_devices--;
		device->bdev = NULL;
	}
	clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	btrfs_destroy_dev_zone_info(device);

	device->fs_info = NULL;
	atomic_set(&device->dev_stats_ccnt, 0);
	extent_io_tree_release(&device->alloc_state);

	/*
	 * Reset the flush error record. We might have a transient flush error
	 * in this mount, and if so we aborted the current transaction and set
	 * the fs to an error state, guaranteeing no super blocks can be further
	 * committed. However that error might be transient and if we unmount the
	 * filesystem and mount it again, we should allow the mount to succeed
	 * (btrfs_check_rw_degradable() should not fail) - if after mounting the
	 * filesystem again we still get flush errors, then we will again abort
	 * any transaction and set the error state, guaranteeing no commits of
	 * unsafe super blocks.
	 */
	device->last_flush_error = 0;

	/* Verify the device is back in a pristine state */
	WARN_ON(test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
	WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	WARN_ON(!list_empty(&device->dev_alloc_list));
	WARN_ON(!list_empty(&device->post_commit_list));
}

static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	lockdep_assert_held(&uuid_mutex);

	if (--fs_devices->opened > 0)
		return;

	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list)
		btrfs_close_one_device(device);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = false;
	fs_devices->fs_info = NULL;
}

void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	LIST_HEAD(list);
	struct btrfs_fs_devices *tmp;

	mutex_lock(&uuid_mutex);
	close_fs_devices(fs_devices);
	if (!fs_devices->opened) {
		list_splice_init(&fs_devices->seed_list, &list);

		/*
		 * If the struct btrfs_fs_devices is not assembled with any
		 * other device, it can be re-initialized during the next mount
		 * without needing the device-scan step. Therefore, it can be
		 * fully freed.
		 */
		if (fs_devices->num_devices == 1) {
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
		close_fs_devices(fs_devices);
		list_del(&fs_devices->seed_list);
		free_fs_devices(fs_devices);
	}
	mutex_unlock(&uuid_mutex);
}

static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
			   blk_mode_t flags, void *holder)
{
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_device *tmp_device;
	int ret = 0;

	list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
				 dev_list) {
		int ret2;

		ret2 = btrfs_open_one_device(fs_devices, device, flags, holder);
		if (ret2 == 0 &&
		    (!latest_dev || device->generation > latest_dev->generation)) {
			latest_dev = device;
		} else if (ret2 == -ENODATA) {
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);
		}
		if (ret == 0 && ret2 != 0)
			ret = ret2;
	}

	if (fs_devices->open_devices == 0) {
		if (ret)
			return ret;
		return -EINVAL;
	}

	fs_devices->opened = 1;
	fs_devices->latest_dev = latest_dev;
	fs_devices->total_rw_bytes = 0;
	fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;
	fs_devices->read_policy = BTRFS_READ_POLICY_PID;

	return 0;
}

static int devid_cmp(void *priv, const struct list_head *a,
		     const struct list_head *b)
{
	const struct btrfs_device *dev1, *dev2;

	dev1 = list_entry(a, struct btrfs_device, dev_list);
	dev2 = list_entry(b, struct btrfs_device, dev_list);

	if (dev1->devid < dev2->devid)
		return -1;
	else if (dev1->devid > dev2->devid)
		return 1;
	return 0;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       blk_mode_t flags, void *holder)
{
	int ret;

	lockdep_assert_held(&uuid_mutex);
	/*
	 * The device_list_mutex cannot be taken here in case opening the
	 * underlying device takes further locks like open_mutex.
	 *
	 * We also don't need the lock here as this is called during mount and
	 * exclusion is provided by uuid_mutex.
	 */

	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		list_sort(NULL, &fs_devices->devices, devid_cmp);
		ret = open_fs_devices(fs_devices, flags, holder);
	}

	return ret;
}

void btrfs_release_disk_super(struct btrfs_super_block *super)
{
	struct page *page = virt_to_page(super);

	put_page(page);
}

static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
						       u64 bytenr, u64 bytenr_orig)
{
	struct btrfs_super_block *disk_super;
	struct page *page;
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= bdev_nr_bytes(bdev))
		return ERR_PTR(-EINVAL);

	/* make sure our super fits in the page */
	if (sizeof(*disk_super) > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
		return ERR_PTR(-EINVAL);

	/* pull in the page with our super */
	page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL);

	if (IS_ERR(page))
		return ERR_CAST(page);

	p = page_address(page);

	/* align our pointer to the offset of the super block */
	disk_super = p + offset_in_page(bytenr);

	if (btrfs_super_bytenr(disk_super) != bytenr_orig ||
	    btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(p);
		return ERR_PTR(-EINVAL);
	}

	if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
		disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;

	return disk_super;
}

int btrfs_forget_devices(dev_t devt)
{
	int ret;

	mutex_lock(&uuid_mutex);
	ret = btrfs_free_stale_devices(devt, NULL);
	mutex_unlock(&uuid_mutex);

	return ret;
}

/*
 * Look for a btrfs signature on a device. This may be called out of the mount
 * path and we are not allowed to call set_blocksize during the scan. The
 * superblock is read via the pagecache.
 */
struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags)
{
	struct btrfs_super_block *disk_super;
	bool new_device_added = false;
	struct btrfs_device *device = NULL;
	struct block_device *bdev;
	u64 bytenr, bytenr_orig;
	int ret;

	lockdep_assert_held(&uuid_mutex);

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */

	/*
	 * Avoid an exclusive open here, as systemd-udev may initiate a
	 * device scan which may race with the user's mount or mkfs command,
	 * resulting in failure.
	 * Since the device scan is solely for reading purposes, there is no
	 * need for an exclusive open. Additionally, the devices are read again
	 * during the mount process. It is ok to get some inconsistent
	 * values temporarily, as the device paths of the fsid are the only
	 * required information for assembling the volume.
	 */
	bdev = blkdev_get_by_path(path, flags, NULL, NULL);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	bytenr_orig = btrfs_sb_offset(0);
	ret = btrfs_sb_log_location_bdev(bdev, 0, READ, &bytenr);
	if (ret) {
		device = ERR_PTR(ret);
		goto error_bdev_put;
	}

	disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr_orig);
	if (IS_ERR(disk_super)) {
		device = ERR_CAST(disk_super);
		goto error_bdev_put;
	}

	device = device_list_add(path, disk_super, &new_device_added);
	if (!IS_ERR(device) && new_device_added)
		btrfs_free_stale_devices(device->devt, device);

	btrfs_release_disk_super(disk_super);

error_bdev_put:
	blkdev_put(bdev, NULL);

	return device;
}

/*
 * Try to find a chunk that intersects the [start, start + len] range and when
 * one such is found, record the end of it in *start.
 */
static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
				    u64 len)
{
	u64 physical_start, physical_end;

	lockdep_assert_held(&device->fs_info->chunk_mutex);

	if (find_first_extent_bit(&device->alloc_state, *start,
				  &physical_start, &physical_end,
				  CHUNK_ALLOCATED, NULL)) {

		if (in_range(physical_start, *start, len) ||
		    in_range(*start, physical_start,
			     physical_end + 1 - physical_start)) {
			*start = physical_end + 1;
			return true;
		}
	}
	return false;
}

static u64 dev_extent_search_start(struct btrfs_device *device)
{
	switch (device->fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		return BTRFS_DEVICE_RANGE_RESERVED;
	case BTRFS_CHUNK_ALLOC_ZONED:
		/*
		 * We don't care about the starting region like the regular
		 * allocator, because we anyway use/reserve the first two zones
		 * for superblock logging.
		 */
		return 0;
	default:
		BUG();
	}
}

static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
					u64 *hole_start, u64 *hole_size,
					u64 num_bytes)
{
	u64 zone_size = device->zone_info->zone_size;
	u64 pos;
	int ret;
	bool changed = false;

	ASSERT(IS_ALIGNED(*hole_start, zone_size));

	while (*hole_size > 0) {
		pos = btrfs_find_allocatable_zones(device, *hole_start,
						   *hole_start + *hole_size,
						   num_bytes);
		if (pos != *hole_start) {
			*hole_size = *hole_start + *hole_size - pos;
			*hole_start = pos;
			changed = true;
			if (*hole_size < num_bytes)
				break;
		}

		ret = btrfs_ensure_empty_zones(device, pos, num_bytes);

		/* Range is ensured to be empty */
		if (!ret)
			return changed;

		/* Given hole range was invalid (outside of device) */
		if (ret == -ERANGE) {
			*hole_start += *hole_size;
			*hole_size = 0;
			return true;
		}

		*hole_start += zone_size;
		*hole_size -= zone_size;
		changed = true;
	}

	return changed;
}

/*
 * Check if specified hole is suitable for allocation.
 *
 * @device:	the device which has the hole
 * @hole_start: starting position of the hole
 * @hole_size:	the size of the hole
 * @num_bytes:	the size of the free space that we need
 *
 * This function may modify @hole_start and @hole_size to reflect the suitable
 * position for allocation. Returns true if the hole position was updated,
 * false otherwise.
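 *
 * A minimal usage sketch (the sizes are hypothetical, for illustration only):
 *
 *	u64 hole_start = SZ_1M;
 *	u64 hole_size = SZ_256M;
 *	bool moved;
 *
 *	moved = dev_extent_hole_check(device, &hole_start, &hole_size, SZ_64M);
 *
 * If @moved is true the hole was shifted or shrunk and the caller must
 * re-check that hole_size is still >= SZ_64M.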
 */
static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
				  u64 *hole_size, u64 num_bytes)
{
	bool changed = false;
	u64 hole_end = *hole_start + *hole_size;

	for (;;) {
		/*
		 * Check before we set max_hole_start, otherwise we could end up
		 * sending back this offset anyway.
		 */
		if (contains_pending_extent(device, hole_start, *hole_size)) {
			if (hole_end >= *hole_start)
				*hole_size = hole_end - *hole_start;
			else
				*hole_size = 0;
			changed = true;
		}

		switch (device->fs_devices->chunk_alloc_policy) {
		case BTRFS_CHUNK_ALLOC_REGULAR:
			/* No extra check */
			break;
		case BTRFS_CHUNK_ALLOC_ZONED:
			if (dev_extent_hole_check_zoned(device, hole_start,
							hole_size, num_bytes)) {
				changed = true;
				/*
				 * The changed hole can contain pending extent.
				 * Loop again to check that.
				 */
				continue;
			}
			break;
		default:
			BUG();
		}

		break;
	}

	return changed;
}

/*
 * Find free space in the specified device.
 *
 * @device:	the device which we search the free space in
 * @num_bytes:	the size of the free space that we need
 * @start:	store the start of the free space.
 * @len:	the size of the free space that we find, or the size
 *		of the max free space if we don't find suitable free space
 *
 * This does a pretty simple search, the expectation is that it is called very
 * infrequently and that a given device has a small number of extents.
 *
 * @start is used to store the start of the free space if we find one. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 *
 * NOTE: This function will search the *commit* root of the device tree, and
 * does an extra check to ensure dev extents are not double allocated.
 * This makes the function safe to allocate dev extents but may not report
 * correct usable device space, as a device extent freed in the current
 * transaction is not reported as available.
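 *
 * An example of the caveat above: if a dev extent was freed in the current,
 * not yet committed, transaction, this function still sees that range as
 * allocated in the commit root, so a request that would only fit there
 * returns -ENOSPC even though the space becomes available once the
 * transaction commits.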
 */
static int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
				u64 *start, u64 *len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 search_start;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size = 0;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	search_start = dev_extent_search_start(device);
	max_hole_start = search_start;

	WARN_ON(device->zone_info &&
		!IS_ALIGNED(num_bytes, device->zone_info->zone_size));

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}
again:
	if (search_start >= search_end ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_backwards(root, &key, path);
	if (ret < 0)
		goto out;

	while (search_start < search_end) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_end)
			break;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;
			dev_extent_hole_check(device, &search_start, &hole_size,
					      num_bytes);

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;
		if (dev_extent_hole_check(device, &search_start, &hole_size,
					  num_bytes)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

	ASSERT(max_hole_start + max_hole_size <= search_end);
out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start, u64 *dev_extent_len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret == 0)
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
out:
	btrfs_free_path(path);
	return ret;
}

static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct rb_node *n;
	u64 ret = 0;

	em_tree = &fs_info->mapping_tree;
	read_lock(&em_tree->lock);
	n = rb_last(&em_tree->map.rb_root);
	if (n) {
		em = rb_entry(n, struct extent_map, rb_node);
		ret = em->start + em->len;
	}
	read_unlock(&em_tree->lock);

	return ret;
}

static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	if (ret == 0) {
		/* Corruption */
		btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
		ret = -EUCLEAN;
		goto error;
	}

	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*devid_ret = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*devid_ret = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

/*
 * The device information is stored in the chunk root.
 * The btrfs_device struct should be fully filled in.
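 *
 * The new item is keyed as
 *
 *	(BTRFS_DEV_ITEMS_OBJECTID, BTRFS_DEV_ITEM_KEY, device->devid)
 *
 * matching the key set up at the start of the function below.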
 */
static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
			      struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	btrfs_reserve_chunk_metadata(trans, true);
	ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
				      &key, sizeof(*dev_item));
	btrfs_trans_release_chunk_metadata(trans);
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
			    ptr, BTRFS_FSID_SIZE);
	btrfs_mark_buffer_dirty(trans, leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Function to update ctime/mtime for a given device path.
 * Mainly used for ctime/mtime based probe like libblkid.
 *
 * We don't care about errors here, this is just to be kind to userspace.
 */
static void update_dev_time(const char *device_path)
{
	struct path path;
	int ret;

	ret = kern_path(device_path, LOOKUP_FOLLOW, &path);
	if (ret)
		return;

	inode_update_time(d_inode(path.dentry), S_MTIME | S_CTIME | S_VERSION);
	path_put(&path);
}

static int btrfs_rm_dev_item(struct btrfs_trans_handle *trans,
			     struct btrfs_device *device)
{
	struct btrfs_root *root = device->fs_info->chunk_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	btrfs_reserve_chunk_metadata(trans, false);
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	btrfs_trans_release_chunk_metadata(trans);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Verify that @num_devices satisfies the RAID profile constraints in the whole
 * filesystem. It's up to the caller to adjust that number regarding e.g.
 * device replace.
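 *
 * Example based on the btrfs_raid_array table above: with RAID1 block groups
 * allocated (devs_min == 2), a removal that would leave @num_devices == 1
 * makes this return BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET and the device delete
 * is refused.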
1966 */ 1967 static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info, 1968 u64 num_devices) 1969 { 1970 u64 all_avail; 1971 unsigned seq; 1972 int i; 1973 1974 do { 1975 seq = read_seqbegin(&fs_info->profiles_lock); 1976 1977 all_avail = fs_info->avail_data_alloc_bits | 1978 fs_info->avail_system_alloc_bits | 1979 fs_info->avail_metadata_alloc_bits; 1980 } while (read_seqretry(&fs_info->profiles_lock, seq)); 1981 1982 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) { 1983 if (!(all_avail & btrfs_raid_array[i].bg_flag)) 1984 continue; 1985 1986 if (num_devices < btrfs_raid_array[i].devs_min) 1987 return btrfs_raid_array[i].mindev_error; 1988 } 1989 1990 return 0; 1991 } 1992 1993 static struct btrfs_device *btrfs_find_next_active_device( 1994 struct btrfs_fs_devices *fs_devs, struct btrfs_device *device) 1995 { 1996 struct btrfs_device *next_device; 1997 1998 list_for_each_entry(next_device, &fs_devs->devices, dev_list) { 1999 if (next_device != device && 2000 !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state) 2001 && next_device->bdev) 2002 return next_device; 2003 } 2004 2005 return NULL; 2006 } 2007 2008 /* 2009 * Helper function to check if the given device is part of s_bdev / latest_dev 2010 * and replace it with the provided or the next active device. In the context 2011 * where this function is called, there should always be another active 2012 * device (or next_device) available. 2013 */ 2014 void __cold btrfs_assign_next_active_device(struct btrfs_device *device, 2015 struct btrfs_device *next_device) 2016 { 2017 struct btrfs_fs_info *fs_info = device->fs_info; 2018 2019 if (!next_device) 2020 next_device = btrfs_find_next_active_device(fs_info->fs_devices, 2021 device); 2022 ASSERT(next_device); 2023 2024 if (fs_info->sb->s_bdev && 2025 (fs_info->sb->s_bdev == device->bdev)) 2026 fs_info->sb->s_bdev = next_device->bdev; 2027 2028 if (fs_info->fs_devices->latest_dev->bdev == device->bdev) 2029 fs_info->fs_devices->latest_dev = next_device; 2030 } 2031 2032 /* 2033 * Return btrfs_fs_devices::num_devices excluding the device that's being 2034 * currently replaced.
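* The raw num_devices count includes the replace target while a replace is
* running, which is why one device is subtracted in that case.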
2035 */ 2036 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info) 2037 { 2038 u64 num_devices = fs_info->fs_devices->num_devices; 2039 2040 down_read(&fs_info->dev_replace.rwsem); 2041 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) { 2042 ASSERT(num_devices > 1); 2043 num_devices--; 2044 } 2045 up_read(&fs_info->dev_replace.rwsem); 2046 2047 return num_devices; 2048 } 2049 2050 static void btrfs_scratch_superblock(struct btrfs_fs_info *fs_info, 2051 struct block_device *bdev, int copy_num) 2052 { 2053 struct btrfs_super_block *disk_super; 2054 const size_t len = sizeof(disk_super->magic); 2055 const u64 bytenr = btrfs_sb_offset(copy_num); 2056 int ret; 2057 2058 disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr); 2059 if (IS_ERR(disk_super)) 2060 return; 2061 2062 memset(&disk_super->magic, 0, len); 2063 folio_mark_dirty(virt_to_folio(disk_super)); 2064 btrfs_release_disk_super(disk_super); 2065 2066 ret = sync_blockdev_range(bdev, bytenr, bytenr + len - 1); 2067 if (ret) 2068 btrfs_warn(fs_info, "error clearing superblock number %d (%d)", 2069 copy_num, ret); 2070 } 2071 2072 void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info, 2073 struct block_device *bdev, 2074 const char *device_path) 2075 { 2076 int copy_num; 2077 2078 if (!bdev) 2079 return; 2080 2081 for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) { 2082 if (bdev_is_zoned(bdev)) 2083 btrfs_reset_sb_log_zones(bdev, copy_num); 2084 else 2085 btrfs_scratch_superblock(fs_info, bdev, copy_num); 2086 } 2087 2088 /* Notify udev that device has changed */ 2089 btrfs_kobject_uevent(bdev, KOBJ_CHANGE); 2090 2091 /* Update ctime/mtime for device path for libblkid */ 2092 update_dev_time(device_path); 2093 } 2094 2095 int btrfs_rm_device(struct btrfs_fs_info *fs_info, 2096 struct btrfs_dev_lookup_args *args, 2097 struct block_device **bdev, void **holder) 2098 { 2099 struct btrfs_trans_handle *trans; 2100 struct btrfs_device *device; 2101 struct btrfs_fs_devices *cur_devices; 2102 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2103 u64 num_devices; 2104 int ret = 0; 2105 2106 if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { 2107 btrfs_err(fs_info, "device remove not supported on extent tree v2 yet"); 2108 return -EINVAL; 2109 } 2110 2111 /* 2112 * The device list in fs_devices is accessed without locks (neither 2113 * uuid_mutex nor device_list_mutex) as it won't change on a mounted 2114 * filesystem and another device rm cannot run. 
2115 */ 2116 num_devices = btrfs_num_devices(fs_info); 2117 2118 ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1); 2119 if (ret) 2120 return ret; 2121 2122 device = btrfs_find_device(fs_info->fs_devices, args); 2123 if (!device) { 2124 if (args->missing) 2125 ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND; 2126 else 2127 ret = -ENOENT; 2128 return ret; 2129 } 2130 2131 if (btrfs_pinned_by_swapfile(fs_info, device)) { 2132 btrfs_warn_in_rcu(fs_info, 2133 "cannot remove device %s (devid %llu) due to active swapfile", 2134 btrfs_dev_name(device), device->devid); 2135 return -ETXTBSY; 2136 } 2137 2138 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 2139 return BTRFS_ERROR_DEV_TGT_REPLACE; 2140 2141 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 2142 fs_info->fs_devices->rw_devices == 1) 2143 return BTRFS_ERROR_DEV_ONLY_WRITABLE; 2144 2145 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2146 mutex_lock(&fs_info->chunk_mutex); 2147 list_del_init(&device->dev_alloc_list); 2148 device->fs_devices->rw_devices--; 2149 mutex_unlock(&fs_info->chunk_mutex); 2150 } 2151 2152 ret = btrfs_shrink_device(device, 0); 2153 if (ret) 2154 goto error_undo; 2155 2156 trans = btrfs_start_transaction(fs_info->chunk_root, 0); 2157 if (IS_ERR(trans)) { 2158 ret = PTR_ERR(trans); 2159 goto error_undo; 2160 } 2161 2162 ret = btrfs_rm_dev_item(trans, device); 2163 if (ret) { 2164 /* Any error in dev item removal is critical */ 2165 btrfs_crit(fs_info, 2166 "failed to remove device item for devid %llu: %d", 2167 device->devid, ret); 2168 btrfs_abort_transaction(trans, ret); 2169 btrfs_end_transaction(trans); 2170 return ret; 2171 } 2172 2173 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2174 btrfs_scrub_cancel_dev(device); 2175 2176 /* 2177 * the device list mutex makes sure that we don't change 2178 * the device list while someone else is writing out all 2179 * the device supers. Whoever is writing all supers, should 2180 * lock the device list mutex before getting the number of 2181 * devices in the super block (super_copy). Conversely, 2182 * whoever updates the number of devices in the super block 2183 * (super_copy) should hold the device list mutex. 2184 */ 2185 2186 /* 2187 * In normal cases the cur_devices == fs_devices. But in case 2188 * of deleting a seed device, the cur_devices should point to 2189 * its own fs_devices listed under the fs_devices->seed_list. 2190 */ 2191 cur_devices = device->fs_devices; 2192 mutex_lock(&fs_devices->device_list_mutex); 2193 list_del_rcu(&device->dev_list); 2194 2195 cur_devices->num_devices--; 2196 cur_devices->total_devices--; 2197 /* Update total_devices of the parent fs_devices if it's seed */ 2198 if (cur_devices != fs_devices) 2199 fs_devices->total_devices--; 2200 2201 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) 2202 cur_devices->missing_devices--; 2203 2204 btrfs_assign_next_active_device(device, NULL); 2205 2206 if (device->bdev) { 2207 cur_devices->open_devices--; 2208 /* remove sysfs entry */ 2209 btrfs_sysfs_remove_device(device); 2210 } 2211 2212 num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1; 2213 btrfs_set_super_num_devices(fs_info->super_copy, num_devices); 2214 mutex_unlock(&fs_devices->device_list_mutex); 2215 2216 /* 2217 * At this point, the device is zero sized and detached from the 2218 * devices list. All that's left is to zero out the old supers and 2219 * free the device. 
2220 * 2221 * We cannot call btrfs_close_bdev() here because we're holding the sb 2222 * write lock, and blkdev_put() will pull in the ->open_mutex on the 2223 * block device and it's dependencies. Instead just flush the device 2224 * and let the caller do the final blkdev_put. 2225 */ 2226 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2227 btrfs_scratch_superblocks(fs_info, device->bdev, 2228 device->name->str); 2229 if (device->bdev) { 2230 sync_blockdev(device->bdev); 2231 invalidate_bdev(device->bdev); 2232 } 2233 } 2234 2235 *bdev = device->bdev; 2236 *holder = device->holder; 2237 synchronize_rcu(); 2238 btrfs_free_device(device); 2239 2240 /* 2241 * This can happen if cur_devices is the private seed devices list. We 2242 * cannot call close_fs_devices() here because it expects the uuid_mutex 2243 * to be held, but in fact we don't need that for the private 2244 * seed_devices, we can simply decrement cur_devices->opened and then 2245 * remove it from our list and free the fs_devices. 2246 */ 2247 if (cur_devices->num_devices == 0) { 2248 list_del_init(&cur_devices->seed_list); 2249 ASSERT(cur_devices->opened == 1); 2250 cur_devices->opened--; 2251 free_fs_devices(cur_devices); 2252 } 2253 2254 ret = btrfs_commit_transaction(trans); 2255 2256 return ret; 2257 2258 error_undo: 2259 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2260 mutex_lock(&fs_info->chunk_mutex); 2261 list_add(&device->dev_alloc_list, 2262 &fs_devices->alloc_list); 2263 device->fs_devices->rw_devices++; 2264 mutex_unlock(&fs_info->chunk_mutex); 2265 } 2266 return ret; 2267 } 2268 2269 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev) 2270 { 2271 struct btrfs_fs_devices *fs_devices; 2272 2273 lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex); 2274 2275 /* 2276 * in case of fs with no seed, srcdev->fs_devices will point 2277 * to fs_devices of fs_info. However when the dev being replaced is 2278 * a seed dev it will point to the seed's local fs_devices. In short 2279 * srcdev will have its correct fs_devices in both the cases. 2280 */ 2281 fs_devices = srcdev->fs_devices; 2282 2283 list_del_rcu(&srcdev->dev_list); 2284 list_del(&srcdev->dev_alloc_list); 2285 fs_devices->num_devices--; 2286 if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state)) 2287 fs_devices->missing_devices--; 2288 2289 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state)) 2290 fs_devices->rw_devices--; 2291 2292 if (srcdev->bdev) 2293 fs_devices->open_devices--; 2294 } 2295 2296 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev) 2297 { 2298 struct btrfs_fs_devices *fs_devices = srcdev->fs_devices; 2299 2300 mutex_lock(&uuid_mutex); 2301 2302 btrfs_close_bdev(srcdev); 2303 synchronize_rcu(); 2304 btrfs_free_device(srcdev); 2305 2306 /* if this is no devs we rather delete the fs_devices */ 2307 if (!fs_devices->num_devices) { 2308 /* 2309 * On a mounted FS, num_devices can't be zero unless it's a 2310 * seed. In case of a seed device being replaced, the replace 2311 * target added to the sprout FS, so there will be no more 2312 * device left under the seed FS. 
2313 */ 2314 ASSERT(fs_devices->seeding); 2315 2316 list_del_init(&fs_devices->seed_list); 2317 close_fs_devices(fs_devices); 2318 free_fs_devices(fs_devices); 2319 } 2320 mutex_unlock(&uuid_mutex); 2321 } 2322 2323 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev) 2324 { 2325 struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices; 2326 2327 mutex_lock(&fs_devices->device_list_mutex); 2328 2329 btrfs_sysfs_remove_device(tgtdev); 2330 2331 if (tgtdev->bdev) 2332 fs_devices->open_devices--; 2333 2334 fs_devices->num_devices--; 2335 2336 btrfs_assign_next_active_device(tgtdev, NULL); 2337 2338 list_del_rcu(&tgtdev->dev_list); 2339 2340 mutex_unlock(&fs_devices->device_list_mutex); 2341 2342 btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev, 2343 tgtdev->name->str); 2344 2345 btrfs_close_bdev(tgtdev); 2346 synchronize_rcu(); 2347 btrfs_free_device(tgtdev); 2348 } 2349 2350 /* 2351 * Populate args from device at path. 2352 * 2353 * @fs_info: the filesystem 2354 * @args: the args to populate 2355 * @path: the path to the device 2356 * 2357 * This will read the super block of the device at @path and populate @args with 2358 * the devid, fsid, and uuid. This is meant to be used for ioctls that need to 2359 * lookup a device to operate on, but need to do it before we take any locks. 2360 * This properly handles the special case of "missing" that a user may pass in, 2361 * and does some basic sanity checks. The caller must make sure that @path is 2362 * properly NUL terminated before calling in, and must call 2363 * btrfs_put_dev_args_from_path() in order to free up the temporary fsid and 2364 * uuid buffers. 2365 * 2366 * Return: 0 for success, -errno for failure 2367 */ 2368 int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info, 2369 struct btrfs_dev_lookup_args *args, 2370 const char *path) 2371 { 2372 struct btrfs_super_block *disk_super; 2373 struct block_device *bdev; 2374 int ret; 2375 2376 if (!path || !path[0]) 2377 return -EINVAL; 2378 if (!strcmp(path, "missing")) { 2379 args->missing = true; 2380 return 0; 2381 } 2382 2383 args->uuid = kzalloc(BTRFS_UUID_SIZE, GFP_KERNEL); 2384 args->fsid = kzalloc(BTRFS_FSID_SIZE, GFP_KERNEL); 2385 if (!args->uuid || !args->fsid) { 2386 btrfs_put_dev_args_from_path(args); 2387 return -ENOMEM; 2388 } 2389 2390 ret = btrfs_get_bdev_and_sb(path, BLK_OPEN_READ, NULL, 0, 2391 &bdev, &disk_super); 2392 if (ret) { 2393 btrfs_put_dev_args_from_path(args); 2394 return ret; 2395 } 2396 2397 args->devid = btrfs_stack_device_id(&disk_super->dev_item); 2398 memcpy(args->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE); 2399 if (btrfs_fs_incompat(fs_info, METADATA_UUID)) 2400 memcpy(args->fsid, disk_super->metadata_uuid, BTRFS_FSID_SIZE); 2401 else 2402 memcpy(args->fsid, disk_super->fsid, BTRFS_FSID_SIZE); 2403 btrfs_release_disk_super(disk_super); 2404 blkdev_put(bdev, NULL); 2405 return 0; 2406 } 2407 2408 /* 2409 * Only use this jointly with btrfs_get_dev_args_from_path() because we will 2410 * allocate our ->uuid and ->fsid pointers, everybody else uses local variables 2411 * that don't need to be freed. 
2412 */ 2413 void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args) 2414 { 2415 kfree(args->uuid); 2416 kfree(args->fsid); 2417 args->uuid = NULL; 2418 args->fsid = NULL; 2419 } 2420 2421 struct btrfs_device *btrfs_find_device_by_devspec( 2422 struct btrfs_fs_info *fs_info, u64 devid, 2423 const char *device_path) 2424 { 2425 BTRFS_DEV_LOOKUP_ARGS(args); 2426 struct btrfs_device *device; 2427 int ret; 2428 2429 if (devid) { 2430 args.devid = devid; 2431 device = btrfs_find_device(fs_info->fs_devices, &args); 2432 if (!device) 2433 return ERR_PTR(-ENOENT); 2434 return device; 2435 } 2436 2437 ret = btrfs_get_dev_args_from_path(fs_info, &args, device_path); 2438 if (ret) 2439 return ERR_PTR(ret); 2440 device = btrfs_find_device(fs_info->fs_devices, &args); 2441 btrfs_put_dev_args_from_path(&args); 2442 if (!device) 2443 return ERR_PTR(-ENOENT); 2444 return device; 2445 } 2446 2447 static struct btrfs_fs_devices *btrfs_init_sprout(struct btrfs_fs_info *fs_info) 2448 { 2449 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2450 struct btrfs_fs_devices *old_devices; 2451 struct btrfs_fs_devices *seed_devices; 2452 2453 lockdep_assert_held(&uuid_mutex); 2454 if (!fs_devices->seeding) 2455 return ERR_PTR(-EINVAL); 2456 2457 /* 2458 * Private copy of the seed devices, anchored at 2459 * fs_info->fs_devices->seed_list 2460 */ 2461 seed_devices = alloc_fs_devices(NULL, NULL); 2462 if (IS_ERR(seed_devices)) 2463 return seed_devices; 2464 2465 /* 2466 * It's necessary to retain a copy of the original seed fs_devices in 2467 * fs_uuids so that filesystems which have been seeded can successfully 2468 * reference the seed device from open_seed_devices. This also supports 2469 * multiple fs seed. 2470 */ 2471 old_devices = clone_fs_devices(fs_devices); 2472 if (IS_ERR(old_devices)) { 2473 kfree(seed_devices); 2474 return old_devices; 2475 } 2476 2477 list_add(&old_devices->fs_list, &fs_uuids); 2478 2479 memcpy(seed_devices, fs_devices, sizeof(*seed_devices)); 2480 seed_devices->opened = 1; 2481 INIT_LIST_HEAD(&seed_devices->devices); 2482 INIT_LIST_HEAD(&seed_devices->alloc_list); 2483 mutex_init(&seed_devices->device_list_mutex); 2484 2485 return seed_devices; 2486 } 2487 2488 /* 2489 * Splice seed devices into the sprout fs_devices. 2490 * Generate a new fsid for the sprouted read-write filesystem. 2491 */ 2492 static void btrfs_setup_sprout(struct btrfs_fs_info *fs_info, 2493 struct btrfs_fs_devices *seed_devices) 2494 { 2495 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2496 struct btrfs_super_block *disk_super = fs_info->super_copy; 2497 struct btrfs_device *device; 2498 u64 super_flags; 2499 2500 /* 2501 * We are updating the fsid, the thread leading to device_list_add() 2502 * could race, so uuid_mutex is needed. 2503 */ 2504 lockdep_assert_held(&uuid_mutex); 2505 2506 /* 2507 * The threads listed below may traverse dev_list but can do that without 2508 * device_list_mutex: 2509 * - All device ops and balance - as we are in btrfs_exclop_start. 2510 * - Various dev_list readers - are using RCU. 2511 * - btrfs_ioctl_fitrim() - is using RCU. 
2512 * 2513 * For-read threads as below are using device_list_mutex: 2514 * - Readonly scrub btrfs_scrub_dev() 2515 * - Readonly scrub btrfs_scrub_progress() 2516 * - btrfs_get_dev_stats() 2517 */ 2518 lockdep_assert_held(&fs_devices->device_list_mutex); 2519 2520 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices, 2521 synchronize_rcu); 2522 list_for_each_entry(device, &seed_devices->devices, dev_list) 2523 device->fs_devices = seed_devices; 2524 2525 fs_devices->seeding = false; 2526 fs_devices->num_devices = 0; 2527 fs_devices->open_devices = 0; 2528 fs_devices->missing_devices = 0; 2529 fs_devices->rotating = false; 2530 list_add(&seed_devices->seed_list, &fs_devices->seed_list); 2531 2532 generate_random_uuid(fs_devices->fsid); 2533 memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE); 2534 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); 2535 2536 super_flags = btrfs_super_flags(disk_super) & 2537 ~BTRFS_SUPER_FLAG_SEEDING; 2538 btrfs_set_super_flags(disk_super, super_flags); 2539 } 2540 2541 /* 2542 * Store the expected generation for seed devices in device items. 2543 */ 2544 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans) 2545 { 2546 BTRFS_DEV_LOOKUP_ARGS(args); 2547 struct btrfs_fs_info *fs_info = trans->fs_info; 2548 struct btrfs_root *root = fs_info->chunk_root; 2549 struct btrfs_path *path; 2550 struct extent_buffer *leaf; 2551 struct btrfs_dev_item *dev_item; 2552 struct btrfs_device *device; 2553 struct btrfs_key key; 2554 u8 fs_uuid[BTRFS_FSID_SIZE]; 2555 u8 dev_uuid[BTRFS_UUID_SIZE]; 2556 int ret; 2557 2558 path = btrfs_alloc_path(); 2559 if (!path) 2560 return -ENOMEM; 2561 2562 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2563 key.offset = 0; 2564 key.type = BTRFS_DEV_ITEM_KEY; 2565 2566 while (1) { 2567 btrfs_reserve_chunk_metadata(trans, false); 2568 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2569 btrfs_trans_release_chunk_metadata(trans); 2570 if (ret < 0) 2571 goto error; 2572 2573 leaf = path->nodes[0]; 2574 next_slot: 2575 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 2576 ret = btrfs_next_leaf(root, path); 2577 if (ret > 0) 2578 break; 2579 if (ret < 0) 2580 goto error; 2581 leaf = path->nodes[0]; 2582 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2583 btrfs_release_path(path); 2584 continue; 2585 } 2586 2587 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2588 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID || 2589 key.type != BTRFS_DEV_ITEM_KEY) 2590 break; 2591 2592 dev_item = btrfs_item_ptr(leaf, path->slots[0], 2593 struct btrfs_dev_item); 2594 args.devid = btrfs_device_id(leaf, dev_item); 2595 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 2596 BTRFS_UUID_SIZE); 2597 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 2598 BTRFS_FSID_SIZE); 2599 args.uuid = dev_uuid; 2600 args.fsid = fs_uuid; 2601 device = btrfs_find_device(fs_info->fs_devices, &args); 2602 BUG_ON(!device); /* Logic error */ 2603 2604 if (device->fs_devices->seeding) { 2605 btrfs_set_device_generation(leaf, dev_item, 2606 device->generation); 2607 btrfs_mark_buffer_dirty(trans, leaf); 2608 } 2609 2610 path->slots[0]++; 2611 goto next_slot; 2612 } 2613 ret = 0; 2614 error: 2615 btrfs_free_path(path); 2616 return ret; 2617 } 2618 2619 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path) 2620 { 2621 struct btrfs_root *root = fs_info->dev_root; 2622 struct btrfs_trans_handle *trans; 2623 struct btrfs_device *device; 2624 struct block_device *bdev; 2625 struct 
super_block *sb = fs_info->sb; 2626 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2627 struct btrfs_fs_devices *seed_devices = NULL; 2628 u64 orig_super_total_bytes; 2629 u64 orig_super_num_devices; 2630 int ret = 0; 2631 bool seeding_dev = false; 2632 bool locked = false; 2633 2634 if (sb_rdonly(sb) && !fs_devices->seeding) 2635 return -EROFS; 2636 2637 bdev = blkdev_get_by_path(device_path, BLK_OPEN_WRITE, 2638 fs_info->bdev_holder, NULL); 2639 if (IS_ERR(bdev)) 2640 return PTR_ERR(bdev); 2641 2642 if (!btrfs_check_device_zone_type(fs_info, bdev)) { 2643 ret = -EINVAL; 2644 goto error; 2645 } 2646 2647 if (fs_devices->seeding) { 2648 seeding_dev = true; 2649 down_write(&sb->s_umount); 2650 mutex_lock(&uuid_mutex); 2651 locked = true; 2652 } 2653 2654 sync_blockdev(bdev); 2655 2656 rcu_read_lock(); 2657 list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) { 2658 if (device->bdev == bdev) { 2659 ret = -EEXIST; 2660 rcu_read_unlock(); 2661 goto error; 2662 } 2663 } 2664 rcu_read_unlock(); 2665 2666 device = btrfs_alloc_device(fs_info, NULL, NULL, device_path); 2667 if (IS_ERR(device)) { 2668 /* we can safely leave the fs_devices entry around */ 2669 ret = PTR_ERR(device); 2670 goto error; 2671 } 2672 2673 device->fs_info = fs_info; 2674 device->bdev = bdev; 2675 ret = lookup_bdev(device_path, &device->devt); 2676 if (ret) 2677 goto error_free_device; 2678 2679 ret = btrfs_get_dev_zone_info(device, false); 2680 if (ret) 2681 goto error_free_device; 2682 2683 trans = btrfs_start_transaction(root, 0); 2684 if (IS_ERR(trans)) { 2685 ret = PTR_ERR(trans); 2686 goto error_free_zone; 2687 } 2688 2689 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 2690 device->generation = trans->transid; 2691 device->io_width = fs_info->sectorsize; 2692 device->io_align = fs_info->sectorsize; 2693 device->sector_size = fs_info->sectorsize; 2694 device->total_bytes = 2695 round_down(bdev_nr_bytes(bdev), fs_info->sectorsize); 2696 device->disk_total_bytes = device->total_bytes; 2697 device->commit_total_bytes = device->total_bytes; 2698 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2699 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 2700 device->holder = fs_info->bdev_holder; 2701 device->dev_stats_valid = 1; 2702 set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE); 2703 2704 if (seeding_dev) { 2705 btrfs_clear_sb_rdonly(sb); 2706 2707 /* GFP_KERNEL allocation must not be under device_list_mutex */ 2708 seed_devices = btrfs_init_sprout(fs_info); 2709 if (IS_ERR(seed_devices)) { 2710 ret = PTR_ERR(seed_devices); 2711 btrfs_abort_transaction(trans, ret); 2712 goto error_trans; 2713 } 2714 } 2715 2716 mutex_lock(&fs_devices->device_list_mutex); 2717 if (seeding_dev) { 2718 btrfs_setup_sprout(fs_info, seed_devices); 2719 btrfs_assign_next_active_device(fs_info->fs_devices->latest_dev, 2720 device); 2721 } 2722 2723 device->fs_devices = fs_devices; 2724 2725 mutex_lock(&fs_info->chunk_mutex); 2726 list_add_rcu(&device->dev_list, &fs_devices->devices); 2727 list_add(&device->dev_alloc_list, &fs_devices->alloc_list); 2728 fs_devices->num_devices++; 2729 fs_devices->open_devices++; 2730 fs_devices->rw_devices++; 2731 fs_devices->total_devices++; 2732 fs_devices->total_rw_bytes += device->total_bytes; 2733 2734 atomic64_add(device->total_bytes, &fs_info->free_chunk_space); 2735 2736 if (!bdev_nonrot(bdev)) 2737 fs_devices->rotating = true; 2738 2739 orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy); 2740 
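/*
 * Account for the new device in the superblock: grow total_bytes (kept
 * rounded down to the sector size) and bump num_devices.
 */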
btrfs_set_super_total_bytes(fs_info->super_copy, 2741 round_down(orig_super_total_bytes + device->total_bytes, 2742 fs_info->sectorsize)); 2743 2744 orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy); 2745 btrfs_set_super_num_devices(fs_info->super_copy, 2746 orig_super_num_devices + 1); 2747 2748 /* 2749 * we've got more storage, clear any full flags on the space 2750 * infos 2751 */ 2752 btrfs_clear_space_info_full(fs_info); 2753 2754 mutex_unlock(&fs_info->chunk_mutex); 2755 2756 /* Add sysfs device entry */ 2757 btrfs_sysfs_add_device(device); 2758 2759 mutex_unlock(&fs_devices->device_list_mutex); 2760 2761 if (seeding_dev) { 2762 mutex_lock(&fs_info->chunk_mutex); 2763 ret = init_first_rw_device(trans); 2764 mutex_unlock(&fs_info->chunk_mutex); 2765 if (ret) { 2766 btrfs_abort_transaction(trans, ret); 2767 goto error_sysfs; 2768 } 2769 } 2770 2771 ret = btrfs_add_dev_item(trans, device); 2772 if (ret) { 2773 btrfs_abort_transaction(trans, ret); 2774 goto error_sysfs; 2775 } 2776 2777 if (seeding_dev) { 2778 ret = btrfs_finish_sprout(trans); 2779 if (ret) { 2780 btrfs_abort_transaction(trans, ret); 2781 goto error_sysfs; 2782 } 2783 2784 /* 2785 * fs_devices now represents the newly sprouted filesystem and 2786 * its fsid has been changed by btrfs_setup_sprout(). 2787 */ 2788 btrfs_sysfs_update_sprout_fsid(fs_devices); 2789 } 2790 2791 ret = btrfs_commit_transaction(trans); 2792 2793 if (seeding_dev) { 2794 mutex_unlock(&uuid_mutex); 2795 up_write(&sb->s_umount); 2796 locked = false; 2797 2798 if (ret) /* transaction commit */ 2799 return ret; 2800 2801 ret = btrfs_relocate_sys_chunks(fs_info); 2802 if (ret < 0) 2803 btrfs_handle_fs_error(fs_info, ret, 2804 "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command."); 2805 trans = btrfs_attach_transaction(root); 2806 if (IS_ERR(trans)) { 2807 if (PTR_ERR(trans) == -ENOENT) 2808 return 0; 2809 ret = PTR_ERR(trans); 2810 trans = NULL; 2811 goto error_sysfs; 2812 } 2813 ret = btrfs_commit_transaction(trans); 2814 } 2815 2816 /* 2817 * Now that we have written a new super block to this device, check all 2818 * other fs_devices lists to see if device_path alienates any other 2819 * scanned device. 2820 * We can ignore the return value as it typically returns -EINVAL and 2821 * only succeeds if the device was an alien.
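* (an "alien" here being a stale scanned entry that recorded this block
* device as belonging to a different, previously scanned filesystem)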
2822 */ 2823 btrfs_forget_devices(device->devt); 2824 2825 /* Update ctime/mtime for blkid or udev */ 2826 update_dev_time(device_path); 2827 2828 return ret; 2829 2830 error_sysfs: 2831 btrfs_sysfs_remove_device(device); 2832 mutex_lock(&fs_info->fs_devices->device_list_mutex); 2833 mutex_lock(&fs_info->chunk_mutex); 2834 list_del_rcu(&device->dev_list); 2835 list_del(&device->dev_alloc_list); 2836 fs_info->fs_devices->num_devices--; 2837 fs_info->fs_devices->open_devices--; 2838 fs_info->fs_devices->rw_devices--; 2839 fs_info->fs_devices->total_devices--; 2840 fs_info->fs_devices->total_rw_bytes -= device->total_bytes; 2841 atomic64_sub(device->total_bytes, &fs_info->free_chunk_space); 2842 btrfs_set_super_total_bytes(fs_info->super_copy, 2843 orig_super_total_bytes); 2844 btrfs_set_super_num_devices(fs_info->super_copy, 2845 orig_super_num_devices); 2846 mutex_unlock(&fs_info->chunk_mutex); 2847 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2848 error_trans: 2849 if (seeding_dev) 2850 btrfs_set_sb_rdonly(sb); 2851 if (trans) 2852 btrfs_end_transaction(trans); 2853 error_free_zone: 2854 btrfs_destroy_dev_zone_info(device); 2855 error_free_device: 2856 btrfs_free_device(device); 2857 error: 2858 blkdev_put(bdev, fs_info->bdev_holder); 2859 if (locked) { 2860 mutex_unlock(&uuid_mutex); 2861 up_write(&sb->s_umount); 2862 } 2863 return ret; 2864 } 2865 2866 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans, 2867 struct btrfs_device *device) 2868 { 2869 int ret; 2870 struct btrfs_path *path; 2871 struct btrfs_root *root = device->fs_info->chunk_root; 2872 struct btrfs_dev_item *dev_item; 2873 struct extent_buffer *leaf; 2874 struct btrfs_key key; 2875 2876 path = btrfs_alloc_path(); 2877 if (!path) 2878 return -ENOMEM; 2879 2880 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2881 key.type = BTRFS_DEV_ITEM_KEY; 2882 key.offset = device->devid; 2883 2884 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2885 if (ret < 0) 2886 goto out; 2887 2888 if (ret > 0) { 2889 ret = -ENOENT; 2890 goto out; 2891 } 2892 2893 leaf = path->nodes[0]; 2894 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 2895 2896 btrfs_set_device_id(leaf, dev_item, device->devid); 2897 btrfs_set_device_type(leaf, dev_item, device->type); 2898 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 2899 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 2900 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 2901 btrfs_set_device_total_bytes(leaf, dev_item, 2902 btrfs_device_get_disk_total_bytes(device)); 2903 btrfs_set_device_bytes_used(leaf, dev_item, 2904 btrfs_device_get_bytes_used(device)); 2905 btrfs_mark_buffer_dirty(trans, leaf); 2906 2907 out: 2908 btrfs_free_path(path); 2909 return ret; 2910 } 2911 2912 int btrfs_grow_device(struct btrfs_trans_handle *trans, 2913 struct btrfs_device *device, u64 new_size) 2914 { 2915 struct btrfs_fs_info *fs_info = device->fs_info; 2916 struct btrfs_super_block *super_copy = fs_info->super_copy; 2917 u64 old_total; 2918 u64 diff; 2919 int ret; 2920 2921 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 2922 return -EACCES; 2923 2924 new_size = round_down(new_size, fs_info->sectorsize); 2925 2926 mutex_lock(&fs_info->chunk_mutex); 2927 old_total = btrfs_super_total_bytes(super_copy); 2928 diff = round_down(new_size - device->total_bytes, fs_info->sectorsize); 2929 2930 if (new_size <= device->total_bytes || 2931 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 2932 
mutex_unlock(&fs_info->chunk_mutex); 2933 return -EINVAL; 2934 } 2935 2936 btrfs_set_super_total_bytes(super_copy, 2937 round_down(old_total + diff, fs_info->sectorsize)); 2938 device->fs_devices->total_rw_bytes += diff; 2939 2940 btrfs_device_set_total_bytes(device, new_size); 2941 btrfs_device_set_disk_total_bytes(device, new_size); 2942 btrfs_clear_space_info_full(device->fs_info); 2943 if (list_empty(&device->post_commit_list)) 2944 list_add_tail(&device->post_commit_list, 2945 &trans->transaction->dev_update_list); 2946 mutex_unlock(&fs_info->chunk_mutex); 2947 2948 btrfs_reserve_chunk_metadata(trans, false); 2949 ret = btrfs_update_device(trans, device); 2950 btrfs_trans_release_chunk_metadata(trans); 2951 2952 return ret; 2953 } 2954 2955 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 2956 { 2957 struct btrfs_fs_info *fs_info = trans->fs_info; 2958 struct btrfs_root *root = fs_info->chunk_root; 2959 int ret; 2960 struct btrfs_path *path; 2961 struct btrfs_key key; 2962 2963 path = btrfs_alloc_path(); 2964 if (!path) 2965 return -ENOMEM; 2966 2967 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2968 key.offset = chunk_offset; 2969 key.type = BTRFS_CHUNK_ITEM_KEY; 2970 2971 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 2972 if (ret < 0) 2973 goto out; 2974 else if (ret > 0) { /* Logic error or corruption */ 2975 btrfs_handle_fs_error(fs_info, -ENOENT, 2976 "Failed lookup while freeing chunk."); 2977 ret = -ENOENT; 2978 goto out; 2979 } 2980 2981 ret = btrfs_del_item(trans, root, path); 2982 if (ret < 0) 2983 btrfs_handle_fs_error(fs_info, ret, 2984 "Failed to delete chunk item."); 2985 out: 2986 btrfs_free_path(path); 2987 return ret; 2988 } 2989 2990 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 2991 { 2992 struct btrfs_super_block *super_copy = fs_info->super_copy; 2993 struct btrfs_disk_key *disk_key; 2994 struct btrfs_chunk *chunk; 2995 u8 *ptr; 2996 int ret = 0; 2997 u32 num_stripes; 2998 u32 array_size; 2999 u32 len = 0; 3000 u32 cur; 3001 struct btrfs_key key; 3002 3003 lockdep_assert_held(&fs_info->chunk_mutex); 3004 array_size = btrfs_super_sys_array_size(super_copy); 3005 3006 ptr = super_copy->sys_chunk_array; 3007 cur = 0; 3008 3009 while (cur < array_size) { 3010 disk_key = (struct btrfs_disk_key *)ptr; 3011 btrfs_disk_key_to_cpu(&key, disk_key); 3012 3013 len = sizeof(*disk_key); 3014 3015 if (key.type == BTRFS_CHUNK_ITEM_KEY) { 3016 chunk = (struct btrfs_chunk *)(ptr + len); 3017 num_stripes = btrfs_stack_chunk_num_stripes(chunk); 3018 len += btrfs_chunk_item_size(num_stripes); 3019 } else { 3020 ret = -EIO; 3021 break; 3022 } 3023 if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID && 3024 key.offset == chunk_offset) { 3025 memmove(ptr, ptr + len, array_size - (cur + len)); 3026 array_size -= len; 3027 btrfs_set_super_sys_array_size(super_copy, array_size); 3028 } else { 3029 ptr += len; 3030 cur += len; 3031 } 3032 } 3033 return ret; 3034 } 3035 3036 /* 3037 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent. 3038 * @logical: Logical block offset in bytes. 3039 * @length: Length of extent in bytes. 3040 * 3041 * Return: Chunk mapping or ERR_PTR. 
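* The returned extent map holds a reference which the caller must drop with
* free_extent_map() once done with it.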
3042 */ 3043 struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info, 3044 u64 logical, u64 length) 3045 { 3046 struct extent_map_tree *em_tree; 3047 struct extent_map *em; 3048 3049 em_tree = &fs_info->mapping_tree; 3050 read_lock(&em_tree->lock); 3051 em = lookup_extent_mapping(em_tree, logical, length); 3052 read_unlock(&em_tree->lock); 3053 3054 if (!em) { 3055 btrfs_crit(fs_info, 3056 "unable to find chunk map for logical %llu length %llu", 3057 logical, length); 3058 return ERR_PTR(-EINVAL); 3059 } 3060 3061 if (em->start > logical || em->start + em->len <= logical) { 3062 btrfs_crit(fs_info, 3063 "found a bad chunk map, wanted %llu-%llu, found %llu-%llu", 3064 logical, logical + length, em->start, em->start + em->len); 3065 free_extent_map(em); 3066 return ERR_PTR(-EINVAL); 3067 } 3068 3069 /* callers are responsible for dropping em's ref. */ 3070 return em; 3071 } 3072 3073 static int remove_chunk_item(struct btrfs_trans_handle *trans, 3074 struct map_lookup *map, u64 chunk_offset) 3075 { 3076 int i; 3077 3078 /* 3079 * Removing chunk items and updating the device items in the chunks btree 3080 * requires holding the chunk_mutex. 3081 * See the comment at btrfs_chunk_alloc() for the details. 3082 */ 3083 lockdep_assert_held(&trans->fs_info->chunk_mutex); 3084 3085 for (i = 0; i < map->num_stripes; i++) { 3086 int ret; 3087 3088 ret = btrfs_update_device(trans, map->stripes[i].dev); 3089 if (ret) 3090 return ret; 3091 } 3092 3093 return btrfs_free_chunk(trans, chunk_offset); 3094 } 3095 3096 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 3097 { 3098 struct btrfs_fs_info *fs_info = trans->fs_info; 3099 struct extent_map *em; 3100 struct map_lookup *map; 3101 u64 dev_extent_len = 0; 3102 int i, ret = 0; 3103 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 3104 3105 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 3106 if (IS_ERR(em)) { 3107 /* 3108 * This is a logic error, but we don't want to just rely on the 3109 * user having built with ASSERT enabled, so if ASSERT doesn't 3110 * do anything we still error out. 3111 */ 3112 ASSERT(0); 3113 return PTR_ERR(em); 3114 } 3115 map = em->map_lookup; 3116 3117 /* 3118 * First delete the device extent items from the devices btree. 3119 * We take the device_list_mutex to avoid racing with the finishing phase 3120 * of a device replace operation. See the comment below before acquiring 3121 * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex 3122 * because that can result in a deadlock when deleting the device extent 3123 * items from the devices btree - COWing an extent buffer from the btree 3124 * may result in allocating a new metadata chunk, which would attempt to 3125 * lock again fs_info->chunk_mutex. 
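* (in other words: the device extents are removed below while holding only
* the device_list_mutex, and the chunk item itself only later, once the
* chunk_mutex can be taken safely)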
3126 */ 3127 mutex_lock(&fs_devices->device_list_mutex); 3128 for (i = 0; i < map->num_stripes; i++) { 3129 struct btrfs_device *device = map->stripes[i].dev; 3130 ret = btrfs_free_dev_extent(trans, device, 3131 map->stripes[i].physical, 3132 &dev_extent_len); 3133 if (ret) { 3134 mutex_unlock(&fs_devices->device_list_mutex); 3135 btrfs_abort_transaction(trans, ret); 3136 goto out; 3137 } 3138 3139 if (device->bytes_used > 0) { 3140 mutex_lock(&fs_info->chunk_mutex); 3141 btrfs_device_set_bytes_used(device, 3142 device->bytes_used - dev_extent_len); 3143 atomic64_add(dev_extent_len, &fs_info->free_chunk_space); 3144 btrfs_clear_space_info_full(fs_info); 3145 mutex_unlock(&fs_info->chunk_mutex); 3146 } 3147 } 3148 mutex_unlock(&fs_devices->device_list_mutex); 3149 3150 /* 3151 * We acquire fs_info->chunk_mutex for 2 reasons: 3152 * 3153 * 1) Just like with the first phase of the chunk allocation, we must 3154 * reserve system space, do all chunk btree updates and deletions, and 3155 * update the system chunk array in the superblock while holding this 3156 * mutex. This is for similar reasons as explained on the comment at 3157 * the top of btrfs_chunk_alloc(); 3158 * 3159 * 2) Prevent races with the final phase of a device replace operation 3160 * that replaces the device object associated with the map's stripes, 3161 * because the device object's id can change at any time during that 3162 * final phase of the device replace operation 3163 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the 3164 * replaced device and then see it with an ID of 3165 * BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating 3166 * the device item, which does not exists on the chunk btree. 3167 * The finishing phase of device replace acquires both the 3168 * device_list_mutex and the chunk_mutex, in that order, so we are 3169 * safe by just acquiring the chunk_mutex. 3170 */ 3171 trans->removing_chunk = true; 3172 mutex_lock(&fs_info->chunk_mutex); 3173 3174 check_system_chunk(trans, map->type); 3175 3176 ret = remove_chunk_item(trans, map, chunk_offset); 3177 /* 3178 * Normally we should not get -ENOSPC since we reserved space before 3179 * through the call to check_system_chunk(). 3180 * 3181 * Despite our system space_info having enough free space, we may not 3182 * be able to allocate extents from its block groups, because all have 3183 * an incompatible profile, which will force us to allocate a new system 3184 * block group with the right profile, or right after we called 3185 * check_system_space() above, a scrub turned the only system block group 3186 * with enough free space into RO mode. 3187 * This is explained with more detail at do_chunk_alloc(). 3188 * 3189 * So if we get -ENOSPC, allocate a new system chunk and retry once. 
3190 */ 3191 if (ret == -ENOSPC) { 3192 const u64 sys_flags = btrfs_system_alloc_profile(fs_info); 3193 struct btrfs_block_group *sys_bg; 3194 3195 sys_bg = btrfs_create_chunk(trans, sys_flags); 3196 if (IS_ERR(sys_bg)) { 3197 ret = PTR_ERR(sys_bg); 3198 btrfs_abort_transaction(trans, ret); 3199 goto out; 3200 } 3201 3202 ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg); 3203 if (ret) { 3204 btrfs_abort_transaction(trans, ret); 3205 goto out; 3206 } 3207 3208 ret = remove_chunk_item(trans, map, chunk_offset); 3209 if (ret) { 3210 btrfs_abort_transaction(trans, ret); 3211 goto out; 3212 } 3213 } else if (ret) { 3214 btrfs_abort_transaction(trans, ret); 3215 goto out; 3216 } 3217 3218 trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len); 3219 3220 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 3221 ret = btrfs_del_sys_chunk(fs_info, chunk_offset); 3222 if (ret) { 3223 btrfs_abort_transaction(trans, ret); 3224 goto out; 3225 } 3226 } 3227 3228 mutex_unlock(&fs_info->chunk_mutex); 3229 trans->removing_chunk = false; 3230 3231 /* 3232 * We are done with chunk btree updates and deletions, so release the 3233 * system space we previously reserved (with check_system_chunk()). 3234 */ 3235 btrfs_trans_release_chunk_metadata(trans); 3236 3237 ret = btrfs_remove_block_group(trans, chunk_offset, em); 3238 if (ret) { 3239 btrfs_abort_transaction(trans, ret); 3240 goto out; 3241 } 3242 3243 out: 3244 if (trans->removing_chunk) { 3245 mutex_unlock(&fs_info->chunk_mutex); 3246 trans->removing_chunk = false; 3247 } 3248 /* once for us */ 3249 free_extent_map(em); 3250 return ret; 3251 } 3252 3253 int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 3254 { 3255 struct btrfs_root *root = fs_info->chunk_root; 3256 struct btrfs_trans_handle *trans; 3257 struct btrfs_block_group *block_group; 3258 u64 length; 3259 int ret; 3260 3261 if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { 3262 btrfs_err(fs_info, 3263 "relocate: not supported on extent tree v2 yet"); 3264 return -EINVAL; 3265 } 3266 3267 /* 3268 * Prevent races with automatic removal of unused block groups. 3269 * After we relocate and before we remove the chunk with offset 3270 * chunk_offset, automatic removal of the block group can kick in, 3271 * resulting in a failure when calling btrfs_remove_chunk() below. 3272 * 3273 * Make sure to acquire this mutex before doing a tree search (dev 3274 * or chunk trees) to find chunks. Otherwise the cleaner kthread might 3275 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after 3276 * we release the path used to search the chunk/dev tree and before 3277 * the current task acquires this mutex and calls us. 3278 */ 3279 lockdep_assert_held(&fs_info->reclaim_bgs_lock); 3280 3281 /* step one, relocate all the extents inside this chunk */ 3282 btrfs_scrub_pause(fs_info); 3283 ret = btrfs_relocate_block_group(fs_info, chunk_offset); 3284 btrfs_scrub_continue(fs_info); 3285 if (ret) { 3286 /* 3287 * If we had a transaction abort, stop all running scrubs. 3288 * See transaction.c:cleanup_transaction() why we do it here. 
3289 */ 3290 if (BTRFS_FS_ERROR(fs_info)) 3291 btrfs_scrub_cancel(fs_info); 3292 return ret; 3293 } 3294 3295 block_group = btrfs_lookup_block_group(fs_info, chunk_offset); 3296 if (!block_group) 3297 return -ENOENT; 3298 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group); 3299 length = block_group->length; 3300 btrfs_put_block_group(block_group); 3301 3302 /* 3303 * On a zoned file system, discard the whole block group, this will 3304 * trigger a REQ_OP_ZONE_RESET operation on the device zone. If 3305 * resetting the zone fails, don't treat it as a fatal problem from the 3306 * filesystem's point of view. 3307 */ 3308 if (btrfs_is_zoned(fs_info)) { 3309 ret = btrfs_discard_extent(fs_info, chunk_offset, length, NULL); 3310 if (ret) 3311 btrfs_info(fs_info, 3312 "failed to reset zone %llu after relocation", 3313 chunk_offset); 3314 } 3315 3316 trans = btrfs_start_trans_remove_block_group(root->fs_info, 3317 chunk_offset); 3318 if (IS_ERR(trans)) { 3319 ret = PTR_ERR(trans); 3320 btrfs_handle_fs_error(root->fs_info, ret, NULL); 3321 return ret; 3322 } 3323 3324 /* 3325 * step two, delete the device extents and the 3326 * chunk tree entries 3327 */ 3328 ret = btrfs_remove_chunk(trans, chunk_offset); 3329 btrfs_end_transaction(trans); 3330 return ret; 3331 } 3332 3333 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info) 3334 { 3335 struct btrfs_root *chunk_root = fs_info->chunk_root; 3336 struct btrfs_path *path; 3337 struct extent_buffer *leaf; 3338 struct btrfs_chunk *chunk; 3339 struct btrfs_key key; 3340 struct btrfs_key found_key; 3341 u64 chunk_type; 3342 bool retried = false; 3343 int failed = 0; 3344 int ret; 3345 3346 path = btrfs_alloc_path(); 3347 if (!path) 3348 return -ENOMEM; 3349 3350 again: 3351 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3352 key.offset = (u64)-1; 3353 key.type = BTRFS_CHUNK_ITEM_KEY; 3354 3355 while (1) { 3356 mutex_lock(&fs_info->reclaim_bgs_lock); 3357 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3358 if (ret < 0) { 3359 mutex_unlock(&fs_info->reclaim_bgs_lock); 3360 goto error; 3361 } 3362 if (ret == 0) { 3363 /* 3364 * On the first search we would find chunk tree with 3365 * offset -1, which is not possible. On subsequent 3366 * loops this would find an existing item on an invalid 3367 * offset (one less than the previous one, wrong 3368 * alignment and size). 
*/ 3369 3370 ret = -EUCLEAN; 3371 goto error; 3372 } 3373 3374 ret = btrfs_previous_item(chunk_root, path, key.objectid, 3375 key.type); 3376 if (ret) 3377 mutex_unlock(&fs_info->reclaim_bgs_lock); 3378 if (ret < 0) 3379 goto error; 3380 if (ret > 0) 3381 break; 3382 3383 leaf = path->nodes[0]; 3384 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3385 3386 chunk = btrfs_item_ptr(leaf, path->slots[0], 3387 struct btrfs_chunk); 3388 chunk_type = btrfs_chunk_type(leaf, chunk); 3389 btrfs_release_path(path); 3390 3391 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) { 3392 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 3393 if (ret == -ENOSPC) 3394 failed++; 3395 else 3396 BUG_ON(ret); 3397 } 3398 mutex_unlock(&fs_info->reclaim_bgs_lock); 3399 3400 if (found_key.offset == 0) 3401 break; 3402 key.offset = found_key.offset - 1; 3403 } 3404 ret = 0; 3405 if (failed && !retried) { 3406 failed = 0; 3407 retried = true; 3408 goto again; 3409 } else if (WARN_ON(failed && retried)) { 3410 ret = -ENOSPC; 3411 } 3412 error: 3413 btrfs_free_path(path); 3414 return ret; 3415 } 3416 3417 /* 3418 * Return 1 : a data chunk was allocated successfully, 3419 * return <0: an error occurred while allocating a data chunk, 3420 * return 0 : there was no need to allocate a data chunk. 3421 */ 3422 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info, 3423 u64 chunk_offset) 3424 { 3425 struct btrfs_block_group *cache; 3426 u64 bytes_used; 3427 u64 chunk_type; 3428 3429 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3430 ASSERT(cache); 3431 chunk_type = cache->flags; 3432 btrfs_put_block_group(cache); 3433 3434 if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA)) 3435 return 0; 3436 3437 spin_lock(&fs_info->data_sinfo->lock); 3438 bytes_used = fs_info->data_sinfo->bytes_used; 3439 spin_unlock(&fs_info->data_sinfo->lock); 3440 3441 if (!bytes_used) { 3442 struct btrfs_trans_handle *trans; 3443 int ret; 3444 3445 trans = btrfs_join_transaction(fs_info->tree_root); 3446 if (IS_ERR(trans)) 3447 return PTR_ERR(trans); 3448 3449 ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA); 3450 btrfs_end_transaction(trans); 3451 if (ret < 0) 3452 return ret; 3453 return 1; 3454 } 3455 3456 return 0; 3457 } 3458 3459 static int insert_balance_item(struct btrfs_fs_info *fs_info, 3460 struct btrfs_balance_control *bctl) 3461 { 3462 struct btrfs_root *root = fs_info->tree_root; 3463 struct btrfs_trans_handle *trans; 3464 struct btrfs_balance_item *item; 3465 struct btrfs_disk_balance_args disk_bargs; 3466 struct btrfs_path *path; 3467 struct extent_buffer *leaf; 3468 struct btrfs_key key; 3469 int ret, err; 3470 3471 path = btrfs_alloc_path(); 3472 if (!path) 3473 return -ENOMEM; 3474 3475 trans = btrfs_start_transaction(root, 0); 3476 if (IS_ERR(trans)) { 3477 btrfs_free_path(path); 3478 return PTR_ERR(trans); 3479 } 3480 3481 key.objectid = BTRFS_BALANCE_OBJECTID; 3482 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3483 key.offset = 0; 3484 3485 ret = btrfs_insert_empty_item(trans, root, path, &key, 3486 sizeof(*item)); 3487 if (ret) 3488 goto out; 3489 3490 leaf = path->nodes[0]; 3491 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 3492 3493 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item)); 3494 3495 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data); 3496 btrfs_set_balance_data(leaf, item, &disk_bargs); 3497 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta); 3498 btrfs_set_balance_meta(leaf, item, &disk_bargs); 3499 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys); 3500
btrfs_set_balance_sys(leaf, item, &disk_bargs); 3501 3502 btrfs_set_balance_flags(leaf, item, bctl->flags); 3503 3504 btrfs_mark_buffer_dirty(trans, leaf); 3505 out: 3506 btrfs_free_path(path); 3507 err = btrfs_commit_transaction(trans); 3508 if (err && !ret) 3509 ret = err; 3510 return ret; 3511 } 3512 3513 static int del_balance_item(struct btrfs_fs_info *fs_info) 3514 { 3515 struct btrfs_root *root = fs_info->tree_root; 3516 struct btrfs_trans_handle *trans; 3517 struct btrfs_path *path; 3518 struct btrfs_key key; 3519 int ret, err; 3520 3521 path = btrfs_alloc_path(); 3522 if (!path) 3523 return -ENOMEM; 3524 3525 trans = btrfs_start_transaction_fallback_global_rsv(root, 0); 3526 if (IS_ERR(trans)) { 3527 btrfs_free_path(path); 3528 return PTR_ERR(trans); 3529 } 3530 3531 key.objectid = BTRFS_BALANCE_OBJECTID; 3532 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3533 key.offset = 0; 3534 3535 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 3536 if (ret < 0) 3537 goto out; 3538 if (ret > 0) { 3539 ret = -ENOENT; 3540 goto out; 3541 } 3542 3543 ret = btrfs_del_item(trans, root, path); 3544 out: 3545 btrfs_free_path(path); 3546 err = btrfs_commit_transaction(trans); 3547 if (err && !ret) 3548 ret = err; 3549 return ret; 3550 } 3551 3552 /* 3553 * This is a heuristic used to reduce the number of chunks balanced on 3554 * resume after balance was interrupted. 3555 */ 3556 static void update_balance_args(struct btrfs_balance_control *bctl) 3557 { 3558 /* 3559 * Turn on soft mode for chunk types that were being converted. 3560 */ 3561 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) 3562 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT; 3563 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) 3564 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT; 3565 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) 3566 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT; 3567 3568 /* 3569 * Turn on usage filter if it is not already used. The idea is 3570 * that chunks that we have already balanced should be 3571 * reasonably full. Don't do it for chunks that are being 3572 * converted - that will keep us from relocating unconverted 3573 * (albeit full) chunks. 3574 */ 3575 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) && 3576 !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3577 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3578 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE; 3579 bctl->data.usage = 90; 3580 } 3581 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) && 3582 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3583 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3584 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE; 3585 bctl->sys.usage = 90; 3586 } 3587 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) && 3588 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3589 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3590 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE; 3591 bctl->meta.usage = 90; 3592 } 3593 } 3594 3595 /* 3596 * Clear the balance status in fs_info and delete the balance item from disk. 3597 */ 3598 static void reset_balance_state(struct btrfs_fs_info *fs_info) 3599 { 3600 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3601 int ret; 3602 3603 BUG_ON(!fs_info->balance_ctl); 3604 3605 spin_lock(&fs_info->balance_lock); 3606 fs_info->balance_ctl = NULL; 3607 spin_unlock(&fs_info->balance_lock); 3608 3609 kfree(bctl); 3610 ret = del_balance_item(fs_info); 3611 if (ret) 3612 btrfs_handle_fs_error(fs_info, ret, NULL); 3613 } 3614 3615 /* 3616 * Balance filters.
Return 1 if chunk should be filtered out 3617 * (should not be balanced). 3618 */ 3619 static int chunk_profiles_filter(u64 chunk_type, 3620 struct btrfs_balance_args *bargs) 3621 { 3622 chunk_type = chunk_to_extended(chunk_type) & 3623 BTRFS_EXTENDED_PROFILE_MASK; 3624 3625 if (bargs->profiles & chunk_type) 3626 return 0; 3627 3628 return 1; 3629 } 3630 3631 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, 3632 struct btrfs_balance_args *bargs) 3633 { 3634 struct btrfs_block_group *cache; 3635 u64 chunk_used; 3636 u64 user_thresh_min; 3637 u64 user_thresh_max; 3638 int ret = 1; 3639 3640 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3641 chunk_used = cache->used; 3642 3643 if (bargs->usage_min == 0) 3644 user_thresh_min = 0; 3645 else 3646 user_thresh_min = mult_perc(cache->length, bargs->usage_min); 3647 3648 if (bargs->usage_max == 0) 3649 user_thresh_max = 1; 3650 else if (bargs->usage_max > 100) 3651 user_thresh_max = cache->length; 3652 else 3653 user_thresh_max = mult_perc(cache->length, bargs->usage_max); 3654 3655 if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max) 3656 ret = 0; 3657 3658 btrfs_put_block_group(cache); 3659 return ret; 3660 } 3661 3662 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, 3663 u64 chunk_offset, struct btrfs_balance_args *bargs) 3664 { 3665 struct btrfs_block_group *cache; 3666 u64 chunk_used, user_thresh; 3667 int ret = 1; 3668 3669 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3670 chunk_used = cache->used; 3671 3672 if (bargs->usage_min == 0) 3673 user_thresh = 1; 3674 else if (bargs->usage > 100) 3675 user_thresh = cache->length; 3676 else 3677 user_thresh = mult_perc(cache->length, bargs->usage); 3678 3679 if (chunk_used < user_thresh) 3680 ret = 0; 3681 3682 btrfs_put_block_group(cache); 3683 return ret; 3684 } 3685 3686 static int chunk_devid_filter(struct extent_buffer *leaf, 3687 struct btrfs_chunk *chunk, 3688 struct btrfs_balance_args *bargs) 3689 { 3690 struct btrfs_stripe *stripe; 3691 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3692 int i; 3693 3694 for (i = 0; i < num_stripes; i++) { 3695 stripe = btrfs_stripe_nr(chunk, i); 3696 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid) 3697 return 0; 3698 } 3699 3700 return 1; 3701 } 3702 3703 static u64 calc_data_stripes(u64 type, int num_stripes) 3704 { 3705 const int index = btrfs_bg_flags_to_raid_index(type); 3706 const int ncopies = btrfs_raid_array[index].ncopies; 3707 const int nparity = btrfs_raid_array[index].nparity; 3708 3709 return (num_stripes - nparity) / ncopies; 3710 } 3711 3712 /* [pstart, pend) */ 3713 static int chunk_drange_filter(struct extent_buffer *leaf, 3714 struct btrfs_chunk *chunk, 3715 struct btrfs_balance_args *bargs) 3716 { 3717 struct btrfs_stripe *stripe; 3718 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3719 u64 stripe_offset; 3720 u64 stripe_length; 3721 u64 type; 3722 int factor; 3723 int i; 3724 3725 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID)) 3726 return 0; 3727 3728 type = btrfs_chunk_type(leaf, chunk); 3729 factor = calc_data_stripes(type, num_stripes); 3730 3731 for (i = 0; i < num_stripes; i++) { 3732 stripe = btrfs_stripe_nr(chunk, i); 3733 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid) 3734 continue; 3735 3736 stripe_offset = btrfs_stripe_offset(leaf, stripe); 3737 stripe_length = btrfs_chunk_length(leaf, chunk); 3738 stripe_length = div_u64(stripe_length, factor); 3739 3740 if (stripe_offset < bargs->pend && 3741 stripe_offset + 
stripe_length > bargs->pstart) 3742 return 0; 3743 } 3744 3745 return 1; 3746 } 3747 3748 /* [vstart, vend) */ 3749 static int chunk_vrange_filter(struct extent_buffer *leaf, 3750 struct btrfs_chunk *chunk, 3751 u64 chunk_offset, 3752 struct btrfs_balance_args *bargs) 3753 { 3754 if (chunk_offset < bargs->vend && 3755 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart) 3756 /* at least part of the chunk is inside this vrange */ 3757 return 0; 3758 3759 return 1; 3760 } 3761 3762 static int chunk_stripes_range_filter(struct extent_buffer *leaf, 3763 struct btrfs_chunk *chunk, 3764 struct btrfs_balance_args *bargs) 3765 { 3766 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3767 3768 if (bargs->stripes_min <= num_stripes 3769 && num_stripes <= bargs->stripes_max) 3770 return 0; 3771 3772 return 1; 3773 } 3774 3775 static int chunk_soft_convert_filter(u64 chunk_type, 3776 struct btrfs_balance_args *bargs) 3777 { 3778 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 3779 return 0; 3780 3781 chunk_type = chunk_to_extended(chunk_type) & 3782 BTRFS_EXTENDED_PROFILE_MASK; 3783 3784 if (bargs->target == chunk_type) 3785 return 1; 3786 3787 return 0; 3788 } 3789 3790 static int should_balance_chunk(struct extent_buffer *leaf, 3791 struct btrfs_chunk *chunk, u64 chunk_offset) 3792 { 3793 struct btrfs_fs_info *fs_info = leaf->fs_info; 3794 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3795 struct btrfs_balance_args *bargs = NULL; 3796 u64 chunk_type = btrfs_chunk_type(leaf, chunk); 3797 3798 /* type filter */ 3799 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) & 3800 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) { 3801 return 0; 3802 } 3803 3804 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3805 bargs = &bctl->data; 3806 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3807 bargs = &bctl->sys; 3808 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3809 bargs = &bctl->meta; 3810 3811 /* profiles filter */ 3812 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) && 3813 chunk_profiles_filter(chunk_type, bargs)) { 3814 return 0; 3815 } 3816 3817 /* usage filter */ 3818 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) && 3819 chunk_usage_filter(fs_info, chunk_offset, bargs)) { 3820 return 0; 3821 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3822 chunk_usage_range_filter(fs_info, chunk_offset, bargs)) { 3823 return 0; 3824 } 3825 3826 /* devid filter */ 3827 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) && 3828 chunk_devid_filter(leaf, chunk, bargs)) { 3829 return 0; 3830 } 3831 3832 /* drange filter, makes sense only with devid filter */ 3833 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) && 3834 chunk_drange_filter(leaf, chunk, bargs)) { 3835 return 0; 3836 } 3837 3838 /* vrange filter */ 3839 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) && 3840 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) { 3841 return 0; 3842 } 3843 3844 /* stripes filter */ 3845 if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) && 3846 chunk_stripes_range_filter(leaf, chunk, bargs)) { 3847 return 0; 3848 } 3849 3850 /* soft profile changing mode */ 3851 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) && 3852 chunk_soft_convert_filter(chunk_type, bargs)) { 3853 return 0; 3854 } 3855 3856 /* 3857 * limited by count, must be the last filter 3858 */ 3859 if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) { 3860 if (bargs->limit == 0) 3861 return 0; 3862 else 3863 bargs->limit--; 3864 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) { 3865 /* 3866 * Same logic as the 'limit' filter; 
the minimum cannot be 3867 * determined here because we do not have the global information 3868 * about the count of all chunks that satisfy the filters. 3869 */ 3870 if (bargs->limit_max == 0) 3871 return 0; 3872 else 3873 bargs->limit_max--; 3874 } 3875 3876 return 1; 3877 } 3878 3879 static int __btrfs_balance(struct btrfs_fs_info *fs_info) 3880 { 3881 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3882 struct btrfs_root *chunk_root = fs_info->chunk_root; 3883 u64 chunk_type; 3884 struct btrfs_chunk *chunk; 3885 struct btrfs_path *path = NULL; 3886 struct btrfs_key key; 3887 struct btrfs_key found_key; 3888 struct extent_buffer *leaf; 3889 int slot; 3890 int ret; 3891 int enospc_errors = 0; 3892 bool counting = true; 3893 /* The single value limit and the min/max limits use the same bytes in the btrfs_balance_args (they share a union), so saving the u64 limit below preserves both forms. */ 3894 u64 limit_data = bctl->data.limit; 3895 u64 limit_meta = bctl->meta.limit; 3896 u64 limit_sys = bctl->sys.limit; 3897 u32 count_data = 0; 3898 u32 count_meta = 0; 3899 u32 count_sys = 0; 3900 int chunk_reserved = 0; 3901 3902 path = btrfs_alloc_path(); 3903 if (!path) { 3904 ret = -ENOMEM; 3905 goto error; 3906 } 3907 3908 /* zero out stat counters */ 3909 spin_lock(&fs_info->balance_lock); 3910 memset(&bctl->stat, 0, sizeof(bctl->stat)); 3911 spin_unlock(&fs_info->balance_lock); 3912 again: 3913 if (!counting) { 3914 /* 3915 * The single value limit and the min/max limits use the same bytes 3916 * in the btrfs_balance_args union; the counting pass consumed them, 3917 * so restore the saved values before the moving pass. */ 3918 bctl->data.limit = limit_data; 3919 bctl->meta.limit = limit_meta; 3920 bctl->sys.limit = limit_sys; 3921 } 3922 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3923 key.offset = (u64)-1; 3924 key.type = BTRFS_CHUNK_ITEM_KEY; 3925 3926 while (1) { 3927 if ((!counting && atomic_read(&fs_info->balance_pause_req)) || 3928 atomic_read(&fs_info->balance_cancel_req)) { 3929 ret = -ECANCELED; 3930 goto error; 3931 } 3932 3933 mutex_lock(&fs_info->reclaim_bgs_lock); 3934 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3935 if (ret < 0) { 3936 mutex_unlock(&fs_info->reclaim_bgs_lock); 3937 goto error; 3938 } 3939 3940 /* 3941 * this shouldn't happen, it means the last relocate 3942 * failed 3943 */ 3944 if (ret == 0) 3945 BUG(); /* FIXME break ?
*/ 3946 3947 ret = btrfs_previous_item(chunk_root, path, 0, 3948 BTRFS_CHUNK_ITEM_KEY); 3949 if (ret) { 3950 mutex_unlock(&fs_info->reclaim_bgs_lock); 3951 ret = 0; 3952 break; 3953 } 3954 3955 leaf = path->nodes[0]; 3956 slot = path->slots[0]; 3957 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3958 3959 if (found_key.objectid != key.objectid) { 3960 mutex_unlock(&fs_info->reclaim_bgs_lock); 3961 break; 3962 } 3963 3964 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 3965 chunk_type = btrfs_chunk_type(leaf, chunk); 3966 3967 if (!counting) { 3968 spin_lock(&fs_info->balance_lock); 3969 bctl->stat.considered++; 3970 spin_unlock(&fs_info->balance_lock); 3971 } 3972 3973 ret = should_balance_chunk(leaf, chunk, found_key.offset); 3974 3975 btrfs_release_path(path); 3976 if (!ret) { 3977 mutex_unlock(&fs_info->reclaim_bgs_lock); 3978 goto loop; 3979 } 3980 3981 if (counting) { 3982 mutex_unlock(&fs_info->reclaim_bgs_lock); 3983 spin_lock(&fs_info->balance_lock); 3984 bctl->stat.expected++; 3985 spin_unlock(&fs_info->balance_lock); 3986 3987 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3988 count_data++; 3989 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3990 count_sys++; 3991 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3992 count_meta++; 3993 3994 goto loop; 3995 } 3996 3997 /* 3998 * Apply limit_min filter, no need to check if the LIMITS 3999 * filter is used, limit_min is 0 by default 4000 */ 4001 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) && 4002 count_data < bctl->data.limit_min) 4003 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) && 4004 count_meta < bctl->meta.limit_min) 4005 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) && 4006 count_sys < bctl->sys.limit_min)) { 4007 mutex_unlock(&fs_info->reclaim_bgs_lock); 4008 goto loop; 4009 } 4010 4011 if (!chunk_reserved) { 4012 /* 4013 * We may be relocating the only data chunk we have, 4014 * which could end up losing the data raid profile, so 4015 * let's allocate an empty one in 4016 * advance. 4017 */ 4018 ret = btrfs_may_alloc_data_chunk(fs_info, 4019 found_key.offset); 4020 if (ret < 0) { 4021 mutex_unlock(&fs_info->reclaim_bgs_lock); 4022 goto error; 4023 } else if (ret == 1) { 4024 chunk_reserved = 1; 4025 } 4026 } 4027 4028 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 4029 mutex_unlock(&fs_info->reclaim_bgs_lock); 4030 if (ret == -ENOSPC) { 4031 enospc_errors++; 4032 } else if (ret == -ETXTBSY) { 4033 btrfs_info(fs_info, 4034 "skipping relocation of block group %llu due to active swapfile", 4035 found_key.offset); 4036 ret = 0; 4037 } else if (ret) { 4038 goto error; 4039 } else { 4040 spin_lock(&fs_info->balance_lock); 4041 bctl->stat.completed++; 4042 spin_unlock(&fs_info->balance_lock); 4043 } 4044 loop: 4045 if (found_key.offset == 0) 4046 break; 4047 key.offset = found_key.offset - 1; 4048 } 4049 4050 if (counting) { 4051 btrfs_release_path(path); 4052 counting = false; 4053 goto again; 4054 } 4055 error: 4056 btrfs_free_path(path); 4057 if (enospc_errors) { 4058 btrfs_info(fs_info, "%d enospc errors during balance", 4059 enospc_errors); 4060 if (!ret) 4061 ret = -ENOSPC; 4062 } 4063 4064 return ret; 4065 } 4066 4067 /* 4068 * See if a given profile is valid and reduced. 4069 * 4070 * @flags: profile to validate 4071 * @extended: if true @flags is treated as an extended profile 4072 */ 4073 static int alloc_profile_is_valid(u64 flags, int extended) 4074 { 4075 u64 mask = (extended ?
BTRFS_EXTENDED_PROFILE_MASK : 4076 BTRFS_BLOCK_GROUP_PROFILE_MASK); 4077 4078 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK; 4079 4080 /* 1) check that all other bits are zeroed */ 4081 if (flags & ~mask) 4082 return 0; 4083 4084 /* 2) see if profile is reduced */ 4085 if (flags == 0) 4086 return !extended; /* "0" is valid for usual profiles */ 4087 4088 return has_single_bit_set(flags); 4089 } 4090 4091 /* 4092 * Validate target profile against allowed profiles and return true if it's OK. 4093 * Otherwise print the error message and return false. 4094 */ 4095 static inline int validate_convert_profile(struct btrfs_fs_info *fs_info, 4096 const struct btrfs_balance_args *bargs, 4097 u64 allowed, const char *type) 4098 { 4099 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 4100 return true; 4101 4102 /* Profile is valid and does not have bits outside of the allowed set */ 4103 if (alloc_profile_is_valid(bargs->target, 1) && 4104 (bargs->target & ~allowed) == 0) 4105 return true; 4106 4107 btrfs_err(fs_info, "balance: invalid convert %s profile %s", 4108 type, btrfs_bg_type_to_raid_name(bargs->target)); 4109 return false; 4110 } 4111 4112 /* 4113 * Fill @buf with textual description of balance filter flags @bargs, up to 4114 * @size_buf including the terminating null. The output may be trimmed if it 4115 * does not fit into the provided buffer. 4116 */ 4117 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf, 4118 u32 size_buf) 4119 { 4120 int ret; 4121 u32 size_bp = size_buf; 4122 char *bp = buf; 4123 u64 flags = bargs->flags; 4124 char tmp_buf[128] = {'\0'}; 4125 4126 if (!flags) 4127 return; 4128 4129 #define CHECK_APPEND_NOARG(a) \ 4130 do { \ 4131 ret = snprintf(bp, size_bp, (a)); \ 4132 if (ret < 0 || ret >= size_bp) \ 4133 goto out_overflow; \ 4134 size_bp -= ret; \ 4135 bp += ret; \ 4136 } while (0) 4137 4138 #define CHECK_APPEND_1ARG(a, v1) \ 4139 do { \ 4140 ret = snprintf(bp, size_bp, (a), (v1)); \ 4141 if (ret < 0 || ret >= size_bp) \ 4142 goto out_overflow; \ 4143 size_bp -= ret; \ 4144 bp += ret; \ 4145 } while (0) 4146 4147 #define CHECK_APPEND_2ARG(a, v1, v2) \ 4148 do { \ 4149 ret = snprintf(bp, size_bp, (a), (v1), (v2)); \ 4150 if (ret < 0 || ret >= size_bp) \ 4151 goto out_overflow; \ 4152 size_bp -= ret; \ 4153 bp += ret; \ 4154 } while (0) 4155 4156 if (flags & BTRFS_BALANCE_ARGS_CONVERT) 4157 CHECK_APPEND_1ARG("convert=%s,", 4158 btrfs_bg_type_to_raid_name(bargs->target)); 4159 4160 if (flags & BTRFS_BALANCE_ARGS_SOFT) 4161 CHECK_APPEND_NOARG("soft,"); 4162 4163 if (flags & BTRFS_BALANCE_ARGS_PROFILES) { 4164 btrfs_describe_block_groups(bargs->profiles, tmp_buf, 4165 sizeof(tmp_buf)); 4166 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf); 4167 } 4168 4169 if (flags & BTRFS_BALANCE_ARGS_USAGE) 4170 CHECK_APPEND_1ARG("usage=%llu,", bargs->usage); 4171 4172 if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) 4173 CHECK_APPEND_2ARG("usage=%u..%u,", 4174 bargs->usage_min, bargs->usage_max); 4175 4176 if (flags & BTRFS_BALANCE_ARGS_DEVID) 4177 CHECK_APPEND_1ARG("devid=%llu,", bargs->devid); 4178 4179 if (flags & BTRFS_BALANCE_ARGS_DRANGE) 4180 CHECK_APPEND_2ARG("drange=%llu..%llu,", 4181 bargs->pstart, bargs->pend); 4182 4183 if (flags & BTRFS_BALANCE_ARGS_VRANGE) 4184 CHECK_APPEND_2ARG("vrange=%llu..%llu,", 4185 bargs->vstart, bargs->vend); 4186 4187 if (flags & BTRFS_BALANCE_ARGS_LIMIT) 4188 CHECK_APPEND_1ARG("limit=%llu,", bargs->limit); 4189 4190 if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE) 4191 CHECK_APPEND_2ARG("limit=%u..%u,", 4192 bargs->limit_min, 
bargs->limit_max); 4193 4194 if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) 4195 CHECK_APPEND_2ARG("stripes=%u..%u,", 4196 bargs->stripes_min, bargs->stripes_max); 4197 4198 #undef CHECK_APPEND_2ARG 4199 #undef CHECK_APPEND_1ARG 4200 #undef CHECK_APPEND_NOARG 4201 4202 out_overflow: 4203 4204 if (size_bp < size_buf) 4205 buf[size_buf - size_bp - 1] = '\0'; /* remove last , */ 4206 else 4207 buf[0] = '\0'; 4208 } 4209 4210 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info) 4211 { 4212 u32 size_buf = 1024; 4213 char tmp_buf[192] = {'\0'}; 4214 char *buf; 4215 char *bp; 4216 u32 size_bp = size_buf; 4217 int ret; 4218 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 4219 4220 buf = kzalloc(size_buf, GFP_KERNEL); 4221 if (!buf) 4222 return; 4223 4224 bp = buf; 4225 4226 #define CHECK_APPEND_1ARG(a, v1) \ 4227 do { \ 4228 ret = snprintf(bp, size_bp, (a), (v1)); \ 4229 if (ret < 0 || ret >= size_bp) \ 4230 goto out_overflow; \ 4231 size_bp -= ret; \ 4232 bp += ret; \ 4233 } while (0) 4234 4235 if (bctl->flags & BTRFS_BALANCE_FORCE) 4236 CHECK_APPEND_1ARG("%s", "-f "); 4237 4238 if (bctl->flags & BTRFS_BALANCE_DATA) { 4239 describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf)); 4240 CHECK_APPEND_1ARG("-d%s ", tmp_buf); 4241 } 4242 4243 if (bctl->flags & BTRFS_BALANCE_METADATA) { 4244 describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf)); 4245 CHECK_APPEND_1ARG("-m%s ", tmp_buf); 4246 } 4247 4248 if (bctl->flags & BTRFS_BALANCE_SYSTEM) { 4249 describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf)); 4250 CHECK_APPEND_1ARG("-s%s ", tmp_buf); 4251 } 4252 4253 #undef CHECK_APPEND_1ARG 4254 4255 out_overflow: 4256 4257 if (size_bp < size_buf) 4258 buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */ 4259 btrfs_info(fs_info, "balance: %s %s", 4260 (bctl->flags & BTRFS_BALANCE_RESUME) ? 4261 "resume" : "start", buf); 4262 4263 kfree(buf); 4264 } 4265 4266 /* 4267 * Should be called with the balance mutex held 4268 */ 4269 int btrfs_balance(struct btrfs_fs_info *fs_info, 4270 struct btrfs_balance_control *bctl, 4271 struct btrfs_ioctl_balance_args *bargs) 4272 { 4273 u64 meta_target, data_target; 4274 u64 allowed; 4275 int mixed = 0; 4276 int ret; 4277 u64 num_devices; 4278 unsigned seq; 4279 bool reducing_redundancy; 4280 bool paused = false; 4281 int i; 4282 4283 if (btrfs_fs_closing(fs_info) || 4284 atomic_read(&fs_info->balance_pause_req) || 4285 btrfs_should_cancel_balance(fs_info)) { 4286 ret = -EINVAL; 4287 goto out; 4288 } 4289 4290 allowed = btrfs_super_incompat_flags(fs_info->super_copy); 4291 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) 4292 mixed = 1; 4293 4294 /* 4295 * In case of mixed groups both data and meta should be picked, 4296 * and identical options should be given for both of them. 4297 */ 4298 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA; 4299 if (mixed && (bctl->flags & allowed)) { 4300 if (!(bctl->flags & BTRFS_BALANCE_DATA) || 4301 !(bctl->flags & BTRFS_BALANCE_METADATA) || 4302 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) { 4303 btrfs_err(fs_info, 4304 "balance: mixed groups data and metadata options must be the same"); 4305 ret = -EINVAL; 4306 goto out; 4307 } 4308 } 4309 4310 /* 4311 * rw_devices will not change at the moment, device add/delete/replace 4312 * are exclusive 4313 */ 4314 num_devices = fs_info->fs_devices->rw_devices; 4315 4316 /* 4317 * SINGLE profile on-disk has no profile bit, but in-memory we have a 4318 * special bit for it, to make it easier to distinguish.
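* (That in-memory bit is BTRFS_AVAIL_ALLOC_BIT_SINGLE, used in the allowed mask built below; it is never written to disk.)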
Thus we need 4319 * to set it manually, or balance would refuse the profile. 4320 */ 4321 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE; 4322 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) 4323 if (num_devices >= btrfs_raid_array[i].devs_min) 4324 allowed |= btrfs_raid_array[i].bg_flag; 4325 4326 if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") || 4327 !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") || 4328 !validate_convert_profile(fs_info, &bctl->sys, allowed, "system")) { 4329 ret = -EINVAL; 4330 goto out; 4331 } 4332 4333 /* 4334 * Allow to reduce metadata or system integrity only if force set for 4335 * profiles with redundancy (copies, parity) 4336 */ 4337 allowed = 0; 4338 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) { 4339 if (btrfs_raid_array[i].ncopies >= 2 || 4340 btrfs_raid_array[i].tolerated_failures >= 1) 4341 allowed |= btrfs_raid_array[i].bg_flag; 4342 } 4343 do { 4344 seq = read_seqbegin(&fs_info->profiles_lock); 4345 4346 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4347 (fs_info->avail_system_alloc_bits & allowed) && 4348 !(bctl->sys.target & allowed)) || 4349 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4350 (fs_info->avail_metadata_alloc_bits & allowed) && 4351 !(bctl->meta.target & allowed))) 4352 reducing_redundancy = true; 4353 else 4354 reducing_redundancy = false; 4355 4356 /* if we're not converting, the target field is uninitialized */ 4357 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 4358 bctl->meta.target : fs_info->avail_metadata_alloc_bits; 4359 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 4360 bctl->data.target : fs_info->avail_data_alloc_bits; 4361 } while (read_seqretry(&fs_info->profiles_lock, seq)); 4362 4363 if (reducing_redundancy) { 4364 if (bctl->flags & BTRFS_BALANCE_FORCE) { 4365 btrfs_info(fs_info, 4366 "balance: force reducing metadata redundancy"); 4367 } else { 4368 btrfs_err(fs_info, 4369 "balance: reduces metadata redundancy, use --force if you want this"); 4370 ret = -EINVAL; 4371 goto out; 4372 } 4373 } 4374 4375 if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) < 4376 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) { 4377 btrfs_warn(fs_info, 4378 "balance: metadata profile %s has lower redundancy than data profile %s", 4379 btrfs_bg_type_to_raid_name(meta_target), 4380 btrfs_bg_type_to_raid_name(data_target)); 4381 } 4382 4383 ret = insert_balance_item(fs_info, bctl); 4384 if (ret && ret != -EEXIST) 4385 goto out; 4386 4387 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) { 4388 BUG_ON(ret == -EEXIST); 4389 BUG_ON(fs_info->balance_ctl); 4390 spin_lock(&fs_info->balance_lock); 4391 fs_info->balance_ctl = bctl; 4392 spin_unlock(&fs_info->balance_lock); 4393 } else { 4394 BUG_ON(ret != -EEXIST); 4395 spin_lock(&fs_info->balance_lock); 4396 update_balance_args(bctl); 4397 spin_unlock(&fs_info->balance_lock); 4398 } 4399 4400 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4401 set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4402 describe_balance_start_or_resume(fs_info); 4403 mutex_unlock(&fs_info->balance_mutex); 4404 4405 ret = __btrfs_balance(fs_info); 4406 4407 mutex_lock(&fs_info->balance_mutex); 4408 if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req)) { 4409 btrfs_info(fs_info, "balance: paused"); 4410 btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED); 4411 paused = true; 4412 } 4413 /* 4414 * Balance can be canceled by: 4415 * 4416 * - Regular cancel request 4417 * Then ret == 
-ECANCELED and balance_cancel_req > 0 4418 * 4419 * - Fatal signal to "btrfs" process 4420 * Either the signal caught by wait_reserve_ticket() and callers 4421 * got -EINTR, or caught by btrfs_should_cancel_balance() and 4422 * got -ECANCELED. 4423 * Either way, in this case balance_cancel_req = 0, and 4424 * ret == -EINTR or ret == -ECANCELED. 4425 * 4426 * So here we only check the return value to catch canceled balance. 4427 */ 4428 else if (ret == -ECANCELED || ret == -EINTR) 4429 btrfs_info(fs_info, "balance: canceled"); 4430 else 4431 btrfs_info(fs_info, "balance: ended with status: %d", ret); 4432 4433 clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4434 4435 if (bargs) { 4436 memset(bargs, 0, sizeof(*bargs)); 4437 btrfs_update_ioctl_balance_args(fs_info, bargs); 4438 } 4439 4440 /* We didn't pause, we can clean everything up. */ 4441 if (!paused) { 4442 reset_balance_state(fs_info); 4443 btrfs_exclop_finish(fs_info); 4444 } 4445 4446 wake_up(&fs_info->balance_wait_q); 4447 4448 return ret; 4449 out: 4450 if (bctl->flags & BTRFS_BALANCE_RESUME) 4451 reset_balance_state(fs_info); 4452 else 4453 kfree(bctl); 4454 btrfs_exclop_finish(fs_info); 4455 4456 return ret; 4457 } 4458 4459 static int balance_kthread(void *data) 4460 { 4461 struct btrfs_fs_info *fs_info = data; 4462 int ret = 0; 4463 4464 sb_start_write(fs_info->sb); 4465 mutex_lock(&fs_info->balance_mutex); 4466 if (fs_info->balance_ctl) 4467 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL); 4468 mutex_unlock(&fs_info->balance_mutex); 4469 sb_end_write(fs_info->sb); 4470 4471 return ret; 4472 } 4473 4474 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info) 4475 { 4476 struct task_struct *tsk; 4477 4478 mutex_lock(&fs_info->balance_mutex); 4479 if (!fs_info->balance_ctl) { 4480 mutex_unlock(&fs_info->balance_mutex); 4481 return 0; 4482 } 4483 mutex_unlock(&fs_info->balance_mutex); 4484 4485 if (btrfs_test_opt(fs_info, SKIP_BALANCE)) { 4486 btrfs_info(fs_info, "balance: resume skipped"); 4487 return 0; 4488 } 4489 4490 spin_lock(&fs_info->super_lock); 4491 ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED); 4492 fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE; 4493 spin_unlock(&fs_info->super_lock); 4494 /* 4495 * A ro->rw remount sequence should continue with the paused balance 4496 * regardless of who pauses it, system or the user as of now, so set 4497 * the resume flag. 
4498 */ 4499 spin_lock(&fs_info->balance_lock); 4500 fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME; 4501 spin_unlock(&fs_info->balance_lock); 4502 4503 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance"); 4504 return PTR_ERR_OR_ZERO(tsk); 4505 } 4506 4507 int btrfs_recover_balance(struct btrfs_fs_info *fs_info) 4508 { 4509 struct btrfs_balance_control *bctl; 4510 struct btrfs_balance_item *item; 4511 struct btrfs_disk_balance_args disk_bargs; 4512 struct btrfs_path *path; 4513 struct extent_buffer *leaf; 4514 struct btrfs_key key; 4515 int ret; 4516 4517 path = btrfs_alloc_path(); 4518 if (!path) 4519 return -ENOMEM; 4520 4521 key.objectid = BTRFS_BALANCE_OBJECTID; 4522 key.type = BTRFS_TEMPORARY_ITEM_KEY; 4523 key.offset = 0; 4524 4525 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 4526 if (ret < 0) 4527 goto out; 4528 if (ret > 0) { /* ret = -ENOENT; */ 4529 ret = 0; 4530 goto out; 4531 } 4532 4533 bctl = kzalloc(sizeof(*bctl), GFP_NOFS); 4534 if (!bctl) { 4535 ret = -ENOMEM; 4536 goto out; 4537 } 4538 4539 leaf = path->nodes[0]; 4540 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 4541 4542 bctl->flags = btrfs_balance_flags(leaf, item); 4543 bctl->flags |= BTRFS_BALANCE_RESUME; 4544 4545 btrfs_balance_data(leaf, item, &disk_bargs); 4546 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs); 4547 btrfs_balance_meta(leaf, item, &disk_bargs); 4548 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs); 4549 btrfs_balance_sys(leaf, item, &disk_bargs); 4550 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs); 4551 4552 /* 4553 * This should never happen, as the paused balance state is recovered 4554 * during mount without any chance of other exclusive ops to collide. 4555 * 4556 * This gives the exclusive op status to balance and keeps in paused 4557 * state until user intervention (cancel or umount). If the ownership 4558 * cannot be assigned, show a message but do not fail. The balance 4559 * is in a paused state and must have fs_info::balance_ctl properly 4560 * set up. 
4561 */ 4562 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED)) 4563 btrfs_warn(fs_info, 4564 "balance: cannot set exclusive op status, resume manually"); 4565 4566 btrfs_release_path(path); 4567 4568 mutex_lock(&fs_info->balance_mutex); 4569 BUG_ON(fs_info->balance_ctl); 4570 spin_lock(&fs_info->balance_lock); 4571 fs_info->balance_ctl = bctl; 4572 spin_unlock(&fs_info->balance_lock); 4573 mutex_unlock(&fs_info->balance_mutex); 4574 out: 4575 btrfs_free_path(path); 4576 return ret; 4577 } 4578 4579 int btrfs_pause_balance(struct btrfs_fs_info *fs_info) 4580 { 4581 int ret = 0; 4582 4583 mutex_lock(&fs_info->balance_mutex); 4584 if (!fs_info->balance_ctl) { 4585 mutex_unlock(&fs_info->balance_mutex); 4586 return -ENOTCONN; 4587 } 4588 4589 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4590 atomic_inc(&fs_info->balance_pause_req); 4591 mutex_unlock(&fs_info->balance_mutex); 4592 4593 wait_event(fs_info->balance_wait_q, 4594 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4595 4596 mutex_lock(&fs_info->balance_mutex); 4597 /* we are good with balance_ctl ripped off from under us */ 4598 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4599 atomic_dec(&fs_info->balance_pause_req); 4600 } else { 4601 ret = -ENOTCONN; 4602 } 4603 4604 mutex_unlock(&fs_info->balance_mutex); 4605 return ret; 4606 } 4607 4608 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info) 4609 { 4610 mutex_lock(&fs_info->balance_mutex); 4611 if (!fs_info->balance_ctl) { 4612 mutex_unlock(&fs_info->balance_mutex); 4613 return -ENOTCONN; 4614 } 4615 4616 /* 4617 * A paused balance with the item stored on disk can be resumed at 4618 * mount time if the mount is read-write. Otherwise it's still paused 4619 * and we must not allow cancelling as it deletes the item. 4620 */ 4621 if (sb_rdonly(fs_info->sb)) { 4622 mutex_unlock(&fs_info->balance_mutex); 4623 return -EROFS; 4624 } 4625 4626 atomic_inc(&fs_info->balance_cancel_req); 4627 /* 4628 * if we are running just wait and return, balance item is 4629 * deleted in btrfs_balance in this case 4630 */ 4631 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4632 mutex_unlock(&fs_info->balance_mutex); 4633 wait_event(fs_info->balance_wait_q, 4634 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4635 mutex_lock(&fs_info->balance_mutex); 4636 } else { 4637 mutex_unlock(&fs_info->balance_mutex); 4638 /* 4639 * Lock released to allow other waiters to continue, we'll 4640 * reexamine the status again. 
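* (Another task may have resumed or cleaned up the balance while the mutex was released, hence the re-check of fs_info->balance_ctl below.)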
4641 */ 4642 mutex_lock(&fs_info->balance_mutex); 4643 4644 if (fs_info->balance_ctl) { 4645 reset_balance_state(fs_info); 4646 btrfs_exclop_finish(fs_info); 4647 btrfs_info(fs_info, "balance: canceled"); 4648 } 4649 } 4650 4651 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4652 atomic_dec(&fs_info->balance_cancel_req); 4653 mutex_unlock(&fs_info->balance_mutex); 4654 return 0; 4655 } 4656 4657 int btrfs_uuid_scan_kthread(void *data) 4658 { 4659 struct btrfs_fs_info *fs_info = data; 4660 struct btrfs_root *root = fs_info->tree_root; 4661 struct btrfs_key key; 4662 struct btrfs_path *path = NULL; 4663 int ret = 0; 4664 struct extent_buffer *eb; 4665 int slot; 4666 struct btrfs_root_item root_item; 4667 u32 item_size; 4668 struct btrfs_trans_handle *trans = NULL; 4669 bool closing = false; 4670 4671 path = btrfs_alloc_path(); 4672 if (!path) { 4673 ret = -ENOMEM; 4674 goto out; 4675 } 4676 4677 key.objectid = 0; 4678 key.type = BTRFS_ROOT_ITEM_KEY; 4679 key.offset = 0; 4680 4681 while (1) { 4682 if (btrfs_fs_closing(fs_info)) { 4683 closing = true; 4684 break; 4685 } 4686 ret = btrfs_search_forward(root, &key, path, 4687 BTRFS_OLDEST_GENERATION); 4688 if (ret) { 4689 if (ret > 0) 4690 ret = 0; 4691 break; 4692 } 4693 4694 if (key.type != BTRFS_ROOT_ITEM_KEY || 4695 (key.objectid < BTRFS_FIRST_FREE_OBJECTID && 4696 key.objectid != BTRFS_FS_TREE_OBJECTID) || 4697 key.objectid > BTRFS_LAST_FREE_OBJECTID) 4698 goto skip; 4699 4700 eb = path->nodes[0]; 4701 slot = path->slots[0]; 4702 item_size = btrfs_item_size(eb, slot); 4703 if (item_size < sizeof(root_item)) 4704 goto skip; 4705 4706 read_extent_buffer(eb, &root_item, 4707 btrfs_item_ptr_offset(eb, slot), 4708 (int)sizeof(root_item)); 4709 if (btrfs_root_refs(&root_item) == 0) 4710 goto skip; 4711 4712 if (!btrfs_is_empty_uuid(root_item.uuid) || 4713 !btrfs_is_empty_uuid(root_item.received_uuid)) { 4714 if (trans) 4715 goto update_tree; 4716 4717 btrfs_release_path(path); 4718 /* 4719 * 1 - subvol uuid item 4720 * 1 - received_subvol uuid item 4721 */ 4722 trans = btrfs_start_transaction(fs_info->uuid_root, 2); 4723 if (IS_ERR(trans)) { 4724 ret = PTR_ERR(trans); 4725 break; 4726 } 4727 continue; 4728 } else { 4729 goto skip; 4730 } 4731 update_tree: 4732 btrfs_release_path(path); 4733 if (!btrfs_is_empty_uuid(root_item.uuid)) { 4734 ret = btrfs_uuid_tree_add(trans, root_item.uuid, 4735 BTRFS_UUID_KEY_SUBVOL, 4736 key.objectid); 4737 if (ret < 0) { 4738 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4739 ret); 4740 break; 4741 } 4742 } 4743 4744 if (!btrfs_is_empty_uuid(root_item.received_uuid)) { 4745 ret = btrfs_uuid_tree_add(trans, 4746 root_item.received_uuid, 4747 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 4748 key.objectid); 4749 if (ret < 0) { 4750 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4751 ret); 4752 break; 4753 } 4754 } 4755 4756 skip: 4757 btrfs_release_path(path); 4758 if (trans) { 4759 ret = btrfs_end_transaction(trans); 4760 trans = NULL; 4761 if (ret) 4762 break; 4763 } 4764 4765 if (key.offset < (u64)-1) { 4766 key.offset++; 4767 } else if (key.type < BTRFS_ROOT_ITEM_KEY) { 4768 key.offset = 0; 4769 key.type = BTRFS_ROOT_ITEM_KEY; 4770 } else if (key.objectid < (u64)-1) { 4771 key.offset = 0; 4772 key.type = BTRFS_ROOT_ITEM_KEY; 4773 key.objectid++; 4774 } else { 4775 break; 4776 } 4777 cond_resched(); 4778 } 4779 4780 out: 4781 btrfs_free_path(path); 4782 if (trans && !IS_ERR(trans)) 4783 btrfs_end_transaction(trans); 4784 if (ret) 4785 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret); 4786 else if 
(!closing) 4787 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags); 4788 up(&fs_info->uuid_tree_rescan_sem); 4789 return 0; 4790 } 4791 4792 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info) 4793 { 4794 struct btrfs_trans_handle *trans; 4795 struct btrfs_root *tree_root = fs_info->tree_root; 4796 struct btrfs_root *uuid_root; 4797 struct task_struct *task; 4798 int ret; 4799 4800 /* 4801 * 1 - root node 4802 * 1 - root item 4803 */ 4804 trans = btrfs_start_transaction(tree_root, 2); 4805 if (IS_ERR(trans)) 4806 return PTR_ERR(trans); 4807 4808 uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID); 4809 if (IS_ERR(uuid_root)) { 4810 ret = PTR_ERR(uuid_root); 4811 btrfs_abort_transaction(trans, ret); 4812 btrfs_end_transaction(trans); 4813 return ret; 4814 } 4815 4816 fs_info->uuid_root = uuid_root; 4817 4818 ret = btrfs_commit_transaction(trans); 4819 if (ret) 4820 return ret; 4821 4822 down(&fs_info->uuid_tree_rescan_sem); 4823 task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid"); 4824 if (IS_ERR(task)) { 4825 /* fs_info->update_uuid_tree_gen remains 0 in all error cases */ 4826 btrfs_warn(fs_info, "failed to start uuid_scan task"); 4827 up(&fs_info->uuid_tree_rescan_sem); 4828 return PTR_ERR(task); 4829 } 4830 4831 return 0; 4832 } 4833 4834 /* 4835 * Shrinking a device means finding all of the device extents past 4836 * the new size, and then following the back refs to the chunks. 4837 * The chunk relocation code actually frees the device extent. 4838 */ 4839 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) 4840 { 4841 struct btrfs_fs_info *fs_info = device->fs_info; 4842 struct btrfs_root *root = fs_info->dev_root; 4843 struct btrfs_trans_handle *trans; 4844 struct btrfs_dev_extent *dev_extent = NULL; 4845 struct btrfs_path *path; 4846 u64 length; 4847 u64 chunk_offset; 4848 int ret; 4849 int slot; 4850 int failed = 0; 4851 bool retried = false; 4852 struct extent_buffer *l; 4853 struct btrfs_key key; 4854 struct btrfs_super_block *super_copy = fs_info->super_copy; 4855 u64 old_total = btrfs_super_total_bytes(super_copy); 4856 u64 old_size = btrfs_device_get_total_bytes(device); 4857 u64 diff; 4858 u64 start; 4859 4860 new_size = round_down(new_size, fs_info->sectorsize); 4861 start = new_size; 4862 diff = round_down(old_size - new_size, fs_info->sectorsize); 4863 4864 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 4865 return -EINVAL; 4866 4867 path = btrfs_alloc_path(); 4868 if (!path) 4869 return -ENOMEM; 4870 4871 path->reada = READA_BACK; 4872 4873 trans = btrfs_start_transaction(root, 0); 4874 if (IS_ERR(trans)) { 4875 btrfs_free_path(path); 4876 return PTR_ERR(trans); 4877 } 4878 4879 mutex_lock(&fs_info->chunk_mutex); 4880 4881 btrfs_device_set_total_bytes(device, new_size); 4882 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 4883 device->fs_devices->total_rw_bytes -= diff; 4884 atomic64_sub(diff, &fs_info->free_chunk_space); 4885 } 4886 4887 /* 4888 * Once the device's size has been set to the new size, ensure all 4889 * in-memory chunks are synced to disk so that the loop below sees them 4890 * and relocates them accordingly.
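* That is why the transaction is committed below when extents are still pending past the new size, and simply ended otherwise.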
4891 */ 4892 if (contains_pending_extent(device, &start, diff)) { 4893 mutex_unlock(&fs_info->chunk_mutex); 4894 ret = btrfs_commit_transaction(trans); 4895 if (ret) 4896 goto done; 4897 } else { 4898 mutex_unlock(&fs_info->chunk_mutex); 4899 btrfs_end_transaction(trans); 4900 } 4901 4902 again: 4903 key.objectid = device->devid; 4904 key.offset = (u64)-1; 4905 key.type = BTRFS_DEV_EXTENT_KEY; 4906 4907 do { 4908 mutex_lock(&fs_info->reclaim_bgs_lock); 4909 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4910 if (ret < 0) { 4911 mutex_unlock(&fs_info->reclaim_bgs_lock); 4912 goto done; 4913 } 4914 4915 ret = btrfs_previous_item(root, path, 0, key.type); 4916 if (ret) { 4917 mutex_unlock(&fs_info->reclaim_bgs_lock); 4918 if (ret < 0) 4919 goto done; 4920 ret = 0; 4921 btrfs_release_path(path); 4922 break; 4923 } 4924 4925 l = path->nodes[0]; 4926 slot = path->slots[0]; 4927 btrfs_item_key_to_cpu(l, &key, path->slots[0]); 4928 4929 if (key.objectid != device->devid) { 4930 mutex_unlock(&fs_info->reclaim_bgs_lock); 4931 btrfs_release_path(path); 4932 break; 4933 } 4934 4935 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 4936 length = btrfs_dev_extent_length(l, dev_extent); 4937 4938 if (key.offset + length <= new_size) { 4939 mutex_unlock(&fs_info->reclaim_bgs_lock); 4940 btrfs_release_path(path); 4941 break; 4942 } 4943 4944 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); 4945 btrfs_release_path(path); 4946 4947 /* 4948 * We may be relocating the only data chunk we have, 4949 * which could end up losing the data raid profile, so 4950 * let's allocate an empty one in 4951 * advance. 4952 */ 4953 ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset); 4954 if (ret < 0) { 4955 mutex_unlock(&fs_info->reclaim_bgs_lock); 4956 goto done; 4957 } 4958 4959 ret = btrfs_relocate_chunk(fs_info, chunk_offset); 4960 mutex_unlock(&fs_info->reclaim_bgs_lock); 4961 if (ret == -ENOSPC) { 4962 failed++; 4963 } else if (ret) { 4964 if (ret == -ETXTBSY) { 4965 btrfs_warn(fs_info, 4966 "could not shrink block group %llu due to active swapfile", 4967 chunk_offset); 4968 } 4969 goto done; 4970 } 4971 } while (key.offset-- > 0); 4972 4973 if (failed && !retried) { 4974 failed = 0; 4975 retried = true; 4976 goto again; 4977 } else if (failed && retried) { 4978 ret = -ENOSPC; 4979 goto done; 4980 } 4981 4982 /* Shrinking succeeded, else we would be at "done". */ 4983 trans = btrfs_start_transaction(root, 0); 4984 if (IS_ERR(trans)) { 4985 ret = PTR_ERR(trans); 4986 goto done; 4987 } 4988 4989 mutex_lock(&fs_info->chunk_mutex); 4990 /* Clear all state bits beyond the shrunk device size */ 4991 clear_extent_bits(&device->alloc_state, new_size, (u64)-1, 4992 CHUNK_STATE_MASK); 4993 4994 btrfs_device_set_disk_total_bytes(device, new_size); 4995 if (list_empty(&device->post_commit_list)) 4996 list_add_tail(&device->post_commit_list, 4997 &trans->transaction->dev_update_list); 4998 4999 WARN_ON(diff > old_total); 5000 btrfs_set_super_total_bytes(super_copy, 5001 round_down(old_total - diff, fs_info->sectorsize)); 5002 mutex_unlock(&fs_info->chunk_mutex); 5003 5004 btrfs_reserve_chunk_metadata(trans, false); 5005 /* Now btrfs_update_device() will change the on-disk size.
*/ 5006 ret = btrfs_update_device(trans, device); 5007 btrfs_trans_release_chunk_metadata(trans); 5008 if (ret < 0) { 5009 btrfs_abort_transaction(trans, ret); 5010 btrfs_end_transaction(trans); 5011 } else { 5012 ret = btrfs_commit_transaction(trans); 5013 } 5014 done: 5015 btrfs_free_path(path); 5016 if (ret) { 5017 mutex_lock(&fs_info->chunk_mutex); 5018 btrfs_device_set_total_bytes(device, old_size); 5019 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 5020 device->fs_devices->total_rw_bytes += diff; 5021 atomic64_add(diff, &fs_info->free_chunk_space); 5022 mutex_unlock(&fs_info->chunk_mutex); 5023 } 5024 return ret; 5025 } 5026 5027 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info, 5028 struct btrfs_key *key, 5029 struct btrfs_chunk *chunk, int item_size) 5030 { 5031 struct btrfs_super_block *super_copy = fs_info->super_copy; 5032 struct btrfs_disk_key disk_key; 5033 u32 array_size; 5034 u8 *ptr; 5035 5036 lockdep_assert_held(&fs_info->chunk_mutex); 5037 5038 array_size = btrfs_super_sys_array_size(super_copy); 5039 if (array_size + item_size + sizeof(disk_key) 5040 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) 5041 return -EFBIG; 5042 5043 ptr = super_copy->sys_chunk_array + array_size; 5044 btrfs_cpu_key_to_disk(&disk_key, key); 5045 memcpy(ptr, &disk_key, sizeof(disk_key)); 5046 ptr += sizeof(disk_key); 5047 memcpy(ptr, chunk, item_size); 5048 item_size += sizeof(disk_key); 5049 btrfs_set_super_sys_array_size(super_copy, array_size + item_size); 5050 5051 return 0; 5052 } 5053 5054 /* 5055 * sort the devices in descending order by max_avail, total_avail 5056 */ 5057 static int btrfs_cmp_device_info(const void *a, const void *b) 5058 { 5059 const struct btrfs_device_info *di_a = a; 5060 const struct btrfs_device_info *di_b = b; 5061 5062 if (di_a->max_avail > di_b->max_avail) 5063 return -1; 5064 if (di_a->max_avail < di_b->max_avail) 5065 return 1; 5066 if (di_a->total_avail > di_b->total_avail) 5067 return -1; 5068 if (di_a->total_avail < di_b->total_avail) 5069 return 1; 5070 return 0; 5071 } 5072 5073 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type) 5074 { 5075 if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK)) 5076 return; 5077 5078 btrfs_set_fs_incompat(info, RAID56); 5079 } 5080 5081 static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type) 5082 { 5083 if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4))) 5084 return; 5085 5086 btrfs_set_fs_incompat(info, RAID1C34); 5087 } 5088 5089 /* 5090 * Structure used internally for btrfs_create_chunk() function. 5091 * Wraps needed parameters. 
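* As an illustration, using the raid10 parameters from btrfs_raid_array: with 6 usable devices, dev_stripes = 1 gives num_stripes = ndevs * dev_stripes = 6, and with ncopies = 2 and nparity = 0 the chunk covers (num_stripes - nparity) / ncopies = 3 stripes worth of logical space, so chunk_size = 3 * stripe_size.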
5092 */ 5093 struct alloc_chunk_ctl { 5094 u64 start; 5095 u64 type; 5096 /* Total number of stripes to allocate */ 5097 int num_stripes; 5098 /* sub_stripes info for map */ 5099 int sub_stripes; 5100 /* Stripes per device */ 5101 int dev_stripes; 5102 /* Maximum number of devices to use */ 5103 int devs_max; 5104 /* Minimum number of devices to use */ 5105 int devs_min; 5106 /* ndevs has to be a multiple of this */ 5107 int devs_increment; 5108 /* Number of copies */ 5109 int ncopies; 5110 /* Number of stripes worth of bytes to store parity information */ 5111 int nparity; 5112 u64 max_stripe_size; 5113 u64 max_chunk_size; 5114 u64 dev_extent_min; 5115 u64 stripe_size; 5116 u64 chunk_size; 5117 int ndevs; 5118 }; 5119 5120 static void init_alloc_chunk_ctl_policy_regular( 5121 struct btrfs_fs_devices *fs_devices, 5122 struct alloc_chunk_ctl *ctl) 5123 { 5124 struct btrfs_space_info *space_info; 5125 5126 space_info = btrfs_find_space_info(fs_devices->fs_info, ctl->type); 5127 ASSERT(space_info); 5128 5129 ctl->max_chunk_size = READ_ONCE(space_info->chunk_size); 5130 ctl->max_stripe_size = min_t(u64, ctl->max_chunk_size, SZ_1G); 5131 5132 if (ctl->type & BTRFS_BLOCK_GROUP_SYSTEM) 5133 ctl->devs_max = min_t(int, ctl->devs_max, BTRFS_MAX_DEVS_SYS_CHUNK); 5134 5135 /* We don't want a chunk larger than 10% of writable space */ 5136 ctl->max_chunk_size = min(mult_perc(fs_devices->total_rw_bytes, 10), 5137 ctl->max_chunk_size); 5138 ctl->dev_extent_min = btrfs_stripe_nr_to_offset(ctl->dev_stripes); 5139 } 5140 5141 static void init_alloc_chunk_ctl_policy_zoned( 5142 struct btrfs_fs_devices *fs_devices, 5143 struct alloc_chunk_ctl *ctl) 5144 { 5145 u64 zone_size = fs_devices->fs_info->zone_size; 5146 u64 limit; 5147 int min_num_stripes = ctl->devs_min * ctl->dev_stripes; 5148 int min_data_stripes = (min_num_stripes - ctl->nparity) / ctl->ncopies; 5149 u64 min_chunk_size = min_data_stripes * zone_size; 5150 u64 type = ctl->type; 5151 5152 ctl->max_stripe_size = zone_size; 5153 if (type & BTRFS_BLOCK_GROUP_DATA) { 5154 ctl->max_chunk_size = round_down(BTRFS_MAX_DATA_CHUNK_SIZE, 5155 zone_size); 5156 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 5157 ctl->max_chunk_size = ctl->max_stripe_size; 5158 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 5159 ctl->max_chunk_size = 2 * ctl->max_stripe_size; 5160 ctl->devs_max = min_t(int, ctl->devs_max, 5161 BTRFS_MAX_DEVS_SYS_CHUNK); 5162 } else { 5163 BUG(); 5164 } 5165 5166 /* We don't want a chunk larger than 10% of writable space */ 5167 limit = max(round_down(mult_perc(fs_devices->total_rw_bytes, 10), 5168 zone_size), 5169 min_chunk_size); 5170 ctl->max_chunk_size = min(limit, ctl->max_chunk_size); 5171 ctl->dev_extent_min = zone_size * ctl->dev_stripes; 5172 } 5173 5174 static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices, 5175 struct alloc_chunk_ctl *ctl) 5176 { 5177 int index = btrfs_bg_flags_to_raid_index(ctl->type); 5178 5179 ctl->sub_stripes = btrfs_raid_array[index].sub_stripes; 5180 ctl->dev_stripes = btrfs_raid_array[index].dev_stripes; 5181 ctl->devs_max = btrfs_raid_array[index].devs_max; 5182 if (!ctl->devs_max) 5183 ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info); 5184 ctl->devs_min = btrfs_raid_array[index].devs_min; 5185 ctl->devs_increment = btrfs_raid_array[index].devs_increment; 5186 ctl->ncopies = btrfs_raid_array[index].ncopies; 5187 ctl->nparity = btrfs_raid_array[index].nparity; 5188 ctl->ndevs = 0; 5189 5190 switch (fs_devices->chunk_alloc_policy) { 5191 case BTRFS_CHUNK_ALLOC_REGULAR: 5192 
init_alloc_chunk_ctl_policy_regular(fs_devices, ctl); 5193 break; 5194 case BTRFS_CHUNK_ALLOC_ZONED: 5195 init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl); 5196 break; 5197 default: 5198 BUG(); 5199 } 5200 } 5201 5202 static int gather_device_info(struct btrfs_fs_devices *fs_devices, 5203 struct alloc_chunk_ctl *ctl, 5204 struct btrfs_device_info *devices_info) 5205 { 5206 struct btrfs_fs_info *info = fs_devices->fs_info; 5207 struct btrfs_device *device; 5208 u64 total_avail; 5209 u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes; 5210 int ret; 5211 int ndevs = 0; 5212 u64 max_avail; 5213 u64 dev_offset; 5214 5215 /* 5216 * in the first pass through the devices list, we gather information 5217 * about the available holes on each device. 5218 */ 5219 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { 5220 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 5221 WARN(1, KERN_ERR 5222 "BTRFS: read-only device in alloc_list\n"); 5223 continue; 5224 } 5225 5226 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 5227 &device->dev_state) || 5228 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 5229 continue; 5230 5231 if (device->total_bytes > device->bytes_used) 5232 total_avail = device->total_bytes - device->bytes_used; 5233 else 5234 total_avail = 0; 5235 5236 /* If there is no space on this device, skip it. */ 5237 if (total_avail < ctl->dev_extent_min) 5238 continue; 5239 5240 ret = find_free_dev_extent(device, dev_extent_want, &dev_offset, 5241 &max_avail); 5242 if (ret && ret != -ENOSPC) 5243 return ret; 5244 5245 if (ret == 0) 5246 max_avail = dev_extent_want; 5247 5248 if (max_avail < ctl->dev_extent_min) { 5249 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 5250 btrfs_debug(info, 5251 "%s: devid %llu has no free space, have=%llu want=%llu", 5252 __func__, device->devid, max_avail, 5253 ctl->dev_extent_min); 5254 continue; 5255 } 5256 5257 if (ndevs == fs_devices->rw_devices) { 5258 WARN(1, "%s: found more than %llu devices\n", 5259 __func__, fs_devices->rw_devices); 5260 break; 5261 } 5262 devices_info[ndevs].dev_offset = dev_offset; 5263 devices_info[ndevs].max_avail = max_avail; 5264 devices_info[ndevs].total_avail = total_avail; 5265 devices_info[ndevs].dev = device; 5266 ++ndevs; 5267 } 5268 ctl->ndevs = ndevs; 5269 5270 /* 5271 * now sort the devices by hole size / available space 5272 */ 5273 sort(devices_info, ndevs, sizeof(struct btrfs_device_info), 5274 btrfs_cmp_device_info, NULL); 5275 5276 return 0; 5277 } 5278 5279 static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl, 5280 struct btrfs_device_info *devices_info) 5281 { 5282 /* Number of stripes that count for block group size */ 5283 int data_stripes; 5284 5285 /* 5286 * The primary goal is to maximize the number of stripes, so use as 5287 * many devices as possible, even if the stripes are not maximum sized. 5288 * 5289 * The DUP profile stores more than one stripe per device, the 5290 * max_avail is the total size so we have to adjust. 5291 */ 5292 ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail, 5293 ctl->dev_stripes); 5294 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5295 5296 /* This will have to be fixed for RAID1 and RAID10 over more drives */ 5297 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5298 5299 /* 5300 * Use the number of data stripes to figure out how big this chunk is 5301 * really going to be in terms of logical address space, and compare 5302 * that answer with the max chunk size. 
If it's higher, we try to 5303 * reduce stripe_size. 5304 */ 5305 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) { 5306 /* 5307 * Reduce stripe_size, round it up to a 16MB boundary again and 5308 * then use it, unless it ends up being even bigger than the 5309 * previous value we had already. 5310 */ 5311 ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size, 5312 data_stripes), SZ_16M), 5313 ctl->stripe_size); 5314 } 5315 5316 /* Stripe size should not go beyond 1G. */ 5317 ctl->stripe_size = min_t(u64, ctl->stripe_size, SZ_1G); 5318 5319 /* Align to BTRFS_STRIPE_LEN */ 5320 ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN); 5321 ctl->chunk_size = ctl->stripe_size * data_stripes; 5322 5323 return 0; 5324 } 5325 5326 static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl, 5327 struct btrfs_device_info *devices_info) 5328 { 5329 u64 zone_size = devices_info[0].dev->zone_info->zone_size; 5330 /* Number of stripes that count for block group size */ 5331 int data_stripes; 5332 5333 /* 5334 * It should hold because: 5335 * dev_extent_min == dev_extent_want == zone_size * dev_stripes 5336 */ 5337 ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min); 5338 5339 ctl->stripe_size = zone_size; 5340 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5341 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5342 5343 /* stripe_size is fixed in zoned filesystems. Reduce ndevs instead. */ 5344 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) { 5345 ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies, 5346 ctl->stripe_size) + ctl->nparity, 5347 ctl->dev_stripes); 5348 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5349 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5350 ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size); 5351 } 5352 5353 ctl->chunk_size = ctl->stripe_size * data_stripes; 5354 5355 return 0; 5356 } 5357 5358 static int decide_stripe_size(struct btrfs_fs_devices *fs_devices, 5359 struct alloc_chunk_ctl *ctl, 5360 struct btrfs_device_info *devices_info) 5361 { 5362 struct btrfs_fs_info *info = fs_devices->fs_info; 5363 5364 /* 5365 * Round down to number of usable stripes, devs_increment can be any 5366 * number so we can't use round_down() that requires power of 2, while 5367 * rounddown is safe.
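* E.g. raid10 has devs_increment = 2, so an odd ndevs of 5 is rounded down to 4 usable devices here.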
5368 */ 5369 ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment); 5370 5371 if (ctl->ndevs < ctl->devs_min) { 5372 if (btrfs_test_opt(info, ENOSPC_DEBUG)) { 5373 btrfs_debug(info, 5374 "%s: not enough devices with free space: have=%d minimum required=%d", 5375 __func__, ctl->ndevs, ctl->devs_min); 5376 } 5377 return -ENOSPC; 5378 } 5379 5380 ctl->ndevs = min(ctl->ndevs, ctl->devs_max); 5381 5382 switch (fs_devices->chunk_alloc_policy) { 5383 case BTRFS_CHUNK_ALLOC_REGULAR: 5384 return decide_stripe_size_regular(ctl, devices_info); 5385 case BTRFS_CHUNK_ALLOC_ZONED: 5386 return decide_stripe_size_zoned(ctl, devices_info); 5387 default: 5388 BUG(); 5389 } 5390 } 5391 5392 static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans, 5393 struct alloc_chunk_ctl *ctl, 5394 struct btrfs_device_info *devices_info) 5395 { 5396 struct btrfs_fs_info *info = trans->fs_info; 5397 struct map_lookup *map = NULL; 5398 struct extent_map_tree *em_tree; 5399 struct btrfs_block_group *block_group; 5400 struct extent_map *em; 5401 u64 start = ctl->start; 5402 u64 type = ctl->type; 5403 int ret; 5404 int i; 5405 int j; 5406 5407 map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS); 5408 if (!map) 5409 return ERR_PTR(-ENOMEM); 5410 map->num_stripes = ctl->num_stripes; 5411 5412 for (i = 0; i < ctl->ndevs; ++i) { 5413 for (j = 0; j < ctl->dev_stripes; ++j) { 5414 int s = i * ctl->dev_stripes + j; 5415 map->stripes[s].dev = devices_info[i].dev; 5416 map->stripes[s].physical = devices_info[i].dev_offset + 5417 j * ctl->stripe_size; 5418 } 5419 } 5420 map->io_align = BTRFS_STRIPE_LEN; 5421 map->io_width = BTRFS_STRIPE_LEN; 5422 map->type = type; 5423 map->sub_stripes = ctl->sub_stripes; 5424 5425 trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size); 5426 5427 em = alloc_extent_map(); 5428 if (!em) { 5429 kfree(map); 5430 return ERR_PTR(-ENOMEM); 5431 } 5432 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 5433 em->map_lookup = map; 5434 em->start = start; 5435 em->len = ctl->chunk_size; 5436 em->block_start = 0; 5437 em->block_len = em->len; 5438 em->orig_block_len = ctl->stripe_size; 5439 5440 em_tree = &info->mapping_tree; 5441 write_lock(&em_tree->lock); 5442 ret = add_extent_mapping(em_tree, em, 0); 5443 if (ret) { 5444 write_unlock(&em_tree->lock); 5445 free_extent_map(em); 5446 return ERR_PTR(ret); 5447 } 5448 write_unlock(&em_tree->lock); 5449 5450 block_group = btrfs_make_block_group(trans, type, start, ctl->chunk_size); 5451 if (IS_ERR(block_group)) 5452 goto error_del_extent; 5453 5454 for (i = 0; i < map->num_stripes; i++) { 5455 struct btrfs_device *dev = map->stripes[i].dev; 5456 5457 btrfs_device_set_bytes_used(dev, 5458 dev->bytes_used + ctl->stripe_size); 5459 if (list_empty(&dev->post_commit_list)) 5460 list_add_tail(&dev->post_commit_list, 5461 &trans->transaction->dev_update_list); 5462 } 5463 5464 atomic64_sub(ctl->stripe_size * map->num_stripes, 5465 &info->free_chunk_space); 5466 5467 free_extent_map(em); 5468 check_raid56_incompat_flag(info, type); 5469 check_raid1c34_incompat_flag(info, type); 5470 5471 return block_group; 5472 5473 error_del_extent: 5474 write_lock(&em_tree->lock); 5475 remove_extent_mapping(em_tree, em); 5476 write_unlock(&em_tree->lock); 5477 5478 /* One for our allocation */ 5479 free_extent_map(em); 5480 /* One for the tree reference */ 5481 free_extent_map(em); 5482 5483 return block_group; 5484 } 5485 5486 struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans, 5487 u64 type) 5488 { 5489 struct btrfs_fs_info *info 
= trans->fs_info; 5490 struct btrfs_fs_devices *fs_devices = info->fs_devices; 5491 struct btrfs_device_info *devices_info = NULL; 5492 struct alloc_chunk_ctl ctl; 5493 struct btrfs_block_group *block_group; 5494 int ret; 5495 5496 lockdep_assert_held(&info->chunk_mutex); 5497 5498 if (!alloc_profile_is_valid(type, 0)) { 5499 ASSERT(0); 5500 return ERR_PTR(-EINVAL); 5501 } 5502 5503 if (list_empty(&fs_devices->alloc_list)) { 5504 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 5505 btrfs_debug(info, "%s: no writable device", __func__); 5506 return ERR_PTR(-ENOSPC); 5507 } 5508 5509 if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { 5510 btrfs_err(info, "invalid chunk type 0x%llx requested", type); 5511 ASSERT(0); 5512 return ERR_PTR(-EINVAL); 5513 } 5514 5515 ctl.start = find_next_chunk(info); 5516 ctl.type = type; 5517 init_alloc_chunk_ctl(fs_devices, &ctl); 5518 5519 devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info), 5520 GFP_NOFS); 5521 if (!devices_info) 5522 return ERR_PTR(-ENOMEM); 5523 5524 ret = gather_device_info(fs_devices, &ctl, devices_info); 5525 if (ret < 0) { 5526 block_group = ERR_PTR(ret); 5527 goto out; 5528 } 5529 5530 ret = decide_stripe_size(fs_devices, &ctl, devices_info); 5531 if (ret < 0) { 5532 block_group = ERR_PTR(ret); 5533 goto out; 5534 } 5535 5536 block_group = create_chunk(trans, &ctl, devices_info); 5537 5538 out: 5539 kfree(devices_info); 5540 return block_group; 5541 } 5542 5543 /* 5544 * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to 5545 * phase 1 of chunk allocation. It belongs to phase 2 only when allocating system 5546 * chunks. 5547 * 5548 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 5549 * phases. 5550 */ 5551 int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans, 5552 struct btrfs_block_group *bg) 5553 { 5554 struct btrfs_fs_info *fs_info = trans->fs_info; 5555 struct btrfs_root *chunk_root = fs_info->chunk_root; 5556 struct btrfs_key key; 5557 struct btrfs_chunk *chunk; 5558 struct btrfs_stripe *stripe; 5559 struct extent_map *em; 5560 struct map_lookup *map; 5561 size_t item_size; 5562 int i; 5563 int ret; 5564 5565 /* 5566 * We take the chunk_mutex for 2 reasons: 5567 * 5568 * 1) Updates and insertions in the chunk btree must be done while holding 5569 * the chunk_mutex, as well as updating the system chunk array in the 5570 * superblock. See the comment on top of btrfs_chunk_alloc() for the 5571 * details; 5572 * 5573 * 2) To prevent races with the final phase of a device replace operation 5574 * that replaces the device object associated with the map's stripes, 5575 * because the device object's id can change at any time during that 5576 * final phase of the device replace operation 5577 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the 5578 * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID, 5579 * which would cause a failure when updating the device item, which does 5580 * not exist, or when persisting a stripe of the chunk item with such an ID. 5581 * Here we can't use the device_list_mutex because our caller already 5582 * has locked the chunk_mutex, and the final phase of device replace 5583 * acquires both mutexes - first the device_list_mutex and then the 5584 * chunk_mutex. Using either of those two mutexes protects us from a 5585 * concurrent device replace.
5586 */ 5587 lockdep_assert_held(&fs_info->chunk_mutex); 5588 5589 em = btrfs_get_chunk_map(fs_info, bg->start, bg->length); 5590 if (IS_ERR(em)) { 5591 ret = PTR_ERR(em); 5592 btrfs_abort_transaction(trans, ret); 5593 return ret; 5594 } 5595 5596 map = em->map_lookup; 5597 item_size = btrfs_chunk_item_size(map->num_stripes); 5598 5599 chunk = kzalloc(item_size, GFP_NOFS); 5600 if (!chunk) { 5601 ret = -ENOMEM; 5602 btrfs_abort_transaction(trans, ret); 5603 goto out; 5604 } 5605 5606 for (i = 0; i < map->num_stripes; i++) { 5607 struct btrfs_device *device = map->stripes[i].dev; 5608 5609 ret = btrfs_update_device(trans, device); 5610 if (ret) 5611 goto out; 5612 } 5613 5614 stripe = &chunk->stripe; 5615 for (i = 0; i < map->num_stripes; i++) { 5616 struct btrfs_device *device = map->stripes[i].dev; 5617 const u64 dev_offset = map->stripes[i].physical; 5618 5619 btrfs_set_stack_stripe_devid(stripe, device->devid); 5620 btrfs_set_stack_stripe_offset(stripe, dev_offset); 5621 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE); 5622 stripe++; 5623 } 5624 5625 btrfs_set_stack_chunk_length(chunk, bg->length); 5626 btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID); 5627 btrfs_set_stack_chunk_stripe_len(chunk, BTRFS_STRIPE_LEN); 5628 btrfs_set_stack_chunk_type(chunk, map->type); 5629 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes); 5630 btrfs_set_stack_chunk_io_align(chunk, BTRFS_STRIPE_LEN); 5631 btrfs_set_stack_chunk_io_width(chunk, BTRFS_STRIPE_LEN); 5632 btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize); 5633 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes); 5634 5635 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 5636 key.type = BTRFS_CHUNK_ITEM_KEY; 5637 key.offset = bg->start; 5638 5639 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size); 5640 if (ret) 5641 goto out; 5642 5643 set_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED, &bg->runtime_flags); 5644 5645 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 5646 ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size); 5647 if (ret) 5648 goto out; 5649 } 5650 5651 out: 5652 kfree(chunk); 5653 free_extent_map(em); 5654 return ret; 5655 } 5656 5657 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans) 5658 { 5659 struct btrfs_fs_info *fs_info = trans->fs_info; 5660 u64 alloc_profile; 5661 struct btrfs_block_group *meta_bg; 5662 struct btrfs_block_group *sys_bg; 5663 5664 /* 5665 * When adding a new device for sprouting, the seed device is read-only 5666 * so we must first allocate a metadata and a system chunk. But before 5667 * adding the block group items to the extent, device and chunk btrees, 5668 * we must first: 5669 * 5670 * 1) Create both chunks without doing any changes to the btrees, as 5671 * otherwise we would get -ENOSPC since the block groups from the 5672 * seed device are read-only; 5673 * 5674 * 2) Add the device item for the new sprout device - finishing the setup 5675 * of a new block group requires updating the device item in the chunk 5676 * btree, so it must exist when we attempt to do it. The previous step 5677 * ensures this does not fail with -ENOSPC. 5678 * 5679 * After that we can add the block group items to their btrees: 5680 * update existing device item in the chunk btree, add a new block group 5681 * item to the extent btree, add a new chunk item to the chunk btree and 5682 * finally add the new device extent items to the devices btree. 
5683 */ 5684 5685 alloc_profile = btrfs_metadata_alloc_profile(fs_info); 5686 meta_bg = btrfs_create_chunk(trans, alloc_profile); 5687 if (IS_ERR(meta_bg)) 5688 return PTR_ERR(meta_bg); 5689 5690 alloc_profile = btrfs_system_alloc_profile(fs_info); 5691 sys_bg = btrfs_create_chunk(trans, alloc_profile); 5692 if (IS_ERR(sys_bg)) 5693 return PTR_ERR(sys_bg); 5694 5695 return 0; 5696 } 5697 5698 static inline int btrfs_chunk_max_errors(struct map_lookup *map) 5699 { 5700 const int index = btrfs_bg_flags_to_raid_index(map->type); 5701 5702 return btrfs_raid_array[index].tolerated_failures; 5703 } 5704 5705 bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset) 5706 { 5707 struct extent_map *em; 5708 struct map_lookup *map; 5709 int miss_ndevs = 0; 5710 int i; 5711 bool ret = true; 5712 5713 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 5714 if (IS_ERR(em)) 5715 return false; 5716 5717 map = em->map_lookup; 5718 for (i = 0; i < map->num_stripes; i++) { 5719 if (test_bit(BTRFS_DEV_STATE_MISSING, 5720 &map->stripes[i].dev->dev_state)) { 5721 miss_ndevs++; 5722 continue; 5723 } 5724 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, 5725 &map->stripes[i].dev->dev_state)) { 5726 ret = false; 5727 goto end; 5728 } 5729 } 5730 5731 /* 5732 * If the number of missing devices is larger than max errors, we can 5733 * not write the data into that chunk successfully. 5734 */ 5735 if (miss_ndevs > btrfs_chunk_max_errors(map)) 5736 ret = false; 5737 end: 5738 free_extent_map(em); 5739 return ret; 5740 } 5741 5742 void btrfs_mapping_tree_free(struct extent_map_tree *tree) 5743 { 5744 struct extent_map *em; 5745 5746 while (1) { 5747 write_lock(&tree->lock); 5748 em = lookup_extent_mapping(tree, 0, (u64)-1); 5749 if (em) 5750 remove_extent_mapping(tree, em); 5751 write_unlock(&tree->lock); 5752 if (!em) 5753 break; 5754 /* once for us */ 5755 free_extent_map(em); 5756 /* once for the tree */ 5757 free_extent_map(em); 5758 } 5759 } 5760 5761 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5762 { 5763 struct extent_map *em; 5764 struct map_lookup *map; 5765 enum btrfs_raid_types index; 5766 int ret = 1; 5767 5768 em = btrfs_get_chunk_map(fs_info, logical, len); 5769 if (IS_ERR(em)) 5770 /* 5771 * We could return errors for these cases, but that could get 5772 * ugly and we'd probably do the same thing which is just not do 5773 * anything else and exit, so return 1 so the callers don't try 5774 * to use other copies. 5775 */ 5776 return 1; 5777 5778 map = em->map_lookup; 5779 index = btrfs_bg_flags_to_raid_index(map->type); 5780 5781 /* Non-RAID56, use their ncopies from btrfs_raid_array. */ 5782 if (!(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)) 5783 ret = btrfs_raid_array[index].ncopies; 5784 else if (map->type & BTRFS_BLOCK_GROUP_RAID5) 5785 ret = 2; 5786 else if (map->type & BTRFS_BLOCK_GROUP_RAID6) 5787 /* 5788 * There could be two corrupted data stripes, we need 5789 * to loop retry in order to rebuild the correct data. 5790 * 5791 * Fail a stripe at a time on every retry except the 5792 * stripe under reconstruction. 
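* E.g. a raid6 chunk with 6 stripes returns 6 here, giving the read path that many mirror candidates to try.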
5793 */
5794 ret = map->num_stripes;
5795 free_extent_map(em);
5796 return ret;
5797 }
5798
5799 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
5800 u64 logical)
5801 {
5802 struct extent_map *em;
5803 struct map_lookup *map;
5804 unsigned long len = fs_info->sectorsize;
5805
5806 if (!btrfs_fs_incompat(fs_info, RAID56))
5807 return len;
5808
5809 em = btrfs_get_chunk_map(fs_info, logical, len);
5810
5811 if (!WARN_ON(IS_ERR(em))) {
5812 map = em->map_lookup;
5813 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5814 len = btrfs_stripe_nr_to_offset(nr_data_stripes(map));
5815 free_extent_map(em);
5816 }
5817 return len;
5818 }
5819
5820 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5821 {
5822 struct extent_map *em;
5823 struct map_lookup *map;
5824 int ret = 0;
5825
5826 if (!btrfs_fs_incompat(fs_info, RAID56))
5827 return 0;
5828
5829 em = btrfs_get_chunk_map(fs_info, logical, len);
5830
5831 if (!WARN_ON(IS_ERR(em))) {
5832 map = em->map_lookup;
5833 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5834 ret = 1;
5835 free_extent_map(em);
5836 }
5837 return ret;
5838 }
5839
5840 static int find_live_mirror(struct btrfs_fs_info *fs_info,
5841 struct map_lookup *map, int first,
5842 int dev_replace_is_ongoing)
5843 {
5844 int i;
5845 int num_stripes;
5846 int preferred_mirror;
5847 int tolerance;
5848 struct btrfs_device *srcdev;
5849
5850 ASSERT((map->type &
5851 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)));
5852
5853 if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5854 num_stripes = map->sub_stripes;
5855 else
5856 num_stripes = map->num_stripes;
5857
5858 switch (fs_info->fs_devices->read_policy) {
5859 default:
5860 /* Shouldn't happen, just warn and use pid instead of failing */
5861 btrfs_warn_rl(fs_info,
5862 "unknown read_policy type %u, reset to pid",
5863 fs_info->fs_devices->read_policy);
5864 fs_info->fs_devices->read_policy = BTRFS_READ_POLICY_PID;
5865 fallthrough;
5866 case BTRFS_READ_POLICY_PID:
5867 preferred_mirror = first + (current->pid % num_stripes);
5868 break;
5869 }
5870
5871 if (dev_replace_is_ongoing &&
5872 fs_info->dev_replace.cont_reading_from_srcdev_mode ==
5873 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
5874 srcdev = fs_info->dev_replace.srcdev;
5875 else
5876 srcdev = NULL;
5877
5878 /*
5879 * Try to avoid the drive that is the source drive for a
5880 * dev-replace procedure; only choose it if no other non-missing
5881 * mirror is available.
5882 */
5883 for (tolerance = 0; tolerance < 2; tolerance++) {
5884 if (map->stripes[preferred_mirror].dev->bdev &&
5885 (tolerance || map->stripes[preferred_mirror].dev != srcdev))
5886 return preferred_mirror;
5887 for (i = first; i < first + num_stripes; i++) {
5888 if (map->stripes[i].dev->bdev &&
5889 (tolerance || map->stripes[i].dev != srcdev))
5890 return i;
5891 }
5892 }
5893
5894 /* We couldn't find one that doesn't fail.
Just return something
5895 * and the IO error handling code will clean up eventually.
5896 */
5897 return preferred_mirror;
5898 }
5899
5900 static struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info,
5901 u16 total_stripes)
5902 {
5903 struct btrfs_io_context *bioc;
5904
5905 bioc = kzalloc(
5906 /* The size of btrfs_io_context */
5907 sizeof(struct btrfs_io_context) +
5908 /* Plus the variable array for the stripes */
5909 sizeof(struct btrfs_io_stripe) * (total_stripes),
5910 GFP_NOFS);
5911
5912 if (!bioc)
5913 return NULL;
5914
5915 refcount_set(&bioc->refs, 1);
5916
5917 bioc->fs_info = fs_info;
5918 bioc->replace_stripe_src = -1;
5919 bioc->full_stripe_logical = (u64)-1;
5920
5921 return bioc;
5922 }
5923
5924 void btrfs_get_bioc(struct btrfs_io_context *bioc)
5925 {
5926 WARN_ON(!refcount_read(&bioc->refs));
5927 refcount_inc(&bioc->refs);
5928 }
5929
5930 void btrfs_put_bioc(struct btrfs_io_context *bioc)
5931 {
5932 if (!bioc)
5933 return;
5934 if (refcount_dec_and_test(&bioc->refs))
5935 kfree(bioc);
5936 }
5937
5938 /*
5939 * Note that discard won't be sent to the target device of a device
5940 * replace.
5941 */
5942 struct btrfs_discard_stripe *btrfs_map_discard(struct btrfs_fs_info *fs_info,
5943 u64 logical, u64 *length_ret,
5944 u32 *num_stripes)
5945 {
5946 struct extent_map *em;
5947 struct map_lookup *map;
5948 struct btrfs_discard_stripe *stripes;
5949 u64 length = *length_ret;
5950 u64 offset;
5951 u32 stripe_nr;
5952 u32 stripe_nr_end;
5953 u32 stripe_cnt;
5954 u64 stripe_end_offset;
5955 u64 stripe_offset;
5956 u32 stripe_index;
5957 u32 factor = 0;
5958 u32 sub_stripes = 0;
5959 u32 stripes_per_dev = 0;
5960 u32 remaining_stripes = 0;
5961 u32 last_stripe = 0;
5962 int ret;
5963 int i;
5964
5965 em = btrfs_get_chunk_map(fs_info, logical, length);
5966 if (IS_ERR(em))
5967 return ERR_CAST(em);
5968
5969 map = em->map_lookup;
5970
5971 /* We don't discard raid56 yet. */
5972 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5973 ret = -EOPNOTSUPP;
5974 goto out_free_map;
5975 }
5976
5977 offset = logical - em->start;
5978 length = min_t(u64, em->start + em->len - logical, length);
5979 *length_ret = length;
5980
5981 /*
5982 * stripe_nr counts the total number of stripes we have to stride
5983 * to get to this block.
5984 */
5985 stripe_nr = offset >> BTRFS_STRIPE_LEN_SHIFT;
5986
5987 /* stripe_offset is the offset of this block in its stripe. */
5988 stripe_offset = offset - btrfs_stripe_nr_to_offset(stripe_nr);
5989
5990 stripe_nr_end = round_up(offset + length, BTRFS_STRIPE_LEN) >>
5991 BTRFS_STRIPE_LEN_SHIFT;
5992 stripe_cnt = stripe_nr_end - stripe_nr;
5993 stripe_end_offset = btrfs_stripe_nr_to_offset(stripe_nr_end) -
5994 (offset + length);
5995 /*
5996 * After this, stripe_nr is the number of stripes on this
5997 * device we have to walk to find the data, and stripe_index is
5998 * the number of our device in the stripe array.
5999 */
6000 *num_stripes = 1;
6001 stripe_index = 0;
6002 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
6003 BTRFS_BLOCK_GROUP_RAID10)) {
6004 if (map->type & BTRFS_BLOCK_GROUP_RAID0)
6005 sub_stripes = 1;
6006 else
6007 sub_stripes = map->sub_stripes;
6008
6009 factor = map->num_stripes / sub_stripes;
6010 *num_stripes = min_t(u64, map->num_stripes,
6011 sub_stripes * stripe_cnt);
6012 stripe_index = stripe_nr % factor;
6013 stripe_nr /= factor;
6014 stripe_index *= sub_stripes;
6015
6016 remaining_stripes = stripe_cnt % factor;
6017 stripes_per_dev = stripe_cnt / factor;
6018 last_stripe = ((stripe_nr_end - 1) %
factor) * sub_stripes;
6019 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK |
6020 BTRFS_BLOCK_GROUP_DUP)) {
6021 *num_stripes = map->num_stripes;
6022 } else {
6023 stripe_index = stripe_nr % map->num_stripes;
6024 stripe_nr /= map->num_stripes;
6025 }
6026
6027 stripes = kcalloc(*num_stripes, sizeof(*stripes), GFP_NOFS);
6028 if (!stripes) {
6029 ret = -ENOMEM;
6030 goto out_free_map;
6031 }
6032
6033 for (i = 0; i < *num_stripes; i++) {
6034 stripes[i].physical =
6035 map->stripes[stripe_index].physical +
6036 stripe_offset + btrfs_stripe_nr_to_offset(stripe_nr);
6037 stripes[i].dev = map->stripes[stripe_index].dev;
6038
6039 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
6040 BTRFS_BLOCK_GROUP_RAID10)) {
6041 stripes[i].length = btrfs_stripe_nr_to_offset(stripes_per_dev);
6042
6043 if (i / sub_stripes < remaining_stripes)
6044 stripes[i].length += BTRFS_STRIPE_LEN;
6045
6046 /*
6047 * Special for the first stripe and
6048 * the last stripe:
6049 *
6050 * |-------|...|-------|
6051 * |----------|
6052 * off end_off
6053 */
6054 if (i < sub_stripes)
6055 stripes[i].length -= stripe_offset;
6056
6057 if (stripe_index >= last_stripe &&
6058 stripe_index <= (last_stripe +
6059 sub_stripes - 1))
6060 stripes[i].length -= stripe_end_offset;
6061
6062 if (i == sub_stripes - 1)
6063 stripe_offset = 0;
6064 } else {
6065 stripes[i].length = length;
6066 }
6067
6068 stripe_index++;
6069 if (stripe_index == map->num_stripes) {
6070 stripe_index = 0;
6071 stripe_nr++;
6072 }
6073 }
6074
6075 free_extent_map(em);
6076 return stripes;
6077 out_free_map:
6078 free_extent_map(em);
6079 return ERR_PTR(ret);
6080 }
6081
6082 static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical)
6083 {
6084 struct btrfs_block_group *cache;
6085 bool ret;
6086
6087 /* A non-zoned filesystem does not use the "to_copy" flag. */
6088 if (!btrfs_is_zoned(fs_info))
6089 return false;
6090
6091 cache = btrfs_lookup_block_group(fs_info, logical);
6092
6093 ret = test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags);
6094
6095 btrfs_put_block_group(cache);
6096 return ret;
6097 }
6098
6099 static void handle_ops_on_dev_replace(enum btrfs_map_op op,
6100 struct btrfs_io_context *bioc,
6101 struct btrfs_dev_replace *dev_replace,
6102 u64 logical,
6103 int *num_stripes_ret, int *max_errors_ret)
6104 {
6105 u64 srcdev_devid = dev_replace->srcdev->devid;
6106 /*
6107 * At this stage, num_stripes is still the real number of stripes,
6108 * excluding the duplicated stripes.
6109 */
6110 int num_stripes = *num_stripes_ret;
6111 int nr_extra_stripes = 0;
6112 int max_errors = *max_errors_ret;
6113 int i;
6114
6115 /*
6116 * A block group which has "to_copy" set will eventually be copied by
6117 * the dev-replace process. We can avoid cloning IO here.
6118 */
6119 if (is_block_group_to_copy(dev_replace->srcdev->fs_info, logical))
6120 return;
6121
6122 /*
6123 * Duplicate the write operations while the dev-replace procedure is
6124 * running. Since the copying of the old disk to the new disk takes
6125 * place at run time while the filesystem is mounted writable, the
6126 * regular write operations to the old disk have to be duplicated to go
6127 * to the new disk as well.
6128 *
6129 * Note that device->missing is handled by the caller, and that the
6130 * write to the old disk is already set up in the stripes array.
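 *
 * For example, when both stripes of a DUP chunk sit on the source
 * device, two extra stripes pointing at the target device are added
 * below, which is why at most two extra stripes are asserted.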
6131 */
6132 for (i = 0; i < num_stripes; i++) {
6133 struct btrfs_io_stripe *old = &bioc->stripes[i];
6134 struct btrfs_io_stripe *new = &bioc->stripes[num_stripes + nr_extra_stripes];
6135
6136 if (old->dev->devid != srcdev_devid)
6137 continue;
6138
6139 new->physical = old->physical;
6140 new->dev = dev_replace->tgtdev;
6141 if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK)
6142 bioc->replace_stripe_src = i;
6143 nr_extra_stripes++;
6144 }
6145
6146 /* We can only have at most 2 extra nr_stripes (for DUP). */
6147 ASSERT(nr_extra_stripes <= 2);
6148 /*
6149 * For GET_READ_MIRRORS, we can only return at most 1 extra stripe for
6150 * replace.
6151 * If we have 2 extra stripes, only choose the one with smaller physical.
6152 */
6153 if (op == BTRFS_MAP_GET_READ_MIRRORS && nr_extra_stripes == 2) {
6154 struct btrfs_io_stripe *first = &bioc->stripes[num_stripes];
6155 struct btrfs_io_stripe *second = &bioc->stripes[num_stripes + 1];
6156
6157 /* Only DUP can have two extra stripes. */
6158 ASSERT(bioc->map_type & BTRFS_BLOCK_GROUP_DUP);
6159
6160 /*
6161 * Swap the two extra stripes and reduce @nr_extra_stripes.
6162 * The extra stripe would still be there, but won't be accessed.
6163 */
6164 if (first->physical > second->physical) {
6165 swap(second->physical, first->physical);
6166 swap(second->dev, first->dev);
6167 nr_extra_stripes--;
6168 }
6169 }
6170
6171 *num_stripes_ret = num_stripes + nr_extra_stripes;
6172 *max_errors_ret = max_errors + nr_extra_stripes;
6173 bioc->replace_nr_stripes = nr_extra_stripes;
6174 }
6175
6176 static u64 btrfs_max_io_len(struct map_lookup *map, enum btrfs_map_op op,
6177 u64 offset, u32 *stripe_nr, u64 *stripe_offset,
6178 u64 *full_stripe_start)
6179 {
6180 /*
6181 * Stripe_nr is the stripe where this block falls. stripe_offset is
6182 * the offset of this block in its stripe.
6183 */
6184 *stripe_offset = offset & BTRFS_STRIPE_LEN_MASK;
6185 *stripe_nr = offset >> BTRFS_STRIPE_LEN_SHIFT;
6186 ASSERT(*stripe_offset < U32_MAX);
6187
6188 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6189 unsigned long full_stripe_len =
6190 btrfs_stripe_nr_to_offset(nr_data_stripes(map));
6191
6192 /*
6193 * For full stripe start, we use previously calculated
6194 * @stripe_nr. Align it to nr_data_stripes, then multiply with
6195 * STRIPE_LEN.
6196 *
6197 * By this we can avoid u64 division completely. And we have
6198 * to go rounddown(), not round_down(), as nr_data_stripes is
6199 * not ensured to be a power of 2.
6200 */
6201 *full_stripe_start =
6202 btrfs_stripe_nr_to_offset(
6203 rounddown(*stripe_nr, nr_data_stripes(map)));
6204
6205 ASSERT(*full_stripe_start + full_stripe_len > offset);
6206 ASSERT(*full_stripe_start <= offset);
6207 /*
6208 * For writes to RAID56, allow writing a full stripe set, but
6209 * no straddling of stripe sets.
6210 */
6211 if (op == BTRFS_MAP_WRITE)
6212 return full_stripe_len - (offset - *full_stripe_start);
6213 }
6214
6215 /*
6216 * For other RAID types and for RAID56 reads, allow a single stripe (on
6217 * a single disk).
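 *
 * E.g. with BTRFS_STRIPE_LEN == 64K, an offset of 100K gives
 * stripe_nr == 1 and stripe_offset == 36K, so a striped read is
 * capped at 64K - 36K == 28K before it would cross into the next
 * stripe.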
6218 */
6219 if (map->type & BTRFS_BLOCK_GROUP_STRIPE_MASK)
6220 return BTRFS_STRIPE_LEN - *stripe_offset;
6221 return U64_MAX;
6222 }
6223
6224 static void set_io_stripe(struct btrfs_io_stripe *dst, const struct map_lookup *map,
6225 u32 stripe_index, u64 stripe_offset, u32 stripe_nr)
6226 {
6227 dst->dev = map->stripes[stripe_index].dev;
6228 dst->physical = map->stripes[stripe_index].physical +
6229 stripe_offset + btrfs_stripe_nr_to_offset(stripe_nr);
6230 }
6231
6232 /*
6233 * Map one logical range to one or more physical ranges.
6234 *
6235 * @length: (Mandatory) mapped length of this run.
6236 * One logical range can be split into different segments
6237 * due to factors like zones and RAID0/5/6/10 stripe
6238 * boundaries.
6239 *
6240 * @bioc_ret: (Mandatory) returned btrfs_io_context structure,
6241 * which has one or more physical ranges (btrfs_io_stripe)
6242 * recorded inside.
6243 * Caller should call btrfs_put_bioc() to free it after use.
6244 *
6245 * @smap: (Optional) single physical range optimization.
6246 * If the map request can be fulfilled by one single
6247 * physical range, and this parameter is not NULL,
6248 * then @bioc_ret would be NULL, and @smap would be
6249 * updated.
6250 *
6251 * @mirror_num_ret: (Mandatory) returned mirror number if the original
6252 * value is 0.
6253 *
6254 * Mirror number 0 means to choose any live mirrors.
6255 *
6256 * For non-RAID56 profiles, non-zero mirror_num means
6257 * the Nth mirror. (e.g. mirror_num 1 means the first
6258 * copy).
6259 *
6260 * For RAID56 profile, mirror 1 means rebuild from P and
6261 * the remaining data stripes.
6262 *
6263 * For RAID6 profile, mirror > 2 means mark another
6264 * data/P stripe error and rebuild from the remaining
6265 * stripes.
6266 *
6267 * @need_raid_map: (Used only for integrity checker) whether the map wants
6268 * a full stripe map (including all data and P/Q stripes)
6269 * for RAID56. Should always be 1 except for the integrity checker.
6270 */
6271 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6272 u64 logical, u64 *length,
6273 struct btrfs_io_context **bioc_ret,
6274 struct btrfs_io_stripe *smap, int *mirror_num_ret,
6275 int need_raid_map)
6276 {
6277 struct extent_map *em;
6278 struct map_lookup *map;
6279 u64 map_offset;
6280 u64 stripe_offset;
6281 u32 stripe_nr;
6282 u32 stripe_index;
6283 int data_stripes;
6284 int i;
6285 int ret = 0;
6286 int mirror_num = (mirror_num_ret ?
*mirror_num_ret : 0);
6287 int num_stripes;
6288 int num_copies;
6289 int max_errors = 0;
6290 struct btrfs_io_context *bioc = NULL;
6291 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
6292 int dev_replace_is_ongoing = 0;
6293 u16 num_alloc_stripes;
6294 u64 raid56_full_stripe_start = (u64)-1;
6295 u64 max_len;
6296
6297 ASSERT(bioc_ret);
6298
6299 num_copies = btrfs_num_copies(fs_info, logical, fs_info->sectorsize);
6300 if (mirror_num > num_copies)
6301 return -EINVAL;
6302
6303 em = btrfs_get_chunk_map(fs_info, logical, *length);
6304 if (IS_ERR(em))
6305 return PTR_ERR(em);
6306
6307 map = em->map_lookup;
6308 data_stripes = nr_data_stripes(map);
6309
6310 map_offset = logical - em->start;
6311 max_len = btrfs_max_io_len(map, op, map_offset, &stripe_nr,
6312 &stripe_offset, &raid56_full_stripe_start);
6313 *length = min_t(u64, em->len - map_offset, max_len);
6314
6315 down_read(&dev_replace->rwsem);
6316 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
6317 /*
6318 * Hold the semaphore for read during the whole operation; write is
6319 * requested at commit time but must wait.
6320 */
6321 if (!dev_replace_is_ongoing)
6322 up_read(&dev_replace->rwsem);
6323
6324 num_stripes = 1;
6325 stripe_index = 0;
6326 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
6327 stripe_index = stripe_nr % map->num_stripes;
6328 stripe_nr /= map->num_stripes;
6329 if (op == BTRFS_MAP_READ)
6330 mirror_num = 1;
6331 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
6332 if (op != BTRFS_MAP_READ) {
6333 num_stripes = map->num_stripes;
6334 } else if (mirror_num) {
6335 stripe_index = mirror_num - 1;
6336 } else {
6337 stripe_index = find_live_mirror(fs_info, map, 0,
6338 dev_replace_is_ongoing);
6339 mirror_num = stripe_index + 1;
6340 }
6341
6342 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
6343 if (op != BTRFS_MAP_READ) {
6344 num_stripes = map->num_stripes;
6345 } else if (mirror_num) {
6346 stripe_index = mirror_num - 1;
6347 } else {
6348 mirror_num = 1;
6349 }
6350
6351 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
6352 u32 factor = map->num_stripes / map->sub_stripes;
6353
6354 stripe_index = (stripe_nr % factor) * map->sub_stripes;
6355 stripe_nr /= factor;
6356
6357 if (op != BTRFS_MAP_READ)
6358 num_stripes = map->sub_stripes;
6359 else if (mirror_num)
6360 stripe_index += mirror_num - 1;
6361 else {
6362 int old_stripe_index = stripe_index;
6363 stripe_index = find_live_mirror(fs_info, map,
6364 stripe_index,
6365 dev_replace_is_ongoing);
6366 mirror_num = stripe_index - old_stripe_index + 1;
6367 }
6368
6369 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6370 if (need_raid_map && (op != BTRFS_MAP_READ || mirror_num > 1)) {
6371 /*
6372 * Push stripe_nr back to the start of the full stripe.
6373 * For those cases needing a full stripe, @stripe_nr
6374 * is the full stripe number.
6375 *
6376 * Originally we go raid56_full_stripe_start / full_stripe_len,
6377 * but that can be expensive. Here we just divide
6378 * @stripe_nr by @data_stripes.
6379 */
6380 stripe_nr /= data_stripes;
6381
6382 /* RAID[56] write or recovery. Return all stripes */
6383 num_stripes = map->num_stripes;
6384 max_errors = btrfs_chunk_max_errors(map);
6385
6386 /* Return the length to the full stripe end */
6387 *length = min(logical + *length,
6388 raid56_full_stripe_start + em->start +
6389 btrfs_stripe_nr_to_offset(data_stripes)) -
6390 logical;
6391 stripe_index = 0;
6392 stripe_offset = 0;
6393 } else {
6394 /*
6395 * Mirror #0 or #1 means the original data block.
6396 * Mirror #2 is RAID5 parity block. 6397 * Mirror #3 is RAID6 Q block. 6398 */ 6399 stripe_index = stripe_nr % data_stripes; 6400 stripe_nr /= data_stripes; 6401 if (mirror_num > 1) 6402 stripe_index = data_stripes + mirror_num - 2; 6403 6404 /* We distribute the parity blocks across stripes */ 6405 stripe_index = (stripe_nr + stripe_index) % map->num_stripes; 6406 if (op == BTRFS_MAP_READ && mirror_num <= 1) 6407 mirror_num = 1; 6408 } 6409 } else { 6410 /* 6411 * After this, stripe_nr is the number of stripes on this 6412 * device we have to walk to find the data, and stripe_index is 6413 * the number of our device in the stripe array 6414 */ 6415 stripe_index = stripe_nr % map->num_stripes; 6416 stripe_nr /= map->num_stripes; 6417 mirror_num = stripe_index + 1; 6418 } 6419 if (stripe_index >= map->num_stripes) { 6420 btrfs_crit(fs_info, 6421 "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u", 6422 stripe_index, map->num_stripes); 6423 ret = -EINVAL; 6424 goto out; 6425 } 6426 6427 num_alloc_stripes = num_stripes; 6428 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL && 6429 op != BTRFS_MAP_READ) 6430 /* 6431 * For replace case, we need to add extra stripes for extra 6432 * duplicated stripes. 6433 * 6434 * For both WRITE and GET_READ_MIRRORS, we may have at most 6435 * 2 more stripes (DUP types, otherwise 1). 6436 */ 6437 num_alloc_stripes += 2; 6438 6439 /* 6440 * If this I/O maps to a single device, try to return the device and 6441 * physical block information on the stack instead of allocating an 6442 * I/O context structure. 6443 */ 6444 if (smap && num_alloc_stripes == 1 && 6445 !((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && mirror_num > 1)) { 6446 set_io_stripe(smap, map, stripe_index, stripe_offset, stripe_nr); 6447 if (mirror_num_ret) 6448 *mirror_num_ret = mirror_num; 6449 *bioc_ret = NULL; 6450 ret = 0; 6451 goto out; 6452 } 6453 6454 bioc = alloc_btrfs_io_context(fs_info, num_alloc_stripes); 6455 if (!bioc) { 6456 ret = -ENOMEM; 6457 goto out; 6458 } 6459 bioc->map_type = map->type; 6460 6461 /* 6462 * For RAID56 full map, we need to make sure the stripes[] follows the 6463 * rule that data stripes are all ordered, then followed with P and Q 6464 * (if we have). 6465 * 6466 * It's still mostly the same as other profiles, just with extra rotation. 6467 */ 6468 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map && 6469 (op != BTRFS_MAP_READ || mirror_num > 1)) { 6470 /* 6471 * For RAID56 @stripe_nr is already the number of full stripes 6472 * before us, which is also the rotation value (needs to modulo 6473 * with num_stripes). 6474 * 6475 * In this case, we just add @stripe_nr with @i, then do the 6476 * modulo, to reduce one modulo call. 6477 */ 6478 bioc->full_stripe_logical = em->start + 6479 btrfs_stripe_nr_to_offset(stripe_nr * data_stripes); 6480 for (i = 0; i < num_stripes; i++) 6481 set_io_stripe(&bioc->stripes[i], map, 6482 (i + stripe_nr) % num_stripes, 6483 stripe_offset, stripe_nr); 6484 } else { 6485 /* 6486 * For all other non-RAID56 profiles, just copy the target 6487 * stripe into the bioc. 
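 *
 * For plain reads num_stripes is 1 here (only the mirror chosen
 * above); for writes and GET_READ_MIRRORS it covers every mirror
 * of the range.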
6488 */ 6489 for (i = 0; i < num_stripes; i++) { 6490 set_io_stripe(&bioc->stripes[i], map, stripe_index, 6491 stripe_offset, stripe_nr); 6492 stripe_index++; 6493 } 6494 } 6495 6496 if (op != BTRFS_MAP_READ) 6497 max_errors = btrfs_chunk_max_errors(map); 6498 6499 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL && 6500 op != BTRFS_MAP_READ) { 6501 handle_ops_on_dev_replace(op, bioc, dev_replace, logical, 6502 &num_stripes, &max_errors); 6503 } 6504 6505 *bioc_ret = bioc; 6506 bioc->num_stripes = num_stripes; 6507 bioc->max_errors = max_errors; 6508 bioc->mirror_num = mirror_num; 6509 6510 out: 6511 if (dev_replace_is_ongoing) { 6512 lockdep_assert_held(&dev_replace->rwsem); 6513 /* Unlock and let waiting writers proceed */ 6514 up_read(&dev_replace->rwsem); 6515 } 6516 free_extent_map(em); 6517 return ret; 6518 } 6519 6520 static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args, 6521 const struct btrfs_fs_devices *fs_devices) 6522 { 6523 if (args->fsid == NULL) 6524 return true; 6525 if (memcmp(fs_devices->metadata_uuid, args->fsid, BTRFS_FSID_SIZE) == 0) 6526 return true; 6527 return false; 6528 } 6529 6530 static bool dev_args_match_device(const struct btrfs_dev_lookup_args *args, 6531 const struct btrfs_device *device) 6532 { 6533 if (args->missing) { 6534 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) && 6535 !device->bdev) 6536 return true; 6537 return false; 6538 } 6539 6540 if (device->devid != args->devid) 6541 return false; 6542 if (args->uuid && memcmp(device->uuid, args->uuid, BTRFS_UUID_SIZE) != 0) 6543 return false; 6544 return true; 6545 } 6546 6547 /* 6548 * Find a device specified by @devid or @uuid in the list of @fs_devices, or 6549 * return NULL. 6550 * 6551 * If devid and uuid are both specified, the match must be exact, otherwise 6552 * only devid is used. 6553 */ 6554 struct btrfs_device *btrfs_find_device(const struct btrfs_fs_devices *fs_devices, 6555 const struct btrfs_dev_lookup_args *args) 6556 { 6557 struct btrfs_device *device; 6558 struct btrfs_fs_devices *seed_devs; 6559 6560 if (dev_args_match_fs_devices(args, fs_devices)) { 6561 list_for_each_entry(device, &fs_devices->devices, dev_list) { 6562 if (dev_args_match_device(args, device)) 6563 return device; 6564 } 6565 } 6566 6567 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 6568 if (!dev_args_match_fs_devices(args, seed_devs)) 6569 continue; 6570 list_for_each_entry(device, &seed_devs->devices, dev_list) { 6571 if (dev_args_match_device(args, device)) 6572 return device; 6573 } 6574 } 6575 6576 return NULL; 6577 } 6578 6579 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices, 6580 u64 devid, u8 *dev_uuid) 6581 { 6582 struct btrfs_device *device; 6583 unsigned int nofs_flag; 6584 6585 /* 6586 * We call this under the chunk_mutex, so we want to use NOFS for this 6587 * allocation, however we don't want to change btrfs_alloc_device() to 6588 * always do NOFS because we use it in a lot of other GFP_KERNEL safe 6589 * places. 
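 *
 * memalloc_nofs_save() makes any GFP_KERNEL allocation done inside
 * btrfs_alloc_device() behave as GFP_NOFS for the duration of this
 * call only, avoiding recursion into filesystem reclaim while the
 * chunk_mutex is held.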
6590 */ 6591
6592 nofs_flag = memalloc_nofs_save();
6593 device = btrfs_alloc_device(NULL, &devid, dev_uuid, NULL);
6594 memalloc_nofs_restore(nofs_flag);
6595 if (IS_ERR(device))
6596 return device;
6597
6598 list_add(&device->dev_list, &fs_devices->devices);
6599 device->fs_devices = fs_devices;
6600 fs_devices->num_devices++;
6601
6602 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
6603 fs_devices->missing_devices++;
6604
6605 return device;
6606 }
6607
6608 /*
6609 * Allocate new device struct, set up devid and UUID.
6610 *
6611 * @fs_info: used only for generating a new devid, can be NULL if
6612 * devid is provided (i.e. @devid != NULL).
6613 * @devid: a pointer to devid for this device. If NULL a new devid
6614 * is generated.
6615 * @uuid: a pointer to UUID for this device. If NULL a new UUID
6616 * is generated.
6617 * @path: a pointer to device path if available, NULL otherwise.
6618 *
6619 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
6620 * on error. Returned struct is not linked onto any lists and must be
6621 * destroyed with btrfs_free_device.
6622 */
6623 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
6624 const u64 *devid, const u8 *uuid,
6625 const char *path)
6626 {
6627 struct btrfs_device *dev;
6628 u64 tmp;
6629
6630 if (WARN_ON(!devid && !fs_info))
6631 return ERR_PTR(-EINVAL);
6632
6633 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
6634 if (!dev)
6635 return ERR_PTR(-ENOMEM);
6636
6637 INIT_LIST_HEAD(&dev->dev_list);
6638 INIT_LIST_HEAD(&dev->dev_alloc_list);
6639 INIT_LIST_HEAD(&dev->post_commit_list);
6640
6641 atomic_set(&dev->dev_stats_ccnt, 0);
6642 btrfs_device_data_ordered_init(dev);
6643 extent_io_tree_init(fs_info, &dev->alloc_state, IO_TREE_DEVICE_ALLOC_STATE);
6644
6645 if (devid)
6646 tmp = *devid;
6647 else {
6648 int ret;
6649
6650 ret = find_next_devid(fs_info, &tmp);
6651 if (ret) {
6652 btrfs_free_device(dev);
6653 return ERR_PTR(ret);
6654 }
6655 }
6656 dev->devid = tmp;
6657
6658 if (uuid)
6659 memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
6660 else
6661 generate_random_uuid(dev->uuid);
6662
6663 if (path) {
6664 struct rcu_string *name;
6665
6666 name = rcu_string_strdup(path, GFP_KERNEL);
6667 if (!name) {
6668 btrfs_free_device(dev);
6669 return ERR_PTR(-ENOMEM);
6670 }
6671 rcu_assign_pointer(dev->name, name);
6672 }
6673
6674 return dev;
6675 }
6676
6677 static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
6678 u64 devid, u8 *uuid, bool error)
6679 {
6680 if (error)
6681 btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
6682 devid, uuid);
6683 else
6684 btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
6685 devid, uuid);
6686 }
6687
6688 u64 btrfs_calc_stripe_length(const struct extent_map *em)
6689 {
6690 const struct map_lookup *map = em->map_lookup;
6691 const int data_stripes = calc_data_stripes(map->type, map->num_stripes);
6692
6693 return div_u64(em->len, data_stripes);
6694 }
6695
6696 #if BITS_PER_LONG == 32
6697 /*
6698 * Due to the page cache limit, metadata beyond BTRFS_32BIT_MAX_FILE_SIZE
6699 * can't be accessed on 32bit systems.
6700 *
6701 * This function does a mount time check to reject the fs if it already has
6702 * a metadata chunk beyond that limit.
6703 */ 6704 static int check_32bit_meta_chunk(struct btrfs_fs_info *fs_info, 6705 u64 logical, u64 length, u64 type) 6706 { 6707 if (!(type & BTRFS_BLOCK_GROUP_METADATA)) 6708 return 0; 6709 6710 if (logical + length < MAX_LFS_FILESIZE) 6711 return 0; 6712 6713 btrfs_err_32bit_limit(fs_info); 6714 return -EOVERFLOW; 6715 } 6716 6717 /* 6718 * This is to give early warning for any metadata chunk reaching 6719 * BTRFS_32BIT_EARLY_WARN_THRESHOLD. 6720 * Although we can still access the metadata, it's not going to be possible 6721 * once the limit is reached. 6722 */ 6723 static void warn_32bit_meta_chunk(struct btrfs_fs_info *fs_info, 6724 u64 logical, u64 length, u64 type) 6725 { 6726 if (!(type & BTRFS_BLOCK_GROUP_METADATA)) 6727 return; 6728 6729 if (logical + length < BTRFS_32BIT_EARLY_WARN_THRESHOLD) 6730 return; 6731 6732 btrfs_warn_32bit_limit(fs_info); 6733 } 6734 #endif 6735 6736 static struct btrfs_device *handle_missing_device(struct btrfs_fs_info *fs_info, 6737 u64 devid, u8 *uuid) 6738 { 6739 struct btrfs_device *dev; 6740 6741 if (!btrfs_test_opt(fs_info, DEGRADED)) { 6742 btrfs_report_missing_device(fs_info, devid, uuid, true); 6743 return ERR_PTR(-ENOENT); 6744 } 6745 6746 dev = add_missing_dev(fs_info->fs_devices, devid, uuid); 6747 if (IS_ERR(dev)) { 6748 btrfs_err(fs_info, "failed to init missing device %llu: %ld", 6749 devid, PTR_ERR(dev)); 6750 return dev; 6751 } 6752 btrfs_report_missing_device(fs_info, devid, uuid, false); 6753 6754 return dev; 6755 } 6756 6757 static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf, 6758 struct btrfs_chunk *chunk) 6759 { 6760 BTRFS_DEV_LOOKUP_ARGS(args); 6761 struct btrfs_fs_info *fs_info = leaf->fs_info; 6762 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 6763 struct map_lookup *map; 6764 struct extent_map *em; 6765 u64 logical; 6766 u64 length; 6767 u64 devid; 6768 u64 type; 6769 u8 uuid[BTRFS_UUID_SIZE]; 6770 int index; 6771 int num_stripes; 6772 int ret; 6773 int i; 6774 6775 logical = key->offset; 6776 length = btrfs_chunk_length(leaf, chunk); 6777 type = btrfs_chunk_type(leaf, chunk); 6778 index = btrfs_bg_flags_to_raid_index(type); 6779 num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 6780 6781 #if BITS_PER_LONG == 32 6782 ret = check_32bit_meta_chunk(fs_info, logical, length, type); 6783 if (ret < 0) 6784 return ret; 6785 warn_32bit_meta_chunk(fs_info, logical, length, type); 6786 #endif 6787 6788 /* 6789 * Only need to verify chunk item if we're reading from sys chunk array, 6790 * as chunk item in tree block is already verified by tree-checker. 6791 */ 6792 if (leaf->start == BTRFS_SUPER_INFO_OFFSET) { 6793 ret = btrfs_check_chunk_valid(leaf, chunk, logical); 6794 if (ret) 6795 return ret; 6796 } 6797 6798 read_lock(&map_tree->lock); 6799 em = lookup_extent_mapping(map_tree, logical, 1); 6800 read_unlock(&map_tree->lock); 6801 6802 /* already mapped? 
*/ 6803 if (em && em->start <= logical && em->start + em->len > logical) { 6804 free_extent_map(em); 6805 return 0; 6806 } else if (em) { 6807 free_extent_map(em); 6808 } 6809 6810 em = alloc_extent_map(); 6811 if (!em) 6812 return -ENOMEM; 6813 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); 6814 if (!map) { 6815 free_extent_map(em); 6816 return -ENOMEM; 6817 } 6818 6819 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 6820 em->map_lookup = map; 6821 em->start = logical; 6822 em->len = length; 6823 em->orig_start = 0; 6824 em->block_start = 0; 6825 em->block_len = em->len; 6826 6827 map->num_stripes = num_stripes; 6828 map->io_width = btrfs_chunk_io_width(leaf, chunk); 6829 map->io_align = btrfs_chunk_io_align(leaf, chunk); 6830 map->type = type; 6831 /* 6832 * We can't use the sub_stripes value, as for profiles other than 6833 * RAID10, they may have 0 as sub_stripes for filesystems created by 6834 * older mkfs (<v5.4). 6835 * In that case, it can cause divide-by-zero errors later. 6836 * Since currently sub_stripes is fixed for each profile, let's 6837 * use the trusted value instead. 6838 */ 6839 map->sub_stripes = btrfs_raid_array[index].sub_stripes; 6840 map->verified_stripes = 0; 6841 em->orig_block_len = btrfs_calc_stripe_length(em); 6842 for (i = 0; i < num_stripes; i++) { 6843 map->stripes[i].physical = 6844 btrfs_stripe_offset_nr(leaf, chunk, i); 6845 devid = btrfs_stripe_devid_nr(leaf, chunk, i); 6846 args.devid = devid; 6847 read_extent_buffer(leaf, uuid, (unsigned long) 6848 btrfs_stripe_dev_uuid_nr(chunk, i), 6849 BTRFS_UUID_SIZE); 6850 args.uuid = uuid; 6851 map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, &args); 6852 if (!map->stripes[i].dev) { 6853 map->stripes[i].dev = handle_missing_device(fs_info, 6854 devid, uuid); 6855 if (IS_ERR(map->stripes[i].dev)) { 6856 ret = PTR_ERR(map->stripes[i].dev); 6857 free_extent_map(em); 6858 return ret; 6859 } 6860 } 6861 6862 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 6863 &(map->stripes[i].dev->dev_state)); 6864 } 6865 6866 write_lock(&map_tree->lock); 6867 ret = add_extent_mapping(map_tree, em, 0); 6868 write_unlock(&map_tree->lock); 6869 if (ret < 0) { 6870 btrfs_err(fs_info, 6871 "failed to add chunk map, start=%llu len=%llu: %d", 6872 em->start, em->len, ret); 6873 } 6874 free_extent_map(em); 6875 6876 return ret; 6877 } 6878 6879 static void fill_device_from_item(struct extent_buffer *leaf, 6880 struct btrfs_dev_item *dev_item, 6881 struct btrfs_device *device) 6882 { 6883 unsigned long ptr; 6884 6885 device->devid = btrfs_device_id(leaf, dev_item); 6886 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item); 6887 device->total_bytes = device->disk_total_bytes; 6888 device->commit_total_bytes = device->disk_total_bytes; 6889 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item); 6890 device->commit_bytes_used = device->bytes_used; 6891 device->type = btrfs_device_type(leaf, dev_item); 6892 device->io_align = btrfs_device_io_align(leaf, dev_item); 6893 device->io_width = btrfs_device_io_width(leaf, dev_item); 6894 device->sector_size = btrfs_device_sector_size(leaf, dev_item); 6895 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID); 6896 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 6897 6898 ptr = btrfs_device_uuid(dev_item); 6899 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 6900 } 6901 6902 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info, 6903 u8 *fsid) 6904 { 6905 struct btrfs_fs_devices *fs_devices; 6906 int ret; 6907 6908 
lockdep_assert_held(&uuid_mutex); 6909 ASSERT(fsid); 6910 6911 /* This will match only for multi-device seed fs */ 6912 list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list) 6913 if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE)) 6914 return fs_devices; 6915 6916 6917 fs_devices = find_fsid(fsid, NULL); 6918 if (!fs_devices) { 6919 if (!btrfs_test_opt(fs_info, DEGRADED)) 6920 return ERR_PTR(-ENOENT); 6921 6922 fs_devices = alloc_fs_devices(fsid, NULL); 6923 if (IS_ERR(fs_devices)) 6924 return fs_devices; 6925 6926 fs_devices->seeding = true; 6927 fs_devices->opened = 1; 6928 return fs_devices; 6929 } 6930 6931 /* 6932 * Upon first call for a seed fs fsid, just create a private copy of the 6933 * respective fs_devices and anchor it at fs_info->fs_devices->seed_list 6934 */ 6935 fs_devices = clone_fs_devices(fs_devices); 6936 if (IS_ERR(fs_devices)) 6937 return fs_devices; 6938 6939 ret = open_fs_devices(fs_devices, BLK_OPEN_READ, fs_info->bdev_holder); 6940 if (ret) { 6941 free_fs_devices(fs_devices); 6942 return ERR_PTR(ret); 6943 } 6944 6945 if (!fs_devices->seeding) { 6946 close_fs_devices(fs_devices); 6947 free_fs_devices(fs_devices); 6948 return ERR_PTR(-EINVAL); 6949 } 6950 6951 list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list); 6952 6953 return fs_devices; 6954 } 6955 6956 static int read_one_dev(struct extent_buffer *leaf, 6957 struct btrfs_dev_item *dev_item) 6958 { 6959 BTRFS_DEV_LOOKUP_ARGS(args); 6960 struct btrfs_fs_info *fs_info = leaf->fs_info; 6961 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 6962 struct btrfs_device *device; 6963 u64 devid; 6964 int ret; 6965 u8 fs_uuid[BTRFS_FSID_SIZE]; 6966 u8 dev_uuid[BTRFS_UUID_SIZE]; 6967 6968 devid = btrfs_device_id(leaf, dev_item); 6969 args.devid = devid; 6970 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 6971 BTRFS_UUID_SIZE); 6972 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 6973 BTRFS_FSID_SIZE); 6974 args.uuid = dev_uuid; 6975 args.fsid = fs_uuid; 6976 6977 if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) { 6978 fs_devices = open_seed_devices(fs_info, fs_uuid); 6979 if (IS_ERR(fs_devices)) 6980 return PTR_ERR(fs_devices); 6981 } 6982 6983 device = btrfs_find_device(fs_info->fs_devices, &args); 6984 if (!device) { 6985 if (!btrfs_test_opt(fs_info, DEGRADED)) { 6986 btrfs_report_missing_device(fs_info, devid, 6987 dev_uuid, true); 6988 return -ENOENT; 6989 } 6990 6991 device = add_missing_dev(fs_devices, devid, dev_uuid); 6992 if (IS_ERR(device)) { 6993 btrfs_err(fs_info, 6994 "failed to add missing dev %llu: %ld", 6995 devid, PTR_ERR(device)); 6996 return PTR_ERR(device); 6997 } 6998 btrfs_report_missing_device(fs_info, devid, dev_uuid, false); 6999 } else { 7000 if (!device->bdev) { 7001 if (!btrfs_test_opt(fs_info, DEGRADED)) { 7002 btrfs_report_missing_device(fs_info, 7003 devid, dev_uuid, true); 7004 return -ENOENT; 7005 } 7006 btrfs_report_missing_device(fs_info, devid, 7007 dev_uuid, false); 7008 } 7009 7010 if (!device->bdev && 7011 !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { 7012 /* 7013 * this happens when a device that was properly setup 7014 * in the device info lists suddenly goes bad. 
7015 * device->bdev is NULL, and so we have to set
7016 * the BTRFS_DEV_STATE_MISSING bit here.
7017 */
7018 device->fs_devices->missing_devices++;
7019 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
7020 }
7021
7022 /* Move the device to its own fs_devices */
7023 if (device->fs_devices != fs_devices) {
7024 ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
7025 &device->dev_state));
7026
7027 list_move(&device->dev_list, &fs_devices->devices);
7028 device->fs_devices->num_devices--;
7029 fs_devices->num_devices++;
7030
7031 device->fs_devices->missing_devices--;
7032 fs_devices->missing_devices++;
7033
7034 device->fs_devices = fs_devices;
7035 }
7036 }
7037
7038 if (device->fs_devices != fs_info->fs_devices) {
7039 BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
7040 if (device->generation !=
7041 btrfs_device_generation(leaf, dev_item))
7042 return -EINVAL;
7043 }
7044
7045 fill_device_from_item(leaf, dev_item, device);
7046 if (device->bdev) {
7047 u64 max_total_bytes = bdev_nr_bytes(device->bdev);
7048
7049 if (device->total_bytes > max_total_bytes) {
7050 btrfs_err(fs_info,
7051 "device total_bytes should be at most %llu but found %llu",
7052 max_total_bytes, device->total_bytes);
7053 return -EINVAL;
7054 }
7055 }
7056 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
7057 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
7058 !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
7059 device->fs_devices->total_rw_bytes += device->total_bytes;
7060 atomic64_add(device->total_bytes - device->bytes_used,
7061 &fs_info->free_chunk_space);
7062 }
7063 ret = 0;
7064 return ret;
7065 }
7066
7067 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
7068 {
7069 struct btrfs_super_block *super_copy = fs_info->super_copy;
7070 struct extent_buffer *sb;
7071 struct btrfs_disk_key *disk_key;
7072 struct btrfs_chunk *chunk;
7073 u8 *array_ptr;
7074 unsigned long sb_array_offset;
7075 int ret = 0;
7076 u32 num_stripes;
7077 u32 array_size;
7078 u32 len = 0;
7079 u32 cur_offset;
7080 u64 type;
7081 struct btrfs_key key;
7082
7083 ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
7084
7085 /*
7086 * We allocated a dummy extent, just to use extent buffer accessors.
7087 * There will be unused space after BTRFS_SUPER_INFO_SIZE, but
7088 * that's fine, we will not go beyond the system chunk array anyway.
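 *
 * The sys_chunk_array itself is a packed sequence of
 * (struct btrfs_disk_key, struct btrfs_chunk) pairs, which is exactly
 * what the parsing loop below walks, bounds-checking each step against
 * array_size.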
7089 */ 7090 sb = alloc_dummy_extent_buffer(fs_info, BTRFS_SUPER_INFO_OFFSET); 7091 if (!sb) 7092 return -ENOMEM; 7093 set_extent_buffer_uptodate(sb); 7094 7095 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); 7096 array_size = btrfs_super_sys_array_size(super_copy); 7097 7098 array_ptr = super_copy->sys_chunk_array; 7099 sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array); 7100 cur_offset = 0; 7101 7102 while (cur_offset < array_size) { 7103 disk_key = (struct btrfs_disk_key *)array_ptr; 7104 len = sizeof(*disk_key); 7105 if (cur_offset + len > array_size) 7106 goto out_short_read; 7107 7108 btrfs_disk_key_to_cpu(&key, disk_key); 7109 7110 array_ptr += len; 7111 sb_array_offset += len; 7112 cur_offset += len; 7113 7114 if (key.type != BTRFS_CHUNK_ITEM_KEY) { 7115 btrfs_err(fs_info, 7116 "unexpected item type %u in sys_array at offset %u", 7117 (u32)key.type, cur_offset); 7118 ret = -EIO; 7119 break; 7120 } 7121 7122 chunk = (struct btrfs_chunk *)sb_array_offset; 7123 /* 7124 * At least one btrfs_chunk with one stripe must be present, 7125 * exact stripe count check comes afterwards 7126 */ 7127 len = btrfs_chunk_item_size(1); 7128 if (cur_offset + len > array_size) 7129 goto out_short_read; 7130 7131 num_stripes = btrfs_chunk_num_stripes(sb, chunk); 7132 if (!num_stripes) { 7133 btrfs_err(fs_info, 7134 "invalid number of stripes %u in sys_array at offset %u", 7135 num_stripes, cur_offset); 7136 ret = -EIO; 7137 break; 7138 } 7139 7140 type = btrfs_chunk_type(sb, chunk); 7141 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) { 7142 btrfs_err(fs_info, 7143 "invalid chunk type %llu in sys_array at offset %u", 7144 type, cur_offset); 7145 ret = -EIO; 7146 break; 7147 } 7148 7149 len = btrfs_chunk_item_size(num_stripes); 7150 if (cur_offset + len > array_size) 7151 goto out_short_read; 7152 7153 ret = read_one_chunk(&key, sb, chunk); 7154 if (ret) 7155 break; 7156 7157 array_ptr += len; 7158 sb_array_offset += len; 7159 cur_offset += len; 7160 } 7161 clear_extent_buffer_uptodate(sb); 7162 free_extent_buffer_stale(sb); 7163 return ret; 7164 7165 out_short_read: 7166 btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u", 7167 len, cur_offset); 7168 clear_extent_buffer_uptodate(sb); 7169 free_extent_buffer_stale(sb); 7170 return -EIO; 7171 } 7172 7173 /* 7174 * Check if all chunks in the fs are OK for read-write degraded mount 7175 * 7176 * If the @failing_dev is specified, it's accounted as missing. 7177 * 7178 * Return true if all chunks meet the minimal RW mount requirements. 7179 * Return false if any chunk doesn't meet the minimal RW mount requirements. 7180 */ 7181 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info, 7182 struct btrfs_device *failing_dev) 7183 { 7184 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 7185 struct extent_map *em; 7186 u64 next_start = 0; 7187 bool ret = true; 7188 7189 read_lock(&map_tree->lock); 7190 em = lookup_extent_mapping(map_tree, 0, (u64)-1); 7191 read_unlock(&map_tree->lock); 7192 /* No chunk at all? 
Return false anyway */
7193 if (!em) {
7194 ret = false;
7195 goto out;
7196 }
7197 while (em) {
7198 struct map_lookup *map;
7199 int missing = 0;
7200 int max_tolerated;
7201 int i;
7202
7203 map = em->map_lookup;
7204 max_tolerated =
7205 btrfs_get_num_tolerated_disk_barrier_failures(
7206 map->type);
7207 for (i = 0; i < map->num_stripes; i++) {
7208 struct btrfs_device *dev = map->stripes[i].dev;
7209
7210 if (!dev || !dev->bdev ||
7211 test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
7212 dev->last_flush_error)
7213 missing++;
7214 else if (failing_dev && failing_dev == dev)
7215 missing++;
7216 }
7217 if (missing > max_tolerated) {
7218 if (!failing_dev)
7219 btrfs_warn(fs_info,
7220 "chunk %llu missing %d devices, max tolerance is %d for writable mount",
7221 em->start, missing, max_tolerated);
7222 free_extent_map(em);
7223 ret = false;
7224 goto out;
7225 }
7226 next_start = extent_map_end(em);
7227 free_extent_map(em);
7228
7229 read_lock(&map_tree->lock);
7230 em = lookup_extent_mapping(map_tree, next_start,
7231 (u64)(-1) - next_start);
7232 read_unlock(&map_tree->lock);
7233 }
7234 out:
7235 return ret;
7236 }
7237
7238 static void readahead_tree_node_children(struct extent_buffer *node)
7239 {
7240 int i;
7241 const int nr_items = btrfs_header_nritems(node);
7242
7243 for (i = 0; i < nr_items; i++)
7244 btrfs_readahead_node_child(node, i);
7245 }
7246
7247 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
7248 {
7249 struct btrfs_root *root = fs_info->chunk_root;
7250 struct btrfs_path *path;
7251 struct extent_buffer *leaf;
7252 struct btrfs_key key;
7253 struct btrfs_key found_key;
7254 int ret;
7255 int slot;
7256 int iter_ret = 0;
7257 u64 total_dev = 0;
7258 u64 last_ra_node = 0;
7259
7260 path = btrfs_alloc_path();
7261 if (!path)
7262 return -ENOMEM;
7263
7264 /*
7265 * uuid_mutex is needed only if we are mounting a sprout FS,
7266 * otherwise we don't need it.
7267 */
7268 mutex_lock(&uuid_mutex);
7269
7270 /*
7271 * It is possible for mount and umount to race in such a way that
7272 * we execute this code path, but open_fs_devices failed to clear
7273 * total_rw_bytes. We certainly want it cleared before reading the
7274 * device items, so clear it here.
7275 */
7276 fs_info->fs_devices->total_rw_bytes = 0;
7277
7278 /*
7279 * Lockdep complains about possible circular locking dependency between
7280 * a disk's open_mutex (struct gendisk.open_mutex), the rw semaphores
7281 * used for freeze protection of a fs (struct super_block.s_writers),
7282 * which we take when starting a transaction, and extent buffers of the
7283 * chunk tree if we call read_one_dev() while holding a lock on an
7284 * extent buffer of the chunk tree. Since we are mounting the filesystem
7285 * and at this point there can't be any concurrent task modifying the
7286 * chunk tree, to keep it simple, just skip locking on the chunk tree.
7287 */
7288 ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
7289 path->skip_locking = 1;
7290
7291 /*
7292 * Read all device items, and then all the chunk items. All
7293 * device items are found before any chunk item (their object id
7294 * is smaller than the lowest possible object id for a chunk
7295 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
7296 */ 7297 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 7298 key.offset = 0; 7299 key.type = 0; 7300 btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) { 7301 struct extent_buffer *node = path->nodes[1]; 7302 7303 leaf = path->nodes[0]; 7304 slot = path->slots[0]; 7305 7306 if (node) { 7307 if (last_ra_node != node->start) { 7308 readahead_tree_node_children(node); 7309 last_ra_node = node->start; 7310 } 7311 } 7312 if (found_key.type == BTRFS_DEV_ITEM_KEY) { 7313 struct btrfs_dev_item *dev_item; 7314 dev_item = btrfs_item_ptr(leaf, slot, 7315 struct btrfs_dev_item); 7316 ret = read_one_dev(leaf, dev_item); 7317 if (ret) 7318 goto error; 7319 total_dev++; 7320 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) { 7321 struct btrfs_chunk *chunk; 7322 7323 /* 7324 * We are only called at mount time, so no need to take 7325 * fs_info->chunk_mutex. Plus, to avoid lockdep warnings, 7326 * we always lock first fs_info->chunk_mutex before 7327 * acquiring any locks on the chunk tree. This is a 7328 * requirement for chunk allocation, see the comment on 7329 * top of btrfs_chunk_alloc() for details. 7330 */ 7331 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 7332 ret = read_one_chunk(&found_key, leaf, chunk); 7333 if (ret) 7334 goto error; 7335 } 7336 } 7337 /* Catch error found during iteration */ 7338 if (iter_ret < 0) { 7339 ret = iter_ret; 7340 goto error; 7341 } 7342 7343 /* 7344 * After loading chunk tree, we've got all device information, 7345 * do another round of validation checks. 7346 */ 7347 if (total_dev != fs_info->fs_devices->total_devices) { 7348 btrfs_warn(fs_info, 7349 "super block num_devices %llu mismatch with DEV_ITEM count %llu, will be repaired on next transaction commit", 7350 btrfs_super_num_devices(fs_info->super_copy), 7351 total_dev); 7352 fs_info->fs_devices->total_devices = total_dev; 7353 btrfs_set_super_num_devices(fs_info->super_copy, total_dev); 7354 } 7355 if (btrfs_super_total_bytes(fs_info->super_copy) < 7356 fs_info->fs_devices->total_rw_bytes) { 7357 btrfs_err(fs_info, 7358 "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu", 7359 btrfs_super_total_bytes(fs_info->super_copy), 7360 fs_info->fs_devices->total_rw_bytes); 7361 ret = -EINVAL; 7362 goto error; 7363 } 7364 ret = 0; 7365 error: 7366 mutex_unlock(&uuid_mutex); 7367 7368 btrfs_free_path(path); 7369 return ret; 7370 } 7371 7372 int btrfs_init_devices_late(struct btrfs_fs_info *fs_info) 7373 { 7374 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs; 7375 struct btrfs_device *device; 7376 int ret = 0; 7377 7378 fs_devices->fs_info = fs_info; 7379 7380 mutex_lock(&fs_devices->device_list_mutex); 7381 list_for_each_entry(device, &fs_devices->devices, dev_list) 7382 device->fs_info = fs_info; 7383 7384 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 7385 list_for_each_entry(device, &seed_devs->devices, dev_list) { 7386 device->fs_info = fs_info; 7387 ret = btrfs_get_dev_zone_info(device, false); 7388 if (ret) 7389 break; 7390 } 7391 7392 seed_devs->fs_info = fs_info; 7393 } 7394 mutex_unlock(&fs_devices->device_list_mutex); 7395 7396 return ret; 7397 } 7398 7399 static u64 btrfs_dev_stats_value(const struct extent_buffer *eb, 7400 const struct btrfs_dev_stats_item *ptr, 7401 int index) 7402 { 7403 u64 val; 7404 7405 read_extent_buffer(eb, &val, 7406 offsetof(struct btrfs_dev_stats_item, values) + 7407 ((unsigned long)ptr) + (index * sizeof(u64)), 7408 sizeof(val)); 7409 return val; 7410 } 7411 7412 static void 
btrfs_set_dev_stats_value(struct extent_buffer *eb, 7413 struct btrfs_dev_stats_item *ptr, 7414 int index, u64 val) 7415 { 7416 write_extent_buffer(eb, &val, 7417 offsetof(struct btrfs_dev_stats_item, values) + 7418 ((unsigned long)ptr) + (index * sizeof(u64)), 7419 sizeof(val)); 7420 } 7421 7422 static int btrfs_device_init_dev_stats(struct btrfs_device *device, 7423 struct btrfs_path *path) 7424 { 7425 struct btrfs_dev_stats_item *ptr; 7426 struct extent_buffer *eb; 7427 struct btrfs_key key; 7428 int item_size; 7429 int i, ret, slot; 7430 7431 if (!device->fs_info->dev_root) 7432 return 0; 7433 7434 key.objectid = BTRFS_DEV_STATS_OBJECTID; 7435 key.type = BTRFS_PERSISTENT_ITEM_KEY; 7436 key.offset = device->devid; 7437 ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0); 7438 if (ret) { 7439 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7440 btrfs_dev_stat_set(device, i, 0); 7441 device->dev_stats_valid = 1; 7442 btrfs_release_path(path); 7443 return ret < 0 ? ret : 0; 7444 } 7445 slot = path->slots[0]; 7446 eb = path->nodes[0]; 7447 item_size = btrfs_item_size(eb, slot); 7448 7449 ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item); 7450 7451 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) { 7452 if (item_size >= (1 + i) * sizeof(__le64)) 7453 btrfs_dev_stat_set(device, i, 7454 btrfs_dev_stats_value(eb, ptr, i)); 7455 else 7456 btrfs_dev_stat_set(device, i, 0); 7457 } 7458 7459 device->dev_stats_valid = 1; 7460 btrfs_dev_stat_print_on_load(device); 7461 btrfs_release_path(path); 7462 7463 return 0; 7464 } 7465 7466 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info) 7467 { 7468 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs; 7469 struct btrfs_device *device; 7470 struct btrfs_path *path = NULL; 7471 int ret = 0; 7472 7473 path = btrfs_alloc_path(); 7474 if (!path) 7475 return -ENOMEM; 7476 7477 mutex_lock(&fs_devices->device_list_mutex); 7478 list_for_each_entry(device, &fs_devices->devices, dev_list) { 7479 ret = btrfs_device_init_dev_stats(device, path); 7480 if (ret) 7481 goto out; 7482 } 7483 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 7484 list_for_each_entry(device, &seed_devs->devices, dev_list) { 7485 ret = btrfs_device_init_dev_stats(device, path); 7486 if (ret) 7487 goto out; 7488 } 7489 } 7490 out: 7491 mutex_unlock(&fs_devices->device_list_mutex); 7492 7493 btrfs_free_path(path); 7494 return ret; 7495 } 7496 7497 static int update_dev_stat_item(struct btrfs_trans_handle *trans, 7498 struct btrfs_device *device) 7499 { 7500 struct btrfs_fs_info *fs_info = trans->fs_info; 7501 struct btrfs_root *dev_root = fs_info->dev_root; 7502 struct btrfs_path *path; 7503 struct btrfs_key key; 7504 struct extent_buffer *eb; 7505 struct btrfs_dev_stats_item *ptr; 7506 int ret; 7507 int i; 7508 7509 key.objectid = BTRFS_DEV_STATS_OBJECTID; 7510 key.type = BTRFS_PERSISTENT_ITEM_KEY; 7511 key.offset = device->devid; 7512 7513 path = btrfs_alloc_path(); 7514 if (!path) 7515 return -ENOMEM; 7516 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1); 7517 if (ret < 0) { 7518 btrfs_warn_in_rcu(fs_info, 7519 "error %d while searching for dev_stats item for device %s", 7520 ret, btrfs_dev_name(device)); 7521 goto out; 7522 } 7523 7524 if (ret == 0 && 7525 btrfs_item_size(path->nodes[0], path->slots[0]) < sizeof(*ptr)) { 7526 /* need to delete old one and insert a new one */ 7527 ret = btrfs_del_item(trans, dev_root, path); 7528 if (ret != 0) { 7529 btrfs_warn_in_rcu(fs_info, 7530 "delete too small dev_stats 
item for device %s failed %d",
7531 btrfs_dev_name(device), ret);
7532 goto out;
7533 }
7534 ret = 1;
7535 }
7536
7537 if (ret == 1) {
7538 /* need to insert a new item */
7539 btrfs_release_path(path);
7540 ret = btrfs_insert_empty_item(trans, dev_root, path,
7541 &key, sizeof(*ptr));
7542 if (ret < 0) {
7543 btrfs_warn_in_rcu(fs_info,
7544 "insert dev_stats item for device %s failed %d",
7545 btrfs_dev_name(device), ret);
7546 goto out;
7547 }
7548 }
7549
7550 eb = path->nodes[0];
7551 ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
7552 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7553 btrfs_set_dev_stats_value(eb, ptr, i,
7554 btrfs_dev_stat_read(device, i));
7555 btrfs_mark_buffer_dirty(trans, eb);
7556
7557 out:
7558 btrfs_free_path(path);
7559 return ret;
7560 }
7561
7562 /*
7563 * Called from commit_transaction. Writes all changed device stats to disk.
7564 */
7565 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
7566 {
7567 struct btrfs_fs_info *fs_info = trans->fs_info;
7568 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7569 struct btrfs_device *device;
7570 int stats_cnt;
7571 int ret = 0;
7572
7573 mutex_lock(&fs_devices->device_list_mutex);
7574 list_for_each_entry(device, &fs_devices->devices, dev_list) {
7575 stats_cnt = atomic_read(&device->dev_stats_ccnt);
7576 if (!device->dev_stats_valid || stats_cnt == 0)
7577 continue;
7578
7579
7580 /*
7581 * There is a LOAD-LOAD control dependency between the value of
7582 * dev_stats_ccnt and updating the on-disk values which requires
7583 * reading the in-memory counters. Such control dependencies
7584 * require explicit read memory barriers.
7585 *
7586 * This memory barrier pairs with smp_mb__before_atomic in
7587 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
7588 * barrier implied by atomic_xchg in
7589 * btrfs_dev_stats_read_and_reset.
7590 */
7591 smp_rmb();
7592
7593 ret = update_dev_stat_item(trans, device);
7594 if (!ret)
7595 atomic_sub(stats_cnt, &device->dev_stats_ccnt);
7596 }
7597 mutex_unlock(&fs_devices->device_list_mutex);
7598
7599 return ret;
7600 }
7601
7602 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
7603 {
7604 btrfs_dev_stat_inc(dev, index);
7605
7606 if (!dev->dev_stats_valid)
7607 return;
7608 btrfs_err_rl_in_rcu(dev->fs_info,
7609 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7610 btrfs_dev_name(dev),
7611 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7612 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7613 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7614 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7615 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7616 }
7617
7618 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
7619 {
7620 int i;
7621
7622 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7623 if (btrfs_dev_stat_read(dev, i) != 0)
7624 break;
7625 if (i == BTRFS_DEV_STAT_VALUES_MAX)
7626 return; /* all values == 0, suppress message */
7627
7628 btrfs_info_in_rcu(dev->fs_info,
7629 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7630 btrfs_dev_name(dev),
7631 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7632 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7633 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7634 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7635 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7636 }
7637
7638 int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
7639 struct
/*
 * Update the size and bytes used for each device where it changed. This is
 * delayed since we would otherwise get errors while writing out the
 * superblocks.
 *
 * Must be invoked during transaction commit.
 */
void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
{
	struct btrfs_device *curr, *next;

	ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);

	if (list_empty(&trans->dev_update_list))
		return;

	/*
	 * We don't need the device_list_mutex here. This list is owned by the
	 * transaction and the transaction must complete before the device is
	 * released.
	 */
	mutex_lock(&trans->fs_info->chunk_mutex);
	list_for_each_entry_safe(curr, next, &trans->dev_update_list,
				 post_commit_list) {
		list_del_init(&curr->post_commit_list);
		curr->commit_total_bytes = curr->disk_total_bytes;
		curr->commit_bytes_used = curr->bytes_used;
	}
	mutex_unlock(&trans->fs_info->chunk_mutex);
}

/*
 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
 */
int btrfs_bg_type_to_factor(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	return btrfs_raid_array[index].ncopies;
}
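/*
 * Worked example (illustration only): for the simple profiles the raw disk
 * space consumed by a block group is its logical length multiplied by the
 * factor above. A 1 GiB RAID1 block group (ncopies == 2) occupies 2 GiB of
 * raw device space, while RAID0 and SINGLE (ncopies == 1) occupy exactly
 * their logical length. Parity profiles (RAID5/6) are not covered by this
 * factor. A sketch, assuming a hypothetical helper name:
 */
#if 0	/* example only, never compiled */
static u64 example_raw_bytes(u64 logical_bytes, u64 bg_flags)
{
	/* DUP, RAID1-like and RAID10 store ncopies replicas of every byte. */
	return logical_bytes * btrfs_bg_type_to_factor(bg_flags);
}
#endif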
static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
				 u64 chunk_offset, u64 devid,
				 u64 physical_offset, u64 physical_len)
{
	struct btrfs_dev_lookup_args args = { .devid = devid };
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *dev;
	u64 stripe_len;
	bool found = false;
	int ret = 0;
	int i;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
		goto out;
	}

	map = em->map_lookup;
	stripe_len = btrfs_calc_stripe_length(em);
	if (physical_len != stripe_len) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
			  physical_offset, devid, em->start, physical_len,
			  stripe_len);
		ret = -EUCLEAN;
		goto out;
	}

	/*
	 * Very old mkfs.btrfs (before v4.1) will not respect the reserved
	 * space. Although the kernel can handle it without problem, it's
	 * better to warn the users.
	 */
	if (physical_offset < BTRFS_DEVICE_RANGE_RESERVED)
		btrfs_warn(fs_info,
		"devid %llu physical %llu len %llu inside the reserved space",
			   devid, physical_offset, physical_len);

	for (i = 0; i < map->num_stripes; i++) {
		if (map->stripes[i].dev->devid == devid &&
		    map->stripes[i].physical == physical_offset) {
			found = true;
			if (map->verified_stripes >= map->num_stripes) {
				btrfs_err(fs_info,
				"too many dev extents for chunk %llu found",
					  em->start);
				ret = -EUCLEAN;
				goto out;
			}
			map->verified_stripes++;
			break;
		}
	}
	if (!found) {
		btrfs_err(fs_info,
	"dev extent physical offset %llu devid %llu has no corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
	}

	/* Make sure no dev extent is beyond device boundary */
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	if (!dev) {
		btrfs_err(fs_info, "failed to find devid %llu", devid);
		ret = -EUCLEAN;
		goto out;
	}

	if (physical_offset + physical_len > dev->disk_total_bytes) {
		btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
			  devid, physical_offset, physical_len,
			  dev->disk_total_bytes);
		ret = -EUCLEAN;
		goto out;
	}

	if (dev->zone_info) {
		u64 zone_size = dev->zone_info->zone_size;

		if (!IS_ALIGNED(physical_offset, zone_size) ||
		    !IS_ALIGNED(physical_len, zone_size)) {
			btrfs_err(fs_info,
"zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone",
				  devid, physical_offset, physical_len);
			ret = -EUCLEAN;
			goto out;
		}
	}

out:
	free_extent_map(em);
	return ret;
}

static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct rb_node *node;
	int ret = 0;

	read_lock(&em_tree->lock);
	for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
		em = rb_entry(node, struct extent_map, rb_node);
		if (em->map_lookup->num_stripes !=
		    em->map_lookup->verified_stripes) {
			btrfs_err(fs_info,
			"chunk %llu has missing dev extent, have %d expect %d",
				  em->start, em->map_lookup->verified_stripes,
				  em->map_lookup->num_stripes);
			ret = -EUCLEAN;
			goto out;
		}
	}
out:
	read_unlock(&em_tree->lock);
	return ret;
}
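/*
 * Worked example for the stripe length check above (illustration only):
 * btrfs_calc_stripe_length() divides the chunk's logical length by its
 * number of data stripes, giving the expected size of each per-device
 * extent. A 2 GiB RAID0 chunk on 2 devices yields 1 GiB per device; a
 * 1 GiB RAID1 chunk (2 copies, 1 data stripe) yields two dev extents of
 * the full 1 GiB each; a RAID5 chunk on 3 devices (2 data + 1 parity
 * stripe) yields dev extents of half the chunk's logical length.
 */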
/*
 * Ensure that all dev extents are mapped to correct chunk, otherwise
 * later chunk allocation/free would cause unexpected behavior.
 *
 * NOTE: This will iterate through the whole device tree, which should be
 * about the same size as the chunk tree. This slightly increases mount time.
 */
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	u64 prev_devid = 0;
	u64 prev_dev_ext_end = 0;
	int ret = 0;

	/*
	 * We don't have a dev_root because we mounted with ignorebadroots and
	 * failed to load the root, so we want to skip the verification in this
	 * case for sure.
	 *
	 * However if the dev root is fine, but the tree itself is corrupted
	 * we'd still fail to mount. This verification is only to make sure
	 * writes can happen safely, so instead just bypass this check
	 * completely in the case of IGNOREBADROOTS.
	 */
	if (btrfs_test_opt(fs_info, IGNOREBADROOTS))
		return 0;

	key.objectid = 1;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0)
			goto out;
		/* No dev extents at all? Not good */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	while (1) {
		struct extent_buffer *leaf = path->nodes[0];
		struct btrfs_dev_extent *dext;
		int slot = path->slots[0];
		u64 chunk_offset;
		u64 physical_offset;
		u64 physical_len;
		u64 devid;

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type != BTRFS_DEV_EXTENT_KEY)
			break;
		devid = key.objectid;
		physical_offset = key.offset;

		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
		physical_len = btrfs_dev_extent_length(leaf, dext);

		/* Check if this dev extent overlaps with the previous one */
		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
			btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
				  devid, physical_offset, prev_dev_ext_end);
			ret = -EUCLEAN;
			goto out;
		}

		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
					    physical_offset, physical_len);
		if (ret < 0)
			goto out;
		prev_devid = devid;
		prev_dev_ext_end = physical_offset + physical_len;

		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = 0;
			break;
		}
	}

	/* Ensure all chunks have corresponding dev extents */
	ret = verify_chunk_dev_extent_mapping(fs_info);
out:
	btrfs_free_path(path);
	return ret;
}
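/*
 * Illustration of the overlap check in btrfs_verify_dev_extents() above
 * (example only): dev extent items are keyed by (devid, physical offset),
 * so the tree walk visits them sorted per device, and remembering only the
 * previous extent's end is enough to detect any overlap:
 */
#if 0	/* example only, never compiled */
static bool example_overlaps(u64 prev_devid, u64 prev_end,
			     u64 devid, u64 start)
{
	/* Sorted order makes "start before previous end" the only case. */
	return devid == prev_devid && start < prev_end;
}
#endif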
/*
 * Check whether the given block group or device is pinned by any inode being
 * used as a swapfile.
 */
bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
{
	struct btrfs_swapfile_pin *sp;
	struct rb_node *node;

	spin_lock(&fs_info->swapfile_pins_lock);
	node = fs_info->swapfile_pins.rb_node;
	while (node) {
		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
		if (ptr < sp->ptr)
			node = node->rb_left;
		else if (ptr > sp->ptr)
			node = node->rb_right;
		else
			break;
	}
	spin_unlock(&fs_info->swapfile_pins_lock);
	return node != NULL;
}

static int relocating_repair_kthread(void *data)
{
	struct btrfs_block_group *cache = data;
	struct btrfs_fs_info *fs_info = cache->fs_info;
	u64 target;
	int ret = 0;

	target = cache->start;
	btrfs_put_block_group(cache);

	sb_start_write(fs_info->sb);
	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
		btrfs_info(fs_info,
			   "zoned: skip relocating block group %llu to repair: EBUSY",
			   target);
		sb_end_write(fs_info->sb);
		return -EBUSY;
	}

	mutex_lock(&fs_info->reclaim_bgs_lock);

	/* Ensure block group still exists */
	cache = btrfs_lookup_block_group(fs_info, target);
	if (!cache)
		goto out;

	if (!test_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags))
		goto out;

	ret = btrfs_may_alloc_data_chunk(fs_info, target);
	if (ret < 0)
		goto out;

	btrfs_info(fs_info,
		   "zoned: relocating block group %llu to repair IO failure",
		   target);
	ret = btrfs_relocate_chunk(fs_info, target);

out:
	if (cache)
		btrfs_put_block_group(cache);
	mutex_unlock(&fs_info->reclaim_bgs_lock);
	btrfs_exclop_finish(fs_info);
	sb_end_write(fs_info->sb);

	return ret;
}

bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
{
	struct btrfs_block_group *cache;

	if (!btrfs_is_zoned(fs_info))
		return false;

	/* Do not attempt to repair in degraded state */
	if (btrfs_test_opt(fs_info, DEGRADED))
		return true;

	cache = btrfs_lookup_block_group(fs_info, logical);
	if (!cache)
		return true;

	if (test_and_set_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags)) {
		btrfs_put_block_group(cache);
		return true;
	}

	kthread_run(relocating_repair_kthread, cache,
		    "btrfs-relocating-repair");

	return true;
}
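/*
 * Illustration (example only): btrfs_repair_one_zone() above uses
 * test_and_set_bit() as a cheap single-winner gate so that at most one
 * caller spawns the repair kthread for a given block group; every other
 * concurrent caller sees the flag already set and only drops its block
 * group reference. The generic shape of the pattern, with the names
 * FLAG, obj, put_reference and worker_fn all hypothetical:
 */
#if 0	/* example only, never compiled */
if (test_and_set_bit(FLAG, &obj->flags)) {
	/* somebody else already claimed the work */
	put_reference(obj);
	return;
}
/* we are the single winner, hand @obj off to the worker */
kthread_run(worker_fn, obj, "worker");
#endif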
static void map_raid56_repair_block(struct btrfs_io_context *bioc,
				    struct btrfs_io_stripe *smap,
				    u64 logical)
{
	int data_stripes = nr_bioc_data_stripes(bioc);
	int i;

	for (i = 0; i < data_stripes; i++) {
		u64 stripe_start = bioc->full_stripe_logical +
				   btrfs_stripe_nr_to_offset(i);

		if (logical >= stripe_start &&
		    logical < stripe_start + BTRFS_STRIPE_LEN)
			break;
	}
	ASSERT(i < data_stripes);
	smap->dev = bioc->stripes[i].dev;
	smap->physical = bioc->stripes[i].physical +
			((logical - bioc->full_stripe_logical) &
			 BTRFS_STRIPE_LEN_MASK);
}

/*
 * Map a repair write into a single device.
 *
 * A repair write is triggered by read time repair or scrub, which would only
 * update the contents of a single device.
 * It does not update any other mirrors nor go through the RMW path.
 *
 * Callers should ensure:
 *
 * - Call btrfs_bio_counter_inc_blocked() first
 * - The range does not cross stripe boundary
 * - Has a valid @mirror_num passed in.
 */
int btrfs_map_repair_block(struct btrfs_fs_info *fs_info,
			   struct btrfs_io_stripe *smap, u64 logical,
			   u32 length, int mirror_num)
{
	struct btrfs_io_context *bioc = NULL;
	u64 map_length = length;
	int mirror_ret = mirror_num;
	int ret;

	ASSERT(mirror_num > 0);

	ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical, &map_length,
			      &bioc, smap, &mirror_ret, true);
	if (ret < 0)
		return ret;

	/* The map range should not cross stripe boundary. */
	ASSERT(map_length >= length);

	/* Already mapped to single stripe. */
	if (!bioc)
		goto out;

	/* Map the RAID56 multi-stripe writes to a single one. */
	if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		map_raid56_repair_block(bioc, smap, logical);
		goto out;
	}

	ASSERT(mirror_num <= bioc->num_stripes);
	smap->dev = bioc->stripes[mirror_num - 1].dev;
	smap->physical = bioc->stripes[mirror_num - 1].physical;
out:
	btrfs_put_bioc(bioc);
	ASSERT(smap->dev);
	return 0;
}
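/*
 * Worked example for map_raid56_repair_block() above (illustration only,
 * assuming the in-kernel 64 KiB BTRFS_STRIPE_LEN): for a full stripe
 * starting at logical 0 and a repair write at logical 80 KiB, the loop
 * selects data stripe i == 1 (covering [64 KiB, 128 KiB)), and the target
 * becomes
 *
 *	stripes[1].physical + (80 KiB & BTRFS_STRIPE_LEN_MASK)
 *	= stripes[1].physical + 16 KiB
 *
 * i.e. the write lands on exactly one member device, bypassing parity
 * recomputation entirely.
 */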