// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include <linux/namei.h>
#include "misc.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "tree-checker.h"
#include "space-info.h"
#include "block-group.h"
#include "discard.h"
#include "zoned.h"
#include "fs.h"
#include "accessors.h"
#include "uuid-tree.h"
#include "ioctl.h"
#include "relocation.h"
#include "scrub.h"
#include "super.h"

#define BTRFS_BLOCK_GROUP_STRIPE_MASK	(BTRFS_BLOCK_GROUP_RAID0 | \
					 BTRFS_BLOCK_GROUP_RAID10 | \
					 BTRFS_BLOCK_GROUP_RAID56_MASK)

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes = 2,
		.dev_stripes = 1,
		.devs_max = 0,	/* 0 == as many as possible */
		.devs_min = 2,
		.tolerated_failures = 1,
		.devs_increment = 2,
		.ncopies = 2,
		.nparity = 0,
		.raid_name = "raid10",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 2,
		.devs_min = 2,
		.tolerated_failures = 1,
		.devs_increment = 2,
		.ncopies = 2,
		.nparity = 0,
		.raid_name = "raid1",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C3] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 3,
		.devs_min = 3,
		.tolerated_failures = 2,
		.devs_increment = 3,
		.ncopies = 3,
		.nparity = 0,
		.raid_name = "raid1c3",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID1C3,
		.mindev_error = BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C4] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 4,
		.devs_min = 4,
		.tolerated_failures = 3,
		.devs_increment = 4,
		.ncopies = 4,
		.nparity = 0,
		.raid_name = "raid1c4",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID1C4,
		.mindev_error = BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes = 1,
		.dev_stripes = 2,
		.devs_max = 1,
		.devs_min = 1,
		.tolerated_failures = 0,
		.devs_increment = 1,
		.ncopies = 2,
		.nparity = 0,
		.raid_name = "dup",
		.bg_flag = BTRFS_BLOCK_GROUP_DUP,
		.mindev_error = 0,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 0,
		.devs_min = 1,
		.tolerated_failures = 0,
		.devs_increment = 1,
		.ncopies = 1,
		.nparity = 0,
		.raid_name = "raid0",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID0,
		.mindev_error = 0,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 1,
		.devs_min = 1,
		.tolerated_failures = 0,
		.devs_increment = 1,
		.ncopies = 1,
		.nparity = 0,
		.raid_name = "single",
		.bg_flag = 0,
		.mindev_error = 0,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 0,
		.devs_min = 2,
		.tolerated_failures = 1,
		.devs_increment = 1,
		.ncopies = 1,
		.nparity = 1,
		.raid_name = "raid5",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 0,
		.devs_min = 3,
		.tolerated_failures = 2,
		.devs_increment = 1,
		.ncopies = 1,
		.nparity = 2,
		.raid_name = "raid6",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};
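/*
 * Illustrative sketch (editor's addition, not from the original source): the
 * table above is meant to be indexed by enum btrfs_raid_types, typically
 * obtained from block group flags via btrfs_bg_flags_to_raid_index() defined
 * below, e.g.:
 *
 *	const struct btrfs_raid_attr *attr;
 *
 *	attr = &btrfs_raid_array[btrfs_bg_flags_to_raid_index(
 *						BTRFS_BLOCK_GROUP_RAID6)];
 *	// attr->ncopies == 1, attr->nparity == 2,
 *	// attr->tolerated_failures == 2
 */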
/*
 * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
 * can be used as index to access btrfs_raid_array[].
 */
enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags)
{
	const u64 profile = (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK);

	if (!profile)
		return BTRFS_RAID_SINGLE;

	return BTRFS_BG_FLAG_TO_INDEX(profile);
}

const char *btrfs_bg_type_to_raid_name(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	if (index >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[index].raid_name;
}

int btrfs_nr_parity_stripes(u64 type)
{
	enum btrfs_raid_types index = btrfs_bg_flags_to_raid_index(type);

	return btrfs_raid_array[index].nparity;
}

/*
 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 * bytes including terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

	if (!flags) {
		strcpy(bp, "NONE");
		return;
	}

#define DESCRIBE_FLAG(flag, desc)					\
	do {								\
		if (flags & (flag)) {					\
			ret = snprintf(bp, size_bp, "%s|", (desc));	\
			if (ret < 0 || ret >= size_bp)			\
				goto out_overflow;			\
			size_bp -= ret;					\
			bp += ret;					\
			flags &= ~(flag);				\
		}							\
	} while (0)

	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	if (flags) {
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		size_bp -= ret;
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed, it's up to the caller to provide a
	 * sufficiently large buffer.
	 */
out_overflow:;
}
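/*
 * Illustrative example (editor's addition, not from the original source):
 * for bg_flags == (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1) the
 * function above fills @buf with "data|raid1". Each DESCRIBE_FLAG() appends
 * "<name>|" for a flag that is set, and the trailing '|' is overwritten with
 * '\0' at the end. Any leftover unknown bits are printed as a hex value.
 */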
static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);

/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list in general
 * but in mount context it could be used to exclude list modifications by eg.
 * scan ioctl
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, ie. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * Is not required at mount and close times, because our device list is
 * protected by the uuid_mutex at that point.
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed. Additionally it also protects post_commit_list of
 * individual devices, since they can be added to the transaction's
 * post_commit_list only with chunk_mutex held.
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Lock nesting
 * ============
 *
 * uuid_mutex
 *   device_list_mutex
 *     chunk_mutex
 *   balance_mutex
 *
 *
 * Exclusive operations
 * ====================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * The status of exclusive operation is set and cleared atomically.
 * During the course of Paused state, fs_info::exclusive_operation remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * The exclusive status is cleared when the device operation is canceled or
 * completed.
 */
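/*
 * Illustrative sketch (editor's addition, not from the original source) of
 * the nesting order documented above: a hypothetical operation needing both
 * the global and the per-fs list lock acquires them outermost-first, e.g.:
 *
 *	mutex_lock(&uuid_mutex);
 *	mutex_lock(&fs_devices->device_list_mutex);
 *	...
 *	mutex_unlock(&fs_devices->device_list_mutex);
 *	mutex_unlock(&uuid_mutex);
 *
 * Taking them in the opposite order would invert the documented hierarchy
 * and risk a deadlock.
 */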
DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

/*
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:		if not NULL, copy the UUID to fs_devices::fsid
 * @metadata_fsid:	if not NULL, copy the UUID to fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
						 const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	INIT_LIST_HEAD(&fs_devs->seed_list);
	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);

	if (metadata_fsid)
		memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
	else if (fsid)
		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);

	return fs_devs;
}

void btrfs_free_device(struct btrfs_device *device)
{
	WARN_ON(!list_empty(&device->post_commit_list));
	rcu_string_free(device->name);
	extent_io_tree_release(&device->alloc_state);
	btrfs_destroy_dev_zone_info(device);
	kfree(device);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}

void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}

static noinline struct btrfs_fs_devices *find_fsid(
		const u8 *fsid, const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devices;

	ASSERT(fsid);

	/* Handle non-split brain cases */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (metadata_fsid) {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
			    && memcmp(metadata_fsid, fs_devices->metadata_uuid,
				      BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		} else {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		}
	}
	return NULL;
}

static struct btrfs_fs_devices *find_fsid_with_metadata_uuid(
				struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle scanned device having completed its fsid change but
	 * belonging to a fs_devices that was created by first scanning
	 * a device which didn't have its fsid/metadata_uuid changed
	 * at all and the CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(disk_super->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}
	/*
	 * Handle scanned device having completed its fsid change but
	 * belonging to a fs_devices that was created by a device that
	 * has an outdated pair of fsid/metadata_uuid and
	 * CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(fs_devices->metadata_uuid,
			   fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
		    memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, disk_super->metadata_uuid);
}


static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct btrfs_super_block **disk_super)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		sync_blockdev(*bdev);
	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*disk_super = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*disk_super)) {
		ret = PTR_ERR(*disk_super);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	return ret;
}
/*
 * Search and remove all stale devices (which are not mounted). When both
 * inputs are NULL, it will search and release all stale devices.
 *
 * @devt:	 Optional. When provided, it will only release unmounted
 *		 devices matching this devt.
 * @skip_device: Optional. Will skip this device when searching for the stale
 *		 devices.
 *
 * Return:	0 for success or if @devt is 0.
 *		-EBUSY if @devt is a mounted device.
 *		-ENOENT if @devt does not match any device in the list.
 */
static int btrfs_free_stale_devices(dev_t devt, struct btrfs_device *skip_device)
{
	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
	struct btrfs_device *device, *tmp_device;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	if (devt)
		ret = -ENOENT;

	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {

		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry_safe(device, tmp_device,
					 &fs_devices->devices, dev_list) {
			if (skip_device && skip_device == device)
				continue;
			if (devt && devt != device->devt)
				continue;
			if (fs_devices->opened) {
				/* for an already deleted device return 0 */
				if (devt && ret != 0)
					ret = -EBUSY;
				break;
			}

			/* delete the stale device */
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);

			ret = 0;
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		if (fs_devices->num_devices == 0) {
			btrfs_sysfs_remove_fsid(fs_devices);
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	return ret;
}
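/*
 * Illustrative note (editor's addition, not from the original source): a
 * typical caller is btrfs_forget_devices() further below, which does
 *
 *	mutex_lock(&uuid_mutex);
 *	ret = btrfs_free_stale_devices(devt, NULL);
 *	mutex_unlock(&uuid_mutex);
 *
 * satisfying the lockdep_assert_held(&uuid_mutex) above.
 */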
/*
 * This is only used on mount, and we are protected from competing things
 * messing with our fs_devices by the uuid_mutex, thus we do not need the
 * fs_devices->device_list_mutex here.
 */
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
			struct btrfs_device *device, fmode_t flags,
			void *holder)
{
	struct block_device *bdev;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	if (device->bdev)
		return -EINVAL;
	if (!device->name)
		return -EINVAL;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev, &disk_super);
	if (ret)
		return ret;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_free_page;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_free_page;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
			pr_err(
		"BTRFS: Invalid seeding and uuid-changed device detected\n");
			goto error_free_page;
		}

		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = true;
	} else {
		if (bdev_read_only(bdev))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	if (!bdev_nonrot(bdev))
		fs_devices->rotating = true;

	if (bdev_max_discard_sectors(bdev))
		fs_devices->discardable = true;

	device->bdev = bdev;
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	device->mode = flags;

	fs_devices->open_devices++;
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	btrfs_release_disk_super(disk_super);

	return 0;

error_free_page:
	btrfs_release_disk_super(disk_super);
	blkdev_put(bdev, flags);

	return -EINVAL;
}

/*
 * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
 * being created with a disk that has already completed its fsid change. Such
 * disk can belong to an fs which has its FSID changed or to one which doesn't.
 * Handle both cases here.
 */
static struct btrfs_fs_devices *find_fsid_inprogress(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, NULL);
}
static struct btrfs_fs_devices *find_fsid_changed(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handles the case where the scanned device is part of an fs that had
	 * multiple successful changes of FSID but the currently scanned device
	 * didn't observe it. Meaning our fsid will be different than theirs.
	 * We need to handle two subcases:
	 *  1 - The fs still continues to have different METADATA/FSID uuids.
	 *  2 - The fs is switched back to its original FSID (METADATA/FSID
	 *      are equal).
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		/* Changed UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->fsid,
			   BTRFS_FSID_SIZE) != 0)
			return fs_devices;

		/* Unchanged UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}

	return NULL;
}

static struct btrfs_fs_devices *find_fsid_reverted_metadata(
				struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle the case where the scanned device is part of an fs whose last
	 * metadata UUID change reverted it to the original FSID. At the same
	 * time fs_devices was first created by another constituent device
	 * which didn't fully observe the operation. This results in a
	 * btrfs_fs_devices created with metadata/fsid different AND
	 * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
	 * fs_devices equal to the FSID of the disk.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    fs_devices->fsid_change)
			return fs_devices;
	}

	return NULL;
}
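/*
 * Illustrative summary (editor's addition, not from the original source) of
 * how the finders above are dispatched from device_list_add() below, based
 * on two bits read from the scanned super block:
 *
 *	CHANGING_FSID_V2  METADATA_UUID  lookup used
 *	----------------  -------------  -------------------------------
 *	set		  not set	  find_fsid_inprogress()
 *	set		  set		  find_fsid_changed()
 *	not set		  set		  find_fsid_with_metadata_uuid()
 *	not set		  not set	  find_fsid_reverted_metadata(),
 *					  then plain find_fsid()
 */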
/*
 * Add new device to list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   bool *new_device_added)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = NULL;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);
	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
	dev_t path_devt;
	int error;
	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
	bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
					BTRFS_SUPER_FLAG_CHANGING_FSID_V2);

	error = lookup_bdev(path, &path_devt);
	if (error) {
		btrfs_err(NULL, "failed to lookup block device for path %s: %d",
			  path, error);
		return ERR_PTR(error);
	}

	if (fsid_change_in_progress) {
		if (!has_metadata_uuid)
			fs_devices = find_fsid_inprogress(disk_super);
		else
			fs_devices = find_fsid_changed(disk_super);
	} else if (has_metadata_uuid) {
		fs_devices = find_fsid_with_metadata_uuid(disk_super);
	} else {
		fs_devices = find_fsid_reverted_metadata(disk_super);
		if (!fs_devices)
			fs_devices = find_fsid(disk_super->fsid, NULL);
	}


	if (!fs_devices) {
		if (has_metadata_uuid)
			fs_devices = alloc_fs_devices(disk_super->fsid,
						      disk_super->metadata_uuid);
		else
			fs_devices = alloc_fs_devices(disk_super->fsid, NULL);

		if (IS_ERR(fs_devices))
			return ERR_CAST(fs_devices);

		fs_devices->fsid_change = fsid_change_in_progress;

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&fs_devices->fs_list, &fs_uuids);

		device = NULL;
	} else {
		struct btrfs_dev_lookup_args args = {
			.devid = devid,
			.uuid = disk_super->dev_item.uuid,
		};

		mutex_lock(&fs_devices->device_list_mutex);
		device = btrfs_find_device(fs_devices, &args);

		/*
		 * If this disk has been pulled into an fs devices created by
		 * a device which had the CHANGING_FSID_V2 flag then replace the
		 * metadata_uuid/fsid values of the fs_devices.
		 */
		if (fs_devices->fsid_change &&
		    found_transid > fs_devices->latest_generation) {
			memcpy(fs_devices->fsid, disk_super->fsid,
			       BTRFS_FSID_SIZE);

			if (has_metadata_uuid)
				memcpy(fs_devices->metadata_uuid,
				       disk_super->metadata_uuid,
				       BTRFS_FSID_SIZE);
			else
				memcpy(fs_devices->metadata_uuid,
				       disk_super->fsid, BTRFS_FSID_SIZE);

			fs_devices->fsid_change = false;
		}
	}

	if (!device) {
		unsigned int nofs_flag;

		if (fs_devices->opened) {
			btrfs_err(NULL,
		"device %s belongs to fsid %pU, and the fs is already mounted",
				  path, fs_devices->fsid);
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EBUSY);
		}

		nofs_flag = memalloc_nofs_save();
		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid, path);
		memalloc_nofs_restore(nofs_flag);
		if (IS_ERR(device)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			/* we can safely leave the fs_devices entry around */
			return device;
		}

		device->devt = path_devt;

		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;

		device->fs_devices = fs_devices;
		*new_device_added = true;

		if (disk_super->label[0])
			pr_info(
	"BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->label, devid, found_transid, path,
				current->comm, task_pid_nr(current));
		else
			pr_info(
	"BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->fsid, devid, found_transid, path,
				current->comm, task_pid_nr(current));

	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *	a. The same device disappeared and reappeared with
		 *	   different name. or
		 *	b. The missing-disk-which-was-replaced, has
		 *	   reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be spurious
		 * and unintentional.
		 *
		 * Further in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transaction when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at all time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted. We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with same uuid and devid. We keep the one
			 * with larger generation number or the last-in if
			 * generation are equal.
			 */
			mutex_unlock(&fs_devices->device_list_mutex);
			btrfs_err(NULL,
"device %s already registered with a higher generation, found %llu expect %llu",
				  path, found_transid, device->generation);
			return ERR_PTR(-EEXIST);
		}

		/*
		 * We are going to replace the device path for a given devid,
		 * make sure it's the same device if the device is mounted
		 *
		 * NOTE: the device->fs_info may not be reliable here so pass
		 * in a NULL to message helpers instead. This avoids a possible
		 * use-after-free when the fs_info and fs_info->sb are already
		 * torn down.
		 */
		if (device->bdev) {
			if (device->devt != path_devt) {
				mutex_unlock(&fs_devices->device_list_mutex);
				btrfs_warn_in_rcu(NULL,
	"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
						  path, devid, found_transid,
						  current->comm,
						  task_pid_nr(current));
				return ERR_PTR(-EEXIST);
			}
			btrfs_info_in_rcu(NULL,
	"devid %llu device path %s changed to %s scanned by %s (%d)",
					  devid, btrfs_dev_name(device),
					  path, current->comm,
					  task_pid_nr(current));
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			fs_devices->missing_devices--;
			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}
		device->devt = path_devt;
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with largest generation
	 * (as above).
	 */
	if (!fs_devices->opened) {
		device->generation = found_transid;
		fs_devices->latest_generation = max_t(u64, found_transid,
						fs_devices->latest_generation);
	}

	fs_devices->total_devices = btrfs_super_num_devices(disk_super);

	mutex_unlock(&fs_devices->device_list_mutex);
	return device;
}
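/*
 * Illustrative note (editor's addition, not from the original source): the
 * normal entry point into device_list_add() is the scan path further below,
 * roughly:
 *
 *	device = btrfs_scan_one_device(path, flags, holder);
 *		-> btrfs_read_disk_super()
 *		-> device_list_add(path, disk_super, &new_device_added);
 *	if (!IS_ERR(device) && new_device_added)
 *		btrfs_free_stale_devices(device->devt, device);
 *
 * all of it under uuid_mutex, which is why device_list_add() only takes the
 * per-fs device_list_mutex internally.
 */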
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	fs_devices = alloc_fs_devices(orig->fsid, NULL);
	if (IS_ERR(fs_devices))
		return fs_devices;

	fs_devices->total_devices = orig->total_devices;

	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		const char *dev_path = NULL;

		/*
		 * This is ok to do without RCU read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name)
			dev_path = orig_dev->name->str;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid, dev_path);
		if (IS_ERR(device)) {
			ret = PTR_ERR(device);
			goto error;
		}

		if (orig_dev->zone_info) {
			struct btrfs_zoned_device_info *zone_info;

			zone_info = btrfs_clone_dev_zone_info(orig_dev);
			if (!zone_info) {
				btrfs_free_device(device);
				ret = -ENOMEM;
				goto error;
			}
			device->zone_info = zone_info;
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(ret);
}

static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
				      struct btrfs_device **latest_dev)
{
	struct btrfs_device *device, *next;

	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state) &&
			    !test_bit(BTRFS_DEV_STATE_MISSING,
				      &device->dev_state) &&
			    (!*latest_dev ||
			     device->generation > (*latest_dev)->generation)) {
				*latest_dev = device;
			}
			continue;
		}

		/*
		 * We have already validated the presence of
		 * BTRFS_DEV_REPLACE_DEVID in btrfs_init_dev_replace(), so just
		 * continue.
		 */
		if (device->devid == BTRFS_DEV_REPLACE_DEVID)
			continue;

		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			list_del_init(&device->dev_alloc_list);
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		btrfs_free_device(device);
	}

}
/*
 * After we have read the system tree and know devids belonging to this
 * filesystem, remove the device which does not belong there.
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_fs_devices *seed_dev;

	mutex_lock(&uuid_mutex);
	__btrfs_free_extra_devids(fs_devices, &latest_dev);

	list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
		__btrfs_free_extra_devids(seed_dev, &latest_dev);

	fs_devices->latest_dev = latest_dev;

	mutex_unlock(&uuid_mutex);
}
static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (!device->bdev)
		return;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	blkdev_put(device->bdev, device->mode);
}

static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (device->devid == BTRFS_DEV_REPLACE_DEVID)
		clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
		clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		fs_devices->missing_devices--;
	}

	btrfs_close_bdev(device);
	if (device->bdev) {
		fs_devices->open_devices--;
		device->bdev = NULL;
	}
	clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	btrfs_destroy_dev_zone_info(device);

	device->fs_info = NULL;
	atomic_set(&device->dev_stats_ccnt, 0);
	extent_io_tree_release(&device->alloc_state);

	/*
	 * Reset the flush error record. We might have a transient flush error
	 * in this mount, and if so we aborted the current transaction and set
	 * the fs to an error state, guaranteeing no super blocks can be further
	 * committed. However that error might be transient and if we unmount the
	 * filesystem and mount it again, we should allow the mount to succeed
	 * (btrfs_check_rw_degradable() should not fail) - if after mounting the
	 * filesystem again we still get flush errors, then we will again abort
	 * any transaction and set the error state, guaranteeing no commits of
	 * unsafe super blocks.
	 */
	device->last_flush_error = 0;

	/* Verify the device is back in a pristine state */
	ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
	ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	ASSERT(list_empty(&device->dev_alloc_list));
	ASSERT(list_empty(&device->post_commit_list));
}

static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	lockdep_assert_held(&uuid_mutex);

	if (--fs_devices->opened > 0)
		return;

	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list)
		btrfs_close_one_device(device);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = false;
	fs_devices->fs_info = NULL;
}

void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	LIST_HEAD(list);
	struct btrfs_fs_devices *tmp;

	mutex_lock(&uuid_mutex);
	close_fs_devices(fs_devices);
	if (!fs_devices->opened)
		list_splice_init(&fs_devices->seed_list, &list);

	list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
		close_fs_devices(fs_devices);
		list_del(&fs_devices->seed_list);
		free_fs_devices(fs_devices);
	}
	mutex_unlock(&uuid_mutex);
}

static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_device *tmp_device;

	flags |= FMODE_EXCL;

	list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
				 dev_list) {
		int ret;

		ret = btrfs_open_one_device(fs_devices, device, flags, holder);
		if (ret == 0 &&
		    (!latest_dev || device->generation > latest_dev->generation)) {
			latest_dev = device;
		} else if (ret == -ENODATA) {
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);
		}
	}
	if (fs_devices->open_devices == 0)
		return -EINVAL;

	fs_devices->opened = 1;
	fs_devices->latest_dev = latest_dev;
	fs_devices->total_rw_bytes = 0;
	fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;
	fs_devices->read_policy = BTRFS_READ_POLICY_PID;

	return 0;
}

static int devid_cmp(void *priv, const struct list_head *a,
		     const struct list_head *b)
{
	const struct btrfs_device *dev1, *dev2;

	dev1 = list_entry(a, struct btrfs_device, dev_list);
	dev2 = list_entry(b, struct btrfs_device, dev_list);

	if (dev1->devid < dev2->devid)
		return -1;
	else if (dev1->devid > dev2->devid)
		return 1;
	return 0;
}
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	lockdep_assert_held(&uuid_mutex);
	/*
	 * The device_list_mutex cannot be taken here in case opening the
	 * underlying device takes further locks like open_mutex.
	 *
	 * We also don't need the lock here as this is called during mount and
	 * exclusion is provided by uuid_mutex
	 */

	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		list_sort(NULL, &fs_devices->devices, devid_cmp);
		ret = open_fs_devices(fs_devices, flags, holder);
	}

	return ret;
}

void btrfs_release_disk_super(struct btrfs_super_block *super)
{
	struct page *page = virt_to_page(super);

	put_page(page);
}

static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
						       u64 bytenr, u64 bytenr_orig)
{
	struct btrfs_super_block *disk_super;
	struct page *page;
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= bdev_nr_bytes(bdev))
		return ERR_PTR(-EINVAL);

	/* make sure our super fits in the page */
	if (sizeof(*disk_super) > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
		return ERR_PTR(-EINVAL);

	/* pull in the page with our super */
	page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL);

	if (IS_ERR(page))
		return ERR_CAST(page);

	p = page_address(page);

	/* align our pointer to the offset of the super block */
	disk_super = p + offset_in_page(bytenr);

	if (btrfs_super_bytenr(disk_super) != bytenr_orig ||
	    btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(p);
		return ERR_PTR(-EINVAL);
	}

	if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
		disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;

	return disk_super;
}
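/*
 * Illustrative note (editor's addition, not from the original source): for
 * the primary copy, callers pass bytenr == bytenr_orig == btrfs_sb_offset(0),
 * so the checks above reduce to "the super block lives entirely inside one
 * page of the block device's page cache". The two values can differ when the
 * on-disk location of a super block copy is remapped (see the
 * btrfs_sb_log_location_bdev() call in btrfs_scan_one_device() below), while
 * the super block's recorded bytenr still holds the nominal offset, which is
 * what btrfs_super_bytenr() is validated against.
 */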
int btrfs_forget_devices(dev_t devt)
{
	int ret;

	mutex_lock(&uuid_mutex);
	ret = btrfs_free_stale_devices(devt, NULL);
	mutex_unlock(&uuid_mutex);

	return ret;
}

/*
 * Look for a btrfs signature on a device. This may be called out of the mount path
 * and we are not allowed to call set_blocksize during the scan. The superblock
 * is read via pagecache.
 */
struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
					   void *holder)
{
	struct btrfs_super_block *disk_super;
	bool new_device_added = false;
	struct btrfs_device *device = NULL;
	struct block_device *bdev;
	u64 bytenr, bytenr_orig;
	int ret;

	lockdep_assert_held(&uuid_mutex);

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	flags |= FMODE_EXCL;

	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	bytenr_orig = btrfs_sb_offset(0);
	ret = btrfs_sb_log_location_bdev(bdev, 0, READ, &bytenr);
	if (ret) {
		device = ERR_PTR(ret);
		goto error_bdev_put;
	}

	disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr_orig);
	if (IS_ERR(disk_super)) {
		device = ERR_CAST(disk_super);
		goto error_bdev_put;
	}

	device = device_list_add(path, disk_super, &new_device_added);
	if (!IS_ERR(device) && new_device_added)
		btrfs_free_stale_devices(device->devt, device);

	btrfs_release_disk_super(disk_super);

error_bdev_put:
	blkdev_put(bdev, flags);

	return device;
}

/*
 * Try to find a chunk that intersects [start, start + len] range and when one
 * such is found, record the end of it in *start
 */
static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
				    u64 len)
{
	u64 physical_start, physical_end;

	lockdep_assert_held(&device->fs_info->chunk_mutex);

	if (!find_first_extent_bit(&device->alloc_state, *start,
				   &physical_start, &physical_end,
				   CHUNK_ALLOCATED, NULL)) {

		if (in_range(physical_start, *start, len) ||
		    in_range(*start, physical_start,
			     physical_end - physical_start)) {
			*start = physical_end + 1;
			return true;
		}
	}
	return false;
}

static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
{
	switch (device->fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		return max_t(u64, start, BTRFS_DEVICE_RANGE_RESERVED);
	case BTRFS_CHUNK_ALLOC_ZONED:
		/*
		 * We don't care about the starting region like regular
		 * allocator, because we anyway use/reserve the first two zones
		 * for superblock logging.
		 */
		return ALIGN(start, device->zone_info->zone_size);
	default:
		BUG();
	}
}

static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
					u64 *hole_start, u64 *hole_size,
					u64 num_bytes)
{
	u64 zone_size = device->zone_info->zone_size;
	u64 pos;
	int ret;
	bool changed = false;

	ASSERT(IS_ALIGNED(*hole_start, zone_size));

	while (*hole_size > 0) {
		pos = btrfs_find_allocatable_zones(device, *hole_start,
						   *hole_start + *hole_size,
						   num_bytes);
		if (pos != *hole_start) {
			*hole_size = *hole_start + *hole_size - pos;
			*hole_start = pos;
			changed = true;
			if (*hole_size < num_bytes)
				break;
		}

		ret = btrfs_ensure_empty_zones(device, pos, num_bytes);

		/* Range is ensured to be empty */
		if (!ret)
			return changed;

		/* Given hole range was invalid (outside of device) */
		if (ret == -ERANGE) {
			*hole_start += *hole_size;
			*hole_size = 0;
			return true;
		}

		*hole_start += zone_size;
		*hole_size -= zone_size;
		changed = true;
	}

	return changed;
}
/*
 * Check if specified hole is suitable for allocation.
 *
 * @device:	the device which we have the hole
 * @hole_start: starting position of the hole
 * @hole_size:	the size of the hole
 * @num_bytes:	the size of the free space that we need
 *
 * This function may modify @hole_start and @hole_size to reflect the suitable
 * position for allocation. Returns true if the hole position is updated,
 * false otherwise.
 */
static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
				  u64 *hole_size, u64 num_bytes)
{
	bool changed = false;
	u64 hole_end = *hole_start + *hole_size;

	for (;;) {
		/*
		 * Check before we set max_hole_start, otherwise we could end up
		 * sending back this offset anyway.
		 */
		if (contains_pending_extent(device, hole_start, *hole_size)) {
			if (hole_end >= *hole_start)
				*hole_size = hole_end - *hole_start;
			else
				*hole_size = 0;
			changed = true;
		}

		switch (device->fs_devices->chunk_alloc_policy) {
		case BTRFS_CHUNK_ALLOC_REGULAR:
			/* No extra check */
			break;
		case BTRFS_CHUNK_ALLOC_ZONED:
			if (dev_extent_hole_check_zoned(device, hole_start,
							hole_size, num_bytes)) {
				changed = true;
				/*
				 * The changed hole can contain pending extent.
				 * Loop again to check that.
				 */
				continue;
			}
			break;
		default:
			BUG();
		}

		break;
	}

	return changed;
}
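/*
 * Illustrative worked example (editor's addition, not from the original
 * source), regular allocation policy: suppose the hole is [16MiB, 32MiB) and
 * an extent is recorded as CHUNK_ALLOCATED over [20MiB, 24MiB). Then
 * contains_pending_extent() finds physical_start == 20MiB and an inclusive
 * physical_end == 24MiB - 1, sets *hole_start = physical_end + 1 = 24MiB,
 * and the code above shrinks *hole_size to hole_end - *hole_start = 8MiB, so
 * the caller continues with the remaining tail of the hole instead of the
 * original offset.
 */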
/*
 * Find free space in the specified device.
 *
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size
 *		  of the max free space if we don't find suitable free space
 *
 * This does a pretty simple search, the expectation is that it is called very
 * infrequently and that a given device has a small number of extents.
 *
 * @start is used to store the start of the free space if we find one. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 *
 * NOTE: This function will search *commit* root of device tree, and does extra
 * check to ensure dev extents are not double allocated.
 * This makes the function safe to allocate dev extents but may not report
 * correct usable device space, as device extent freed in current transaction
 * is not reported as available.
 */
static int find_free_dev_extent_start(struct btrfs_device *device,
				      u64 num_bytes, u64 search_start, u64 *start,
				      u64 *len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	search_start = dev_extent_search_start(device, search_start);

	WARN_ON(device->zone_info &&
		!IS_ALIGNED(num_bytes, device->zone_info->zone_size));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

again:
	if (search_start >= search_end ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_backwards(root, &key, path);
	if (ret < 0)
		goto out;

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;
			dev_extent_hole_check(device, &search_start, &hole_size,
					      num_bytes);

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;
		if (dev_extent_hole_check(device, &search_start, &hole_size,
					  num_bytes)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}
int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(device, num_bytes, 0, start, len);
}

static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
			  struct btrfs_device *device,
			  u64 start, u64 *dev_extent_len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret == 0)
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
out:
	btrfs_free_path(path);
	return ret;
}

static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct rb_node *n;
	u64 ret = 0;

	em_tree = &fs_info->mapping_tree;
	read_lock(&em_tree->lock);
	n = rb_last(&em_tree->map.rb_root);
	if (n) {
		em = rb_entry(n, struct extent_map, rb_node);
		ret = em->start + em->len;
	}
	read_unlock(&em_tree->lock);

	return ret;
}

static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	if (ret == 0) {
		/* Corruption */
		btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
		ret = -EUCLEAN;
		goto error;
	}

	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*devid_ret = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*devid_ret = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
			    struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	btrfs_reserve_chunk_metadata(trans, true);
	ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
				      &key, sizeof(*dev_item));
	btrfs_trans_release_chunk_metadata(trans);
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
			    ptr, BTRFS_FSID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * Function to update ctime/mtime for a given device path.
 * Mainly used for ctime/mtime based probe like libblkid.
 *
 * We don't care about errors here, this is just to be kind to userspace.
 */
static void update_dev_time(const char *device_path)
{
	struct path path;
	struct timespec64 now;
	int ret;

	ret = kern_path(device_path, LOOKUP_FOLLOW, &path);
	if (ret)
		return;

	now = current_time(d_inode(path.dentry));
	inode_update_time(d_inode(path.dentry), &now, S_MTIME | S_CTIME);
	path_put(&path);
}

static int btrfs_rm_dev_item(struct btrfs_trans_handle *trans,
			     struct btrfs_device *device)
{
	struct btrfs_root *root = device->fs_info->chunk_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	btrfs_reserve_chunk_metadata(trans, false);
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	btrfs_trans_release_chunk_metadata(trans);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Verify that @num_devices satisfies the RAID profile constraints in the whole
 * filesystem. It's up to the caller to adjust that number regarding eg. device
 * replace.
 */
static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
		u64 num_devices)
{
	u64 all_avail;
	unsigned seq;
	int i;

	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		all_avail = fs_info->avail_data_alloc_bits |
			    fs_info->avail_system_alloc_bits |
			    fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
		if (!(all_avail & btrfs_raid_array[i].bg_flag))
			continue;

		if (num_devices < btrfs_raid_array[i].devs_min)
			return btrfs_raid_array[i].mindev_error;
	}

	return 0;
}
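/*
 * Illustrative example (editor's addition, not from the original source):
 * on a two-device filesystem whose profiles include raid1, removing one
 * device means btrfs_rm_device() below calls this check with
 * num_devices - 1 == 1, which is below
 * btrfs_raid_array[BTRFS_RAID_RAID1].devs_min (2), so the function returns
 * BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET and the removal is refused before any
 * data is moved.
 */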
2002 */ 2003 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info) 2004 { 2005 u64 num_devices = fs_info->fs_devices->num_devices; 2006 2007 down_read(&fs_info->dev_replace.rwsem); 2008 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) { 2009 ASSERT(num_devices > 1); 2010 num_devices--; 2011 } 2012 up_read(&fs_info->dev_replace.rwsem); 2013 2014 return num_devices; 2015 } 2016 2017 static void btrfs_scratch_superblock(struct btrfs_fs_info *fs_info, 2018 struct block_device *bdev, int copy_num) 2019 { 2020 struct btrfs_super_block *disk_super; 2021 const size_t len = sizeof(disk_super->magic); 2022 const u64 bytenr = btrfs_sb_offset(copy_num); 2023 int ret; 2024 2025 disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr); 2026 if (IS_ERR(disk_super)) 2027 return; 2028 2029 memset(&disk_super->magic, 0, len); 2030 folio_mark_dirty(virt_to_folio(disk_super)); 2031 btrfs_release_disk_super(disk_super); 2032 2033 ret = sync_blockdev_range(bdev, bytenr, bytenr + len - 1); 2034 if (ret) 2035 btrfs_warn(fs_info, "error clearing superblock number %d (%d)", 2036 copy_num, ret); 2037 } 2038 2039 void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info, 2040 struct block_device *bdev, 2041 const char *device_path) 2042 { 2043 int copy_num; 2044 2045 if (!bdev) 2046 return; 2047 2048 for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) { 2049 if (bdev_is_zoned(bdev)) 2050 btrfs_reset_sb_log_zones(bdev, copy_num); 2051 else 2052 btrfs_scratch_superblock(fs_info, bdev, copy_num); 2053 } 2054 2055 /* Notify udev that device has changed */ 2056 btrfs_kobject_uevent(bdev, KOBJ_CHANGE); 2057 2058 /* Update ctime/mtime for device path for libblkid */ 2059 update_dev_time(device_path); 2060 } 2061 2062 int btrfs_rm_device(struct btrfs_fs_info *fs_info, 2063 struct btrfs_dev_lookup_args *args, 2064 struct block_device **bdev, fmode_t *mode) 2065 { 2066 struct btrfs_trans_handle *trans; 2067 struct btrfs_device *device; 2068 struct btrfs_fs_devices *cur_devices; 2069 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2070 u64 num_devices; 2071 int ret = 0; 2072 2073 if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { 2074 btrfs_err(fs_info, "device remove not supported on extent tree v2 yet"); 2075 return -EINVAL; 2076 } 2077 2078 /* 2079 * The device list in fs_devices is accessed without locks (neither 2080 * uuid_mutex nor device_list_mutex) as it won't change on a mounted 2081 * filesystem and another device rm cannot run. 
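 *
 * (Another "device remove" cannot run because the whole operation is
 * serialized by the exclusive op state, set via btrfs_exclop_start() in the
 * ioctl code before this function is reached.)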
2082 */ 2083 num_devices = btrfs_num_devices(fs_info); 2084 2085 ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1); 2086 if (ret) 2087 return ret; 2088 2089 device = btrfs_find_device(fs_info->fs_devices, args); 2090 if (!device) { 2091 if (args->missing) 2092 ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND; 2093 else 2094 ret = -ENOENT; 2095 return ret; 2096 } 2097 2098 if (btrfs_pinned_by_swapfile(fs_info, device)) { 2099 btrfs_warn_in_rcu(fs_info, 2100 "cannot remove device %s (devid %llu) due to active swapfile", 2101 btrfs_dev_name(device), device->devid); 2102 return -ETXTBSY; 2103 } 2104 2105 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 2106 return BTRFS_ERROR_DEV_TGT_REPLACE; 2107 2108 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 2109 fs_info->fs_devices->rw_devices == 1) 2110 return BTRFS_ERROR_DEV_ONLY_WRITABLE; 2111 2112 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2113 mutex_lock(&fs_info->chunk_mutex); 2114 list_del_init(&device->dev_alloc_list); 2115 device->fs_devices->rw_devices--; 2116 mutex_unlock(&fs_info->chunk_mutex); 2117 } 2118 2119 ret = btrfs_shrink_device(device, 0); 2120 if (ret) 2121 goto error_undo; 2122 2123 trans = btrfs_start_transaction(fs_info->chunk_root, 0); 2124 if (IS_ERR(trans)) { 2125 ret = PTR_ERR(trans); 2126 goto error_undo; 2127 } 2128 2129 ret = btrfs_rm_dev_item(trans, device); 2130 if (ret) { 2131 /* Any error in dev item removal is critical */ 2132 btrfs_crit(fs_info, 2133 "failed to remove device item for devid %llu: %d", 2134 device->devid, ret); 2135 btrfs_abort_transaction(trans, ret); 2136 btrfs_end_transaction(trans); 2137 return ret; 2138 } 2139 2140 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2141 btrfs_scrub_cancel_dev(device); 2142 2143 /* 2144 * the device list mutex makes sure that we don't change 2145 * the device list while someone else is writing out all 2146 * the device supers. Whoever is writing all supers should 2147 * lock the device list mutex before getting the number of 2148 * devices in the super block (super_copy). Conversely, 2149 * whoever updates the number of devices in the super block 2150 * (super_copy) should hold the device list mutex. 2151 */ 2152 2153 /* 2154 * In normal cases cur_devices == fs_devices. But in case 2155 * of deleting a seed device, cur_devices should point to 2156 * its own fs_devices listed under the fs_devices->seed_list. 2157 */ 2158 cur_devices = device->fs_devices; 2159 mutex_lock(&fs_devices->device_list_mutex); 2160 list_del_rcu(&device->dev_list); 2161 2162 cur_devices->num_devices--; 2163 cur_devices->total_devices--; 2164 /* Update total_devices of the parent fs_devices if it's a seed */ 2165 if (cur_devices != fs_devices) 2166 fs_devices->total_devices--; 2167 2168 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) 2169 cur_devices->missing_devices--; 2170 2171 btrfs_assign_next_active_device(device, NULL); 2172 2173 if (device->bdev) { 2174 cur_devices->open_devices--; 2175 /* remove sysfs entry */ 2176 btrfs_sysfs_remove_device(device); 2177 } 2178 2179 num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1; 2180 btrfs_set_super_num_devices(fs_info->super_copy, num_devices); 2181 mutex_unlock(&fs_devices->device_list_mutex); 2182 2183 /* 2184 * At this point, the device is zero sized and detached from the 2185 * devices list. All that's left is to zero out the old supers and 2186 * free the device.
* 2188 * We cannot call btrfs_close_bdev() here because we're holding the sb 2189 * write lock, and blkdev_put() will pull in the ->open_mutex on the 2190 * block device and its dependencies. Instead just flush the device 2191 * and let the caller do the final blkdev_put. 2192 */ 2193 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2194 btrfs_scratch_superblocks(fs_info, device->bdev, 2195 device->name->str); 2196 if (device->bdev) { 2197 sync_blockdev(device->bdev); 2198 invalidate_bdev(device->bdev); 2199 } 2200 } 2201 2202 *bdev = device->bdev; 2203 *mode = device->mode; 2204 synchronize_rcu(); 2205 btrfs_free_device(device); 2206 2207 /* 2208 * This can happen if cur_devices is the private seed devices list. We 2209 * cannot call close_fs_devices() here because it expects the uuid_mutex 2210 * to be held, but in fact we don't need that for the private 2211 * seed_devices, we can simply decrement cur_devices->opened and then 2212 * remove it from our list and free the fs_devices. 2213 */ 2214 if (cur_devices->num_devices == 0) { 2215 list_del_init(&cur_devices->seed_list); 2216 ASSERT(cur_devices->opened == 1); 2217 cur_devices->opened--; 2218 free_fs_devices(cur_devices); 2219 } 2220 2221 ret = btrfs_commit_transaction(trans); 2222 2223 return ret; 2224 2225 error_undo: 2226 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2227 mutex_lock(&fs_info->chunk_mutex); 2228 list_add(&device->dev_alloc_list, 2229 &fs_devices->alloc_list); 2230 device->fs_devices->rw_devices++; 2231 mutex_unlock(&fs_info->chunk_mutex); 2232 } 2233 return ret; 2234 } 2235 2236 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev) 2237 { 2238 struct btrfs_fs_devices *fs_devices; 2239 2240 lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex); 2241 2242 /* 2243 * In case of an fs with no seed, srcdev->fs_devices will point 2244 * to fs_devices of fs_info. However when the dev being replaced is 2245 * a seed dev it will point to the seed's local fs_devices. In short, 2246 * srcdev will have its correct fs_devices in both cases. 2247 */ 2248 fs_devices = srcdev->fs_devices; 2249 2250 list_del_rcu(&srcdev->dev_list); 2251 list_del(&srcdev->dev_alloc_list); 2252 fs_devices->num_devices--; 2253 if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state)) 2254 fs_devices->missing_devices--; 2255 2256 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state)) 2257 fs_devices->rw_devices--; 2258 2259 if (srcdev->bdev) 2260 fs_devices->open_devices--; 2261 } 2262 2263 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev) 2264 { 2265 struct btrfs_fs_devices *fs_devices = srcdev->fs_devices; 2266 2267 mutex_lock(&uuid_mutex); 2268 2269 btrfs_close_bdev(srcdev); 2270 synchronize_rcu(); 2271 btrfs_free_device(srcdev); 2272 2273 /* if there are no devices left we'd rather delete the fs_devices */ 2274 if (!fs_devices->num_devices) { 2275 /* 2276 * On a mounted FS, num_devices can't be zero unless it's a 2277 * seed. In case of a seed device being replaced, the replace 2278 * target is added to the sprout FS, so there will be no more 2279 * device left under the seed FS.
2280 */ 2281 ASSERT(fs_devices->seeding); 2282 2283 list_del_init(&fs_devices->seed_list); 2284 close_fs_devices(fs_devices); 2285 free_fs_devices(fs_devices); 2286 } 2287 mutex_unlock(&uuid_mutex); 2288 } 2289 2290 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev) 2291 { 2292 struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices; 2293 2294 mutex_lock(&fs_devices->device_list_mutex); 2295 2296 btrfs_sysfs_remove_device(tgtdev); 2297 2298 if (tgtdev->bdev) 2299 fs_devices->open_devices--; 2300 2301 fs_devices->num_devices--; 2302 2303 btrfs_assign_next_active_device(tgtdev, NULL); 2304 2305 list_del_rcu(&tgtdev->dev_list); 2306 2307 mutex_unlock(&fs_devices->device_list_mutex); 2308 2309 btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev, 2310 tgtdev->name->str); 2311 2312 btrfs_close_bdev(tgtdev); 2313 synchronize_rcu(); 2314 btrfs_free_device(tgtdev); 2315 } 2316 2317 /* 2318 * Populate args from device at path. 2319 * 2320 * @fs_info: the filesystem 2321 * @args: the args to populate 2322 * @path: the path to the device 2323 * 2324 * This will read the super block of the device at @path and populate @args with 2325 * the devid, fsid, and uuid. This is meant to be used for ioctls that need to 2326 * lookup a device to operate on, but need to do it before we take any locks. 2327 * This properly handles the special case of "missing" that a user may pass in, 2328 * and does some basic sanity checks. The caller must make sure that @path is 2329 * properly NUL terminated before calling in, and must call 2330 * btrfs_put_dev_args_from_path() in order to free up the temporary fsid and 2331 * uuid buffers. 2332 * 2333 * Return: 0 for success, -errno for failure 2334 */ 2335 int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info, 2336 struct btrfs_dev_lookup_args *args, 2337 const char *path) 2338 { 2339 struct btrfs_super_block *disk_super; 2340 struct block_device *bdev; 2341 int ret; 2342 2343 if (!path || !path[0]) 2344 return -EINVAL; 2345 if (!strcmp(path, "missing")) { 2346 args->missing = true; 2347 return 0; 2348 } 2349 2350 args->uuid = kzalloc(BTRFS_UUID_SIZE, GFP_KERNEL); 2351 args->fsid = kzalloc(BTRFS_FSID_SIZE, GFP_KERNEL); 2352 if (!args->uuid || !args->fsid) { 2353 btrfs_put_dev_args_from_path(args); 2354 return -ENOMEM; 2355 } 2356 2357 ret = btrfs_get_bdev_and_sb(path, FMODE_READ, fs_info->bdev_holder, 0, 2358 &bdev, &disk_super); 2359 if (ret) { 2360 btrfs_put_dev_args_from_path(args); 2361 return ret; 2362 } 2363 2364 args->devid = btrfs_stack_device_id(&disk_super->dev_item); 2365 memcpy(args->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE); 2366 if (btrfs_fs_incompat(fs_info, METADATA_UUID)) 2367 memcpy(args->fsid, disk_super->metadata_uuid, BTRFS_FSID_SIZE); 2368 else 2369 memcpy(args->fsid, disk_super->fsid, BTRFS_FSID_SIZE); 2370 btrfs_release_disk_super(disk_super); 2371 blkdev_put(bdev, FMODE_READ); 2372 return 0; 2373 } 2374 2375 /* 2376 * Only use this jointly with btrfs_get_dev_args_from_path() because we will 2377 * allocate our ->uuid and ->fsid pointers, everybody else uses local variables 2378 * that don't need to be freed. 
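 *
 * A typical pairing (illustrative sketch, mirroring
 * btrfs_find_device_by_devspec() below):
 *
 *	BTRFS_DEV_LOOKUP_ARGS(args);
 *
 *	ret = btrfs_get_dev_args_from_path(fs_info, &args, device_path);
 *	if (ret)
 *		return ERR_PTR(ret);
 *	device = btrfs_find_device(fs_info->fs_devices, &args);
 *	btrfs_put_dev_args_from_path(&args);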
2379 */ 2380 void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args) 2381 { 2382 kfree(args->uuid); 2383 kfree(args->fsid); 2384 args->uuid = NULL; 2385 args->fsid = NULL; 2386 } 2387 2388 struct btrfs_device *btrfs_find_device_by_devspec( 2389 struct btrfs_fs_info *fs_info, u64 devid, 2390 const char *device_path) 2391 { 2392 BTRFS_DEV_LOOKUP_ARGS(args); 2393 struct btrfs_device *device; 2394 int ret; 2395 2396 if (devid) { 2397 args.devid = devid; 2398 device = btrfs_find_device(fs_info->fs_devices, &args); 2399 if (!device) 2400 return ERR_PTR(-ENOENT); 2401 return device; 2402 } 2403 2404 ret = btrfs_get_dev_args_from_path(fs_info, &args, device_path); 2405 if (ret) 2406 return ERR_PTR(ret); 2407 device = btrfs_find_device(fs_info->fs_devices, &args); 2408 btrfs_put_dev_args_from_path(&args); 2409 if (!device) 2410 return ERR_PTR(-ENOENT); 2411 return device; 2412 } 2413 2414 static struct btrfs_fs_devices *btrfs_init_sprout(struct btrfs_fs_info *fs_info) 2415 { 2416 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2417 struct btrfs_fs_devices *old_devices; 2418 struct btrfs_fs_devices *seed_devices; 2419 2420 lockdep_assert_held(&uuid_mutex); 2421 if (!fs_devices->seeding) 2422 return ERR_PTR(-EINVAL); 2423 2424 /* 2425 * Private copy of the seed devices, anchored at 2426 * fs_info->fs_devices->seed_list 2427 */ 2428 seed_devices = alloc_fs_devices(NULL, NULL); 2429 if (IS_ERR(seed_devices)) 2430 return seed_devices; 2431 2432 /* 2433 * It's necessary to retain a copy of the original seed fs_devices in 2434 * fs_uuids so that filesystems which have been seeded can successfully 2435 * reference the seed device from open_seed_devices. This also supports 2436 * multiple fs seed. 2437 */ 2438 old_devices = clone_fs_devices(fs_devices); 2439 if (IS_ERR(old_devices)) { 2440 kfree(seed_devices); 2441 return old_devices; 2442 } 2443 2444 list_add(&old_devices->fs_list, &fs_uuids); 2445 2446 memcpy(seed_devices, fs_devices, sizeof(*seed_devices)); 2447 seed_devices->opened = 1; 2448 INIT_LIST_HEAD(&seed_devices->devices); 2449 INIT_LIST_HEAD(&seed_devices->alloc_list); 2450 mutex_init(&seed_devices->device_list_mutex); 2451 2452 return seed_devices; 2453 } 2454 2455 /* 2456 * Splice seed devices into the sprout fs_devices. 2457 * Generate a new fsid for the sprouted read-write filesystem. 2458 */ 2459 static void btrfs_setup_sprout(struct btrfs_fs_info *fs_info, 2460 struct btrfs_fs_devices *seed_devices) 2461 { 2462 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2463 struct btrfs_super_block *disk_super = fs_info->super_copy; 2464 struct btrfs_device *device; 2465 u64 super_flags; 2466 2467 /* 2468 * We are updating the fsid, the thread leading to device_list_add() 2469 * could race, so uuid_mutex is needed. 2470 */ 2471 lockdep_assert_held(&uuid_mutex); 2472 2473 /* 2474 * The threads listed below may traverse dev_list but can do that without 2475 * device_list_mutex: 2476 * - All device ops and balance - as we are in btrfs_exclop_start. 2477 * - Various dev_list readers - are using RCU. 2478 * - btrfs_ioctl_fitrim() - is using RCU. 
2479 * 2480 * For-read threads as below are using device_list_mutex: 2481 * - Readonly scrub btrfs_scrub_dev() 2482 * - Readonly scrub btrfs_scrub_progress() 2483 * - btrfs_get_dev_stats() 2484 */ 2485 lockdep_assert_held(&fs_devices->device_list_mutex); 2486 2487 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices, 2488 synchronize_rcu); 2489 list_for_each_entry(device, &seed_devices->devices, dev_list) 2490 device->fs_devices = seed_devices; 2491 2492 fs_devices->seeding = false; 2493 fs_devices->num_devices = 0; 2494 fs_devices->open_devices = 0; 2495 fs_devices->missing_devices = 0; 2496 fs_devices->rotating = false; 2497 list_add(&seed_devices->seed_list, &fs_devices->seed_list); 2498 2499 generate_random_uuid(fs_devices->fsid); 2500 memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE); 2501 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); 2502 2503 super_flags = btrfs_super_flags(disk_super) & 2504 ~BTRFS_SUPER_FLAG_SEEDING; 2505 btrfs_set_super_flags(disk_super, super_flags); 2506 } 2507 2508 /* 2509 * Store the expected generation for seed devices in device items. 2510 */ 2511 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans) 2512 { 2513 BTRFS_DEV_LOOKUP_ARGS(args); 2514 struct btrfs_fs_info *fs_info = trans->fs_info; 2515 struct btrfs_root *root = fs_info->chunk_root; 2516 struct btrfs_path *path; 2517 struct extent_buffer *leaf; 2518 struct btrfs_dev_item *dev_item; 2519 struct btrfs_device *device; 2520 struct btrfs_key key; 2521 u8 fs_uuid[BTRFS_FSID_SIZE]; 2522 u8 dev_uuid[BTRFS_UUID_SIZE]; 2523 int ret; 2524 2525 path = btrfs_alloc_path(); 2526 if (!path) 2527 return -ENOMEM; 2528 2529 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2530 key.offset = 0; 2531 key.type = BTRFS_DEV_ITEM_KEY; 2532 2533 while (1) { 2534 btrfs_reserve_chunk_metadata(trans, false); 2535 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2536 btrfs_trans_release_chunk_metadata(trans); 2537 if (ret < 0) 2538 goto error; 2539 2540 leaf = path->nodes[0]; 2541 next_slot: 2542 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 2543 ret = btrfs_next_leaf(root, path); 2544 if (ret > 0) 2545 break; 2546 if (ret < 0) 2547 goto error; 2548 leaf = path->nodes[0]; 2549 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2550 btrfs_release_path(path); 2551 continue; 2552 } 2553 2554 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2555 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID || 2556 key.type != BTRFS_DEV_ITEM_KEY) 2557 break; 2558 2559 dev_item = btrfs_item_ptr(leaf, path->slots[0], 2560 struct btrfs_dev_item); 2561 args.devid = btrfs_device_id(leaf, dev_item); 2562 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 2563 BTRFS_UUID_SIZE); 2564 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 2565 BTRFS_FSID_SIZE); 2566 args.uuid = dev_uuid; 2567 args.fsid = fs_uuid; 2568 device = btrfs_find_device(fs_info->fs_devices, &args); 2569 BUG_ON(!device); /* Logic error */ 2570 2571 if (device->fs_devices->seeding) { 2572 btrfs_set_device_generation(leaf, dev_item, 2573 device->generation); 2574 btrfs_mark_buffer_dirty(leaf); 2575 } 2576 2577 path->slots[0]++; 2578 goto next_slot; 2579 } 2580 ret = 0; 2581 error: 2582 btrfs_free_path(path); 2583 return ret; 2584 } 2585 2586 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path) 2587 { 2588 struct btrfs_root *root = fs_info->dev_root; 2589 struct btrfs_trans_handle *trans; 2590 struct btrfs_device *device; 2591 struct block_device *bdev; 2592 struct 
super_block *sb = fs_info->sb; 2593 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2594 struct btrfs_fs_devices *seed_devices; 2595 u64 orig_super_total_bytes; 2596 u64 orig_super_num_devices; 2597 int ret = 0; 2598 bool seeding_dev = false; 2599 bool locked = false; 2600 2601 if (sb_rdonly(sb) && !fs_devices->seeding) 2602 return -EROFS; 2603 2604 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL, 2605 fs_info->bdev_holder); 2606 if (IS_ERR(bdev)) 2607 return PTR_ERR(bdev); 2608 2609 if (!btrfs_check_device_zone_type(fs_info, bdev)) { 2610 ret = -EINVAL; 2611 goto error; 2612 } 2613 2614 if (fs_devices->seeding) { 2615 seeding_dev = true; 2616 down_write(&sb->s_umount); 2617 mutex_lock(&uuid_mutex); 2618 locked = true; 2619 } 2620 2621 sync_blockdev(bdev); 2622 2623 rcu_read_lock(); 2624 list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) { 2625 if (device->bdev == bdev) { 2626 ret = -EEXIST; 2627 rcu_read_unlock(); 2628 goto error; 2629 } 2630 } 2631 rcu_read_unlock(); 2632 2633 device = btrfs_alloc_device(fs_info, NULL, NULL, device_path); 2634 if (IS_ERR(device)) { 2635 /* we can safely leave the fs_devices entry around */ 2636 ret = PTR_ERR(device); 2637 goto error; 2638 } 2639 2640 device->fs_info = fs_info; 2641 device->bdev = bdev; 2642 ret = lookup_bdev(device_path, &device->devt); 2643 if (ret) 2644 goto error_free_device; 2645 2646 ret = btrfs_get_dev_zone_info(device, false); 2647 if (ret) 2648 goto error_free_device; 2649 2650 trans = btrfs_start_transaction(root, 0); 2651 if (IS_ERR(trans)) { 2652 ret = PTR_ERR(trans); 2653 goto error_free_zone; 2654 } 2655 2656 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 2657 device->generation = trans->transid; 2658 device->io_width = fs_info->sectorsize; 2659 device->io_align = fs_info->sectorsize; 2660 device->sector_size = fs_info->sectorsize; 2661 device->total_bytes = 2662 round_down(bdev_nr_bytes(bdev), fs_info->sectorsize); 2663 device->disk_total_bytes = device->total_bytes; 2664 device->commit_total_bytes = device->total_bytes; 2665 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2666 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 2667 device->mode = FMODE_EXCL; 2668 device->dev_stats_valid = 1; 2669 set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE); 2670 2671 if (seeding_dev) { 2672 btrfs_clear_sb_rdonly(sb); 2673 2674 /* GFP_KERNEL allocation must not be under device_list_mutex */ 2675 seed_devices = btrfs_init_sprout(fs_info); 2676 if (IS_ERR(seed_devices)) { 2677 ret = PTR_ERR(seed_devices); 2678 btrfs_abort_transaction(trans, ret); 2679 goto error_trans; 2680 } 2681 } 2682 2683 mutex_lock(&fs_devices->device_list_mutex); 2684 if (seeding_dev) { 2685 btrfs_setup_sprout(fs_info, seed_devices); 2686 btrfs_assign_next_active_device(fs_info->fs_devices->latest_dev, 2687 device); 2688 } 2689 2690 device->fs_devices = fs_devices; 2691 2692 mutex_lock(&fs_info->chunk_mutex); 2693 list_add_rcu(&device->dev_list, &fs_devices->devices); 2694 list_add(&device->dev_alloc_list, &fs_devices->alloc_list); 2695 fs_devices->num_devices++; 2696 fs_devices->open_devices++; 2697 fs_devices->rw_devices++; 2698 fs_devices->total_devices++; 2699 fs_devices->total_rw_bytes += device->total_bytes; 2700 2701 atomic64_add(device->total_bytes, &fs_info->free_chunk_space); 2702 2703 if (!bdev_nonrot(bdev)) 2704 fs_devices->rotating = true; 2705 2706 orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy); 2707 btrfs_set_super_total_bytes(fs_info->super_copy, 2708 
round_down(orig_super_total_bytes + device->total_bytes, 2709 fs_info->sectorsize)); 2710 2711 orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy); 2712 btrfs_set_super_num_devices(fs_info->super_copy, 2713 orig_super_num_devices + 1); 2714 2715 /* 2716 * we've got more storage, clear any full flags on the space 2717 * infos 2718 */ 2719 btrfs_clear_space_info_full(fs_info); 2720 2721 mutex_unlock(&fs_info->chunk_mutex); 2722 2723 /* Add sysfs device entry */ 2724 btrfs_sysfs_add_device(device); 2725 2726 mutex_unlock(&fs_devices->device_list_mutex); 2727 2728 if (seeding_dev) { 2729 mutex_lock(&fs_info->chunk_mutex); 2730 ret = init_first_rw_device(trans); 2731 mutex_unlock(&fs_info->chunk_mutex); 2732 if (ret) { 2733 btrfs_abort_transaction(trans, ret); 2734 goto error_sysfs; 2735 } 2736 } 2737 2738 ret = btrfs_add_dev_item(trans, device); 2739 if (ret) { 2740 btrfs_abort_transaction(trans, ret); 2741 goto error_sysfs; 2742 } 2743 2744 if (seeding_dev) { 2745 ret = btrfs_finish_sprout(trans); 2746 if (ret) { 2747 btrfs_abort_transaction(trans, ret); 2748 goto error_sysfs; 2749 } 2750 2751 /* 2752 * fs_devices now represents the newly sprouted filesystem and 2753 * its fsid has been changed by btrfs_setup_sprout(). 2754 */ 2755 btrfs_sysfs_update_sprout_fsid(fs_devices); 2756 } 2757 2758 ret = btrfs_commit_transaction(trans); 2759 2760 if (seeding_dev) { 2761 mutex_unlock(&uuid_mutex); 2762 up_write(&sb->s_umount); 2763 locked = false; 2764 2765 if (ret) /* transaction commit */ 2766 return ret; 2767 2768 ret = btrfs_relocate_sys_chunks(fs_info); 2769 if (ret < 0) 2770 btrfs_handle_fs_error(fs_info, ret, 2771 "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command."); 2772 trans = btrfs_attach_transaction(root); 2773 if (IS_ERR(trans)) { 2774 if (PTR_ERR(trans) == -ENOENT) 2775 return 0; 2776 ret = PTR_ERR(trans); 2777 trans = NULL; 2778 goto error_sysfs; 2779 } 2780 ret = btrfs_commit_transaction(trans); 2781 } 2782 2783 /* 2784 * Now that we have written a new super block to this device, check all 2785 * the other fs_devices lists to see if device_path alienates any other 2786 * scanned device. 2787 * We can ignore the return value as it typically returns -EINVAL and 2788 * only succeeds if the device was an alien.
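 * (Here an "alien" means a stale record of this block device in another
 * scanned filesystem's fs_devices, left over from whatever was on the
 * device before; the super block we just wrote makes that record obsolete.)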
2789 */ 2790 btrfs_forget_devices(device->devt); 2791 2792 /* Update ctime/mtime for blkid or udev */ 2793 update_dev_time(device_path); 2794 2795 return ret; 2796 2797 error_sysfs: 2798 btrfs_sysfs_remove_device(device); 2799 mutex_lock(&fs_info->fs_devices->device_list_mutex); 2800 mutex_lock(&fs_info->chunk_mutex); 2801 list_del_rcu(&device->dev_list); 2802 list_del(&device->dev_alloc_list); 2803 fs_info->fs_devices->num_devices--; 2804 fs_info->fs_devices->open_devices--; 2805 fs_info->fs_devices->rw_devices--; 2806 fs_info->fs_devices->total_devices--; 2807 fs_info->fs_devices->total_rw_bytes -= device->total_bytes; 2808 atomic64_sub(device->total_bytes, &fs_info->free_chunk_space); 2809 btrfs_set_super_total_bytes(fs_info->super_copy, 2810 orig_super_total_bytes); 2811 btrfs_set_super_num_devices(fs_info->super_copy, 2812 orig_super_num_devices); 2813 mutex_unlock(&fs_info->chunk_mutex); 2814 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2815 error_trans: 2816 if (seeding_dev) 2817 btrfs_set_sb_rdonly(sb); 2818 if (trans) 2819 btrfs_end_transaction(trans); 2820 error_free_zone: 2821 btrfs_destroy_dev_zone_info(device); 2822 error_free_device: 2823 btrfs_free_device(device); 2824 error: 2825 blkdev_put(bdev, FMODE_EXCL); 2826 if (locked) { 2827 mutex_unlock(&uuid_mutex); 2828 up_write(&sb->s_umount); 2829 } 2830 return ret; 2831 } 2832 2833 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans, 2834 struct btrfs_device *device) 2835 { 2836 int ret; 2837 struct btrfs_path *path; 2838 struct btrfs_root *root = device->fs_info->chunk_root; 2839 struct btrfs_dev_item *dev_item; 2840 struct extent_buffer *leaf; 2841 struct btrfs_key key; 2842 2843 path = btrfs_alloc_path(); 2844 if (!path) 2845 return -ENOMEM; 2846 2847 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2848 key.type = BTRFS_DEV_ITEM_KEY; 2849 key.offset = device->devid; 2850 2851 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2852 if (ret < 0) 2853 goto out; 2854 2855 if (ret > 0) { 2856 ret = -ENOENT; 2857 goto out; 2858 } 2859 2860 leaf = path->nodes[0]; 2861 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 2862 2863 btrfs_set_device_id(leaf, dev_item, device->devid); 2864 btrfs_set_device_type(leaf, dev_item, device->type); 2865 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 2866 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 2867 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 2868 btrfs_set_device_total_bytes(leaf, dev_item, 2869 btrfs_device_get_disk_total_bytes(device)); 2870 btrfs_set_device_bytes_used(leaf, dev_item, 2871 btrfs_device_get_bytes_used(device)); 2872 btrfs_mark_buffer_dirty(leaf); 2873 2874 out: 2875 btrfs_free_path(path); 2876 return ret; 2877 } 2878 2879 int btrfs_grow_device(struct btrfs_trans_handle *trans, 2880 struct btrfs_device *device, u64 new_size) 2881 { 2882 struct btrfs_fs_info *fs_info = device->fs_info; 2883 struct btrfs_super_block *super_copy = fs_info->super_copy; 2884 u64 old_total; 2885 u64 diff; 2886 int ret; 2887 2888 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 2889 return -EACCES; 2890 2891 new_size = round_down(new_size, fs_info->sectorsize); 2892 2893 mutex_lock(&fs_info->chunk_mutex); 2894 old_total = btrfs_super_total_bytes(super_copy); 2895 diff = round_down(new_size - device->total_bytes, fs_info->sectorsize); 2896 2897 if (new_size <= device->total_bytes || 2898 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 2899 
mutex_unlock(&fs_info->chunk_mutex); 2900 return -EINVAL; 2901 } 2902 2903 btrfs_set_super_total_bytes(super_copy, 2904 round_down(old_total + diff, fs_info->sectorsize)); 2905 device->fs_devices->total_rw_bytes += diff; 2906 2907 btrfs_device_set_total_bytes(device, new_size); 2908 btrfs_device_set_disk_total_bytes(device, new_size); 2909 btrfs_clear_space_info_full(device->fs_info); 2910 if (list_empty(&device->post_commit_list)) 2911 list_add_tail(&device->post_commit_list, 2912 &trans->transaction->dev_update_list); 2913 mutex_unlock(&fs_info->chunk_mutex); 2914 2915 btrfs_reserve_chunk_metadata(trans, false); 2916 ret = btrfs_update_device(trans, device); 2917 btrfs_trans_release_chunk_metadata(trans); 2918 2919 return ret; 2920 } 2921 2922 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 2923 { 2924 struct btrfs_fs_info *fs_info = trans->fs_info; 2925 struct btrfs_root *root = fs_info->chunk_root; 2926 int ret; 2927 struct btrfs_path *path; 2928 struct btrfs_key key; 2929 2930 path = btrfs_alloc_path(); 2931 if (!path) 2932 return -ENOMEM; 2933 2934 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2935 key.offset = chunk_offset; 2936 key.type = BTRFS_CHUNK_ITEM_KEY; 2937 2938 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 2939 if (ret < 0) 2940 goto out; 2941 else if (ret > 0) { /* Logic error or corruption */ 2942 btrfs_handle_fs_error(fs_info, -ENOENT, 2943 "Failed lookup while freeing chunk."); 2944 ret = -ENOENT; 2945 goto out; 2946 } 2947 2948 ret = btrfs_del_item(trans, root, path); 2949 if (ret < 0) 2950 btrfs_handle_fs_error(fs_info, ret, 2951 "Failed to delete chunk item."); 2952 out: 2953 btrfs_free_path(path); 2954 return ret; 2955 } 2956 2957 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 2958 { 2959 struct btrfs_super_block *super_copy = fs_info->super_copy; 2960 struct btrfs_disk_key *disk_key; 2961 struct btrfs_chunk *chunk; 2962 u8 *ptr; 2963 int ret = 0; 2964 u32 num_stripes; 2965 u32 array_size; 2966 u32 len = 0; 2967 u32 cur; 2968 struct btrfs_key key; 2969 2970 lockdep_assert_held(&fs_info->chunk_mutex); 2971 array_size = btrfs_super_sys_array_size(super_copy); 2972 2973 ptr = super_copy->sys_chunk_array; 2974 cur = 0; 2975 2976 while (cur < array_size) { 2977 disk_key = (struct btrfs_disk_key *)ptr; 2978 btrfs_disk_key_to_cpu(&key, disk_key); 2979 2980 len = sizeof(*disk_key); 2981 2982 if (key.type == BTRFS_CHUNK_ITEM_KEY) { 2983 chunk = (struct btrfs_chunk *)(ptr + len); 2984 num_stripes = btrfs_stack_chunk_num_stripes(chunk); 2985 len += btrfs_chunk_item_size(num_stripes); 2986 } else { 2987 ret = -EIO; 2988 break; 2989 } 2990 if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID && 2991 key.offset == chunk_offset) { 2992 memmove(ptr, ptr + len, array_size - (cur + len)); 2993 array_size -= len; 2994 btrfs_set_super_sys_array_size(super_copy, array_size); 2995 } else { 2996 ptr += len; 2997 cur += len; 2998 } 2999 } 3000 return ret; 3001 } 3002 3003 /* 3004 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent. 3005 * @logical: Logical block offset in bytes. 3006 * @length: Length of extent in bytes. 3007 * 3008 * Return: Chunk mapping or ERR_PTR. 
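 *
 * A typical caller looks like this (illustrative sketch, mirroring
 * btrfs_remove_chunk() below):
 *
 *	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
 *	if (IS_ERR(em))
 *		return PTR_ERR(em);
 *	map = em->map_lookup;
 *	...
 *	free_extent_map(em);
 *
 * where the final free_extent_map() drops the reference taken by the lookup.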
3009 */ 3010 struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info, 3011 u64 logical, u64 length) 3012 { 3013 struct extent_map_tree *em_tree; 3014 struct extent_map *em; 3015 3016 em_tree = &fs_info->mapping_tree; 3017 read_lock(&em_tree->lock); 3018 em = lookup_extent_mapping(em_tree, logical, length); 3019 read_unlock(&em_tree->lock); 3020 3021 if (!em) { 3022 btrfs_crit(fs_info, "unable to find logical %llu length %llu", 3023 logical, length); 3024 return ERR_PTR(-EINVAL); 3025 } 3026 3027 if (em->start > logical || em->start + em->len < logical) { 3028 btrfs_crit(fs_info, 3029 "found a bad mapping, wanted %llu-%llu, found %llu-%llu", 3030 logical, length, em->start, em->start + em->len); 3031 free_extent_map(em); 3032 return ERR_PTR(-EINVAL); 3033 } 3034 3035 /* callers are responsible for dropping em's ref. */ 3036 return em; 3037 } 3038 3039 static int remove_chunk_item(struct btrfs_trans_handle *trans, 3040 struct map_lookup *map, u64 chunk_offset) 3041 { 3042 int i; 3043 3044 /* 3045 * Removing chunk items and updating the device items in the chunks btree 3046 * requires holding the chunk_mutex. 3047 * See the comment at btrfs_chunk_alloc() for the details. 3048 */ 3049 lockdep_assert_held(&trans->fs_info->chunk_mutex); 3050 3051 for (i = 0; i < map->num_stripes; i++) { 3052 int ret; 3053 3054 ret = btrfs_update_device(trans, map->stripes[i].dev); 3055 if (ret) 3056 return ret; 3057 } 3058 3059 return btrfs_free_chunk(trans, chunk_offset); 3060 } 3061 3062 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 3063 { 3064 struct btrfs_fs_info *fs_info = trans->fs_info; 3065 struct extent_map *em; 3066 struct map_lookup *map; 3067 u64 dev_extent_len = 0; 3068 int i, ret = 0; 3069 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 3070 3071 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 3072 if (IS_ERR(em)) { 3073 /* 3074 * This is a logic error, but we don't want to just rely on the 3075 * user having built with ASSERT enabled, so if ASSERT doesn't 3076 * do anything we still error out. 3077 */ 3078 ASSERT(0); 3079 return PTR_ERR(em); 3080 } 3081 map = em->map_lookup; 3082 3083 /* 3084 * First delete the device extent items from the devices btree. 3085 * We take the device_list_mutex to avoid racing with the finishing phase 3086 * of a device replace operation. See the comment below before acquiring 3087 * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex 3088 * because that can result in a deadlock when deleting the device extent 3089 * items from the devices btree - COWing an extent buffer from the btree 3090 * may result in allocating a new metadata chunk, which would attempt to 3091 * lock again fs_info->chunk_mutex. 
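 * (That would be a self-deadlock: kernel mutexes are not recursive, so
 * taking chunk_mutex while already holding it would block forever.)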
3092 */ 3093 mutex_lock(&fs_devices->device_list_mutex); 3094 for (i = 0; i < map->num_stripes; i++) { 3095 struct btrfs_device *device = map->stripes[i].dev; 3096 ret = btrfs_free_dev_extent(trans, device, 3097 map->stripes[i].physical, 3098 &dev_extent_len); 3099 if (ret) { 3100 mutex_unlock(&fs_devices->device_list_mutex); 3101 btrfs_abort_transaction(trans, ret); 3102 goto out; 3103 } 3104 3105 if (device->bytes_used > 0) { 3106 mutex_lock(&fs_info->chunk_mutex); 3107 btrfs_device_set_bytes_used(device, 3108 device->bytes_used - dev_extent_len); 3109 atomic64_add(dev_extent_len, &fs_info->free_chunk_space); 3110 btrfs_clear_space_info_full(fs_info); 3111 mutex_unlock(&fs_info->chunk_mutex); 3112 } 3113 } 3114 mutex_unlock(&fs_devices->device_list_mutex); 3115 3116 /* 3117 * We acquire fs_info->chunk_mutex for 2 reasons: 3118 * 3119 * 1) Just like with the first phase of the chunk allocation, we must 3120 * reserve system space, do all chunk btree updates and deletions, and 3121 * update the system chunk array in the superblock while holding this 3122 * mutex. This is for similar reasons as explained in the comment at 3123 * the top of btrfs_chunk_alloc(); 3124 * 3125 * 2) Prevent races with the final phase of a device replace operation 3126 * that replaces the device object associated with the map's stripes, 3127 * because the device object's id can change at any time during that 3128 * final phase of the device replace operation 3129 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the 3130 * replaced device and then see it with an ID of 3131 * BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating 3132 * the device item, which does not exist in the chunk btree. 3133 * The finishing phase of device replace acquires both the 3134 * device_list_mutex and the chunk_mutex, in that order, so we are 3135 * safe by just acquiring the chunk_mutex. 3136 */ 3137 trans->removing_chunk = true; 3138 mutex_lock(&fs_info->chunk_mutex); 3139 3140 check_system_chunk(trans, map->type); 3141 3142 ret = remove_chunk_item(trans, map, chunk_offset); 3143 /* 3144 * Normally we should not get -ENOSPC since we reserved space before 3145 * through the call to check_system_chunk(). 3146 * 3147 * Despite our system space_info having enough free space, we may not 3148 * be able to allocate extents from its block groups, because all have 3149 * an incompatible profile, which will force us to allocate a new system 3150 * block group with the right profile, or right after we called 3151 * check_system_chunk() above, a scrub turned the only system block group 3152 * with enough free space into RO mode. 3153 * This is explained in more detail at do_chunk_alloc(). 3154 * 3155 * So if we get -ENOSPC, allocate a new system chunk and retry once.
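 * A single retry is enough: we keep holding fs_info->chunk_mutex across the
 * allocation and the retry, so the space of the new system block group
 * cannot be consumed by anyone else in between.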
3156 */ 3157 if (ret == -ENOSPC) { 3158 const u64 sys_flags = btrfs_system_alloc_profile(fs_info); 3159 struct btrfs_block_group *sys_bg; 3160 3161 sys_bg = btrfs_create_chunk(trans, sys_flags); 3162 if (IS_ERR(sys_bg)) { 3163 ret = PTR_ERR(sys_bg); 3164 btrfs_abort_transaction(trans, ret); 3165 goto out; 3166 } 3167 3168 ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg); 3169 if (ret) { 3170 btrfs_abort_transaction(trans, ret); 3171 goto out; 3172 } 3173 3174 ret = remove_chunk_item(trans, map, chunk_offset); 3175 if (ret) { 3176 btrfs_abort_transaction(trans, ret); 3177 goto out; 3178 } 3179 } else if (ret) { 3180 btrfs_abort_transaction(trans, ret); 3181 goto out; 3182 } 3183 3184 trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len); 3185 3186 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 3187 ret = btrfs_del_sys_chunk(fs_info, chunk_offset); 3188 if (ret) { 3189 btrfs_abort_transaction(trans, ret); 3190 goto out; 3191 } 3192 } 3193 3194 mutex_unlock(&fs_info->chunk_mutex); 3195 trans->removing_chunk = false; 3196 3197 /* 3198 * We are done with chunk btree updates and deletions, so release the 3199 * system space we previously reserved (with check_system_chunk()). 3200 */ 3201 btrfs_trans_release_chunk_metadata(trans); 3202 3203 ret = btrfs_remove_block_group(trans, chunk_offset, em); 3204 if (ret) { 3205 btrfs_abort_transaction(trans, ret); 3206 goto out; 3207 } 3208 3209 out: 3210 if (trans->removing_chunk) { 3211 mutex_unlock(&fs_info->chunk_mutex); 3212 trans->removing_chunk = false; 3213 } 3214 /* once for us */ 3215 free_extent_map(em); 3216 return ret; 3217 } 3218 3219 int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 3220 { 3221 struct btrfs_root *root = fs_info->chunk_root; 3222 struct btrfs_trans_handle *trans; 3223 struct btrfs_block_group *block_group; 3224 u64 length; 3225 int ret; 3226 3227 if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { 3228 btrfs_err(fs_info, 3229 "relocate: not supported on extent tree v2 yet"); 3230 return -EINVAL; 3231 } 3232 3233 /* 3234 * Prevent races with automatic removal of unused block groups. 3235 * After we relocate and before we remove the chunk with offset 3236 * chunk_offset, automatic removal of the block group can kick in, 3237 * resulting in a failure when calling btrfs_remove_chunk() below. 3238 * 3239 * Make sure to acquire this mutex before doing a tree search (dev 3240 * or chunk trees) to find chunks. Otherwise the cleaner kthread might 3241 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after 3242 * we release the path used to search the chunk/dev tree and before 3243 * the current task acquires this mutex and calls us. 3244 */ 3245 lockdep_assert_held(&fs_info->reclaim_bgs_lock); 3246 3247 /* step one, relocate all the extents inside this chunk */ 3248 btrfs_scrub_pause(fs_info); 3249 ret = btrfs_relocate_block_group(fs_info, chunk_offset); 3250 btrfs_scrub_continue(fs_info); 3251 if (ret) 3252 return ret; 3253 3254 block_group = btrfs_lookup_block_group(fs_info, chunk_offset); 3255 if (!block_group) 3256 return -ENOENT; 3257 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group); 3258 length = block_group->length; 3259 btrfs_put_block_group(block_group); 3260 3261 /* 3262 * On a zoned file system, discard the whole block group, this will 3263 * trigger a REQ_OP_ZONE_RESET operation on the device zone. If 3264 * resetting the zone fails, don't treat it as a fatal problem from the 3265 * filesystem's point of view. 
3266 */ 3267 if (btrfs_is_zoned(fs_info)) { 3268 ret = btrfs_discard_extent(fs_info, chunk_offset, length, NULL); 3269 if (ret) 3270 btrfs_info(fs_info, 3271 "failed to reset zone %llu after relocation", 3272 chunk_offset); 3273 } 3274 3275 trans = btrfs_start_trans_remove_block_group(root->fs_info, 3276 chunk_offset); 3277 if (IS_ERR(trans)) { 3278 ret = PTR_ERR(trans); 3279 btrfs_handle_fs_error(root->fs_info, ret, NULL); 3280 return ret; 3281 } 3282 3283 /* 3284 * step two, delete the device extents and the 3285 * chunk tree entries 3286 */ 3287 ret = btrfs_remove_chunk(trans, chunk_offset); 3288 btrfs_end_transaction(trans); 3289 return ret; 3290 } 3291 3292 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info) 3293 { 3294 struct btrfs_root *chunk_root = fs_info->chunk_root; 3295 struct btrfs_path *path; 3296 struct extent_buffer *leaf; 3297 struct btrfs_chunk *chunk; 3298 struct btrfs_key key; 3299 struct btrfs_key found_key; 3300 u64 chunk_type; 3301 bool retried = false; 3302 int failed = 0; 3303 int ret; 3304 3305 path = btrfs_alloc_path(); 3306 if (!path) 3307 return -ENOMEM; 3308 3309 again: 3310 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3311 key.offset = (u64)-1; 3312 key.type = BTRFS_CHUNK_ITEM_KEY; 3313 3314 while (1) { 3315 mutex_lock(&fs_info->reclaim_bgs_lock); 3316 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3317 if (ret < 0) { 3318 mutex_unlock(&fs_info->reclaim_bgs_lock); 3319 goto error; 3320 } 3321 BUG_ON(ret == 0); /* Corruption */ 3322 3323 ret = btrfs_previous_item(chunk_root, path, key.objectid, 3324 key.type); 3325 if (ret) 3326 mutex_unlock(&fs_info->reclaim_bgs_lock); 3327 if (ret < 0) 3328 goto error; 3329 if (ret > 0) 3330 break; 3331 3332 leaf = path->nodes[0]; 3333 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3334 3335 chunk = btrfs_item_ptr(leaf, path->slots[0], 3336 struct btrfs_chunk); 3337 chunk_type = btrfs_chunk_type(leaf, chunk); 3338 btrfs_release_path(path); 3339 3340 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) { 3341 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 3342 if (ret == -ENOSPC) 3343 failed++; 3344 else 3345 BUG_ON(ret); 3346 } 3347 mutex_unlock(&fs_info->reclaim_bgs_lock); 3348 3349 if (found_key.offset == 0) 3350 break; 3351 key.offset = found_key.offset - 1; 3352 } 3353 ret = 0; 3354 if (failed && !retried) { 3355 failed = 0; 3356 retried = true; 3357 goto again; 3358 } else if (WARN_ON(failed && retried)) { 3359 ret = -ENOSPC; 3360 } 3361 error: 3362 btrfs_free_path(path); 3363 return ret; 3364 } 3365 3366 /* 3367 * return 1 : allocate a data chunk successfully, 3368 * return <0: errors during allocating a data chunk, 3369 * return 0 : no need to allocate a data chunk. 
3370 */ 3371 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info, 3372 u64 chunk_offset) 3373 { 3374 struct btrfs_block_group *cache; 3375 u64 bytes_used; 3376 u64 chunk_type; 3377 3378 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3379 ASSERT(cache); 3380 chunk_type = cache->flags; 3381 btrfs_put_block_group(cache); 3382 3383 if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA)) 3384 return 0; 3385 3386 spin_lock(&fs_info->data_sinfo->lock); 3387 bytes_used = fs_info->data_sinfo->bytes_used; 3388 spin_unlock(&fs_info->data_sinfo->lock); 3389 3390 if (!bytes_used) { 3391 struct btrfs_trans_handle *trans; 3392 int ret; 3393 3394 trans = btrfs_join_transaction(fs_info->tree_root); 3395 if (IS_ERR(trans)) 3396 return PTR_ERR(trans); 3397 3398 ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA); 3399 btrfs_end_transaction(trans); 3400 if (ret < 0) 3401 return ret; 3402 return 1; 3403 } 3404 3405 return 0; 3406 } 3407 3408 static int insert_balance_item(struct btrfs_fs_info *fs_info, 3409 struct btrfs_balance_control *bctl) 3410 { 3411 struct btrfs_root *root = fs_info->tree_root; 3412 struct btrfs_trans_handle *trans; 3413 struct btrfs_balance_item *item; 3414 struct btrfs_disk_balance_args disk_bargs; 3415 struct btrfs_path *path; 3416 struct extent_buffer *leaf; 3417 struct btrfs_key key; 3418 int ret, err; 3419 3420 path = btrfs_alloc_path(); 3421 if (!path) 3422 return -ENOMEM; 3423 3424 trans = btrfs_start_transaction(root, 0); 3425 if (IS_ERR(trans)) { 3426 btrfs_free_path(path); 3427 return PTR_ERR(trans); 3428 } 3429 3430 key.objectid = BTRFS_BALANCE_OBJECTID; 3431 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3432 key.offset = 0; 3433 3434 ret = btrfs_insert_empty_item(trans, root, path, &key, 3435 sizeof(*item)); 3436 if (ret) 3437 goto out; 3438 3439 leaf = path->nodes[0]; 3440 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 3441 3442 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item)); 3443 3444 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data); 3445 btrfs_set_balance_data(leaf, item, &disk_bargs); 3446 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta); 3447 btrfs_set_balance_meta(leaf, item, &disk_bargs); 3448 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys); 3449 btrfs_set_balance_sys(leaf, item, &disk_bargs); 3450 3451 btrfs_set_balance_flags(leaf, item, bctl->flags); 3452 3453 btrfs_mark_buffer_dirty(leaf); 3454 out: 3455 btrfs_free_path(path); 3456 err = btrfs_commit_transaction(trans); 3457 if (err && !ret) 3458 ret = err; 3459 return ret; 3460 } 3461 3462 static int del_balance_item(struct btrfs_fs_info *fs_info) 3463 { 3464 struct btrfs_root *root = fs_info->tree_root; 3465 struct btrfs_trans_handle *trans; 3466 struct btrfs_path *path; 3467 struct btrfs_key key; 3468 int ret, err; 3469 3470 path = btrfs_alloc_path(); 3471 if (!path) 3472 return -ENOMEM; 3473 3474 trans = btrfs_start_transaction_fallback_global_rsv(root, 0); 3475 if (IS_ERR(trans)) { 3476 btrfs_free_path(path); 3477 return PTR_ERR(trans); 3478 } 3479 3480 key.objectid = BTRFS_BALANCE_OBJECTID; 3481 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3482 key.offset = 0; 3483 3484 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 3485 if (ret < 0) 3486 goto out; 3487 if (ret > 0) { 3488 ret = -ENOENT; 3489 goto out; 3490 } 3491 3492 ret = btrfs_del_item(trans, root, path); 3493 out: 3494 btrfs_free_path(path); 3495 err = btrfs_commit_transaction(trans); 3496 if (err && !ret) 3497 ret = err; 3498 return ret; 3499 } 3500 3501 /* 3502 * This is a 
heuristic used to reduce the number of chunks balanced on 3503 * resume after balance was interrupted. 3504 */ 3505 static void update_balance_args(struct btrfs_balance_control *bctl) 3506 { 3507 /* 3508 * Turn on soft mode for chunk types that were being converted. 3509 */ 3510 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) 3511 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT; 3512 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) 3513 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT; 3514 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) 3515 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT; 3516 3517 /* 3518 * Turn on usage filter if it is not already used. The idea is 3519 * that chunks that we have already balanced should be 3520 * reasonably full. Don't do it for chunks that are being 3521 * converted - that will keep us from relocating unconverted 3522 * (albeit full) chunks. 3523 */ 3524 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) && 3525 !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3526 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3527 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE; 3528 bctl->data.usage = 90; 3529 } 3530 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) && 3531 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3532 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3533 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE; 3534 bctl->sys.usage = 90; 3535 } 3536 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) && 3537 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3538 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3539 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE; 3540 bctl->meta.usage = 90; 3541 } 3542 } 3543 3544 /* 3545 * Clear the balance status in fs_info and delete the balance item from disk. 3546 */ 3547 static void reset_balance_state(struct btrfs_fs_info *fs_info) 3548 { 3549 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3550 int ret; 3551 3552 BUG_ON(!fs_info->balance_ctl); 3553 3554 spin_lock(&fs_info->balance_lock); 3555 fs_info->balance_ctl = NULL; 3556 spin_unlock(&fs_info->balance_lock); 3557 3558 kfree(bctl); 3559 ret = del_balance_item(fs_info); 3560 if (ret) 3561 btrfs_handle_fs_error(fs_info, ret, NULL); 3562 } 3563 3564 /* 3565 * Balance filters. Return 1 if chunk should be filtered out 3566 * (should not be balanced).
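 *
 * For example, with "btrfs balance start -dprofiles=raid1" only data chunks
 * whose extended profile bit is set in bargs->profiles pass
 * chunk_profiles_filter() below; all other data chunks are skipped.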
3567 */ 3568 static int chunk_profiles_filter(u64 chunk_type, 3569 struct btrfs_balance_args *bargs) 3570 { 3571 chunk_type = chunk_to_extended(chunk_type) & 3572 BTRFS_EXTENDED_PROFILE_MASK; 3573 3574 if (bargs->profiles & chunk_type) 3575 return 0; 3576 3577 return 1; 3578 } 3579 3580 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, 3581 struct btrfs_balance_args *bargs) 3582 { 3583 struct btrfs_block_group *cache; 3584 u64 chunk_used; 3585 u64 user_thresh_min; 3586 u64 user_thresh_max; 3587 int ret = 1; 3588 3589 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3590 chunk_used = cache->used; 3591 3592 if (bargs->usage_min == 0) 3593 user_thresh_min = 0; 3594 else 3595 user_thresh_min = mult_perc(cache->length, bargs->usage_min); 3596 3597 if (bargs->usage_max == 0) 3598 user_thresh_max = 1; 3599 else if (bargs->usage_max > 100) 3600 user_thresh_max = cache->length; 3601 else 3602 user_thresh_max = mult_perc(cache->length, bargs->usage_max); 3603 3604 if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max) 3605 ret = 0; 3606 3607 btrfs_put_block_group(cache); 3608 return ret; 3609 } 3610 3611 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, 3612 u64 chunk_offset, struct btrfs_balance_args *bargs) 3613 { 3614 struct btrfs_block_group *cache; 3615 u64 chunk_used, user_thresh; 3616 int ret = 1; 3617 3618 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3619 chunk_used = cache->used; 3620 3621 if (bargs->usage_min == 0) 3622 user_thresh = 1; 3623 else if (bargs->usage > 100) 3624 user_thresh = cache->length; 3625 else 3626 user_thresh = mult_perc(cache->length, bargs->usage); 3627 3628 if (chunk_used < user_thresh) 3629 ret = 0; 3630 3631 btrfs_put_block_group(cache); 3632 return ret; 3633 } 3634 3635 static int chunk_devid_filter(struct extent_buffer *leaf, 3636 struct btrfs_chunk *chunk, 3637 struct btrfs_balance_args *bargs) 3638 { 3639 struct btrfs_stripe *stripe; 3640 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3641 int i; 3642 3643 for (i = 0; i < num_stripes; i++) { 3644 stripe = btrfs_stripe_nr(chunk, i); 3645 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid) 3646 return 0; 3647 } 3648 3649 return 1; 3650 } 3651 3652 static u64 calc_data_stripes(u64 type, int num_stripes) 3653 { 3654 const int index = btrfs_bg_flags_to_raid_index(type); 3655 const int ncopies = btrfs_raid_array[index].ncopies; 3656 const int nparity = btrfs_raid_array[index].nparity; 3657 3658 return (num_stripes - nparity) / ncopies; 3659 } 3660 3661 /* [pstart, pend) */ 3662 static int chunk_drange_filter(struct extent_buffer *leaf, 3663 struct btrfs_chunk *chunk, 3664 struct btrfs_balance_args *bargs) 3665 { 3666 struct btrfs_stripe *stripe; 3667 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3668 u64 stripe_offset; 3669 u64 stripe_length; 3670 u64 type; 3671 int factor; 3672 int i; 3673 3674 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID)) 3675 return 0; 3676 3677 type = btrfs_chunk_type(leaf, chunk); 3678 factor = calc_data_stripes(type, num_stripes); 3679 3680 for (i = 0; i < num_stripes; i++) { 3681 stripe = btrfs_stripe_nr(chunk, i); 3682 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid) 3683 continue; 3684 3685 stripe_offset = btrfs_stripe_offset(leaf, stripe); 3686 stripe_length = btrfs_chunk_length(leaf, chunk); 3687 stripe_length = div_u64(stripe_length, factor); 3688 3689 if (stripe_offset < bargs->pend && 3690 stripe_offset + stripe_length > bargs->pstart) 3691 return 0; 3692 } 3693 3694 return 1; 3695 
} 3696 3697 /* [vstart, vend) */ 3698 static int chunk_vrange_filter(struct extent_buffer *leaf, 3699 struct btrfs_chunk *chunk, 3700 u64 chunk_offset, 3701 struct btrfs_balance_args *bargs) 3702 { 3703 if (chunk_offset < bargs->vend && 3704 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart) 3705 /* at least part of the chunk is inside this vrange */ 3706 return 0; 3707 3708 return 1; 3709 } 3710 3711 static int chunk_stripes_range_filter(struct extent_buffer *leaf, 3712 struct btrfs_chunk *chunk, 3713 struct btrfs_balance_args *bargs) 3714 { 3715 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3716 3717 if (bargs->stripes_min <= num_stripes 3718 && num_stripes <= bargs->stripes_max) 3719 return 0; 3720 3721 return 1; 3722 } 3723 3724 static int chunk_soft_convert_filter(u64 chunk_type, 3725 struct btrfs_balance_args *bargs) 3726 { 3727 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 3728 return 0; 3729 3730 chunk_type = chunk_to_extended(chunk_type) & 3731 BTRFS_EXTENDED_PROFILE_MASK; 3732 3733 if (bargs->target == chunk_type) 3734 return 1; 3735 3736 return 0; 3737 } 3738 3739 static int should_balance_chunk(struct extent_buffer *leaf, 3740 struct btrfs_chunk *chunk, u64 chunk_offset) 3741 { 3742 struct btrfs_fs_info *fs_info = leaf->fs_info; 3743 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3744 struct btrfs_balance_args *bargs = NULL; 3745 u64 chunk_type = btrfs_chunk_type(leaf, chunk); 3746 3747 /* type filter */ 3748 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) & 3749 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) { 3750 return 0; 3751 } 3752 3753 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3754 bargs = &bctl->data; 3755 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3756 bargs = &bctl->sys; 3757 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3758 bargs = &bctl->meta; 3759 3760 /* profiles filter */ 3761 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) && 3762 chunk_profiles_filter(chunk_type, bargs)) { 3763 return 0; 3764 } 3765 3766 /* usage filter */ 3767 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) && 3768 chunk_usage_filter(fs_info, chunk_offset, bargs)) { 3769 return 0; 3770 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3771 chunk_usage_range_filter(fs_info, chunk_offset, bargs)) { 3772 return 0; 3773 } 3774 3775 /* devid filter */ 3776 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) && 3777 chunk_devid_filter(leaf, chunk, bargs)) { 3778 return 0; 3779 } 3780 3781 /* drange filter, makes sense only with devid filter */ 3782 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) && 3783 chunk_drange_filter(leaf, chunk, bargs)) { 3784 return 0; 3785 } 3786 3787 /* vrange filter */ 3788 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) && 3789 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) { 3790 return 0; 3791 } 3792 3793 /* stripes filter */ 3794 if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) && 3795 chunk_stripes_range_filter(leaf, chunk, bargs)) { 3796 return 0; 3797 } 3798 3799 /* soft profile changing mode */ 3800 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) && 3801 chunk_soft_convert_filter(chunk_type, bargs)) { 3802 return 0; 3803 } 3804 3805 /* 3806 * limited by count, must be the last filter 3807 */ 3808 if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) { 3809 if (bargs->limit == 0) 3810 return 0; 3811 else 3812 bargs->limit--; 3813 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) { 3814 /* 3815 * Same logic as the 'limit' filter; the minimum cannot be 3816 * determined here because we do not have the global 
information 3817 * about the count of all chunks that satisfy the filters. 3818 */ 3819 if (bargs->limit_max == 0) 3820 return 0; 3821 else 3822 bargs->limit_max--; 3823 } 3824 3825 return 1; 3826 } 3827 3828 static int __btrfs_balance(struct btrfs_fs_info *fs_info) 3829 { 3830 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3831 struct btrfs_root *chunk_root = fs_info->chunk_root; 3832 u64 chunk_type; 3833 struct btrfs_chunk *chunk; 3834 struct btrfs_path *path = NULL; 3835 struct btrfs_key key; 3836 struct btrfs_key found_key; 3837 struct extent_buffer *leaf; 3838 int slot; 3839 int ret; 3840 int enospc_errors = 0; 3841 bool counting = true; 3842 /* The single value limit and min/max limits use the same bytes in the btrfs_balance_args (a union), so save the single value limits before the counting pass. */ 3843 u64 limit_data = bctl->data.limit; 3844 u64 limit_meta = bctl->meta.limit; 3845 u64 limit_sys = bctl->sys.limit; 3846 u32 count_data = 0; 3847 u32 count_meta = 0; 3848 u32 count_sys = 0; 3849 int chunk_reserved = 0; 3850 3851 path = btrfs_alloc_path(); 3852 if (!path) { 3853 ret = -ENOMEM; 3854 goto error; 3855 } 3856 3857 /* zero out stat counters */ 3858 spin_lock(&fs_info->balance_lock); 3859 memset(&bctl->stat, 0, sizeof(bctl->stat)); 3860 spin_unlock(&fs_info->balance_lock); 3861 again: 3862 if (!counting) { 3863 /* 3864 * The single value limit and min/max limits use the same bytes 3865 * in the btrfs_balance_args union, so restore the single value limits saved above. 3866 */ 3867 bctl->data.limit = limit_data; 3868 bctl->meta.limit = limit_meta; 3869 bctl->sys.limit = limit_sys; 3870 } 3871 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3872 key.offset = (u64)-1; 3873 key.type = BTRFS_CHUNK_ITEM_KEY; 3874 3875 while (1) { 3876 if ((!counting && atomic_read(&fs_info->balance_pause_req)) || 3877 atomic_read(&fs_info->balance_cancel_req)) { 3878 ret = -ECANCELED; 3879 goto error; 3880 } 3881 3882 mutex_lock(&fs_info->reclaim_bgs_lock); 3883 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3884 if (ret < 0) { 3885 mutex_unlock(&fs_info->reclaim_bgs_lock); 3886 goto error; 3887 } 3888 3889 /* 3890 * this shouldn't happen, it means the last relocate 3891 * failed 3892 */ 3893 if (ret == 0) 3894 BUG(); /* FIXME break ? 
*/ 3895 3896 ret = btrfs_previous_item(chunk_root, path, 0, 3897 BTRFS_CHUNK_ITEM_KEY); 3898 if (ret) { 3899 mutex_unlock(&fs_info->reclaim_bgs_lock); 3900 ret = 0; 3901 break; 3902 } 3903 3904 leaf = path->nodes[0]; 3905 slot = path->slots[0]; 3906 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3907 3908 if (found_key.objectid != key.objectid) { 3909 mutex_unlock(&fs_info->reclaim_bgs_lock); 3910 break; 3911 } 3912 3913 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 3914 chunk_type = btrfs_chunk_type(leaf, chunk); 3915 3916 if (!counting) { 3917 spin_lock(&fs_info->balance_lock); 3918 bctl->stat.considered++; 3919 spin_unlock(&fs_info->balance_lock); 3920 } 3921 3922 ret = should_balance_chunk(leaf, chunk, found_key.offset); 3923 3924 btrfs_release_path(path); 3925 if (!ret) { 3926 mutex_unlock(&fs_info->reclaim_bgs_lock); 3927 goto loop; 3928 } 3929 3930 if (counting) { 3931 mutex_unlock(&fs_info->reclaim_bgs_lock); 3932 spin_lock(&fs_info->balance_lock); 3933 bctl->stat.expected++; 3934 spin_unlock(&fs_info->balance_lock); 3935 3936 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3937 count_data++; 3938 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3939 count_sys++; 3940 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3941 count_meta++; 3942 3943 goto loop; 3944 } 3945 3946 /* 3947 * Apply limit_min filter, no need to check if the LIMITS 3948 * filter is used, limit_min is 0 by default 3949 */ 3950 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) && 3951 count_data < bctl->data.limit_min) 3952 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) && 3953 count_meta < bctl->meta.limit_min) 3954 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) && 3955 count_sys < bctl->sys.limit_min)) { 3956 mutex_unlock(&fs_info->reclaim_bgs_lock); 3957 goto loop; 3958 } 3959 3960 if (!chunk_reserved) { 3961 /* 3962 * We may be relocating the only data chunk we have, 3963 * which could potentially end up losing the data's 3964 * raid profile, so let's allocate an empty one in 3965 * advance. 3966 */ 3967 ret = btrfs_may_alloc_data_chunk(fs_info, 3968 found_key.offset); 3969 if (ret < 0) { 3970 mutex_unlock(&fs_info->reclaim_bgs_lock); 3971 goto error; 3972 } else if (ret == 1) { 3973 chunk_reserved = 1; 3974 } 3975 } 3976 3977 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 3978 mutex_unlock(&fs_info->reclaim_bgs_lock); 3979 if (ret == -ENOSPC) { 3980 enospc_errors++; 3981 } else if (ret == -ETXTBSY) { 3982 btrfs_info(fs_info, 3983 "skipping relocation of block group %llu due to active swapfile", 3984 found_key.offset); 3985 ret = 0; 3986 } else if (ret) { 3987 goto error; 3988 } else { 3989 spin_lock(&fs_info->balance_lock); 3990 bctl->stat.completed++; 3991 spin_unlock(&fs_info->balance_lock); 3992 } 3993 loop: 3994 if (found_key.offset == 0) 3995 break; 3996 key.offset = found_key.offset - 1; 3997 } 3998 3999 if (counting) { 4000 btrfs_release_path(path); 4001 counting = false; 4002 goto again; 4003 } 4004 error: 4005 btrfs_free_path(path); 4006 if (enospc_errors) { 4007 btrfs_info(fs_info, "%d enospc errors during balance", 4008 enospc_errors); 4009 if (!ret) 4010 ret = -ENOSPC; 4011 } 4012 4013 return ret; 4014 } 4015 4016 /* 4017 * See if a given profile is valid and reduced. 4018 * 4019 * @flags: profile to validate 4020 * @extended: if true @flags is treated as an extended profile 4021 */ 4022 static int alloc_profile_is_valid(u64 flags, int extended) 4023 { 4024 u64 mask = (extended ?
BTRFS_EXTENDED_PROFILE_MASK : 4025 BTRFS_BLOCK_GROUP_PROFILE_MASK); 4026 4027 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK; 4028 4029 /* 1) check that all other bits are zeroed */ 4030 if (flags & ~mask) 4031 return 0; 4032 4033 /* 2) see if profile is reduced */ 4034 if (flags == 0) 4035 return !extended; /* "0" is valid for usual profiles */ 4036 4037 return has_single_bit_set(flags); 4038 } 4039 4040 static inline int balance_need_close(struct btrfs_fs_info *fs_info) 4041 { 4042 /* cancel requested || normal exit path */ 4043 return atomic_read(&fs_info->balance_cancel_req) || 4044 (atomic_read(&fs_info->balance_pause_req) == 0 && 4045 atomic_read(&fs_info->balance_cancel_req) == 0); 4046 } 4047 4048 /* 4049 * Validate target profile against allowed profiles and return true if it's OK. 4050 * Otherwise print the error message and return false. 4051 */ 4052 static inline int validate_convert_profile(struct btrfs_fs_info *fs_info, 4053 const struct btrfs_balance_args *bargs, 4054 u64 allowed, const char *type) 4055 { 4056 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 4057 return true; 4058 4059 /* Profile is valid and does not have bits outside of the allowed set */ 4060 if (alloc_profile_is_valid(bargs->target, 1) && 4061 (bargs->target & ~allowed) == 0) 4062 return true; 4063 4064 btrfs_err(fs_info, "balance: invalid convert %s profile %s", 4065 type, btrfs_bg_type_to_raid_name(bargs->target)); 4066 return false; 4067 } 4068 4069 /* 4070 * Fill @buf with textual description of balance filter flags @bargs, up to 4071 * @size_buf including the terminating null. The output may be trimmed if it 4072 * does not fit into the provided buffer. 4073 */ 4074 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf, 4075 u32 size_buf) 4076 { 4077 int ret; 4078 u32 size_bp = size_buf; 4079 char *bp = buf; 4080 u64 flags = bargs->flags; 4081 char tmp_buf[128] = {'\0'}; 4082 4083 if (!flags) 4084 return; 4085 4086 #define CHECK_APPEND_NOARG(a) \ 4087 do { \ 4088 ret = snprintf(bp, size_bp, (a)); \ 4089 if (ret < 0 || ret >= size_bp) \ 4090 goto out_overflow; \ 4091 size_bp -= ret; \ 4092 bp += ret; \ 4093 } while (0) 4094 4095 #define CHECK_APPEND_1ARG(a, v1) \ 4096 do { \ 4097 ret = snprintf(bp, size_bp, (a), (v1)); \ 4098 if (ret < 0 || ret >= size_bp) \ 4099 goto out_overflow; \ 4100 size_bp -= ret; \ 4101 bp += ret; \ 4102 } while (0) 4103 4104 #define CHECK_APPEND_2ARG(a, v1, v2) \ 4105 do { \ 4106 ret = snprintf(bp, size_bp, (a), (v1), (v2)); \ 4107 if (ret < 0 || ret >= size_bp) \ 4108 goto out_overflow; \ 4109 size_bp -= ret; \ 4110 bp += ret; \ 4111 } while (0) 4112 4113 if (flags & BTRFS_BALANCE_ARGS_CONVERT) 4114 CHECK_APPEND_1ARG("convert=%s,", 4115 btrfs_bg_type_to_raid_name(bargs->target)); 4116 4117 if (flags & BTRFS_BALANCE_ARGS_SOFT) 4118 CHECK_APPEND_NOARG("soft,"); 4119 4120 if (flags & BTRFS_BALANCE_ARGS_PROFILES) { 4121 btrfs_describe_block_groups(bargs->profiles, tmp_buf, 4122 sizeof(tmp_buf)); 4123 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf); 4124 } 4125 4126 if (flags & BTRFS_BALANCE_ARGS_USAGE) 4127 CHECK_APPEND_1ARG("usage=%llu,", bargs->usage); 4128 4129 if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) 4130 CHECK_APPEND_2ARG("usage=%u..%u,", 4131 bargs->usage_min, bargs->usage_max); 4132 4133 if (flags & BTRFS_BALANCE_ARGS_DEVID) 4134 CHECK_APPEND_1ARG("devid=%llu,", bargs->devid); 4135 4136 if (flags & BTRFS_BALANCE_ARGS_DRANGE) 4137 CHECK_APPEND_2ARG("drange=%llu..%llu,", 4138 bargs->pstart, bargs->pend); 4139 4140 if (flags & BTRFS_BALANCE_ARGS_VRANGE) 4141 
CHECK_APPEND_2ARG("vrange=%llu..%llu,", 4142 bargs->vstart, bargs->vend); 4143 4144 if (flags & BTRFS_BALANCE_ARGS_LIMIT) 4145 CHECK_APPEND_1ARG("limit=%llu,", bargs->limit); 4146 4147 if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE) 4148 CHECK_APPEND_2ARG("limit=%u..%u,", 4149 bargs->limit_min, bargs->limit_max); 4150 4151 if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) 4152 CHECK_APPEND_2ARG("stripes=%u..%u,", 4153 bargs->stripes_min, bargs->stripes_max); 4154 4155 #undef CHECK_APPEND_2ARG 4156 #undef CHECK_APPEND_1ARG 4157 #undef CHECK_APPEND_NOARG 4158 4159 out_overflow: 4160 4161 if (size_bp < size_buf) 4162 buf[size_buf - size_bp - 1] = '\0'; /* remove last , */ 4163 else 4164 buf[0] = '\0'; 4165 } 4166 4167 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info) 4168 { 4169 u32 size_buf = 1024; 4170 char tmp_buf[192] = {'\0'}; 4171 char *buf; 4172 char *bp; 4173 u32 size_bp = size_buf; 4174 int ret; 4175 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 4176 4177 buf = kzalloc(size_buf, GFP_KERNEL); 4178 if (!buf) 4179 return; 4180 4181 bp = buf; 4182 4183 #define CHECK_APPEND_1ARG(a, v1) \ 4184 do { \ 4185 ret = snprintf(bp, size_bp, (a), (v1)); \ 4186 if (ret < 0 || ret >= size_bp) \ 4187 goto out_overflow; \ 4188 size_bp -= ret; \ 4189 bp += ret; \ 4190 } while (0) 4191 4192 if (bctl->flags & BTRFS_BALANCE_FORCE) 4193 CHECK_APPEND_1ARG("%s", "-f "); 4194 4195 if (bctl->flags & BTRFS_BALANCE_DATA) { 4196 describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf)); 4197 CHECK_APPEND_1ARG("-d%s ", tmp_buf); 4198 } 4199 4200 if (bctl->flags & BTRFS_BALANCE_METADATA) { 4201 describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf)); 4202 CHECK_APPEND_1ARG("-m%s ", tmp_buf); 4203 } 4204 4205 if (bctl->flags & BTRFS_BALANCE_SYSTEM) { 4206 describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf)); 4207 CHECK_APPEND_1ARG("-s%s ", tmp_buf); 4208 } 4209 4210 #undef CHECK_APPEND_1ARG 4211 4212 out_overflow: 4213 4214 if (size_bp < size_buf) 4215 buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */ 4216 btrfs_info(fs_info, "balance: %s %s", 4217 (bctl->flags & BTRFS_BALANCE_RESUME) ? 4218 "resume" : "start", buf); 4219 4220 kfree(buf); 4221 } 4222 4223 /* 4224 * Should be called with balance mutexe held 4225 */ 4226 int btrfs_balance(struct btrfs_fs_info *fs_info, 4227 struct btrfs_balance_control *bctl, 4228 struct btrfs_ioctl_balance_args *bargs) 4229 { 4230 u64 meta_target, data_target; 4231 u64 allowed; 4232 int mixed = 0; 4233 int ret; 4234 u64 num_devices; 4235 unsigned seq; 4236 bool reducing_redundancy; 4237 int i; 4238 4239 if (btrfs_fs_closing(fs_info) || 4240 atomic_read(&fs_info->balance_pause_req) || 4241 btrfs_should_cancel_balance(fs_info)) { 4242 ret = -EINVAL; 4243 goto out; 4244 } 4245 4246 allowed = btrfs_super_incompat_flags(fs_info->super_copy); 4247 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) 4248 mixed = 1; 4249 4250 /* 4251 * In case of mixed groups both data and meta should be picked, 4252 * and identical options should be given for both of them. 
4253 */ 4254 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA; 4255 if (mixed && (bctl->flags & allowed)) { 4256 if (!(bctl->flags & BTRFS_BALANCE_DATA) || 4257 !(bctl->flags & BTRFS_BALANCE_METADATA) || 4258 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) { 4259 btrfs_err(fs_info, 4260 "balance: mixed groups data and metadata options must be the same"); 4261 ret = -EINVAL; 4262 goto out; 4263 } 4264 } 4265 4266 /* 4267 * rw_devices will not change at the moment, device add/delete/replace 4268 * are exclusive 4269 */ 4270 num_devices = fs_info->fs_devices->rw_devices; 4271 4272 /* 4273 * SINGLE profile on-disk has no profile bit, but in-memory we have a 4274 * special bit for it, to make it easier to distinguish. Thus we need 4275 * to set it manually, or balance would refuse the profile. 4276 */ 4277 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE; 4278 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) 4279 if (num_devices >= btrfs_raid_array[i].devs_min) 4280 allowed |= btrfs_raid_array[i].bg_flag; 4281 4282 if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") || 4283 !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") || 4284 !validate_convert_profile(fs_info, &bctl->sys, allowed, "system")) { 4285 ret = -EINVAL; 4286 goto out; 4287 } 4288 4289 /* 4290 * Allow to reduce metadata or system integrity only if force set for 4291 * profiles with redundancy (copies, parity) 4292 */ 4293 allowed = 0; 4294 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) { 4295 if (btrfs_raid_array[i].ncopies >= 2 || 4296 btrfs_raid_array[i].tolerated_failures >= 1) 4297 allowed |= btrfs_raid_array[i].bg_flag; 4298 } 4299 do { 4300 seq = read_seqbegin(&fs_info->profiles_lock); 4301 4302 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4303 (fs_info->avail_system_alloc_bits & allowed) && 4304 !(bctl->sys.target & allowed)) || 4305 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4306 (fs_info->avail_metadata_alloc_bits & allowed) && 4307 !(bctl->meta.target & allowed))) 4308 reducing_redundancy = true; 4309 else 4310 reducing_redundancy = false; 4311 4312 /* if we're not converting, the target field is uninitialized */ 4313 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 4314 bctl->meta.target : fs_info->avail_metadata_alloc_bits; 4315 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 
4316 bctl->data.target : fs_info->avail_data_alloc_bits; 4317 } while (read_seqretry(&fs_info->profiles_lock, seq)); 4318 4319 if (reducing_redundancy) { 4320 if (bctl->flags & BTRFS_BALANCE_FORCE) { 4321 btrfs_info(fs_info, 4322 "balance: force reducing metadata redundancy"); 4323 } else { 4324 btrfs_err(fs_info, 4325 "balance: reduces metadata redundancy, use --force if you want this"); 4326 ret = -EINVAL; 4327 goto out; 4328 } 4329 } 4330 4331 if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) < 4332 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) { 4333 btrfs_warn(fs_info, 4334 "balance: metadata profile %s has lower redundancy than data profile %s", 4335 btrfs_bg_type_to_raid_name(meta_target), 4336 btrfs_bg_type_to_raid_name(data_target)); 4337 } 4338 4339 ret = insert_balance_item(fs_info, bctl); 4340 if (ret && ret != -EEXIST) 4341 goto out; 4342 4343 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) { 4344 BUG_ON(ret == -EEXIST); 4345 BUG_ON(fs_info->balance_ctl); 4346 spin_lock(&fs_info->balance_lock); 4347 fs_info->balance_ctl = bctl; 4348 spin_unlock(&fs_info->balance_lock); 4349 } else { 4350 BUG_ON(ret != -EEXIST); 4351 spin_lock(&fs_info->balance_lock); 4352 update_balance_args(bctl); 4353 spin_unlock(&fs_info->balance_lock); 4354 } 4355 4356 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4357 set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4358 describe_balance_start_or_resume(fs_info); 4359 mutex_unlock(&fs_info->balance_mutex); 4360 4361 ret = __btrfs_balance(fs_info); 4362 4363 mutex_lock(&fs_info->balance_mutex); 4364 if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req)) { 4365 btrfs_info(fs_info, "balance: paused"); 4366 btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED); 4367 } 4368 /* 4369 * Balance can be canceled by: 4370 * 4371 * - Regular cancel request 4372 * Then ret == -ECANCELED and balance_cancel_req > 0 4373 * 4374 * - Fatal signal to "btrfs" process 4375 * Either the signal caught by wait_reserve_ticket() and callers 4376 * got -EINTR, or caught by btrfs_should_cancel_balance() and 4377 * got -ECANCELED. 4378 * Either way, in this case balance_cancel_req = 0, and 4379 * ret == -EINTR or ret == -ECANCELED. 4380 * 4381 * So here we only check the return value to catch canceled balance. 
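 * (The paused case was already handled above, where -ECANCELED is paired with a non-zero balance_pause_req.)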
4382 */ 4383 else if (ret == -ECANCELED || ret == -EINTR) 4384 btrfs_info(fs_info, "balance: canceled"); 4385 else 4386 btrfs_info(fs_info, "balance: ended with status: %d", ret); 4387 4388 clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4389 4390 if (bargs) { 4391 memset(bargs, 0, sizeof(*bargs)); 4392 btrfs_update_ioctl_balance_args(fs_info, bargs); 4393 } 4394 4395 if ((ret && ret != -ECANCELED && ret != -ENOSPC) || 4396 balance_need_close(fs_info)) { 4397 reset_balance_state(fs_info); 4398 btrfs_exclop_finish(fs_info); 4399 } 4400 4401 wake_up(&fs_info->balance_wait_q); 4402 4403 return ret; 4404 out: 4405 if (bctl->flags & BTRFS_BALANCE_RESUME) 4406 reset_balance_state(fs_info); 4407 else 4408 kfree(bctl); 4409 btrfs_exclop_finish(fs_info); 4410 4411 return ret; 4412 } 4413 4414 static int balance_kthread(void *data) 4415 { 4416 struct btrfs_fs_info *fs_info = data; 4417 int ret = 0; 4418 4419 sb_start_write(fs_info->sb); 4420 mutex_lock(&fs_info->balance_mutex); 4421 if (fs_info->balance_ctl) 4422 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL); 4423 mutex_unlock(&fs_info->balance_mutex); 4424 sb_end_write(fs_info->sb); 4425 4426 return ret; 4427 } 4428 4429 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info) 4430 { 4431 struct task_struct *tsk; 4432 4433 mutex_lock(&fs_info->balance_mutex); 4434 if (!fs_info->balance_ctl) { 4435 mutex_unlock(&fs_info->balance_mutex); 4436 return 0; 4437 } 4438 mutex_unlock(&fs_info->balance_mutex); 4439 4440 if (btrfs_test_opt(fs_info, SKIP_BALANCE)) { 4441 btrfs_info(fs_info, "balance: resume skipped"); 4442 return 0; 4443 } 4444 4445 spin_lock(&fs_info->super_lock); 4446 ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED); 4447 fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE; 4448 spin_unlock(&fs_info->super_lock); 4449 /* 4450 * A ro->rw remount sequence should continue with the paused balance 4451 * regardless of who pauses it, system or the user as of now, so set 4452 * the resume flag. 
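 * E.g. a balance paused with 'btrfs balance pause' before the remount is restarted here without further user action.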
4453 */ 4454 spin_lock(&fs_info->balance_lock); 4455 fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME; 4456 spin_unlock(&fs_info->balance_lock); 4457 4458 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance"); 4459 return PTR_ERR_OR_ZERO(tsk); 4460 } 4461 4462 int btrfs_recover_balance(struct btrfs_fs_info *fs_info) 4463 { 4464 struct btrfs_balance_control *bctl; 4465 struct btrfs_balance_item *item; 4466 struct btrfs_disk_balance_args disk_bargs; 4467 struct btrfs_path *path; 4468 struct extent_buffer *leaf; 4469 struct btrfs_key key; 4470 int ret; 4471 4472 path = btrfs_alloc_path(); 4473 if (!path) 4474 return -ENOMEM; 4475 4476 key.objectid = BTRFS_BALANCE_OBJECTID; 4477 key.type = BTRFS_TEMPORARY_ITEM_KEY; 4478 key.offset = 0; 4479 4480 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 4481 if (ret < 0) 4482 goto out; 4483 if (ret > 0) { /* ret = -ENOENT; */ 4484 ret = 0; 4485 goto out; 4486 } 4487 4488 bctl = kzalloc(sizeof(*bctl), GFP_NOFS); 4489 if (!bctl) { 4490 ret = -ENOMEM; 4491 goto out; 4492 } 4493 4494 leaf = path->nodes[0]; 4495 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 4496 4497 bctl->flags = btrfs_balance_flags(leaf, item); 4498 bctl->flags |= BTRFS_BALANCE_RESUME; 4499 4500 btrfs_balance_data(leaf, item, &disk_bargs); 4501 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs); 4502 btrfs_balance_meta(leaf, item, &disk_bargs); 4503 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs); 4504 btrfs_balance_sys(leaf, item, &disk_bargs); 4505 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs); 4506 4507 /* 4508 * This should never happen, as the paused balance state is recovered 4509 * during mount without any chance of other exclusive ops to collide. 4510 * 4511 * This gives the exclusive op status to balance and keeps in paused 4512 * state until user intervention (cancel or umount). If the ownership 4513 * cannot be assigned, show a message but do not fail. The balance 4514 * is in a paused state and must have fs_info::balance_ctl properly 4515 * set up. 
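 * (The balance item is deleted only when a balance completes or is canceled, so finding it here means a previous balance was interrupted or paused.)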
4516 */ 4517 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED)) 4518 btrfs_warn(fs_info, 4519 "balance: cannot set exclusive op status, resume manually"); 4520 4521 btrfs_release_path(path); 4522 4523 mutex_lock(&fs_info->balance_mutex); 4524 BUG_ON(fs_info->balance_ctl); 4525 spin_lock(&fs_info->balance_lock); 4526 fs_info->balance_ctl = bctl; 4527 spin_unlock(&fs_info->balance_lock); 4528 mutex_unlock(&fs_info->balance_mutex); 4529 out: 4530 btrfs_free_path(path); 4531 return ret; 4532 } 4533 4534 int btrfs_pause_balance(struct btrfs_fs_info *fs_info) 4535 { 4536 int ret = 0; 4537 4538 mutex_lock(&fs_info->balance_mutex); 4539 if (!fs_info->balance_ctl) { 4540 mutex_unlock(&fs_info->balance_mutex); 4541 return -ENOTCONN; 4542 } 4543 4544 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4545 atomic_inc(&fs_info->balance_pause_req); 4546 mutex_unlock(&fs_info->balance_mutex); 4547 4548 wait_event(fs_info->balance_wait_q, 4549 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4550 4551 mutex_lock(&fs_info->balance_mutex); 4552 /* we are good with balance_ctl ripped off from under us */ 4553 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4554 atomic_dec(&fs_info->balance_pause_req); 4555 } else { 4556 ret = -ENOTCONN; 4557 } 4558 4559 mutex_unlock(&fs_info->balance_mutex); 4560 return ret; 4561 } 4562 4563 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info) 4564 { 4565 mutex_lock(&fs_info->balance_mutex); 4566 if (!fs_info->balance_ctl) { 4567 mutex_unlock(&fs_info->balance_mutex); 4568 return -ENOTCONN; 4569 } 4570 4571 /* 4572 * A paused balance with the item stored on disk can be resumed at 4573 * mount time if the mount is read-write. Otherwise it's still paused 4574 * and we must not allow cancelling as it deletes the item. 4575 */ 4576 if (sb_rdonly(fs_info->sb)) { 4577 mutex_unlock(&fs_info->balance_mutex); 4578 return -EROFS; 4579 } 4580 4581 atomic_inc(&fs_info->balance_cancel_req); 4582 /* 4583 * if we are running just wait and return, balance item is 4584 * deleted in btrfs_balance in this case 4585 */ 4586 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4587 mutex_unlock(&fs_info->balance_mutex); 4588 wait_event(fs_info->balance_wait_q, 4589 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4590 mutex_lock(&fs_info->balance_mutex); 4591 } else { 4592 mutex_unlock(&fs_info->balance_mutex); 4593 /* 4594 * Lock released to allow other waiters to continue, we'll 4595 * reexamine the status again. 
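 * The paused balance may have been resumed and finished, or canceled by another task, in the meantime, hence the balance_ctl check below.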
4596 */ 4597 mutex_lock(&fs_info->balance_mutex); 4598 4599 if (fs_info->balance_ctl) { 4600 reset_balance_state(fs_info); 4601 btrfs_exclop_finish(fs_info); 4602 btrfs_info(fs_info, "balance: canceled"); 4603 } 4604 } 4605 4606 BUG_ON(fs_info->balance_ctl || 4607 test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4608 atomic_dec(&fs_info->balance_cancel_req); 4609 mutex_unlock(&fs_info->balance_mutex); 4610 return 0; 4611 } 4612 4613 int btrfs_uuid_scan_kthread(void *data) 4614 { 4615 struct btrfs_fs_info *fs_info = data; 4616 struct btrfs_root *root = fs_info->tree_root; 4617 struct btrfs_key key; 4618 struct btrfs_path *path = NULL; 4619 int ret = 0; 4620 struct extent_buffer *eb; 4621 int slot; 4622 struct btrfs_root_item root_item; 4623 u32 item_size; 4624 struct btrfs_trans_handle *trans = NULL; 4625 bool closing = false; 4626 4627 path = btrfs_alloc_path(); 4628 if (!path) { 4629 ret = -ENOMEM; 4630 goto out; 4631 } 4632 4633 key.objectid = 0; 4634 key.type = BTRFS_ROOT_ITEM_KEY; 4635 key.offset = 0; 4636 4637 while (1) { 4638 if (btrfs_fs_closing(fs_info)) { 4639 closing = true; 4640 break; 4641 } 4642 ret = btrfs_search_forward(root, &key, path, 4643 BTRFS_OLDEST_GENERATION); 4644 if (ret) { 4645 if (ret > 0) 4646 ret = 0; 4647 break; 4648 } 4649 4650 if (key.type != BTRFS_ROOT_ITEM_KEY || 4651 (key.objectid < BTRFS_FIRST_FREE_OBJECTID && 4652 key.objectid != BTRFS_FS_TREE_OBJECTID) || 4653 key.objectid > BTRFS_LAST_FREE_OBJECTID) 4654 goto skip; 4655 4656 eb = path->nodes[0]; 4657 slot = path->slots[0]; 4658 item_size = btrfs_item_size(eb, slot); 4659 if (item_size < sizeof(root_item)) 4660 goto skip; 4661 4662 read_extent_buffer(eb, &root_item, 4663 btrfs_item_ptr_offset(eb, slot), 4664 (int)sizeof(root_item)); 4665 if (btrfs_root_refs(&root_item) == 0) 4666 goto skip; 4667 4668 if (!btrfs_is_empty_uuid(root_item.uuid) || 4669 !btrfs_is_empty_uuid(root_item.received_uuid)) { 4670 if (trans) 4671 goto update_tree; 4672 4673 btrfs_release_path(path); 4674 /* 4675 * 1 - subvol uuid item 4676 * 1 - received_subvol uuid item 4677 */ 4678 trans = btrfs_start_transaction(fs_info->uuid_root, 2); 4679 if (IS_ERR(trans)) { 4680 ret = PTR_ERR(trans); 4681 break; 4682 } 4683 continue; 4684 } else { 4685 goto skip; 4686 } 4687 update_tree: 4688 btrfs_release_path(path); 4689 if (!btrfs_is_empty_uuid(root_item.uuid)) { 4690 ret = btrfs_uuid_tree_add(trans, root_item.uuid, 4691 BTRFS_UUID_KEY_SUBVOL, 4692 key.objectid); 4693 if (ret < 0) { 4694 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4695 ret); 4696 break; 4697 } 4698 } 4699 4700 if (!btrfs_is_empty_uuid(root_item.received_uuid)) { 4701 ret = btrfs_uuid_tree_add(trans, 4702 root_item.received_uuid, 4703 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 4704 key.objectid); 4705 if (ret < 0) { 4706 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4707 ret); 4708 break; 4709 } 4710 } 4711 4712 skip: 4713 btrfs_release_path(path); 4714 if (trans) { 4715 ret = btrfs_end_transaction(trans); 4716 trans = NULL; 4717 if (ret) 4718 break; 4719 } 4720 4721 if (key.offset < (u64)-1) { 4722 key.offset++; 4723 } else if (key.type < BTRFS_ROOT_ITEM_KEY) { 4724 key.offset = 0; 4725 key.type = BTRFS_ROOT_ITEM_KEY; 4726 } else if (key.objectid < (u64)-1) { 4727 key.offset = 0; 4728 key.type = BTRFS_ROOT_ITEM_KEY; 4729 key.objectid++; 4730 } else { 4731 break; 4732 } 4733 cond_resched(); 4734 } 4735 4736 out: 4737 btrfs_free_path(path); 4738 if (trans && !IS_ERR(trans)) 4739 btrfs_end_transaction(trans); 4740 if (ret) 4741 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread 
failed %d", ret); 4742 else if (!closing) 4743 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags); 4744 up(&fs_info->uuid_tree_rescan_sem); 4745 return 0; 4746 } 4747 4748 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info) 4749 { 4750 struct btrfs_trans_handle *trans; 4751 struct btrfs_root *tree_root = fs_info->tree_root; 4752 struct btrfs_root *uuid_root; 4753 struct task_struct *task; 4754 int ret; 4755 4756 /* 4757 * 1 - root node 4758 * 1 - root item 4759 */ 4760 trans = btrfs_start_transaction(tree_root, 2); 4761 if (IS_ERR(trans)) 4762 return PTR_ERR(trans); 4763 4764 uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID); 4765 if (IS_ERR(uuid_root)) { 4766 ret = PTR_ERR(uuid_root); 4767 btrfs_abort_transaction(trans, ret); 4768 btrfs_end_transaction(trans); 4769 return ret; 4770 } 4771 4772 fs_info->uuid_root = uuid_root; 4773 4774 ret = btrfs_commit_transaction(trans); 4775 if (ret) 4776 return ret; 4777 4778 down(&fs_info->uuid_tree_rescan_sem); 4779 task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid"); 4780 if (IS_ERR(task)) { 4781 /* fs_info->update_uuid_tree_gen remains 0 in all error case */ 4782 btrfs_warn(fs_info, "failed to start uuid_scan task"); 4783 up(&fs_info->uuid_tree_rescan_sem); 4784 return PTR_ERR(task); 4785 } 4786 4787 return 0; 4788 } 4789 4790 /* 4791 * shrinking a device means finding all of the device extents past 4792 * the new size, and then following the back refs to the chunks. 4793 * The chunk relocation code actually frees the device extent 4794 */ 4795 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) 4796 { 4797 struct btrfs_fs_info *fs_info = device->fs_info; 4798 struct btrfs_root *root = fs_info->dev_root; 4799 struct btrfs_trans_handle *trans; 4800 struct btrfs_dev_extent *dev_extent = NULL; 4801 struct btrfs_path *path; 4802 u64 length; 4803 u64 chunk_offset; 4804 int ret; 4805 int slot; 4806 int failed = 0; 4807 bool retried = false; 4808 struct extent_buffer *l; 4809 struct btrfs_key key; 4810 struct btrfs_super_block *super_copy = fs_info->super_copy; 4811 u64 old_total = btrfs_super_total_bytes(super_copy); 4812 u64 old_size = btrfs_device_get_total_bytes(device); 4813 u64 diff; 4814 u64 start; 4815 4816 new_size = round_down(new_size, fs_info->sectorsize); 4817 start = new_size; 4818 diff = round_down(old_size - new_size, fs_info->sectorsize); 4819 4820 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 4821 return -EINVAL; 4822 4823 path = btrfs_alloc_path(); 4824 if (!path) 4825 return -ENOMEM; 4826 4827 path->reada = READA_BACK; 4828 4829 trans = btrfs_start_transaction(root, 0); 4830 if (IS_ERR(trans)) { 4831 btrfs_free_path(path); 4832 return PTR_ERR(trans); 4833 } 4834 4835 mutex_lock(&fs_info->chunk_mutex); 4836 4837 btrfs_device_set_total_bytes(device, new_size); 4838 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 4839 device->fs_devices->total_rw_bytes -= diff; 4840 atomic64_sub(diff, &fs_info->free_chunk_space); 4841 } 4842 4843 /* 4844 * Once the device's size has been set to the new size, ensure all 4845 * in-memory chunks are synced to disk so that the loop below sees them 4846 * and relocates them accordingly. 
4847 */ 4848 if (contains_pending_extent(device, &start, diff)) { 4849 mutex_unlock(&fs_info->chunk_mutex); 4850 ret = btrfs_commit_transaction(trans); 4851 if (ret) 4852 goto done; 4853 } else { 4854 mutex_unlock(&fs_info->chunk_mutex); 4855 btrfs_end_transaction(trans); 4856 } 4857 4858 again: 4859 key.objectid = device->devid; 4860 key.offset = (u64)-1; 4861 key.type = BTRFS_DEV_EXTENT_KEY; 4862 4863 do { 4864 mutex_lock(&fs_info->reclaim_bgs_lock); 4865 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4866 if (ret < 0) { 4867 mutex_unlock(&fs_info->reclaim_bgs_lock); 4868 goto done; 4869 } 4870 4871 ret = btrfs_previous_item(root, path, 0, key.type); 4872 if (ret) { 4873 mutex_unlock(&fs_info->reclaim_bgs_lock); 4874 if (ret < 0) 4875 goto done; 4876 ret = 0; 4877 btrfs_release_path(path); 4878 break; 4879 } 4880 4881 l = path->nodes[0]; 4882 slot = path->slots[0]; 4883 btrfs_item_key_to_cpu(l, &key, path->slots[0]); 4884 4885 if (key.objectid != device->devid) { 4886 mutex_unlock(&fs_info->reclaim_bgs_lock); 4887 btrfs_release_path(path); 4888 break; 4889 } 4890 4891 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 4892 length = btrfs_dev_extent_length(l, dev_extent); 4893 4894 if (key.offset + length <= new_size) { 4895 mutex_unlock(&fs_info->reclaim_bgs_lock); 4896 btrfs_release_path(path); 4897 break; 4898 } 4899 4900 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); 4901 btrfs_release_path(path); 4902 4903 /* 4904 * We may be relocating the only data chunk we have, 4905 * which could potentially end up losing the data's 4906 * raid profile, so let's allocate an empty one in 4907 * advance. 4908 */ 4909 ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset); 4910 if (ret < 0) { 4911 mutex_unlock(&fs_info->reclaim_bgs_lock); 4912 goto done; 4913 } 4914 4915 ret = btrfs_relocate_chunk(fs_info, chunk_offset); 4916 mutex_unlock(&fs_info->reclaim_bgs_lock); 4917 if (ret == -ENOSPC) { 4918 failed++; 4919 } else if (ret) { 4920 if (ret == -ETXTBSY) { 4921 btrfs_warn(fs_info, 4922 "could not shrink block group %llu due to active swapfile", 4923 chunk_offset); 4924 } 4925 goto done; 4926 } 4927 } while (key.offset-- > 0); 4928 4929 if (failed && !retried) { 4930 failed = 0; 4931 retried = true; 4932 goto again; 4933 } else if (failed && retried) { 4934 ret = -ENOSPC; 4935 goto done; 4936 } 4937 4938 /* Shrinking succeeded, else we would be at "done". */ 4939 trans = btrfs_start_transaction(root, 0); 4940 if (IS_ERR(trans)) { 4941 ret = PTR_ERR(trans); 4942 goto done; 4943 } 4944 4945 mutex_lock(&fs_info->chunk_mutex); 4946 /* Clear all state bits beyond the shrunk device size */ 4947 clear_extent_bits(&device->alloc_state, new_size, (u64)-1, 4948 CHUNK_STATE_MASK); 4949 4950 btrfs_device_set_disk_total_bytes(device, new_size); 4951 if (list_empty(&device->post_commit_list)) 4952 list_add_tail(&device->post_commit_list, 4953 &trans->transaction->dev_update_list); 4954 4955 WARN_ON(diff > old_total); 4956 btrfs_set_super_total_bytes(super_copy, 4957 round_down(old_total - diff, fs_info->sectorsize)); 4958 mutex_unlock(&fs_info->chunk_mutex); 4959 4960 btrfs_reserve_chunk_metadata(trans, false); 4961 /* Now btrfs_update_device() will change the on-disk size.
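 * That is the total_bytes of the device item in the chunk tree; the superblock's total_bytes was already reduced above.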
*/ 4962 ret = btrfs_update_device(trans, device); 4963 btrfs_trans_release_chunk_metadata(trans); 4964 if (ret < 0) { 4965 btrfs_abort_transaction(trans, ret); 4966 btrfs_end_transaction(trans); 4967 } else { 4968 ret = btrfs_commit_transaction(trans); 4969 } 4970 done: 4971 btrfs_free_path(path); 4972 if (ret) { 4973 mutex_lock(&fs_info->chunk_mutex); 4974 btrfs_device_set_total_bytes(device, old_size); 4975 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 4976 device->fs_devices->total_rw_bytes += diff; 4977 atomic64_add(diff, &fs_info->free_chunk_space); 4978 mutex_unlock(&fs_info->chunk_mutex); 4979 } 4980 return ret; 4981 } 4982 4983 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info, 4984 struct btrfs_key *key, 4985 struct btrfs_chunk *chunk, int item_size) 4986 { 4987 struct btrfs_super_block *super_copy = fs_info->super_copy; 4988 struct btrfs_disk_key disk_key; 4989 u32 array_size; 4990 u8 *ptr; 4991 4992 lockdep_assert_held(&fs_info->chunk_mutex); 4993 4994 array_size = btrfs_super_sys_array_size(super_copy); 4995 if (array_size + item_size + sizeof(disk_key) 4996 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) 4997 return -EFBIG; 4998 4999 ptr = super_copy->sys_chunk_array + array_size; 5000 btrfs_cpu_key_to_disk(&disk_key, key); 5001 memcpy(ptr, &disk_key, sizeof(disk_key)); 5002 ptr += sizeof(disk_key); 5003 memcpy(ptr, chunk, item_size); 5004 item_size += sizeof(disk_key); 5005 btrfs_set_super_sys_array_size(super_copy, array_size + item_size); 5006 5007 return 0; 5008 } 5009 5010 /* 5011 * sort the devices in descending order by max_avail, total_avail 5012 */ 5013 static int btrfs_cmp_device_info(const void *a, const void *b) 5014 { 5015 const struct btrfs_device_info *di_a = a; 5016 const struct btrfs_device_info *di_b = b; 5017 5018 if (di_a->max_avail > di_b->max_avail) 5019 return -1; 5020 if (di_a->max_avail < di_b->max_avail) 5021 return 1; 5022 if (di_a->total_avail > di_b->total_avail) 5023 return -1; 5024 if (di_a->total_avail < di_b->total_avail) 5025 return 1; 5026 return 0; 5027 } 5028 5029 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type) 5030 { 5031 if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK)) 5032 return; 5033 5034 btrfs_set_fs_incompat(info, RAID56); 5035 } 5036 5037 static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type) 5038 { 5039 if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4))) 5040 return; 5041 5042 btrfs_set_fs_incompat(info, RAID1C34); 5043 } 5044 5045 /* 5046 * Structure used internally for btrfs_create_chunk() function. 5047 * Wraps needed parameters. 
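 * E.g. (illustrative) RAID10 across four writable devices ends up with num_stripes=4, sub_stripes=2, dev_stripes=1, ncopies=2 and nparity=0.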
5048 */ 5049 struct alloc_chunk_ctl { 5050 u64 start; 5051 u64 type; 5052 /* Total number of stripes to allocate */ 5053 int num_stripes; 5054 /* sub_stripes info for map */ 5055 int sub_stripes; 5056 /* Stripes per device */ 5057 int dev_stripes; 5058 /* Maximum number of devices to use */ 5059 int devs_max; 5060 /* Minimum number of devices to use */ 5061 int devs_min; 5062 /* ndevs has to be a multiple of this */ 5063 int devs_increment; 5064 /* Number of copies */ 5065 int ncopies; 5066 /* Number of stripes worth of bytes to store parity information */ 5067 int nparity; 5068 u64 max_stripe_size; 5069 u64 max_chunk_size; 5070 u64 dev_extent_min; 5071 u64 stripe_size; 5072 u64 chunk_size; 5073 int ndevs; 5074 }; 5075 5076 static void init_alloc_chunk_ctl_policy_regular( 5077 struct btrfs_fs_devices *fs_devices, 5078 struct alloc_chunk_ctl *ctl) 5079 { 5080 struct btrfs_space_info *space_info; 5081 5082 space_info = btrfs_find_space_info(fs_devices->fs_info, ctl->type); 5083 ASSERT(space_info); 5084 5085 ctl->max_chunk_size = READ_ONCE(space_info->chunk_size); 5086 ctl->max_stripe_size = ctl->max_chunk_size; 5087 5088 if (ctl->type & BTRFS_BLOCK_GROUP_SYSTEM) 5089 ctl->devs_max = min_t(int, ctl->devs_max, BTRFS_MAX_DEVS_SYS_CHUNK); 5090 5091 /* We don't want a chunk larger than 10% of writable space */ 5092 ctl->max_chunk_size = min(mult_perc(fs_devices->total_rw_bytes, 10), 5093 ctl->max_chunk_size); 5094 ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes; 5095 } 5096 5097 static void init_alloc_chunk_ctl_policy_zoned( 5098 struct btrfs_fs_devices *fs_devices, 5099 struct alloc_chunk_ctl *ctl) 5100 { 5101 u64 zone_size = fs_devices->fs_info->zone_size; 5102 u64 limit; 5103 int min_num_stripes = ctl->devs_min * ctl->dev_stripes; 5104 int min_data_stripes = (min_num_stripes - ctl->nparity) / ctl->ncopies; 5105 u64 min_chunk_size = min_data_stripes * zone_size; 5106 u64 type = ctl->type; 5107 5108 ctl->max_stripe_size = zone_size; 5109 if (type & BTRFS_BLOCK_GROUP_DATA) { 5110 ctl->max_chunk_size = round_down(BTRFS_MAX_DATA_CHUNK_SIZE, 5111 zone_size); 5112 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 5113 ctl->max_chunk_size = ctl->max_stripe_size; 5114 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 5115 ctl->max_chunk_size = 2 * ctl->max_stripe_size; 5116 ctl->devs_max = min_t(int, ctl->devs_max, 5117 BTRFS_MAX_DEVS_SYS_CHUNK); 5118 } else { 5119 BUG(); 5120 } 5121 5122 /* We don't want a chunk larger than 10% of writable space */ 5123 limit = max(round_down(mult_perc(fs_devices->total_rw_bytes, 10), 5124 zone_size), 5125 min_chunk_size); 5126 ctl->max_chunk_size = min(limit, ctl->max_chunk_size); 5127 ctl->dev_extent_min = zone_size * ctl->dev_stripes; 5128 } 5129 5130 static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices, 5131 struct alloc_chunk_ctl *ctl) 5132 { 5133 int index = btrfs_bg_flags_to_raid_index(ctl->type); 5134 5135 ctl->sub_stripes = btrfs_raid_array[index].sub_stripes; 5136 ctl->dev_stripes = btrfs_raid_array[index].dev_stripes; 5137 ctl->devs_max = btrfs_raid_array[index].devs_max; 5138 if (!ctl->devs_max) 5139 ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info); 5140 ctl->devs_min = btrfs_raid_array[index].devs_min; 5141 ctl->devs_increment = btrfs_raid_array[index].devs_increment; 5142 ctl->ncopies = btrfs_raid_array[index].ncopies; 5143 ctl->nparity = btrfs_raid_array[index].nparity; 5144 ctl->ndevs = 0; 5145 5146 switch (fs_devices->chunk_alloc_policy) { 5147 case BTRFS_CHUNK_ALLOC_REGULAR: 5148 init_alloc_chunk_ctl_policy_regular(fs_devices, 
ctl); 5149 break; 5150 case BTRFS_CHUNK_ALLOC_ZONED: 5151 init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl); 5152 break; 5153 default: 5154 BUG(); 5155 } 5156 } 5157 5158 static int gather_device_info(struct btrfs_fs_devices *fs_devices, 5159 struct alloc_chunk_ctl *ctl, 5160 struct btrfs_device_info *devices_info) 5161 { 5162 struct btrfs_fs_info *info = fs_devices->fs_info; 5163 struct btrfs_device *device; 5164 u64 total_avail; 5165 u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes; 5166 int ret; 5167 int ndevs = 0; 5168 u64 max_avail; 5169 u64 dev_offset; 5170 5171 /* 5172 * in the first pass through the devices list, we gather information 5173 * about the available holes on each device. 5174 */ 5175 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { 5176 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 5177 WARN(1, KERN_ERR 5178 "BTRFS: read-only device in alloc_list\n"); 5179 continue; 5180 } 5181 5182 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 5183 &device->dev_state) || 5184 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 5185 continue; 5186 5187 if (device->total_bytes > device->bytes_used) 5188 total_avail = device->total_bytes - device->bytes_used; 5189 else 5190 total_avail = 0; 5191 5192 /* If there is no space on this device, skip it. */ 5193 if (total_avail < ctl->dev_extent_min) 5194 continue; 5195 5196 ret = find_free_dev_extent(device, dev_extent_want, &dev_offset, 5197 &max_avail); 5198 if (ret && ret != -ENOSPC) 5199 return ret; 5200 5201 if (ret == 0) 5202 max_avail = dev_extent_want; 5203 5204 if (max_avail < ctl->dev_extent_min) { 5205 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 5206 btrfs_debug(info, 5207 "%s: devid %llu has no free space, have=%llu want=%llu", 5208 __func__, device->devid, max_avail, 5209 ctl->dev_extent_min); 5210 continue; 5211 } 5212 5213 if (ndevs == fs_devices->rw_devices) { 5214 WARN(1, "%s: found more than %llu devices\n", 5215 __func__, fs_devices->rw_devices); 5216 break; 5217 } 5218 devices_info[ndevs].dev_offset = dev_offset; 5219 devices_info[ndevs].max_avail = max_avail; 5220 devices_info[ndevs].total_avail = total_avail; 5221 devices_info[ndevs].dev = device; 5222 ++ndevs; 5223 } 5224 ctl->ndevs = ndevs; 5225 5226 /* 5227 * now sort the devices by hole size / available space 5228 */ 5229 sort(devices_info, ndevs, sizeof(struct btrfs_device_info), 5230 btrfs_cmp_device_info, NULL); 5231 5232 return 0; 5233 } 5234 5235 static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl, 5236 struct btrfs_device_info *devices_info) 5237 { 5238 /* Number of stripes that count for block group size */ 5239 int data_stripes; 5240 5241 /* 5242 * The primary goal is to maximize the number of stripes, so use as 5243 * many devices as possible, even if the stripes are not maximum sized. 5244 * 5245 * The DUP profile stores more than one stripe per device, the 5246 * max_avail is the total size so we have to adjust. 5247 */ 5248 ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail, 5249 ctl->dev_stripes); 5250 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5251 5252 /* This will have to be fixed for RAID1 and RAID10 over more drives */ 5253 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5254 5255 /* 5256 * Use the number of data stripes to figure out how big this chunk is 5257 * really going to be in terms of logical address space, and compare 5258 * that answer with the max chunk size. If it's higher, we try to 5259 * reduce stripe_size. 
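 * E.g. (illustrative numbers) six data stripes against a 10GiB max_chunk_size cap stripe_size at round_up(10GiB / 6, 16MiB), unless the current value is already smaller.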
5260 */ 5261 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) { 5262 /* 5263 * Reduce stripe_size, round it up to a 16MB boundary again and 5264 * then use it, unless it ends up being even bigger than the 5265 * previous value we had already. 5266 */ 5267 ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size, 5268 data_stripes), SZ_16M), 5269 ctl->stripe_size); 5270 } 5271 5272 /* Stripe size should not go beyond 1G. */ 5273 ctl->stripe_size = min_t(u64, ctl->stripe_size, SZ_1G); 5274 5275 /* Align to BTRFS_STRIPE_LEN */ 5276 ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN); 5277 ctl->chunk_size = ctl->stripe_size * data_stripes; 5278 5279 return 0; 5280 } 5281 5282 static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl, 5283 struct btrfs_device_info *devices_info) 5284 { 5285 u64 zone_size = devices_info[0].dev->zone_info->zone_size; 5286 /* Number of stripes that count for block group size */ 5287 int data_stripes; 5288 5289 /* 5290 * It should hold because: 5291 * dev_extent_min == dev_extent_want == zone_size * dev_stripes 5292 */ 5293 ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min); 5294 5295 ctl->stripe_size = zone_size; 5296 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5297 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5298 5299 /* stripe_size is fixed in a zoned filesystem. Reduce ndevs instead. */ 5300 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) { 5301 ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies, 5302 ctl->stripe_size) + ctl->nparity, 5303 ctl->dev_stripes); 5304 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5305 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5306 ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size); 5307 } 5308 5309 ctl->chunk_size = ctl->stripe_size * data_stripes; 5310 5311 return 0; 5312 } 5313 5314 static int decide_stripe_size(struct btrfs_fs_devices *fs_devices, 5315 struct alloc_chunk_ctl *ctl, 5316 struct btrfs_device_info *devices_info) 5317 { 5318 struct btrfs_fs_info *info = fs_devices->fs_info; 5319 5320 /* 5321 * Round down to number of usable stripes, devs_increment can be any 5322 * number so we can't use round_down() that requires power of 2, while 5323 * rounddown is safe.
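 * E.g. RAID10 has devs_increment == 2, so five usable devices are rounded down to four here.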
5324 */ 5325 ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment); 5326 5327 if (ctl->ndevs < ctl->devs_min) { 5328 if (btrfs_test_opt(info, ENOSPC_DEBUG)) { 5329 btrfs_debug(info, 5330 "%s: not enough devices with free space: have=%d minimum required=%d", 5331 __func__, ctl->ndevs, ctl->devs_min); 5332 } 5333 return -ENOSPC; 5334 } 5335 5336 ctl->ndevs = min(ctl->ndevs, ctl->devs_max); 5337 5338 switch (fs_devices->chunk_alloc_policy) { 5339 case BTRFS_CHUNK_ALLOC_REGULAR: 5340 return decide_stripe_size_regular(ctl, devices_info); 5341 case BTRFS_CHUNK_ALLOC_ZONED: 5342 return decide_stripe_size_zoned(ctl, devices_info); 5343 default: 5344 BUG(); 5345 } 5346 } 5347 5348 static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans, 5349 struct alloc_chunk_ctl *ctl, 5350 struct btrfs_device_info *devices_info) 5351 { 5352 struct btrfs_fs_info *info = trans->fs_info; 5353 struct map_lookup *map = NULL; 5354 struct extent_map_tree *em_tree; 5355 struct btrfs_block_group *block_group; 5356 struct extent_map *em; 5357 u64 start = ctl->start; 5358 u64 type = ctl->type; 5359 int ret; 5360 int i; 5361 int j; 5362 5363 map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS); 5364 if (!map) 5365 return ERR_PTR(-ENOMEM); 5366 map->num_stripes = ctl->num_stripes; 5367 5368 for (i = 0; i < ctl->ndevs; ++i) { 5369 for (j = 0; j < ctl->dev_stripes; ++j) { 5370 int s = i * ctl->dev_stripes + j; 5371 map->stripes[s].dev = devices_info[i].dev; 5372 map->stripes[s].physical = devices_info[i].dev_offset + 5373 j * ctl->stripe_size; 5374 } 5375 } 5376 map->stripe_len = BTRFS_STRIPE_LEN; 5377 map->io_align = BTRFS_STRIPE_LEN; 5378 map->io_width = BTRFS_STRIPE_LEN; 5379 map->type = type; 5380 map->sub_stripes = ctl->sub_stripes; 5381 5382 trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size); 5383 5384 em = alloc_extent_map(); 5385 if (!em) { 5386 kfree(map); 5387 return ERR_PTR(-ENOMEM); 5388 } 5389 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 5390 em->map_lookup = map; 5391 em->start = start; 5392 em->len = ctl->chunk_size; 5393 em->block_start = 0; 5394 em->block_len = em->len; 5395 em->orig_block_len = ctl->stripe_size; 5396 5397 em_tree = &info->mapping_tree; 5398 write_lock(&em_tree->lock); 5399 ret = add_extent_mapping(em_tree, em, 0); 5400 if (ret) { 5401 write_unlock(&em_tree->lock); 5402 free_extent_map(em); 5403 return ERR_PTR(ret); 5404 } 5405 write_unlock(&em_tree->lock); 5406 5407 block_group = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size); 5408 if (IS_ERR(block_group)) 5409 goto error_del_extent; 5410 5411 for (i = 0; i < map->num_stripes; i++) { 5412 struct btrfs_device *dev = map->stripes[i].dev; 5413 5414 btrfs_device_set_bytes_used(dev, 5415 dev->bytes_used + ctl->stripe_size); 5416 if (list_empty(&dev->post_commit_list)) 5417 list_add_tail(&dev->post_commit_list, 5418 &trans->transaction->dev_update_list); 5419 } 5420 5421 atomic64_sub(ctl->stripe_size * map->num_stripes, 5422 &info->free_chunk_space); 5423 5424 free_extent_map(em); 5425 check_raid56_incompat_flag(info, type); 5426 check_raid1c34_incompat_flag(info, type); 5427 5428 return block_group; 5429 5430 error_del_extent: 5431 write_lock(&em_tree->lock); 5432 remove_extent_mapping(em_tree, em); 5433 write_unlock(&em_tree->lock); 5434 5435 /* One for our allocation */ 5436 free_extent_map(em); 5437 /* One for the tree reference */ 5438 free_extent_map(em); 5439 5440 return block_group; 5441 } 5442 5443 struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans, 5444 u64 
type) 5445 { 5446 struct btrfs_fs_info *info = trans->fs_info; 5447 struct btrfs_fs_devices *fs_devices = info->fs_devices; 5448 struct btrfs_device_info *devices_info = NULL; 5449 struct alloc_chunk_ctl ctl; 5450 struct btrfs_block_group *block_group; 5451 int ret; 5452 5453 lockdep_assert_held(&info->chunk_mutex); 5454 5455 if (!alloc_profile_is_valid(type, 0)) { 5456 ASSERT(0); 5457 return ERR_PTR(-EINVAL); 5458 } 5459 5460 if (list_empty(&fs_devices->alloc_list)) { 5461 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 5462 btrfs_debug(info, "%s: no writable device", __func__); 5463 return ERR_PTR(-ENOSPC); 5464 } 5465 5466 if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { 5467 btrfs_err(info, "invalid chunk type 0x%llx requested", type); 5468 ASSERT(0); 5469 return ERR_PTR(-EINVAL); 5470 } 5471 5472 ctl.start = find_next_chunk(info); 5473 ctl.type = type; 5474 init_alloc_chunk_ctl(fs_devices, &ctl); 5475 5476 devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info), 5477 GFP_NOFS); 5478 if (!devices_info) 5479 return ERR_PTR(-ENOMEM); 5480 5481 ret = gather_device_info(fs_devices, &ctl, devices_info); 5482 if (ret < 0) { 5483 block_group = ERR_PTR(ret); 5484 goto out; 5485 } 5486 5487 ret = decide_stripe_size(fs_devices, &ctl, devices_info); 5488 if (ret < 0) { 5489 block_group = ERR_PTR(ret); 5490 goto out; 5491 } 5492 5493 block_group = create_chunk(trans, &ctl, devices_info); 5494 5495 out: 5496 kfree(devices_info); 5497 return block_group; 5498 } 5499 5500 /* 5501 * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to 5502 * phase 1 of chunk allocation. It belongs to phase 2 only when allocating system 5503 * chunks. 5504 * 5505 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 5506 * phases. 5507 */ 5508 int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans, 5509 struct btrfs_block_group *bg) 5510 { 5511 struct btrfs_fs_info *fs_info = trans->fs_info; 5512 struct btrfs_root *chunk_root = fs_info->chunk_root; 5513 struct btrfs_key key; 5514 struct btrfs_chunk *chunk; 5515 struct btrfs_stripe *stripe; 5516 struct extent_map *em; 5517 struct map_lookup *map; 5518 size_t item_size; 5519 int i; 5520 int ret; 5521 5522 /* 5523 * We take the chunk_mutex for 2 reasons: 5524 * 5525 * 1) Updates and insertions in the chunk btree must be done while holding 5526 * the chunk_mutex, as well as updating the system chunk array in the 5527 * superblock. See the comment on top of btrfs_chunk_alloc() for the 5528 * details; 5529 * 5530 * 2) To prevent races with the final phase of a device replace operation 5531 * that replaces the device object associated with the map's stripes, 5532 * because the device object's id can change at any time during that 5533 * final phase of the device replace operation 5534 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the 5535 * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID, 5536 * which would cause a failure when updating the device item, which does 5537 * not exist, or persisting a stripe of the chunk item with such ID. 5538 * Here we can't use the device_list_mutex because our caller already 5539 * has locked the chunk_mutex, and the final phase of device replace 5540 * acquires both mutexes - first the device_list_mutex and then the 5541 * chunk_mutex. Using any of those two mutexes protects us from a 5542 * concurrent device replace.
5543 */ 5544 lockdep_assert_held(&fs_info->chunk_mutex); 5545 5546 em = btrfs_get_chunk_map(fs_info, bg->start, bg->length); 5547 if (IS_ERR(em)) { 5548 ret = PTR_ERR(em); 5549 btrfs_abort_transaction(trans, ret); 5550 return ret; 5551 } 5552 5553 map = em->map_lookup; 5554 item_size = btrfs_chunk_item_size(map->num_stripes); 5555 5556 chunk = kzalloc(item_size, GFP_NOFS); 5557 if (!chunk) { 5558 ret = -ENOMEM; 5559 btrfs_abort_transaction(trans, ret); 5560 goto out; 5561 } 5562 5563 for (i = 0; i < map->num_stripes; i++) { 5564 struct btrfs_device *device = map->stripes[i].dev; 5565 5566 ret = btrfs_update_device(trans, device); 5567 if (ret) 5568 goto out; 5569 } 5570 5571 stripe = &chunk->stripe; 5572 for (i = 0; i < map->num_stripes; i++) { 5573 struct btrfs_device *device = map->stripes[i].dev; 5574 const u64 dev_offset = map->stripes[i].physical; 5575 5576 btrfs_set_stack_stripe_devid(stripe, device->devid); 5577 btrfs_set_stack_stripe_offset(stripe, dev_offset); 5578 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE); 5579 stripe++; 5580 } 5581 5582 btrfs_set_stack_chunk_length(chunk, bg->length); 5583 btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID); 5584 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len); 5585 btrfs_set_stack_chunk_type(chunk, map->type); 5586 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes); 5587 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len); 5588 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len); 5589 btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize); 5590 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes); 5591 5592 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 5593 key.type = BTRFS_CHUNK_ITEM_KEY; 5594 key.offset = bg->start; 5595 5596 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size); 5597 if (ret) 5598 goto out; 5599 5600 set_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED, &bg->runtime_flags); 5601 5602 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 5603 ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size); 5604 if (ret) 5605 goto out; 5606 } 5607 5608 out: 5609 kfree(chunk); 5610 free_extent_map(em); 5611 return ret; 5612 } 5613 5614 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans) 5615 { 5616 struct btrfs_fs_info *fs_info = trans->fs_info; 5617 u64 alloc_profile; 5618 struct btrfs_block_group *meta_bg; 5619 struct btrfs_block_group *sys_bg; 5620 5621 /* 5622 * When adding a new device for sprouting, the seed device is read-only 5623 * so we must first allocate a metadata and a system chunk. But before 5624 * adding the block group items to the extent, device and chunk btrees, 5625 * we must first: 5626 * 5627 * 1) Create both chunks without doing any changes to the btrees, as 5628 * otherwise we would get -ENOSPC since the block groups from the 5629 * seed device are read-only; 5630 * 5631 * 2) Add the device item for the new sprout device - finishing the setup 5632 * of a new block group requires updating the device item in the chunk 5633 * btree, so it must exist when we attempt to do it. The previous step 5634 * ensures this does not fail with -ENOSPC. 5635 * 5636 * After that we can add the block group items to their btrees: 5637 * update existing device item in the chunk btree, add a new block group 5638 * item to the extent btree, add a new chunk item to the chunk btree and 5639 * finally add the new device extent items to the devices btree. 
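 * (Both btrfs_create_chunk() calls below only set up the in-memory state; the corresponding items are inserted later, from btrfs_create_pending_block_groups().)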
5640 */ 5641 5642 alloc_profile = btrfs_metadata_alloc_profile(fs_info); 5643 meta_bg = btrfs_create_chunk(trans, alloc_profile); 5644 if (IS_ERR(meta_bg)) 5645 return PTR_ERR(meta_bg); 5646 5647 alloc_profile = btrfs_system_alloc_profile(fs_info); 5648 sys_bg = btrfs_create_chunk(trans, alloc_profile); 5649 if (IS_ERR(sys_bg)) 5650 return PTR_ERR(sys_bg); 5651 5652 return 0; 5653 } 5654 5655 static inline int btrfs_chunk_max_errors(struct map_lookup *map) 5656 { 5657 const int index = btrfs_bg_flags_to_raid_index(map->type); 5658 5659 return btrfs_raid_array[index].tolerated_failures; 5660 } 5661 5662 bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset) 5663 { 5664 struct extent_map *em; 5665 struct map_lookup *map; 5666 int miss_ndevs = 0; 5667 int i; 5668 bool ret = true; 5669 5670 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 5671 if (IS_ERR(em)) 5672 return false; 5673 5674 map = em->map_lookup; 5675 for (i = 0; i < map->num_stripes; i++) { 5676 if (test_bit(BTRFS_DEV_STATE_MISSING, 5677 &map->stripes[i].dev->dev_state)) { 5678 miss_ndevs++; 5679 continue; 5680 } 5681 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, 5682 &map->stripes[i].dev->dev_state)) { 5683 ret = false; 5684 goto end; 5685 } 5686 } 5687 5688 /* 5689 * If the number of missing devices is larger than max errors, we can 5690 * not write the data into that chunk successfully. 5691 */ 5692 if (miss_ndevs > btrfs_chunk_max_errors(map)) 5693 ret = false; 5694 end: 5695 free_extent_map(em); 5696 return ret; 5697 } 5698 5699 void btrfs_mapping_tree_free(struct extent_map_tree *tree) 5700 { 5701 struct extent_map *em; 5702 5703 while (1) { 5704 write_lock(&tree->lock); 5705 em = lookup_extent_mapping(tree, 0, (u64)-1); 5706 if (em) 5707 remove_extent_mapping(tree, em); 5708 write_unlock(&tree->lock); 5709 if (!em) 5710 break; 5711 /* once for us */ 5712 free_extent_map(em); 5713 /* once for the tree */ 5714 free_extent_map(em); 5715 } 5716 } 5717 5718 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5719 { 5720 struct extent_map *em; 5721 struct map_lookup *map; 5722 enum btrfs_raid_types index; 5723 int ret = 1; 5724 5725 em = btrfs_get_chunk_map(fs_info, logical, len); 5726 if (IS_ERR(em)) 5727 /* 5728 * We could return errors for these cases, but that could get 5729 * ugly and we'd probably do the same thing which is just not do 5730 * anything else and exit, so return 1 so the callers don't try 5731 * to use other copies. 5732 */ 5733 return 1; 5734 5735 map = em->map_lookup; 5736 index = btrfs_bg_flags_to_raid_index(map->type); 5737 5738 /* Non-RAID56, use their ncopies from btrfs_raid_array. */ 5739 if (!(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)) 5740 ret = btrfs_raid_array[index].ncopies; 5741 else if (map->type & BTRFS_BLOCK_GROUP_RAID5) 5742 ret = 2; 5743 else if (map->type & BTRFS_BLOCK_GROUP_RAID6) 5744 /* 5745 * There could be two corrupted data stripes, we need 5746 * to loop retry in order to rebuild the correct data. 5747 * 5748 * Fail a stripe at a time on every retry except the 5749 * stripe under reconstruction. 
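 * Hence return num_stripes below (e.g. 4 on a four-device RAID6 chunk) rather than just ncopies, so each retry can exclude a different stripe.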
5750 */ 5751 ret = map->num_stripes; 5752 free_extent_map(em); 5753 5754 down_read(&fs_info->dev_replace.rwsem); 5755 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) && 5756 fs_info->dev_replace.tgtdev) 5757 ret++; 5758 up_read(&fs_info->dev_replace.rwsem); 5759 5760 return ret; 5761 } 5762 5763 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info, 5764 u64 logical) 5765 { 5766 struct extent_map *em; 5767 struct map_lookup *map; 5768 unsigned long len = fs_info->sectorsize; 5769 5770 if (!btrfs_fs_incompat(fs_info, RAID56)) 5771 return len; 5772 5773 em = btrfs_get_chunk_map(fs_info, logical, len); 5774 5775 if (!WARN_ON(IS_ERR(em))) { 5776 map = em->map_lookup; 5777 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 5778 len = map->stripe_len * nr_data_stripes(map); 5779 free_extent_map(em); 5780 } 5781 return len; 5782 } 5783 5784 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5785 { 5786 struct extent_map *em; 5787 struct map_lookup *map; 5788 int ret = 0; 5789 5790 if (!btrfs_fs_incompat(fs_info, RAID56)) 5791 return 0; 5792 5793 em = btrfs_get_chunk_map(fs_info, logical, len); 5794 5795 if (!WARN_ON(IS_ERR(em))) { 5796 map = em->map_lookup; 5797 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 5798 ret = 1; 5799 free_extent_map(em); 5800 } 5801 return ret; 5802 } 5803 5804 static int find_live_mirror(struct btrfs_fs_info *fs_info, 5805 struct map_lookup *map, int first, 5806 int dev_replace_is_ongoing) 5807 { 5808 int i; 5809 int num_stripes; 5810 int preferred_mirror; 5811 int tolerance; 5812 struct btrfs_device *srcdev; 5813 5814 ASSERT((map->type & 5815 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10))); 5816 5817 if (map->type & BTRFS_BLOCK_GROUP_RAID10) 5818 num_stripes = map->sub_stripes; 5819 else 5820 num_stripes = map->num_stripes; 5821 5822 switch (fs_info->fs_devices->read_policy) { 5823 default: 5824 /* Shouldn't happen, just warn and use pid instead of failing */ 5825 btrfs_warn_rl(fs_info, 5826 "unknown read_policy type %u, reset to pid", 5827 fs_info->fs_devices->read_policy); 5828 fs_info->fs_devices->read_policy = BTRFS_READ_POLICY_PID; 5829 fallthrough; 5830 case BTRFS_READ_POLICY_PID: 5831 preferred_mirror = first + (current->pid % num_stripes); 5832 break; 5833 } 5834 5835 if (dev_replace_is_ongoing && 5836 fs_info->dev_replace.cont_reading_from_srcdev_mode == 5837 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID) 5838 srcdev = fs_info->dev_replace.srcdev; 5839 else 5840 srcdev = NULL; 5841 5842 /* 5843 * try to avoid the drive that is the source drive for a 5844 * dev-replace procedure, only choose it if no other non-missing 5845 * mirror is available 5846 */ 5847 for (tolerance = 0; tolerance < 2; tolerance++) { 5848 if (map->stripes[preferred_mirror].dev->bdev && 5849 (tolerance || map->stripes[preferred_mirror].dev != srcdev)) 5850 return preferred_mirror; 5851 for (i = first; i < first + num_stripes; i++) { 5852 if (map->stripes[i].dev->bdev && 5853 (tolerance || map->stripes[i].dev != srcdev)) 5854 return i; 5855 } 5856 } 5857 5858 /* we couldn't find one that doesn't fail.
Just return something 5859 * and the io error handling code will clean up eventually 5860 */ 5861 return preferred_mirror; 5862 } 5863 5864 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */ 5865 static void sort_parity_stripes(struct btrfs_io_context *bioc, int num_stripes) 5866 { 5867 int i; 5868 int again = 1; 5869 5870 while (again) { 5871 again = 0; 5872 for (i = 0; i < num_stripes - 1; i++) { 5873 /* Swap if parity is on a smaller index */ 5874 if (bioc->raid_map[i] > bioc->raid_map[i + 1]) { 5875 swap(bioc->stripes[i], bioc->stripes[i + 1]); 5876 swap(bioc->raid_map[i], bioc->raid_map[i + 1]); 5877 again = 1; 5878 } 5879 } 5880 } 5881 } 5882 5883 static struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info, 5884 int total_stripes, 5885 int real_stripes) 5886 { 5887 struct btrfs_io_context *bioc = kzalloc( 5888 /* The size of btrfs_io_context */ 5889 sizeof(struct btrfs_io_context) + 5890 /* Plus the variable array for the stripes */ 5891 sizeof(struct btrfs_io_stripe) * (total_stripes) + 5892 /* Plus the variable array for the tgt dev */ 5893 sizeof(int) * (real_stripes) + 5894 /* 5895 * Plus the raid_map, which includes both the tgt dev 5896 * and the stripes. 5897 */ 5898 sizeof(u64) * (total_stripes), 5899 GFP_NOFS); 5900 5901 if (!bioc) 5902 return NULL; 5903 5904 refcount_set(&bioc->refs, 1); 5905 5906 bioc->fs_info = fs_info; 5907 bioc->tgtdev_map = (int *)(bioc->stripes + total_stripes); 5908 bioc->raid_map = (u64 *)(bioc->tgtdev_map + real_stripes); 5909 5910 return bioc; 5911 } 5912 5913 void btrfs_get_bioc(struct btrfs_io_context *bioc) 5914 { 5915 WARN_ON(!refcount_read(&bioc->refs)); 5916 refcount_inc(&bioc->refs); 5917 } 5918 5919 void btrfs_put_bioc(struct btrfs_io_context *bioc) 5920 { 5921 if (!bioc) 5922 return; 5923 if (refcount_dec_and_test(&bioc->refs)) 5924 kfree(bioc); 5925 } 5926 5927 /* 5928 * Please note that discard won't be sent to the target device of device 5929 * replace.
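 *
 * Worked example (an illustrative addition): on a two-device RAID0
 * chunk with a 64K stripe_len, discarding 128K at chunk offset 32K
 * yields stripe_nr = 0, stripe_offset = 32K, stripe_nr_end = 3 and
 * stripe_cnt = 3; the per-device lengths computed below are then
 * trimmed by stripe_offset = 32K at the front of the first stripe
 * and by stripe_end_offset = 32K at the back of the last one.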
5930 */ 5931 struct btrfs_discard_stripe *btrfs_map_discard(struct btrfs_fs_info *fs_info, 5932 u64 logical, u64 *length_ret, 5933 u32 *num_stripes) 5934 { 5935 struct extent_map *em; 5936 struct map_lookup *map; 5937 struct btrfs_discard_stripe *stripes; 5938 u64 length = *length_ret; 5939 u64 offset; 5940 u64 stripe_nr; 5941 u64 stripe_nr_end; 5942 u64 stripe_end_offset; 5943 u64 stripe_cnt; 5944 u64 stripe_len; 5945 u64 stripe_offset; 5946 u32 stripe_index; 5947 u32 factor = 0; 5948 u32 sub_stripes = 0; 5949 u64 stripes_per_dev = 0; 5950 u32 remaining_stripes = 0; 5951 u32 last_stripe = 0; 5952 int ret; 5953 int i; 5954 5955 em = btrfs_get_chunk_map(fs_info, logical, length); 5956 if (IS_ERR(em)) 5957 return ERR_CAST(em); 5958 5959 map = em->map_lookup; 5960 5961 /* we don't discard raid56 yet */ 5962 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 5963 ret = -EOPNOTSUPP; 5964 goto out_free_map; 5965 } 5966 5967 offset = logical - em->start; 5968 length = min_t(u64, em->start + em->len - logical, length); 5969 *length_ret = length; 5970 5971 stripe_len = map->stripe_len; 5972 /* 5973 * stripe_nr counts the total number of stripes we have to stride 5974 * to get to this block 5975 */ 5976 stripe_nr = div64_u64(offset, stripe_len); 5977 5978 /* stripe_offset is the offset of this block in its stripe */ 5979 stripe_offset = offset - stripe_nr * stripe_len; 5980 5981 stripe_nr_end = round_up(offset + length, map->stripe_len); 5982 stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len); 5983 stripe_cnt = stripe_nr_end - stripe_nr; 5984 stripe_end_offset = stripe_nr_end * map->stripe_len - 5985 (offset + length); 5986 /* 5987 * after this, stripe_nr is the number of stripes on this 5988 * device we have to walk to find the data, and stripe_index is 5989 * the number of our device in the stripe array 5990 */ 5991 *num_stripes = 1; 5992 stripe_index = 0; 5993 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 5994 BTRFS_BLOCK_GROUP_RAID10)) { 5995 if (map->type & BTRFS_BLOCK_GROUP_RAID0) 5996 sub_stripes = 1; 5997 else 5998 sub_stripes = map->sub_stripes; 5999 6000 factor = map->num_stripes / sub_stripes; 6001 *num_stripes = min_t(u64, map->num_stripes, 6002 sub_stripes * stripe_cnt); 6003 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); 6004 stripe_index *= sub_stripes; 6005 stripes_per_dev = div_u64_rem(stripe_cnt, factor, 6006 &remaining_stripes); 6007 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe); 6008 last_stripe *= sub_stripes; 6009 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK | 6010 BTRFS_BLOCK_GROUP_DUP)) { 6011 *num_stripes = map->num_stripes; 6012 } else { 6013 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6014 &stripe_index); 6015 } 6016 6017 stripes = kcalloc(*num_stripes, sizeof(*stripes), GFP_NOFS); 6018 if (!stripes) { 6019 ret = -ENOMEM; 6020 goto out_free_map; 6021 } 6022 6023 for (i = 0; i < *num_stripes; i++) { 6024 stripes[i].physical = 6025 map->stripes[stripe_index].physical + 6026 stripe_offset + stripe_nr * map->stripe_len; 6027 stripes[i].dev = map->stripes[stripe_index].dev; 6028 6029 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 6030 BTRFS_BLOCK_GROUP_RAID10)) { 6031 stripes[i].length = stripes_per_dev * map->stripe_len; 6032 6033 if (i / sub_stripes < remaining_stripes) 6034 stripes[i].length += map->stripe_len; 6035 6036 /* 6037 * Special for the first stripe and 6038 * the last stripe: 6039 * 6040 * |-------|...|-------| 6041 * |----------| 6042 * off end_off 6043 */ 6044 if (i < sub_stripes) 6045 stripes[i].length -= stripe_offset; 6046 
6047 if (stripe_index >= last_stripe && 6048 stripe_index <= (last_stripe + 6049 sub_stripes - 1)) 6050 stripes[i].length -= stripe_end_offset; 6051 6052 if (i == sub_stripes - 1) 6053 stripe_offset = 0; 6054 } else { 6055 stripes[i].length = length; 6056 } 6057 6058 stripe_index++; 6059 if (stripe_index == map->num_stripes) { 6060 stripe_index = 0; 6061 stripe_nr++; 6062 } 6063 } 6064 6065 free_extent_map(em); 6066 return stripes; 6067 out_free_map: 6068 free_extent_map(em); 6069 return ERR_PTR(ret); 6070 } 6071 6072 /* 6073 * In dev-replace case, for repair case (that's the only case where the mirror 6074 * is selected explicitly when calling btrfs_map_block), blocks left of the 6075 * left cursor can also be read from the target drive. 6076 * 6077 * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the 6078 * array of stripes. 6079 * For READ, it also needs to be supported using the same mirror number. 6080 * 6081 * If the requested block is not left of the left cursor, EIO is returned. This 6082 * can happen because btrfs_num_copies() returns one more in the dev-replace 6083 * case. 6084 */ 6085 static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info, 6086 u64 logical, u64 length, 6087 u64 srcdev_devid, int *mirror_num, 6088 u64 *physical) 6089 { 6090 struct btrfs_io_context *bioc = NULL; 6091 int num_stripes; 6092 int index_srcdev = 0; 6093 int found = 0; 6094 u64 physical_of_found = 0; 6095 int i; 6096 int ret = 0; 6097 6098 ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, 6099 logical, &length, &bioc, NULL, NULL, 0); 6100 if (ret) { 6101 ASSERT(bioc == NULL); 6102 return ret; 6103 } 6104 6105 num_stripes = bioc->num_stripes; 6106 if (*mirror_num > num_stripes) { 6107 /* 6108 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror, 6109 * that means that the requested area is not left of the left 6110 * cursor 6111 */ 6112 btrfs_put_bioc(bioc); 6113 return -EIO; 6114 } 6115 6116 /* 6117 * process the rest of the function using the mirror_num of the source 6118 * drive. Therefore look it up first. At the end, patch the device 6119 * pointer to the one of the target drive. 
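 *
 * (Added note: for a DUP chunk both stripes live on the source
 * device, which is why the scan below keeps only the stripe with the
 * lowest physical address, matching the comment inside the loop.)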
6120 */ 6121 for (i = 0; i < num_stripes; i++) { 6122 if (bioc->stripes[i].dev->devid != srcdev_devid) 6123 continue; 6124 6125 /* 6126 * In case of DUP, in order to keep it simple, only add the 6127 * mirror with the lowest physical address 6128 */ 6129 if (found && 6130 physical_of_found <= bioc->stripes[i].physical) 6131 continue; 6132 6133 index_srcdev = i; 6134 found = 1; 6135 physical_of_found = bioc->stripes[i].physical; 6136 } 6137 6138 btrfs_put_bioc(bioc); 6139 6140 ASSERT(found); 6141 if (!found) 6142 return -EIO; 6143 6144 *mirror_num = index_srcdev + 1; 6145 *physical = physical_of_found; 6146 return ret; 6147 } 6148 6149 static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical) 6150 { 6151 struct btrfs_block_group *cache; 6152 bool ret; 6153 6154 /* Non-zoned filesystems do not use the "to_copy" flag */ 6155 if (!btrfs_is_zoned(fs_info)) 6156 return false; 6157 6158 cache = btrfs_lookup_block_group(fs_info, logical); 6159 6160 ret = test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags); 6161 6162 btrfs_put_block_group(cache); 6163 return ret; 6164 } 6165 6166 static void handle_ops_on_dev_replace(enum btrfs_map_op op, 6167 struct btrfs_io_context **bioc_ret, 6168 struct btrfs_dev_replace *dev_replace, 6169 u64 logical, 6170 int *num_stripes_ret, int *max_errors_ret) 6171 { 6172 struct btrfs_io_context *bioc = *bioc_ret; 6173 u64 srcdev_devid = dev_replace->srcdev->devid; 6174 int tgtdev_indexes = 0; 6175 int num_stripes = *num_stripes_ret; 6176 int max_errors = *max_errors_ret; 6177 int i; 6178 6179 if (op == BTRFS_MAP_WRITE) { 6180 int index_where_to_add; 6181 6182 /* 6183 * A block group which has "to_copy" set will eventually be 6184 * copied by the dev-replace process. We can avoid cloning IO here. 6185 */ 6186 if (is_block_group_to_copy(dev_replace->srcdev->fs_info, logical)) 6187 return; 6188 6189 /* 6190 * duplicate the write operations while the dev replace 6191 * procedure is running. Since the copying of the old disk to 6192 * the new disk takes place at run time while the filesystem is 6193 * mounted writable, the regular write operations to the old 6194 * disk have to be duplicated to go to the new disk as well. 6195 * 6196 * Note that device->missing is handled by the caller, and that 6197 * the write to the old disk is already set up in the stripes 6198 * array. 6199 */ 6200 index_where_to_add = num_stripes; 6201 for (i = 0; i < num_stripes; i++) { 6202 if (bioc->stripes[i].dev->devid == srcdev_devid) { 6203 /* write to new disk, too */ 6204 struct btrfs_io_stripe *new = 6205 bioc->stripes + index_where_to_add; 6206 struct btrfs_io_stripe *old = 6207 bioc->stripes + i; 6208 6209 new->physical = old->physical; 6210 new->dev = dev_replace->tgtdev; 6211 bioc->tgtdev_map[i] = index_where_to_add; 6212 index_where_to_add++; 6213 max_errors++; 6214 tgtdev_indexes++; 6215 } 6216 } 6217 num_stripes = index_where_to_add; 6218 } else if (op == BTRFS_MAP_GET_READ_MIRRORS) { 6219 int index_srcdev = 0; 6220 int found = 0; 6221 u64 physical_of_found = 0; 6222 6223 /* 6224 * During the dev-replace procedure, the target drive can also 6225 * be used to read data in case it is needed to repair a corrupt 6226 * block elsewhere. This is possible if the requested area is 6227 * left of the left cursor. In this area, the target drive is a 6228 * full copy of the source drive.
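 *
 * (Added note: "left of the left cursor" refers to physical offsets
 * the replace operation has already copied; in that region source
 * and target are identical, so the target can serve as an extra
 * read mirror.)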
6229 */ 6230 for (i = 0; i < num_stripes; i++) { 6231 if (bioc->stripes[i].dev->devid == srcdev_devid) { 6232 /* 6233 * In case of DUP, in order to keep it simple, 6234 * only add the mirror with the lowest physical 6235 * address 6236 */ 6237 if (found && 6238 physical_of_found <= bioc->stripes[i].physical) 6239 continue; 6240 index_srcdev = i; 6241 found = 1; 6242 physical_of_found = bioc->stripes[i].physical; 6243 } 6244 } 6245 if (found) { 6246 struct btrfs_io_stripe *tgtdev_stripe = 6247 bioc->stripes + num_stripes; 6248 6249 tgtdev_stripe->physical = physical_of_found; 6250 tgtdev_stripe->dev = dev_replace->tgtdev; 6251 bioc->tgtdev_map[index_srcdev] = num_stripes; 6252 6253 tgtdev_indexes++; 6254 num_stripes++; 6255 } 6256 } 6257 6258 *num_stripes_ret = num_stripes; 6259 *max_errors_ret = max_errors; 6260 bioc->num_tgtdevs = tgtdev_indexes; 6261 *bioc_ret = bioc; 6262 } 6263 6264 static bool need_full_stripe(enum btrfs_map_op op) 6265 { 6266 return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS); 6267 } 6268 6269 /* 6270 * Calculate the geometry of a particular (address, len) tuple. This 6271 * information is used to calculate how big a particular bio can get before it 6272 * straddles a stripe. 6273 * 6274 * @fs_info: the filesystem 6275 * @em: mapping containing the logical extent 6276 * @op: type of operation - write or read 6277 * @logical: address that we want to figure out the geometry of 6278 * @io_geom: pointer used to return values 6279 * 6280 * Returns < 0 in case a chunk for the given logical address cannot be found, 6281 * usually shouldn't happen unless @logical is corrupted, 0 otherwise. 6282 */ 6283 int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *em, 6284 enum btrfs_map_op op, u64 logical, 6285 struct btrfs_io_geometry *io_geom) 6286 { 6287 struct map_lookup *map; 6288 u64 len; 6289 u64 offset; 6290 u64 stripe_offset; 6291 u64 stripe_nr; 6292 u32 stripe_len; 6293 u64 raid56_full_stripe_start = (u64)-1; 6294 int data_stripes; 6295 6296 ASSERT(op != BTRFS_MAP_DISCARD); 6297 6298 map = em->map_lookup; 6299 /* Offset of this logical address in the chunk */ 6300 offset = logical - em->start; 6301 /* Len of a stripe in a chunk */ 6302 stripe_len = map->stripe_len; 6303 /* 6304 * Stripe_nr is where this block falls in, and 6305 * stripe_offset is the offset of this block in its stripe. 6306 */ 6307 stripe_nr = div64_u64_rem(offset, stripe_len, &stripe_offset); 6308 ASSERT(stripe_offset < U32_MAX); 6309 6310 data_stripes = nr_data_stripes(map); 6311 6312 /* Only stripe-based profiles need to check against stripe length. */ 6313 if (map->type & BTRFS_BLOCK_GROUP_STRIPE_MASK) { 6314 u64 max_len = stripe_len - stripe_offset; 6315 6316 /* 6317 * In case of raid56, we need to know the stripe aligned start 6318 */ 6319 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 6320 unsigned long full_stripe_len = stripe_len * data_stripes; 6321 raid56_full_stripe_start = offset; 6322 6323 /* 6324 * Allow a write of a full stripe, but make sure we 6325 * don't allow straddling of stripes 6326 */ 6327 raid56_full_stripe_start = div64_u64(raid56_full_stripe_start, 6328 full_stripe_len); 6329 raid56_full_stripe_start *= full_stripe_len; 6330 6331 /* 6332 * For writes to RAID[56], allow a full stripeset across 6333 * all disks. For other RAID types and for RAID[56] 6334 * reads, just allow a single stripe (on a single disk).
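 *
 * Worked example (an illustrative addition): with stripe_len = 64K
 * and data_stripes = 2 the full stripe spans 128K, so a write at
 * chunk offset 96K gets raid56_full_stripe_start = 0 and max_len is
 * capped below at 128K - 96K = 32K so that it does not straddle the
 * next full stripe.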
6335 */ 6336 if (op == BTRFS_MAP_WRITE) { 6337 max_len = stripe_len * data_stripes - 6338 (offset - raid56_full_stripe_start); 6339 } 6340 } 6341 len = min_t(u64, em->len - offset, max_len); 6342 } else { 6343 len = em->len - offset; 6344 } 6345 6346 io_geom->len = len; 6347 io_geom->offset = offset; 6348 io_geom->stripe_len = stripe_len; 6349 io_geom->stripe_nr = stripe_nr; 6350 io_geom->stripe_offset = stripe_offset; 6351 io_geom->raid56_stripe_offset = raid56_full_stripe_start; 6352 6353 return 0; 6354 } 6355 6356 static void set_io_stripe(struct btrfs_io_stripe *dst, const struct map_lookup *map, 6357 u32 stripe_index, u64 stripe_offset, u64 stripe_nr) 6358 { 6359 dst->dev = map->stripes[stripe_index].dev; 6360 dst->physical = map->stripes[stripe_index].physical + 6361 stripe_offset + stripe_nr * map->stripe_len; 6362 } 6363 6364 int __btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, 6365 u64 logical, u64 *length, 6366 struct btrfs_io_context **bioc_ret, 6367 struct btrfs_io_stripe *smap, int *mirror_num_ret, 6368 int need_raid_map) 6369 { 6370 struct extent_map *em; 6371 struct map_lookup *map; 6372 u64 stripe_offset; 6373 u64 stripe_nr; 6374 u64 stripe_len; 6375 u32 stripe_index; 6376 int data_stripes; 6377 int i; 6378 int ret = 0; 6379 int mirror_num = (mirror_num_ret ? *mirror_num_ret : 0); 6380 int num_stripes; 6381 int max_errors = 0; 6382 int tgtdev_indexes = 0; 6383 struct btrfs_io_context *bioc = NULL; 6384 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; 6385 int dev_replace_is_ongoing = 0; 6386 int num_alloc_stripes; 6387 int patch_the_first_stripe_for_dev_replace = 0; 6388 u64 physical_to_patch_in_first_stripe = 0; 6389 u64 raid56_full_stripe_start = (u64)-1; 6390 struct btrfs_io_geometry geom; 6391 6392 ASSERT(bioc_ret); 6393 ASSERT(op != BTRFS_MAP_DISCARD); 6394 6395 em = btrfs_get_chunk_map(fs_info, logical, *length); 6396 ASSERT(!IS_ERR(em)); 6397 6398 ret = btrfs_get_io_geometry(fs_info, em, op, logical, &geom); 6399 if (ret < 0) 6400 return ret; 6401 6402 map = em->map_lookup; 6403 6404 *length = geom.len; 6405 stripe_len = geom.stripe_len; 6406 stripe_nr = geom.stripe_nr; 6407 stripe_offset = geom.stripe_offset; 6408 raid56_full_stripe_start = geom.raid56_stripe_offset; 6409 data_stripes = nr_data_stripes(map); 6410 6411 down_read(&dev_replace->rwsem); 6412 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace); 6413 /* 6414 * Hold the semaphore for read during the whole operation, write is 6415 * requested at commit time but must wait. 
6416 */ 6417 if (!dev_replace_is_ongoing) 6418 up_read(&dev_replace->rwsem); 6419 6420 if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 && 6421 !need_full_stripe(op) && dev_replace->tgtdev != NULL) { 6422 ret = get_extra_mirror_from_replace(fs_info, logical, *length, 6423 dev_replace->srcdev->devid, 6424 &mirror_num, 6425 &physical_to_patch_in_first_stripe); 6426 if (ret) 6427 goto out; 6428 else 6429 patch_the_first_stripe_for_dev_replace = 1; 6430 } else if (mirror_num > map->num_stripes) { 6431 mirror_num = 0; 6432 } 6433 6434 num_stripes = 1; 6435 stripe_index = 0; 6436 if (map->type & BTRFS_BLOCK_GROUP_RAID0) { 6437 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6438 &stripe_index); 6439 if (!need_full_stripe(op)) 6440 mirror_num = 1; 6441 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) { 6442 if (need_full_stripe(op)) 6443 num_stripes = map->num_stripes; 6444 else if (mirror_num) 6445 stripe_index = mirror_num - 1; 6446 else { 6447 stripe_index = find_live_mirror(fs_info, map, 0, 6448 dev_replace_is_ongoing); 6449 mirror_num = stripe_index + 1; 6450 } 6451 6452 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { 6453 if (need_full_stripe(op)) { 6454 num_stripes = map->num_stripes; 6455 } else if (mirror_num) { 6456 stripe_index = mirror_num - 1; 6457 } else { 6458 mirror_num = 1; 6459 } 6460 6461 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { 6462 u32 factor = map->num_stripes / map->sub_stripes; 6463 6464 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); 6465 stripe_index *= map->sub_stripes; 6466 6467 if (need_full_stripe(op)) 6468 num_stripes = map->sub_stripes; 6469 else if (mirror_num) 6470 stripe_index += mirror_num - 1; 6471 else { 6472 int old_stripe_index = stripe_index; 6473 stripe_index = find_live_mirror(fs_info, map, 6474 stripe_index, 6475 dev_replace_is_ongoing); 6476 mirror_num = stripe_index - old_stripe_index + 1; 6477 } 6478 6479 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 6480 ASSERT(map->stripe_len == BTRFS_STRIPE_LEN); 6481 if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) { 6482 /* push stripe_nr back to the start of the full stripe */ 6483 stripe_nr = div64_u64(raid56_full_stripe_start, 6484 stripe_len * data_stripes); 6485 6486 /* RAID[56] write or recovery. Return all stripes */ 6487 num_stripes = map->num_stripes; 6488 max_errors = btrfs_chunk_max_errors(map); 6489 6490 /* Return the length to the full stripe end */ 6491 *length = min(logical + *length, 6492 raid56_full_stripe_start + em->start + 6493 data_stripes * stripe_len) - logical; 6494 stripe_index = 0; 6495 stripe_offset = 0; 6496 } else { 6497 /* 6498 * Mirror #0 or #1 means the original data block. 6499 * Mirror #2 is RAID5 parity block. 6500 * Mirror #3 is RAID6 Q block. 
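 *
 * (Illustrative addition: on RAID5 with data_stripes = 2, a repair
 * read with mirror_num = 2 computes stripe_index = data_stripes +
 * mirror_num - 2 = 2 below, the parity position, which the following
 * div_u64_rem() then rotates onto the right device.)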
6501 */ 6502 stripe_nr = div_u64_rem(stripe_nr, 6503 data_stripes, &stripe_index); 6504 if (mirror_num > 1) 6505 stripe_index = data_stripes + mirror_num - 2; 6506 6507 /* We distribute the parity blocks across stripes */ 6508 div_u64_rem(stripe_nr + stripe_index, map->num_stripes, 6509 &stripe_index); 6510 if (!need_full_stripe(op) && mirror_num <= 1) 6511 mirror_num = 1; 6512 } 6513 } else { 6514 /* 6515 * after this, stripe_nr is the number of stripes on this 6516 * device we have to walk to find the data, and stripe_index is 6517 * the number of our device in the stripe array 6518 */ 6519 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6520 &stripe_index); 6521 mirror_num = stripe_index + 1; 6522 } 6523 if (stripe_index >= map->num_stripes) { 6524 btrfs_crit(fs_info, 6525 "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u", 6526 stripe_index, map->num_stripes); 6527 ret = -EINVAL; 6528 goto out; 6529 } 6530 6531 num_alloc_stripes = num_stripes; 6532 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) { 6533 if (op == BTRFS_MAP_WRITE) 6534 num_alloc_stripes <<= 1; 6535 if (op == BTRFS_MAP_GET_READ_MIRRORS) 6536 num_alloc_stripes++; 6537 tgtdev_indexes = num_stripes; 6538 } 6539 6540 /* 6541 * If this I/O maps to a single device, try to return the device and 6542 * physical block information on the stack instead of allocating an 6543 * I/O context structure. 6544 */ 6545 if (smap && num_alloc_stripes == 1 && 6546 !((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && mirror_num > 1) && 6547 (!need_full_stripe(op) || !dev_replace_is_ongoing || 6548 !dev_replace->tgtdev)) { 6549 if (patch_the_first_stripe_for_dev_replace) { 6550 smap->dev = dev_replace->tgtdev; 6551 smap->physical = physical_to_patch_in_first_stripe; 6552 *mirror_num_ret = map->num_stripes + 1; 6553 } else { 6554 set_io_stripe(smap, map, stripe_index, stripe_offset, 6555 stripe_nr); 6556 *mirror_num_ret = mirror_num; 6557 } 6558 *bioc_ret = NULL; 6559 ret = 0; 6560 goto out; 6561 } 6562 6563 bioc = alloc_btrfs_io_context(fs_info, num_alloc_stripes, tgtdev_indexes); 6564 if (!bioc) { 6565 ret = -ENOMEM; 6566 goto out; 6567 } 6568 6569 for (i = 0; i < num_stripes; i++) { 6570 set_io_stripe(&bioc->stripes[i], map, stripe_index, stripe_offset, 6571 stripe_nr); 6572 stripe_index++; 6573 } 6574 6575 /* Build raid_map */ 6576 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map && 6577 (need_full_stripe(op) || mirror_num > 1)) { 6578 u64 tmp; 6579 unsigned rot; 6580 6581 /* Work out the disk rotation on this stripe-set */ 6582 div_u64_rem(stripe_nr, num_stripes, &rot); 6583 6584 /* Fill in the logical address of each stripe */ 6585 tmp = stripe_nr * data_stripes; 6586 for (i = 0; i < data_stripes; i++) 6587 bioc->raid_map[(i + rot) % num_stripes] = 6588 em->start + (tmp + i) * map->stripe_len; 6589 6590 bioc->raid_map[(i + rot) % map->num_stripes] = RAID5_P_STRIPE; 6591 if (map->type & BTRFS_BLOCK_GROUP_RAID6) 6592 bioc->raid_map[(i + rot + 1) % num_stripes] = 6593 RAID6_Q_STRIPE; 6594 6595 sort_parity_stripes(bioc, num_stripes); 6596 } 6597 6598 if (need_full_stripe(op)) 6599 max_errors = btrfs_chunk_max_errors(map); 6600 6601 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL && 6602 need_full_stripe(op)) { 6603 handle_ops_on_dev_replace(op, &bioc, dev_replace, logical, 6604 &num_stripes, &max_errors); 6605 } 6606 6607 *bioc_ret = bioc; 6608 bioc->map_type = map->type; 6609 bioc->num_stripes = num_stripes; 6610 bioc->max_errors = max_errors; 6611 bioc->mirror_num = mirror_num; 
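	/*
	 * Worked example for the raid_map built above (an illustrative
	 * addition): RAID5 over three devices with stripe_nr = 1 gives
	 * rot = 1, so the two data stripes land in raid_map slots 1 and 2
	 * and slot 0 receives RAID5_P_STRIPE; sort_parity_stripes() then
	 * moves the parity entry behind the data entries.
	 */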
6612 6613 /* 6614 * this is the case that REQ_READ && dev_replace_is_ongoing && 6615 * mirror_num == num_stripes + 1 && dev_replace target drive is 6616 * available as a mirror 6617 */ 6618 if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) { 6619 WARN_ON(num_stripes > 1); 6620 bioc->stripes[0].dev = dev_replace->tgtdev; 6621 bioc->stripes[0].physical = physical_to_patch_in_first_stripe; 6622 bioc->mirror_num = map->num_stripes + 1; 6623 } 6624 out: 6625 if (dev_replace_is_ongoing) { 6626 lockdep_assert_held(&dev_replace->rwsem); 6627 /* Unlock and let waiting writers proceed */ 6628 up_read(&dev_replace->rwsem); 6629 } 6630 free_extent_map(em); 6631 return ret; 6632 } 6633 6634 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, 6635 u64 logical, u64 *length, 6636 struct btrfs_io_context **bioc_ret, int mirror_num) 6637 { 6638 return __btrfs_map_block(fs_info, op, logical, length, bioc_ret, 6639 NULL, &mirror_num, 0); 6640 } 6641 6642 /* For Scrub/replace */ 6643 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, 6644 u64 logical, u64 *length, 6645 struct btrfs_io_context **bioc_ret) 6646 { 6647 return __btrfs_map_block(fs_info, op, logical, length, bioc_ret, 6648 NULL, NULL, 1); 6649 } 6650 6651 static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args, 6652 const struct btrfs_fs_devices *fs_devices) 6653 { 6654 if (args->fsid == NULL) 6655 return true; 6656 if (memcmp(fs_devices->metadata_uuid, args->fsid, BTRFS_FSID_SIZE) == 0) 6657 return true; 6658 return false; 6659 } 6660 6661 static bool dev_args_match_device(const struct btrfs_dev_lookup_args *args, 6662 const struct btrfs_device *device) 6663 { 6664 if (args->missing) { 6665 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) && 6666 !device->bdev) 6667 return true; 6668 return false; 6669 } 6670 6671 if (device->devid != args->devid) 6672 return false; 6673 if (args->uuid && memcmp(device->uuid, args->uuid, BTRFS_UUID_SIZE) != 0) 6674 return false; 6675 return true; 6676 } 6677 6678 /* 6679 * Find a device specified by @devid or @uuid in the list of @fs_devices, or 6680 * return NULL. 6681 * 6682 * If devid and uuid are both specified, the match must be exact, otherwise 6683 * only devid is used. 6684 */ 6685 struct btrfs_device *btrfs_find_device(const struct btrfs_fs_devices *fs_devices, 6686 const struct btrfs_dev_lookup_args *args) 6687 { 6688 struct btrfs_device *device; 6689 struct btrfs_fs_devices *seed_devs; 6690 6691 if (dev_args_match_fs_devices(args, fs_devices)) { 6692 list_for_each_entry(device, &fs_devices->devices, dev_list) { 6693 if (dev_args_match_device(args, device)) 6694 return device; 6695 } 6696 } 6697 6698 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 6699 if (!dev_args_match_fs_devices(args, seed_devs)) 6700 continue; 6701 list_for_each_entry(device, &seed_devs->devices, dev_list) { 6702 if (dev_args_match_device(args, device)) 6703 return device; 6704 } 6705 } 6706 6707 return NULL; 6708 } 6709 6710 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices, 6711 u64 devid, u8 *dev_uuid) 6712 { 6713 struct btrfs_device *device; 6714 unsigned int nofs_flag; 6715 6716 /* 6717 * We call this under the chunk_mutex, so we want to use NOFS for this 6718 * allocation, however we don't want to change btrfs_alloc_device() to 6719 * always do NOFS because we use it in a lot of other GFP_KERNEL safe 6720 * places. 
6721 */ 6722 6723 nofs_flag = memalloc_nofs_save(); 6724 device = btrfs_alloc_device(NULL, &devid, dev_uuid, NULL); 6725 memalloc_nofs_restore(nofs_flag); 6726 if (IS_ERR(device)) 6727 return device; 6728 6729 list_add(&device->dev_list, &fs_devices->devices); 6730 device->fs_devices = fs_devices; 6731 fs_devices->num_devices++; 6732 6733 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 6734 fs_devices->missing_devices++; 6735 6736 return device; 6737 } 6738 6739 /* 6740 * Allocate new device struct, set up devid and UUID. 6741 * 6742 * @fs_info: used only for generating a new devid, can be NULL if 6743 * devid is provided (i.e. @devid != NULL). 6744 * @devid: a pointer to devid for this device. If NULL a new devid 6745 * is generated. 6746 * @uuid: a pointer to UUID for this device. If NULL a new UUID 6747 * is generated. 6748 * @path: a pointer to device path if available, NULL otherwise. 6749 * 6750 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR() 6751 * on error. Returned struct is not linked onto any lists and must be 6752 * destroyed with btrfs_free_device. 6753 */ 6754 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info, 6755 const u64 *devid, const u8 *uuid, 6756 const char *path) 6757 { 6758 struct btrfs_device *dev; 6759 u64 tmp; 6760 6761 if (WARN_ON(!devid && !fs_info)) 6762 return ERR_PTR(-EINVAL); 6763 6764 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 6765 if (!dev) 6766 return ERR_PTR(-ENOMEM); 6767 6768 INIT_LIST_HEAD(&dev->dev_list); 6769 INIT_LIST_HEAD(&dev->dev_alloc_list); 6770 INIT_LIST_HEAD(&dev->post_commit_list); 6771 6772 atomic_set(&dev->dev_stats_ccnt, 0); 6773 btrfs_device_data_ordered_init(dev); 6774 extent_io_tree_init(fs_info, &dev->alloc_state, IO_TREE_DEVICE_ALLOC_STATE); 6775 6776 if (devid) 6777 tmp = *devid; 6778 else { 6779 int ret; 6780 6781 ret = find_next_devid(fs_info, &tmp); 6782 if (ret) { 6783 btrfs_free_device(dev); 6784 return ERR_PTR(ret); 6785 } 6786 } 6787 dev->devid = tmp; 6788 6789 if (uuid) 6790 memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE); 6791 else 6792 generate_random_uuid(dev->uuid); 6793 6794 if (path) { 6795 struct rcu_string *name; 6796 6797 name = rcu_string_strdup(path, GFP_KERNEL); 6798 if (!name) { 6799 btrfs_free_device(dev); 6800 return ERR_PTR(-ENOMEM); 6801 } 6802 rcu_assign_pointer(dev->name, name); 6803 } 6804 6805 return dev; 6806 } 6807 6808 static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info, 6809 u64 devid, u8 *uuid, bool error) 6810 { 6811 if (error) 6812 btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing", 6813 devid, uuid); 6814 else 6815 btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing", 6816 devid, uuid); 6817 } 6818 6819 u64 btrfs_calc_stripe_length(const struct extent_map *em) 6820 { 6821 const struct map_lookup *map = em->map_lookup; 6822 const int data_stripes = calc_data_stripes(map->type, map->num_stripes); 6823 6824 return div_u64(em->len, data_stripes); 6825 } 6826 6827 #if BITS_PER_LONG == 32 6828 /* 6829 * Due to page cache limit, metadata beyond BTRFS_32BIT_MAX_FILE_SIZE 6830 * can't be accessed on 32bit systems. 6831 * 6832 * This function does a mount time check to reject the fs if it already has 6833 * a metadata chunk beyond that limit.
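 *
 * (Added note: on 32bit, MAX_LFS_FILESIZE is bounded by the page
 * cache index width, roughly 16T with 4K pages, so a metadata chunk
 * whose logical end crosses it could not be read through the page
 * cache at all.)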
6834 */ 6835 static int check_32bit_meta_chunk(struct btrfs_fs_info *fs_info, 6836 u64 logical, u64 length, u64 type) 6837 { 6838 if (!(type & BTRFS_BLOCK_GROUP_METADATA)) 6839 return 0; 6840 6841 if (logical + length < MAX_LFS_FILESIZE) 6842 return 0; 6843 6844 btrfs_err_32bit_limit(fs_info); 6845 return -EOVERFLOW; 6846 } 6847 6848 /* 6849 * This is to give early warning for any metadata chunk reaching 6850 * BTRFS_32BIT_EARLY_WARN_THRESHOLD. 6851 * Although we can still access the metadata, it's not going to be possible 6852 * once the limit is reached. 6853 */ 6854 static void warn_32bit_meta_chunk(struct btrfs_fs_info *fs_info, 6855 u64 logical, u64 length, u64 type) 6856 { 6857 if (!(type & BTRFS_BLOCK_GROUP_METADATA)) 6858 return; 6859 6860 if (logical + length < BTRFS_32BIT_EARLY_WARN_THRESHOLD) 6861 return; 6862 6863 btrfs_warn_32bit_limit(fs_info); 6864 } 6865 #endif 6866 6867 static struct btrfs_device *handle_missing_device(struct btrfs_fs_info *fs_info, 6868 u64 devid, u8 *uuid) 6869 { 6870 struct btrfs_device *dev; 6871 6872 if (!btrfs_test_opt(fs_info, DEGRADED)) { 6873 btrfs_report_missing_device(fs_info, devid, uuid, true); 6874 return ERR_PTR(-ENOENT); 6875 } 6876 6877 dev = add_missing_dev(fs_info->fs_devices, devid, uuid); 6878 if (IS_ERR(dev)) { 6879 btrfs_err(fs_info, "failed to init missing device %llu: %ld", 6880 devid, PTR_ERR(dev)); 6881 return dev; 6882 } 6883 btrfs_report_missing_device(fs_info, devid, uuid, false); 6884 6885 return dev; 6886 } 6887 6888 static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf, 6889 struct btrfs_chunk *chunk) 6890 { 6891 BTRFS_DEV_LOOKUP_ARGS(args); 6892 struct btrfs_fs_info *fs_info = leaf->fs_info; 6893 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 6894 struct map_lookup *map; 6895 struct extent_map *em; 6896 u64 logical; 6897 u64 length; 6898 u64 devid; 6899 u64 type; 6900 u8 uuid[BTRFS_UUID_SIZE]; 6901 int index; 6902 int num_stripes; 6903 int ret; 6904 int i; 6905 6906 logical = key->offset; 6907 length = btrfs_chunk_length(leaf, chunk); 6908 type = btrfs_chunk_type(leaf, chunk); 6909 index = btrfs_bg_flags_to_raid_index(type); 6910 num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 6911 6912 #if BITS_PER_LONG == 32 6913 ret = check_32bit_meta_chunk(fs_info, logical, length, type); 6914 if (ret < 0) 6915 return ret; 6916 warn_32bit_meta_chunk(fs_info, logical, length, type); 6917 #endif 6918 6919 /* 6920 * Only need to verify chunk item if we're reading from sys chunk array, 6921 * as chunk item in tree block is already verified by tree-checker. 6922 */ 6923 if (leaf->start == BTRFS_SUPER_INFO_OFFSET) { 6924 ret = btrfs_check_chunk_valid(leaf, chunk, logical); 6925 if (ret) 6926 return ret; 6927 } 6928 6929 read_lock(&map_tree->lock); 6930 em = lookup_extent_mapping(map_tree, logical, 1); 6931 read_unlock(&map_tree->lock); 6932 6933 /* already mapped? 
*/ 6934 if (em && em->start <= logical && em->start + em->len > logical) { 6935 free_extent_map(em); 6936 return 0; 6937 } else if (em) { 6938 free_extent_map(em); 6939 } 6940 6941 em = alloc_extent_map(); 6942 if (!em) 6943 return -ENOMEM; 6944 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); 6945 if (!map) { 6946 free_extent_map(em); 6947 return -ENOMEM; 6948 } 6949 6950 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 6951 em->map_lookup = map; 6952 em->start = logical; 6953 em->len = length; 6954 em->orig_start = 0; 6955 em->block_start = 0; 6956 em->block_len = em->len; 6957 6958 map->num_stripes = num_stripes; 6959 map->io_width = btrfs_chunk_io_width(leaf, chunk); 6960 map->io_align = btrfs_chunk_io_align(leaf, chunk); 6961 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk); 6962 map->type = type; 6963 /* 6964 * We can't use the sub_stripes value, as for profiles other than 6965 * RAID10, they may have 0 as sub_stripes for filesystems created by 6966 * older mkfs (<v5.4). 6967 * In that case, it can cause divide-by-zero errors later. 6968 * Since currently sub_stripes is fixed for each profile, let's 6969 * use the trusted value instead. 6970 */ 6971 map->sub_stripes = btrfs_raid_array[index].sub_stripes; 6972 map->verified_stripes = 0; 6973 em->orig_block_len = btrfs_calc_stripe_length(em); 6974 for (i = 0; i < num_stripes; i++) { 6975 map->stripes[i].physical = 6976 btrfs_stripe_offset_nr(leaf, chunk, i); 6977 devid = btrfs_stripe_devid_nr(leaf, chunk, i); 6978 args.devid = devid; 6979 read_extent_buffer(leaf, uuid, (unsigned long) 6980 btrfs_stripe_dev_uuid_nr(chunk, i), 6981 BTRFS_UUID_SIZE); 6982 args.uuid = uuid; 6983 map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, &args); 6984 if (!map->stripes[i].dev) { 6985 map->stripes[i].dev = handle_missing_device(fs_info, 6986 devid, uuid); 6987 if (IS_ERR(map->stripes[i].dev)) { 6988 ret = PTR_ERR(map->stripes[i].dev); 6989 free_extent_map(em); 6990 return ret; 6991 } 6992 } 6993 6994 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 6995 &(map->stripes[i].dev->dev_state)); 6996 } 6997 6998 write_lock(&map_tree->lock); 6999 ret = add_extent_mapping(map_tree, em, 0); 7000 write_unlock(&map_tree->lock); 7001 if (ret < 0) { 7002 btrfs_err(fs_info, 7003 "failed to add chunk map, start=%llu len=%llu: %d", 7004 em->start, em->len, ret); 7005 } 7006 free_extent_map(em); 7007 7008 return ret; 7009 } 7010 7011 static void fill_device_from_item(struct extent_buffer *leaf, 7012 struct btrfs_dev_item *dev_item, 7013 struct btrfs_device *device) 7014 { 7015 unsigned long ptr; 7016 7017 device->devid = btrfs_device_id(leaf, dev_item); 7018 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item); 7019 device->total_bytes = device->disk_total_bytes; 7020 device->commit_total_bytes = device->disk_total_bytes; 7021 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item); 7022 device->commit_bytes_used = device->bytes_used; 7023 device->type = btrfs_device_type(leaf, dev_item); 7024 device->io_align = btrfs_device_io_align(leaf, dev_item); 7025 device->io_width = btrfs_device_io_width(leaf, dev_item); 7026 device->sector_size = btrfs_device_sector_size(leaf, dev_item); 7027 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID); 7028 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 7029 7030 ptr = btrfs_device_uuid(dev_item); 7031 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 7032 } 7033 7034 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info, 7035 u8 *fsid) 7036 { 7037 struct 
btrfs_fs_devices *fs_devices; 7038 int ret; 7039 7040 lockdep_assert_held(&uuid_mutex); 7041 ASSERT(fsid); 7042 7043 /* This will match only for multi-device seed fs */ 7044 list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list) 7045 if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE)) 7046 return fs_devices; 7047 7048 7049 fs_devices = find_fsid(fsid, NULL); 7050 if (!fs_devices) { 7051 if (!btrfs_test_opt(fs_info, DEGRADED)) 7052 return ERR_PTR(-ENOENT); 7053 7054 fs_devices = alloc_fs_devices(fsid, NULL); 7055 if (IS_ERR(fs_devices)) 7056 return fs_devices; 7057 7058 fs_devices->seeding = true; 7059 fs_devices->opened = 1; 7060 return fs_devices; 7061 } 7062 7063 /* 7064 * Upon first call for a seed fs fsid, just create a private copy of the 7065 * respective fs_devices and anchor it at fs_info->fs_devices->seed_list 7066 */ 7067 fs_devices = clone_fs_devices(fs_devices); 7068 if (IS_ERR(fs_devices)) 7069 return fs_devices; 7070 7071 ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder); 7072 if (ret) { 7073 free_fs_devices(fs_devices); 7074 return ERR_PTR(ret); 7075 } 7076 7077 if (!fs_devices->seeding) { 7078 close_fs_devices(fs_devices); 7079 free_fs_devices(fs_devices); 7080 return ERR_PTR(-EINVAL); 7081 } 7082 7083 list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list); 7084 7085 return fs_devices; 7086 } 7087 7088 static int read_one_dev(struct extent_buffer *leaf, 7089 struct btrfs_dev_item *dev_item) 7090 { 7091 BTRFS_DEV_LOOKUP_ARGS(args); 7092 struct btrfs_fs_info *fs_info = leaf->fs_info; 7093 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7094 struct btrfs_device *device; 7095 u64 devid; 7096 int ret; 7097 u8 fs_uuid[BTRFS_FSID_SIZE]; 7098 u8 dev_uuid[BTRFS_UUID_SIZE]; 7099 7100 devid = btrfs_device_id(leaf, dev_item); 7101 args.devid = devid; 7102 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 7103 BTRFS_UUID_SIZE); 7104 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 7105 BTRFS_FSID_SIZE); 7106 args.uuid = dev_uuid; 7107 args.fsid = fs_uuid; 7108 7109 if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) { 7110 fs_devices = open_seed_devices(fs_info, fs_uuid); 7111 if (IS_ERR(fs_devices)) 7112 return PTR_ERR(fs_devices); 7113 } 7114 7115 device = btrfs_find_device(fs_info->fs_devices, &args); 7116 if (!device) { 7117 if (!btrfs_test_opt(fs_info, DEGRADED)) { 7118 btrfs_report_missing_device(fs_info, devid, 7119 dev_uuid, true); 7120 return -ENOENT; 7121 } 7122 7123 device = add_missing_dev(fs_devices, devid, dev_uuid); 7124 if (IS_ERR(device)) { 7125 btrfs_err(fs_info, 7126 "failed to add missing dev %llu: %ld", 7127 devid, PTR_ERR(device)); 7128 return PTR_ERR(device); 7129 } 7130 btrfs_report_missing_device(fs_info, devid, dev_uuid, false); 7131 } else { 7132 if (!device->bdev) { 7133 if (!btrfs_test_opt(fs_info, DEGRADED)) { 7134 btrfs_report_missing_device(fs_info, 7135 devid, dev_uuid, true); 7136 return -ENOENT; 7137 } 7138 btrfs_report_missing_device(fs_info, devid, 7139 dev_uuid, false); 7140 } 7141 7142 if (!device->bdev && 7143 !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { 7144 /* 7145 * this happens when a device that was properly setup 7146 * in the device info lists suddenly goes bad. 
7147 * device->bdev is NULL, and so we have to set 7148 * device->missing to one here 7149 */ 7150 device->fs_devices->missing_devices++; 7151 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 7152 } 7153 7154 /* Move the device to its own fs_devices */ 7155 if (device->fs_devices != fs_devices) { 7156 ASSERT(test_bit(BTRFS_DEV_STATE_MISSING, 7157 &device->dev_state)); 7158 7159 list_move(&device->dev_list, &fs_devices->devices); 7160 device->fs_devices->num_devices--; 7161 fs_devices->num_devices++; 7162 7163 device->fs_devices->missing_devices--; 7164 fs_devices->missing_devices++; 7165 7166 device->fs_devices = fs_devices; 7167 } 7168 } 7169 7170 if (device->fs_devices != fs_info->fs_devices) { 7171 BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)); 7172 if (device->generation != 7173 btrfs_device_generation(leaf, dev_item)) 7174 return -EINVAL; 7175 } 7176 7177 fill_device_from_item(leaf, dev_item, device); 7178 if (device->bdev) { 7179 u64 max_total_bytes = bdev_nr_bytes(device->bdev); 7180 7181 if (device->total_bytes > max_total_bytes) { 7182 btrfs_err(fs_info, 7183 "device total_bytes should be at most %llu but found %llu", 7184 max_total_bytes, device->total_bytes); 7185 return -EINVAL; 7186 } 7187 } 7188 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 7189 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 7190 !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 7191 device->fs_devices->total_rw_bytes += device->total_bytes; 7192 atomic64_add(device->total_bytes - device->bytes_used, 7193 &fs_info->free_chunk_space); 7194 } 7195 ret = 0; 7196 return ret; 7197 } 7198 7199 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info) 7200 { 7201 struct btrfs_super_block *super_copy = fs_info->super_copy; 7202 struct extent_buffer *sb; 7203 struct btrfs_disk_key *disk_key; 7204 struct btrfs_chunk *chunk; 7205 u8 *array_ptr; 7206 unsigned long sb_array_offset; 7207 int ret = 0; 7208 u32 num_stripes; 7209 u32 array_size; 7210 u32 len = 0; 7211 u32 cur_offset; 7212 u64 type; 7213 struct btrfs_key key; 7214 7215 ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize); 7216 7217 /* 7218 * We allocated a dummy extent, just to use extent buffer accessors. 7219 * There will be unused space after BTRFS_SUPER_INFO_SIZE, but 7220 * that's fine, we will not go beyond system chunk array anyway. 
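 *
 * (Added note: sys_chunk_array is a packed sequence of
 * (struct btrfs_disk_key, struct btrfs_chunk) pairs, each chunk
 * carrying its stripes inline, which is why the parsing loop below
 * advances by sizeof(*disk_key) and then by
 * btrfs_chunk_item_size(num_stripes) per entry.)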
7221 */ 7222 sb = alloc_dummy_extent_buffer(fs_info, BTRFS_SUPER_INFO_OFFSET); 7223 if (!sb) 7224 return -ENOMEM; 7225 set_extent_buffer_uptodate(sb); 7226 7227 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); 7228 array_size = btrfs_super_sys_array_size(super_copy); 7229 7230 array_ptr = super_copy->sys_chunk_array; 7231 sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array); 7232 cur_offset = 0; 7233 7234 while (cur_offset < array_size) { 7235 disk_key = (struct btrfs_disk_key *)array_ptr; 7236 len = sizeof(*disk_key); 7237 if (cur_offset + len > array_size) 7238 goto out_short_read; 7239 7240 btrfs_disk_key_to_cpu(&key, disk_key); 7241 7242 array_ptr += len; 7243 sb_array_offset += len; 7244 cur_offset += len; 7245 7246 if (key.type != BTRFS_CHUNK_ITEM_KEY) { 7247 btrfs_err(fs_info, 7248 "unexpected item type %u in sys_array at offset %u", 7249 (u32)key.type, cur_offset); 7250 ret = -EIO; 7251 break; 7252 } 7253 7254 chunk = (struct btrfs_chunk *)sb_array_offset; 7255 /* 7256 * At least one btrfs_chunk with one stripe must be present, 7257 * exact stripe count check comes afterwards 7258 */ 7259 len = btrfs_chunk_item_size(1); 7260 if (cur_offset + len > array_size) 7261 goto out_short_read; 7262 7263 num_stripes = btrfs_chunk_num_stripes(sb, chunk); 7264 if (!num_stripes) { 7265 btrfs_err(fs_info, 7266 "invalid number of stripes %u in sys_array at offset %u", 7267 num_stripes, cur_offset); 7268 ret = -EIO; 7269 break; 7270 } 7271 7272 type = btrfs_chunk_type(sb, chunk); 7273 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) { 7274 btrfs_err(fs_info, 7275 "invalid chunk type %llu in sys_array at offset %u", 7276 type, cur_offset); 7277 ret = -EIO; 7278 break; 7279 } 7280 7281 len = btrfs_chunk_item_size(num_stripes); 7282 if (cur_offset + len > array_size) 7283 goto out_short_read; 7284 7285 ret = read_one_chunk(&key, sb, chunk); 7286 if (ret) 7287 break; 7288 7289 array_ptr += len; 7290 sb_array_offset += len; 7291 cur_offset += len; 7292 } 7293 clear_extent_buffer_uptodate(sb); 7294 free_extent_buffer_stale(sb); 7295 return ret; 7296 7297 out_short_read: 7298 btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u", 7299 len, cur_offset); 7300 clear_extent_buffer_uptodate(sb); 7301 free_extent_buffer_stale(sb); 7302 return -EIO; 7303 } 7304 7305 /* 7306 * Check if all chunks in the fs are OK for read-write degraded mount 7307 * 7308 * If the @failing_dev is specified, it's accounted as missing. 7309 * 7310 * Return true if all chunks meet the minimal RW mount requirements. 7311 * Return false if any chunk doesn't meet the minimal RW mount requirements. 7312 */ 7313 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info, 7314 struct btrfs_device *failing_dev) 7315 { 7316 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 7317 struct extent_map *em; 7318 u64 next_start = 0; 7319 bool ret = true; 7320 7321 read_lock(&map_tree->lock); 7322 em = lookup_extent_mapping(map_tree, 0, (u64)-1); 7323 read_unlock(&map_tree->lock); 7324 /* No chunk at all? 
7324 Return false anyway */ 7325 if (!em) { 7326 ret = false; 7327 goto out; 7328 } 7329 while (em) { 7330 struct map_lookup *map; 7331 int missing = 0; 7332 int max_tolerated; 7333 int i; 7334 7335 map = em->map_lookup; 7336 max_tolerated = 7337 btrfs_get_num_tolerated_disk_barrier_failures( 7338 map->type); 7339 for (i = 0; i < map->num_stripes; i++) { 7340 struct btrfs_device *dev = map->stripes[i].dev; 7341 7342 if (!dev || !dev->bdev || 7343 test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) || 7344 dev->last_flush_error) 7345 missing++; 7346 else if (failing_dev && failing_dev == dev) 7347 missing++; 7348 } 7349 if (missing > max_tolerated) { 7350 if (!failing_dev) 7351 btrfs_warn(fs_info, 7352 "chunk %llu missing %d devices, max tolerance is %d for writable mount", 7353 em->start, missing, max_tolerated); 7354 free_extent_map(em); 7355 ret = false; 7356 goto out; 7357 } 7358 next_start = extent_map_end(em); 7359 free_extent_map(em); 7360 7361 read_lock(&map_tree->lock); 7362 em = lookup_extent_mapping(map_tree, next_start, 7363 (u64)(-1) - next_start); 7364 read_unlock(&map_tree->lock); 7365 } 7366 out: 7367 return ret; 7368 } 7369 7370 static void readahead_tree_node_children(struct extent_buffer *node) 7371 { 7372 int i; 7373 const int nr_items = btrfs_header_nritems(node); 7374 7375 for (i = 0; i < nr_items; i++) 7376 btrfs_readahead_node_child(node, i); 7377 } 7378 7379 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info) 7380 { 7381 struct btrfs_root *root = fs_info->chunk_root; 7382 struct btrfs_path *path; 7383 struct extent_buffer *leaf; 7384 struct btrfs_key key; 7385 struct btrfs_key found_key; 7386 int ret; 7387 int slot; 7388 int iter_ret = 0; 7389 u64 total_dev = 0; 7390 u64 last_ra_node = 0; 7391 7392 path = btrfs_alloc_path(); 7393 if (!path) 7394 return -ENOMEM; 7395 7396 /* 7397 * uuid_mutex is needed only if we are mounting a sprout FS, 7398 * otherwise we don't need it. 7399 */ 7400 mutex_lock(&uuid_mutex); 7401 7402 /* 7403 * It is possible for mount and umount to race in such a way that 7404 * we execute this code path, but open_fs_devices failed to clear 7405 * total_rw_bytes. We certainly want it cleared before reading the 7406 * device items, so clear it here. 7407 */ 7408 fs_info->fs_devices->total_rw_bytes = 0; 7409 7410 /* 7411 * Lockdep complains about possible circular locking dependency between 7412 * a disk's open_mutex (struct gendisk.open_mutex), the rw semaphores 7413 * used for freeze protection of a fs (struct super_block.s_writers), 7414 * which we take when starting a transaction, and extent buffers of the 7415 * chunk tree if we call read_one_dev() while holding a lock on an 7416 * extent buffer of the chunk tree. Since we are mounting the filesystem 7417 * and at this point there can't be any concurrent task modifying the 7418 * chunk tree, to keep it simple, just skip locking on the chunk tree. 7419 */ 7420 ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags)); 7421 path->skip_locking = 1; 7422 7423 /* 7424 * Read all device items, and then all the chunk items. All 7425 * device items are found before any chunk item (their object id 7426 * is smaller than the lowest possible object id for a chunk 7427 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
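 *
 * (Added note: the search key below therefore starts at
 * BTRFS_DEV_ITEMS_OBJECTID with zero type and offset, so the
 * btrfs_for_each_slot() iteration visits every DEV_ITEM before the
 * first CHUNK_ITEM.)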
7428 */ 7429 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 7430 key.offset = 0; 7431 key.type = 0; 7432 btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) { 7433 struct extent_buffer *node = path->nodes[1]; 7434 7435 leaf = path->nodes[0]; 7436 slot = path->slots[0]; 7437 7438 if (node) { 7439 if (last_ra_node != node->start) { 7440 readahead_tree_node_children(node); 7441 last_ra_node = node->start; 7442 } 7443 } 7444 if (found_key.type == BTRFS_DEV_ITEM_KEY) { 7445 struct btrfs_dev_item *dev_item; 7446 dev_item = btrfs_item_ptr(leaf, slot, 7447 struct btrfs_dev_item); 7448 ret = read_one_dev(leaf, dev_item); 7449 if (ret) 7450 goto error; 7451 total_dev++; 7452 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) { 7453 struct btrfs_chunk *chunk; 7454 7455 /* 7456 * We are only called at mount time, so no need to take 7457 * fs_info->chunk_mutex. Plus, to avoid lockdep warnings, 7458 * we always lock first fs_info->chunk_mutex before 7459 * acquiring any locks on the chunk tree. This is a 7460 * requirement for chunk allocation, see the comment on 7461 * top of btrfs_chunk_alloc() for details. 7462 */ 7463 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 7464 ret = read_one_chunk(&found_key, leaf, chunk); 7465 if (ret) 7466 goto error; 7467 } 7468 } 7469 /* Catch error found during iteration */ 7470 if (iter_ret < 0) { 7471 ret = iter_ret; 7472 goto error; 7473 } 7474 7475 /* 7476 * After loading chunk tree, we've got all device information, 7477 * do another round of validation checks. 7478 */ 7479 if (total_dev != fs_info->fs_devices->total_devices) { 7480 btrfs_warn(fs_info, 7481 "super block num_devices %llu mismatch with DEV_ITEM count %llu, will be repaired on next transaction commit", 7482 btrfs_super_num_devices(fs_info->super_copy), 7483 total_dev); 7484 fs_info->fs_devices->total_devices = total_dev; 7485 btrfs_set_super_num_devices(fs_info->super_copy, total_dev); 7486 } 7487 if (btrfs_super_total_bytes(fs_info->super_copy) < 7488 fs_info->fs_devices->total_rw_bytes) { 7489 btrfs_err(fs_info, 7490 "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu", 7491 btrfs_super_total_bytes(fs_info->super_copy), 7492 fs_info->fs_devices->total_rw_bytes); 7493 ret = -EINVAL; 7494 goto error; 7495 } 7496 ret = 0; 7497 error: 7498 mutex_unlock(&uuid_mutex); 7499 7500 btrfs_free_path(path); 7501 return ret; 7502 } 7503 7504 int btrfs_init_devices_late(struct btrfs_fs_info *fs_info) 7505 { 7506 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs; 7507 struct btrfs_device *device; 7508 int ret = 0; 7509 7510 fs_devices->fs_info = fs_info; 7511 7512 mutex_lock(&fs_devices->device_list_mutex); 7513 list_for_each_entry(device, &fs_devices->devices, dev_list) 7514 device->fs_info = fs_info; 7515 7516 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 7517 list_for_each_entry(device, &seed_devs->devices, dev_list) { 7518 device->fs_info = fs_info; 7519 ret = btrfs_get_dev_zone_info(device, false); 7520 if (ret) 7521 break; 7522 } 7523 7524 seed_devs->fs_info = fs_info; 7525 } 7526 mutex_unlock(&fs_devices->device_list_mutex); 7527 7528 return ret; 7529 } 7530 7531 static u64 btrfs_dev_stats_value(const struct extent_buffer *eb, 7532 const struct btrfs_dev_stats_item *ptr, 7533 int index) 7534 { 7535 u64 val; 7536 7537 read_extent_buffer(eb, &val, 7538 offsetof(struct btrfs_dev_stats_item, values) + 7539 ((unsigned long)ptr) + (index * sizeof(u64)), 7540 sizeof(val)); 7541 return val; 7542 } 7543 7544 static void 
btrfs_set_dev_stats_value(struct extent_buffer *eb, 7545 struct btrfs_dev_stats_item *ptr, 7546 int index, u64 val) 7547 { 7548 write_extent_buffer(eb, &val, 7549 offsetof(struct btrfs_dev_stats_item, values) + 7550 ((unsigned long)ptr) + (index * sizeof(u64)), 7551 sizeof(val)); 7552 } 7553 7554 static int btrfs_device_init_dev_stats(struct btrfs_device *device, 7555 struct btrfs_path *path) 7556 { 7557 struct btrfs_dev_stats_item *ptr; 7558 struct extent_buffer *eb; 7559 struct btrfs_key key; 7560 int item_size; 7561 int i, ret, slot; 7562 7563 if (!device->fs_info->dev_root) 7564 return 0; 7565 7566 key.objectid = BTRFS_DEV_STATS_OBJECTID; 7567 key.type = BTRFS_PERSISTENT_ITEM_KEY; 7568 key.offset = device->devid; 7569 ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0); 7570 if (ret) { 7571 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7572 btrfs_dev_stat_set(device, i, 0); 7573 device->dev_stats_valid = 1; 7574 btrfs_release_path(path); 7575 return ret < 0 ? ret : 0; 7576 } 7577 slot = path->slots[0]; 7578 eb = path->nodes[0]; 7579 item_size = btrfs_item_size(eb, slot); 7580 7581 ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item); 7582 7583 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) { 7584 if (item_size >= (1 + i) * sizeof(__le64)) 7585 btrfs_dev_stat_set(device, i, 7586 btrfs_dev_stats_value(eb, ptr, i)); 7587 else 7588 btrfs_dev_stat_set(device, i, 0); 7589 } 7590 7591 device->dev_stats_valid = 1; 7592 btrfs_dev_stat_print_on_load(device); 7593 btrfs_release_path(path); 7594 7595 return 0; 7596 } 7597 7598 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info) 7599 { 7600 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs; 7601 struct btrfs_device *device; 7602 struct btrfs_path *path = NULL; 7603 int ret = 0; 7604 7605 path = btrfs_alloc_path(); 7606 if (!path) 7607 return -ENOMEM; 7608 7609 mutex_lock(&fs_devices->device_list_mutex); 7610 list_for_each_entry(device, &fs_devices->devices, dev_list) { 7611 ret = btrfs_device_init_dev_stats(device, path); 7612 if (ret) 7613 goto out; 7614 } 7615 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 7616 list_for_each_entry(device, &seed_devs->devices, dev_list) { 7617 ret = btrfs_device_init_dev_stats(device, path); 7618 if (ret) 7619 goto out; 7620 } 7621 } 7622 out: 7623 mutex_unlock(&fs_devices->device_list_mutex); 7624 7625 btrfs_free_path(path); 7626 return ret; 7627 } 7628 7629 static int update_dev_stat_item(struct btrfs_trans_handle *trans, 7630 struct btrfs_device *device) 7631 { 7632 struct btrfs_fs_info *fs_info = trans->fs_info; 7633 struct btrfs_root *dev_root = fs_info->dev_root; 7634 struct btrfs_path *path; 7635 struct btrfs_key key; 7636 struct extent_buffer *eb; 7637 struct btrfs_dev_stats_item *ptr; 7638 int ret; 7639 int i; 7640 7641 key.objectid = BTRFS_DEV_STATS_OBJECTID; 7642 key.type = BTRFS_PERSISTENT_ITEM_KEY; 7643 key.offset = device->devid; 7644 7645 path = btrfs_alloc_path(); 7646 if (!path) 7647 return -ENOMEM; 7648 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1); 7649 if (ret < 0) { 7650 btrfs_warn_in_rcu(fs_info, 7651 "error %d while searching for dev_stats item for device %s", 7652 ret, btrfs_dev_name(device)); 7653 goto out; 7654 } 7655 7656 if (ret == 0 && 7657 btrfs_item_size(path->nodes[0], path->slots[0]) < sizeof(*ptr)) { 7658 /* need to delete old one and insert a new one */ 7659 ret = btrfs_del_item(trans, dev_root, path); 7660 if (ret != 0) { 7661 btrfs_warn_in_rcu(fs_info, 7662 "delete too small dev_stats 

static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_device *device)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn_in_rcu(fs_info,
			"error %d while searching for dev_stats item for device %s",
			ret, btrfs_dev_name(device));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* Need to delete the old one and insert a new one. */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			btrfs_warn_in_rcu(fs_info,
				"delete too small dev_stats item for device %s failed %d",
				btrfs_dev_name(device), ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* Need to insert a new item. */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			btrfs_warn_in_rcu(fs_info,
				"insert dev_stats item for device %s failed %d",
				btrfs_dev_name(device), ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Called from commit_transaction(). Writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int stats_cnt;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		stats_cnt = atomic_read(&device->dev_stats_ccnt);
		if (!device->dev_stats_valid || stats_cnt == 0)
			continue;

		/*
		 * There is a LOAD-LOAD control dependency between the value of
		 * dev_stats_ccnt and updating the on-disk values, which
		 * requires reading the in-memory counters. Such control
		 * dependencies require explicit read memory barriers.
		 *
		 * This memory barrier pairs with smp_mb__before_atomic() in
		 * btrfs_dev_stat_inc()/btrfs_dev_stat_set() and with the full
		 * barrier implied by atomic_xchg() in
		 * btrfs_dev_stats_read_and_reset().
		 */
		smp_rmb();

		ret = update_dev_stat_item(trans, device);
		if (!ret)
			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);

	if (!dev->dev_stats_valid)
		return;
	btrfs_err_rl_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
			   btrfs_dev_name(dev),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* All values == 0, suppress message. */

	btrfs_info_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
			  btrfs_dev_name(dev),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}
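
/*
 * Editor's sketch (an addition; the userspace side is not part of this
 * file): the handler below backs the BTRFS_IOC_GET_DEV_STATS ioctl that
 * `btrfs device stats` ultimately issues, roughly:
 *
 *	struct btrfs_ioctl_get_dev_stats stats = {
 *		.devid = devid,
 *		.nr_items = BTRFS_DEV_STAT_VALUES_MAX,
 *		.flags = reset ? BTRFS_DEV_STATS_RESET : 0,
 *	};
 *
 *	ioctl(fs_fd, BTRFS_IOC_GET_DEV_STATS, &stats);
 */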

int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	BTRFS_DEV_LOOKUP_ARGS(args);
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	args.devid = stats->devid;
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		btrfs_warn(fs_info, "get dev_stats failed, device not found");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_set(dev, i, 0);
		}
		btrfs_info(fs_info, "device stats zeroed by %s (%d)",
			   current->comm, task_pid_nr(current));
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}

/*
 * Update the size and bytes used for each device where it changed. This is
 * delayed since we would otherwise get errors while writing out the
 * superblocks.
 *
 * Must be invoked during transaction commit.
 */
void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
{
	struct btrfs_device *curr, *next;

	ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);

	if (list_empty(&trans->dev_update_list))
		return;

	/*
	 * We don't need the device_list_mutex here. This list is owned by the
	 * transaction and the transaction must complete before the device is
	 * released.
	 */
	mutex_lock(&trans->fs_info->chunk_mutex);
	list_for_each_entry_safe(curr, next, &trans->dev_update_list,
				 post_commit_list) {
		list_del_init(&curr->post_commit_list);
		curr->commit_total_bytes = curr->disk_total_bytes;
		curr->commit_bytes_used = curr->bytes_used;
	}
	mutex_unlock(&trans->fs_info->chunk_mutex);
}
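
/*
 * Editor's worked example for the factor below (an addition; the numbers
 * follow from the btrfs_raid_array table at the top of this file): RAID1 and
 * DUP have ncopies == 2, so a block group of size S consumes 2 * S bytes of
 * raw device space, while SINGLE and RAID0 have ncopies == 1 and consume
 * exactly S.
 */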

/*
 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
 */
int btrfs_bg_type_to_factor(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	return btrfs_raid_array[index].ncopies;
}

static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
				 u64 chunk_offset, u64 devid,
				 u64 physical_offset, u64 physical_len)
{
	struct btrfs_dev_lookup_args args = { .devid = devid };
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *dev;
	u64 stripe_len;
	bool found = false;
	int ret = 0;
	int i;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
		goto out;
	}

	map = em->map_lookup;
	stripe_len = btrfs_calc_stripe_length(em);
	if (physical_len != stripe_len) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
			  physical_offset, devid, em->start, physical_len,
			  stripe_len);
		ret = -EUCLEAN;
		goto out;
	}

	/*
	 * Very old mkfs.btrfs (before v4.1) will not respect the reserved
	 * space. Although the kernel can handle it without problems, it is
	 * better to warn the user.
	 */
	if (physical_offset < BTRFS_DEVICE_RANGE_RESERVED)
		btrfs_warn(fs_info,
		"devid %llu physical %llu len %llu inside the reserved space",
			   devid, physical_offset, physical_len);

	for (i = 0; i < map->num_stripes; i++) {
		if (map->stripes[i].dev->devid == devid &&
		    map->stripes[i].physical == physical_offset) {
			found = true;
			if (map->verified_stripes >= map->num_stripes) {
				btrfs_err(fs_info,
				"too many dev extents for chunk %llu found",
					  em->start);
				ret = -EUCLEAN;
				goto out;
			}
			map->verified_stripes++;
			break;
		}
	}
	if (!found) {
		btrfs_err(fs_info,
	"dev extent physical offset %llu devid %llu has no corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
	}

	/* Make sure no dev extent is beyond the device boundary. */
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	if (!dev) {
		btrfs_err(fs_info, "failed to find devid %llu", devid);
		ret = -EUCLEAN;
		goto out;
	}

	if (physical_offset + physical_len > dev->disk_total_bytes) {
		btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
			  devid, physical_offset, physical_len,
			  dev->disk_total_bytes);
		ret = -EUCLEAN;
		goto out;
	}

	if (dev->zone_info) {
		u64 zone_size = dev->zone_info->zone_size;

		if (!IS_ALIGNED(physical_offset, zone_size) ||
		    !IS_ALIGNED(physical_len, zone_size)) {
			btrfs_err(fs_info,
"zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone",
				  devid, physical_offset, physical_len);
			ret = -EUCLEAN;
			goto out;
		}
	}

out:
	free_extent_map(em);
	return ret;
}
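
/*
 * Editor's illustration (an addition, deliberately schematic) of the
 * cross-check done by verify_one_dev_extent() above and
 * verify_chunk_dev_extent_mapping() below:
 *
 *	chunk tree (CHUNK_ITEM)            device tree (DEV_EXTENT)
 *	  stripe[0]: devid=1, phys=X  <->  (devid=1, offset=X, len=stripe_len)
 *	  stripe[1]: devid=2, phys=Y  <->  (devid=2, offset=Y, len=stripe_len)
 *
 * Every dev extent must match exactly one stripe of its chunk (checked
 * above, counting matches in map->verified_stripes), and every stripe must
 * have been matched exactly once (verified_stripes == num_stripes, checked
 * below).
 */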

static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct rb_node *node;
	int ret = 0;

	read_lock(&em_tree->lock);
	for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
		em = rb_entry(node, struct extent_map, rb_node);
		if (em->map_lookup->num_stripes !=
		    em->map_lookup->verified_stripes) {
			btrfs_err(fs_info,
			"chunk %llu has missing dev extent, have %d expect %d",
				  em->start, em->map_lookup->verified_stripes,
				  em->map_lookup->num_stripes);
			ret = -EUCLEAN;
			goto out;
		}
	}
out:
	read_unlock(&em_tree->lock);
	return ret;
}
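
/*
 * Editor's note on the traversal below (an addition): dev extents are keyed
 * as (devid, BTRFS_DEV_EXTENT_KEY, physical_offset), so walking the device
 * tree in key order yields, per device, extents sorted by physical offset.
 * That ordering is what makes the simple "starts before the previous one
 * ended" overlap check against prev_dev_ext_end sufficient.
 */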

/*
 * Ensure that all dev extents are mapped to the correct chunk, otherwise
 * later chunk allocation/free would cause unexpected behavior.
 *
 * NOTE: This will iterate through the whole device tree, which should be
 * about the same size as the chunk tree. This slightly increases mount time.
 */
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	u64 prev_devid = 0;
	u64 prev_dev_ext_end = 0;
	int ret = 0;

	/*
	 * We don't have a dev_root because we mounted with ignorebadroots and
	 * failed to load the root, so we want to skip the verification in this
	 * case for sure.
	 *
	 * However if the dev root is fine, but the tree itself is corrupted
	 * we'd still fail to mount. This verification is only to make sure
	 * writes can happen safely, so instead just bypass this check
	 * completely in the case of IGNOREBADROOTS.
	 */
	if (btrfs_test_opt(fs_info, IGNOREBADROOTS))
		return 0;

	key.objectid = 1;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0)
			goto out;
		/* No dev extents at all? Not good. */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	while (1) {
		struct extent_buffer *leaf = path->nodes[0];
		struct btrfs_dev_extent *dext;
		int slot = path->slots[0];
		u64 chunk_offset;
		u64 physical_offset;
		u64 physical_len;
		u64 devid;

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type != BTRFS_DEV_EXTENT_KEY)
			break;
		devid = key.objectid;
		physical_offset = key.offset;

		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
		physical_len = btrfs_dev_extent_length(leaf, dext);

		/* Check if this dev extent overlaps with the previous one. */
		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
			btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu overlaps with previous dev extent end %llu",
				  devid, physical_offset, prev_dev_ext_end);
			ret = -EUCLEAN;
			goto out;
		}

		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
					    physical_offset, physical_len);
		if (ret < 0)
			goto out;
		prev_devid = devid;
		prev_dev_ext_end = physical_offset + physical_len;

		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = 0;
			break;
		}
	}

	/* Ensure all chunks have corresponding dev extents. */
	ret = verify_chunk_dev_extent_mapping(fs_info);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Check whether the given block group or device is pinned by any inode being
 * used as a swapfile.
 */
bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
{
	struct btrfs_swapfile_pin *sp;
	struct rb_node *node;

	spin_lock(&fs_info->swapfile_pins_lock);
	node = fs_info->swapfile_pins.rb_node;
	while (node) {
		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
		if (ptr < sp->ptr)
			node = node->rb_left;
		else if (ptr > sp->ptr)
			node = node->rb_right;
		else
			break;
	}
	spin_unlock(&fs_info->swapfile_pins_lock);
	return node != NULL;
}

static int relocating_repair_kthread(void *data)
{
	struct btrfs_block_group *cache = data;
	struct btrfs_fs_info *fs_info = cache->fs_info;
	u64 target;
	int ret = 0;

	target = cache->start;
	btrfs_put_block_group(cache);

	sb_start_write(fs_info->sb);
	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
		btrfs_info(fs_info,
			   "zoned: skip relocating block group %llu to repair: EBUSY",
			   target);
		sb_end_write(fs_info->sb);
		return -EBUSY;
	}

	mutex_lock(&fs_info->reclaim_bgs_lock);

	/* Ensure the block group still exists. */
	cache = btrfs_lookup_block_group(fs_info, target);
	if (!cache)
		goto out;

	if (!test_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags))
		goto out;

	ret = btrfs_may_alloc_data_chunk(fs_info, target);
	if (ret < 0)
		goto out;

	btrfs_info(fs_info,
		   "zoned: relocating block group %llu to repair IO failure",
		   target);
	ret = btrfs_relocate_chunk(fs_info, target);

out:
	if (cache)
		btrfs_put_block_group(cache);
	mutex_unlock(&fs_info->reclaim_bgs_lock);
	btrfs_exclop_finish(fs_info);
	sb_end_write(fs_info->sb);

	return ret;
}
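
/*
 * Editor's summary (an addition): on a zoned filesystem a write error cannot
 * be repaired by rewriting in place, since zones are sequential-write only.
 * btrfs_repair_one_zone() below instead marks the affected block group and
 * spawns relocating_repair_kthread() above, which relocates the whole block
 * group away from the failing zone, serialized against other exclusive
 * operations via BTRFS_EXCLOP_BALANCE.
 */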

bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
{
	struct btrfs_block_group *cache;

	if (!btrfs_is_zoned(fs_info))
		return false;

	/* Do not attempt to repair in degraded state. */
	if (btrfs_test_opt(fs_info, DEGRADED))
		return true;

	cache = btrfs_lookup_block_group(fs_info, logical);
	if (!cache)
		return true;

	if (test_and_set_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags)) {
		btrfs_put_block_group(cache);
		return true;
	}

	kthread_run(relocating_repair_kthread, cache,
		    "btrfs-relocating-repair");

	return true;
}