// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include <linux/namei.h>
#include "misc.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "tree-checker.h"
#include "space-info.h"
#include "block-group.h"
#include "discard.h"
#include "zoned.h"

#define BTRFS_BLOCK_GROUP_STRIPE_MASK	(BTRFS_BLOCK_GROUP_RAID0 | \
					 BTRFS_BLOCK_GROUP_RAID10 | \
					 BTRFS_BLOCK_GROUP_RAID56_MASK)

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes	= 2,
		.dev_stripes	= 1,
		.devs_max	= 0,	/* 0 == as many as possible */
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity        = 0,
		.raid_name	= "raid10",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error	= BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 2,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity        = 0,
		.raid_name	= "raid1",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C3] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 3,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 3,
		.ncopies	= 3,
		.nparity        = 0,
		.raid_name	= "raid1c3",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C3,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C4] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 4,
		.devs_min	= 4,
		.tolerated_failures = 3,
		.devs_increment	= 4,
		.ncopies	= 4,
		.nparity        = 0,
		.raid_name	= "raid1c4",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C4,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes	= 1,
		.dev_stripes	= 2,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 2,
		.nparity        = 0,
		.raid_name	= "dup",
		.bg_flag	= BTRFS_BLOCK_GROUP_DUP,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 0,
		.raid_name	= "raid0",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 0,
		.raid_name	= "single",
		.bg_flag	= 0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 1,
		.raid_name	= "raid5",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error	= BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 2,
		.raid_name	= "raid6",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error	= BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};
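/*
 * Example (illustrative sketch, not used by the code): the attributes above
 * are enough to estimate the usable fraction of raw space for each profile.
 * For a chunk striped across N devices, roughly:
 *
 *	usable ~= (N - nparity) / (N * ncopies)
 *
 * e.g. raid6 (ncopies = 1, nparity = 2) on N = 6 devices keeps 4/6 of the
 * raw bytes, while raid10 (ncopies = 2, nparity = 0) keeps 1/2 for any N.
 */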
/*
 * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
 * can be used as index to access btrfs_raid_array[].
 */
enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags)
{
	const u64 profile = (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK);

	if (!profile)
		return BTRFS_RAID_SINGLE;

	return BTRFS_BG_FLAG_TO_INDEX(profile);
}

const char *btrfs_bg_type_to_raid_name(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	if (index >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[index].raid_name;
}

/*
 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 * bytes including terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

	if (!flags) {
		strcpy(bp, "NONE");
		return;
	}

#define DESCRIBE_FLAG(flag, desc)					\
	do {								\
		if (flags & (flag)) {					\
			ret = snprintf(bp, size_bp, "%s|", (desc));	\
			if (ret < 0 || ret >= size_bp)			\
				goto out_overflow;			\
			size_bp -= ret;					\
			bp += ret;					\
			flags &= ~(flag);				\
		}							\
	} while (0)

	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	if (flags) {
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		size_bp -= ret;
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed, it's up to the caller to provide a
	 * sufficiently large buffer
	 */
out_overflow:;
}
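/*
 * Example: a minimal sketch of calling btrfs_describe_block_groups(), with a
 * hypothetical buffer size (not a caller that exists in this file):
 *
 *	char buf[64];
 *
 *	btrfs_describe_block_groups(BTRFS_BLOCK_GROUP_DATA |
 *				    BTRFS_BLOCK_GROUP_RAID1, buf, sizeof(buf));
 *
 * buf now holds "data|raid1"; any leftover unknown bits would have been
 * appended as hex and the trailing '|' is trimmed.
 */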
static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
			     enum btrfs_map_op op,
			     u64 logical, u64 *length,
			     struct btrfs_io_context **bioc_ret,
			     int mirror_num, int need_raid_map);

/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list in general
 * but in mount context it could be used to exclude list modifications by eg.
 * scan ioctl
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, ie. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * Is not required at mount and close times, because our device list is
 * protected by the uuid_mutex at that point.
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed. Additionally it also protects post_commit_list of
 * individual devices, since they can be added to the transaction's
 * post_commit_list only with chunk_mutex held.
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Lock nesting
 * ============
 *
 * uuid_mutex
 *   device_list_mutex
 *     chunk_mutex
 *   balance_mutex
 *
 *
 * Exclusive operations
 * ====================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * The status of exclusive operation is set and cleared atomically.
 * During the course of Paused state, fs_info::exclusive_operation remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * The exclusive status is cleared when the device operation is canceled or
 * completed.
 */

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}
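/*
 * Example: the nesting documented above, sketched for the deepest chain
 * (hypothetical operation, for illustration only):
 *
 *	mutex_lock(&uuid_mutex);
 *	mutex_lock(&fs_devices->device_list_mutex);
 *	mutex_lock(&fs_info->chunk_mutex);
 *	...
 *	mutex_unlock(&fs_info->chunk_mutex);
 *	mutex_unlock(&fs_devices->device_list_mutex);
 *	mutex_unlock(&uuid_mutex);
 *
 * Taking the same locks in a different relative order risks deadlock.
 */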
/*
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:		if not NULL, copy the UUID to fs_devices::fsid
 * @metadata_fsid:	if not NULL, copy the UUID to fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
						 const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	INIT_LIST_HEAD(&fs_devs->seed_list);
	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);

	if (metadata_fsid)
		memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
	else if (fsid)
		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);

	return fs_devs;
}

void btrfs_free_device(struct btrfs_device *device)
{
	WARN_ON(!list_empty(&device->post_commit_list));
	rcu_string_free(device->name);
	extent_io_tree_release(&device->alloc_state);
	bio_put(device->flush_bio);
	btrfs_destroy_dev_zone_info(device);
	kfree(device);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}

void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}

static noinline struct btrfs_fs_devices *find_fsid(
		const u8 *fsid, const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devices;

	ASSERT(fsid);

	/* Handle non-split brain cases */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (metadata_fsid) {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
			    && memcmp(metadata_fsid, fs_devices->metadata_uuid,
				      BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		} else {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		}
	}
	return NULL;
}
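/*
 * Example: the allocation/teardown lifecycle of the helpers above, as a
 * sketch (mirrors what device_list_add() and clone_fs_devices() do later):
 *
 *	struct btrfs_fs_devices *fs_devs;
 *
 *	fs_devs = alloc_fs_devices(fsid, NULL);
 *	if (IS_ERR(fs_devs))
 *		return PTR_ERR(fs_devs);
 *	...
 *
 * On an early error, before anything is linked, kfree(fs_devs) is enough;
 * once devices have been attached, free_fs_devices() tears down the list via
 * btrfs_free_device().
 */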
static struct btrfs_fs_devices *find_fsid_with_metadata_uuid(
				struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle scanned device having completed its fsid change but
	 * belonging to a fs_devices that was created by first scanning
	 * a device which didn't have its fsid/metadata_uuid changed
	 * at all and the CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(disk_super->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}
	/*
	 * Handle scanned device having completed its fsid change but
	 * belonging to a fs_devices that was created by a device that
	 * has an outdated pair of fsid/metadata_uuid and
	 * CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(fs_devices->metadata_uuid,
			   fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
		    memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, disk_super->metadata_uuid);
}


static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct btrfs_super_block **disk_super)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		sync_blockdev(*bdev);
	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*disk_super = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*disk_super)) {
		ret = PTR_ERR(*disk_super);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	return ret;
}

/**
 * Search and remove all stale devices (which are not mounted).  When both
 * inputs are NULL, it will search and release all stale devices.
 *
 * @devt:	 Optional. When provided, it will release only the unmounted
 *		 devices matching this devt.
 * @skip_device: Optional. Will skip this device when searching for the stale
 *		 devices.
 *
 * Return:	0 for success or if @devt is 0.
 *		-EBUSY if @devt is a mounted device.
 *		-ENOENT if @devt does not match any device in the list.
 */
static int btrfs_free_stale_devices(dev_t devt, struct btrfs_device *skip_device)
{
	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
	struct btrfs_device *device, *tmp_device;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	if (devt)
		ret = -ENOENT;

	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {

		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry_safe(device, tmp_device,
					 &fs_devices->devices, dev_list) {
			if (skip_device && skip_device == device)
				continue;
			if (devt && devt != device->devt)
				continue;
			if (fs_devices->opened) {
				/* for an already deleted device return 0 */
				if (devt && ret != 0)
					ret = -EBUSY;
				break;
			}

			/* delete the stale device */
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);

			ret = 0;
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		if (fs_devices->num_devices == 0) {
			btrfs_sysfs_remove_fsid(fs_devices);
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	return ret;
}
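/*
 * Example: the pairing expected around btrfs_get_bdev_and_sb() above (sketch
 * only; btrfs_open_one_device() below is the real caller):
 *
 *	ret = btrfs_get_bdev_and_sb(path, flags, holder, 1, &bdev, &disk_super);
 *	if (ret)
 *		return ret;
 *	...
 *	btrfs_release_disk_super(disk_super);
 *	blkdev_put(bdev, flags);
 *
 * On failure the helper has already dropped the block device reference and
 * reset *bdev to NULL, so there is nothing for the caller to undo.
 */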
/*
 * This is only used on mount, and we are protected from competing things
 * messing with our fs_devices by the uuid_mutex, thus we do not need the
 * fs_devices->device_list_mutex here.
 */
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
			struct btrfs_device *device, fmode_t flags,
			void *holder)
{
	struct block_device *bdev;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	if (device->bdev)
		return -EINVAL;
	if (!device->name)
		return -EINVAL;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev, &disk_super);
	if (ret)
		return ret;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_free_page;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_free_page;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
			pr_err(
		"BTRFS: Invalid seeding and uuid-changed device detected\n");
			goto error_free_page;
		}

		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = true;
	} else {
		if (bdev_read_only(bdev))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
		fs_devices->rotating = true;

	device->bdev = bdev;
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	device->mode = flags;

	fs_devices->open_devices++;
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	btrfs_release_disk_super(disk_super);

	return 0;

error_free_page:
	btrfs_release_disk_super(disk_super);
	blkdev_put(bdev, flags);

	return -EINVAL;
}

/*
 * Handle a scanned device having its CHANGING_FSID_V2 flag set and the
 * fs_devices being created with a disk that has already completed its fsid
 * change. Such a disk can belong to an fs which has its FSID changed or to
 * one which doesn't. Handle both cases here.
 */
static struct btrfs_fs_devices *find_fsid_inprogress(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, NULL);
}
static struct btrfs_fs_devices *find_fsid_changed(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handles the case where the scanned device is part of an fs that had
	 * multiple successful changes of FSID but currently the device didn't
	 * observe it. Meaning our fsid will be different from theirs. We need
	 * to handle two subcases:
	 * 1 - The fs still continues to have different METADATA/FSID uuids.
	 * 2 - The fs is switched back to its original FSID (METADATA/FSID
	 * are equal).
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		/* Changed UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->fsid,
			   BTRFS_FSID_SIZE) != 0)
			return fs_devices;

		/* Unchanged UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}

	return NULL;
}

static struct btrfs_fs_devices *find_fsid_reverted_metadata(
				struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle the case where the scanned device is part of an fs whose last
	 * metadata UUID change reverted it to the original FSID. At the same
	 * time fs_devices was first created by another constituent device
	 * which didn't fully observe the operation. This results in a
	 * btrfs_fs_devices created with metadata/fsid different AND
	 * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
	 * fs_devices equal to the FSID of the disk.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    fs_devices->fsid_change)
			return fs_devices;
	}

	return NULL;
}
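/*
 * Informal summary of how device_list_add() below dispatches to the
 * find_fsid_*() helpers, keyed on the flags of the scanned super block:
 *
 *	CHANGING_FSID_V2   METADATA_UUID	helper
 *	-----------------------------------------------------------------
 *	set		   unset		find_fsid_inprogress()
 *	set		   set			find_fsid_changed()
 *	unset		   set			find_fsid_with_metadata_uuid()
 *	unset		   unset		find_fsid_reverted_metadata(),
 *						then plain find_fsid()
 */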
/*
 * Add new device to list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   bool *new_device_added)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = NULL;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);
	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
	dev_t path_devt;
	int error;
	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
	bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
					BTRFS_SUPER_FLAG_CHANGING_FSID_V2);

	error = lookup_bdev(path, &path_devt);
	if (error)
		return ERR_PTR(error);

	if (fsid_change_in_progress) {
		if (!has_metadata_uuid)
			fs_devices = find_fsid_inprogress(disk_super);
		else
			fs_devices = find_fsid_changed(disk_super);
	} else if (has_metadata_uuid) {
		fs_devices = find_fsid_with_metadata_uuid(disk_super);
	} else {
		fs_devices = find_fsid_reverted_metadata(disk_super);
		if (!fs_devices)
			fs_devices = find_fsid(disk_super->fsid, NULL);
	}

	if (!fs_devices) {
		if (has_metadata_uuid)
			fs_devices = alloc_fs_devices(disk_super->fsid,
						      disk_super->metadata_uuid);
		else
			fs_devices = alloc_fs_devices(disk_super->fsid, NULL);

		if (IS_ERR(fs_devices))
			return ERR_CAST(fs_devices);

		fs_devices->fsid_change = fsid_change_in_progress;

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&fs_devices->fs_list, &fs_uuids);

		device = NULL;
	} else {
		struct btrfs_dev_lookup_args args = {
			.devid = devid,
			.uuid = disk_super->dev_item.uuid,
		};

		mutex_lock(&fs_devices->device_list_mutex);
		device = btrfs_find_device(fs_devices, &args);

		/*
		 * If this disk has been pulled into an fs devices created by
		 * a device which had the CHANGING_FSID_V2 flag then replace the
		 * metadata_uuid/fsid values of the fs_devices.
		 */
		if (fs_devices->fsid_change &&
		    found_transid > fs_devices->latest_generation) {
			memcpy(fs_devices->fsid, disk_super->fsid,
			       BTRFS_FSID_SIZE);

			if (has_metadata_uuid)
				memcpy(fs_devices->metadata_uuid,
				       disk_super->metadata_uuid,
				       BTRFS_FSID_SIZE);
			else
				memcpy(fs_devices->metadata_uuid,
				       disk_super->fsid, BTRFS_FSID_SIZE);

			fs_devices->fsid_change = false;
		}
	}

	if (!device) {
		if (fs_devices->opened) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EBUSY);
		}

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			/* we can safely leave the fs_devices entry around */
			return device;
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			btrfs_free_device(device);
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_assign_pointer(device->name, name);
		device->devt = path_devt;

		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;

		device->fs_devices = fs_devices;
		*new_device_added = true;

		if (disk_super->label[0])
			pr_info(
	"BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->label, devid, found_transid, path,
				current->comm, task_pid_nr(current));
		else
			pr_info(
	"BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->fsid, devid, found_transid, path,
				current->comm, task_pid_nr(current));

	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *	a. The same device disappeared and reappeared with
		 *	   different name. or
		 *	b. The missing-disk-which-was-replaced, has
		 *	   reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be spurious
		 * and unintentional.
		 *
		 * Further in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transaction when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at any time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted. We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with same uuid and devid. We keep the one
			 * with larger generation number or the last-in if
			 * generations are equal.
			 */
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EEXIST);
		}
		/*
		 * We are going to replace the device path for a given devid,
		 * make sure it's the same device if the device is mounted
		 *
		 * NOTE: the device->fs_info may not be reliable here so pass
		 * in a NULL to message helpers instead. This avoids a possible
		 * use-after-free when the fs_info and fs_info->sb are already
		 * torn down.
		 */
		if (device->bdev) {
			if (device->devt != path_devt) {
				mutex_unlock(&fs_devices->device_list_mutex);
				btrfs_warn_in_rcu(NULL,
	"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
						  path, devid, found_transid,
						  current->comm,
						  task_pid_nr(current));
				return ERR_PTR(-EEXIST);
			}
			btrfs_info_in_rcu(NULL,
	"devid %llu device path %s changed to %s scanned by %s (%d)",
					  devid, rcu_str_deref(device->name),
					  path, current->comm,
					  task_pid_nr(current));
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			fs_devices->missing_devices--;
			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}
		device->devt = path_devt;
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with largest generation
	 * (as above).
	 */
	if (!fs_devices->opened) {
		device->generation = found_transid;
		fs_devices->latest_generation = max_t(u64, found_transid,
						fs_devices->latest_generation);
	}

	fs_devices->total_devices = btrfs_super_num_devices(disk_super);

	mutex_unlock(&fs_devices->device_list_mutex);
	return device;
}

static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	fs_devices = alloc_fs_devices(orig->fsid, NULL);
	if (IS_ERR(fs_devices))
		return fs_devices;

	fs_devices->total_devices = orig->total_devices;

	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device)) {
			ret = PTR_ERR(device);
			goto error;
		}

		/*
		 * This is ok to do without the RCU read lock held because we
		 * hold the uuid mutex so nothing we touch in here is going to
		 * disappear.
		 */
		if (orig_dev->name) {
			name = rcu_string_strdup(orig_dev->name->str,
						 GFP_KERNEL);
			if (!name) {
				btrfs_free_device(device);
				ret = -ENOMEM;
				goto error;
			}
			rcu_assign_pointer(device->name, name);
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(ret);
}
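/*
 * Example: device->name is published with rcu_assign_pointer() above, so a
 * plain reader is expected to dereference it inside an RCU section, e.g.
 * (sketch; the btrfs_*_in_rcu() message helpers wrap this same pattern):
 *
 *	rcu_read_lock();
 *	name = rcu_str_deref(device->name);
 *	... use name ...
 *	rcu_read_unlock();
 *
 * The string is only guaranteed to stay valid until rcu_read_unlock().
 */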
static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
				      struct btrfs_device **latest_dev)
{
	struct btrfs_device *device, *next;

	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state) &&
			    !test_bit(BTRFS_DEV_STATE_MISSING,
				      &device->dev_state) &&
			    (!*latest_dev ||
			     device->generation > (*latest_dev)->generation)) {
				*latest_dev = device;
			}
			continue;
		}

		/*
		 * We have already validated the presence of
		 * BTRFS_DEV_REPLACE_DEVID in btrfs_init_dev_replace(), so just
		 * continue.
		 */
		if (device->devid == BTRFS_DEV_REPLACE_DEVID)
			continue;

		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			list_del_init(&device->dev_alloc_list);
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		btrfs_free_device(device);
	}

}

/*
 * After we have read the system tree and know devids belonging to this
 * filesystem, remove the device which does not belong there.
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_fs_devices *seed_dev;

	mutex_lock(&uuid_mutex);
	__btrfs_free_extra_devids(fs_devices, &latest_dev);

	list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
		__btrfs_free_extra_devids(seed_dev, &latest_dev);

	fs_devices->latest_dev = latest_dev;

	mutex_unlock(&uuid_mutex);
}

static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (!device->bdev)
		return;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	blkdev_put(device->bdev, device->mode);
}
static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (device->devid == BTRFS_DEV_REPLACE_DEVID)
		clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
		clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		fs_devices->missing_devices--;
	}

	btrfs_close_bdev(device);
	if (device->bdev) {
		fs_devices->open_devices--;
		device->bdev = NULL;
	}
	clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	btrfs_destroy_dev_zone_info(device);

	device->fs_info = NULL;
	atomic_set(&device->dev_stats_ccnt, 0);
	extent_io_tree_release(&device->alloc_state);

	/*
	 * Reset the flush error record. We might have a transient flush error
	 * in this mount, and if so we aborted the current transaction and set
	 * the fs to an error state, guaranteeing no super blocks can be
	 * further committed. However that error might be transient and if we
	 * unmount the filesystem and mount it again, we should allow the mount
	 * to succeed (btrfs_check_rw_degradable() should not fail) - if after
	 * mounting the filesystem again we still get flush errors, then we
	 * will again abort any transaction and set the error state,
	 * guaranteeing no commits of unsafe super blocks.
	 */
	device->last_flush_error = 0;

	/* Verify the device is back in a pristine state */
	ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
	ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	ASSERT(list_empty(&device->dev_alloc_list));
	ASSERT(list_empty(&device->post_commit_list));
}

static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	lockdep_assert_held(&uuid_mutex);

	if (--fs_devices->opened > 0)
		return;

	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list)
		btrfs_close_one_device(device);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = false;
	fs_devices->fs_info = NULL;
}

void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	LIST_HEAD(list);
	struct btrfs_fs_devices *tmp;

	mutex_lock(&uuid_mutex);
	close_fs_devices(fs_devices);
	if (!fs_devices->opened)
		list_splice_init(&fs_devices->seed_list, &list);

	list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
		close_fs_devices(fs_devices);
		list_del(&fs_devices->seed_list);
		free_fs_devices(fs_devices);
	}
	mutex_unlock(&uuid_mutex);
}

static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
			   fmode_t flags, void *holder)
{
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_device *tmp_device;

	flags |= FMODE_EXCL;

	list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
				 dev_list) {
		int ret;

		ret = btrfs_open_one_device(fs_devices, device, flags, holder);
		if (ret == 0 &&
		    (!latest_dev || device->generation > latest_dev->generation)) {
			latest_dev = device;
		} else if (ret == -ENODATA) {
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);
		}
	}
	if (fs_devices->open_devices == 0)
		return -EINVAL;

	fs_devices->opened = 1;
	fs_devices->latest_dev = latest_dev;
	fs_devices->total_rw_bytes = 0;
	fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;
	fs_devices->read_policy = BTRFS_READ_POLICY_PID;

	return 0;
}

static int devid_cmp(void *priv, const struct list_head *a,
		     const struct list_head *b)
{
	const struct btrfs_device *dev1, *dev2;

	dev1 = list_entry(a, struct btrfs_device, dev_list);
	dev2 = list_entry(b, struct btrfs_device, dev_list);

	if (dev1->devid < dev2->devid)
		return -1;
	else if (dev1->devid > dev2->devid)
		return 1;
	return 0;
}
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	lockdep_assert_held(&uuid_mutex);
	/*
	 * The device_list_mutex cannot be taken here in case opening the
	 * underlying device takes further locks like open_mutex.
	 *
	 * We also don't need the lock here as this is called during mount and
	 * exclusion is provided by uuid_mutex
	 */

	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		list_sort(NULL, &fs_devices->devices, devid_cmp);
		ret = open_fs_devices(fs_devices, flags, holder);
	}

	return ret;
}

void btrfs_release_disk_super(struct btrfs_super_block *super)
{
	struct page *page = virt_to_page(super);

	put_page(page);
}

static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
						       u64 bytenr, u64 bytenr_orig)
{
	struct btrfs_super_block *disk_super;
	struct page *page;
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= bdev_nr_bytes(bdev))
		return ERR_PTR(-EINVAL);

	/* make sure our super fits in the page */
	if (sizeof(*disk_super) > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
		return ERR_PTR(-EINVAL);

	/* pull in the page with our super */
	page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL);

	if (IS_ERR(page))
		return ERR_CAST(page);

	p = page_address(page);

	/* align our pointer to the offset of the super block */
	disk_super = p + offset_in_page(bytenr);

	if (btrfs_super_bytenr(disk_super) != bytenr_orig ||
	    btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(p);
		return ERR_PTR(-EINVAL);
	}

	if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
		disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;

	return disk_super;
}

int btrfs_forget_devices(dev_t devt)
{
	int ret;

	mutex_lock(&uuid_mutex);
	ret = btrfs_free_stale_devices(devt, NULL);
	mutex_unlock(&uuid_mutex);

	return ret;
}
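/*
 * Example: btrfs_read_disk_super() above returns a pointer into a page cache
 * page, so every successful call must be paired with
 * btrfs_release_disk_super() to drop the page reference (sketch; see
 * btrfs_scan_one_device() below for the real caller):
 *
 *	disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr_orig);
 *	if (IS_ERR(disk_super))
 *		return ERR_CAST(disk_super);
 *	...
 *	btrfs_release_disk_super(disk_super);
 */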
/*
 * Look for a btrfs signature on a device. This may be called out of the mount
 * path and we are not allowed to call set_blocksize during the scan. The
 * superblock is read via pagecache.
 */
struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
					   void *holder)
{
	struct btrfs_super_block *disk_super;
	bool new_device_added = false;
	struct btrfs_device *device = NULL;
	struct block_device *bdev;
	u64 bytenr, bytenr_orig;
	int ret;

	lockdep_assert_held(&uuid_mutex);

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	flags |= FMODE_EXCL;

	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	bytenr_orig = btrfs_sb_offset(0);
	ret = btrfs_sb_log_location_bdev(bdev, 0, READ, &bytenr);
	if (ret) {
		device = ERR_PTR(ret);
		goto error_bdev_put;
	}

	disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr_orig);
	if (IS_ERR(disk_super)) {
		device = ERR_CAST(disk_super);
		goto error_bdev_put;
	}

	device = device_list_add(path, disk_super, &new_device_added);
	if (!IS_ERR(device) && new_device_added)
		btrfs_free_stale_devices(device->devt, device);

	btrfs_release_disk_super(disk_super);

error_bdev_put:
	blkdev_put(bdev, flags);

	return device;
}

/*
 * Try to find a chunk that intersects [start, start + len] range and when one
 * such is found, record the end of it in *start
 */
static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
				    u64 len)
{
	u64 physical_start, physical_end;

	lockdep_assert_held(&device->fs_info->chunk_mutex);

	if (!find_first_extent_bit(&device->alloc_state, *start,
				   &physical_start, &physical_end,
				   CHUNK_ALLOCATED, NULL)) {

		if (in_range(physical_start, *start, len) ||
		    in_range(*start, physical_start,
			     physical_end - physical_start)) {
			*start = physical_end + 1;
			return true;
		}
	}
	return false;
}
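/*
 * Worked example for contains_pending_extent() (numbers are illustrative):
 * with a CHUNK_ALLOCATED range recorded at [16M, 24M) in device->alloc_state,
 * a query with *start = 20M and len = 2M overlaps it, so *start is advanced
 * to 24M (physical_end is inclusive, hence the + 1) and true is returned;
 * the caller is expected to retry its hole search from the new *start.
 */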
static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
{
	switch (device->fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		/*
		 * We don't want to overwrite the superblock on the drive nor
		 * any area used by the boot loader (grub for example), so we
		 * make sure to start at an offset of at least 1MB.
		 */
		return max_t(u64, start, SZ_1M);
	case BTRFS_CHUNK_ALLOC_ZONED:
		/*
		 * We don't care about the starting region like regular
		 * allocator, because we anyway use/reserve the first two zones
		 * for superblock logging.
		 */
		return ALIGN(start, device->zone_info->zone_size);
	default:
		BUG();
	}
}

static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
					u64 *hole_start, u64 *hole_size,
					u64 num_bytes)
{
	u64 zone_size = device->zone_info->zone_size;
	u64 pos;
	int ret;
	bool changed = false;

	ASSERT(IS_ALIGNED(*hole_start, zone_size));

	while (*hole_size > 0) {
		pos = btrfs_find_allocatable_zones(device, *hole_start,
						   *hole_start + *hole_size,
						   num_bytes);
		if (pos != *hole_start) {
			*hole_size = *hole_start + *hole_size - pos;
			*hole_start = pos;
			changed = true;
			if (*hole_size < num_bytes)
				break;
		}

		ret = btrfs_ensure_empty_zones(device, pos, num_bytes);

		/* Range is ensured to be empty */
		if (!ret)
			return changed;

		/* Given hole range was invalid (outside of device) */
		if (ret == -ERANGE) {
			*hole_start += *hole_size;
			*hole_size = 0;
			return true;
		}

		*hole_start += zone_size;
		*hole_size -= zone_size;
		changed = true;
	}

	return changed;
}

/**
 * dev_extent_hole_check - check if specified hole is suitable for allocation
 * @device:	the device which we have the hole
 * @hole_start: starting position of the hole
 * @hole_size:	the size of the hole
 * @num_bytes:	the size of the free space that we need
 *
 * This function may modify @hole_start and @hole_size to reflect the suitable
 * position for allocation. Returns true if hole position is updated, false
 * otherwise.
 */
static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
				  u64 *hole_size, u64 num_bytes)
{
	bool changed = false;
	u64 hole_end = *hole_start + *hole_size;

	for (;;) {
		/*
		 * Check before we set max_hole_start, otherwise we could end up
		 * sending back this offset anyway.
		 */
		if (contains_pending_extent(device, hole_start, *hole_size)) {
			if (hole_end >= *hole_start)
				*hole_size = hole_end - *hole_start;
			else
				*hole_size = 0;
			changed = true;
		}

		switch (device->fs_devices->chunk_alloc_policy) {
		case BTRFS_CHUNK_ALLOC_REGULAR:
			/* No extra check */
			break;
		case BTRFS_CHUNK_ALLOC_ZONED:
			if (dev_extent_hole_check_zoned(device, hole_start,
							hole_size, num_bytes)) {
				changed = true;
				/*
				 * The changed hole can contain pending extent.
				 * Loop again to check that.
				 */
				continue;
			}
			break;
		default:
			BUG();
		}

		break;
	}

	return changed;
}
/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size
 *		  of the max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find one. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 *
 * NOTE: This function will search *commit* root of device tree, and does extra
 * check to ensure dev extents are not double allocated.
 * This makes the function safe to allocate dev extents but may not report
 * correct usable device space, as device extents freed in the current
 * transaction are not reported as available.
 */
static int find_free_dev_extent_start(struct btrfs_device *device,
				u64 num_bytes, u64 search_start, u64 *start,
				u64 *len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	search_start = dev_extent_search_start(device, search_start);

	WARN_ON(device->zone_info &&
		!IS_ALIGNED(num_bytes, device->zone_info->zone_size));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

again:
	if (search_start >= search_end ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_backwards(root, &key, path);
	if (ret < 0)
		goto out;

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;
			dev_extent_hole_check(device, &search_start, &hole_size,
					      num_bytes);

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than which we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}
	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;
		if (dev_extent_hole_check(device, &search_start, &hole_size,
					  num_bytes)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(device, num_bytes, 0, start, len);
}
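/*
 * Example: calling convention of find_free_dev_extent() (sketch; the chunk
 * allocator is the real user and fs_info->chunk_mutex must be held):
 *
 *	u64 start, len;
 *	int ret;
 *
 *	ret = find_free_dev_extent(device, SZ_1G, &start, &len);
 *
 * On success, [start, start + SZ_1G) is free on this device.  On -ENOSPC,
 * start/len instead describe the largest hole that was found.
 */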
matched"); 1786 ret = -EUCLEAN; 1787 goto error; 1788 } 1789 1790 ret = btrfs_previous_item(fs_info->chunk_root, path, 1791 BTRFS_DEV_ITEMS_OBJECTID, 1792 BTRFS_DEV_ITEM_KEY); 1793 if (ret) { 1794 *devid_ret = 1; 1795 } else { 1796 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 1797 path->slots[0]); 1798 *devid_ret = found_key.offset + 1; 1799 } 1800 ret = 0; 1801 error: 1802 btrfs_free_path(path); 1803 return ret; 1804 } 1805 1806 /* 1807 * the device information is stored in the chunk root 1808 * the btrfs_device struct should be fully filled in 1809 */ 1810 static int btrfs_add_dev_item(struct btrfs_trans_handle *trans, 1811 struct btrfs_device *device) 1812 { 1813 int ret; 1814 struct btrfs_path *path; 1815 struct btrfs_dev_item *dev_item; 1816 struct extent_buffer *leaf; 1817 struct btrfs_key key; 1818 unsigned long ptr; 1819 1820 path = btrfs_alloc_path(); 1821 if (!path) 1822 return -ENOMEM; 1823 1824 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1825 key.type = BTRFS_DEV_ITEM_KEY; 1826 key.offset = device->devid; 1827 1828 btrfs_reserve_chunk_metadata(trans, true); 1829 ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path, 1830 &key, sizeof(*dev_item)); 1831 btrfs_trans_release_chunk_metadata(trans); 1832 if (ret) 1833 goto out; 1834 1835 leaf = path->nodes[0]; 1836 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 1837 1838 btrfs_set_device_id(leaf, dev_item, device->devid); 1839 btrfs_set_device_generation(leaf, dev_item, 0); 1840 btrfs_set_device_type(leaf, dev_item, device->type); 1841 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 1842 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 1843 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 1844 btrfs_set_device_total_bytes(leaf, dev_item, 1845 btrfs_device_get_disk_total_bytes(device)); 1846 btrfs_set_device_bytes_used(leaf, dev_item, 1847 btrfs_device_get_bytes_used(device)); 1848 btrfs_set_device_group(leaf, dev_item, 0); 1849 btrfs_set_device_seek_speed(leaf, dev_item, 0); 1850 btrfs_set_device_bandwidth(leaf, dev_item, 0); 1851 btrfs_set_device_start_offset(leaf, dev_item, 0); 1852 1853 ptr = btrfs_device_uuid(dev_item); 1854 write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 1855 ptr = btrfs_device_fsid(dev_item); 1856 write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid, 1857 ptr, BTRFS_FSID_SIZE); 1858 btrfs_mark_buffer_dirty(leaf); 1859 1860 ret = 0; 1861 out: 1862 btrfs_free_path(path); 1863 return ret; 1864 } 1865 1866 /* 1867 * Function to update ctime/mtime for a given device path. 1868 * Mainly used for ctime/mtime based probe like libblkid. 1869 * 1870 * We don't care about errors here, this is just to be kind to userspace. 
/*
 * Function to update ctime/mtime for a given device path.
 * Mainly used for ctime/mtime based probe like libblkid.
 *
 * We don't care about errors here, this is just to be kind to userspace.
 */
static void update_dev_time(const char *device_path)
{
	struct path path;
	struct timespec64 now;
	int ret;

	ret = kern_path(device_path, LOOKUP_FOLLOW, &path);
	if (ret)
		return;

	now = current_time(d_inode(path.dentry));
	inode_update_time(d_inode(path.dentry), &now, S_MTIME | S_CTIME);
	path_put(&path);
}

static int btrfs_rm_dev_item(struct btrfs_trans_handle *trans,
			     struct btrfs_device *device)
{
	struct btrfs_root *root = device->fs_info->chunk_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	btrfs_reserve_chunk_metadata(trans, false);
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	btrfs_trans_release_chunk_metadata(trans);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Verify that @num_devices satisfies the RAID profile constraints in the whole
 * filesystem. It's up to the caller to adjust that number regarding eg. device
 * replace.
 */
static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
					u64 num_devices)
{
	u64 all_avail;
	unsigned seq;
	int i;

	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		all_avail = fs_info->avail_data_alloc_bits |
			    fs_info->avail_system_alloc_bits |
			    fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
		if (!(all_avail & btrfs_raid_array[i].bg_flag))
			continue;

		if (num_devices < btrfs_raid_array[i].devs_min)
			return btrfs_raid_array[i].mindev_error;
	}

	return 0;
}

static struct btrfs_device * btrfs_find_next_active_device(
		struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
{
	struct btrfs_device *next_device;

	list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
		if (next_device != device &&
		    !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
		    && next_device->bdev)
			return next_device;
	}

	return NULL;
}

/*
 * Helper function to check if the given device is part of s_bdev / latest_dev
 * and replace it with the provided or the next active device. In the context
 * where this function is called, there should always be another device (or
 * this_dev) which is active.
 */
void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
					    struct btrfs_device *next_device)
{
	struct btrfs_fs_info *fs_info = device->fs_info;

	if (!next_device)
		next_device = btrfs_find_next_active_device(fs_info->fs_devices,
							    device);
	ASSERT(next_device);

	if (fs_info->sb->s_bdev &&
	    (fs_info->sb->s_bdev == device->bdev))
		fs_info->sb->s_bdev = next_device->bdev;

	if (fs_info->fs_devices->latest_dev->bdev == device->bdev)
		fs_info->fs_devices->latest_dev = next_device;
}
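/*
 * Example: as the btrfs_check_raid_min_devices() comment notes, callers
 * adjust the device count themselves; device removal checks what the
 * filesystem would look like after the device is gone (see btrfs_rm_device()
 * below):
 *
 *	ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
 */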
1991 */ 1992 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info) 1993 { 1994 u64 num_devices = fs_info->fs_devices->num_devices; 1995 1996 down_read(&fs_info->dev_replace.rwsem); 1997 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) { 1998 ASSERT(num_devices > 1); 1999 num_devices--; 2000 } 2001 up_read(&fs_info->dev_replace.rwsem); 2002 2003 return num_devices; 2004 } 2005 2006 void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info, 2007 struct block_device *bdev, 2008 const char *device_path) 2009 { 2010 struct btrfs_super_block *disk_super; 2011 int copy_num; 2012 2013 if (!bdev) 2014 return; 2015 2016 for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) { 2017 struct page *page; 2018 int ret; 2019 2020 disk_super = btrfs_read_dev_one_super(bdev, copy_num); 2021 if (IS_ERR(disk_super)) 2022 continue; 2023 2024 if (bdev_is_zoned(bdev)) { 2025 btrfs_reset_sb_log_zones(bdev, copy_num); 2026 continue; 2027 } 2028 2029 memset(&disk_super->magic, 0, sizeof(disk_super->magic)); 2030 2031 page = virt_to_page(disk_super); 2032 set_page_dirty(page); 2033 lock_page(page); 2034 /* write_one_page() unlocks the page */ 2035 ret = write_one_page(page); 2036 if (ret) 2037 btrfs_warn(fs_info, 2038 "error clearing superblock number %d (%d)", 2039 copy_num, ret); 2040 btrfs_release_disk_super(disk_super); 2041 2042 } 2043 2044 /* Notify udev that device has changed */ 2045 btrfs_kobject_uevent(bdev, KOBJ_CHANGE); 2046 2047 /* Update ctime/mtime for device path for libblkid */ 2048 update_dev_time(device_path); 2049 } 2050 2051 int btrfs_rm_device(struct btrfs_fs_info *fs_info, 2052 struct btrfs_dev_lookup_args *args, 2053 struct block_device **bdev, fmode_t *mode) 2054 { 2055 struct btrfs_trans_handle *trans; 2056 struct btrfs_device *device; 2057 struct btrfs_fs_devices *cur_devices; 2058 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2059 u64 num_devices; 2060 int ret = 0; 2061 2062 if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { 2063 btrfs_err(fs_info, "device remove not supported on extent tree v2 yet"); 2064 return -EINVAL; 2065 } 2066 2067 /* 2068 * The device list in fs_devices is accessed without locks (neither 2069 * uuid_mutex nor device_list_mutex) as it won't change on a mounted 2070 * filesystem and another device rm cannot run.
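 *
 * E.g. on a two-device raid1 filesystem the check below fails with the
 * profile's mindev_error, because num_devices - 1 == 1 is less than
 * raid1's devs_min of 2 (see btrfs_check_raid_min_devices() above).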
2071 */ 2072 num_devices = btrfs_num_devices(fs_info); 2073 2074 ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1); 2075 if (ret) 2076 return ret; 2077 2078 device = btrfs_find_device(fs_info->fs_devices, args); 2079 if (!device) { 2080 if (args->missing) 2081 ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND; 2082 else 2083 ret = -ENOENT; 2084 return ret; 2085 } 2086 2087 if (btrfs_pinned_by_swapfile(fs_info, device)) { 2088 btrfs_warn_in_rcu(fs_info, 2089 "cannot remove device %s (devid %llu) due to active swapfile", 2090 rcu_str_deref(device->name), device->devid); 2091 return -ETXTBSY; 2092 } 2093 2094 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 2095 return BTRFS_ERROR_DEV_TGT_REPLACE; 2096 2097 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 2098 fs_info->fs_devices->rw_devices == 1) 2099 return BTRFS_ERROR_DEV_ONLY_WRITABLE; 2100 2101 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2102 mutex_lock(&fs_info->chunk_mutex); 2103 list_del_init(&device->dev_alloc_list); 2104 device->fs_devices->rw_devices--; 2105 mutex_unlock(&fs_info->chunk_mutex); 2106 } 2107 2108 ret = btrfs_shrink_device(device, 0); 2109 if (ret) 2110 goto error_undo; 2111 2112 trans = btrfs_start_transaction(fs_info->chunk_root, 0); 2113 if (IS_ERR(trans)) { 2114 ret = PTR_ERR(trans); 2115 goto error_undo; 2116 } 2117 2118 ret = btrfs_rm_dev_item(trans, device); 2119 if (ret) { 2120 /* Any error in dev item removal is critical */ 2121 btrfs_crit(fs_info, 2122 "failed to remove device item for devid %llu: %d", 2123 device->devid, ret); 2124 btrfs_abort_transaction(trans, ret); 2125 btrfs_end_transaction(trans); 2126 return ret; 2127 } 2128 2129 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2130 btrfs_scrub_cancel_dev(device); 2131 2132 /* 2133 * the device list mutex makes sure that we don't change 2134 * the device list while someone else is writing out all 2135 * the device supers. Whoever is writing all supers, should 2136 * lock the device list mutex before getting the number of 2137 * devices in the super block (super_copy). Conversely, 2138 * whoever updates the number of devices in the super block 2139 * (super_copy) should hold the device list mutex. 2140 */ 2141 2142 /* 2143 * In normal cases the cur_devices == fs_devices. But in case 2144 * of deleting a seed device, the cur_devices should point to 2145 * its own fs_devices listed under the fs_devices->seed_list. 2146 */ 2147 cur_devices = device->fs_devices; 2148 mutex_lock(&fs_devices->device_list_mutex); 2149 list_del_rcu(&device->dev_list); 2150 2151 cur_devices->num_devices--; 2152 cur_devices->total_devices--; 2153 /* Update total_devices of the parent fs_devices if it's seed */ 2154 if (cur_devices != fs_devices) 2155 fs_devices->total_devices--; 2156 2157 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) 2158 cur_devices->missing_devices--; 2159 2160 btrfs_assign_next_active_device(device, NULL); 2161 2162 if (device->bdev) { 2163 cur_devices->open_devices--; 2164 /* remove sysfs entry */ 2165 btrfs_sysfs_remove_device(device); 2166 } 2167 2168 num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1; 2169 btrfs_set_super_num_devices(fs_info->super_copy, num_devices); 2170 mutex_unlock(&fs_devices->device_list_mutex); 2171 2172 /* 2173 * At this point, the device is zero sized and detached from the 2174 * devices list. All that's left is to zero out the old supers and 2175 * free the device. 
2176 * 2177 * We cannot call btrfs_close_bdev() here because we're holding the sb 2178 * write lock, and blkdev_put() will pull in the ->open_mutex on the 2179 * block device and it's dependencies. Instead just flush the device 2180 * and let the caller do the final blkdev_put. 2181 */ 2182 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2183 btrfs_scratch_superblocks(fs_info, device->bdev, 2184 device->name->str); 2185 if (device->bdev) { 2186 sync_blockdev(device->bdev); 2187 invalidate_bdev(device->bdev); 2188 } 2189 } 2190 2191 *bdev = device->bdev; 2192 *mode = device->mode; 2193 synchronize_rcu(); 2194 btrfs_free_device(device); 2195 2196 /* 2197 * This can happen if cur_devices is the private seed devices list. We 2198 * cannot call close_fs_devices() here because it expects the uuid_mutex 2199 * to be held, but in fact we don't need that for the private 2200 * seed_devices, we can simply decrement cur_devices->opened and then 2201 * remove it from our list and free the fs_devices. 2202 */ 2203 if (cur_devices->num_devices == 0) { 2204 list_del_init(&cur_devices->seed_list); 2205 ASSERT(cur_devices->opened == 1); 2206 cur_devices->opened--; 2207 free_fs_devices(cur_devices); 2208 } 2209 2210 ret = btrfs_commit_transaction(trans); 2211 2212 return ret; 2213 2214 error_undo: 2215 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2216 mutex_lock(&fs_info->chunk_mutex); 2217 list_add(&device->dev_alloc_list, 2218 &fs_devices->alloc_list); 2219 device->fs_devices->rw_devices++; 2220 mutex_unlock(&fs_info->chunk_mutex); 2221 } 2222 return ret; 2223 } 2224 2225 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev) 2226 { 2227 struct btrfs_fs_devices *fs_devices; 2228 2229 lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex); 2230 2231 /* 2232 * in case of fs with no seed, srcdev->fs_devices will point 2233 * to fs_devices of fs_info. However when the dev being replaced is 2234 * a seed dev it will point to the seed's local fs_devices. In short 2235 * srcdev will have its correct fs_devices in both the cases. 2236 */ 2237 fs_devices = srcdev->fs_devices; 2238 2239 list_del_rcu(&srcdev->dev_list); 2240 list_del(&srcdev->dev_alloc_list); 2241 fs_devices->num_devices--; 2242 if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state)) 2243 fs_devices->missing_devices--; 2244 2245 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state)) 2246 fs_devices->rw_devices--; 2247 2248 if (srcdev->bdev) 2249 fs_devices->open_devices--; 2250 } 2251 2252 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev) 2253 { 2254 struct btrfs_fs_devices *fs_devices = srcdev->fs_devices; 2255 2256 mutex_lock(&uuid_mutex); 2257 2258 btrfs_close_bdev(srcdev); 2259 synchronize_rcu(); 2260 btrfs_free_device(srcdev); 2261 2262 /* if this is no devs we rather delete the fs_devices */ 2263 if (!fs_devices->num_devices) { 2264 /* 2265 * On a mounted FS, num_devices can't be zero unless it's a 2266 * seed. In case of a seed device being replaced, the replace 2267 * target added to the sprout FS, so there will be no more 2268 * device left under the seed FS. 
2269 */ 2270 ASSERT(fs_devices->seeding); 2271 2272 list_del_init(&fs_devices->seed_list); 2273 close_fs_devices(fs_devices); 2274 free_fs_devices(fs_devices); 2275 } 2276 mutex_unlock(&uuid_mutex); 2277 } 2278 2279 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev) 2280 { 2281 struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices; 2282 2283 mutex_lock(&fs_devices->device_list_mutex); 2284 2285 btrfs_sysfs_remove_device(tgtdev); 2286 2287 if (tgtdev->bdev) 2288 fs_devices->open_devices--; 2289 2290 fs_devices->num_devices--; 2291 2292 btrfs_assign_next_active_device(tgtdev, NULL); 2293 2294 list_del_rcu(&tgtdev->dev_list); 2295 2296 mutex_unlock(&fs_devices->device_list_mutex); 2297 2298 btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev, 2299 tgtdev->name->str); 2300 2301 btrfs_close_bdev(tgtdev); 2302 synchronize_rcu(); 2303 btrfs_free_device(tgtdev); 2304 } 2305 2306 /** 2307 * Populate args from device at path 2308 * 2309 * @fs_info: the filesystem 2310 * @args: the args to populate 2311 * @path: the path to the device 2312 * 2313 * This will read the super block of the device at @path and populate @args with 2314 * the devid, fsid, and uuid. This is meant to be used for ioctls that need to 2315 * lookup a device to operate on, but need to do it before we take any locks. 2316 * This properly handles the special case of "missing" that a user may pass in, 2317 * and does some basic sanity checks. The caller must make sure that @path is 2318 * properly NUL terminated before calling in, and must call 2319 * btrfs_put_dev_args_from_path() in order to free up the temporary fsid and 2320 * uuid buffers. 2321 * 2322 * Return: 0 for success, -errno for failure 2323 */ 2324 int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info, 2325 struct btrfs_dev_lookup_args *args, 2326 const char *path) 2327 { 2328 struct btrfs_super_block *disk_super; 2329 struct block_device *bdev; 2330 int ret; 2331 2332 if (!path || !path[0]) 2333 return -EINVAL; 2334 if (!strcmp(path, "missing")) { 2335 args->missing = true; 2336 return 0; 2337 } 2338 2339 args->uuid = kzalloc(BTRFS_UUID_SIZE, GFP_KERNEL); 2340 args->fsid = kzalloc(BTRFS_FSID_SIZE, GFP_KERNEL); 2341 if (!args->uuid || !args->fsid) { 2342 btrfs_put_dev_args_from_path(args); 2343 return -ENOMEM; 2344 } 2345 2346 ret = btrfs_get_bdev_and_sb(path, FMODE_READ, fs_info->bdev_holder, 0, 2347 &bdev, &disk_super); 2348 if (ret) 2349 return ret; 2350 args->devid = btrfs_stack_device_id(&disk_super->dev_item); 2351 memcpy(args->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE); 2352 if (btrfs_fs_incompat(fs_info, METADATA_UUID)) 2353 memcpy(args->fsid, disk_super->metadata_uuid, BTRFS_FSID_SIZE); 2354 else 2355 memcpy(args->fsid, disk_super->fsid, BTRFS_FSID_SIZE); 2356 btrfs_release_disk_super(disk_super); 2357 blkdev_put(bdev, FMODE_READ); 2358 return 0; 2359 } 2360 2361 /* 2362 * Only use this jointly with btrfs_get_dev_args_from_path() because we will 2363 * allocate our ->uuid and ->fsid pointers, everybody else uses local variables 2364 * that don't need to be freed. 
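 *
 * A typical caller pairs the two, as btrfs_find_device_by_devspec()
 * does right below:
 *
 *	BTRFS_DEV_LOOKUP_ARGS(args);
 *
 *	ret = btrfs_get_dev_args_from_path(fs_info, &args, path);
 *	if (ret)
 *		return ERR_PTR(ret);
 *	device = btrfs_find_device(fs_info->fs_devices, &args);
 *	btrfs_put_dev_args_from_path(&args);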
2365 */ 2366 void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args) 2367 { 2368 kfree(args->uuid); 2369 kfree(args->fsid); 2370 args->uuid = NULL; 2371 args->fsid = NULL; 2372 } 2373 2374 struct btrfs_device *btrfs_find_device_by_devspec( 2375 struct btrfs_fs_info *fs_info, u64 devid, 2376 const char *device_path) 2377 { 2378 BTRFS_DEV_LOOKUP_ARGS(args); 2379 struct btrfs_device *device; 2380 int ret; 2381 2382 if (devid) { 2383 args.devid = devid; 2384 device = btrfs_find_device(fs_info->fs_devices, &args); 2385 if (!device) 2386 return ERR_PTR(-ENOENT); 2387 return device; 2388 } 2389 2390 ret = btrfs_get_dev_args_from_path(fs_info, &args, device_path); 2391 if (ret) 2392 return ERR_PTR(ret); 2393 device = btrfs_find_device(fs_info->fs_devices, &args); 2394 btrfs_put_dev_args_from_path(&args); 2395 if (!device) 2396 return ERR_PTR(-ENOENT); 2397 return device; 2398 } 2399 2400 static struct btrfs_fs_devices *btrfs_init_sprout(struct btrfs_fs_info *fs_info) 2401 { 2402 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2403 struct btrfs_fs_devices *old_devices; 2404 struct btrfs_fs_devices *seed_devices; 2405 2406 lockdep_assert_held(&uuid_mutex); 2407 if (!fs_devices->seeding) 2408 return ERR_PTR(-EINVAL); 2409 2410 /* 2411 * Private copy of the seed devices, anchored at 2412 * fs_info->fs_devices->seed_list 2413 */ 2414 seed_devices = alloc_fs_devices(NULL, NULL); 2415 if (IS_ERR(seed_devices)) 2416 return seed_devices; 2417 2418 /* 2419 * It's necessary to retain a copy of the original seed fs_devices in 2420 * fs_uuids so that filesystems which have been seeded can successfully 2421 * reference the seed device from open_seed_devices. This also supports 2422 * multiple fs seed. 2423 */ 2424 old_devices = clone_fs_devices(fs_devices); 2425 if (IS_ERR(old_devices)) { 2426 kfree(seed_devices); 2427 return old_devices; 2428 } 2429 2430 list_add(&old_devices->fs_list, &fs_uuids); 2431 2432 memcpy(seed_devices, fs_devices, sizeof(*seed_devices)); 2433 seed_devices->opened = 1; 2434 INIT_LIST_HEAD(&seed_devices->devices); 2435 INIT_LIST_HEAD(&seed_devices->alloc_list); 2436 mutex_init(&seed_devices->device_list_mutex); 2437 2438 return seed_devices; 2439 } 2440 2441 /* 2442 * Splice seed devices into the sprout fs_devices. 2443 * Generate a new fsid for the sprouted read-write filesystem. 2444 */ 2445 static void btrfs_setup_sprout(struct btrfs_fs_info *fs_info, 2446 struct btrfs_fs_devices *seed_devices) 2447 { 2448 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2449 struct btrfs_super_block *disk_super = fs_info->super_copy; 2450 struct btrfs_device *device; 2451 u64 super_flags; 2452 2453 /* 2454 * We are updating the fsid, the thread leading to device_list_add() 2455 * could race, so uuid_mutex is needed. 2456 */ 2457 lockdep_assert_held(&uuid_mutex); 2458 2459 /* 2460 * The threads listed below may traverse dev_list but can do that without 2461 * device_list_mutex: 2462 * - All device ops and balance - as we are in btrfs_exclop_start. 2463 * - Various dev_list readers - are using RCU. 2464 * - btrfs_ioctl_fitrim() - is using RCU. 
2465 * 2466 * For-read threads as below are using device_list_mutex: 2467 * - Readonly scrub btrfs_scrub_dev() 2468 * - Readonly scrub btrfs_scrub_progress() 2469 * - btrfs_get_dev_stats() 2470 */ 2471 lockdep_assert_held(&fs_devices->device_list_mutex); 2472 2473 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices, 2474 synchronize_rcu); 2475 list_for_each_entry(device, &seed_devices->devices, dev_list) 2476 device->fs_devices = seed_devices; 2477 2478 fs_devices->seeding = false; 2479 fs_devices->num_devices = 0; 2480 fs_devices->open_devices = 0; 2481 fs_devices->missing_devices = 0; 2482 fs_devices->rotating = false; 2483 list_add(&seed_devices->seed_list, &fs_devices->seed_list); 2484 2485 generate_random_uuid(fs_devices->fsid); 2486 memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE); 2487 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); 2488 2489 super_flags = btrfs_super_flags(disk_super) & 2490 ~BTRFS_SUPER_FLAG_SEEDING; 2491 btrfs_set_super_flags(disk_super, super_flags); 2492 } 2493 2494 /* 2495 * Store the expected generation for seed devices in device items. 2496 */ 2497 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans) 2498 { 2499 BTRFS_DEV_LOOKUP_ARGS(args); 2500 struct btrfs_fs_info *fs_info = trans->fs_info; 2501 struct btrfs_root *root = fs_info->chunk_root; 2502 struct btrfs_path *path; 2503 struct extent_buffer *leaf; 2504 struct btrfs_dev_item *dev_item; 2505 struct btrfs_device *device; 2506 struct btrfs_key key; 2507 u8 fs_uuid[BTRFS_FSID_SIZE]; 2508 u8 dev_uuid[BTRFS_UUID_SIZE]; 2509 int ret; 2510 2511 path = btrfs_alloc_path(); 2512 if (!path) 2513 return -ENOMEM; 2514 2515 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2516 key.offset = 0; 2517 key.type = BTRFS_DEV_ITEM_KEY; 2518 2519 while (1) { 2520 btrfs_reserve_chunk_metadata(trans, false); 2521 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2522 btrfs_trans_release_chunk_metadata(trans); 2523 if (ret < 0) 2524 goto error; 2525 2526 leaf = path->nodes[0]; 2527 next_slot: 2528 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 2529 ret = btrfs_next_leaf(root, path); 2530 if (ret > 0) 2531 break; 2532 if (ret < 0) 2533 goto error; 2534 leaf = path->nodes[0]; 2535 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2536 btrfs_release_path(path); 2537 continue; 2538 } 2539 2540 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2541 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID || 2542 key.type != BTRFS_DEV_ITEM_KEY) 2543 break; 2544 2545 dev_item = btrfs_item_ptr(leaf, path->slots[0], 2546 struct btrfs_dev_item); 2547 args.devid = btrfs_device_id(leaf, dev_item); 2548 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 2549 BTRFS_UUID_SIZE); 2550 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 2551 BTRFS_FSID_SIZE); 2552 args.uuid = dev_uuid; 2553 args.fsid = fs_uuid; 2554 device = btrfs_find_device(fs_info->fs_devices, &args); 2555 BUG_ON(!device); /* Logic error */ 2556 2557 if (device->fs_devices->seeding) { 2558 btrfs_set_device_generation(leaf, dev_item, 2559 device->generation); 2560 btrfs_mark_buffer_dirty(leaf); 2561 } 2562 2563 path->slots[0]++; 2564 goto next_slot; 2565 } 2566 ret = 0; 2567 error: 2568 btrfs_free_path(path); 2569 return ret; 2570 } 2571 2572 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path) 2573 { 2574 struct btrfs_root *root = fs_info->dev_root; 2575 struct btrfs_trans_handle *trans; 2576 struct btrfs_device *device; 2577 struct block_device *bdev; 2578 struct 
super_block *sb = fs_info->sb; 2579 struct rcu_string *name; 2580 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2581 struct btrfs_fs_devices *seed_devices; 2582 u64 orig_super_total_bytes; 2583 u64 orig_super_num_devices; 2584 int ret = 0; 2585 bool seeding_dev = false; 2586 bool locked = false; 2587 2588 if (sb_rdonly(sb) && !fs_devices->seeding) 2589 return -EROFS; 2590 2591 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL, 2592 fs_info->bdev_holder); 2593 if (IS_ERR(bdev)) 2594 return PTR_ERR(bdev); 2595 2596 if (!btrfs_check_device_zone_type(fs_info, bdev)) { 2597 ret = -EINVAL; 2598 goto error; 2599 } 2600 2601 if (fs_devices->seeding) { 2602 seeding_dev = true; 2603 down_write(&sb->s_umount); 2604 mutex_lock(&uuid_mutex); 2605 locked = true; 2606 } 2607 2608 sync_blockdev(bdev); 2609 2610 rcu_read_lock(); 2611 list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) { 2612 if (device->bdev == bdev) { 2613 ret = -EEXIST; 2614 rcu_read_unlock(); 2615 goto error; 2616 } 2617 } 2618 rcu_read_unlock(); 2619 2620 device = btrfs_alloc_device(fs_info, NULL, NULL); 2621 if (IS_ERR(device)) { 2622 /* we can safely leave the fs_devices entry around */ 2623 ret = PTR_ERR(device); 2624 goto error; 2625 } 2626 2627 name = rcu_string_strdup(device_path, GFP_KERNEL); 2628 if (!name) { 2629 ret = -ENOMEM; 2630 goto error_free_device; 2631 } 2632 rcu_assign_pointer(device->name, name); 2633 2634 device->fs_info = fs_info; 2635 device->bdev = bdev; 2636 ret = lookup_bdev(device_path, &device->devt); 2637 if (ret) 2638 goto error_free_device; 2639 2640 ret = btrfs_get_dev_zone_info(device, false); 2641 if (ret) 2642 goto error_free_device; 2643 2644 trans = btrfs_start_transaction(root, 0); 2645 if (IS_ERR(trans)) { 2646 ret = PTR_ERR(trans); 2647 goto error_free_zone; 2648 } 2649 2650 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 2651 device->generation = trans->transid; 2652 device->io_width = fs_info->sectorsize; 2653 device->io_align = fs_info->sectorsize; 2654 device->sector_size = fs_info->sectorsize; 2655 device->total_bytes = 2656 round_down(bdev_nr_bytes(bdev), fs_info->sectorsize); 2657 device->disk_total_bytes = device->total_bytes; 2658 device->commit_total_bytes = device->total_bytes; 2659 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2660 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 2661 device->mode = FMODE_EXCL; 2662 device->dev_stats_valid = 1; 2663 set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE); 2664 2665 if (seeding_dev) { 2666 btrfs_clear_sb_rdonly(sb); 2667 2668 /* GFP_KERNEL allocation must not be under device_list_mutex */ 2669 seed_devices = btrfs_init_sprout(fs_info); 2670 if (IS_ERR(seed_devices)) { 2671 ret = PTR_ERR(seed_devices); 2672 btrfs_abort_transaction(trans, ret); 2673 goto error_trans; 2674 } 2675 } 2676 2677 mutex_lock(&fs_devices->device_list_mutex); 2678 if (seeding_dev) { 2679 btrfs_setup_sprout(fs_info, seed_devices); 2680 btrfs_assign_next_active_device(fs_info->fs_devices->latest_dev, 2681 device); 2682 } 2683 2684 device->fs_devices = fs_devices; 2685 2686 mutex_lock(&fs_info->chunk_mutex); 2687 list_add_rcu(&device->dev_list, &fs_devices->devices); 2688 list_add(&device->dev_alloc_list, &fs_devices->alloc_list); 2689 fs_devices->num_devices++; 2690 fs_devices->open_devices++; 2691 fs_devices->rw_devices++; 2692 fs_devices->total_devices++; 2693 fs_devices->total_rw_bytes += device->total_bytes; 2694 2695 atomic64_add(device->total_bytes, &fs_info->free_chunk_space); 2696 2697 if 
(!blk_queue_nonrot(bdev_get_queue(bdev))) 2698 fs_devices->rotating = true; 2699 2700 orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy); 2701 btrfs_set_super_total_bytes(fs_info->super_copy, 2702 round_down(orig_super_total_bytes + device->total_bytes, 2703 fs_info->sectorsize)); 2704 2705 orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy); 2706 btrfs_set_super_num_devices(fs_info->super_copy, 2707 orig_super_num_devices + 1); 2708 2709 /* 2710 * we've got more storage, clear any full flags on the space 2711 * infos 2712 */ 2713 btrfs_clear_space_info_full(fs_info); 2714 2715 mutex_unlock(&fs_info->chunk_mutex); 2716 2717 /* Add sysfs device entry */ 2718 btrfs_sysfs_add_device(device); 2719 2720 mutex_unlock(&fs_devices->device_list_mutex); 2721 2722 if (seeding_dev) { 2723 mutex_lock(&fs_info->chunk_mutex); 2724 ret = init_first_rw_device(trans); 2725 mutex_unlock(&fs_info->chunk_mutex); 2726 if (ret) { 2727 btrfs_abort_transaction(trans, ret); 2728 goto error_sysfs; 2729 } 2730 } 2731 2732 ret = btrfs_add_dev_item(trans, device); 2733 if (ret) { 2734 btrfs_abort_transaction(trans, ret); 2735 goto error_sysfs; 2736 } 2737 2738 if (seeding_dev) { 2739 ret = btrfs_finish_sprout(trans); 2740 if (ret) { 2741 btrfs_abort_transaction(trans, ret); 2742 goto error_sysfs; 2743 } 2744 2745 /* 2746 * fs_devices now represents the newly sprouted filesystem and 2747 * its fsid has been changed by btrfs_setup_sprout(). 2748 */ 2749 btrfs_sysfs_update_sprout_fsid(fs_devices); 2750 } 2751 2752 ret = btrfs_commit_transaction(trans); 2753 2754 if (seeding_dev) { 2755 mutex_unlock(&uuid_mutex); 2756 up_write(&sb->s_umount); 2757 locked = false; 2758 2759 if (ret) /* transaction commit */ 2760 return ret; 2761 2762 ret = btrfs_relocate_sys_chunks(fs_info); 2763 if (ret < 0) 2764 btrfs_handle_fs_error(fs_info, ret, 2765 "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command."); 2766 trans = btrfs_attach_transaction(root); 2767 if (IS_ERR(trans)) { 2768 if (PTR_ERR(trans) == -ENOENT) 2769 return 0; 2770 ret = PTR_ERR(trans); 2771 trans = NULL; 2772 goto error_sysfs; 2773 } 2774 ret = btrfs_commit_transaction(trans); 2775 } 2776 2777 /* 2778 * Now that we have written a new super block to this device, check all 2779 * other fs_devices lists to see whether device_path is still recorded 2780 * there as a scanned (alien) device, and forget it. 2781 * We can ignore the return value as it typically returns -EINVAL and 2782 * only succeeds if the device was an alien.
2783 */ 2784 btrfs_forget_devices(device->devt); 2785 2786 /* Update ctime/mtime for blkid or udev */ 2787 update_dev_time(device_path); 2788 2789 return ret; 2790 2791 error_sysfs: 2792 btrfs_sysfs_remove_device(device); 2793 mutex_lock(&fs_info->fs_devices->device_list_mutex); 2794 mutex_lock(&fs_info->chunk_mutex); 2795 list_del_rcu(&device->dev_list); 2796 list_del(&device->dev_alloc_list); 2797 fs_info->fs_devices->num_devices--; 2798 fs_info->fs_devices->open_devices--; 2799 fs_info->fs_devices->rw_devices--; 2800 fs_info->fs_devices->total_devices--; 2801 fs_info->fs_devices->total_rw_bytes -= device->total_bytes; 2802 atomic64_sub(device->total_bytes, &fs_info->free_chunk_space); 2803 btrfs_set_super_total_bytes(fs_info->super_copy, 2804 orig_super_total_bytes); 2805 btrfs_set_super_num_devices(fs_info->super_copy, 2806 orig_super_num_devices); 2807 mutex_unlock(&fs_info->chunk_mutex); 2808 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2809 error_trans: 2810 if (seeding_dev) 2811 btrfs_set_sb_rdonly(sb); 2812 if (trans) 2813 btrfs_end_transaction(trans); 2814 error_free_zone: 2815 btrfs_destroy_dev_zone_info(device); 2816 error_free_device: 2817 btrfs_free_device(device); 2818 error: 2819 blkdev_put(bdev, FMODE_EXCL); 2820 if (locked) { 2821 mutex_unlock(&uuid_mutex); 2822 up_write(&sb->s_umount); 2823 } 2824 return ret; 2825 } 2826 2827 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans, 2828 struct btrfs_device *device) 2829 { 2830 int ret; 2831 struct btrfs_path *path; 2832 struct btrfs_root *root = device->fs_info->chunk_root; 2833 struct btrfs_dev_item *dev_item; 2834 struct extent_buffer *leaf; 2835 struct btrfs_key key; 2836 2837 path = btrfs_alloc_path(); 2838 if (!path) 2839 return -ENOMEM; 2840 2841 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2842 key.type = BTRFS_DEV_ITEM_KEY; 2843 key.offset = device->devid; 2844 2845 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2846 if (ret < 0) 2847 goto out; 2848 2849 if (ret > 0) { 2850 ret = -ENOENT; 2851 goto out; 2852 } 2853 2854 leaf = path->nodes[0]; 2855 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 2856 2857 btrfs_set_device_id(leaf, dev_item, device->devid); 2858 btrfs_set_device_type(leaf, dev_item, device->type); 2859 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 2860 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 2861 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 2862 btrfs_set_device_total_bytes(leaf, dev_item, 2863 btrfs_device_get_disk_total_bytes(device)); 2864 btrfs_set_device_bytes_used(leaf, dev_item, 2865 btrfs_device_get_bytes_used(device)); 2866 btrfs_mark_buffer_dirty(leaf); 2867 2868 out: 2869 btrfs_free_path(path); 2870 return ret; 2871 } 2872 2873 int btrfs_grow_device(struct btrfs_trans_handle *trans, 2874 struct btrfs_device *device, u64 new_size) 2875 { 2876 struct btrfs_fs_info *fs_info = device->fs_info; 2877 struct btrfs_super_block *super_copy = fs_info->super_copy; 2878 u64 old_total; 2879 u64 diff; 2880 int ret; 2881 2882 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 2883 return -EACCES; 2884 2885 new_size = round_down(new_size, fs_info->sectorsize); 2886 2887 mutex_lock(&fs_info->chunk_mutex); 2888 old_total = btrfs_super_total_bytes(super_copy); 2889 diff = round_down(new_size - device->total_bytes, fs_info->sectorsize); 2890 2891 if (new_size <= device->total_bytes || 2892 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 2893 
mutex_unlock(&fs_info->chunk_mutex); 2894 return -EINVAL; 2895 } 2896 2897 btrfs_set_super_total_bytes(super_copy, 2898 round_down(old_total + diff, fs_info->sectorsize)); 2899 device->fs_devices->total_rw_bytes += diff; 2900 2901 btrfs_device_set_total_bytes(device, new_size); 2902 btrfs_device_set_disk_total_bytes(device, new_size); 2903 btrfs_clear_space_info_full(device->fs_info); 2904 if (list_empty(&device->post_commit_list)) 2905 list_add_tail(&device->post_commit_list, 2906 &trans->transaction->dev_update_list); 2907 mutex_unlock(&fs_info->chunk_mutex); 2908 2909 btrfs_reserve_chunk_metadata(trans, false); 2910 ret = btrfs_update_device(trans, device); 2911 btrfs_trans_release_chunk_metadata(trans); 2912 2913 return ret; 2914 } 2915 2916 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 2917 { 2918 struct btrfs_fs_info *fs_info = trans->fs_info; 2919 struct btrfs_root *root = fs_info->chunk_root; 2920 int ret; 2921 struct btrfs_path *path; 2922 struct btrfs_key key; 2923 2924 path = btrfs_alloc_path(); 2925 if (!path) 2926 return -ENOMEM; 2927 2928 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2929 key.offset = chunk_offset; 2930 key.type = BTRFS_CHUNK_ITEM_KEY; 2931 2932 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 2933 if (ret < 0) 2934 goto out; 2935 else if (ret > 0) { /* Logic error or corruption */ 2936 btrfs_handle_fs_error(fs_info, -ENOENT, 2937 "Failed lookup while freeing chunk."); 2938 ret = -ENOENT; 2939 goto out; 2940 } 2941 2942 ret = btrfs_del_item(trans, root, path); 2943 if (ret < 0) 2944 btrfs_handle_fs_error(fs_info, ret, 2945 "Failed to delete chunk item."); 2946 out: 2947 btrfs_free_path(path); 2948 return ret; 2949 } 2950 2951 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 2952 { 2953 struct btrfs_super_block *super_copy = fs_info->super_copy; 2954 struct btrfs_disk_key *disk_key; 2955 struct btrfs_chunk *chunk; 2956 u8 *ptr; 2957 int ret = 0; 2958 u32 num_stripes; 2959 u32 array_size; 2960 u32 len = 0; 2961 u32 cur; 2962 struct btrfs_key key; 2963 2964 lockdep_assert_held(&fs_info->chunk_mutex); 2965 array_size = btrfs_super_sys_array_size(super_copy); 2966 2967 ptr = super_copy->sys_chunk_array; 2968 cur = 0; 2969 2970 while (cur < array_size) { 2971 disk_key = (struct btrfs_disk_key *)ptr; 2972 btrfs_disk_key_to_cpu(&key, disk_key); 2973 2974 len = sizeof(*disk_key); 2975 2976 if (key.type == BTRFS_CHUNK_ITEM_KEY) { 2977 chunk = (struct btrfs_chunk *)(ptr + len); 2978 num_stripes = btrfs_stack_chunk_num_stripes(chunk); 2979 len += btrfs_chunk_item_size(num_stripes); 2980 } else { 2981 ret = -EIO; 2982 break; 2983 } 2984 if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID && 2985 key.offset == chunk_offset) { 2986 memmove(ptr, ptr + len, array_size - (cur + len)); 2987 array_size -= len; 2988 btrfs_set_super_sys_array_size(super_copy, array_size); 2989 } else { 2990 ptr += len; 2991 cur += len; 2992 } 2993 } 2994 return ret; 2995 } 2996 2997 /* 2998 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent. 2999 * @logical: Logical block offset in bytes. 3000 * @length: Length of extent in bytes. 3001 * 3002 * Return: Chunk mapping or ERR_PTR. 
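 *
 * The caller owns a reference on the returned extent map and must drop
 * it; a minimal sketch of the pattern used by btrfs_remove_chunk()
 * below:
 *
 *	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
 *	if (IS_ERR(em))
 *		return PTR_ERR(em);
 *	map = em->map_lookup;
 *	... use map ...
 *	free_extent_map(em);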
3003 */ 3004 struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info, 3005 u64 logical, u64 length) 3006 { 3007 struct extent_map_tree *em_tree; 3008 struct extent_map *em; 3009 3010 em_tree = &fs_info->mapping_tree; 3011 read_lock(&em_tree->lock); 3012 em = lookup_extent_mapping(em_tree, logical, length); 3013 read_unlock(&em_tree->lock); 3014 3015 if (!em) { 3016 btrfs_crit(fs_info, "unable to find logical %llu length %llu", 3017 logical, length); 3018 return ERR_PTR(-EINVAL); 3019 } 3020 3021 if (em->start > logical || em->start + em->len < logical) { 3022 btrfs_crit(fs_info, 3023 "found a bad mapping, wanted %llu-%llu, found %llu-%llu", 3024 logical, length, em->start, em->start + em->len); 3025 free_extent_map(em); 3026 return ERR_PTR(-EINVAL); 3027 } 3028 3029 /* callers are responsible for dropping em's ref. */ 3030 return em; 3031 } 3032 3033 static int remove_chunk_item(struct btrfs_trans_handle *trans, 3034 struct map_lookup *map, u64 chunk_offset) 3035 { 3036 int i; 3037 3038 /* 3039 * Removing chunk items and updating the device items in the chunks btree 3040 * requires holding the chunk_mutex. 3041 * See the comment at btrfs_chunk_alloc() for the details. 3042 */ 3043 lockdep_assert_held(&trans->fs_info->chunk_mutex); 3044 3045 for (i = 0; i < map->num_stripes; i++) { 3046 int ret; 3047 3048 ret = btrfs_update_device(trans, map->stripes[i].dev); 3049 if (ret) 3050 return ret; 3051 } 3052 3053 return btrfs_free_chunk(trans, chunk_offset); 3054 } 3055 3056 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 3057 { 3058 struct btrfs_fs_info *fs_info = trans->fs_info; 3059 struct extent_map *em; 3060 struct map_lookup *map; 3061 u64 dev_extent_len = 0; 3062 int i, ret = 0; 3063 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 3064 3065 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 3066 if (IS_ERR(em)) { 3067 /* 3068 * This is a logic error, but we don't want to just rely on the 3069 * user having built with ASSERT enabled, so if ASSERT doesn't 3070 * do anything we still error out. 3071 */ 3072 ASSERT(0); 3073 return PTR_ERR(em); 3074 } 3075 map = em->map_lookup; 3076 3077 /* 3078 * First delete the device extent items from the devices btree. 3079 * We take the device_list_mutex to avoid racing with the finishing phase 3080 * of a device replace operation. See the comment below before acquiring 3081 * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex 3082 * because that can result in a deadlock when deleting the device extent 3083 * items from the devices btree - COWing an extent buffer from the btree 3084 * may result in allocating a new metadata chunk, which would attempt to 3085 * lock again fs_info->chunk_mutex. 
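 *
 * In other words, the lock order here is device_list_mutex first, then
 * chunk_mutex (taken further below), the same order used by the
 * dev-replace finishing phase.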
3086 */ 3087 mutex_lock(&fs_devices->device_list_mutex); 3088 for (i = 0; i < map->num_stripes; i++) { 3089 struct btrfs_device *device = map->stripes[i].dev; 3090 ret = btrfs_free_dev_extent(trans, device, 3091 map->stripes[i].physical, 3092 &dev_extent_len); 3093 if (ret) { 3094 mutex_unlock(&fs_devices->device_list_mutex); 3095 btrfs_abort_transaction(trans, ret); 3096 goto out; 3097 } 3098 3099 if (device->bytes_used > 0) { 3100 mutex_lock(&fs_info->chunk_mutex); 3101 btrfs_device_set_bytes_used(device, 3102 device->bytes_used - dev_extent_len); 3103 atomic64_add(dev_extent_len, &fs_info->free_chunk_space); 3104 btrfs_clear_space_info_full(fs_info); 3105 mutex_unlock(&fs_info->chunk_mutex); 3106 } 3107 } 3108 mutex_unlock(&fs_devices->device_list_mutex); 3109 3110 /* 3111 * We acquire fs_info->chunk_mutex for 2 reasons: 3112 * 3113 * 1) Just like with the first phase of the chunk allocation, we must 3114 * reserve system space, do all chunk btree updates and deletions, and 3115 * update the system chunk array in the superblock while holding this 3116 * mutex. This is for similar reasons as explained on the comment at 3117 * the top of btrfs_chunk_alloc(); 3118 * 3119 * 2) Prevent races with the final phase of a device replace operation 3120 * that replaces the device object associated with the map's stripes, 3121 * because the device object's id can change at any time during that 3122 * final phase of the device replace operation 3123 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the 3124 * replaced device and then see it with an ID of 3125 * BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating 3126 * the device item, which does not exist in the chunk btree. 3127 * The finishing phase of device replace acquires both the 3128 * device_list_mutex and the chunk_mutex, in that order, so we are 3129 * safe by just acquiring the chunk_mutex. 3130 */ 3131 trans->removing_chunk = true; 3132 mutex_lock(&fs_info->chunk_mutex); 3133 3134 check_system_chunk(trans, map->type); 3135 3136 ret = remove_chunk_item(trans, map, chunk_offset); 3137 /* 3138 * Normally we should not get -ENOSPC since we reserved space before 3139 * through the call to check_system_chunk(). 3140 * 3141 * Despite our system space_info having enough free space, we may not 3142 * be able to allocate extents from its block groups, because all have 3143 * an incompatible profile, which will force us to allocate a new system 3144 * block group with the right profile, or right after we called 3145 * check_system_chunk() above, a scrub turned the only system block group 3146 * with enough free space into RO mode. 3147 * This is explained with more detail at do_chunk_alloc(). 3148 * 3149 * So if we get -ENOSPC, allocate a new system chunk and retry once.
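 * The retry below does exactly that: btrfs_create_chunk() allocates a
 * new system block group, btrfs_chunk_alloc_add_chunk_item() inserts
 * its chunk item, and then remove_chunk_item() is attempted again.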
3150 */ 3151 if (ret == -ENOSPC) { 3152 const u64 sys_flags = btrfs_system_alloc_profile(fs_info); 3153 struct btrfs_block_group *sys_bg; 3154 3155 sys_bg = btrfs_create_chunk(trans, sys_flags); 3156 if (IS_ERR(sys_bg)) { 3157 ret = PTR_ERR(sys_bg); 3158 btrfs_abort_transaction(trans, ret); 3159 goto out; 3160 } 3161 3162 ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg); 3163 if (ret) { 3164 btrfs_abort_transaction(trans, ret); 3165 goto out; 3166 } 3167 3168 ret = remove_chunk_item(trans, map, chunk_offset); 3169 if (ret) { 3170 btrfs_abort_transaction(trans, ret); 3171 goto out; 3172 } 3173 } else if (ret) { 3174 btrfs_abort_transaction(trans, ret); 3175 goto out; 3176 } 3177 3178 trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len); 3179 3180 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 3181 ret = btrfs_del_sys_chunk(fs_info, chunk_offset); 3182 if (ret) { 3183 btrfs_abort_transaction(trans, ret); 3184 goto out; 3185 } 3186 } 3187 3188 mutex_unlock(&fs_info->chunk_mutex); 3189 trans->removing_chunk = false; 3190 3191 /* 3192 * We are done with chunk btree updates and deletions, so release the 3193 * system space we previously reserved (with check_system_chunk()). 3194 */ 3195 btrfs_trans_release_chunk_metadata(trans); 3196 3197 ret = btrfs_remove_block_group(trans, chunk_offset, em); 3198 if (ret) { 3199 btrfs_abort_transaction(trans, ret); 3200 goto out; 3201 } 3202 3203 out: 3204 if (trans->removing_chunk) { 3205 mutex_unlock(&fs_info->chunk_mutex); 3206 trans->removing_chunk = false; 3207 } 3208 /* once for us */ 3209 free_extent_map(em); 3210 return ret; 3211 } 3212 3213 int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 3214 { 3215 struct btrfs_root *root = fs_info->chunk_root; 3216 struct btrfs_trans_handle *trans; 3217 struct btrfs_block_group *block_group; 3218 u64 length; 3219 int ret; 3220 3221 if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { 3222 btrfs_err(fs_info, 3223 "relocate: not supported on extent tree v2 yet"); 3224 return -EINVAL; 3225 } 3226 3227 /* 3228 * Prevent races with automatic removal of unused block groups. 3229 * After we relocate and before we remove the chunk with offset 3230 * chunk_offset, automatic removal of the block group can kick in, 3231 * resulting in a failure when calling btrfs_remove_chunk() below. 3232 * 3233 * Make sure to acquire this mutex before doing a tree search (dev 3234 * or chunk trees) to find chunks. Otherwise the cleaner kthread might 3235 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after 3236 * we release the path used to search the chunk/dev tree and before 3237 * the current task acquires this mutex and calls us. 3238 */ 3239 lockdep_assert_held(&fs_info->reclaim_bgs_lock); 3240 3241 /* step one, relocate all the extents inside this chunk */ 3242 btrfs_scrub_pause(fs_info); 3243 ret = btrfs_relocate_block_group(fs_info, chunk_offset); 3244 btrfs_scrub_continue(fs_info); 3245 if (ret) 3246 return ret; 3247 3248 block_group = btrfs_lookup_block_group(fs_info, chunk_offset); 3249 if (!block_group) 3250 return -ENOENT; 3251 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group); 3252 length = block_group->length; 3253 btrfs_put_block_group(block_group); 3254 3255 /* 3256 * On a zoned file system, discard the whole block group, this will 3257 * trigger a REQ_OP_ZONE_RESET operation on the device zone. If 3258 * resetting the zone fails, don't treat it as a fatal problem from the 3259 * filesystem's point of view. 
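 * (The reset is issued through the btrfs_discard_extent() call right
 * below; a failure is only reported with btrfs_info().)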
3260 */ 3261 if (btrfs_is_zoned(fs_info)) { 3262 ret = btrfs_discard_extent(fs_info, chunk_offset, length, NULL); 3263 if (ret) 3264 btrfs_info(fs_info, 3265 "failed to reset zone %llu after relocation", 3266 chunk_offset); 3267 } 3268 3269 trans = btrfs_start_trans_remove_block_group(root->fs_info, 3270 chunk_offset); 3271 if (IS_ERR(trans)) { 3272 ret = PTR_ERR(trans); 3273 btrfs_handle_fs_error(root->fs_info, ret, NULL); 3274 return ret; 3275 } 3276 3277 /* 3278 * step two, delete the device extents and the 3279 * chunk tree entries 3280 */ 3281 ret = btrfs_remove_chunk(trans, chunk_offset); 3282 btrfs_end_transaction(trans); 3283 return ret; 3284 } 3285 3286 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info) 3287 { 3288 struct btrfs_root *chunk_root = fs_info->chunk_root; 3289 struct btrfs_path *path; 3290 struct extent_buffer *leaf; 3291 struct btrfs_chunk *chunk; 3292 struct btrfs_key key; 3293 struct btrfs_key found_key; 3294 u64 chunk_type; 3295 bool retried = false; 3296 int failed = 0; 3297 int ret; 3298 3299 path = btrfs_alloc_path(); 3300 if (!path) 3301 return -ENOMEM; 3302 3303 again: 3304 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3305 key.offset = (u64)-1; 3306 key.type = BTRFS_CHUNK_ITEM_KEY; 3307 3308 while (1) { 3309 mutex_lock(&fs_info->reclaim_bgs_lock); 3310 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3311 if (ret < 0) { 3312 mutex_unlock(&fs_info->reclaim_bgs_lock); 3313 goto error; 3314 } 3315 BUG_ON(ret == 0); /* Corruption */ 3316 3317 ret = btrfs_previous_item(chunk_root, path, key.objectid, 3318 key.type); 3319 if (ret) 3320 mutex_unlock(&fs_info->reclaim_bgs_lock); 3321 if (ret < 0) 3322 goto error; 3323 if (ret > 0) 3324 break; 3325 3326 leaf = path->nodes[0]; 3327 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3328 3329 chunk = btrfs_item_ptr(leaf, path->slots[0], 3330 struct btrfs_chunk); 3331 chunk_type = btrfs_chunk_type(leaf, chunk); 3332 btrfs_release_path(path); 3333 3334 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) { 3335 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 3336 if (ret == -ENOSPC) 3337 failed++; 3338 else 3339 BUG_ON(ret); 3340 } 3341 mutex_unlock(&fs_info->reclaim_bgs_lock); 3342 3343 if (found_key.offset == 0) 3344 break; 3345 key.offset = found_key.offset - 1; 3346 } 3347 ret = 0; 3348 if (failed && !retried) { 3349 failed = 0; 3350 retried = true; 3351 goto again; 3352 } else if (WARN_ON(failed && retried)) { 3353 ret = -ENOSPC; 3354 } 3355 error: 3356 btrfs_free_path(path); 3357 return ret; 3358 } 3359 3360 /* 3361 * return 1 : allocate a data chunk successfully, 3362 * return <0: errors during allocating a data chunk, 3363 * return 0 : no need to allocate a data chunk. 
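 *
 * Callers fold this into a "chunk reserved" flag; a sketch of the
 * pattern used by __btrfs_balance() below:
 *
 *	ret = btrfs_may_alloc_data_chunk(fs_info, found_key.offset);
 *	if (ret < 0)
 *		goto error;
 *	else if (ret == 1)
 *		chunk_reserved = 1;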
3364 */ 3365 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info, 3366 u64 chunk_offset) 3367 { 3368 struct btrfs_block_group *cache; 3369 u64 bytes_used; 3370 u64 chunk_type; 3371 3372 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3373 ASSERT(cache); 3374 chunk_type = cache->flags; 3375 btrfs_put_block_group(cache); 3376 3377 if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA)) 3378 return 0; 3379 3380 spin_lock(&fs_info->data_sinfo->lock); 3381 bytes_used = fs_info->data_sinfo->bytes_used; 3382 spin_unlock(&fs_info->data_sinfo->lock); 3383 3384 if (!bytes_used) { 3385 struct btrfs_trans_handle *trans; 3386 int ret; 3387 3388 trans = btrfs_join_transaction(fs_info->tree_root); 3389 if (IS_ERR(trans)) 3390 return PTR_ERR(trans); 3391 3392 ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA); 3393 btrfs_end_transaction(trans); 3394 if (ret < 0) 3395 return ret; 3396 return 1; 3397 } 3398 3399 return 0; 3400 } 3401 3402 static int insert_balance_item(struct btrfs_fs_info *fs_info, 3403 struct btrfs_balance_control *bctl) 3404 { 3405 struct btrfs_root *root = fs_info->tree_root; 3406 struct btrfs_trans_handle *trans; 3407 struct btrfs_balance_item *item; 3408 struct btrfs_disk_balance_args disk_bargs; 3409 struct btrfs_path *path; 3410 struct extent_buffer *leaf; 3411 struct btrfs_key key; 3412 int ret, err; 3413 3414 path = btrfs_alloc_path(); 3415 if (!path) 3416 return -ENOMEM; 3417 3418 trans = btrfs_start_transaction(root, 0); 3419 if (IS_ERR(trans)) { 3420 btrfs_free_path(path); 3421 return PTR_ERR(trans); 3422 } 3423 3424 key.objectid = BTRFS_BALANCE_OBJECTID; 3425 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3426 key.offset = 0; 3427 3428 ret = btrfs_insert_empty_item(trans, root, path, &key, 3429 sizeof(*item)); 3430 if (ret) 3431 goto out; 3432 3433 leaf = path->nodes[0]; 3434 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 3435 3436 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item)); 3437 3438 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data); 3439 btrfs_set_balance_data(leaf, item, &disk_bargs); 3440 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta); 3441 btrfs_set_balance_meta(leaf, item, &disk_bargs); 3442 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys); 3443 btrfs_set_balance_sys(leaf, item, &disk_bargs); 3444 3445 btrfs_set_balance_flags(leaf, item, bctl->flags); 3446 3447 btrfs_mark_buffer_dirty(leaf); 3448 out: 3449 btrfs_free_path(path); 3450 err = btrfs_commit_transaction(trans); 3451 if (err && !ret) 3452 ret = err; 3453 return ret; 3454 } 3455 3456 static int del_balance_item(struct btrfs_fs_info *fs_info) 3457 { 3458 struct btrfs_root *root = fs_info->tree_root; 3459 struct btrfs_trans_handle *trans; 3460 struct btrfs_path *path; 3461 struct btrfs_key key; 3462 int ret, err; 3463 3464 path = btrfs_alloc_path(); 3465 if (!path) 3466 return -ENOMEM; 3467 3468 trans = btrfs_start_transaction_fallback_global_rsv(root, 0); 3469 if (IS_ERR(trans)) { 3470 btrfs_free_path(path); 3471 return PTR_ERR(trans); 3472 } 3473 3474 key.objectid = BTRFS_BALANCE_OBJECTID; 3475 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3476 key.offset = 0; 3477 3478 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 3479 if (ret < 0) 3480 goto out; 3481 if (ret > 0) { 3482 ret = -ENOENT; 3483 goto out; 3484 } 3485 3486 ret = btrfs_del_item(trans, root, path); 3487 out: 3488 btrfs_free_path(path); 3489 err = btrfs_commit_transaction(trans); 3490 if (err && !ret) 3491 ret = err; 3492 return ret; 3493 } 3494 3495 /* 3496 * This is a 
heuristic used to reduce the number of chunks balanced on 3497 * resume after balance was interrupted. 3498 */ 3499 static void update_balance_args(struct btrfs_balance_control *bctl) 3500 { 3501 /* 3502 * Turn on soft mode for chunk types that were being converted. 3503 */ 3504 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) 3505 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT; 3506 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) 3507 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT; 3508 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) 3509 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT; 3510 3511 /* 3512 * Turn on usage filter if it is not already used. The idea is 3513 * that chunks that we have already balanced should be 3514 * reasonably full. Don't do it for chunks that are being 3515 * converted - that will keep us from relocating unconverted 3516 * (albeit full) chunks. 3517 */ 3518 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) && 3519 !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3520 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3521 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE; 3522 bctl->data.usage = 90; 3523 } 3524 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) && 3525 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3526 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3527 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE; 3528 bctl->sys.usage = 90; 3529 } 3530 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) && 3531 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3532 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3533 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE; 3534 bctl->meta.usage = 90; 3535 } 3536 } 3537 3538 /* 3539 * Clear the balance status in fs_info and delete the balance item from disk. 3540 */ 3541 static void reset_balance_state(struct btrfs_fs_info *fs_info) 3542 { 3543 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3544 int ret; 3545 3546 BUG_ON(!fs_info->balance_ctl); 3547 3548 spin_lock(&fs_info->balance_lock); 3549 fs_info->balance_ctl = NULL; 3550 spin_unlock(&fs_info->balance_lock); 3551 3552 kfree(bctl); 3553 ret = del_balance_item(fs_info); 3554 if (ret) 3555 btrfs_handle_fs_error(fs_info, ret, NULL); 3556 } 3557 3558 /* 3559 * Balance filters. Return 1 if chunk should be filtered out 3560 * (should not be balanced).
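 *
 * E.g. if the data filter has BTRFS_BALANCE_ARGS_PROFILES set with only
 * the raid0 bit in ->profiles, chunk_profiles_filter() below returns 1
 * for any data chunk whose extended profile is not raid0, and
 * should_balance_chunk() then skips that chunk.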
3561 */ 3562 static int chunk_profiles_filter(u64 chunk_type, 3563 struct btrfs_balance_args *bargs) 3564 { 3565 chunk_type = chunk_to_extended(chunk_type) & 3566 BTRFS_EXTENDED_PROFILE_MASK; 3567 3568 if (bargs->profiles & chunk_type) 3569 return 0; 3570 3571 return 1; 3572 } 3573 3574 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, 3575 struct btrfs_balance_args *bargs) 3576 { 3577 struct btrfs_block_group *cache; 3578 u64 chunk_used; 3579 u64 user_thresh_min; 3580 u64 user_thresh_max; 3581 int ret = 1; 3582 3583 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3584 chunk_used = cache->used; 3585 3586 if (bargs->usage_min == 0) 3587 user_thresh_min = 0; 3588 else 3589 user_thresh_min = div_factor_fine(cache->length, 3590 bargs->usage_min); 3591 3592 if (bargs->usage_max == 0) 3593 user_thresh_max = 1; 3594 else if (bargs->usage_max > 100) 3595 user_thresh_max = cache->length; 3596 else 3597 user_thresh_max = div_factor_fine(cache->length, 3598 bargs->usage_max); 3599 3600 if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max) 3601 ret = 0; 3602 3603 btrfs_put_block_group(cache); 3604 return ret; 3605 } 3606 3607 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, 3608 u64 chunk_offset, struct btrfs_balance_args *bargs) 3609 { 3610 struct btrfs_block_group *cache; 3611 u64 chunk_used, user_thresh; 3612 int ret = 1; 3613 3614 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3615 chunk_used = cache->used; 3616 3617 if (bargs->usage_min == 0) 3618 user_thresh = 1; 3619 else if (bargs->usage > 100) 3620 user_thresh = cache->length; 3621 else 3622 user_thresh = div_factor_fine(cache->length, bargs->usage); 3623 3624 if (chunk_used < user_thresh) 3625 ret = 0; 3626 3627 btrfs_put_block_group(cache); 3628 return ret; 3629 } 3630 3631 static int chunk_devid_filter(struct extent_buffer *leaf, 3632 struct btrfs_chunk *chunk, 3633 struct btrfs_balance_args *bargs) 3634 { 3635 struct btrfs_stripe *stripe; 3636 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3637 int i; 3638 3639 for (i = 0; i < num_stripes; i++) { 3640 stripe = btrfs_stripe_nr(chunk, i); 3641 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid) 3642 return 0; 3643 } 3644 3645 return 1; 3646 } 3647 3648 static u64 calc_data_stripes(u64 type, int num_stripes) 3649 { 3650 const int index = btrfs_bg_flags_to_raid_index(type); 3651 const int ncopies = btrfs_raid_array[index].ncopies; 3652 const int nparity = btrfs_raid_array[index].nparity; 3653 3654 return (num_stripes - nparity) / ncopies; 3655 } 3656 3657 /* [pstart, pend) */ 3658 static int chunk_drange_filter(struct extent_buffer *leaf, 3659 struct btrfs_chunk *chunk, 3660 struct btrfs_balance_args *bargs) 3661 { 3662 struct btrfs_stripe *stripe; 3663 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3664 u64 stripe_offset; 3665 u64 stripe_length; 3666 u64 type; 3667 int factor; 3668 int i; 3669 3670 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID)) 3671 return 0; 3672 3673 type = btrfs_chunk_type(leaf, chunk); 3674 factor = calc_data_stripes(type, num_stripes); 3675 3676 for (i = 0; i < num_stripes; i++) { 3677 stripe = btrfs_stripe_nr(chunk, i); 3678 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid) 3679 continue; 3680 3681 stripe_offset = btrfs_stripe_offset(leaf, stripe); 3682 stripe_length = btrfs_chunk_length(leaf, chunk); 3683 stripe_length = div_u64(stripe_length, factor); 3684 3685 if (stripe_offset < bargs->pend && 3686 stripe_offset + stripe_length > bargs->pstart) 3687 return 0; 
3688 } 3689 3690 return 1; 3691 } 3692 3693 /* [vstart, vend) */ 3694 static int chunk_vrange_filter(struct extent_buffer *leaf, 3695 struct btrfs_chunk *chunk, 3696 u64 chunk_offset, 3697 struct btrfs_balance_args *bargs) 3698 { 3699 if (chunk_offset < bargs->vend && 3700 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart) 3701 /* at least part of the chunk is inside this vrange */ 3702 return 0; 3703 3704 return 1; 3705 } 3706 3707 static int chunk_stripes_range_filter(struct extent_buffer *leaf, 3708 struct btrfs_chunk *chunk, 3709 struct btrfs_balance_args *bargs) 3710 { 3711 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3712 3713 if (bargs->stripes_min <= num_stripes 3714 && num_stripes <= bargs->stripes_max) 3715 return 0; 3716 3717 return 1; 3718 } 3719 3720 static int chunk_soft_convert_filter(u64 chunk_type, 3721 struct btrfs_balance_args *bargs) 3722 { 3723 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 3724 return 0; 3725 3726 chunk_type = chunk_to_extended(chunk_type) & 3727 BTRFS_EXTENDED_PROFILE_MASK; 3728 3729 if (bargs->target == chunk_type) 3730 return 1; 3731 3732 return 0; 3733 } 3734 3735 static int should_balance_chunk(struct extent_buffer *leaf, 3736 struct btrfs_chunk *chunk, u64 chunk_offset) 3737 { 3738 struct btrfs_fs_info *fs_info = leaf->fs_info; 3739 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3740 struct btrfs_balance_args *bargs = NULL; 3741 u64 chunk_type = btrfs_chunk_type(leaf, chunk); 3742 3743 /* type filter */ 3744 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) & 3745 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) { 3746 return 0; 3747 } 3748 3749 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3750 bargs = &bctl->data; 3751 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3752 bargs = &bctl->sys; 3753 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3754 bargs = &bctl->meta; 3755 3756 /* profiles filter */ 3757 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) && 3758 chunk_profiles_filter(chunk_type, bargs)) { 3759 return 0; 3760 } 3761 3762 /* usage filter */ 3763 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) && 3764 chunk_usage_filter(fs_info, chunk_offset, bargs)) { 3765 return 0; 3766 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3767 chunk_usage_range_filter(fs_info, chunk_offset, bargs)) { 3768 return 0; 3769 } 3770 3771 /* devid filter */ 3772 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) && 3773 chunk_devid_filter(leaf, chunk, bargs)) { 3774 return 0; 3775 } 3776 3777 /* drange filter, makes sense only with devid filter */ 3778 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) && 3779 chunk_drange_filter(leaf, chunk, bargs)) { 3780 return 0; 3781 } 3782 3783 /* vrange filter */ 3784 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) && 3785 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) { 3786 return 0; 3787 } 3788 3789 /* stripes filter */ 3790 if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) && 3791 chunk_stripes_range_filter(leaf, chunk, bargs)) { 3792 return 0; 3793 } 3794 3795 /* soft profile changing mode */ 3796 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) && 3797 chunk_soft_convert_filter(chunk_type, bargs)) { 3798 return 0; 3799 } 3800 3801 /* 3802 * limited by count, must be the last filter 3803 */ 3804 if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) { 3805 if (bargs->limit == 0) 3806 return 0; 3807 else 3808 bargs->limit--; 3809 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) { 3810 /* 3811 * Same logic as the 'limit' filter; the minimum cannot be 3812 * determined here 
because we do not have the global information 3813 * about the count of all chunks that satisfy the filters. 3814 */ 3815 if (bargs->limit_max == 0) 3816 return 0; 3817 else 3818 bargs->limit_max--; 3819 } 3820 3821 return 1; 3822 } 3823 3824 static int __btrfs_balance(struct btrfs_fs_info *fs_info) 3825 { 3826 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3827 struct btrfs_root *chunk_root = fs_info->chunk_root; 3828 u64 chunk_type; 3829 struct btrfs_chunk *chunk; 3830 struct btrfs_path *path = NULL; 3831 struct btrfs_key key; 3832 struct btrfs_key found_key; 3833 struct extent_buffer *leaf; 3834 int slot; 3835 int ret; 3836 int enospc_errors = 0; 3837 bool counting = true; 3838 /* The single value limit and the min/max limits share the same bytes in struct btrfs_balance_args, so save the originals before the counting pass modifies them. */ 3839 u64 limit_data = bctl->data.limit; 3840 u64 limit_meta = bctl->meta.limit; 3841 u64 limit_sys = bctl->sys.limit; 3842 u32 count_data = 0; 3843 u32 count_meta = 0; 3844 u32 count_sys = 0; 3845 int chunk_reserved = 0; 3846 3847 path = btrfs_alloc_path(); 3848 if (!path) { 3849 ret = -ENOMEM; 3850 goto error; 3851 } 3852 3853 /* zero out stat counters */ 3854 spin_lock(&fs_info->balance_lock); 3855 memset(&bctl->stat, 0, sizeof(bctl->stat)); 3856 spin_unlock(&fs_info->balance_lock); 3857 again: 3858 if (!counting) { 3859 /* 3860 * The single value limit and min/max limits use the same bytes 3861 * in struct btrfs_balance_args, so restore the saved limits here. 3862 */ 3863 bctl->data.limit = limit_data; 3864 bctl->meta.limit = limit_meta; 3865 bctl->sys.limit = limit_sys; 3866 } 3867 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3868 key.offset = (u64)-1; 3869 key.type = BTRFS_CHUNK_ITEM_KEY; 3870 3871 while (1) { 3872 if ((!counting && atomic_read(&fs_info->balance_pause_req)) || 3873 atomic_read(&fs_info->balance_cancel_req)) { 3874 ret = -ECANCELED; 3875 goto error; 3876 } 3877 3878 mutex_lock(&fs_info->reclaim_bgs_lock); 3879 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3880 if (ret < 0) { 3881 mutex_unlock(&fs_info->reclaim_bgs_lock); 3882 goto error; 3883 } 3884 3885 /* 3886 * this shouldn't happen, it means the last relocate 3887 * failed 3888 */ 3889 if (ret == 0) 3890 BUG(); /* FIXME break ?
*/ 3891 3892 ret = btrfs_previous_item(chunk_root, path, 0, 3893 BTRFS_CHUNK_ITEM_KEY); 3894 if (ret) { 3895 mutex_unlock(&fs_info->reclaim_bgs_lock); 3896 ret = 0; 3897 break; 3898 } 3899 3900 leaf = path->nodes[0]; 3901 slot = path->slots[0]; 3902 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3903 3904 if (found_key.objectid != key.objectid) { 3905 mutex_unlock(&fs_info->reclaim_bgs_lock); 3906 break; 3907 } 3908 3909 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 3910 chunk_type = btrfs_chunk_type(leaf, chunk); 3911 3912 if (!counting) { 3913 spin_lock(&fs_info->balance_lock); 3914 bctl->stat.considered++; 3915 spin_unlock(&fs_info->balance_lock); 3916 } 3917 3918 ret = should_balance_chunk(leaf, chunk, found_key.offset); 3919 3920 btrfs_release_path(path); 3921 if (!ret) { 3922 mutex_unlock(&fs_info->reclaim_bgs_lock); 3923 goto loop; 3924 } 3925 3926 if (counting) { 3927 mutex_unlock(&fs_info->reclaim_bgs_lock); 3928 spin_lock(&fs_info->balance_lock); 3929 bctl->stat.expected++; 3930 spin_unlock(&fs_info->balance_lock); 3931 3932 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3933 count_data++; 3934 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3935 count_sys++; 3936 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3937 count_meta++; 3938 3939 goto loop; 3940 } 3941 3942 /* 3943 * Apply limit_min filter, no need to check if the LIMITS 3944 * filter is used, limit_min is 0 by default 3945 */ 3946 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) && 3947 count_data < bctl->data.limit_min) 3948 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) && 3949 count_meta < bctl->meta.limit_min) 3950 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) && 3951 count_sys < bctl->sys.limit_min)) { 3952 mutex_unlock(&fs_info->reclaim_bgs_lock); 3953 goto loop; 3954 } 3955 3956 if (!chunk_reserved) { 3957 /* 3958 * We may be relocating the only data chunk we have, 3959 * which could potentially end up with losing data's 3960 * raid profile, so lets allocate an empty one in 3961 * advance. 3962 */ 3963 ret = btrfs_may_alloc_data_chunk(fs_info, 3964 found_key.offset); 3965 if (ret < 0) { 3966 mutex_unlock(&fs_info->reclaim_bgs_lock); 3967 goto error; 3968 } else if (ret == 1) { 3969 chunk_reserved = 1; 3970 } 3971 } 3972 3973 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 3974 mutex_unlock(&fs_info->reclaim_bgs_lock); 3975 if (ret == -ENOSPC) { 3976 enospc_errors++; 3977 } else if (ret == -ETXTBSY) { 3978 btrfs_info(fs_info, 3979 "skipping relocation of block group %llu due to active swapfile", 3980 found_key.offset); 3981 ret = 0; 3982 } else if (ret) { 3983 goto error; 3984 } else { 3985 spin_lock(&fs_info->balance_lock); 3986 bctl->stat.completed++; 3987 spin_unlock(&fs_info->balance_lock); 3988 } 3989 loop: 3990 if (found_key.offset == 0) 3991 break; 3992 key.offset = found_key.offset - 1; 3993 } 3994 3995 if (counting) { 3996 btrfs_release_path(path); 3997 counting = false; 3998 goto again; 3999 } 4000 error: 4001 btrfs_free_path(path); 4002 if (enospc_errors) { 4003 btrfs_info(fs_info, "%d enospc errors during balance", 4004 enospc_errors); 4005 if (!ret) 4006 ret = -ENOSPC; 4007 } 4008 4009 return ret; 4010 } 4011 4012 /** 4013 * alloc_profile_is_valid - see if a given profile is valid and reduced 4014 * @flags: profile to validate 4015 * @extended: if true @flags is treated as an extended profile 4016 */ 4017 static int alloc_profile_is_valid(u64 flags, int extended) 4018 { 4019 u64 mask = (extended ? 
BTRFS_EXTENDED_PROFILE_MASK : 4020 BTRFS_BLOCK_GROUP_PROFILE_MASK); 4021 4022 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK; 4023 4024 /* 1) check that all other bits are zeroed */ 4025 if (flags & ~mask) 4026 return 0; 4027 4028 /* 2) see if profile is reduced */ 4029 if (flags == 0) 4030 return !extended; /* "0" is valid for usual profiles */ 4031 4032 return has_single_bit_set(flags); 4033 } 4034 4035 static inline int balance_need_close(struct btrfs_fs_info *fs_info) 4036 { 4037 /* cancel requested || normal exit path */ 4038 return atomic_read(&fs_info->balance_cancel_req) || 4039 (atomic_read(&fs_info->balance_pause_req) == 0 && 4040 atomic_read(&fs_info->balance_cancel_req) == 0); 4041 } 4042 4043 /* 4044 * Validate target profile against allowed profiles and return true if it's OK. 4045 * Otherwise print the error message and return false. 4046 */ 4047 static inline int validate_convert_profile(struct btrfs_fs_info *fs_info, 4048 const struct btrfs_balance_args *bargs, 4049 u64 allowed, const char *type) 4050 { 4051 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 4052 return true; 4053 4054 /* Profile is valid and does not have bits outside of the allowed set */ 4055 if (alloc_profile_is_valid(bargs->target, 1) && 4056 (bargs->target & ~allowed) == 0) 4057 return true; 4058 4059 btrfs_err(fs_info, "balance: invalid convert %s profile %s", 4060 type, btrfs_bg_type_to_raid_name(bargs->target)); 4061 return false; 4062 } 4063 4064 /* 4065 * Fill @buf with textual description of balance filter flags @bargs, up to 4066 * @size_buf including the terminating null. The output may be trimmed if it 4067 * does not fit into the provided buffer. 4068 */ 4069 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf, 4070 u32 size_buf) 4071 { 4072 int ret; 4073 u32 size_bp = size_buf; 4074 char *bp = buf; 4075 u64 flags = bargs->flags; 4076 char tmp_buf[128] = {'\0'}; 4077 4078 if (!flags) 4079 return; 4080 4081 #define CHECK_APPEND_NOARG(a) \ 4082 do { \ 4083 ret = snprintf(bp, size_bp, (a)); \ 4084 if (ret < 0 || ret >= size_bp) \ 4085 goto out_overflow; \ 4086 size_bp -= ret; \ 4087 bp += ret; \ 4088 } while (0) 4089 4090 #define CHECK_APPEND_1ARG(a, v1) \ 4091 do { \ 4092 ret = snprintf(bp, size_bp, (a), (v1)); \ 4093 if (ret < 0 || ret >= size_bp) \ 4094 goto out_overflow; \ 4095 size_bp -= ret; \ 4096 bp += ret; \ 4097 } while (0) 4098 4099 #define CHECK_APPEND_2ARG(a, v1, v2) \ 4100 do { \ 4101 ret = snprintf(bp, size_bp, (a), (v1), (v2)); \ 4102 if (ret < 0 || ret >= size_bp) \ 4103 goto out_overflow; \ 4104 size_bp -= ret; \ 4105 bp += ret; \ 4106 } while (0) 4107 4108 if (flags & BTRFS_BALANCE_ARGS_CONVERT) 4109 CHECK_APPEND_1ARG("convert=%s,", 4110 btrfs_bg_type_to_raid_name(bargs->target)); 4111 4112 if (flags & BTRFS_BALANCE_ARGS_SOFT) 4113 CHECK_APPEND_NOARG("soft,"); 4114 4115 if (flags & BTRFS_BALANCE_ARGS_PROFILES) { 4116 btrfs_describe_block_groups(bargs->profiles, tmp_buf, 4117 sizeof(tmp_buf)); 4118 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf); 4119 } 4120 4121 if (flags & BTRFS_BALANCE_ARGS_USAGE) 4122 CHECK_APPEND_1ARG("usage=%llu,", bargs->usage); 4123 4124 if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) 4125 CHECK_APPEND_2ARG("usage=%u..%u,", 4126 bargs->usage_min, bargs->usage_max); 4127 4128 if (flags & BTRFS_BALANCE_ARGS_DEVID) 4129 CHECK_APPEND_1ARG("devid=%llu,", bargs->devid); 4130 4131 if (flags & BTRFS_BALANCE_ARGS_DRANGE) 4132 CHECK_APPEND_2ARG("drange=%llu..%llu,", 4133 bargs->pstart, bargs->pend); 4134 4135 if (flags & BTRFS_BALANCE_ARGS_VRANGE) 4136 
CHECK_APPEND_2ARG("vrange=%llu..%llu,", 4137 bargs->vstart, bargs->vend); 4138 4139 if (flags & BTRFS_BALANCE_ARGS_LIMIT) 4140 CHECK_APPEND_1ARG("limit=%llu,", bargs->limit); 4141 4142 if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE) 4143 CHECK_APPEND_2ARG("limit=%u..%u,", 4144 bargs->limit_min, bargs->limit_max); 4145 4146 if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) 4147 CHECK_APPEND_2ARG("stripes=%u..%u,", 4148 bargs->stripes_min, bargs->stripes_max); 4149 4150 #undef CHECK_APPEND_2ARG 4151 #undef CHECK_APPEND_1ARG 4152 #undef CHECK_APPEND_NOARG 4153 4154 out_overflow: 4155 4156 if (size_bp < size_buf) 4157 buf[size_buf - size_bp - 1] = '\0'; /* remove last , */ 4158 else 4159 buf[0] = '\0'; 4160 } 4161 4162 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info) 4163 { 4164 u32 size_buf = 1024; 4165 char tmp_buf[192] = {'\0'}; 4166 char *buf; 4167 char *bp; 4168 u32 size_bp = size_buf; 4169 int ret; 4170 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 4171 4172 buf = kzalloc(size_buf, GFP_KERNEL); 4173 if (!buf) 4174 return; 4175 4176 bp = buf; 4177 4178 #define CHECK_APPEND_1ARG(a, v1) \ 4179 do { \ 4180 ret = snprintf(bp, size_bp, (a), (v1)); \ 4181 if (ret < 0 || ret >= size_bp) \ 4182 goto out_overflow; \ 4183 size_bp -= ret; \ 4184 bp += ret; \ 4185 } while (0) 4186 4187 if (bctl->flags & BTRFS_BALANCE_FORCE) 4188 CHECK_APPEND_1ARG("%s", "-f "); 4189 4190 if (bctl->flags & BTRFS_BALANCE_DATA) { 4191 describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf)); 4192 CHECK_APPEND_1ARG("-d%s ", tmp_buf); 4193 } 4194 4195 if (bctl->flags & BTRFS_BALANCE_METADATA) { 4196 describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf)); 4197 CHECK_APPEND_1ARG("-m%s ", tmp_buf); 4198 } 4199 4200 if (bctl->flags & BTRFS_BALANCE_SYSTEM) { 4201 describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf)); 4202 CHECK_APPEND_1ARG("-s%s ", tmp_buf); 4203 } 4204 4205 #undef CHECK_APPEND_1ARG 4206 4207 out_overflow: 4208 4209 if (size_bp < size_buf) 4210 buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */ 4211 btrfs_info(fs_info, "balance: %s %s", 4212 (bctl->flags & BTRFS_BALANCE_RESUME) ? 4213 "resume" : "start", buf); 4214 4215 kfree(buf); 4216 } 4217 4218 /* 4219 * Should be called with balance mutexe held 4220 */ 4221 int btrfs_balance(struct btrfs_fs_info *fs_info, 4222 struct btrfs_balance_control *bctl, 4223 struct btrfs_ioctl_balance_args *bargs) 4224 { 4225 u64 meta_target, data_target; 4226 u64 allowed; 4227 int mixed = 0; 4228 int ret; 4229 u64 num_devices; 4230 unsigned seq; 4231 bool reducing_redundancy; 4232 int i; 4233 4234 if (btrfs_fs_closing(fs_info) || 4235 atomic_read(&fs_info->balance_pause_req) || 4236 btrfs_should_cancel_balance(fs_info)) { 4237 ret = -EINVAL; 4238 goto out; 4239 } 4240 4241 allowed = btrfs_super_incompat_flags(fs_info->super_copy); 4242 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) 4243 mixed = 1; 4244 4245 /* 4246 * In case of mixed groups both data and meta should be picked, 4247 * and identical options should be given for both of them. 
4248 */ 4249 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA; 4250 if (mixed && (bctl->flags & allowed)) { 4251 if (!(bctl->flags & BTRFS_BALANCE_DATA) || 4252 !(bctl->flags & BTRFS_BALANCE_METADATA) || 4253 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) { 4254 btrfs_err(fs_info, 4255 "balance: mixed groups data and metadata options must be the same"); 4256 ret = -EINVAL; 4257 goto out; 4258 } 4259 } 4260 4261 /* 4262 * rw_devices will not change at the moment, device add/delete/replace 4263 * are exclusive 4264 */ 4265 num_devices = fs_info->fs_devices->rw_devices; 4266 4267 /* 4268 * SINGLE profile on-disk has no profile bit, but in-memory we have a 4269 * special bit for it, to make it easier to distinguish. Thus we need 4270 * to set it manually, or balance would refuse the profile. 4271 */ 4272 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE; 4273 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) 4274 if (num_devices >= btrfs_raid_array[i].devs_min) 4275 allowed |= btrfs_raid_array[i].bg_flag; 4276 4277 if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") || 4278 !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") || 4279 !validate_convert_profile(fs_info, &bctl->sys, allowed, "system")) { 4280 ret = -EINVAL; 4281 goto out; 4282 } 4283 4284 /* 4285 * Allow to reduce metadata or system integrity only if force set for 4286 * profiles with redundancy (copies, parity) 4287 */ 4288 allowed = 0; 4289 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) { 4290 if (btrfs_raid_array[i].ncopies >= 2 || 4291 btrfs_raid_array[i].tolerated_failures >= 1) 4292 allowed |= btrfs_raid_array[i].bg_flag; 4293 } 4294 do { 4295 seq = read_seqbegin(&fs_info->profiles_lock); 4296 4297 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4298 (fs_info->avail_system_alloc_bits & allowed) && 4299 !(bctl->sys.target & allowed)) || 4300 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4301 (fs_info->avail_metadata_alloc_bits & allowed) && 4302 !(bctl->meta.target & allowed))) 4303 reducing_redundancy = true; 4304 else 4305 reducing_redundancy = false; 4306 4307 /* if we're not converting, the target field is uninitialized */ 4308 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 4309 bctl->meta.target : fs_info->avail_metadata_alloc_bits; 4310 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 
4311 bctl->data.target : fs_info->avail_data_alloc_bits; 4312 } while (read_seqretry(&fs_info->profiles_lock, seq)); 4313 4314 if (reducing_redundancy) { 4315 if (bctl->flags & BTRFS_BALANCE_FORCE) { 4316 btrfs_info(fs_info, 4317 "balance: force reducing metadata redundancy"); 4318 } else { 4319 btrfs_err(fs_info, 4320 "balance: reduces metadata redundancy, use --force if you want this"); 4321 ret = -EINVAL; 4322 goto out; 4323 } 4324 } 4325 4326 if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) < 4327 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) { 4328 btrfs_warn(fs_info, 4329 "balance: metadata profile %s has lower redundancy than data profile %s", 4330 btrfs_bg_type_to_raid_name(meta_target), 4331 btrfs_bg_type_to_raid_name(data_target)); 4332 } 4333 4334 ret = insert_balance_item(fs_info, bctl); 4335 if (ret && ret != -EEXIST) 4336 goto out; 4337 4338 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) { 4339 BUG_ON(ret == -EEXIST); 4340 BUG_ON(fs_info->balance_ctl); 4341 spin_lock(&fs_info->balance_lock); 4342 fs_info->balance_ctl = bctl; 4343 spin_unlock(&fs_info->balance_lock); 4344 } else { 4345 BUG_ON(ret != -EEXIST); 4346 spin_lock(&fs_info->balance_lock); 4347 update_balance_args(bctl); 4348 spin_unlock(&fs_info->balance_lock); 4349 } 4350 4351 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4352 set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4353 describe_balance_start_or_resume(fs_info); 4354 mutex_unlock(&fs_info->balance_mutex); 4355 4356 ret = __btrfs_balance(fs_info); 4357 4358 mutex_lock(&fs_info->balance_mutex); 4359 if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req)) { 4360 btrfs_info(fs_info, "balance: paused"); 4361 btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED); 4362 } 4363 /* 4364 * Balance can be canceled by: 4365 * 4366 * - Regular cancel request 4367 * Then ret == -ECANCELED and balance_cancel_req > 0 4368 * 4369 * - Fatal signal to "btrfs" process 4370 * Either the signal caught by wait_reserve_ticket() and callers 4371 * got -EINTR, or caught by btrfs_should_cancel_balance() and 4372 * got -ECANCELED. 4373 * Either way, in this case balance_cancel_req = 0, and 4374 * ret == -EINTR or ret == -ECANCELED. 4375 * 4376 * So here we only check the return value to catch canceled balance. 
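	 *
	 * E.g. a user-issued "btrfs balance cancel" bumps balance_cancel_req,
	 * __btrfs_balance() returns -ECANCELED, and the branch below logs
	 * "balance: canceled".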
4377 */ 4378 else if (ret == -ECANCELED || ret == -EINTR) 4379 btrfs_info(fs_info, "balance: canceled"); 4380 else 4381 btrfs_info(fs_info, "balance: ended with status: %d", ret); 4382 4383 clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4384 4385 if (bargs) { 4386 memset(bargs, 0, sizeof(*bargs)); 4387 btrfs_update_ioctl_balance_args(fs_info, bargs); 4388 } 4389 4390 if ((ret && ret != -ECANCELED && ret != -ENOSPC) || 4391 balance_need_close(fs_info)) { 4392 reset_balance_state(fs_info); 4393 btrfs_exclop_finish(fs_info); 4394 } 4395 4396 wake_up(&fs_info->balance_wait_q); 4397 4398 return ret; 4399 out: 4400 if (bctl->flags & BTRFS_BALANCE_RESUME) 4401 reset_balance_state(fs_info); 4402 else 4403 kfree(bctl); 4404 btrfs_exclop_finish(fs_info); 4405 4406 return ret; 4407 } 4408 4409 static int balance_kthread(void *data) 4410 { 4411 struct btrfs_fs_info *fs_info = data; 4412 int ret = 0; 4413 4414 sb_start_write(fs_info->sb); 4415 mutex_lock(&fs_info->balance_mutex); 4416 if (fs_info->balance_ctl) 4417 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL); 4418 mutex_unlock(&fs_info->balance_mutex); 4419 sb_end_write(fs_info->sb); 4420 4421 return ret; 4422 } 4423 4424 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info) 4425 { 4426 struct task_struct *tsk; 4427 4428 mutex_lock(&fs_info->balance_mutex); 4429 if (!fs_info->balance_ctl) { 4430 mutex_unlock(&fs_info->balance_mutex); 4431 return 0; 4432 } 4433 mutex_unlock(&fs_info->balance_mutex); 4434 4435 if (btrfs_test_opt(fs_info, SKIP_BALANCE)) { 4436 btrfs_info(fs_info, "balance: resume skipped"); 4437 return 0; 4438 } 4439 4440 spin_lock(&fs_info->super_lock); 4441 ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED); 4442 fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE; 4443 spin_unlock(&fs_info->super_lock); 4444 /* 4445 * A ro->rw remount sequence should continue with the paused balance 4446 * regardless of who pauses it, system or the user as of now, so set 4447 * the resume flag. 
4448 */ 4449 spin_lock(&fs_info->balance_lock); 4450 fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME; 4451 spin_unlock(&fs_info->balance_lock); 4452 4453 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance"); 4454 return PTR_ERR_OR_ZERO(tsk); 4455 } 4456 4457 int btrfs_recover_balance(struct btrfs_fs_info *fs_info) 4458 { 4459 struct btrfs_balance_control *bctl; 4460 struct btrfs_balance_item *item; 4461 struct btrfs_disk_balance_args disk_bargs; 4462 struct btrfs_path *path; 4463 struct extent_buffer *leaf; 4464 struct btrfs_key key; 4465 int ret; 4466 4467 path = btrfs_alloc_path(); 4468 if (!path) 4469 return -ENOMEM; 4470 4471 key.objectid = BTRFS_BALANCE_OBJECTID; 4472 key.type = BTRFS_TEMPORARY_ITEM_KEY; 4473 key.offset = 0; 4474 4475 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 4476 if (ret < 0) 4477 goto out; 4478 if (ret > 0) { /* ret = -ENOENT; */ 4479 ret = 0; 4480 goto out; 4481 } 4482 4483 bctl = kzalloc(sizeof(*bctl), GFP_NOFS); 4484 if (!bctl) { 4485 ret = -ENOMEM; 4486 goto out; 4487 } 4488 4489 leaf = path->nodes[0]; 4490 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 4491 4492 bctl->flags = btrfs_balance_flags(leaf, item); 4493 bctl->flags |= BTRFS_BALANCE_RESUME; 4494 4495 btrfs_balance_data(leaf, item, &disk_bargs); 4496 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs); 4497 btrfs_balance_meta(leaf, item, &disk_bargs); 4498 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs); 4499 btrfs_balance_sys(leaf, item, &disk_bargs); 4500 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs); 4501 4502 /* 4503 * This should never happen, as the paused balance state is recovered 4504 * during mount without any chance of other exclusive ops to collide. 4505 * 4506 * This gives the exclusive op status to balance and keeps in paused 4507 * state until user intervention (cancel or umount). If the ownership 4508 * cannot be assigned, show a message but do not fail. The balance 4509 * is in a paused state and must have fs_info::balance_ctl properly 4510 * set up. 
4511 */ 4512 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED)) 4513 btrfs_warn(fs_info, 4514 "balance: cannot set exclusive op status, resume manually"); 4515 4516 btrfs_release_path(path); 4517 4518 mutex_lock(&fs_info->balance_mutex); 4519 BUG_ON(fs_info->balance_ctl); 4520 spin_lock(&fs_info->balance_lock); 4521 fs_info->balance_ctl = bctl; 4522 spin_unlock(&fs_info->balance_lock); 4523 mutex_unlock(&fs_info->balance_mutex); 4524 out: 4525 btrfs_free_path(path); 4526 return ret; 4527 } 4528 4529 int btrfs_pause_balance(struct btrfs_fs_info *fs_info) 4530 { 4531 int ret = 0; 4532 4533 mutex_lock(&fs_info->balance_mutex); 4534 if (!fs_info->balance_ctl) { 4535 mutex_unlock(&fs_info->balance_mutex); 4536 return -ENOTCONN; 4537 } 4538 4539 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4540 atomic_inc(&fs_info->balance_pause_req); 4541 mutex_unlock(&fs_info->balance_mutex); 4542 4543 wait_event(fs_info->balance_wait_q, 4544 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4545 4546 mutex_lock(&fs_info->balance_mutex); 4547 /* we are good with balance_ctl ripped off from under us */ 4548 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4549 atomic_dec(&fs_info->balance_pause_req); 4550 } else { 4551 ret = -ENOTCONN; 4552 } 4553 4554 mutex_unlock(&fs_info->balance_mutex); 4555 return ret; 4556 } 4557 4558 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info) 4559 { 4560 mutex_lock(&fs_info->balance_mutex); 4561 if (!fs_info->balance_ctl) { 4562 mutex_unlock(&fs_info->balance_mutex); 4563 return -ENOTCONN; 4564 } 4565 4566 /* 4567 * A paused balance with the item stored on disk can be resumed at 4568 * mount time if the mount is read-write. Otherwise it's still paused 4569 * and we must not allow cancelling as it deletes the item. 4570 */ 4571 if (sb_rdonly(fs_info->sb)) { 4572 mutex_unlock(&fs_info->balance_mutex); 4573 return -EROFS; 4574 } 4575 4576 atomic_inc(&fs_info->balance_cancel_req); 4577 /* 4578 * if we are running just wait and return, balance item is 4579 * deleted in btrfs_balance in this case 4580 */ 4581 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4582 mutex_unlock(&fs_info->balance_mutex); 4583 wait_event(fs_info->balance_wait_q, 4584 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4585 mutex_lock(&fs_info->balance_mutex); 4586 } else { 4587 mutex_unlock(&fs_info->balance_mutex); 4588 /* 4589 * Lock released to allow other waiters to continue, we'll 4590 * reexamine the status again. 
4591 */ 4592 mutex_lock(&fs_info->balance_mutex); 4593 4594 if (fs_info->balance_ctl) { 4595 reset_balance_state(fs_info); 4596 btrfs_exclop_finish(fs_info); 4597 btrfs_info(fs_info, "balance: canceled"); 4598 } 4599 } 4600 4601 BUG_ON(fs_info->balance_ctl || 4602 test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4603 atomic_dec(&fs_info->balance_cancel_req); 4604 mutex_unlock(&fs_info->balance_mutex); 4605 return 0; 4606 } 4607 4608 int btrfs_uuid_scan_kthread(void *data) 4609 { 4610 struct btrfs_fs_info *fs_info = data; 4611 struct btrfs_root *root = fs_info->tree_root; 4612 struct btrfs_key key; 4613 struct btrfs_path *path = NULL; 4614 int ret = 0; 4615 struct extent_buffer *eb; 4616 int slot; 4617 struct btrfs_root_item root_item; 4618 u32 item_size; 4619 struct btrfs_trans_handle *trans = NULL; 4620 bool closing = false; 4621 4622 path = btrfs_alloc_path(); 4623 if (!path) { 4624 ret = -ENOMEM; 4625 goto out; 4626 } 4627 4628 key.objectid = 0; 4629 key.type = BTRFS_ROOT_ITEM_KEY; 4630 key.offset = 0; 4631 4632 while (1) { 4633 if (btrfs_fs_closing(fs_info)) { 4634 closing = true; 4635 break; 4636 } 4637 ret = btrfs_search_forward(root, &key, path, 4638 BTRFS_OLDEST_GENERATION); 4639 if (ret) { 4640 if (ret > 0) 4641 ret = 0; 4642 break; 4643 } 4644 4645 if (key.type != BTRFS_ROOT_ITEM_KEY || 4646 (key.objectid < BTRFS_FIRST_FREE_OBJECTID && 4647 key.objectid != BTRFS_FS_TREE_OBJECTID) || 4648 key.objectid > BTRFS_LAST_FREE_OBJECTID) 4649 goto skip; 4650 4651 eb = path->nodes[0]; 4652 slot = path->slots[0]; 4653 item_size = btrfs_item_size(eb, slot); 4654 if (item_size < sizeof(root_item)) 4655 goto skip; 4656 4657 read_extent_buffer(eb, &root_item, 4658 btrfs_item_ptr_offset(eb, slot), 4659 (int)sizeof(root_item)); 4660 if (btrfs_root_refs(&root_item) == 0) 4661 goto skip; 4662 4663 if (!btrfs_is_empty_uuid(root_item.uuid) || 4664 !btrfs_is_empty_uuid(root_item.received_uuid)) { 4665 if (trans) 4666 goto update_tree; 4667 4668 btrfs_release_path(path); 4669 /* 4670 * 1 - subvol uuid item 4671 * 1 - received_subvol uuid item 4672 */ 4673 trans = btrfs_start_transaction(fs_info->uuid_root, 2); 4674 if (IS_ERR(trans)) { 4675 ret = PTR_ERR(trans); 4676 break; 4677 } 4678 continue; 4679 } else { 4680 goto skip; 4681 } 4682 update_tree: 4683 btrfs_release_path(path); 4684 if (!btrfs_is_empty_uuid(root_item.uuid)) { 4685 ret = btrfs_uuid_tree_add(trans, root_item.uuid, 4686 BTRFS_UUID_KEY_SUBVOL, 4687 key.objectid); 4688 if (ret < 0) { 4689 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4690 ret); 4691 break; 4692 } 4693 } 4694 4695 if (!btrfs_is_empty_uuid(root_item.received_uuid)) { 4696 ret = btrfs_uuid_tree_add(trans, 4697 root_item.received_uuid, 4698 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 4699 key.objectid); 4700 if (ret < 0) { 4701 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4702 ret); 4703 break; 4704 } 4705 } 4706 4707 skip: 4708 btrfs_release_path(path); 4709 if (trans) { 4710 ret = btrfs_end_transaction(trans); 4711 trans = NULL; 4712 if (ret) 4713 break; 4714 } 4715 4716 if (key.offset < (u64)-1) { 4717 key.offset++; 4718 } else if (key.type < BTRFS_ROOT_ITEM_KEY) { 4719 key.offset = 0; 4720 key.type = BTRFS_ROOT_ITEM_KEY; 4721 } else if (key.objectid < (u64)-1) { 4722 key.offset = 0; 4723 key.type = BTRFS_ROOT_ITEM_KEY; 4724 key.objectid++; 4725 } else { 4726 break; 4727 } 4728 cond_resched(); 4729 } 4730 4731 out: 4732 btrfs_free_path(path); 4733 if (trans && !IS_ERR(trans)) 4734 btrfs_end_transaction(trans); 4735 if (ret) 4736 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread 
failed %d", ret);
	else if (!closing)
		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
	up(&fs_info->uuid_tree_rescan_sem);
	return 0;
}

int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *uuid_root;
	struct task_struct *task;
	int ret;

	/*
	 * 1 - root node
	 * 1 - root item
	 */
	trans = btrfs_start_transaction(tree_root, 2);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID);
	if (IS_ERR(uuid_root)) {
		ret = PTR_ERR(uuid_root);
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		return ret;
	}

	fs_info->uuid_root = uuid_root;

	ret = btrfs_commit_transaction(trans);
	if (ret)
		return ret;

	down(&fs_info->uuid_tree_rescan_sem);
	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
	if (IS_ERR(task)) {
		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
		btrfs_warn(fs_info, "failed to start uuid_scan task");
		up(&fs_info->uuid_tree_rescan_sem);
		return PTR_ERR(task);
	}

	return 0;
}

/*
 * Shrinking a device means finding all of the device extents past the new
 * size, and then following the back refs to the chunks. The chunk
 * relocation code actually frees the device extent.
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_offset;
	int ret;
	int slot;
	int failed = 0;
	bool retried = false;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 old_size = btrfs_device_get_total_bytes(device);
	u64 diff;
	u64 start;

	new_size = round_down(new_size, fs_info->sectorsize);
	start = new_size;
	diff = round_down(old_size - new_size, fs_info->sectorsize);

	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
		return -EINVAL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_BACK;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	mutex_lock(&fs_info->chunk_mutex);

	btrfs_device_set_total_bytes(device, new_size);
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		device->fs_devices->total_rw_bytes -= diff;
		atomic64_sub(diff, &fs_info->free_chunk_space);
	}

	/*
	 * Once the device's size has been set to the new size, ensure all
	 * in-memory chunks are synced to disk so that the loop below sees
	 * them and relocates them accordingly. Otherwise (no pending extents
	 * in the shrunk range) the transaction can simply be ended without a
	 * commit.
	 */
	if (contains_pending_extent(device, &start, diff)) {
		mutex_unlock(&fs_info->chunk_mutex);
		ret = btrfs_commit_transaction(trans);
		if (ret)
			goto done;
	} else {
		mutex_unlock(&fs_info->chunk_mutex);
		btrfs_end_transaction(trans);
	}

again:
	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	do {
		mutex_lock(&fs_info->reclaim_bgs_lock);
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&fs_info->reclaim_bgs_lock);
			goto done;
		}

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret) {
			mutex_unlock(&fs_info->reclaim_bgs_lock);
			if (ret < 0)
				goto done;
			ret = 0;
			btrfs_release_path(path);
			break;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid) {
			mutex_unlock(&fs_info->reclaim_bgs_lock);
			btrfs_release_path(path);
			break;
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (key.offset + length <= new_size) {
			mutex_unlock(&fs_info->reclaim_bgs_lock);
			btrfs_release_path(path);
			break;
		}

		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(path);

		/*
		 * We may be relocating the only data chunk we have, which
		 * could potentially end up losing the data raid profile, so
		 * let's allocate an empty one in advance.
		 */
		ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
		if (ret < 0) {
			mutex_unlock(&fs_info->reclaim_bgs_lock);
			goto done;
		}

		ret = btrfs_relocate_chunk(fs_info, chunk_offset);
		mutex_unlock(&fs_info->reclaim_bgs_lock);
		if (ret == -ENOSPC) {
			failed++;
		} else if (ret) {
			if (ret == -ETXTBSY) {
				btrfs_warn(fs_info,
		   "could not shrink block group %llu due to active swapfile",
					   chunk_offset);
			}
			goto done;
		}
	} while (key.offset-- > 0);

	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		ret = -ENOSPC;
		goto done;
	}

	/* Shrinking succeeded, else we would be at "done". */
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto done;
	}

	mutex_lock(&fs_info->chunk_mutex);
	/* Clear all state bits beyond the shrunk device size */
	clear_extent_bits(&device->alloc_state, new_size, (u64)-1,
			  CHUNK_STATE_MASK);

	btrfs_device_set_disk_total_bytes(device, new_size);
	if (list_empty(&device->post_commit_list))
		list_add_tail(&device->post_commit_list,
			      &trans->transaction->dev_update_list);

	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy,
				    round_down(old_total - diff,
					       fs_info->sectorsize));
	mutex_unlock(&fs_info->chunk_mutex);

	btrfs_reserve_chunk_metadata(trans, false);
	/*
	 * Now btrfs_update_device() will change the on-disk size; the
	 * in-memory sizes were already updated above, this persists the
	 * shrunken size in the device item of the chunk tree.
*/ 4957 ret = btrfs_update_device(trans, device); 4958 btrfs_trans_release_chunk_metadata(trans); 4959 if (ret < 0) { 4960 btrfs_abort_transaction(trans, ret); 4961 btrfs_end_transaction(trans); 4962 } else { 4963 ret = btrfs_commit_transaction(trans); 4964 } 4965 done: 4966 btrfs_free_path(path); 4967 if (ret) { 4968 mutex_lock(&fs_info->chunk_mutex); 4969 btrfs_device_set_total_bytes(device, old_size); 4970 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 4971 device->fs_devices->total_rw_bytes += diff; 4972 atomic64_add(diff, &fs_info->free_chunk_space); 4973 mutex_unlock(&fs_info->chunk_mutex); 4974 } 4975 return ret; 4976 } 4977 4978 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info, 4979 struct btrfs_key *key, 4980 struct btrfs_chunk *chunk, int item_size) 4981 { 4982 struct btrfs_super_block *super_copy = fs_info->super_copy; 4983 struct btrfs_disk_key disk_key; 4984 u32 array_size; 4985 u8 *ptr; 4986 4987 lockdep_assert_held(&fs_info->chunk_mutex); 4988 4989 array_size = btrfs_super_sys_array_size(super_copy); 4990 if (array_size + item_size + sizeof(disk_key) 4991 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) 4992 return -EFBIG; 4993 4994 ptr = super_copy->sys_chunk_array + array_size; 4995 btrfs_cpu_key_to_disk(&disk_key, key); 4996 memcpy(ptr, &disk_key, sizeof(disk_key)); 4997 ptr += sizeof(disk_key); 4998 memcpy(ptr, chunk, item_size); 4999 item_size += sizeof(disk_key); 5000 btrfs_set_super_sys_array_size(super_copy, array_size + item_size); 5001 5002 return 0; 5003 } 5004 5005 /* 5006 * sort the devices in descending order by max_avail, total_avail 5007 */ 5008 static int btrfs_cmp_device_info(const void *a, const void *b) 5009 { 5010 const struct btrfs_device_info *di_a = a; 5011 const struct btrfs_device_info *di_b = b; 5012 5013 if (di_a->max_avail > di_b->max_avail) 5014 return -1; 5015 if (di_a->max_avail < di_b->max_avail) 5016 return 1; 5017 if (di_a->total_avail > di_b->total_avail) 5018 return -1; 5019 if (di_a->total_avail < di_b->total_avail) 5020 return 1; 5021 return 0; 5022 } 5023 5024 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type) 5025 { 5026 if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK)) 5027 return; 5028 5029 btrfs_set_fs_incompat(info, RAID56); 5030 } 5031 5032 static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type) 5033 { 5034 if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4))) 5035 return; 5036 5037 btrfs_set_fs_incompat(info, RAID1C34); 5038 } 5039 5040 /* 5041 * Structure used internally for btrfs_create_chunk() function. 5042 * Wraps needed parameters. 
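 *
 * The fields are seeded from btrfs_raid_array by init_alloc_chunk_ctl()
 * and then refined by the active chunk allocation policy (regular or
 * zoned) before the allocator uses them.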
5043 */ 5044 struct alloc_chunk_ctl { 5045 u64 start; 5046 u64 type; 5047 /* Total number of stripes to allocate */ 5048 int num_stripes; 5049 /* sub_stripes info for map */ 5050 int sub_stripes; 5051 /* Stripes per device */ 5052 int dev_stripes; 5053 /* Maximum number of devices to use */ 5054 int devs_max; 5055 /* Minimum number of devices to use */ 5056 int devs_min; 5057 /* ndevs has to be a multiple of this */ 5058 int devs_increment; 5059 /* Number of copies */ 5060 int ncopies; 5061 /* Number of stripes worth of bytes to store parity information */ 5062 int nparity; 5063 u64 max_stripe_size; 5064 u64 max_chunk_size; 5065 u64 dev_extent_min; 5066 u64 stripe_size; 5067 u64 chunk_size; 5068 int ndevs; 5069 }; 5070 5071 static void init_alloc_chunk_ctl_policy_regular( 5072 struct btrfs_fs_devices *fs_devices, 5073 struct alloc_chunk_ctl *ctl) 5074 { 5075 u64 type = ctl->type; 5076 5077 if (type & BTRFS_BLOCK_GROUP_DATA) { 5078 ctl->max_stripe_size = SZ_1G; 5079 ctl->max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE; 5080 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 5081 /* For larger filesystems, use larger metadata chunks */ 5082 if (fs_devices->total_rw_bytes > 50ULL * SZ_1G) 5083 ctl->max_stripe_size = SZ_1G; 5084 else 5085 ctl->max_stripe_size = SZ_256M; 5086 ctl->max_chunk_size = ctl->max_stripe_size; 5087 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 5088 ctl->max_stripe_size = SZ_32M; 5089 ctl->max_chunk_size = 2 * ctl->max_stripe_size; 5090 ctl->devs_max = min_t(int, ctl->devs_max, 5091 BTRFS_MAX_DEVS_SYS_CHUNK); 5092 } else { 5093 BUG(); 5094 } 5095 5096 /* We don't want a chunk larger than 10% of writable space */ 5097 ctl->max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1), 5098 ctl->max_chunk_size); 5099 ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes; 5100 } 5101 5102 static void init_alloc_chunk_ctl_policy_zoned( 5103 struct btrfs_fs_devices *fs_devices, 5104 struct alloc_chunk_ctl *ctl) 5105 { 5106 u64 zone_size = fs_devices->fs_info->zone_size; 5107 u64 limit; 5108 int min_num_stripes = ctl->devs_min * ctl->dev_stripes; 5109 int min_data_stripes = (min_num_stripes - ctl->nparity) / ctl->ncopies; 5110 u64 min_chunk_size = min_data_stripes * zone_size; 5111 u64 type = ctl->type; 5112 5113 ctl->max_stripe_size = zone_size; 5114 if (type & BTRFS_BLOCK_GROUP_DATA) { 5115 ctl->max_chunk_size = round_down(BTRFS_MAX_DATA_CHUNK_SIZE, 5116 zone_size); 5117 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 5118 ctl->max_chunk_size = ctl->max_stripe_size; 5119 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 5120 ctl->max_chunk_size = 2 * ctl->max_stripe_size; 5121 ctl->devs_max = min_t(int, ctl->devs_max, 5122 BTRFS_MAX_DEVS_SYS_CHUNK); 5123 } else { 5124 BUG(); 5125 } 5126 5127 /* We don't want a chunk larger than 10% of writable space */ 5128 limit = max(round_down(div_factor(fs_devices->total_rw_bytes, 1), 5129 zone_size), 5130 min_chunk_size); 5131 ctl->max_chunk_size = min(limit, ctl->max_chunk_size); 5132 ctl->dev_extent_min = zone_size * ctl->dev_stripes; 5133 } 5134 5135 static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices, 5136 struct alloc_chunk_ctl *ctl) 5137 { 5138 int index = btrfs_bg_flags_to_raid_index(ctl->type); 5139 5140 ctl->sub_stripes = btrfs_raid_array[index].sub_stripes; 5141 ctl->dev_stripes = btrfs_raid_array[index].dev_stripes; 5142 ctl->devs_max = btrfs_raid_array[index].devs_max; 5143 if (!ctl->devs_max) 5144 ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info); 5145 ctl->devs_min = btrfs_raid_array[index].devs_min; 5146 
ctl->devs_increment = btrfs_raid_array[index].devs_increment;
	ctl->ncopies = btrfs_raid_array[index].ncopies;
	ctl->nparity = btrfs_raid_array[index].nparity;
	ctl->ndevs = 0;

	switch (fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		init_alloc_chunk_ctl_policy_regular(fs_devices, ctl);
		break;
	case BTRFS_CHUNK_ALLOC_ZONED:
		init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl);
		break;
	default:
		BUG();
	}
}

static int gather_device_info(struct btrfs_fs_devices *fs_devices,
			      struct alloc_chunk_ctl *ctl,
			      struct btrfs_device_info *devices_info)
{
	struct btrfs_fs_info *info = fs_devices->fs_info;
	struct btrfs_device *device;
	u64 total_avail;
	u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes;
	int ret;
	int ndevs = 0;
	u64 max_avail;
	u64 dev_offset;

	/*
	 * In the first pass through the devices list, we gather information
	 * about the available holes on each device.
	 */
	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			WARN(1, KERN_ERR
			     "BTRFS: read-only device in alloc_list\n");
			continue;
		}

		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
			      &device->dev_state) ||
		    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
			continue;

		if (device->total_bytes > device->bytes_used)
			total_avail = device->total_bytes - device->bytes_used;
		else
			total_avail = 0;

		/* If there is no space on this device, skip it. */
		if (total_avail < ctl->dev_extent_min)
			continue;

		ret = find_free_dev_extent(device, dev_extent_want,
					   &dev_offset, &max_avail);
		if (ret && ret != -ENOSPC)
			return ret;

		if (ret == 0)
			max_avail = dev_extent_want;

		if (max_avail < ctl->dev_extent_min) {
			if (btrfs_test_opt(info, ENOSPC_DEBUG))
				btrfs_debug(info,
			"%s: devid %llu has no free space, have=%llu want=%llu",
					    __func__, device->devid, max_avail,
					    ctl->dev_extent_min);
			continue;
		}

		if (ndevs == fs_devices->rw_devices) {
			WARN(1, "%s: found more than %llu devices\n",
			     __func__, fs_devices->rw_devices);
			break;
		}
		devices_info[ndevs].dev_offset = dev_offset;
		devices_info[ndevs].max_avail = max_avail;
		devices_info[ndevs].total_avail = total_avail;
		devices_info[ndevs].dev = device;
		++ndevs;
	}
	ctl->ndevs = ndevs;

	/*
	 * Now sort the devices by hole size / available space.
	 */
	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
	     btrfs_cmp_device_info, NULL);

	return 0;
}

static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl,
				      struct btrfs_device_info *devices_info)
{
	/* Number of stripes that count for block group size */
	int data_stripes;

	/*
	 * The primary goal is to maximize the number of stripes, so use as
	 * many devices as possible, even if the stripes are not maximum sized.
	 *
	 * The DUP profile stores more than one stripe per device, the
	 * max_avail is the total size so we have to adjust. E.g.
	 * (illustrative) with DUP, dev_stripes == 2, so a max_avail of 2GiB
	 * on the smallest usable device yields an initial stripe_size of
	 * 1GiB.
	 */
	ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail,
				   ctl->dev_stripes);
	ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;

	/* This will have to be fixed for RAID1 and RAID10 over more drives */
	data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;

	/*
	 * Use the number of data stripes to figure out how big this chunk is
	 * really going to be in terms of logical address space, and compare
	 * that answer with the max chunk size. If it's higher, we try to
	 * reduce stripe_size.
	 */
	if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
		/*
		 * Reduce stripe_size, round it up to a 16MB boundary again and
		 * then use it, unless it ends up being even bigger than the
		 * previous value we had already.
		 */
		ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size,
							data_stripes), SZ_16M),
				       ctl->stripe_size);
	}

	/* Align to BTRFS_STRIPE_LEN */
	ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN);
	ctl->chunk_size = ctl->stripe_size * data_stripes;

	return 0;
}

static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl,
				    struct btrfs_device_info *devices_info)
{
	u64 zone_size = devices_info[0].dev->zone_info->zone_size;
	/* Number of stripes that count for block group size */
	int data_stripes;

	/*
	 * It should hold because:
	 * dev_extent_min == dev_extent_want == zone_size * dev_stripes
	 */
	ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min);

	ctl->stripe_size = zone_size;
	ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
	data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;

	/* stripe_size is fixed in a zoned filesystem. Reduce ndevs instead. */
	if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
		ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies,
					     ctl->stripe_size) + ctl->nparity,
				     ctl->dev_stripes);
		ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
		data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
		ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size);
	}

	ctl->chunk_size = ctl->stripe_size * data_stripes;

	return 0;
}

static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
			      struct alloc_chunk_ctl *ctl,
			      struct btrfs_device_info *devices_info)
{
	struct btrfs_fs_info *info = fs_devices->fs_info;

	/*
	 * Round down to the number of usable stripes, devs_increment can be
	 * any number so we can't use round_down(), which requires a power of
	 * 2, while rounddown is safe. E.g. RAID10 has devs_increment == 2,
	 * so 5 usable devices are rounded down to 4 here.
5326 */ 5327 ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment); 5328 5329 if (ctl->ndevs < ctl->devs_min) { 5330 if (btrfs_test_opt(info, ENOSPC_DEBUG)) { 5331 btrfs_debug(info, 5332 "%s: not enough devices with free space: have=%d minimum required=%d", 5333 __func__, ctl->ndevs, ctl->devs_min); 5334 } 5335 return -ENOSPC; 5336 } 5337 5338 ctl->ndevs = min(ctl->ndevs, ctl->devs_max); 5339 5340 switch (fs_devices->chunk_alloc_policy) { 5341 case BTRFS_CHUNK_ALLOC_REGULAR: 5342 return decide_stripe_size_regular(ctl, devices_info); 5343 case BTRFS_CHUNK_ALLOC_ZONED: 5344 return decide_stripe_size_zoned(ctl, devices_info); 5345 default: 5346 BUG(); 5347 } 5348 } 5349 5350 static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans, 5351 struct alloc_chunk_ctl *ctl, 5352 struct btrfs_device_info *devices_info) 5353 { 5354 struct btrfs_fs_info *info = trans->fs_info; 5355 struct map_lookup *map = NULL; 5356 struct extent_map_tree *em_tree; 5357 struct btrfs_block_group *block_group; 5358 struct extent_map *em; 5359 u64 start = ctl->start; 5360 u64 type = ctl->type; 5361 int ret; 5362 int i; 5363 int j; 5364 5365 map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS); 5366 if (!map) 5367 return ERR_PTR(-ENOMEM); 5368 map->num_stripes = ctl->num_stripes; 5369 5370 for (i = 0; i < ctl->ndevs; ++i) { 5371 for (j = 0; j < ctl->dev_stripes; ++j) { 5372 int s = i * ctl->dev_stripes + j; 5373 map->stripes[s].dev = devices_info[i].dev; 5374 map->stripes[s].physical = devices_info[i].dev_offset + 5375 j * ctl->stripe_size; 5376 } 5377 } 5378 map->stripe_len = BTRFS_STRIPE_LEN; 5379 map->io_align = BTRFS_STRIPE_LEN; 5380 map->io_width = BTRFS_STRIPE_LEN; 5381 map->type = type; 5382 map->sub_stripes = ctl->sub_stripes; 5383 5384 trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size); 5385 5386 em = alloc_extent_map(); 5387 if (!em) { 5388 kfree(map); 5389 return ERR_PTR(-ENOMEM); 5390 } 5391 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 5392 em->map_lookup = map; 5393 em->start = start; 5394 em->len = ctl->chunk_size; 5395 em->block_start = 0; 5396 em->block_len = em->len; 5397 em->orig_block_len = ctl->stripe_size; 5398 5399 em_tree = &info->mapping_tree; 5400 write_lock(&em_tree->lock); 5401 ret = add_extent_mapping(em_tree, em, 0); 5402 if (ret) { 5403 write_unlock(&em_tree->lock); 5404 free_extent_map(em); 5405 return ERR_PTR(ret); 5406 } 5407 write_unlock(&em_tree->lock); 5408 5409 block_group = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size); 5410 if (IS_ERR(block_group)) 5411 goto error_del_extent; 5412 5413 for (i = 0; i < map->num_stripes; i++) { 5414 struct btrfs_device *dev = map->stripes[i].dev; 5415 5416 btrfs_device_set_bytes_used(dev, 5417 dev->bytes_used + ctl->stripe_size); 5418 if (list_empty(&dev->post_commit_list)) 5419 list_add_tail(&dev->post_commit_list, 5420 &trans->transaction->dev_update_list); 5421 } 5422 5423 atomic64_sub(ctl->stripe_size * map->num_stripes, 5424 &info->free_chunk_space); 5425 5426 free_extent_map(em); 5427 check_raid56_incompat_flag(info, type); 5428 check_raid1c34_incompat_flag(info, type); 5429 5430 return block_group; 5431 5432 error_del_extent: 5433 write_lock(&em_tree->lock); 5434 remove_extent_mapping(em_tree, em); 5435 write_unlock(&em_tree->lock); 5436 5437 /* One for our allocation */ 5438 free_extent_map(em); 5439 /* One for the tree reference */ 5440 free_extent_map(em); 5441 5442 return block_group; 5443 } 5444 5445 struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans, 5446 u64 
type)
{
	struct btrfs_fs_info *info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = info->fs_devices;
	struct btrfs_device_info *devices_info = NULL;
	struct alloc_chunk_ctl ctl;
	struct btrfs_block_group *block_group;
	int ret;

	lockdep_assert_held(&info->chunk_mutex);

	if (!alloc_profile_is_valid(type, 0)) {
		ASSERT(0);
		return ERR_PTR(-EINVAL);
	}

	if (list_empty(&fs_devices->alloc_list)) {
		if (btrfs_test_opt(info, ENOSPC_DEBUG))
			btrfs_debug(info, "%s: no writable device", __func__);
		return ERR_PTR(-ENOSPC);
	}

	if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
		btrfs_err(info, "invalid chunk type 0x%llx requested", type);
		ASSERT(0);
		return ERR_PTR(-EINVAL);
	}

	ctl.start = find_next_chunk(info);
	ctl.type = type;
	init_alloc_chunk_ctl(fs_devices, &ctl);

	devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
			       GFP_NOFS);
	if (!devices_info)
		return ERR_PTR(-ENOMEM);

	ret = gather_device_info(fs_devices, &ctl, devices_info);
	if (ret < 0) {
		block_group = ERR_PTR(ret);
		goto out;
	}

	ret = decide_stripe_size(fs_devices, &ctl, devices_info);
	if (ret < 0) {
		block_group = ERR_PTR(ret);
		goto out;
	}

	block_group = create_chunk(trans, &ctl, devices_info);

out:
	kfree(devices_info);
	return block_group;
}

/*
 * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to
 * phase 1 of chunk allocation. It belongs to phase 2 only when allocating
 * system chunks.
 *
 * See the comment at btrfs_chunk_alloc() for details about the chunk
 * allocation phases.
 */
int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
				     struct btrfs_block_group *bg)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_stripe *stripe;
	struct extent_map *em;
	struct map_lookup *map;
	size_t item_size;
	int i;
	int ret;

	/*
	 * We take the chunk_mutex for 2 reasons:
	 *
	 * 1) Updates and insertions in the chunk btree must be done while
	 *    holding the chunk_mutex, as well as updating the system chunk
	 *    array in the superblock. See the comment on top of
	 *    btrfs_chunk_alloc() for the details;
	 *
	 * 2) To prevent races with the final phase of a device replace
	 *    operation that replaces the device object associated with the
	 *    map's stripes, because the device object's id can change at any
	 *    time during that final phase of the device replace operation
	 *    (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab
	 *    the replaced device and then see it with an ID of
	 *    BTRFS_DEV_REPLACE_DEVID, which would cause a failure when
	 *    updating the device item, which does not exist, or persisting a
	 *    stripe of the chunk item with such an ID.
	 *    Here we can't use the device_list_mutex because our caller
	 *    already has locked the chunk_mutex, and the final phase of
	 *    device replace acquires both mutexes - first the
	 *    device_list_mutex and then the chunk_mutex. Using any of those
	 *    two mutexes protects us from a concurrent device replace.
5545 */ 5546 lockdep_assert_held(&fs_info->chunk_mutex); 5547 5548 em = btrfs_get_chunk_map(fs_info, bg->start, bg->length); 5549 if (IS_ERR(em)) { 5550 ret = PTR_ERR(em); 5551 btrfs_abort_transaction(trans, ret); 5552 return ret; 5553 } 5554 5555 map = em->map_lookup; 5556 item_size = btrfs_chunk_item_size(map->num_stripes); 5557 5558 chunk = kzalloc(item_size, GFP_NOFS); 5559 if (!chunk) { 5560 ret = -ENOMEM; 5561 btrfs_abort_transaction(trans, ret); 5562 goto out; 5563 } 5564 5565 for (i = 0; i < map->num_stripes; i++) { 5566 struct btrfs_device *device = map->stripes[i].dev; 5567 5568 ret = btrfs_update_device(trans, device); 5569 if (ret) 5570 goto out; 5571 } 5572 5573 stripe = &chunk->stripe; 5574 for (i = 0; i < map->num_stripes; i++) { 5575 struct btrfs_device *device = map->stripes[i].dev; 5576 const u64 dev_offset = map->stripes[i].physical; 5577 5578 btrfs_set_stack_stripe_devid(stripe, device->devid); 5579 btrfs_set_stack_stripe_offset(stripe, dev_offset); 5580 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE); 5581 stripe++; 5582 } 5583 5584 btrfs_set_stack_chunk_length(chunk, bg->length); 5585 btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID); 5586 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len); 5587 btrfs_set_stack_chunk_type(chunk, map->type); 5588 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes); 5589 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len); 5590 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len); 5591 btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize); 5592 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes); 5593 5594 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 5595 key.type = BTRFS_CHUNK_ITEM_KEY; 5596 key.offset = bg->start; 5597 5598 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size); 5599 if (ret) 5600 goto out; 5601 5602 bg->chunk_item_inserted = 1; 5603 5604 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 5605 ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size); 5606 if (ret) 5607 goto out; 5608 } 5609 5610 out: 5611 kfree(chunk); 5612 free_extent_map(em); 5613 return ret; 5614 } 5615 5616 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans) 5617 { 5618 struct btrfs_fs_info *fs_info = trans->fs_info; 5619 u64 alloc_profile; 5620 struct btrfs_block_group *meta_bg; 5621 struct btrfs_block_group *sys_bg; 5622 5623 /* 5624 * When adding a new device for sprouting, the seed device is read-only 5625 * so we must first allocate a metadata and a system chunk. But before 5626 * adding the block group items to the extent, device and chunk btrees, 5627 * we must first: 5628 * 5629 * 1) Create both chunks without doing any changes to the btrees, as 5630 * otherwise we would get -ENOSPC since the block groups from the 5631 * seed device are read-only; 5632 * 5633 * 2) Add the device item for the new sprout device - finishing the setup 5634 * of a new block group requires updating the device item in the chunk 5635 * btree, so it must exist when we attempt to do it. The previous step 5636 * ensures this does not fail with -ENOSPC. 5637 * 5638 * After that we can add the block group items to their btrees: 5639 * update existing device item in the chunk btree, add a new block group 5640 * item to the extent btree, add a new chunk item to the chunk btree and 5641 * finally add the new device extent items to the devices btree. 
5642 */ 5643 5644 alloc_profile = btrfs_metadata_alloc_profile(fs_info); 5645 meta_bg = btrfs_create_chunk(trans, alloc_profile); 5646 if (IS_ERR(meta_bg)) 5647 return PTR_ERR(meta_bg); 5648 5649 alloc_profile = btrfs_system_alloc_profile(fs_info); 5650 sys_bg = btrfs_create_chunk(trans, alloc_profile); 5651 if (IS_ERR(sys_bg)) 5652 return PTR_ERR(sys_bg); 5653 5654 return 0; 5655 } 5656 5657 static inline int btrfs_chunk_max_errors(struct map_lookup *map) 5658 { 5659 const int index = btrfs_bg_flags_to_raid_index(map->type); 5660 5661 return btrfs_raid_array[index].tolerated_failures; 5662 } 5663 5664 bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset) 5665 { 5666 struct extent_map *em; 5667 struct map_lookup *map; 5668 int miss_ndevs = 0; 5669 int i; 5670 bool ret = true; 5671 5672 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 5673 if (IS_ERR(em)) 5674 return false; 5675 5676 map = em->map_lookup; 5677 for (i = 0; i < map->num_stripes; i++) { 5678 if (test_bit(BTRFS_DEV_STATE_MISSING, 5679 &map->stripes[i].dev->dev_state)) { 5680 miss_ndevs++; 5681 continue; 5682 } 5683 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, 5684 &map->stripes[i].dev->dev_state)) { 5685 ret = false; 5686 goto end; 5687 } 5688 } 5689 5690 /* 5691 * If the number of missing devices is larger than max errors, we can 5692 * not write the data into that chunk successfully. 5693 */ 5694 if (miss_ndevs > btrfs_chunk_max_errors(map)) 5695 ret = false; 5696 end: 5697 free_extent_map(em); 5698 return ret; 5699 } 5700 5701 void btrfs_mapping_tree_free(struct extent_map_tree *tree) 5702 { 5703 struct extent_map *em; 5704 5705 while (1) { 5706 write_lock(&tree->lock); 5707 em = lookup_extent_mapping(tree, 0, (u64)-1); 5708 if (em) 5709 remove_extent_mapping(tree, em); 5710 write_unlock(&tree->lock); 5711 if (!em) 5712 break; 5713 /* once for us */ 5714 free_extent_map(em); 5715 /* once for the tree */ 5716 free_extent_map(em); 5717 } 5718 } 5719 5720 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5721 { 5722 struct extent_map *em; 5723 struct map_lookup *map; 5724 int ret; 5725 5726 em = btrfs_get_chunk_map(fs_info, logical, len); 5727 if (IS_ERR(em)) 5728 /* 5729 * We could return errors for these cases, but that could get 5730 * ugly and we'd probably do the same thing which is just not do 5731 * anything else and exit, so return 1 so the callers don't try 5732 * to use other copies. 5733 */ 5734 return 1; 5735 5736 map = em->map_lookup; 5737 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK)) 5738 ret = map->num_stripes; 5739 else if (map->type & BTRFS_BLOCK_GROUP_RAID10) 5740 ret = map->sub_stripes; 5741 else if (map->type & BTRFS_BLOCK_GROUP_RAID5) 5742 ret = 2; 5743 else if (map->type & BTRFS_BLOCK_GROUP_RAID6) 5744 /* 5745 * There could be two corrupted data stripes, we need 5746 * to loop retry in order to rebuild the correct data. 5747 * 5748 * Fail a stripe at a time on every retry except the 5749 * stripe under reconstruction. 
5750 */ 5751 ret = map->num_stripes; 5752 else 5753 ret = 1; 5754 free_extent_map(em); 5755 5756 down_read(&fs_info->dev_replace.rwsem); 5757 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) && 5758 fs_info->dev_replace.tgtdev) 5759 ret++; 5760 up_read(&fs_info->dev_replace.rwsem); 5761 5762 return ret; 5763 } 5764 5765 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info, 5766 u64 logical) 5767 { 5768 struct extent_map *em; 5769 struct map_lookup *map; 5770 unsigned long len = fs_info->sectorsize; 5771 5772 em = btrfs_get_chunk_map(fs_info, logical, len); 5773 5774 if (!WARN_ON(IS_ERR(em))) { 5775 map = em->map_lookup; 5776 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 5777 len = map->stripe_len * nr_data_stripes(map); 5778 free_extent_map(em); 5779 } 5780 return len; 5781 } 5782 5783 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5784 { 5785 struct extent_map *em; 5786 struct map_lookup *map; 5787 int ret = 0; 5788 5789 em = btrfs_get_chunk_map(fs_info, logical, len); 5790 5791 if (!WARN_ON(IS_ERR(em))) { 5792 map = em->map_lookup; 5793 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 5794 ret = 1; 5795 free_extent_map(em); 5796 } 5797 return ret; 5798 } 5799 5800 static int find_live_mirror(struct btrfs_fs_info *fs_info, 5801 struct map_lookup *map, int first, 5802 int dev_replace_is_ongoing) 5803 { 5804 int i; 5805 int num_stripes; 5806 int preferred_mirror; 5807 int tolerance; 5808 struct btrfs_device *srcdev; 5809 5810 ASSERT((map->type & 5811 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10))); 5812 5813 if (map->type & BTRFS_BLOCK_GROUP_RAID10) 5814 num_stripes = map->sub_stripes; 5815 else 5816 num_stripes = map->num_stripes; 5817 5818 switch (fs_info->fs_devices->read_policy) { 5819 default: 5820 /* Shouldn't happen, just warn and use pid instead of failing */ 5821 btrfs_warn_rl(fs_info, 5822 "unknown read_policy type %u, reset to pid", 5823 fs_info->fs_devices->read_policy); 5824 fs_info->fs_devices->read_policy = BTRFS_READ_POLICY_PID; 5825 fallthrough; 5826 case BTRFS_READ_POLICY_PID: 5827 preferred_mirror = first + (current->pid % num_stripes); 5828 break; 5829 } 5830 5831 if (dev_replace_is_ongoing && 5832 fs_info->dev_replace.cont_reading_from_srcdev_mode == 5833 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID) 5834 srcdev = fs_info->dev_replace.srcdev; 5835 else 5836 srcdev = NULL; 5837 5838 /* 5839 * Try to avoid the drive that is the source drive for a 5840 * dev-replace procedure; only choose it if no other non-missing 5841 * mirror is available. 5842 */ 5843 for (tolerance = 0; tolerance < 2; tolerance++) { 5844 if (map->stripes[preferred_mirror].dev->bdev && 5845 (tolerance || map->stripes[preferred_mirror].dev != srcdev)) 5846 return preferred_mirror; 5847 for (i = first; i < first + num_stripes; i++) { 5848 if (map->stripes[i].dev->bdev && 5849 (tolerance || map->stripes[i].dev != srcdev)) 5850 return i; 5851 } 5852 } 5853 5854 /* We couldn't find one that doesn't fail.
Just return something 5855 * and the IO error handling code will clean up eventually. 5856 */ 5857 return preferred_mirror; 5858 } 5859 5860 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */ 5861 static void sort_parity_stripes(struct btrfs_io_context *bioc, int num_stripes) 5862 { 5863 int i; 5864 int again = 1; 5865 5866 while (again) { 5867 again = 0; 5868 for (i = 0; i < num_stripes - 1; i++) { 5869 /* Swap if parity is on a smaller index */ 5870 if (bioc->raid_map[i] > bioc->raid_map[i + 1]) { 5871 swap(bioc->stripes[i], bioc->stripes[i + 1]); 5872 swap(bioc->raid_map[i], bioc->raid_map[i + 1]); 5873 again = 1; 5874 } 5875 } 5876 } 5877 } 5878 5879 static struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info, 5880 int total_stripes, 5881 int real_stripes) 5882 { 5883 struct btrfs_io_context *bioc = kzalloc( 5884 /* The size of btrfs_io_context */ 5885 sizeof(struct btrfs_io_context) + 5886 /* Plus the variable array for the stripes */ 5887 sizeof(struct btrfs_io_stripe) * (total_stripes) + 5888 /* Plus the variable array for the tgt dev */ 5889 sizeof(int) * (real_stripes) + 5890 /* 5891 * Plus the raid_map, which includes both the tgt dev 5892 * and the stripes. 5893 */ 5894 sizeof(u64) * (total_stripes), 5895 GFP_NOFS | __GFP_NOFAIL); 5896 5897 atomic_set(&bioc->error, 0); 5898 refcount_set(&bioc->refs, 1); 5899 5900 bioc->fs_info = fs_info; 5901 bioc->tgtdev_map = (int *)(bioc->stripes + total_stripes); 5902 bioc->raid_map = (u64 *)(bioc->tgtdev_map + real_stripes); 5903 5904 return bioc; 5905 } 5906 5907 void btrfs_get_bioc(struct btrfs_io_context *bioc) 5908 { 5909 WARN_ON(!refcount_read(&bioc->refs)); 5910 refcount_inc(&bioc->refs); 5911 } 5912 5913 void btrfs_put_bioc(struct btrfs_io_context *bioc) 5914 { 5915 if (!bioc) 5916 return; 5917 if (refcount_dec_and_test(&bioc->refs)) 5918 kfree(bioc); 5919 } 5920 5921 /* Can REQ_OP_DISCARD be sent together with other REQs like REQ_OP_WRITE? */ 5922 /* 5923 * Note that discard won't be sent to the target device of a device 5924 * replace. 5925 */ 5926 static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info, 5927 u64 logical, u64 *length_ret, 5928 struct btrfs_io_context **bioc_ret) 5929 { 5930 struct extent_map *em; 5931 struct map_lookup *map; 5932 struct btrfs_io_context *bioc; 5933 u64 length = *length_ret; 5934 u64 offset; 5935 u64 stripe_nr; 5936 u64 stripe_nr_end; 5937 u64 stripe_end_offset; 5938 u64 stripe_cnt; 5939 u64 stripe_len; 5940 u64 stripe_offset; 5941 u64 num_stripes; 5942 u32 stripe_index; 5943 u32 factor = 0; 5944 u32 sub_stripes = 0; 5945 u64 stripes_per_dev = 0; 5946 u32 remaining_stripes = 0; 5947 u32 last_stripe = 0; 5948 int ret = 0; 5949 int i; 5950 5951 /* Discard always returns a bioc.
*/ 5952 ASSERT(bioc_ret); 5953 5954 em = btrfs_get_chunk_map(fs_info, logical, length); 5955 if (IS_ERR(em)) 5956 return PTR_ERR(em); 5957 5958 map = em->map_lookup; 5959 /* we don't discard raid56 yet */ 5960 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 5961 ret = -EOPNOTSUPP; 5962 goto out; 5963 } 5964 5965 offset = logical - em->start; 5966 length = min_t(u64, em->start + em->len - logical, length); 5967 *length_ret = length; 5968 5969 stripe_len = map->stripe_len; 5970 /* 5971 * stripe_nr counts the total number of stripes we have to stride 5972 * to get to this block 5973 */ 5974 stripe_nr = div64_u64(offset, stripe_len); 5975 5976 /* stripe_offset is the offset of this block in its stripe */ 5977 stripe_offset = offset - stripe_nr * stripe_len; 5978 5979 stripe_nr_end = round_up(offset + length, map->stripe_len); 5980 stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len); 5981 stripe_cnt = stripe_nr_end - stripe_nr; 5982 stripe_end_offset = stripe_nr_end * map->stripe_len - 5983 (offset + length); 5984 /* 5985 * after this, stripe_nr is the number of stripes on this 5986 * device we have to walk to find the data, and stripe_index is 5987 * the number of our device in the stripe array 5988 */ 5989 num_stripes = 1; 5990 stripe_index = 0; 5991 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 5992 BTRFS_BLOCK_GROUP_RAID10)) { 5993 if (map->type & BTRFS_BLOCK_GROUP_RAID0) 5994 sub_stripes = 1; 5995 else 5996 sub_stripes = map->sub_stripes; 5997 5998 factor = map->num_stripes / sub_stripes; 5999 num_stripes = min_t(u64, map->num_stripes, 6000 sub_stripes * stripe_cnt); 6001 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); 6002 stripe_index *= sub_stripes; 6003 stripes_per_dev = div_u64_rem(stripe_cnt, factor, 6004 &remaining_stripes); 6005 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe); 6006 last_stripe *= sub_stripes; 6007 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK | 6008 BTRFS_BLOCK_GROUP_DUP)) { 6009 num_stripes = map->num_stripes; 6010 } else { 6011 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6012 &stripe_index); 6013 } 6014 6015 bioc = alloc_btrfs_io_context(fs_info, num_stripes, 0); 6016 if (!bioc) { 6017 ret = -ENOMEM; 6018 goto out; 6019 } 6020 6021 for (i = 0; i < num_stripes; i++) { 6022 bioc->stripes[i].physical = 6023 map->stripes[stripe_index].physical + 6024 stripe_offset + stripe_nr * map->stripe_len; 6025 bioc->stripes[i].dev = map->stripes[stripe_index].dev; 6026 6027 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 6028 BTRFS_BLOCK_GROUP_RAID10)) { 6029 bioc->stripes[i].length = stripes_per_dev * 6030 map->stripe_len; 6031 6032 if (i / sub_stripes < remaining_stripes) 6033 bioc->stripes[i].length += map->stripe_len; 6034 6035 /* 6036 * Special for the first stripe and 6037 * the last stripe: 6038 * 6039 * |-------|...|-------| 6040 * |----------| 6041 * off end_off 6042 */ 6043 if (i < sub_stripes) 6044 bioc->stripes[i].length -= stripe_offset; 6045 6046 if (stripe_index >= last_stripe && 6047 stripe_index <= (last_stripe + 6048 sub_stripes - 1)) 6049 bioc->stripes[i].length -= stripe_end_offset; 6050 6051 if (i == sub_stripes - 1) 6052 stripe_offset = 0; 6053 } else { 6054 bioc->stripes[i].length = length; 6055 } 6056 6057 stripe_index++; 6058 if (stripe_index == map->num_stripes) { 6059 stripe_index = 0; 6060 stripe_nr++; 6061 } 6062 } 6063 6064 *bioc_ret = bioc; 6065 bioc->map_type = map->type; 6066 bioc->num_stripes = num_stripes; 6067 out: 6068 free_extent_map(em); 6069 return ret; 6070 } 6071 6072 /* 6073 * In dev-replace case, for 
repair case (that's the only case where the mirror 6074 * is selected explicitly when calling btrfs_map_block), blocks left of the 6075 * left cursor can also be read from the target drive. 6076 * 6077 * For BTRFS_MAP_GET_READ_MIRRORS, the target drive is added as the last one to the 6078 * array of stripes. 6079 * For READ, it also needs to be supported using the same mirror number. 6080 * 6081 * If the requested block is not left of the left cursor, EIO is returned. This 6082 * can happen because btrfs_num_copies() returns one more in the dev-replace 6083 * case. 6084 */ 6085 static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info, 6086 u64 logical, u64 length, 6087 u64 srcdev_devid, int *mirror_num, 6088 u64 *physical) 6089 { 6090 struct btrfs_io_context *bioc = NULL; 6091 int num_stripes; 6092 int index_srcdev = 0; 6093 int found = 0; 6094 u64 physical_of_found = 0; 6095 int i; 6096 int ret = 0; 6097 6098 ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, 6099 logical, &length, &bioc, 0, 0); 6100 if (ret) { 6101 ASSERT(bioc == NULL); 6102 return ret; 6103 } 6104 6105 num_stripes = bioc->num_stripes; 6106 if (*mirror_num > num_stripes) { 6107 /* 6108 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror, 6109 * that means that the requested area is not left of the left 6110 * cursor 6111 */ 6112 btrfs_put_bioc(bioc); 6113 return -EIO; 6114 } 6115 6116 /* 6117 * Process the rest of the function using the mirror_num of the source 6118 * drive. Therefore look it up first. At the end, patch the device 6119 * pointer to the one of the target drive. 6120 */ 6121 for (i = 0; i < num_stripes; i++) { 6122 if (bioc->stripes[i].dev->devid != srcdev_devid) 6123 continue; 6124 6125 /* 6126 * In case of DUP, in order to keep it simple, only add the 6127 * mirror with the lowest physical address 6128 */ 6129 if (found && 6130 physical_of_found <= bioc->stripes[i].physical) 6131 continue; 6132 6133 index_srcdev = i; 6134 found = 1; 6135 physical_of_found = bioc->stripes[i].physical; 6136 } 6137 6138 btrfs_put_bioc(bioc); 6139 6140 ASSERT(found); 6141 if (!found) 6142 return -EIO; 6143 6144 *mirror_num = index_srcdev + 1; 6145 *physical = physical_of_found; 6146 return ret; 6147 } 6148 6149 static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical) 6150 { 6151 struct btrfs_block_group *cache; 6152 bool ret; 6153 6154 /* A non-zoned filesystem does not use the "to_copy" flag */ 6155 if (!btrfs_is_zoned(fs_info)) 6156 return false; 6157 6158 cache = btrfs_lookup_block_group(fs_info, logical); 6159 6160 spin_lock(&cache->lock); 6161 ret = cache->to_copy; 6162 spin_unlock(&cache->lock); 6163 6164 btrfs_put_block_group(cache); 6165 return ret; 6166 } 6167 6168 static void handle_ops_on_dev_replace(enum btrfs_map_op op, 6169 struct btrfs_io_context **bioc_ret, 6170 struct btrfs_dev_replace *dev_replace, 6171 u64 logical, 6172 int *num_stripes_ret, int *max_errors_ret) 6173 { 6174 struct btrfs_io_context *bioc = *bioc_ret; 6175 u64 srcdev_devid = dev_replace->srcdev->devid; 6176 int tgtdev_indexes = 0; 6177 int num_stripes = *num_stripes_ret; 6178 int max_errors = *max_errors_ret; 6179 int i; 6180 6181 if (op == BTRFS_MAP_WRITE) { 6182 int index_where_to_add; 6183 6184 /* 6185 * A block group which has "to_copy" set will eventually be 6186 * copied by the dev-replace process. We can avoid cloning the IO here.
6187 */ 6188 if (is_block_group_to_copy(dev_replace->srcdev->fs_info, logical)) 6189 return; 6190 6191 /* 6192 * duplicate the write operations while the dev replace 6193 * procedure is running. Since the copying of the old disk to 6194 * the new disk takes place at run time while the filesystem is 6195 * mounted writable, the regular write operations to the old 6196 * disk have to be duplicated to go to the new disk as well. 6197 * 6198 * Note that device->missing is handled by the caller, and that 6199 * the write to the old disk is already set up in the stripes 6200 * array. 6201 */ 6202 index_where_to_add = num_stripes; 6203 for (i = 0; i < num_stripes; i++) { 6204 if (bioc->stripes[i].dev->devid == srcdev_devid) { 6205 /* write to new disk, too */ 6206 struct btrfs_io_stripe *new = 6207 bioc->stripes + index_where_to_add; 6208 struct btrfs_io_stripe *old = 6209 bioc->stripes + i; 6210 6211 new->physical = old->physical; 6212 new->length = old->length; 6213 new->dev = dev_replace->tgtdev; 6214 bioc->tgtdev_map[i] = index_where_to_add; 6215 index_where_to_add++; 6216 max_errors++; 6217 tgtdev_indexes++; 6218 } 6219 } 6220 num_stripes = index_where_to_add; 6221 } else if (op == BTRFS_MAP_GET_READ_MIRRORS) { 6222 int index_srcdev = 0; 6223 int found = 0; 6224 u64 physical_of_found = 0; 6225 6226 /* 6227 * During the dev-replace procedure, the target drive can also 6228 * be used to read data in case it is needed to repair a corrupt 6229 * block elsewhere. This is possible if the requested area is 6230 * left of the left cursor. In this area, the target drive is a 6231 * full copy of the source drive. 6232 */ 6233 for (i = 0; i < num_stripes; i++) { 6234 if (bioc->stripes[i].dev->devid == srcdev_devid) { 6235 /* 6236 * In case of DUP, in order to keep it simple, 6237 * only add the mirror with the lowest physical 6238 * address 6239 */ 6240 if (found && 6241 physical_of_found <= bioc->stripes[i].physical) 6242 continue; 6243 index_srcdev = i; 6244 found = 1; 6245 physical_of_found = bioc->stripes[i].physical; 6246 } 6247 } 6248 if (found) { 6249 struct btrfs_io_stripe *tgtdev_stripe = 6250 bioc->stripes + num_stripes; 6251 6252 tgtdev_stripe->physical = physical_of_found; 6253 tgtdev_stripe->length = 6254 bioc->stripes[index_srcdev].length; 6255 tgtdev_stripe->dev = dev_replace->tgtdev; 6256 bioc->tgtdev_map[index_srcdev] = num_stripes; 6257 6258 tgtdev_indexes++; 6259 num_stripes++; 6260 } 6261 } 6262 6263 *num_stripes_ret = num_stripes; 6264 *max_errors_ret = max_errors; 6265 bioc->num_tgtdevs = tgtdev_indexes; 6266 *bioc_ret = bioc; 6267 } 6268 6269 static bool need_full_stripe(enum btrfs_map_op op) 6270 { 6271 return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS); 6272 } 6273 6274 /* 6275 * Calculate the geometry of a particular (address, len) tuple. This 6276 * information is used to calculate how big a particular bio can get before it 6277 * straddles a stripe. 6278 * 6279 * @fs_info: the filesystem 6280 * @em: mapping containing the logical extent 6281 * @op: type of operation - write or read 6282 * @logical: address that we want to figure out the geometry of 6283 * @io_geom: pointer used to return values 6284 * 6285 * Returns < 0 in case a chunk for the given logical address cannot be found, 6286 * usually shouldn't happen unless @logical is corrupted, 0 otherwise. 
6287 */ 6288 int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *em, 6289 enum btrfs_map_op op, u64 logical, 6290 struct btrfs_io_geometry *io_geom) 6291 { 6292 struct map_lookup *map; 6293 u64 len; 6294 u64 offset; 6295 u64 stripe_offset; 6296 u64 stripe_nr; 6297 u32 stripe_len; 6298 u64 raid56_full_stripe_start = (u64)-1; 6299 int data_stripes; 6300 6301 ASSERT(op != BTRFS_MAP_DISCARD); 6302 6303 map = em->map_lookup; 6304 /* Offset of this logical address in the chunk */ 6305 offset = logical - em->start; 6306 /* Len of a stripe in a chunk */ 6307 stripe_len = map->stripe_len; 6308 /* 6309 * Stripe_nr is where this block falls in; 6310 * stripe_offset is the offset of this block in its stripe. 6311 */ 6312 stripe_nr = div64_u64_rem(offset, stripe_len, &stripe_offset); 6313 ASSERT(stripe_offset < U32_MAX); 6314 6315 data_stripes = nr_data_stripes(map); 6316 6317 /* Only stripe based profiles need to check against stripe length. */ 6318 if (map->type & BTRFS_BLOCK_GROUP_STRIPE_MASK) { 6319 u64 max_len = stripe_len - stripe_offset; 6320 6321 /* 6322 * In case of raid56, we need to know the stripe aligned start. 6323 */ 6324 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 6325 unsigned long full_stripe_len = stripe_len * data_stripes; 6326 raid56_full_stripe_start = offset; 6327 6328 /* 6329 * Allow a write of a full stripe, but make sure we 6330 * don't allow straddling of stripes 6331 */ 6332 raid56_full_stripe_start = div64_u64(raid56_full_stripe_start, 6333 full_stripe_len); 6334 raid56_full_stripe_start *= full_stripe_len; 6335 6336 /* 6337 * For writes to RAID[56], allow a full stripeset across 6338 * all disks. For other RAID types and for RAID[56] 6339 * reads, just allow a single stripe (on a single disk). 6340 */ 6341 if (op == BTRFS_MAP_WRITE) { 6342 max_len = stripe_len * data_stripes - 6343 (offset - raid56_full_stripe_start); 6344 } 6345 } 6346 len = min_t(u64, em->len - offset, max_len); 6347 } else { 6348 len = em->len - offset; 6349 } 6350 6351 io_geom->len = len; 6352 io_geom->offset = offset; 6353 io_geom->stripe_len = stripe_len; 6354 io_geom->stripe_nr = stripe_nr; 6355 io_geom->stripe_offset = stripe_offset; 6356 io_geom->raid56_stripe_offset = raid56_full_stripe_start; 6357 6358 return 0; 6359 } 6360 6361 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, 6362 enum btrfs_map_op op, 6363 u64 logical, u64 *length, 6364 struct btrfs_io_context **bioc_ret, 6365 int mirror_num, int need_raid_map) 6366 { 6367 struct extent_map *em; 6368 struct map_lookup *map; 6369 u64 stripe_offset; 6370 u64 stripe_nr; 6371 u64 stripe_len; 6372 u32 stripe_index; 6373 int data_stripes; 6374 int i; 6375 int ret = 0; 6376 int num_stripes; 6377 int max_errors = 0; 6378 int tgtdev_indexes = 0; 6379 struct btrfs_io_context *bioc = NULL; 6380 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; 6381 int dev_replace_is_ongoing = 0; 6382 int num_alloc_stripes; 6383 int patch_the_first_stripe_for_dev_replace = 0; 6384 u64 physical_to_patch_in_first_stripe = 0; 6385 u64 raid56_full_stripe_start = (u64)-1; 6386 struct btrfs_io_geometry geom; 6387 6388 ASSERT(bioc_ret); 6389 ASSERT(op != BTRFS_MAP_DISCARD); 6390 6391 em = btrfs_get_chunk_map(fs_info, logical, *length); 6392 ASSERT(!IS_ERR(em)); 6393 6394 ret = btrfs_get_io_geometry(fs_info, em, op, logical, &geom); 6395 if (ret < 0) 6396 return ret; 6397 6398 map = em->map_lookup; 6399 6400 *length = geom.len; 6401 stripe_len = geom.stripe_len; 6402 stripe_nr = geom.stripe_nr; 6403 stripe_offset =
geom.stripe_offset; 6404 raid56_full_stripe_start = geom.raid56_stripe_offset; 6405 data_stripes = nr_data_stripes(map); 6406 6407 down_read(&dev_replace->rwsem); 6408 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace); 6409 /* 6410 * Hold the semaphore for read during the whole operation, write is 6411 * requested at commit time but must wait. 6412 */ 6413 if (!dev_replace_is_ongoing) 6414 up_read(&dev_replace->rwsem); 6415 6416 if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 && 6417 !need_full_stripe(op) && dev_replace->tgtdev != NULL) { 6418 ret = get_extra_mirror_from_replace(fs_info, logical, *length, 6419 dev_replace->srcdev->devid, 6420 &mirror_num, 6421 &physical_to_patch_in_first_stripe); 6422 if (ret) 6423 goto out; 6424 else 6425 patch_the_first_stripe_for_dev_replace = 1; 6426 } else if (mirror_num > map->num_stripes) { 6427 mirror_num = 0; 6428 } 6429 6430 num_stripes = 1; 6431 stripe_index = 0; 6432 if (map->type & BTRFS_BLOCK_GROUP_RAID0) { 6433 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6434 &stripe_index); 6435 if (!need_full_stripe(op)) 6436 mirror_num = 1; 6437 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) { 6438 if (need_full_stripe(op)) 6439 num_stripes = map->num_stripes; 6440 else if (mirror_num) 6441 stripe_index = mirror_num - 1; 6442 else { 6443 stripe_index = find_live_mirror(fs_info, map, 0, 6444 dev_replace_is_ongoing); 6445 mirror_num = stripe_index + 1; 6446 } 6447 6448 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { 6449 if (need_full_stripe(op)) { 6450 num_stripes = map->num_stripes; 6451 } else if (mirror_num) { 6452 stripe_index = mirror_num - 1; 6453 } else { 6454 mirror_num = 1; 6455 } 6456 6457 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { 6458 u32 factor = map->num_stripes / map->sub_stripes; 6459 6460 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); 6461 stripe_index *= map->sub_stripes; 6462 6463 if (need_full_stripe(op)) 6464 num_stripes = map->sub_stripes; 6465 else if (mirror_num) 6466 stripe_index += mirror_num - 1; 6467 else { 6468 int old_stripe_index = stripe_index; 6469 stripe_index = find_live_mirror(fs_info, map, 6470 stripe_index, 6471 dev_replace_is_ongoing); 6472 mirror_num = stripe_index - old_stripe_index + 1; 6473 } 6474 6475 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 6476 if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) { 6477 /* push stripe_nr back to the start of the full stripe */ 6478 stripe_nr = div64_u64(raid56_full_stripe_start, 6479 stripe_len * data_stripes); 6480 6481 /* RAID[56] write or recovery. Return all stripes */ 6482 num_stripes = map->num_stripes; 6483 max_errors = nr_parity_stripes(map); 6484 6485 *length = map->stripe_len; 6486 stripe_index = 0; 6487 stripe_offset = 0; 6488 } else { 6489 /* 6490 * Mirror #0 or #1 means the original data block. 6491 * Mirror #2 is RAID5 parity block. 6492 * Mirror #3 is RAID6 Q block. 
6493 */ 6494 stripe_nr = div_u64_rem(stripe_nr, 6495 data_stripes, &stripe_index); 6496 if (mirror_num > 1) 6497 stripe_index = data_stripes + mirror_num - 2; 6498 6499 /* We distribute the parity blocks across stripes */ 6500 div_u64_rem(stripe_nr + stripe_index, map->num_stripes, 6501 &stripe_index); 6502 if (!need_full_stripe(op) && mirror_num <= 1) 6503 mirror_num = 1; 6504 } 6505 } else { 6506 /* 6507 * After this, stripe_nr is the number of stripes on this 6508 * device we have to walk to find the data, and stripe_index is 6509 * the number of our device in the stripe array 6510 */ 6511 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6512 &stripe_index); 6513 mirror_num = stripe_index + 1; 6514 } 6515 if (stripe_index >= map->num_stripes) { 6516 btrfs_crit(fs_info, 6517 "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u", 6518 stripe_index, map->num_stripes); 6519 ret = -EINVAL; 6520 goto out; 6521 } 6522 6523 num_alloc_stripes = num_stripes; 6524 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) { 6525 if (op == BTRFS_MAP_WRITE) 6526 num_alloc_stripes <<= 1; 6527 if (op == BTRFS_MAP_GET_READ_MIRRORS) 6528 num_alloc_stripes++; 6529 tgtdev_indexes = num_stripes; 6530 } 6531 6532 bioc = alloc_btrfs_io_context(fs_info, num_alloc_stripes, tgtdev_indexes); 6533 if (!bioc) { 6534 ret = -ENOMEM; 6535 goto out; 6536 } 6537 6538 for (i = 0; i < num_stripes; i++) { 6539 bioc->stripes[i].physical = map->stripes[stripe_index].physical + 6540 stripe_offset + stripe_nr * map->stripe_len; 6541 bioc->stripes[i].dev = map->stripes[stripe_index].dev; 6542 stripe_index++; 6543 } 6544 6545 /* Build raid_map */ 6546 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map && 6547 (need_full_stripe(op) || mirror_num > 1)) { 6548 u64 tmp; 6549 unsigned rot; 6550 6551 /* Work out the disk rotation on this stripe-set */ 6552 div_u64_rem(stripe_nr, num_stripes, &rot); 6553 6554 /* Fill in the logical address of each stripe */ 6555 tmp = stripe_nr * data_stripes; 6556 for (i = 0; i < data_stripes; i++) 6557 bioc->raid_map[(i + rot) % num_stripes] = 6558 em->start + (tmp + i) * map->stripe_len; 6559 6560 bioc->raid_map[(i + rot) % num_stripes] = RAID5_P_STRIPE; 6561 if (map->type & BTRFS_BLOCK_GROUP_RAID6) 6562 bioc->raid_map[(i + rot + 1) % num_stripes] = 6563 RAID6_Q_STRIPE; 6564 6565 sort_parity_stripes(bioc, num_stripes); 6566 } 6567 6568 if (need_full_stripe(op)) 6569 max_errors = btrfs_chunk_max_errors(map); 6570 6571 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL && 6572 need_full_stripe(op)) { 6573 handle_ops_on_dev_replace(op, &bioc, dev_replace, logical, 6574 &num_stripes, &max_errors); 6575 } 6576 6577 *bioc_ret = bioc; 6578 bioc->map_type = map->type; 6579 bioc->num_stripes = num_stripes; 6580 bioc->max_errors = max_errors; 6581 bioc->mirror_num = mirror_num; 6582 6583 /* 6584 * This is the case that REQ_READ && dev_replace_is_ongoing && 6585 * mirror_num == num_stripes + 1 && dev_replace target drive is 6586 * available as a mirror 6587 */ 6588 if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) { 6589 WARN_ON(num_stripes > 1); 6590 bioc->stripes[0].dev = dev_replace->tgtdev; 6591 bioc->stripes[0].physical = physical_to_patch_in_first_stripe; 6592 bioc->mirror_num = map->num_stripes + 1; 6593 } 6594 out: 6595 if (dev_replace_is_ongoing) { 6596 lockdep_assert_held(&dev_replace->rwsem); 6597 /* Unlock and let waiting writers proceed */ 6598 up_read(&dev_replace->rwsem); 6599 } 6600 free_extent_map(em); 6601 return ret; 6602 }
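/*
 * Map a logical address to the set of physical stripes that back it.
 * This is a thin dispatcher: BTRFS_MAP_DISCARD goes through
 * __btrfs_map_block_for_discard() (which never builds a raid_map),
 * everything else goes through __btrfs_map_block() with
 * need_raid_map = 0. On success the caller owns *bioc_ret and must
 * release it with btrfs_put_bioc().
 *
 * A minimal usage sketch (illustrative only, error handling trimmed;
 * the local variable names are not from this file):
 *
 *	u64 map_length = length;
 *	struct btrfs_io_context *bioc = NULL;
 *
 *	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
 *			      &map_length, &bioc, 0);
 *	if (!ret) {
 *		... use bioc->stripes[0].dev and .physical ...
 *		btrfs_put_bioc(bioc);
 *	}
 */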
6603 6604 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, 6605 u64 logical, u64 *length, 6606 struct btrfs_io_context **bioc_ret, int mirror_num) 6607 { 6608 if (op == BTRFS_MAP_DISCARD) 6609 return __btrfs_map_block_for_discard(fs_info, logical, 6610 length, bioc_ret); 6611 6612 return __btrfs_map_block(fs_info, op, logical, length, bioc_ret, 6613 mirror_num, 0); 6614 } 6615 6616 /* For Scrub/replace */ 6617 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, 6618 u64 logical, u64 *length, 6619 struct btrfs_io_context **bioc_ret) 6620 { 6621 return __btrfs_map_block(fs_info, op, logical, length, bioc_ret, 0, 1); 6622 } 6623 6624 static inline void btrfs_end_bioc(struct btrfs_io_context *bioc, struct bio *bio) 6625 { 6626 bio->bi_private = bioc->private; 6627 bio->bi_end_io = bioc->end_io; 6628 bio_endio(bio); 6629 6630 btrfs_put_bioc(bioc); 6631 } 6632 6633 static void btrfs_end_bio(struct bio *bio) 6634 { 6635 struct btrfs_io_context *bioc = bio->bi_private; 6636 int is_orig_bio = 0; 6637 6638 if (bio->bi_status) { 6639 atomic_inc(&bioc->error); 6640 if (bio->bi_status == BLK_STS_IOERR || 6641 bio->bi_status == BLK_STS_TARGET) { 6642 struct btrfs_device *dev = btrfs_bio(bio)->device; 6643 6644 ASSERT(dev->bdev); 6645 if (btrfs_op(bio) == BTRFS_MAP_WRITE) 6646 btrfs_dev_stat_inc_and_print(dev, 6647 BTRFS_DEV_STAT_WRITE_ERRS); 6648 else if (!(bio->bi_opf & REQ_RAHEAD)) 6649 btrfs_dev_stat_inc_and_print(dev, 6650 BTRFS_DEV_STAT_READ_ERRS); 6651 if (bio->bi_opf & REQ_PREFLUSH) 6652 btrfs_dev_stat_inc_and_print(dev, 6653 BTRFS_DEV_STAT_FLUSH_ERRS); 6654 } 6655 } 6656 6657 if (bio == bioc->orig_bio) 6658 is_orig_bio = 1; 6659 6660 btrfs_bio_counter_dec(bioc->fs_info); 6661 6662 if (atomic_dec_and_test(&bioc->stripes_pending)) { 6663 if (!is_orig_bio) { 6664 bio_put(bio); 6665 bio = bioc->orig_bio; 6666 } 6667 6668 btrfs_bio(bio)->mirror_num = bioc->mirror_num; 6669 /* Only send an error to the higher layers if it is 6670 * beyond the tolerance of the btrfs bio 6671 */ 6672 if (atomic_read(&bioc->error) > bioc->max_errors) { 6673 bio->bi_status = BLK_STS_IOERR; 6674 } else { 6675 /* 6676 * This bio is actually up to date, we didn't 6677 * go over the max number of errors 6678 */ 6679 bio->bi_status = BLK_STS_OK; 6680 } 6681 6682 btrfs_end_bioc(bioc, bio); 6683 } else if (!is_orig_bio) { 6684 bio_put(bio); 6685 } 6686 } 6687 6688 static void submit_stripe_bio(struct btrfs_io_context *bioc, struct bio *bio, 6689 u64 physical, struct btrfs_device *dev) 6690 { 6691 struct btrfs_fs_info *fs_info = bioc->fs_info; 6692 6693 bio->bi_private = bioc; 6694 btrfs_bio(bio)->device = dev; 6695 bio->bi_end_io = btrfs_end_bio; 6696 bio->bi_iter.bi_sector = physical >> 9; 6697 /* 6698 * For zone append writing, bi_sector must point to the beginning of the 6699 * zone 6700 */ 6701 if (bio_op(bio) == REQ_OP_ZONE_APPEND) { 6702 if (btrfs_dev_is_sequential(dev, physical)) { 6703 u64 zone_start = round_down(physical, fs_info->zone_size); 6704 6705 bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT; 6706 } else { 6707 bio->bi_opf &= ~REQ_OP_ZONE_APPEND; 6708 bio->bi_opf |= REQ_OP_WRITE; 6709 } 6710 } 6711 btrfs_debug_in_rcu(fs_info, 6712 "btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u", 6713 bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector, 6714 (unsigned long)dev->bdev->bd_dev, rcu_str_deref(dev->name), 6715 dev->devid, bio->bi_iter.bi_size); 6716 6717 btrfs_bio_counter_inc_noblocked(fs_info); 6718 6719 btrfsic_check_bio(bio); 6720 submit_bio(bio);
6721 } 6722 6723 static void bioc_error(struct btrfs_io_context *bioc, struct bio *bio, u64 logical) 6724 { 6725 atomic_inc(&bioc->error); 6726 if (atomic_dec_and_test(&bioc->stripes_pending)) { 6727 /* Should be the original bio. */ 6728 WARN_ON(bio != bioc->orig_bio); 6729 6730 btrfs_bio(bio)->mirror_num = bioc->mirror_num; 6731 bio->bi_iter.bi_sector = logical >> 9; 6732 if (atomic_read(&bioc->error) > bioc->max_errors) 6733 bio->bi_status = BLK_STS_IOERR; 6734 else 6735 bio->bi_status = BLK_STS_OK; 6736 btrfs_end_bioc(bioc, bio); 6737 } 6738 } 6739 6740 blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio, 6741 int mirror_num) 6742 { 6743 struct btrfs_device *dev; 6744 struct bio *first_bio = bio; 6745 u64 logical = bio->bi_iter.bi_sector << 9; 6746 u64 length = 0; 6747 u64 map_length; 6748 int ret; 6749 int dev_nr; 6750 int total_devs; 6751 struct btrfs_io_context *bioc = NULL; 6752 6753 length = bio->bi_iter.bi_size; 6754 map_length = length; 6755 6756 btrfs_bio_counter_inc_blocked(fs_info); 6757 ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical, 6758 &map_length, &bioc, mirror_num, 1); 6759 if (ret) { 6760 btrfs_bio_counter_dec(fs_info); 6761 return errno_to_blk_status(ret); 6762 } 6763 6764 total_devs = bioc->num_stripes; 6765 bioc->orig_bio = first_bio; 6766 bioc->private = first_bio->bi_private; 6767 bioc->end_io = first_bio->bi_end_io; 6768 atomic_set(&bioc->stripes_pending, bioc->num_stripes); 6769 6770 if ((bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) && 6771 ((btrfs_op(bio) == BTRFS_MAP_WRITE) || (mirror_num > 1))) { 6772 /* In this case, map_length has been set to the length of 6773 a single stripe; not the whole write */ 6774 if (btrfs_op(bio) == BTRFS_MAP_WRITE) { 6775 ret = raid56_parity_write(bio, bioc, map_length); 6776 } else { 6777 ret = raid56_parity_recover(bio, bioc, map_length, 6778 mirror_num, 1); 6779 } 6780 6781 btrfs_bio_counter_dec(fs_info); 6782 return errno_to_blk_status(ret); 6783 } 6784 6785 if (map_length < length) { 6786 btrfs_crit(fs_info, 6787 "mapping failed logical %llu bio len %llu len %llu", 6788 logical, length, map_length); 6789 BUG(); 6790 } 6791 6792 for (dev_nr = 0; dev_nr < total_devs; dev_nr++) { 6793 dev = bioc->stripes[dev_nr].dev; 6794 if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING, 6795 &dev->dev_state) || 6796 (btrfs_op(first_bio) == BTRFS_MAP_WRITE && 6797 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) { 6798 bioc_error(bioc, first_bio, logical); 6799 continue; 6800 } 6801 6802 if (dev_nr < total_devs - 1) { 6803 bio = btrfs_bio_clone(dev->bdev, first_bio); 6804 } else { 6805 bio = first_bio; 6806 bio_set_dev(bio, dev->bdev); 6807 } 6808 6809 submit_stripe_bio(bioc, bio, bioc->stripes[dev_nr].physical, dev); 6810 } 6811 btrfs_bio_counter_dec(fs_info); 6812 return BLK_STS_OK; 6813 } 6814 6815 static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args, 6816 const struct btrfs_fs_devices *fs_devices) 6817 { 6818 if (args->fsid == NULL) 6819 return true; 6820 if (memcmp(fs_devices->metadata_uuid, args->fsid, BTRFS_FSID_SIZE) == 0) 6821 return true; 6822 return false; 6823 } 6824 6825 static bool dev_args_match_device(const struct btrfs_dev_lookup_args *args, 6826 const struct btrfs_device *device) 6827 { 6828 ASSERT((args->devid != (u64)-1) || args->missing); 6829 6830 if ((args->devid != (u64)-1) && device->devid != args->devid) 6831 return false; 6832 if (args->uuid && memcmp(device->uuid, args->uuid, BTRFS_UUID_SIZE) != 0) 6833 return false; 6834 if (!args->missing) 
6835 return true; 6836 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) && 6837 !device->bdev) 6838 return true; 6839 return false; 6840 } 6841 6842 /* 6843 * Find a device specified by @devid or @uuid in the list of @fs_devices, or 6844 * return NULL. 6845 * 6846 * If devid and uuid are both specified, the match must be exact, otherwise 6847 * only devid is used. 6848 */ 6849 struct btrfs_device *btrfs_find_device(const struct btrfs_fs_devices *fs_devices, 6850 const struct btrfs_dev_lookup_args *args) 6851 { 6852 struct btrfs_device *device; 6853 struct btrfs_fs_devices *seed_devs; 6854 6855 if (dev_args_match_fs_devices(args, fs_devices)) { 6856 list_for_each_entry(device, &fs_devices->devices, dev_list) { 6857 if (dev_args_match_device(args, device)) 6858 return device; 6859 } 6860 } 6861 6862 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 6863 if (!dev_args_match_fs_devices(args, seed_devs)) 6864 continue; 6865 list_for_each_entry(device, &seed_devs->devices, dev_list) { 6866 if (dev_args_match_device(args, device)) 6867 return device; 6868 } 6869 } 6870 6871 return NULL; 6872 } 6873 6874 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices, 6875 u64 devid, u8 *dev_uuid) 6876 { 6877 struct btrfs_device *device; 6878 unsigned int nofs_flag; 6879 6880 /* 6881 * We call this under the chunk_mutex, so we want to use NOFS for this 6882 * allocation, however we don't want to change btrfs_alloc_device() to 6883 * always do NOFS because we use it in a lot of other GFP_KERNEL safe 6884 * places. 6885 */ 6886 nofs_flag = memalloc_nofs_save(); 6887 device = btrfs_alloc_device(NULL, &devid, dev_uuid); 6888 memalloc_nofs_restore(nofs_flag); 6889 if (IS_ERR(device)) 6890 return device; 6891 6892 list_add(&device->dev_list, &fs_devices->devices); 6893 device->fs_devices = fs_devices; 6894 fs_devices->num_devices++; 6895 6896 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 6897 fs_devices->missing_devices++; 6898 6899 return device; 6900 } 6901 6902 /** 6903 * btrfs_alloc_device - allocate struct btrfs_device 6904 * @fs_info: used only for generating a new devid, can be NULL if 6905 * devid is provided (i.e. @devid != NULL). 6906 * @devid: a pointer to devid for this device. If NULL a new devid 6907 * is generated. 6908 * @uuid: a pointer to UUID for this device. If NULL a new UUID 6909 * is generated. 6910 * 6911 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR() 6912 * on error. Returned struct is not linked onto any lists and must be 6913 * destroyed with btrfs_free_device. 
6914 */ 6915 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info, 6916 const u64 *devid, 6917 const u8 *uuid) 6918 { 6919 struct btrfs_device *dev; 6920 u64 tmp; 6921 6922 if (WARN_ON(!devid && !fs_info)) 6923 return ERR_PTR(-EINVAL); 6924 6925 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 6926 if (!dev) 6927 return ERR_PTR(-ENOMEM); 6928 6929 /* 6930 * Preallocate a bio that's always going to be used for flushing device 6931 * barriers and matches the device lifespan 6932 */ 6933 dev->flush_bio = bio_kmalloc(GFP_KERNEL, 0); 6934 if (!dev->flush_bio) { 6935 kfree(dev); 6936 return ERR_PTR(-ENOMEM); 6937 } 6938 6939 INIT_LIST_HEAD(&dev->dev_list); 6940 INIT_LIST_HEAD(&dev->dev_alloc_list); 6941 INIT_LIST_HEAD(&dev->post_commit_list); 6942 6943 atomic_set(&dev->dev_stats_ccnt, 0); 6944 btrfs_device_data_ordered_init(dev); 6945 extent_io_tree_init(fs_info, &dev->alloc_state, 6946 IO_TREE_DEVICE_ALLOC_STATE, NULL); 6947 6948 if (devid) 6949 tmp = *devid; 6950 else { 6951 int ret; 6952 6953 ret = find_next_devid(fs_info, &tmp); 6954 if (ret) { 6955 btrfs_free_device(dev); 6956 return ERR_PTR(ret); 6957 } 6958 } 6959 dev->devid = tmp; 6960 6961 if (uuid) 6962 memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE); 6963 else 6964 generate_random_uuid(dev->uuid); 6965 6966 return dev; 6967 } 6968 6969 static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info, 6970 u64 devid, u8 *uuid, bool error) 6971 { 6972 if (error) 6973 btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing", 6974 devid, uuid); 6975 else 6976 btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing", 6977 devid, uuid); 6978 } 6979 6980 static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes) 6981 { 6982 const int data_stripes = calc_data_stripes(type, num_stripes); 6983 6984 return div_u64(chunk_len, data_stripes); 6985 } 6986 6987 #if BITS_PER_LONG == 32 6988 /* 6989 * Due to the page cache limit, metadata beyond BTRFS_32BIT_MAX_FILE_SIZE 6990 * can't be accessed on 32bit systems. 6991 * 6992 * This function does a mount-time check to reject the fs if it already has 6993 * a metadata chunk beyond that limit. 6994 */ 6995 static int check_32bit_meta_chunk(struct btrfs_fs_info *fs_info, 6996 u64 logical, u64 length, u64 type) 6997 { 6998 if (!(type & BTRFS_BLOCK_GROUP_METADATA)) 6999 return 0; 7000 7001 if (logical + length < MAX_LFS_FILESIZE) 7002 return 0; 7003 7004 btrfs_err_32bit_limit(fs_info); 7005 return -EOVERFLOW; 7006 } 7007 7008 /* 7009 * This is to give an early warning for any metadata chunk reaching 7010 * BTRFS_32BIT_EARLY_WARN_THRESHOLD. 7011 * Although we can still access the metadata, it's not going to be possible 7012 * once the limit is reached.
7013 */ 7014 static void warn_32bit_meta_chunk(struct btrfs_fs_info *fs_info, 7015 u64 logical, u64 length, u64 type) 7016 { 7017 if (!(type & BTRFS_BLOCK_GROUP_METADATA)) 7018 return; 7019 7020 if (logical + length < BTRFS_32BIT_EARLY_WARN_THRESHOLD) 7021 return; 7022 7023 btrfs_warn_32bit_limit(fs_info); 7024 } 7025 #endif 7026 7027 static struct btrfs_device *handle_missing_device(struct btrfs_fs_info *fs_info, 7028 u64 devid, u8 *uuid) 7029 { 7030 struct btrfs_device *dev; 7031 7032 if (!btrfs_test_opt(fs_info, DEGRADED)) { 7033 btrfs_report_missing_device(fs_info, devid, uuid, true); 7034 return ERR_PTR(-ENOENT); 7035 } 7036 7037 dev = add_missing_dev(fs_info->fs_devices, devid, uuid); 7038 if (IS_ERR(dev)) { 7039 btrfs_err(fs_info, "failed to init missing device %llu: %ld", 7040 devid, PTR_ERR(dev)); 7041 return dev; 7042 } 7043 btrfs_report_missing_device(fs_info, devid, uuid, false); 7044 7045 return dev; 7046 } 7047 7048 static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf, 7049 struct btrfs_chunk *chunk) 7050 { 7051 BTRFS_DEV_LOOKUP_ARGS(args); 7052 struct btrfs_fs_info *fs_info = leaf->fs_info; 7053 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 7054 struct map_lookup *map; 7055 struct extent_map *em; 7056 u64 logical; 7057 u64 length; 7058 u64 devid; 7059 u64 type; 7060 u8 uuid[BTRFS_UUID_SIZE]; 7061 int num_stripes; 7062 int ret; 7063 int i; 7064 7065 logical = key->offset; 7066 length = btrfs_chunk_length(leaf, chunk); 7067 type = btrfs_chunk_type(leaf, chunk); 7068 num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 7069 7070 #if BITS_PER_LONG == 32 7071 ret = check_32bit_meta_chunk(fs_info, logical, length, type); 7072 if (ret < 0) 7073 return ret; 7074 warn_32bit_meta_chunk(fs_info, logical, length, type); 7075 #endif 7076 7077 /* 7078 * Only need to verify chunk item if we're reading from sys chunk array, 7079 * as chunk item in tree block is already verified by tree-checker. 7080 */ 7081 if (leaf->start == BTRFS_SUPER_INFO_OFFSET) { 7082 ret = btrfs_check_chunk_valid(leaf, chunk, logical); 7083 if (ret) 7084 return ret; 7085 } 7086 7087 read_lock(&map_tree->lock); 7088 em = lookup_extent_mapping(map_tree, logical, 1); 7089 read_unlock(&map_tree->lock); 7090 7091 /* already mapped? 
*/ 7092 if (em && em->start <= logical && em->start + em->len > logical) { 7093 free_extent_map(em); 7094 return 0; 7095 } else if (em) { 7096 free_extent_map(em); 7097 } 7098 7099 em = alloc_extent_map(); 7100 if (!em) 7101 return -ENOMEM; 7102 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); 7103 if (!map) { 7104 free_extent_map(em); 7105 return -ENOMEM; 7106 } 7107 7108 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 7109 em->map_lookup = map; 7110 em->start = logical; 7111 em->len = length; 7112 em->orig_start = 0; 7113 em->block_start = 0; 7114 em->block_len = em->len; 7115 7116 map->num_stripes = num_stripes; 7117 map->io_width = btrfs_chunk_io_width(leaf, chunk); 7118 map->io_align = btrfs_chunk_io_align(leaf, chunk); 7119 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk); 7120 map->type = type; 7121 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); 7122 map->verified_stripes = 0; 7123 em->orig_block_len = calc_stripe_length(type, em->len, 7124 map->num_stripes); 7125 for (i = 0; i < num_stripes; i++) { 7126 map->stripes[i].physical = 7127 btrfs_stripe_offset_nr(leaf, chunk, i); 7128 devid = btrfs_stripe_devid_nr(leaf, chunk, i); 7129 args.devid = devid; 7130 read_extent_buffer(leaf, uuid, (unsigned long) 7131 btrfs_stripe_dev_uuid_nr(chunk, i), 7132 BTRFS_UUID_SIZE); 7133 args.uuid = uuid; 7134 map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, &args); 7135 if (!map->stripes[i].dev) { 7136 map->stripes[i].dev = handle_missing_device(fs_info, 7137 devid, uuid); 7138 if (IS_ERR(map->stripes[i].dev)) { 7139 free_extent_map(em); 7140 return PTR_ERR(map->stripes[i].dev); 7141 } 7142 } 7143 7144 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 7145 &(map->stripes[i].dev->dev_state)); 7146 } 7147 7148 write_lock(&map_tree->lock); 7149 ret = add_extent_mapping(map_tree, em, 0); 7150 write_unlock(&map_tree->lock); 7151 if (ret < 0) { 7152 btrfs_err(fs_info, 7153 "failed to add chunk map, start=%llu len=%llu: %d", 7154 em->start, em->len, ret); 7155 } 7156 free_extent_map(em); 7157 7158 return ret; 7159 } 7160 7161 static void fill_device_from_item(struct extent_buffer *leaf, 7162 struct btrfs_dev_item *dev_item, 7163 struct btrfs_device *device) 7164 { 7165 unsigned long ptr; 7166 7167 device->devid = btrfs_device_id(leaf, dev_item); 7168 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item); 7169 device->total_bytes = device->disk_total_bytes; 7170 device->commit_total_bytes = device->disk_total_bytes; 7171 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item); 7172 device->commit_bytes_used = device->bytes_used; 7173 device->type = btrfs_device_type(leaf, dev_item); 7174 device->io_align = btrfs_device_io_align(leaf, dev_item); 7175 device->io_width = btrfs_device_io_width(leaf, dev_item); 7176 device->sector_size = btrfs_device_sector_size(leaf, dev_item); 7177 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID); 7178 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 7179 7180 ptr = btrfs_device_uuid(dev_item); 7181 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 7182 } 7183 7184 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info, 7185 u8 *fsid) 7186 { 7187 struct btrfs_fs_devices *fs_devices; 7188 int ret; 7189 7190 lockdep_assert_held(&uuid_mutex); 7191 ASSERT(fsid); 7192 7193 /* This will match only for multi-device seed fs */ 7194 list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list) 7195 if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE)) 7196 return fs_devices; 7197 7198 
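	/*
	 * Not a seed fs we have already opened: look the fsid up among all
	 * scanned devices. As the code below sketches, a missing fsid is
	 * fatal (-ENOENT) unless we are mounting with -o degraded, in which
	 * case a stub fs_devices marked as seeding is created so the
	 * missing devices can still be reported later.
	 */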
7199 fs_devices = find_fsid(fsid, NULL); 7200 if (!fs_devices) { 7201 if (!btrfs_test_opt(fs_info, DEGRADED)) 7202 return ERR_PTR(-ENOENT); 7203 7204 fs_devices = alloc_fs_devices(fsid, NULL); 7205 if (IS_ERR(fs_devices)) 7206 return fs_devices; 7207 7208 fs_devices->seeding = true; 7209 fs_devices->opened = 1; 7210 return fs_devices; 7211 } 7212 7213 /* 7214 * Upon first call for a seed fs fsid, just create a private copy of the 7215 * respective fs_devices and anchor it at fs_info->fs_devices->seed_list 7216 */ 7217 fs_devices = clone_fs_devices(fs_devices); 7218 if (IS_ERR(fs_devices)) 7219 return fs_devices; 7220 7221 ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder); 7222 if (ret) { 7223 free_fs_devices(fs_devices); 7224 return ERR_PTR(ret); 7225 } 7226 7227 if (!fs_devices->seeding) { 7228 close_fs_devices(fs_devices); 7229 free_fs_devices(fs_devices); 7230 return ERR_PTR(-EINVAL); 7231 } 7232 7233 list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list); 7234 7235 return fs_devices; 7236 } 7237 7238 static int read_one_dev(struct extent_buffer *leaf, 7239 struct btrfs_dev_item *dev_item) 7240 { 7241 BTRFS_DEV_LOOKUP_ARGS(args); 7242 struct btrfs_fs_info *fs_info = leaf->fs_info; 7243 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7244 struct btrfs_device *device; 7245 u64 devid; 7246 int ret; 7247 u8 fs_uuid[BTRFS_FSID_SIZE]; 7248 u8 dev_uuid[BTRFS_UUID_SIZE]; 7249 7250 devid = args.devid = btrfs_device_id(leaf, dev_item); 7251 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 7252 BTRFS_UUID_SIZE); 7253 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 7254 BTRFS_FSID_SIZE); 7255 args.uuid = dev_uuid; 7256 args.fsid = fs_uuid; 7257 7258 if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) { 7259 fs_devices = open_seed_devices(fs_info, fs_uuid); 7260 if (IS_ERR(fs_devices)) 7261 return PTR_ERR(fs_devices); 7262 } 7263 7264 device = btrfs_find_device(fs_info->fs_devices, &args); 7265 if (!device) { 7266 if (!btrfs_test_opt(fs_info, DEGRADED)) { 7267 btrfs_report_missing_device(fs_info, devid, 7268 dev_uuid, true); 7269 return -ENOENT; 7270 } 7271 7272 device = add_missing_dev(fs_devices, devid, dev_uuid); 7273 if (IS_ERR(device)) { 7274 btrfs_err(fs_info, 7275 "failed to add missing dev %llu: %ld", 7276 devid, PTR_ERR(device)); 7277 return PTR_ERR(device); 7278 } 7279 btrfs_report_missing_device(fs_info, devid, dev_uuid, false); 7280 } else { 7281 if (!device->bdev) { 7282 if (!btrfs_test_opt(fs_info, DEGRADED)) { 7283 btrfs_report_missing_device(fs_info, 7284 devid, dev_uuid, true); 7285 return -ENOENT; 7286 } 7287 btrfs_report_missing_device(fs_info, devid, 7288 dev_uuid, false); 7289 } 7290 7291 if (!device->bdev && 7292 !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { 7293 /* 7294 * this happens when a device that was properly setup 7295 * in the device info lists suddenly goes bad. 
7296 * device->bdev is NULL, and so we have to set 7297 * device->missing to one here 7298 */ 7299 device->fs_devices->missing_devices++; 7300 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 7301 } 7302 7303 /* Move the device to its own fs_devices */ 7304 if (device->fs_devices != fs_devices) { 7305 ASSERT(test_bit(BTRFS_DEV_STATE_MISSING, 7306 &device->dev_state)); 7307 7308 list_move(&device->dev_list, &fs_devices->devices); 7309 device->fs_devices->num_devices--; 7310 fs_devices->num_devices++; 7311 7312 device->fs_devices->missing_devices--; 7313 fs_devices->missing_devices++; 7314 7315 device->fs_devices = fs_devices; 7316 } 7317 } 7318 7319 if (device->fs_devices != fs_info->fs_devices) { 7320 BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)); 7321 if (device->generation != 7322 btrfs_device_generation(leaf, dev_item)) 7323 return -EINVAL; 7324 } 7325 7326 fill_device_from_item(leaf, dev_item, device); 7327 if (device->bdev) { 7328 u64 max_total_bytes = bdev_nr_bytes(device->bdev); 7329 7330 if (device->total_bytes > max_total_bytes) { 7331 btrfs_err(fs_info, 7332 "device total_bytes should be at most %llu but found %llu", 7333 max_total_bytes, device->total_bytes); 7334 return -EINVAL; 7335 } 7336 } 7337 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 7338 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 7339 !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 7340 device->fs_devices->total_rw_bytes += device->total_bytes; 7341 atomic64_add(device->total_bytes - device->bytes_used, 7342 &fs_info->free_chunk_space); 7343 } 7344 ret = 0; 7345 return ret; 7346 } 7347 7348 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info) 7349 { 7350 struct btrfs_super_block *super_copy = fs_info->super_copy; 7351 struct extent_buffer *sb; 7352 struct btrfs_disk_key *disk_key; 7353 struct btrfs_chunk *chunk; 7354 u8 *array_ptr; 7355 unsigned long sb_array_offset; 7356 int ret = 0; 7357 u32 num_stripes; 7358 u32 array_size; 7359 u32 len = 0; 7360 u32 cur_offset; 7361 u64 type; 7362 struct btrfs_key key; 7363 7364 ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize); 7365 7366 /* 7367 * We allocated a dummy extent, just to use extent buffer accessors. 7368 * There will be unused space after BTRFS_SUPER_INFO_SIZE, but 7369 * that's fine, we will not go beyond system chunk array anyway. 
7370 */ 7371 sb = alloc_dummy_extent_buffer(fs_info, BTRFS_SUPER_INFO_OFFSET); 7372 if (!sb) 7373 return -ENOMEM; 7374 set_extent_buffer_uptodate(sb); 7375 7376 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); 7377 array_size = btrfs_super_sys_array_size(super_copy); 7378 7379 array_ptr = super_copy->sys_chunk_array; 7380 sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array); 7381 cur_offset = 0; 7382 7383 while (cur_offset < array_size) { 7384 disk_key = (struct btrfs_disk_key *)array_ptr; 7385 len = sizeof(*disk_key); 7386 if (cur_offset + len > array_size) 7387 goto out_short_read; 7388 7389 btrfs_disk_key_to_cpu(&key, disk_key); 7390 7391 array_ptr += len; 7392 sb_array_offset += len; 7393 cur_offset += len; 7394 7395 if (key.type != BTRFS_CHUNK_ITEM_KEY) { 7396 btrfs_err(fs_info, 7397 "unexpected item type %u in sys_array at offset %u", 7398 (u32)key.type, cur_offset); 7399 ret = -EIO; 7400 break; 7401 } 7402 7403 chunk = (struct btrfs_chunk *)sb_array_offset; 7404 /* 7405 * At least one btrfs_chunk with one stripe must be present, 7406 * exact stripe count check comes afterwards 7407 */ 7408 len = btrfs_chunk_item_size(1); 7409 if (cur_offset + len > array_size) 7410 goto out_short_read; 7411 7412 num_stripes = btrfs_chunk_num_stripes(sb, chunk); 7413 if (!num_stripes) { 7414 btrfs_err(fs_info, 7415 "invalid number of stripes %u in sys_array at offset %u", 7416 num_stripes, cur_offset); 7417 ret = -EIO; 7418 break; 7419 } 7420 7421 type = btrfs_chunk_type(sb, chunk); 7422 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) { 7423 btrfs_err(fs_info, 7424 "invalid chunk type %llu in sys_array at offset %u", 7425 type, cur_offset); 7426 ret = -EIO; 7427 break; 7428 } 7429 7430 len = btrfs_chunk_item_size(num_stripes); 7431 if (cur_offset + len > array_size) 7432 goto out_short_read; 7433 7434 ret = read_one_chunk(&key, sb, chunk); 7435 if (ret) 7436 break; 7437 7438 array_ptr += len; 7439 sb_array_offset += len; 7440 cur_offset += len; 7441 } 7442 clear_extent_buffer_uptodate(sb); 7443 free_extent_buffer_stale(sb); 7444 return ret; 7445 7446 out_short_read: 7447 btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u", 7448 len, cur_offset); 7449 clear_extent_buffer_uptodate(sb); 7450 free_extent_buffer_stale(sb); 7451 return -EIO; 7452 } 7453 7454 /* 7455 * Check if all chunks in the fs are OK for read-write degraded mount 7456 * 7457 * If the @failing_dev is specified, it's accounted as missing. 7458 * 7459 * Return true if all chunks meet the minimal RW mount requirements. 7460 * Return false if any chunk doesn't meet the minimal RW mount requirements. 7461 */ 7462 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info, 7463 struct btrfs_device *failing_dev) 7464 { 7465 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 7466 struct extent_map *em; 7467 u64 next_start = 0; 7468 bool ret = true; 7469 7470 read_lock(&map_tree->lock); 7471 em = lookup_extent_mapping(map_tree, 0, (u64)-1); 7472 read_unlock(&map_tree->lock); 7473 /* No chunk at all? 
Return false anyway */ 7474 if (!em) { 7475 ret = false; 7476 goto out; 7477 } 7478 while (em) { 7479 struct map_lookup *map; 7480 int missing = 0; 7481 int max_tolerated; 7482 int i; 7483 7484 map = em->map_lookup; 7485 max_tolerated = 7486 btrfs_get_num_tolerated_disk_barrier_failures( 7487 map->type); 7488 for (i = 0; i < map->num_stripes; i++) { 7489 struct btrfs_device *dev = map->stripes[i].dev; 7490 7491 if (!dev || !dev->bdev || 7492 test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) || 7493 dev->last_flush_error) 7494 missing++; 7495 else if (failing_dev && failing_dev == dev) 7496 missing++; 7497 } 7498 if (missing > max_tolerated) { 7499 if (!failing_dev) 7500 btrfs_warn(fs_info, 7501 "chunk %llu missing %d devices, max tolerance is %d for writable mount", 7502 em->start, missing, max_tolerated); 7503 free_extent_map(em); 7504 ret = false; 7505 goto out; 7506 } 7507 next_start = extent_map_end(em); 7508 free_extent_map(em); 7509 7510 read_lock(&map_tree->lock); 7511 em = lookup_extent_mapping(map_tree, next_start, 7512 (u64)(-1) - next_start); 7513 read_unlock(&map_tree->lock); 7514 } 7515 out: 7516 return ret; 7517 } 7518 7519 static void readahead_tree_node_children(struct extent_buffer *node) 7520 { 7521 int i; 7522 const int nr_items = btrfs_header_nritems(node); 7523 7524 for (i = 0; i < nr_items; i++) 7525 btrfs_readahead_node_child(node, i); 7526 } 7527 7528 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info) 7529 { 7530 struct btrfs_root *root = fs_info->chunk_root; 7531 struct btrfs_path *path; 7532 struct extent_buffer *leaf; 7533 struct btrfs_key key; 7534 struct btrfs_key found_key; 7535 int ret; 7536 int slot; 7537 int iter_ret = 0; 7538 u64 total_dev = 0; 7539 u64 last_ra_node = 0; 7540 7541 path = btrfs_alloc_path(); 7542 if (!path) 7543 return -ENOMEM; 7544 7545 /* 7546 * uuid_mutex is needed only if we are mounting a sprout FS, 7547 * otherwise we don't need it. 7548 */ 7549 mutex_lock(&uuid_mutex); 7550 7551 /* 7552 * It is possible for mount and umount to race in such a way that 7553 * we execute this code path, but open_fs_devices failed to clear 7554 * total_rw_bytes. We certainly want it cleared before reading the 7555 * device items, so clear it here. 7556 */ 7557 fs_info->fs_devices->total_rw_bytes = 0; 7558 7559 /* 7560 * Lockdep complains about possible circular locking dependency between 7561 * a disk's open_mutex (struct gendisk.open_mutex), the rw semaphores 7562 * used for freeze protection of a fs (struct super_block.s_writers), 7563 * which we take when starting a transaction, and extent buffers of the 7564 * chunk tree if we call read_one_dev() while holding a lock on an 7565 * extent buffer of the chunk tree. Since we are mounting the filesystem 7566 * and at this point there can't be any concurrent task modifying the 7567 * chunk tree, to keep it simple, just skip locking on the chunk tree. 7568 */ 7569 ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags)); 7570 path->skip_locking = 1; 7571 7572 /* 7573 * Read all device items, and then all the chunk items. All 7574 * device items are found before any chunk item (their object id 7575 * is smaller than the lowest possible object id for a chunk 7576 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
7577 */ 7578 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 7579 key.offset = 0; 7580 key.type = 0; 7581 btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) { 7582 struct extent_buffer *node = path->nodes[1]; 7583 7584 leaf = path->nodes[0]; 7585 slot = path->slots[0]; 7586 7587 if (node) { 7588 if (last_ra_node != node->start) { 7589 readahead_tree_node_children(node); 7590 last_ra_node = node->start; 7591 } 7592 } 7593 if (found_key.type == BTRFS_DEV_ITEM_KEY) { 7594 struct btrfs_dev_item *dev_item; 7595 dev_item = btrfs_item_ptr(leaf, slot, 7596 struct btrfs_dev_item); 7597 ret = read_one_dev(leaf, dev_item); 7598 if (ret) 7599 goto error; 7600 total_dev++; 7601 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) { 7602 struct btrfs_chunk *chunk; 7603 7604 /* 7605 * We are only called at mount time, so no need to take 7606 * fs_info->chunk_mutex. Plus, to avoid lockdep warnings, 7607 * we always lock first fs_info->chunk_mutex before 7608 * acquiring any locks on the chunk tree. This is a 7609 * requirement for chunk allocation, see the comment on 7610 * top of btrfs_chunk_alloc() for details. 7611 */ 7612 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 7613 ret = read_one_chunk(&found_key, leaf, chunk); 7614 if (ret) 7615 goto error; 7616 } 7617 } 7618 /* Catch error found during iteration */ 7619 if (iter_ret < 0) { 7620 ret = iter_ret; 7621 goto error; 7622 } 7623 7624 /* 7625 * After loading chunk tree, we've got all device information, 7626 * do another round of validation checks. 7627 */ 7628 if (total_dev != fs_info->fs_devices->total_devices) { 7629 btrfs_warn(fs_info, 7630 "super block num_devices %llu mismatch with DEV_ITEM count %llu, will be repaired on next transaction commit", 7631 btrfs_super_num_devices(fs_info->super_copy), 7632 total_dev); 7633 fs_info->fs_devices->total_devices = total_dev; 7634 btrfs_set_super_num_devices(fs_info->super_copy, total_dev); 7635 } 7636 if (btrfs_super_total_bytes(fs_info->super_copy) < 7637 fs_info->fs_devices->total_rw_bytes) { 7638 btrfs_err(fs_info, 7639 "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu", 7640 btrfs_super_total_bytes(fs_info->super_copy), 7641 fs_info->fs_devices->total_rw_bytes); 7642 ret = -EINVAL; 7643 goto error; 7644 } 7645 ret = 0; 7646 error: 7647 mutex_unlock(&uuid_mutex); 7648 7649 btrfs_free_path(path); 7650 return ret; 7651 } 7652 7653 void btrfs_init_devices_late(struct btrfs_fs_info *fs_info) 7654 { 7655 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs; 7656 struct btrfs_device *device; 7657 7658 fs_devices->fs_info = fs_info; 7659 7660 mutex_lock(&fs_devices->device_list_mutex); 7661 list_for_each_entry(device, &fs_devices->devices, dev_list) 7662 device->fs_info = fs_info; 7663 7664 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 7665 list_for_each_entry(device, &seed_devs->devices, dev_list) 7666 device->fs_info = fs_info; 7667 7668 seed_devs->fs_info = fs_info; 7669 } 7670 mutex_unlock(&fs_devices->device_list_mutex); 7671 } 7672 7673 static u64 btrfs_dev_stats_value(const struct extent_buffer *eb, 7674 const struct btrfs_dev_stats_item *ptr, 7675 int index) 7676 { 7677 u64 val; 7678 7679 read_extent_buffer(eb, &val, 7680 offsetof(struct btrfs_dev_stats_item, values) + 7681 ((unsigned long)ptr) + (index * sizeof(u64)), 7682 sizeof(val)); 7683 return val; 7684 } 7685 7686 static void btrfs_set_dev_stats_value(struct extent_buffer *eb, 7687 struct btrfs_dev_stats_item *ptr, 7688 int index, u64 val) 7689 { 7690 write_extent_buffer(eb, 

static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
				 const struct btrfs_dev_stats_item *ptr,
				 int index)
{
	u64 val;

	read_extent_buffer(eb, &val,
			   offsetof(struct btrfs_dev_stats_item, values) +
			   ((unsigned long)ptr) + (index * sizeof(u64)),
			   sizeof(val));
	return val;
}

static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
				      struct btrfs_dev_stats_item *ptr,
				      int index, u64 val)
{
	write_extent_buffer(eb, &val,
			    offsetof(struct btrfs_dev_stats_item, values) +
			    ((unsigned long)ptr) + (index * sizeof(u64)),
			    sizeof(val));
}

static int btrfs_device_init_dev_stats(struct btrfs_device *device,
				       struct btrfs_path *path)
{
	struct btrfs_dev_stats_item *ptr;
	struct extent_buffer *eb;
	struct btrfs_key key;
	int item_size;
	int i, ret, slot;

	if (!device->fs_info->dev_root)
		return 0;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;
	ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0);
	if (ret) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			btrfs_dev_stat_set(device, i, 0);
		device->dev_stats_valid = 1;
		btrfs_release_path(path);
		return ret < 0 ? ret : 0;
	}
	slot = path->slots[0];
	eb = path->nodes[0];
	item_size = btrfs_item_size(eb, slot);

	ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item);

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
		if (item_size >= (1 + i) * sizeof(__le64))
			btrfs_dev_stat_set(device, i,
					   btrfs_dev_stats_value(eb, ptr, i));
		else
			btrfs_dev_stat_set(device, i, 0);
	}

	device->dev_stats_valid = 1;
	btrfs_dev_stat_print_on_load(device);
	btrfs_release_path(path);

	return 0;
}

int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		ret = btrfs_device_init_dev_stats(device, path);
		if (ret)
			goto out;
	}
	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		list_for_each_entry(device, &seed_devs->devices, dev_list) {
			ret = btrfs_device_init_dev_stats(device, path);
			if (ret)
				goto out;
		}
	}
out:
	mutex_unlock(&fs_devices->device_list_mutex);

	btrfs_free_path(path);
	return ret;
}
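
/*
 * Persist the in-memory error counters of @device as a dev stats item in
 * the device tree. An existing item that is smaller than expected
 * (presumably written when fewer counters were defined) is deleted and
 * re-inserted at the current size before the values are copied in.
 */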
static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_device *device)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn_in_rcu(fs_info,
			"error %d while searching for dev_stats item for device %s",
			ret, rcu_str_deref(device->name));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* need to delete old one and insert a new one */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			btrfs_warn_in_rcu(fs_info,
				"delete too small dev_stats item for device %s failed %d",
				rcu_str_deref(device->name), ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			btrfs_warn_in_rcu(fs_info,
				"insert dev_stats item for device %s failed %d",
				rcu_str_deref(device->name), ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Called from commit_transaction. Writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int stats_cnt;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		stats_cnt = atomic_read(&device->dev_stats_ccnt);
		if (!device->dev_stats_valid || stats_cnt == 0)
			continue;

		/*
		 * There is a LOAD-LOAD control dependency between the value of
		 * dev_stats_ccnt and updating the on-disk values which requires
		 * reading the in-memory counters. Such control dependencies
		 * require explicit read memory barriers.
		 *
		 * This memory barrier pairs with smp_mb__before_atomic in
		 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
		 * barrier implied by atomic_xchg in
		 * btrfs_dev_stats_read_and_reset.
		 */
		smp_rmb();

		ret = update_dev_stat_item(trans, device);
		if (!ret)
			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);
	btrfs_dev_stat_print_on_error(dev);
}

static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
{
	if (!dev->dev_stats_valid)
		return;
	btrfs_err_rl_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
		rcu_str_deref(dev->name),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* all values == 0, suppress message */

	btrfs_info_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
		rcu_str_deref(dev->name),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}
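
/*
 * Handler for the BTRFS_IOC_GET_DEV_STATS ioctl: copy the counters of the
 * device identified by stats->devid into stats->values and, if
 * BTRFS_DEV_STATS_RESET is set in stats->flags, reset the in-memory
 * counters as they are read.
 */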
int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	BTRFS_DEV_LOOKUP_ARGS(args);
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	args.devid = stats->devid;
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		btrfs_warn(fs_info, "get dev_stats failed, device not found");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_set(dev, i, 0);
		}
		btrfs_info(fs_info, "device stats zeroed by %s (%d)",
			   current->comm, task_pid_nr(current));
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}

/*
 * Update the size and bytes used for each device where it changed. This is
 * delayed since we would otherwise get errors while writing out the
 * superblocks.
 *
 * Must be invoked during transaction commit.
 */
void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
{
	struct btrfs_device *curr, *next;

	ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);

	if (list_empty(&trans->dev_update_list))
		return;

	/*
	 * We don't need the device_list_mutex here. This list is owned by the
	 * transaction and the transaction must complete before the device is
	 * released.
	 */
	mutex_lock(&trans->fs_info->chunk_mutex);
	list_for_each_entry_safe(curr, next, &trans->dev_update_list,
				 post_commit_list) {
		list_del_init(&curr->post_commit_list);
		curr->commit_total_bytes = curr->disk_total_bytes;
		curr->commit_bytes_used = curr->bytes_used;
	}
	mutex_unlock(&trans->fs_info->chunk_mutex);
}

/*
 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
 */
int btrfs_bg_type_to_factor(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	return btrfs_raid_array[index].ncopies;
}
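
/*
 * For example, RAID1 has ncopies == 2, so a factor of 2: a 1GiB block
 * group occupies 2GiB of raw device space. The parity profiles (RAID5/6)
 * have ncopies == 1; their overhead is described by nparity instead.
 */

/*
 * Cross-check one dev extent against the chunk it claims to belong to:
 * the chunk must exist, the extent length must match the calculated
 * per-device stripe length, the (devid, physical) pair must match one of
 * the chunk's stripes, and the extent must fit within the device (and be
 * zone aligned on zoned devices).
 */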
static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
				 u64 chunk_offset, u64 devid,
				 u64 physical_offset, u64 physical_len)
{
	struct btrfs_dev_lookup_args args = { .devid = devid };
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *dev;
	u64 stripe_len;
	bool found = false;
	int ret = 0;
	int i;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
		goto out;
	}

	map = em->map_lookup;
	stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
	if (physical_len != stripe_len) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
			  physical_offset, devid, em->start, physical_len,
			  stripe_len);
		ret = -EUCLEAN;
		goto out;
	}

	for (i = 0; i < map->num_stripes; i++) {
		if (map->stripes[i].dev->devid == devid &&
		    map->stripes[i].physical == physical_offset) {
			found = true;
			if (map->verified_stripes >= map->num_stripes) {
				btrfs_err(fs_info,
					  "too many dev extents for chunk %llu found",
					  em->start);
				ret = -EUCLEAN;
				goto out;
			}
			map->verified_stripes++;
			break;
		}
	}
	if (!found) {
		btrfs_err(fs_info,
			  "dev extent physical offset %llu devid %llu has no corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
	}

	/* Make sure no dev extent is beyond device boundary */
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	if (!dev) {
		btrfs_err(fs_info, "failed to find devid %llu", devid);
		ret = -EUCLEAN;
		goto out;
	}

	if (physical_offset + physical_len > dev->disk_total_bytes) {
		btrfs_err(fs_info,
			  "dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
			  devid, physical_offset, physical_len,
			  dev->disk_total_bytes);
		ret = -EUCLEAN;
		goto out;
	}

	if (dev->zone_info) {
		u64 zone_size = dev->zone_info->zone_size;

		if (!IS_ALIGNED(physical_offset, zone_size) ||
		    !IS_ALIGNED(physical_len, zone_size)) {
			btrfs_err(fs_info,
"zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone",
				  devid, physical_offset, physical_len);
			ret = -EUCLEAN;
			goto out;
		}
	}

out:
	free_extent_map(em);
	return ret;
}
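
/*
 * Second half of the cross-check: after every dev extent has been run
 * through verify_one_dev_extent(), each chunk mapping must have had all
 * of its stripes matched, i.e. verified_stripes == num_stripes.
 */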
static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct rb_node *node;
	int ret = 0;

	read_lock(&em_tree->lock);
	for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
		em = rb_entry(node, struct extent_map, rb_node);
		if (em->map_lookup->num_stripes !=
		    em->map_lookup->verified_stripes) {
			btrfs_err(fs_info,
			"chunk %llu has missing dev extent, have %d expect %d",
				  em->start, em->map_lookup->verified_stripes,
				  em->map_lookup->num_stripes);
			ret = -EUCLEAN;
			goto out;
		}
	}
out:
	read_unlock(&em_tree->lock);
	return ret;
}

/*
 * Ensure that all dev extents are mapped to correct chunk, otherwise
 * later chunk allocation/free would cause unexpected behavior.
 *
 * NOTE: This will iterate through the whole device tree, which should be
 * roughly the same size as the chunk tree. This slightly increases mount time.
 */
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	u64 prev_devid = 0;
	u64 prev_dev_ext_end = 0;
	int ret = 0;

	/*
	 * We don't have a dev_root because we mounted with ignorebadroots and
	 * failed to load the root, so we want to skip the verification in this
	 * case for sure.
	 *
	 * However if the dev root is fine, but the tree itself is corrupted
	 * we'd still fail to mount. This verification is only to make sure
	 * writes can happen safely, so instead just bypass this check
	 * completely in the case of IGNOREBADROOTS.
	 */
	if (btrfs_test_opt(fs_info, IGNOREBADROOTS))
		return 0;

	key.objectid = 1;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0)
			goto out;
		/* No dev extents at all? Not good */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	while (1) {
		struct extent_buffer *leaf = path->nodes[0];
		struct btrfs_dev_extent *dext;
		int slot = path->slots[0];
		u64 chunk_offset;
		u64 physical_offset;
		u64 physical_len;
		u64 devid;

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type != BTRFS_DEV_EXTENT_KEY)
			break;
		devid = key.objectid;
		physical_offset = key.offset;

		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
		physical_len = btrfs_dev_extent_length(leaf, dext);

		/* Check if this dev extent overlaps with the previous one */
		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
			btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
				  devid, physical_offset, prev_dev_ext_end);
			ret = -EUCLEAN;
			goto out;
		}

		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
					    physical_offset, physical_len);
		if (ret < 0)
			goto out;
		prev_devid = devid;
		prev_dev_ext_end = physical_offset + physical_len;

		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = 0;
			break;
		}
	}

	/* Ensure all chunks have corresponding dev extents */
	ret = verify_chunk_dev_extent_mapping(fs_info);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Check whether the given block group or device is pinned by any inode being
 * used as a swapfile.
 */
bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
{
	struct btrfs_swapfile_pin *sp;
	struct rb_node *node;

	spin_lock(&fs_info->swapfile_pins_lock);
	node = fs_info->swapfile_pins.rb_node;
	while (node) {
		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
		if (ptr < sp->ptr)
			node = node->rb_left;
		else if (ptr > sp->ptr)
			node = node->rb_right;
		else
			break;
	}
	spin_unlock(&fs_info->swapfile_pins_lock);
	return node != NULL;
}
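
/*
 * Worker for btrfs_repair_one_zone(). Takes ownership of the block group
 * reference passed via @data, re-looks the group up under
 * reclaim_bgs_lock, and relocates it while holding the
 * BTRFS_EXCLOP_BALANCE exclusive operation.
 */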
static int relocating_repair_kthread(void *data)
{
	struct btrfs_block_group *cache = data;
	struct btrfs_fs_info *fs_info = cache->fs_info;
	u64 target;
	int ret = 0;

	target = cache->start;
	btrfs_put_block_group(cache);

	sb_start_write(fs_info->sb);
	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
		btrfs_info(fs_info,
			   "zoned: skip relocating block group %llu to repair: EBUSY",
			   target);
		sb_end_write(fs_info->sb);
		return -EBUSY;
	}

	mutex_lock(&fs_info->reclaim_bgs_lock);

	/* Ensure block group still exists */
	cache = btrfs_lookup_block_group(fs_info, target);
	if (!cache)
		goto out;

	if (!cache->relocating_repair)
		goto out;

	ret = btrfs_may_alloc_data_chunk(fs_info, target);
	if (ret < 0)
		goto out;

	btrfs_info(fs_info,
		   "zoned: relocating block group %llu to repair IO failure",
		   target);
	ret = btrfs_relocate_chunk(fs_info, target);

out:
	if (cache)
		btrfs_put_block_group(cache);
	mutex_unlock(&fs_info->reclaim_bgs_lock);
	btrfs_exclop_finish(fs_info);
	sb_end_write(fs_info->sb);

	return ret;
}

bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
{
	struct btrfs_block_group *cache;

	if (!btrfs_is_zoned(fs_info))
		return false;

	/* Do not attempt to repair in degraded state */
	if (btrfs_test_opt(fs_info, DEGRADED))
		return true;

	cache = btrfs_lookup_block_group(fs_info, logical);
	if (!cache)
		return true;

	spin_lock(&cache->lock);
	if (cache->relocating_repair) {
		spin_unlock(&cache->lock);
		btrfs_put_block_group(cache);
		return true;
	}
	cache->relocating_repair = 1;
	spin_unlock(&cache->lock);

	kthread_run(relocating_repair_kthread, cache,
		    "btrfs-relocating-repair");

	return true;
}