1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2007 Oracle. All rights reserved. 4 */ 5 6 #include <linux/sched.h> 7 #include <linux/sched/mm.h> 8 #include <linux/bio.h> 9 #include <linux/slab.h> 10 #include <linux/blkdev.h> 11 #include <linux/ratelimit.h> 12 #include <linux/kthread.h> 13 #include <linux/raid/pq.h> 14 #include <linux/semaphore.h> 15 #include <linux/uuid.h> 16 #include <linux/list_sort.h> 17 #include <linux/namei.h> 18 #include "misc.h" 19 #include "ctree.h" 20 #include "extent_map.h" 21 #include "disk-io.h" 22 #include "transaction.h" 23 #include "print-tree.h" 24 #include "volumes.h" 25 #include "raid56.h" 26 #include "async-thread.h" 27 #include "check-integrity.h" 28 #include "rcu-string.h" 29 #include "dev-replace.h" 30 #include "sysfs.h" 31 #include "tree-checker.h" 32 #include "space-info.h" 33 #include "block-group.h" 34 #include "discard.h" 35 #include "zoned.h" 36 37 #define BTRFS_BLOCK_GROUP_STRIPE_MASK (BTRFS_BLOCK_GROUP_RAID0 | \ 38 BTRFS_BLOCK_GROUP_RAID10 | \ 39 BTRFS_BLOCK_GROUP_RAID56_MASK) 40 41 const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = { 42 [BTRFS_RAID_RAID10] = { 43 .sub_stripes = 2, 44 .dev_stripes = 1, 45 .devs_max = 0, /* 0 == as many as possible */ 46 .devs_min = 2, 47 .tolerated_failures = 1, 48 .devs_increment = 2, 49 .ncopies = 2, 50 .nparity = 0, 51 .raid_name = "raid10", 52 .bg_flag = BTRFS_BLOCK_GROUP_RAID10, 53 .mindev_error = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET, 54 }, 55 [BTRFS_RAID_RAID1] = { 56 .sub_stripes = 1, 57 .dev_stripes = 1, 58 .devs_max = 2, 59 .devs_min = 2, 60 .tolerated_failures = 1, 61 .devs_increment = 2, 62 .ncopies = 2, 63 .nparity = 0, 64 .raid_name = "raid1", 65 .bg_flag = BTRFS_BLOCK_GROUP_RAID1, 66 .mindev_error = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET, 67 }, 68 [BTRFS_RAID_RAID1C3] = { 69 .sub_stripes = 1, 70 .dev_stripes = 1, 71 .devs_max = 3, 72 .devs_min = 3, 73 .tolerated_failures = 2, 74 .devs_increment = 3, 75 .ncopies = 3, 76 .nparity = 0, 77 .raid_name = "raid1c3", 78 .bg_flag = BTRFS_BLOCK_GROUP_RAID1C3, 79 .mindev_error = BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET, 80 }, 81 [BTRFS_RAID_RAID1C4] = { 82 .sub_stripes = 1, 83 .dev_stripes = 1, 84 .devs_max = 4, 85 .devs_min = 4, 86 .tolerated_failures = 3, 87 .devs_increment = 4, 88 .ncopies = 4, 89 .nparity = 0, 90 .raid_name = "raid1c4", 91 .bg_flag = BTRFS_BLOCK_GROUP_RAID1C4, 92 .mindev_error = BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET, 93 }, 94 [BTRFS_RAID_DUP] = { 95 .sub_stripes = 1, 96 .dev_stripes = 2, 97 .devs_max = 1, 98 .devs_min = 1, 99 .tolerated_failures = 0, 100 .devs_increment = 1, 101 .ncopies = 2, 102 .nparity = 0, 103 .raid_name = "dup", 104 .bg_flag = BTRFS_BLOCK_GROUP_DUP, 105 .mindev_error = 0, 106 }, 107 [BTRFS_RAID_RAID0] = { 108 .sub_stripes = 1, 109 .dev_stripes = 1, 110 .devs_max = 0, 111 .devs_min = 1, 112 .tolerated_failures = 0, 113 .devs_increment = 1, 114 .ncopies = 1, 115 .nparity = 0, 116 .raid_name = "raid0", 117 .bg_flag = BTRFS_BLOCK_GROUP_RAID0, 118 .mindev_error = 0, 119 }, 120 [BTRFS_RAID_SINGLE] = { 121 .sub_stripes = 1, 122 .dev_stripes = 1, 123 .devs_max = 1, 124 .devs_min = 1, 125 .tolerated_failures = 0, 126 .devs_increment = 1, 127 .ncopies = 1, 128 .nparity = 0, 129 .raid_name = "single", 130 .bg_flag = 0, 131 .mindev_error = 0, 132 }, 133 [BTRFS_RAID_RAID5] = { 134 .sub_stripes = 1, 135 .dev_stripes = 1, 136 .devs_max = 0, 137 .devs_min = 2, 138 .tolerated_failures = 1, 139 .devs_increment = 1, 140 .ncopies = 1, 141 .nparity = 1, 142 .raid_name = "raid5", 143 .bg_flag = BTRFS_BLOCK_GROUP_RAID5, 144 .mindev_error = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET, 145 }, 146 [BTRFS_RAID_RAID6] = { 147 .sub_stripes = 1, 148 .dev_stripes = 1, 149 .devs_max = 0, 150 .devs_min = 3, 151 .tolerated_failures = 2, 152 .devs_increment = 1, 153 .ncopies = 1, 154 .nparity = 2, 155 .raid_name = "raid6", 156 .bg_flag = BTRFS_BLOCK_GROUP_RAID6, 157 .mindev_error = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET, 158 }, 159 }; 160 161 /* 162 * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which 163 * can be used as index to access btrfs_raid_array[]. 164 */ 165 enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags) 166 { 167 const u64 profile = (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK); 168 169 if (!profile) 170 return BTRFS_RAID_SINGLE; 171 172 return BTRFS_BG_FLAG_TO_INDEX(profile); 173 } 174 175 const char *btrfs_bg_type_to_raid_name(u64 flags) 176 { 177 const int index = btrfs_bg_flags_to_raid_index(flags); 178 179 if (index >= BTRFS_NR_RAID_TYPES) 180 return NULL; 181 182 return btrfs_raid_array[index].raid_name; 183 } 184 185 int btrfs_nr_parity_stripes(u64 type) 186 { 187 enum btrfs_raid_types index = btrfs_bg_flags_to_raid_index(type); 188 189 return btrfs_raid_array[index].nparity; 190 } 191 192 /* 193 * Fill @buf with textual description of @bg_flags, no more than @size_buf 194 * bytes including terminating null byte. 195 */ 196 void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf) 197 { 198 int i; 199 int ret; 200 char *bp = buf; 201 u64 flags = bg_flags; 202 u32 size_bp = size_buf; 203 204 if (!flags) { 205 strcpy(bp, "NONE"); 206 return; 207 } 208 209 #define DESCRIBE_FLAG(flag, desc) \ 210 do { \ 211 if (flags & (flag)) { \ 212 ret = snprintf(bp, size_bp, "%s|", (desc)); \ 213 if (ret < 0 || ret >= size_bp) \ 214 goto out_overflow; \ 215 size_bp -= ret; \ 216 bp += ret; \ 217 flags &= ~(flag); \ 218 } \ 219 } while (0) 220 221 DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data"); 222 DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system"); 223 DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata"); 224 225 DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single"); 226 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) 227 DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag, 228 btrfs_raid_array[i].raid_name); 229 #undef DESCRIBE_FLAG 230 231 if (flags) { 232 ret = snprintf(bp, size_bp, "0x%llx|", flags); 233 size_bp -= ret; 234 } 235 236 if (size_bp < size_buf) 237 buf[size_buf - size_bp - 1] = '\0'; /* remove last | */ 238 239 /* 240 * The text is trimmed, it's up to the caller to provide sufficiently 241 * large buffer 242 */ 243 out_overflow:; 244 } 245 246 static int init_first_rw_device(struct btrfs_trans_handle *trans); 247 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info); 248 static void btrfs_dev_stat_print_on_load(struct btrfs_device *device); 249 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, 250 enum btrfs_map_op op, 251 u64 logical, u64 *length, 252 struct btrfs_io_context **bioc_ret, 253 int mirror_num, int need_raid_map); 254 255 /* 256 * Device locking 257 * ============== 258 * 259 * There are several mutexes that protect manipulation of devices and low-level 260 * structures like chunks but not block groups, extents or files 261 * 262 * uuid_mutex (global lock) 263 * ------------------------ 264 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from 265 * the SCAN_DEV ioctl registration or from mount either implicitly (the first 266 * device) or requested by the device= mount option 267 * 268 * the mutex can be very coarse and can cover long-running operations 269 * 270 * protects: updates to fs_devices counters like missing devices, rw devices, 271 * seeding, structure cloning, opening/closing devices at mount/umount time 272 * 273 * global::fs_devs - add, remove, updates to the global list 274 * 275 * does not protect: manipulation of the fs_devices::devices list in general 276 * but in mount context it could be used to exclude list modifications by eg. 277 * scan ioctl 278 * 279 * btrfs_device::name - renames (write side), read is RCU 280 * 281 * fs_devices::device_list_mutex (per-fs, with RCU) 282 * ------------------------------------------------ 283 * protects updates to fs_devices::devices, ie. adding and deleting 284 * 285 * simple list traversal with read-only actions can be done with RCU protection 286 * 287 * may be used to exclude some operations from running concurrently without any 288 * modifications to the list (see write_all_supers) 289 * 290 * Is not required at mount and close times, because our device list is 291 * protected by the uuid_mutex at that point. 292 * 293 * balance_mutex 294 * ------------- 295 * protects balance structures (status, state) and context accessed from 296 * several places (internally, ioctl) 297 * 298 * chunk_mutex 299 * ----------- 300 * protects chunks, adding or removing during allocation, trim or when a new 301 * device is added/removed. Additionally it also protects post_commit_list of 302 * individual devices, since they can be added to the transaction's 303 * post_commit_list only with chunk_mutex held. 304 * 305 * cleaner_mutex 306 * ------------- 307 * a big lock that is held by the cleaner thread and prevents running subvolume 308 * cleaning together with relocation or delayed iputs 309 * 310 * 311 * Lock nesting 312 * ============ 313 * 314 * uuid_mutex 315 * device_list_mutex 316 * chunk_mutex 317 * balance_mutex 318 * 319 * 320 * Exclusive operations 321 * ==================== 322 * 323 * Maintains the exclusivity of the following operations that apply to the 324 * whole filesystem and cannot run in parallel. 325 * 326 * - Balance (*) 327 * - Device add 328 * - Device remove 329 * - Device replace (*) 330 * - Resize 331 * 332 * The device operations (as above) can be in one of the following states: 333 * 334 * - Running state 335 * - Paused state 336 * - Completed state 337 * 338 * Only device operations marked with (*) can go into the Paused state for the 339 * following reasons: 340 * 341 * - ioctl (only Balance can be Paused through ioctl) 342 * - filesystem remounted as read-only 343 * - filesystem unmounted and mounted as read-only 344 * - system power-cycle and filesystem mounted as read-only 345 * - filesystem or device errors leading to forced read-only 346 * 347 * The status of exclusive operation is set and cleared atomically. 348 * During the course of Paused state, fs_info::exclusive_operation remains set. 349 * A device operation in Paused or Running state can be canceled or resumed 350 * either by ioctl (Balance only) or when remounted as read-write. 351 * The exclusive status is cleared when the device operation is canceled or 352 * completed. 353 */ 354 355 DEFINE_MUTEX(uuid_mutex); 356 static LIST_HEAD(fs_uuids); 357 struct list_head * __attribute_const__ btrfs_get_fs_uuids(void) 358 { 359 return &fs_uuids; 360 } 361 362 /* 363 * alloc_fs_devices - allocate struct btrfs_fs_devices 364 * @fsid: if not NULL, copy the UUID to fs_devices::fsid 365 * @metadata_fsid: if not NULL, copy the UUID to fs_devices::metadata_fsid 366 * 367 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR(). 368 * The returned struct is not linked onto any lists and can be destroyed with 369 * kfree() right away. 370 */ 371 static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid, 372 const u8 *metadata_fsid) 373 { 374 struct btrfs_fs_devices *fs_devs; 375 376 fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL); 377 if (!fs_devs) 378 return ERR_PTR(-ENOMEM); 379 380 mutex_init(&fs_devs->device_list_mutex); 381 382 INIT_LIST_HEAD(&fs_devs->devices); 383 INIT_LIST_HEAD(&fs_devs->alloc_list); 384 INIT_LIST_HEAD(&fs_devs->fs_list); 385 INIT_LIST_HEAD(&fs_devs->seed_list); 386 if (fsid) 387 memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE); 388 389 if (metadata_fsid) 390 memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE); 391 else if (fsid) 392 memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE); 393 394 return fs_devs; 395 } 396 397 void btrfs_free_device(struct btrfs_device *device) 398 { 399 WARN_ON(!list_empty(&device->post_commit_list)); 400 rcu_string_free(device->name); 401 extent_io_tree_release(&device->alloc_state); 402 btrfs_destroy_dev_zone_info(device); 403 kfree(device); 404 } 405 406 static void free_fs_devices(struct btrfs_fs_devices *fs_devices) 407 { 408 struct btrfs_device *device; 409 WARN_ON(fs_devices->opened); 410 while (!list_empty(&fs_devices->devices)) { 411 device = list_entry(fs_devices->devices.next, 412 struct btrfs_device, dev_list); 413 list_del(&device->dev_list); 414 btrfs_free_device(device); 415 } 416 kfree(fs_devices); 417 } 418 419 void __exit btrfs_cleanup_fs_uuids(void) 420 { 421 struct btrfs_fs_devices *fs_devices; 422 423 while (!list_empty(&fs_uuids)) { 424 fs_devices = list_entry(fs_uuids.next, 425 struct btrfs_fs_devices, fs_list); 426 list_del(&fs_devices->fs_list); 427 free_fs_devices(fs_devices); 428 } 429 } 430 431 static noinline struct btrfs_fs_devices *find_fsid( 432 const u8 *fsid, const u8 *metadata_fsid) 433 { 434 struct btrfs_fs_devices *fs_devices; 435 436 ASSERT(fsid); 437 438 /* Handle non-split brain cases */ 439 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 440 if (metadata_fsid) { 441 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0 442 && memcmp(metadata_fsid, fs_devices->metadata_uuid, 443 BTRFS_FSID_SIZE) == 0) 444 return fs_devices; 445 } else { 446 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0) 447 return fs_devices; 448 } 449 } 450 return NULL; 451 } 452 453 static struct btrfs_fs_devices *find_fsid_with_metadata_uuid( 454 struct btrfs_super_block *disk_super) 455 { 456 457 struct btrfs_fs_devices *fs_devices; 458 459 /* 460 * Handle scanned device having completed its fsid change but 461 * belonging to a fs_devices that was created by first scanning 462 * a device which didn't have its fsid/metadata_uuid changed 463 * at all and the CHANGING_FSID_V2 flag set. 464 */ 465 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 466 if (fs_devices->fsid_change && 467 memcmp(disk_super->metadata_uuid, fs_devices->fsid, 468 BTRFS_FSID_SIZE) == 0 && 469 memcmp(fs_devices->fsid, fs_devices->metadata_uuid, 470 BTRFS_FSID_SIZE) == 0) { 471 return fs_devices; 472 } 473 } 474 /* 475 * Handle scanned device having completed its fsid change but 476 * belonging to a fs_devices that was created by a device that 477 * has an outdated pair of fsid/metadata_uuid and 478 * CHANGING_FSID_V2 flag set. 479 */ 480 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 481 if (fs_devices->fsid_change && 482 memcmp(fs_devices->metadata_uuid, 483 fs_devices->fsid, BTRFS_FSID_SIZE) != 0 && 484 memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid, 485 BTRFS_FSID_SIZE) == 0) { 486 return fs_devices; 487 } 488 } 489 490 return find_fsid(disk_super->fsid, disk_super->metadata_uuid); 491 } 492 493 494 static int 495 btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder, 496 int flush, struct block_device **bdev, 497 struct btrfs_super_block **disk_super) 498 { 499 int ret; 500 501 *bdev = blkdev_get_by_path(device_path, flags, holder); 502 503 if (IS_ERR(*bdev)) { 504 ret = PTR_ERR(*bdev); 505 goto error; 506 } 507 508 if (flush) 509 sync_blockdev(*bdev); 510 ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE); 511 if (ret) { 512 blkdev_put(*bdev, flags); 513 goto error; 514 } 515 invalidate_bdev(*bdev); 516 *disk_super = btrfs_read_dev_super(*bdev); 517 if (IS_ERR(*disk_super)) { 518 ret = PTR_ERR(*disk_super); 519 blkdev_put(*bdev, flags); 520 goto error; 521 } 522 523 return 0; 524 525 error: 526 *bdev = NULL; 527 return ret; 528 } 529 530 /** 531 * Search and remove all stale devices (which are not mounted). 532 * When both inputs are NULL, it will search and release all stale devices. 533 * 534 * @devt: Optional. When provided will it release all unmounted devices 535 * matching this devt only. 536 * @skip_device: Optional. Will skip this device when searching for the stale 537 * devices. 538 * 539 * Return: 0 for success or if @devt is 0. 540 * -EBUSY if @devt is a mounted device. 541 * -ENOENT if @devt does not match any device in the list. 542 */ 543 static int btrfs_free_stale_devices(dev_t devt, struct btrfs_device *skip_device) 544 { 545 struct btrfs_fs_devices *fs_devices, *tmp_fs_devices; 546 struct btrfs_device *device, *tmp_device; 547 int ret = 0; 548 549 lockdep_assert_held(&uuid_mutex); 550 551 if (devt) 552 ret = -ENOENT; 553 554 list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) { 555 556 mutex_lock(&fs_devices->device_list_mutex); 557 list_for_each_entry_safe(device, tmp_device, 558 &fs_devices->devices, dev_list) { 559 if (skip_device && skip_device == device) 560 continue; 561 if (devt && devt != device->devt) 562 continue; 563 if (fs_devices->opened) { 564 /* for an already deleted device return 0 */ 565 if (devt && ret != 0) 566 ret = -EBUSY; 567 break; 568 } 569 570 /* delete the stale device */ 571 fs_devices->num_devices--; 572 list_del(&device->dev_list); 573 btrfs_free_device(device); 574 575 ret = 0; 576 } 577 mutex_unlock(&fs_devices->device_list_mutex); 578 579 if (fs_devices->num_devices == 0) { 580 btrfs_sysfs_remove_fsid(fs_devices); 581 list_del(&fs_devices->fs_list); 582 free_fs_devices(fs_devices); 583 } 584 } 585 586 return ret; 587 } 588 589 /* 590 * This is only used on mount, and we are protected from competing things 591 * messing with our fs_devices by the uuid_mutex, thus we do not need the 592 * fs_devices->device_list_mutex here. 593 */ 594 static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices, 595 struct btrfs_device *device, fmode_t flags, 596 void *holder) 597 { 598 struct block_device *bdev; 599 struct btrfs_super_block *disk_super; 600 u64 devid; 601 int ret; 602 603 if (device->bdev) 604 return -EINVAL; 605 if (!device->name) 606 return -EINVAL; 607 608 ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1, 609 &bdev, &disk_super); 610 if (ret) 611 return ret; 612 613 devid = btrfs_stack_device_id(&disk_super->dev_item); 614 if (devid != device->devid) 615 goto error_free_page; 616 617 if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE)) 618 goto error_free_page; 619 620 device->generation = btrfs_super_generation(disk_super); 621 622 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) { 623 if (btrfs_super_incompat_flags(disk_super) & 624 BTRFS_FEATURE_INCOMPAT_METADATA_UUID) { 625 pr_err( 626 "BTRFS: Invalid seeding and uuid-changed device detected\n"); 627 goto error_free_page; 628 } 629 630 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 631 fs_devices->seeding = true; 632 } else { 633 if (bdev_read_only(bdev)) 634 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 635 else 636 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 637 } 638 639 if (!bdev_nonrot(bdev)) 640 fs_devices->rotating = true; 641 642 device->bdev = bdev; 643 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 644 device->mode = flags; 645 646 fs_devices->open_devices++; 647 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 648 device->devid != BTRFS_DEV_REPLACE_DEVID) { 649 fs_devices->rw_devices++; 650 list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list); 651 } 652 btrfs_release_disk_super(disk_super); 653 654 return 0; 655 656 error_free_page: 657 btrfs_release_disk_super(disk_super); 658 blkdev_put(bdev, flags); 659 660 return -EINVAL; 661 } 662 663 /* 664 * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices 665 * being created with a disk that has already completed its fsid change. Such 666 * disk can belong to an fs which has its FSID changed or to one which doesn't. 667 * Handle both cases here. 668 */ 669 static struct btrfs_fs_devices *find_fsid_inprogress( 670 struct btrfs_super_block *disk_super) 671 { 672 struct btrfs_fs_devices *fs_devices; 673 674 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 675 if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid, 676 BTRFS_FSID_SIZE) != 0 && 677 memcmp(fs_devices->metadata_uuid, disk_super->fsid, 678 BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) { 679 return fs_devices; 680 } 681 } 682 683 return find_fsid(disk_super->fsid, NULL); 684 } 685 686 687 static struct btrfs_fs_devices *find_fsid_changed( 688 struct btrfs_super_block *disk_super) 689 { 690 struct btrfs_fs_devices *fs_devices; 691 692 /* 693 * Handles the case where scanned device is part of an fs that had 694 * multiple successful changes of FSID but currently device didn't 695 * observe it. Meaning our fsid will be different than theirs. We need 696 * to handle two subcases : 697 * 1 - The fs still continues to have different METADATA/FSID uuids. 698 * 2 - The fs is switched back to its original FSID (METADATA/FSID 699 * are equal). 700 */ 701 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 702 /* Changed UUIDs */ 703 if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid, 704 BTRFS_FSID_SIZE) != 0 && 705 memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid, 706 BTRFS_FSID_SIZE) == 0 && 707 memcmp(fs_devices->fsid, disk_super->fsid, 708 BTRFS_FSID_SIZE) != 0) 709 return fs_devices; 710 711 /* Unchanged UUIDs */ 712 if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid, 713 BTRFS_FSID_SIZE) == 0 && 714 memcmp(fs_devices->fsid, disk_super->metadata_uuid, 715 BTRFS_FSID_SIZE) == 0) 716 return fs_devices; 717 } 718 719 return NULL; 720 } 721 722 static struct btrfs_fs_devices *find_fsid_reverted_metadata( 723 struct btrfs_super_block *disk_super) 724 { 725 struct btrfs_fs_devices *fs_devices; 726 727 /* 728 * Handle the case where the scanned device is part of an fs whose last 729 * metadata UUID change reverted it to the original FSID. At the same 730 * time * fs_devices was first created by another constitutent device 731 * which didn't fully observe the operation. This results in an 732 * btrfs_fs_devices created with metadata/fsid different AND 733 * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the 734 * fs_devices equal to the FSID of the disk. 735 */ 736 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 737 if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid, 738 BTRFS_FSID_SIZE) != 0 && 739 memcmp(fs_devices->metadata_uuid, disk_super->fsid, 740 BTRFS_FSID_SIZE) == 0 && 741 fs_devices->fsid_change) 742 return fs_devices; 743 } 744 745 return NULL; 746 } 747 /* 748 * Add new device to list of registered devices 749 * 750 * Returns: 751 * device pointer which was just added or updated when successful 752 * error pointer when failed 753 */ 754 static noinline struct btrfs_device *device_list_add(const char *path, 755 struct btrfs_super_block *disk_super, 756 bool *new_device_added) 757 { 758 struct btrfs_device *device; 759 struct btrfs_fs_devices *fs_devices = NULL; 760 struct rcu_string *name; 761 u64 found_transid = btrfs_super_generation(disk_super); 762 u64 devid = btrfs_stack_device_id(&disk_super->dev_item); 763 dev_t path_devt; 764 int error; 765 bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) & 766 BTRFS_FEATURE_INCOMPAT_METADATA_UUID); 767 bool fsid_change_in_progress = (btrfs_super_flags(disk_super) & 768 BTRFS_SUPER_FLAG_CHANGING_FSID_V2); 769 770 error = lookup_bdev(path, &path_devt); 771 if (error) 772 return ERR_PTR(error); 773 774 if (fsid_change_in_progress) { 775 if (!has_metadata_uuid) 776 fs_devices = find_fsid_inprogress(disk_super); 777 else 778 fs_devices = find_fsid_changed(disk_super); 779 } else if (has_metadata_uuid) { 780 fs_devices = find_fsid_with_metadata_uuid(disk_super); 781 } else { 782 fs_devices = find_fsid_reverted_metadata(disk_super); 783 if (!fs_devices) 784 fs_devices = find_fsid(disk_super->fsid, NULL); 785 } 786 787 788 if (!fs_devices) { 789 if (has_metadata_uuid) 790 fs_devices = alloc_fs_devices(disk_super->fsid, 791 disk_super->metadata_uuid); 792 else 793 fs_devices = alloc_fs_devices(disk_super->fsid, NULL); 794 795 if (IS_ERR(fs_devices)) 796 return ERR_CAST(fs_devices); 797 798 fs_devices->fsid_change = fsid_change_in_progress; 799 800 mutex_lock(&fs_devices->device_list_mutex); 801 list_add(&fs_devices->fs_list, &fs_uuids); 802 803 device = NULL; 804 } else { 805 struct btrfs_dev_lookup_args args = { 806 .devid = devid, 807 .uuid = disk_super->dev_item.uuid, 808 }; 809 810 mutex_lock(&fs_devices->device_list_mutex); 811 device = btrfs_find_device(fs_devices, &args); 812 813 /* 814 * If this disk has been pulled into an fs devices created by 815 * a device which had the CHANGING_FSID_V2 flag then replace the 816 * metadata_uuid/fsid values of the fs_devices. 817 */ 818 if (fs_devices->fsid_change && 819 found_transid > fs_devices->latest_generation) { 820 memcpy(fs_devices->fsid, disk_super->fsid, 821 BTRFS_FSID_SIZE); 822 823 if (has_metadata_uuid) 824 memcpy(fs_devices->metadata_uuid, 825 disk_super->metadata_uuid, 826 BTRFS_FSID_SIZE); 827 else 828 memcpy(fs_devices->metadata_uuid, 829 disk_super->fsid, BTRFS_FSID_SIZE); 830 831 fs_devices->fsid_change = false; 832 } 833 } 834 835 if (!device) { 836 if (fs_devices->opened) { 837 mutex_unlock(&fs_devices->device_list_mutex); 838 return ERR_PTR(-EBUSY); 839 } 840 841 device = btrfs_alloc_device(NULL, &devid, 842 disk_super->dev_item.uuid); 843 if (IS_ERR(device)) { 844 mutex_unlock(&fs_devices->device_list_mutex); 845 /* we can safely leave the fs_devices entry around */ 846 return device; 847 } 848 849 name = rcu_string_strdup(path, GFP_NOFS); 850 if (!name) { 851 btrfs_free_device(device); 852 mutex_unlock(&fs_devices->device_list_mutex); 853 return ERR_PTR(-ENOMEM); 854 } 855 rcu_assign_pointer(device->name, name); 856 device->devt = path_devt; 857 858 list_add_rcu(&device->dev_list, &fs_devices->devices); 859 fs_devices->num_devices++; 860 861 device->fs_devices = fs_devices; 862 *new_device_added = true; 863 864 if (disk_super->label[0]) 865 pr_info( 866 "BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n", 867 disk_super->label, devid, found_transid, path, 868 current->comm, task_pid_nr(current)); 869 else 870 pr_info( 871 "BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n", 872 disk_super->fsid, devid, found_transid, path, 873 current->comm, task_pid_nr(current)); 874 875 } else if (!device->name || strcmp(device->name->str, path)) { 876 /* 877 * When FS is already mounted. 878 * 1. If you are here and if the device->name is NULL that 879 * means this device was missing at time of FS mount. 880 * 2. If you are here and if the device->name is different 881 * from 'path' that means either 882 * a. The same device disappeared and reappeared with 883 * different name. or 884 * b. The missing-disk-which-was-replaced, has 885 * reappeared now. 886 * 887 * We must allow 1 and 2a above. But 2b would be a spurious 888 * and unintentional. 889 * 890 * Further in case of 1 and 2a above, the disk at 'path' 891 * would have missed some transaction when it was away and 892 * in case of 2a the stale bdev has to be updated as well. 893 * 2b must not be allowed at all time. 894 */ 895 896 /* 897 * For now, we do allow update to btrfs_fs_device through the 898 * btrfs dev scan cli after FS has been mounted. We're still 899 * tracking a problem where systems fail mount by subvolume id 900 * when we reject replacement on a mounted FS. 901 */ 902 if (!fs_devices->opened && found_transid < device->generation) { 903 /* 904 * That is if the FS is _not_ mounted and if you 905 * are here, that means there is more than one 906 * disk with same uuid and devid.We keep the one 907 * with larger generation number or the last-in if 908 * generation are equal. 909 */ 910 mutex_unlock(&fs_devices->device_list_mutex); 911 return ERR_PTR(-EEXIST); 912 } 913 914 /* 915 * We are going to replace the device path for a given devid, 916 * make sure it's the same device if the device is mounted 917 * 918 * NOTE: the device->fs_info may not be reliable here so pass 919 * in a NULL to message helpers instead. This avoids a possible 920 * use-after-free when the fs_info and fs_info->sb are already 921 * torn down. 922 */ 923 if (device->bdev) { 924 if (device->devt != path_devt) { 925 mutex_unlock(&fs_devices->device_list_mutex); 926 btrfs_warn_in_rcu(NULL, 927 "duplicate device %s devid %llu generation %llu scanned by %s (%d)", 928 path, devid, found_transid, 929 current->comm, 930 task_pid_nr(current)); 931 return ERR_PTR(-EEXIST); 932 } 933 btrfs_info_in_rcu(NULL, 934 "devid %llu device path %s changed to %s scanned by %s (%d)", 935 devid, rcu_str_deref(device->name), 936 path, current->comm, 937 task_pid_nr(current)); 938 } 939 940 name = rcu_string_strdup(path, GFP_NOFS); 941 if (!name) { 942 mutex_unlock(&fs_devices->device_list_mutex); 943 return ERR_PTR(-ENOMEM); 944 } 945 rcu_string_free(device->name); 946 rcu_assign_pointer(device->name, name); 947 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { 948 fs_devices->missing_devices--; 949 clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 950 } 951 device->devt = path_devt; 952 } 953 954 /* 955 * Unmount does not free the btrfs_device struct but would zero 956 * generation along with most of the other members. So just update 957 * it back. We need it to pick the disk with largest generation 958 * (as above). 959 */ 960 if (!fs_devices->opened) { 961 device->generation = found_transid; 962 fs_devices->latest_generation = max_t(u64, found_transid, 963 fs_devices->latest_generation); 964 } 965 966 fs_devices->total_devices = btrfs_super_num_devices(disk_super); 967 968 mutex_unlock(&fs_devices->device_list_mutex); 969 return device; 970 } 971 972 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig) 973 { 974 struct btrfs_fs_devices *fs_devices; 975 struct btrfs_device *device; 976 struct btrfs_device *orig_dev; 977 int ret = 0; 978 979 lockdep_assert_held(&uuid_mutex); 980 981 fs_devices = alloc_fs_devices(orig->fsid, NULL); 982 if (IS_ERR(fs_devices)) 983 return fs_devices; 984 985 fs_devices->total_devices = orig->total_devices; 986 987 list_for_each_entry(orig_dev, &orig->devices, dev_list) { 988 struct rcu_string *name; 989 990 device = btrfs_alloc_device(NULL, &orig_dev->devid, 991 orig_dev->uuid); 992 if (IS_ERR(device)) { 993 ret = PTR_ERR(device); 994 goto error; 995 } 996 997 /* 998 * This is ok to do without rcu read locked because we hold the 999 * uuid mutex so nothing we touch in here is going to disappear. 1000 */ 1001 if (orig_dev->name) { 1002 name = rcu_string_strdup(orig_dev->name->str, 1003 GFP_KERNEL); 1004 if (!name) { 1005 btrfs_free_device(device); 1006 ret = -ENOMEM; 1007 goto error; 1008 } 1009 rcu_assign_pointer(device->name, name); 1010 } 1011 1012 list_add(&device->dev_list, &fs_devices->devices); 1013 device->fs_devices = fs_devices; 1014 fs_devices->num_devices++; 1015 } 1016 return fs_devices; 1017 error: 1018 free_fs_devices(fs_devices); 1019 return ERR_PTR(ret); 1020 } 1021 1022 static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, 1023 struct btrfs_device **latest_dev) 1024 { 1025 struct btrfs_device *device, *next; 1026 1027 /* This is the initialized path, it is safe to release the devices. */ 1028 list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) { 1029 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) { 1030 if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, 1031 &device->dev_state) && 1032 !test_bit(BTRFS_DEV_STATE_MISSING, 1033 &device->dev_state) && 1034 (!*latest_dev || 1035 device->generation > (*latest_dev)->generation)) { 1036 *latest_dev = device; 1037 } 1038 continue; 1039 } 1040 1041 /* 1042 * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID, 1043 * in btrfs_init_dev_replace() so just continue. 1044 */ 1045 if (device->devid == BTRFS_DEV_REPLACE_DEVID) 1046 continue; 1047 1048 if (device->bdev) { 1049 blkdev_put(device->bdev, device->mode); 1050 device->bdev = NULL; 1051 fs_devices->open_devices--; 1052 } 1053 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 1054 list_del_init(&device->dev_alloc_list); 1055 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 1056 fs_devices->rw_devices--; 1057 } 1058 list_del_init(&device->dev_list); 1059 fs_devices->num_devices--; 1060 btrfs_free_device(device); 1061 } 1062 1063 } 1064 1065 /* 1066 * After we have read the system tree and know devids belonging to this 1067 * filesystem, remove the device which does not belong there. 1068 */ 1069 void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices) 1070 { 1071 struct btrfs_device *latest_dev = NULL; 1072 struct btrfs_fs_devices *seed_dev; 1073 1074 mutex_lock(&uuid_mutex); 1075 __btrfs_free_extra_devids(fs_devices, &latest_dev); 1076 1077 list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list) 1078 __btrfs_free_extra_devids(seed_dev, &latest_dev); 1079 1080 fs_devices->latest_dev = latest_dev; 1081 1082 mutex_unlock(&uuid_mutex); 1083 } 1084 1085 static void btrfs_close_bdev(struct btrfs_device *device) 1086 { 1087 if (!device->bdev) 1088 return; 1089 1090 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 1091 sync_blockdev(device->bdev); 1092 invalidate_bdev(device->bdev); 1093 } 1094 1095 blkdev_put(device->bdev, device->mode); 1096 } 1097 1098 static void btrfs_close_one_device(struct btrfs_device *device) 1099 { 1100 struct btrfs_fs_devices *fs_devices = device->fs_devices; 1101 1102 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 1103 device->devid != BTRFS_DEV_REPLACE_DEVID) { 1104 list_del_init(&device->dev_alloc_list); 1105 fs_devices->rw_devices--; 1106 } 1107 1108 if (device->devid == BTRFS_DEV_REPLACE_DEVID) 1109 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 1110 1111 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { 1112 clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 1113 fs_devices->missing_devices--; 1114 } 1115 1116 btrfs_close_bdev(device); 1117 if (device->bdev) { 1118 fs_devices->open_devices--; 1119 device->bdev = NULL; 1120 } 1121 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 1122 btrfs_destroy_dev_zone_info(device); 1123 1124 device->fs_info = NULL; 1125 atomic_set(&device->dev_stats_ccnt, 0); 1126 extent_io_tree_release(&device->alloc_state); 1127 1128 /* 1129 * Reset the flush error record. We might have a transient flush error 1130 * in this mount, and if so we aborted the current transaction and set 1131 * the fs to an error state, guaranteeing no super blocks can be further 1132 * committed. However that error might be transient and if we unmount the 1133 * filesystem and mount it again, we should allow the mount to succeed 1134 * (btrfs_check_rw_degradable() should not fail) - if after mounting the 1135 * filesystem again we still get flush errors, then we will again abort 1136 * any transaction and set the error state, guaranteeing no commits of 1137 * unsafe super blocks. 1138 */ 1139 device->last_flush_error = 0; 1140 1141 /* Verify the device is back in a pristine state */ 1142 ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state)); 1143 ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)); 1144 ASSERT(list_empty(&device->dev_alloc_list)); 1145 ASSERT(list_empty(&device->post_commit_list)); 1146 } 1147 1148 static void close_fs_devices(struct btrfs_fs_devices *fs_devices) 1149 { 1150 struct btrfs_device *device, *tmp; 1151 1152 lockdep_assert_held(&uuid_mutex); 1153 1154 if (--fs_devices->opened > 0) 1155 return; 1156 1157 list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) 1158 btrfs_close_one_device(device); 1159 1160 WARN_ON(fs_devices->open_devices); 1161 WARN_ON(fs_devices->rw_devices); 1162 fs_devices->opened = 0; 1163 fs_devices->seeding = false; 1164 fs_devices->fs_info = NULL; 1165 } 1166 1167 void btrfs_close_devices(struct btrfs_fs_devices *fs_devices) 1168 { 1169 LIST_HEAD(list); 1170 struct btrfs_fs_devices *tmp; 1171 1172 mutex_lock(&uuid_mutex); 1173 close_fs_devices(fs_devices); 1174 if (!fs_devices->opened) 1175 list_splice_init(&fs_devices->seed_list, &list); 1176 1177 list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) { 1178 close_fs_devices(fs_devices); 1179 list_del(&fs_devices->seed_list); 1180 free_fs_devices(fs_devices); 1181 } 1182 mutex_unlock(&uuid_mutex); 1183 } 1184 1185 static int open_fs_devices(struct btrfs_fs_devices *fs_devices, 1186 fmode_t flags, void *holder) 1187 { 1188 struct btrfs_device *device; 1189 struct btrfs_device *latest_dev = NULL; 1190 struct btrfs_device *tmp_device; 1191 1192 flags |= FMODE_EXCL; 1193 1194 list_for_each_entry_safe(device, tmp_device, &fs_devices->devices, 1195 dev_list) { 1196 int ret; 1197 1198 ret = btrfs_open_one_device(fs_devices, device, flags, holder); 1199 if (ret == 0 && 1200 (!latest_dev || device->generation > latest_dev->generation)) { 1201 latest_dev = device; 1202 } else if (ret == -ENODATA) { 1203 fs_devices->num_devices--; 1204 list_del(&device->dev_list); 1205 btrfs_free_device(device); 1206 } 1207 } 1208 if (fs_devices->open_devices == 0) 1209 return -EINVAL; 1210 1211 fs_devices->opened = 1; 1212 fs_devices->latest_dev = latest_dev; 1213 fs_devices->total_rw_bytes = 0; 1214 fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR; 1215 fs_devices->read_policy = BTRFS_READ_POLICY_PID; 1216 1217 return 0; 1218 } 1219 1220 static int devid_cmp(void *priv, const struct list_head *a, 1221 const struct list_head *b) 1222 { 1223 const struct btrfs_device *dev1, *dev2; 1224 1225 dev1 = list_entry(a, struct btrfs_device, dev_list); 1226 dev2 = list_entry(b, struct btrfs_device, dev_list); 1227 1228 if (dev1->devid < dev2->devid) 1229 return -1; 1230 else if (dev1->devid > dev2->devid) 1231 return 1; 1232 return 0; 1233 } 1234 1235 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, 1236 fmode_t flags, void *holder) 1237 { 1238 int ret; 1239 1240 lockdep_assert_held(&uuid_mutex); 1241 /* 1242 * The device_list_mutex cannot be taken here in case opening the 1243 * underlying device takes further locks like open_mutex. 1244 * 1245 * We also don't need the lock here as this is called during mount and 1246 * exclusion is provided by uuid_mutex 1247 */ 1248 1249 if (fs_devices->opened) { 1250 fs_devices->opened++; 1251 ret = 0; 1252 } else { 1253 list_sort(NULL, &fs_devices->devices, devid_cmp); 1254 ret = open_fs_devices(fs_devices, flags, holder); 1255 } 1256 1257 return ret; 1258 } 1259 1260 void btrfs_release_disk_super(struct btrfs_super_block *super) 1261 { 1262 struct page *page = virt_to_page(super); 1263 1264 put_page(page); 1265 } 1266 1267 static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev, 1268 u64 bytenr, u64 bytenr_orig) 1269 { 1270 struct btrfs_super_block *disk_super; 1271 struct page *page; 1272 void *p; 1273 pgoff_t index; 1274 1275 /* make sure our super fits in the device */ 1276 if (bytenr + PAGE_SIZE >= bdev_nr_bytes(bdev)) 1277 return ERR_PTR(-EINVAL); 1278 1279 /* make sure our super fits in the page */ 1280 if (sizeof(*disk_super) > PAGE_SIZE) 1281 return ERR_PTR(-EINVAL); 1282 1283 /* make sure our super doesn't straddle pages on disk */ 1284 index = bytenr >> PAGE_SHIFT; 1285 if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index) 1286 return ERR_PTR(-EINVAL); 1287 1288 /* pull in the page with our super */ 1289 page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL); 1290 1291 if (IS_ERR(page)) 1292 return ERR_CAST(page); 1293 1294 p = page_address(page); 1295 1296 /* align our pointer to the offset of the super block */ 1297 disk_super = p + offset_in_page(bytenr); 1298 1299 if (btrfs_super_bytenr(disk_super) != bytenr_orig || 1300 btrfs_super_magic(disk_super) != BTRFS_MAGIC) { 1301 btrfs_release_disk_super(p); 1302 return ERR_PTR(-EINVAL); 1303 } 1304 1305 if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1]) 1306 disk_super->label[BTRFS_LABEL_SIZE - 1] = 0; 1307 1308 return disk_super; 1309 } 1310 1311 int btrfs_forget_devices(dev_t devt) 1312 { 1313 int ret; 1314 1315 mutex_lock(&uuid_mutex); 1316 ret = btrfs_free_stale_devices(devt, NULL); 1317 mutex_unlock(&uuid_mutex); 1318 1319 return ret; 1320 } 1321 1322 /* 1323 * Look for a btrfs signature on a device. This may be called out of the mount path 1324 * and we are not allowed to call set_blocksize during the scan. The superblock 1325 * is read via pagecache 1326 */ 1327 struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags, 1328 void *holder) 1329 { 1330 struct btrfs_super_block *disk_super; 1331 bool new_device_added = false; 1332 struct btrfs_device *device = NULL; 1333 struct block_device *bdev; 1334 u64 bytenr, bytenr_orig; 1335 int ret; 1336 1337 lockdep_assert_held(&uuid_mutex); 1338 1339 /* 1340 * we would like to check all the supers, but that would make 1341 * a btrfs mount succeed after a mkfs from a different FS. 1342 * So, we need to add a special mount option to scan for 1343 * later supers, using BTRFS_SUPER_MIRROR_MAX instead 1344 */ 1345 flags |= FMODE_EXCL; 1346 1347 bdev = blkdev_get_by_path(path, flags, holder); 1348 if (IS_ERR(bdev)) 1349 return ERR_CAST(bdev); 1350 1351 bytenr_orig = btrfs_sb_offset(0); 1352 ret = btrfs_sb_log_location_bdev(bdev, 0, READ, &bytenr); 1353 if (ret) { 1354 device = ERR_PTR(ret); 1355 goto error_bdev_put; 1356 } 1357 1358 disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr_orig); 1359 if (IS_ERR(disk_super)) { 1360 device = ERR_CAST(disk_super); 1361 goto error_bdev_put; 1362 } 1363 1364 device = device_list_add(path, disk_super, &new_device_added); 1365 if (!IS_ERR(device) && new_device_added) 1366 btrfs_free_stale_devices(device->devt, device); 1367 1368 btrfs_release_disk_super(disk_super); 1369 1370 error_bdev_put: 1371 blkdev_put(bdev, flags); 1372 1373 return device; 1374 } 1375 1376 /* 1377 * Try to find a chunk that intersects [start, start + len] range and when one 1378 * such is found, record the end of it in *start 1379 */ 1380 static bool contains_pending_extent(struct btrfs_device *device, u64 *start, 1381 u64 len) 1382 { 1383 u64 physical_start, physical_end; 1384 1385 lockdep_assert_held(&device->fs_info->chunk_mutex); 1386 1387 if (!find_first_extent_bit(&device->alloc_state, *start, 1388 &physical_start, &physical_end, 1389 CHUNK_ALLOCATED, NULL)) { 1390 1391 if (in_range(physical_start, *start, len) || 1392 in_range(*start, physical_start, 1393 physical_end - physical_start)) { 1394 *start = physical_end + 1; 1395 return true; 1396 } 1397 } 1398 return false; 1399 } 1400 1401 static u64 dev_extent_search_start(struct btrfs_device *device, u64 start) 1402 { 1403 switch (device->fs_devices->chunk_alloc_policy) { 1404 case BTRFS_CHUNK_ALLOC_REGULAR: 1405 return max_t(u64, start, BTRFS_DEVICE_RANGE_RESERVED); 1406 case BTRFS_CHUNK_ALLOC_ZONED: 1407 /* 1408 * We don't care about the starting region like regular 1409 * allocator, because we anyway use/reserve the first two zones 1410 * for superblock logging. 1411 */ 1412 return ALIGN(start, device->zone_info->zone_size); 1413 default: 1414 BUG(); 1415 } 1416 } 1417 1418 static bool dev_extent_hole_check_zoned(struct btrfs_device *device, 1419 u64 *hole_start, u64 *hole_size, 1420 u64 num_bytes) 1421 { 1422 u64 zone_size = device->zone_info->zone_size; 1423 u64 pos; 1424 int ret; 1425 bool changed = false; 1426 1427 ASSERT(IS_ALIGNED(*hole_start, zone_size)); 1428 1429 while (*hole_size > 0) { 1430 pos = btrfs_find_allocatable_zones(device, *hole_start, 1431 *hole_start + *hole_size, 1432 num_bytes); 1433 if (pos != *hole_start) { 1434 *hole_size = *hole_start + *hole_size - pos; 1435 *hole_start = pos; 1436 changed = true; 1437 if (*hole_size < num_bytes) 1438 break; 1439 } 1440 1441 ret = btrfs_ensure_empty_zones(device, pos, num_bytes); 1442 1443 /* Range is ensured to be empty */ 1444 if (!ret) 1445 return changed; 1446 1447 /* Given hole range was invalid (outside of device) */ 1448 if (ret == -ERANGE) { 1449 *hole_start += *hole_size; 1450 *hole_size = 0; 1451 return true; 1452 } 1453 1454 *hole_start += zone_size; 1455 *hole_size -= zone_size; 1456 changed = true; 1457 } 1458 1459 return changed; 1460 } 1461 1462 /** 1463 * dev_extent_hole_check - check if specified hole is suitable for allocation 1464 * @device: the device which we have the hole 1465 * @hole_start: starting position of the hole 1466 * @hole_size: the size of the hole 1467 * @num_bytes: the size of the free space that we need 1468 * 1469 * This function may modify @hole_start and @hole_size to reflect the suitable 1470 * position for allocation. Returns 1 if hole position is updated, 0 otherwise. 1471 */ 1472 static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start, 1473 u64 *hole_size, u64 num_bytes) 1474 { 1475 bool changed = false; 1476 u64 hole_end = *hole_start + *hole_size; 1477 1478 for (;;) { 1479 /* 1480 * Check before we set max_hole_start, otherwise we could end up 1481 * sending back this offset anyway. 1482 */ 1483 if (contains_pending_extent(device, hole_start, *hole_size)) { 1484 if (hole_end >= *hole_start) 1485 *hole_size = hole_end - *hole_start; 1486 else 1487 *hole_size = 0; 1488 changed = true; 1489 } 1490 1491 switch (device->fs_devices->chunk_alloc_policy) { 1492 case BTRFS_CHUNK_ALLOC_REGULAR: 1493 /* No extra check */ 1494 break; 1495 case BTRFS_CHUNK_ALLOC_ZONED: 1496 if (dev_extent_hole_check_zoned(device, hole_start, 1497 hole_size, num_bytes)) { 1498 changed = true; 1499 /* 1500 * The changed hole can contain pending extent. 1501 * Loop again to check that. 1502 */ 1503 continue; 1504 } 1505 break; 1506 default: 1507 BUG(); 1508 } 1509 1510 break; 1511 } 1512 1513 return changed; 1514 } 1515 1516 /* 1517 * find_free_dev_extent_start - find free space in the specified device 1518 * @device: the device which we search the free space in 1519 * @num_bytes: the size of the free space that we need 1520 * @search_start: the position from which to begin the search 1521 * @start: store the start of the free space. 1522 * @len: the size of the free space. that we find, or the size 1523 * of the max free space if we don't find suitable free space 1524 * 1525 * this uses a pretty simple search, the expectation is that it is 1526 * called very infrequently and that a given device has a small number 1527 * of extents 1528 * 1529 * @start is used to store the start of the free space if we find. But if we 1530 * don't find suitable free space, it will be used to store the start position 1531 * of the max free space. 1532 * 1533 * @len is used to store the size of the free space that we find. 1534 * But if we don't find suitable free space, it is used to store the size of 1535 * the max free space. 1536 * 1537 * NOTE: This function will search *commit* root of device tree, and does extra 1538 * check to ensure dev extents are not double allocated. 1539 * This makes the function safe to allocate dev extents but may not report 1540 * correct usable device space, as device extent freed in current transaction 1541 * is not reported as available. 1542 */ 1543 static int find_free_dev_extent_start(struct btrfs_device *device, 1544 u64 num_bytes, u64 search_start, u64 *start, 1545 u64 *len) 1546 { 1547 struct btrfs_fs_info *fs_info = device->fs_info; 1548 struct btrfs_root *root = fs_info->dev_root; 1549 struct btrfs_key key; 1550 struct btrfs_dev_extent *dev_extent; 1551 struct btrfs_path *path; 1552 u64 hole_size; 1553 u64 max_hole_start; 1554 u64 max_hole_size; 1555 u64 extent_end; 1556 u64 search_end = device->total_bytes; 1557 int ret; 1558 int slot; 1559 struct extent_buffer *l; 1560 1561 search_start = dev_extent_search_start(device, search_start); 1562 1563 WARN_ON(device->zone_info && 1564 !IS_ALIGNED(num_bytes, device->zone_info->zone_size)); 1565 1566 path = btrfs_alloc_path(); 1567 if (!path) 1568 return -ENOMEM; 1569 1570 max_hole_start = search_start; 1571 max_hole_size = 0; 1572 1573 again: 1574 if (search_start >= search_end || 1575 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 1576 ret = -ENOSPC; 1577 goto out; 1578 } 1579 1580 path->reada = READA_FORWARD; 1581 path->search_commit_root = 1; 1582 path->skip_locking = 1; 1583 1584 key.objectid = device->devid; 1585 key.offset = search_start; 1586 key.type = BTRFS_DEV_EXTENT_KEY; 1587 1588 ret = btrfs_search_backwards(root, &key, path); 1589 if (ret < 0) 1590 goto out; 1591 1592 while (1) { 1593 l = path->nodes[0]; 1594 slot = path->slots[0]; 1595 if (slot >= btrfs_header_nritems(l)) { 1596 ret = btrfs_next_leaf(root, path); 1597 if (ret == 0) 1598 continue; 1599 if (ret < 0) 1600 goto out; 1601 1602 break; 1603 } 1604 btrfs_item_key_to_cpu(l, &key, slot); 1605 1606 if (key.objectid < device->devid) 1607 goto next; 1608 1609 if (key.objectid > device->devid) 1610 break; 1611 1612 if (key.type != BTRFS_DEV_EXTENT_KEY) 1613 goto next; 1614 1615 if (key.offset > search_start) { 1616 hole_size = key.offset - search_start; 1617 dev_extent_hole_check(device, &search_start, &hole_size, 1618 num_bytes); 1619 1620 if (hole_size > max_hole_size) { 1621 max_hole_start = search_start; 1622 max_hole_size = hole_size; 1623 } 1624 1625 /* 1626 * If this free space is greater than which we need, 1627 * it must be the max free space that we have found 1628 * until now, so max_hole_start must point to the start 1629 * of this free space and the length of this free space 1630 * is stored in max_hole_size. Thus, we return 1631 * max_hole_start and max_hole_size and go back to the 1632 * caller. 1633 */ 1634 if (hole_size >= num_bytes) { 1635 ret = 0; 1636 goto out; 1637 } 1638 } 1639 1640 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 1641 extent_end = key.offset + btrfs_dev_extent_length(l, 1642 dev_extent); 1643 if (extent_end > search_start) 1644 search_start = extent_end; 1645 next: 1646 path->slots[0]++; 1647 cond_resched(); 1648 } 1649 1650 /* 1651 * At this point, search_start should be the end of 1652 * allocated dev extents, and when shrinking the device, 1653 * search_end may be smaller than search_start. 1654 */ 1655 if (search_end > search_start) { 1656 hole_size = search_end - search_start; 1657 if (dev_extent_hole_check(device, &search_start, &hole_size, 1658 num_bytes)) { 1659 btrfs_release_path(path); 1660 goto again; 1661 } 1662 1663 if (hole_size > max_hole_size) { 1664 max_hole_start = search_start; 1665 max_hole_size = hole_size; 1666 } 1667 } 1668 1669 /* See above. */ 1670 if (max_hole_size < num_bytes) 1671 ret = -ENOSPC; 1672 else 1673 ret = 0; 1674 1675 out: 1676 btrfs_free_path(path); 1677 *start = max_hole_start; 1678 if (len) 1679 *len = max_hole_size; 1680 return ret; 1681 } 1682 1683 int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes, 1684 u64 *start, u64 *len) 1685 { 1686 /* FIXME use last free of some kind */ 1687 return find_free_dev_extent_start(device, num_bytes, 0, start, len); 1688 } 1689 1690 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans, 1691 struct btrfs_device *device, 1692 u64 start, u64 *dev_extent_len) 1693 { 1694 struct btrfs_fs_info *fs_info = device->fs_info; 1695 struct btrfs_root *root = fs_info->dev_root; 1696 int ret; 1697 struct btrfs_path *path; 1698 struct btrfs_key key; 1699 struct btrfs_key found_key; 1700 struct extent_buffer *leaf = NULL; 1701 struct btrfs_dev_extent *extent = NULL; 1702 1703 path = btrfs_alloc_path(); 1704 if (!path) 1705 return -ENOMEM; 1706 1707 key.objectid = device->devid; 1708 key.offset = start; 1709 key.type = BTRFS_DEV_EXTENT_KEY; 1710 again: 1711 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1712 if (ret > 0) { 1713 ret = btrfs_previous_item(root, path, key.objectid, 1714 BTRFS_DEV_EXTENT_KEY); 1715 if (ret) 1716 goto out; 1717 leaf = path->nodes[0]; 1718 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 1719 extent = btrfs_item_ptr(leaf, path->slots[0], 1720 struct btrfs_dev_extent); 1721 BUG_ON(found_key.offset > start || found_key.offset + 1722 btrfs_dev_extent_length(leaf, extent) < start); 1723 key = found_key; 1724 btrfs_release_path(path); 1725 goto again; 1726 } else if (ret == 0) { 1727 leaf = path->nodes[0]; 1728 extent = btrfs_item_ptr(leaf, path->slots[0], 1729 struct btrfs_dev_extent); 1730 } else { 1731 goto out; 1732 } 1733 1734 *dev_extent_len = btrfs_dev_extent_length(leaf, extent); 1735 1736 ret = btrfs_del_item(trans, root, path); 1737 if (ret == 0) 1738 set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags); 1739 out: 1740 btrfs_free_path(path); 1741 return ret; 1742 } 1743 1744 static u64 find_next_chunk(struct btrfs_fs_info *fs_info) 1745 { 1746 struct extent_map_tree *em_tree; 1747 struct extent_map *em; 1748 struct rb_node *n; 1749 u64 ret = 0; 1750 1751 em_tree = &fs_info->mapping_tree; 1752 read_lock(&em_tree->lock); 1753 n = rb_last(&em_tree->map.rb_root); 1754 if (n) { 1755 em = rb_entry(n, struct extent_map, rb_node); 1756 ret = em->start + em->len; 1757 } 1758 read_unlock(&em_tree->lock); 1759 1760 return ret; 1761 } 1762 1763 static noinline int find_next_devid(struct btrfs_fs_info *fs_info, 1764 u64 *devid_ret) 1765 { 1766 int ret; 1767 struct btrfs_key key; 1768 struct btrfs_key found_key; 1769 struct btrfs_path *path; 1770 1771 path = btrfs_alloc_path(); 1772 if (!path) 1773 return -ENOMEM; 1774 1775 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1776 key.type = BTRFS_DEV_ITEM_KEY; 1777 key.offset = (u64)-1; 1778 1779 ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0); 1780 if (ret < 0) 1781 goto error; 1782 1783 if (ret == 0) { 1784 /* Corruption */ 1785 btrfs_err(fs_info, "corrupted chunk tree devid -1 matched"); 1786 ret = -EUCLEAN; 1787 goto error; 1788 } 1789 1790 ret = btrfs_previous_item(fs_info->chunk_root, path, 1791 BTRFS_DEV_ITEMS_OBJECTID, 1792 BTRFS_DEV_ITEM_KEY); 1793 if (ret) { 1794 *devid_ret = 1; 1795 } else { 1796 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 1797 path->slots[0]); 1798 *devid_ret = found_key.offset + 1; 1799 } 1800 ret = 0; 1801 error: 1802 btrfs_free_path(path); 1803 return ret; 1804 } 1805 1806 /* 1807 * the device information is stored in the chunk root 1808 * the btrfs_device struct should be fully filled in 1809 */ 1810 static int btrfs_add_dev_item(struct btrfs_trans_handle *trans, 1811 struct btrfs_device *device) 1812 { 1813 int ret; 1814 struct btrfs_path *path; 1815 struct btrfs_dev_item *dev_item; 1816 struct extent_buffer *leaf; 1817 struct btrfs_key key; 1818 unsigned long ptr; 1819 1820 path = btrfs_alloc_path(); 1821 if (!path) 1822 return -ENOMEM; 1823 1824 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1825 key.type = BTRFS_DEV_ITEM_KEY; 1826 key.offset = device->devid; 1827 1828 btrfs_reserve_chunk_metadata(trans, true); 1829 ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path, 1830 &key, sizeof(*dev_item)); 1831 btrfs_trans_release_chunk_metadata(trans); 1832 if (ret) 1833 goto out; 1834 1835 leaf = path->nodes[0]; 1836 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 1837 1838 btrfs_set_device_id(leaf, dev_item, device->devid); 1839 btrfs_set_device_generation(leaf, dev_item, 0); 1840 btrfs_set_device_type(leaf, dev_item, device->type); 1841 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 1842 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 1843 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 1844 btrfs_set_device_total_bytes(leaf, dev_item, 1845 btrfs_device_get_disk_total_bytes(device)); 1846 btrfs_set_device_bytes_used(leaf, dev_item, 1847 btrfs_device_get_bytes_used(device)); 1848 btrfs_set_device_group(leaf, dev_item, 0); 1849 btrfs_set_device_seek_speed(leaf, dev_item, 0); 1850 btrfs_set_device_bandwidth(leaf, dev_item, 0); 1851 btrfs_set_device_start_offset(leaf, dev_item, 0); 1852 1853 ptr = btrfs_device_uuid(dev_item); 1854 write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 1855 ptr = btrfs_device_fsid(dev_item); 1856 write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid, 1857 ptr, BTRFS_FSID_SIZE); 1858 btrfs_mark_buffer_dirty(leaf); 1859 1860 ret = 0; 1861 out: 1862 btrfs_free_path(path); 1863 return ret; 1864 } 1865 1866 /* 1867 * Function to update ctime/mtime for a given device path. 1868 * Mainly used for ctime/mtime based probe like libblkid. 1869 * 1870 * We don't care about errors here, this is just to be kind to userspace. 1871 */ 1872 static void update_dev_time(const char *device_path) 1873 { 1874 struct path path; 1875 struct timespec64 now; 1876 int ret; 1877 1878 ret = kern_path(device_path, LOOKUP_FOLLOW, &path); 1879 if (ret) 1880 return; 1881 1882 now = current_time(d_inode(path.dentry)); 1883 inode_update_time(d_inode(path.dentry), &now, S_MTIME | S_CTIME); 1884 path_put(&path); 1885 } 1886 1887 static int btrfs_rm_dev_item(struct btrfs_trans_handle *trans, 1888 struct btrfs_device *device) 1889 { 1890 struct btrfs_root *root = device->fs_info->chunk_root; 1891 int ret; 1892 struct btrfs_path *path; 1893 struct btrfs_key key; 1894 1895 path = btrfs_alloc_path(); 1896 if (!path) 1897 return -ENOMEM; 1898 1899 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1900 key.type = BTRFS_DEV_ITEM_KEY; 1901 key.offset = device->devid; 1902 1903 btrfs_reserve_chunk_metadata(trans, false); 1904 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1905 btrfs_trans_release_chunk_metadata(trans); 1906 if (ret) { 1907 if (ret > 0) 1908 ret = -ENOENT; 1909 goto out; 1910 } 1911 1912 ret = btrfs_del_item(trans, root, path); 1913 out: 1914 btrfs_free_path(path); 1915 return ret; 1916 } 1917 1918 /* 1919 * Verify that @num_devices satisfies the RAID profile constraints in the whole 1920 * filesystem. It's up to the caller to adjust that number regarding eg. device 1921 * replace. 1922 */ 1923 static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info, 1924 u64 num_devices) 1925 { 1926 u64 all_avail; 1927 unsigned seq; 1928 int i; 1929 1930 do { 1931 seq = read_seqbegin(&fs_info->profiles_lock); 1932 1933 all_avail = fs_info->avail_data_alloc_bits | 1934 fs_info->avail_system_alloc_bits | 1935 fs_info->avail_metadata_alloc_bits; 1936 } while (read_seqretry(&fs_info->profiles_lock, seq)); 1937 1938 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) { 1939 if (!(all_avail & btrfs_raid_array[i].bg_flag)) 1940 continue; 1941 1942 if (num_devices < btrfs_raid_array[i].devs_min) 1943 return btrfs_raid_array[i].mindev_error; 1944 } 1945 1946 return 0; 1947 } 1948 1949 static struct btrfs_device * btrfs_find_next_active_device( 1950 struct btrfs_fs_devices *fs_devs, struct btrfs_device *device) 1951 { 1952 struct btrfs_device *next_device; 1953 1954 list_for_each_entry(next_device, &fs_devs->devices, dev_list) { 1955 if (next_device != device && 1956 !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state) 1957 && next_device->bdev) 1958 return next_device; 1959 } 1960 1961 return NULL; 1962 } 1963 1964 /* 1965 * Helper function to check if the given device is part of s_bdev / latest_dev 1966 * and replace it with the provided or the next active device, in the context 1967 * where this function called, there should be always be another device (or 1968 * this_dev) which is active. 1969 */ 1970 void __cold btrfs_assign_next_active_device(struct btrfs_device *device, 1971 struct btrfs_device *next_device) 1972 { 1973 struct btrfs_fs_info *fs_info = device->fs_info; 1974 1975 if (!next_device) 1976 next_device = btrfs_find_next_active_device(fs_info->fs_devices, 1977 device); 1978 ASSERT(next_device); 1979 1980 if (fs_info->sb->s_bdev && 1981 (fs_info->sb->s_bdev == device->bdev)) 1982 fs_info->sb->s_bdev = next_device->bdev; 1983 1984 if (fs_info->fs_devices->latest_dev->bdev == device->bdev) 1985 fs_info->fs_devices->latest_dev = next_device; 1986 } 1987 1988 /* 1989 * Return btrfs_fs_devices::num_devices excluding the device that's being 1990 * currently replaced. 1991 */ 1992 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info) 1993 { 1994 u64 num_devices = fs_info->fs_devices->num_devices; 1995 1996 down_read(&fs_info->dev_replace.rwsem); 1997 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) { 1998 ASSERT(num_devices > 1); 1999 num_devices--; 2000 } 2001 up_read(&fs_info->dev_replace.rwsem); 2002 2003 return num_devices; 2004 } 2005 2006 void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info, 2007 struct block_device *bdev, 2008 const char *device_path) 2009 { 2010 struct btrfs_super_block *disk_super; 2011 int copy_num; 2012 2013 if (!bdev) 2014 return; 2015 2016 for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) { 2017 struct page *page; 2018 int ret; 2019 2020 disk_super = btrfs_read_dev_one_super(bdev, copy_num); 2021 if (IS_ERR(disk_super)) 2022 continue; 2023 2024 if (bdev_is_zoned(bdev)) { 2025 btrfs_reset_sb_log_zones(bdev, copy_num); 2026 continue; 2027 } 2028 2029 memset(&disk_super->magic, 0, sizeof(disk_super->magic)); 2030 2031 page = virt_to_page(disk_super); 2032 set_page_dirty(page); 2033 lock_page(page); 2034 /* write_on_page() unlocks the page */ 2035 ret = write_one_page(page); 2036 if (ret) 2037 btrfs_warn(fs_info, 2038 "error clearing superblock number %d (%d)", 2039 copy_num, ret); 2040 btrfs_release_disk_super(disk_super); 2041 2042 } 2043 2044 /* Notify udev that device has changed */ 2045 btrfs_kobject_uevent(bdev, KOBJ_CHANGE); 2046 2047 /* Update ctime/mtime for device path for libblkid */ 2048 update_dev_time(device_path); 2049 } 2050 2051 int btrfs_rm_device(struct btrfs_fs_info *fs_info, 2052 struct btrfs_dev_lookup_args *args, 2053 struct block_device **bdev, fmode_t *mode) 2054 { 2055 struct btrfs_trans_handle *trans; 2056 struct btrfs_device *device; 2057 struct btrfs_fs_devices *cur_devices; 2058 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2059 u64 num_devices; 2060 int ret = 0; 2061 2062 if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { 2063 btrfs_err(fs_info, "device remove not supported on extent tree v2 yet"); 2064 return -EINVAL; 2065 } 2066 2067 /* 2068 * The device list in fs_devices is accessed without locks (neither 2069 * uuid_mutex nor device_list_mutex) as it won't change on a mounted 2070 * filesystem and another device rm cannot run. 2071 */ 2072 num_devices = btrfs_num_devices(fs_info); 2073 2074 ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1); 2075 if (ret) 2076 return ret; 2077 2078 device = btrfs_find_device(fs_info->fs_devices, args); 2079 if (!device) { 2080 if (args->missing) 2081 ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND; 2082 else 2083 ret = -ENOENT; 2084 return ret; 2085 } 2086 2087 if (btrfs_pinned_by_swapfile(fs_info, device)) { 2088 btrfs_warn_in_rcu(fs_info, 2089 "cannot remove device %s (devid %llu) due to active swapfile", 2090 rcu_str_deref(device->name), device->devid); 2091 return -ETXTBSY; 2092 } 2093 2094 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 2095 return BTRFS_ERROR_DEV_TGT_REPLACE; 2096 2097 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 2098 fs_info->fs_devices->rw_devices == 1) 2099 return BTRFS_ERROR_DEV_ONLY_WRITABLE; 2100 2101 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2102 mutex_lock(&fs_info->chunk_mutex); 2103 list_del_init(&device->dev_alloc_list); 2104 device->fs_devices->rw_devices--; 2105 mutex_unlock(&fs_info->chunk_mutex); 2106 } 2107 2108 ret = btrfs_shrink_device(device, 0); 2109 if (ret) 2110 goto error_undo; 2111 2112 trans = btrfs_start_transaction(fs_info->chunk_root, 0); 2113 if (IS_ERR(trans)) { 2114 ret = PTR_ERR(trans); 2115 goto error_undo; 2116 } 2117 2118 ret = btrfs_rm_dev_item(trans, device); 2119 if (ret) { 2120 /* Any error in dev item removal is critical */ 2121 btrfs_crit(fs_info, 2122 "failed to remove device item for devid %llu: %d", 2123 device->devid, ret); 2124 btrfs_abort_transaction(trans, ret); 2125 btrfs_end_transaction(trans); 2126 return ret; 2127 } 2128 2129 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2130 btrfs_scrub_cancel_dev(device); 2131 2132 /* 2133 * the device list mutex makes sure that we don't change 2134 * the device list while someone else is writing out all 2135 * the device supers. Whoever is writing all supers, should 2136 * lock the device list mutex before getting the number of 2137 * devices in the super block (super_copy). Conversely, 2138 * whoever updates the number of devices in the super block 2139 * (super_copy) should hold the device list mutex. 2140 */ 2141 2142 /* 2143 * In normal cases the cur_devices == fs_devices. But in case 2144 * of deleting a seed device, the cur_devices should point to 2145 * its own fs_devices listed under the fs_devices->seed_list. 2146 */ 2147 cur_devices = device->fs_devices; 2148 mutex_lock(&fs_devices->device_list_mutex); 2149 list_del_rcu(&device->dev_list); 2150 2151 cur_devices->num_devices--; 2152 cur_devices->total_devices--; 2153 /* Update total_devices of the parent fs_devices if it's seed */ 2154 if (cur_devices != fs_devices) 2155 fs_devices->total_devices--; 2156 2157 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) 2158 cur_devices->missing_devices--; 2159 2160 btrfs_assign_next_active_device(device, NULL); 2161 2162 if (device->bdev) { 2163 cur_devices->open_devices--; 2164 /* remove sysfs entry */ 2165 btrfs_sysfs_remove_device(device); 2166 } 2167 2168 num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1; 2169 btrfs_set_super_num_devices(fs_info->super_copy, num_devices); 2170 mutex_unlock(&fs_devices->device_list_mutex); 2171 2172 /* 2173 * At this point, the device is zero sized and detached from the 2174 * devices list. All that's left is to zero out the old supers and 2175 * free the device. 2176 * 2177 * We cannot call btrfs_close_bdev() here because we're holding the sb 2178 * write lock, and blkdev_put() will pull in the ->open_mutex on the 2179 * block device and it's dependencies. Instead just flush the device 2180 * and let the caller do the final blkdev_put. 2181 */ 2182 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2183 btrfs_scratch_superblocks(fs_info, device->bdev, 2184 device->name->str); 2185 if (device->bdev) { 2186 sync_blockdev(device->bdev); 2187 invalidate_bdev(device->bdev); 2188 } 2189 } 2190 2191 *bdev = device->bdev; 2192 *mode = device->mode; 2193 synchronize_rcu(); 2194 btrfs_free_device(device); 2195 2196 /* 2197 * This can happen if cur_devices is the private seed devices list. We 2198 * cannot call close_fs_devices() here because it expects the uuid_mutex 2199 * to be held, but in fact we don't need that for the private 2200 * seed_devices, we can simply decrement cur_devices->opened and then 2201 * remove it from our list and free the fs_devices. 2202 */ 2203 if (cur_devices->num_devices == 0) { 2204 list_del_init(&cur_devices->seed_list); 2205 ASSERT(cur_devices->opened == 1); 2206 cur_devices->opened--; 2207 free_fs_devices(cur_devices); 2208 } 2209 2210 ret = btrfs_commit_transaction(trans); 2211 2212 return ret; 2213 2214 error_undo: 2215 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2216 mutex_lock(&fs_info->chunk_mutex); 2217 list_add(&device->dev_alloc_list, 2218 &fs_devices->alloc_list); 2219 device->fs_devices->rw_devices++; 2220 mutex_unlock(&fs_info->chunk_mutex); 2221 } 2222 return ret; 2223 } 2224 2225 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev) 2226 { 2227 struct btrfs_fs_devices *fs_devices; 2228 2229 lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex); 2230 2231 /* 2232 * in case of fs with no seed, srcdev->fs_devices will point 2233 * to fs_devices of fs_info. However when the dev being replaced is 2234 * a seed dev it will point to the seed's local fs_devices. In short 2235 * srcdev will have its correct fs_devices in both the cases. 2236 */ 2237 fs_devices = srcdev->fs_devices; 2238 2239 list_del_rcu(&srcdev->dev_list); 2240 list_del(&srcdev->dev_alloc_list); 2241 fs_devices->num_devices--; 2242 if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state)) 2243 fs_devices->missing_devices--; 2244 2245 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state)) 2246 fs_devices->rw_devices--; 2247 2248 if (srcdev->bdev) 2249 fs_devices->open_devices--; 2250 } 2251 2252 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev) 2253 { 2254 struct btrfs_fs_devices *fs_devices = srcdev->fs_devices; 2255 2256 mutex_lock(&uuid_mutex); 2257 2258 btrfs_close_bdev(srcdev); 2259 synchronize_rcu(); 2260 btrfs_free_device(srcdev); 2261 2262 /* if this is no devs we rather delete the fs_devices */ 2263 if (!fs_devices->num_devices) { 2264 /* 2265 * On a mounted FS, num_devices can't be zero unless it's a 2266 * seed. In case of a seed device being replaced, the replace 2267 * target added to the sprout FS, so there will be no more 2268 * device left under the seed FS. 2269 */ 2270 ASSERT(fs_devices->seeding); 2271 2272 list_del_init(&fs_devices->seed_list); 2273 close_fs_devices(fs_devices); 2274 free_fs_devices(fs_devices); 2275 } 2276 mutex_unlock(&uuid_mutex); 2277 } 2278 2279 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev) 2280 { 2281 struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices; 2282 2283 mutex_lock(&fs_devices->device_list_mutex); 2284 2285 btrfs_sysfs_remove_device(tgtdev); 2286 2287 if (tgtdev->bdev) 2288 fs_devices->open_devices--; 2289 2290 fs_devices->num_devices--; 2291 2292 btrfs_assign_next_active_device(tgtdev, NULL); 2293 2294 list_del_rcu(&tgtdev->dev_list); 2295 2296 mutex_unlock(&fs_devices->device_list_mutex); 2297 2298 btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev, 2299 tgtdev->name->str); 2300 2301 btrfs_close_bdev(tgtdev); 2302 synchronize_rcu(); 2303 btrfs_free_device(tgtdev); 2304 } 2305 2306 /** 2307 * Populate args from device at path 2308 * 2309 * @fs_info: the filesystem 2310 * @args: the args to populate 2311 * @path: the path to the device 2312 * 2313 * This will read the super block of the device at @path and populate @args with 2314 * the devid, fsid, and uuid. This is meant to be used for ioctls that need to 2315 * lookup a device to operate on, but need to do it before we take any locks. 2316 * This properly handles the special case of "missing" that a user may pass in, 2317 * and does some basic sanity checks. The caller must make sure that @path is 2318 * properly NUL terminated before calling in, and must call 2319 * btrfs_put_dev_args_from_path() in order to free up the temporary fsid and 2320 * uuid buffers. 2321 * 2322 * Return: 0 for success, -errno for failure 2323 */ 2324 int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info, 2325 struct btrfs_dev_lookup_args *args, 2326 const char *path) 2327 { 2328 struct btrfs_super_block *disk_super; 2329 struct block_device *bdev; 2330 int ret; 2331 2332 if (!path || !path[0]) 2333 return -EINVAL; 2334 if (!strcmp(path, "missing")) { 2335 args->missing = true; 2336 return 0; 2337 } 2338 2339 args->uuid = kzalloc(BTRFS_UUID_SIZE, GFP_KERNEL); 2340 args->fsid = kzalloc(BTRFS_FSID_SIZE, GFP_KERNEL); 2341 if (!args->uuid || !args->fsid) { 2342 btrfs_put_dev_args_from_path(args); 2343 return -ENOMEM; 2344 } 2345 2346 ret = btrfs_get_bdev_and_sb(path, FMODE_READ, fs_info->bdev_holder, 0, 2347 &bdev, &disk_super); 2348 if (ret) { 2349 btrfs_put_dev_args_from_path(args); 2350 return ret; 2351 } 2352 2353 args->devid = btrfs_stack_device_id(&disk_super->dev_item); 2354 memcpy(args->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE); 2355 if (btrfs_fs_incompat(fs_info, METADATA_UUID)) 2356 memcpy(args->fsid, disk_super->metadata_uuid, BTRFS_FSID_SIZE); 2357 else 2358 memcpy(args->fsid, disk_super->fsid, BTRFS_FSID_SIZE); 2359 btrfs_release_disk_super(disk_super); 2360 blkdev_put(bdev, FMODE_READ); 2361 return 0; 2362 } 2363 2364 /* 2365 * Only use this jointly with btrfs_get_dev_args_from_path() because we will 2366 * allocate our ->uuid and ->fsid pointers, everybody else uses local variables 2367 * that don't need to be freed. 2368 */ 2369 void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args) 2370 { 2371 kfree(args->uuid); 2372 kfree(args->fsid); 2373 args->uuid = NULL; 2374 args->fsid = NULL; 2375 } 2376 2377 struct btrfs_device *btrfs_find_device_by_devspec( 2378 struct btrfs_fs_info *fs_info, u64 devid, 2379 const char *device_path) 2380 { 2381 BTRFS_DEV_LOOKUP_ARGS(args); 2382 struct btrfs_device *device; 2383 int ret; 2384 2385 if (devid) { 2386 args.devid = devid; 2387 device = btrfs_find_device(fs_info->fs_devices, &args); 2388 if (!device) 2389 return ERR_PTR(-ENOENT); 2390 return device; 2391 } 2392 2393 ret = btrfs_get_dev_args_from_path(fs_info, &args, device_path); 2394 if (ret) 2395 return ERR_PTR(ret); 2396 device = btrfs_find_device(fs_info->fs_devices, &args); 2397 btrfs_put_dev_args_from_path(&args); 2398 if (!device) 2399 return ERR_PTR(-ENOENT); 2400 return device; 2401 } 2402 2403 static struct btrfs_fs_devices *btrfs_init_sprout(struct btrfs_fs_info *fs_info) 2404 { 2405 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2406 struct btrfs_fs_devices *old_devices; 2407 struct btrfs_fs_devices *seed_devices; 2408 2409 lockdep_assert_held(&uuid_mutex); 2410 if (!fs_devices->seeding) 2411 return ERR_PTR(-EINVAL); 2412 2413 /* 2414 * Private copy of the seed devices, anchored at 2415 * fs_info->fs_devices->seed_list 2416 */ 2417 seed_devices = alloc_fs_devices(NULL, NULL); 2418 if (IS_ERR(seed_devices)) 2419 return seed_devices; 2420 2421 /* 2422 * It's necessary to retain a copy of the original seed fs_devices in 2423 * fs_uuids so that filesystems which have been seeded can successfully 2424 * reference the seed device from open_seed_devices. This also supports 2425 * multiple fs seed. 2426 */ 2427 old_devices = clone_fs_devices(fs_devices); 2428 if (IS_ERR(old_devices)) { 2429 kfree(seed_devices); 2430 return old_devices; 2431 } 2432 2433 list_add(&old_devices->fs_list, &fs_uuids); 2434 2435 memcpy(seed_devices, fs_devices, sizeof(*seed_devices)); 2436 seed_devices->opened = 1; 2437 INIT_LIST_HEAD(&seed_devices->devices); 2438 INIT_LIST_HEAD(&seed_devices->alloc_list); 2439 mutex_init(&seed_devices->device_list_mutex); 2440 2441 return seed_devices; 2442 } 2443 2444 /* 2445 * Splice seed devices into the sprout fs_devices. 2446 * Generate a new fsid for the sprouted read-write filesystem. 2447 */ 2448 static void btrfs_setup_sprout(struct btrfs_fs_info *fs_info, 2449 struct btrfs_fs_devices *seed_devices) 2450 { 2451 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2452 struct btrfs_super_block *disk_super = fs_info->super_copy; 2453 struct btrfs_device *device; 2454 u64 super_flags; 2455 2456 /* 2457 * We are updating the fsid, the thread leading to device_list_add() 2458 * could race, so uuid_mutex is needed. 2459 */ 2460 lockdep_assert_held(&uuid_mutex); 2461 2462 /* 2463 * The threads listed below may traverse dev_list but can do that without 2464 * device_list_mutex: 2465 * - All device ops and balance - as we are in btrfs_exclop_start. 2466 * - Various dev_list readers - are using RCU. 2467 * - btrfs_ioctl_fitrim() - is using RCU. 2468 * 2469 * For-read threads as below are using device_list_mutex: 2470 * - Readonly scrub btrfs_scrub_dev() 2471 * - Readonly scrub btrfs_scrub_progress() 2472 * - btrfs_get_dev_stats() 2473 */ 2474 lockdep_assert_held(&fs_devices->device_list_mutex); 2475 2476 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices, 2477 synchronize_rcu); 2478 list_for_each_entry(device, &seed_devices->devices, dev_list) 2479 device->fs_devices = seed_devices; 2480 2481 fs_devices->seeding = false; 2482 fs_devices->num_devices = 0; 2483 fs_devices->open_devices = 0; 2484 fs_devices->missing_devices = 0; 2485 fs_devices->rotating = false; 2486 list_add(&seed_devices->seed_list, &fs_devices->seed_list); 2487 2488 generate_random_uuid(fs_devices->fsid); 2489 memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE); 2490 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); 2491 2492 super_flags = btrfs_super_flags(disk_super) & 2493 ~BTRFS_SUPER_FLAG_SEEDING; 2494 btrfs_set_super_flags(disk_super, super_flags); 2495 } 2496 2497 /* 2498 * Store the expected generation for seed devices in device items. 2499 */ 2500 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans) 2501 { 2502 BTRFS_DEV_LOOKUP_ARGS(args); 2503 struct btrfs_fs_info *fs_info = trans->fs_info; 2504 struct btrfs_root *root = fs_info->chunk_root; 2505 struct btrfs_path *path; 2506 struct extent_buffer *leaf; 2507 struct btrfs_dev_item *dev_item; 2508 struct btrfs_device *device; 2509 struct btrfs_key key; 2510 u8 fs_uuid[BTRFS_FSID_SIZE]; 2511 u8 dev_uuid[BTRFS_UUID_SIZE]; 2512 int ret; 2513 2514 path = btrfs_alloc_path(); 2515 if (!path) 2516 return -ENOMEM; 2517 2518 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2519 key.offset = 0; 2520 key.type = BTRFS_DEV_ITEM_KEY; 2521 2522 while (1) { 2523 btrfs_reserve_chunk_metadata(trans, false); 2524 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2525 btrfs_trans_release_chunk_metadata(trans); 2526 if (ret < 0) 2527 goto error; 2528 2529 leaf = path->nodes[0]; 2530 next_slot: 2531 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 2532 ret = btrfs_next_leaf(root, path); 2533 if (ret > 0) 2534 break; 2535 if (ret < 0) 2536 goto error; 2537 leaf = path->nodes[0]; 2538 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2539 btrfs_release_path(path); 2540 continue; 2541 } 2542 2543 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2544 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID || 2545 key.type != BTRFS_DEV_ITEM_KEY) 2546 break; 2547 2548 dev_item = btrfs_item_ptr(leaf, path->slots[0], 2549 struct btrfs_dev_item); 2550 args.devid = btrfs_device_id(leaf, dev_item); 2551 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 2552 BTRFS_UUID_SIZE); 2553 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 2554 BTRFS_FSID_SIZE); 2555 args.uuid = dev_uuid; 2556 args.fsid = fs_uuid; 2557 device = btrfs_find_device(fs_info->fs_devices, &args); 2558 BUG_ON(!device); /* Logic error */ 2559 2560 if (device->fs_devices->seeding) { 2561 btrfs_set_device_generation(leaf, dev_item, 2562 device->generation); 2563 btrfs_mark_buffer_dirty(leaf); 2564 } 2565 2566 path->slots[0]++; 2567 goto next_slot; 2568 } 2569 ret = 0; 2570 error: 2571 btrfs_free_path(path); 2572 return ret; 2573 } 2574 2575 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path) 2576 { 2577 struct btrfs_root *root = fs_info->dev_root; 2578 struct btrfs_trans_handle *trans; 2579 struct btrfs_device *device; 2580 struct block_device *bdev; 2581 struct super_block *sb = fs_info->sb; 2582 struct rcu_string *name; 2583 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2584 struct btrfs_fs_devices *seed_devices; 2585 u64 orig_super_total_bytes; 2586 u64 orig_super_num_devices; 2587 int ret = 0; 2588 bool seeding_dev = false; 2589 bool locked = false; 2590 2591 if (sb_rdonly(sb) && !fs_devices->seeding) 2592 return -EROFS; 2593 2594 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL, 2595 fs_info->bdev_holder); 2596 if (IS_ERR(bdev)) 2597 return PTR_ERR(bdev); 2598 2599 if (!btrfs_check_device_zone_type(fs_info, bdev)) { 2600 ret = -EINVAL; 2601 goto error; 2602 } 2603 2604 if (fs_devices->seeding) { 2605 seeding_dev = true; 2606 down_write(&sb->s_umount); 2607 mutex_lock(&uuid_mutex); 2608 locked = true; 2609 } 2610 2611 sync_blockdev(bdev); 2612 2613 rcu_read_lock(); 2614 list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) { 2615 if (device->bdev == bdev) { 2616 ret = -EEXIST; 2617 rcu_read_unlock(); 2618 goto error; 2619 } 2620 } 2621 rcu_read_unlock(); 2622 2623 device = btrfs_alloc_device(fs_info, NULL, NULL); 2624 if (IS_ERR(device)) { 2625 /* we can safely leave the fs_devices entry around */ 2626 ret = PTR_ERR(device); 2627 goto error; 2628 } 2629 2630 name = rcu_string_strdup(device_path, GFP_KERNEL); 2631 if (!name) { 2632 ret = -ENOMEM; 2633 goto error_free_device; 2634 } 2635 rcu_assign_pointer(device->name, name); 2636 2637 device->fs_info = fs_info; 2638 device->bdev = bdev; 2639 ret = lookup_bdev(device_path, &device->devt); 2640 if (ret) 2641 goto error_free_device; 2642 2643 ret = btrfs_get_dev_zone_info(device, false); 2644 if (ret) 2645 goto error_free_device; 2646 2647 trans = btrfs_start_transaction(root, 0); 2648 if (IS_ERR(trans)) { 2649 ret = PTR_ERR(trans); 2650 goto error_free_zone; 2651 } 2652 2653 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 2654 device->generation = trans->transid; 2655 device->io_width = fs_info->sectorsize; 2656 device->io_align = fs_info->sectorsize; 2657 device->sector_size = fs_info->sectorsize; 2658 device->total_bytes = 2659 round_down(bdev_nr_bytes(bdev), fs_info->sectorsize); 2660 device->disk_total_bytes = device->total_bytes; 2661 device->commit_total_bytes = device->total_bytes; 2662 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2663 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 2664 device->mode = FMODE_EXCL; 2665 device->dev_stats_valid = 1; 2666 set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE); 2667 2668 if (seeding_dev) { 2669 btrfs_clear_sb_rdonly(sb); 2670 2671 /* GFP_KERNEL allocation must not be under device_list_mutex */ 2672 seed_devices = btrfs_init_sprout(fs_info); 2673 if (IS_ERR(seed_devices)) { 2674 ret = PTR_ERR(seed_devices); 2675 btrfs_abort_transaction(trans, ret); 2676 goto error_trans; 2677 } 2678 } 2679 2680 mutex_lock(&fs_devices->device_list_mutex); 2681 if (seeding_dev) { 2682 btrfs_setup_sprout(fs_info, seed_devices); 2683 btrfs_assign_next_active_device(fs_info->fs_devices->latest_dev, 2684 device); 2685 } 2686 2687 device->fs_devices = fs_devices; 2688 2689 mutex_lock(&fs_info->chunk_mutex); 2690 list_add_rcu(&device->dev_list, &fs_devices->devices); 2691 list_add(&device->dev_alloc_list, &fs_devices->alloc_list); 2692 fs_devices->num_devices++; 2693 fs_devices->open_devices++; 2694 fs_devices->rw_devices++; 2695 fs_devices->total_devices++; 2696 fs_devices->total_rw_bytes += device->total_bytes; 2697 2698 atomic64_add(device->total_bytes, &fs_info->free_chunk_space); 2699 2700 if (!bdev_nonrot(bdev)) 2701 fs_devices->rotating = true; 2702 2703 orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy); 2704 btrfs_set_super_total_bytes(fs_info->super_copy, 2705 round_down(orig_super_total_bytes + device->total_bytes, 2706 fs_info->sectorsize)); 2707 2708 orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy); 2709 btrfs_set_super_num_devices(fs_info->super_copy, 2710 orig_super_num_devices + 1); 2711 2712 /* 2713 * we've got more storage, clear any full flags on the space 2714 * infos 2715 */ 2716 btrfs_clear_space_info_full(fs_info); 2717 2718 mutex_unlock(&fs_info->chunk_mutex); 2719 2720 /* Add sysfs device entry */ 2721 btrfs_sysfs_add_device(device); 2722 2723 mutex_unlock(&fs_devices->device_list_mutex); 2724 2725 if (seeding_dev) { 2726 mutex_lock(&fs_info->chunk_mutex); 2727 ret = init_first_rw_device(trans); 2728 mutex_unlock(&fs_info->chunk_mutex); 2729 if (ret) { 2730 btrfs_abort_transaction(trans, ret); 2731 goto error_sysfs; 2732 } 2733 } 2734 2735 ret = btrfs_add_dev_item(trans, device); 2736 if (ret) { 2737 btrfs_abort_transaction(trans, ret); 2738 goto error_sysfs; 2739 } 2740 2741 if (seeding_dev) { 2742 ret = btrfs_finish_sprout(trans); 2743 if (ret) { 2744 btrfs_abort_transaction(trans, ret); 2745 goto error_sysfs; 2746 } 2747 2748 /* 2749 * fs_devices now represents the newly sprouted filesystem and 2750 * its fsid has been changed by btrfs_sprout_splice(). 2751 */ 2752 btrfs_sysfs_update_sprout_fsid(fs_devices); 2753 } 2754 2755 ret = btrfs_commit_transaction(trans); 2756 2757 if (seeding_dev) { 2758 mutex_unlock(&uuid_mutex); 2759 up_write(&sb->s_umount); 2760 locked = false; 2761 2762 if (ret) /* transaction commit */ 2763 return ret; 2764 2765 ret = btrfs_relocate_sys_chunks(fs_info); 2766 if (ret < 0) 2767 btrfs_handle_fs_error(fs_info, ret, 2768 "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command."); 2769 trans = btrfs_attach_transaction(root); 2770 if (IS_ERR(trans)) { 2771 if (PTR_ERR(trans) == -ENOENT) 2772 return 0; 2773 ret = PTR_ERR(trans); 2774 trans = NULL; 2775 goto error_sysfs; 2776 } 2777 ret = btrfs_commit_transaction(trans); 2778 } 2779 2780 /* 2781 * Now that we have written a new super block to this device, check all 2782 * other fs_devices list if device_path alienates any other scanned 2783 * device. 2784 * We can ignore the return value as it typically returns -EINVAL and 2785 * only succeeds if the device was an alien. 2786 */ 2787 btrfs_forget_devices(device->devt); 2788 2789 /* Update ctime/mtime for blkid or udev */ 2790 update_dev_time(device_path); 2791 2792 return ret; 2793 2794 error_sysfs: 2795 btrfs_sysfs_remove_device(device); 2796 mutex_lock(&fs_info->fs_devices->device_list_mutex); 2797 mutex_lock(&fs_info->chunk_mutex); 2798 list_del_rcu(&device->dev_list); 2799 list_del(&device->dev_alloc_list); 2800 fs_info->fs_devices->num_devices--; 2801 fs_info->fs_devices->open_devices--; 2802 fs_info->fs_devices->rw_devices--; 2803 fs_info->fs_devices->total_devices--; 2804 fs_info->fs_devices->total_rw_bytes -= device->total_bytes; 2805 atomic64_sub(device->total_bytes, &fs_info->free_chunk_space); 2806 btrfs_set_super_total_bytes(fs_info->super_copy, 2807 orig_super_total_bytes); 2808 btrfs_set_super_num_devices(fs_info->super_copy, 2809 orig_super_num_devices); 2810 mutex_unlock(&fs_info->chunk_mutex); 2811 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2812 error_trans: 2813 if (seeding_dev) 2814 btrfs_set_sb_rdonly(sb); 2815 if (trans) 2816 btrfs_end_transaction(trans); 2817 error_free_zone: 2818 btrfs_destroy_dev_zone_info(device); 2819 error_free_device: 2820 btrfs_free_device(device); 2821 error: 2822 blkdev_put(bdev, FMODE_EXCL); 2823 if (locked) { 2824 mutex_unlock(&uuid_mutex); 2825 up_write(&sb->s_umount); 2826 } 2827 return ret; 2828 } 2829 2830 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans, 2831 struct btrfs_device *device) 2832 { 2833 int ret; 2834 struct btrfs_path *path; 2835 struct btrfs_root *root = device->fs_info->chunk_root; 2836 struct btrfs_dev_item *dev_item; 2837 struct extent_buffer *leaf; 2838 struct btrfs_key key; 2839 2840 path = btrfs_alloc_path(); 2841 if (!path) 2842 return -ENOMEM; 2843 2844 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2845 key.type = BTRFS_DEV_ITEM_KEY; 2846 key.offset = device->devid; 2847 2848 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2849 if (ret < 0) 2850 goto out; 2851 2852 if (ret > 0) { 2853 ret = -ENOENT; 2854 goto out; 2855 } 2856 2857 leaf = path->nodes[0]; 2858 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 2859 2860 btrfs_set_device_id(leaf, dev_item, device->devid); 2861 btrfs_set_device_type(leaf, dev_item, device->type); 2862 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 2863 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 2864 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 2865 btrfs_set_device_total_bytes(leaf, dev_item, 2866 btrfs_device_get_disk_total_bytes(device)); 2867 btrfs_set_device_bytes_used(leaf, dev_item, 2868 btrfs_device_get_bytes_used(device)); 2869 btrfs_mark_buffer_dirty(leaf); 2870 2871 out: 2872 btrfs_free_path(path); 2873 return ret; 2874 } 2875 2876 int btrfs_grow_device(struct btrfs_trans_handle *trans, 2877 struct btrfs_device *device, u64 new_size) 2878 { 2879 struct btrfs_fs_info *fs_info = device->fs_info; 2880 struct btrfs_super_block *super_copy = fs_info->super_copy; 2881 u64 old_total; 2882 u64 diff; 2883 int ret; 2884 2885 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 2886 return -EACCES; 2887 2888 new_size = round_down(new_size, fs_info->sectorsize); 2889 2890 mutex_lock(&fs_info->chunk_mutex); 2891 old_total = btrfs_super_total_bytes(super_copy); 2892 diff = round_down(new_size - device->total_bytes, fs_info->sectorsize); 2893 2894 if (new_size <= device->total_bytes || 2895 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 2896 mutex_unlock(&fs_info->chunk_mutex); 2897 return -EINVAL; 2898 } 2899 2900 btrfs_set_super_total_bytes(super_copy, 2901 round_down(old_total + diff, fs_info->sectorsize)); 2902 device->fs_devices->total_rw_bytes += diff; 2903 2904 btrfs_device_set_total_bytes(device, new_size); 2905 btrfs_device_set_disk_total_bytes(device, new_size); 2906 btrfs_clear_space_info_full(device->fs_info); 2907 if (list_empty(&device->post_commit_list)) 2908 list_add_tail(&device->post_commit_list, 2909 &trans->transaction->dev_update_list); 2910 mutex_unlock(&fs_info->chunk_mutex); 2911 2912 btrfs_reserve_chunk_metadata(trans, false); 2913 ret = btrfs_update_device(trans, device); 2914 btrfs_trans_release_chunk_metadata(trans); 2915 2916 return ret; 2917 } 2918 2919 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 2920 { 2921 struct btrfs_fs_info *fs_info = trans->fs_info; 2922 struct btrfs_root *root = fs_info->chunk_root; 2923 int ret; 2924 struct btrfs_path *path; 2925 struct btrfs_key key; 2926 2927 path = btrfs_alloc_path(); 2928 if (!path) 2929 return -ENOMEM; 2930 2931 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2932 key.offset = chunk_offset; 2933 key.type = BTRFS_CHUNK_ITEM_KEY; 2934 2935 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 2936 if (ret < 0) 2937 goto out; 2938 else if (ret > 0) { /* Logic error or corruption */ 2939 btrfs_handle_fs_error(fs_info, -ENOENT, 2940 "Failed lookup while freeing chunk."); 2941 ret = -ENOENT; 2942 goto out; 2943 } 2944 2945 ret = btrfs_del_item(trans, root, path); 2946 if (ret < 0) 2947 btrfs_handle_fs_error(fs_info, ret, 2948 "Failed to delete chunk item."); 2949 out: 2950 btrfs_free_path(path); 2951 return ret; 2952 } 2953 2954 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 2955 { 2956 struct btrfs_super_block *super_copy = fs_info->super_copy; 2957 struct btrfs_disk_key *disk_key; 2958 struct btrfs_chunk *chunk; 2959 u8 *ptr; 2960 int ret = 0; 2961 u32 num_stripes; 2962 u32 array_size; 2963 u32 len = 0; 2964 u32 cur; 2965 struct btrfs_key key; 2966 2967 lockdep_assert_held(&fs_info->chunk_mutex); 2968 array_size = btrfs_super_sys_array_size(super_copy); 2969 2970 ptr = super_copy->sys_chunk_array; 2971 cur = 0; 2972 2973 while (cur < array_size) { 2974 disk_key = (struct btrfs_disk_key *)ptr; 2975 btrfs_disk_key_to_cpu(&key, disk_key); 2976 2977 len = sizeof(*disk_key); 2978 2979 if (key.type == BTRFS_CHUNK_ITEM_KEY) { 2980 chunk = (struct btrfs_chunk *)(ptr + len); 2981 num_stripes = btrfs_stack_chunk_num_stripes(chunk); 2982 len += btrfs_chunk_item_size(num_stripes); 2983 } else { 2984 ret = -EIO; 2985 break; 2986 } 2987 if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID && 2988 key.offset == chunk_offset) { 2989 memmove(ptr, ptr + len, array_size - (cur + len)); 2990 array_size -= len; 2991 btrfs_set_super_sys_array_size(super_copy, array_size); 2992 } else { 2993 ptr += len; 2994 cur += len; 2995 } 2996 } 2997 return ret; 2998 } 2999 3000 /* 3001 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent. 3002 * @logical: Logical block offset in bytes. 3003 * @length: Length of extent in bytes. 3004 * 3005 * Return: Chunk mapping or ERR_PTR. 3006 */ 3007 struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info, 3008 u64 logical, u64 length) 3009 { 3010 struct extent_map_tree *em_tree; 3011 struct extent_map *em; 3012 3013 em_tree = &fs_info->mapping_tree; 3014 read_lock(&em_tree->lock); 3015 em = lookup_extent_mapping(em_tree, logical, length); 3016 read_unlock(&em_tree->lock); 3017 3018 if (!em) { 3019 btrfs_crit(fs_info, "unable to find logical %llu length %llu", 3020 logical, length); 3021 return ERR_PTR(-EINVAL); 3022 } 3023 3024 if (em->start > logical || em->start + em->len < logical) { 3025 btrfs_crit(fs_info, 3026 "found a bad mapping, wanted %llu-%llu, found %llu-%llu", 3027 logical, length, em->start, em->start + em->len); 3028 free_extent_map(em); 3029 return ERR_PTR(-EINVAL); 3030 } 3031 3032 /* callers are responsible for dropping em's ref. */ 3033 return em; 3034 } 3035 3036 static int remove_chunk_item(struct btrfs_trans_handle *trans, 3037 struct map_lookup *map, u64 chunk_offset) 3038 { 3039 int i; 3040 3041 /* 3042 * Removing chunk items and updating the device items in the chunks btree 3043 * requires holding the chunk_mutex. 3044 * See the comment at btrfs_chunk_alloc() for the details. 3045 */ 3046 lockdep_assert_held(&trans->fs_info->chunk_mutex); 3047 3048 for (i = 0; i < map->num_stripes; i++) { 3049 int ret; 3050 3051 ret = btrfs_update_device(trans, map->stripes[i].dev); 3052 if (ret) 3053 return ret; 3054 } 3055 3056 return btrfs_free_chunk(trans, chunk_offset); 3057 } 3058 3059 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 3060 { 3061 struct btrfs_fs_info *fs_info = trans->fs_info; 3062 struct extent_map *em; 3063 struct map_lookup *map; 3064 u64 dev_extent_len = 0; 3065 int i, ret = 0; 3066 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 3067 3068 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 3069 if (IS_ERR(em)) { 3070 /* 3071 * This is a logic error, but we don't want to just rely on the 3072 * user having built with ASSERT enabled, so if ASSERT doesn't 3073 * do anything we still error out. 3074 */ 3075 ASSERT(0); 3076 return PTR_ERR(em); 3077 } 3078 map = em->map_lookup; 3079 3080 /* 3081 * First delete the device extent items from the devices btree. 3082 * We take the device_list_mutex to avoid racing with the finishing phase 3083 * of a device replace operation. See the comment below before acquiring 3084 * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex 3085 * because that can result in a deadlock when deleting the device extent 3086 * items from the devices btree - COWing an extent buffer from the btree 3087 * may result in allocating a new metadata chunk, which would attempt to 3088 * lock again fs_info->chunk_mutex. 3089 */ 3090 mutex_lock(&fs_devices->device_list_mutex); 3091 for (i = 0; i < map->num_stripes; i++) { 3092 struct btrfs_device *device = map->stripes[i].dev; 3093 ret = btrfs_free_dev_extent(trans, device, 3094 map->stripes[i].physical, 3095 &dev_extent_len); 3096 if (ret) { 3097 mutex_unlock(&fs_devices->device_list_mutex); 3098 btrfs_abort_transaction(trans, ret); 3099 goto out; 3100 } 3101 3102 if (device->bytes_used > 0) { 3103 mutex_lock(&fs_info->chunk_mutex); 3104 btrfs_device_set_bytes_used(device, 3105 device->bytes_used - dev_extent_len); 3106 atomic64_add(dev_extent_len, &fs_info->free_chunk_space); 3107 btrfs_clear_space_info_full(fs_info); 3108 mutex_unlock(&fs_info->chunk_mutex); 3109 } 3110 } 3111 mutex_unlock(&fs_devices->device_list_mutex); 3112 3113 /* 3114 * We acquire fs_info->chunk_mutex for 2 reasons: 3115 * 3116 * 1) Just like with the first phase of the chunk allocation, we must 3117 * reserve system space, do all chunk btree updates and deletions, and 3118 * update the system chunk array in the superblock while holding this 3119 * mutex. This is for similar reasons as explained on the comment at 3120 * the top of btrfs_chunk_alloc(); 3121 * 3122 * 2) Prevent races with the final phase of a device replace operation 3123 * that replaces the device object associated with the map's stripes, 3124 * because the device object's id can change at any time during that 3125 * final phase of the device replace operation 3126 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the 3127 * replaced device and then see it with an ID of 3128 * BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating 3129 * the device item, which does not exists on the chunk btree. 3130 * The finishing phase of device replace acquires both the 3131 * device_list_mutex and the chunk_mutex, in that order, so we are 3132 * safe by just acquiring the chunk_mutex. 3133 */ 3134 trans->removing_chunk = true; 3135 mutex_lock(&fs_info->chunk_mutex); 3136 3137 check_system_chunk(trans, map->type); 3138 3139 ret = remove_chunk_item(trans, map, chunk_offset); 3140 /* 3141 * Normally we should not get -ENOSPC since we reserved space before 3142 * through the call to check_system_chunk(). 3143 * 3144 * Despite our system space_info having enough free space, we may not 3145 * be able to allocate extents from its block groups, because all have 3146 * an incompatible profile, which will force us to allocate a new system 3147 * block group with the right profile, or right after we called 3148 * check_system_space() above, a scrub turned the only system block group 3149 * with enough free space into RO mode. 3150 * This is explained with more detail at do_chunk_alloc(). 3151 * 3152 * So if we get -ENOSPC, allocate a new system chunk and retry once. 3153 */ 3154 if (ret == -ENOSPC) { 3155 const u64 sys_flags = btrfs_system_alloc_profile(fs_info); 3156 struct btrfs_block_group *sys_bg; 3157 3158 sys_bg = btrfs_create_chunk(trans, sys_flags); 3159 if (IS_ERR(sys_bg)) { 3160 ret = PTR_ERR(sys_bg); 3161 btrfs_abort_transaction(trans, ret); 3162 goto out; 3163 } 3164 3165 ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg); 3166 if (ret) { 3167 btrfs_abort_transaction(trans, ret); 3168 goto out; 3169 } 3170 3171 ret = remove_chunk_item(trans, map, chunk_offset); 3172 if (ret) { 3173 btrfs_abort_transaction(trans, ret); 3174 goto out; 3175 } 3176 } else if (ret) { 3177 btrfs_abort_transaction(trans, ret); 3178 goto out; 3179 } 3180 3181 trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len); 3182 3183 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 3184 ret = btrfs_del_sys_chunk(fs_info, chunk_offset); 3185 if (ret) { 3186 btrfs_abort_transaction(trans, ret); 3187 goto out; 3188 } 3189 } 3190 3191 mutex_unlock(&fs_info->chunk_mutex); 3192 trans->removing_chunk = false; 3193 3194 /* 3195 * We are done with chunk btree updates and deletions, so release the 3196 * system space we previously reserved (with check_system_chunk()). 3197 */ 3198 btrfs_trans_release_chunk_metadata(trans); 3199 3200 ret = btrfs_remove_block_group(trans, chunk_offset, em); 3201 if (ret) { 3202 btrfs_abort_transaction(trans, ret); 3203 goto out; 3204 } 3205 3206 out: 3207 if (trans->removing_chunk) { 3208 mutex_unlock(&fs_info->chunk_mutex); 3209 trans->removing_chunk = false; 3210 } 3211 /* once for us */ 3212 free_extent_map(em); 3213 return ret; 3214 } 3215 3216 int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 3217 { 3218 struct btrfs_root *root = fs_info->chunk_root; 3219 struct btrfs_trans_handle *trans; 3220 struct btrfs_block_group *block_group; 3221 u64 length; 3222 int ret; 3223 3224 if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { 3225 btrfs_err(fs_info, 3226 "relocate: not supported on extent tree v2 yet"); 3227 return -EINVAL; 3228 } 3229 3230 /* 3231 * Prevent races with automatic removal of unused block groups. 3232 * After we relocate and before we remove the chunk with offset 3233 * chunk_offset, automatic removal of the block group can kick in, 3234 * resulting in a failure when calling btrfs_remove_chunk() below. 3235 * 3236 * Make sure to acquire this mutex before doing a tree search (dev 3237 * or chunk trees) to find chunks. Otherwise the cleaner kthread might 3238 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after 3239 * we release the path used to search the chunk/dev tree and before 3240 * the current task acquires this mutex and calls us. 3241 */ 3242 lockdep_assert_held(&fs_info->reclaim_bgs_lock); 3243 3244 /* step one, relocate all the extents inside this chunk */ 3245 btrfs_scrub_pause(fs_info); 3246 ret = btrfs_relocate_block_group(fs_info, chunk_offset); 3247 btrfs_scrub_continue(fs_info); 3248 if (ret) 3249 return ret; 3250 3251 block_group = btrfs_lookup_block_group(fs_info, chunk_offset); 3252 if (!block_group) 3253 return -ENOENT; 3254 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group); 3255 length = block_group->length; 3256 btrfs_put_block_group(block_group); 3257 3258 /* 3259 * On a zoned file system, discard the whole block group, this will 3260 * trigger a REQ_OP_ZONE_RESET operation on the device zone. If 3261 * resetting the zone fails, don't treat it as a fatal problem from the 3262 * filesystem's point of view. 3263 */ 3264 if (btrfs_is_zoned(fs_info)) { 3265 ret = btrfs_discard_extent(fs_info, chunk_offset, length, NULL); 3266 if (ret) 3267 btrfs_info(fs_info, 3268 "failed to reset zone %llu after relocation", 3269 chunk_offset); 3270 } 3271 3272 trans = btrfs_start_trans_remove_block_group(root->fs_info, 3273 chunk_offset); 3274 if (IS_ERR(trans)) { 3275 ret = PTR_ERR(trans); 3276 btrfs_handle_fs_error(root->fs_info, ret, NULL); 3277 return ret; 3278 } 3279 3280 /* 3281 * step two, delete the device extents and the 3282 * chunk tree entries 3283 */ 3284 ret = btrfs_remove_chunk(trans, chunk_offset); 3285 btrfs_end_transaction(trans); 3286 return ret; 3287 } 3288 3289 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info) 3290 { 3291 struct btrfs_root *chunk_root = fs_info->chunk_root; 3292 struct btrfs_path *path; 3293 struct extent_buffer *leaf; 3294 struct btrfs_chunk *chunk; 3295 struct btrfs_key key; 3296 struct btrfs_key found_key; 3297 u64 chunk_type; 3298 bool retried = false; 3299 int failed = 0; 3300 int ret; 3301 3302 path = btrfs_alloc_path(); 3303 if (!path) 3304 return -ENOMEM; 3305 3306 again: 3307 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3308 key.offset = (u64)-1; 3309 key.type = BTRFS_CHUNK_ITEM_KEY; 3310 3311 while (1) { 3312 mutex_lock(&fs_info->reclaim_bgs_lock); 3313 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3314 if (ret < 0) { 3315 mutex_unlock(&fs_info->reclaim_bgs_lock); 3316 goto error; 3317 } 3318 BUG_ON(ret == 0); /* Corruption */ 3319 3320 ret = btrfs_previous_item(chunk_root, path, key.objectid, 3321 key.type); 3322 if (ret) 3323 mutex_unlock(&fs_info->reclaim_bgs_lock); 3324 if (ret < 0) 3325 goto error; 3326 if (ret > 0) 3327 break; 3328 3329 leaf = path->nodes[0]; 3330 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3331 3332 chunk = btrfs_item_ptr(leaf, path->slots[0], 3333 struct btrfs_chunk); 3334 chunk_type = btrfs_chunk_type(leaf, chunk); 3335 btrfs_release_path(path); 3336 3337 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) { 3338 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 3339 if (ret == -ENOSPC) 3340 failed++; 3341 else 3342 BUG_ON(ret); 3343 } 3344 mutex_unlock(&fs_info->reclaim_bgs_lock); 3345 3346 if (found_key.offset == 0) 3347 break; 3348 key.offset = found_key.offset - 1; 3349 } 3350 ret = 0; 3351 if (failed && !retried) { 3352 failed = 0; 3353 retried = true; 3354 goto again; 3355 } else if (WARN_ON(failed && retried)) { 3356 ret = -ENOSPC; 3357 } 3358 error: 3359 btrfs_free_path(path); 3360 return ret; 3361 } 3362 3363 /* 3364 * return 1 : allocate a data chunk successfully, 3365 * return <0: errors during allocating a data chunk, 3366 * return 0 : no need to allocate a data chunk. 3367 */ 3368 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info, 3369 u64 chunk_offset) 3370 { 3371 struct btrfs_block_group *cache; 3372 u64 bytes_used; 3373 u64 chunk_type; 3374 3375 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3376 ASSERT(cache); 3377 chunk_type = cache->flags; 3378 btrfs_put_block_group(cache); 3379 3380 if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA)) 3381 return 0; 3382 3383 spin_lock(&fs_info->data_sinfo->lock); 3384 bytes_used = fs_info->data_sinfo->bytes_used; 3385 spin_unlock(&fs_info->data_sinfo->lock); 3386 3387 if (!bytes_used) { 3388 struct btrfs_trans_handle *trans; 3389 int ret; 3390 3391 trans = btrfs_join_transaction(fs_info->tree_root); 3392 if (IS_ERR(trans)) 3393 return PTR_ERR(trans); 3394 3395 ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA); 3396 btrfs_end_transaction(trans); 3397 if (ret < 0) 3398 return ret; 3399 return 1; 3400 } 3401 3402 return 0; 3403 } 3404 3405 static int insert_balance_item(struct btrfs_fs_info *fs_info, 3406 struct btrfs_balance_control *bctl) 3407 { 3408 struct btrfs_root *root = fs_info->tree_root; 3409 struct btrfs_trans_handle *trans; 3410 struct btrfs_balance_item *item; 3411 struct btrfs_disk_balance_args disk_bargs; 3412 struct btrfs_path *path; 3413 struct extent_buffer *leaf; 3414 struct btrfs_key key; 3415 int ret, err; 3416 3417 path = btrfs_alloc_path(); 3418 if (!path) 3419 return -ENOMEM; 3420 3421 trans = btrfs_start_transaction(root, 0); 3422 if (IS_ERR(trans)) { 3423 btrfs_free_path(path); 3424 return PTR_ERR(trans); 3425 } 3426 3427 key.objectid = BTRFS_BALANCE_OBJECTID; 3428 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3429 key.offset = 0; 3430 3431 ret = btrfs_insert_empty_item(trans, root, path, &key, 3432 sizeof(*item)); 3433 if (ret) 3434 goto out; 3435 3436 leaf = path->nodes[0]; 3437 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 3438 3439 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item)); 3440 3441 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data); 3442 btrfs_set_balance_data(leaf, item, &disk_bargs); 3443 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta); 3444 btrfs_set_balance_meta(leaf, item, &disk_bargs); 3445 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys); 3446 btrfs_set_balance_sys(leaf, item, &disk_bargs); 3447 3448 btrfs_set_balance_flags(leaf, item, bctl->flags); 3449 3450 btrfs_mark_buffer_dirty(leaf); 3451 out: 3452 btrfs_free_path(path); 3453 err = btrfs_commit_transaction(trans); 3454 if (err && !ret) 3455 ret = err; 3456 return ret; 3457 } 3458 3459 static int del_balance_item(struct btrfs_fs_info *fs_info) 3460 { 3461 struct btrfs_root *root = fs_info->tree_root; 3462 struct btrfs_trans_handle *trans; 3463 struct btrfs_path *path; 3464 struct btrfs_key key; 3465 int ret, err; 3466 3467 path = btrfs_alloc_path(); 3468 if (!path) 3469 return -ENOMEM; 3470 3471 trans = btrfs_start_transaction_fallback_global_rsv(root, 0); 3472 if (IS_ERR(trans)) { 3473 btrfs_free_path(path); 3474 return PTR_ERR(trans); 3475 } 3476 3477 key.objectid = BTRFS_BALANCE_OBJECTID; 3478 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3479 key.offset = 0; 3480 3481 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 3482 if (ret < 0) 3483 goto out; 3484 if (ret > 0) { 3485 ret = -ENOENT; 3486 goto out; 3487 } 3488 3489 ret = btrfs_del_item(trans, root, path); 3490 out: 3491 btrfs_free_path(path); 3492 err = btrfs_commit_transaction(trans); 3493 if (err && !ret) 3494 ret = err; 3495 return ret; 3496 } 3497 3498 /* 3499 * This is a heuristic used to reduce the number of chunks balanced on 3500 * resume after balance was interrupted. 3501 */ 3502 static void update_balance_args(struct btrfs_balance_control *bctl) 3503 { 3504 /* 3505 * Turn on soft mode for chunk types that were being converted. 3506 */ 3507 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) 3508 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT; 3509 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) 3510 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT; 3511 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) 3512 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT; 3513 3514 /* 3515 * Turn on usage filter if is not already used. The idea is 3516 * that chunks that we have already balanced should be 3517 * reasonably full. Don't do it for chunks that are being 3518 * converted - that will keep us from relocating unconverted 3519 * (albeit full) chunks. 3520 */ 3521 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) && 3522 !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3523 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3524 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE; 3525 bctl->data.usage = 90; 3526 } 3527 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) && 3528 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3529 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3530 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE; 3531 bctl->sys.usage = 90; 3532 } 3533 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) && 3534 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3535 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3536 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE; 3537 bctl->meta.usage = 90; 3538 } 3539 } 3540 3541 /* 3542 * Clear the balance status in fs_info and delete the balance item from disk. 3543 */ 3544 static void reset_balance_state(struct btrfs_fs_info *fs_info) 3545 { 3546 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3547 int ret; 3548 3549 BUG_ON(!fs_info->balance_ctl); 3550 3551 spin_lock(&fs_info->balance_lock); 3552 fs_info->balance_ctl = NULL; 3553 spin_unlock(&fs_info->balance_lock); 3554 3555 kfree(bctl); 3556 ret = del_balance_item(fs_info); 3557 if (ret) 3558 btrfs_handle_fs_error(fs_info, ret, NULL); 3559 } 3560 3561 /* 3562 * Balance filters. Return 1 if chunk should be filtered out 3563 * (should not be balanced). 3564 */ 3565 static int chunk_profiles_filter(u64 chunk_type, 3566 struct btrfs_balance_args *bargs) 3567 { 3568 chunk_type = chunk_to_extended(chunk_type) & 3569 BTRFS_EXTENDED_PROFILE_MASK; 3570 3571 if (bargs->profiles & chunk_type) 3572 return 0; 3573 3574 return 1; 3575 } 3576 3577 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, 3578 struct btrfs_balance_args *bargs) 3579 { 3580 struct btrfs_block_group *cache; 3581 u64 chunk_used; 3582 u64 user_thresh_min; 3583 u64 user_thresh_max; 3584 int ret = 1; 3585 3586 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3587 chunk_used = cache->used; 3588 3589 if (bargs->usage_min == 0) 3590 user_thresh_min = 0; 3591 else 3592 user_thresh_min = div_factor_fine(cache->length, 3593 bargs->usage_min); 3594 3595 if (bargs->usage_max == 0) 3596 user_thresh_max = 1; 3597 else if (bargs->usage_max > 100) 3598 user_thresh_max = cache->length; 3599 else 3600 user_thresh_max = div_factor_fine(cache->length, 3601 bargs->usage_max); 3602 3603 if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max) 3604 ret = 0; 3605 3606 btrfs_put_block_group(cache); 3607 return ret; 3608 } 3609 3610 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, 3611 u64 chunk_offset, struct btrfs_balance_args *bargs) 3612 { 3613 struct btrfs_block_group *cache; 3614 u64 chunk_used, user_thresh; 3615 int ret = 1; 3616 3617 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3618 chunk_used = cache->used; 3619 3620 if (bargs->usage_min == 0) 3621 user_thresh = 1; 3622 else if (bargs->usage > 100) 3623 user_thresh = cache->length; 3624 else 3625 user_thresh = div_factor_fine(cache->length, bargs->usage); 3626 3627 if (chunk_used < user_thresh) 3628 ret = 0; 3629 3630 btrfs_put_block_group(cache); 3631 return ret; 3632 } 3633 3634 static int chunk_devid_filter(struct extent_buffer *leaf, 3635 struct btrfs_chunk *chunk, 3636 struct btrfs_balance_args *bargs) 3637 { 3638 struct btrfs_stripe *stripe; 3639 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3640 int i; 3641 3642 for (i = 0; i < num_stripes; i++) { 3643 stripe = btrfs_stripe_nr(chunk, i); 3644 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid) 3645 return 0; 3646 } 3647 3648 return 1; 3649 } 3650 3651 static u64 calc_data_stripes(u64 type, int num_stripes) 3652 { 3653 const int index = btrfs_bg_flags_to_raid_index(type); 3654 const int ncopies = btrfs_raid_array[index].ncopies; 3655 const int nparity = btrfs_raid_array[index].nparity; 3656 3657 return (num_stripes - nparity) / ncopies; 3658 } 3659 3660 /* [pstart, pend) */ 3661 static int chunk_drange_filter(struct extent_buffer *leaf, 3662 struct btrfs_chunk *chunk, 3663 struct btrfs_balance_args *bargs) 3664 { 3665 struct btrfs_stripe *stripe; 3666 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3667 u64 stripe_offset; 3668 u64 stripe_length; 3669 u64 type; 3670 int factor; 3671 int i; 3672 3673 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID)) 3674 return 0; 3675 3676 type = btrfs_chunk_type(leaf, chunk); 3677 factor = calc_data_stripes(type, num_stripes); 3678 3679 for (i = 0; i < num_stripes; i++) { 3680 stripe = btrfs_stripe_nr(chunk, i); 3681 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid) 3682 continue; 3683 3684 stripe_offset = btrfs_stripe_offset(leaf, stripe); 3685 stripe_length = btrfs_chunk_length(leaf, chunk); 3686 stripe_length = div_u64(stripe_length, factor); 3687 3688 if (stripe_offset < bargs->pend && 3689 stripe_offset + stripe_length > bargs->pstart) 3690 return 0; 3691 } 3692 3693 return 1; 3694 } 3695 3696 /* [vstart, vend) */ 3697 static int chunk_vrange_filter(struct extent_buffer *leaf, 3698 struct btrfs_chunk *chunk, 3699 u64 chunk_offset, 3700 struct btrfs_balance_args *bargs) 3701 { 3702 if (chunk_offset < bargs->vend && 3703 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart) 3704 /* at least part of the chunk is inside this vrange */ 3705 return 0; 3706 3707 return 1; 3708 } 3709 3710 static int chunk_stripes_range_filter(struct extent_buffer *leaf, 3711 struct btrfs_chunk *chunk, 3712 struct btrfs_balance_args *bargs) 3713 { 3714 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3715 3716 if (bargs->stripes_min <= num_stripes 3717 && num_stripes <= bargs->stripes_max) 3718 return 0; 3719 3720 return 1; 3721 } 3722 3723 static int chunk_soft_convert_filter(u64 chunk_type, 3724 struct btrfs_balance_args *bargs) 3725 { 3726 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 3727 return 0; 3728 3729 chunk_type = chunk_to_extended(chunk_type) & 3730 BTRFS_EXTENDED_PROFILE_MASK; 3731 3732 if (bargs->target == chunk_type) 3733 return 1; 3734 3735 return 0; 3736 } 3737 3738 static int should_balance_chunk(struct extent_buffer *leaf, 3739 struct btrfs_chunk *chunk, u64 chunk_offset) 3740 { 3741 struct btrfs_fs_info *fs_info = leaf->fs_info; 3742 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3743 struct btrfs_balance_args *bargs = NULL; 3744 u64 chunk_type = btrfs_chunk_type(leaf, chunk); 3745 3746 /* type filter */ 3747 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) & 3748 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) { 3749 return 0; 3750 } 3751 3752 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3753 bargs = &bctl->data; 3754 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3755 bargs = &bctl->sys; 3756 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3757 bargs = &bctl->meta; 3758 3759 /* profiles filter */ 3760 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) && 3761 chunk_profiles_filter(chunk_type, bargs)) { 3762 return 0; 3763 } 3764 3765 /* usage filter */ 3766 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) && 3767 chunk_usage_filter(fs_info, chunk_offset, bargs)) { 3768 return 0; 3769 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3770 chunk_usage_range_filter(fs_info, chunk_offset, bargs)) { 3771 return 0; 3772 } 3773 3774 /* devid filter */ 3775 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) && 3776 chunk_devid_filter(leaf, chunk, bargs)) { 3777 return 0; 3778 } 3779 3780 /* drange filter, makes sense only with devid filter */ 3781 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) && 3782 chunk_drange_filter(leaf, chunk, bargs)) { 3783 return 0; 3784 } 3785 3786 /* vrange filter */ 3787 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) && 3788 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) { 3789 return 0; 3790 } 3791 3792 /* stripes filter */ 3793 if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) && 3794 chunk_stripes_range_filter(leaf, chunk, bargs)) { 3795 return 0; 3796 } 3797 3798 /* soft profile changing mode */ 3799 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) && 3800 chunk_soft_convert_filter(chunk_type, bargs)) { 3801 return 0; 3802 } 3803 3804 /* 3805 * limited by count, must be the last filter 3806 */ 3807 if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) { 3808 if (bargs->limit == 0) 3809 return 0; 3810 else 3811 bargs->limit--; 3812 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) { 3813 /* 3814 * Same logic as the 'limit' filter; the minimum cannot be 3815 * determined here because we do not have the global information 3816 * about the count of all chunks that satisfy the filters. 3817 */ 3818 if (bargs->limit_max == 0) 3819 return 0; 3820 else 3821 bargs->limit_max--; 3822 } 3823 3824 return 1; 3825 } 3826 3827 static int __btrfs_balance(struct btrfs_fs_info *fs_info) 3828 { 3829 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3830 struct btrfs_root *chunk_root = fs_info->chunk_root; 3831 u64 chunk_type; 3832 struct btrfs_chunk *chunk; 3833 struct btrfs_path *path = NULL; 3834 struct btrfs_key key; 3835 struct btrfs_key found_key; 3836 struct extent_buffer *leaf; 3837 int slot; 3838 int ret; 3839 int enospc_errors = 0; 3840 bool counting = true; 3841 /* The single value limit and min/max limits use the same bytes in the */ 3842 u64 limit_data = bctl->data.limit; 3843 u64 limit_meta = bctl->meta.limit; 3844 u64 limit_sys = bctl->sys.limit; 3845 u32 count_data = 0; 3846 u32 count_meta = 0; 3847 u32 count_sys = 0; 3848 int chunk_reserved = 0; 3849 3850 path = btrfs_alloc_path(); 3851 if (!path) { 3852 ret = -ENOMEM; 3853 goto error; 3854 } 3855 3856 /* zero out stat counters */ 3857 spin_lock(&fs_info->balance_lock); 3858 memset(&bctl->stat, 0, sizeof(bctl->stat)); 3859 spin_unlock(&fs_info->balance_lock); 3860 again: 3861 if (!counting) { 3862 /* 3863 * The single value limit and min/max limits use the same bytes 3864 * in the 3865 */ 3866 bctl->data.limit = limit_data; 3867 bctl->meta.limit = limit_meta; 3868 bctl->sys.limit = limit_sys; 3869 } 3870 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3871 key.offset = (u64)-1; 3872 key.type = BTRFS_CHUNK_ITEM_KEY; 3873 3874 while (1) { 3875 if ((!counting && atomic_read(&fs_info->balance_pause_req)) || 3876 atomic_read(&fs_info->balance_cancel_req)) { 3877 ret = -ECANCELED; 3878 goto error; 3879 } 3880 3881 mutex_lock(&fs_info->reclaim_bgs_lock); 3882 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3883 if (ret < 0) { 3884 mutex_unlock(&fs_info->reclaim_bgs_lock); 3885 goto error; 3886 } 3887 3888 /* 3889 * this shouldn't happen, it means the last relocate 3890 * failed 3891 */ 3892 if (ret == 0) 3893 BUG(); /* FIXME break ? */ 3894 3895 ret = btrfs_previous_item(chunk_root, path, 0, 3896 BTRFS_CHUNK_ITEM_KEY); 3897 if (ret) { 3898 mutex_unlock(&fs_info->reclaim_bgs_lock); 3899 ret = 0; 3900 break; 3901 } 3902 3903 leaf = path->nodes[0]; 3904 slot = path->slots[0]; 3905 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3906 3907 if (found_key.objectid != key.objectid) { 3908 mutex_unlock(&fs_info->reclaim_bgs_lock); 3909 break; 3910 } 3911 3912 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 3913 chunk_type = btrfs_chunk_type(leaf, chunk); 3914 3915 if (!counting) { 3916 spin_lock(&fs_info->balance_lock); 3917 bctl->stat.considered++; 3918 spin_unlock(&fs_info->balance_lock); 3919 } 3920 3921 ret = should_balance_chunk(leaf, chunk, found_key.offset); 3922 3923 btrfs_release_path(path); 3924 if (!ret) { 3925 mutex_unlock(&fs_info->reclaim_bgs_lock); 3926 goto loop; 3927 } 3928 3929 if (counting) { 3930 mutex_unlock(&fs_info->reclaim_bgs_lock); 3931 spin_lock(&fs_info->balance_lock); 3932 bctl->stat.expected++; 3933 spin_unlock(&fs_info->balance_lock); 3934 3935 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3936 count_data++; 3937 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3938 count_sys++; 3939 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3940 count_meta++; 3941 3942 goto loop; 3943 } 3944 3945 /* 3946 * Apply limit_min filter, no need to check if the LIMITS 3947 * filter is used, limit_min is 0 by default 3948 */ 3949 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) && 3950 count_data < bctl->data.limit_min) 3951 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) && 3952 count_meta < bctl->meta.limit_min) 3953 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) && 3954 count_sys < bctl->sys.limit_min)) { 3955 mutex_unlock(&fs_info->reclaim_bgs_lock); 3956 goto loop; 3957 } 3958 3959 if (!chunk_reserved) { 3960 /* 3961 * We may be relocating the only data chunk we have, 3962 * which could potentially end up with losing data's 3963 * raid profile, so lets allocate an empty one in 3964 * advance. 3965 */ 3966 ret = btrfs_may_alloc_data_chunk(fs_info, 3967 found_key.offset); 3968 if (ret < 0) { 3969 mutex_unlock(&fs_info->reclaim_bgs_lock); 3970 goto error; 3971 } else if (ret == 1) { 3972 chunk_reserved = 1; 3973 } 3974 } 3975 3976 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 3977 mutex_unlock(&fs_info->reclaim_bgs_lock); 3978 if (ret == -ENOSPC) { 3979 enospc_errors++; 3980 } else if (ret == -ETXTBSY) { 3981 btrfs_info(fs_info, 3982 "skipping relocation of block group %llu due to active swapfile", 3983 found_key.offset); 3984 ret = 0; 3985 } else if (ret) { 3986 goto error; 3987 } else { 3988 spin_lock(&fs_info->balance_lock); 3989 bctl->stat.completed++; 3990 spin_unlock(&fs_info->balance_lock); 3991 } 3992 loop: 3993 if (found_key.offset == 0) 3994 break; 3995 key.offset = found_key.offset - 1; 3996 } 3997 3998 if (counting) { 3999 btrfs_release_path(path); 4000 counting = false; 4001 goto again; 4002 } 4003 error: 4004 btrfs_free_path(path); 4005 if (enospc_errors) { 4006 btrfs_info(fs_info, "%d enospc errors during balance", 4007 enospc_errors); 4008 if (!ret) 4009 ret = -ENOSPC; 4010 } 4011 4012 return ret; 4013 } 4014 4015 /** 4016 * alloc_profile_is_valid - see if a given profile is valid and reduced 4017 * @flags: profile to validate 4018 * @extended: if true @flags is treated as an extended profile 4019 */ 4020 static int alloc_profile_is_valid(u64 flags, int extended) 4021 { 4022 u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK : 4023 BTRFS_BLOCK_GROUP_PROFILE_MASK); 4024 4025 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK; 4026 4027 /* 1) check that all other bits are zeroed */ 4028 if (flags & ~mask) 4029 return 0; 4030 4031 /* 2) see if profile is reduced */ 4032 if (flags == 0) 4033 return !extended; /* "0" is valid for usual profiles */ 4034 4035 return has_single_bit_set(flags); 4036 } 4037 4038 static inline int balance_need_close(struct btrfs_fs_info *fs_info) 4039 { 4040 /* cancel requested || normal exit path */ 4041 return atomic_read(&fs_info->balance_cancel_req) || 4042 (atomic_read(&fs_info->balance_pause_req) == 0 && 4043 atomic_read(&fs_info->balance_cancel_req) == 0); 4044 } 4045 4046 /* 4047 * Validate target profile against allowed profiles and return true if it's OK. 4048 * Otherwise print the error message and return false. 4049 */ 4050 static inline int validate_convert_profile(struct btrfs_fs_info *fs_info, 4051 const struct btrfs_balance_args *bargs, 4052 u64 allowed, const char *type) 4053 { 4054 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 4055 return true; 4056 4057 /* Profile is valid and does not have bits outside of the allowed set */ 4058 if (alloc_profile_is_valid(bargs->target, 1) && 4059 (bargs->target & ~allowed) == 0) 4060 return true; 4061 4062 btrfs_err(fs_info, "balance: invalid convert %s profile %s", 4063 type, btrfs_bg_type_to_raid_name(bargs->target)); 4064 return false; 4065 } 4066 4067 /* 4068 * Fill @buf with textual description of balance filter flags @bargs, up to 4069 * @size_buf including the terminating null. The output may be trimmed if it 4070 * does not fit into the provided buffer. 4071 */ 4072 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf, 4073 u32 size_buf) 4074 { 4075 int ret; 4076 u32 size_bp = size_buf; 4077 char *bp = buf; 4078 u64 flags = bargs->flags; 4079 char tmp_buf[128] = {'\0'}; 4080 4081 if (!flags) 4082 return; 4083 4084 #define CHECK_APPEND_NOARG(a) \ 4085 do { \ 4086 ret = snprintf(bp, size_bp, (a)); \ 4087 if (ret < 0 || ret >= size_bp) \ 4088 goto out_overflow; \ 4089 size_bp -= ret; \ 4090 bp += ret; \ 4091 } while (0) 4092 4093 #define CHECK_APPEND_1ARG(a, v1) \ 4094 do { \ 4095 ret = snprintf(bp, size_bp, (a), (v1)); \ 4096 if (ret < 0 || ret >= size_bp) \ 4097 goto out_overflow; \ 4098 size_bp -= ret; \ 4099 bp += ret; \ 4100 } while (0) 4101 4102 #define CHECK_APPEND_2ARG(a, v1, v2) \ 4103 do { \ 4104 ret = snprintf(bp, size_bp, (a), (v1), (v2)); \ 4105 if (ret < 0 || ret >= size_bp) \ 4106 goto out_overflow; \ 4107 size_bp -= ret; \ 4108 bp += ret; \ 4109 } while (0) 4110 4111 if (flags & BTRFS_BALANCE_ARGS_CONVERT) 4112 CHECK_APPEND_1ARG("convert=%s,", 4113 btrfs_bg_type_to_raid_name(bargs->target)); 4114 4115 if (flags & BTRFS_BALANCE_ARGS_SOFT) 4116 CHECK_APPEND_NOARG("soft,"); 4117 4118 if (flags & BTRFS_BALANCE_ARGS_PROFILES) { 4119 btrfs_describe_block_groups(bargs->profiles, tmp_buf, 4120 sizeof(tmp_buf)); 4121 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf); 4122 } 4123 4124 if (flags & BTRFS_BALANCE_ARGS_USAGE) 4125 CHECK_APPEND_1ARG("usage=%llu,", bargs->usage); 4126 4127 if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) 4128 CHECK_APPEND_2ARG("usage=%u..%u,", 4129 bargs->usage_min, bargs->usage_max); 4130 4131 if (flags & BTRFS_BALANCE_ARGS_DEVID) 4132 CHECK_APPEND_1ARG("devid=%llu,", bargs->devid); 4133 4134 if (flags & BTRFS_BALANCE_ARGS_DRANGE) 4135 CHECK_APPEND_2ARG("drange=%llu..%llu,", 4136 bargs->pstart, bargs->pend); 4137 4138 if (flags & BTRFS_BALANCE_ARGS_VRANGE) 4139 CHECK_APPEND_2ARG("vrange=%llu..%llu,", 4140 bargs->vstart, bargs->vend); 4141 4142 if (flags & BTRFS_BALANCE_ARGS_LIMIT) 4143 CHECK_APPEND_1ARG("limit=%llu,", bargs->limit); 4144 4145 if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE) 4146 CHECK_APPEND_2ARG("limit=%u..%u,", 4147 bargs->limit_min, bargs->limit_max); 4148 4149 if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) 4150 CHECK_APPEND_2ARG("stripes=%u..%u,", 4151 bargs->stripes_min, bargs->stripes_max); 4152 4153 #undef CHECK_APPEND_2ARG 4154 #undef CHECK_APPEND_1ARG 4155 #undef CHECK_APPEND_NOARG 4156 4157 out_overflow: 4158 4159 if (size_bp < size_buf) 4160 buf[size_buf - size_bp - 1] = '\0'; /* remove last , */ 4161 else 4162 buf[0] = '\0'; 4163 } 4164 4165 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info) 4166 { 4167 u32 size_buf = 1024; 4168 char tmp_buf[192] = {'\0'}; 4169 char *buf; 4170 char *bp; 4171 u32 size_bp = size_buf; 4172 int ret; 4173 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 4174 4175 buf = kzalloc(size_buf, GFP_KERNEL); 4176 if (!buf) 4177 return; 4178 4179 bp = buf; 4180 4181 #define CHECK_APPEND_1ARG(a, v1) \ 4182 do { \ 4183 ret = snprintf(bp, size_bp, (a), (v1)); \ 4184 if (ret < 0 || ret >= size_bp) \ 4185 goto out_overflow; \ 4186 size_bp -= ret; \ 4187 bp += ret; \ 4188 } while (0) 4189 4190 if (bctl->flags & BTRFS_BALANCE_FORCE) 4191 CHECK_APPEND_1ARG("%s", "-f "); 4192 4193 if (bctl->flags & BTRFS_BALANCE_DATA) { 4194 describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf)); 4195 CHECK_APPEND_1ARG("-d%s ", tmp_buf); 4196 } 4197 4198 if (bctl->flags & BTRFS_BALANCE_METADATA) { 4199 describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf)); 4200 CHECK_APPEND_1ARG("-m%s ", tmp_buf); 4201 } 4202 4203 if (bctl->flags & BTRFS_BALANCE_SYSTEM) { 4204 describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf)); 4205 CHECK_APPEND_1ARG("-s%s ", tmp_buf); 4206 } 4207 4208 #undef CHECK_APPEND_1ARG 4209 4210 out_overflow: 4211 4212 if (size_bp < size_buf) 4213 buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */ 4214 btrfs_info(fs_info, "balance: %s %s", 4215 (bctl->flags & BTRFS_BALANCE_RESUME) ? 4216 "resume" : "start", buf); 4217 4218 kfree(buf); 4219 } 4220 4221 /* 4222 * Should be called with balance mutexe held 4223 */ 4224 int btrfs_balance(struct btrfs_fs_info *fs_info, 4225 struct btrfs_balance_control *bctl, 4226 struct btrfs_ioctl_balance_args *bargs) 4227 { 4228 u64 meta_target, data_target; 4229 u64 allowed; 4230 int mixed = 0; 4231 int ret; 4232 u64 num_devices; 4233 unsigned seq; 4234 bool reducing_redundancy; 4235 int i; 4236 4237 if (btrfs_fs_closing(fs_info) || 4238 atomic_read(&fs_info->balance_pause_req) || 4239 btrfs_should_cancel_balance(fs_info)) { 4240 ret = -EINVAL; 4241 goto out; 4242 } 4243 4244 allowed = btrfs_super_incompat_flags(fs_info->super_copy); 4245 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) 4246 mixed = 1; 4247 4248 /* 4249 * In case of mixed groups both data and meta should be picked, 4250 * and identical options should be given for both of them. 4251 */ 4252 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA; 4253 if (mixed && (bctl->flags & allowed)) { 4254 if (!(bctl->flags & BTRFS_BALANCE_DATA) || 4255 !(bctl->flags & BTRFS_BALANCE_METADATA) || 4256 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) { 4257 btrfs_err(fs_info, 4258 "balance: mixed groups data and metadata options must be the same"); 4259 ret = -EINVAL; 4260 goto out; 4261 } 4262 } 4263 4264 /* 4265 * rw_devices will not change at the moment, device add/delete/replace 4266 * are exclusive 4267 */ 4268 num_devices = fs_info->fs_devices->rw_devices; 4269 4270 /* 4271 * SINGLE profile on-disk has no profile bit, but in-memory we have a 4272 * special bit for it, to make it easier to distinguish. Thus we need 4273 * to set it manually, or balance would refuse the profile. 4274 */ 4275 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE; 4276 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) 4277 if (num_devices >= btrfs_raid_array[i].devs_min) 4278 allowed |= btrfs_raid_array[i].bg_flag; 4279 4280 if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") || 4281 !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") || 4282 !validate_convert_profile(fs_info, &bctl->sys, allowed, "system")) { 4283 ret = -EINVAL; 4284 goto out; 4285 } 4286 4287 /* 4288 * Allow to reduce metadata or system integrity only if force set for 4289 * profiles with redundancy (copies, parity) 4290 */ 4291 allowed = 0; 4292 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) { 4293 if (btrfs_raid_array[i].ncopies >= 2 || 4294 btrfs_raid_array[i].tolerated_failures >= 1) 4295 allowed |= btrfs_raid_array[i].bg_flag; 4296 } 4297 do { 4298 seq = read_seqbegin(&fs_info->profiles_lock); 4299 4300 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4301 (fs_info->avail_system_alloc_bits & allowed) && 4302 !(bctl->sys.target & allowed)) || 4303 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4304 (fs_info->avail_metadata_alloc_bits & allowed) && 4305 !(bctl->meta.target & allowed))) 4306 reducing_redundancy = true; 4307 else 4308 reducing_redundancy = false; 4309 4310 /* if we're not converting, the target field is uninitialized */ 4311 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 4312 bctl->meta.target : fs_info->avail_metadata_alloc_bits; 4313 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 4314 bctl->data.target : fs_info->avail_data_alloc_bits; 4315 } while (read_seqretry(&fs_info->profiles_lock, seq)); 4316 4317 if (reducing_redundancy) { 4318 if (bctl->flags & BTRFS_BALANCE_FORCE) { 4319 btrfs_info(fs_info, 4320 "balance: force reducing metadata redundancy"); 4321 } else { 4322 btrfs_err(fs_info, 4323 "balance: reduces metadata redundancy, use --force if you want this"); 4324 ret = -EINVAL; 4325 goto out; 4326 } 4327 } 4328 4329 if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) < 4330 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) { 4331 btrfs_warn(fs_info, 4332 "balance: metadata profile %s has lower redundancy than data profile %s", 4333 btrfs_bg_type_to_raid_name(meta_target), 4334 btrfs_bg_type_to_raid_name(data_target)); 4335 } 4336 4337 ret = insert_balance_item(fs_info, bctl); 4338 if (ret && ret != -EEXIST) 4339 goto out; 4340 4341 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) { 4342 BUG_ON(ret == -EEXIST); 4343 BUG_ON(fs_info->balance_ctl); 4344 spin_lock(&fs_info->balance_lock); 4345 fs_info->balance_ctl = bctl; 4346 spin_unlock(&fs_info->balance_lock); 4347 } else { 4348 BUG_ON(ret != -EEXIST); 4349 spin_lock(&fs_info->balance_lock); 4350 update_balance_args(bctl); 4351 spin_unlock(&fs_info->balance_lock); 4352 } 4353 4354 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4355 set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4356 describe_balance_start_or_resume(fs_info); 4357 mutex_unlock(&fs_info->balance_mutex); 4358 4359 ret = __btrfs_balance(fs_info); 4360 4361 mutex_lock(&fs_info->balance_mutex); 4362 if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req)) { 4363 btrfs_info(fs_info, "balance: paused"); 4364 btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED); 4365 } 4366 /* 4367 * Balance can be canceled by: 4368 * 4369 * - Regular cancel request 4370 * Then ret == -ECANCELED and balance_cancel_req > 0 4371 * 4372 * - Fatal signal to "btrfs" process 4373 * Either the signal caught by wait_reserve_ticket() and callers 4374 * got -EINTR, or caught by btrfs_should_cancel_balance() and 4375 * got -ECANCELED. 4376 * Either way, in this case balance_cancel_req = 0, and 4377 * ret == -EINTR or ret == -ECANCELED. 4378 * 4379 * So here we only check the return value to catch canceled balance. 4380 */ 4381 else if (ret == -ECANCELED || ret == -EINTR) 4382 btrfs_info(fs_info, "balance: canceled"); 4383 else 4384 btrfs_info(fs_info, "balance: ended with status: %d", ret); 4385 4386 clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4387 4388 if (bargs) { 4389 memset(bargs, 0, sizeof(*bargs)); 4390 btrfs_update_ioctl_balance_args(fs_info, bargs); 4391 } 4392 4393 if ((ret && ret != -ECANCELED && ret != -ENOSPC) || 4394 balance_need_close(fs_info)) { 4395 reset_balance_state(fs_info); 4396 btrfs_exclop_finish(fs_info); 4397 } 4398 4399 wake_up(&fs_info->balance_wait_q); 4400 4401 return ret; 4402 out: 4403 if (bctl->flags & BTRFS_BALANCE_RESUME) 4404 reset_balance_state(fs_info); 4405 else 4406 kfree(bctl); 4407 btrfs_exclop_finish(fs_info); 4408 4409 return ret; 4410 } 4411 4412 static int balance_kthread(void *data) 4413 { 4414 struct btrfs_fs_info *fs_info = data; 4415 int ret = 0; 4416 4417 sb_start_write(fs_info->sb); 4418 mutex_lock(&fs_info->balance_mutex); 4419 if (fs_info->balance_ctl) 4420 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL); 4421 mutex_unlock(&fs_info->balance_mutex); 4422 sb_end_write(fs_info->sb); 4423 4424 return ret; 4425 } 4426 4427 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info) 4428 { 4429 struct task_struct *tsk; 4430 4431 mutex_lock(&fs_info->balance_mutex); 4432 if (!fs_info->balance_ctl) { 4433 mutex_unlock(&fs_info->balance_mutex); 4434 return 0; 4435 } 4436 mutex_unlock(&fs_info->balance_mutex); 4437 4438 if (btrfs_test_opt(fs_info, SKIP_BALANCE)) { 4439 btrfs_info(fs_info, "balance: resume skipped"); 4440 return 0; 4441 } 4442 4443 spin_lock(&fs_info->super_lock); 4444 ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED); 4445 fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE; 4446 spin_unlock(&fs_info->super_lock); 4447 /* 4448 * A ro->rw remount sequence should continue with the paused balance 4449 * regardless of who pauses it, system or the user as of now, so set 4450 * the resume flag. 4451 */ 4452 spin_lock(&fs_info->balance_lock); 4453 fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME; 4454 spin_unlock(&fs_info->balance_lock); 4455 4456 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance"); 4457 return PTR_ERR_OR_ZERO(tsk); 4458 } 4459 4460 int btrfs_recover_balance(struct btrfs_fs_info *fs_info) 4461 { 4462 struct btrfs_balance_control *bctl; 4463 struct btrfs_balance_item *item; 4464 struct btrfs_disk_balance_args disk_bargs; 4465 struct btrfs_path *path; 4466 struct extent_buffer *leaf; 4467 struct btrfs_key key; 4468 int ret; 4469 4470 path = btrfs_alloc_path(); 4471 if (!path) 4472 return -ENOMEM; 4473 4474 key.objectid = BTRFS_BALANCE_OBJECTID; 4475 key.type = BTRFS_TEMPORARY_ITEM_KEY; 4476 key.offset = 0; 4477 4478 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 4479 if (ret < 0) 4480 goto out; 4481 if (ret > 0) { /* ret = -ENOENT; */ 4482 ret = 0; 4483 goto out; 4484 } 4485 4486 bctl = kzalloc(sizeof(*bctl), GFP_NOFS); 4487 if (!bctl) { 4488 ret = -ENOMEM; 4489 goto out; 4490 } 4491 4492 leaf = path->nodes[0]; 4493 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 4494 4495 bctl->flags = btrfs_balance_flags(leaf, item); 4496 bctl->flags |= BTRFS_BALANCE_RESUME; 4497 4498 btrfs_balance_data(leaf, item, &disk_bargs); 4499 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs); 4500 btrfs_balance_meta(leaf, item, &disk_bargs); 4501 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs); 4502 btrfs_balance_sys(leaf, item, &disk_bargs); 4503 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs); 4504 4505 /* 4506 * This should never happen, as the paused balance state is recovered 4507 * during mount without any chance of other exclusive ops to collide. 4508 * 4509 * This gives the exclusive op status to balance and keeps in paused 4510 * state until user intervention (cancel or umount). If the ownership 4511 * cannot be assigned, show a message but do not fail. The balance 4512 * is in a paused state and must have fs_info::balance_ctl properly 4513 * set up. 4514 */ 4515 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED)) 4516 btrfs_warn(fs_info, 4517 "balance: cannot set exclusive op status, resume manually"); 4518 4519 btrfs_release_path(path); 4520 4521 mutex_lock(&fs_info->balance_mutex); 4522 BUG_ON(fs_info->balance_ctl); 4523 spin_lock(&fs_info->balance_lock); 4524 fs_info->balance_ctl = bctl; 4525 spin_unlock(&fs_info->balance_lock); 4526 mutex_unlock(&fs_info->balance_mutex); 4527 out: 4528 btrfs_free_path(path); 4529 return ret; 4530 } 4531 4532 int btrfs_pause_balance(struct btrfs_fs_info *fs_info) 4533 { 4534 int ret = 0; 4535 4536 mutex_lock(&fs_info->balance_mutex); 4537 if (!fs_info->balance_ctl) { 4538 mutex_unlock(&fs_info->balance_mutex); 4539 return -ENOTCONN; 4540 } 4541 4542 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4543 atomic_inc(&fs_info->balance_pause_req); 4544 mutex_unlock(&fs_info->balance_mutex); 4545 4546 wait_event(fs_info->balance_wait_q, 4547 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4548 4549 mutex_lock(&fs_info->balance_mutex); 4550 /* we are good with balance_ctl ripped off from under us */ 4551 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4552 atomic_dec(&fs_info->balance_pause_req); 4553 } else { 4554 ret = -ENOTCONN; 4555 } 4556 4557 mutex_unlock(&fs_info->balance_mutex); 4558 return ret; 4559 } 4560 4561 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info) 4562 { 4563 mutex_lock(&fs_info->balance_mutex); 4564 if (!fs_info->balance_ctl) { 4565 mutex_unlock(&fs_info->balance_mutex); 4566 return -ENOTCONN; 4567 } 4568 4569 /* 4570 * A paused balance with the item stored on disk can be resumed at 4571 * mount time if the mount is read-write. Otherwise it's still paused 4572 * and we must not allow cancelling as it deletes the item. 4573 */ 4574 if (sb_rdonly(fs_info->sb)) { 4575 mutex_unlock(&fs_info->balance_mutex); 4576 return -EROFS; 4577 } 4578 4579 atomic_inc(&fs_info->balance_cancel_req); 4580 /* 4581 * if we are running just wait and return, balance item is 4582 * deleted in btrfs_balance in this case 4583 */ 4584 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4585 mutex_unlock(&fs_info->balance_mutex); 4586 wait_event(fs_info->balance_wait_q, 4587 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4588 mutex_lock(&fs_info->balance_mutex); 4589 } else { 4590 mutex_unlock(&fs_info->balance_mutex); 4591 /* 4592 * Lock released to allow other waiters to continue, we'll 4593 * reexamine the status again. 4594 */ 4595 mutex_lock(&fs_info->balance_mutex); 4596 4597 if (fs_info->balance_ctl) { 4598 reset_balance_state(fs_info); 4599 btrfs_exclop_finish(fs_info); 4600 btrfs_info(fs_info, "balance: canceled"); 4601 } 4602 } 4603 4604 BUG_ON(fs_info->balance_ctl || 4605 test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4606 atomic_dec(&fs_info->balance_cancel_req); 4607 mutex_unlock(&fs_info->balance_mutex); 4608 return 0; 4609 } 4610 4611 int btrfs_uuid_scan_kthread(void *data) 4612 { 4613 struct btrfs_fs_info *fs_info = data; 4614 struct btrfs_root *root = fs_info->tree_root; 4615 struct btrfs_key key; 4616 struct btrfs_path *path = NULL; 4617 int ret = 0; 4618 struct extent_buffer *eb; 4619 int slot; 4620 struct btrfs_root_item root_item; 4621 u32 item_size; 4622 struct btrfs_trans_handle *trans = NULL; 4623 bool closing = false; 4624 4625 path = btrfs_alloc_path(); 4626 if (!path) { 4627 ret = -ENOMEM; 4628 goto out; 4629 } 4630 4631 key.objectid = 0; 4632 key.type = BTRFS_ROOT_ITEM_KEY; 4633 key.offset = 0; 4634 4635 while (1) { 4636 if (btrfs_fs_closing(fs_info)) { 4637 closing = true; 4638 break; 4639 } 4640 ret = btrfs_search_forward(root, &key, path, 4641 BTRFS_OLDEST_GENERATION); 4642 if (ret) { 4643 if (ret > 0) 4644 ret = 0; 4645 break; 4646 } 4647 4648 if (key.type != BTRFS_ROOT_ITEM_KEY || 4649 (key.objectid < BTRFS_FIRST_FREE_OBJECTID && 4650 key.objectid != BTRFS_FS_TREE_OBJECTID) || 4651 key.objectid > BTRFS_LAST_FREE_OBJECTID) 4652 goto skip; 4653 4654 eb = path->nodes[0]; 4655 slot = path->slots[0]; 4656 item_size = btrfs_item_size(eb, slot); 4657 if (item_size < sizeof(root_item)) 4658 goto skip; 4659 4660 read_extent_buffer(eb, &root_item, 4661 btrfs_item_ptr_offset(eb, slot), 4662 (int)sizeof(root_item)); 4663 if (btrfs_root_refs(&root_item) == 0) 4664 goto skip; 4665 4666 if (!btrfs_is_empty_uuid(root_item.uuid) || 4667 !btrfs_is_empty_uuid(root_item.received_uuid)) { 4668 if (trans) 4669 goto update_tree; 4670 4671 btrfs_release_path(path); 4672 /* 4673 * 1 - subvol uuid item 4674 * 1 - received_subvol uuid item 4675 */ 4676 trans = btrfs_start_transaction(fs_info->uuid_root, 2); 4677 if (IS_ERR(trans)) { 4678 ret = PTR_ERR(trans); 4679 break; 4680 } 4681 continue; 4682 } else { 4683 goto skip; 4684 } 4685 update_tree: 4686 btrfs_release_path(path); 4687 if (!btrfs_is_empty_uuid(root_item.uuid)) { 4688 ret = btrfs_uuid_tree_add(trans, root_item.uuid, 4689 BTRFS_UUID_KEY_SUBVOL, 4690 key.objectid); 4691 if (ret < 0) { 4692 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4693 ret); 4694 break; 4695 } 4696 } 4697 4698 if (!btrfs_is_empty_uuid(root_item.received_uuid)) { 4699 ret = btrfs_uuid_tree_add(trans, 4700 root_item.received_uuid, 4701 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 4702 key.objectid); 4703 if (ret < 0) { 4704 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4705 ret); 4706 break; 4707 } 4708 } 4709 4710 skip: 4711 btrfs_release_path(path); 4712 if (trans) { 4713 ret = btrfs_end_transaction(trans); 4714 trans = NULL; 4715 if (ret) 4716 break; 4717 } 4718 4719 if (key.offset < (u64)-1) { 4720 key.offset++; 4721 } else if (key.type < BTRFS_ROOT_ITEM_KEY) { 4722 key.offset = 0; 4723 key.type = BTRFS_ROOT_ITEM_KEY; 4724 } else if (key.objectid < (u64)-1) { 4725 key.offset = 0; 4726 key.type = BTRFS_ROOT_ITEM_KEY; 4727 key.objectid++; 4728 } else { 4729 break; 4730 } 4731 cond_resched(); 4732 } 4733 4734 out: 4735 btrfs_free_path(path); 4736 if (trans && !IS_ERR(trans)) 4737 btrfs_end_transaction(trans); 4738 if (ret) 4739 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret); 4740 else if (!closing) 4741 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags); 4742 up(&fs_info->uuid_tree_rescan_sem); 4743 return 0; 4744 } 4745 4746 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info) 4747 { 4748 struct btrfs_trans_handle *trans; 4749 struct btrfs_root *tree_root = fs_info->tree_root; 4750 struct btrfs_root *uuid_root; 4751 struct task_struct *task; 4752 int ret; 4753 4754 /* 4755 * 1 - root node 4756 * 1 - root item 4757 */ 4758 trans = btrfs_start_transaction(tree_root, 2); 4759 if (IS_ERR(trans)) 4760 return PTR_ERR(trans); 4761 4762 uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID); 4763 if (IS_ERR(uuid_root)) { 4764 ret = PTR_ERR(uuid_root); 4765 btrfs_abort_transaction(trans, ret); 4766 btrfs_end_transaction(trans); 4767 return ret; 4768 } 4769 4770 fs_info->uuid_root = uuid_root; 4771 4772 ret = btrfs_commit_transaction(trans); 4773 if (ret) 4774 return ret; 4775 4776 down(&fs_info->uuid_tree_rescan_sem); 4777 task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid"); 4778 if (IS_ERR(task)) { 4779 /* fs_info->update_uuid_tree_gen remains 0 in all error case */ 4780 btrfs_warn(fs_info, "failed to start uuid_scan task"); 4781 up(&fs_info->uuid_tree_rescan_sem); 4782 return PTR_ERR(task); 4783 } 4784 4785 return 0; 4786 } 4787 4788 /* 4789 * shrinking a device means finding all of the device extents past 4790 * the new size, and then following the back refs to the chunks. 4791 * The chunk relocation code actually frees the device extent 4792 */ 4793 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) 4794 { 4795 struct btrfs_fs_info *fs_info = device->fs_info; 4796 struct btrfs_root *root = fs_info->dev_root; 4797 struct btrfs_trans_handle *trans; 4798 struct btrfs_dev_extent *dev_extent = NULL; 4799 struct btrfs_path *path; 4800 u64 length; 4801 u64 chunk_offset; 4802 int ret; 4803 int slot; 4804 int failed = 0; 4805 bool retried = false; 4806 struct extent_buffer *l; 4807 struct btrfs_key key; 4808 struct btrfs_super_block *super_copy = fs_info->super_copy; 4809 u64 old_total = btrfs_super_total_bytes(super_copy); 4810 u64 old_size = btrfs_device_get_total_bytes(device); 4811 u64 diff; 4812 u64 start; 4813 4814 new_size = round_down(new_size, fs_info->sectorsize); 4815 start = new_size; 4816 diff = round_down(old_size - new_size, fs_info->sectorsize); 4817 4818 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 4819 return -EINVAL; 4820 4821 path = btrfs_alloc_path(); 4822 if (!path) 4823 return -ENOMEM; 4824 4825 path->reada = READA_BACK; 4826 4827 trans = btrfs_start_transaction(root, 0); 4828 if (IS_ERR(trans)) { 4829 btrfs_free_path(path); 4830 return PTR_ERR(trans); 4831 } 4832 4833 mutex_lock(&fs_info->chunk_mutex); 4834 4835 btrfs_device_set_total_bytes(device, new_size); 4836 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 4837 device->fs_devices->total_rw_bytes -= diff; 4838 atomic64_sub(diff, &fs_info->free_chunk_space); 4839 } 4840 4841 /* 4842 * Once the device's size has been set to the new size, ensure all 4843 * in-memory chunks are synced to disk so that the loop below sees them 4844 * and relocates them accordingly. 4845 */ 4846 if (contains_pending_extent(device, &start, diff)) { 4847 mutex_unlock(&fs_info->chunk_mutex); 4848 ret = btrfs_commit_transaction(trans); 4849 if (ret) 4850 goto done; 4851 } else { 4852 mutex_unlock(&fs_info->chunk_mutex); 4853 btrfs_end_transaction(trans); 4854 } 4855 4856 again: 4857 key.objectid = device->devid; 4858 key.offset = (u64)-1; 4859 key.type = BTRFS_DEV_EXTENT_KEY; 4860 4861 do { 4862 mutex_lock(&fs_info->reclaim_bgs_lock); 4863 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4864 if (ret < 0) { 4865 mutex_unlock(&fs_info->reclaim_bgs_lock); 4866 goto done; 4867 } 4868 4869 ret = btrfs_previous_item(root, path, 0, key.type); 4870 if (ret) { 4871 mutex_unlock(&fs_info->reclaim_bgs_lock); 4872 if (ret < 0) 4873 goto done; 4874 ret = 0; 4875 btrfs_release_path(path); 4876 break; 4877 } 4878 4879 l = path->nodes[0]; 4880 slot = path->slots[0]; 4881 btrfs_item_key_to_cpu(l, &key, path->slots[0]); 4882 4883 if (key.objectid != device->devid) { 4884 mutex_unlock(&fs_info->reclaim_bgs_lock); 4885 btrfs_release_path(path); 4886 break; 4887 } 4888 4889 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 4890 length = btrfs_dev_extent_length(l, dev_extent); 4891 4892 if (key.offset + length <= new_size) { 4893 mutex_unlock(&fs_info->reclaim_bgs_lock); 4894 btrfs_release_path(path); 4895 break; 4896 } 4897 4898 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); 4899 btrfs_release_path(path); 4900 4901 /* 4902 * We may be relocating the only data chunk we have, 4903 * which could potentially end up with losing data's 4904 * raid profile, so lets allocate an empty one in 4905 * advance. 4906 */ 4907 ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset); 4908 if (ret < 0) { 4909 mutex_unlock(&fs_info->reclaim_bgs_lock); 4910 goto done; 4911 } 4912 4913 ret = btrfs_relocate_chunk(fs_info, chunk_offset); 4914 mutex_unlock(&fs_info->reclaim_bgs_lock); 4915 if (ret == -ENOSPC) { 4916 failed++; 4917 } else if (ret) { 4918 if (ret == -ETXTBSY) { 4919 btrfs_warn(fs_info, 4920 "could not shrink block group %llu due to active swapfile", 4921 chunk_offset); 4922 } 4923 goto done; 4924 } 4925 } while (key.offset-- > 0); 4926 4927 if (failed && !retried) { 4928 failed = 0; 4929 retried = true; 4930 goto again; 4931 } else if (failed && retried) { 4932 ret = -ENOSPC; 4933 goto done; 4934 } 4935 4936 /* Shrinking succeeded, else we would be at "done". */ 4937 trans = btrfs_start_transaction(root, 0); 4938 if (IS_ERR(trans)) { 4939 ret = PTR_ERR(trans); 4940 goto done; 4941 } 4942 4943 mutex_lock(&fs_info->chunk_mutex); 4944 /* Clear all state bits beyond the shrunk device size */ 4945 clear_extent_bits(&device->alloc_state, new_size, (u64)-1, 4946 CHUNK_STATE_MASK); 4947 4948 btrfs_device_set_disk_total_bytes(device, new_size); 4949 if (list_empty(&device->post_commit_list)) 4950 list_add_tail(&device->post_commit_list, 4951 &trans->transaction->dev_update_list); 4952 4953 WARN_ON(diff > old_total); 4954 btrfs_set_super_total_bytes(super_copy, 4955 round_down(old_total - diff, fs_info->sectorsize)); 4956 mutex_unlock(&fs_info->chunk_mutex); 4957 4958 btrfs_reserve_chunk_metadata(trans, false); 4959 /* Now btrfs_update_device() will change the on-disk size. */ 4960 ret = btrfs_update_device(trans, device); 4961 btrfs_trans_release_chunk_metadata(trans); 4962 if (ret < 0) { 4963 btrfs_abort_transaction(trans, ret); 4964 btrfs_end_transaction(trans); 4965 } else { 4966 ret = btrfs_commit_transaction(trans); 4967 } 4968 done: 4969 btrfs_free_path(path); 4970 if (ret) { 4971 mutex_lock(&fs_info->chunk_mutex); 4972 btrfs_device_set_total_bytes(device, old_size); 4973 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 4974 device->fs_devices->total_rw_bytes += diff; 4975 atomic64_add(diff, &fs_info->free_chunk_space); 4976 mutex_unlock(&fs_info->chunk_mutex); 4977 } 4978 return ret; 4979 } 4980 4981 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info, 4982 struct btrfs_key *key, 4983 struct btrfs_chunk *chunk, int item_size) 4984 { 4985 struct btrfs_super_block *super_copy = fs_info->super_copy; 4986 struct btrfs_disk_key disk_key; 4987 u32 array_size; 4988 u8 *ptr; 4989 4990 lockdep_assert_held(&fs_info->chunk_mutex); 4991 4992 array_size = btrfs_super_sys_array_size(super_copy); 4993 if (array_size + item_size + sizeof(disk_key) 4994 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) 4995 return -EFBIG; 4996 4997 ptr = super_copy->sys_chunk_array + array_size; 4998 btrfs_cpu_key_to_disk(&disk_key, key); 4999 memcpy(ptr, &disk_key, sizeof(disk_key)); 5000 ptr += sizeof(disk_key); 5001 memcpy(ptr, chunk, item_size); 5002 item_size += sizeof(disk_key); 5003 btrfs_set_super_sys_array_size(super_copy, array_size + item_size); 5004 5005 return 0; 5006 } 5007 5008 /* 5009 * sort the devices in descending order by max_avail, total_avail 5010 */ 5011 static int btrfs_cmp_device_info(const void *a, const void *b) 5012 { 5013 const struct btrfs_device_info *di_a = a; 5014 const struct btrfs_device_info *di_b = b; 5015 5016 if (di_a->max_avail > di_b->max_avail) 5017 return -1; 5018 if (di_a->max_avail < di_b->max_avail) 5019 return 1; 5020 if (di_a->total_avail > di_b->total_avail) 5021 return -1; 5022 if (di_a->total_avail < di_b->total_avail) 5023 return 1; 5024 return 0; 5025 } 5026 5027 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type) 5028 { 5029 if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK)) 5030 return; 5031 5032 btrfs_set_fs_incompat(info, RAID56); 5033 } 5034 5035 static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type) 5036 { 5037 if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4))) 5038 return; 5039 5040 btrfs_set_fs_incompat(info, RAID1C34); 5041 } 5042 5043 /* 5044 * Structure used internally for btrfs_create_chunk() function. 5045 * Wraps needed parameters. 5046 */ 5047 struct alloc_chunk_ctl { 5048 u64 start; 5049 u64 type; 5050 /* Total number of stripes to allocate */ 5051 int num_stripes; 5052 /* sub_stripes info for map */ 5053 int sub_stripes; 5054 /* Stripes per device */ 5055 int dev_stripes; 5056 /* Maximum number of devices to use */ 5057 int devs_max; 5058 /* Minimum number of devices to use */ 5059 int devs_min; 5060 /* ndevs has to be a multiple of this */ 5061 int devs_increment; 5062 /* Number of copies */ 5063 int ncopies; 5064 /* Number of stripes worth of bytes to store parity information */ 5065 int nparity; 5066 u64 max_stripe_size; 5067 u64 max_chunk_size; 5068 u64 dev_extent_min; 5069 u64 stripe_size; 5070 u64 chunk_size; 5071 int ndevs; 5072 }; 5073 5074 static void init_alloc_chunk_ctl_policy_regular( 5075 struct btrfs_fs_devices *fs_devices, 5076 struct alloc_chunk_ctl *ctl) 5077 { 5078 struct btrfs_space_info *space_info; 5079 5080 space_info = btrfs_find_space_info(fs_devices->fs_info, ctl->type); 5081 ASSERT(space_info); 5082 5083 ctl->max_chunk_size = READ_ONCE(space_info->chunk_size); 5084 ctl->max_stripe_size = ctl->max_chunk_size; 5085 5086 if (ctl->type & BTRFS_BLOCK_GROUP_SYSTEM) 5087 ctl->devs_max = min_t(int, ctl->devs_max, BTRFS_MAX_DEVS_SYS_CHUNK); 5088 5089 /* We don't want a chunk larger than 10% of writable space */ 5090 ctl->max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1), 5091 ctl->max_chunk_size); 5092 ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes; 5093 } 5094 5095 static void init_alloc_chunk_ctl_policy_zoned( 5096 struct btrfs_fs_devices *fs_devices, 5097 struct alloc_chunk_ctl *ctl) 5098 { 5099 u64 zone_size = fs_devices->fs_info->zone_size; 5100 u64 limit; 5101 int min_num_stripes = ctl->devs_min * ctl->dev_stripes; 5102 int min_data_stripes = (min_num_stripes - ctl->nparity) / ctl->ncopies; 5103 u64 min_chunk_size = min_data_stripes * zone_size; 5104 u64 type = ctl->type; 5105 5106 ctl->max_stripe_size = zone_size; 5107 if (type & BTRFS_BLOCK_GROUP_DATA) { 5108 ctl->max_chunk_size = round_down(BTRFS_MAX_DATA_CHUNK_SIZE, 5109 zone_size); 5110 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 5111 ctl->max_chunk_size = ctl->max_stripe_size; 5112 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 5113 ctl->max_chunk_size = 2 * ctl->max_stripe_size; 5114 ctl->devs_max = min_t(int, ctl->devs_max, 5115 BTRFS_MAX_DEVS_SYS_CHUNK); 5116 } else { 5117 BUG(); 5118 } 5119 5120 /* We don't want a chunk larger than 10% of writable space */ 5121 limit = max(round_down(div_factor(fs_devices->total_rw_bytes, 1), 5122 zone_size), 5123 min_chunk_size); 5124 ctl->max_chunk_size = min(limit, ctl->max_chunk_size); 5125 ctl->dev_extent_min = zone_size * ctl->dev_stripes; 5126 } 5127 5128 static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices, 5129 struct alloc_chunk_ctl *ctl) 5130 { 5131 int index = btrfs_bg_flags_to_raid_index(ctl->type); 5132 5133 ctl->sub_stripes = btrfs_raid_array[index].sub_stripes; 5134 ctl->dev_stripes = btrfs_raid_array[index].dev_stripes; 5135 ctl->devs_max = btrfs_raid_array[index].devs_max; 5136 if (!ctl->devs_max) 5137 ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info); 5138 ctl->devs_min = btrfs_raid_array[index].devs_min; 5139 ctl->devs_increment = btrfs_raid_array[index].devs_increment; 5140 ctl->ncopies = btrfs_raid_array[index].ncopies; 5141 ctl->nparity = btrfs_raid_array[index].nparity; 5142 ctl->ndevs = 0; 5143 5144 switch (fs_devices->chunk_alloc_policy) { 5145 case BTRFS_CHUNK_ALLOC_REGULAR: 5146 init_alloc_chunk_ctl_policy_regular(fs_devices, ctl); 5147 break; 5148 case BTRFS_CHUNK_ALLOC_ZONED: 5149 init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl); 5150 break; 5151 default: 5152 BUG(); 5153 } 5154 } 5155 5156 static int gather_device_info(struct btrfs_fs_devices *fs_devices, 5157 struct alloc_chunk_ctl *ctl, 5158 struct btrfs_device_info *devices_info) 5159 { 5160 struct btrfs_fs_info *info = fs_devices->fs_info; 5161 struct btrfs_device *device; 5162 u64 total_avail; 5163 u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes; 5164 int ret; 5165 int ndevs = 0; 5166 u64 max_avail; 5167 u64 dev_offset; 5168 5169 /* 5170 * in the first pass through the devices list, we gather information 5171 * about the available holes on each device. 5172 */ 5173 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { 5174 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 5175 WARN(1, KERN_ERR 5176 "BTRFS: read-only device in alloc_list\n"); 5177 continue; 5178 } 5179 5180 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 5181 &device->dev_state) || 5182 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 5183 continue; 5184 5185 if (device->total_bytes > device->bytes_used) 5186 total_avail = device->total_bytes - device->bytes_used; 5187 else 5188 total_avail = 0; 5189 5190 /* If there is no space on this device, skip it. */ 5191 if (total_avail < ctl->dev_extent_min) 5192 continue; 5193 5194 ret = find_free_dev_extent(device, dev_extent_want, &dev_offset, 5195 &max_avail); 5196 if (ret && ret != -ENOSPC) 5197 return ret; 5198 5199 if (ret == 0) 5200 max_avail = dev_extent_want; 5201 5202 if (max_avail < ctl->dev_extent_min) { 5203 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 5204 btrfs_debug(info, 5205 "%s: devid %llu has no free space, have=%llu want=%llu", 5206 __func__, device->devid, max_avail, 5207 ctl->dev_extent_min); 5208 continue; 5209 } 5210 5211 if (ndevs == fs_devices->rw_devices) { 5212 WARN(1, "%s: found more than %llu devices\n", 5213 __func__, fs_devices->rw_devices); 5214 break; 5215 } 5216 devices_info[ndevs].dev_offset = dev_offset; 5217 devices_info[ndevs].max_avail = max_avail; 5218 devices_info[ndevs].total_avail = total_avail; 5219 devices_info[ndevs].dev = device; 5220 ++ndevs; 5221 } 5222 ctl->ndevs = ndevs; 5223 5224 /* 5225 * now sort the devices by hole size / available space 5226 */ 5227 sort(devices_info, ndevs, sizeof(struct btrfs_device_info), 5228 btrfs_cmp_device_info, NULL); 5229 5230 return 0; 5231 } 5232 5233 static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl, 5234 struct btrfs_device_info *devices_info) 5235 { 5236 /* Number of stripes that count for block group size */ 5237 int data_stripes; 5238 5239 /* 5240 * The primary goal is to maximize the number of stripes, so use as 5241 * many devices as possible, even if the stripes are not maximum sized. 5242 * 5243 * The DUP profile stores more than one stripe per device, the 5244 * max_avail is the total size so we have to adjust. 5245 */ 5246 ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail, 5247 ctl->dev_stripes); 5248 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5249 5250 /* This will have to be fixed for RAID1 and RAID10 over more drives */ 5251 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5252 5253 /* 5254 * Use the number of data stripes to figure out how big this chunk is 5255 * really going to be in terms of logical address space, and compare 5256 * that answer with the max chunk size. If it's higher, we try to 5257 * reduce stripe_size. 5258 */ 5259 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) { 5260 /* 5261 * Reduce stripe_size, round it up to a 16MB boundary again and 5262 * then use it, unless it ends up being even bigger than the 5263 * previous value we had already. 5264 */ 5265 ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size, 5266 data_stripes), SZ_16M), 5267 ctl->stripe_size); 5268 } 5269 5270 /* Align to BTRFS_STRIPE_LEN */ 5271 ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN); 5272 ctl->chunk_size = ctl->stripe_size * data_stripes; 5273 5274 return 0; 5275 } 5276 5277 static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl, 5278 struct btrfs_device_info *devices_info) 5279 { 5280 u64 zone_size = devices_info[0].dev->zone_info->zone_size; 5281 /* Number of stripes that count for block group size */ 5282 int data_stripes; 5283 5284 /* 5285 * It should hold because: 5286 * dev_extent_min == dev_extent_want == zone_size * dev_stripes 5287 */ 5288 ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min); 5289 5290 ctl->stripe_size = zone_size; 5291 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5292 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5293 5294 /* stripe_size is fixed in zoned filesysmte. Reduce ndevs instead. */ 5295 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) { 5296 ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies, 5297 ctl->stripe_size) + ctl->nparity, 5298 ctl->dev_stripes); 5299 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5300 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5301 ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size); 5302 } 5303 5304 ctl->chunk_size = ctl->stripe_size * data_stripes; 5305 5306 return 0; 5307 } 5308 5309 static int decide_stripe_size(struct btrfs_fs_devices *fs_devices, 5310 struct alloc_chunk_ctl *ctl, 5311 struct btrfs_device_info *devices_info) 5312 { 5313 struct btrfs_fs_info *info = fs_devices->fs_info; 5314 5315 /* 5316 * Round down to number of usable stripes, devs_increment can be any 5317 * number so we can't use round_down() that requires power of 2, while 5318 * rounddown is safe. 5319 */ 5320 ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment); 5321 5322 if (ctl->ndevs < ctl->devs_min) { 5323 if (btrfs_test_opt(info, ENOSPC_DEBUG)) { 5324 btrfs_debug(info, 5325 "%s: not enough devices with free space: have=%d minimum required=%d", 5326 __func__, ctl->ndevs, ctl->devs_min); 5327 } 5328 return -ENOSPC; 5329 } 5330 5331 ctl->ndevs = min(ctl->ndevs, ctl->devs_max); 5332 5333 switch (fs_devices->chunk_alloc_policy) { 5334 case BTRFS_CHUNK_ALLOC_REGULAR: 5335 return decide_stripe_size_regular(ctl, devices_info); 5336 case BTRFS_CHUNK_ALLOC_ZONED: 5337 return decide_stripe_size_zoned(ctl, devices_info); 5338 default: 5339 BUG(); 5340 } 5341 } 5342 5343 static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans, 5344 struct alloc_chunk_ctl *ctl, 5345 struct btrfs_device_info *devices_info) 5346 { 5347 struct btrfs_fs_info *info = trans->fs_info; 5348 struct map_lookup *map = NULL; 5349 struct extent_map_tree *em_tree; 5350 struct btrfs_block_group *block_group; 5351 struct extent_map *em; 5352 u64 start = ctl->start; 5353 u64 type = ctl->type; 5354 int ret; 5355 int i; 5356 int j; 5357 5358 map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS); 5359 if (!map) 5360 return ERR_PTR(-ENOMEM); 5361 map->num_stripes = ctl->num_stripes; 5362 5363 for (i = 0; i < ctl->ndevs; ++i) { 5364 for (j = 0; j < ctl->dev_stripes; ++j) { 5365 int s = i * ctl->dev_stripes + j; 5366 map->stripes[s].dev = devices_info[i].dev; 5367 map->stripes[s].physical = devices_info[i].dev_offset + 5368 j * ctl->stripe_size; 5369 } 5370 } 5371 map->stripe_len = BTRFS_STRIPE_LEN; 5372 map->io_align = BTRFS_STRIPE_LEN; 5373 map->io_width = BTRFS_STRIPE_LEN; 5374 map->type = type; 5375 map->sub_stripes = ctl->sub_stripes; 5376 5377 trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size); 5378 5379 em = alloc_extent_map(); 5380 if (!em) { 5381 kfree(map); 5382 return ERR_PTR(-ENOMEM); 5383 } 5384 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 5385 em->map_lookup = map; 5386 em->start = start; 5387 em->len = ctl->chunk_size; 5388 em->block_start = 0; 5389 em->block_len = em->len; 5390 em->orig_block_len = ctl->stripe_size; 5391 5392 em_tree = &info->mapping_tree; 5393 write_lock(&em_tree->lock); 5394 ret = add_extent_mapping(em_tree, em, 0); 5395 if (ret) { 5396 write_unlock(&em_tree->lock); 5397 free_extent_map(em); 5398 return ERR_PTR(ret); 5399 } 5400 write_unlock(&em_tree->lock); 5401 5402 block_group = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size); 5403 if (IS_ERR(block_group)) 5404 goto error_del_extent; 5405 5406 for (i = 0; i < map->num_stripes; i++) { 5407 struct btrfs_device *dev = map->stripes[i].dev; 5408 5409 btrfs_device_set_bytes_used(dev, 5410 dev->bytes_used + ctl->stripe_size); 5411 if (list_empty(&dev->post_commit_list)) 5412 list_add_tail(&dev->post_commit_list, 5413 &trans->transaction->dev_update_list); 5414 } 5415 5416 atomic64_sub(ctl->stripe_size * map->num_stripes, 5417 &info->free_chunk_space); 5418 5419 free_extent_map(em); 5420 check_raid56_incompat_flag(info, type); 5421 check_raid1c34_incompat_flag(info, type); 5422 5423 return block_group; 5424 5425 error_del_extent: 5426 write_lock(&em_tree->lock); 5427 remove_extent_mapping(em_tree, em); 5428 write_unlock(&em_tree->lock); 5429 5430 /* One for our allocation */ 5431 free_extent_map(em); 5432 /* One for the tree reference */ 5433 free_extent_map(em); 5434 5435 return block_group; 5436 } 5437 5438 struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans, 5439 u64 type) 5440 { 5441 struct btrfs_fs_info *info = trans->fs_info; 5442 struct btrfs_fs_devices *fs_devices = info->fs_devices; 5443 struct btrfs_device_info *devices_info = NULL; 5444 struct alloc_chunk_ctl ctl; 5445 struct btrfs_block_group *block_group; 5446 int ret; 5447 5448 lockdep_assert_held(&info->chunk_mutex); 5449 5450 if (!alloc_profile_is_valid(type, 0)) { 5451 ASSERT(0); 5452 return ERR_PTR(-EINVAL); 5453 } 5454 5455 if (list_empty(&fs_devices->alloc_list)) { 5456 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 5457 btrfs_debug(info, "%s: no writable device", __func__); 5458 return ERR_PTR(-ENOSPC); 5459 } 5460 5461 if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { 5462 btrfs_err(info, "invalid chunk type 0x%llx requested", type); 5463 ASSERT(0); 5464 return ERR_PTR(-EINVAL); 5465 } 5466 5467 ctl.start = find_next_chunk(info); 5468 ctl.type = type; 5469 init_alloc_chunk_ctl(fs_devices, &ctl); 5470 5471 devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info), 5472 GFP_NOFS); 5473 if (!devices_info) 5474 return ERR_PTR(-ENOMEM); 5475 5476 ret = gather_device_info(fs_devices, &ctl, devices_info); 5477 if (ret < 0) { 5478 block_group = ERR_PTR(ret); 5479 goto out; 5480 } 5481 5482 ret = decide_stripe_size(fs_devices, &ctl, devices_info); 5483 if (ret < 0) { 5484 block_group = ERR_PTR(ret); 5485 goto out; 5486 } 5487 5488 block_group = create_chunk(trans, &ctl, devices_info); 5489 5490 out: 5491 kfree(devices_info); 5492 return block_group; 5493 } 5494 5495 /* 5496 * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to the 5497 * phase 1 of chunk allocation. It belongs to phase 2 only when allocating system 5498 * chunks. 5499 * 5500 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 5501 * phases. 5502 */ 5503 int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans, 5504 struct btrfs_block_group *bg) 5505 { 5506 struct btrfs_fs_info *fs_info = trans->fs_info; 5507 struct btrfs_root *chunk_root = fs_info->chunk_root; 5508 struct btrfs_key key; 5509 struct btrfs_chunk *chunk; 5510 struct btrfs_stripe *stripe; 5511 struct extent_map *em; 5512 struct map_lookup *map; 5513 size_t item_size; 5514 int i; 5515 int ret; 5516 5517 /* 5518 * We take the chunk_mutex for 2 reasons: 5519 * 5520 * 1) Updates and insertions in the chunk btree must be done while holding 5521 * the chunk_mutex, as well as updating the system chunk array in the 5522 * superblock. See the comment on top of btrfs_chunk_alloc() for the 5523 * details; 5524 * 5525 * 2) To prevent races with the final phase of a device replace operation 5526 * that replaces the device object associated with the map's stripes, 5527 * because the device object's id can change at any time during that 5528 * final phase of the device replace operation 5529 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the 5530 * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID, 5531 * which would cause a failure when updating the device item, which does 5532 * not exists, or persisting a stripe of the chunk item with such ID. 5533 * Here we can't use the device_list_mutex because our caller already 5534 * has locked the chunk_mutex, and the final phase of device replace 5535 * acquires both mutexes - first the device_list_mutex and then the 5536 * chunk_mutex. Using any of those two mutexes protects us from a 5537 * concurrent device replace. 5538 */ 5539 lockdep_assert_held(&fs_info->chunk_mutex); 5540 5541 em = btrfs_get_chunk_map(fs_info, bg->start, bg->length); 5542 if (IS_ERR(em)) { 5543 ret = PTR_ERR(em); 5544 btrfs_abort_transaction(trans, ret); 5545 return ret; 5546 } 5547 5548 map = em->map_lookup; 5549 item_size = btrfs_chunk_item_size(map->num_stripes); 5550 5551 chunk = kzalloc(item_size, GFP_NOFS); 5552 if (!chunk) { 5553 ret = -ENOMEM; 5554 btrfs_abort_transaction(trans, ret); 5555 goto out; 5556 } 5557 5558 for (i = 0; i < map->num_stripes; i++) { 5559 struct btrfs_device *device = map->stripes[i].dev; 5560 5561 ret = btrfs_update_device(trans, device); 5562 if (ret) 5563 goto out; 5564 } 5565 5566 stripe = &chunk->stripe; 5567 for (i = 0; i < map->num_stripes; i++) { 5568 struct btrfs_device *device = map->stripes[i].dev; 5569 const u64 dev_offset = map->stripes[i].physical; 5570 5571 btrfs_set_stack_stripe_devid(stripe, device->devid); 5572 btrfs_set_stack_stripe_offset(stripe, dev_offset); 5573 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE); 5574 stripe++; 5575 } 5576 5577 btrfs_set_stack_chunk_length(chunk, bg->length); 5578 btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID); 5579 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len); 5580 btrfs_set_stack_chunk_type(chunk, map->type); 5581 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes); 5582 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len); 5583 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len); 5584 btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize); 5585 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes); 5586 5587 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 5588 key.type = BTRFS_CHUNK_ITEM_KEY; 5589 key.offset = bg->start; 5590 5591 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size); 5592 if (ret) 5593 goto out; 5594 5595 bg->chunk_item_inserted = 1; 5596 5597 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 5598 ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size); 5599 if (ret) 5600 goto out; 5601 } 5602 5603 out: 5604 kfree(chunk); 5605 free_extent_map(em); 5606 return ret; 5607 } 5608 5609 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans) 5610 { 5611 struct btrfs_fs_info *fs_info = trans->fs_info; 5612 u64 alloc_profile; 5613 struct btrfs_block_group *meta_bg; 5614 struct btrfs_block_group *sys_bg; 5615 5616 /* 5617 * When adding a new device for sprouting, the seed device is read-only 5618 * so we must first allocate a metadata and a system chunk. But before 5619 * adding the block group items to the extent, device and chunk btrees, 5620 * we must first: 5621 * 5622 * 1) Create both chunks without doing any changes to the btrees, as 5623 * otherwise we would get -ENOSPC since the block groups from the 5624 * seed device are read-only; 5625 * 5626 * 2) Add the device item for the new sprout device - finishing the setup 5627 * of a new block group requires updating the device item in the chunk 5628 * btree, so it must exist when we attempt to do it. The previous step 5629 * ensures this does not fail with -ENOSPC. 5630 * 5631 * After that we can add the block group items to their btrees: 5632 * update existing device item in the chunk btree, add a new block group 5633 * item to the extent btree, add a new chunk item to the chunk btree and 5634 * finally add the new device extent items to the devices btree. 5635 */ 5636 5637 alloc_profile = btrfs_metadata_alloc_profile(fs_info); 5638 meta_bg = btrfs_create_chunk(trans, alloc_profile); 5639 if (IS_ERR(meta_bg)) 5640 return PTR_ERR(meta_bg); 5641 5642 alloc_profile = btrfs_system_alloc_profile(fs_info); 5643 sys_bg = btrfs_create_chunk(trans, alloc_profile); 5644 if (IS_ERR(sys_bg)) 5645 return PTR_ERR(sys_bg); 5646 5647 return 0; 5648 } 5649 5650 static inline int btrfs_chunk_max_errors(struct map_lookup *map) 5651 { 5652 const int index = btrfs_bg_flags_to_raid_index(map->type); 5653 5654 return btrfs_raid_array[index].tolerated_failures; 5655 } 5656 5657 bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset) 5658 { 5659 struct extent_map *em; 5660 struct map_lookup *map; 5661 int miss_ndevs = 0; 5662 int i; 5663 bool ret = true; 5664 5665 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 5666 if (IS_ERR(em)) 5667 return false; 5668 5669 map = em->map_lookup; 5670 for (i = 0; i < map->num_stripes; i++) { 5671 if (test_bit(BTRFS_DEV_STATE_MISSING, 5672 &map->stripes[i].dev->dev_state)) { 5673 miss_ndevs++; 5674 continue; 5675 } 5676 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, 5677 &map->stripes[i].dev->dev_state)) { 5678 ret = false; 5679 goto end; 5680 } 5681 } 5682 5683 /* 5684 * If the number of missing devices is larger than max errors, we can 5685 * not write the data into that chunk successfully. 5686 */ 5687 if (miss_ndevs > btrfs_chunk_max_errors(map)) 5688 ret = false; 5689 end: 5690 free_extent_map(em); 5691 return ret; 5692 } 5693 5694 void btrfs_mapping_tree_free(struct extent_map_tree *tree) 5695 { 5696 struct extent_map *em; 5697 5698 while (1) { 5699 write_lock(&tree->lock); 5700 em = lookup_extent_mapping(tree, 0, (u64)-1); 5701 if (em) 5702 remove_extent_mapping(tree, em); 5703 write_unlock(&tree->lock); 5704 if (!em) 5705 break; 5706 /* once for us */ 5707 free_extent_map(em); 5708 /* once for the tree */ 5709 free_extent_map(em); 5710 } 5711 } 5712 5713 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5714 { 5715 struct extent_map *em; 5716 struct map_lookup *map; 5717 enum btrfs_raid_types index; 5718 int ret = 1; 5719 5720 em = btrfs_get_chunk_map(fs_info, logical, len); 5721 if (IS_ERR(em)) 5722 /* 5723 * We could return errors for these cases, but that could get 5724 * ugly and we'd probably do the same thing which is just not do 5725 * anything else and exit, so return 1 so the callers don't try 5726 * to use other copies. 5727 */ 5728 return 1; 5729 5730 map = em->map_lookup; 5731 index = btrfs_bg_flags_to_raid_index(map->type); 5732 5733 /* Non-RAID56, use their ncopies from btrfs_raid_array. */ 5734 if (!(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)) 5735 ret = btrfs_raid_array[index].ncopies; 5736 else if (map->type & BTRFS_BLOCK_GROUP_RAID5) 5737 ret = 2; 5738 else if (map->type & BTRFS_BLOCK_GROUP_RAID6) 5739 /* 5740 * There could be two corrupted data stripes, we need 5741 * to loop retry in order to rebuild the correct data. 5742 * 5743 * Fail a stripe at a time on every retry except the 5744 * stripe under reconstruction. 5745 */ 5746 ret = map->num_stripes; 5747 free_extent_map(em); 5748 5749 down_read(&fs_info->dev_replace.rwsem); 5750 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) && 5751 fs_info->dev_replace.tgtdev) 5752 ret++; 5753 up_read(&fs_info->dev_replace.rwsem); 5754 5755 return ret; 5756 } 5757 5758 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info, 5759 u64 logical) 5760 { 5761 struct extent_map *em; 5762 struct map_lookup *map; 5763 unsigned long len = fs_info->sectorsize; 5764 5765 if (!btrfs_fs_incompat(fs_info, RAID56)) 5766 return len; 5767 5768 em = btrfs_get_chunk_map(fs_info, logical, len); 5769 5770 if (!WARN_ON(IS_ERR(em))) { 5771 map = em->map_lookup; 5772 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 5773 len = map->stripe_len * nr_data_stripes(map); 5774 free_extent_map(em); 5775 } 5776 return len; 5777 } 5778 5779 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5780 { 5781 struct extent_map *em; 5782 struct map_lookup *map; 5783 int ret = 0; 5784 5785 if (!btrfs_fs_incompat(fs_info, RAID56)) 5786 return 0; 5787 5788 em = btrfs_get_chunk_map(fs_info, logical, len); 5789 5790 if(!WARN_ON(IS_ERR(em))) { 5791 map = em->map_lookup; 5792 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 5793 ret = 1; 5794 free_extent_map(em); 5795 } 5796 return ret; 5797 } 5798 5799 static int find_live_mirror(struct btrfs_fs_info *fs_info, 5800 struct map_lookup *map, int first, 5801 int dev_replace_is_ongoing) 5802 { 5803 int i; 5804 int num_stripes; 5805 int preferred_mirror; 5806 int tolerance; 5807 struct btrfs_device *srcdev; 5808 5809 ASSERT((map->type & 5810 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10))); 5811 5812 if (map->type & BTRFS_BLOCK_GROUP_RAID10) 5813 num_stripes = map->sub_stripes; 5814 else 5815 num_stripes = map->num_stripes; 5816 5817 switch (fs_info->fs_devices->read_policy) { 5818 default: 5819 /* Shouldn't happen, just warn and use pid instead of failing */ 5820 btrfs_warn_rl(fs_info, 5821 "unknown read_policy type %u, reset to pid", 5822 fs_info->fs_devices->read_policy); 5823 fs_info->fs_devices->read_policy = BTRFS_READ_POLICY_PID; 5824 fallthrough; 5825 case BTRFS_READ_POLICY_PID: 5826 preferred_mirror = first + (current->pid % num_stripes); 5827 break; 5828 } 5829 5830 if (dev_replace_is_ongoing && 5831 fs_info->dev_replace.cont_reading_from_srcdev_mode == 5832 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID) 5833 srcdev = fs_info->dev_replace.srcdev; 5834 else 5835 srcdev = NULL; 5836 5837 /* 5838 * try to avoid the drive that is the source drive for a 5839 * dev-replace procedure, only choose it if no other non-missing 5840 * mirror is available 5841 */ 5842 for (tolerance = 0; tolerance < 2; tolerance++) { 5843 if (map->stripes[preferred_mirror].dev->bdev && 5844 (tolerance || map->stripes[preferred_mirror].dev != srcdev)) 5845 return preferred_mirror; 5846 for (i = first; i < first + num_stripes; i++) { 5847 if (map->stripes[i].dev->bdev && 5848 (tolerance || map->stripes[i].dev != srcdev)) 5849 return i; 5850 } 5851 } 5852 5853 /* we couldn't find one that doesn't fail. Just return something 5854 * and the io error handling code will clean up eventually 5855 */ 5856 return preferred_mirror; 5857 } 5858 5859 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */ 5860 static void sort_parity_stripes(struct btrfs_io_context *bioc, int num_stripes) 5861 { 5862 int i; 5863 int again = 1; 5864 5865 while (again) { 5866 again = 0; 5867 for (i = 0; i < num_stripes - 1; i++) { 5868 /* Swap if parity is on a smaller index */ 5869 if (bioc->raid_map[i] > bioc->raid_map[i + 1]) { 5870 swap(bioc->stripes[i], bioc->stripes[i + 1]); 5871 swap(bioc->raid_map[i], bioc->raid_map[i + 1]); 5872 again = 1; 5873 } 5874 } 5875 } 5876 } 5877 5878 static struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info, 5879 int total_stripes, 5880 int real_stripes) 5881 { 5882 struct btrfs_io_context *bioc = kzalloc( 5883 /* The size of btrfs_io_context */ 5884 sizeof(struct btrfs_io_context) + 5885 /* Plus the variable array for the stripes */ 5886 sizeof(struct btrfs_io_stripe) * (total_stripes) + 5887 /* Plus the variable array for the tgt dev */ 5888 sizeof(int) * (real_stripes) + 5889 /* 5890 * Plus the raid_map, which includes both the tgt dev 5891 * and the stripes. 5892 */ 5893 sizeof(u64) * (total_stripes), 5894 GFP_NOFS|__GFP_NOFAIL); 5895 5896 atomic_set(&bioc->error, 0); 5897 refcount_set(&bioc->refs, 1); 5898 5899 bioc->fs_info = fs_info; 5900 bioc->tgtdev_map = (int *)(bioc->stripes + total_stripes); 5901 bioc->raid_map = (u64 *)(bioc->tgtdev_map + real_stripes); 5902 5903 return bioc; 5904 } 5905 5906 void btrfs_get_bioc(struct btrfs_io_context *bioc) 5907 { 5908 WARN_ON(!refcount_read(&bioc->refs)); 5909 refcount_inc(&bioc->refs); 5910 } 5911 5912 void btrfs_put_bioc(struct btrfs_io_context *bioc) 5913 { 5914 if (!bioc) 5915 return; 5916 if (refcount_dec_and_test(&bioc->refs)) 5917 kfree(bioc); 5918 } 5919 5920 /* 5921 * Please note that, discard won't be sent to target device of device 5922 * replace. 5923 */ 5924 struct btrfs_discard_stripe *btrfs_map_discard(struct btrfs_fs_info *fs_info, 5925 u64 logical, u64 *length_ret, 5926 u32 *num_stripes) 5927 { 5928 struct extent_map *em; 5929 struct map_lookup *map; 5930 struct btrfs_discard_stripe *stripes; 5931 u64 length = *length_ret; 5932 u64 offset; 5933 u64 stripe_nr; 5934 u64 stripe_nr_end; 5935 u64 stripe_end_offset; 5936 u64 stripe_cnt; 5937 u64 stripe_len; 5938 u64 stripe_offset; 5939 u32 stripe_index; 5940 u32 factor = 0; 5941 u32 sub_stripes = 0; 5942 u64 stripes_per_dev = 0; 5943 u32 remaining_stripes = 0; 5944 u32 last_stripe = 0; 5945 int ret; 5946 int i; 5947 5948 em = btrfs_get_chunk_map(fs_info, logical, length); 5949 if (IS_ERR(em)) 5950 return ERR_CAST(em); 5951 5952 map = em->map_lookup; 5953 5954 /* we don't discard raid56 yet */ 5955 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 5956 ret = -EOPNOTSUPP; 5957 goto out_free_map; 5958 } 5959 5960 offset = logical - em->start; 5961 length = min_t(u64, em->start + em->len - logical, length); 5962 *length_ret = length; 5963 5964 stripe_len = map->stripe_len; 5965 /* 5966 * stripe_nr counts the total number of stripes we have to stride 5967 * to get to this block 5968 */ 5969 stripe_nr = div64_u64(offset, stripe_len); 5970 5971 /* stripe_offset is the offset of this block in its stripe */ 5972 stripe_offset = offset - stripe_nr * stripe_len; 5973 5974 stripe_nr_end = round_up(offset + length, map->stripe_len); 5975 stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len); 5976 stripe_cnt = stripe_nr_end - stripe_nr; 5977 stripe_end_offset = stripe_nr_end * map->stripe_len - 5978 (offset + length); 5979 /* 5980 * after this, stripe_nr is the number of stripes on this 5981 * device we have to walk to find the data, and stripe_index is 5982 * the number of our device in the stripe array 5983 */ 5984 *num_stripes = 1; 5985 stripe_index = 0; 5986 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 5987 BTRFS_BLOCK_GROUP_RAID10)) { 5988 if (map->type & BTRFS_BLOCK_GROUP_RAID0) 5989 sub_stripes = 1; 5990 else 5991 sub_stripes = map->sub_stripes; 5992 5993 factor = map->num_stripes / sub_stripes; 5994 *num_stripes = min_t(u64, map->num_stripes, 5995 sub_stripes * stripe_cnt); 5996 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); 5997 stripe_index *= sub_stripes; 5998 stripes_per_dev = div_u64_rem(stripe_cnt, factor, 5999 &remaining_stripes); 6000 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe); 6001 last_stripe *= sub_stripes; 6002 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK | 6003 BTRFS_BLOCK_GROUP_DUP)) { 6004 *num_stripes = map->num_stripes; 6005 } else { 6006 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6007 &stripe_index); 6008 } 6009 6010 stripes = kcalloc(*num_stripes, sizeof(*stripes), GFP_NOFS); 6011 if (!stripes) { 6012 ret = -ENOMEM; 6013 goto out_free_map; 6014 } 6015 6016 for (i = 0; i < *num_stripes; i++) { 6017 stripes[i].physical = 6018 map->stripes[stripe_index].physical + 6019 stripe_offset + stripe_nr * map->stripe_len; 6020 stripes[i].dev = map->stripes[stripe_index].dev; 6021 6022 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 6023 BTRFS_BLOCK_GROUP_RAID10)) { 6024 stripes[i].length = stripes_per_dev * map->stripe_len; 6025 6026 if (i / sub_stripes < remaining_stripes) 6027 stripes[i].length += map->stripe_len; 6028 6029 /* 6030 * Special for the first stripe and 6031 * the last stripe: 6032 * 6033 * |-------|...|-------| 6034 * |----------| 6035 * off end_off 6036 */ 6037 if (i < sub_stripes) 6038 stripes[i].length -= stripe_offset; 6039 6040 if (stripe_index >= last_stripe && 6041 stripe_index <= (last_stripe + 6042 sub_stripes - 1)) 6043 stripes[i].length -= stripe_end_offset; 6044 6045 if (i == sub_stripes - 1) 6046 stripe_offset = 0; 6047 } else { 6048 stripes[i].length = length; 6049 } 6050 6051 stripe_index++; 6052 if (stripe_index == map->num_stripes) { 6053 stripe_index = 0; 6054 stripe_nr++; 6055 } 6056 } 6057 6058 free_extent_map(em); 6059 return stripes; 6060 out_free_map: 6061 free_extent_map(em); 6062 return ERR_PTR(ret); 6063 } 6064 6065 /* 6066 * In dev-replace case, for repair case (that's the only case where the mirror 6067 * is selected explicitly when calling btrfs_map_block), blocks left of the 6068 * left cursor can also be read from the target drive. 6069 * 6070 * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the 6071 * array of stripes. 6072 * For READ, it also needs to be supported using the same mirror number. 6073 * 6074 * If the requested block is not left of the left cursor, EIO is returned. This 6075 * can happen because btrfs_num_copies() returns one more in the dev-replace 6076 * case. 6077 */ 6078 static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info, 6079 u64 logical, u64 length, 6080 u64 srcdev_devid, int *mirror_num, 6081 u64 *physical) 6082 { 6083 struct btrfs_io_context *bioc = NULL; 6084 int num_stripes; 6085 int index_srcdev = 0; 6086 int found = 0; 6087 u64 physical_of_found = 0; 6088 int i; 6089 int ret = 0; 6090 6091 ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, 6092 logical, &length, &bioc, 0, 0); 6093 if (ret) { 6094 ASSERT(bioc == NULL); 6095 return ret; 6096 } 6097 6098 num_stripes = bioc->num_stripes; 6099 if (*mirror_num > num_stripes) { 6100 /* 6101 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror, 6102 * that means that the requested area is not left of the left 6103 * cursor 6104 */ 6105 btrfs_put_bioc(bioc); 6106 return -EIO; 6107 } 6108 6109 /* 6110 * process the rest of the function using the mirror_num of the source 6111 * drive. Therefore look it up first. At the end, patch the device 6112 * pointer to the one of the target drive. 6113 */ 6114 for (i = 0; i < num_stripes; i++) { 6115 if (bioc->stripes[i].dev->devid != srcdev_devid) 6116 continue; 6117 6118 /* 6119 * In case of DUP, in order to keep it simple, only add the 6120 * mirror with the lowest physical address 6121 */ 6122 if (found && 6123 physical_of_found <= bioc->stripes[i].physical) 6124 continue; 6125 6126 index_srcdev = i; 6127 found = 1; 6128 physical_of_found = bioc->stripes[i].physical; 6129 } 6130 6131 btrfs_put_bioc(bioc); 6132 6133 ASSERT(found); 6134 if (!found) 6135 return -EIO; 6136 6137 *mirror_num = index_srcdev + 1; 6138 *physical = physical_of_found; 6139 return ret; 6140 } 6141 6142 static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical) 6143 { 6144 struct btrfs_block_group *cache; 6145 bool ret; 6146 6147 /* Non zoned filesystem does not use "to_copy" flag */ 6148 if (!btrfs_is_zoned(fs_info)) 6149 return false; 6150 6151 cache = btrfs_lookup_block_group(fs_info, logical); 6152 6153 spin_lock(&cache->lock); 6154 ret = cache->to_copy; 6155 spin_unlock(&cache->lock); 6156 6157 btrfs_put_block_group(cache); 6158 return ret; 6159 } 6160 6161 static void handle_ops_on_dev_replace(enum btrfs_map_op op, 6162 struct btrfs_io_context **bioc_ret, 6163 struct btrfs_dev_replace *dev_replace, 6164 u64 logical, 6165 int *num_stripes_ret, int *max_errors_ret) 6166 { 6167 struct btrfs_io_context *bioc = *bioc_ret; 6168 u64 srcdev_devid = dev_replace->srcdev->devid; 6169 int tgtdev_indexes = 0; 6170 int num_stripes = *num_stripes_ret; 6171 int max_errors = *max_errors_ret; 6172 int i; 6173 6174 if (op == BTRFS_MAP_WRITE) { 6175 int index_where_to_add; 6176 6177 /* 6178 * A block group which have "to_copy" set will eventually 6179 * copied by dev-replace process. We can avoid cloning IO here. 6180 */ 6181 if (is_block_group_to_copy(dev_replace->srcdev->fs_info, logical)) 6182 return; 6183 6184 /* 6185 * duplicate the write operations while the dev replace 6186 * procedure is running. Since the copying of the old disk to 6187 * the new disk takes place at run time while the filesystem is 6188 * mounted writable, the regular write operations to the old 6189 * disk have to be duplicated to go to the new disk as well. 6190 * 6191 * Note that device->missing is handled by the caller, and that 6192 * the write to the old disk is already set up in the stripes 6193 * array. 6194 */ 6195 index_where_to_add = num_stripes; 6196 for (i = 0; i < num_stripes; i++) { 6197 if (bioc->stripes[i].dev->devid == srcdev_devid) { 6198 /* write to new disk, too */ 6199 struct btrfs_io_stripe *new = 6200 bioc->stripes + index_where_to_add; 6201 struct btrfs_io_stripe *old = 6202 bioc->stripes + i; 6203 6204 new->physical = old->physical; 6205 new->dev = dev_replace->tgtdev; 6206 bioc->tgtdev_map[i] = index_where_to_add; 6207 index_where_to_add++; 6208 max_errors++; 6209 tgtdev_indexes++; 6210 } 6211 } 6212 num_stripes = index_where_to_add; 6213 } else if (op == BTRFS_MAP_GET_READ_MIRRORS) { 6214 int index_srcdev = 0; 6215 int found = 0; 6216 u64 physical_of_found = 0; 6217 6218 /* 6219 * During the dev-replace procedure, the target drive can also 6220 * be used to read data in case it is needed to repair a corrupt 6221 * block elsewhere. This is possible if the requested area is 6222 * left of the left cursor. In this area, the target drive is a 6223 * full copy of the source drive. 6224 */ 6225 for (i = 0; i < num_stripes; i++) { 6226 if (bioc->stripes[i].dev->devid == srcdev_devid) { 6227 /* 6228 * In case of DUP, in order to keep it simple, 6229 * only add the mirror with the lowest physical 6230 * address 6231 */ 6232 if (found && 6233 physical_of_found <= bioc->stripes[i].physical) 6234 continue; 6235 index_srcdev = i; 6236 found = 1; 6237 physical_of_found = bioc->stripes[i].physical; 6238 } 6239 } 6240 if (found) { 6241 struct btrfs_io_stripe *tgtdev_stripe = 6242 bioc->stripes + num_stripes; 6243 6244 tgtdev_stripe->physical = physical_of_found; 6245 tgtdev_stripe->dev = dev_replace->tgtdev; 6246 bioc->tgtdev_map[index_srcdev] = num_stripes; 6247 6248 tgtdev_indexes++; 6249 num_stripes++; 6250 } 6251 } 6252 6253 *num_stripes_ret = num_stripes; 6254 *max_errors_ret = max_errors; 6255 bioc->num_tgtdevs = tgtdev_indexes; 6256 *bioc_ret = bioc; 6257 } 6258 6259 static bool need_full_stripe(enum btrfs_map_op op) 6260 { 6261 return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS); 6262 } 6263 6264 /* 6265 * Calculate the geometry of a particular (address, len) tuple. This 6266 * information is used to calculate how big a particular bio can get before it 6267 * straddles a stripe. 6268 * 6269 * @fs_info: the filesystem 6270 * @em: mapping containing the logical extent 6271 * @op: type of operation - write or read 6272 * @logical: address that we want to figure out the geometry of 6273 * @io_geom: pointer used to return values 6274 * 6275 * Returns < 0 in case a chunk for the given logical address cannot be found, 6276 * usually shouldn't happen unless @logical is corrupted, 0 otherwise. 6277 */ 6278 int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *em, 6279 enum btrfs_map_op op, u64 logical, 6280 struct btrfs_io_geometry *io_geom) 6281 { 6282 struct map_lookup *map; 6283 u64 len; 6284 u64 offset; 6285 u64 stripe_offset; 6286 u64 stripe_nr; 6287 u32 stripe_len; 6288 u64 raid56_full_stripe_start = (u64)-1; 6289 int data_stripes; 6290 6291 ASSERT(op != BTRFS_MAP_DISCARD); 6292 6293 map = em->map_lookup; 6294 /* Offset of this logical address in the chunk */ 6295 offset = logical - em->start; 6296 /* Len of a stripe in a chunk */ 6297 stripe_len = map->stripe_len; 6298 /* 6299 * Stripe_nr is where this block falls in 6300 * stripe_offset is the offset of this block in its stripe. 6301 */ 6302 stripe_nr = div64_u64_rem(offset, stripe_len, &stripe_offset); 6303 ASSERT(stripe_offset < U32_MAX); 6304 6305 data_stripes = nr_data_stripes(map); 6306 6307 /* Only stripe based profiles needs to check against stripe length. */ 6308 if (map->type & BTRFS_BLOCK_GROUP_STRIPE_MASK) { 6309 u64 max_len = stripe_len - stripe_offset; 6310 6311 /* 6312 * In case of raid56, we need to know the stripe aligned start 6313 */ 6314 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 6315 unsigned long full_stripe_len = stripe_len * data_stripes; 6316 raid56_full_stripe_start = offset; 6317 6318 /* 6319 * Allow a write of a full stripe, but make sure we 6320 * don't allow straddling of stripes 6321 */ 6322 raid56_full_stripe_start = div64_u64(raid56_full_stripe_start, 6323 full_stripe_len); 6324 raid56_full_stripe_start *= full_stripe_len; 6325 6326 /* 6327 * For writes to RAID[56], allow a full stripeset across 6328 * all disks. For other RAID types and for RAID[56] 6329 * reads, just allow a single stripe (on a single disk). 6330 */ 6331 if (op == BTRFS_MAP_WRITE) { 6332 max_len = stripe_len * data_stripes - 6333 (offset - raid56_full_stripe_start); 6334 } 6335 } 6336 len = min_t(u64, em->len - offset, max_len); 6337 } else { 6338 len = em->len - offset; 6339 } 6340 6341 io_geom->len = len; 6342 io_geom->offset = offset; 6343 io_geom->stripe_len = stripe_len; 6344 io_geom->stripe_nr = stripe_nr; 6345 io_geom->stripe_offset = stripe_offset; 6346 io_geom->raid56_stripe_offset = raid56_full_stripe_start; 6347 6348 return 0; 6349 } 6350 6351 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, 6352 enum btrfs_map_op op, 6353 u64 logical, u64 *length, 6354 struct btrfs_io_context **bioc_ret, 6355 int mirror_num, int need_raid_map) 6356 { 6357 struct extent_map *em; 6358 struct map_lookup *map; 6359 u64 stripe_offset; 6360 u64 stripe_nr; 6361 u64 stripe_len; 6362 u32 stripe_index; 6363 int data_stripes; 6364 int i; 6365 int ret = 0; 6366 int num_stripes; 6367 int max_errors = 0; 6368 int tgtdev_indexes = 0; 6369 struct btrfs_io_context *bioc = NULL; 6370 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; 6371 int dev_replace_is_ongoing = 0; 6372 int num_alloc_stripes; 6373 int patch_the_first_stripe_for_dev_replace = 0; 6374 u64 physical_to_patch_in_first_stripe = 0; 6375 u64 raid56_full_stripe_start = (u64)-1; 6376 struct btrfs_io_geometry geom; 6377 6378 ASSERT(bioc_ret); 6379 ASSERT(op != BTRFS_MAP_DISCARD); 6380 6381 em = btrfs_get_chunk_map(fs_info, logical, *length); 6382 ASSERT(!IS_ERR(em)); 6383 6384 ret = btrfs_get_io_geometry(fs_info, em, op, logical, &geom); 6385 if (ret < 0) 6386 return ret; 6387 6388 map = em->map_lookup; 6389 6390 *length = geom.len; 6391 stripe_len = geom.stripe_len; 6392 stripe_nr = geom.stripe_nr; 6393 stripe_offset = geom.stripe_offset; 6394 raid56_full_stripe_start = geom.raid56_stripe_offset; 6395 data_stripes = nr_data_stripes(map); 6396 6397 down_read(&dev_replace->rwsem); 6398 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace); 6399 /* 6400 * Hold the semaphore for read during the whole operation, write is 6401 * requested at commit time but must wait. 6402 */ 6403 if (!dev_replace_is_ongoing) 6404 up_read(&dev_replace->rwsem); 6405 6406 if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 && 6407 !need_full_stripe(op) && dev_replace->tgtdev != NULL) { 6408 ret = get_extra_mirror_from_replace(fs_info, logical, *length, 6409 dev_replace->srcdev->devid, 6410 &mirror_num, 6411 &physical_to_patch_in_first_stripe); 6412 if (ret) 6413 goto out; 6414 else 6415 patch_the_first_stripe_for_dev_replace = 1; 6416 } else if (mirror_num > map->num_stripes) { 6417 mirror_num = 0; 6418 } 6419 6420 num_stripes = 1; 6421 stripe_index = 0; 6422 if (map->type & BTRFS_BLOCK_GROUP_RAID0) { 6423 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6424 &stripe_index); 6425 if (!need_full_stripe(op)) 6426 mirror_num = 1; 6427 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) { 6428 if (need_full_stripe(op)) 6429 num_stripes = map->num_stripes; 6430 else if (mirror_num) 6431 stripe_index = mirror_num - 1; 6432 else { 6433 stripe_index = find_live_mirror(fs_info, map, 0, 6434 dev_replace_is_ongoing); 6435 mirror_num = stripe_index + 1; 6436 } 6437 6438 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { 6439 if (need_full_stripe(op)) { 6440 num_stripes = map->num_stripes; 6441 } else if (mirror_num) { 6442 stripe_index = mirror_num - 1; 6443 } else { 6444 mirror_num = 1; 6445 } 6446 6447 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { 6448 u32 factor = map->num_stripes / map->sub_stripes; 6449 6450 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); 6451 stripe_index *= map->sub_stripes; 6452 6453 if (need_full_stripe(op)) 6454 num_stripes = map->sub_stripes; 6455 else if (mirror_num) 6456 stripe_index += mirror_num - 1; 6457 else { 6458 int old_stripe_index = stripe_index; 6459 stripe_index = find_live_mirror(fs_info, map, 6460 stripe_index, 6461 dev_replace_is_ongoing); 6462 mirror_num = stripe_index - old_stripe_index + 1; 6463 } 6464 6465 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 6466 ASSERT(map->stripe_len == BTRFS_STRIPE_LEN); 6467 if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) { 6468 /* push stripe_nr back to the start of the full stripe */ 6469 stripe_nr = div64_u64(raid56_full_stripe_start, 6470 stripe_len * data_stripes); 6471 6472 /* RAID[56] write or recovery. Return all stripes */ 6473 num_stripes = map->num_stripes; 6474 max_errors = btrfs_chunk_max_errors(map); 6475 6476 /* Return the length to the full stripe end */ 6477 *length = min(logical + *length, 6478 raid56_full_stripe_start + em->start + 6479 data_stripes * stripe_len) - logical; 6480 stripe_index = 0; 6481 stripe_offset = 0; 6482 } else { 6483 /* 6484 * Mirror #0 or #1 means the original data block. 6485 * Mirror #2 is RAID5 parity block. 6486 * Mirror #3 is RAID6 Q block. 6487 */ 6488 stripe_nr = div_u64_rem(stripe_nr, 6489 data_stripes, &stripe_index); 6490 if (mirror_num > 1) 6491 stripe_index = data_stripes + mirror_num - 2; 6492 6493 /* We distribute the parity blocks across stripes */ 6494 div_u64_rem(stripe_nr + stripe_index, map->num_stripes, 6495 &stripe_index); 6496 if (!need_full_stripe(op) && mirror_num <= 1) 6497 mirror_num = 1; 6498 } 6499 } else { 6500 /* 6501 * after this, stripe_nr is the number of stripes on this 6502 * device we have to walk to find the data, and stripe_index is 6503 * the number of our device in the stripe array 6504 */ 6505 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6506 &stripe_index); 6507 mirror_num = stripe_index + 1; 6508 } 6509 if (stripe_index >= map->num_stripes) { 6510 btrfs_crit(fs_info, 6511 "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u", 6512 stripe_index, map->num_stripes); 6513 ret = -EINVAL; 6514 goto out; 6515 } 6516 6517 num_alloc_stripes = num_stripes; 6518 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) { 6519 if (op == BTRFS_MAP_WRITE) 6520 num_alloc_stripes <<= 1; 6521 if (op == BTRFS_MAP_GET_READ_MIRRORS) 6522 num_alloc_stripes++; 6523 tgtdev_indexes = num_stripes; 6524 } 6525 6526 bioc = alloc_btrfs_io_context(fs_info, num_alloc_stripes, tgtdev_indexes); 6527 if (!bioc) { 6528 ret = -ENOMEM; 6529 goto out; 6530 } 6531 6532 for (i = 0; i < num_stripes; i++) { 6533 bioc->stripes[i].physical = map->stripes[stripe_index].physical + 6534 stripe_offset + stripe_nr * map->stripe_len; 6535 bioc->stripes[i].dev = map->stripes[stripe_index].dev; 6536 stripe_index++; 6537 } 6538 6539 /* Build raid_map */ 6540 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map && 6541 (need_full_stripe(op) || mirror_num > 1)) { 6542 u64 tmp; 6543 unsigned rot; 6544 6545 /* Work out the disk rotation on this stripe-set */ 6546 div_u64_rem(stripe_nr, num_stripes, &rot); 6547 6548 /* Fill in the logical address of each stripe */ 6549 tmp = stripe_nr * data_stripes; 6550 for (i = 0; i < data_stripes; i++) 6551 bioc->raid_map[(i + rot) % num_stripes] = 6552 em->start + (tmp + i) * map->stripe_len; 6553 6554 bioc->raid_map[(i + rot) % map->num_stripes] = RAID5_P_STRIPE; 6555 if (map->type & BTRFS_BLOCK_GROUP_RAID6) 6556 bioc->raid_map[(i + rot + 1) % num_stripes] = 6557 RAID6_Q_STRIPE; 6558 6559 sort_parity_stripes(bioc, num_stripes); 6560 } 6561 6562 if (need_full_stripe(op)) 6563 max_errors = btrfs_chunk_max_errors(map); 6564 6565 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL && 6566 need_full_stripe(op)) { 6567 handle_ops_on_dev_replace(op, &bioc, dev_replace, logical, 6568 &num_stripes, &max_errors); 6569 } 6570 6571 *bioc_ret = bioc; 6572 bioc->map_type = map->type; 6573 bioc->num_stripes = num_stripes; 6574 bioc->max_errors = max_errors; 6575 bioc->mirror_num = mirror_num; 6576 6577 /* 6578 * this is the case that REQ_READ && dev_replace_is_ongoing && 6579 * mirror_num == num_stripes + 1 && dev_replace target drive is 6580 * available as a mirror 6581 */ 6582 if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) { 6583 WARN_ON(num_stripes > 1); 6584 bioc->stripes[0].dev = dev_replace->tgtdev; 6585 bioc->stripes[0].physical = physical_to_patch_in_first_stripe; 6586 bioc->mirror_num = map->num_stripes + 1; 6587 } 6588 out: 6589 if (dev_replace_is_ongoing) { 6590 lockdep_assert_held(&dev_replace->rwsem); 6591 /* Unlock and let waiting writers proceed */ 6592 up_read(&dev_replace->rwsem); 6593 } 6594 free_extent_map(em); 6595 return ret; 6596 } 6597 6598 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, 6599 u64 logical, u64 *length, 6600 struct btrfs_io_context **bioc_ret, int mirror_num) 6601 { 6602 return __btrfs_map_block(fs_info, op, logical, length, bioc_ret, 6603 mirror_num, 0); 6604 } 6605 6606 /* For Scrub/replace */ 6607 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, 6608 u64 logical, u64 *length, 6609 struct btrfs_io_context **bioc_ret) 6610 { 6611 return __btrfs_map_block(fs_info, op, logical, length, bioc_ret, 0, 1); 6612 } 6613 6614 static struct workqueue_struct *btrfs_end_io_wq(struct btrfs_io_context *bioc) 6615 { 6616 if (bioc->orig_bio->bi_opf & REQ_META) 6617 return bioc->fs_info->endio_meta_workers; 6618 return bioc->fs_info->endio_workers; 6619 } 6620 6621 static void btrfs_end_bio_work(struct work_struct *work) 6622 { 6623 struct btrfs_bio *bbio = 6624 container_of(work, struct btrfs_bio, end_io_work); 6625 6626 bio_endio(&bbio->bio); 6627 } 6628 6629 static void btrfs_end_bioc(struct btrfs_io_context *bioc, bool async) 6630 { 6631 struct bio *orig_bio = bioc->orig_bio; 6632 struct btrfs_bio *bbio = btrfs_bio(orig_bio); 6633 6634 bbio->mirror_num = bioc->mirror_num; 6635 orig_bio->bi_private = bioc->private; 6636 orig_bio->bi_end_io = bioc->end_io; 6637 6638 /* 6639 * Only send an error to the higher layers if it is beyond the tolerance 6640 * threshold. 6641 */ 6642 if (atomic_read(&bioc->error) > bioc->max_errors) 6643 orig_bio->bi_status = BLK_STS_IOERR; 6644 else 6645 orig_bio->bi_status = BLK_STS_OK; 6646 6647 if (btrfs_op(orig_bio) == BTRFS_MAP_READ && async) { 6648 INIT_WORK(&bbio->end_io_work, btrfs_end_bio_work); 6649 queue_work(btrfs_end_io_wq(bioc), &bbio->end_io_work); 6650 } else { 6651 bio_endio(orig_bio); 6652 } 6653 6654 btrfs_put_bioc(bioc); 6655 } 6656 6657 static void btrfs_end_bio(struct bio *bio) 6658 { 6659 struct btrfs_io_stripe *stripe = bio->bi_private; 6660 struct btrfs_io_context *bioc = stripe->bioc; 6661 6662 if (bio->bi_status) { 6663 atomic_inc(&bioc->error); 6664 if (bio->bi_status == BLK_STS_IOERR || 6665 bio->bi_status == BLK_STS_TARGET) { 6666 if (btrfs_op(bio) == BTRFS_MAP_WRITE) 6667 btrfs_dev_stat_inc_and_print(stripe->dev, 6668 BTRFS_DEV_STAT_WRITE_ERRS); 6669 else if (!(bio->bi_opf & REQ_RAHEAD)) 6670 btrfs_dev_stat_inc_and_print(stripe->dev, 6671 BTRFS_DEV_STAT_READ_ERRS); 6672 if (bio->bi_opf & REQ_PREFLUSH) 6673 btrfs_dev_stat_inc_and_print(stripe->dev, 6674 BTRFS_DEV_STAT_FLUSH_ERRS); 6675 } 6676 } 6677 6678 if (bio != bioc->orig_bio) 6679 bio_put(bio); 6680 6681 btrfs_bio_counter_dec(bioc->fs_info); 6682 if (atomic_dec_and_test(&bioc->stripes_pending)) 6683 btrfs_end_bioc(bioc, true); 6684 } 6685 6686 static void submit_stripe_bio(struct btrfs_io_context *bioc, 6687 struct bio *orig_bio, int dev_nr, bool clone) 6688 { 6689 struct btrfs_fs_info *fs_info = bioc->fs_info; 6690 struct btrfs_device *dev = bioc->stripes[dev_nr].dev; 6691 u64 physical = bioc->stripes[dev_nr].physical; 6692 struct bio *bio; 6693 6694 if (!dev || !dev->bdev || 6695 test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) || 6696 (btrfs_op(orig_bio) == BTRFS_MAP_WRITE && 6697 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) { 6698 atomic_inc(&bioc->error); 6699 if (atomic_dec_and_test(&bioc->stripes_pending)) 6700 btrfs_end_bioc(bioc, false); 6701 return; 6702 } 6703 6704 if (clone) { 6705 bio = bio_alloc_clone(dev->bdev, orig_bio, GFP_NOFS, &fs_bio_set); 6706 } else { 6707 bio = orig_bio; 6708 bio_set_dev(bio, dev->bdev); 6709 btrfs_bio(bio)->device = dev; 6710 } 6711 6712 bioc->stripes[dev_nr].bioc = bioc; 6713 bio->bi_private = &bioc->stripes[dev_nr]; 6714 bio->bi_end_io = btrfs_end_bio; 6715 bio->bi_iter.bi_sector = physical >> 9; 6716 /* 6717 * For zone append writing, bi_sector must point the beginning of the 6718 * zone 6719 */ 6720 if (bio_op(bio) == REQ_OP_ZONE_APPEND) { 6721 if (btrfs_dev_is_sequential(dev, physical)) { 6722 u64 zone_start = round_down(physical, fs_info->zone_size); 6723 6724 bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT; 6725 } else { 6726 bio->bi_opf &= ~REQ_OP_ZONE_APPEND; 6727 bio->bi_opf |= REQ_OP_WRITE; 6728 } 6729 } 6730 btrfs_debug_in_rcu(fs_info, 6731 "%s: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u", 6732 __func__, bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector, 6733 (unsigned long)dev->bdev->bd_dev, rcu_str_deref(dev->name), 6734 dev->devid, bio->bi_iter.bi_size); 6735 6736 btrfs_bio_counter_inc_noblocked(fs_info); 6737 6738 btrfsic_check_bio(bio); 6739 submit_bio(bio); 6740 } 6741 6742 void btrfs_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio, int mirror_num) 6743 { 6744 u64 logical = bio->bi_iter.bi_sector << 9; 6745 u64 length = bio->bi_iter.bi_size; 6746 u64 map_length = length; 6747 int ret; 6748 int dev_nr; 6749 int total_devs; 6750 struct btrfs_io_context *bioc = NULL; 6751 6752 btrfs_bio_counter_inc_blocked(fs_info); 6753 ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical, 6754 &map_length, &bioc, mirror_num, 1); 6755 if (ret) { 6756 btrfs_bio_counter_dec(fs_info); 6757 bio->bi_status = errno_to_blk_status(ret); 6758 bio_endio(bio); 6759 return; 6760 } 6761 6762 total_devs = bioc->num_stripes; 6763 bioc->orig_bio = bio; 6764 bioc->private = bio->bi_private; 6765 bioc->end_io = bio->bi_end_io; 6766 atomic_set(&bioc->stripes_pending, total_devs); 6767 6768 if ((bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) && 6769 ((btrfs_op(bio) == BTRFS_MAP_WRITE) || (mirror_num > 1))) { 6770 if (btrfs_op(bio) == BTRFS_MAP_WRITE) 6771 raid56_parity_write(bio, bioc); 6772 else 6773 raid56_parity_recover(bio, bioc, mirror_num, true); 6774 return; 6775 } 6776 6777 if (map_length < length) { 6778 btrfs_crit(fs_info, 6779 "mapping failed logical %llu bio len %llu len %llu", 6780 logical, length, map_length); 6781 BUG(); 6782 } 6783 6784 for (dev_nr = 0; dev_nr < total_devs; dev_nr++) { 6785 const bool should_clone = (dev_nr < total_devs - 1); 6786 6787 submit_stripe_bio(bioc, bio, dev_nr, should_clone); 6788 } 6789 btrfs_bio_counter_dec(fs_info); 6790 } 6791 6792 static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args, 6793 const struct btrfs_fs_devices *fs_devices) 6794 { 6795 if (args->fsid == NULL) 6796 return true; 6797 if (memcmp(fs_devices->metadata_uuid, args->fsid, BTRFS_FSID_SIZE) == 0) 6798 return true; 6799 return false; 6800 } 6801 6802 static bool dev_args_match_device(const struct btrfs_dev_lookup_args *args, 6803 const struct btrfs_device *device) 6804 { 6805 ASSERT((args->devid != (u64)-1) || args->missing); 6806 6807 if ((args->devid != (u64)-1) && device->devid != args->devid) 6808 return false; 6809 if (args->uuid && memcmp(device->uuid, args->uuid, BTRFS_UUID_SIZE) != 0) 6810 return false; 6811 if (!args->missing) 6812 return true; 6813 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) && 6814 !device->bdev) 6815 return true; 6816 return false; 6817 } 6818 6819 /* 6820 * Find a device specified by @devid or @uuid in the list of @fs_devices, or 6821 * return NULL. 6822 * 6823 * If devid and uuid are both specified, the match must be exact, otherwise 6824 * only devid is used. 6825 */ 6826 struct btrfs_device *btrfs_find_device(const struct btrfs_fs_devices *fs_devices, 6827 const struct btrfs_dev_lookup_args *args) 6828 { 6829 struct btrfs_device *device; 6830 struct btrfs_fs_devices *seed_devs; 6831 6832 if (dev_args_match_fs_devices(args, fs_devices)) { 6833 list_for_each_entry(device, &fs_devices->devices, dev_list) { 6834 if (dev_args_match_device(args, device)) 6835 return device; 6836 } 6837 } 6838 6839 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 6840 if (!dev_args_match_fs_devices(args, seed_devs)) 6841 continue; 6842 list_for_each_entry(device, &seed_devs->devices, dev_list) { 6843 if (dev_args_match_device(args, device)) 6844 return device; 6845 } 6846 } 6847 6848 return NULL; 6849 } 6850 6851 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices, 6852 u64 devid, u8 *dev_uuid) 6853 { 6854 struct btrfs_device *device; 6855 unsigned int nofs_flag; 6856 6857 /* 6858 * We call this under the chunk_mutex, so we want to use NOFS for this 6859 * allocation, however we don't want to change btrfs_alloc_device() to 6860 * always do NOFS because we use it in a lot of other GFP_KERNEL safe 6861 * places. 6862 */ 6863 nofs_flag = memalloc_nofs_save(); 6864 device = btrfs_alloc_device(NULL, &devid, dev_uuid); 6865 memalloc_nofs_restore(nofs_flag); 6866 if (IS_ERR(device)) 6867 return device; 6868 6869 list_add(&device->dev_list, &fs_devices->devices); 6870 device->fs_devices = fs_devices; 6871 fs_devices->num_devices++; 6872 6873 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 6874 fs_devices->missing_devices++; 6875 6876 return device; 6877 } 6878 6879 /** 6880 * btrfs_alloc_device - allocate struct btrfs_device 6881 * @fs_info: used only for generating a new devid, can be NULL if 6882 * devid is provided (i.e. @devid != NULL). 6883 * @devid: a pointer to devid for this device. If NULL a new devid 6884 * is generated. 6885 * @uuid: a pointer to UUID for this device. If NULL a new UUID 6886 * is generated. 6887 * 6888 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR() 6889 * on error. Returned struct is not linked onto any lists and must be 6890 * destroyed with btrfs_free_device. 6891 */ 6892 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info, 6893 const u64 *devid, 6894 const u8 *uuid) 6895 { 6896 struct btrfs_device *dev; 6897 u64 tmp; 6898 6899 if (WARN_ON(!devid && !fs_info)) 6900 return ERR_PTR(-EINVAL); 6901 6902 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 6903 if (!dev) 6904 return ERR_PTR(-ENOMEM); 6905 6906 INIT_LIST_HEAD(&dev->dev_list); 6907 INIT_LIST_HEAD(&dev->dev_alloc_list); 6908 INIT_LIST_HEAD(&dev->post_commit_list); 6909 6910 atomic_set(&dev->dev_stats_ccnt, 0); 6911 btrfs_device_data_ordered_init(dev); 6912 extent_io_tree_init(fs_info, &dev->alloc_state, 6913 IO_TREE_DEVICE_ALLOC_STATE, NULL); 6914 6915 if (devid) 6916 tmp = *devid; 6917 else { 6918 int ret; 6919 6920 ret = find_next_devid(fs_info, &tmp); 6921 if (ret) { 6922 btrfs_free_device(dev); 6923 return ERR_PTR(ret); 6924 } 6925 } 6926 dev->devid = tmp; 6927 6928 if (uuid) 6929 memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE); 6930 else 6931 generate_random_uuid(dev->uuid); 6932 6933 return dev; 6934 } 6935 6936 static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info, 6937 u64 devid, u8 *uuid, bool error) 6938 { 6939 if (error) 6940 btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing", 6941 devid, uuid); 6942 else 6943 btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing", 6944 devid, uuid); 6945 } 6946 6947 u64 btrfs_calc_stripe_length(const struct extent_map *em) 6948 { 6949 const struct map_lookup *map = em->map_lookup; 6950 const int data_stripes = calc_data_stripes(map->type, map->num_stripes); 6951 6952 return div_u64(em->len, data_stripes); 6953 } 6954 6955 #if BITS_PER_LONG == 32 6956 /* 6957 * Due to page cache limit, metadata beyond BTRFS_32BIT_MAX_FILE_SIZE 6958 * can't be accessed on 32bit systems. 6959 * 6960 * This function do mount time check to reject the fs if it already has 6961 * metadata chunk beyond that limit. 6962 */ 6963 static int check_32bit_meta_chunk(struct btrfs_fs_info *fs_info, 6964 u64 logical, u64 length, u64 type) 6965 { 6966 if (!(type & BTRFS_BLOCK_GROUP_METADATA)) 6967 return 0; 6968 6969 if (logical + length < MAX_LFS_FILESIZE) 6970 return 0; 6971 6972 btrfs_err_32bit_limit(fs_info); 6973 return -EOVERFLOW; 6974 } 6975 6976 /* 6977 * This is to give early warning for any metadata chunk reaching 6978 * BTRFS_32BIT_EARLY_WARN_THRESHOLD. 6979 * Although we can still access the metadata, it's not going to be possible 6980 * once the limit is reached. 6981 */ 6982 static void warn_32bit_meta_chunk(struct btrfs_fs_info *fs_info, 6983 u64 logical, u64 length, u64 type) 6984 { 6985 if (!(type & BTRFS_BLOCK_GROUP_METADATA)) 6986 return; 6987 6988 if (logical + length < BTRFS_32BIT_EARLY_WARN_THRESHOLD) 6989 return; 6990 6991 btrfs_warn_32bit_limit(fs_info); 6992 } 6993 #endif 6994 6995 static struct btrfs_device *handle_missing_device(struct btrfs_fs_info *fs_info, 6996 u64 devid, u8 *uuid) 6997 { 6998 struct btrfs_device *dev; 6999 7000 if (!btrfs_test_opt(fs_info, DEGRADED)) { 7001 btrfs_report_missing_device(fs_info, devid, uuid, true); 7002 return ERR_PTR(-ENOENT); 7003 } 7004 7005 dev = add_missing_dev(fs_info->fs_devices, devid, uuid); 7006 if (IS_ERR(dev)) { 7007 btrfs_err(fs_info, "failed to init missing device %llu: %ld", 7008 devid, PTR_ERR(dev)); 7009 return dev; 7010 } 7011 btrfs_report_missing_device(fs_info, devid, uuid, false); 7012 7013 return dev; 7014 } 7015 7016 static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf, 7017 struct btrfs_chunk *chunk) 7018 { 7019 BTRFS_DEV_LOOKUP_ARGS(args); 7020 struct btrfs_fs_info *fs_info = leaf->fs_info; 7021 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 7022 struct map_lookup *map; 7023 struct extent_map *em; 7024 u64 logical; 7025 u64 length; 7026 u64 devid; 7027 u64 type; 7028 u8 uuid[BTRFS_UUID_SIZE]; 7029 int num_stripes; 7030 int ret; 7031 int i; 7032 7033 logical = key->offset; 7034 length = btrfs_chunk_length(leaf, chunk); 7035 type = btrfs_chunk_type(leaf, chunk); 7036 num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 7037 7038 #if BITS_PER_LONG == 32 7039 ret = check_32bit_meta_chunk(fs_info, logical, length, type); 7040 if (ret < 0) 7041 return ret; 7042 warn_32bit_meta_chunk(fs_info, logical, length, type); 7043 #endif 7044 7045 /* 7046 * Only need to verify chunk item if we're reading from sys chunk array, 7047 * as chunk item in tree block is already verified by tree-checker. 7048 */ 7049 if (leaf->start == BTRFS_SUPER_INFO_OFFSET) { 7050 ret = btrfs_check_chunk_valid(leaf, chunk, logical); 7051 if (ret) 7052 return ret; 7053 } 7054 7055 read_lock(&map_tree->lock); 7056 em = lookup_extent_mapping(map_tree, logical, 1); 7057 read_unlock(&map_tree->lock); 7058 7059 /* already mapped? */ 7060 if (em && em->start <= logical && em->start + em->len > logical) { 7061 free_extent_map(em); 7062 return 0; 7063 } else if (em) { 7064 free_extent_map(em); 7065 } 7066 7067 em = alloc_extent_map(); 7068 if (!em) 7069 return -ENOMEM; 7070 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); 7071 if (!map) { 7072 free_extent_map(em); 7073 return -ENOMEM; 7074 } 7075 7076 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 7077 em->map_lookup = map; 7078 em->start = logical; 7079 em->len = length; 7080 em->orig_start = 0; 7081 em->block_start = 0; 7082 em->block_len = em->len; 7083 7084 map->num_stripes = num_stripes; 7085 map->io_width = btrfs_chunk_io_width(leaf, chunk); 7086 map->io_align = btrfs_chunk_io_align(leaf, chunk); 7087 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk); 7088 map->type = type; 7089 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); 7090 map->verified_stripes = 0; 7091 em->orig_block_len = btrfs_calc_stripe_length(em); 7092 for (i = 0; i < num_stripes; i++) { 7093 map->stripes[i].physical = 7094 btrfs_stripe_offset_nr(leaf, chunk, i); 7095 devid = btrfs_stripe_devid_nr(leaf, chunk, i); 7096 args.devid = devid; 7097 read_extent_buffer(leaf, uuid, (unsigned long) 7098 btrfs_stripe_dev_uuid_nr(chunk, i), 7099 BTRFS_UUID_SIZE); 7100 args.uuid = uuid; 7101 map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, &args); 7102 if (!map->stripes[i].dev) { 7103 map->stripes[i].dev = handle_missing_device(fs_info, 7104 devid, uuid); 7105 if (IS_ERR(map->stripes[i].dev)) { 7106 free_extent_map(em); 7107 return PTR_ERR(map->stripes[i].dev); 7108 } 7109 } 7110 7111 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 7112 &(map->stripes[i].dev->dev_state)); 7113 } 7114 7115 write_lock(&map_tree->lock); 7116 ret = add_extent_mapping(map_tree, em, 0); 7117 write_unlock(&map_tree->lock); 7118 if (ret < 0) { 7119 btrfs_err(fs_info, 7120 "failed to add chunk map, start=%llu len=%llu: %d", 7121 em->start, em->len, ret); 7122 } 7123 free_extent_map(em); 7124 7125 return ret; 7126 } 7127 7128 static void fill_device_from_item(struct extent_buffer *leaf, 7129 struct btrfs_dev_item *dev_item, 7130 struct btrfs_device *device) 7131 { 7132 unsigned long ptr; 7133 7134 device->devid = btrfs_device_id(leaf, dev_item); 7135 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item); 7136 device->total_bytes = device->disk_total_bytes; 7137 device->commit_total_bytes = device->disk_total_bytes; 7138 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item); 7139 device->commit_bytes_used = device->bytes_used; 7140 device->type = btrfs_device_type(leaf, dev_item); 7141 device->io_align = btrfs_device_io_align(leaf, dev_item); 7142 device->io_width = btrfs_device_io_width(leaf, dev_item); 7143 device->sector_size = btrfs_device_sector_size(leaf, dev_item); 7144 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID); 7145 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 7146 7147 ptr = btrfs_device_uuid(dev_item); 7148 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 7149 } 7150 7151 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info, 7152 u8 *fsid) 7153 { 7154 struct btrfs_fs_devices *fs_devices; 7155 int ret; 7156 7157 lockdep_assert_held(&uuid_mutex); 7158 ASSERT(fsid); 7159 7160 /* This will match only for multi-device seed fs */ 7161 list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list) 7162 if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE)) 7163 return fs_devices; 7164 7165 7166 fs_devices = find_fsid(fsid, NULL); 7167 if (!fs_devices) { 7168 if (!btrfs_test_opt(fs_info, DEGRADED)) 7169 return ERR_PTR(-ENOENT); 7170 7171 fs_devices = alloc_fs_devices(fsid, NULL); 7172 if (IS_ERR(fs_devices)) 7173 return fs_devices; 7174 7175 fs_devices->seeding = true; 7176 fs_devices->opened = 1; 7177 return fs_devices; 7178 } 7179 7180 /* 7181 * Upon first call for a seed fs fsid, just create a private copy of the 7182 * respective fs_devices and anchor it at fs_info->fs_devices->seed_list 7183 */ 7184 fs_devices = clone_fs_devices(fs_devices); 7185 if (IS_ERR(fs_devices)) 7186 return fs_devices; 7187 7188 ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder); 7189 if (ret) { 7190 free_fs_devices(fs_devices); 7191 return ERR_PTR(ret); 7192 } 7193 7194 if (!fs_devices->seeding) { 7195 close_fs_devices(fs_devices); 7196 free_fs_devices(fs_devices); 7197 return ERR_PTR(-EINVAL); 7198 } 7199 7200 list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list); 7201 7202 return fs_devices; 7203 } 7204 7205 static int read_one_dev(struct extent_buffer *leaf, 7206 struct btrfs_dev_item *dev_item) 7207 { 7208 BTRFS_DEV_LOOKUP_ARGS(args); 7209 struct btrfs_fs_info *fs_info = leaf->fs_info; 7210 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7211 struct btrfs_device *device; 7212 u64 devid; 7213 int ret; 7214 u8 fs_uuid[BTRFS_FSID_SIZE]; 7215 u8 dev_uuid[BTRFS_UUID_SIZE]; 7216 7217 devid = btrfs_device_id(leaf, dev_item); 7218 args.devid = devid; 7219 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 7220 BTRFS_UUID_SIZE); 7221 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 7222 BTRFS_FSID_SIZE); 7223 args.uuid = dev_uuid; 7224 args.fsid = fs_uuid; 7225 7226 if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) { 7227 fs_devices = open_seed_devices(fs_info, fs_uuid); 7228 if (IS_ERR(fs_devices)) 7229 return PTR_ERR(fs_devices); 7230 } 7231 7232 device = btrfs_find_device(fs_info->fs_devices, &args); 7233 if (!device) { 7234 if (!btrfs_test_opt(fs_info, DEGRADED)) { 7235 btrfs_report_missing_device(fs_info, devid, 7236 dev_uuid, true); 7237 return -ENOENT; 7238 } 7239 7240 device = add_missing_dev(fs_devices, devid, dev_uuid); 7241 if (IS_ERR(device)) { 7242 btrfs_err(fs_info, 7243 "failed to add missing dev %llu: %ld", 7244 devid, PTR_ERR(device)); 7245 return PTR_ERR(device); 7246 } 7247 btrfs_report_missing_device(fs_info, devid, dev_uuid, false); 7248 } else { 7249 if (!device->bdev) { 7250 if (!btrfs_test_opt(fs_info, DEGRADED)) { 7251 btrfs_report_missing_device(fs_info, 7252 devid, dev_uuid, true); 7253 return -ENOENT; 7254 } 7255 btrfs_report_missing_device(fs_info, devid, 7256 dev_uuid, false); 7257 } 7258 7259 if (!device->bdev && 7260 !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { 7261 /* 7262 * this happens when a device that was properly setup 7263 * in the device info lists suddenly goes bad. 7264 * device->bdev is NULL, and so we have to set 7265 * device->missing to one here 7266 */ 7267 device->fs_devices->missing_devices++; 7268 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 7269 } 7270 7271 /* Move the device to its own fs_devices */ 7272 if (device->fs_devices != fs_devices) { 7273 ASSERT(test_bit(BTRFS_DEV_STATE_MISSING, 7274 &device->dev_state)); 7275 7276 list_move(&device->dev_list, &fs_devices->devices); 7277 device->fs_devices->num_devices--; 7278 fs_devices->num_devices++; 7279 7280 device->fs_devices->missing_devices--; 7281 fs_devices->missing_devices++; 7282 7283 device->fs_devices = fs_devices; 7284 } 7285 } 7286 7287 if (device->fs_devices != fs_info->fs_devices) { 7288 BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)); 7289 if (device->generation != 7290 btrfs_device_generation(leaf, dev_item)) 7291 return -EINVAL; 7292 } 7293 7294 fill_device_from_item(leaf, dev_item, device); 7295 if (device->bdev) { 7296 u64 max_total_bytes = bdev_nr_bytes(device->bdev); 7297 7298 if (device->total_bytes > max_total_bytes) { 7299 btrfs_err(fs_info, 7300 "device total_bytes should be at most %llu but found %llu", 7301 max_total_bytes, device->total_bytes); 7302 return -EINVAL; 7303 } 7304 } 7305 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 7306 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 7307 !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 7308 device->fs_devices->total_rw_bytes += device->total_bytes; 7309 atomic64_add(device->total_bytes - device->bytes_used, 7310 &fs_info->free_chunk_space); 7311 } 7312 ret = 0; 7313 return ret; 7314 } 7315 7316 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info) 7317 { 7318 struct btrfs_super_block *super_copy = fs_info->super_copy; 7319 struct extent_buffer *sb; 7320 struct btrfs_disk_key *disk_key; 7321 struct btrfs_chunk *chunk; 7322 u8 *array_ptr; 7323 unsigned long sb_array_offset; 7324 int ret = 0; 7325 u32 num_stripes; 7326 u32 array_size; 7327 u32 len = 0; 7328 u32 cur_offset; 7329 u64 type; 7330 struct btrfs_key key; 7331 7332 ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize); 7333 7334 /* 7335 * We allocated a dummy extent, just to use extent buffer accessors. 7336 * There will be unused space after BTRFS_SUPER_INFO_SIZE, but 7337 * that's fine, we will not go beyond system chunk array anyway. 7338 */ 7339 sb = alloc_dummy_extent_buffer(fs_info, BTRFS_SUPER_INFO_OFFSET); 7340 if (!sb) 7341 return -ENOMEM; 7342 set_extent_buffer_uptodate(sb); 7343 7344 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); 7345 array_size = btrfs_super_sys_array_size(super_copy); 7346 7347 array_ptr = super_copy->sys_chunk_array; 7348 sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array); 7349 cur_offset = 0; 7350 7351 while (cur_offset < array_size) { 7352 disk_key = (struct btrfs_disk_key *)array_ptr; 7353 len = sizeof(*disk_key); 7354 if (cur_offset + len > array_size) 7355 goto out_short_read; 7356 7357 btrfs_disk_key_to_cpu(&key, disk_key); 7358 7359 array_ptr += len; 7360 sb_array_offset += len; 7361 cur_offset += len; 7362 7363 if (key.type != BTRFS_CHUNK_ITEM_KEY) { 7364 btrfs_err(fs_info, 7365 "unexpected item type %u in sys_array at offset %u", 7366 (u32)key.type, cur_offset); 7367 ret = -EIO; 7368 break; 7369 } 7370 7371 chunk = (struct btrfs_chunk *)sb_array_offset; 7372 /* 7373 * At least one btrfs_chunk with one stripe must be present, 7374 * exact stripe count check comes afterwards 7375 */ 7376 len = btrfs_chunk_item_size(1); 7377 if (cur_offset + len > array_size) 7378 goto out_short_read; 7379 7380 num_stripes = btrfs_chunk_num_stripes(sb, chunk); 7381 if (!num_stripes) { 7382 btrfs_err(fs_info, 7383 "invalid number of stripes %u in sys_array at offset %u", 7384 num_stripes, cur_offset); 7385 ret = -EIO; 7386 break; 7387 } 7388 7389 type = btrfs_chunk_type(sb, chunk); 7390 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) { 7391 btrfs_err(fs_info, 7392 "invalid chunk type %llu in sys_array at offset %u", 7393 type, cur_offset); 7394 ret = -EIO; 7395 break; 7396 } 7397 7398 len = btrfs_chunk_item_size(num_stripes); 7399 if (cur_offset + len > array_size) 7400 goto out_short_read; 7401 7402 ret = read_one_chunk(&key, sb, chunk); 7403 if (ret) 7404 break; 7405 7406 array_ptr += len; 7407 sb_array_offset += len; 7408 cur_offset += len; 7409 } 7410 clear_extent_buffer_uptodate(sb); 7411 free_extent_buffer_stale(sb); 7412 return ret; 7413 7414 out_short_read: 7415 btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u", 7416 len, cur_offset); 7417 clear_extent_buffer_uptodate(sb); 7418 free_extent_buffer_stale(sb); 7419 return -EIO; 7420 } 7421 7422 /* 7423 * Check if all chunks in the fs are OK for read-write degraded mount 7424 * 7425 * If the @failing_dev is specified, it's accounted as missing. 7426 * 7427 * Return true if all chunks meet the minimal RW mount requirements. 7428 * Return false if any chunk doesn't meet the minimal RW mount requirements. 7429 */ 7430 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info, 7431 struct btrfs_device *failing_dev) 7432 { 7433 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 7434 struct extent_map *em; 7435 u64 next_start = 0; 7436 bool ret = true; 7437 7438 read_lock(&map_tree->lock); 7439 em = lookup_extent_mapping(map_tree, 0, (u64)-1); 7440 read_unlock(&map_tree->lock); 7441 /* No chunk at all? Return false anyway */ 7442 if (!em) { 7443 ret = false; 7444 goto out; 7445 } 7446 while (em) { 7447 struct map_lookup *map; 7448 int missing = 0; 7449 int max_tolerated; 7450 int i; 7451 7452 map = em->map_lookup; 7453 max_tolerated = 7454 btrfs_get_num_tolerated_disk_barrier_failures( 7455 map->type); 7456 for (i = 0; i < map->num_stripes; i++) { 7457 struct btrfs_device *dev = map->stripes[i].dev; 7458 7459 if (!dev || !dev->bdev || 7460 test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) || 7461 dev->last_flush_error) 7462 missing++; 7463 else if (failing_dev && failing_dev == dev) 7464 missing++; 7465 } 7466 if (missing > max_tolerated) { 7467 if (!failing_dev) 7468 btrfs_warn(fs_info, 7469 "chunk %llu missing %d devices, max tolerance is %d for writable mount", 7470 em->start, missing, max_tolerated); 7471 free_extent_map(em); 7472 ret = false; 7473 goto out; 7474 } 7475 next_start = extent_map_end(em); 7476 free_extent_map(em); 7477 7478 read_lock(&map_tree->lock); 7479 em = lookup_extent_mapping(map_tree, next_start, 7480 (u64)(-1) - next_start); 7481 read_unlock(&map_tree->lock); 7482 } 7483 out: 7484 return ret; 7485 } 7486 7487 static void readahead_tree_node_children(struct extent_buffer *node) 7488 { 7489 int i; 7490 const int nr_items = btrfs_header_nritems(node); 7491 7492 for (i = 0; i < nr_items; i++) 7493 btrfs_readahead_node_child(node, i); 7494 } 7495 7496 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info) 7497 { 7498 struct btrfs_root *root = fs_info->chunk_root; 7499 struct btrfs_path *path; 7500 struct extent_buffer *leaf; 7501 struct btrfs_key key; 7502 struct btrfs_key found_key; 7503 int ret; 7504 int slot; 7505 int iter_ret = 0; 7506 u64 total_dev = 0; 7507 u64 last_ra_node = 0; 7508 7509 path = btrfs_alloc_path(); 7510 if (!path) 7511 return -ENOMEM; 7512 7513 /* 7514 * uuid_mutex is needed only if we are mounting a sprout FS 7515 * otherwise we don't need it. 7516 */ 7517 mutex_lock(&uuid_mutex); 7518 7519 /* 7520 * It is possible for mount and umount to race in such a way that 7521 * we execute this code path, but open_fs_devices failed to clear 7522 * total_rw_bytes. We certainly want it cleared before reading the 7523 * device items, so clear it here. 7524 */ 7525 fs_info->fs_devices->total_rw_bytes = 0; 7526 7527 /* 7528 * Lockdep complains about possible circular locking dependency between 7529 * a disk's open_mutex (struct gendisk.open_mutex), the rw semaphores 7530 * used for freeze procection of a fs (struct super_block.s_writers), 7531 * which we take when starting a transaction, and extent buffers of the 7532 * chunk tree if we call read_one_dev() while holding a lock on an 7533 * extent buffer of the chunk tree. Since we are mounting the filesystem 7534 * and at this point there can't be any concurrent task modifying the 7535 * chunk tree, to keep it simple, just skip locking on the chunk tree. 7536 */ 7537 ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags)); 7538 path->skip_locking = 1; 7539 7540 /* 7541 * Read all device items, and then all the chunk items. All 7542 * device items are found before any chunk item (their object id 7543 * is smaller than the lowest possible object id for a chunk 7544 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID). 7545 */ 7546 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 7547 key.offset = 0; 7548 key.type = 0; 7549 btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) { 7550 struct extent_buffer *node = path->nodes[1]; 7551 7552 leaf = path->nodes[0]; 7553 slot = path->slots[0]; 7554 7555 if (node) { 7556 if (last_ra_node != node->start) { 7557 readahead_tree_node_children(node); 7558 last_ra_node = node->start; 7559 } 7560 } 7561 if (found_key.type == BTRFS_DEV_ITEM_KEY) { 7562 struct btrfs_dev_item *dev_item; 7563 dev_item = btrfs_item_ptr(leaf, slot, 7564 struct btrfs_dev_item); 7565 ret = read_one_dev(leaf, dev_item); 7566 if (ret) 7567 goto error; 7568 total_dev++; 7569 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) { 7570 struct btrfs_chunk *chunk; 7571 7572 /* 7573 * We are only called at mount time, so no need to take 7574 * fs_info->chunk_mutex. Plus, to avoid lockdep warnings, 7575 * we always lock first fs_info->chunk_mutex before 7576 * acquiring any locks on the chunk tree. This is a 7577 * requirement for chunk allocation, see the comment on 7578 * top of btrfs_chunk_alloc() for details. 7579 */ 7580 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 7581 ret = read_one_chunk(&found_key, leaf, chunk); 7582 if (ret) 7583 goto error; 7584 } 7585 } 7586 /* Catch error found during iteration */ 7587 if (iter_ret < 0) { 7588 ret = iter_ret; 7589 goto error; 7590 } 7591 7592 /* 7593 * After loading chunk tree, we've got all device information, 7594 * do another round of validation checks. 7595 */ 7596 if (total_dev != fs_info->fs_devices->total_devices) { 7597 btrfs_warn(fs_info, 7598 "super block num_devices %llu mismatch with DEV_ITEM count %llu, will be repaired on next transaction commit", 7599 btrfs_super_num_devices(fs_info->super_copy), 7600 total_dev); 7601 fs_info->fs_devices->total_devices = total_dev; 7602 btrfs_set_super_num_devices(fs_info->super_copy, total_dev); 7603 } 7604 if (btrfs_super_total_bytes(fs_info->super_copy) < 7605 fs_info->fs_devices->total_rw_bytes) { 7606 btrfs_err(fs_info, 7607 "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu", 7608 btrfs_super_total_bytes(fs_info->super_copy), 7609 fs_info->fs_devices->total_rw_bytes); 7610 ret = -EINVAL; 7611 goto error; 7612 } 7613 ret = 0; 7614 error: 7615 mutex_unlock(&uuid_mutex); 7616 7617 btrfs_free_path(path); 7618 return ret; 7619 } 7620 7621 void btrfs_init_devices_late(struct btrfs_fs_info *fs_info) 7622 { 7623 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs; 7624 struct btrfs_device *device; 7625 7626 fs_devices->fs_info = fs_info; 7627 7628 mutex_lock(&fs_devices->device_list_mutex); 7629 list_for_each_entry(device, &fs_devices->devices, dev_list) 7630 device->fs_info = fs_info; 7631 7632 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 7633 list_for_each_entry(device, &seed_devs->devices, dev_list) 7634 device->fs_info = fs_info; 7635 7636 seed_devs->fs_info = fs_info; 7637 } 7638 mutex_unlock(&fs_devices->device_list_mutex); 7639 } 7640 7641 static u64 btrfs_dev_stats_value(const struct extent_buffer *eb, 7642 const struct btrfs_dev_stats_item *ptr, 7643 int index) 7644 { 7645 u64 val; 7646 7647 read_extent_buffer(eb, &val, 7648 offsetof(struct btrfs_dev_stats_item, values) + 7649 ((unsigned long)ptr) + (index * sizeof(u64)), 7650 sizeof(val)); 7651 return val; 7652 } 7653 7654 static void btrfs_set_dev_stats_value(struct extent_buffer *eb, 7655 struct btrfs_dev_stats_item *ptr, 7656 int index, u64 val) 7657 { 7658 write_extent_buffer(eb, &val, 7659 offsetof(struct btrfs_dev_stats_item, values) + 7660 ((unsigned long)ptr) + (index * sizeof(u64)), 7661 sizeof(val)); 7662 } 7663 7664 static int btrfs_device_init_dev_stats(struct btrfs_device *device, 7665 struct btrfs_path *path) 7666 { 7667 struct btrfs_dev_stats_item *ptr; 7668 struct extent_buffer *eb; 7669 struct btrfs_key key; 7670 int item_size; 7671 int i, ret, slot; 7672 7673 if (!device->fs_info->dev_root) 7674 return 0; 7675 7676 key.objectid = BTRFS_DEV_STATS_OBJECTID; 7677 key.type = BTRFS_PERSISTENT_ITEM_KEY; 7678 key.offset = device->devid; 7679 ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0); 7680 if (ret) { 7681 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7682 btrfs_dev_stat_set(device, i, 0); 7683 device->dev_stats_valid = 1; 7684 btrfs_release_path(path); 7685 return ret < 0 ? ret : 0; 7686 } 7687 slot = path->slots[0]; 7688 eb = path->nodes[0]; 7689 item_size = btrfs_item_size(eb, slot); 7690 7691 ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item); 7692 7693 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) { 7694 if (item_size >= (1 + i) * sizeof(__le64)) 7695 btrfs_dev_stat_set(device, i, 7696 btrfs_dev_stats_value(eb, ptr, i)); 7697 else 7698 btrfs_dev_stat_set(device, i, 0); 7699 } 7700 7701 device->dev_stats_valid = 1; 7702 btrfs_dev_stat_print_on_load(device); 7703 btrfs_release_path(path); 7704 7705 return 0; 7706 } 7707 7708 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info) 7709 { 7710 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs; 7711 struct btrfs_device *device; 7712 struct btrfs_path *path = NULL; 7713 int ret = 0; 7714 7715 path = btrfs_alloc_path(); 7716 if (!path) 7717 return -ENOMEM; 7718 7719 mutex_lock(&fs_devices->device_list_mutex); 7720 list_for_each_entry(device, &fs_devices->devices, dev_list) { 7721 ret = btrfs_device_init_dev_stats(device, path); 7722 if (ret) 7723 goto out; 7724 } 7725 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 7726 list_for_each_entry(device, &seed_devs->devices, dev_list) { 7727 ret = btrfs_device_init_dev_stats(device, path); 7728 if (ret) 7729 goto out; 7730 } 7731 } 7732 out: 7733 mutex_unlock(&fs_devices->device_list_mutex); 7734 7735 btrfs_free_path(path); 7736 return ret; 7737 } 7738 7739 static int update_dev_stat_item(struct btrfs_trans_handle *trans, 7740 struct btrfs_device *device) 7741 { 7742 struct btrfs_fs_info *fs_info = trans->fs_info; 7743 struct btrfs_root *dev_root = fs_info->dev_root; 7744 struct btrfs_path *path; 7745 struct btrfs_key key; 7746 struct extent_buffer *eb; 7747 struct btrfs_dev_stats_item *ptr; 7748 int ret; 7749 int i; 7750 7751 key.objectid = BTRFS_DEV_STATS_OBJECTID; 7752 key.type = BTRFS_PERSISTENT_ITEM_KEY; 7753 key.offset = device->devid; 7754 7755 path = btrfs_alloc_path(); 7756 if (!path) 7757 return -ENOMEM; 7758 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1); 7759 if (ret < 0) { 7760 btrfs_warn_in_rcu(fs_info, 7761 "error %d while searching for dev_stats item for device %s", 7762 ret, rcu_str_deref(device->name)); 7763 goto out; 7764 } 7765 7766 if (ret == 0 && 7767 btrfs_item_size(path->nodes[0], path->slots[0]) < sizeof(*ptr)) { 7768 /* need to delete old one and insert a new one */ 7769 ret = btrfs_del_item(trans, dev_root, path); 7770 if (ret != 0) { 7771 btrfs_warn_in_rcu(fs_info, 7772 "delete too small dev_stats item for device %s failed %d", 7773 rcu_str_deref(device->name), ret); 7774 goto out; 7775 } 7776 ret = 1; 7777 } 7778 7779 if (ret == 1) { 7780 /* need to insert a new item */ 7781 btrfs_release_path(path); 7782 ret = btrfs_insert_empty_item(trans, dev_root, path, 7783 &key, sizeof(*ptr)); 7784 if (ret < 0) { 7785 btrfs_warn_in_rcu(fs_info, 7786 "insert dev_stats item for device %s failed %d", 7787 rcu_str_deref(device->name), ret); 7788 goto out; 7789 } 7790 } 7791 7792 eb = path->nodes[0]; 7793 ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item); 7794 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7795 btrfs_set_dev_stats_value(eb, ptr, i, 7796 btrfs_dev_stat_read(device, i)); 7797 btrfs_mark_buffer_dirty(eb); 7798 7799 out: 7800 btrfs_free_path(path); 7801 return ret; 7802 } 7803 7804 /* 7805 * called from commit_transaction. Writes all changed device stats to disk. 7806 */ 7807 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans) 7808 { 7809 struct btrfs_fs_info *fs_info = trans->fs_info; 7810 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7811 struct btrfs_device *device; 7812 int stats_cnt; 7813 int ret = 0; 7814 7815 mutex_lock(&fs_devices->device_list_mutex); 7816 list_for_each_entry(device, &fs_devices->devices, dev_list) { 7817 stats_cnt = atomic_read(&device->dev_stats_ccnt); 7818 if (!device->dev_stats_valid || stats_cnt == 0) 7819 continue; 7820 7821 7822 /* 7823 * There is a LOAD-LOAD control dependency between the value of 7824 * dev_stats_ccnt and updating the on-disk values which requires 7825 * reading the in-memory counters. Such control dependencies 7826 * require explicit read memory barriers. 7827 * 7828 * This memory barriers pairs with smp_mb__before_atomic in 7829 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full 7830 * barrier implied by atomic_xchg in 7831 * btrfs_dev_stats_read_and_reset 7832 */ 7833 smp_rmb(); 7834 7835 ret = update_dev_stat_item(trans, device); 7836 if (!ret) 7837 atomic_sub(stats_cnt, &device->dev_stats_ccnt); 7838 } 7839 mutex_unlock(&fs_devices->device_list_mutex); 7840 7841 return ret; 7842 } 7843 7844 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index) 7845 { 7846 btrfs_dev_stat_inc(dev, index); 7847 7848 if (!dev->dev_stats_valid) 7849 return; 7850 btrfs_err_rl_in_rcu(dev->fs_info, 7851 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u", 7852 rcu_str_deref(dev->name), 7853 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), 7854 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS), 7855 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS), 7856 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS), 7857 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS)); 7858 } 7859 7860 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev) 7861 { 7862 int i; 7863 7864 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7865 if (btrfs_dev_stat_read(dev, i) != 0) 7866 break; 7867 if (i == BTRFS_DEV_STAT_VALUES_MAX) 7868 return; /* all values == 0, suppress message */ 7869 7870 btrfs_info_in_rcu(dev->fs_info, 7871 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u", 7872 rcu_str_deref(dev->name), 7873 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), 7874 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS), 7875 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS), 7876 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS), 7877 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS)); 7878 } 7879 7880 int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info, 7881 struct btrfs_ioctl_get_dev_stats *stats) 7882 { 7883 BTRFS_DEV_LOOKUP_ARGS(args); 7884 struct btrfs_device *dev; 7885 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7886 int i; 7887 7888 mutex_lock(&fs_devices->device_list_mutex); 7889 args.devid = stats->devid; 7890 dev = btrfs_find_device(fs_info->fs_devices, &args); 7891 mutex_unlock(&fs_devices->device_list_mutex); 7892 7893 if (!dev) { 7894 btrfs_warn(fs_info, "get dev_stats failed, device not found"); 7895 return -ENODEV; 7896 } else if (!dev->dev_stats_valid) { 7897 btrfs_warn(fs_info, "get dev_stats failed, not yet valid"); 7898 return -ENODEV; 7899 } else if (stats->flags & BTRFS_DEV_STATS_RESET) { 7900 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) { 7901 if (stats->nr_items > i) 7902 stats->values[i] = 7903 btrfs_dev_stat_read_and_reset(dev, i); 7904 else 7905 btrfs_dev_stat_set(dev, i, 0); 7906 } 7907 btrfs_info(fs_info, "device stats zeroed by %s (%d)", 7908 current->comm, task_pid_nr(current)); 7909 } else { 7910 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7911 if (stats->nr_items > i) 7912 stats->values[i] = btrfs_dev_stat_read(dev, i); 7913 } 7914 if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX) 7915 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX; 7916 return 0; 7917 } 7918 7919 /* 7920 * Update the size and bytes used for each device where it changed. This is 7921 * delayed since we would otherwise get errors while writing out the 7922 * superblocks. 7923 * 7924 * Must be invoked during transaction commit. 7925 */ 7926 void btrfs_commit_device_sizes(struct btrfs_transaction *trans) 7927 { 7928 struct btrfs_device *curr, *next; 7929 7930 ASSERT(trans->state == TRANS_STATE_COMMIT_DOING); 7931 7932 if (list_empty(&trans->dev_update_list)) 7933 return; 7934 7935 /* 7936 * We don't need the device_list_mutex here. This list is owned by the 7937 * transaction and the transaction must complete before the device is 7938 * released. 7939 */ 7940 mutex_lock(&trans->fs_info->chunk_mutex); 7941 list_for_each_entry_safe(curr, next, &trans->dev_update_list, 7942 post_commit_list) { 7943 list_del_init(&curr->post_commit_list); 7944 curr->commit_total_bytes = curr->disk_total_bytes; 7945 curr->commit_bytes_used = curr->bytes_used; 7946 } 7947 mutex_unlock(&trans->fs_info->chunk_mutex); 7948 } 7949 7950 /* 7951 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10. 7952 */ 7953 int btrfs_bg_type_to_factor(u64 flags) 7954 { 7955 const int index = btrfs_bg_flags_to_raid_index(flags); 7956 7957 return btrfs_raid_array[index].ncopies; 7958 } 7959 7960 7961 7962 static int verify_one_dev_extent(struct btrfs_fs_info *fs_info, 7963 u64 chunk_offset, u64 devid, 7964 u64 physical_offset, u64 physical_len) 7965 { 7966 struct btrfs_dev_lookup_args args = { .devid = devid }; 7967 struct extent_map_tree *em_tree = &fs_info->mapping_tree; 7968 struct extent_map *em; 7969 struct map_lookup *map; 7970 struct btrfs_device *dev; 7971 u64 stripe_len; 7972 bool found = false; 7973 int ret = 0; 7974 int i; 7975 7976 read_lock(&em_tree->lock); 7977 em = lookup_extent_mapping(em_tree, chunk_offset, 1); 7978 read_unlock(&em_tree->lock); 7979 7980 if (!em) { 7981 btrfs_err(fs_info, 7982 "dev extent physical offset %llu on devid %llu doesn't have corresponding chunk", 7983 physical_offset, devid); 7984 ret = -EUCLEAN; 7985 goto out; 7986 } 7987 7988 map = em->map_lookup; 7989 stripe_len = btrfs_calc_stripe_length(em); 7990 if (physical_len != stripe_len) { 7991 btrfs_err(fs_info, 7992 "dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu", 7993 physical_offset, devid, em->start, physical_len, 7994 stripe_len); 7995 ret = -EUCLEAN; 7996 goto out; 7997 } 7998 7999 /* 8000 * Very old mkfs.btrfs (before v4.1) will not respect the reserved 8001 * space. Although kernel can handle it without problem, better to warn 8002 * the users. 8003 */ 8004 if (physical_offset < BTRFS_DEVICE_RANGE_RESERVED) 8005 btrfs_warn(fs_info, 8006 "devid %llu physical %llu len %llu inside the reserved space", 8007 devid, physical_offset, physical_len); 8008 8009 for (i = 0; i < map->num_stripes; i++) { 8010 if (map->stripes[i].dev->devid == devid && 8011 map->stripes[i].physical == physical_offset) { 8012 found = true; 8013 if (map->verified_stripes >= map->num_stripes) { 8014 btrfs_err(fs_info, 8015 "too many dev extents for chunk %llu found", 8016 em->start); 8017 ret = -EUCLEAN; 8018 goto out; 8019 } 8020 map->verified_stripes++; 8021 break; 8022 } 8023 } 8024 if (!found) { 8025 btrfs_err(fs_info, 8026 "dev extent physical offset %llu devid %llu has no corresponding chunk", 8027 physical_offset, devid); 8028 ret = -EUCLEAN; 8029 } 8030 8031 /* Make sure no dev extent is beyond device boundary */ 8032 dev = btrfs_find_device(fs_info->fs_devices, &args); 8033 if (!dev) { 8034 btrfs_err(fs_info, "failed to find devid %llu", devid); 8035 ret = -EUCLEAN; 8036 goto out; 8037 } 8038 8039 if (physical_offset + physical_len > dev->disk_total_bytes) { 8040 btrfs_err(fs_info, 8041 "dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu", 8042 devid, physical_offset, physical_len, 8043 dev->disk_total_bytes); 8044 ret = -EUCLEAN; 8045 goto out; 8046 } 8047 8048 if (dev->zone_info) { 8049 u64 zone_size = dev->zone_info->zone_size; 8050 8051 if (!IS_ALIGNED(physical_offset, zone_size) || 8052 !IS_ALIGNED(physical_len, zone_size)) { 8053 btrfs_err(fs_info, 8054 "zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone", 8055 devid, physical_offset, physical_len); 8056 ret = -EUCLEAN; 8057 goto out; 8058 } 8059 } 8060 8061 out: 8062 free_extent_map(em); 8063 return ret; 8064 } 8065 8066 static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info) 8067 { 8068 struct extent_map_tree *em_tree = &fs_info->mapping_tree; 8069 struct extent_map *em; 8070 struct rb_node *node; 8071 int ret = 0; 8072 8073 read_lock(&em_tree->lock); 8074 for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) { 8075 em = rb_entry(node, struct extent_map, rb_node); 8076 if (em->map_lookup->num_stripes != 8077 em->map_lookup->verified_stripes) { 8078 btrfs_err(fs_info, 8079 "chunk %llu has missing dev extent, have %d expect %d", 8080 em->start, em->map_lookup->verified_stripes, 8081 em->map_lookup->num_stripes); 8082 ret = -EUCLEAN; 8083 goto out; 8084 } 8085 } 8086 out: 8087 read_unlock(&em_tree->lock); 8088 return ret; 8089 } 8090 8091 /* 8092 * Ensure that all dev extents are mapped to correct chunk, otherwise 8093 * later chunk allocation/free would cause unexpected behavior. 8094 * 8095 * NOTE: This will iterate through the whole device tree, which should be of 8096 * the same size level as the chunk tree. This slightly increases mount time. 8097 */ 8098 int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info) 8099 { 8100 struct btrfs_path *path; 8101 struct btrfs_root *root = fs_info->dev_root; 8102 struct btrfs_key key; 8103 u64 prev_devid = 0; 8104 u64 prev_dev_ext_end = 0; 8105 int ret = 0; 8106 8107 /* 8108 * We don't have a dev_root because we mounted with ignorebadroots and 8109 * failed to load the root, so we want to skip the verification in this 8110 * case for sure. 8111 * 8112 * However if the dev root is fine, but the tree itself is corrupted 8113 * we'd still fail to mount. This verification is only to make sure 8114 * writes can happen safely, so instead just bypass this check 8115 * completely in the case of IGNOREBADROOTS. 8116 */ 8117 if (btrfs_test_opt(fs_info, IGNOREBADROOTS)) 8118 return 0; 8119 8120 key.objectid = 1; 8121 key.type = BTRFS_DEV_EXTENT_KEY; 8122 key.offset = 0; 8123 8124 path = btrfs_alloc_path(); 8125 if (!path) 8126 return -ENOMEM; 8127 8128 path->reada = READA_FORWARD; 8129 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 8130 if (ret < 0) 8131 goto out; 8132 8133 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { 8134 ret = btrfs_next_leaf(root, path); 8135 if (ret < 0) 8136 goto out; 8137 /* No dev extents at all? Not good */ 8138 if (ret > 0) { 8139 ret = -EUCLEAN; 8140 goto out; 8141 } 8142 } 8143 while (1) { 8144 struct extent_buffer *leaf = path->nodes[0]; 8145 struct btrfs_dev_extent *dext; 8146 int slot = path->slots[0]; 8147 u64 chunk_offset; 8148 u64 physical_offset; 8149 u64 physical_len; 8150 u64 devid; 8151 8152 btrfs_item_key_to_cpu(leaf, &key, slot); 8153 if (key.type != BTRFS_DEV_EXTENT_KEY) 8154 break; 8155 devid = key.objectid; 8156 physical_offset = key.offset; 8157 8158 dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent); 8159 chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext); 8160 physical_len = btrfs_dev_extent_length(leaf, dext); 8161 8162 /* Check if this dev extent overlaps with the previous one */ 8163 if (devid == prev_devid && physical_offset < prev_dev_ext_end) { 8164 btrfs_err(fs_info, 8165 "dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu", 8166 devid, physical_offset, prev_dev_ext_end); 8167 ret = -EUCLEAN; 8168 goto out; 8169 } 8170 8171 ret = verify_one_dev_extent(fs_info, chunk_offset, devid, 8172 physical_offset, physical_len); 8173 if (ret < 0) 8174 goto out; 8175 prev_devid = devid; 8176 prev_dev_ext_end = physical_offset + physical_len; 8177 8178 ret = btrfs_next_item(root, path); 8179 if (ret < 0) 8180 goto out; 8181 if (ret > 0) { 8182 ret = 0; 8183 break; 8184 } 8185 } 8186 8187 /* Ensure all chunks have corresponding dev extents */ 8188 ret = verify_chunk_dev_extent_mapping(fs_info); 8189 out: 8190 btrfs_free_path(path); 8191 return ret; 8192 } 8193 8194 /* 8195 * Check whether the given block group or device is pinned by any inode being 8196 * used as a swapfile. 8197 */ 8198 bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr) 8199 { 8200 struct btrfs_swapfile_pin *sp; 8201 struct rb_node *node; 8202 8203 spin_lock(&fs_info->swapfile_pins_lock); 8204 node = fs_info->swapfile_pins.rb_node; 8205 while (node) { 8206 sp = rb_entry(node, struct btrfs_swapfile_pin, node); 8207 if (ptr < sp->ptr) 8208 node = node->rb_left; 8209 else if (ptr > sp->ptr) 8210 node = node->rb_right; 8211 else 8212 break; 8213 } 8214 spin_unlock(&fs_info->swapfile_pins_lock); 8215 return node != NULL; 8216 } 8217 8218 static int relocating_repair_kthread(void *data) 8219 { 8220 struct btrfs_block_group *cache = data; 8221 struct btrfs_fs_info *fs_info = cache->fs_info; 8222 u64 target; 8223 int ret = 0; 8224 8225 target = cache->start; 8226 btrfs_put_block_group(cache); 8227 8228 sb_start_write(fs_info->sb); 8229 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) { 8230 btrfs_info(fs_info, 8231 "zoned: skip relocating block group %llu to repair: EBUSY", 8232 target); 8233 sb_end_write(fs_info->sb); 8234 return -EBUSY; 8235 } 8236 8237 mutex_lock(&fs_info->reclaim_bgs_lock); 8238 8239 /* Ensure block group still exists */ 8240 cache = btrfs_lookup_block_group(fs_info, target); 8241 if (!cache) 8242 goto out; 8243 8244 if (!cache->relocating_repair) 8245 goto out; 8246 8247 ret = btrfs_may_alloc_data_chunk(fs_info, target); 8248 if (ret < 0) 8249 goto out; 8250 8251 btrfs_info(fs_info, 8252 "zoned: relocating block group %llu to repair IO failure", 8253 target); 8254 ret = btrfs_relocate_chunk(fs_info, target); 8255 8256 out: 8257 if (cache) 8258 btrfs_put_block_group(cache); 8259 mutex_unlock(&fs_info->reclaim_bgs_lock); 8260 btrfs_exclop_finish(fs_info); 8261 sb_end_write(fs_info->sb); 8262 8263 return ret; 8264 } 8265 8266 bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical) 8267 { 8268 struct btrfs_block_group *cache; 8269 8270 if (!btrfs_is_zoned(fs_info)) 8271 return false; 8272 8273 /* Do not attempt to repair in degraded state */ 8274 if (btrfs_test_opt(fs_info, DEGRADED)) 8275 return true; 8276 8277 cache = btrfs_lookup_block_group(fs_info, logical); 8278 if (!cache) 8279 return true; 8280 8281 spin_lock(&cache->lock); 8282 if (cache->relocating_repair) { 8283 spin_unlock(&cache->lock); 8284 btrfs_put_block_group(cache); 8285 return true; 8286 } 8287 cache->relocating_repair = 1; 8288 spin_unlock(&cache->lock); 8289 8290 kthread_run(relocating_repair_kthread, cache, 8291 "btrfs-relocating-repair"); 8292 8293 return true; 8294 } 8295