1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2007 Oracle. All rights reserved. 4 */ 5 6 #include <linux/sched.h> 7 #include <linux/sched/mm.h> 8 #include <linux/bio.h> 9 #include <linux/slab.h> 10 #include <linux/blkdev.h> 11 #include <linux/ratelimit.h> 12 #include <linux/kthread.h> 13 #include <linux/raid/pq.h> 14 #include <linux/semaphore.h> 15 #include <linux/uuid.h> 16 #include <linux/list_sort.h> 17 #include <linux/namei.h> 18 #include "misc.h" 19 #include "ctree.h" 20 #include "extent_map.h" 21 #include "disk-io.h" 22 #include "transaction.h" 23 #include "print-tree.h" 24 #include "volumes.h" 25 #include "raid56.h" 26 #include "async-thread.h" 27 #include "check-integrity.h" 28 #include "rcu-string.h" 29 #include "dev-replace.h" 30 #include "sysfs.h" 31 #include "tree-checker.h" 32 #include "space-info.h" 33 #include "block-group.h" 34 #include "discard.h" 35 #include "zoned.h" 36 37 #define BTRFS_BLOCK_GROUP_STRIPE_MASK (BTRFS_BLOCK_GROUP_RAID0 | \ 38 BTRFS_BLOCK_GROUP_RAID10 | \ 39 BTRFS_BLOCK_GROUP_RAID56_MASK) 40 41 const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = { 42 [BTRFS_RAID_RAID10] = { 43 .sub_stripes = 2, 44 .dev_stripes = 1, 45 .devs_max = 0, /* 0 == as many as possible */ 46 .devs_min = 2, 47 .tolerated_failures = 1, 48 .devs_increment = 2, 49 .ncopies = 2, 50 .nparity = 0, 51 .raid_name = "raid10", 52 .bg_flag = BTRFS_BLOCK_GROUP_RAID10, 53 .mindev_error = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET, 54 }, 55 [BTRFS_RAID_RAID1] = { 56 .sub_stripes = 1, 57 .dev_stripes = 1, 58 .devs_max = 2, 59 .devs_min = 2, 60 .tolerated_failures = 1, 61 .devs_increment = 2, 62 .ncopies = 2, 63 .nparity = 0, 64 .raid_name = "raid1", 65 .bg_flag = BTRFS_BLOCK_GROUP_RAID1, 66 .mindev_error = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET, 67 }, 68 [BTRFS_RAID_RAID1C3] = { 69 .sub_stripes = 1, 70 .dev_stripes = 1, 71 .devs_max = 3, 72 .devs_min = 3, 73 .tolerated_failures = 2, 74 .devs_increment = 3, 75 .ncopies = 3, 76 .nparity = 0, 77 .raid_name = "raid1c3", 78 .bg_flag = BTRFS_BLOCK_GROUP_RAID1C3, 79 .mindev_error = BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET, 80 }, 81 [BTRFS_RAID_RAID1C4] = { 82 .sub_stripes = 1, 83 .dev_stripes = 1, 84 .devs_max = 4, 85 .devs_min = 4, 86 .tolerated_failures = 3, 87 .devs_increment = 4, 88 .ncopies = 4, 89 .nparity = 0, 90 .raid_name = "raid1c4", 91 .bg_flag = BTRFS_BLOCK_GROUP_RAID1C4, 92 .mindev_error = BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET, 93 }, 94 [BTRFS_RAID_DUP] = { 95 .sub_stripes = 1, 96 .dev_stripes = 2, 97 .devs_max = 1, 98 .devs_min = 1, 99 .tolerated_failures = 0, 100 .devs_increment = 1, 101 .ncopies = 2, 102 .nparity = 0, 103 .raid_name = "dup", 104 .bg_flag = BTRFS_BLOCK_GROUP_DUP, 105 .mindev_error = 0, 106 }, 107 [BTRFS_RAID_RAID0] = { 108 .sub_stripes = 1, 109 .dev_stripes = 1, 110 .devs_max = 0, 111 .devs_min = 1, 112 .tolerated_failures = 0, 113 .devs_increment = 1, 114 .ncopies = 1, 115 .nparity = 0, 116 .raid_name = "raid0", 117 .bg_flag = BTRFS_BLOCK_GROUP_RAID0, 118 .mindev_error = 0, 119 }, 120 [BTRFS_RAID_SINGLE] = { 121 .sub_stripes = 1, 122 .dev_stripes = 1, 123 .devs_max = 1, 124 .devs_min = 1, 125 .tolerated_failures = 0, 126 .devs_increment = 1, 127 .ncopies = 1, 128 .nparity = 0, 129 .raid_name = "single", 130 .bg_flag = 0, 131 .mindev_error = 0, 132 }, 133 [BTRFS_RAID_RAID5] = { 134 .sub_stripes = 1, 135 .dev_stripes = 1, 136 .devs_max = 0, 137 .devs_min = 2, 138 .tolerated_failures = 1, 139 .devs_increment = 1, 140 .ncopies = 1, 141 .nparity = 1, 142 .raid_name = "raid5", 143 .bg_flag = BTRFS_BLOCK_GROUP_RAID5, 144 .mindev_error = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET, 145 }, 146 [BTRFS_RAID_RAID6] = { 147 .sub_stripes = 1, 148 .dev_stripes = 1, 149 .devs_max = 0, 150 .devs_min = 3, 151 .tolerated_failures = 2, 152 .devs_increment = 1, 153 .ncopies = 1, 154 .nparity = 2, 155 .raid_name = "raid6", 156 .bg_flag = BTRFS_BLOCK_GROUP_RAID6, 157 .mindev_error = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET, 158 }, 159 }; 160 161 /* 162 * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which 163 * can be used as index to access btrfs_raid_array[]. 164 */ 165 enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags) 166 { 167 const u64 profile = (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK); 168 169 if (!profile) 170 return BTRFS_RAID_SINGLE; 171 172 return BTRFS_BG_FLAG_TO_INDEX(profile); 173 } 174 175 const char *btrfs_bg_type_to_raid_name(u64 flags) 176 { 177 const int index = btrfs_bg_flags_to_raid_index(flags); 178 179 if (index >= BTRFS_NR_RAID_TYPES) 180 return NULL; 181 182 return btrfs_raid_array[index].raid_name; 183 } 184 185 int btrfs_nr_parity_stripes(u64 type) 186 { 187 enum btrfs_raid_types index = btrfs_bg_flags_to_raid_index(type); 188 189 return btrfs_raid_array[index].nparity; 190 } 191 192 /* 193 * Fill @buf with textual description of @bg_flags, no more than @size_buf 194 * bytes including terminating null byte. 195 */ 196 void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf) 197 { 198 int i; 199 int ret; 200 char *bp = buf; 201 u64 flags = bg_flags; 202 u32 size_bp = size_buf; 203 204 if (!flags) { 205 strcpy(bp, "NONE"); 206 return; 207 } 208 209 #define DESCRIBE_FLAG(flag, desc) \ 210 do { \ 211 if (flags & (flag)) { \ 212 ret = snprintf(bp, size_bp, "%s|", (desc)); \ 213 if (ret < 0 || ret >= size_bp) \ 214 goto out_overflow; \ 215 size_bp -= ret; \ 216 bp += ret; \ 217 flags &= ~(flag); \ 218 } \ 219 } while (0) 220 221 DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data"); 222 DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system"); 223 DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata"); 224 225 DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single"); 226 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) 227 DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag, 228 btrfs_raid_array[i].raid_name); 229 #undef DESCRIBE_FLAG 230 231 if (flags) { 232 ret = snprintf(bp, size_bp, "0x%llx|", flags); 233 size_bp -= ret; 234 } 235 236 if (size_bp < size_buf) 237 buf[size_buf - size_bp - 1] = '\0'; /* remove last | */ 238 239 /* 240 * The text is trimmed, it's up to the caller to provide sufficiently 241 * large buffer 242 */ 243 out_overflow:; 244 } 245 246 static int init_first_rw_device(struct btrfs_trans_handle *trans); 247 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info); 248 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev); 249 static void btrfs_dev_stat_print_on_load(struct btrfs_device *device); 250 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, 251 enum btrfs_map_op op, 252 u64 logical, u64 *length, 253 struct btrfs_io_context **bioc_ret, 254 int mirror_num, int need_raid_map); 255 256 /* 257 * Device locking 258 * ============== 259 * 260 * There are several mutexes that protect manipulation of devices and low-level 261 * structures like chunks but not block groups, extents or files 262 * 263 * uuid_mutex (global lock) 264 * ------------------------ 265 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from 266 * the SCAN_DEV ioctl registration or from mount either implicitly (the first 267 * device) or requested by the device= mount option 268 * 269 * the mutex can be very coarse and can cover long-running operations 270 * 271 * protects: updates to fs_devices counters like missing devices, rw devices, 272 * seeding, structure cloning, opening/closing devices at mount/umount time 273 * 274 * global::fs_devs - add, remove, updates to the global list 275 * 276 * does not protect: manipulation of the fs_devices::devices list in general 277 * but in mount context it could be used to exclude list modifications by eg. 278 * scan ioctl 279 * 280 * btrfs_device::name - renames (write side), read is RCU 281 * 282 * fs_devices::device_list_mutex (per-fs, with RCU) 283 * ------------------------------------------------ 284 * protects updates to fs_devices::devices, ie. adding and deleting 285 * 286 * simple list traversal with read-only actions can be done with RCU protection 287 * 288 * may be used to exclude some operations from running concurrently without any 289 * modifications to the list (see write_all_supers) 290 * 291 * Is not required at mount and close times, because our device list is 292 * protected by the uuid_mutex at that point. 293 * 294 * balance_mutex 295 * ------------- 296 * protects balance structures (status, state) and context accessed from 297 * several places (internally, ioctl) 298 * 299 * chunk_mutex 300 * ----------- 301 * protects chunks, adding or removing during allocation, trim or when a new 302 * device is added/removed. Additionally it also protects post_commit_list of 303 * individual devices, since they can be added to the transaction's 304 * post_commit_list only with chunk_mutex held. 305 * 306 * cleaner_mutex 307 * ------------- 308 * a big lock that is held by the cleaner thread and prevents running subvolume 309 * cleaning together with relocation or delayed iputs 310 * 311 * 312 * Lock nesting 313 * ============ 314 * 315 * uuid_mutex 316 * device_list_mutex 317 * chunk_mutex 318 * balance_mutex 319 * 320 * 321 * Exclusive operations 322 * ==================== 323 * 324 * Maintains the exclusivity of the following operations that apply to the 325 * whole filesystem and cannot run in parallel. 326 * 327 * - Balance (*) 328 * - Device add 329 * - Device remove 330 * - Device replace (*) 331 * - Resize 332 * 333 * The device operations (as above) can be in one of the following states: 334 * 335 * - Running state 336 * - Paused state 337 * - Completed state 338 * 339 * Only device operations marked with (*) can go into the Paused state for the 340 * following reasons: 341 * 342 * - ioctl (only Balance can be Paused through ioctl) 343 * - filesystem remounted as read-only 344 * - filesystem unmounted and mounted as read-only 345 * - system power-cycle and filesystem mounted as read-only 346 * - filesystem or device errors leading to forced read-only 347 * 348 * The status of exclusive operation is set and cleared atomically. 349 * During the course of Paused state, fs_info::exclusive_operation remains set. 350 * A device operation in Paused or Running state can be canceled or resumed 351 * either by ioctl (Balance only) or when remounted as read-write. 352 * The exclusive status is cleared when the device operation is canceled or 353 * completed. 354 */ 355 356 DEFINE_MUTEX(uuid_mutex); 357 static LIST_HEAD(fs_uuids); 358 struct list_head * __attribute_const__ btrfs_get_fs_uuids(void) 359 { 360 return &fs_uuids; 361 } 362 363 /* 364 * alloc_fs_devices - allocate struct btrfs_fs_devices 365 * @fsid: if not NULL, copy the UUID to fs_devices::fsid 366 * @metadata_fsid: if not NULL, copy the UUID to fs_devices::metadata_fsid 367 * 368 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR(). 369 * The returned struct is not linked onto any lists and can be destroyed with 370 * kfree() right away. 371 */ 372 static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid, 373 const u8 *metadata_fsid) 374 { 375 struct btrfs_fs_devices *fs_devs; 376 377 fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL); 378 if (!fs_devs) 379 return ERR_PTR(-ENOMEM); 380 381 mutex_init(&fs_devs->device_list_mutex); 382 383 INIT_LIST_HEAD(&fs_devs->devices); 384 INIT_LIST_HEAD(&fs_devs->alloc_list); 385 INIT_LIST_HEAD(&fs_devs->fs_list); 386 INIT_LIST_HEAD(&fs_devs->seed_list); 387 if (fsid) 388 memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE); 389 390 if (metadata_fsid) 391 memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE); 392 else if (fsid) 393 memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE); 394 395 return fs_devs; 396 } 397 398 void btrfs_free_device(struct btrfs_device *device) 399 { 400 WARN_ON(!list_empty(&device->post_commit_list)); 401 rcu_string_free(device->name); 402 extent_io_tree_release(&device->alloc_state); 403 btrfs_destroy_dev_zone_info(device); 404 kfree(device); 405 } 406 407 static void free_fs_devices(struct btrfs_fs_devices *fs_devices) 408 { 409 struct btrfs_device *device; 410 WARN_ON(fs_devices->opened); 411 while (!list_empty(&fs_devices->devices)) { 412 device = list_entry(fs_devices->devices.next, 413 struct btrfs_device, dev_list); 414 list_del(&device->dev_list); 415 btrfs_free_device(device); 416 } 417 kfree(fs_devices); 418 } 419 420 void __exit btrfs_cleanup_fs_uuids(void) 421 { 422 struct btrfs_fs_devices *fs_devices; 423 424 while (!list_empty(&fs_uuids)) { 425 fs_devices = list_entry(fs_uuids.next, 426 struct btrfs_fs_devices, fs_list); 427 list_del(&fs_devices->fs_list); 428 free_fs_devices(fs_devices); 429 } 430 } 431 432 static noinline struct btrfs_fs_devices *find_fsid( 433 const u8 *fsid, const u8 *metadata_fsid) 434 { 435 struct btrfs_fs_devices *fs_devices; 436 437 ASSERT(fsid); 438 439 /* Handle non-split brain cases */ 440 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 441 if (metadata_fsid) { 442 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0 443 && memcmp(metadata_fsid, fs_devices->metadata_uuid, 444 BTRFS_FSID_SIZE) == 0) 445 return fs_devices; 446 } else { 447 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0) 448 return fs_devices; 449 } 450 } 451 return NULL; 452 } 453 454 static struct btrfs_fs_devices *find_fsid_with_metadata_uuid( 455 struct btrfs_super_block *disk_super) 456 { 457 458 struct btrfs_fs_devices *fs_devices; 459 460 /* 461 * Handle scanned device having completed its fsid change but 462 * belonging to a fs_devices that was created by first scanning 463 * a device which didn't have its fsid/metadata_uuid changed 464 * at all and the CHANGING_FSID_V2 flag set. 465 */ 466 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 467 if (fs_devices->fsid_change && 468 memcmp(disk_super->metadata_uuid, fs_devices->fsid, 469 BTRFS_FSID_SIZE) == 0 && 470 memcmp(fs_devices->fsid, fs_devices->metadata_uuid, 471 BTRFS_FSID_SIZE) == 0) { 472 return fs_devices; 473 } 474 } 475 /* 476 * Handle scanned device having completed its fsid change but 477 * belonging to a fs_devices that was created by a device that 478 * has an outdated pair of fsid/metadata_uuid and 479 * CHANGING_FSID_V2 flag set. 480 */ 481 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 482 if (fs_devices->fsid_change && 483 memcmp(fs_devices->metadata_uuid, 484 fs_devices->fsid, BTRFS_FSID_SIZE) != 0 && 485 memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid, 486 BTRFS_FSID_SIZE) == 0) { 487 return fs_devices; 488 } 489 } 490 491 return find_fsid(disk_super->fsid, disk_super->metadata_uuid); 492 } 493 494 495 static int 496 btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder, 497 int flush, struct block_device **bdev, 498 struct btrfs_super_block **disk_super) 499 { 500 int ret; 501 502 *bdev = blkdev_get_by_path(device_path, flags, holder); 503 504 if (IS_ERR(*bdev)) { 505 ret = PTR_ERR(*bdev); 506 goto error; 507 } 508 509 if (flush) 510 sync_blockdev(*bdev); 511 ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE); 512 if (ret) { 513 blkdev_put(*bdev, flags); 514 goto error; 515 } 516 invalidate_bdev(*bdev); 517 *disk_super = btrfs_read_dev_super(*bdev); 518 if (IS_ERR(*disk_super)) { 519 ret = PTR_ERR(*disk_super); 520 blkdev_put(*bdev, flags); 521 goto error; 522 } 523 524 return 0; 525 526 error: 527 *bdev = NULL; 528 return ret; 529 } 530 531 /** 532 * Search and remove all stale devices (which are not mounted). 533 * When both inputs are NULL, it will search and release all stale devices. 534 * 535 * @devt: Optional. When provided will it release all unmounted devices 536 * matching this devt only. 537 * @skip_device: Optional. Will skip this device when searching for the stale 538 * devices. 539 * 540 * Return: 0 for success or if @devt is 0. 541 * -EBUSY if @devt is a mounted device. 542 * -ENOENT if @devt does not match any device in the list. 543 */ 544 static int btrfs_free_stale_devices(dev_t devt, struct btrfs_device *skip_device) 545 { 546 struct btrfs_fs_devices *fs_devices, *tmp_fs_devices; 547 struct btrfs_device *device, *tmp_device; 548 int ret = 0; 549 550 lockdep_assert_held(&uuid_mutex); 551 552 if (devt) 553 ret = -ENOENT; 554 555 list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) { 556 557 mutex_lock(&fs_devices->device_list_mutex); 558 list_for_each_entry_safe(device, tmp_device, 559 &fs_devices->devices, dev_list) { 560 if (skip_device && skip_device == device) 561 continue; 562 if (devt && devt != device->devt) 563 continue; 564 if (fs_devices->opened) { 565 /* for an already deleted device return 0 */ 566 if (devt && ret != 0) 567 ret = -EBUSY; 568 break; 569 } 570 571 /* delete the stale device */ 572 fs_devices->num_devices--; 573 list_del(&device->dev_list); 574 btrfs_free_device(device); 575 576 ret = 0; 577 } 578 mutex_unlock(&fs_devices->device_list_mutex); 579 580 if (fs_devices->num_devices == 0) { 581 btrfs_sysfs_remove_fsid(fs_devices); 582 list_del(&fs_devices->fs_list); 583 free_fs_devices(fs_devices); 584 } 585 } 586 587 return ret; 588 } 589 590 /* 591 * This is only used on mount, and we are protected from competing things 592 * messing with our fs_devices by the uuid_mutex, thus we do not need the 593 * fs_devices->device_list_mutex here. 594 */ 595 static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices, 596 struct btrfs_device *device, fmode_t flags, 597 void *holder) 598 { 599 struct block_device *bdev; 600 struct btrfs_super_block *disk_super; 601 u64 devid; 602 int ret; 603 604 if (device->bdev) 605 return -EINVAL; 606 if (!device->name) 607 return -EINVAL; 608 609 ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1, 610 &bdev, &disk_super); 611 if (ret) 612 return ret; 613 614 devid = btrfs_stack_device_id(&disk_super->dev_item); 615 if (devid != device->devid) 616 goto error_free_page; 617 618 if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE)) 619 goto error_free_page; 620 621 device->generation = btrfs_super_generation(disk_super); 622 623 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) { 624 if (btrfs_super_incompat_flags(disk_super) & 625 BTRFS_FEATURE_INCOMPAT_METADATA_UUID) { 626 pr_err( 627 "BTRFS: Invalid seeding and uuid-changed device detected\n"); 628 goto error_free_page; 629 } 630 631 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 632 fs_devices->seeding = true; 633 } else { 634 if (bdev_read_only(bdev)) 635 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 636 else 637 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 638 } 639 640 if (!bdev_nonrot(bdev)) 641 fs_devices->rotating = true; 642 643 device->bdev = bdev; 644 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 645 device->mode = flags; 646 647 fs_devices->open_devices++; 648 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 649 device->devid != BTRFS_DEV_REPLACE_DEVID) { 650 fs_devices->rw_devices++; 651 list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list); 652 } 653 btrfs_release_disk_super(disk_super); 654 655 return 0; 656 657 error_free_page: 658 btrfs_release_disk_super(disk_super); 659 blkdev_put(bdev, flags); 660 661 return -EINVAL; 662 } 663 664 /* 665 * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices 666 * being created with a disk that has already completed its fsid change. Such 667 * disk can belong to an fs which has its FSID changed or to one which doesn't. 668 * Handle both cases here. 669 */ 670 static struct btrfs_fs_devices *find_fsid_inprogress( 671 struct btrfs_super_block *disk_super) 672 { 673 struct btrfs_fs_devices *fs_devices; 674 675 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 676 if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid, 677 BTRFS_FSID_SIZE) != 0 && 678 memcmp(fs_devices->metadata_uuid, disk_super->fsid, 679 BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) { 680 return fs_devices; 681 } 682 } 683 684 return find_fsid(disk_super->fsid, NULL); 685 } 686 687 688 static struct btrfs_fs_devices *find_fsid_changed( 689 struct btrfs_super_block *disk_super) 690 { 691 struct btrfs_fs_devices *fs_devices; 692 693 /* 694 * Handles the case where scanned device is part of an fs that had 695 * multiple successful changes of FSID but currently device didn't 696 * observe it. Meaning our fsid will be different than theirs. We need 697 * to handle two subcases : 698 * 1 - The fs still continues to have different METADATA/FSID uuids. 699 * 2 - The fs is switched back to its original FSID (METADATA/FSID 700 * are equal). 701 */ 702 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 703 /* Changed UUIDs */ 704 if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid, 705 BTRFS_FSID_SIZE) != 0 && 706 memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid, 707 BTRFS_FSID_SIZE) == 0 && 708 memcmp(fs_devices->fsid, disk_super->fsid, 709 BTRFS_FSID_SIZE) != 0) 710 return fs_devices; 711 712 /* Unchanged UUIDs */ 713 if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid, 714 BTRFS_FSID_SIZE) == 0 && 715 memcmp(fs_devices->fsid, disk_super->metadata_uuid, 716 BTRFS_FSID_SIZE) == 0) 717 return fs_devices; 718 } 719 720 return NULL; 721 } 722 723 static struct btrfs_fs_devices *find_fsid_reverted_metadata( 724 struct btrfs_super_block *disk_super) 725 { 726 struct btrfs_fs_devices *fs_devices; 727 728 /* 729 * Handle the case where the scanned device is part of an fs whose last 730 * metadata UUID change reverted it to the original FSID. At the same 731 * time * fs_devices was first created by another constitutent device 732 * which didn't fully observe the operation. This results in an 733 * btrfs_fs_devices created with metadata/fsid different AND 734 * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the 735 * fs_devices equal to the FSID of the disk. 736 */ 737 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 738 if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid, 739 BTRFS_FSID_SIZE) != 0 && 740 memcmp(fs_devices->metadata_uuid, disk_super->fsid, 741 BTRFS_FSID_SIZE) == 0 && 742 fs_devices->fsid_change) 743 return fs_devices; 744 } 745 746 return NULL; 747 } 748 /* 749 * Add new device to list of registered devices 750 * 751 * Returns: 752 * device pointer which was just added or updated when successful 753 * error pointer when failed 754 */ 755 static noinline struct btrfs_device *device_list_add(const char *path, 756 struct btrfs_super_block *disk_super, 757 bool *new_device_added) 758 { 759 struct btrfs_device *device; 760 struct btrfs_fs_devices *fs_devices = NULL; 761 struct rcu_string *name; 762 u64 found_transid = btrfs_super_generation(disk_super); 763 u64 devid = btrfs_stack_device_id(&disk_super->dev_item); 764 dev_t path_devt; 765 int error; 766 bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) & 767 BTRFS_FEATURE_INCOMPAT_METADATA_UUID); 768 bool fsid_change_in_progress = (btrfs_super_flags(disk_super) & 769 BTRFS_SUPER_FLAG_CHANGING_FSID_V2); 770 771 error = lookup_bdev(path, &path_devt); 772 if (error) 773 return ERR_PTR(error); 774 775 if (fsid_change_in_progress) { 776 if (!has_metadata_uuid) 777 fs_devices = find_fsid_inprogress(disk_super); 778 else 779 fs_devices = find_fsid_changed(disk_super); 780 } else if (has_metadata_uuid) { 781 fs_devices = find_fsid_with_metadata_uuid(disk_super); 782 } else { 783 fs_devices = find_fsid_reverted_metadata(disk_super); 784 if (!fs_devices) 785 fs_devices = find_fsid(disk_super->fsid, NULL); 786 } 787 788 789 if (!fs_devices) { 790 if (has_metadata_uuid) 791 fs_devices = alloc_fs_devices(disk_super->fsid, 792 disk_super->metadata_uuid); 793 else 794 fs_devices = alloc_fs_devices(disk_super->fsid, NULL); 795 796 if (IS_ERR(fs_devices)) 797 return ERR_CAST(fs_devices); 798 799 fs_devices->fsid_change = fsid_change_in_progress; 800 801 mutex_lock(&fs_devices->device_list_mutex); 802 list_add(&fs_devices->fs_list, &fs_uuids); 803 804 device = NULL; 805 } else { 806 struct btrfs_dev_lookup_args args = { 807 .devid = devid, 808 .uuid = disk_super->dev_item.uuid, 809 }; 810 811 mutex_lock(&fs_devices->device_list_mutex); 812 device = btrfs_find_device(fs_devices, &args); 813 814 /* 815 * If this disk has been pulled into an fs devices created by 816 * a device which had the CHANGING_FSID_V2 flag then replace the 817 * metadata_uuid/fsid values of the fs_devices. 818 */ 819 if (fs_devices->fsid_change && 820 found_transid > fs_devices->latest_generation) { 821 memcpy(fs_devices->fsid, disk_super->fsid, 822 BTRFS_FSID_SIZE); 823 824 if (has_metadata_uuid) 825 memcpy(fs_devices->metadata_uuid, 826 disk_super->metadata_uuid, 827 BTRFS_FSID_SIZE); 828 else 829 memcpy(fs_devices->metadata_uuid, 830 disk_super->fsid, BTRFS_FSID_SIZE); 831 832 fs_devices->fsid_change = false; 833 } 834 } 835 836 if (!device) { 837 if (fs_devices->opened) { 838 mutex_unlock(&fs_devices->device_list_mutex); 839 return ERR_PTR(-EBUSY); 840 } 841 842 device = btrfs_alloc_device(NULL, &devid, 843 disk_super->dev_item.uuid); 844 if (IS_ERR(device)) { 845 mutex_unlock(&fs_devices->device_list_mutex); 846 /* we can safely leave the fs_devices entry around */ 847 return device; 848 } 849 850 name = rcu_string_strdup(path, GFP_NOFS); 851 if (!name) { 852 btrfs_free_device(device); 853 mutex_unlock(&fs_devices->device_list_mutex); 854 return ERR_PTR(-ENOMEM); 855 } 856 rcu_assign_pointer(device->name, name); 857 device->devt = path_devt; 858 859 list_add_rcu(&device->dev_list, &fs_devices->devices); 860 fs_devices->num_devices++; 861 862 device->fs_devices = fs_devices; 863 *new_device_added = true; 864 865 if (disk_super->label[0]) 866 pr_info( 867 "BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n", 868 disk_super->label, devid, found_transid, path, 869 current->comm, task_pid_nr(current)); 870 else 871 pr_info( 872 "BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n", 873 disk_super->fsid, devid, found_transid, path, 874 current->comm, task_pid_nr(current)); 875 876 } else if (!device->name || strcmp(device->name->str, path)) { 877 /* 878 * When FS is already mounted. 879 * 1. If you are here and if the device->name is NULL that 880 * means this device was missing at time of FS mount. 881 * 2. If you are here and if the device->name is different 882 * from 'path' that means either 883 * a. The same device disappeared and reappeared with 884 * different name. or 885 * b. The missing-disk-which-was-replaced, has 886 * reappeared now. 887 * 888 * We must allow 1 and 2a above. But 2b would be a spurious 889 * and unintentional. 890 * 891 * Further in case of 1 and 2a above, the disk at 'path' 892 * would have missed some transaction when it was away and 893 * in case of 2a the stale bdev has to be updated as well. 894 * 2b must not be allowed at all time. 895 */ 896 897 /* 898 * For now, we do allow update to btrfs_fs_device through the 899 * btrfs dev scan cli after FS has been mounted. We're still 900 * tracking a problem where systems fail mount by subvolume id 901 * when we reject replacement on a mounted FS. 902 */ 903 if (!fs_devices->opened && found_transid < device->generation) { 904 /* 905 * That is if the FS is _not_ mounted and if you 906 * are here, that means there is more than one 907 * disk with same uuid and devid.We keep the one 908 * with larger generation number or the last-in if 909 * generation are equal. 910 */ 911 mutex_unlock(&fs_devices->device_list_mutex); 912 return ERR_PTR(-EEXIST); 913 } 914 915 /* 916 * We are going to replace the device path for a given devid, 917 * make sure it's the same device if the device is mounted 918 * 919 * NOTE: the device->fs_info may not be reliable here so pass 920 * in a NULL to message helpers instead. This avoids a possible 921 * use-after-free when the fs_info and fs_info->sb are already 922 * torn down. 923 */ 924 if (device->bdev) { 925 if (device->devt != path_devt) { 926 mutex_unlock(&fs_devices->device_list_mutex); 927 btrfs_warn_in_rcu(NULL, 928 "duplicate device %s devid %llu generation %llu scanned by %s (%d)", 929 path, devid, found_transid, 930 current->comm, 931 task_pid_nr(current)); 932 return ERR_PTR(-EEXIST); 933 } 934 btrfs_info_in_rcu(NULL, 935 "devid %llu device path %s changed to %s scanned by %s (%d)", 936 devid, rcu_str_deref(device->name), 937 path, current->comm, 938 task_pid_nr(current)); 939 } 940 941 name = rcu_string_strdup(path, GFP_NOFS); 942 if (!name) { 943 mutex_unlock(&fs_devices->device_list_mutex); 944 return ERR_PTR(-ENOMEM); 945 } 946 rcu_string_free(device->name); 947 rcu_assign_pointer(device->name, name); 948 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { 949 fs_devices->missing_devices--; 950 clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 951 } 952 device->devt = path_devt; 953 } 954 955 /* 956 * Unmount does not free the btrfs_device struct but would zero 957 * generation along with most of the other members. So just update 958 * it back. We need it to pick the disk with largest generation 959 * (as above). 960 */ 961 if (!fs_devices->opened) { 962 device->generation = found_transid; 963 fs_devices->latest_generation = max_t(u64, found_transid, 964 fs_devices->latest_generation); 965 } 966 967 fs_devices->total_devices = btrfs_super_num_devices(disk_super); 968 969 mutex_unlock(&fs_devices->device_list_mutex); 970 return device; 971 } 972 973 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig) 974 { 975 struct btrfs_fs_devices *fs_devices; 976 struct btrfs_device *device; 977 struct btrfs_device *orig_dev; 978 int ret = 0; 979 980 lockdep_assert_held(&uuid_mutex); 981 982 fs_devices = alloc_fs_devices(orig->fsid, NULL); 983 if (IS_ERR(fs_devices)) 984 return fs_devices; 985 986 fs_devices->total_devices = orig->total_devices; 987 988 list_for_each_entry(orig_dev, &orig->devices, dev_list) { 989 struct rcu_string *name; 990 991 device = btrfs_alloc_device(NULL, &orig_dev->devid, 992 orig_dev->uuid); 993 if (IS_ERR(device)) { 994 ret = PTR_ERR(device); 995 goto error; 996 } 997 998 /* 999 * This is ok to do without rcu read locked because we hold the 1000 * uuid mutex so nothing we touch in here is going to disappear. 1001 */ 1002 if (orig_dev->name) { 1003 name = rcu_string_strdup(orig_dev->name->str, 1004 GFP_KERNEL); 1005 if (!name) { 1006 btrfs_free_device(device); 1007 ret = -ENOMEM; 1008 goto error; 1009 } 1010 rcu_assign_pointer(device->name, name); 1011 } 1012 1013 list_add(&device->dev_list, &fs_devices->devices); 1014 device->fs_devices = fs_devices; 1015 fs_devices->num_devices++; 1016 } 1017 return fs_devices; 1018 error: 1019 free_fs_devices(fs_devices); 1020 return ERR_PTR(ret); 1021 } 1022 1023 static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, 1024 struct btrfs_device **latest_dev) 1025 { 1026 struct btrfs_device *device, *next; 1027 1028 /* This is the initialized path, it is safe to release the devices. */ 1029 list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) { 1030 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) { 1031 if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, 1032 &device->dev_state) && 1033 !test_bit(BTRFS_DEV_STATE_MISSING, 1034 &device->dev_state) && 1035 (!*latest_dev || 1036 device->generation > (*latest_dev)->generation)) { 1037 *latest_dev = device; 1038 } 1039 continue; 1040 } 1041 1042 /* 1043 * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID, 1044 * in btrfs_init_dev_replace() so just continue. 1045 */ 1046 if (device->devid == BTRFS_DEV_REPLACE_DEVID) 1047 continue; 1048 1049 if (device->bdev) { 1050 blkdev_put(device->bdev, device->mode); 1051 device->bdev = NULL; 1052 fs_devices->open_devices--; 1053 } 1054 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 1055 list_del_init(&device->dev_alloc_list); 1056 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 1057 fs_devices->rw_devices--; 1058 } 1059 list_del_init(&device->dev_list); 1060 fs_devices->num_devices--; 1061 btrfs_free_device(device); 1062 } 1063 1064 } 1065 1066 /* 1067 * After we have read the system tree and know devids belonging to this 1068 * filesystem, remove the device which does not belong there. 1069 */ 1070 void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices) 1071 { 1072 struct btrfs_device *latest_dev = NULL; 1073 struct btrfs_fs_devices *seed_dev; 1074 1075 mutex_lock(&uuid_mutex); 1076 __btrfs_free_extra_devids(fs_devices, &latest_dev); 1077 1078 list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list) 1079 __btrfs_free_extra_devids(seed_dev, &latest_dev); 1080 1081 fs_devices->latest_dev = latest_dev; 1082 1083 mutex_unlock(&uuid_mutex); 1084 } 1085 1086 static void btrfs_close_bdev(struct btrfs_device *device) 1087 { 1088 if (!device->bdev) 1089 return; 1090 1091 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 1092 sync_blockdev(device->bdev); 1093 invalidate_bdev(device->bdev); 1094 } 1095 1096 blkdev_put(device->bdev, device->mode); 1097 } 1098 1099 static void btrfs_close_one_device(struct btrfs_device *device) 1100 { 1101 struct btrfs_fs_devices *fs_devices = device->fs_devices; 1102 1103 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 1104 device->devid != BTRFS_DEV_REPLACE_DEVID) { 1105 list_del_init(&device->dev_alloc_list); 1106 fs_devices->rw_devices--; 1107 } 1108 1109 if (device->devid == BTRFS_DEV_REPLACE_DEVID) 1110 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 1111 1112 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { 1113 clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 1114 fs_devices->missing_devices--; 1115 } 1116 1117 btrfs_close_bdev(device); 1118 if (device->bdev) { 1119 fs_devices->open_devices--; 1120 device->bdev = NULL; 1121 } 1122 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 1123 btrfs_destroy_dev_zone_info(device); 1124 1125 device->fs_info = NULL; 1126 atomic_set(&device->dev_stats_ccnt, 0); 1127 extent_io_tree_release(&device->alloc_state); 1128 1129 /* 1130 * Reset the flush error record. We might have a transient flush error 1131 * in this mount, and if so we aborted the current transaction and set 1132 * the fs to an error state, guaranteeing no super blocks can be further 1133 * committed. However that error might be transient and if we unmount the 1134 * filesystem and mount it again, we should allow the mount to succeed 1135 * (btrfs_check_rw_degradable() should not fail) - if after mounting the 1136 * filesystem again we still get flush errors, then we will again abort 1137 * any transaction and set the error state, guaranteeing no commits of 1138 * unsafe super blocks. 1139 */ 1140 device->last_flush_error = 0; 1141 1142 /* Verify the device is back in a pristine state */ 1143 ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state)); 1144 ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)); 1145 ASSERT(list_empty(&device->dev_alloc_list)); 1146 ASSERT(list_empty(&device->post_commit_list)); 1147 } 1148 1149 static void close_fs_devices(struct btrfs_fs_devices *fs_devices) 1150 { 1151 struct btrfs_device *device, *tmp; 1152 1153 lockdep_assert_held(&uuid_mutex); 1154 1155 if (--fs_devices->opened > 0) 1156 return; 1157 1158 list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) 1159 btrfs_close_one_device(device); 1160 1161 WARN_ON(fs_devices->open_devices); 1162 WARN_ON(fs_devices->rw_devices); 1163 fs_devices->opened = 0; 1164 fs_devices->seeding = false; 1165 fs_devices->fs_info = NULL; 1166 } 1167 1168 void btrfs_close_devices(struct btrfs_fs_devices *fs_devices) 1169 { 1170 LIST_HEAD(list); 1171 struct btrfs_fs_devices *tmp; 1172 1173 mutex_lock(&uuid_mutex); 1174 close_fs_devices(fs_devices); 1175 if (!fs_devices->opened) 1176 list_splice_init(&fs_devices->seed_list, &list); 1177 1178 list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) { 1179 close_fs_devices(fs_devices); 1180 list_del(&fs_devices->seed_list); 1181 free_fs_devices(fs_devices); 1182 } 1183 mutex_unlock(&uuid_mutex); 1184 } 1185 1186 static int open_fs_devices(struct btrfs_fs_devices *fs_devices, 1187 fmode_t flags, void *holder) 1188 { 1189 struct btrfs_device *device; 1190 struct btrfs_device *latest_dev = NULL; 1191 struct btrfs_device *tmp_device; 1192 1193 flags |= FMODE_EXCL; 1194 1195 list_for_each_entry_safe(device, tmp_device, &fs_devices->devices, 1196 dev_list) { 1197 int ret; 1198 1199 ret = btrfs_open_one_device(fs_devices, device, flags, holder); 1200 if (ret == 0 && 1201 (!latest_dev || device->generation > latest_dev->generation)) { 1202 latest_dev = device; 1203 } else if (ret == -ENODATA) { 1204 fs_devices->num_devices--; 1205 list_del(&device->dev_list); 1206 btrfs_free_device(device); 1207 } 1208 } 1209 if (fs_devices->open_devices == 0) 1210 return -EINVAL; 1211 1212 fs_devices->opened = 1; 1213 fs_devices->latest_dev = latest_dev; 1214 fs_devices->total_rw_bytes = 0; 1215 fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR; 1216 fs_devices->read_policy = BTRFS_READ_POLICY_PID; 1217 1218 return 0; 1219 } 1220 1221 static int devid_cmp(void *priv, const struct list_head *a, 1222 const struct list_head *b) 1223 { 1224 const struct btrfs_device *dev1, *dev2; 1225 1226 dev1 = list_entry(a, struct btrfs_device, dev_list); 1227 dev2 = list_entry(b, struct btrfs_device, dev_list); 1228 1229 if (dev1->devid < dev2->devid) 1230 return -1; 1231 else if (dev1->devid > dev2->devid) 1232 return 1; 1233 return 0; 1234 } 1235 1236 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, 1237 fmode_t flags, void *holder) 1238 { 1239 int ret; 1240 1241 lockdep_assert_held(&uuid_mutex); 1242 /* 1243 * The device_list_mutex cannot be taken here in case opening the 1244 * underlying device takes further locks like open_mutex. 1245 * 1246 * We also don't need the lock here as this is called during mount and 1247 * exclusion is provided by uuid_mutex 1248 */ 1249 1250 if (fs_devices->opened) { 1251 fs_devices->opened++; 1252 ret = 0; 1253 } else { 1254 list_sort(NULL, &fs_devices->devices, devid_cmp); 1255 ret = open_fs_devices(fs_devices, flags, holder); 1256 } 1257 1258 return ret; 1259 } 1260 1261 void btrfs_release_disk_super(struct btrfs_super_block *super) 1262 { 1263 struct page *page = virt_to_page(super); 1264 1265 put_page(page); 1266 } 1267 1268 static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev, 1269 u64 bytenr, u64 bytenr_orig) 1270 { 1271 struct btrfs_super_block *disk_super; 1272 struct page *page; 1273 void *p; 1274 pgoff_t index; 1275 1276 /* make sure our super fits in the device */ 1277 if (bytenr + PAGE_SIZE >= bdev_nr_bytes(bdev)) 1278 return ERR_PTR(-EINVAL); 1279 1280 /* make sure our super fits in the page */ 1281 if (sizeof(*disk_super) > PAGE_SIZE) 1282 return ERR_PTR(-EINVAL); 1283 1284 /* make sure our super doesn't straddle pages on disk */ 1285 index = bytenr >> PAGE_SHIFT; 1286 if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index) 1287 return ERR_PTR(-EINVAL); 1288 1289 /* pull in the page with our super */ 1290 page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL); 1291 1292 if (IS_ERR(page)) 1293 return ERR_CAST(page); 1294 1295 p = page_address(page); 1296 1297 /* align our pointer to the offset of the super block */ 1298 disk_super = p + offset_in_page(bytenr); 1299 1300 if (btrfs_super_bytenr(disk_super) != bytenr_orig || 1301 btrfs_super_magic(disk_super) != BTRFS_MAGIC) { 1302 btrfs_release_disk_super(p); 1303 return ERR_PTR(-EINVAL); 1304 } 1305 1306 if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1]) 1307 disk_super->label[BTRFS_LABEL_SIZE - 1] = 0; 1308 1309 return disk_super; 1310 } 1311 1312 int btrfs_forget_devices(dev_t devt) 1313 { 1314 int ret; 1315 1316 mutex_lock(&uuid_mutex); 1317 ret = btrfs_free_stale_devices(devt, NULL); 1318 mutex_unlock(&uuid_mutex); 1319 1320 return ret; 1321 } 1322 1323 /* 1324 * Look for a btrfs signature on a device. This may be called out of the mount path 1325 * and we are not allowed to call set_blocksize during the scan. The superblock 1326 * is read via pagecache 1327 */ 1328 struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags, 1329 void *holder) 1330 { 1331 struct btrfs_super_block *disk_super; 1332 bool new_device_added = false; 1333 struct btrfs_device *device = NULL; 1334 struct block_device *bdev; 1335 u64 bytenr, bytenr_orig; 1336 int ret; 1337 1338 lockdep_assert_held(&uuid_mutex); 1339 1340 /* 1341 * we would like to check all the supers, but that would make 1342 * a btrfs mount succeed after a mkfs from a different FS. 1343 * So, we need to add a special mount option to scan for 1344 * later supers, using BTRFS_SUPER_MIRROR_MAX instead 1345 */ 1346 flags |= FMODE_EXCL; 1347 1348 bdev = blkdev_get_by_path(path, flags, holder); 1349 if (IS_ERR(bdev)) 1350 return ERR_CAST(bdev); 1351 1352 bytenr_orig = btrfs_sb_offset(0); 1353 ret = btrfs_sb_log_location_bdev(bdev, 0, READ, &bytenr); 1354 if (ret) { 1355 device = ERR_PTR(ret); 1356 goto error_bdev_put; 1357 } 1358 1359 disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr_orig); 1360 if (IS_ERR(disk_super)) { 1361 device = ERR_CAST(disk_super); 1362 goto error_bdev_put; 1363 } 1364 1365 device = device_list_add(path, disk_super, &new_device_added); 1366 if (!IS_ERR(device) && new_device_added) 1367 btrfs_free_stale_devices(device->devt, device); 1368 1369 btrfs_release_disk_super(disk_super); 1370 1371 error_bdev_put: 1372 blkdev_put(bdev, flags); 1373 1374 return device; 1375 } 1376 1377 /* 1378 * Try to find a chunk that intersects [start, start + len] range and when one 1379 * such is found, record the end of it in *start 1380 */ 1381 static bool contains_pending_extent(struct btrfs_device *device, u64 *start, 1382 u64 len) 1383 { 1384 u64 physical_start, physical_end; 1385 1386 lockdep_assert_held(&device->fs_info->chunk_mutex); 1387 1388 if (!find_first_extent_bit(&device->alloc_state, *start, 1389 &physical_start, &physical_end, 1390 CHUNK_ALLOCATED, NULL)) { 1391 1392 if (in_range(physical_start, *start, len) || 1393 in_range(*start, physical_start, 1394 physical_end - physical_start)) { 1395 *start = physical_end + 1; 1396 return true; 1397 } 1398 } 1399 return false; 1400 } 1401 1402 static u64 dev_extent_search_start(struct btrfs_device *device, u64 start) 1403 { 1404 switch (device->fs_devices->chunk_alloc_policy) { 1405 case BTRFS_CHUNK_ALLOC_REGULAR: 1406 return max_t(u64, start, BTRFS_DEVICE_RANGE_RESERVED); 1407 case BTRFS_CHUNK_ALLOC_ZONED: 1408 /* 1409 * We don't care about the starting region like regular 1410 * allocator, because we anyway use/reserve the first two zones 1411 * for superblock logging. 1412 */ 1413 return ALIGN(start, device->zone_info->zone_size); 1414 default: 1415 BUG(); 1416 } 1417 } 1418 1419 static bool dev_extent_hole_check_zoned(struct btrfs_device *device, 1420 u64 *hole_start, u64 *hole_size, 1421 u64 num_bytes) 1422 { 1423 u64 zone_size = device->zone_info->zone_size; 1424 u64 pos; 1425 int ret; 1426 bool changed = false; 1427 1428 ASSERT(IS_ALIGNED(*hole_start, zone_size)); 1429 1430 while (*hole_size > 0) { 1431 pos = btrfs_find_allocatable_zones(device, *hole_start, 1432 *hole_start + *hole_size, 1433 num_bytes); 1434 if (pos != *hole_start) { 1435 *hole_size = *hole_start + *hole_size - pos; 1436 *hole_start = pos; 1437 changed = true; 1438 if (*hole_size < num_bytes) 1439 break; 1440 } 1441 1442 ret = btrfs_ensure_empty_zones(device, pos, num_bytes); 1443 1444 /* Range is ensured to be empty */ 1445 if (!ret) 1446 return changed; 1447 1448 /* Given hole range was invalid (outside of device) */ 1449 if (ret == -ERANGE) { 1450 *hole_start += *hole_size; 1451 *hole_size = 0; 1452 return true; 1453 } 1454 1455 *hole_start += zone_size; 1456 *hole_size -= zone_size; 1457 changed = true; 1458 } 1459 1460 return changed; 1461 } 1462 1463 /** 1464 * dev_extent_hole_check - check if specified hole is suitable for allocation 1465 * @device: the device which we have the hole 1466 * @hole_start: starting position of the hole 1467 * @hole_size: the size of the hole 1468 * @num_bytes: the size of the free space that we need 1469 * 1470 * This function may modify @hole_start and @hole_size to reflect the suitable 1471 * position for allocation. Returns 1 if hole position is updated, 0 otherwise. 1472 */ 1473 static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start, 1474 u64 *hole_size, u64 num_bytes) 1475 { 1476 bool changed = false; 1477 u64 hole_end = *hole_start + *hole_size; 1478 1479 for (;;) { 1480 /* 1481 * Check before we set max_hole_start, otherwise we could end up 1482 * sending back this offset anyway. 1483 */ 1484 if (contains_pending_extent(device, hole_start, *hole_size)) { 1485 if (hole_end >= *hole_start) 1486 *hole_size = hole_end - *hole_start; 1487 else 1488 *hole_size = 0; 1489 changed = true; 1490 } 1491 1492 switch (device->fs_devices->chunk_alloc_policy) { 1493 case BTRFS_CHUNK_ALLOC_REGULAR: 1494 /* No extra check */ 1495 break; 1496 case BTRFS_CHUNK_ALLOC_ZONED: 1497 if (dev_extent_hole_check_zoned(device, hole_start, 1498 hole_size, num_bytes)) { 1499 changed = true; 1500 /* 1501 * The changed hole can contain pending extent. 1502 * Loop again to check that. 1503 */ 1504 continue; 1505 } 1506 break; 1507 default: 1508 BUG(); 1509 } 1510 1511 break; 1512 } 1513 1514 return changed; 1515 } 1516 1517 /* 1518 * find_free_dev_extent_start - find free space in the specified device 1519 * @device: the device which we search the free space in 1520 * @num_bytes: the size of the free space that we need 1521 * @search_start: the position from which to begin the search 1522 * @start: store the start of the free space. 1523 * @len: the size of the free space. that we find, or the size 1524 * of the max free space if we don't find suitable free space 1525 * 1526 * this uses a pretty simple search, the expectation is that it is 1527 * called very infrequently and that a given device has a small number 1528 * of extents 1529 * 1530 * @start is used to store the start of the free space if we find. But if we 1531 * don't find suitable free space, it will be used to store the start position 1532 * of the max free space. 1533 * 1534 * @len is used to store the size of the free space that we find. 1535 * But if we don't find suitable free space, it is used to store the size of 1536 * the max free space. 1537 * 1538 * NOTE: This function will search *commit* root of device tree, and does extra 1539 * check to ensure dev extents are not double allocated. 1540 * This makes the function safe to allocate dev extents but may not report 1541 * correct usable device space, as device extent freed in current transaction 1542 * is not reported as available. 1543 */ 1544 static int find_free_dev_extent_start(struct btrfs_device *device, 1545 u64 num_bytes, u64 search_start, u64 *start, 1546 u64 *len) 1547 { 1548 struct btrfs_fs_info *fs_info = device->fs_info; 1549 struct btrfs_root *root = fs_info->dev_root; 1550 struct btrfs_key key; 1551 struct btrfs_dev_extent *dev_extent; 1552 struct btrfs_path *path; 1553 u64 hole_size; 1554 u64 max_hole_start; 1555 u64 max_hole_size; 1556 u64 extent_end; 1557 u64 search_end = device->total_bytes; 1558 int ret; 1559 int slot; 1560 struct extent_buffer *l; 1561 1562 search_start = dev_extent_search_start(device, search_start); 1563 1564 WARN_ON(device->zone_info && 1565 !IS_ALIGNED(num_bytes, device->zone_info->zone_size)); 1566 1567 path = btrfs_alloc_path(); 1568 if (!path) 1569 return -ENOMEM; 1570 1571 max_hole_start = search_start; 1572 max_hole_size = 0; 1573 1574 again: 1575 if (search_start >= search_end || 1576 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 1577 ret = -ENOSPC; 1578 goto out; 1579 } 1580 1581 path->reada = READA_FORWARD; 1582 path->search_commit_root = 1; 1583 path->skip_locking = 1; 1584 1585 key.objectid = device->devid; 1586 key.offset = search_start; 1587 key.type = BTRFS_DEV_EXTENT_KEY; 1588 1589 ret = btrfs_search_backwards(root, &key, path); 1590 if (ret < 0) 1591 goto out; 1592 1593 while (1) { 1594 l = path->nodes[0]; 1595 slot = path->slots[0]; 1596 if (slot >= btrfs_header_nritems(l)) { 1597 ret = btrfs_next_leaf(root, path); 1598 if (ret == 0) 1599 continue; 1600 if (ret < 0) 1601 goto out; 1602 1603 break; 1604 } 1605 btrfs_item_key_to_cpu(l, &key, slot); 1606 1607 if (key.objectid < device->devid) 1608 goto next; 1609 1610 if (key.objectid > device->devid) 1611 break; 1612 1613 if (key.type != BTRFS_DEV_EXTENT_KEY) 1614 goto next; 1615 1616 if (key.offset > search_start) { 1617 hole_size = key.offset - search_start; 1618 dev_extent_hole_check(device, &search_start, &hole_size, 1619 num_bytes); 1620 1621 if (hole_size > max_hole_size) { 1622 max_hole_start = search_start; 1623 max_hole_size = hole_size; 1624 } 1625 1626 /* 1627 * If this free space is greater than which we need, 1628 * it must be the max free space that we have found 1629 * until now, so max_hole_start must point to the start 1630 * of this free space and the length of this free space 1631 * is stored in max_hole_size. Thus, we return 1632 * max_hole_start and max_hole_size and go back to the 1633 * caller. 1634 */ 1635 if (hole_size >= num_bytes) { 1636 ret = 0; 1637 goto out; 1638 } 1639 } 1640 1641 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 1642 extent_end = key.offset + btrfs_dev_extent_length(l, 1643 dev_extent); 1644 if (extent_end > search_start) 1645 search_start = extent_end; 1646 next: 1647 path->slots[0]++; 1648 cond_resched(); 1649 } 1650 1651 /* 1652 * At this point, search_start should be the end of 1653 * allocated dev extents, and when shrinking the device, 1654 * search_end may be smaller than search_start. 1655 */ 1656 if (search_end > search_start) { 1657 hole_size = search_end - search_start; 1658 if (dev_extent_hole_check(device, &search_start, &hole_size, 1659 num_bytes)) { 1660 btrfs_release_path(path); 1661 goto again; 1662 } 1663 1664 if (hole_size > max_hole_size) { 1665 max_hole_start = search_start; 1666 max_hole_size = hole_size; 1667 } 1668 } 1669 1670 /* See above. */ 1671 if (max_hole_size < num_bytes) 1672 ret = -ENOSPC; 1673 else 1674 ret = 0; 1675 1676 out: 1677 btrfs_free_path(path); 1678 *start = max_hole_start; 1679 if (len) 1680 *len = max_hole_size; 1681 return ret; 1682 } 1683 1684 int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes, 1685 u64 *start, u64 *len) 1686 { 1687 /* FIXME use last free of some kind */ 1688 return find_free_dev_extent_start(device, num_bytes, 0, start, len); 1689 } 1690 1691 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans, 1692 struct btrfs_device *device, 1693 u64 start, u64 *dev_extent_len) 1694 { 1695 struct btrfs_fs_info *fs_info = device->fs_info; 1696 struct btrfs_root *root = fs_info->dev_root; 1697 int ret; 1698 struct btrfs_path *path; 1699 struct btrfs_key key; 1700 struct btrfs_key found_key; 1701 struct extent_buffer *leaf = NULL; 1702 struct btrfs_dev_extent *extent = NULL; 1703 1704 path = btrfs_alloc_path(); 1705 if (!path) 1706 return -ENOMEM; 1707 1708 key.objectid = device->devid; 1709 key.offset = start; 1710 key.type = BTRFS_DEV_EXTENT_KEY; 1711 again: 1712 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1713 if (ret > 0) { 1714 ret = btrfs_previous_item(root, path, key.objectid, 1715 BTRFS_DEV_EXTENT_KEY); 1716 if (ret) 1717 goto out; 1718 leaf = path->nodes[0]; 1719 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 1720 extent = btrfs_item_ptr(leaf, path->slots[0], 1721 struct btrfs_dev_extent); 1722 BUG_ON(found_key.offset > start || found_key.offset + 1723 btrfs_dev_extent_length(leaf, extent) < start); 1724 key = found_key; 1725 btrfs_release_path(path); 1726 goto again; 1727 } else if (ret == 0) { 1728 leaf = path->nodes[0]; 1729 extent = btrfs_item_ptr(leaf, path->slots[0], 1730 struct btrfs_dev_extent); 1731 } else { 1732 goto out; 1733 } 1734 1735 *dev_extent_len = btrfs_dev_extent_length(leaf, extent); 1736 1737 ret = btrfs_del_item(trans, root, path); 1738 if (ret == 0) 1739 set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags); 1740 out: 1741 btrfs_free_path(path); 1742 return ret; 1743 } 1744 1745 static u64 find_next_chunk(struct btrfs_fs_info *fs_info) 1746 { 1747 struct extent_map_tree *em_tree; 1748 struct extent_map *em; 1749 struct rb_node *n; 1750 u64 ret = 0; 1751 1752 em_tree = &fs_info->mapping_tree; 1753 read_lock(&em_tree->lock); 1754 n = rb_last(&em_tree->map.rb_root); 1755 if (n) { 1756 em = rb_entry(n, struct extent_map, rb_node); 1757 ret = em->start + em->len; 1758 } 1759 read_unlock(&em_tree->lock); 1760 1761 return ret; 1762 } 1763 1764 static noinline int find_next_devid(struct btrfs_fs_info *fs_info, 1765 u64 *devid_ret) 1766 { 1767 int ret; 1768 struct btrfs_key key; 1769 struct btrfs_key found_key; 1770 struct btrfs_path *path; 1771 1772 path = btrfs_alloc_path(); 1773 if (!path) 1774 return -ENOMEM; 1775 1776 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1777 key.type = BTRFS_DEV_ITEM_KEY; 1778 key.offset = (u64)-1; 1779 1780 ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0); 1781 if (ret < 0) 1782 goto error; 1783 1784 if (ret == 0) { 1785 /* Corruption */ 1786 btrfs_err(fs_info, "corrupted chunk tree devid -1 matched"); 1787 ret = -EUCLEAN; 1788 goto error; 1789 } 1790 1791 ret = btrfs_previous_item(fs_info->chunk_root, path, 1792 BTRFS_DEV_ITEMS_OBJECTID, 1793 BTRFS_DEV_ITEM_KEY); 1794 if (ret) { 1795 *devid_ret = 1; 1796 } else { 1797 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 1798 path->slots[0]); 1799 *devid_ret = found_key.offset + 1; 1800 } 1801 ret = 0; 1802 error: 1803 btrfs_free_path(path); 1804 return ret; 1805 } 1806 1807 /* 1808 * the device information is stored in the chunk root 1809 * the btrfs_device struct should be fully filled in 1810 */ 1811 static int btrfs_add_dev_item(struct btrfs_trans_handle *trans, 1812 struct btrfs_device *device) 1813 { 1814 int ret; 1815 struct btrfs_path *path; 1816 struct btrfs_dev_item *dev_item; 1817 struct extent_buffer *leaf; 1818 struct btrfs_key key; 1819 unsigned long ptr; 1820 1821 path = btrfs_alloc_path(); 1822 if (!path) 1823 return -ENOMEM; 1824 1825 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1826 key.type = BTRFS_DEV_ITEM_KEY; 1827 key.offset = device->devid; 1828 1829 btrfs_reserve_chunk_metadata(trans, true); 1830 ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path, 1831 &key, sizeof(*dev_item)); 1832 btrfs_trans_release_chunk_metadata(trans); 1833 if (ret) 1834 goto out; 1835 1836 leaf = path->nodes[0]; 1837 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 1838 1839 btrfs_set_device_id(leaf, dev_item, device->devid); 1840 btrfs_set_device_generation(leaf, dev_item, 0); 1841 btrfs_set_device_type(leaf, dev_item, device->type); 1842 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 1843 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 1844 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 1845 btrfs_set_device_total_bytes(leaf, dev_item, 1846 btrfs_device_get_disk_total_bytes(device)); 1847 btrfs_set_device_bytes_used(leaf, dev_item, 1848 btrfs_device_get_bytes_used(device)); 1849 btrfs_set_device_group(leaf, dev_item, 0); 1850 btrfs_set_device_seek_speed(leaf, dev_item, 0); 1851 btrfs_set_device_bandwidth(leaf, dev_item, 0); 1852 btrfs_set_device_start_offset(leaf, dev_item, 0); 1853 1854 ptr = btrfs_device_uuid(dev_item); 1855 write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 1856 ptr = btrfs_device_fsid(dev_item); 1857 write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid, 1858 ptr, BTRFS_FSID_SIZE); 1859 btrfs_mark_buffer_dirty(leaf); 1860 1861 ret = 0; 1862 out: 1863 btrfs_free_path(path); 1864 return ret; 1865 } 1866 1867 /* 1868 * Function to update ctime/mtime for a given device path. 1869 * Mainly used for ctime/mtime based probe like libblkid. 1870 * 1871 * We don't care about errors here, this is just to be kind to userspace. 1872 */ 1873 static void update_dev_time(const char *device_path) 1874 { 1875 struct path path; 1876 struct timespec64 now; 1877 int ret; 1878 1879 ret = kern_path(device_path, LOOKUP_FOLLOW, &path); 1880 if (ret) 1881 return; 1882 1883 now = current_time(d_inode(path.dentry)); 1884 inode_update_time(d_inode(path.dentry), &now, S_MTIME | S_CTIME); 1885 path_put(&path); 1886 } 1887 1888 static int btrfs_rm_dev_item(struct btrfs_trans_handle *trans, 1889 struct btrfs_device *device) 1890 { 1891 struct btrfs_root *root = device->fs_info->chunk_root; 1892 int ret; 1893 struct btrfs_path *path; 1894 struct btrfs_key key; 1895 1896 path = btrfs_alloc_path(); 1897 if (!path) 1898 return -ENOMEM; 1899 1900 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1901 key.type = BTRFS_DEV_ITEM_KEY; 1902 key.offset = device->devid; 1903 1904 btrfs_reserve_chunk_metadata(trans, false); 1905 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1906 btrfs_trans_release_chunk_metadata(trans); 1907 if (ret) { 1908 if (ret > 0) 1909 ret = -ENOENT; 1910 goto out; 1911 } 1912 1913 ret = btrfs_del_item(trans, root, path); 1914 out: 1915 btrfs_free_path(path); 1916 return ret; 1917 } 1918 1919 /* 1920 * Verify that @num_devices satisfies the RAID profile constraints in the whole 1921 * filesystem. It's up to the caller to adjust that number regarding eg. device 1922 * replace. 1923 */ 1924 static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info, 1925 u64 num_devices) 1926 { 1927 u64 all_avail; 1928 unsigned seq; 1929 int i; 1930 1931 do { 1932 seq = read_seqbegin(&fs_info->profiles_lock); 1933 1934 all_avail = fs_info->avail_data_alloc_bits | 1935 fs_info->avail_system_alloc_bits | 1936 fs_info->avail_metadata_alloc_bits; 1937 } while (read_seqretry(&fs_info->profiles_lock, seq)); 1938 1939 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) { 1940 if (!(all_avail & btrfs_raid_array[i].bg_flag)) 1941 continue; 1942 1943 if (num_devices < btrfs_raid_array[i].devs_min) 1944 return btrfs_raid_array[i].mindev_error; 1945 } 1946 1947 return 0; 1948 } 1949 1950 static struct btrfs_device * btrfs_find_next_active_device( 1951 struct btrfs_fs_devices *fs_devs, struct btrfs_device *device) 1952 { 1953 struct btrfs_device *next_device; 1954 1955 list_for_each_entry(next_device, &fs_devs->devices, dev_list) { 1956 if (next_device != device && 1957 !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state) 1958 && next_device->bdev) 1959 return next_device; 1960 } 1961 1962 return NULL; 1963 } 1964 1965 /* 1966 * Helper function to check if the given device is part of s_bdev / latest_dev 1967 * and replace it with the provided or the next active device, in the context 1968 * where this function called, there should be always be another device (or 1969 * this_dev) which is active. 1970 */ 1971 void __cold btrfs_assign_next_active_device(struct btrfs_device *device, 1972 struct btrfs_device *next_device) 1973 { 1974 struct btrfs_fs_info *fs_info = device->fs_info; 1975 1976 if (!next_device) 1977 next_device = btrfs_find_next_active_device(fs_info->fs_devices, 1978 device); 1979 ASSERT(next_device); 1980 1981 if (fs_info->sb->s_bdev && 1982 (fs_info->sb->s_bdev == device->bdev)) 1983 fs_info->sb->s_bdev = next_device->bdev; 1984 1985 if (fs_info->fs_devices->latest_dev->bdev == device->bdev) 1986 fs_info->fs_devices->latest_dev = next_device; 1987 } 1988 1989 /* 1990 * Return btrfs_fs_devices::num_devices excluding the device that's being 1991 * currently replaced. 1992 */ 1993 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info) 1994 { 1995 u64 num_devices = fs_info->fs_devices->num_devices; 1996 1997 down_read(&fs_info->dev_replace.rwsem); 1998 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) { 1999 ASSERT(num_devices > 1); 2000 num_devices--; 2001 } 2002 up_read(&fs_info->dev_replace.rwsem); 2003 2004 return num_devices; 2005 } 2006 2007 void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info, 2008 struct block_device *bdev, 2009 const char *device_path) 2010 { 2011 struct btrfs_super_block *disk_super; 2012 int copy_num; 2013 2014 if (!bdev) 2015 return; 2016 2017 for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) { 2018 struct page *page; 2019 int ret; 2020 2021 disk_super = btrfs_read_dev_one_super(bdev, copy_num); 2022 if (IS_ERR(disk_super)) 2023 continue; 2024 2025 if (bdev_is_zoned(bdev)) { 2026 btrfs_reset_sb_log_zones(bdev, copy_num); 2027 continue; 2028 } 2029 2030 memset(&disk_super->magic, 0, sizeof(disk_super->magic)); 2031 2032 page = virt_to_page(disk_super); 2033 set_page_dirty(page); 2034 lock_page(page); 2035 /* write_on_page() unlocks the page */ 2036 ret = write_one_page(page); 2037 if (ret) 2038 btrfs_warn(fs_info, 2039 "error clearing superblock number %d (%d)", 2040 copy_num, ret); 2041 btrfs_release_disk_super(disk_super); 2042 2043 } 2044 2045 /* Notify udev that device has changed */ 2046 btrfs_kobject_uevent(bdev, KOBJ_CHANGE); 2047 2048 /* Update ctime/mtime for device path for libblkid */ 2049 update_dev_time(device_path); 2050 } 2051 2052 int btrfs_rm_device(struct btrfs_fs_info *fs_info, 2053 struct btrfs_dev_lookup_args *args, 2054 struct block_device **bdev, fmode_t *mode) 2055 { 2056 struct btrfs_trans_handle *trans; 2057 struct btrfs_device *device; 2058 struct btrfs_fs_devices *cur_devices; 2059 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2060 u64 num_devices; 2061 int ret = 0; 2062 2063 if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { 2064 btrfs_err(fs_info, "device remove not supported on extent tree v2 yet"); 2065 return -EINVAL; 2066 } 2067 2068 /* 2069 * The device list in fs_devices is accessed without locks (neither 2070 * uuid_mutex nor device_list_mutex) as it won't change on a mounted 2071 * filesystem and another device rm cannot run. 2072 */ 2073 num_devices = btrfs_num_devices(fs_info); 2074 2075 ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1); 2076 if (ret) 2077 return ret; 2078 2079 device = btrfs_find_device(fs_info->fs_devices, args); 2080 if (!device) { 2081 if (args->missing) 2082 ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND; 2083 else 2084 ret = -ENOENT; 2085 return ret; 2086 } 2087 2088 if (btrfs_pinned_by_swapfile(fs_info, device)) { 2089 btrfs_warn_in_rcu(fs_info, 2090 "cannot remove device %s (devid %llu) due to active swapfile", 2091 rcu_str_deref(device->name), device->devid); 2092 return -ETXTBSY; 2093 } 2094 2095 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 2096 return BTRFS_ERROR_DEV_TGT_REPLACE; 2097 2098 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 2099 fs_info->fs_devices->rw_devices == 1) 2100 return BTRFS_ERROR_DEV_ONLY_WRITABLE; 2101 2102 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2103 mutex_lock(&fs_info->chunk_mutex); 2104 list_del_init(&device->dev_alloc_list); 2105 device->fs_devices->rw_devices--; 2106 mutex_unlock(&fs_info->chunk_mutex); 2107 } 2108 2109 ret = btrfs_shrink_device(device, 0); 2110 if (ret) 2111 goto error_undo; 2112 2113 trans = btrfs_start_transaction(fs_info->chunk_root, 0); 2114 if (IS_ERR(trans)) { 2115 ret = PTR_ERR(trans); 2116 goto error_undo; 2117 } 2118 2119 ret = btrfs_rm_dev_item(trans, device); 2120 if (ret) { 2121 /* Any error in dev item removal is critical */ 2122 btrfs_crit(fs_info, 2123 "failed to remove device item for devid %llu: %d", 2124 device->devid, ret); 2125 btrfs_abort_transaction(trans, ret); 2126 btrfs_end_transaction(trans); 2127 return ret; 2128 } 2129 2130 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2131 btrfs_scrub_cancel_dev(device); 2132 2133 /* 2134 * the device list mutex makes sure that we don't change 2135 * the device list while someone else is writing out all 2136 * the device supers. Whoever is writing all supers, should 2137 * lock the device list mutex before getting the number of 2138 * devices in the super block (super_copy). Conversely, 2139 * whoever updates the number of devices in the super block 2140 * (super_copy) should hold the device list mutex. 2141 */ 2142 2143 /* 2144 * In normal cases the cur_devices == fs_devices. But in case 2145 * of deleting a seed device, the cur_devices should point to 2146 * its own fs_devices listed under the fs_devices->seed_list. 2147 */ 2148 cur_devices = device->fs_devices; 2149 mutex_lock(&fs_devices->device_list_mutex); 2150 list_del_rcu(&device->dev_list); 2151 2152 cur_devices->num_devices--; 2153 cur_devices->total_devices--; 2154 /* Update total_devices of the parent fs_devices if it's seed */ 2155 if (cur_devices != fs_devices) 2156 fs_devices->total_devices--; 2157 2158 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) 2159 cur_devices->missing_devices--; 2160 2161 btrfs_assign_next_active_device(device, NULL); 2162 2163 if (device->bdev) { 2164 cur_devices->open_devices--; 2165 /* remove sysfs entry */ 2166 btrfs_sysfs_remove_device(device); 2167 } 2168 2169 num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1; 2170 btrfs_set_super_num_devices(fs_info->super_copy, num_devices); 2171 mutex_unlock(&fs_devices->device_list_mutex); 2172 2173 /* 2174 * At this point, the device is zero sized and detached from the 2175 * devices list. All that's left is to zero out the old supers and 2176 * free the device. 2177 * 2178 * We cannot call btrfs_close_bdev() here because we're holding the sb 2179 * write lock, and blkdev_put() will pull in the ->open_mutex on the 2180 * block device and it's dependencies. Instead just flush the device 2181 * and let the caller do the final blkdev_put. 2182 */ 2183 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2184 btrfs_scratch_superblocks(fs_info, device->bdev, 2185 device->name->str); 2186 if (device->bdev) { 2187 sync_blockdev(device->bdev); 2188 invalidate_bdev(device->bdev); 2189 } 2190 } 2191 2192 *bdev = device->bdev; 2193 *mode = device->mode; 2194 synchronize_rcu(); 2195 btrfs_free_device(device); 2196 2197 /* 2198 * This can happen if cur_devices is the private seed devices list. We 2199 * cannot call close_fs_devices() here because it expects the uuid_mutex 2200 * to be held, but in fact we don't need that for the private 2201 * seed_devices, we can simply decrement cur_devices->opened and then 2202 * remove it from our list and free the fs_devices. 2203 */ 2204 if (cur_devices->num_devices == 0) { 2205 list_del_init(&cur_devices->seed_list); 2206 ASSERT(cur_devices->opened == 1); 2207 cur_devices->opened--; 2208 free_fs_devices(cur_devices); 2209 } 2210 2211 ret = btrfs_commit_transaction(trans); 2212 2213 return ret; 2214 2215 error_undo: 2216 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2217 mutex_lock(&fs_info->chunk_mutex); 2218 list_add(&device->dev_alloc_list, 2219 &fs_devices->alloc_list); 2220 device->fs_devices->rw_devices++; 2221 mutex_unlock(&fs_info->chunk_mutex); 2222 } 2223 return ret; 2224 } 2225 2226 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev) 2227 { 2228 struct btrfs_fs_devices *fs_devices; 2229 2230 lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex); 2231 2232 /* 2233 * in case of fs with no seed, srcdev->fs_devices will point 2234 * to fs_devices of fs_info. However when the dev being replaced is 2235 * a seed dev it will point to the seed's local fs_devices. In short 2236 * srcdev will have its correct fs_devices in both the cases. 2237 */ 2238 fs_devices = srcdev->fs_devices; 2239 2240 list_del_rcu(&srcdev->dev_list); 2241 list_del(&srcdev->dev_alloc_list); 2242 fs_devices->num_devices--; 2243 if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state)) 2244 fs_devices->missing_devices--; 2245 2246 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state)) 2247 fs_devices->rw_devices--; 2248 2249 if (srcdev->bdev) 2250 fs_devices->open_devices--; 2251 } 2252 2253 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev) 2254 { 2255 struct btrfs_fs_devices *fs_devices = srcdev->fs_devices; 2256 2257 mutex_lock(&uuid_mutex); 2258 2259 btrfs_close_bdev(srcdev); 2260 synchronize_rcu(); 2261 btrfs_free_device(srcdev); 2262 2263 /* if this is no devs we rather delete the fs_devices */ 2264 if (!fs_devices->num_devices) { 2265 /* 2266 * On a mounted FS, num_devices can't be zero unless it's a 2267 * seed. In case of a seed device being replaced, the replace 2268 * target added to the sprout FS, so there will be no more 2269 * device left under the seed FS. 2270 */ 2271 ASSERT(fs_devices->seeding); 2272 2273 list_del_init(&fs_devices->seed_list); 2274 close_fs_devices(fs_devices); 2275 free_fs_devices(fs_devices); 2276 } 2277 mutex_unlock(&uuid_mutex); 2278 } 2279 2280 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev) 2281 { 2282 struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices; 2283 2284 mutex_lock(&fs_devices->device_list_mutex); 2285 2286 btrfs_sysfs_remove_device(tgtdev); 2287 2288 if (tgtdev->bdev) 2289 fs_devices->open_devices--; 2290 2291 fs_devices->num_devices--; 2292 2293 btrfs_assign_next_active_device(tgtdev, NULL); 2294 2295 list_del_rcu(&tgtdev->dev_list); 2296 2297 mutex_unlock(&fs_devices->device_list_mutex); 2298 2299 btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev, 2300 tgtdev->name->str); 2301 2302 btrfs_close_bdev(tgtdev); 2303 synchronize_rcu(); 2304 btrfs_free_device(tgtdev); 2305 } 2306 2307 /** 2308 * Populate args from device at path 2309 * 2310 * @fs_info: the filesystem 2311 * @args: the args to populate 2312 * @path: the path to the device 2313 * 2314 * This will read the super block of the device at @path and populate @args with 2315 * the devid, fsid, and uuid. This is meant to be used for ioctls that need to 2316 * lookup a device to operate on, but need to do it before we take any locks. 2317 * This properly handles the special case of "missing" that a user may pass in, 2318 * and does some basic sanity checks. The caller must make sure that @path is 2319 * properly NUL terminated before calling in, and must call 2320 * btrfs_put_dev_args_from_path() in order to free up the temporary fsid and 2321 * uuid buffers. 2322 * 2323 * Return: 0 for success, -errno for failure 2324 */ 2325 int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info, 2326 struct btrfs_dev_lookup_args *args, 2327 const char *path) 2328 { 2329 struct btrfs_super_block *disk_super; 2330 struct block_device *bdev; 2331 int ret; 2332 2333 if (!path || !path[0]) 2334 return -EINVAL; 2335 if (!strcmp(path, "missing")) { 2336 args->missing = true; 2337 return 0; 2338 } 2339 2340 args->uuid = kzalloc(BTRFS_UUID_SIZE, GFP_KERNEL); 2341 args->fsid = kzalloc(BTRFS_FSID_SIZE, GFP_KERNEL); 2342 if (!args->uuid || !args->fsid) { 2343 btrfs_put_dev_args_from_path(args); 2344 return -ENOMEM; 2345 } 2346 2347 ret = btrfs_get_bdev_and_sb(path, FMODE_READ, fs_info->bdev_holder, 0, 2348 &bdev, &disk_super); 2349 if (ret) 2350 return ret; 2351 args->devid = btrfs_stack_device_id(&disk_super->dev_item); 2352 memcpy(args->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE); 2353 if (btrfs_fs_incompat(fs_info, METADATA_UUID)) 2354 memcpy(args->fsid, disk_super->metadata_uuid, BTRFS_FSID_SIZE); 2355 else 2356 memcpy(args->fsid, disk_super->fsid, BTRFS_FSID_SIZE); 2357 btrfs_release_disk_super(disk_super); 2358 blkdev_put(bdev, FMODE_READ); 2359 return 0; 2360 } 2361 2362 /* 2363 * Only use this jointly with btrfs_get_dev_args_from_path() because we will 2364 * allocate our ->uuid and ->fsid pointers, everybody else uses local variables 2365 * that don't need to be freed. 2366 */ 2367 void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args) 2368 { 2369 kfree(args->uuid); 2370 kfree(args->fsid); 2371 args->uuid = NULL; 2372 args->fsid = NULL; 2373 } 2374 2375 struct btrfs_device *btrfs_find_device_by_devspec( 2376 struct btrfs_fs_info *fs_info, u64 devid, 2377 const char *device_path) 2378 { 2379 BTRFS_DEV_LOOKUP_ARGS(args); 2380 struct btrfs_device *device; 2381 int ret; 2382 2383 if (devid) { 2384 args.devid = devid; 2385 device = btrfs_find_device(fs_info->fs_devices, &args); 2386 if (!device) 2387 return ERR_PTR(-ENOENT); 2388 return device; 2389 } 2390 2391 ret = btrfs_get_dev_args_from_path(fs_info, &args, device_path); 2392 if (ret) 2393 return ERR_PTR(ret); 2394 device = btrfs_find_device(fs_info->fs_devices, &args); 2395 btrfs_put_dev_args_from_path(&args); 2396 if (!device) 2397 return ERR_PTR(-ENOENT); 2398 return device; 2399 } 2400 2401 static struct btrfs_fs_devices *btrfs_init_sprout(struct btrfs_fs_info *fs_info) 2402 { 2403 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2404 struct btrfs_fs_devices *old_devices; 2405 struct btrfs_fs_devices *seed_devices; 2406 2407 lockdep_assert_held(&uuid_mutex); 2408 if (!fs_devices->seeding) 2409 return ERR_PTR(-EINVAL); 2410 2411 /* 2412 * Private copy of the seed devices, anchored at 2413 * fs_info->fs_devices->seed_list 2414 */ 2415 seed_devices = alloc_fs_devices(NULL, NULL); 2416 if (IS_ERR(seed_devices)) 2417 return seed_devices; 2418 2419 /* 2420 * It's necessary to retain a copy of the original seed fs_devices in 2421 * fs_uuids so that filesystems which have been seeded can successfully 2422 * reference the seed device from open_seed_devices. This also supports 2423 * multiple fs seed. 2424 */ 2425 old_devices = clone_fs_devices(fs_devices); 2426 if (IS_ERR(old_devices)) { 2427 kfree(seed_devices); 2428 return old_devices; 2429 } 2430 2431 list_add(&old_devices->fs_list, &fs_uuids); 2432 2433 memcpy(seed_devices, fs_devices, sizeof(*seed_devices)); 2434 seed_devices->opened = 1; 2435 INIT_LIST_HEAD(&seed_devices->devices); 2436 INIT_LIST_HEAD(&seed_devices->alloc_list); 2437 mutex_init(&seed_devices->device_list_mutex); 2438 2439 return seed_devices; 2440 } 2441 2442 /* 2443 * Splice seed devices into the sprout fs_devices. 2444 * Generate a new fsid for the sprouted read-write filesystem. 2445 */ 2446 static void btrfs_setup_sprout(struct btrfs_fs_info *fs_info, 2447 struct btrfs_fs_devices *seed_devices) 2448 { 2449 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2450 struct btrfs_super_block *disk_super = fs_info->super_copy; 2451 struct btrfs_device *device; 2452 u64 super_flags; 2453 2454 /* 2455 * We are updating the fsid, the thread leading to device_list_add() 2456 * could race, so uuid_mutex is needed. 2457 */ 2458 lockdep_assert_held(&uuid_mutex); 2459 2460 /* 2461 * The threads listed below may traverse dev_list but can do that without 2462 * device_list_mutex: 2463 * - All device ops and balance - as we are in btrfs_exclop_start. 2464 * - Various dev_list readers - are using RCU. 2465 * - btrfs_ioctl_fitrim() - is using RCU. 2466 * 2467 * For-read threads as below are using device_list_mutex: 2468 * - Readonly scrub btrfs_scrub_dev() 2469 * - Readonly scrub btrfs_scrub_progress() 2470 * - btrfs_get_dev_stats() 2471 */ 2472 lockdep_assert_held(&fs_devices->device_list_mutex); 2473 2474 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices, 2475 synchronize_rcu); 2476 list_for_each_entry(device, &seed_devices->devices, dev_list) 2477 device->fs_devices = seed_devices; 2478 2479 fs_devices->seeding = false; 2480 fs_devices->num_devices = 0; 2481 fs_devices->open_devices = 0; 2482 fs_devices->missing_devices = 0; 2483 fs_devices->rotating = false; 2484 list_add(&seed_devices->seed_list, &fs_devices->seed_list); 2485 2486 generate_random_uuid(fs_devices->fsid); 2487 memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE); 2488 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); 2489 2490 super_flags = btrfs_super_flags(disk_super) & 2491 ~BTRFS_SUPER_FLAG_SEEDING; 2492 btrfs_set_super_flags(disk_super, super_flags); 2493 } 2494 2495 /* 2496 * Store the expected generation for seed devices in device items. 2497 */ 2498 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans) 2499 { 2500 BTRFS_DEV_LOOKUP_ARGS(args); 2501 struct btrfs_fs_info *fs_info = trans->fs_info; 2502 struct btrfs_root *root = fs_info->chunk_root; 2503 struct btrfs_path *path; 2504 struct extent_buffer *leaf; 2505 struct btrfs_dev_item *dev_item; 2506 struct btrfs_device *device; 2507 struct btrfs_key key; 2508 u8 fs_uuid[BTRFS_FSID_SIZE]; 2509 u8 dev_uuid[BTRFS_UUID_SIZE]; 2510 int ret; 2511 2512 path = btrfs_alloc_path(); 2513 if (!path) 2514 return -ENOMEM; 2515 2516 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2517 key.offset = 0; 2518 key.type = BTRFS_DEV_ITEM_KEY; 2519 2520 while (1) { 2521 btrfs_reserve_chunk_metadata(trans, false); 2522 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2523 btrfs_trans_release_chunk_metadata(trans); 2524 if (ret < 0) 2525 goto error; 2526 2527 leaf = path->nodes[0]; 2528 next_slot: 2529 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 2530 ret = btrfs_next_leaf(root, path); 2531 if (ret > 0) 2532 break; 2533 if (ret < 0) 2534 goto error; 2535 leaf = path->nodes[0]; 2536 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2537 btrfs_release_path(path); 2538 continue; 2539 } 2540 2541 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2542 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID || 2543 key.type != BTRFS_DEV_ITEM_KEY) 2544 break; 2545 2546 dev_item = btrfs_item_ptr(leaf, path->slots[0], 2547 struct btrfs_dev_item); 2548 args.devid = btrfs_device_id(leaf, dev_item); 2549 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 2550 BTRFS_UUID_SIZE); 2551 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 2552 BTRFS_FSID_SIZE); 2553 args.uuid = dev_uuid; 2554 args.fsid = fs_uuid; 2555 device = btrfs_find_device(fs_info->fs_devices, &args); 2556 BUG_ON(!device); /* Logic error */ 2557 2558 if (device->fs_devices->seeding) { 2559 btrfs_set_device_generation(leaf, dev_item, 2560 device->generation); 2561 btrfs_mark_buffer_dirty(leaf); 2562 } 2563 2564 path->slots[0]++; 2565 goto next_slot; 2566 } 2567 ret = 0; 2568 error: 2569 btrfs_free_path(path); 2570 return ret; 2571 } 2572 2573 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path) 2574 { 2575 struct btrfs_root *root = fs_info->dev_root; 2576 struct btrfs_trans_handle *trans; 2577 struct btrfs_device *device; 2578 struct block_device *bdev; 2579 struct super_block *sb = fs_info->sb; 2580 struct rcu_string *name; 2581 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2582 struct btrfs_fs_devices *seed_devices; 2583 u64 orig_super_total_bytes; 2584 u64 orig_super_num_devices; 2585 int ret = 0; 2586 bool seeding_dev = false; 2587 bool locked = false; 2588 2589 if (sb_rdonly(sb) && !fs_devices->seeding) 2590 return -EROFS; 2591 2592 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL, 2593 fs_info->bdev_holder); 2594 if (IS_ERR(bdev)) 2595 return PTR_ERR(bdev); 2596 2597 if (!btrfs_check_device_zone_type(fs_info, bdev)) { 2598 ret = -EINVAL; 2599 goto error; 2600 } 2601 2602 if (fs_devices->seeding) { 2603 seeding_dev = true; 2604 down_write(&sb->s_umount); 2605 mutex_lock(&uuid_mutex); 2606 locked = true; 2607 } 2608 2609 sync_blockdev(bdev); 2610 2611 rcu_read_lock(); 2612 list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) { 2613 if (device->bdev == bdev) { 2614 ret = -EEXIST; 2615 rcu_read_unlock(); 2616 goto error; 2617 } 2618 } 2619 rcu_read_unlock(); 2620 2621 device = btrfs_alloc_device(fs_info, NULL, NULL); 2622 if (IS_ERR(device)) { 2623 /* we can safely leave the fs_devices entry around */ 2624 ret = PTR_ERR(device); 2625 goto error; 2626 } 2627 2628 name = rcu_string_strdup(device_path, GFP_KERNEL); 2629 if (!name) { 2630 ret = -ENOMEM; 2631 goto error_free_device; 2632 } 2633 rcu_assign_pointer(device->name, name); 2634 2635 device->fs_info = fs_info; 2636 device->bdev = bdev; 2637 ret = lookup_bdev(device_path, &device->devt); 2638 if (ret) 2639 goto error_free_device; 2640 2641 ret = btrfs_get_dev_zone_info(device, false); 2642 if (ret) 2643 goto error_free_device; 2644 2645 trans = btrfs_start_transaction(root, 0); 2646 if (IS_ERR(trans)) { 2647 ret = PTR_ERR(trans); 2648 goto error_free_zone; 2649 } 2650 2651 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 2652 device->generation = trans->transid; 2653 device->io_width = fs_info->sectorsize; 2654 device->io_align = fs_info->sectorsize; 2655 device->sector_size = fs_info->sectorsize; 2656 device->total_bytes = 2657 round_down(bdev_nr_bytes(bdev), fs_info->sectorsize); 2658 device->disk_total_bytes = device->total_bytes; 2659 device->commit_total_bytes = device->total_bytes; 2660 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2661 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 2662 device->mode = FMODE_EXCL; 2663 device->dev_stats_valid = 1; 2664 set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE); 2665 2666 if (seeding_dev) { 2667 btrfs_clear_sb_rdonly(sb); 2668 2669 /* GFP_KERNEL allocation must not be under device_list_mutex */ 2670 seed_devices = btrfs_init_sprout(fs_info); 2671 if (IS_ERR(seed_devices)) { 2672 ret = PTR_ERR(seed_devices); 2673 btrfs_abort_transaction(trans, ret); 2674 goto error_trans; 2675 } 2676 } 2677 2678 mutex_lock(&fs_devices->device_list_mutex); 2679 if (seeding_dev) { 2680 btrfs_setup_sprout(fs_info, seed_devices); 2681 btrfs_assign_next_active_device(fs_info->fs_devices->latest_dev, 2682 device); 2683 } 2684 2685 device->fs_devices = fs_devices; 2686 2687 mutex_lock(&fs_info->chunk_mutex); 2688 list_add_rcu(&device->dev_list, &fs_devices->devices); 2689 list_add(&device->dev_alloc_list, &fs_devices->alloc_list); 2690 fs_devices->num_devices++; 2691 fs_devices->open_devices++; 2692 fs_devices->rw_devices++; 2693 fs_devices->total_devices++; 2694 fs_devices->total_rw_bytes += device->total_bytes; 2695 2696 atomic64_add(device->total_bytes, &fs_info->free_chunk_space); 2697 2698 if (!bdev_nonrot(bdev)) 2699 fs_devices->rotating = true; 2700 2701 orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy); 2702 btrfs_set_super_total_bytes(fs_info->super_copy, 2703 round_down(orig_super_total_bytes + device->total_bytes, 2704 fs_info->sectorsize)); 2705 2706 orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy); 2707 btrfs_set_super_num_devices(fs_info->super_copy, 2708 orig_super_num_devices + 1); 2709 2710 /* 2711 * we've got more storage, clear any full flags on the space 2712 * infos 2713 */ 2714 btrfs_clear_space_info_full(fs_info); 2715 2716 mutex_unlock(&fs_info->chunk_mutex); 2717 2718 /* Add sysfs device entry */ 2719 btrfs_sysfs_add_device(device); 2720 2721 mutex_unlock(&fs_devices->device_list_mutex); 2722 2723 if (seeding_dev) { 2724 mutex_lock(&fs_info->chunk_mutex); 2725 ret = init_first_rw_device(trans); 2726 mutex_unlock(&fs_info->chunk_mutex); 2727 if (ret) { 2728 btrfs_abort_transaction(trans, ret); 2729 goto error_sysfs; 2730 } 2731 } 2732 2733 ret = btrfs_add_dev_item(trans, device); 2734 if (ret) { 2735 btrfs_abort_transaction(trans, ret); 2736 goto error_sysfs; 2737 } 2738 2739 if (seeding_dev) { 2740 ret = btrfs_finish_sprout(trans); 2741 if (ret) { 2742 btrfs_abort_transaction(trans, ret); 2743 goto error_sysfs; 2744 } 2745 2746 /* 2747 * fs_devices now represents the newly sprouted filesystem and 2748 * its fsid has been changed by btrfs_sprout_splice(). 2749 */ 2750 btrfs_sysfs_update_sprout_fsid(fs_devices); 2751 } 2752 2753 ret = btrfs_commit_transaction(trans); 2754 2755 if (seeding_dev) { 2756 mutex_unlock(&uuid_mutex); 2757 up_write(&sb->s_umount); 2758 locked = false; 2759 2760 if (ret) /* transaction commit */ 2761 return ret; 2762 2763 ret = btrfs_relocate_sys_chunks(fs_info); 2764 if (ret < 0) 2765 btrfs_handle_fs_error(fs_info, ret, 2766 "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command."); 2767 trans = btrfs_attach_transaction(root); 2768 if (IS_ERR(trans)) { 2769 if (PTR_ERR(trans) == -ENOENT) 2770 return 0; 2771 ret = PTR_ERR(trans); 2772 trans = NULL; 2773 goto error_sysfs; 2774 } 2775 ret = btrfs_commit_transaction(trans); 2776 } 2777 2778 /* 2779 * Now that we have written a new super block to this device, check all 2780 * other fs_devices list if device_path alienates any other scanned 2781 * device. 2782 * We can ignore the return value as it typically returns -EINVAL and 2783 * only succeeds if the device was an alien. 2784 */ 2785 btrfs_forget_devices(device->devt); 2786 2787 /* Update ctime/mtime for blkid or udev */ 2788 update_dev_time(device_path); 2789 2790 return ret; 2791 2792 error_sysfs: 2793 btrfs_sysfs_remove_device(device); 2794 mutex_lock(&fs_info->fs_devices->device_list_mutex); 2795 mutex_lock(&fs_info->chunk_mutex); 2796 list_del_rcu(&device->dev_list); 2797 list_del(&device->dev_alloc_list); 2798 fs_info->fs_devices->num_devices--; 2799 fs_info->fs_devices->open_devices--; 2800 fs_info->fs_devices->rw_devices--; 2801 fs_info->fs_devices->total_devices--; 2802 fs_info->fs_devices->total_rw_bytes -= device->total_bytes; 2803 atomic64_sub(device->total_bytes, &fs_info->free_chunk_space); 2804 btrfs_set_super_total_bytes(fs_info->super_copy, 2805 orig_super_total_bytes); 2806 btrfs_set_super_num_devices(fs_info->super_copy, 2807 orig_super_num_devices); 2808 mutex_unlock(&fs_info->chunk_mutex); 2809 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2810 error_trans: 2811 if (seeding_dev) 2812 btrfs_set_sb_rdonly(sb); 2813 if (trans) 2814 btrfs_end_transaction(trans); 2815 error_free_zone: 2816 btrfs_destroy_dev_zone_info(device); 2817 error_free_device: 2818 btrfs_free_device(device); 2819 error: 2820 blkdev_put(bdev, FMODE_EXCL); 2821 if (locked) { 2822 mutex_unlock(&uuid_mutex); 2823 up_write(&sb->s_umount); 2824 } 2825 return ret; 2826 } 2827 2828 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans, 2829 struct btrfs_device *device) 2830 { 2831 int ret; 2832 struct btrfs_path *path; 2833 struct btrfs_root *root = device->fs_info->chunk_root; 2834 struct btrfs_dev_item *dev_item; 2835 struct extent_buffer *leaf; 2836 struct btrfs_key key; 2837 2838 path = btrfs_alloc_path(); 2839 if (!path) 2840 return -ENOMEM; 2841 2842 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2843 key.type = BTRFS_DEV_ITEM_KEY; 2844 key.offset = device->devid; 2845 2846 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2847 if (ret < 0) 2848 goto out; 2849 2850 if (ret > 0) { 2851 ret = -ENOENT; 2852 goto out; 2853 } 2854 2855 leaf = path->nodes[0]; 2856 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 2857 2858 btrfs_set_device_id(leaf, dev_item, device->devid); 2859 btrfs_set_device_type(leaf, dev_item, device->type); 2860 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 2861 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 2862 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 2863 btrfs_set_device_total_bytes(leaf, dev_item, 2864 btrfs_device_get_disk_total_bytes(device)); 2865 btrfs_set_device_bytes_used(leaf, dev_item, 2866 btrfs_device_get_bytes_used(device)); 2867 btrfs_mark_buffer_dirty(leaf); 2868 2869 out: 2870 btrfs_free_path(path); 2871 return ret; 2872 } 2873 2874 int btrfs_grow_device(struct btrfs_trans_handle *trans, 2875 struct btrfs_device *device, u64 new_size) 2876 { 2877 struct btrfs_fs_info *fs_info = device->fs_info; 2878 struct btrfs_super_block *super_copy = fs_info->super_copy; 2879 u64 old_total; 2880 u64 diff; 2881 int ret; 2882 2883 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 2884 return -EACCES; 2885 2886 new_size = round_down(new_size, fs_info->sectorsize); 2887 2888 mutex_lock(&fs_info->chunk_mutex); 2889 old_total = btrfs_super_total_bytes(super_copy); 2890 diff = round_down(new_size - device->total_bytes, fs_info->sectorsize); 2891 2892 if (new_size <= device->total_bytes || 2893 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 2894 mutex_unlock(&fs_info->chunk_mutex); 2895 return -EINVAL; 2896 } 2897 2898 btrfs_set_super_total_bytes(super_copy, 2899 round_down(old_total + diff, fs_info->sectorsize)); 2900 device->fs_devices->total_rw_bytes += diff; 2901 2902 btrfs_device_set_total_bytes(device, new_size); 2903 btrfs_device_set_disk_total_bytes(device, new_size); 2904 btrfs_clear_space_info_full(device->fs_info); 2905 if (list_empty(&device->post_commit_list)) 2906 list_add_tail(&device->post_commit_list, 2907 &trans->transaction->dev_update_list); 2908 mutex_unlock(&fs_info->chunk_mutex); 2909 2910 btrfs_reserve_chunk_metadata(trans, false); 2911 ret = btrfs_update_device(trans, device); 2912 btrfs_trans_release_chunk_metadata(trans); 2913 2914 return ret; 2915 } 2916 2917 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 2918 { 2919 struct btrfs_fs_info *fs_info = trans->fs_info; 2920 struct btrfs_root *root = fs_info->chunk_root; 2921 int ret; 2922 struct btrfs_path *path; 2923 struct btrfs_key key; 2924 2925 path = btrfs_alloc_path(); 2926 if (!path) 2927 return -ENOMEM; 2928 2929 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2930 key.offset = chunk_offset; 2931 key.type = BTRFS_CHUNK_ITEM_KEY; 2932 2933 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 2934 if (ret < 0) 2935 goto out; 2936 else if (ret > 0) { /* Logic error or corruption */ 2937 btrfs_handle_fs_error(fs_info, -ENOENT, 2938 "Failed lookup while freeing chunk."); 2939 ret = -ENOENT; 2940 goto out; 2941 } 2942 2943 ret = btrfs_del_item(trans, root, path); 2944 if (ret < 0) 2945 btrfs_handle_fs_error(fs_info, ret, 2946 "Failed to delete chunk item."); 2947 out: 2948 btrfs_free_path(path); 2949 return ret; 2950 } 2951 2952 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 2953 { 2954 struct btrfs_super_block *super_copy = fs_info->super_copy; 2955 struct btrfs_disk_key *disk_key; 2956 struct btrfs_chunk *chunk; 2957 u8 *ptr; 2958 int ret = 0; 2959 u32 num_stripes; 2960 u32 array_size; 2961 u32 len = 0; 2962 u32 cur; 2963 struct btrfs_key key; 2964 2965 lockdep_assert_held(&fs_info->chunk_mutex); 2966 array_size = btrfs_super_sys_array_size(super_copy); 2967 2968 ptr = super_copy->sys_chunk_array; 2969 cur = 0; 2970 2971 while (cur < array_size) { 2972 disk_key = (struct btrfs_disk_key *)ptr; 2973 btrfs_disk_key_to_cpu(&key, disk_key); 2974 2975 len = sizeof(*disk_key); 2976 2977 if (key.type == BTRFS_CHUNK_ITEM_KEY) { 2978 chunk = (struct btrfs_chunk *)(ptr + len); 2979 num_stripes = btrfs_stack_chunk_num_stripes(chunk); 2980 len += btrfs_chunk_item_size(num_stripes); 2981 } else { 2982 ret = -EIO; 2983 break; 2984 } 2985 if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID && 2986 key.offset == chunk_offset) { 2987 memmove(ptr, ptr + len, array_size - (cur + len)); 2988 array_size -= len; 2989 btrfs_set_super_sys_array_size(super_copy, array_size); 2990 } else { 2991 ptr += len; 2992 cur += len; 2993 } 2994 } 2995 return ret; 2996 } 2997 2998 /* 2999 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent. 3000 * @logical: Logical block offset in bytes. 3001 * @length: Length of extent in bytes. 3002 * 3003 * Return: Chunk mapping or ERR_PTR. 3004 */ 3005 struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info, 3006 u64 logical, u64 length) 3007 { 3008 struct extent_map_tree *em_tree; 3009 struct extent_map *em; 3010 3011 em_tree = &fs_info->mapping_tree; 3012 read_lock(&em_tree->lock); 3013 em = lookup_extent_mapping(em_tree, logical, length); 3014 read_unlock(&em_tree->lock); 3015 3016 if (!em) { 3017 btrfs_crit(fs_info, "unable to find logical %llu length %llu", 3018 logical, length); 3019 return ERR_PTR(-EINVAL); 3020 } 3021 3022 if (em->start > logical || em->start + em->len < logical) { 3023 btrfs_crit(fs_info, 3024 "found a bad mapping, wanted %llu-%llu, found %llu-%llu", 3025 logical, length, em->start, em->start + em->len); 3026 free_extent_map(em); 3027 return ERR_PTR(-EINVAL); 3028 } 3029 3030 /* callers are responsible for dropping em's ref. */ 3031 return em; 3032 } 3033 3034 static int remove_chunk_item(struct btrfs_trans_handle *trans, 3035 struct map_lookup *map, u64 chunk_offset) 3036 { 3037 int i; 3038 3039 /* 3040 * Removing chunk items and updating the device items in the chunks btree 3041 * requires holding the chunk_mutex. 3042 * See the comment at btrfs_chunk_alloc() for the details. 3043 */ 3044 lockdep_assert_held(&trans->fs_info->chunk_mutex); 3045 3046 for (i = 0; i < map->num_stripes; i++) { 3047 int ret; 3048 3049 ret = btrfs_update_device(trans, map->stripes[i].dev); 3050 if (ret) 3051 return ret; 3052 } 3053 3054 return btrfs_free_chunk(trans, chunk_offset); 3055 } 3056 3057 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 3058 { 3059 struct btrfs_fs_info *fs_info = trans->fs_info; 3060 struct extent_map *em; 3061 struct map_lookup *map; 3062 u64 dev_extent_len = 0; 3063 int i, ret = 0; 3064 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 3065 3066 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 3067 if (IS_ERR(em)) { 3068 /* 3069 * This is a logic error, but we don't want to just rely on the 3070 * user having built with ASSERT enabled, so if ASSERT doesn't 3071 * do anything we still error out. 3072 */ 3073 ASSERT(0); 3074 return PTR_ERR(em); 3075 } 3076 map = em->map_lookup; 3077 3078 /* 3079 * First delete the device extent items from the devices btree. 3080 * We take the device_list_mutex to avoid racing with the finishing phase 3081 * of a device replace operation. See the comment below before acquiring 3082 * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex 3083 * because that can result in a deadlock when deleting the device extent 3084 * items from the devices btree - COWing an extent buffer from the btree 3085 * may result in allocating a new metadata chunk, which would attempt to 3086 * lock again fs_info->chunk_mutex. 3087 */ 3088 mutex_lock(&fs_devices->device_list_mutex); 3089 for (i = 0; i < map->num_stripes; i++) { 3090 struct btrfs_device *device = map->stripes[i].dev; 3091 ret = btrfs_free_dev_extent(trans, device, 3092 map->stripes[i].physical, 3093 &dev_extent_len); 3094 if (ret) { 3095 mutex_unlock(&fs_devices->device_list_mutex); 3096 btrfs_abort_transaction(trans, ret); 3097 goto out; 3098 } 3099 3100 if (device->bytes_used > 0) { 3101 mutex_lock(&fs_info->chunk_mutex); 3102 btrfs_device_set_bytes_used(device, 3103 device->bytes_used - dev_extent_len); 3104 atomic64_add(dev_extent_len, &fs_info->free_chunk_space); 3105 btrfs_clear_space_info_full(fs_info); 3106 mutex_unlock(&fs_info->chunk_mutex); 3107 } 3108 } 3109 mutex_unlock(&fs_devices->device_list_mutex); 3110 3111 /* 3112 * We acquire fs_info->chunk_mutex for 2 reasons: 3113 * 3114 * 1) Just like with the first phase of the chunk allocation, we must 3115 * reserve system space, do all chunk btree updates and deletions, and 3116 * update the system chunk array in the superblock while holding this 3117 * mutex. This is for similar reasons as explained on the comment at 3118 * the top of btrfs_chunk_alloc(); 3119 * 3120 * 2) Prevent races with the final phase of a device replace operation 3121 * that replaces the device object associated with the map's stripes, 3122 * because the device object's id can change at any time during that 3123 * final phase of the device replace operation 3124 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the 3125 * replaced device and then see it with an ID of 3126 * BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating 3127 * the device item, which does not exists on the chunk btree. 3128 * The finishing phase of device replace acquires both the 3129 * device_list_mutex and the chunk_mutex, in that order, so we are 3130 * safe by just acquiring the chunk_mutex. 3131 */ 3132 trans->removing_chunk = true; 3133 mutex_lock(&fs_info->chunk_mutex); 3134 3135 check_system_chunk(trans, map->type); 3136 3137 ret = remove_chunk_item(trans, map, chunk_offset); 3138 /* 3139 * Normally we should not get -ENOSPC since we reserved space before 3140 * through the call to check_system_chunk(). 3141 * 3142 * Despite our system space_info having enough free space, we may not 3143 * be able to allocate extents from its block groups, because all have 3144 * an incompatible profile, which will force us to allocate a new system 3145 * block group with the right profile, or right after we called 3146 * check_system_space() above, a scrub turned the only system block group 3147 * with enough free space into RO mode. 3148 * This is explained with more detail at do_chunk_alloc(). 3149 * 3150 * So if we get -ENOSPC, allocate a new system chunk and retry once. 3151 */ 3152 if (ret == -ENOSPC) { 3153 const u64 sys_flags = btrfs_system_alloc_profile(fs_info); 3154 struct btrfs_block_group *sys_bg; 3155 3156 sys_bg = btrfs_create_chunk(trans, sys_flags); 3157 if (IS_ERR(sys_bg)) { 3158 ret = PTR_ERR(sys_bg); 3159 btrfs_abort_transaction(trans, ret); 3160 goto out; 3161 } 3162 3163 ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg); 3164 if (ret) { 3165 btrfs_abort_transaction(trans, ret); 3166 goto out; 3167 } 3168 3169 ret = remove_chunk_item(trans, map, chunk_offset); 3170 if (ret) { 3171 btrfs_abort_transaction(trans, ret); 3172 goto out; 3173 } 3174 } else if (ret) { 3175 btrfs_abort_transaction(trans, ret); 3176 goto out; 3177 } 3178 3179 trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len); 3180 3181 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 3182 ret = btrfs_del_sys_chunk(fs_info, chunk_offset); 3183 if (ret) { 3184 btrfs_abort_transaction(trans, ret); 3185 goto out; 3186 } 3187 } 3188 3189 mutex_unlock(&fs_info->chunk_mutex); 3190 trans->removing_chunk = false; 3191 3192 /* 3193 * We are done with chunk btree updates and deletions, so release the 3194 * system space we previously reserved (with check_system_chunk()). 3195 */ 3196 btrfs_trans_release_chunk_metadata(trans); 3197 3198 ret = btrfs_remove_block_group(trans, chunk_offset, em); 3199 if (ret) { 3200 btrfs_abort_transaction(trans, ret); 3201 goto out; 3202 } 3203 3204 out: 3205 if (trans->removing_chunk) { 3206 mutex_unlock(&fs_info->chunk_mutex); 3207 trans->removing_chunk = false; 3208 } 3209 /* once for us */ 3210 free_extent_map(em); 3211 return ret; 3212 } 3213 3214 int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 3215 { 3216 struct btrfs_root *root = fs_info->chunk_root; 3217 struct btrfs_trans_handle *trans; 3218 struct btrfs_block_group *block_group; 3219 u64 length; 3220 int ret; 3221 3222 if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { 3223 btrfs_err(fs_info, 3224 "relocate: not supported on extent tree v2 yet"); 3225 return -EINVAL; 3226 } 3227 3228 /* 3229 * Prevent races with automatic removal of unused block groups. 3230 * After we relocate and before we remove the chunk with offset 3231 * chunk_offset, automatic removal of the block group can kick in, 3232 * resulting in a failure when calling btrfs_remove_chunk() below. 3233 * 3234 * Make sure to acquire this mutex before doing a tree search (dev 3235 * or chunk trees) to find chunks. Otherwise the cleaner kthread might 3236 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after 3237 * we release the path used to search the chunk/dev tree and before 3238 * the current task acquires this mutex and calls us. 3239 */ 3240 lockdep_assert_held(&fs_info->reclaim_bgs_lock); 3241 3242 /* step one, relocate all the extents inside this chunk */ 3243 btrfs_scrub_pause(fs_info); 3244 ret = btrfs_relocate_block_group(fs_info, chunk_offset); 3245 btrfs_scrub_continue(fs_info); 3246 if (ret) 3247 return ret; 3248 3249 block_group = btrfs_lookup_block_group(fs_info, chunk_offset); 3250 if (!block_group) 3251 return -ENOENT; 3252 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group); 3253 length = block_group->length; 3254 btrfs_put_block_group(block_group); 3255 3256 /* 3257 * On a zoned file system, discard the whole block group, this will 3258 * trigger a REQ_OP_ZONE_RESET operation on the device zone. If 3259 * resetting the zone fails, don't treat it as a fatal problem from the 3260 * filesystem's point of view. 3261 */ 3262 if (btrfs_is_zoned(fs_info)) { 3263 ret = btrfs_discard_extent(fs_info, chunk_offset, length, NULL); 3264 if (ret) 3265 btrfs_info(fs_info, 3266 "failed to reset zone %llu after relocation", 3267 chunk_offset); 3268 } 3269 3270 trans = btrfs_start_trans_remove_block_group(root->fs_info, 3271 chunk_offset); 3272 if (IS_ERR(trans)) { 3273 ret = PTR_ERR(trans); 3274 btrfs_handle_fs_error(root->fs_info, ret, NULL); 3275 return ret; 3276 } 3277 3278 /* 3279 * step two, delete the device extents and the 3280 * chunk tree entries 3281 */ 3282 ret = btrfs_remove_chunk(trans, chunk_offset); 3283 btrfs_end_transaction(trans); 3284 return ret; 3285 } 3286 3287 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info) 3288 { 3289 struct btrfs_root *chunk_root = fs_info->chunk_root; 3290 struct btrfs_path *path; 3291 struct extent_buffer *leaf; 3292 struct btrfs_chunk *chunk; 3293 struct btrfs_key key; 3294 struct btrfs_key found_key; 3295 u64 chunk_type; 3296 bool retried = false; 3297 int failed = 0; 3298 int ret; 3299 3300 path = btrfs_alloc_path(); 3301 if (!path) 3302 return -ENOMEM; 3303 3304 again: 3305 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3306 key.offset = (u64)-1; 3307 key.type = BTRFS_CHUNK_ITEM_KEY; 3308 3309 while (1) { 3310 mutex_lock(&fs_info->reclaim_bgs_lock); 3311 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3312 if (ret < 0) { 3313 mutex_unlock(&fs_info->reclaim_bgs_lock); 3314 goto error; 3315 } 3316 BUG_ON(ret == 0); /* Corruption */ 3317 3318 ret = btrfs_previous_item(chunk_root, path, key.objectid, 3319 key.type); 3320 if (ret) 3321 mutex_unlock(&fs_info->reclaim_bgs_lock); 3322 if (ret < 0) 3323 goto error; 3324 if (ret > 0) 3325 break; 3326 3327 leaf = path->nodes[0]; 3328 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3329 3330 chunk = btrfs_item_ptr(leaf, path->slots[0], 3331 struct btrfs_chunk); 3332 chunk_type = btrfs_chunk_type(leaf, chunk); 3333 btrfs_release_path(path); 3334 3335 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) { 3336 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 3337 if (ret == -ENOSPC) 3338 failed++; 3339 else 3340 BUG_ON(ret); 3341 } 3342 mutex_unlock(&fs_info->reclaim_bgs_lock); 3343 3344 if (found_key.offset == 0) 3345 break; 3346 key.offset = found_key.offset - 1; 3347 } 3348 ret = 0; 3349 if (failed && !retried) { 3350 failed = 0; 3351 retried = true; 3352 goto again; 3353 } else if (WARN_ON(failed && retried)) { 3354 ret = -ENOSPC; 3355 } 3356 error: 3357 btrfs_free_path(path); 3358 return ret; 3359 } 3360 3361 /* 3362 * return 1 : allocate a data chunk successfully, 3363 * return <0: errors during allocating a data chunk, 3364 * return 0 : no need to allocate a data chunk. 3365 */ 3366 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info, 3367 u64 chunk_offset) 3368 { 3369 struct btrfs_block_group *cache; 3370 u64 bytes_used; 3371 u64 chunk_type; 3372 3373 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3374 ASSERT(cache); 3375 chunk_type = cache->flags; 3376 btrfs_put_block_group(cache); 3377 3378 if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA)) 3379 return 0; 3380 3381 spin_lock(&fs_info->data_sinfo->lock); 3382 bytes_used = fs_info->data_sinfo->bytes_used; 3383 spin_unlock(&fs_info->data_sinfo->lock); 3384 3385 if (!bytes_used) { 3386 struct btrfs_trans_handle *trans; 3387 int ret; 3388 3389 trans = btrfs_join_transaction(fs_info->tree_root); 3390 if (IS_ERR(trans)) 3391 return PTR_ERR(trans); 3392 3393 ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA); 3394 btrfs_end_transaction(trans); 3395 if (ret < 0) 3396 return ret; 3397 return 1; 3398 } 3399 3400 return 0; 3401 } 3402 3403 static int insert_balance_item(struct btrfs_fs_info *fs_info, 3404 struct btrfs_balance_control *bctl) 3405 { 3406 struct btrfs_root *root = fs_info->tree_root; 3407 struct btrfs_trans_handle *trans; 3408 struct btrfs_balance_item *item; 3409 struct btrfs_disk_balance_args disk_bargs; 3410 struct btrfs_path *path; 3411 struct extent_buffer *leaf; 3412 struct btrfs_key key; 3413 int ret, err; 3414 3415 path = btrfs_alloc_path(); 3416 if (!path) 3417 return -ENOMEM; 3418 3419 trans = btrfs_start_transaction(root, 0); 3420 if (IS_ERR(trans)) { 3421 btrfs_free_path(path); 3422 return PTR_ERR(trans); 3423 } 3424 3425 key.objectid = BTRFS_BALANCE_OBJECTID; 3426 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3427 key.offset = 0; 3428 3429 ret = btrfs_insert_empty_item(trans, root, path, &key, 3430 sizeof(*item)); 3431 if (ret) 3432 goto out; 3433 3434 leaf = path->nodes[0]; 3435 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 3436 3437 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item)); 3438 3439 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data); 3440 btrfs_set_balance_data(leaf, item, &disk_bargs); 3441 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta); 3442 btrfs_set_balance_meta(leaf, item, &disk_bargs); 3443 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys); 3444 btrfs_set_balance_sys(leaf, item, &disk_bargs); 3445 3446 btrfs_set_balance_flags(leaf, item, bctl->flags); 3447 3448 btrfs_mark_buffer_dirty(leaf); 3449 out: 3450 btrfs_free_path(path); 3451 err = btrfs_commit_transaction(trans); 3452 if (err && !ret) 3453 ret = err; 3454 return ret; 3455 } 3456 3457 static int del_balance_item(struct btrfs_fs_info *fs_info) 3458 { 3459 struct btrfs_root *root = fs_info->tree_root; 3460 struct btrfs_trans_handle *trans; 3461 struct btrfs_path *path; 3462 struct btrfs_key key; 3463 int ret, err; 3464 3465 path = btrfs_alloc_path(); 3466 if (!path) 3467 return -ENOMEM; 3468 3469 trans = btrfs_start_transaction_fallback_global_rsv(root, 0); 3470 if (IS_ERR(trans)) { 3471 btrfs_free_path(path); 3472 return PTR_ERR(trans); 3473 } 3474 3475 key.objectid = BTRFS_BALANCE_OBJECTID; 3476 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3477 key.offset = 0; 3478 3479 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 3480 if (ret < 0) 3481 goto out; 3482 if (ret > 0) { 3483 ret = -ENOENT; 3484 goto out; 3485 } 3486 3487 ret = btrfs_del_item(trans, root, path); 3488 out: 3489 btrfs_free_path(path); 3490 err = btrfs_commit_transaction(trans); 3491 if (err && !ret) 3492 ret = err; 3493 return ret; 3494 } 3495 3496 /* 3497 * This is a heuristic used to reduce the number of chunks balanced on 3498 * resume after balance was interrupted. 3499 */ 3500 static void update_balance_args(struct btrfs_balance_control *bctl) 3501 { 3502 /* 3503 * Turn on soft mode for chunk types that were being converted. 3504 */ 3505 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) 3506 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT; 3507 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) 3508 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT; 3509 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) 3510 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT; 3511 3512 /* 3513 * Turn on usage filter if is not already used. The idea is 3514 * that chunks that we have already balanced should be 3515 * reasonably full. Don't do it for chunks that are being 3516 * converted - that will keep us from relocating unconverted 3517 * (albeit full) chunks. 3518 */ 3519 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) && 3520 !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3521 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3522 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE; 3523 bctl->data.usage = 90; 3524 } 3525 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) && 3526 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3527 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3528 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE; 3529 bctl->sys.usage = 90; 3530 } 3531 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) && 3532 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3533 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3534 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE; 3535 bctl->meta.usage = 90; 3536 } 3537 } 3538 3539 /* 3540 * Clear the balance status in fs_info and delete the balance item from disk. 3541 */ 3542 static void reset_balance_state(struct btrfs_fs_info *fs_info) 3543 { 3544 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3545 int ret; 3546 3547 BUG_ON(!fs_info->balance_ctl); 3548 3549 spin_lock(&fs_info->balance_lock); 3550 fs_info->balance_ctl = NULL; 3551 spin_unlock(&fs_info->balance_lock); 3552 3553 kfree(bctl); 3554 ret = del_balance_item(fs_info); 3555 if (ret) 3556 btrfs_handle_fs_error(fs_info, ret, NULL); 3557 } 3558 3559 /* 3560 * Balance filters. Return 1 if chunk should be filtered out 3561 * (should not be balanced). 3562 */ 3563 static int chunk_profiles_filter(u64 chunk_type, 3564 struct btrfs_balance_args *bargs) 3565 { 3566 chunk_type = chunk_to_extended(chunk_type) & 3567 BTRFS_EXTENDED_PROFILE_MASK; 3568 3569 if (bargs->profiles & chunk_type) 3570 return 0; 3571 3572 return 1; 3573 } 3574 3575 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, 3576 struct btrfs_balance_args *bargs) 3577 { 3578 struct btrfs_block_group *cache; 3579 u64 chunk_used; 3580 u64 user_thresh_min; 3581 u64 user_thresh_max; 3582 int ret = 1; 3583 3584 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3585 chunk_used = cache->used; 3586 3587 if (bargs->usage_min == 0) 3588 user_thresh_min = 0; 3589 else 3590 user_thresh_min = div_factor_fine(cache->length, 3591 bargs->usage_min); 3592 3593 if (bargs->usage_max == 0) 3594 user_thresh_max = 1; 3595 else if (bargs->usage_max > 100) 3596 user_thresh_max = cache->length; 3597 else 3598 user_thresh_max = div_factor_fine(cache->length, 3599 bargs->usage_max); 3600 3601 if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max) 3602 ret = 0; 3603 3604 btrfs_put_block_group(cache); 3605 return ret; 3606 } 3607 3608 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, 3609 u64 chunk_offset, struct btrfs_balance_args *bargs) 3610 { 3611 struct btrfs_block_group *cache; 3612 u64 chunk_used, user_thresh; 3613 int ret = 1; 3614 3615 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3616 chunk_used = cache->used; 3617 3618 if (bargs->usage_min == 0) 3619 user_thresh = 1; 3620 else if (bargs->usage > 100) 3621 user_thresh = cache->length; 3622 else 3623 user_thresh = div_factor_fine(cache->length, bargs->usage); 3624 3625 if (chunk_used < user_thresh) 3626 ret = 0; 3627 3628 btrfs_put_block_group(cache); 3629 return ret; 3630 } 3631 3632 static int chunk_devid_filter(struct extent_buffer *leaf, 3633 struct btrfs_chunk *chunk, 3634 struct btrfs_balance_args *bargs) 3635 { 3636 struct btrfs_stripe *stripe; 3637 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3638 int i; 3639 3640 for (i = 0; i < num_stripes; i++) { 3641 stripe = btrfs_stripe_nr(chunk, i); 3642 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid) 3643 return 0; 3644 } 3645 3646 return 1; 3647 } 3648 3649 static u64 calc_data_stripes(u64 type, int num_stripes) 3650 { 3651 const int index = btrfs_bg_flags_to_raid_index(type); 3652 const int ncopies = btrfs_raid_array[index].ncopies; 3653 const int nparity = btrfs_raid_array[index].nparity; 3654 3655 return (num_stripes - nparity) / ncopies; 3656 } 3657 3658 /* [pstart, pend) */ 3659 static int chunk_drange_filter(struct extent_buffer *leaf, 3660 struct btrfs_chunk *chunk, 3661 struct btrfs_balance_args *bargs) 3662 { 3663 struct btrfs_stripe *stripe; 3664 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3665 u64 stripe_offset; 3666 u64 stripe_length; 3667 u64 type; 3668 int factor; 3669 int i; 3670 3671 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID)) 3672 return 0; 3673 3674 type = btrfs_chunk_type(leaf, chunk); 3675 factor = calc_data_stripes(type, num_stripes); 3676 3677 for (i = 0; i < num_stripes; i++) { 3678 stripe = btrfs_stripe_nr(chunk, i); 3679 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid) 3680 continue; 3681 3682 stripe_offset = btrfs_stripe_offset(leaf, stripe); 3683 stripe_length = btrfs_chunk_length(leaf, chunk); 3684 stripe_length = div_u64(stripe_length, factor); 3685 3686 if (stripe_offset < bargs->pend && 3687 stripe_offset + stripe_length > bargs->pstart) 3688 return 0; 3689 } 3690 3691 return 1; 3692 } 3693 3694 /* [vstart, vend) */ 3695 static int chunk_vrange_filter(struct extent_buffer *leaf, 3696 struct btrfs_chunk *chunk, 3697 u64 chunk_offset, 3698 struct btrfs_balance_args *bargs) 3699 { 3700 if (chunk_offset < bargs->vend && 3701 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart) 3702 /* at least part of the chunk is inside this vrange */ 3703 return 0; 3704 3705 return 1; 3706 } 3707 3708 static int chunk_stripes_range_filter(struct extent_buffer *leaf, 3709 struct btrfs_chunk *chunk, 3710 struct btrfs_balance_args *bargs) 3711 { 3712 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3713 3714 if (bargs->stripes_min <= num_stripes 3715 && num_stripes <= bargs->stripes_max) 3716 return 0; 3717 3718 return 1; 3719 } 3720 3721 static int chunk_soft_convert_filter(u64 chunk_type, 3722 struct btrfs_balance_args *bargs) 3723 { 3724 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 3725 return 0; 3726 3727 chunk_type = chunk_to_extended(chunk_type) & 3728 BTRFS_EXTENDED_PROFILE_MASK; 3729 3730 if (bargs->target == chunk_type) 3731 return 1; 3732 3733 return 0; 3734 } 3735 3736 static int should_balance_chunk(struct extent_buffer *leaf, 3737 struct btrfs_chunk *chunk, u64 chunk_offset) 3738 { 3739 struct btrfs_fs_info *fs_info = leaf->fs_info; 3740 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3741 struct btrfs_balance_args *bargs = NULL; 3742 u64 chunk_type = btrfs_chunk_type(leaf, chunk); 3743 3744 /* type filter */ 3745 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) & 3746 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) { 3747 return 0; 3748 } 3749 3750 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3751 bargs = &bctl->data; 3752 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3753 bargs = &bctl->sys; 3754 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3755 bargs = &bctl->meta; 3756 3757 /* profiles filter */ 3758 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) && 3759 chunk_profiles_filter(chunk_type, bargs)) { 3760 return 0; 3761 } 3762 3763 /* usage filter */ 3764 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) && 3765 chunk_usage_filter(fs_info, chunk_offset, bargs)) { 3766 return 0; 3767 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3768 chunk_usage_range_filter(fs_info, chunk_offset, bargs)) { 3769 return 0; 3770 } 3771 3772 /* devid filter */ 3773 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) && 3774 chunk_devid_filter(leaf, chunk, bargs)) { 3775 return 0; 3776 } 3777 3778 /* drange filter, makes sense only with devid filter */ 3779 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) && 3780 chunk_drange_filter(leaf, chunk, bargs)) { 3781 return 0; 3782 } 3783 3784 /* vrange filter */ 3785 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) && 3786 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) { 3787 return 0; 3788 } 3789 3790 /* stripes filter */ 3791 if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) && 3792 chunk_stripes_range_filter(leaf, chunk, bargs)) { 3793 return 0; 3794 } 3795 3796 /* soft profile changing mode */ 3797 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) && 3798 chunk_soft_convert_filter(chunk_type, bargs)) { 3799 return 0; 3800 } 3801 3802 /* 3803 * limited by count, must be the last filter 3804 */ 3805 if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) { 3806 if (bargs->limit == 0) 3807 return 0; 3808 else 3809 bargs->limit--; 3810 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) { 3811 /* 3812 * Same logic as the 'limit' filter; the minimum cannot be 3813 * determined here because we do not have the global information 3814 * about the count of all chunks that satisfy the filters. 3815 */ 3816 if (bargs->limit_max == 0) 3817 return 0; 3818 else 3819 bargs->limit_max--; 3820 } 3821 3822 return 1; 3823 } 3824 3825 static int __btrfs_balance(struct btrfs_fs_info *fs_info) 3826 { 3827 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3828 struct btrfs_root *chunk_root = fs_info->chunk_root; 3829 u64 chunk_type; 3830 struct btrfs_chunk *chunk; 3831 struct btrfs_path *path = NULL; 3832 struct btrfs_key key; 3833 struct btrfs_key found_key; 3834 struct extent_buffer *leaf; 3835 int slot; 3836 int ret; 3837 int enospc_errors = 0; 3838 bool counting = true; 3839 /* The single value limit and min/max limits use the same bytes in the */ 3840 u64 limit_data = bctl->data.limit; 3841 u64 limit_meta = bctl->meta.limit; 3842 u64 limit_sys = bctl->sys.limit; 3843 u32 count_data = 0; 3844 u32 count_meta = 0; 3845 u32 count_sys = 0; 3846 int chunk_reserved = 0; 3847 3848 path = btrfs_alloc_path(); 3849 if (!path) { 3850 ret = -ENOMEM; 3851 goto error; 3852 } 3853 3854 /* zero out stat counters */ 3855 spin_lock(&fs_info->balance_lock); 3856 memset(&bctl->stat, 0, sizeof(bctl->stat)); 3857 spin_unlock(&fs_info->balance_lock); 3858 again: 3859 if (!counting) { 3860 /* 3861 * The single value limit and min/max limits use the same bytes 3862 * in the 3863 */ 3864 bctl->data.limit = limit_data; 3865 bctl->meta.limit = limit_meta; 3866 bctl->sys.limit = limit_sys; 3867 } 3868 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3869 key.offset = (u64)-1; 3870 key.type = BTRFS_CHUNK_ITEM_KEY; 3871 3872 while (1) { 3873 if ((!counting && atomic_read(&fs_info->balance_pause_req)) || 3874 atomic_read(&fs_info->balance_cancel_req)) { 3875 ret = -ECANCELED; 3876 goto error; 3877 } 3878 3879 mutex_lock(&fs_info->reclaim_bgs_lock); 3880 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3881 if (ret < 0) { 3882 mutex_unlock(&fs_info->reclaim_bgs_lock); 3883 goto error; 3884 } 3885 3886 /* 3887 * this shouldn't happen, it means the last relocate 3888 * failed 3889 */ 3890 if (ret == 0) 3891 BUG(); /* FIXME break ? */ 3892 3893 ret = btrfs_previous_item(chunk_root, path, 0, 3894 BTRFS_CHUNK_ITEM_KEY); 3895 if (ret) { 3896 mutex_unlock(&fs_info->reclaim_bgs_lock); 3897 ret = 0; 3898 break; 3899 } 3900 3901 leaf = path->nodes[0]; 3902 slot = path->slots[0]; 3903 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3904 3905 if (found_key.objectid != key.objectid) { 3906 mutex_unlock(&fs_info->reclaim_bgs_lock); 3907 break; 3908 } 3909 3910 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 3911 chunk_type = btrfs_chunk_type(leaf, chunk); 3912 3913 if (!counting) { 3914 spin_lock(&fs_info->balance_lock); 3915 bctl->stat.considered++; 3916 spin_unlock(&fs_info->balance_lock); 3917 } 3918 3919 ret = should_balance_chunk(leaf, chunk, found_key.offset); 3920 3921 btrfs_release_path(path); 3922 if (!ret) { 3923 mutex_unlock(&fs_info->reclaim_bgs_lock); 3924 goto loop; 3925 } 3926 3927 if (counting) { 3928 mutex_unlock(&fs_info->reclaim_bgs_lock); 3929 spin_lock(&fs_info->balance_lock); 3930 bctl->stat.expected++; 3931 spin_unlock(&fs_info->balance_lock); 3932 3933 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3934 count_data++; 3935 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3936 count_sys++; 3937 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3938 count_meta++; 3939 3940 goto loop; 3941 } 3942 3943 /* 3944 * Apply limit_min filter, no need to check if the LIMITS 3945 * filter is used, limit_min is 0 by default 3946 */ 3947 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) && 3948 count_data < bctl->data.limit_min) 3949 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) && 3950 count_meta < bctl->meta.limit_min) 3951 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) && 3952 count_sys < bctl->sys.limit_min)) { 3953 mutex_unlock(&fs_info->reclaim_bgs_lock); 3954 goto loop; 3955 } 3956 3957 if (!chunk_reserved) { 3958 /* 3959 * We may be relocating the only data chunk we have, 3960 * which could potentially end up with losing data's 3961 * raid profile, so lets allocate an empty one in 3962 * advance. 3963 */ 3964 ret = btrfs_may_alloc_data_chunk(fs_info, 3965 found_key.offset); 3966 if (ret < 0) { 3967 mutex_unlock(&fs_info->reclaim_bgs_lock); 3968 goto error; 3969 } else if (ret == 1) { 3970 chunk_reserved = 1; 3971 } 3972 } 3973 3974 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 3975 mutex_unlock(&fs_info->reclaim_bgs_lock); 3976 if (ret == -ENOSPC) { 3977 enospc_errors++; 3978 } else if (ret == -ETXTBSY) { 3979 btrfs_info(fs_info, 3980 "skipping relocation of block group %llu due to active swapfile", 3981 found_key.offset); 3982 ret = 0; 3983 } else if (ret) { 3984 goto error; 3985 } else { 3986 spin_lock(&fs_info->balance_lock); 3987 bctl->stat.completed++; 3988 spin_unlock(&fs_info->balance_lock); 3989 } 3990 loop: 3991 if (found_key.offset == 0) 3992 break; 3993 key.offset = found_key.offset - 1; 3994 } 3995 3996 if (counting) { 3997 btrfs_release_path(path); 3998 counting = false; 3999 goto again; 4000 } 4001 error: 4002 btrfs_free_path(path); 4003 if (enospc_errors) { 4004 btrfs_info(fs_info, "%d enospc errors during balance", 4005 enospc_errors); 4006 if (!ret) 4007 ret = -ENOSPC; 4008 } 4009 4010 return ret; 4011 } 4012 4013 /** 4014 * alloc_profile_is_valid - see if a given profile is valid and reduced 4015 * @flags: profile to validate 4016 * @extended: if true @flags is treated as an extended profile 4017 */ 4018 static int alloc_profile_is_valid(u64 flags, int extended) 4019 { 4020 u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK : 4021 BTRFS_BLOCK_GROUP_PROFILE_MASK); 4022 4023 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK; 4024 4025 /* 1) check that all other bits are zeroed */ 4026 if (flags & ~mask) 4027 return 0; 4028 4029 /* 2) see if profile is reduced */ 4030 if (flags == 0) 4031 return !extended; /* "0" is valid for usual profiles */ 4032 4033 return has_single_bit_set(flags); 4034 } 4035 4036 static inline int balance_need_close(struct btrfs_fs_info *fs_info) 4037 { 4038 /* cancel requested || normal exit path */ 4039 return atomic_read(&fs_info->balance_cancel_req) || 4040 (atomic_read(&fs_info->balance_pause_req) == 0 && 4041 atomic_read(&fs_info->balance_cancel_req) == 0); 4042 } 4043 4044 /* 4045 * Validate target profile against allowed profiles and return true if it's OK. 4046 * Otherwise print the error message and return false. 4047 */ 4048 static inline int validate_convert_profile(struct btrfs_fs_info *fs_info, 4049 const struct btrfs_balance_args *bargs, 4050 u64 allowed, const char *type) 4051 { 4052 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 4053 return true; 4054 4055 /* Profile is valid and does not have bits outside of the allowed set */ 4056 if (alloc_profile_is_valid(bargs->target, 1) && 4057 (bargs->target & ~allowed) == 0) 4058 return true; 4059 4060 btrfs_err(fs_info, "balance: invalid convert %s profile %s", 4061 type, btrfs_bg_type_to_raid_name(bargs->target)); 4062 return false; 4063 } 4064 4065 /* 4066 * Fill @buf with textual description of balance filter flags @bargs, up to 4067 * @size_buf including the terminating null. The output may be trimmed if it 4068 * does not fit into the provided buffer. 4069 */ 4070 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf, 4071 u32 size_buf) 4072 { 4073 int ret; 4074 u32 size_bp = size_buf; 4075 char *bp = buf; 4076 u64 flags = bargs->flags; 4077 char tmp_buf[128] = {'\0'}; 4078 4079 if (!flags) 4080 return; 4081 4082 #define CHECK_APPEND_NOARG(a) \ 4083 do { \ 4084 ret = snprintf(bp, size_bp, (a)); \ 4085 if (ret < 0 || ret >= size_bp) \ 4086 goto out_overflow; \ 4087 size_bp -= ret; \ 4088 bp += ret; \ 4089 } while (0) 4090 4091 #define CHECK_APPEND_1ARG(a, v1) \ 4092 do { \ 4093 ret = snprintf(bp, size_bp, (a), (v1)); \ 4094 if (ret < 0 || ret >= size_bp) \ 4095 goto out_overflow; \ 4096 size_bp -= ret; \ 4097 bp += ret; \ 4098 } while (0) 4099 4100 #define CHECK_APPEND_2ARG(a, v1, v2) \ 4101 do { \ 4102 ret = snprintf(bp, size_bp, (a), (v1), (v2)); \ 4103 if (ret < 0 || ret >= size_bp) \ 4104 goto out_overflow; \ 4105 size_bp -= ret; \ 4106 bp += ret; \ 4107 } while (0) 4108 4109 if (flags & BTRFS_BALANCE_ARGS_CONVERT) 4110 CHECK_APPEND_1ARG("convert=%s,", 4111 btrfs_bg_type_to_raid_name(bargs->target)); 4112 4113 if (flags & BTRFS_BALANCE_ARGS_SOFT) 4114 CHECK_APPEND_NOARG("soft,"); 4115 4116 if (flags & BTRFS_BALANCE_ARGS_PROFILES) { 4117 btrfs_describe_block_groups(bargs->profiles, tmp_buf, 4118 sizeof(tmp_buf)); 4119 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf); 4120 } 4121 4122 if (flags & BTRFS_BALANCE_ARGS_USAGE) 4123 CHECK_APPEND_1ARG("usage=%llu,", bargs->usage); 4124 4125 if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) 4126 CHECK_APPEND_2ARG("usage=%u..%u,", 4127 bargs->usage_min, bargs->usage_max); 4128 4129 if (flags & BTRFS_BALANCE_ARGS_DEVID) 4130 CHECK_APPEND_1ARG("devid=%llu,", bargs->devid); 4131 4132 if (flags & BTRFS_BALANCE_ARGS_DRANGE) 4133 CHECK_APPEND_2ARG("drange=%llu..%llu,", 4134 bargs->pstart, bargs->pend); 4135 4136 if (flags & BTRFS_BALANCE_ARGS_VRANGE) 4137 CHECK_APPEND_2ARG("vrange=%llu..%llu,", 4138 bargs->vstart, bargs->vend); 4139 4140 if (flags & BTRFS_BALANCE_ARGS_LIMIT) 4141 CHECK_APPEND_1ARG("limit=%llu,", bargs->limit); 4142 4143 if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE) 4144 CHECK_APPEND_2ARG("limit=%u..%u,", 4145 bargs->limit_min, bargs->limit_max); 4146 4147 if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) 4148 CHECK_APPEND_2ARG("stripes=%u..%u,", 4149 bargs->stripes_min, bargs->stripes_max); 4150 4151 #undef CHECK_APPEND_2ARG 4152 #undef CHECK_APPEND_1ARG 4153 #undef CHECK_APPEND_NOARG 4154 4155 out_overflow: 4156 4157 if (size_bp < size_buf) 4158 buf[size_buf - size_bp - 1] = '\0'; /* remove last , */ 4159 else 4160 buf[0] = '\0'; 4161 } 4162 4163 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info) 4164 { 4165 u32 size_buf = 1024; 4166 char tmp_buf[192] = {'\0'}; 4167 char *buf; 4168 char *bp; 4169 u32 size_bp = size_buf; 4170 int ret; 4171 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 4172 4173 buf = kzalloc(size_buf, GFP_KERNEL); 4174 if (!buf) 4175 return; 4176 4177 bp = buf; 4178 4179 #define CHECK_APPEND_1ARG(a, v1) \ 4180 do { \ 4181 ret = snprintf(bp, size_bp, (a), (v1)); \ 4182 if (ret < 0 || ret >= size_bp) \ 4183 goto out_overflow; \ 4184 size_bp -= ret; \ 4185 bp += ret; \ 4186 } while (0) 4187 4188 if (bctl->flags & BTRFS_BALANCE_FORCE) 4189 CHECK_APPEND_1ARG("%s", "-f "); 4190 4191 if (bctl->flags & BTRFS_BALANCE_DATA) { 4192 describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf)); 4193 CHECK_APPEND_1ARG("-d%s ", tmp_buf); 4194 } 4195 4196 if (bctl->flags & BTRFS_BALANCE_METADATA) { 4197 describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf)); 4198 CHECK_APPEND_1ARG("-m%s ", tmp_buf); 4199 } 4200 4201 if (bctl->flags & BTRFS_BALANCE_SYSTEM) { 4202 describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf)); 4203 CHECK_APPEND_1ARG("-s%s ", tmp_buf); 4204 } 4205 4206 #undef CHECK_APPEND_1ARG 4207 4208 out_overflow: 4209 4210 if (size_bp < size_buf) 4211 buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */ 4212 btrfs_info(fs_info, "balance: %s %s", 4213 (bctl->flags & BTRFS_BALANCE_RESUME) ? 4214 "resume" : "start", buf); 4215 4216 kfree(buf); 4217 } 4218 4219 /* 4220 * Should be called with balance mutexe held 4221 */ 4222 int btrfs_balance(struct btrfs_fs_info *fs_info, 4223 struct btrfs_balance_control *bctl, 4224 struct btrfs_ioctl_balance_args *bargs) 4225 { 4226 u64 meta_target, data_target; 4227 u64 allowed; 4228 int mixed = 0; 4229 int ret; 4230 u64 num_devices; 4231 unsigned seq; 4232 bool reducing_redundancy; 4233 int i; 4234 4235 if (btrfs_fs_closing(fs_info) || 4236 atomic_read(&fs_info->balance_pause_req) || 4237 btrfs_should_cancel_balance(fs_info)) { 4238 ret = -EINVAL; 4239 goto out; 4240 } 4241 4242 allowed = btrfs_super_incompat_flags(fs_info->super_copy); 4243 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) 4244 mixed = 1; 4245 4246 /* 4247 * In case of mixed groups both data and meta should be picked, 4248 * and identical options should be given for both of them. 4249 */ 4250 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA; 4251 if (mixed && (bctl->flags & allowed)) { 4252 if (!(bctl->flags & BTRFS_BALANCE_DATA) || 4253 !(bctl->flags & BTRFS_BALANCE_METADATA) || 4254 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) { 4255 btrfs_err(fs_info, 4256 "balance: mixed groups data and metadata options must be the same"); 4257 ret = -EINVAL; 4258 goto out; 4259 } 4260 } 4261 4262 /* 4263 * rw_devices will not change at the moment, device add/delete/replace 4264 * are exclusive 4265 */ 4266 num_devices = fs_info->fs_devices->rw_devices; 4267 4268 /* 4269 * SINGLE profile on-disk has no profile bit, but in-memory we have a 4270 * special bit for it, to make it easier to distinguish. Thus we need 4271 * to set it manually, or balance would refuse the profile. 4272 */ 4273 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE; 4274 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) 4275 if (num_devices >= btrfs_raid_array[i].devs_min) 4276 allowed |= btrfs_raid_array[i].bg_flag; 4277 4278 if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") || 4279 !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") || 4280 !validate_convert_profile(fs_info, &bctl->sys, allowed, "system")) { 4281 ret = -EINVAL; 4282 goto out; 4283 } 4284 4285 /* 4286 * Allow to reduce metadata or system integrity only if force set for 4287 * profiles with redundancy (copies, parity) 4288 */ 4289 allowed = 0; 4290 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) { 4291 if (btrfs_raid_array[i].ncopies >= 2 || 4292 btrfs_raid_array[i].tolerated_failures >= 1) 4293 allowed |= btrfs_raid_array[i].bg_flag; 4294 } 4295 do { 4296 seq = read_seqbegin(&fs_info->profiles_lock); 4297 4298 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4299 (fs_info->avail_system_alloc_bits & allowed) && 4300 !(bctl->sys.target & allowed)) || 4301 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4302 (fs_info->avail_metadata_alloc_bits & allowed) && 4303 !(bctl->meta.target & allowed))) 4304 reducing_redundancy = true; 4305 else 4306 reducing_redundancy = false; 4307 4308 /* if we're not converting, the target field is uninitialized */ 4309 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 4310 bctl->meta.target : fs_info->avail_metadata_alloc_bits; 4311 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 4312 bctl->data.target : fs_info->avail_data_alloc_bits; 4313 } while (read_seqretry(&fs_info->profiles_lock, seq)); 4314 4315 if (reducing_redundancy) { 4316 if (bctl->flags & BTRFS_BALANCE_FORCE) { 4317 btrfs_info(fs_info, 4318 "balance: force reducing metadata redundancy"); 4319 } else { 4320 btrfs_err(fs_info, 4321 "balance: reduces metadata redundancy, use --force if you want this"); 4322 ret = -EINVAL; 4323 goto out; 4324 } 4325 } 4326 4327 if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) < 4328 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) { 4329 btrfs_warn(fs_info, 4330 "balance: metadata profile %s has lower redundancy than data profile %s", 4331 btrfs_bg_type_to_raid_name(meta_target), 4332 btrfs_bg_type_to_raid_name(data_target)); 4333 } 4334 4335 ret = insert_balance_item(fs_info, bctl); 4336 if (ret && ret != -EEXIST) 4337 goto out; 4338 4339 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) { 4340 BUG_ON(ret == -EEXIST); 4341 BUG_ON(fs_info->balance_ctl); 4342 spin_lock(&fs_info->balance_lock); 4343 fs_info->balance_ctl = bctl; 4344 spin_unlock(&fs_info->balance_lock); 4345 } else { 4346 BUG_ON(ret != -EEXIST); 4347 spin_lock(&fs_info->balance_lock); 4348 update_balance_args(bctl); 4349 spin_unlock(&fs_info->balance_lock); 4350 } 4351 4352 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4353 set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4354 describe_balance_start_or_resume(fs_info); 4355 mutex_unlock(&fs_info->balance_mutex); 4356 4357 ret = __btrfs_balance(fs_info); 4358 4359 mutex_lock(&fs_info->balance_mutex); 4360 if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req)) { 4361 btrfs_info(fs_info, "balance: paused"); 4362 btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED); 4363 } 4364 /* 4365 * Balance can be canceled by: 4366 * 4367 * - Regular cancel request 4368 * Then ret == -ECANCELED and balance_cancel_req > 0 4369 * 4370 * - Fatal signal to "btrfs" process 4371 * Either the signal caught by wait_reserve_ticket() and callers 4372 * got -EINTR, or caught by btrfs_should_cancel_balance() and 4373 * got -ECANCELED. 4374 * Either way, in this case balance_cancel_req = 0, and 4375 * ret == -EINTR or ret == -ECANCELED. 4376 * 4377 * So here we only check the return value to catch canceled balance. 4378 */ 4379 else if (ret == -ECANCELED || ret == -EINTR) 4380 btrfs_info(fs_info, "balance: canceled"); 4381 else 4382 btrfs_info(fs_info, "balance: ended with status: %d", ret); 4383 4384 clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4385 4386 if (bargs) { 4387 memset(bargs, 0, sizeof(*bargs)); 4388 btrfs_update_ioctl_balance_args(fs_info, bargs); 4389 } 4390 4391 if ((ret && ret != -ECANCELED && ret != -ENOSPC) || 4392 balance_need_close(fs_info)) { 4393 reset_balance_state(fs_info); 4394 btrfs_exclop_finish(fs_info); 4395 } 4396 4397 wake_up(&fs_info->balance_wait_q); 4398 4399 return ret; 4400 out: 4401 if (bctl->flags & BTRFS_BALANCE_RESUME) 4402 reset_balance_state(fs_info); 4403 else 4404 kfree(bctl); 4405 btrfs_exclop_finish(fs_info); 4406 4407 return ret; 4408 } 4409 4410 static int balance_kthread(void *data) 4411 { 4412 struct btrfs_fs_info *fs_info = data; 4413 int ret = 0; 4414 4415 sb_start_write(fs_info->sb); 4416 mutex_lock(&fs_info->balance_mutex); 4417 if (fs_info->balance_ctl) 4418 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL); 4419 mutex_unlock(&fs_info->balance_mutex); 4420 sb_end_write(fs_info->sb); 4421 4422 return ret; 4423 } 4424 4425 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info) 4426 { 4427 struct task_struct *tsk; 4428 4429 mutex_lock(&fs_info->balance_mutex); 4430 if (!fs_info->balance_ctl) { 4431 mutex_unlock(&fs_info->balance_mutex); 4432 return 0; 4433 } 4434 mutex_unlock(&fs_info->balance_mutex); 4435 4436 if (btrfs_test_opt(fs_info, SKIP_BALANCE)) { 4437 btrfs_info(fs_info, "balance: resume skipped"); 4438 return 0; 4439 } 4440 4441 spin_lock(&fs_info->super_lock); 4442 ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED); 4443 fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE; 4444 spin_unlock(&fs_info->super_lock); 4445 /* 4446 * A ro->rw remount sequence should continue with the paused balance 4447 * regardless of who pauses it, system or the user as of now, so set 4448 * the resume flag. 4449 */ 4450 spin_lock(&fs_info->balance_lock); 4451 fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME; 4452 spin_unlock(&fs_info->balance_lock); 4453 4454 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance"); 4455 return PTR_ERR_OR_ZERO(tsk); 4456 } 4457 4458 int btrfs_recover_balance(struct btrfs_fs_info *fs_info) 4459 { 4460 struct btrfs_balance_control *bctl; 4461 struct btrfs_balance_item *item; 4462 struct btrfs_disk_balance_args disk_bargs; 4463 struct btrfs_path *path; 4464 struct extent_buffer *leaf; 4465 struct btrfs_key key; 4466 int ret; 4467 4468 path = btrfs_alloc_path(); 4469 if (!path) 4470 return -ENOMEM; 4471 4472 key.objectid = BTRFS_BALANCE_OBJECTID; 4473 key.type = BTRFS_TEMPORARY_ITEM_KEY; 4474 key.offset = 0; 4475 4476 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 4477 if (ret < 0) 4478 goto out; 4479 if (ret > 0) { /* ret = -ENOENT; */ 4480 ret = 0; 4481 goto out; 4482 } 4483 4484 bctl = kzalloc(sizeof(*bctl), GFP_NOFS); 4485 if (!bctl) { 4486 ret = -ENOMEM; 4487 goto out; 4488 } 4489 4490 leaf = path->nodes[0]; 4491 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 4492 4493 bctl->flags = btrfs_balance_flags(leaf, item); 4494 bctl->flags |= BTRFS_BALANCE_RESUME; 4495 4496 btrfs_balance_data(leaf, item, &disk_bargs); 4497 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs); 4498 btrfs_balance_meta(leaf, item, &disk_bargs); 4499 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs); 4500 btrfs_balance_sys(leaf, item, &disk_bargs); 4501 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs); 4502 4503 /* 4504 * This should never happen, as the paused balance state is recovered 4505 * during mount without any chance of other exclusive ops to collide. 4506 * 4507 * This gives the exclusive op status to balance and keeps in paused 4508 * state until user intervention (cancel or umount). If the ownership 4509 * cannot be assigned, show a message but do not fail. The balance 4510 * is in a paused state and must have fs_info::balance_ctl properly 4511 * set up. 4512 */ 4513 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED)) 4514 btrfs_warn(fs_info, 4515 "balance: cannot set exclusive op status, resume manually"); 4516 4517 btrfs_release_path(path); 4518 4519 mutex_lock(&fs_info->balance_mutex); 4520 BUG_ON(fs_info->balance_ctl); 4521 spin_lock(&fs_info->balance_lock); 4522 fs_info->balance_ctl = bctl; 4523 spin_unlock(&fs_info->balance_lock); 4524 mutex_unlock(&fs_info->balance_mutex); 4525 out: 4526 btrfs_free_path(path); 4527 return ret; 4528 } 4529 4530 int btrfs_pause_balance(struct btrfs_fs_info *fs_info) 4531 { 4532 int ret = 0; 4533 4534 mutex_lock(&fs_info->balance_mutex); 4535 if (!fs_info->balance_ctl) { 4536 mutex_unlock(&fs_info->balance_mutex); 4537 return -ENOTCONN; 4538 } 4539 4540 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4541 atomic_inc(&fs_info->balance_pause_req); 4542 mutex_unlock(&fs_info->balance_mutex); 4543 4544 wait_event(fs_info->balance_wait_q, 4545 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4546 4547 mutex_lock(&fs_info->balance_mutex); 4548 /* we are good with balance_ctl ripped off from under us */ 4549 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4550 atomic_dec(&fs_info->balance_pause_req); 4551 } else { 4552 ret = -ENOTCONN; 4553 } 4554 4555 mutex_unlock(&fs_info->balance_mutex); 4556 return ret; 4557 } 4558 4559 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info) 4560 { 4561 mutex_lock(&fs_info->balance_mutex); 4562 if (!fs_info->balance_ctl) { 4563 mutex_unlock(&fs_info->balance_mutex); 4564 return -ENOTCONN; 4565 } 4566 4567 /* 4568 * A paused balance with the item stored on disk can be resumed at 4569 * mount time if the mount is read-write. Otherwise it's still paused 4570 * and we must not allow cancelling as it deletes the item. 4571 */ 4572 if (sb_rdonly(fs_info->sb)) { 4573 mutex_unlock(&fs_info->balance_mutex); 4574 return -EROFS; 4575 } 4576 4577 atomic_inc(&fs_info->balance_cancel_req); 4578 /* 4579 * if we are running just wait and return, balance item is 4580 * deleted in btrfs_balance in this case 4581 */ 4582 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4583 mutex_unlock(&fs_info->balance_mutex); 4584 wait_event(fs_info->balance_wait_q, 4585 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4586 mutex_lock(&fs_info->balance_mutex); 4587 } else { 4588 mutex_unlock(&fs_info->balance_mutex); 4589 /* 4590 * Lock released to allow other waiters to continue, we'll 4591 * reexamine the status again. 4592 */ 4593 mutex_lock(&fs_info->balance_mutex); 4594 4595 if (fs_info->balance_ctl) { 4596 reset_balance_state(fs_info); 4597 btrfs_exclop_finish(fs_info); 4598 btrfs_info(fs_info, "balance: canceled"); 4599 } 4600 } 4601 4602 BUG_ON(fs_info->balance_ctl || 4603 test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4604 atomic_dec(&fs_info->balance_cancel_req); 4605 mutex_unlock(&fs_info->balance_mutex); 4606 return 0; 4607 } 4608 4609 int btrfs_uuid_scan_kthread(void *data) 4610 { 4611 struct btrfs_fs_info *fs_info = data; 4612 struct btrfs_root *root = fs_info->tree_root; 4613 struct btrfs_key key; 4614 struct btrfs_path *path = NULL; 4615 int ret = 0; 4616 struct extent_buffer *eb; 4617 int slot; 4618 struct btrfs_root_item root_item; 4619 u32 item_size; 4620 struct btrfs_trans_handle *trans = NULL; 4621 bool closing = false; 4622 4623 path = btrfs_alloc_path(); 4624 if (!path) { 4625 ret = -ENOMEM; 4626 goto out; 4627 } 4628 4629 key.objectid = 0; 4630 key.type = BTRFS_ROOT_ITEM_KEY; 4631 key.offset = 0; 4632 4633 while (1) { 4634 if (btrfs_fs_closing(fs_info)) { 4635 closing = true; 4636 break; 4637 } 4638 ret = btrfs_search_forward(root, &key, path, 4639 BTRFS_OLDEST_GENERATION); 4640 if (ret) { 4641 if (ret > 0) 4642 ret = 0; 4643 break; 4644 } 4645 4646 if (key.type != BTRFS_ROOT_ITEM_KEY || 4647 (key.objectid < BTRFS_FIRST_FREE_OBJECTID && 4648 key.objectid != BTRFS_FS_TREE_OBJECTID) || 4649 key.objectid > BTRFS_LAST_FREE_OBJECTID) 4650 goto skip; 4651 4652 eb = path->nodes[0]; 4653 slot = path->slots[0]; 4654 item_size = btrfs_item_size(eb, slot); 4655 if (item_size < sizeof(root_item)) 4656 goto skip; 4657 4658 read_extent_buffer(eb, &root_item, 4659 btrfs_item_ptr_offset(eb, slot), 4660 (int)sizeof(root_item)); 4661 if (btrfs_root_refs(&root_item) == 0) 4662 goto skip; 4663 4664 if (!btrfs_is_empty_uuid(root_item.uuid) || 4665 !btrfs_is_empty_uuid(root_item.received_uuid)) { 4666 if (trans) 4667 goto update_tree; 4668 4669 btrfs_release_path(path); 4670 /* 4671 * 1 - subvol uuid item 4672 * 1 - received_subvol uuid item 4673 */ 4674 trans = btrfs_start_transaction(fs_info->uuid_root, 2); 4675 if (IS_ERR(trans)) { 4676 ret = PTR_ERR(trans); 4677 break; 4678 } 4679 continue; 4680 } else { 4681 goto skip; 4682 } 4683 update_tree: 4684 btrfs_release_path(path); 4685 if (!btrfs_is_empty_uuid(root_item.uuid)) { 4686 ret = btrfs_uuid_tree_add(trans, root_item.uuid, 4687 BTRFS_UUID_KEY_SUBVOL, 4688 key.objectid); 4689 if (ret < 0) { 4690 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4691 ret); 4692 break; 4693 } 4694 } 4695 4696 if (!btrfs_is_empty_uuid(root_item.received_uuid)) { 4697 ret = btrfs_uuid_tree_add(trans, 4698 root_item.received_uuid, 4699 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 4700 key.objectid); 4701 if (ret < 0) { 4702 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4703 ret); 4704 break; 4705 } 4706 } 4707 4708 skip: 4709 btrfs_release_path(path); 4710 if (trans) { 4711 ret = btrfs_end_transaction(trans); 4712 trans = NULL; 4713 if (ret) 4714 break; 4715 } 4716 4717 if (key.offset < (u64)-1) { 4718 key.offset++; 4719 } else if (key.type < BTRFS_ROOT_ITEM_KEY) { 4720 key.offset = 0; 4721 key.type = BTRFS_ROOT_ITEM_KEY; 4722 } else if (key.objectid < (u64)-1) { 4723 key.offset = 0; 4724 key.type = BTRFS_ROOT_ITEM_KEY; 4725 key.objectid++; 4726 } else { 4727 break; 4728 } 4729 cond_resched(); 4730 } 4731 4732 out: 4733 btrfs_free_path(path); 4734 if (trans && !IS_ERR(trans)) 4735 btrfs_end_transaction(trans); 4736 if (ret) 4737 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret); 4738 else if (!closing) 4739 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags); 4740 up(&fs_info->uuid_tree_rescan_sem); 4741 return 0; 4742 } 4743 4744 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info) 4745 { 4746 struct btrfs_trans_handle *trans; 4747 struct btrfs_root *tree_root = fs_info->tree_root; 4748 struct btrfs_root *uuid_root; 4749 struct task_struct *task; 4750 int ret; 4751 4752 /* 4753 * 1 - root node 4754 * 1 - root item 4755 */ 4756 trans = btrfs_start_transaction(tree_root, 2); 4757 if (IS_ERR(trans)) 4758 return PTR_ERR(trans); 4759 4760 uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID); 4761 if (IS_ERR(uuid_root)) { 4762 ret = PTR_ERR(uuid_root); 4763 btrfs_abort_transaction(trans, ret); 4764 btrfs_end_transaction(trans); 4765 return ret; 4766 } 4767 4768 fs_info->uuid_root = uuid_root; 4769 4770 ret = btrfs_commit_transaction(trans); 4771 if (ret) 4772 return ret; 4773 4774 down(&fs_info->uuid_tree_rescan_sem); 4775 task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid"); 4776 if (IS_ERR(task)) { 4777 /* fs_info->update_uuid_tree_gen remains 0 in all error case */ 4778 btrfs_warn(fs_info, "failed to start uuid_scan task"); 4779 up(&fs_info->uuid_tree_rescan_sem); 4780 return PTR_ERR(task); 4781 } 4782 4783 return 0; 4784 } 4785 4786 /* 4787 * shrinking a device means finding all of the device extents past 4788 * the new size, and then following the back refs to the chunks. 4789 * The chunk relocation code actually frees the device extent 4790 */ 4791 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) 4792 { 4793 struct btrfs_fs_info *fs_info = device->fs_info; 4794 struct btrfs_root *root = fs_info->dev_root; 4795 struct btrfs_trans_handle *trans; 4796 struct btrfs_dev_extent *dev_extent = NULL; 4797 struct btrfs_path *path; 4798 u64 length; 4799 u64 chunk_offset; 4800 int ret; 4801 int slot; 4802 int failed = 0; 4803 bool retried = false; 4804 struct extent_buffer *l; 4805 struct btrfs_key key; 4806 struct btrfs_super_block *super_copy = fs_info->super_copy; 4807 u64 old_total = btrfs_super_total_bytes(super_copy); 4808 u64 old_size = btrfs_device_get_total_bytes(device); 4809 u64 diff; 4810 u64 start; 4811 4812 new_size = round_down(new_size, fs_info->sectorsize); 4813 start = new_size; 4814 diff = round_down(old_size - new_size, fs_info->sectorsize); 4815 4816 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 4817 return -EINVAL; 4818 4819 path = btrfs_alloc_path(); 4820 if (!path) 4821 return -ENOMEM; 4822 4823 path->reada = READA_BACK; 4824 4825 trans = btrfs_start_transaction(root, 0); 4826 if (IS_ERR(trans)) { 4827 btrfs_free_path(path); 4828 return PTR_ERR(trans); 4829 } 4830 4831 mutex_lock(&fs_info->chunk_mutex); 4832 4833 btrfs_device_set_total_bytes(device, new_size); 4834 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 4835 device->fs_devices->total_rw_bytes -= diff; 4836 atomic64_sub(diff, &fs_info->free_chunk_space); 4837 } 4838 4839 /* 4840 * Once the device's size has been set to the new size, ensure all 4841 * in-memory chunks are synced to disk so that the loop below sees them 4842 * and relocates them accordingly. 4843 */ 4844 if (contains_pending_extent(device, &start, diff)) { 4845 mutex_unlock(&fs_info->chunk_mutex); 4846 ret = btrfs_commit_transaction(trans); 4847 if (ret) 4848 goto done; 4849 } else { 4850 mutex_unlock(&fs_info->chunk_mutex); 4851 btrfs_end_transaction(trans); 4852 } 4853 4854 again: 4855 key.objectid = device->devid; 4856 key.offset = (u64)-1; 4857 key.type = BTRFS_DEV_EXTENT_KEY; 4858 4859 do { 4860 mutex_lock(&fs_info->reclaim_bgs_lock); 4861 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4862 if (ret < 0) { 4863 mutex_unlock(&fs_info->reclaim_bgs_lock); 4864 goto done; 4865 } 4866 4867 ret = btrfs_previous_item(root, path, 0, key.type); 4868 if (ret) { 4869 mutex_unlock(&fs_info->reclaim_bgs_lock); 4870 if (ret < 0) 4871 goto done; 4872 ret = 0; 4873 btrfs_release_path(path); 4874 break; 4875 } 4876 4877 l = path->nodes[0]; 4878 slot = path->slots[0]; 4879 btrfs_item_key_to_cpu(l, &key, path->slots[0]); 4880 4881 if (key.objectid != device->devid) { 4882 mutex_unlock(&fs_info->reclaim_bgs_lock); 4883 btrfs_release_path(path); 4884 break; 4885 } 4886 4887 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 4888 length = btrfs_dev_extent_length(l, dev_extent); 4889 4890 if (key.offset + length <= new_size) { 4891 mutex_unlock(&fs_info->reclaim_bgs_lock); 4892 btrfs_release_path(path); 4893 break; 4894 } 4895 4896 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); 4897 btrfs_release_path(path); 4898 4899 /* 4900 * We may be relocating the only data chunk we have, 4901 * which could potentially end up with losing data's 4902 * raid profile, so lets allocate an empty one in 4903 * advance. 4904 */ 4905 ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset); 4906 if (ret < 0) { 4907 mutex_unlock(&fs_info->reclaim_bgs_lock); 4908 goto done; 4909 } 4910 4911 ret = btrfs_relocate_chunk(fs_info, chunk_offset); 4912 mutex_unlock(&fs_info->reclaim_bgs_lock); 4913 if (ret == -ENOSPC) { 4914 failed++; 4915 } else if (ret) { 4916 if (ret == -ETXTBSY) { 4917 btrfs_warn(fs_info, 4918 "could not shrink block group %llu due to active swapfile", 4919 chunk_offset); 4920 } 4921 goto done; 4922 } 4923 } while (key.offset-- > 0); 4924 4925 if (failed && !retried) { 4926 failed = 0; 4927 retried = true; 4928 goto again; 4929 } else if (failed && retried) { 4930 ret = -ENOSPC; 4931 goto done; 4932 } 4933 4934 /* Shrinking succeeded, else we would be at "done". */ 4935 trans = btrfs_start_transaction(root, 0); 4936 if (IS_ERR(trans)) { 4937 ret = PTR_ERR(trans); 4938 goto done; 4939 } 4940 4941 mutex_lock(&fs_info->chunk_mutex); 4942 /* Clear all state bits beyond the shrunk device size */ 4943 clear_extent_bits(&device->alloc_state, new_size, (u64)-1, 4944 CHUNK_STATE_MASK); 4945 4946 btrfs_device_set_disk_total_bytes(device, new_size); 4947 if (list_empty(&device->post_commit_list)) 4948 list_add_tail(&device->post_commit_list, 4949 &trans->transaction->dev_update_list); 4950 4951 WARN_ON(diff > old_total); 4952 btrfs_set_super_total_bytes(super_copy, 4953 round_down(old_total - diff, fs_info->sectorsize)); 4954 mutex_unlock(&fs_info->chunk_mutex); 4955 4956 btrfs_reserve_chunk_metadata(trans, false); 4957 /* Now btrfs_update_device() will change the on-disk size. */ 4958 ret = btrfs_update_device(trans, device); 4959 btrfs_trans_release_chunk_metadata(trans); 4960 if (ret < 0) { 4961 btrfs_abort_transaction(trans, ret); 4962 btrfs_end_transaction(trans); 4963 } else { 4964 ret = btrfs_commit_transaction(trans); 4965 } 4966 done: 4967 btrfs_free_path(path); 4968 if (ret) { 4969 mutex_lock(&fs_info->chunk_mutex); 4970 btrfs_device_set_total_bytes(device, old_size); 4971 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 4972 device->fs_devices->total_rw_bytes += diff; 4973 atomic64_add(diff, &fs_info->free_chunk_space); 4974 mutex_unlock(&fs_info->chunk_mutex); 4975 } 4976 return ret; 4977 } 4978 4979 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info, 4980 struct btrfs_key *key, 4981 struct btrfs_chunk *chunk, int item_size) 4982 { 4983 struct btrfs_super_block *super_copy = fs_info->super_copy; 4984 struct btrfs_disk_key disk_key; 4985 u32 array_size; 4986 u8 *ptr; 4987 4988 lockdep_assert_held(&fs_info->chunk_mutex); 4989 4990 array_size = btrfs_super_sys_array_size(super_copy); 4991 if (array_size + item_size + sizeof(disk_key) 4992 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) 4993 return -EFBIG; 4994 4995 ptr = super_copy->sys_chunk_array + array_size; 4996 btrfs_cpu_key_to_disk(&disk_key, key); 4997 memcpy(ptr, &disk_key, sizeof(disk_key)); 4998 ptr += sizeof(disk_key); 4999 memcpy(ptr, chunk, item_size); 5000 item_size += sizeof(disk_key); 5001 btrfs_set_super_sys_array_size(super_copy, array_size + item_size); 5002 5003 return 0; 5004 } 5005 5006 /* 5007 * sort the devices in descending order by max_avail, total_avail 5008 */ 5009 static int btrfs_cmp_device_info(const void *a, const void *b) 5010 { 5011 const struct btrfs_device_info *di_a = a; 5012 const struct btrfs_device_info *di_b = b; 5013 5014 if (di_a->max_avail > di_b->max_avail) 5015 return -1; 5016 if (di_a->max_avail < di_b->max_avail) 5017 return 1; 5018 if (di_a->total_avail > di_b->total_avail) 5019 return -1; 5020 if (di_a->total_avail < di_b->total_avail) 5021 return 1; 5022 return 0; 5023 } 5024 5025 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type) 5026 { 5027 if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK)) 5028 return; 5029 5030 btrfs_set_fs_incompat(info, RAID56); 5031 } 5032 5033 static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type) 5034 { 5035 if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4))) 5036 return; 5037 5038 btrfs_set_fs_incompat(info, RAID1C34); 5039 } 5040 5041 /* 5042 * Structure used internally for btrfs_create_chunk() function. 5043 * Wraps needed parameters. 5044 */ 5045 struct alloc_chunk_ctl { 5046 u64 start; 5047 u64 type; 5048 /* Total number of stripes to allocate */ 5049 int num_stripes; 5050 /* sub_stripes info for map */ 5051 int sub_stripes; 5052 /* Stripes per device */ 5053 int dev_stripes; 5054 /* Maximum number of devices to use */ 5055 int devs_max; 5056 /* Minimum number of devices to use */ 5057 int devs_min; 5058 /* ndevs has to be a multiple of this */ 5059 int devs_increment; 5060 /* Number of copies */ 5061 int ncopies; 5062 /* Number of stripes worth of bytes to store parity information */ 5063 int nparity; 5064 u64 max_stripe_size; 5065 u64 max_chunk_size; 5066 u64 dev_extent_min; 5067 u64 stripe_size; 5068 u64 chunk_size; 5069 int ndevs; 5070 }; 5071 5072 static void init_alloc_chunk_ctl_policy_regular( 5073 struct btrfs_fs_devices *fs_devices, 5074 struct alloc_chunk_ctl *ctl) 5075 { 5076 struct btrfs_space_info *space_info; 5077 5078 space_info = btrfs_find_space_info(fs_devices->fs_info, ctl->type); 5079 ASSERT(space_info); 5080 5081 ctl->max_chunk_size = READ_ONCE(space_info->chunk_size); 5082 ctl->max_stripe_size = ctl->max_chunk_size; 5083 5084 if (ctl->type & BTRFS_BLOCK_GROUP_SYSTEM) 5085 ctl->devs_max = min_t(int, ctl->devs_max, BTRFS_MAX_DEVS_SYS_CHUNK); 5086 5087 /* We don't want a chunk larger than 10% of writable space */ 5088 ctl->max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1), 5089 ctl->max_chunk_size); 5090 ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes; 5091 } 5092 5093 static void init_alloc_chunk_ctl_policy_zoned( 5094 struct btrfs_fs_devices *fs_devices, 5095 struct alloc_chunk_ctl *ctl) 5096 { 5097 u64 zone_size = fs_devices->fs_info->zone_size; 5098 u64 limit; 5099 int min_num_stripes = ctl->devs_min * ctl->dev_stripes; 5100 int min_data_stripes = (min_num_stripes - ctl->nparity) / ctl->ncopies; 5101 u64 min_chunk_size = min_data_stripes * zone_size; 5102 u64 type = ctl->type; 5103 5104 ctl->max_stripe_size = zone_size; 5105 if (type & BTRFS_BLOCK_GROUP_DATA) { 5106 ctl->max_chunk_size = round_down(BTRFS_MAX_DATA_CHUNK_SIZE, 5107 zone_size); 5108 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 5109 ctl->max_chunk_size = ctl->max_stripe_size; 5110 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 5111 ctl->max_chunk_size = 2 * ctl->max_stripe_size; 5112 ctl->devs_max = min_t(int, ctl->devs_max, 5113 BTRFS_MAX_DEVS_SYS_CHUNK); 5114 } else { 5115 BUG(); 5116 } 5117 5118 /* We don't want a chunk larger than 10% of writable space */ 5119 limit = max(round_down(div_factor(fs_devices->total_rw_bytes, 1), 5120 zone_size), 5121 min_chunk_size); 5122 ctl->max_chunk_size = min(limit, ctl->max_chunk_size); 5123 ctl->dev_extent_min = zone_size * ctl->dev_stripes; 5124 } 5125 5126 static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices, 5127 struct alloc_chunk_ctl *ctl) 5128 { 5129 int index = btrfs_bg_flags_to_raid_index(ctl->type); 5130 5131 ctl->sub_stripes = btrfs_raid_array[index].sub_stripes; 5132 ctl->dev_stripes = btrfs_raid_array[index].dev_stripes; 5133 ctl->devs_max = btrfs_raid_array[index].devs_max; 5134 if (!ctl->devs_max) 5135 ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info); 5136 ctl->devs_min = btrfs_raid_array[index].devs_min; 5137 ctl->devs_increment = btrfs_raid_array[index].devs_increment; 5138 ctl->ncopies = btrfs_raid_array[index].ncopies; 5139 ctl->nparity = btrfs_raid_array[index].nparity; 5140 ctl->ndevs = 0; 5141 5142 switch (fs_devices->chunk_alloc_policy) { 5143 case BTRFS_CHUNK_ALLOC_REGULAR: 5144 init_alloc_chunk_ctl_policy_regular(fs_devices, ctl); 5145 break; 5146 case BTRFS_CHUNK_ALLOC_ZONED: 5147 init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl); 5148 break; 5149 default: 5150 BUG(); 5151 } 5152 } 5153 5154 static int gather_device_info(struct btrfs_fs_devices *fs_devices, 5155 struct alloc_chunk_ctl *ctl, 5156 struct btrfs_device_info *devices_info) 5157 { 5158 struct btrfs_fs_info *info = fs_devices->fs_info; 5159 struct btrfs_device *device; 5160 u64 total_avail; 5161 u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes; 5162 int ret; 5163 int ndevs = 0; 5164 u64 max_avail; 5165 u64 dev_offset; 5166 5167 /* 5168 * in the first pass through the devices list, we gather information 5169 * about the available holes on each device. 5170 */ 5171 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { 5172 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 5173 WARN(1, KERN_ERR 5174 "BTRFS: read-only device in alloc_list\n"); 5175 continue; 5176 } 5177 5178 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 5179 &device->dev_state) || 5180 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 5181 continue; 5182 5183 if (device->total_bytes > device->bytes_used) 5184 total_avail = device->total_bytes - device->bytes_used; 5185 else 5186 total_avail = 0; 5187 5188 /* If there is no space on this device, skip it. */ 5189 if (total_avail < ctl->dev_extent_min) 5190 continue; 5191 5192 ret = find_free_dev_extent(device, dev_extent_want, &dev_offset, 5193 &max_avail); 5194 if (ret && ret != -ENOSPC) 5195 return ret; 5196 5197 if (ret == 0) 5198 max_avail = dev_extent_want; 5199 5200 if (max_avail < ctl->dev_extent_min) { 5201 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 5202 btrfs_debug(info, 5203 "%s: devid %llu has no free space, have=%llu want=%llu", 5204 __func__, device->devid, max_avail, 5205 ctl->dev_extent_min); 5206 continue; 5207 } 5208 5209 if (ndevs == fs_devices->rw_devices) { 5210 WARN(1, "%s: found more than %llu devices\n", 5211 __func__, fs_devices->rw_devices); 5212 break; 5213 } 5214 devices_info[ndevs].dev_offset = dev_offset; 5215 devices_info[ndevs].max_avail = max_avail; 5216 devices_info[ndevs].total_avail = total_avail; 5217 devices_info[ndevs].dev = device; 5218 ++ndevs; 5219 } 5220 ctl->ndevs = ndevs; 5221 5222 /* 5223 * now sort the devices by hole size / available space 5224 */ 5225 sort(devices_info, ndevs, sizeof(struct btrfs_device_info), 5226 btrfs_cmp_device_info, NULL); 5227 5228 return 0; 5229 } 5230 5231 static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl, 5232 struct btrfs_device_info *devices_info) 5233 { 5234 /* Number of stripes that count for block group size */ 5235 int data_stripes; 5236 5237 /* 5238 * The primary goal is to maximize the number of stripes, so use as 5239 * many devices as possible, even if the stripes are not maximum sized. 5240 * 5241 * The DUP profile stores more than one stripe per device, the 5242 * max_avail is the total size so we have to adjust. 5243 */ 5244 ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail, 5245 ctl->dev_stripes); 5246 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5247 5248 /* This will have to be fixed for RAID1 and RAID10 over more drives */ 5249 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5250 5251 /* 5252 * Use the number of data stripes to figure out how big this chunk is 5253 * really going to be in terms of logical address space, and compare 5254 * that answer with the max chunk size. If it's higher, we try to 5255 * reduce stripe_size. 5256 */ 5257 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) { 5258 /* 5259 * Reduce stripe_size, round it up to a 16MB boundary again and 5260 * then use it, unless it ends up being even bigger than the 5261 * previous value we had already. 5262 */ 5263 ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size, 5264 data_stripes), SZ_16M), 5265 ctl->stripe_size); 5266 } 5267 5268 /* Align to BTRFS_STRIPE_LEN */ 5269 ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN); 5270 ctl->chunk_size = ctl->stripe_size * data_stripes; 5271 5272 return 0; 5273 } 5274 5275 static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl, 5276 struct btrfs_device_info *devices_info) 5277 { 5278 u64 zone_size = devices_info[0].dev->zone_info->zone_size; 5279 /* Number of stripes that count for block group size */ 5280 int data_stripes; 5281 5282 /* 5283 * It should hold because: 5284 * dev_extent_min == dev_extent_want == zone_size * dev_stripes 5285 */ 5286 ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min); 5287 5288 ctl->stripe_size = zone_size; 5289 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5290 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5291 5292 /* stripe_size is fixed in zoned filesysmte. Reduce ndevs instead. */ 5293 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) { 5294 ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies, 5295 ctl->stripe_size) + ctl->nparity, 5296 ctl->dev_stripes); 5297 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5298 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5299 ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size); 5300 } 5301 5302 ctl->chunk_size = ctl->stripe_size * data_stripes; 5303 5304 return 0; 5305 } 5306 5307 static int decide_stripe_size(struct btrfs_fs_devices *fs_devices, 5308 struct alloc_chunk_ctl *ctl, 5309 struct btrfs_device_info *devices_info) 5310 { 5311 struct btrfs_fs_info *info = fs_devices->fs_info; 5312 5313 /* 5314 * Round down to number of usable stripes, devs_increment can be any 5315 * number so we can't use round_down() that requires power of 2, while 5316 * rounddown is safe. 5317 */ 5318 ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment); 5319 5320 if (ctl->ndevs < ctl->devs_min) { 5321 if (btrfs_test_opt(info, ENOSPC_DEBUG)) { 5322 btrfs_debug(info, 5323 "%s: not enough devices with free space: have=%d minimum required=%d", 5324 __func__, ctl->ndevs, ctl->devs_min); 5325 } 5326 return -ENOSPC; 5327 } 5328 5329 ctl->ndevs = min(ctl->ndevs, ctl->devs_max); 5330 5331 switch (fs_devices->chunk_alloc_policy) { 5332 case BTRFS_CHUNK_ALLOC_REGULAR: 5333 return decide_stripe_size_regular(ctl, devices_info); 5334 case BTRFS_CHUNK_ALLOC_ZONED: 5335 return decide_stripe_size_zoned(ctl, devices_info); 5336 default: 5337 BUG(); 5338 } 5339 } 5340 5341 static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans, 5342 struct alloc_chunk_ctl *ctl, 5343 struct btrfs_device_info *devices_info) 5344 { 5345 struct btrfs_fs_info *info = trans->fs_info; 5346 struct map_lookup *map = NULL; 5347 struct extent_map_tree *em_tree; 5348 struct btrfs_block_group *block_group; 5349 struct extent_map *em; 5350 u64 start = ctl->start; 5351 u64 type = ctl->type; 5352 int ret; 5353 int i; 5354 int j; 5355 5356 map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS); 5357 if (!map) 5358 return ERR_PTR(-ENOMEM); 5359 map->num_stripes = ctl->num_stripes; 5360 5361 for (i = 0; i < ctl->ndevs; ++i) { 5362 for (j = 0; j < ctl->dev_stripes; ++j) { 5363 int s = i * ctl->dev_stripes + j; 5364 map->stripes[s].dev = devices_info[i].dev; 5365 map->stripes[s].physical = devices_info[i].dev_offset + 5366 j * ctl->stripe_size; 5367 } 5368 } 5369 map->stripe_len = BTRFS_STRIPE_LEN; 5370 map->io_align = BTRFS_STRIPE_LEN; 5371 map->io_width = BTRFS_STRIPE_LEN; 5372 map->type = type; 5373 map->sub_stripes = ctl->sub_stripes; 5374 5375 trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size); 5376 5377 em = alloc_extent_map(); 5378 if (!em) { 5379 kfree(map); 5380 return ERR_PTR(-ENOMEM); 5381 } 5382 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 5383 em->map_lookup = map; 5384 em->start = start; 5385 em->len = ctl->chunk_size; 5386 em->block_start = 0; 5387 em->block_len = em->len; 5388 em->orig_block_len = ctl->stripe_size; 5389 5390 em_tree = &info->mapping_tree; 5391 write_lock(&em_tree->lock); 5392 ret = add_extent_mapping(em_tree, em, 0); 5393 if (ret) { 5394 write_unlock(&em_tree->lock); 5395 free_extent_map(em); 5396 return ERR_PTR(ret); 5397 } 5398 write_unlock(&em_tree->lock); 5399 5400 block_group = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size); 5401 if (IS_ERR(block_group)) 5402 goto error_del_extent; 5403 5404 for (i = 0; i < map->num_stripes; i++) { 5405 struct btrfs_device *dev = map->stripes[i].dev; 5406 5407 btrfs_device_set_bytes_used(dev, 5408 dev->bytes_used + ctl->stripe_size); 5409 if (list_empty(&dev->post_commit_list)) 5410 list_add_tail(&dev->post_commit_list, 5411 &trans->transaction->dev_update_list); 5412 } 5413 5414 atomic64_sub(ctl->stripe_size * map->num_stripes, 5415 &info->free_chunk_space); 5416 5417 free_extent_map(em); 5418 check_raid56_incompat_flag(info, type); 5419 check_raid1c34_incompat_flag(info, type); 5420 5421 return block_group; 5422 5423 error_del_extent: 5424 write_lock(&em_tree->lock); 5425 remove_extent_mapping(em_tree, em); 5426 write_unlock(&em_tree->lock); 5427 5428 /* One for our allocation */ 5429 free_extent_map(em); 5430 /* One for the tree reference */ 5431 free_extent_map(em); 5432 5433 return block_group; 5434 } 5435 5436 struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans, 5437 u64 type) 5438 { 5439 struct btrfs_fs_info *info = trans->fs_info; 5440 struct btrfs_fs_devices *fs_devices = info->fs_devices; 5441 struct btrfs_device_info *devices_info = NULL; 5442 struct alloc_chunk_ctl ctl; 5443 struct btrfs_block_group *block_group; 5444 int ret; 5445 5446 lockdep_assert_held(&info->chunk_mutex); 5447 5448 if (!alloc_profile_is_valid(type, 0)) { 5449 ASSERT(0); 5450 return ERR_PTR(-EINVAL); 5451 } 5452 5453 if (list_empty(&fs_devices->alloc_list)) { 5454 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 5455 btrfs_debug(info, "%s: no writable device", __func__); 5456 return ERR_PTR(-ENOSPC); 5457 } 5458 5459 if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { 5460 btrfs_err(info, "invalid chunk type 0x%llx requested", type); 5461 ASSERT(0); 5462 return ERR_PTR(-EINVAL); 5463 } 5464 5465 ctl.start = find_next_chunk(info); 5466 ctl.type = type; 5467 init_alloc_chunk_ctl(fs_devices, &ctl); 5468 5469 devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info), 5470 GFP_NOFS); 5471 if (!devices_info) 5472 return ERR_PTR(-ENOMEM); 5473 5474 ret = gather_device_info(fs_devices, &ctl, devices_info); 5475 if (ret < 0) { 5476 block_group = ERR_PTR(ret); 5477 goto out; 5478 } 5479 5480 ret = decide_stripe_size(fs_devices, &ctl, devices_info); 5481 if (ret < 0) { 5482 block_group = ERR_PTR(ret); 5483 goto out; 5484 } 5485 5486 block_group = create_chunk(trans, &ctl, devices_info); 5487 5488 out: 5489 kfree(devices_info); 5490 return block_group; 5491 } 5492 5493 /* 5494 * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to the 5495 * phase 1 of chunk allocation. It belongs to phase 2 only when allocating system 5496 * chunks. 5497 * 5498 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 5499 * phases. 5500 */ 5501 int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans, 5502 struct btrfs_block_group *bg) 5503 { 5504 struct btrfs_fs_info *fs_info = trans->fs_info; 5505 struct btrfs_root *chunk_root = fs_info->chunk_root; 5506 struct btrfs_key key; 5507 struct btrfs_chunk *chunk; 5508 struct btrfs_stripe *stripe; 5509 struct extent_map *em; 5510 struct map_lookup *map; 5511 size_t item_size; 5512 int i; 5513 int ret; 5514 5515 /* 5516 * We take the chunk_mutex for 2 reasons: 5517 * 5518 * 1) Updates and insertions in the chunk btree must be done while holding 5519 * the chunk_mutex, as well as updating the system chunk array in the 5520 * superblock. See the comment on top of btrfs_chunk_alloc() for the 5521 * details; 5522 * 5523 * 2) To prevent races with the final phase of a device replace operation 5524 * that replaces the device object associated with the map's stripes, 5525 * because the device object's id can change at any time during that 5526 * final phase of the device replace operation 5527 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the 5528 * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID, 5529 * which would cause a failure when updating the device item, which does 5530 * not exists, or persisting a stripe of the chunk item with such ID. 5531 * Here we can't use the device_list_mutex because our caller already 5532 * has locked the chunk_mutex, and the final phase of device replace 5533 * acquires both mutexes - first the device_list_mutex and then the 5534 * chunk_mutex. Using any of those two mutexes protects us from a 5535 * concurrent device replace. 5536 */ 5537 lockdep_assert_held(&fs_info->chunk_mutex); 5538 5539 em = btrfs_get_chunk_map(fs_info, bg->start, bg->length); 5540 if (IS_ERR(em)) { 5541 ret = PTR_ERR(em); 5542 btrfs_abort_transaction(trans, ret); 5543 return ret; 5544 } 5545 5546 map = em->map_lookup; 5547 item_size = btrfs_chunk_item_size(map->num_stripes); 5548 5549 chunk = kzalloc(item_size, GFP_NOFS); 5550 if (!chunk) { 5551 ret = -ENOMEM; 5552 btrfs_abort_transaction(trans, ret); 5553 goto out; 5554 } 5555 5556 for (i = 0; i < map->num_stripes; i++) { 5557 struct btrfs_device *device = map->stripes[i].dev; 5558 5559 ret = btrfs_update_device(trans, device); 5560 if (ret) 5561 goto out; 5562 } 5563 5564 stripe = &chunk->stripe; 5565 for (i = 0; i < map->num_stripes; i++) { 5566 struct btrfs_device *device = map->stripes[i].dev; 5567 const u64 dev_offset = map->stripes[i].physical; 5568 5569 btrfs_set_stack_stripe_devid(stripe, device->devid); 5570 btrfs_set_stack_stripe_offset(stripe, dev_offset); 5571 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE); 5572 stripe++; 5573 } 5574 5575 btrfs_set_stack_chunk_length(chunk, bg->length); 5576 btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID); 5577 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len); 5578 btrfs_set_stack_chunk_type(chunk, map->type); 5579 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes); 5580 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len); 5581 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len); 5582 btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize); 5583 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes); 5584 5585 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 5586 key.type = BTRFS_CHUNK_ITEM_KEY; 5587 key.offset = bg->start; 5588 5589 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size); 5590 if (ret) 5591 goto out; 5592 5593 bg->chunk_item_inserted = 1; 5594 5595 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 5596 ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size); 5597 if (ret) 5598 goto out; 5599 } 5600 5601 out: 5602 kfree(chunk); 5603 free_extent_map(em); 5604 return ret; 5605 } 5606 5607 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans) 5608 { 5609 struct btrfs_fs_info *fs_info = trans->fs_info; 5610 u64 alloc_profile; 5611 struct btrfs_block_group *meta_bg; 5612 struct btrfs_block_group *sys_bg; 5613 5614 /* 5615 * When adding a new device for sprouting, the seed device is read-only 5616 * so we must first allocate a metadata and a system chunk. But before 5617 * adding the block group items to the extent, device and chunk btrees, 5618 * we must first: 5619 * 5620 * 1) Create both chunks without doing any changes to the btrees, as 5621 * otherwise we would get -ENOSPC since the block groups from the 5622 * seed device are read-only; 5623 * 5624 * 2) Add the device item for the new sprout device - finishing the setup 5625 * of a new block group requires updating the device item in the chunk 5626 * btree, so it must exist when we attempt to do it. The previous step 5627 * ensures this does not fail with -ENOSPC. 5628 * 5629 * After that we can add the block group items to their btrees: 5630 * update existing device item in the chunk btree, add a new block group 5631 * item to the extent btree, add a new chunk item to the chunk btree and 5632 * finally add the new device extent items to the devices btree. 5633 */ 5634 5635 alloc_profile = btrfs_metadata_alloc_profile(fs_info); 5636 meta_bg = btrfs_create_chunk(trans, alloc_profile); 5637 if (IS_ERR(meta_bg)) 5638 return PTR_ERR(meta_bg); 5639 5640 alloc_profile = btrfs_system_alloc_profile(fs_info); 5641 sys_bg = btrfs_create_chunk(trans, alloc_profile); 5642 if (IS_ERR(sys_bg)) 5643 return PTR_ERR(sys_bg); 5644 5645 return 0; 5646 } 5647 5648 static inline int btrfs_chunk_max_errors(struct map_lookup *map) 5649 { 5650 const int index = btrfs_bg_flags_to_raid_index(map->type); 5651 5652 return btrfs_raid_array[index].tolerated_failures; 5653 } 5654 5655 bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset) 5656 { 5657 struct extent_map *em; 5658 struct map_lookup *map; 5659 int miss_ndevs = 0; 5660 int i; 5661 bool ret = true; 5662 5663 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 5664 if (IS_ERR(em)) 5665 return false; 5666 5667 map = em->map_lookup; 5668 for (i = 0; i < map->num_stripes; i++) { 5669 if (test_bit(BTRFS_DEV_STATE_MISSING, 5670 &map->stripes[i].dev->dev_state)) { 5671 miss_ndevs++; 5672 continue; 5673 } 5674 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, 5675 &map->stripes[i].dev->dev_state)) { 5676 ret = false; 5677 goto end; 5678 } 5679 } 5680 5681 /* 5682 * If the number of missing devices is larger than max errors, we can 5683 * not write the data into that chunk successfully. 5684 */ 5685 if (miss_ndevs > btrfs_chunk_max_errors(map)) 5686 ret = false; 5687 end: 5688 free_extent_map(em); 5689 return ret; 5690 } 5691 5692 void btrfs_mapping_tree_free(struct extent_map_tree *tree) 5693 { 5694 struct extent_map *em; 5695 5696 while (1) { 5697 write_lock(&tree->lock); 5698 em = lookup_extent_mapping(tree, 0, (u64)-1); 5699 if (em) 5700 remove_extent_mapping(tree, em); 5701 write_unlock(&tree->lock); 5702 if (!em) 5703 break; 5704 /* once for us */ 5705 free_extent_map(em); 5706 /* once for the tree */ 5707 free_extent_map(em); 5708 } 5709 } 5710 5711 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5712 { 5713 struct extent_map *em; 5714 struct map_lookup *map; 5715 enum btrfs_raid_types index; 5716 int ret = 1; 5717 5718 em = btrfs_get_chunk_map(fs_info, logical, len); 5719 if (IS_ERR(em)) 5720 /* 5721 * We could return errors for these cases, but that could get 5722 * ugly and we'd probably do the same thing which is just not do 5723 * anything else and exit, so return 1 so the callers don't try 5724 * to use other copies. 5725 */ 5726 return 1; 5727 5728 map = em->map_lookup; 5729 index = btrfs_bg_flags_to_raid_index(map->type); 5730 5731 /* Non-RAID56, use their ncopies from btrfs_raid_array. */ 5732 if (!(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)) 5733 ret = btrfs_raid_array[index].ncopies; 5734 else if (map->type & BTRFS_BLOCK_GROUP_RAID5) 5735 ret = 2; 5736 else if (map->type & BTRFS_BLOCK_GROUP_RAID6) 5737 /* 5738 * There could be two corrupted data stripes, we need 5739 * to loop retry in order to rebuild the correct data. 5740 * 5741 * Fail a stripe at a time on every retry except the 5742 * stripe under reconstruction. 5743 */ 5744 ret = map->num_stripes; 5745 free_extent_map(em); 5746 5747 down_read(&fs_info->dev_replace.rwsem); 5748 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) && 5749 fs_info->dev_replace.tgtdev) 5750 ret++; 5751 up_read(&fs_info->dev_replace.rwsem); 5752 5753 return ret; 5754 } 5755 5756 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info, 5757 u64 logical) 5758 { 5759 struct extent_map *em; 5760 struct map_lookup *map; 5761 unsigned long len = fs_info->sectorsize; 5762 5763 if (!btrfs_fs_incompat(fs_info, RAID56)) 5764 return len; 5765 5766 em = btrfs_get_chunk_map(fs_info, logical, len); 5767 5768 if (!WARN_ON(IS_ERR(em))) { 5769 map = em->map_lookup; 5770 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 5771 len = map->stripe_len * nr_data_stripes(map); 5772 free_extent_map(em); 5773 } 5774 return len; 5775 } 5776 5777 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5778 { 5779 struct extent_map *em; 5780 struct map_lookup *map; 5781 int ret = 0; 5782 5783 if (!btrfs_fs_incompat(fs_info, RAID56)) 5784 return 0; 5785 5786 em = btrfs_get_chunk_map(fs_info, logical, len); 5787 5788 if(!WARN_ON(IS_ERR(em))) { 5789 map = em->map_lookup; 5790 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 5791 ret = 1; 5792 free_extent_map(em); 5793 } 5794 return ret; 5795 } 5796 5797 static int find_live_mirror(struct btrfs_fs_info *fs_info, 5798 struct map_lookup *map, int first, 5799 int dev_replace_is_ongoing) 5800 { 5801 int i; 5802 int num_stripes; 5803 int preferred_mirror; 5804 int tolerance; 5805 struct btrfs_device *srcdev; 5806 5807 ASSERT((map->type & 5808 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10))); 5809 5810 if (map->type & BTRFS_BLOCK_GROUP_RAID10) 5811 num_stripes = map->sub_stripes; 5812 else 5813 num_stripes = map->num_stripes; 5814 5815 switch (fs_info->fs_devices->read_policy) { 5816 default: 5817 /* Shouldn't happen, just warn and use pid instead of failing */ 5818 btrfs_warn_rl(fs_info, 5819 "unknown read_policy type %u, reset to pid", 5820 fs_info->fs_devices->read_policy); 5821 fs_info->fs_devices->read_policy = BTRFS_READ_POLICY_PID; 5822 fallthrough; 5823 case BTRFS_READ_POLICY_PID: 5824 preferred_mirror = first + (current->pid % num_stripes); 5825 break; 5826 } 5827 5828 if (dev_replace_is_ongoing && 5829 fs_info->dev_replace.cont_reading_from_srcdev_mode == 5830 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID) 5831 srcdev = fs_info->dev_replace.srcdev; 5832 else 5833 srcdev = NULL; 5834 5835 /* 5836 * try to avoid the drive that is the source drive for a 5837 * dev-replace procedure, only choose it if no other non-missing 5838 * mirror is available 5839 */ 5840 for (tolerance = 0; tolerance < 2; tolerance++) { 5841 if (map->stripes[preferred_mirror].dev->bdev && 5842 (tolerance || map->stripes[preferred_mirror].dev != srcdev)) 5843 return preferred_mirror; 5844 for (i = first; i < first + num_stripes; i++) { 5845 if (map->stripes[i].dev->bdev && 5846 (tolerance || map->stripes[i].dev != srcdev)) 5847 return i; 5848 } 5849 } 5850 5851 /* we couldn't find one that doesn't fail. Just return something 5852 * and the io error handling code will clean up eventually 5853 */ 5854 return preferred_mirror; 5855 } 5856 5857 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */ 5858 static void sort_parity_stripes(struct btrfs_io_context *bioc, int num_stripes) 5859 { 5860 int i; 5861 int again = 1; 5862 5863 while (again) { 5864 again = 0; 5865 for (i = 0; i < num_stripes - 1; i++) { 5866 /* Swap if parity is on a smaller index */ 5867 if (bioc->raid_map[i] > bioc->raid_map[i + 1]) { 5868 swap(bioc->stripes[i], bioc->stripes[i + 1]); 5869 swap(bioc->raid_map[i], bioc->raid_map[i + 1]); 5870 again = 1; 5871 } 5872 } 5873 } 5874 } 5875 5876 static struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info, 5877 int total_stripes, 5878 int real_stripes) 5879 { 5880 struct btrfs_io_context *bioc = kzalloc( 5881 /* The size of btrfs_io_context */ 5882 sizeof(struct btrfs_io_context) + 5883 /* Plus the variable array for the stripes */ 5884 sizeof(struct btrfs_io_stripe) * (total_stripes) + 5885 /* Plus the variable array for the tgt dev */ 5886 sizeof(int) * (real_stripes) + 5887 /* 5888 * Plus the raid_map, which includes both the tgt dev 5889 * and the stripes. 5890 */ 5891 sizeof(u64) * (total_stripes), 5892 GFP_NOFS|__GFP_NOFAIL); 5893 5894 atomic_set(&bioc->error, 0); 5895 refcount_set(&bioc->refs, 1); 5896 5897 bioc->fs_info = fs_info; 5898 bioc->tgtdev_map = (int *)(bioc->stripes + total_stripes); 5899 bioc->raid_map = (u64 *)(bioc->tgtdev_map + real_stripes); 5900 5901 return bioc; 5902 } 5903 5904 void btrfs_get_bioc(struct btrfs_io_context *bioc) 5905 { 5906 WARN_ON(!refcount_read(&bioc->refs)); 5907 refcount_inc(&bioc->refs); 5908 } 5909 5910 void btrfs_put_bioc(struct btrfs_io_context *bioc) 5911 { 5912 if (!bioc) 5913 return; 5914 if (refcount_dec_and_test(&bioc->refs)) 5915 kfree(bioc); 5916 } 5917 5918 /* 5919 * Please note that, discard won't be sent to target device of device 5920 * replace. 5921 */ 5922 struct btrfs_discard_stripe *btrfs_map_discard(struct btrfs_fs_info *fs_info, 5923 u64 logical, u64 *length_ret, 5924 u32 *num_stripes) 5925 { 5926 struct extent_map *em; 5927 struct map_lookup *map; 5928 struct btrfs_discard_stripe *stripes; 5929 u64 length = *length_ret; 5930 u64 offset; 5931 u64 stripe_nr; 5932 u64 stripe_nr_end; 5933 u64 stripe_end_offset; 5934 u64 stripe_cnt; 5935 u64 stripe_len; 5936 u64 stripe_offset; 5937 u32 stripe_index; 5938 u32 factor = 0; 5939 u32 sub_stripes = 0; 5940 u64 stripes_per_dev = 0; 5941 u32 remaining_stripes = 0; 5942 u32 last_stripe = 0; 5943 int ret; 5944 int i; 5945 5946 em = btrfs_get_chunk_map(fs_info, logical, length); 5947 if (IS_ERR(em)) 5948 return ERR_CAST(em); 5949 5950 map = em->map_lookup; 5951 5952 /* we don't discard raid56 yet */ 5953 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 5954 ret = -EOPNOTSUPP; 5955 goto out_free_map; 5956 } 5957 5958 offset = logical - em->start; 5959 length = min_t(u64, em->start + em->len - logical, length); 5960 *length_ret = length; 5961 5962 stripe_len = map->stripe_len; 5963 /* 5964 * stripe_nr counts the total number of stripes we have to stride 5965 * to get to this block 5966 */ 5967 stripe_nr = div64_u64(offset, stripe_len); 5968 5969 /* stripe_offset is the offset of this block in its stripe */ 5970 stripe_offset = offset - stripe_nr * stripe_len; 5971 5972 stripe_nr_end = round_up(offset + length, map->stripe_len); 5973 stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len); 5974 stripe_cnt = stripe_nr_end - stripe_nr; 5975 stripe_end_offset = stripe_nr_end * map->stripe_len - 5976 (offset + length); 5977 /* 5978 * after this, stripe_nr is the number of stripes on this 5979 * device we have to walk to find the data, and stripe_index is 5980 * the number of our device in the stripe array 5981 */ 5982 *num_stripes = 1; 5983 stripe_index = 0; 5984 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 5985 BTRFS_BLOCK_GROUP_RAID10)) { 5986 if (map->type & BTRFS_BLOCK_GROUP_RAID0) 5987 sub_stripes = 1; 5988 else 5989 sub_stripes = map->sub_stripes; 5990 5991 factor = map->num_stripes / sub_stripes; 5992 *num_stripes = min_t(u64, map->num_stripes, 5993 sub_stripes * stripe_cnt); 5994 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); 5995 stripe_index *= sub_stripes; 5996 stripes_per_dev = div_u64_rem(stripe_cnt, factor, 5997 &remaining_stripes); 5998 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe); 5999 last_stripe *= sub_stripes; 6000 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK | 6001 BTRFS_BLOCK_GROUP_DUP)) { 6002 *num_stripes = map->num_stripes; 6003 } else { 6004 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6005 &stripe_index); 6006 } 6007 6008 stripes = kcalloc(*num_stripes, sizeof(*stripes), GFP_NOFS); 6009 if (!stripes) { 6010 ret = -ENOMEM; 6011 goto out_free_map; 6012 } 6013 6014 for (i = 0; i < *num_stripes; i++) { 6015 stripes[i].physical = 6016 map->stripes[stripe_index].physical + 6017 stripe_offset + stripe_nr * map->stripe_len; 6018 stripes[i].dev = map->stripes[stripe_index].dev; 6019 6020 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 6021 BTRFS_BLOCK_GROUP_RAID10)) { 6022 stripes[i].length = stripes_per_dev * map->stripe_len; 6023 6024 if (i / sub_stripes < remaining_stripes) 6025 stripes[i].length += map->stripe_len; 6026 6027 /* 6028 * Special for the first stripe and 6029 * the last stripe: 6030 * 6031 * |-------|...|-------| 6032 * |----------| 6033 * off end_off 6034 */ 6035 if (i < sub_stripes) 6036 stripes[i].length -= stripe_offset; 6037 6038 if (stripe_index >= last_stripe && 6039 stripe_index <= (last_stripe + 6040 sub_stripes - 1)) 6041 stripes[i].length -= stripe_end_offset; 6042 6043 if (i == sub_stripes - 1) 6044 stripe_offset = 0; 6045 } else { 6046 stripes[i].length = length; 6047 } 6048 6049 stripe_index++; 6050 if (stripe_index == map->num_stripes) { 6051 stripe_index = 0; 6052 stripe_nr++; 6053 } 6054 } 6055 6056 free_extent_map(em); 6057 return stripes; 6058 out_free_map: 6059 free_extent_map(em); 6060 return ERR_PTR(ret); 6061 } 6062 6063 /* 6064 * In dev-replace case, for repair case (that's the only case where the mirror 6065 * is selected explicitly when calling btrfs_map_block), blocks left of the 6066 * left cursor can also be read from the target drive. 6067 * 6068 * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the 6069 * array of stripes. 6070 * For READ, it also needs to be supported using the same mirror number. 6071 * 6072 * If the requested block is not left of the left cursor, EIO is returned. This 6073 * can happen because btrfs_num_copies() returns one more in the dev-replace 6074 * case. 6075 */ 6076 static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info, 6077 u64 logical, u64 length, 6078 u64 srcdev_devid, int *mirror_num, 6079 u64 *physical) 6080 { 6081 struct btrfs_io_context *bioc = NULL; 6082 int num_stripes; 6083 int index_srcdev = 0; 6084 int found = 0; 6085 u64 physical_of_found = 0; 6086 int i; 6087 int ret = 0; 6088 6089 ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, 6090 logical, &length, &bioc, 0, 0); 6091 if (ret) { 6092 ASSERT(bioc == NULL); 6093 return ret; 6094 } 6095 6096 num_stripes = bioc->num_stripes; 6097 if (*mirror_num > num_stripes) { 6098 /* 6099 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror, 6100 * that means that the requested area is not left of the left 6101 * cursor 6102 */ 6103 btrfs_put_bioc(bioc); 6104 return -EIO; 6105 } 6106 6107 /* 6108 * process the rest of the function using the mirror_num of the source 6109 * drive. Therefore look it up first. At the end, patch the device 6110 * pointer to the one of the target drive. 6111 */ 6112 for (i = 0; i < num_stripes; i++) { 6113 if (bioc->stripes[i].dev->devid != srcdev_devid) 6114 continue; 6115 6116 /* 6117 * In case of DUP, in order to keep it simple, only add the 6118 * mirror with the lowest physical address 6119 */ 6120 if (found && 6121 physical_of_found <= bioc->stripes[i].physical) 6122 continue; 6123 6124 index_srcdev = i; 6125 found = 1; 6126 physical_of_found = bioc->stripes[i].physical; 6127 } 6128 6129 btrfs_put_bioc(bioc); 6130 6131 ASSERT(found); 6132 if (!found) 6133 return -EIO; 6134 6135 *mirror_num = index_srcdev + 1; 6136 *physical = physical_of_found; 6137 return ret; 6138 } 6139 6140 static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical) 6141 { 6142 struct btrfs_block_group *cache; 6143 bool ret; 6144 6145 /* Non zoned filesystem does not use "to_copy" flag */ 6146 if (!btrfs_is_zoned(fs_info)) 6147 return false; 6148 6149 cache = btrfs_lookup_block_group(fs_info, logical); 6150 6151 spin_lock(&cache->lock); 6152 ret = cache->to_copy; 6153 spin_unlock(&cache->lock); 6154 6155 btrfs_put_block_group(cache); 6156 return ret; 6157 } 6158 6159 static void handle_ops_on_dev_replace(enum btrfs_map_op op, 6160 struct btrfs_io_context **bioc_ret, 6161 struct btrfs_dev_replace *dev_replace, 6162 u64 logical, 6163 int *num_stripes_ret, int *max_errors_ret) 6164 { 6165 struct btrfs_io_context *bioc = *bioc_ret; 6166 u64 srcdev_devid = dev_replace->srcdev->devid; 6167 int tgtdev_indexes = 0; 6168 int num_stripes = *num_stripes_ret; 6169 int max_errors = *max_errors_ret; 6170 int i; 6171 6172 if (op == BTRFS_MAP_WRITE) { 6173 int index_where_to_add; 6174 6175 /* 6176 * A block group which have "to_copy" set will eventually 6177 * copied by dev-replace process. We can avoid cloning IO here. 6178 */ 6179 if (is_block_group_to_copy(dev_replace->srcdev->fs_info, logical)) 6180 return; 6181 6182 /* 6183 * duplicate the write operations while the dev replace 6184 * procedure is running. Since the copying of the old disk to 6185 * the new disk takes place at run time while the filesystem is 6186 * mounted writable, the regular write operations to the old 6187 * disk have to be duplicated to go to the new disk as well. 6188 * 6189 * Note that device->missing is handled by the caller, and that 6190 * the write to the old disk is already set up in the stripes 6191 * array. 6192 */ 6193 index_where_to_add = num_stripes; 6194 for (i = 0; i < num_stripes; i++) { 6195 if (bioc->stripes[i].dev->devid == srcdev_devid) { 6196 /* write to new disk, too */ 6197 struct btrfs_io_stripe *new = 6198 bioc->stripes + index_where_to_add; 6199 struct btrfs_io_stripe *old = 6200 bioc->stripes + i; 6201 6202 new->physical = old->physical; 6203 new->dev = dev_replace->tgtdev; 6204 bioc->tgtdev_map[i] = index_where_to_add; 6205 index_where_to_add++; 6206 max_errors++; 6207 tgtdev_indexes++; 6208 } 6209 } 6210 num_stripes = index_where_to_add; 6211 } else if (op == BTRFS_MAP_GET_READ_MIRRORS) { 6212 int index_srcdev = 0; 6213 int found = 0; 6214 u64 physical_of_found = 0; 6215 6216 /* 6217 * During the dev-replace procedure, the target drive can also 6218 * be used to read data in case it is needed to repair a corrupt 6219 * block elsewhere. This is possible if the requested area is 6220 * left of the left cursor. In this area, the target drive is a 6221 * full copy of the source drive. 6222 */ 6223 for (i = 0; i < num_stripes; i++) { 6224 if (bioc->stripes[i].dev->devid == srcdev_devid) { 6225 /* 6226 * In case of DUP, in order to keep it simple, 6227 * only add the mirror with the lowest physical 6228 * address 6229 */ 6230 if (found && 6231 physical_of_found <= bioc->stripes[i].physical) 6232 continue; 6233 index_srcdev = i; 6234 found = 1; 6235 physical_of_found = bioc->stripes[i].physical; 6236 } 6237 } 6238 if (found) { 6239 struct btrfs_io_stripe *tgtdev_stripe = 6240 bioc->stripes + num_stripes; 6241 6242 tgtdev_stripe->physical = physical_of_found; 6243 tgtdev_stripe->dev = dev_replace->tgtdev; 6244 bioc->tgtdev_map[index_srcdev] = num_stripes; 6245 6246 tgtdev_indexes++; 6247 num_stripes++; 6248 } 6249 } 6250 6251 *num_stripes_ret = num_stripes; 6252 *max_errors_ret = max_errors; 6253 bioc->num_tgtdevs = tgtdev_indexes; 6254 *bioc_ret = bioc; 6255 } 6256 6257 static bool need_full_stripe(enum btrfs_map_op op) 6258 { 6259 return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS); 6260 } 6261 6262 /* 6263 * Calculate the geometry of a particular (address, len) tuple. This 6264 * information is used to calculate how big a particular bio can get before it 6265 * straddles a stripe. 6266 * 6267 * @fs_info: the filesystem 6268 * @em: mapping containing the logical extent 6269 * @op: type of operation - write or read 6270 * @logical: address that we want to figure out the geometry of 6271 * @io_geom: pointer used to return values 6272 * 6273 * Returns < 0 in case a chunk for the given logical address cannot be found, 6274 * usually shouldn't happen unless @logical is corrupted, 0 otherwise. 6275 */ 6276 int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *em, 6277 enum btrfs_map_op op, u64 logical, 6278 struct btrfs_io_geometry *io_geom) 6279 { 6280 struct map_lookup *map; 6281 u64 len; 6282 u64 offset; 6283 u64 stripe_offset; 6284 u64 stripe_nr; 6285 u32 stripe_len; 6286 u64 raid56_full_stripe_start = (u64)-1; 6287 int data_stripes; 6288 6289 ASSERT(op != BTRFS_MAP_DISCARD); 6290 6291 map = em->map_lookup; 6292 /* Offset of this logical address in the chunk */ 6293 offset = logical - em->start; 6294 /* Len of a stripe in a chunk */ 6295 stripe_len = map->stripe_len; 6296 /* 6297 * Stripe_nr is where this block falls in 6298 * stripe_offset is the offset of this block in its stripe. 6299 */ 6300 stripe_nr = div64_u64_rem(offset, stripe_len, &stripe_offset); 6301 ASSERT(stripe_offset < U32_MAX); 6302 6303 data_stripes = nr_data_stripes(map); 6304 6305 /* Only stripe based profiles needs to check against stripe length. */ 6306 if (map->type & BTRFS_BLOCK_GROUP_STRIPE_MASK) { 6307 u64 max_len = stripe_len - stripe_offset; 6308 6309 /* 6310 * In case of raid56, we need to know the stripe aligned start 6311 */ 6312 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 6313 unsigned long full_stripe_len = stripe_len * data_stripes; 6314 raid56_full_stripe_start = offset; 6315 6316 /* 6317 * Allow a write of a full stripe, but make sure we 6318 * don't allow straddling of stripes 6319 */ 6320 raid56_full_stripe_start = div64_u64(raid56_full_stripe_start, 6321 full_stripe_len); 6322 raid56_full_stripe_start *= full_stripe_len; 6323 6324 /* 6325 * For writes to RAID[56], allow a full stripeset across 6326 * all disks. For other RAID types and for RAID[56] 6327 * reads, just allow a single stripe (on a single disk). 6328 */ 6329 if (op == BTRFS_MAP_WRITE) { 6330 max_len = stripe_len * data_stripes - 6331 (offset - raid56_full_stripe_start); 6332 } 6333 } 6334 len = min_t(u64, em->len - offset, max_len); 6335 } else { 6336 len = em->len - offset; 6337 } 6338 6339 io_geom->len = len; 6340 io_geom->offset = offset; 6341 io_geom->stripe_len = stripe_len; 6342 io_geom->stripe_nr = stripe_nr; 6343 io_geom->stripe_offset = stripe_offset; 6344 io_geom->raid56_stripe_offset = raid56_full_stripe_start; 6345 6346 return 0; 6347 } 6348 6349 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, 6350 enum btrfs_map_op op, 6351 u64 logical, u64 *length, 6352 struct btrfs_io_context **bioc_ret, 6353 int mirror_num, int need_raid_map) 6354 { 6355 struct extent_map *em; 6356 struct map_lookup *map; 6357 u64 stripe_offset; 6358 u64 stripe_nr; 6359 u64 stripe_len; 6360 u32 stripe_index; 6361 int data_stripes; 6362 int i; 6363 int ret = 0; 6364 int num_stripes; 6365 int max_errors = 0; 6366 int tgtdev_indexes = 0; 6367 struct btrfs_io_context *bioc = NULL; 6368 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; 6369 int dev_replace_is_ongoing = 0; 6370 int num_alloc_stripes; 6371 int patch_the_first_stripe_for_dev_replace = 0; 6372 u64 physical_to_patch_in_first_stripe = 0; 6373 u64 raid56_full_stripe_start = (u64)-1; 6374 struct btrfs_io_geometry geom; 6375 6376 ASSERT(bioc_ret); 6377 ASSERT(op != BTRFS_MAP_DISCARD); 6378 6379 em = btrfs_get_chunk_map(fs_info, logical, *length); 6380 ASSERT(!IS_ERR(em)); 6381 6382 ret = btrfs_get_io_geometry(fs_info, em, op, logical, &geom); 6383 if (ret < 0) 6384 return ret; 6385 6386 map = em->map_lookup; 6387 6388 *length = geom.len; 6389 stripe_len = geom.stripe_len; 6390 stripe_nr = geom.stripe_nr; 6391 stripe_offset = geom.stripe_offset; 6392 raid56_full_stripe_start = geom.raid56_stripe_offset; 6393 data_stripes = nr_data_stripes(map); 6394 6395 down_read(&dev_replace->rwsem); 6396 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace); 6397 /* 6398 * Hold the semaphore for read during the whole operation, write is 6399 * requested at commit time but must wait. 6400 */ 6401 if (!dev_replace_is_ongoing) 6402 up_read(&dev_replace->rwsem); 6403 6404 if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 && 6405 !need_full_stripe(op) && dev_replace->tgtdev != NULL) { 6406 ret = get_extra_mirror_from_replace(fs_info, logical, *length, 6407 dev_replace->srcdev->devid, 6408 &mirror_num, 6409 &physical_to_patch_in_first_stripe); 6410 if (ret) 6411 goto out; 6412 else 6413 patch_the_first_stripe_for_dev_replace = 1; 6414 } else if (mirror_num > map->num_stripes) { 6415 mirror_num = 0; 6416 } 6417 6418 num_stripes = 1; 6419 stripe_index = 0; 6420 if (map->type & BTRFS_BLOCK_GROUP_RAID0) { 6421 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6422 &stripe_index); 6423 if (!need_full_stripe(op)) 6424 mirror_num = 1; 6425 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) { 6426 if (need_full_stripe(op)) 6427 num_stripes = map->num_stripes; 6428 else if (mirror_num) 6429 stripe_index = mirror_num - 1; 6430 else { 6431 stripe_index = find_live_mirror(fs_info, map, 0, 6432 dev_replace_is_ongoing); 6433 mirror_num = stripe_index + 1; 6434 } 6435 6436 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { 6437 if (need_full_stripe(op)) { 6438 num_stripes = map->num_stripes; 6439 } else if (mirror_num) { 6440 stripe_index = mirror_num - 1; 6441 } else { 6442 mirror_num = 1; 6443 } 6444 6445 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { 6446 u32 factor = map->num_stripes / map->sub_stripes; 6447 6448 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); 6449 stripe_index *= map->sub_stripes; 6450 6451 if (need_full_stripe(op)) 6452 num_stripes = map->sub_stripes; 6453 else if (mirror_num) 6454 stripe_index += mirror_num - 1; 6455 else { 6456 int old_stripe_index = stripe_index; 6457 stripe_index = find_live_mirror(fs_info, map, 6458 stripe_index, 6459 dev_replace_is_ongoing); 6460 mirror_num = stripe_index - old_stripe_index + 1; 6461 } 6462 6463 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 6464 if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) { 6465 /* push stripe_nr back to the start of the full stripe */ 6466 stripe_nr = div64_u64(raid56_full_stripe_start, 6467 stripe_len * data_stripes); 6468 6469 /* RAID[56] write or recovery. Return all stripes */ 6470 num_stripes = map->num_stripes; 6471 max_errors = btrfs_chunk_max_errors(map); 6472 6473 *length = map->stripe_len; 6474 stripe_index = 0; 6475 stripe_offset = 0; 6476 } else { 6477 /* 6478 * Mirror #0 or #1 means the original data block. 6479 * Mirror #2 is RAID5 parity block. 6480 * Mirror #3 is RAID6 Q block. 6481 */ 6482 stripe_nr = div_u64_rem(stripe_nr, 6483 data_stripes, &stripe_index); 6484 if (mirror_num > 1) 6485 stripe_index = data_stripes + mirror_num - 2; 6486 6487 /* We distribute the parity blocks across stripes */ 6488 div_u64_rem(stripe_nr + stripe_index, map->num_stripes, 6489 &stripe_index); 6490 if (!need_full_stripe(op) && mirror_num <= 1) 6491 mirror_num = 1; 6492 } 6493 } else { 6494 /* 6495 * after this, stripe_nr is the number of stripes on this 6496 * device we have to walk to find the data, and stripe_index is 6497 * the number of our device in the stripe array 6498 */ 6499 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6500 &stripe_index); 6501 mirror_num = stripe_index + 1; 6502 } 6503 if (stripe_index >= map->num_stripes) { 6504 btrfs_crit(fs_info, 6505 "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u", 6506 stripe_index, map->num_stripes); 6507 ret = -EINVAL; 6508 goto out; 6509 } 6510 6511 num_alloc_stripes = num_stripes; 6512 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) { 6513 if (op == BTRFS_MAP_WRITE) 6514 num_alloc_stripes <<= 1; 6515 if (op == BTRFS_MAP_GET_READ_MIRRORS) 6516 num_alloc_stripes++; 6517 tgtdev_indexes = num_stripes; 6518 } 6519 6520 bioc = alloc_btrfs_io_context(fs_info, num_alloc_stripes, tgtdev_indexes); 6521 if (!bioc) { 6522 ret = -ENOMEM; 6523 goto out; 6524 } 6525 6526 for (i = 0; i < num_stripes; i++) { 6527 bioc->stripes[i].physical = map->stripes[stripe_index].physical + 6528 stripe_offset + stripe_nr * map->stripe_len; 6529 bioc->stripes[i].dev = map->stripes[stripe_index].dev; 6530 stripe_index++; 6531 } 6532 6533 /* Build raid_map */ 6534 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map && 6535 (need_full_stripe(op) || mirror_num > 1)) { 6536 u64 tmp; 6537 unsigned rot; 6538 6539 /* Work out the disk rotation on this stripe-set */ 6540 div_u64_rem(stripe_nr, num_stripes, &rot); 6541 6542 /* Fill in the logical address of each stripe */ 6543 tmp = stripe_nr * data_stripes; 6544 for (i = 0; i < data_stripes; i++) 6545 bioc->raid_map[(i + rot) % num_stripes] = 6546 em->start + (tmp + i) * map->stripe_len; 6547 6548 bioc->raid_map[(i + rot) % map->num_stripes] = RAID5_P_STRIPE; 6549 if (map->type & BTRFS_BLOCK_GROUP_RAID6) 6550 bioc->raid_map[(i + rot + 1) % num_stripes] = 6551 RAID6_Q_STRIPE; 6552 6553 sort_parity_stripes(bioc, num_stripes); 6554 } 6555 6556 if (need_full_stripe(op)) 6557 max_errors = btrfs_chunk_max_errors(map); 6558 6559 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL && 6560 need_full_stripe(op)) { 6561 handle_ops_on_dev_replace(op, &bioc, dev_replace, logical, 6562 &num_stripes, &max_errors); 6563 } 6564 6565 *bioc_ret = bioc; 6566 bioc->map_type = map->type; 6567 bioc->num_stripes = num_stripes; 6568 bioc->max_errors = max_errors; 6569 bioc->mirror_num = mirror_num; 6570 6571 /* 6572 * this is the case that REQ_READ && dev_replace_is_ongoing && 6573 * mirror_num == num_stripes + 1 && dev_replace target drive is 6574 * available as a mirror 6575 */ 6576 if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) { 6577 WARN_ON(num_stripes > 1); 6578 bioc->stripes[0].dev = dev_replace->tgtdev; 6579 bioc->stripes[0].physical = physical_to_patch_in_first_stripe; 6580 bioc->mirror_num = map->num_stripes + 1; 6581 } 6582 out: 6583 if (dev_replace_is_ongoing) { 6584 lockdep_assert_held(&dev_replace->rwsem); 6585 /* Unlock and let waiting writers proceed */ 6586 up_read(&dev_replace->rwsem); 6587 } 6588 free_extent_map(em); 6589 return ret; 6590 } 6591 6592 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, 6593 u64 logical, u64 *length, 6594 struct btrfs_io_context **bioc_ret, int mirror_num) 6595 { 6596 return __btrfs_map_block(fs_info, op, logical, length, bioc_ret, 6597 mirror_num, 0); 6598 } 6599 6600 /* For Scrub/replace */ 6601 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, 6602 u64 logical, u64 *length, 6603 struct btrfs_io_context **bioc_ret) 6604 { 6605 return __btrfs_map_block(fs_info, op, logical, length, bioc_ret, 0, 1); 6606 } 6607 6608 static struct workqueue_struct *btrfs_end_io_wq(struct btrfs_io_context *bioc) 6609 { 6610 if (bioc->orig_bio->bi_opf & REQ_META) 6611 return bioc->fs_info->endio_meta_workers; 6612 return bioc->fs_info->endio_workers; 6613 } 6614 6615 static void btrfs_end_bio_work(struct work_struct *work) 6616 { 6617 struct btrfs_bio *bbio = 6618 container_of(work, struct btrfs_bio, end_io_work); 6619 6620 bio_endio(&bbio->bio); 6621 } 6622 6623 static void btrfs_end_bioc(struct btrfs_io_context *bioc, bool async) 6624 { 6625 struct bio *orig_bio = bioc->orig_bio; 6626 struct btrfs_bio *bbio = btrfs_bio(orig_bio); 6627 6628 bbio->mirror_num = bioc->mirror_num; 6629 orig_bio->bi_private = bioc->private; 6630 orig_bio->bi_end_io = bioc->end_io; 6631 6632 /* 6633 * Only send an error to the higher layers if it is beyond the tolerance 6634 * threshold. 6635 */ 6636 if (atomic_read(&bioc->error) > bioc->max_errors) 6637 orig_bio->bi_status = BLK_STS_IOERR; 6638 else 6639 orig_bio->bi_status = BLK_STS_OK; 6640 6641 if (btrfs_op(orig_bio) == BTRFS_MAP_READ && async) { 6642 INIT_WORK(&bbio->end_io_work, btrfs_end_bio_work); 6643 queue_work(btrfs_end_io_wq(bioc), &bbio->end_io_work); 6644 } else { 6645 bio_endio(orig_bio); 6646 } 6647 6648 btrfs_put_bioc(bioc); 6649 } 6650 6651 static void btrfs_end_bio(struct bio *bio) 6652 { 6653 struct btrfs_io_stripe *stripe = bio->bi_private; 6654 struct btrfs_io_context *bioc = stripe->bioc; 6655 6656 if (bio->bi_status) { 6657 atomic_inc(&bioc->error); 6658 if (bio->bi_status == BLK_STS_IOERR || 6659 bio->bi_status == BLK_STS_TARGET) { 6660 if (btrfs_op(bio) == BTRFS_MAP_WRITE) 6661 btrfs_dev_stat_inc_and_print(stripe->dev, 6662 BTRFS_DEV_STAT_WRITE_ERRS); 6663 else if (!(bio->bi_opf & REQ_RAHEAD)) 6664 btrfs_dev_stat_inc_and_print(stripe->dev, 6665 BTRFS_DEV_STAT_READ_ERRS); 6666 if (bio->bi_opf & REQ_PREFLUSH) 6667 btrfs_dev_stat_inc_and_print(stripe->dev, 6668 BTRFS_DEV_STAT_FLUSH_ERRS); 6669 } 6670 } 6671 6672 if (bio != bioc->orig_bio) 6673 bio_put(bio); 6674 6675 btrfs_bio_counter_dec(bioc->fs_info); 6676 if (atomic_dec_and_test(&bioc->stripes_pending)) 6677 btrfs_end_bioc(bioc, true); 6678 } 6679 6680 static void submit_stripe_bio(struct btrfs_io_context *bioc, 6681 struct bio *orig_bio, int dev_nr, bool clone) 6682 { 6683 struct btrfs_fs_info *fs_info = bioc->fs_info; 6684 struct btrfs_device *dev = bioc->stripes[dev_nr].dev; 6685 u64 physical = bioc->stripes[dev_nr].physical; 6686 struct bio *bio; 6687 6688 if (!dev || !dev->bdev || 6689 test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) || 6690 (btrfs_op(orig_bio) == BTRFS_MAP_WRITE && 6691 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) { 6692 atomic_inc(&bioc->error); 6693 if (atomic_dec_and_test(&bioc->stripes_pending)) 6694 btrfs_end_bioc(bioc, false); 6695 return; 6696 } 6697 6698 if (clone) { 6699 bio = bio_alloc_clone(dev->bdev, orig_bio, GFP_NOFS, &fs_bio_set); 6700 } else { 6701 bio = orig_bio; 6702 bio_set_dev(bio, dev->bdev); 6703 btrfs_bio(bio)->device = dev; 6704 } 6705 6706 bioc->stripes[dev_nr].bioc = bioc; 6707 bio->bi_private = &bioc->stripes[dev_nr]; 6708 bio->bi_end_io = btrfs_end_bio; 6709 bio->bi_iter.bi_sector = physical >> 9; 6710 /* 6711 * For zone append writing, bi_sector must point the beginning of the 6712 * zone 6713 */ 6714 if (bio_op(bio) == REQ_OP_ZONE_APPEND) { 6715 if (btrfs_dev_is_sequential(dev, physical)) { 6716 u64 zone_start = round_down(physical, fs_info->zone_size); 6717 6718 bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT; 6719 } else { 6720 bio->bi_opf &= ~REQ_OP_ZONE_APPEND; 6721 bio->bi_opf |= REQ_OP_WRITE; 6722 } 6723 } 6724 btrfs_debug_in_rcu(fs_info, 6725 "btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u", 6726 bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector, 6727 (unsigned long)dev->bdev->bd_dev, rcu_str_deref(dev->name), 6728 dev->devid, bio->bi_iter.bi_size); 6729 6730 btrfs_bio_counter_inc_noblocked(fs_info); 6731 6732 btrfsic_check_bio(bio); 6733 submit_bio(bio); 6734 } 6735 6736 blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio, 6737 int mirror_num) 6738 { 6739 u64 logical = bio->bi_iter.bi_sector << 9; 6740 u64 length = bio->bi_iter.bi_size; 6741 u64 map_length = length; 6742 int ret; 6743 int dev_nr; 6744 int total_devs; 6745 struct btrfs_io_context *bioc = NULL; 6746 6747 btrfs_bio_counter_inc_blocked(fs_info); 6748 ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical, 6749 &map_length, &bioc, mirror_num, 1); 6750 if (ret) 6751 goto out_dec; 6752 6753 total_devs = bioc->num_stripes; 6754 bioc->orig_bio = bio; 6755 bioc->private = bio->bi_private; 6756 bioc->end_io = bio->bi_end_io; 6757 atomic_set(&bioc->stripes_pending, total_devs); 6758 6759 if ((bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) && 6760 ((btrfs_op(bio) == BTRFS_MAP_WRITE) || (mirror_num > 1))) { 6761 /* In this case, map_length has been set to the length of 6762 a single stripe; not the whole write */ 6763 if (btrfs_op(bio) == BTRFS_MAP_WRITE) { 6764 ret = raid56_parity_write(bio, bioc, map_length); 6765 } else { 6766 ret = raid56_parity_recover(bio, bioc, map_length, 6767 mirror_num, 1); 6768 } 6769 goto out_dec; 6770 } 6771 6772 if (map_length < length) { 6773 btrfs_crit(fs_info, 6774 "mapping failed logical %llu bio len %llu len %llu", 6775 logical, length, map_length); 6776 BUG(); 6777 } 6778 6779 for (dev_nr = 0; dev_nr < total_devs; dev_nr++) { 6780 const bool should_clone = (dev_nr < total_devs - 1); 6781 6782 submit_stripe_bio(bioc, bio, dev_nr, should_clone); 6783 } 6784 out_dec: 6785 btrfs_bio_counter_dec(fs_info); 6786 return errno_to_blk_status(ret); 6787 } 6788 6789 static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args, 6790 const struct btrfs_fs_devices *fs_devices) 6791 { 6792 if (args->fsid == NULL) 6793 return true; 6794 if (memcmp(fs_devices->metadata_uuid, args->fsid, BTRFS_FSID_SIZE) == 0) 6795 return true; 6796 return false; 6797 } 6798 6799 static bool dev_args_match_device(const struct btrfs_dev_lookup_args *args, 6800 const struct btrfs_device *device) 6801 { 6802 ASSERT((args->devid != (u64)-1) || args->missing); 6803 6804 if ((args->devid != (u64)-1) && device->devid != args->devid) 6805 return false; 6806 if (args->uuid && memcmp(device->uuid, args->uuid, BTRFS_UUID_SIZE) != 0) 6807 return false; 6808 if (!args->missing) 6809 return true; 6810 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) && 6811 !device->bdev) 6812 return true; 6813 return false; 6814 } 6815 6816 /* 6817 * Find a device specified by @devid or @uuid in the list of @fs_devices, or 6818 * return NULL. 6819 * 6820 * If devid and uuid are both specified, the match must be exact, otherwise 6821 * only devid is used. 6822 */ 6823 struct btrfs_device *btrfs_find_device(const struct btrfs_fs_devices *fs_devices, 6824 const struct btrfs_dev_lookup_args *args) 6825 { 6826 struct btrfs_device *device; 6827 struct btrfs_fs_devices *seed_devs; 6828 6829 if (dev_args_match_fs_devices(args, fs_devices)) { 6830 list_for_each_entry(device, &fs_devices->devices, dev_list) { 6831 if (dev_args_match_device(args, device)) 6832 return device; 6833 } 6834 } 6835 6836 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 6837 if (!dev_args_match_fs_devices(args, seed_devs)) 6838 continue; 6839 list_for_each_entry(device, &seed_devs->devices, dev_list) { 6840 if (dev_args_match_device(args, device)) 6841 return device; 6842 } 6843 } 6844 6845 return NULL; 6846 } 6847 6848 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices, 6849 u64 devid, u8 *dev_uuid) 6850 { 6851 struct btrfs_device *device; 6852 unsigned int nofs_flag; 6853 6854 /* 6855 * We call this under the chunk_mutex, so we want to use NOFS for this 6856 * allocation, however we don't want to change btrfs_alloc_device() to 6857 * always do NOFS because we use it in a lot of other GFP_KERNEL safe 6858 * places. 6859 */ 6860 nofs_flag = memalloc_nofs_save(); 6861 device = btrfs_alloc_device(NULL, &devid, dev_uuid); 6862 memalloc_nofs_restore(nofs_flag); 6863 if (IS_ERR(device)) 6864 return device; 6865 6866 list_add(&device->dev_list, &fs_devices->devices); 6867 device->fs_devices = fs_devices; 6868 fs_devices->num_devices++; 6869 6870 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 6871 fs_devices->missing_devices++; 6872 6873 return device; 6874 } 6875 6876 /** 6877 * btrfs_alloc_device - allocate struct btrfs_device 6878 * @fs_info: used only for generating a new devid, can be NULL if 6879 * devid is provided (i.e. @devid != NULL). 6880 * @devid: a pointer to devid for this device. If NULL a new devid 6881 * is generated. 6882 * @uuid: a pointer to UUID for this device. If NULL a new UUID 6883 * is generated. 6884 * 6885 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR() 6886 * on error. Returned struct is not linked onto any lists and must be 6887 * destroyed with btrfs_free_device. 6888 */ 6889 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info, 6890 const u64 *devid, 6891 const u8 *uuid) 6892 { 6893 struct btrfs_device *dev; 6894 u64 tmp; 6895 6896 if (WARN_ON(!devid && !fs_info)) 6897 return ERR_PTR(-EINVAL); 6898 6899 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 6900 if (!dev) 6901 return ERR_PTR(-ENOMEM); 6902 6903 INIT_LIST_HEAD(&dev->dev_list); 6904 INIT_LIST_HEAD(&dev->dev_alloc_list); 6905 INIT_LIST_HEAD(&dev->post_commit_list); 6906 6907 atomic_set(&dev->dev_stats_ccnt, 0); 6908 btrfs_device_data_ordered_init(dev); 6909 extent_io_tree_init(fs_info, &dev->alloc_state, 6910 IO_TREE_DEVICE_ALLOC_STATE, NULL); 6911 6912 if (devid) 6913 tmp = *devid; 6914 else { 6915 int ret; 6916 6917 ret = find_next_devid(fs_info, &tmp); 6918 if (ret) { 6919 btrfs_free_device(dev); 6920 return ERR_PTR(ret); 6921 } 6922 } 6923 dev->devid = tmp; 6924 6925 if (uuid) 6926 memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE); 6927 else 6928 generate_random_uuid(dev->uuid); 6929 6930 return dev; 6931 } 6932 6933 static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info, 6934 u64 devid, u8 *uuid, bool error) 6935 { 6936 if (error) 6937 btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing", 6938 devid, uuid); 6939 else 6940 btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing", 6941 devid, uuid); 6942 } 6943 6944 u64 btrfs_calc_stripe_length(const struct extent_map *em) 6945 { 6946 const struct map_lookup *map = em->map_lookup; 6947 const int data_stripes = calc_data_stripes(map->type, map->num_stripes); 6948 6949 return div_u64(em->len, data_stripes); 6950 } 6951 6952 #if BITS_PER_LONG == 32 6953 /* 6954 * Due to page cache limit, metadata beyond BTRFS_32BIT_MAX_FILE_SIZE 6955 * can't be accessed on 32bit systems. 6956 * 6957 * This function do mount time check to reject the fs if it already has 6958 * metadata chunk beyond that limit. 6959 */ 6960 static int check_32bit_meta_chunk(struct btrfs_fs_info *fs_info, 6961 u64 logical, u64 length, u64 type) 6962 { 6963 if (!(type & BTRFS_BLOCK_GROUP_METADATA)) 6964 return 0; 6965 6966 if (logical + length < MAX_LFS_FILESIZE) 6967 return 0; 6968 6969 btrfs_err_32bit_limit(fs_info); 6970 return -EOVERFLOW; 6971 } 6972 6973 /* 6974 * This is to give early warning for any metadata chunk reaching 6975 * BTRFS_32BIT_EARLY_WARN_THRESHOLD. 6976 * Although we can still access the metadata, it's not going to be possible 6977 * once the limit is reached. 6978 */ 6979 static void warn_32bit_meta_chunk(struct btrfs_fs_info *fs_info, 6980 u64 logical, u64 length, u64 type) 6981 { 6982 if (!(type & BTRFS_BLOCK_GROUP_METADATA)) 6983 return; 6984 6985 if (logical + length < BTRFS_32BIT_EARLY_WARN_THRESHOLD) 6986 return; 6987 6988 btrfs_warn_32bit_limit(fs_info); 6989 } 6990 #endif 6991 6992 static struct btrfs_device *handle_missing_device(struct btrfs_fs_info *fs_info, 6993 u64 devid, u8 *uuid) 6994 { 6995 struct btrfs_device *dev; 6996 6997 if (!btrfs_test_opt(fs_info, DEGRADED)) { 6998 btrfs_report_missing_device(fs_info, devid, uuid, true); 6999 return ERR_PTR(-ENOENT); 7000 } 7001 7002 dev = add_missing_dev(fs_info->fs_devices, devid, uuid); 7003 if (IS_ERR(dev)) { 7004 btrfs_err(fs_info, "failed to init missing device %llu: %ld", 7005 devid, PTR_ERR(dev)); 7006 return dev; 7007 } 7008 btrfs_report_missing_device(fs_info, devid, uuid, false); 7009 7010 return dev; 7011 } 7012 7013 static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf, 7014 struct btrfs_chunk *chunk) 7015 { 7016 BTRFS_DEV_LOOKUP_ARGS(args); 7017 struct btrfs_fs_info *fs_info = leaf->fs_info; 7018 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 7019 struct map_lookup *map; 7020 struct extent_map *em; 7021 u64 logical; 7022 u64 length; 7023 u64 devid; 7024 u64 type; 7025 u8 uuid[BTRFS_UUID_SIZE]; 7026 int num_stripes; 7027 int ret; 7028 int i; 7029 7030 logical = key->offset; 7031 length = btrfs_chunk_length(leaf, chunk); 7032 type = btrfs_chunk_type(leaf, chunk); 7033 num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 7034 7035 #if BITS_PER_LONG == 32 7036 ret = check_32bit_meta_chunk(fs_info, logical, length, type); 7037 if (ret < 0) 7038 return ret; 7039 warn_32bit_meta_chunk(fs_info, logical, length, type); 7040 #endif 7041 7042 /* 7043 * Only need to verify chunk item if we're reading from sys chunk array, 7044 * as chunk item in tree block is already verified by tree-checker. 7045 */ 7046 if (leaf->start == BTRFS_SUPER_INFO_OFFSET) { 7047 ret = btrfs_check_chunk_valid(leaf, chunk, logical); 7048 if (ret) 7049 return ret; 7050 } 7051 7052 read_lock(&map_tree->lock); 7053 em = lookup_extent_mapping(map_tree, logical, 1); 7054 read_unlock(&map_tree->lock); 7055 7056 /* already mapped? */ 7057 if (em && em->start <= logical && em->start + em->len > logical) { 7058 free_extent_map(em); 7059 return 0; 7060 } else if (em) { 7061 free_extent_map(em); 7062 } 7063 7064 em = alloc_extent_map(); 7065 if (!em) 7066 return -ENOMEM; 7067 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); 7068 if (!map) { 7069 free_extent_map(em); 7070 return -ENOMEM; 7071 } 7072 7073 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 7074 em->map_lookup = map; 7075 em->start = logical; 7076 em->len = length; 7077 em->orig_start = 0; 7078 em->block_start = 0; 7079 em->block_len = em->len; 7080 7081 map->num_stripes = num_stripes; 7082 map->io_width = btrfs_chunk_io_width(leaf, chunk); 7083 map->io_align = btrfs_chunk_io_align(leaf, chunk); 7084 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk); 7085 map->type = type; 7086 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); 7087 map->verified_stripes = 0; 7088 em->orig_block_len = btrfs_calc_stripe_length(em); 7089 for (i = 0; i < num_stripes; i++) { 7090 map->stripes[i].physical = 7091 btrfs_stripe_offset_nr(leaf, chunk, i); 7092 devid = btrfs_stripe_devid_nr(leaf, chunk, i); 7093 args.devid = devid; 7094 read_extent_buffer(leaf, uuid, (unsigned long) 7095 btrfs_stripe_dev_uuid_nr(chunk, i), 7096 BTRFS_UUID_SIZE); 7097 args.uuid = uuid; 7098 map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, &args); 7099 if (!map->stripes[i].dev) { 7100 map->stripes[i].dev = handle_missing_device(fs_info, 7101 devid, uuid); 7102 if (IS_ERR(map->stripes[i].dev)) { 7103 free_extent_map(em); 7104 return PTR_ERR(map->stripes[i].dev); 7105 } 7106 } 7107 7108 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 7109 &(map->stripes[i].dev->dev_state)); 7110 } 7111 7112 write_lock(&map_tree->lock); 7113 ret = add_extent_mapping(map_tree, em, 0); 7114 write_unlock(&map_tree->lock); 7115 if (ret < 0) { 7116 btrfs_err(fs_info, 7117 "failed to add chunk map, start=%llu len=%llu: %d", 7118 em->start, em->len, ret); 7119 } 7120 free_extent_map(em); 7121 7122 return ret; 7123 } 7124 7125 static void fill_device_from_item(struct extent_buffer *leaf, 7126 struct btrfs_dev_item *dev_item, 7127 struct btrfs_device *device) 7128 { 7129 unsigned long ptr; 7130 7131 device->devid = btrfs_device_id(leaf, dev_item); 7132 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item); 7133 device->total_bytes = device->disk_total_bytes; 7134 device->commit_total_bytes = device->disk_total_bytes; 7135 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item); 7136 device->commit_bytes_used = device->bytes_used; 7137 device->type = btrfs_device_type(leaf, dev_item); 7138 device->io_align = btrfs_device_io_align(leaf, dev_item); 7139 device->io_width = btrfs_device_io_width(leaf, dev_item); 7140 device->sector_size = btrfs_device_sector_size(leaf, dev_item); 7141 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID); 7142 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 7143 7144 ptr = btrfs_device_uuid(dev_item); 7145 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 7146 } 7147 7148 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info, 7149 u8 *fsid) 7150 { 7151 struct btrfs_fs_devices *fs_devices; 7152 int ret; 7153 7154 lockdep_assert_held(&uuid_mutex); 7155 ASSERT(fsid); 7156 7157 /* This will match only for multi-device seed fs */ 7158 list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list) 7159 if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE)) 7160 return fs_devices; 7161 7162 7163 fs_devices = find_fsid(fsid, NULL); 7164 if (!fs_devices) { 7165 if (!btrfs_test_opt(fs_info, DEGRADED)) 7166 return ERR_PTR(-ENOENT); 7167 7168 fs_devices = alloc_fs_devices(fsid, NULL); 7169 if (IS_ERR(fs_devices)) 7170 return fs_devices; 7171 7172 fs_devices->seeding = true; 7173 fs_devices->opened = 1; 7174 return fs_devices; 7175 } 7176 7177 /* 7178 * Upon first call for a seed fs fsid, just create a private copy of the 7179 * respective fs_devices and anchor it at fs_info->fs_devices->seed_list 7180 */ 7181 fs_devices = clone_fs_devices(fs_devices); 7182 if (IS_ERR(fs_devices)) 7183 return fs_devices; 7184 7185 ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder); 7186 if (ret) { 7187 free_fs_devices(fs_devices); 7188 return ERR_PTR(ret); 7189 } 7190 7191 if (!fs_devices->seeding) { 7192 close_fs_devices(fs_devices); 7193 free_fs_devices(fs_devices); 7194 return ERR_PTR(-EINVAL); 7195 } 7196 7197 list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list); 7198 7199 return fs_devices; 7200 } 7201 7202 static int read_one_dev(struct extent_buffer *leaf, 7203 struct btrfs_dev_item *dev_item) 7204 { 7205 BTRFS_DEV_LOOKUP_ARGS(args); 7206 struct btrfs_fs_info *fs_info = leaf->fs_info; 7207 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7208 struct btrfs_device *device; 7209 u64 devid; 7210 int ret; 7211 u8 fs_uuid[BTRFS_FSID_SIZE]; 7212 u8 dev_uuid[BTRFS_UUID_SIZE]; 7213 7214 devid = args.devid = btrfs_device_id(leaf, dev_item); 7215 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 7216 BTRFS_UUID_SIZE); 7217 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 7218 BTRFS_FSID_SIZE); 7219 args.uuid = dev_uuid; 7220 args.fsid = fs_uuid; 7221 7222 if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) { 7223 fs_devices = open_seed_devices(fs_info, fs_uuid); 7224 if (IS_ERR(fs_devices)) 7225 return PTR_ERR(fs_devices); 7226 } 7227 7228 device = btrfs_find_device(fs_info->fs_devices, &args); 7229 if (!device) { 7230 if (!btrfs_test_opt(fs_info, DEGRADED)) { 7231 btrfs_report_missing_device(fs_info, devid, 7232 dev_uuid, true); 7233 return -ENOENT; 7234 } 7235 7236 device = add_missing_dev(fs_devices, devid, dev_uuid); 7237 if (IS_ERR(device)) { 7238 btrfs_err(fs_info, 7239 "failed to add missing dev %llu: %ld", 7240 devid, PTR_ERR(device)); 7241 return PTR_ERR(device); 7242 } 7243 btrfs_report_missing_device(fs_info, devid, dev_uuid, false); 7244 } else { 7245 if (!device->bdev) { 7246 if (!btrfs_test_opt(fs_info, DEGRADED)) { 7247 btrfs_report_missing_device(fs_info, 7248 devid, dev_uuid, true); 7249 return -ENOENT; 7250 } 7251 btrfs_report_missing_device(fs_info, devid, 7252 dev_uuid, false); 7253 } 7254 7255 if (!device->bdev && 7256 !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { 7257 /* 7258 * this happens when a device that was properly setup 7259 * in the device info lists suddenly goes bad. 7260 * device->bdev is NULL, and so we have to set 7261 * device->missing to one here 7262 */ 7263 device->fs_devices->missing_devices++; 7264 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 7265 } 7266 7267 /* Move the device to its own fs_devices */ 7268 if (device->fs_devices != fs_devices) { 7269 ASSERT(test_bit(BTRFS_DEV_STATE_MISSING, 7270 &device->dev_state)); 7271 7272 list_move(&device->dev_list, &fs_devices->devices); 7273 device->fs_devices->num_devices--; 7274 fs_devices->num_devices++; 7275 7276 device->fs_devices->missing_devices--; 7277 fs_devices->missing_devices++; 7278 7279 device->fs_devices = fs_devices; 7280 } 7281 } 7282 7283 if (device->fs_devices != fs_info->fs_devices) { 7284 BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)); 7285 if (device->generation != 7286 btrfs_device_generation(leaf, dev_item)) 7287 return -EINVAL; 7288 } 7289 7290 fill_device_from_item(leaf, dev_item, device); 7291 if (device->bdev) { 7292 u64 max_total_bytes = bdev_nr_bytes(device->bdev); 7293 7294 if (device->total_bytes > max_total_bytes) { 7295 btrfs_err(fs_info, 7296 "device total_bytes should be at most %llu but found %llu", 7297 max_total_bytes, device->total_bytes); 7298 return -EINVAL; 7299 } 7300 } 7301 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 7302 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 7303 !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 7304 device->fs_devices->total_rw_bytes += device->total_bytes; 7305 atomic64_add(device->total_bytes - device->bytes_used, 7306 &fs_info->free_chunk_space); 7307 } 7308 ret = 0; 7309 return ret; 7310 } 7311 7312 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info) 7313 { 7314 struct btrfs_super_block *super_copy = fs_info->super_copy; 7315 struct extent_buffer *sb; 7316 struct btrfs_disk_key *disk_key; 7317 struct btrfs_chunk *chunk; 7318 u8 *array_ptr; 7319 unsigned long sb_array_offset; 7320 int ret = 0; 7321 u32 num_stripes; 7322 u32 array_size; 7323 u32 len = 0; 7324 u32 cur_offset; 7325 u64 type; 7326 struct btrfs_key key; 7327 7328 ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize); 7329 7330 /* 7331 * We allocated a dummy extent, just to use extent buffer accessors. 7332 * There will be unused space after BTRFS_SUPER_INFO_SIZE, but 7333 * that's fine, we will not go beyond system chunk array anyway. 7334 */ 7335 sb = alloc_dummy_extent_buffer(fs_info, BTRFS_SUPER_INFO_OFFSET); 7336 if (!sb) 7337 return -ENOMEM; 7338 set_extent_buffer_uptodate(sb); 7339 7340 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); 7341 array_size = btrfs_super_sys_array_size(super_copy); 7342 7343 array_ptr = super_copy->sys_chunk_array; 7344 sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array); 7345 cur_offset = 0; 7346 7347 while (cur_offset < array_size) { 7348 disk_key = (struct btrfs_disk_key *)array_ptr; 7349 len = sizeof(*disk_key); 7350 if (cur_offset + len > array_size) 7351 goto out_short_read; 7352 7353 btrfs_disk_key_to_cpu(&key, disk_key); 7354 7355 array_ptr += len; 7356 sb_array_offset += len; 7357 cur_offset += len; 7358 7359 if (key.type != BTRFS_CHUNK_ITEM_KEY) { 7360 btrfs_err(fs_info, 7361 "unexpected item type %u in sys_array at offset %u", 7362 (u32)key.type, cur_offset); 7363 ret = -EIO; 7364 break; 7365 } 7366 7367 chunk = (struct btrfs_chunk *)sb_array_offset; 7368 /* 7369 * At least one btrfs_chunk with one stripe must be present, 7370 * exact stripe count check comes afterwards 7371 */ 7372 len = btrfs_chunk_item_size(1); 7373 if (cur_offset + len > array_size) 7374 goto out_short_read; 7375 7376 num_stripes = btrfs_chunk_num_stripes(sb, chunk); 7377 if (!num_stripes) { 7378 btrfs_err(fs_info, 7379 "invalid number of stripes %u in sys_array at offset %u", 7380 num_stripes, cur_offset); 7381 ret = -EIO; 7382 break; 7383 } 7384 7385 type = btrfs_chunk_type(sb, chunk); 7386 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) { 7387 btrfs_err(fs_info, 7388 "invalid chunk type %llu in sys_array at offset %u", 7389 type, cur_offset); 7390 ret = -EIO; 7391 break; 7392 } 7393 7394 len = btrfs_chunk_item_size(num_stripes); 7395 if (cur_offset + len > array_size) 7396 goto out_short_read; 7397 7398 ret = read_one_chunk(&key, sb, chunk); 7399 if (ret) 7400 break; 7401 7402 array_ptr += len; 7403 sb_array_offset += len; 7404 cur_offset += len; 7405 } 7406 clear_extent_buffer_uptodate(sb); 7407 free_extent_buffer_stale(sb); 7408 return ret; 7409 7410 out_short_read: 7411 btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u", 7412 len, cur_offset); 7413 clear_extent_buffer_uptodate(sb); 7414 free_extent_buffer_stale(sb); 7415 return -EIO; 7416 } 7417 7418 /* 7419 * Check if all chunks in the fs are OK for read-write degraded mount 7420 * 7421 * If the @failing_dev is specified, it's accounted as missing. 7422 * 7423 * Return true if all chunks meet the minimal RW mount requirements. 7424 * Return false if any chunk doesn't meet the minimal RW mount requirements. 7425 */ 7426 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info, 7427 struct btrfs_device *failing_dev) 7428 { 7429 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 7430 struct extent_map *em; 7431 u64 next_start = 0; 7432 bool ret = true; 7433 7434 read_lock(&map_tree->lock); 7435 em = lookup_extent_mapping(map_tree, 0, (u64)-1); 7436 read_unlock(&map_tree->lock); 7437 /* No chunk at all? Return false anyway */ 7438 if (!em) { 7439 ret = false; 7440 goto out; 7441 } 7442 while (em) { 7443 struct map_lookup *map; 7444 int missing = 0; 7445 int max_tolerated; 7446 int i; 7447 7448 map = em->map_lookup; 7449 max_tolerated = 7450 btrfs_get_num_tolerated_disk_barrier_failures( 7451 map->type); 7452 for (i = 0; i < map->num_stripes; i++) { 7453 struct btrfs_device *dev = map->stripes[i].dev; 7454 7455 if (!dev || !dev->bdev || 7456 test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) || 7457 dev->last_flush_error) 7458 missing++; 7459 else if (failing_dev && failing_dev == dev) 7460 missing++; 7461 } 7462 if (missing > max_tolerated) { 7463 if (!failing_dev) 7464 btrfs_warn(fs_info, 7465 "chunk %llu missing %d devices, max tolerance is %d for writable mount", 7466 em->start, missing, max_tolerated); 7467 free_extent_map(em); 7468 ret = false; 7469 goto out; 7470 } 7471 next_start = extent_map_end(em); 7472 free_extent_map(em); 7473 7474 read_lock(&map_tree->lock); 7475 em = lookup_extent_mapping(map_tree, next_start, 7476 (u64)(-1) - next_start); 7477 read_unlock(&map_tree->lock); 7478 } 7479 out: 7480 return ret; 7481 } 7482 7483 static void readahead_tree_node_children(struct extent_buffer *node) 7484 { 7485 int i; 7486 const int nr_items = btrfs_header_nritems(node); 7487 7488 for (i = 0; i < nr_items; i++) 7489 btrfs_readahead_node_child(node, i); 7490 } 7491 7492 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info) 7493 { 7494 struct btrfs_root *root = fs_info->chunk_root; 7495 struct btrfs_path *path; 7496 struct extent_buffer *leaf; 7497 struct btrfs_key key; 7498 struct btrfs_key found_key; 7499 int ret; 7500 int slot; 7501 int iter_ret = 0; 7502 u64 total_dev = 0; 7503 u64 last_ra_node = 0; 7504 7505 path = btrfs_alloc_path(); 7506 if (!path) 7507 return -ENOMEM; 7508 7509 /* 7510 * uuid_mutex is needed only if we are mounting a sprout FS 7511 * otherwise we don't need it. 7512 */ 7513 mutex_lock(&uuid_mutex); 7514 7515 /* 7516 * It is possible for mount and umount to race in such a way that 7517 * we execute this code path, but open_fs_devices failed to clear 7518 * total_rw_bytes. We certainly want it cleared before reading the 7519 * device items, so clear it here. 7520 */ 7521 fs_info->fs_devices->total_rw_bytes = 0; 7522 7523 /* 7524 * Lockdep complains about possible circular locking dependency between 7525 * a disk's open_mutex (struct gendisk.open_mutex), the rw semaphores 7526 * used for freeze procection of a fs (struct super_block.s_writers), 7527 * which we take when starting a transaction, and extent buffers of the 7528 * chunk tree if we call read_one_dev() while holding a lock on an 7529 * extent buffer of the chunk tree. Since we are mounting the filesystem 7530 * and at this point there can't be any concurrent task modifying the 7531 * chunk tree, to keep it simple, just skip locking on the chunk tree. 7532 */ 7533 ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags)); 7534 path->skip_locking = 1; 7535 7536 /* 7537 * Read all device items, and then all the chunk items. All 7538 * device items are found before any chunk item (their object id 7539 * is smaller than the lowest possible object id for a chunk 7540 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID). 7541 */ 7542 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 7543 key.offset = 0; 7544 key.type = 0; 7545 btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) { 7546 struct extent_buffer *node = path->nodes[1]; 7547 7548 leaf = path->nodes[0]; 7549 slot = path->slots[0]; 7550 7551 if (node) { 7552 if (last_ra_node != node->start) { 7553 readahead_tree_node_children(node); 7554 last_ra_node = node->start; 7555 } 7556 } 7557 if (found_key.type == BTRFS_DEV_ITEM_KEY) { 7558 struct btrfs_dev_item *dev_item; 7559 dev_item = btrfs_item_ptr(leaf, slot, 7560 struct btrfs_dev_item); 7561 ret = read_one_dev(leaf, dev_item); 7562 if (ret) 7563 goto error; 7564 total_dev++; 7565 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) { 7566 struct btrfs_chunk *chunk; 7567 7568 /* 7569 * We are only called at mount time, so no need to take 7570 * fs_info->chunk_mutex. Plus, to avoid lockdep warnings, 7571 * we always lock first fs_info->chunk_mutex before 7572 * acquiring any locks on the chunk tree. This is a 7573 * requirement for chunk allocation, see the comment on 7574 * top of btrfs_chunk_alloc() for details. 7575 */ 7576 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 7577 ret = read_one_chunk(&found_key, leaf, chunk); 7578 if (ret) 7579 goto error; 7580 } 7581 } 7582 /* Catch error found during iteration */ 7583 if (iter_ret < 0) { 7584 ret = iter_ret; 7585 goto error; 7586 } 7587 7588 /* 7589 * After loading chunk tree, we've got all device information, 7590 * do another round of validation checks. 7591 */ 7592 if (total_dev != fs_info->fs_devices->total_devices) { 7593 btrfs_warn(fs_info, 7594 "super block num_devices %llu mismatch with DEV_ITEM count %llu, will be repaired on next transaction commit", 7595 btrfs_super_num_devices(fs_info->super_copy), 7596 total_dev); 7597 fs_info->fs_devices->total_devices = total_dev; 7598 btrfs_set_super_num_devices(fs_info->super_copy, total_dev); 7599 } 7600 if (btrfs_super_total_bytes(fs_info->super_copy) < 7601 fs_info->fs_devices->total_rw_bytes) { 7602 btrfs_err(fs_info, 7603 "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu", 7604 btrfs_super_total_bytes(fs_info->super_copy), 7605 fs_info->fs_devices->total_rw_bytes); 7606 ret = -EINVAL; 7607 goto error; 7608 } 7609 ret = 0; 7610 error: 7611 mutex_unlock(&uuid_mutex); 7612 7613 btrfs_free_path(path); 7614 return ret; 7615 } 7616 7617 void btrfs_init_devices_late(struct btrfs_fs_info *fs_info) 7618 { 7619 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs; 7620 struct btrfs_device *device; 7621 7622 fs_devices->fs_info = fs_info; 7623 7624 mutex_lock(&fs_devices->device_list_mutex); 7625 list_for_each_entry(device, &fs_devices->devices, dev_list) 7626 device->fs_info = fs_info; 7627 7628 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 7629 list_for_each_entry(device, &seed_devs->devices, dev_list) 7630 device->fs_info = fs_info; 7631 7632 seed_devs->fs_info = fs_info; 7633 } 7634 mutex_unlock(&fs_devices->device_list_mutex); 7635 } 7636 7637 static u64 btrfs_dev_stats_value(const struct extent_buffer *eb, 7638 const struct btrfs_dev_stats_item *ptr, 7639 int index) 7640 { 7641 u64 val; 7642 7643 read_extent_buffer(eb, &val, 7644 offsetof(struct btrfs_dev_stats_item, values) + 7645 ((unsigned long)ptr) + (index * sizeof(u64)), 7646 sizeof(val)); 7647 return val; 7648 } 7649 7650 static void btrfs_set_dev_stats_value(struct extent_buffer *eb, 7651 struct btrfs_dev_stats_item *ptr, 7652 int index, u64 val) 7653 { 7654 write_extent_buffer(eb, &val, 7655 offsetof(struct btrfs_dev_stats_item, values) + 7656 ((unsigned long)ptr) + (index * sizeof(u64)), 7657 sizeof(val)); 7658 } 7659 7660 static int btrfs_device_init_dev_stats(struct btrfs_device *device, 7661 struct btrfs_path *path) 7662 { 7663 struct btrfs_dev_stats_item *ptr; 7664 struct extent_buffer *eb; 7665 struct btrfs_key key; 7666 int item_size; 7667 int i, ret, slot; 7668 7669 if (!device->fs_info->dev_root) 7670 return 0; 7671 7672 key.objectid = BTRFS_DEV_STATS_OBJECTID; 7673 key.type = BTRFS_PERSISTENT_ITEM_KEY; 7674 key.offset = device->devid; 7675 ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0); 7676 if (ret) { 7677 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7678 btrfs_dev_stat_set(device, i, 0); 7679 device->dev_stats_valid = 1; 7680 btrfs_release_path(path); 7681 return ret < 0 ? ret : 0; 7682 } 7683 slot = path->slots[0]; 7684 eb = path->nodes[0]; 7685 item_size = btrfs_item_size(eb, slot); 7686 7687 ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item); 7688 7689 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) { 7690 if (item_size >= (1 + i) * sizeof(__le64)) 7691 btrfs_dev_stat_set(device, i, 7692 btrfs_dev_stats_value(eb, ptr, i)); 7693 else 7694 btrfs_dev_stat_set(device, i, 0); 7695 } 7696 7697 device->dev_stats_valid = 1; 7698 btrfs_dev_stat_print_on_load(device); 7699 btrfs_release_path(path); 7700 7701 return 0; 7702 } 7703 7704 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info) 7705 { 7706 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs; 7707 struct btrfs_device *device; 7708 struct btrfs_path *path = NULL; 7709 int ret = 0; 7710 7711 path = btrfs_alloc_path(); 7712 if (!path) 7713 return -ENOMEM; 7714 7715 mutex_lock(&fs_devices->device_list_mutex); 7716 list_for_each_entry(device, &fs_devices->devices, dev_list) { 7717 ret = btrfs_device_init_dev_stats(device, path); 7718 if (ret) 7719 goto out; 7720 } 7721 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 7722 list_for_each_entry(device, &seed_devs->devices, dev_list) { 7723 ret = btrfs_device_init_dev_stats(device, path); 7724 if (ret) 7725 goto out; 7726 } 7727 } 7728 out: 7729 mutex_unlock(&fs_devices->device_list_mutex); 7730 7731 btrfs_free_path(path); 7732 return ret; 7733 } 7734 7735 static int update_dev_stat_item(struct btrfs_trans_handle *trans, 7736 struct btrfs_device *device) 7737 { 7738 struct btrfs_fs_info *fs_info = trans->fs_info; 7739 struct btrfs_root *dev_root = fs_info->dev_root; 7740 struct btrfs_path *path; 7741 struct btrfs_key key; 7742 struct extent_buffer *eb; 7743 struct btrfs_dev_stats_item *ptr; 7744 int ret; 7745 int i; 7746 7747 key.objectid = BTRFS_DEV_STATS_OBJECTID; 7748 key.type = BTRFS_PERSISTENT_ITEM_KEY; 7749 key.offset = device->devid; 7750 7751 path = btrfs_alloc_path(); 7752 if (!path) 7753 return -ENOMEM; 7754 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1); 7755 if (ret < 0) { 7756 btrfs_warn_in_rcu(fs_info, 7757 "error %d while searching for dev_stats item for device %s", 7758 ret, rcu_str_deref(device->name)); 7759 goto out; 7760 } 7761 7762 if (ret == 0 && 7763 btrfs_item_size(path->nodes[0], path->slots[0]) < sizeof(*ptr)) { 7764 /* need to delete old one and insert a new one */ 7765 ret = btrfs_del_item(trans, dev_root, path); 7766 if (ret != 0) { 7767 btrfs_warn_in_rcu(fs_info, 7768 "delete too small dev_stats item for device %s failed %d", 7769 rcu_str_deref(device->name), ret); 7770 goto out; 7771 } 7772 ret = 1; 7773 } 7774 7775 if (ret == 1) { 7776 /* need to insert a new item */ 7777 btrfs_release_path(path); 7778 ret = btrfs_insert_empty_item(trans, dev_root, path, 7779 &key, sizeof(*ptr)); 7780 if (ret < 0) { 7781 btrfs_warn_in_rcu(fs_info, 7782 "insert dev_stats item for device %s failed %d", 7783 rcu_str_deref(device->name), ret); 7784 goto out; 7785 } 7786 } 7787 7788 eb = path->nodes[0]; 7789 ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item); 7790 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7791 btrfs_set_dev_stats_value(eb, ptr, i, 7792 btrfs_dev_stat_read(device, i)); 7793 btrfs_mark_buffer_dirty(eb); 7794 7795 out: 7796 btrfs_free_path(path); 7797 return ret; 7798 } 7799 7800 /* 7801 * called from commit_transaction. Writes all changed device stats to disk. 7802 */ 7803 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans) 7804 { 7805 struct btrfs_fs_info *fs_info = trans->fs_info; 7806 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7807 struct btrfs_device *device; 7808 int stats_cnt; 7809 int ret = 0; 7810 7811 mutex_lock(&fs_devices->device_list_mutex); 7812 list_for_each_entry(device, &fs_devices->devices, dev_list) { 7813 stats_cnt = atomic_read(&device->dev_stats_ccnt); 7814 if (!device->dev_stats_valid || stats_cnt == 0) 7815 continue; 7816 7817 7818 /* 7819 * There is a LOAD-LOAD control dependency between the value of 7820 * dev_stats_ccnt and updating the on-disk values which requires 7821 * reading the in-memory counters. Such control dependencies 7822 * require explicit read memory barriers. 7823 * 7824 * This memory barriers pairs with smp_mb__before_atomic in 7825 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full 7826 * barrier implied by atomic_xchg in 7827 * btrfs_dev_stats_read_and_reset 7828 */ 7829 smp_rmb(); 7830 7831 ret = update_dev_stat_item(trans, device); 7832 if (!ret) 7833 atomic_sub(stats_cnt, &device->dev_stats_ccnt); 7834 } 7835 mutex_unlock(&fs_devices->device_list_mutex); 7836 7837 return ret; 7838 } 7839 7840 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index) 7841 { 7842 btrfs_dev_stat_inc(dev, index); 7843 btrfs_dev_stat_print_on_error(dev); 7844 } 7845 7846 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev) 7847 { 7848 if (!dev->dev_stats_valid) 7849 return; 7850 btrfs_err_rl_in_rcu(dev->fs_info, 7851 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u", 7852 rcu_str_deref(dev->name), 7853 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), 7854 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS), 7855 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS), 7856 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS), 7857 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS)); 7858 } 7859 7860 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev) 7861 { 7862 int i; 7863 7864 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7865 if (btrfs_dev_stat_read(dev, i) != 0) 7866 break; 7867 if (i == BTRFS_DEV_STAT_VALUES_MAX) 7868 return; /* all values == 0, suppress message */ 7869 7870 btrfs_info_in_rcu(dev->fs_info, 7871 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u", 7872 rcu_str_deref(dev->name), 7873 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), 7874 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS), 7875 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS), 7876 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS), 7877 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS)); 7878 } 7879 7880 int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info, 7881 struct btrfs_ioctl_get_dev_stats *stats) 7882 { 7883 BTRFS_DEV_LOOKUP_ARGS(args); 7884 struct btrfs_device *dev; 7885 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7886 int i; 7887 7888 mutex_lock(&fs_devices->device_list_mutex); 7889 args.devid = stats->devid; 7890 dev = btrfs_find_device(fs_info->fs_devices, &args); 7891 mutex_unlock(&fs_devices->device_list_mutex); 7892 7893 if (!dev) { 7894 btrfs_warn(fs_info, "get dev_stats failed, device not found"); 7895 return -ENODEV; 7896 } else if (!dev->dev_stats_valid) { 7897 btrfs_warn(fs_info, "get dev_stats failed, not yet valid"); 7898 return -ENODEV; 7899 } else if (stats->flags & BTRFS_DEV_STATS_RESET) { 7900 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) { 7901 if (stats->nr_items > i) 7902 stats->values[i] = 7903 btrfs_dev_stat_read_and_reset(dev, i); 7904 else 7905 btrfs_dev_stat_set(dev, i, 0); 7906 } 7907 btrfs_info(fs_info, "device stats zeroed by %s (%d)", 7908 current->comm, task_pid_nr(current)); 7909 } else { 7910 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7911 if (stats->nr_items > i) 7912 stats->values[i] = btrfs_dev_stat_read(dev, i); 7913 } 7914 if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX) 7915 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX; 7916 return 0; 7917 } 7918 7919 /* 7920 * Update the size and bytes used for each device where it changed. This is 7921 * delayed since we would otherwise get errors while writing out the 7922 * superblocks. 7923 * 7924 * Must be invoked during transaction commit. 7925 */ 7926 void btrfs_commit_device_sizes(struct btrfs_transaction *trans) 7927 { 7928 struct btrfs_device *curr, *next; 7929 7930 ASSERT(trans->state == TRANS_STATE_COMMIT_DOING); 7931 7932 if (list_empty(&trans->dev_update_list)) 7933 return; 7934 7935 /* 7936 * We don't need the device_list_mutex here. This list is owned by the 7937 * transaction and the transaction must complete before the device is 7938 * released. 7939 */ 7940 mutex_lock(&trans->fs_info->chunk_mutex); 7941 list_for_each_entry_safe(curr, next, &trans->dev_update_list, 7942 post_commit_list) { 7943 list_del_init(&curr->post_commit_list); 7944 curr->commit_total_bytes = curr->disk_total_bytes; 7945 curr->commit_bytes_used = curr->bytes_used; 7946 } 7947 mutex_unlock(&trans->fs_info->chunk_mutex); 7948 } 7949 7950 /* 7951 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10. 7952 */ 7953 int btrfs_bg_type_to_factor(u64 flags) 7954 { 7955 const int index = btrfs_bg_flags_to_raid_index(flags); 7956 7957 return btrfs_raid_array[index].ncopies; 7958 } 7959 7960 7961 7962 static int verify_one_dev_extent(struct btrfs_fs_info *fs_info, 7963 u64 chunk_offset, u64 devid, 7964 u64 physical_offset, u64 physical_len) 7965 { 7966 struct btrfs_dev_lookup_args args = { .devid = devid }; 7967 struct extent_map_tree *em_tree = &fs_info->mapping_tree; 7968 struct extent_map *em; 7969 struct map_lookup *map; 7970 struct btrfs_device *dev; 7971 u64 stripe_len; 7972 bool found = false; 7973 int ret = 0; 7974 int i; 7975 7976 read_lock(&em_tree->lock); 7977 em = lookup_extent_mapping(em_tree, chunk_offset, 1); 7978 read_unlock(&em_tree->lock); 7979 7980 if (!em) { 7981 btrfs_err(fs_info, 7982 "dev extent physical offset %llu on devid %llu doesn't have corresponding chunk", 7983 physical_offset, devid); 7984 ret = -EUCLEAN; 7985 goto out; 7986 } 7987 7988 map = em->map_lookup; 7989 stripe_len = btrfs_calc_stripe_length(em); 7990 if (physical_len != stripe_len) { 7991 btrfs_err(fs_info, 7992 "dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu", 7993 physical_offset, devid, em->start, physical_len, 7994 stripe_len); 7995 ret = -EUCLEAN; 7996 goto out; 7997 } 7998 7999 for (i = 0; i < map->num_stripes; i++) { 8000 if (map->stripes[i].dev->devid == devid && 8001 map->stripes[i].physical == physical_offset) { 8002 found = true; 8003 if (map->verified_stripes >= map->num_stripes) { 8004 btrfs_err(fs_info, 8005 "too many dev extents for chunk %llu found", 8006 em->start); 8007 ret = -EUCLEAN; 8008 goto out; 8009 } 8010 map->verified_stripes++; 8011 break; 8012 } 8013 } 8014 if (!found) { 8015 btrfs_err(fs_info, 8016 "dev extent physical offset %llu devid %llu has no corresponding chunk", 8017 physical_offset, devid); 8018 ret = -EUCLEAN; 8019 } 8020 8021 /* Make sure no dev extent is beyond device boundary */ 8022 dev = btrfs_find_device(fs_info->fs_devices, &args); 8023 if (!dev) { 8024 btrfs_err(fs_info, "failed to find devid %llu", devid); 8025 ret = -EUCLEAN; 8026 goto out; 8027 } 8028 8029 if (physical_offset + physical_len > dev->disk_total_bytes) { 8030 btrfs_err(fs_info, 8031 "dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu", 8032 devid, physical_offset, physical_len, 8033 dev->disk_total_bytes); 8034 ret = -EUCLEAN; 8035 goto out; 8036 } 8037 8038 if (dev->zone_info) { 8039 u64 zone_size = dev->zone_info->zone_size; 8040 8041 if (!IS_ALIGNED(physical_offset, zone_size) || 8042 !IS_ALIGNED(physical_len, zone_size)) { 8043 btrfs_err(fs_info, 8044 "zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone", 8045 devid, physical_offset, physical_len); 8046 ret = -EUCLEAN; 8047 goto out; 8048 } 8049 } 8050 8051 out: 8052 free_extent_map(em); 8053 return ret; 8054 } 8055 8056 static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info) 8057 { 8058 struct extent_map_tree *em_tree = &fs_info->mapping_tree; 8059 struct extent_map *em; 8060 struct rb_node *node; 8061 int ret = 0; 8062 8063 read_lock(&em_tree->lock); 8064 for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) { 8065 em = rb_entry(node, struct extent_map, rb_node); 8066 if (em->map_lookup->num_stripes != 8067 em->map_lookup->verified_stripes) { 8068 btrfs_err(fs_info, 8069 "chunk %llu has missing dev extent, have %d expect %d", 8070 em->start, em->map_lookup->verified_stripes, 8071 em->map_lookup->num_stripes); 8072 ret = -EUCLEAN; 8073 goto out; 8074 } 8075 } 8076 out: 8077 read_unlock(&em_tree->lock); 8078 return ret; 8079 } 8080 8081 /* 8082 * Ensure that all dev extents are mapped to correct chunk, otherwise 8083 * later chunk allocation/free would cause unexpected behavior. 8084 * 8085 * NOTE: This will iterate through the whole device tree, which should be of 8086 * the same size level as the chunk tree. This slightly increases mount time. 8087 */ 8088 int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info) 8089 { 8090 struct btrfs_path *path; 8091 struct btrfs_root *root = fs_info->dev_root; 8092 struct btrfs_key key; 8093 u64 prev_devid = 0; 8094 u64 prev_dev_ext_end = 0; 8095 int ret = 0; 8096 8097 /* 8098 * We don't have a dev_root because we mounted with ignorebadroots and 8099 * failed to load the root, so we want to skip the verification in this 8100 * case for sure. 8101 * 8102 * However if the dev root is fine, but the tree itself is corrupted 8103 * we'd still fail to mount. This verification is only to make sure 8104 * writes can happen safely, so instead just bypass this check 8105 * completely in the case of IGNOREBADROOTS. 8106 */ 8107 if (btrfs_test_opt(fs_info, IGNOREBADROOTS)) 8108 return 0; 8109 8110 key.objectid = 1; 8111 key.type = BTRFS_DEV_EXTENT_KEY; 8112 key.offset = 0; 8113 8114 path = btrfs_alloc_path(); 8115 if (!path) 8116 return -ENOMEM; 8117 8118 path->reada = READA_FORWARD; 8119 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 8120 if (ret < 0) 8121 goto out; 8122 8123 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { 8124 ret = btrfs_next_leaf(root, path); 8125 if (ret < 0) 8126 goto out; 8127 /* No dev extents at all? Not good */ 8128 if (ret > 0) { 8129 ret = -EUCLEAN; 8130 goto out; 8131 } 8132 } 8133 while (1) { 8134 struct extent_buffer *leaf = path->nodes[0]; 8135 struct btrfs_dev_extent *dext; 8136 int slot = path->slots[0]; 8137 u64 chunk_offset; 8138 u64 physical_offset; 8139 u64 physical_len; 8140 u64 devid; 8141 8142 btrfs_item_key_to_cpu(leaf, &key, slot); 8143 if (key.type != BTRFS_DEV_EXTENT_KEY) 8144 break; 8145 devid = key.objectid; 8146 physical_offset = key.offset; 8147 8148 dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent); 8149 chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext); 8150 physical_len = btrfs_dev_extent_length(leaf, dext); 8151 8152 /* Check if this dev extent overlaps with the previous one */ 8153 if (devid == prev_devid && physical_offset < prev_dev_ext_end) { 8154 btrfs_err(fs_info, 8155 "dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu", 8156 devid, physical_offset, prev_dev_ext_end); 8157 ret = -EUCLEAN; 8158 goto out; 8159 } 8160 8161 ret = verify_one_dev_extent(fs_info, chunk_offset, devid, 8162 physical_offset, physical_len); 8163 if (ret < 0) 8164 goto out; 8165 prev_devid = devid; 8166 prev_dev_ext_end = physical_offset + physical_len; 8167 8168 ret = btrfs_next_item(root, path); 8169 if (ret < 0) 8170 goto out; 8171 if (ret > 0) { 8172 ret = 0; 8173 break; 8174 } 8175 } 8176 8177 /* Ensure all chunks have corresponding dev extents */ 8178 ret = verify_chunk_dev_extent_mapping(fs_info); 8179 out: 8180 btrfs_free_path(path); 8181 return ret; 8182 } 8183 8184 /* 8185 * Check whether the given block group or device is pinned by any inode being 8186 * used as a swapfile. 8187 */ 8188 bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr) 8189 { 8190 struct btrfs_swapfile_pin *sp; 8191 struct rb_node *node; 8192 8193 spin_lock(&fs_info->swapfile_pins_lock); 8194 node = fs_info->swapfile_pins.rb_node; 8195 while (node) { 8196 sp = rb_entry(node, struct btrfs_swapfile_pin, node); 8197 if (ptr < sp->ptr) 8198 node = node->rb_left; 8199 else if (ptr > sp->ptr) 8200 node = node->rb_right; 8201 else 8202 break; 8203 } 8204 spin_unlock(&fs_info->swapfile_pins_lock); 8205 return node != NULL; 8206 } 8207 8208 static int relocating_repair_kthread(void *data) 8209 { 8210 struct btrfs_block_group *cache = data; 8211 struct btrfs_fs_info *fs_info = cache->fs_info; 8212 u64 target; 8213 int ret = 0; 8214 8215 target = cache->start; 8216 btrfs_put_block_group(cache); 8217 8218 sb_start_write(fs_info->sb); 8219 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) { 8220 btrfs_info(fs_info, 8221 "zoned: skip relocating block group %llu to repair: EBUSY", 8222 target); 8223 sb_end_write(fs_info->sb); 8224 return -EBUSY; 8225 } 8226 8227 mutex_lock(&fs_info->reclaim_bgs_lock); 8228 8229 /* Ensure block group still exists */ 8230 cache = btrfs_lookup_block_group(fs_info, target); 8231 if (!cache) 8232 goto out; 8233 8234 if (!cache->relocating_repair) 8235 goto out; 8236 8237 ret = btrfs_may_alloc_data_chunk(fs_info, target); 8238 if (ret < 0) 8239 goto out; 8240 8241 btrfs_info(fs_info, 8242 "zoned: relocating block group %llu to repair IO failure", 8243 target); 8244 ret = btrfs_relocate_chunk(fs_info, target); 8245 8246 out: 8247 if (cache) 8248 btrfs_put_block_group(cache); 8249 mutex_unlock(&fs_info->reclaim_bgs_lock); 8250 btrfs_exclop_finish(fs_info); 8251 sb_end_write(fs_info->sb); 8252 8253 return ret; 8254 } 8255 8256 bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical) 8257 { 8258 struct btrfs_block_group *cache; 8259 8260 if (!btrfs_is_zoned(fs_info)) 8261 return false; 8262 8263 /* Do not attempt to repair in degraded state */ 8264 if (btrfs_test_opt(fs_info, DEGRADED)) 8265 return true; 8266 8267 cache = btrfs_lookup_block_group(fs_info, logical); 8268 if (!cache) 8269 return true; 8270 8271 spin_lock(&cache->lock); 8272 if (cache->relocating_repair) { 8273 spin_unlock(&cache->lock); 8274 btrfs_put_block_group(cache); 8275 return true; 8276 } 8277 cache->relocating_repair = 1; 8278 spin_unlock(&cache->lock); 8279 8280 kthread_run(relocating_repair_kthread, cache, 8281 "btrfs-relocating-repair"); 8282 8283 return true; 8284 } 8285